# // Copyright (c) 2025 Bytedance Ltd. and/or its affiliates
# //
# // Licensed under the Apache License, Version 2.0 (the "License");
# // you may not use this file except in compliance with the License.
# // You may obtain a copy of the License at
# //
# //     http://www.apache.org/licenses/LICENSE-2.0
# //
# // Unless required by applicable law or agreed to in writing, software
# // distributed under the License is distributed on an "AS IS" BASIS,
# // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# // See the License for the specific language governing permissions and
# // limitations under the License.
import torch
import torch.nn as nn


class ScalingLayer(nn.Module):
    """Shifts and scales image tensors, e.g. mapping [0, 1] inputs to [-1, 1]."""

    def __init__(self, mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]):
        super().__init__()
        # Buffers move with the module (.to()/.cuda()) but are not trained.
        self.register_buffer('shift', torch.Tensor(mean)[None, :, None, None])
        self.register_buffer('scale', torch.Tensor(std)[None, :, None, None])

    def forward(self, inp):
        # Normalize: (x - mean) / std, broadcast over (N, C, H, W).
        return (inp - self.shift) / self.scale

    def inv(self, inp):
        # Denormalize: undo forward(), mapping back to the original range.
        return inp * self.scale + self.shift
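

if __name__ == "__main__":
    # Minimal usage sketch (illustrative only, not part of the original module):
    # normalize a batch of RGB images in [0, 1] and check that inv() restores it.
    layer = ScalingLayer()
    images = torch.rand(2, 3, 64, 64)   # (N, C, H, W) values in [0, 1]
    normalized = layer(images)          # roughly in [-1, 1] with the default mean/std
    restored = layer.inv(normalized)    # back to the original range
    assert torch.allclose(restored, images, atol=1e-6)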