xiezhy6 / PASTA-GAN-plusplus


NameError: name 'SynthesisLayer' is not defined #12

Closed FatemeZamanian closed 1 year ago

FatemeZamanian commented 1 year ago

Hi, when I run sh train.sh I get this error:

Traceback (most recent call last):
  File "train.py", line 573, in <module>
    main() # pylint: disable=no-value-for-parameter
  File "/home/fatemeh/anaconda3/envs/pasta/lib/python3.7/site-packages/click/core.py", line 1130, in __call__
    return self.main(*args, **kwargs)
  File "/home/fatemeh/anaconda3/envs/pasta/lib/python3.7/site-packages/click/core.py", line 1055, in main
    rv = self.invoke(ctx)
  File "/home/fatemeh/anaconda3/envs/pasta/lib/python3.7/site-packages/click/core.py", line 1404, in invoke
    return ctx.invoke(self.callback, **ctx.params)
  File "/home/fatemeh/anaconda3/envs/pasta/lib/python3.7/site-packages/click/core.py", line 760, in invoke
    return __callback(*args, **kwargs)
  File "/home/fatemeh/anaconda3/envs/pasta/lib/python3.7/site-packages/click/decorators.py", line 26, in new_func
    return f(get_current_context(), *args, **kwargs)
  File "train.py", line 566, in main
    subprocess_fn(rank=0, args=args, temp_dir=temp_dir)
  File "train.py", line 410, in subprocess_fn
    training_loop.training_loop(rank=rank, **args)
  File "/home/fatemeh/pasta-gan-plusplus/pasta-gan-plusplus/training/training_loop_fullbody.py", line 408, in training_loop
    G = dnnlib.util.construct_class_by_name(**G_kwargs, **common_kwargs).train().requires_grad_(False).to(device) # subclass of torch.nn.Module
  File "/home/fatemeh/pasta-gan-plusplus/pasta-gan-plusplus/dnnlib/util.py", line 289, in construct_class_by_name
    return call_func_by_name(*args, func_name=class_name, **kwargs)
  File "/home/fatemeh/pasta-gan-plusplus/pasta-gan-plusplus/dnnlib/util.py", line 284, in call_func_by_name
    return func_obj(*args, **kwargs)
  File "/home/fatemeh/pasta-gan-plusplus/pasta-gan-plusplus/torch_utils/persistence.py", line 104, in __init__
    super().__init__(*args, **kwargs)
  File "/home/fatemeh/pasta-gan-plusplus/pasta-gan-plusplus/training/networks.py", line 2347, in __init__
    self.synthesis = SynthesisNetworkFull_v18(w_dim=w_dim, img_resolution=img_resolution, img_channels=img_channels, **synthesis_kwargs)
  File "/home/fatemeh/pasta-gan-plusplus/pasta-gan-plusplus/torch_utils/persistence.py", line 104, in __init__
    super().__init__(*args, **kwargs)
  File "/home/fatemeh/pasta-gan-plusplus/pasta-gan-plusplus/training/networks.py", line 2227, in __init__
    img_channels=img_channels, is_last=is_last, is_style=True, use_fp16=use_fp16, **block_kwargs)
  File "/home/fatemeh/pasta-gan-plusplus/pasta-gan-plusplus/torch_utils/persistence.py", line 104, in __init__
    super().__init__(*args, **kwargs)
  File "/home/fatemeh/pasta-gan-plusplus/pasta-gan-plusplus/training/networks.py", line 2122, in __init__
    self.conv0 = SynthesisLayer(in_channels, out_channels, w_dim=w_dim, resolution=resolution, up=2,
NameError: name 'SynthesisLayer' is not defined

What is the problem?

FatemeZamanian commented 1 year ago

Just add this to the networks.py file (it is the standard StyleGAN2-ADA SynthesisLayer, which networks.py references but never defines):

@persistence.persistent_class
class SynthesisLayer(torch.nn.Module):
    def __init__(self,
        in_channels,                    # Number of input channels.
        out_channels,                   # Number of output channels.
        w_dim,                          # Intermediate latent (W) dimensionality.
        resolution,                     # Resolution of this layer.
        kernel_size     = 3,            # Convolution kernel size.
        up              = 1,            # Integer upsampling factor.
        use_noise       = True,         # Enable noise input?
        activation      = 'lrelu',      # Activation function: 'relu', 'lrelu', etc.
        resample_filter = [1,3,3,1],    # Low-pass filter to apply when resampling activations.
        conv_clamp      = None,         # Clamp the output of convolution layers to +-X, None = disable clamping.
        channels_last   = False,        # Use channels_last format for the weights?
    ):
        super().__init__()
        self.resolution = resolution
        self.up = up
        self.use_noise = use_noise
        self.activation = activation
        self.conv_clamp = conv_clamp
        self.register_buffer('resample_filter', upfirdn2d.setup_filter(resample_filter))
        self.padding = kernel_size // 2
        self.act_gain = bias_act.activation_funcs[activation].def_gain

        self.affine = FullyConnectedLayer(w_dim, in_channels, bias_init=1)
        memory_format = torch.channels_last if channels_last else torch.contiguous_format
        self.weight = torch.nn.Parameter(torch.randn([out_channels, in_channels, kernel_size, kernel_size]).to(memory_format=memory_format))
        if use_noise:
            self.register_buffer('noise_const', torch.randn([resolution, resolution]))
            self.noise_strength = torch.nn.Parameter(torch.zeros([]))
        self.bias = torch.nn.Parameter(torch.zeros([out_channels]))

    def forward(self, x, w, noise_mode='random', fused_modconv=True, gain=1):
        assert noise_mode in ['random', 'const', 'none']
        in_resolution = self.resolution // self.up
        misc.assert_shape(x, [None, self.weight.shape[1], in_resolution, in_resolution])
        styles = self.affine(w)

        noise = None
        if self.use_noise and noise_mode == 'random':
            noise = torch.randn([x.shape[0], 1, self.resolution, self.resolution], device=x.device) * self.noise_strength
        if self.use_noise and noise_mode == 'const':
            noise = self.noise_const * self.noise_strength

        flip_weight = (self.up == 1) # slightly faster
        x = modulated_conv2d(x=x, weight=self.weight, styles=styles, noise=noise, up=self.up,
            padding=self.padding, resample_filter=self.resample_filter, flip_weight=flip_weight, fused_modconv=fused_modconv)

        act_gain = self.act_gain * gain
        act_clamp = self.conv_clamp * gain if self.conv_clamp is not None else None
        x = bias_act.bias_act(x, self.bias.to(x.dtype), act=self.activation, gain=act_gain, clamp=act_clamp)
        return x
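
For a quick sanity check before relaunching training, something along these lines should confirm the class now resolves (a minimal sketch, assuming networks.py already contains the usual StyleGAN2-ADA helpers FullyConnectedLayer, modulated_conv2d, upfirdn2d and bias_act, and that the custom ops can fall back to their reference implementations; the sizes below are only illustrative):

# Hypothetical smoke test, run from the repository root; not part of the original fix.
import torch
from training.networks import SynthesisLayer  # should now import without a NameError

layer = SynthesisLayer(in_channels=64, out_channels=64, w_dim=512, resolution=32, up=1)
x = torch.randn([2, 64, 32, 32])   # feature maps: [batch, in_channels, res, res]
w = torch.randn([2, 512])          # one W-space latent per sample
y = layer(x, w, noise_mode='const')
print(y.shape)                     # expected: torch.Size([2, 64, 32, 32])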