DCGAN generator model: conv_transpose2d(): argument 'output_padding' (position 6) must be tuple of ints, not tuple



I tried implementing a DCGAN generator using the following code:

class Generator(nn.Module):
    """
    Input : `(batch_size, 100)`

    Output: `(batch_size, 3, 64, 64)`

    A generator that takes noise as input and generates images.

    loss: Binary cross entropy loss
    """
    def __init__(self):
        super().__init__()
        # layer_dims = [1024, 512, 256, 128, 64]
        layer_dims = [ngf*8, ngf*4, ngf*2, ngf]
        self.layers = nn.ModuleList([self.upscale_block(in_f, out_f, kernel_size=4, stride=2, padding=1)
                      for in_f, out_f in zip(layer_dims[:-1], layer_dims[1:])])

        self.gen = nn.Sequential(
            self.input_layer(nz, ngf*8, use_bias=False),
            *self.layers,
            self.output_layer(ngf, 3, use_bias=False),
        )


    def forward(self, x):
        print("Generator input: ", x.shape)
        # x = torch.tensor(self.dense(x, 4*4*1024, use_bias=False), dtype=torch.float32)
        # x = x.view(-1, 1024, 4, 4)
        # x = self.gen(x)
        return self.gen(x)


    def upscale_block(self, in_channels, out_channels, kernel_size, stride, padding):
        return nn.Sequential(
            nn.ConvTranspose2d(in_channels, out_channels, kernel_size, stride, padding, bias=False),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(True)
        )
    
    def input_layer(self, in_f, out_f, kernel_size=4, stride=1, padding=0, use_bias=False):
        return nn.Sequential(
            nn.ConvTranspose2d(in_f, out_f, kernel_size, stride, padding, bias=use_bias),
            nn.BatchNorm2d(out_f),
            nn.ReLU(True),
        )
    

    def output_layer(self, in_f, out_f, use_bias=True):
        return nn.Sequential(
            nn.ConvTranspose2d(in_f, out_f, 4, 2, 1, use_bias),
            nn.Tanh()
        )

following the code from the official PyTorch DCGAN tutorial:

# Generator Code

class Generator(nn.Module):
    def __init__(self, ngpu):
        super(Generator, self).__init__()
        self.ngpu = ngpu
        self.main = nn.Sequential(
            # input is Z, going into a convolution
            nn.ConvTranspose2d( nz, ngf * 8, 4, 1, 0, bias=False),
            nn.BatchNorm2d(ngf * 8),
            nn.ReLU(True),
            # state size. (ngf*8) x 4 x 4
            nn.ConvTranspose2d(ngf * 8, ngf * 4, 4, 2, 1, bias=False),
            nn.BatchNorm2d(ngf * 4),
            nn.ReLU(True),
            # state size. (ngf*4) x 8 x 8
            nn.ConvTranspose2d( ngf * 4, ngf * 2, 4, 2, 1, bias=False),
            nn.BatchNorm2d(ngf * 2),
            nn.ReLU(True),
            # state size. (ngf*2) x 16 x 16
            nn.ConvTranspose2d( ngf * 2, ngf, 4, 2, 1, bias=False),
            nn.BatchNorm2d(ngf),
            nn.ReLU(True),
            # state size. (ngf) x 32 x 32
            nn.ConvTranspose2d( ngf, nc, 4, 2, 1, bias=False),
            nn.Tanh()
            # state size. (nc) x 64 x 64
        )

    def forward(self, input):
        return self.main(input)

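For reference, the zip over layer_dims in my custom class is meant to produce the same three middle blocks as the tutorial; assuming ngf = 64 (the tutorial's default), these are the (in_channels, out_channels) pairs it generates:

ngf = 64  # assumed value, matching the tutorial's default

layer_dims = [ngf * 8, ngf * 4, ngf * 2, ngf]   # [512, 256, 128, 64]

# (in_channels, out_channels) pairs used to build the intermediate upscale blocks
print(list(zip(layer_dims[:-1], layer_dims[1:])))
# [(512, 256), (256, 128), (128, 64)]
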
but when I run the code:

z = torch.randn(64, nz, 1, 1)
gen_model = Generator()
gen_model_pytorch = Generator_pytorch()
print(gen_model_pytorch(z).shape)
print(gen_model(z).shape)

I get the following output:


torch.Size([64, 3, 64, 64])
Generator input:  torch.Size([64, 100, 1, 1])
---------------------------------------------------------------------------
TypeError                                 Traceback (most recent call last)
/tmp/ipykernel_939/3945186195.py in <module>
      3 gen_model_pytorch = Generator_pytorch()
      4 print(gen_model_pytorch(z).shape)
----> 5 print(gen_model(z).shape)
      6 
      7 # print(gen_model)

/opt/conda/lib/python3.8/site-packages/torch/nn/modules/module.py in _call_impl(self, *input, **kwargs)
   1108         if not (self._backward_hooks or self._forward_hooks or self._forward_pre_hooks or _global_backward_hooks
   1109                 or _global_forward_hooks or _global_forward_pre_hooks):
-> 1110             return forward_call(*input, **kwargs)
   1111         # Do not call functions when jit is used
   1112         full_backward_hooks, non_full_backward_hooks = [], []

/tmp/ipykernel_939/2092446074.py in forward(self, x)
     28         # x = x.view(-1, 1024, 4, 4)
     29         # x = self.gen(x)
---> 30         return self.gen(x)
     31 
     32 

/opt/conda/lib/python3.8/site-packages/torch/nn/modules/module.py in _call_impl(self, *input, **kwargs)
   1108         if not (self._backward_hooks or self._forward_hooks or self._forward_pre_hooks or _global_backward_hooks
   1109                 or _global_forward_hooks or _global_forward_pre_hooks):
-> 1110             return forward_call(*input, **kwargs)
   1111         # Do not call functions when jit is used
   1112         full_backward_hooks, non_full_backward_hooks = [], []

/opt/conda/lib/python3.8/site-packages/torch/nn/modules/container.py in forward(self, input)
    139     def forward(self, input):
    140         for module in self:
--> 141             input = module(input)
    142         return input
    143 

/opt/conda/lib/python3.8/site-packages/torch/nn/modules/module.py in _call_impl(self, *input, **kwargs)
   1108         if not (self._backward_hooks or self._forward_hooks or self._forward_pre_hooks or _global_backward_hooks
   1109                 or _global_forward_hooks or _global_forward_pre_hooks):
-> 1110             return forward_call(*input, **kwargs)
   1111         # Do not call functions when jit is used
   1112         full_backward_hooks, non_full_backward_hooks = [], []

/opt/conda/lib/python3.8/site-packages/torch/nn/modules/container.py in forward(self, input)
    139     def forward(self, input):
    140         for module in self:
--> 141             input = module(input)
    142         return input
    143 

/opt/conda/lib/python3.8/site-packages/torch/nn/modules/module.py in _call_impl(self, *input, **kwargs)
   1108         if not (self._backward_hooks or self._forward_hooks or self._forward_pre_hooks or _global_backward_hooks
   1109                 or _global_forward_hooks or _global_forward_pre_hooks):
-> 1110             return forward_call(*input, **kwargs)
   1111         # Do not call functions when jit is used
   1112         full_backward_hooks, non_full_backward_hooks = [], []
/opt/conda/lib/python3.8/site-packages/torch/nn/modules/conv.py in forward(self, input, output_size)
    923             input, output_size, self.stride, self.padding, self.kernel_size, self.dilation)  # type: ignore[arg-type]
    924 
--> 925         return F.conv_transpose2d(
    926             input, self.weight, self.bias, self.stride, self.padding,
    927             output_padding, self.groups, self.dilation)

TypeError: conv_transpose2d(): argument 'output_padding' (position 6) must be tuple of ints, not tuple

It works for the tutorial code (obviously), but not for the same custom network in my Generator class, even though I followed the same architecture as the tutorial. I cross-checked both models layer by layer with print() and they looked identical... which layer is causing this issue?
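For reference, a minimal sketch of what I suspect (not a confirmed diagnosis): in output_layer the flag use_bias is passed positionally, and the sixth positional parameter of nn.ConvTranspose2d is output_padding, not bias, so the boolean would land there. Passing the flag by keyword avoids that:

import torch
import torch.nn as nn

# nn.ConvTranspose2d positional parameter order:
#   (in_channels, out_channels, kernel_size, stride, padding,
#    output_padding, groups, bias, dilation)

# A boolean passed positionally lands on output_padding and becomes (False, False):
bad = nn.ConvTranspose2d(64, 3, 4, 2, 1, False)
# bad(torch.randn(1, 64, 32, 32))  # raises the conv_transpose2d() TypeError shown above

# Passing bias by keyword keeps output_padding at its default of 0:
good = nn.ConvTranspose2d(64, 3, 4, 2, 1, bias=False)
print(good(torch.randn(1, 64, 32, 32)).shape)  # torch.Size([1, 3, 64, 64])

If that is indeed the cause, writing nn.ConvTranspose2d(in_f, out_f, 4, 2, 1, bias=use_bias) in output_layer would match the tutorial's final layer.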
