sksq96 / pytorch-summary

Model summary in PyTorch similar to `model.summary()` in Keras

RuntimeError: Failed to run torchsummary. See above stack traces for more details. Executed layers up to: [] #180

Closed · huynth1801 closed this issue 2 years ago

huynth1801 commented 2 years ago

I am trying to run the following code but keep getting an error. Can anyone help me out?

This is my code:

from gc_layer import GatedConv2d
import torch
import torch.nn as nn
from base_model import BaseModel


class Discriminator(BaseModel):
    def __init__(self, channels=64):
        super(Discriminator, self).__init__()
        self.channels = channels
        input_dim = 3
        self.init_weights()
        self.gt_conv1 = GatedConv2d(input_dim + 1, channels, kernel_size=3, dilation=1, pad_type='zero', padding=1, activation='lrelu')
        # self.lk1 = nn.LeakyReLU()
        self.gt_conv2 = GatedConv2d(channels, channels * 2, kernel_size=3, dilation=1, pad_type='zero', padding=1, activation='lrelu')
        # self.lk2 = nn.LeakyReLU()
        self.gt_conv3 = GatedConv2d(channels * 2, channels * 4, kernel_size=3, dilation=1, pad_type='zero', padding=1, activation='lrelu')
        # self.lk3 = nn.LeakyReLU()
        self.gt_conv4 = GatedConv2d(channels * 4, channels * 8, kernel_size=3, dilation=1, pad_type='zero', padding=1, activation='lrelu')
        # self.lk4 = nn.LeakyReLU()
        self.gt_conv5 = GatedConv2d(channels * 8, channels * 8, kernel_size=3, dilation=1, pad_type='zero', padding=1, activation='lrelu')
        # self.lk5 = nn.LeakyReLU()
        self.gt_conv6 = GatedConv2d(channels * 8, channels * 8, kernel_size=3, dilation=1, pad_type='zero', padding=1, activation='lrelu')
        # self.lk6 = nn.LeakyReLU()
        self.gt_conv7 = GatedConv2d(channels * 8, channels * 8, kernel_size=3, dilation=1, pad_type='zero', padding=1, activation='lrelu')
        # self.lk7 = nn.LeakyReLU()

    def forward(self, inputs):
        # x_in = torch.cat([inputs, mask], dim=1)
        output = self.gt_conv1(inputs)
        # output = self.lk1(output)
        output = self.gt_conv2(output)
        # output = self.lk2(output)
        output = self.gt_conv3(output)
        # output = self.lk3(output)
        output = self.gt_conv4(output)
        # output = self.lk4(output)
        output = self.gt_conv5(output)
        # output = self.lk5(output)
        output = self.gt_conv6(output)
        # output = self.lk6(output)
        output = self.gt_conv7(output)
        # output = self.lk7(output)
        return output


if __name__ == "__main__":
    model = Discriminator()
    print(model)
    from torchsummary import summary
    print(summary(model, (3, 256, 256), 1))

And this is the error:

Traceback (most recent call last):
  File "/home/huynth/miniconda3/envs/inpainting/lib/python3.8/site-packages/torchsummary/torchsummary.py", line 140, in summary
    _ = model.to(device)(*x, *args, **kwargs)  # type: ignore[misc]
  File "/home/huynth/miniconda3/envs/inpainting/lib/python3.8/site-packages/torch/nn/modules/module.py", line 1102, in _call_impl
    return forward_call(*input, **kwargs)
TypeError: forward() takes 2 positional arguments but 3 were given

The above exception was the direct cause of the following exception:

Traceback (most recent call last):
  File "/home/huynth/Hypergraph-Inpainting/models/discriminator.py", line 57, in <module>
    print(summary(model, (3,256,256), 1))
  File "/home/huynth/miniconda3/envs/inpainting/lib/python3.8/site-packages/torchsummary/torchsummary.py", line 143, in summary
    raise RuntimeError(
RuntimeError: Failed to run torchsummary. See above stack traces for more details. Executed layers up to: []

Thank you!
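For what it's worth, the `TypeError: forward() takes 2 positional arguments but 3 were given` suggests that the trailing `1` in `summary(model, (3,256,256), 1)` is being forwarded as an extra positional argument to `Discriminator.forward()`, which accepts only a single tensor. Separately, `gt_conv1` is constructed with `input_dim + 1 = 4` input channels, so a 3-channel dummy input would still fail once the extra argument is removed. A minimal sketch that avoids both issues, assuming the installed torchsummary forwards any extra positional arguments to the model and accepts a (C, H, W) input shape:

import torch
from torchsummary import summary

model = Discriminator()

# Sanity-check forward() directly first: gt_conv1 expects
# input_dim + 1 = 4 input channels, so use a 4-channel dummy tensor.
x = torch.randn(1, 4, 256, 256)
out = model(x)
print(out.shape)

# Drop the trailing positional "1" so only the input tensor reaches
# forward(), and pass the 4-channel shape instead of (3, 256, 256).
print(summary(model, (4, 256, 256)))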

Matheus-Schmitz commented 2 years ago

Following, as I'm facing the same issue on an LSTM architecture.

frankTian92 commented 1 year ago

I got the same problem

starrabb1t commented 1 year ago

same

lulubbb commented 1 year ago

same