Closed: galsk87 closed this issue 6 years ago.
Can you provide a preview of the code snippet where this error occurred? Maybe that could help.
This is my code, which defines the model:
```python
class COCOResGen2(nn.Module):
    def __init__(self, params):
        super(COCOResGen2, self).__init__()
        input_dim_a = params['input_dim_a']
        input_dim_b = params['input_dim_b']
        ch = params['ch']
        n_enc_front_blk  = params['n_enc_front_blk']
        n_enc_res_blk    = params['n_enc_res_blk']
        n_enc_shared_blk = params['n_enc_shared_blk']
        n_gen_shared_blk = params['n_gen_shared_blk']
        n_gen_res_blk    = params['n_gen_res_blk']
        n_gen_front_blk  = params['n_gen_front_blk']
        if 'res_dropout_ratio' in params.keys():
            res_dropout_ratio = params['res_dropout_ratio']
        else:
            res_dropout_ratio = 0

        ##############################################################################
        # BEGIN of ENCODERS
        # Convolutional front-end
        encA = []
        encB = []
        encA += [LeakyReLUConv2d(input_dim_a, ch, kernel_size=7, stride=1, padding=3)]
        encB += [LeakyReLUConv2d(input_dim_b, ch, kernel_size=7, stride=1, padding=3)]
        tch = ch
        for i in range(1, n_enc_front_blk):
            encA += [LeakyReLUConv2d(tch, tch * 2, kernel_size=3, stride=2, padding=1)]
            encB += [LeakyReLUConv2d(tch, tch * 2, kernel_size=3, stride=2, padding=1)]
            tch *= 2
        # Residual-block back-end
        for i in range(0, n_enc_res_blk):
            encA += [INSResBlock(tch, tch, dropout=res_dropout_ratio)]
            encB += [INSResBlock(tch, tch, dropout=res_dropout_ratio)]
        # END of ENCODERS
        ##############################################################################

        ##############################################################################
        # BEGIN of SHARED LAYERS
        # Shared residual-blocks
        enc_shared = []
        for i in range(0, n_enc_shared_blk):
            enc_shared += [INSResBlock(tch, tch, dropout=res_dropout_ratio)]
        enc_shared += [GaussianNoiseLayer()]
        dec_shared = []
        for i in range(0, n_gen_shared_blk):
            dec_shared += [INSResBlock(tch, tch, dropout=res_dropout_ratio)]
        # END of SHARED LAYERS
        ##############################################################################

        ##############################################################################
        # BEGIN of DECODERS
        decA = []
        decB = []
        # Residual-block front-end
        for i in range(0, n_gen_res_blk):
            decA += [INSResBlock(tch, tch, dropout=res_dropout_ratio)]
            decB += [INSResBlock(tch, tch, dropout=res_dropout_ratio)]
        # Convolutional back-end
        for i in range(0, n_gen_front_blk - 1):
            decA += [LeakyReLUConvTranspose2d(tch, tch // 2, kernel_size=3, stride=2, padding=1, output_padding=1)]
            decB += [LeakyReLUConvTranspose2d(tch, tch // 2, kernel_size=3, stride=2, padding=1, output_padding=1)]
            tch = tch // 2
        decA += [nn.ConvTranspose2d(tch, input_dim_a, kernel_size=1, stride=1, padding=0)]
        decB += [nn.ConvTranspose2d(tch, input_dim_b, kernel_size=1, stride=1, padding=0)]
        decA += [nn.Tanh()]
        decB += [nn.Tanh()]
        # END of DECODERS
        ##############################################################################

        self.encode_A = nn.Sequential(*encA)
        self.encode_B = nn.Sequential(*encB)
        self.enc_shared = nn.Sequential(*enc_shared)
        self.dec_shared = nn.Sequential(*dec_shared)
        self.decode_A = nn.Sequential(*decA)
        self.decode_B = nn.Sequential(*decB)

    def forward(self, x_A, x_B):
        out = torch.cat((self.encode_A(x_A), self.encode_B(x_B)), 0)
        shared = self.enc_shared(out)
        out = self.dec_shared(shared)
        out_A = self.decode_A(out)
        out_B = self.decode_B(out)
        x_Aa, x_Ba = torch.split(out_A, x_A.size(0), dim=0)
        x_Ab, x_Bb = torch.split(out_B, x_A.size(0), dim=0)
        return x_Aa, x_Ba, x_Ab, x_Bb, shared

    def forward_a2b(self, x_A):
        out = self.encode_A(x_A)
        shared = self.enc_shared(out)
        out = self.dec_shared(shared)
        out = self.decode_B(out)
        return out, shared

    def forward_b2a(self, x_B):
        out = self.encode_B(x_B)
        shared = self.enc_shared(out)
        out = self.dec_shared(shared)
        out = self.decode_A(out)
        return out, shared
```
This is the code which generated the graph:

```python
images_a = Variable(images_a.cuda(opts.gpu))
images_b = Variable(images_b.cuda(opts.gpu))
y = trainer.gen(images_a, images_b)
g = make_dot(y, params=dict(list(y.named_parameters()) + [('images_a', images_a), ('images_b', images_b)]))
```
This is the error:

```
File "cocogan_train.py", line 67, in main
    g = make_dot(y, params=dict(list(y.named_parameters()) + [('images_a', images_a), ('images_b', images_b)]))
AttributeError: 'tuple' object has no attribute 'named_parameters'
```
If I build the graph without passing the inputs (since the model receives a params file which includes all of them), in the following manner:

```python
y = trainer.gen
g = make_dot(y, params=dict(y.named_parameters()))
```

I get the following error:

```
File "/opt/anaconda/lib/python2.7/site-packages/torch/nn/modules/module.py", line 366, in __getattr__
    type(self).__name__, name))
AttributeError: 'COCOResGen2' object has no attribute 'grad_fn'
```
I partially solved this problem by adding this check to the add_nodes function, so it would take care of tuple inputs:

```python
if isinstance(var, tuple):
    pudb.set_trace()
    for idx in range(var):
        add_nodes(var[idx].grad_fn)
```
Using the snippet above gives the following error:

```
TypeError: range() integer end argument expected, got tuple.
```

at `for idx in range(var):`.
Could you please update us if you were able to successfully generate the graph for the multiple-input case?
I was.
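For reference, the TypeError quoted above comes from `range(var)`: `range()` expects an integer, so the tuple needs to go through `len()`. A corrected sketch of the earlier patch, with the `pudb` trace dropped:

```python
if isinstance(var, tuple):
    # iterate over indices, not over the tuple itself
    for idx in range(len(var)):
        add_nodes(var[idx].grad_fn)
else:
    add_nodes(var.grad_fn)
```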
Wow, that was quick :)
Could you please help out? Following is my code:

```python
x = fasterRCNN(im_data, im_info, num_boxes, gt_boxes)
graph = make_dot(x, params=dict(list(fasterRCNN.named_parameters()) + [('im_data', im_data), ('im_info', im_info), ('gt_boxes', gt_boxes), ('num_boxes', num_boxes)]))
graph.view()
```
I have modified dot.py like this:

```python
if isinstance(var, tuple):
    for idx in range(var):
        add_nodes(var[idx].grad_fn)
else:
    add_nodes(var.grad_fn)
```

These lines were added just after the add_nodes function. Am I doing something wrong?
Multiple inputs were always supported; multiple outputs were added by #15.
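With a pytorchviz build that includes #15, the output tuple can be passed to `make_dot` directly, so no local edit to dot.py should be needed. A sketch using the names from the snippet above, assuming fasterRCNN returns a tuple of Tensors:

```python
x = fasterRCNN(im_data, im_info, num_boxes, gt_boxes)  # tuple of output Tensors
graph = make_dot(x, params=dict(fasterRCNN.named_parameters()))
graph.view()
```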
Hello. I want to visualize a Siamese network and am getting the error below.

The code for visualisation is:

```python
make_dot(model, params=dict(list(model.named_parameters())))
```

The error:

```
raise ModuleAttributeError("'{}' object has no attribute '{}'".format(
torch.nn.modules.module.ModuleAttributeError: 'Siamese' object has no attribute 'grad_fn'
```
Hi,
The make_dot function takes as input either a Tensor or a tuple of Tensors, not an nn.Module. You most likely want to pass `model(input)` here.
Thanks, here is the solution for my error:

```python
y = model(img1, img2)
make_dot(y, params=dict(list(model.named_parameters()))).render("siamese", format="png")
```
Hi,
For example, let's say I'm visualizing a GAN architecture for style transfer which has multiple inputs. Let's call the network Gen; it receives two images from two styles, image_a and image_b. Once I try to do as in the example, I get the following error:

```
AttributeError: 'tuple' object has no attribute 'grad_fn'
```
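Assuming Gen returns a tuple of Tensors (as COCOResGen2's forward above does), the options are the same as earlier in this thread: upgrade to a pytorchviz version with #15 merged and pass the tuple directly, or visualize one output Tensor at a time. A sketch with the names Gen, image_a, and image_b from this comment:

```python
out = Gen(image_a, image_b)  # forward pass; out is a tuple of Tensors

# newer pytorchviz (with #15): a tuple of Tensors is accepted directly
make_dot(out, params=dict(Gen.named_parameters()))

# older versions: pick a single output Tensor instead
make_dot(out[0], params=dict(Gen.named_parameters()))
```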