Open 35454652 opened 2 years ago
Traceback (most recent call last): File "D:/python/pycharm/MNAD-master/Evaluate.py", line 139, in <module> outputs, feas, updated_feas, m_items_test, softmax_score_query, softmax_score_memory, _, _, _, compactness_loss = model.forward(imgs[:,0:3*4], m_items_test, False) File "D:\python\pycharm\MNAD-master\model\final_future_prediction_with_memory_spatial_sumonly_weight_ranking_top1.py", line 135, in forward fea, skip1, skip2, skip3 = self.encoder(x) File "D:\python\Anaconda\envs\MNAD\lib\site-packages\torch\nn\modules\module.py", line 1102, in _call_impl return forward_call(*input, **kwargs) File "D:\python\pycharm\MNAD-master\model\final_future_prediction_with_memory_spatial_sumonly_weight_ranking_top1.py", line 46, in forward tensorConv1 = self.moduleConv1(x) File "D:\python\Anaconda\envs\MNAD\lib\site-packages\torch\nn\modules\module.py", line 1102, in _call_impl return forward_call(*input, **kwargs) File "D:\python\Anaconda\envs\MNAD\lib\site-packages\torch\nn\modules\container.py", line 141, in forward input = module(input) File "D:\python\Anaconda\envs\MNAD\lib\site-packages\torch\nn\modules\module.py", line 1102, in _call_impl return forward_call(*input, **kwargs) File "D:\python\Anaconda\envs\MNAD\lib\site-packages\torch\nn\modules\conv.py", line 446, in forward return self._conv_forward(input, self.weight, self.bias) File "D:\python\Anaconda\envs\MNAD\lib\site-packages\torch\nn\modules\conv.py", line 443, in _conv_forward self.padding, self.dilation, self.groups) RuntimeError: Given groups=1, weight of size [64, 12, 3, 3], expected input[1, 3, 256, 256] to have 12 channels, but got 3 channels instead
Traceback (most recent call last): File "D:/python/pycharm/MNAD-master/Evaluate.py", line 139, in <module>
outputs, feas, updated_feas, m_items_test, softmax_score_query, softmax_score_memory, _, _, _, compactness_loss = model.forward(imgs[:,0:3*4], m_items_test, False)
File "D:\python\pycharm\MNAD-master\model\final_future_prediction_with_memory_spatial_sumonly_weight_ranking_top1.py", line 135, in forward
fea, skip1, skip2, skip3 = self.encoder(x)
File "D:\python\Anaconda\envs\MNAD\lib\site-packages\torch\nn\modules\module.py", line 1102, in _call_impl
return forward_call(*input, **kwargs)
File "D:\python\pycharm\MNAD-master\model\final_future_prediction_with_memory_spatial_sumonly_weight_ranking_top1.py", line 46, in forward
tensorConv1 = self.moduleConv1(x)
File "D:\python\Anaconda\envs\MNAD\lib\site-packages\torch\nn\modules\module.py", line 1102, in _call_impl
return forward_call(*input, **kwargs)
File "D:\python\Anaconda\envs\MNAD\lib\site-packages\torch\nn\modules\container.py", line 141, in forward
input = module(input)
File "D:\python\Anaconda\envs\MNAD\lib\site-packages\torch\nn\modules\module.py", line 1102, in _call_impl
return forward_call(*input, **kwargs)
File "D:\python\Anaconda\envs\MNAD\lib\site-packages\torch\nn\modules\conv.py", line 446, in forward
return self._conv_forward(input, self.weight, self.bias)
File "D:\python\Anaconda\envs\MNAD\lib\site-packages\torch\nn\modules\conv.py", line 443, in _conv_forward
self.padding, self.dilation, self.groups)
RuntimeError: Given groups=1, weight of size [64, 12, 3, 3], expected input[1, 3, 256, 256] to have 12 channels, but got 3 channels instead