Closed zehranrgi closed 2 years ago
I resized the images to 256, but now I get this error. What is the problem with using a custom dataset?
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "tools/train.py", line 209, in <module>
main()
File "tools/train.py", line 182, in main
test_cfg=cfg.get('test_cfg'))
File "/content/drive/MyDrive/KU/mmdetection/mmdet/models/builder.py", line 59, in build_detector
cfg, default_args=dict(train_cfg=train_cfg, test_cfg=test_cfg))
File "/usr/local/lib/python3.7/dist-packages/mmcv/utils/registry.py", line 212, in build
return self.build_func(*args, **kwargs, registry=self)
File "/usr/local/lib/python3.7/dist-packages/mmcv/cnn/builder.py", line 27, in build_model_from_cfg
return build_from_cfg(cfg, registry, default_args)
File "/usr/local/lib/python3.7/dist-packages/mmcv/utils/registry.py", line 55, in build_from_cfg
raise type(e)(f'{obj_cls.__name__}: {e}')
AssertionError: CascadeRCNN: FPN:
Solved. Also, I think the provided examples are not sufficient...
My image size is 416*416. I changed the config file accordingly, but I still get this error. How can I solve this problem?
My custom config file:
My coco dataset:
Traceback (most recent call last): File "tools/train.py", line 209, in <module>
main()
File "tools/train.py", line 205, in main
meta=meta)
File "/content/drive/MyDrive/KU/mmdetection/mmdet/apis/train.py", line 208, in train_detector
runner.run(data_loaders, cfg.workflow)
File "/usr/local/lib/python3.7/dist-packages/mmcv/runner/epoch_based_runner.py", line 127, in run
epoch_runner(data_loaders[i], **kwargs)
File "/usr/local/lib/python3.7/dist-packages/mmcv/runner/epoch_based_runner.py", line 50, in train
self.run_iter(data_batch, train_mode=True, **kwargs)
File "/usr/local/lib/python3.7/dist-packages/mmcv/runner/epoch_based_runner.py", line 30, in run_iter
**kwargs)
File "/usr/local/lib/python3.7/dist-packages/mmcv/parallel/data_parallel.py", line 75, in train_step
return self.module.train_step(*inputs[0], **kwargs[0])
File "/content/drive/MyDrive/KU/mmdetection/mmdet/models/detectors/base.py", line 248, in train_step
losses = self(**data)
File "/usr/local/lib/python3.7/dist-packages/torch/nn/modules/module.py", line 1051, in _call_impl
return forward_call(*input, **kwargs)
File "/usr/local/lib/python3.7/dist-packages/mmcv/runner/fp16_utils.py", line 109, in new_func
return old_func(*args, **kwargs)
File "/content/drive/MyDrive/KU/mmdetection/mmdet/models/detectors/base.py", line 172, in forward
return self.forward_train(img, img_metas, **kwargs)
File "/content/drive/MyDrive/KU/mmdetection/mmdet/models/detectors/two_stage.py", line 127, in forward_train
x = self.extract_feat(img)
File "/content/drive/MyDrive/KU/mmdetection/mmdet/models/detectors/two_stage.py", line 69, in extract_feat
x = self.neck(x)
File "/usr/local/lib/python3.7/dist-packages/torch/nn/modules/module.py", line 1051, in _call_impl
return forward_call(*input, **kwargs)
File "/usr/local/lib/python3.7/dist-packages/mmcv/runner/fp16_utils.py", line 109, in new_func
return old_func(*args, **kwargs)
File "/content/drive/MyDrive/KU/mmdetection/mmdet/models/necks/fpn.py", line 159, in forward
for i, lateral_conv in enumerate(self.lateral_convs)
File "/content/drive/MyDrive/KU/mmdetection/mmdet/models/necks/fpn.py", line 159, in <listcomp>
for i, lateral_conv in enumerate(self.lateral_convs)
File "/usr/local/lib/python3.7/dist-packages/torch/nn/modules/module.py", line 1051, in _call_impl
return forward_call(*input, **kwargs)
File "/usr/local/lib/python3.7/dist-packages/mmcv/cnn/bricks/conv_module.py", line 201, in forward
x = self.conv(x)
File "/usr/local/lib/python3.7/dist-packages/torch/nn/modules/module.py", line 1051, in _call_impl
return forward_call(*input, **kwargs)
File "/usr/local/lib/python3.7/dist-packages/torch/nn/modules/conv.py", line 443, in forward
return self._conv_forward(input, self.weight, self.bias)
File "/usr/local/lib/python3.7/dist-packages/torch/nn/modules/conv.py", line 440, in _conv_forward
self.padding, self.dilation, self.groups)
RuntimeError: Given groups=1, weight of size [416, 416, 1, 1], expected input[2, 256, 200, 200] to have 416 channels, but got 256 channels instead