Closed: kokoronokasumi closed this issue 10 months ago.
Hi, SyncBN needs to be disabled when training on a single GPU. Please delete the SyncBN line from the config and try again.
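For reference, a minimal sketch of that change, assuming the config follows mmdet's standard `norm_cfg` convention; the actual line in `retinanet_transx_t_fpn_1x_coco.py` may look slightly different:

```python
# Hypothetical excerpt from the detection config; the real file may
# wire norm_cfg into the backbone/neck differently.

# Fails on a single GPU: SyncBN calls torch.distributed.get_world_size(),
# which raises if no default process group has been initialized.
norm_cfg = dict(type='SyncBN', requires_grad=True)

# Works on a single GPU: plain BatchNorm needs no process group.
norm_cfg = dict(type='BN', requires_grad=True)
```

If editing the config is inconvenient, recent mmcv 1.x releases also provide `revert_sync_batchnorm(model)` (in `mmcv.cnn.utils`), which converts the SyncBN layers of an already-built model back to regular BN before training.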
It works, thank you very much.
I ran the object_detection training on Windows with `python train.py retinanet_transx_t_fpn_1x_coco.py`, and I get this error:

```
Traceback (most recent call last):
  File "train.py", line 195, in <module>
    main()
  File "train.py", line 183, in main
    train_detector(
  File "D:\TransXNet\TransXNet\object_detection\mmdet_custom\apis\train.py", line 184, in train_detector
    runner.run(data_loaders, cfg.workflow)
  File "C:\ProgramData\miniconda3\envs\transxnet\lib\site-packages\mmcv\runner\epoch_based_runner.py", line 136, in run
    epoch_runner(data_loaders[i], **kwargs)
  File "C:\ProgramData\miniconda3\envs\transxnet\lib\site-packages\mmcv\runner\epoch_based_runner.py", line 53, in train
    self.run_iter(data_batch, train_mode=True, **kwargs)
  File "C:\ProgramData\miniconda3\envs\transxnet\lib\site-packages\mmcv\runner\epoch_based_runner.py", line 31, in run_iter
    outputs = self.model.train_step(data_batch, self.optimizer,
  File "C:\ProgramData\miniconda3\envs\transxnet\lib\site-packages\mmcv\parallel\data_parallel.py", line 77, in train_step
    return self.module.train_step(*inputs[0], **kwargs[0])
  File "C:\ProgramData\miniconda3\envs\transxnet\lib\site-packages\mmdet\models\detectors\base.py", line 248, in train_step
    losses = self(**data)
  File "C:\ProgramData\miniconda3\envs\transxnet\lib\site-packages\torch\nn\modules\module.py", line 1194, in _call_impl
    return forward_call(*input, **kwargs)
  File "C:\ProgramData\miniconda3\envs\transxnet\lib\site-packages\mmcv\runner\fp16_utils.py", line 119, in new_func
    return old_func(*args, **kwargs)
  File "C:\ProgramData\miniconda3\envs\transxnet\lib\site-packages\mmdet\models\detectors\base.py", line 172, in forward
    return self.forward_train(img, img_metas, **kwargs)
  File "C:\ProgramData\miniconda3\envs\transxnet\lib\site-packages\mmdet\models\detectors\single_stage.py", line 82, in forward_train
    x = self.extract_feat(img)
  File "C:\ProgramData\miniconda3\envs\transxnet\lib\site-packages\mmdet\models\detectors\single_stage.py", line 43, in extract_feat
    x = self.backbone(img)
  File "C:\ProgramData\miniconda3\envs\transxnet\lib\site-packages\torch\nn\modules\module.py", line 1194, in _call_impl
    return forward_call(*input, **kwargs)
  File "D:\TransXNet\TransXNet\object_detection\transxnet.py", line 717, in forward
    x = self.forward_embeddings(x)
  File "D:\TransXNet\TransXNet\object_detection\transxnet.py", line 693, in forward_embeddings
    x = self.patch_embed(x)
  File "C:\ProgramData\miniconda3\envs\transxnet\lib\site-packages\torch\nn\modules\module.py", line 1194, in _call_impl
    return forward_call(*input, **kwargs)
  File "D:\TransXNet\TransXNet\object_detection\transxnet.py", line 66, in forward
    return self.proj(x)
  File "C:\ProgramData\miniconda3\envs\transxnet\lib\site-packages\torch\nn\modules\module.py", line 1194, in _call_impl
    return forward_call(*input, **kwargs)
  File "C:\ProgramData\miniconda3\envs\transxnet\lib\site-packages\mmcv\cnn\bricks\conv_module.py", line 209, in forward
    x = self.norm(x)
  File "C:\ProgramData\miniconda3\envs\transxnet\lib\site-packages\torch\nn\modules\module.py", line 1194, in _call_impl
    return forward_call(*input, **kwargs)
  File "C:\ProgramData\miniconda3\envs\transxnet\lib\site-packages\torch\nn\modules\batchnorm.py", line 735, in forward
    world_size = torch.distributed.get_world_size(process_group)
  File "C:\ProgramData\miniconda3\envs\transxnet\lib\site-packages\torch\distributed\distributed_c10d.py", line 1067, in get_world_size
    return _get_group_size(group)
  File "C:\ProgramData\miniconda3\envs\transxnet\lib\site-packages\torch\distributed\distributed_c10d.py", line 453, in _get_group_size
    default_pg = _get_default_group()
  File "C:\ProgramData\miniconda3\envs\transxnet\lib\site-packages\torch\distributed\distributed_c10d.py", line 584, in _get_default_group
    raise RuntimeError(
RuntimeError: Default process group has not been initialized, please make sure to call init_process_group.
```
I want to run non-distributed training. Please help me.