open-mmlab / mmdetection

OpenMMLab Detection Toolbox and Benchmark
https://mmdetection.readthedocs.io
Apache License 2.0

ValueError: need at least one array to concatenate #11939

Closed liangzzzz233 closed 2 months ago

liangzzzz233 commented 2 months ago

{'joints_vis': [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], 'joints': [[620.0, 394.0], [616.0, 269.0], [573.0, 185.0], [647.0, 188.0], [661.0, 221.0], [656.0, 231.0], [610.0, 187.0], [647.0, 176.0], [637.0201, 189.8183], [695.9799, 108.1817], [606.0, 217.0], [553.0, 161.0], [601.0, 167.0], [692.0, 185.0], [693.0, 240.0], [688.0, 313.0]], 'image': '015601864.jpg', 'scale': 3.021046, 'center': [594.0, 257.0]}

09/04 22:25:35 - mmengine - INFO -

System environment:
    sys.platform: win32
    Python: 3.8.10 (tags/v3.8.10:3d8993a, May 3 2021, 11:48:03) [MSC v.1928 64 bit (AMD64)]
    CUDA available: True
    MUSA available: False
    numpy_random_seed: 801145617
    GPU 0: NVIDIA GeForce RTX 3060 Laptop GPU
    CUDA_HOME: C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v11.3
    NVCC: Cuda compilation tools, release 11.3, V11.3.58
    MSVC: Microsoft (R) C/C++ Optimizing Compiler Version 19.29.30154 for x64
    GCC: n/a
    PyTorch: 1.10.2+cu113
    PyTorch compiling details: PyTorch built with:

Runtime environment:
    cudnn_benchmark: False
    mp_cfg: {'mp_start_method': 'fork', 'opencv_num_threads': 0}
    dist_cfg: {'backend': 'nccl'}
    seed: 801145617
    Distributed launcher: none
    Distributed training: False
    GPU number: 1

09/04 22:25:35 - mmengine - INFO - Config:
auto_scale_lr = dict(base_batch_size=512)
backend_args = dict(backend='local')
codec = dict(input_size=(256, 256), type='RegressionLabel')
custom_hooks = [dict(type='SyncBuffersHook')]
data_mode = 'topdown'
data_root = 'data/mpii/'
dataset_type = 'MpiiDataset'
default_hooks = dict(
    badcase=dict(
        badcase_thr=5,
        enable=False,
        metric_type='loss',
        out_dir='badcase',
        type='BadCaseAnalysisHook'),
    checkpoint=dict(
        interval=10, rule='greater', save_best='PCK', type='CheckpointHook'),
    logger=dict(interval=50, type='LoggerHook'),
    param_scheduler=dict(type='ParamSchedulerHook'),
    sampler_seed=dict(type='DistSamplerSeedHook'),
    timer=dict(type='IterTimerHook'),
    visualization=dict(enable=False, type='PoseVisualizationHook'))
default_scope = 'mmpose'
env_cfg = dict(
    cudnn_benchmark=False,
    dist_cfg=dict(backend='nccl'),
    mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0))
launcher = 'none'
load_from = None
log_level = 'INFO'
log_processor = dict(
    by_epoch=True, num_digits=6, type='LogProcessor', window_size=50)
model = dict(
    backbone=dict(
        depth=50,
        init_cfg=dict(checkpoint='torchvision://resnet50', type='Pretrained'),
        type='ResNet'),
    data_preprocessor=dict(
        bgr_to_rgb=True,
        mean=[123.675, 116.28, 103.53],
        std=[58.395, 57.12, 57.375],
        type='PoseDataPreprocessor'),
    head=dict(
        decoder=dict(input_size=(256, 256), type='RegressionLabel'),
        in_channels=2048,
        loss=dict(type='RLELoss', use_target_weight=True),
        num_joints=16,
        type='RLEHead'),
    neck=dict(type='GlobalAveragePooling'),
    test_cfg=dict(flip_test=True, shift_coords=True),
    type='TopdownPoseEstimator')
optim_wrapper = dict(optimizer=dict(lr=0.0005, type='Adam'))
param_scheduler = [
    dict(begin=0, by_epoch=False, end=500, start_factor=0.001, type='LinearLR'),
    dict(begin=0, by_epoch=True, end=210, gamma=0.1, milestones=[170, 200], type='MultiStepLR'),
]
resume = False
test_cfg = dict()
test_dataloader = dict(
    batch_size=32,
    dataset=dict(
        ann_file='annotations/mpii_val.json',
        data_mode='topdown',
        data_prefix=dict(img='images/'),
        data_root='data/mpii/',
        headbox_file='data/mpii//annotations/mpii_gt_val.mat',
        pipeline=[
            dict(type='LoadImage'),
            dict(type='GetBBoxCenterScale'),
            dict(input_size=(256, 256), type='TopdownAffine'),
            dict(type='PackPoseInputs'),
        ],
        test_mode=True,
        type='MpiiDataset'),
    drop_last=False,
    num_workers=2,
    persistent_workers=True,
    sampler=dict(round_up=False, shuffle=False, type='DefaultSampler'))
test_evaluator = dict(type='MpiiPCKAccuracy')
train_cfg = dict(by_epoch=True, max_epochs=210, val_interval=10)
train_dataloader = dict(
    batch_size=64,
    dataset=dict(
        ann_file='annotations/mpii_train.json',
        data_mode='topdown',
        data_prefix=dict(img='images/'),
        data_root='data/mpii/',
        pipeline=[
            dict(type='LoadImage'),
            dict(type='GetBBoxCenterScale'),
            dict(direction='horizontal', type='RandomFlip'),
            dict(shift_prob=0, type='RandomBBoxTransform'),
            dict(input_size=(256, 256), type='TopdownAffine'),
            dict(encoder=dict(input_size=(256, 256), type='RegressionLabel'), type='GenerateTarget'),
            dict(type='PackPoseInputs'),
        ],
        type='MpiiDataset'),
    num_workers=2,
    persistent_workers=True,
    sampler=dict(shuffle=True, type='DefaultSampler'))
train_pipeline = [
    dict(type='LoadImage'),
    dict(type='GetBBoxCenterScale'),
    dict(direction='horizontal', type='RandomFlip'),
    dict(shift_prob=0, type='RandomBBoxTransform'),
    dict(input_size=(256, 256), type='TopdownAffine'),
    dict(encoder=dict(input_size=(256, 256), type='RegressionLabel'), type='GenerateTarget'),
    dict(type='PackPoseInputs'),
]
val_cfg = dict()
val_dataloader = dict(
    batch_size=32,
    dataset=dict(
        ann_file='annotations/mpii_val.json',
        data_mode='topdown',
        data_prefix=dict(img='images/'),
        data_root='data/mpii/',
        headbox_file='data/mpii//annotations/mpii_gt_val.mat',
        pipeline=[
            dict(type='LoadImage'),
            dict(type='GetBBoxCenterScale'),
            dict(input_size=(256, 256), type='TopdownAffine'),
            dict(type='PackPoseInputs'),
        ],
        test_mode=True,
        type='MpiiDataset'),
    drop_last=False,
    num_workers=2,
    persistent_workers=True,
    sampler=dict(round_up=False, shuffle=False, type='DefaultSampler'))
val_evaluator = dict(type='MpiiPCKAccuracy')
val_pipeline = [
    dict(type='LoadImage'),
    dict(type='GetBBoxCenterScale'),
    dict(input_size=(256, 256), type='TopdownAffine'),
    dict(type='PackPoseInputs'),
]
vis_backends = [dict(type='LocalVisBackend')]
visualizer = dict(
    name='visualizer',
    type='PoseLocalVisualizer',
    vis_backends=[dict(type='LocalVisBackend')])
work_dir = './work_dirs\td-reg_res50_rle-8xb64-210e_mpii-256x256'
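For context, the only parts of this config that the dataset loader resolves on disk are `data_root`, `ann_file`, `data_prefix` and, for MPII evaluation, `headbox_file`. A minimal sketch of retargeting it at a custom MPII-style dataset follows; the base config file name is inferred from the `work_dir` above, and the directory/file names for the custom dataset are assumptions, not the reporter's actual layout:

```python
# Hypothetical override config for a custom MPII-style dataset.
# Assumes this file sits next to the base MPII config inside mmpose's configs tree.
_base_ = ['./td-reg_res50_rle-8xb64-210e_mpii-256x256.py']

data_root = 'data/my_dataset/'                     # assumed directory name
train_dataloader = dict(
    dataset=dict(
        data_root=data_root,
        ann_file='annotations/train.json',         # assumed file name; must exist and be non-empty
        data_prefix=dict(img='images/')))
val_dataloader = dict(
    dataset=dict(
        data_root=data_root,
        ann_file='annotations/val.json',           # assumed file name
        data_prefix=dict(img='images/')))
test_dataloader = val_dataloader
```

Note that the base MPII config also points `headbox_file` at `data/mpii//annotations/mpii_gt_val.mat`, which the MPII PCKh evaluation relies on; a custom dataset will need an equivalent file or a different evaluator.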

09/04 22:25:36 - mmengine - INFO - Distributed training is not used, all SyncBatchNorm (SyncBN) layers in the model will be automatically reverted to BatchNormXd layers if they are used.
09/04 22:25:36 - mmengine - INFO - Hooks will be executed in the following order:
before_run: (VERY_HIGH ) RuntimeInfoHook (BELOW_NORMAL) LoggerHook

before_train: (VERY_HIGH ) RuntimeInfoHook (NORMAL ) IterTimerHook (VERY_LOW ) CheckpointHook

before_train_epoch: (VERY_HIGH ) RuntimeInfoHook (NORMAL ) IterTimerHook (NORMAL ) DistSamplerSeedHook

before_train_iter: (VERY_HIGH ) RuntimeInfoHook (NORMAL ) IterTimerHook

after_train_iter: (VERY_HIGH ) RuntimeInfoHook (NORMAL ) IterTimerHook (BELOW_NORMAL) LoggerHook (LOW ) ParamSchedulerHook (VERY_LOW ) CheckpointHook

after_train_epoch: (NORMAL ) IterTimerHook (NORMAL ) SyncBuffersHook (LOW ) ParamSchedulerHook (VERY_LOW ) CheckpointHook

before_val: (VERY_HIGH ) RuntimeInfoHook

before_val_epoch: (NORMAL ) IterTimerHook (NORMAL ) SyncBuffersHook

before_val_iter: (NORMAL ) IterTimerHook

after_val_iter: (NORMAL ) IterTimerHook (NORMAL ) PoseVisualizationHook (BELOW_NORMAL) LoggerHook

after_val_epoch: (VERY_HIGH ) RuntimeInfoHook (NORMAL ) IterTimerHook (BELOW_NORMAL) LoggerHook (LOW ) ParamSchedulerHook (VERY_LOW ) CheckpointHook

after_val: (VERY_HIGH ) RuntimeInfoHook

after_train: (VERY_HIGH ) RuntimeInfoHook (VERY_LOW ) CheckpointHook

before_test: (VERY_HIGH ) RuntimeInfoHook

before_test_epoch: (NORMAL ) IterTimerHook

before_test_iter: (NORMAL ) IterTimerHook

after_test_iter: (NORMAL ) IterTimerHook (NORMAL ) PoseVisualizationHook (NORMAL ) BadCaseAnalysisHook (BELOW_NORMAL) LoggerHook

after_test_epoch: (VERY_HIGH ) RuntimeInfoHook (NORMAL ) IterTimerHook (NORMAL ) BadCaseAnalysisHook (BELOW_NORMAL) LoggerHook

after_test: (VERY_HIGH ) RuntimeInfoHook

after_run: (BELOW_NORMAL) LoggerHook

Traceback (most recent call last):
  File "tools/train.py", line 162, in <module>
    main()
  File "tools/train.py", line 158, in main
    runner.train()
  File "D:\mkvirtualenv\w38\lib\site-packages\mmengine\runner\runner.py", line 1728, in train
    self._train_loop = self.build_train_loop(
  File "D:\mkvirtualenv\w38\lib\site-packages\mmengine\runner\runner.py", line 1527, in build_train_loop
    loop = EpochBasedTrainLoop(
  File "D:\mkvirtualenv\w38\lib\site-packages\mmengine\runner\loops.py", line 44, in __init__
    super().__init__(runner, dataloader)
  File "D:\mkvirtualenv\w38\lib\site-packages\mmengine\runner\base_loop.py", line 26, in __init__
    self.dataloader = runner.build_dataloader(
  File "D:\mkvirtualenv\w38\lib\site-packages\mmengine\runner\runner.py", line 1370, in build_dataloader
    dataset = DATASETS.build(dataset_cfg)
  File "D:\mkvirtualenv\w38\lib\site-packages\mmengine\registry\registry.py", line 570, in build
    return self.build_func(cfg, *args, **kwargs, registry=self)
  File "D:\mkvirtualenv\w38\lib\site-packages\mmengine\registry\build_functions.py", line 121, in build_from_cfg
    obj = obj_cls(**args)  # type: ignore
  File "D:\mkvirtualenv\w38\lib\site-packages\mmpose\datasets\datasets\body\mpii_dataset.py", line 122, in __init__
    super().__init__(
  File "D:\mkvirtualenv\w38\lib\site-packages\mmpose\datasets\datasets\base\base_coco_style_dataset.py", line 103, in __init__
    super().__init__(
  File "D:\mkvirtualenv\w38\lib\site-packages\mmengine\dataset\base_dataset.py", line 247, in __init__
    self.full_init()
  File "D:\mkvirtualenv\w38\lib\site-packages\mmengine\dataset\base_dataset.py", line 298, in full_init
    self.data_list = self.load_data_list()
  File "D:\mkvirtualenv\w38\lib\site-packages\mmpose\datasets\datasets\base\base_coco_style_dataset.py", line 205, in load_data_list
    instance_list, image_list = self._load_annotations()
  File "D:\mkvirtualenv\w38\lib\site-packages\mmpose\datasets\datasets\body\mpii_dataset.py", line 166, in _load_annotations
    assert 'center' in ann, f"Annotation at index {idx} is missing 'center': {ann}"
AssertionError: Annotation at index 0 is missing 'center': joints_vis
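The trailing `joints_vis` in that message suggests the loop received a dict key (a string) rather than a per-instance annotation dict, which is what happens when the JSON top level is a single dict instead of a list of entries like the one shown at the top of this issue. A standalone sketch of that check (the annotation path is an assumption taken from the config above):

```python
import json

# Quick structural check for an MPII-style annotation file.
ANN_FILE = 'data/mpii/annotations/mpii_train.json'   # assumed path; adjust for a custom dataset
REQUIRED_KEYS = {'image', 'center', 'scale', 'joints', 'joints_vis'}

with open(ANN_FILE, encoding='utf-8') as f:
    anns = json.load(f)

# The MPII loader expects a list of per-instance dicts, not a single dict.
assert isinstance(anns, list), f'Expected a list of annotation dicts, got {type(anns)}'

for idx, ann in enumerate(anns):
    assert isinstance(ann, dict), f'Entry {idx} is {type(ann)}, not a dict'
    missing = REQUIRED_KEYS.difference(ann)
    assert not missing, f'Entry {idx} is missing keys: {sorted(missing)}'

print(f'{len(anns)} annotation entries look structurally OK')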

liangzzzz233 commented 2 months ago

The previous problem is solved. Now, while trying to train on my own dataset, I am hitting a new error: `ValueError: need at least one array to concatenate`. It looks like the JSON file is not being read.
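`need at least one array to concatenate` is what NumPy raises when `np.concatenate` is given an empty list of arrays; during dataset building it typically means no instances were parsed from the annotation file, i.e. `data_root`/`ann_file` in the config do not resolve to the actual JSON or the file is empty. A quick sanity check of what the config will resolve (the values below are the MPII defaults from the config above; substitute the custom-dataset values):

```python
import json
import os.path as osp

# Verify the annotation path the dataloader will resolve before launching training.
data_root = 'data/mpii/'                    # from the config; replace for a custom dataset
ann_file = 'annotations/mpii_train.json'

path = osp.join(data_root, ann_file)
print('Resolved annotation path:', osp.abspath(path))
assert osp.isfile(path), 'Annotation file not found - fix data_root / ann_file in the config'

with open(path, encoding='utf-8') as f:
    anns = json.load(f)

print('Number of annotation entries:', len(anns))
assert len(anns) > 0, 'Empty annotation list: the dataset would build zero instances'
```

Note that `train.py` is usually launched from the repository root, so a relative `data_root` like `data/mpii/` is resolved against the current working directory.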