open-mmlab / mmdetection

OpenMMLab Detection Toolbox and Benchmark
https://mmdetection.readthedocs.io
Apache License 2.0
29.59k stars 9.46k forks source link

代码报错:got an unexpected keyword argument 'pipeline' #10630

Open Alexisxty opened 1 year ago

Alexisxty commented 1 year ago

我是一名本科大二的学生,我遇见了下面这个报错,按照官网文档我训练了自己的数据集,我采用的是VOC2007格式的自定义数据集: TypeError: class EpochBasedTrainLoop in mmengine/runner/loops.py: class RepeatDataset in mmengine/dataset/dataset_wrapper.py: __init__() got an unexpected keyword argument 'pipeline',我在网上搜索了一下,说可能是mmengine和mmdet的版本不匹配,但是我尝试了将mmdet变更为2.x,3.1,3.0.0rc6,以及将mmengine从6.0.0一路升级到8.1.1都无法处理,以下是我的训练输出,希望能得到各位大佬的帮助(训练环境是win11的乌班图子系统20.04,显卡3090,cuda11.3):

(mm) ty@DESKTOP-MAFRV84:~/mmdetection$ python tools/train.py configs/detr/detr_r50_8xb2-150e_coco.py

07/12 16:23:40 - mmengine - INFO - System environment: sys.platform: linux Python: 3.8.16 (default, Mar 2 2023, 03:21:46) [GCC 11.2.0] CUDA available: True numpy_random_seed: 2119295261 GPU 0: NVIDIA GeForce RTX 3090 CUDA_HOME: None GCC: gcc (Ubuntu 9.4.0-1ubuntu1~20.04.1) 9.4.0 PyTorch: 1.12.1+cu113 PyTorch compiling details: PyTorch built with:

Runtime environment: cudnn_benchmark: False mp_cfg: {'mp_start_method': 'fork', 'opencv_num_threads': 0} dist_cfg: {'backend': 'nccl'} seed: None Distributed launcher: none Distributed training: False GPU number: 1

07/12 16:23:41 - mmengine - INFO - Config: dataset_type = 'VOCDataset' data_root = 'data/VOCdevkit/' file_client_args = dict(backend='disk') train_pipeline = [ dict(type='LoadImageFromFile', file_client_args=dict(backend='disk')), dict(type='LoadAnnotations', with_bbox=True), dict(type='RandomFlip', prob=0.5), dict( type='RandomChoice', transforms=[[{ 'type': 'RandomChoiceResize', 'scales': [(480, 1333), (512, 1333), (544, 1333), (576, 1333), (608, 1333), (640, 1333), (672, 1333), (704, 1333), (736, 1333), (768, 1333), (800, 1333)], 'keep_ratio': True }], [{ 'type': 'RandomChoiceResize', 'scales': [(400, 1333), (500, 1333), (600, 1333)], 'keep_ratio': True }, { 'type': 'RandomCrop', 'crop_type': 'absolute_range', 'crop_size': (384, 600), 'allow_negative_crop': True }, { 'type': 'RandomChoiceResize', 'scales': [(480, 1333), (512, 1333), (544, 1333), (576, 1333), (608, 1333), (640, 1333), (672, 1333), (704, 1333), (736, 1333), (768, 1333), (800, 1333)], 'keep_ratio': True }]]), dict(type='PackDetInputs') ] test_pipeline = [ dict(type='LoadImageFromFile', file_client_args=dict(backend='disk')), dict(type='Resize', scale=(1000, 600), keep_ratio=True), dict(type='LoadAnnotations', with_bbox=True), dict( type='PackDetInputs', meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', 'scale_factor')) ] train_dataloader = dict( batch_size=2, num_workers=2, persistent_workers=True, sampler=dict(type='DefaultSampler', shuffle=True), batch_sampler=dict(type='AspectRatioBatchSampler'), dataset=dict( type='RepeatDataset', times=3, dataset=dict( type='ConcatDataset', ignore_keys=['dataset_type'], datasets=[ dict( type='VOCDataset', data_root='data/VOCdevkit/', ann_file='VOC2007/ImageSets/Main/trainval.txt', data_prefix=dict(sub_data_root='VOC2007/'), filter_cfg=dict( filter_empty_gt=True, min_size=32, bbox_min_size=32), pipeline=[ dict( type='LoadImageFromFile', file_client_args=dict(backend='disk')), dict(type='LoadAnnotations', with_bbox=True), dict( type='Resize', 
scale=(1000, 600), keep_ratio=True), dict(type='RandomFlip', prob=0.5), dict(type='PackDetInputs') ]), dict( type='VOCDataset', data_root='data/VOCdevkit/', ann_file='VOC2007/ImageSets/Main/trainval.txt', data_prefix=dict(sub_data_root='VOC2007/'), filter_cfg=dict( filter_empty_gt=True, min_size=32, bbox_min_size=32), pipeline=[ dict( type='LoadImageFromFile', file_client_args=dict(backend='disk')), dict(type='LoadAnnotations', with_bbox=True), dict( type='Resize', scale=(1000, 600), keep_ratio=True), dict(type='RandomFlip', prob=0.5), dict(type='PackDetInputs') ]) ]), pipeline=[ dict( type='LoadImageFromFile', file_client_args=dict(backend='disk')), dict(type='LoadAnnotations', with_bbox=True), dict(type='RandomFlip', prob=0.5), dict( type='RandomChoice', transforms=[[{ 'type': 'RandomChoiceResize', 'scales': [(480, 1333), (512, 1333), (544, 1333), (576, 1333), (608, 1333), (640, 1333), (672, 1333), (704, 1333), (736, 1333), (768, 1333), (800, 1333)], 'keep_ratio': True }], [{ 'type': 'RandomChoiceResize', 'scales': [(400, 1333), (500, 1333), (600, 1333)], 'keep_ratio': True }, { 'type': 'RandomCrop', 'crop_type': 'absolute_range', 'crop_size': (384, 600), 'allow_negative_crop': True }, { 'type': 'RandomChoiceResize', 'scales': [(480, 1333), (512, 1333), (544, 1333), (576, 1333), (608, 1333), (640, 1333), (672, 1333), (704, 1333), (736, 1333), (768, 1333), (800, 1333)], 'keep_ratio': True }]]), dict(type='PackDetInputs') ])) val_dataloader = dict( batch_size=1, num_workers=2, persistent_workers=True, drop_last=False, sampler=dict(type='DefaultSampler', shuffle=False), dataset=dict( type='VOCDataset', data_root='data/VOCdevkit/', ann_file='VOC2007/ImageSets/Main/test.txt', data_prefix=dict(sub_data_root='VOC2007/'), test_mode=True, pipeline=[ dict( type='LoadImageFromFile', file_client_args=dict(backend='disk')), dict(type='Resize', scale=(1000, 600), keep_ratio=True), dict(type='LoadAnnotations', with_bbox=True), dict( type='PackDetInputs', meta_keys=('img_id', 
'img_path', 'ori_shape', 'img_shape', 'scale_factor')) ])) test_dataloader = dict( batch_size=1, num_workers=2, persistent_workers=True, drop_last=False, sampler=dict(type='DefaultSampler', shuffle=False), dataset=dict( type='VOCDataset', data_root='data/VOCdevkit/', ann_file='VOC2007/ImageSets/Main/test.txt', data_prefix=dict(sub_data_root='VOC2007/'), test_mode=True, pipeline=[ dict( type='LoadImageFromFile', file_client_args=dict(backend='disk')), dict(type='Resize', scale=(1000, 600), keep_ratio=True), dict(type='LoadAnnotations', with_bbox=True), dict( type='PackDetInputs', meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', 'scale_factor')) ])) val_evaluator = dict(type='VOCMetric', metric='mAP', eval_mode='11points') test_evaluator = dict(type='VOCMetric', metric='mAP', eval_mode='11points') default_scope = 'mmdet' default_hooks = dict( timer=dict(type='IterTimerHook'), logger=dict(type='LoggerHook', interval=50), param_scheduler=dict(type='ParamSchedulerHook'), checkpoint=dict(type='CheckpointHook', interval=1), sampler_seed=dict(type='DistSamplerSeedHook'), visualization=dict(type='DetVisualizationHook')) env_cfg = dict( cudnn_benchmark=False, mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0), dist_cfg=dict(backend='nccl')) vis_backends = [dict(type='LocalVisBackend')] visualizer = dict( type='DetLocalVisualizer', vis_backends=[dict(type='LocalVisBackend')], name='visualizer') log_processor = dict(type='LogProcessor', window_size=50, by_epoch=True) log_level = 'INFO' load_from = None resume = False model = dict( type='DETR', num_queries=100, data_preprocessor=dict( type='DetDataPreprocessor', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], bgr_to_rgb=True, pad_size_divisor=1), backbone=dict( type='ResNet', depth=50, num_stages=4, out_indices=(3, ), frozen_stages=1, norm_cfg=dict(type='BN', requires_grad=False), norm_eval=True, style='pytorch', init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')), neck=dict( 
type='ChannelMapper', in_channels=[2048], kernel_size=1, out_channels=256, act_cfg=None, norm_cfg=None, num_outs=1), encoder=dict( num_layers=6, layer_cfg=dict( self_attn_cfg=dict( embed_dims=256, num_heads=8, dropout=0.1, batch_first=True), ffn_cfg=dict( embed_dims=256, feedforward_channels=2048, num_fcs=2, ffn_drop=0.1, act_cfg=dict(type='ReLU', inplace=True)))), decoder=dict( num_layers=6, layer_cfg=dict( self_attn_cfg=dict( embed_dims=256, num_heads=8, dropout=0.1, batch_first=True), cross_attn_cfg=dict( embed_dims=256, num_heads=8, dropout=0.1, batch_first=True), ffn_cfg=dict( embed_dims=256, feedforward_channels=2048, num_fcs=2, ffn_drop=0.1, act_cfg=dict(type='ReLU', inplace=True))), return_intermediate=True), positional_encoding=dict(num_feats=128, normalize=True), bbox_head=dict( type='DETRHead', num_classes=2, embed_dims=256, loss_cls=dict( type='CrossEntropyLoss', bg_cls_weight=0.1, use_sigmoid=False, loss_weight=1.0, class_weight=1.0), loss_bbox=dict(type='L1Loss', loss_weight=5.0), loss_iou=dict(type='GIoULoss', loss_weight=2.0)), train_cfg=dict( assigner=dict( type='HungarianAssigner', match_costs=[ dict(type='ClassificationCost', weight=1.0), dict(type='BBoxL1Cost', weight=5.0, box_format='xywh'), dict(type='IoUCost', iou_mode='giou', weight=2.0) ])), test_cfg=dict(max_per_img=100)) optim_wrapper = dict( type='OptimWrapper', optimizer=dict(type='AdamW', lr=0.0001, weight_decay=0.0001), clip_grad=dict(max_norm=0.1, norm_type=2), paramwise_cfg=dict( custom_keys=dict(backbone=dict(lr_mult=0.1, decay_mult=1.0)))) max_epochs = 150 train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=150, val_interval=1) val_cfg = dict(type='ValLoop') test_cfg = dict(type='TestLoop') param_scheduler = [ dict( type='MultiStepLR', begin=0, end=150, by_epoch=True, milestones=[100], gamma=0.1) ] auto_scale_lr = dict(base_batch_size=16) launcher = 'none' work_dir = './work_dirs/detr_r50_8xb2-150e_coco'

07/12 16:23:44 - mmengine - INFO - Distributed training is not used, all SyncBatchNorm (SyncBN) layers in the model will be automatically reverted to BatchNormXd layers if they are used. 07/12 16:23:44 - mmengine - INFO - Hooks will be executed in the following order: before_run: (VERY_HIGH ) RuntimeInfoHook (BELOW_NORMAL) LoggerHook

before_train: (VERY_HIGH ) RuntimeInfoHook (NORMAL ) IterTimerHook (VERY_LOW ) CheckpointHook

before_train_epoch: (VERY_HIGH ) RuntimeInfoHook (NORMAL ) IterTimerHook (NORMAL ) DistSamplerSeedHook

before_train_iter: (VERY_HIGH ) RuntimeInfoHook (NORMAL ) IterTimerHook

after_train_iter: (VERY_HIGH ) RuntimeInfoHook (NORMAL ) IterTimerHook (BELOW_NORMAL) LoggerHook (LOW ) ParamSchedulerHook (VERY_LOW ) CheckpointHook

after_train_epoch: (NORMAL ) IterTimerHook (LOW ) ParamSchedulerHook (VERY_LOW ) CheckpointHook

before_val_epoch: (NORMAL ) IterTimerHook

before_val_iter: (NORMAL ) IterTimerHook

after_val_iter: (NORMAL ) IterTimerHook (NORMAL ) DetVisualizationHook (BELOW_NORMAL) LoggerHook

after_val_epoch: (VERY_HIGH ) RuntimeInfoHook (NORMAL ) IterTimerHook (BELOW_NORMAL) LoggerHook (LOW ) ParamSchedulerHook (VERY_LOW ) CheckpointHook

after_train: (VERY_LOW ) CheckpointHook

before_test_epoch: (NORMAL ) IterTimerHook

before_test_iter: (NORMAL ) IterTimerHook

after_test_iter: (NORMAL ) IterTimerHook (NORMAL ) DetVisualizationHook (BELOW_NORMAL) LoggerHook

after_test_epoch: (VERY_HIGH ) RuntimeInfoHook (NORMAL ) IterTimerHook (BELOW_NORMAL) LoggerHook

after_run: (BELOW_NORMAL) LoggerHook

Traceback (most recent call last): File "/home/ty/miniconda3/envs/mm/lib/python3.8/site-packages/mmengine/registry/build_functions.py", line 121, in build_from_cfg obj = obj_cls(**args) # type: ignore TypeError: __init__() got an unexpected keyword argument 'pipeline'

During handling of the above exception, another exception occurred:

Traceback (most recent call last): File "/home/ty/miniconda3/envs/mm/lib/python3.8/site-packages/mmengine/registry/build_functions.py", line 121, in build_from_cfg obj = obj_cls(**args) # type: ignore File "/home/ty/miniconda3/envs/mm/lib/python3.8/site-packages/mmengine/runner/loops.py", line 44, in __init__ super().__init__(runner, dataloader) File "/home/ty/miniconda3/envs/mm/lib/python3.8/site-packages/mmengine/runner/base_loop.py", line 26, in __init__ self.dataloader = runner.build_dataloader( File "/home/ty/miniconda3/envs/mm/lib/python3.8/site-packages/mmengine/runner/runner.py", line 1346, in build_dataloader dataset = DATASETS.build(dataset_cfg) File "/home/ty/miniconda3/envs/mm/lib/python3.8/site-packages/mmengine/registry/registry.py", line 545, in build return self.build_func(cfg, *args, **kwargs, registry=self) File "/home/ty/miniconda3/envs/mm/lib/python3.8/site-packages/mmengine/registry/build_functions.py", line 135, in build_from_cfg raise type(e)( TypeError: class RepeatDataset in mmengine/dataset/dataset_wrapper.py: __init__() got an unexpected keyword argument 'pipeline'

During handling of the above exception, another exception occurred:

Traceback (most recent call last): File "tools/train.py", line 124, in <module> main() File "tools/train.py", line 120, in main runner.train() File "/home/ty/miniconda3/envs/mm/lib/python3.8/site-packages/mmengine/runner/runner.py", line 1672, in train self._train_loop = self.build_train_loop( File "/home/ty/miniconda3/envs/mm/lib/python3.8/site-packages/mmengine/runner/runner.py", line 1464, in build_train_loop loop = LOOPS.build( File "/home/ty/miniconda3/envs/mm/lib/python3.8/site-packages/mmengine/registry/registry.py", line 545, in build return self.build_func(cfg, *args, **kwargs, registry=self) File "/home/ty/miniconda3/envs/mm/lib/python3.8/site-packages/mmengine/registry/build_functions.py", line 135, in build_from_cfg raise type(e)( TypeError: class EpochBasedTrainLoop in mmengine/runner/loops.py: class RepeatDataset in mmengine/dataset/dataset_wrapper.py: __init__() got an unexpected keyword argument 'pipeline'

mm-assistant[bot] commented 1 year ago

We recommend using English or English & Chinese for issues so that we could have broader discussion.

ZhangXG001 commented 1 year ago

我是一名本科大二的学生,我遇见了下面这个报错,按照官网文档我训练了自己的数据集,我采用的是VOC2007格式的自定义数据集: TypeError: class EpochBasedTrainLoop in mmengine/runner/loops.py: class RepeatDataset in mmengine/dataset/dataset_wrapper.py: __init__() got an unexpected keyword argument 'pipeline',我在网上搜索了一下,说可能是mmengine和mmdet的版本不匹配,但是我尝试了将mmdet变更为2.x,3.1,3.0.0rc6,以及将mmengine从6.0.0一路升级到8.1.1都无法处理,以下是我的训练输出,希望能得到各位大佬的帮助(训练环境是win11的乌班图子系统20.04,显卡3090,cuda11.3):

(mm) ty@DESKTOP-MAFRV84:~/mmdetection$ python tools/train.py configs/detr/detr_r50_8xb2-150e_coco.py

07/12 16:23:40 - mmengine - INFO -

System environment: sys.platform: linux Python: 3.8.16 (default, Mar 2 2023, 03:21:46) [GCC 11.2.0] CUDA available: True numpy_random_seed: 2119295261 GPU 0: NVIDIA GeForce RTX 3090 CUDA_HOME: None GCC: gcc (Ubuntu 9.4.0-1ubuntu1~20.04.1) 9.4.0 PyTorch: 1.12.1+cu113 PyTorch compiling details: PyTorch built with:

  • GCC 9.3
  • C++ Version: 201402
  • Intel(R) Math Kernel Library Version 2020.0.0 Product Build 20191122 for Intel(R) 64 architecture applications
  • Intel(R) MKL-DNN v2.6.0 (Git Hash 52b5f107dd9cf10910aaa19cb47f3abf9b349815)
  • OpenMP 201511 (a.k.a. OpenMP 4.5)
  • LAPACK is enabled (usually provided by MKL)
  • NNPACK is enabled
  • CPU capability usage: AVX2
  • CUDA Runtime 11.3
  • NVCC architecture flags: -gencode;arch=compute_37,code=sm_37;-gencode;arch=compute_50,code=sm_50;-gencode;arch=compute_60,code=sm_60;-gencode;arch=compute_70,code=sm_70;-gencode;arch=compute_75,code=sm_75;-gencode;arch=compute_80,code=sm_80;-gencode;arch=compute_86,code=sm_86
  • CuDNN 8.3.2 (built against CUDA 11.5)
  • Magma 2.5.2
  • Build settings: BLAS_INFO=mkl, BUILD_TYPE=Release, CUDA_VERSION=11.3, CUDNN_VERSION=8.3.2, CXX_COMPILER=/opt/rh/devtoolset-9/root/usr/bin/c++, CXX_FLAGS= -fabi-version=11 -Wno-deprecated -fvisibility-inlines-hidden -DUSE_PTHREADPOOL -fopenmp -DNDEBUG -DUSE_KINETO -DUSE_FBGEMM -DUSE_QNNPACK -DUSE_PYTORCH_QNNPACK -DUSE_XNNPACK -DSYMBOLICATE_MOBILE_DEBUG_HANDLE -DEDGE_PROFILER_USE_KINETO -O2 -fPIC -Wno-narrowing -Wall -Wextra -Werror=return-type -Wno-missing-field-initializers -Wno-type-limits -Wno-array-bounds -Wno-unknown-pragmas -Wno-unused-parameter -Wno-unused-function -Wno-unused-result -Wno-unused-local-typedefs -Wno-strict-overflow -Wno-strict-aliasing -Wno-error=deprecated-declarations -Wno-stringop-overflow -Wno-psabi -Wno-error=pedantic -Wno-error=redundant-decls -Wno-error=old-style-cast -fdiagnostics-color=always -faligned-new -Wno-unused-but-set-variable -Wno-maybe-uninitialized -fno-math-errno -fno-trapping-math -Werror=format -Werror=cast-function-type -Wno-stringop-overflow, LAPACK_INFO=mkl, PERF_WITH_AVX=1, PERF_WITH_AVX2=1, PERF_WITH_AVX512=1, TORCH_VERSION=1.12.1, USE_CUDA=ON, USE_CUDNN=ON, USE_EXCEPTION_PTR=1, USE_GFLAGS=OFF, USE_GLOG=OFF, USE_MKL=ON, USE_MKLDNN=OFF, USE_MPI=OFF, USE_NCCL=ON, USE_NNPACK=ON, USE_OPENMP=ON, USE_ROCM=OFF, TorchVision: 0.13.1+cu113 OpenCV: 4.7.0 MMEngine: 0.7.1

Runtime environment:

cudnn_benchmark: False mp_cfg: {'mp_start_method': 'fork', 'opencv_num_threads': 0} dist_cfg: {'backend': 'nccl'} seed: None Distributed launcher: none Distributed training: False GPU number: 1 07/12 16:23:41 - mmengine - INFO - Config: dataset_type = 'VOCDataset' data_root = 'data/VOCdevkit/' file_client_args = dict(backend='disk') train_pipeline = [ dict(type='LoadImageFromFile', file_client_args=dict(backend='disk')), dict(type='LoadAnnotations', with_bbox=True), dict(type='RandomFlip', prob=0.5), dict( type='RandomChoice', transforms=[[{ 'type': 'RandomChoiceResize', 'scales': [(480, 1333), (512, 1333), (544, 1333), (576, 1333), (608, 1333), (640, 1333), (672, 1333), (704, 1333), (736, 1333), (768, 1333), (800, 1333)], 'keep_ratio': True }], [{ 'type': 'RandomChoiceResize', 'scales': [(400, 1333), (500, 1333), (600, 1333)], 'keep_ratio': True }, { 'type': 'RandomCrop', 'crop_type': 'absolute_range', 'crop_size': (384, 600), 'allow_negative_crop': True }, { 'type': 'RandomChoiceResize', 'scales': [(480, 1333), (512, 1333), (544, 1333), (576, 1333), (608, 1333), (640, 1333), (672, 1333), (704, 1333), (736, 1333), (768, 1333), (800, 1333)], 'keep_ratio': True }]]), dict(type='PackDetInputs') ] test_pipeline = [ dict(type='LoadImageFromFile', file_client_args=dict(backend='disk')), dict(type='Resize', scale=(1000, 600), keep_ratio=True), dict(type='LoadAnnotations', with_bbox=True), dict( type='PackDetInputs', meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', 'scale_factor')) ] train_dataloader = dict( batch_size=2, num_workers=2, persistent_workers=True, sampler=dict(type='DefaultSampler', shuffle=True), batch_sampler=dict(type='AspectRatioBatchSampler'), dataset=dict( type='RepeatDataset', times=3, dataset=dict( type='ConcatDataset', ignore_keys=['dataset_type'], datasets=[ dict( type='VOCDataset', data_root='data/VOCdevkit/', ann_file='VOC2007/ImageSets/Main/trainval.txt', data_prefix=dict(sub_data_root='VOC2007/'), filter_cfg=dict( 
filter_empty_gt=True, min_size=32, bbox_min_size=32), pipeline=[ dict( type='LoadImageFromFile', file_client_args=dict(backend='disk')), dict(type='LoadAnnotations', with_bbox=True), dict( type='Resize', scale=(1000, 600), keep_ratio=True), dict(type='RandomFlip', prob=0.5), dict(type='PackDetInputs') ]), dict( type='VOCDataset', data_root='data/VOCdevkit/', ann_file='VOC2007/ImageSets/Main/trainval.txt', data_prefix=dict(sub_data_root='VOC2007/'), filter_cfg=dict( filter_empty_gt=True, min_size=32, bbox_min_size=32), pipeline=[ dict( type='LoadImageFromFile', file_client_args=dict(backend='disk')), dict(type='LoadAnnotations', with_bbox=True), dict( type='Resize', scale=(1000, 600), keep_ratio=True), dict(type='RandomFlip', prob=0.5), dict(type='PackDetInputs') ]) ]), pipeline=[ dict( type='LoadImageFromFile', file_client_args=dict(backend='disk')), dict(type='LoadAnnotations', with_bbox=True), dict(type='RandomFlip', prob=0.5), dict( type='RandomChoice', transforms=[[{ 'type': 'RandomChoiceResize', 'scales': [(480, 1333), (512, 1333), (544, 1333), (576, 1333), (608, 1333), (640, 1333), (672, 1333), (704, 1333), (736, 1333), (768, 1333), (800, 1333)], 'keep_ratio': True }], [{ 'type': 'RandomChoiceResize', 'scales': [(400, 1333), (500, 1333), (600, 1333)], 'keep_ratio': True }, { 'type': 'RandomCrop', 'crop_type': 'absolute_range', 'crop_size': (384, 600), 'allow_negative_crop': True }, { 'type': 'RandomChoiceResize', 'scales': [(480, 1333), (512, 1333), (544, 1333), (576, 1333), (608, 1333), (640, 1333), (672, 1333), (704, 1333), (736, 1333), (768, 1333), (800, 1333)], 'keep_ratio': True }]]), dict(type='PackDetInputs') ])) val_dataloader = dict( batch_size=1, num_workers=2, persistent_workers=True, drop_last=False, sampler=dict(type='DefaultSampler', shuffle=False), dataset=dict( type='VOCDataset', data_root='data/VOCdevkit/', ann_file='VOC2007/ImageSets/Main/test.txt', data_prefix=dict(sub_data_root='VOC2007/'), test_mode=True, pipeline=[ dict( 
type='LoadImageFromFile', file_client_args=dict(backend='disk')), dict(type='Resize', scale=(1000, 600), keep_ratio=True), dict(type='LoadAnnotations', with_bbox=True), dict( type='PackDetInputs', meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', 'scale_factor')) ])) test_dataloader = dict( batch_size=1, num_workers=2, persistent_workers=True, drop_last=False, sampler=dict(type='DefaultSampler', shuffle=False), dataset=dict( type='VOCDataset', data_root='data/VOCdevkit/', ann_file='VOC2007/ImageSets/Main/test.txt', data_prefix=dict(sub_data_root='VOC2007/'), test_mode=True, pipeline=[ dict( type='LoadImageFromFile', file_client_args=dict(backend='disk')), dict(type='Resize', scale=(1000, 600), keep_ratio=True), dict(type='LoadAnnotations', with_bbox=True), dict( type='PackDetInputs', meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', 'scale_factor')) ])) val_evaluator = dict(type='VOCMetric', metric='mAP', eval_mode='11points') test_evaluator = dict(type='VOCMetric', metric='mAP', eval_mode='11points') default_scope = 'mmdet' default_hooks = dict( timer=dict(type='IterTimerHook'), logger=dict(type='LoggerHook', interval=50), param_scheduler=dict(type='ParamSchedulerHook'), checkpoint=dict(type='CheckpointHook', interval=1), sampler_seed=dict(type='DistSamplerSeedHook'), visualization=dict(type='DetVisualizationHook')) env_cfg = dict( cudnn_benchmark=False, mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0), dist_cfg=dict(backend='nccl')) vis_backends = [dict(type='LocalVisBackend')] visualizer = dict( type='DetLocalVisualizer', vis_backends=[dict(type='LocalVisBackend')], name='visualizer') log_processor = dict(type='LogProcessor', window_size=50, by_epoch=True) log_level = 'INFO' load_from = None resume = False model = dict( type='DETR', num_queries=100, data_preprocessor=dict( type='DetDataPreprocessor', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], bgr_to_rgb=True, pad_size_divisor=1), backbone=dict( type='ResNet', 
depth=50, num_stages=4, out_indices=(3, ), frozen_stages=1, norm_cfg=dict(type='BN', requires_grad=False), norm_eval=True, style='pytorch', init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')), neck=dict( type='ChannelMapper', in_channels=[2048], kernel_size=1, out_channels=256, act_cfg=None, norm_cfg=None, num_outs=1), encoder=dict( num_layers=6, layer_cfg=dict( self_attn_cfg=dict( embed_dims=256, num_heads=8, dropout=0.1, batch_first=True), ffn_cfg=dict( embed_dims=256, feedforward_channels=2048, num_fcs=2, ffn_drop=0.1, act_cfg=dict(type='ReLU', inplace=True)))), decoder=dict( num_layers=6, layer_cfg=dict( self_attn_cfg=dict( embed_dims=256, num_heads=8, dropout=0.1, batch_first=True), cross_attn_cfg=dict( embed_dims=256, num_heads=8, dropout=0.1, batch_first=True), ffn_cfg=dict( embed_dims=256, feedforward_channels=2048, num_fcs=2, ffn_drop=0.1, act_cfg=dict(type='ReLU', inplace=True))), return_intermediate=True), positional_encoding=dict(num_feats=128, normalize=True), bbox_head=dict( type='DETRHead', num_classes=2, embed_dims=256, loss_cls=dict( type='CrossEntropyLoss', bg_cls_weight=0.1, use_sigmoid=False, loss_weight=1.0, class_weight=1.0), loss_bbox=dict(type='L1Loss', loss_weight=5.0), loss_iou=dict(type='GIoULoss', loss_weight=2.0)), train_cfg=dict( assigner=dict( type='HungarianAssigner', match_costs=[ dict(type='ClassificationCost', weight=1.0), dict(type='BBoxL1Cost', weight=5.0, box_format='xywh'), dict(type='IoUCost', iou_mode='giou', weight=2.0) ])), test_cfg=dict(max_per_img=100)) optim_wrapper = dict( type='OptimWrapper', optimizer=dict(type='AdamW', lr=0.0001, weight_decay=0.0001), clip_grad=dict(max_norm=0.1, norm_type=2), paramwise_cfg=dict( custom_keys=dict(backbone=dict(lr_mult=0.1, decay_mult=1.0)))) max_epochs = 150 train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=150, val_interval=1) val_cfg = dict(type='ValLoop') test_cfg = dict(type='TestLoop') param_scheduler = [ dict( type='MultiStepLR', begin=0, end=150, 
by_epoch=True, milestones=[100], gamma=0.1) ] auto_scale_lr = dict(base_batch_size=16) launcher = 'none' work_dir = './work_dirs/detr_r50_8xb2-150e_coco'

07/12 16:23:44 - mmengine - INFO - Distributed training is not used, all SyncBatchNorm (SyncBN) layers in the model will be automatically reverted to BatchNormXd layers if they are used.

07/12 16:23:44 - mmengine - INFO - Hooks will be executed in the following order: before_run: (VERY_HIGH ) RuntimeInfoHook (BELOW_NORMAL) LoggerHook

before_train:

(VERY_HIGH ) RuntimeInfoHook (NORMAL ) IterTimerHook (VERY_LOW ) CheckpointHook

before_train_epoch:

(VERY_HIGH ) RuntimeInfoHook (NORMAL ) IterTimerHook (NORMAL ) DistSamplerSeedHook

before_train_iter:

(VERY_HIGH ) RuntimeInfoHook (NORMAL ) IterTimerHook

after_train_iter:

(VERY_HIGH ) RuntimeInfoHook (NORMAL ) IterTimerHook (BELOW_NORMAL) LoggerHook (LOW ) ParamSchedulerHook (VERY_LOW ) CheckpointHook

after_train_epoch:

(NORMAL ) IterTimerHook (LOW ) ParamSchedulerHook (VERY_LOW ) CheckpointHook

before_val_epoch:

(NORMAL ) IterTimerHook

before_val_iter:

(NORMAL ) IterTimerHook

after_val_iter:

(NORMAL ) IterTimerHook (NORMAL ) DetVisualizationHook (BELOW_NORMAL) LoggerHook

after_val_epoch:

(VERY_HIGH ) RuntimeInfoHook (NORMAL ) IterTimerHook (BELOW_NORMAL) LoggerHook (LOW ) ParamSchedulerHook (VERY_LOW ) CheckpointHook

after_train:

(VERY_LOW ) CheckpointHook

before_test_epoch:

(NORMAL ) IterTimerHook

before_test_iter:

(NORMAL ) IterTimerHook

after_test_iter:

(NORMAL ) IterTimerHook (NORMAL ) DetVisualizationHook (BELOW_NORMAL) LoggerHook

after_test_epoch:

(VERY_HIGH ) RuntimeInfoHook (NORMAL ) IterTimerHook (BELOW_NORMAL) LoggerHook

after_run:

(BELOW_NORMAL) LoggerHook Traceback (most recent call last): File "/home/ty/miniconda3/envs/mm/lib/python3.8/site-packages/mmengine/registry/build_functions.py", line 121, in build_from_cfg obj = obj_cls(**args) # type: ignore TypeError: __init__() got an unexpected keyword argument 'pipeline'

During handling of the above exception, another exception occurred:

Traceback (most recent call last): File "/home/ty/miniconda3/envs/mm/lib/python3.8/site-packages/mmengine/registry/build_functions.py", line 121, in build_from_cfg obj = obj_cls(**args) # type: ignore File "/home/ty/miniconda3/envs/mm/lib/python3.8/site-packages/mmengine/runner/loops.py", line 44, in __init__ super().__init__(runner, dataloader) File "/home/ty/miniconda3/envs/mm/lib/python3.8/site-packages/mmengine/runner/base_loop.py", line 26, in __init__ self.dataloader = runner.build_dataloader( File "/home/ty/miniconda3/envs/mm/lib/python3.8/site-packages/mmengine/runner/runner.py", line 1346, in build_dataloader dataset = DATASETS.build(dataset_cfg) File "/home/ty/miniconda3/envs/mm/lib/python3.8/site-packages/mmengine/registry/registry.py", line 545, in build return self.build_func(cfg, *args, **kwargs, registry=self) File "/home/ty/miniconda3/envs/mm/lib/python3.8/site-packages/mmengine/registry/build_functions.py", line 135, in build_from_cfg raise type(e)( TypeError: class RepeatDataset in mmengine/dataset/dataset_wrapper.py: __init__() got an unexpected keyword argument 'pipeline'

During handling of the above exception, another exception occurred:

Traceback (most recent call last): File "tools/train.py", line 124, in <module> main() File "tools/train.py", line 120, in main runner.train() File "/home/ty/miniconda3/envs/mm/lib/python3.8/site-packages/mmengine/runner/runner.py", line 1672, in train self._train_loop = self.build_train_loop( File "/home/ty/miniconda3/envs/mm/lib/python3.8/site-packages/mmengine/runner/runner.py", line 1464, in build_train_loop loop = LOOPS.build( File "/home/ty/miniconda3/envs/mm/lib/python3.8/site-packages/mmengine/registry/registry.py", line 545, in build return self.build_func(cfg, *args, **kwargs, registry=self) File "/home/ty/miniconda3/envs/mm/lib/python3.8/site-packages/mmengine/registry/build_functions.py", line 135, in build_from_cfg raise type(e)( TypeError: class EpochBasedTrainLoop in mmengine/runner/loops.py: class RepeatDataset in mmengine/dataset/dataset_wrapper.py: __init__() got an unexpected keyword argument 'pipeline'

refer #10622

bitterhoneyy commented 6 days ago

the same issue...