and error is below :
05/24 11:13:03 - mmengine - INFO - Epoch(val) [1][6400/7143] eta: 0:01:02 time: 0.0744 data_time: 0.0008 memory: 715 05/24 11:13:07 - mmengine - INFO - Epoch(val) [1][6450/7143] eta: 0:00:58 time: 0.0857 data_time: 0.0009 memory: 715 File "C:\Users\Administrator\miniconda3\envs\mmdet\lib\site-packages\mmengine\runner\loops.py", line 102, in run self.runner.val_loop.run() File "C:\Users\Administrator\miniconda3\envs\mmdet\lib\site-packages\mmengine\runner\loops.py", line 374, in run metrics = self.evaluator.evaluate(len(self.dataloader.dataset)) File "C:\Users\Administrator\miniconda3\envs\mmdet\lib\site-packages\mmengine\evaluator\evaluator.py", line 79, in evaluate _results = metric.evaluate(size) File "C:\Users\Administrator\miniconda3\envs\mmdet\lib\site-packages\mmengine\evaluator\metric.py", line 133, in evaluate _metrics = self.compute_metrics(results) # type: ignore File "c:\users\administrator\desktop\mmdetection-main\mmdet\evaluation\metrics\coco_metric.py", line 462, in compute_metrics coco_dt = self._coco_api.loadRes(predictions) File "C:\Users\Administrator\miniconda3\envs\mmdet\lib\site-packages\pycocotools\coco.py", line 327, in loadRes assert set(annsImgIds) == (set(annsImgIds) & set(self.getImgIds())), \ AssertionError: Results do not correspond to current coco set
This is my config file `base = [ '../base/datasets/sixray_detection.py', '../base/default_runtime.py' ]
-------------------------------------------------
记录一下更改内容
第一,改上面的数据集
第二,将frozen_block=1注释掉
第三,RandomChoiceResize中的图片比例不符合320*320,需要更改
第四,类别数从80-->15
第五,由于只有一张显卡,所以auto_scale_lr的batch_size=2
第六,改为加载全部预训练模型load_from
后续可能需要更改学习率,num_workers,num_queries,以及多少轮测试与保存等
-------------------------------------------------
train_batch_size=2
metainfo = {
'classes': ('Gun', 'Knife', 'Lighter', 'Battery', 'Pliers', 'Scissors', 'Wrench', 'Hammer', 'Screwdriver', 'Dart', 'Bat', 'Fireworks', 'Saw_blade',
'Razor_blade', 'Pressure_vessel'),
'palette': [
(220, 20, 60), (119, 11, 32), (0, 0, 142), (0, 0, 230),
(106, 0, 228), (0, 60, 100), (0, 80, 100), (0, 0, 70),
(0, 0, 192), (250, 170, 30), (100, 170, 30), (220, 220, 0),
(175, 116, 175), (250, 0, 30), (165, 42, 42)
]
}
model = dict( type='DINO', num_queries=85, # num_matching_queries 900 with_box_refine=True, as_two_stage=True, data_preprocessor=dict( type='DetDataPreprocessor', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], bgr_to_rgb=True, pad_size_divisor=1), backbone=dict( type='ResNet', depth=50, num_stages=4, out_indices=(1, 2, 3),
frozen_stages=1,
train_pipeline, NOTE the img_scale and the Pad's size_divisor is different
from the default setting in mmdet.
train_pipeline = [ dict(type='LoadImageFromFile', backend_args={{base.backend_args}}), dict(type='LoadAnnotations', with_bbox=True), dict(type='RandomFlip', prob=0.5), dict( type='RandomChoice', transforms=[ [ dict( type='RandomChoiceResize', scales=[(480, 1333), (512, 1333), (544, 1333), (576, 1333), (608, 1333), (640, 1333), (672, 1333), (704, 1333), (736, 1333), (768, 1333), (800, 1333)],
scales=[(192, 320), (205, 320), (218, 320), (230, 320),
] train_dataloader = dict( batch_size=train_batch_size, dataset=dict( filter_cfg=dict(filter_empty_gt=False), pipeline=train_pipeline))
optimizer
optim_wrapper = dict( type='OptimWrapper', optimizer=dict( type='AdamW', lr=0.0002, # 0.0002 for DeformDETR weight_decay=0.0001), clip_grad=dict(max_norm=0.1, norm_type=2), paramwise_cfg=dict(custom_keys={'backbone': dict(lr_mult=0.1)}) ) # custom_keys contains sampling_offsets and reference_points in DeformDETR # noqa
learning policy
max_epochs = 12 train_cfg = dict( type='EpochBasedTrainLoop', max_epochs=max_epochs, val_interval=1)
val_cfg = dict(type='ValLoop') test_cfg = dict(type='TestLoop')
param_scheduler = [ dict( type='MultiStepLR', begin=0, end=max_epochs, by_epoch=True, milestones=[11], gamma=0.1) ]
NOTE:
auto_scale_lr
is for automatically scaling LR,USER SHOULD NOT CHANGE ITS VALUES.
base_batch_size = (8 GPUs) x (2 samples per GPU)
auto_scale_lr = dict(base_batch_size=train_batch_size)
load_from = 'E:\D2E\Projects\DINO_mmdet3\pretrained_model\dino-4scale_r50_8xb2-12e_coco_20221202_182705-55b2bba2.pth'`
and
sixray_detection.py
filedataset settings
dataset_type = 'CocoDataset' data_root = 'D:/Dataset/SIXray/'
metainfo = { 'classes': ('Gun', 'Knife', 'Wrench','Pliers','Scissors','Hammer'), 'palette': [ (220, 20, 60), (119, 11, 32), (0, 0, 142), (0, 0, 230), (106, 0, 228), (0, 60, 100) ] }
backend_args = None
train_pipeline = [ dict(type='LoadImageFromFile', backend_args=backend_args), dict(type='LoadAnnotations', with_bbox=True), dict(type='Resize', scale=(320, 320), keep_ratio=True), dict(type='RandomFlip', prob=0.5), dict(type='PackDetInputs') ] test_pipeline = [ dict(type='LoadImageFromFile', backend_args=backend_args), dict(type='Resize', scale=(320, 320), keep_ratio=True),
If you don't have a gt annotation, delete the pipeline
] train_dataloader = dict( batch_size=2, num_workers=2, persistent_workers=True, sampler=dict(type='DefaultSampler', shuffle=True), batch_sampler=dict(type='AspectRatioBatchSampler'), dataset=dict( type=dataset_type, metainfo=metainfo, data_root=data_root, ann_file='AnnotationCOCO/voc07_trainval.json', data_prefix=dict(img='VOCdata/VOCdevkit/'), filter_cfg=dict(filter_empty_gt=True, min_size=32), pipeline=train_pipeline, backend_args=backend_args)) val_dataloader = dict( batch_size=1, num_workers=2, persistent_workers=True, drop_last=False, sampler=dict(type='DefaultSampler', shuffle=False), dataset=dict( type=dataset_type, metainfo=metainfo, data_root=data_root, ann_file='AnnotationCOCO/voc07_trainval.json', data_prefix=dict(img='VOCdata/VOCdevkit/'), test_mode=True, pipeline=test_pipeline, backend_args=backend_args)) test_dataloader = dict( batch_size=1, num_workers=2, persistent_workers=True, drop_last=False, sampler=dict(type='DefaultSampler', shuffle=False), dataset=dict( type=dataset_type, metainfo=metainfo, data_root=data_root, ann_file='AnnotationCOCO/voc07_test.json', data_prefix=dict(img='VOCdata/VOCdevkit/'), test_mode=True, pipeline=test_pipeline, backend_args=backend_args))
val_evaluator = dict( type='CocoMetric', ann_file=data_root + 'AnnotationCOCO/voc07_val.json', metric='bbox', format_only=False, backend_args=backend_args) test_evaluator = val_evaluator
and error is below :
05/24 11:13:03 - mmengine - INFO - Epoch(val) [1][6400/7143] eta: 0:01:02 time: 0.0744 data_time: 0.0008 memory: 715 05/24 11:13:07 - mmengine - INFO - Epoch(val) [1][6450/7143] eta: 0:00:58 time: 0.0857 data_time: 0.0009 memory: 715 File "C:\Users\Administrator\miniconda3\envs\mmdet\lib\site-packages\mmengine\runner\loops.py", line 102, in run self.runner.val_loop.run() File "C:\Users\Administrator\miniconda3\envs\mmdet\lib\site-packages\mmengine\runner\loops.py", line 374, in run metrics = self.evaluator.evaluate(len(self.dataloader.dataset)) File "C:\Users\Administrator\miniconda3\envs\mmdet\lib\site-packages\mmengine\evaluator\evaluator.py", line 79, in evaluate _results = metric.evaluate(size) File "C:\Users\Administrator\miniconda3\envs\mmdet\lib\site-packages\mmengine\evaluator\metric.py", line 133, in evaluate _metrics = self.compute_metrics(results) # type: ignore File "c:\users\administrator\desktop\mmdetection-main\mmdet\evaluation\metrics\coco_metric.py", line 462, in compute_metrics coco_dt = self._coco_api.loadRes(predictions) File "C:\Users\Administrator\miniconda3\envs\mmdet\lib\site-packages\pycocotools\coco.py", line 327, in loadRes assert set(annsImgIds) == (set(annsImgIds) & set(self.getImgIds())), \ AssertionError: Results do not correspond to current coco set