open-mmlab / mmdetection

OpenMMLab Detection Toolbox and Benchmark
https://mmdetection.readthedocs.io
Apache License 2.0
29.31k stars 9.42k forks source link

subprocess.CalledProcessError: Command '['C:\\ruanjian\\conda\\envs\\openmmlab\\python.exe', 'c:\\ruanjian\\project\\mmdetection\\mmdet\\.mim\\tools\\train.py', './z_detr_on_agar/detr_agar.py', '--launcher', 'none']' returned non-zero exit status 1. #10872

Open FengRongYue opened 1 year ago

FengRongYue commented 1 year ago

I am trying to train DETR on my dataset. The config file 'detr_agar.py' I used is below. base = './detr_r50_8xb2-150e_coco.py' classes = ('S.aureus', 'B.subtilis', 'P.aeruginosa', 'E.coli', 'C.albicans') data_root = "C:/DATASET/AGAR_dataset/AGAR_COCO_13_split/" test_dataloader = dict( dataset=dict( ann_file= data_root + 'annotations/val.json', data_prefix=dict(img=data_root + 'val/'), data_root=data_root, ) ) test_evaluator = dict( ann_file=data_root + 'annotations/val.json', )

train_dataloader = dict( batch_sampler=dict(type='AspectRatioBatchSampler'), batch_size=2, dataset=dict( ann_file= data_root + 'annotations/train.json', backend_args=None, data_prefix=dict(img= data_root + 'train/'), data_root=data_root, ) )

val_dataloader = dict( batch_size=1, dataset=dict( ann_file=data_root + 'annotations/val.json', data_prefix=dict(img= data_root + 'val/'), data_root=data_root, ), ) val_evaluator = dict( ann_file=data_root + 'annotations/val.json', )

model = dict( bbox_head=dict( num_classes=5, ) )

The command I used is "mim train mmdet ./z_detr_on_agar/detr_agar.py" (see the screenshot below).

auto_scale_lr = dict(base_batch_size=16) backend_args = None classes = ( 'S.aureus', 'B.subtilis', 'P.aeruginosa', 'E.coli', 'C.albicans', ) data_root = 'C:\DATASET\AGAR_dataset\AGAR_COCO_13_split\' dataset_type = 'CocoDataset' default_hooks = dict( checkpoint=dict(interval=1, type='CheckpointHook'), logger=dict(interval=50, type='LoggerHook'), param_scheduler=dict(type='ParamSchedulerHook'), sampler_seed=dict(type='DistSamplerSeedHook'), timer=dict(type='IterTimerHook'), visualization=dict(type='DetVisualizationHook')) default_scope = 'mmdet' env_cfg = dict( cudnn_benchmark=False, dist_cfg=dict(backend='nccl'), mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0)) launcher = 'none' load_from = None log_level = 'INFO' log_processor = dict(by_epoch=True, type='LogProcessor', window_size=50) max_epochs = 150 model = dict( backbone=dict( depth=50, frozen_stages=1, init_cfg=dict(checkpoint='torchvision://resnet50', type='Pretrained'), norm_cfg=dict(requires_grad=False, type='BN'), norm_eval=True, num_stages=4, out_indices=(3, ), style='pytorch', type='ResNet'), bbox_head=dict( embed_dims=256, loss_bbox=dict(loss_weight=5.0, type='L1Loss'), loss_cls=dict( bg_cls_weight=0.1, class_weight=1.0, loss_weight=1.0, type='CrossEntropyLoss', use_sigmoid=False), loss_iou=dict(loss_weight=2.0, type='GIoULoss'), num_classes=5, type='DETRHead'), data_preprocessor=dict( bgr_to_rgb=True, mean=[ 123.675, 116.28, 103.53, ], pad_size_divisor=1, std=[ 58.395, 57.12, 57.375, ], type='DetDataPreprocessor'), decoder=dict( layer_cfg=dict( cross_attn_cfg=dict( batch_first=True, dropout=0.1, embed_dims=256, num_heads=8), ffn_cfg=dict( act_cfg=dict(inplace=True, type='ReLU'), embed_dims=256, feedforward_channels=2048, ffn_drop=0.1, num_fcs=2), self_attn_cfg=dict( batch_first=True, dropout=0.1, embed_dims=256, num_heads=8)), num_layers=6, return_intermediate=True), encoder=dict( layer_cfg=dict( ffn_cfg=dict( act_cfg=dict(inplace=True, type='ReLU'), embed_dims=256, feedforward_channels=2048, 
ffn_drop=0.1, num_fcs=2), self_attn_cfg=dict( batch_first=True, dropout=0.1, embed_dims=256, num_heads=8)), num_layers=6), neck=dict( act_cfg=None, in_channels=[ 2048, ], kernel_size=1, norm_cfg=None, num_outs=1, out_channels=256, type='ChannelMapper'), num_queries=100, positional_encoding=dict(normalize=True, num_feats=128), test_cfg=dict(max_per_img=100), train_cfg=dict( assigner=dict( match_costs=[ dict(type='ClassificationCost', weight=1.0), dict(box_format='xywh', type='BBoxL1Cost', weight=5.0), dict(iou_mode='giou', type='IoUCost', weight=2.0), ], type='HungarianAssigner')), type='DETR') optim_wrapper = dict( clip_grad=dict(max_norm=0.1, norm_type=2), optimizer=dict(lr=0.0001, type='AdamW', weight_decay=0.0001), paramwise_cfg=dict( custom_keys=dict(backbone=dict(decay_mult=1.0, lr_mult=0.1))), type='OptimWrapper') param_scheduler = [ dict( begin=0, by_epoch=True, end=150, gamma=0.1, milestones=[ 100, ], type='MultiStepLR'), ] resume = False test_cfg = dict(type='TestLoop') test_dataloader = dict( batch_size=1, dataset=dict( ann_file= 'C:\DATASET\AGAR_dataset\AGAR_COCO_13_split\annotations\val.json', backend_args=None, data_prefix=dict( img='C:\DATASET\AGAR_dataset\AGAR_COCO_13_split\val\'), data_root='C:\DATASET\AGAR_dataset\AGAR_COCO_13_split\', pipeline=[ dict(backend_args=None, type='LoadImageFromFile'), dict(keep_ratio=True, scale=( 1333, 800, ), type='Resize'), dict(type='LoadAnnotations', with_bbox=True), dict( meta_keys=( 'img_id', 'img_path', 'ori_shape', 'img_shape', 'scale_factor', ), type='PackDetInputs'), ], test_mode=True, type='CocoDataset'), drop_last=False, num_workers=2, persistent_workers=True, sampler=dict(shuffle=False, type='DefaultSampler')) test_evaluator = dict( ann_file= 'C:\DATASET\AGAR_dataset\AGAR_COCO_13_split\annotations\val.json', backend_args=None, format_only=False, metric='bbox', type='CocoMetric') test_pipeline = [ dict(backend_args=None, type='LoadImageFromFile'), dict(keep_ratio=True, scale=( 1333, 800, ), type='Resize'), 
dict(type='LoadAnnotations', with_bbox=True), dict( meta_keys=( 'img_id', 'img_path', 'ori_shape', 'img_shape', 'scale_factor', ), type='PackDetInputs'), ] train_cfg = dict(max_epochs=150, type='EpochBasedTrainLoop', val_interval=1) train_dataloader = dict( batch_sampler=dict(type='AspectRatioBatchSampler'), batch_size=2, dataset=dict( ann_file= 'C:\DATASET\AGAR_dataset\AGAR_COCO_13_split\annotations\train.json', backend_args=None, data_prefix=dict( img='C:\DATASET\AGAR_dataset\AGAR_COCO_13_split\train\'), data_root='C:\DATASET\AGAR_dataset\AGAR_COCO_13_split\', filter_cfg=dict(filter_empty_gt=True, min_size=32), pipeline=[ dict(backend_args=None, type='LoadImageFromFile'), dict(type='LoadAnnotations', with_bbox=True), dict(prob=0.5, type='RandomFlip'), dict( transforms=[ [ dict( keep_ratio=True, scales=[ ( 480, 1333, ), ( 512, 1333, ), ( 544, 1333, ), ( 576, 1333, ), ( 608, 1333, ), ( 640, 1333, ), ( 672, 1333, ), ( 704, 1333, ), ( 736, 1333, ), ( 768, 1333, ), ( 800, 1333, ), ], type='RandomChoiceResize'), ], [ dict( keep_ratio=True, scales=[ ( 400, 1333, ), ( 500, 1333, ), ( 600, 1333, ), ], type='RandomChoiceResize'), dict( allow_negative_crop=True, crop_size=( 384, 600, ), crop_type='absolute_range', type='RandomCrop'), dict( keep_ratio=True, scales=[ ( 480, 1333, ), ( 512, 1333, ), ( 544, 1333, ), ( 576, 1333, ), ( 608, 1333, ), ( 640, 1333, ), ( 672, 1333, ), ( 704, 1333, ), ( 736, 1333, ), ( 768, 1333, ), ( 800, 1333, ), ], type='RandomChoiceResize'), ], ], type='RandomChoice'), dict(type='PackDetInputs'), ], type='CocoDataset'), num_workers=2, persistent_workers=True, sampler=dict(shuffle=True, type='DefaultSampler')) train_pipeline = [ dict(backend_args=None, type='LoadImageFromFile'), dict(type='LoadAnnotations', with_bbox=True), dict(prob=0.5, type='RandomFlip'), dict( transforms=[ [ dict( keep_ratio=True, scales=[ ( 480, 1333, ), ( 512, 1333, ), ( 544, 1333, ), ( 576, 1333, ), ( 608, 1333, ), ( 640, 1333, ), ( 672, 1333, ), ( 704, 1333, ), ( 736, 1333, 
), ( 768, 1333, ), ( 800, 1333, ), ], type='RandomChoiceResize'), ], [ dict( keep_ratio=True, scales=[ ( 400, 1333, ), ( 500, 1333, ), ( 600, 1333, ), ], type='RandomChoiceResize'), dict( allow_negative_crop=True, crop_size=( 384, 600, ), crop_type='absolute_range', type='RandomCrop'), dict( keep_ratio=True, scales=[ ( 480, 1333, ), ( 512, 1333, ), ( 544, 1333, ), ( 576, 1333, ), ( 608, 1333, ), ( 640, 1333, ), ( 672, 1333, ), ( 704, 1333, ), ( 736, 1333, ), ( 768, 1333, ), ( 800, 1333, ), ], type='RandomChoiceResize'), ], ], type='RandomChoice'), dict(type='PackDetInputs'), ] val_cfg = dict(type='ValLoop') val_dataloader = dict( batch_size=1, dataset=dict( ann_file= 'C:\DATASET\AGAR_dataset\AGAR_COCO_13_split\annotations\val.json', backend_args=None, data_prefix=dict( img='C:\DATASET\AGAR_dataset\AGAR_COCO_13_split\val\'), data_root='C:\DATASET\AGAR_dataset\AGAR_COCO_13_split\', pipeline=[ dict(backend_args=None, type='LoadImageFromFile'), dict(keep_ratio=True, scale=( 1333, 800, ), type='Resize'), dict(type='LoadAnnotations', with_bbox=True), dict( meta_keys=( 'img_id', 'img_path', 'ori_shape', 'img_shape', 'scale_factor', ), type='PackDetInputs'), ], test_mode=True, type='CocoDataset'), drop_last=False, num_workers=2, persistent_workers=True, sampler=dict(shuffle=False, type='DefaultSampler')) val_evaluator = dict( ann_file= 'C:\DATASET\AGAR_dataset\AGAR_COCO_13_split\annotations\val.json', backend_args=None, format_only=False, metric='bbox', type='CocoMetric') vis_backends = [ dict(type='LocalVisBackend'), ] visualizer = dict( name='visualizer', type='DetLocalVisualizer', vis_backends=[ dict(type='LocalVisBackend'), ]) work_dir = './work_dirs\detr_agar'

How can I fix this problem?

hhaAndroid commented 1 year ago

@FengRongYue The error information provided is not specific, making it difficult to troubleshoot. Please provide detailed error information.

FengRongYue commented 1 year ago

@FengRongYue The error information provided is not specific, making it difficult to troubleshoot. Please provide detailed error information.

@hhaAndroid I solved the problem: I had omitted the `metainfo` field (the class names) from my dataset config.

forestemperor commented 1 year ago

@FengRongYue The error information provided is not specific, making it difficult to troubleshoot. Please provide detailed error information.

@hhaAndroid I solved the problem: I had omitted the `metainfo` field (the class names) from my dataset config.

Hello, I just encountered the same error when training a Cascade R-CNN model, but I am still confused about your answer. Would you please make it more specific for me?