total_epochs = 50
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = './parcel_NAIP/solo_release_r50_fpn_50epoch'
load_from = None
resume_from = None
workflow = [('train', 1)]
filter_empty_gt = True
2022-03-02 22:06:42,553 - mmdet - INFO - load model from: torchvision://resnet50
2022-03-02 22:06:43,129 - mmdet - WARNING - The model and loaded state dict do not match exactly
size mismatch for conv1.weight: copying a param with shape torch.Size([64, 3, 7, 7]) from checkpoint, the shape in current model is torch.Size([64, 4, 7, 7]).
unexpected key in source state_dict: fc.weight, fc.bias
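The size mismatch for conv1.weight looks expected rather than fatal here: the backbone is configured with in_channels=4 for the 4-band NAIP imagery, while the torchvision://resnet50 checkpoint was trained on 3-channel RGB, so its first conv has shape [64, 3, 7, 7] and is simply skipped (conv1 keeps its random initialisation, as the warning above indicates). If that warning should go away, one common workaround, sketched below and not part of the config in this issue, is to build a 4-channel checkpoint by reusing the mean of the RGB filters for the extra band (the file name resnet50_4band.pth is only an example):

# Sketch: turn the 3-channel torchvision ResNet-50 weights into a 4-channel
# checkpoint so the pretrained conv1 weights are not discarded.
import torch
import torchvision

state_dict = torchvision.models.resnet50(pretrained=True).state_dict()

w = state_dict['conv1.weight']                        # [64, 3, 7, 7]
extra = w.mean(dim=1, keepdim=True)                   # mean of the RGB filters
state_dict['conv1.weight'] = torch.cat([w, extra], dim=1)   # [64, 4, 7, 7]

# fc.* is the ImageNet classification head; the detector never uses it
state_dict.pop('fc.weight', None)
state_dict.pop('fc.bias', None)

torch.save(state_dict, 'resnet50_4band.pth')
# then pretrained='resnet50_4band.pth' in the model config should load cleanly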
File "/home/py21/SOLO-master/mmdet/datasets/custom.py", line 131, in getitem
data = self.prepare_train_img(idx)
File "/home/py21/SOLO-master/mmdet/datasets/custom.py", line 144, in prepare_train_img
return self.pipeline(results)
File "/home/py21/SOLO-master/mmdet/datasets/pipelines/compose.py", line 24, in call
data = t(data)
File "/home/py21/SOLO-master/mmdet/datasets/pipelines/transforms.py", line 176, in call
self._resize_masks(results)
File "/home/py21/SOLO-master/mmdet/datasets/pipelines/transforms.py", line 159, in _resize_masks
results[key] = np.stack(masks)
File "<__array_function__ internals>", line 6, in stack
File "/home/py21/anaconda3/envs/solo/lib/python3.7/site-packages/numpy/core/shape_base.py", line 423, in stack
raise ValueError('need at least one array to stack')
ValueError: need at least one array to stack
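The ValueError itself comes from numpy: np.stack() refuses an empty sequence, so _resize_masks must have been handed an empty list of masks for some training image. A two-line check (just a sketch, unrelated to the training code) reproduces the exact message:

import numpy as np
np.stack([])   # ValueError: need at least one array to stack
               # i.e. results['gt_masks'] was an empty list for that sample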
When I switched to a dataset I defined myself, the above bug appeared. Here are my parameters:

2022-03-02 22:06:41,743 - mmdet - INFO - Distributed training: False
2022-03-02 22:06:41,744 - mmdet - INFO - MMDetection Version: 1.0.0+unknown
2022-03-02 22:06:41,744 - mmdet - INFO - Config:
# model settings
model = dict(
    type='SOLO',
    pretrained='torchvision://resnet50',
    backbone=dict(
        type='ResNet',
        depth=50,
        in_channels=4,
        num_stages=4,
        out_indices=(0, 1, 2, 3),  # C2, C3, C4, C5
        frozen_stages=1,
        style='pytorch'),
    neck=dict(
        type='FPN',
        in_channels=[256, 512, 1024, 2048],
        out_channels=256,
        start_level=0,
        num_outs=5),
    bbox_head=dict(
        type='SOLOHead',
        num_classes=2,
        in_channels=256,
        stacked_convs=7,
        seg_feat_channels=256,
        strides=[8, 8, 16, 32, 32],
        scale_ranges=((1, 96), (48, 192), (96, 384), (192, 768), (384, 2048)),
        sigma=0.2,
        num_grids=[40, 36, 24, 16, 12],
        cate_down_pos=0,
        with_deform=False,
        loss_ins=dict(
            type='DiceLoss',
            use_sigmoid=True,
            loss_weight=3.0),
        loss_cate=dict(
            type='FocalLoss',
            use_sigmoid=True,
            gamma=2.0,
            alpha=0.25,
            loss_weight=1.0),
    ))
# training and testing settings
train_cfg = dict()
test_cfg = dict(
    nms_pre=500,
    score_thr=0.1,
    mask_thr=0.5,
    update_thr=0.05,
    kernel='gaussian',  # gaussian/linear
    sigma=2.0,
    max_per_img=100)
# dataset settings
dataset_type = 'CocoDataset'
classes = ('cropland_parcel',)
data_root = '/home/py21/data_py/San_joaquin_2014_NAIP/'
img_norm_cfg = dict(
    mean=[125.9752, 125.6327, 101.7500, 138.4956],
    std=[45.8814, 38.8707, 38.4250, 43.5888],
    to_rgb=False)
train_pipeline = [
    dict(type='LoadRSImage', to_float32=True),
    dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
    dict(type='Resize', img_scale=(512, 512), keep_ratio=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),
]
test_pipeline = [
    dict(type='LoadRSImage', to_float32=True),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(512, 512),
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='Pad', size_divisor=32),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]
data = dict(
    imgs_per_gpu=8,
    workers_per_gpu=0,
    train=dict(
        type=dataset_type,
        ann_file=data_root + 'San_joaquin_2014_NAIP_train.json',
        img_prefix=data_root + 'train_coco/',
        pipeline=train_pipeline),
    val=dict(
        type=dataset_type,
        ann_file=data_root + 'San_joaquin_2014_NAIP_valid.json',
        img_prefix=data_root + 'valid_coco/',
        pipeline=test_pipeline),
    test=dict(
        type=dataset_type,
        ann_file=data_root + 'San_joaquin_2014_NAIP_valid.json',
        img_prefix=data_root + 'valid_coco/',
        pipeline=test_pipeline),
)
# optimizer
optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
evaluation = dict(interval=1, save_best='segm_mAP', metric=['bbox', 'segm'])
# learning policy
lr_config = dict(
    policy='step',
    warmup='linear',
    warmup_iters=500,
    warmup_ratio=0.001,
    step=[32, 44])
checkpoint_config = dict(interval=1)
# yapf:disable
log_config = dict(
    interval=50,
    hooks=[
        dict(type='TextLoggerHook'),
        dict(type='TensorboardLoggerHook')
    ])
# yapf:enable
# runtime settings
total_epochs = 50
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = './parcel_NAIP/solo_release_r50_fpn_50epoch'
load_from = None
resume_from = None
workflow = [('train', 1)]
filter_empty_gt = True

2022-03-02 22:06:42,553 - mmdet - INFO - load model from: torchvision://resnet50
2022-03-02 22:06:43,129 - mmdet - WARNING - The model and loaded state dict do not match exactly
size mismatch for conv1.weight: copying a param with shape torch.Size([64, 3, 7, 7]) from checkpoint, the shape in current model is torch.Size([64, 4, 7, 7]).
unexpected key in source state_dict: fc.weight, fc.bias
2022-03-02 22:06:50,715 - mmdet - INFO - Start running, host: py21@gpu3, work_dir: /home/py21/SOLO-master/parcel_NAIP/solo_release_r50_fpn_50epoch
2022-03-02 22:06:50,715 - mmdet - INFO - workflow: [('train', 1)], max: 50 epochs
2022-03-02 22:11:43,107 - mmdet - INFO - Epoch [1][50/307] lr: 0.00099, eta: 1 day, 0:51:08, time: 5.848, data_time: 1.795, memory: 8342, loss_ins: 2.9401, loss_cate: 0.5971, loss: 3.5373
2022-03-02 22:16:05,093 - mmdet - INFO - Epoch [1][100/307] lr: 0.00199, eta: 23:29:00, time: 5.240, data_time: 1.742, memory: 8347, loss_ins: 2.9399, loss_cate: 0.4326, loss: 3.3725
File "/home/py21/SOLO-master/mmdet/datasets/custom.py", line 131, in getitem data = self.prepare_train_img(idx) File "/home/py21/SOLO-master/mmdet/datasets/custom.py", line 144, in prepare_train_img return self.pipeline(results) File "/home/py21/SOLO-master/mmdet/datasets/pipelines/compose.py", line 24, in call data = t(data) File "/home/py21/SOLO-master/mmdet/datasets/pipelines/transforms.py", line 176, in call self._resize_masks(results) File "/home/py21/SOLO-master/mmdet/datasets/pipelines/transforms.py", line 159, in _resize_masks results[key] = np.stack(masks) File "<__array_function__ internals>", line 6, in stack File "/home/py21/anaconda3/envs/solo/lib/python3.7/site-packages/numpy/core/shape_base.py", line 423, in stack raise ValueError('need at least one array to stack') ValueError: need at least one array to stack
Could you tell me what the specific problem is?