Closed CodeXiaoLingYun closed 4 years ago
I found an answer, but he says: "Ok, I fixed my bugs. I mixed up width and height params in annotations. Thank you guys." I cannot understand the meaning of this sentence.
I use the code to check the type of my image id.
tmp = json1['images'][0]['id'] print(type(tmp)) #### But it shows:
<class 'int'> ####
I solved the problem. I found that the type of image_id is a string in train_coco.py (the annotation file), so I changed it and no longer get the same error.
I solved the problem. I found that the type of image_id is a string in train_coco.py (the annotation file), so I changed it and no longer get the same error.
I have the same error. But I don't have train_coco.py. Is that a file you created yourself?
I solved the problem. I found that the type of image_id is a string in train_coco.py (the annotation file), so I changed it and no longer get the same error.
I have the same error. But I don't have train_coco.py. Is that a file you created yourself?
The image_id is a field in your dataset (COCO format); the annotation is a dict similar to {annotation: {image_id: 1, xxxx: yyyy, ...}}. My problem was that the value of image_id was a string, like image_id: '1'. I used code to change the type, and then everything was OK.
I solved the problem. I found that the type of image_id is a string in train_coco.py (the annotation file), so I changed it and no longer get the same error.
I have the same error. But I don't have train_coco.py. Is that a file you created yourself?
The image_id is a field in your dataset (COCO format); the annotation is a dict similar to {annotation: {image_id: 1, xxxx: yyyy, ...}}. My problem was that the value of image_id was a string, like image_id: '1'. I used code to change the type, and then everything was OK.
Thank you! I finally solved this problem this way.
I have this issue on np.concatenate(indices). I used my dataset with coco format python tools/train.py configs/pn_test.py pn_test is copy from mask_rcnn_x101_64x4d_fpn_1x.py 2020-03-04 17:56:10,433 - mmdet - INFO - Distributed training: False 2020-03-04 17:56:10,433 - mmdet - INFO - Config:
model settings
model = dict( type='MaskRCNN', pretrained='open-mmlab://resnext101_64x4d', backbone=dict( type='ResNeXt', depth=101, groups=64, base_width=4, num_stages=4, out_indices=(0, 1, 2, 3), frozen_stages=1, style='pytorch'), neck=dict( type='FPN', in_channels=[256, 512, 1024, 2048], out_channels=256, num_outs=5), rpn_head=dict( type='RPNHead', in_channels=256, feat_channels=256, anchor_scales=[8], anchor_ratios=[0.5, 1.0, 2.0], anchor_strides=[4, 8, 16, 32, 64], target_means=[.0, .0, .0, .0], target_stds=[1.0, 1.0, 1.0, 1.0], loss_cls=dict( type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), loss_bbox=dict(type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0)), bbox_roi_extractor=dict( type='SingleRoIExtractor', roi_layer=dict(type='RoIAlign', out_size=7, sample_num=2), out_channels=256, featmap_strides=[4, 8, 16, 32]), bbox_head=dict( type='SharedFCBBoxHead', num_fcs=2, in_channels=256, fc_out_channels=1024, roi_feat_size=7, num_classes=2, target_means=[0., 0., 0., 0.], target_stds=[0.1, 0.1, 0.2, 0.2], reg_class_agnostic=False, loss_cls=dict( type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)), mask_roi_extractor=dict( type='SingleRoIExtractor', roi_layer=dict(type='RoIAlign', out_size=14, sample_num=2), out_channels=256, featmap_strides=[4, 8, 16, 32]), mask_head=dict( type='FCNMaskHead', num_convs=4, in_channels=256, conv_out_channels=256, num_classes=2, loss_mask=dict( type='CrossEntropyLoss', use_mask=True, loss_weight=1.0)))
model training and testing settings
train_cfg = dict( rpn=dict( assigner=dict( type='MaxIoUAssigner', pos_iou_thr=0.7, neg_iou_thr=0.3, min_pos_iou=0.3, ignore_iof_thr=-1), sampler=dict( type='RandomSampler', num=256, pos_fraction=0.5, neg_pos_ub=-1, add_gt_as_proposals=False), allowed_border=0, pos_weight=-1, debug=False), rpn_proposal=dict( nms_across_levels=False, nms_pre=2000, nms_post=2000, max_num=2000, nms_thr=0.7, min_bbox_size=0), rcnn=dict( assigner=dict( type='MaxIoUAssigner', pos_iou_thr=0.5, neg_iou_thr=0.5, min_pos_iou=0.5, ignore_iof_thr=-1), sampler=dict( type='RandomSampler', num=512, pos_fraction=0.25, neg_pos_ub=-1, add_gt_as_proposals=True), mask_size=28, pos_weight=-1, debug=False)) test_cfg = dict( rpn=dict( nms_across_levels=False, nms_pre=1000, nms_post=1000, max_num=1000, nms_thr=0.7, min_bbox_size=0), rcnn=dict( score_thr=0.05, nms=dict(type='nms', iou_thr=0.5), max_per_img=100, mask_thr_binary=0.5))
dataset settings
dataset_type = 'PnDataset' data_root = 'data/coco/' img_norm_cfg = dict( mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) train_pipeline = [ dict(type='LoadImageFromFile'), dict(type='LoadAnnotations', with_bbox=True, with_mask=True), dict(type='Resize', img_scale=(1333, 800), keep_ratio=True), dict(type='RandomFlip', flip_ratio=0.5), dict(type='Normalize', img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='DefaultFormatBundle'), dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']), ] test_pipeline = [ dict(type='LoadImageFromFile'), dict( type='MultiScaleFlipAug', img_scale=(1333, 800), flip=False, transforms=[ dict(type='Resize', keep_ratio=True), dict(type='RandomFlip'), dict(type='Normalize', img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='ImageToTensor', keys=['img']), dict(type='Collect', keys=['img']), ]) ] data = dict( imgs_per_gpu=2, workers_per_gpu=2, train=dict( type=dataset_type, ann_file=data_root + 'annotations/pn_train.json', img_prefix=data_root + 'train2017/', pipeline=train_pipeline), val=dict( type=dataset_type, ann_file=data_root + 'annotations/pn_val.json', img_prefix=data_root + 'val2017/', pipeline=test_pipeline), test=dict( type=dataset_type, ann_file=data_root + 'annotations/pn_test.json', img_prefix=data_root + 'test2017/', pipeline=test_pipeline)) evaluation = dict(interval=1, metric=['bbox', 'segm'])
optimizer 这里默认的是8核GPU的学习率,0.02/8 = 0.0025
optimizer = dict(type='SGD', lr=0.0025, momentum=0.9, weight_decay=0.0001) optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
learning policy
lr_config = dict( policy='step', warmup='linear', warmup_iters=500, warmup_ratio=1.0 / 3, step=[8, 11]) checkpoint_config = dict(interval=1)
yapf:disable
log_config = dict( interval=50, hooks=[ dict(type='TextLoggerHook'),
dict(type='TensorboardLoggerHook')
yapf:enable
runtime settings
total_epochs = 12 dist_params = dict(backend='nccl') log_level = 'INFO'
work_dir = './work_dirs/mask_rcnn_x101_64x4d_fpn_1x'
work_dir = './checkpoints/pn_mask_rcnn_x101_64x4d_fpn_1x' load_from = None resume_from = None workflow = [('train', 1)]
2020-03-04 17:56:11,498 - mmdet - INFO - load model from: open-mmlab://resnext101_64x4d loading annotations into memory... Done (t=0.01s) creating index... index created! 2020-03-04 17:56:13,547 - mmdet - INFO - Start running, host: xly@xly-Ubuntu, work_dir: /home/xly/mmdetection/checkpoints/pn_mask_rcnn_x101_64x4d_fpn_1x 2020-03-04 17:56:13,548 - mmdet - INFO - workflow: [('train', 1)], max: 12 epochs Traceback (most recent call last): File "tools/train.py", line 142, in
main()
File "tools/train.py", line 138, in main
meta=meta)
File "/home/xly/mmdetection/mmdet/apis/train.py", line 111, in train_detector
meta=meta)
File "/home/xly/mmdetection/mmdet/apis/train.py", line 305, in _non_dist_train
runner.run(data_loaders, cfg.workflow, cfg.total_epochs)
File "/home/xly/anaconda3/envs/envtest/lib/python3.6/site-packages/mmcv/runner/runner.py", line 371, in run
epoch_runner(data_loaders[i], **kwargs)
File "/home/xly/anaconda3/envs/envtest/lib/python3.6/site-packages/mmcv/runner/runner.py", line 271, in train
for i, data_batch in enumerate(data_loader):
File "/home/xly/anaconda3/envs/envtest/lib/python3.6/site-packages/torch/utils/data/dataloader.py", line 279, in iter
return _MultiProcessingDataLoaderIter(self)
File "/home/xly/anaconda3/envs/envtest/lib/python3.6/site-packages/torch/utils/data/dataloader.py", line 746, in init
self._try_put_index()
File "/home/xly/anaconda3/envs/envtest/lib/python3.6/site-packages/torch/utils/data/dataloader.py", line 861, in _try_put_index
index = self._next_index()
File "/home/xly/anaconda3/envs/envtest/lib/python3.6/site-packages/torch/utils/data/dataloader.py", line 339, in _next_index
return next(self._sampler_iter) # may raise StopIteration
File "/home/xly/anaconda3/envs/envtest/lib/python3.6/site-packages/torch/utils/data/sampler.py", line 200, in iter
for idx in self.sampler:
File "/home/xly/mmdetection/mmdet/datasets/loader/sampler.py", line 63, in iter
indices = np.concatenate(indices)
File "<__array_function__ internals>", line 6, in concatenate
ValueError: need at least one array to concatenate
I printed the result; the data cannot be read (len(self.sampler) == 0), but I checked that the config paths are OK. Sorry, I closed this issue by mistake. Now we continue.