open-mmlab / mmdetection

OpenMMLab Detection Toolbox and Benchmark
https://mmdetection.readthedocs.io
Apache License 2.0

IndexError in /tools/test_robustness.py #2727

Closed. tiltgod closed this issue 4 years ago.

tiltgod commented 4 years ago

Checklist

  1. I have searched related issues but cannot get the expected help.
  2. The bug has not been fixed in the latest version.

Describe the bug I want to test robustness with my model and my custom dataset in COCO format. It worked in the previous version of mmdetection, and normal testing with /tools/test.py still works, but in mmdetection 2.0 it raises 'IndexError: only integers, slices (:), ellipsis (...), numpy.newaxis (None) and integer or boolean arrays are valid indices' after the annotations are loaded and the index is created.

Reproduction

  1. What command or script did you run? I ran the following in Google Colab:

     !python3 /content/mmdetection/tools/test_robustness.py /content/gdrive/'My Drive'/panet.py /content/gdrive/'My Drive'/panet101/fold1/epoch_200.pth --eval segm bbox --out /content/gdrive/'My Drive'/panet101/panet_robust.pkl --corruptions snow --severities 1

  2. Did you make any modifications on the code or config? Did you understand what you have modified? My config:

model=dict(
    type='MaskRCNN',
    pretrained='torchvision://resnet50',
    backbone=dict( type='ResNet', depth=50, num_stages=4, out_indices=(0, 1, 2, 3), frozen_stages=1, norm_cfg=dict( type='BN', requires_grad=True), norm_eval=True, style='pytorch'),
    neck=dict( type='PAFPN', in_channels=[256, 512, 1024, 2048], out_channels=256, num_outs=5),
    rpn_head=dict(
        type='RPNHead', in_channels=256, feat_channels=256,
        anchor_generator=dict( type='AnchorGenerator', scales=[8], ratios=[0.5, 1.0, 2.0], strides=[4, 8, 16, 32, 64]),
        bbox_coder=dict( type='DeltaXYWHBBoxCoder', target_means=[0.0, 0.0, 0.0, 0.0], target_stds=[1.0, 1.0, 1.0, 1.0]),
        loss_cls=dict( type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
        loss_bbox=dict( type='L1Loss', loss_weight=1.0)),
    roi_head=dict(
        type='StandardRoIHead',
        bbox_roi_extractor=dict( type='SingleRoIExtractor', roi_layer=dict( type='RoIAlign', out_size=7, sample_num=0), out_channels=256, featmap_strides=[4, 8, 16, 32]),
        bbox_head=dict(
            type='Shared2FCBBoxHead', in_channels=256, fc_out_channels=1024, roi_feat_size=7, num_classes=19,
            bbox_coder=dict( type='DeltaXYWHBBoxCoder', target_means=[0.0, 0.0, 0.0, 0.0], target_stds=[0.1, 0.1, 0.2, 0.2]),
            reg_class_agnostic=False,
            loss_cls=dict( type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
            loss_bbox=dict( type='L1Loss', loss_weight=1.0)),
        mask_roi_extractor=dict( type='SingleRoIExtractor', roi_layer=dict( type='RoIAlign', out_size=14, sample_num=0), out_channels=256, featmap_strides=[4, 8, 16, 32]),
        mask_head=dict( type='FCNMaskHead', num_convs=4, in_channels=256, conv_out_channels=256, num_classes=19, loss_mask=dict( type='CrossEntropyLoss', use_mask=True, loss_weight=1.0))))
train_cfg=dict(
    rpn=dict(
        assigner=dict( type='MaxIoUAssigner', pos_iou_thr=0.7, neg_iou_thr=0.3, min_pos_iou=0.3, match_low_quality=True, ignore_iof_thr=-1),
        sampler=dict( type='RandomSampler', num=256, pos_fraction=0.5, neg_pos_ub=-1, add_gt_as_proposals=False),
        allowed_border=-1, pos_weight=-1, debug=False),
    rpn_proposal=dict( nms_across_levels=False, nms_pre=2000, nms_post=1000, max_num=1000, nms_thr=0.7, min_bbox_size=0),
    rcnn=dict(
        assigner=dict( type='MaxIoUAssigner', pos_iou_thr=0.5, neg_iou_thr=0.5, min_pos_iou=0.5, match_low_quality=True, ignore_iof_thr=-1),
        sampler=dict( type='RandomSampler', num=512, pos_fraction=0.25, neg_pos_ub=-1, add_gt_as_proposals=True),
        mask_size=28, pos_weight=-1, debug=False))
test_cfg=dict(
    rpn=dict( nms_across_levels=False, nms_pre=1000, nms_post=1000, max_num=1000, nms_thr=0.7, min_bbox_size=0),
    rcnn=dict( score_thr=0.05, nms=dict( type='nms', iou_thr=0.5), max_per_img=100, mask_thr_binary=0.5))
dataset_type='CocoDataset'
classes=('back_bumper', 'back_glass', 'back_left_door', 'back_left_light', 'back_right_door', 'back_right_light', 'front_bumper', 'front_glass', 'front_left_door', 'front_left_light', 'front_right_door', 'front_right_light', 'hood', 'left_mirror', 'right_mirror', 'tailgate', 'trunk', 'wheel')
data_root=''
img_norm_cfg=dict( mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline=[
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
    dict(type='Resize', img_scale=(1024, 1024), keep_ratio=True),
    dict(type='RandomFlip', flip_ratio=0.0),
    dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True),
    dict(type='Pad', size_divisor=32),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks'])]
test_pipeline=[
    dict(type='LoadImageFromFile'),
    dict(type='MultiScaleFlipAug', img_scale=(1333, 800), flip=False, transforms=[
        dict(type='Resize', keep_ratio=True),
        dict(type='RandomFlip'),
        dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True),
        dict(type='Pad', size_divisor=32),
        dict(type='ImageToTensor', keys=['img']),
        dict(type='Collect', keys=['img'])])]
data=dict(
    samples_per_gpu=2,
    workers_per_gpu=2,
    train=dict(
        type='CocoDataset',
        ann_file='/content/gdrive/My Drive/workdir/trainingset/annotations.json',
        img_prefix='/content/gdrive/My Drive/workdir/trainingset/',
        pipeline=[
            dict(type='LoadImageFromFile'),
            dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
            dict(type='Resize', img_scale=(1024, 1024), keep_ratio=True),
            dict(type='RandomFlip', flip_ratio=0.0),
            dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True),
            dict(type='Pad', size_divisor=32),
            dict(type='DefaultFormatBundle'),
            dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks'])]),
    val=dict(
        type='CocoDataset',
        ann_file='data/coco/annotations/instances_val2017.json',
        img_prefix='data/coco/val2017/',
        pipeline=[
            dict(type='LoadImageFromFile'),
            dict(type='MultiScaleFlipAug', img_scale=(1333, 800), flip=False, transforms=[
                dict(type='Resize', keep_ratio=True),
                dict(type='RandomFlip'),
                dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True),
                dict(type='Pad', size_divisor=32),
                dict(type='ImageToTensor', keys=['img']),
                dict(type='Collect', keys=['img'])])]),
    test=dict(
        type='CocoDataset',
        ann_file='/content/gdrive/My Drive/workdir/testset/annotations.json',
        img_prefix='/content/gdrive/My Drive/workdir/testset/',
        pipeline=[
            dict(type='LoadImageFromFile'),
            dict(type='MultiScaleFlipAug', img_scale=(1024, 1024), flip=False, transforms=[
                dict(type='Resize', keep_ratio=True),
                dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True),
                dict(type='Pad', size_divisor=32),
                dict(type='ImageToTensor', keys=['img']),
                dict(type='Collect', keys=['img'])])]))
evaluation=dict( interval=1, metric=['bbox', 'segm'])
optimizer=dict( type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
optimizer_config=dict( grad_clip=None)
lr_config=dict( policy='step', warmup='linear', warmup_iters=500, warmup_ratio=0.001, step=[8, 11])
total_epochs=200
checkpoint_config=dict( interval=1)
log_config=dict( interval=50, hooks=[ dict(type='TextLoggerHook')])
dist_params=dict( backend='nccl')
log_level='INFO'
load_from=None
resume_from='/content/gdrive/My Drive/panet101/fold1/epoch_150.pth'
workflow=[('train', 1)]
work_dir='/content/gdrive/My Drive/panet101/fold1'

Modification in mmdet/datasets/coco.py:

@DATASETS.register_module()
class CocoDataset(CustomDataset):
    CLASSES = ('back_bumper', 'back_glass', 'back_left_door', 'back_left_light', 'back_right_door',
               'back_right_light', 'front_bumper', 'front_glass', 'front_left_door', 'front_left_light',
               'front_right_door', 'front_right_light', 'hood', 'left_mirror', 'right_mirror',
               'tailgate', 'trunk', 'wheel')

  3. What dataset did you use? My custom dataset in COCO format.

Environment

My Google Colab env:

sys.platform: linux
Python: 3.6.9 (default, Apr 18 2020, 01:56:04) [GCC 8.4.0]
CUDA available: True
CUDA_HOME: /usr/local/cuda
NVCC: Cuda compilation tools, release 10.1, V10.1.243
GPU 0: Tesla P100-PCIE-16GB
GCC: gcc (Ubuntu 7.5.0-3ubuntu1~18.04) 7.5.0
PyTorch: 1.5.0+cu101
PyTorch compiling details: PyTorch built with:

Error traceback If applicable, paste the error traceback here.

Testing snow at severity 1
loading annotations into memory...
Done (t=0.02s)
creating index...
index created!
[>>] 100/100, 4.1 task/s, elapsed: 25s, ETA: 0s
Starting evaluate segm and bbox
Traceback (most recent call last):
  File "/content/mmdetection/tools/test_robustness.py", line 445, in <module>
    main()
  File "/content/mmdetection/tools/test_robustness.py", line 404, in main
    outputs, args.out)
  File "/content/mmdetection/mmdet/datasets/coco.py", line 236, in results2json
    json_results = self._segm2json(results)
  File "/content/mmdetection/mmdet/datasets/coco.py", line 204, in _segm2json
    if isinstance(segms[i]['counts'], bytes):
IndexError: only integers, slices (:), ellipsis (...), numpy.newaxis (None) and integer or boolean arrays are valid indices
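
The failing line treats segms[i] as a COCO RLE dict, so the message is the one numpy raises when an ndarray is indexed with a string key. A minimal sketch, independent of mmdetection and using a made-up mask, that reproduces the same IndexError:

import numpy as np

segms = [np.zeros((4, 4), dtype=bool)]  # a raw boolean mask instead of an RLE dict

try:
    segms[0]['counts']  # valid only on RLE dicts; on an ndarray this is string indexing
except IndexError as err:
    print(err)  # "only integers, slices (`:`), ellipsis (`...`), ... are valid indices"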

Tord-Zhang commented 4 years ago

Hi, did you solve this problem?

tiltgod commented 4 years ago

Hi, did you solve this problem?

nope ;w;

tiltgod commented 4 years ago

I printed segms without indexing and got this. It's not bytes. @hellock @ZwwWayne

[array([[False, False, False, ..., False, False, False],
        [False, False, False, ..., False, False, False],
        [False, False, False, ..., False, False, False],
        ...,
        [False, False, False, ..., False, False, False],
        [False, False, False, ..., False, False, False],
        [False, False, False, ..., False, False, False]])]
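
For reference, the check in _segm2json expects each segmentation to be a COCO RLE dict whose 'counts' field is bytes, not a raw boolean array like the one printed above. A minimal sketch (assuming pycocotools is installed, which CocoDataset already requires) of what that expected format looks like:

import numpy as np
import pycocotools.mask as mask_util

mask = np.zeros((4, 4), dtype=bool)            # stand-in for a raw predicted mask
rle = mask_util.encode(
    np.asfortranarray(mask.astype(np.uint8)))  # encode() needs a Fortran-ordered uint8 array
print(type(rle['counts']))                     # <class 'bytes'>, which is what _segm2json checks
rle['counts'] = rle['counts'].decode()         # the decode step that crashes on raw ndarrays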

virusapex commented 4 years ago

I trained a Mask R-CNN model on a COCO-style dataset and got the same segmentation values, so I can't run inference with them; I can only output bboxes. I will try to look for workarounds, but hopefully @hellock can look into this.

tiltgod commented 4 years ago

I solved this. @mangdian @virusapex, try this:

for i in range(bboxes.shape[0]):
    data = dict()
    data['image_id'] = img_id
    data['bbox'] = self.xyxy2xywh(bboxes[i])
    data['score'] = float(mask_score[i])
    data['category_id'] = self.cat_ids[label]
    # Commented out: segms[i] is a raw numpy mask here, so indexing it with
    # ['counts'] is what raises the IndexError. Skipping these lines avoids the
    # crash but leaves 'segmentation' out of the JSON results.
    # if isinstance(segms[i]['counts'], bytes):
    #     segms[i]['counts'] = segms[i]['counts'].decode()
    # data['segmentation'] = segms[i]
    segm_json_results.append(data)
return bbox_json_results, segm_json_results

in mmdet/datasets/coco.py, inside def _segm2json.
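
An alternative (an untested sketch; to_coco_rle is a hypothetical helper, not part of mmdetection) that keeps the masks in the JSON results instead of dropping them would be to encode raw ndarray masks to RLE before the bytes check:

import numpy as np
import pycocotools.mask as mask_util

def to_coco_rle(segm):
    """Return a JSON-serializable COCO RLE dict for either an RLE dict or a raw mask."""
    if isinstance(segm, np.ndarray):
        # raw boolean/uint8 mask -> RLE dict with a bytes 'counts' field
        segm = mask_util.encode(np.asfortranarray(segm.astype(np.uint8)))
    if isinstance(segm['counts'], bytes):
        segm['counts'] = segm['counts'].decode()
    return segm

With such a helper, the commented-out lines above could become data['segmentation'] = to_coco_rle(segms[i]).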

virusapex commented 4 years ago

It probably worked for you, since it's for converting segmentation values to JSON, but I'm still looking for a solution while trying to run inference with the model. The point of failure for me is this:

~/mmdetection/mmdet/models/detectors/base.py in show_result(self, img, result, score_thr, bbox_color, text_color, thickness, font_scale, win_name, show, wait_time, out_file)
    210                 i = int(i)
    211                 color_mask = color_masks[labels[i]]
--> 212                 mask = maskUtils.decode(segms[i]).astype(np.bool)
    213                 img[mask] = img[mask] * 0.5 + color_mask * 0.5
    214         # if out_file specified, do not show image in window

/usr/local/lib/python3.6/dist-packages/pycocotools/mask.py in decode(rleObjs)
     89         return _mask.decode(rleObjs)
     90     else:
---> 91         return _mask.decode([rleObjs])[:,:,0]
     92 
     93 def area(rleObjs):

pycocotools/_mask.pyx in pycocotools._mask.decode()

pycocotools/_mask.pyx in pycocotools._mask._frString()

IndexError: only integers, slices (`:`), ellipsis (`...`), numpy.newaxis (`None`) and integer or boolean arrays are valid indices
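
show_result trips over the same mismatch: maskUtils.decode expects a COCO RLE dict, but segms[i] here seems to already be a decoded boolean mask, as in the earlier comment. A minimal local workaround sketch under that assumption (to_bool_mask is a hypothetical helper, not mmdetection API):

import numpy as np
import pycocotools.mask as maskUtils

def to_bool_mask(segm):
    """Accept either a COCO RLE dict or an already-decoded mask array."""
    if isinstance(segm, dict):
        return maskUtils.decode(segm).astype(bool)
    return np.asarray(segm, dtype=bool)

# line 212 of base.py could then use: mask = to_bool_mask(segms[i])
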
tiltgod commented 4 years ago

Sorry, then #2734 should help.

virusapex commented 4 years ago

No, there is nothing to be sorry about. I just mentioned it, since it seems to be a similar problem in a way. Thank you for linking it to me.