open-mmlab / mmsegmentation

OpenMMLab Semantic Segmentation Toolbox and Benchmark.
https://mmsegmentation.readthedocs.io/en/main/
Apache License 2.0

Train with own data: IndexError: Target 7 is out of bounds. #3609


monxarat commented 3 months ago

Hi guys, I am training DeepLabV3+ on my own data, but when I run training the following error appears. How can I fix it? My configs, dataset class, data layout, and the full traceback are below.

1. `configs/gloves/gloves-viz.py`

```python
_base_ = [
    '../_base_/models/deeplabv3plus_r50-d8.py',
    '../_base_/datasets/gloves.py',
    '../_base_/default_runtime.py',
    '../_base_/schedules/schedule_20k.py'
]
crop_size = (512, 512)
data_preprocessor = dict(size=crop_size)
model = dict(
    data_preprocessor=data_preprocessor,
    decode_head=dict(num_classes=2),
    auxiliary_head=dict(num_classes=2))
```
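For reference, the merged result of the `_base_` files plus these overrides can be inspected with MMEngine's config loader; a minimal sketch, assuming the file sits at `configs/gloves/gloves-viz.py` relative to the repo root:

```python
from mmengine.config import Config

# resolve the _base_ inheritance and the local overrides
cfg = Config.fromfile('configs/gloves/gloves-viz.py')

# both heads should report 2 classes after the override
print(cfg.model.decode_head.num_classes)
print(cfg.model.auxiliary_head.num_classes)
```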
2. `configs/_base_/datasets/gloves.py`

```python
# dataset settings
dataset_type = 'GlovesDataset'
data_root = 'data/gloves'
img_norm_cfg = dict(
    mean=[121.13, 118.48, 115.98], std=[3.14, 2.97, 3.15], to_rgb=True)
crop_size = (512, 512)
img_scale = (2048, 512)

train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations'),
    dict(
        type='RandomResize',
        scale=img_scale,
        ratio_range=(0.5, 2.0),
        keep_ratio=True),
    dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
    dict(type='RandomFlip', prob=0.5),
    dict(type='PhotoMetricDistortion'),
    dict(type='PackSegInputs')
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='Resize', scale=img_scale, keep_ratio=True),
    # add loading annotation after Resize because ground truth
    # does not need to do resize data transform
    dict(type='LoadAnnotations'),
    dict(type='PackSegInputs')
]
img_ratios = [0.5, 0.75, 1.0, 1.25, 1.5, 1.75]
tta_pipeline = [
    dict(type='LoadImageFromFile', backend_args=None),
    dict(
        type='TestTimeAug',
        transforms=[
            [
                dict(type='Resize', scale_factor=r, keep_ratio=True)
                for r in img_ratios
            ],
            [
                dict(type='RandomFlip', prob=0., direction='horizontal'),
                dict(type='RandomFlip', prob=1., direction='horizontal')
            ],
            [dict(type='LoadAnnotations')],
            [dict(type='PackSegInputs')]
        ])
]

train_dataloader = dict(
    batch_size=4,
    num_workers=4,
    persistent_workers=True,
    sampler=dict(type='InfiniteSampler', shuffle=True),
    dataset=dict(
        type='RepeatDataset',
        times=40000,
        dataset=dict(
            type=dataset_type,
            data_root=data_root,
            data_prefix=dict(
                img_path='images/training',
                seg_map_path='annotations/training'),
            pipeline=train_pipeline)))

val_dataloader = dict(
    batch_size=1,
    num_workers=4,
    persistent_workers=True,
    sampler=dict(type='DefaultSampler', shuffle=False),
    dataset=dict(
        type=dataset_type,
        data_root=data_root,
        data_prefix=dict(
            img_path='images/validation',
            seg_map_path='annotations/validation'),
        pipeline=test_pipeline))
test_dataloader = val_dataloader

val_evaluator = dict(type='IoUMetric', iou_metrics=['mIoU'])
test_evaluator = val_evaluator
```

3. `mmseg/datasets/gloves.py`

```python
import mmengine.fileio as fileio

from mmseg.registry import DATASETS
from .basesegdataset import BaseSegDataset


@DATASETS.register_module()
class GlovesDataset(BaseSegDataset):
    """Gloves dataset with two classes: background and hand."""
    METAINFO = dict(
        classes=('background', 'hand'),
        palette=[[0, 0, 0], [255, 255, 255]])

    def __init__(self,
                 img_suffix='.JPG',
                 seg_map_suffix='.JPG',
                 **kwargs) -> None:
        super().__init__(
            img_suffix=img_suffix,
            seg_map_suffix=seg_map_suffix,
            **kwargs)
        assert fileio.exists(
            self.data_prefix['img_path'], backend_args=self.backend_args)
```
4. Data layout (`data/gloves`)

```
gloves
├── annotations
│   ├── training
│   └── validation
└── images
    ├── training
    └── validation
```

5. Data example: training image `IMG_5069` and its annotation `IMG_5069` (images attached to the issue).

6. Error log

```
Traceback (most recent call last):
  File "/Users/user1/Downloads/mmsegmentation/tools/train.py", line 104, in <module>
    main()
  File "/Users/user1/Downloads/mmsegmentation/tools/train.py", line 100, in main
    runner.train()
  File "/opt/anaconda3/envs/openmmlab/lib/python3.8/site-packages/mmengine/runner/runner.py", line 1777, in train
    model = self.train_loop.run()  # type: ignore
  File "/opt/anaconda3/envs/openmmlab/lib/python3.8/site-packages/mmengine/runner/loops.py", line 286, in run
    self.run_iter(data_batch)
  File "/opt/anaconda3/envs/openmmlab/lib/python3.8/site-packages/mmengine/runner/loops.py", line 309, in run_iter
    outputs = self.runner.model.train_step(
  File "/opt/anaconda3/envs/openmmlab/lib/python3.8/site-packages/mmengine/model/base_model/base_model.py", line 114, in train_step
    losses = self._run_forward(data, mode='loss')  # type: ignore
  File "/opt/anaconda3/envs/openmmlab/lib/python3.8/site-packages/mmengine/model/base_model/base_model.py", line 361, in _run_forward
    results = self(**data, mode=mode)
  File "/opt/anaconda3/envs/openmmlab/lib/python3.8/site-packages/torch/nn/modules/module.py", line 1511, in _wrapped_call_impl
    return self._call_impl(*args, **kwargs)
  File "/opt/anaconda3/envs/openmmlab/lib/python3.8/site-packages/torch/nn/modules/module.py", line 1520, in _call_impl
    return forward_call(*args, **kwargs)
  File "/Users/user1/Downloads/mmsegmentation/mmseg/models/segmentors/base.py", line 94, in forward
    return self.loss(inputs, data_samples)
  File "/Users/user1/Downloads/mmsegmentation/mmseg/models/segmentors/encoder_decoder.py", line 178, in loss
    loss_decode = self._decode_head_forward_train(x, data_samples)
  File "/Users/user1/Downloads/mmsegmentation/mmseg/models/segmentors/encoder_decoder.py", line 139, in _decode_head_forward_train
    loss_decode = self.decode_head.loss(inputs, data_samples,
  File "/Users/user1/Downloads/mmsegmentation/mmseg/models/decode_heads/decode_head.py", line 262, in loss
    losses = self.loss_by_feat(seg_logits, batch_data_samples)
  File "/Users/user1/Downloads/mmsegmentation/mmseg/models/decode_heads/decode_head.py", line 327, in loss_by_feat
    loss[loss_decode.loss_name] = loss_decode(
  File "/opt/anaconda3/envs/openmmlab/lib/python3.8/site-packages/torch/nn/modules/module.py", line 1511, in _wrapped_call_impl
    return self._call_impl(*args, **kwargs)
  File "/opt/anaconda3/envs/openmmlab/lib/python3.8/site-packages/torch/nn/modules/module.py", line 1520, in _call_impl
    return forward_call(*args, **kwargs)
  File "/Users/user1/Downloads/mmsegmentation/mmseg/models/losses/cross_entropy_loss.py", line 286, in forward
    loss_cls = self.loss_weight * self.cls_criterion(
  File "/Users/user1/Downloads/mmsegmentation/mmseg/models/losses/cross_entropy_loss.py", line 45, in cross_entropy
    loss = F.cross_entropy(
  File "/opt/anaconda3/envs/openmmlab/lib/python3.8/site-packages/torch/nn/functional.py", line 3059, in cross_entropy
    return torch._C._nn.cross_entropy_loss(input, target, weight, _Reduction.get_enum(reduction), ignore_index, label_smoothing)
IndexError: Target 7 is out of bounds.
```
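The failure happens in `F.cross_entropy` at the bottom of the trace; a minimal sketch of the same error, assuming 2-class logits (as produced by `num_classes=2`) and a label map that contains the value 7:

```python
import torch
import torch.nn.functional as F

# 2-class logits of shape (N, C, H, W), as a decode head with num_classes=2 would emit
logits = torch.randn(1, 2, 4, 4)
# label map of shape (N, H, W) containing the out-of-range class index 7
target = torch.full((1, 4, 4), 7, dtype=torch.long)

# raises: IndexError: Target 7 is out of bounds.
loss = F.cross_entropy(logits, target)
```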
Zoulinx commented 3 months ago

You are training a two-class (binary) segmentation model, but the label value 7 appears in your annotation maps. Check whether your labels are correct.
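A quick way to check this is to scan the unique pixel values in the annotation maps; a minimal sketch, assuming the layout above (`data/gloves/annotations/training`) and the `.JPG` masks loaded as a single channel:

```python
from pathlib import Path

import numpy as np
from PIL import Image

ann_dir = Path('data/gloves/annotations/training')  # path taken from the config above
values = set()
for path in sorted(ann_dir.glob('*.JPG')):
    mask = np.array(Image.open(path).convert('L'))  # force single-channel for inspection
    values.update(int(v) for v in np.unique(mask))

# with num_classes=2, only 0 and 1 (plus the 255 ignore index) are valid label values
print(sorted(values))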