open-mmlab / mmsegmentation

OpenMMLab Semantic Segmentation Toolbox and Benchmark.
https://mmsegmentation.readthedocs.io/en/main/
Apache License 2.0

AttributeError: 'tuple' object has no attribute 'dim' #2767

Closed QChhh123 closed 1 year ago

QChhh123 commented 1 year ago

I made a customized decode head. My code runs normally during training, but it reports an error when it reaches the validation stage. (I override the forward_train method in the decode head; I don't know whether that matters. If needed, please ask me to upload the code.) Here are my config file and the error report.

config file

norm_cfg = dict(type='BN', requires_grad=True)
model = dict(
    type='EncoderDecoder',
    pretrained='open-mmlab://msra/hrnetv2_w48',
    backbone=dict(
        type='HRNet',
        norm_cfg=norm_cfg,
        norm_eval=False,
        extra=dict(
            stage1=dict(
                num_modules=1,
                num_branches=1,
                block='BOTTLENECK',
                num_blocks=(4, ),
                num_channels=(64, )),
            stage2=dict(
                num_modules=1,
                num_branches=2,
                block='BASIC',
                num_blocks=(4, 4),
                num_channels=(48, 96)),
            stage3=dict(
                num_modules=4,
                num_branches=3,
                block='BASIC',
                num_blocks=(4, 4, 4),
                num_channels=(48, 96, 192)),
            stage4=dict(
                num_modules=3,
                num_branches=4,
                block='BASIC',
                num_blocks=(4, 4, 4, 4),
                num_channels=(48, 96, 192, 384)))),
    decode_head=dict(
        type='MultiProtoHead',
        in_channels=3840, 
        channels=sum([48, 96, 192, 384]),
        gamma=0.5,
        num_prototype=10,
        in_index=3,
        dropout_ratio=0.1,
        num_classes=19,
        norm_cfg=dict(type='BN', requires_grad=True),
        align_corners=False,
        loss_decode=dict(
            type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
    train_cfg=dict(),
    test_cfg=dict(mode='whole'))
dataset_type = 'CityscapesDataset'
data_root = 'data/cityscapes/'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
crop_size = (512, 1024)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations'),
    dict(type='Resize', img_scale=(2048, 1024), ratio_range=(0.5, 2.0)),
    dict(type='RandomCrop', crop_size=(512, 1024), cat_max_ratio=0.75),
    dict(type='RandomFlip', prob=0.5),
    dict(type='PhotoMetricDistortion'),
    dict(
        type='Normalize',
        mean=[123.675, 116.28, 103.53],
        std=[58.395, 57.12, 57.375],
        to_rgb=True),
    dict(type='Pad', size=(512, 1024), pad_val=0, seg_pad_val=255),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_semantic_seg'])
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(2048, 1024),
        # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75],
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]
data = dict(
    samples_per_gpu=1,
    workers_per_gpu=1,
    train=dict(
        type=dataset_type,
        data_root=data_root,
        img_dir='leftImg8bit/train',
        ann_dir='gtFine/train',
        pipeline=train_pipeline),
    val=dict(
        type=dataset_type,
        data_root=data_root,
        img_dir='leftImg8bit/val',
        ann_dir='gtFine/val',
        pipeline=test_pipeline),
    test=dict(
        type=dataset_type,
        data_root=data_root,
        img_dir='leftImg8bit/val',
        ann_dir='gtFine/val',
        pipeline=test_pipeline))
log_config = dict(
    interval=50, hooks=[dict(type='TextLoggerHook', by_epoch=False)])
dist_params = dict(backend='nccl')
log_level = 'INFO'
load_from = None
resume_from = None
workflow = [('train', 1)]
cudnn_benchmark = True
optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0005)
optimizer_config = dict()
lr_config = dict(policy='poly', power=0.9, min_lr=0.0001, by_epoch=False)
runner = dict(type='IterBasedRunner', max_iters=40000)
checkpoint_config = dict(by_epoch=False, interval=4000)
evaluation = dict(interval=50, metric='mIoU', pre_eval=True)

error report information

[                                                  ] 0/500, elapsed: 0s, ETA:Traceback (most recent call last):
  File "tools/train.py", line 241, in <module>
    main()
  File "tools/train.py", line 230, in main
    train_segmentor(
  File "/data1/2023/qchhh/mmsegmentation/mmseg/apis/train.py", line 194, in train_segmentor
    runner.run(data_loaders, cfg.workflow)
  File "/data1/miniconda3/envs/qcmmseg/lib/python3.8/site-packages/mmcv/runner/iter_based_runner.py", line 144, in run
    iter_runner(iter_loaders[i], **kwargs)
  File "/data1/miniconda3/envs/qcmmseg/lib/python3.8/site-packages/mmcv/runner/iter_based_runner.py", line 70, in train
    self.call_hook('after_train_iter')
  File "/data1/miniconda3/envs/qcmmseg/lib/python3.8/site-packages/mmcv/runner/base_runner.py", line 317, in call_hook
    getattr(hook, fn_name)(self)
  File "/data1/miniconda3/envs/qcmmseg/lib/python3.8/site-packages/mmcv/runner/hooks/evaluation.py", line 266, in after_train_iter
    self._do_evaluate(runner)
  File "/data1/2023/qchhh/mmsegmentation/mmseg/core/evaluation/eval_hooks.py", line 51, in _do_evaluate
    results = single_gpu_test(
  File "/data1/2023/qchhh/mmsegmentation/mmseg/apis/test.py", line 91, in single_gpu_test
    result = model(return_loss=False, **data)
  File "/data1/miniconda3/envs/qcmmseg/lib/python3.8/site-packages/torch/nn/modules/module.py", line 1130, in _call_impl
    return forward_call(*input, **kwargs)
  File "/data1/miniconda3/envs/qcmmseg/lib/python3.8/site-packages/mmcv/parallel/data_parallel.py", line 51, in forward
    return super().forward(*inputs, **kwargs)
  File "/data1/miniconda3/envs/qcmmseg/lib/python3.8/site-packages/torch/nn/parallel/data_parallel.py", line 166, in forward
    return self.module(*inputs[0], **kwargs[0])
  File "/data1/miniconda3/envs/qcmmseg/lib/python3.8/site-packages/torch/nn/modules/module.py", line 1130, in _call_impl
    return forward_call(*input, **kwargs)
  File "/data1/miniconda3/envs/qcmmseg/lib/python3.8/site-packages/mmcv/runner/fp16_utils.py", line 119, in new_func
    return old_func(*args, **kwargs)
  File "/data1/2023/qchhh/mmsegmentation/mmseg/models/segmentors/base.py", line 110, in forward
    return self.forward_test(img, img_metas, **kwargs)
  File "/data1/2023/qchhh/mmsegmentation/mmseg/models/segmentors/base.py", line 92, in forward_test
    return self.simple_test(imgs[0], img_metas[0], **kwargs)
  File "/data1/2023/qchhh/mmsegmentation/mmseg/models/segmentors/encoder_decoder.py", line 266, in simple_test
    seg_logit = self.inference(img, img_meta, rescale)
  File "/data1/2023/qchhh/mmsegmentation/mmseg/models/segmentors/encoder_decoder.py", line 248, in inference
    seg_logit = self.whole_inference(img, img_meta, rescale)
  File "/data1/2023/qchhh/mmsegmentation/mmseg/models/segmentors/encoder_decoder.py", line 207, in whole_inference
    seg_logit = self.encode_decode(img, img_meta)
  File "/data1/2023/qchhh/mmsegmentation/mmseg/models/segmentors/encoder_decoder.py", line 76, in encode_decode
    out = resize(
  File "/data1/2023/qchhh/mmsegmentation/mmseg/ops/wrappers.py", line 27, in resize
    return F.interpolate(input, size, scale_factor, mode, align_corners)
  File "/data1/miniconda3/envs/qcmmseg/lib/python3.8/site-packages/torch/nn/functional.py", line 3841, in interpolate
    dim = input.dim() - 2  # Number of spatial dimensions.
AttributeError: 'tuple' object has no attribute 'dim'

Thank you very much for reading such a long question.

QChhh123 commented 1 year ago

If you override the forward_train method, you should override the forward_test method too. The default forward_test simply returns whatever forward returns, so if forward returns a tuple (as mine did), encode_decode passes that tuple to F.interpolate and the error above is raised during inference.
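
For anyone who hits the same error, below is a minimal sketch of what this fix looks like on the mmsegmentation 0.x BaseDecodeHead API. The head body is a made-up placeholder (the real MultiProtoHead prototype logic is not shown); only the forward_train / forward_test split is the point.

from mmseg.models.builder import HEADS
from mmseg.models.decode_heads.decode_head import BaseDecodeHead


@HEADS.register_module()
class MultiProtoHead(BaseDecodeHead):
    """Hypothetical skeleton; the prototype-specific logic is omitted."""

    def __init__(self, gamma=0.5, num_prototype=10, **kwargs):
        super().__init__(**kwargs)
        self.gamma = gamma
        self.num_prototype = num_prototype

    def forward(self, inputs):
        # Returns a tuple because training needs extra outputs.
        x = self._transform_inputs(inputs)
        seg_logits = self.cls_seg(x)
        extra = x  # placeholder for prototype-related outputs
        return seg_logits, extra

    def forward_train(self, inputs, img_metas, gt_semantic_seg, train_cfg):
        seg_logits, extra = self(inputs)
        # The extra outputs would feed additional custom losses here;
        # self.losses() only needs the logits tensor.
        return self.losses(seg_logits, gt_semantic_seg)

    def forward_test(self, inputs, img_metas, test_cfg):
        # Without this override, BaseDecodeHead.forward_test returns
        # self(inputs) -- a tuple -- and encode_decode() hands it to
        # F.interpolate, causing "'tuple' object has no attribute 'dim'".
        seg_logits, _ = self(inputs)
        return seg_logits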