Open kingbackyang opened 3 years ago
The error report:
Traceback (most recent call last):
File "/home/mk/mmsegmentation/tools/train.py", line 166, in
Hi @kingbackyang Sorry for the late reply! The annotation labels should be label index to represent the categories.
Hi @kingbackyang Sorry for the late reply! The annotation labels should be label index to represent the categories.
Hi @Junjun2016 , I face the same error issue on getting ValueError: size shape must match input shape. Input is 2D, size is 3.
Could you give a more detailed explanation or an example of what is meant by "The annotation labels should be label index to represent the categories."? What and where are the annotation labels and label indices?
What I did was the same as @kingbackyang in his example. I made a new dataset as below:
import os.path as osp

from .builder import DATASETS
from .custom import CustomDataset


@DATASETS.register_module()
class ownL1Dataset(CustomDataset):
    """ownL1 dataset.

    In segmentation map annotation for ownL1, 0 stands for background, which
    is included in the 2 categories. ``reduce_zero_label`` is fixed to False.
    The ``img_suffix`` is fixed to '.jpg' and ``seg_map_suffix`` is fixed to
    '.png'.
    """

    # Label index -> class name: pixel value 0 is background, 1 is foreground.
    CLASSES = ('background', 'foreground')

    # RGB colors used to render each class when visualizing predictions.
    PALETTE = [[120, 120, 120], [6, 230, 230]]

    def __init__(self, **kwargs):
        """Forward all config kwargs to CustomDataset with fixed suffixes."""
        super(ownL1Dataset, self).__init__(
            img_suffix='.jpg',
            seg_map_suffix='.png',
            reduce_zero_label=False,
            **kwargs)
        # Fail fast if the configured image directory does not exist.
        assert osp.exists(self.img_dir)
I change "CLASSES" and "PALETTE" in custom.py.
and the error I get is about the same too (as below):
ValueError Traceback (most recent call last)
<ipython-input-12-2ebe9c9d56e4> in <module>()
31 mmcv.mkdir_or_exist(osp.abspath(cfg.work_dir))
32 train_segmentor(model, datasets, cfg, distributed=False, validate=True,
---> 33 meta=meta) #train_segmentor() in /content/focal_phi_loss_mmsegmentation/mmseg/apis/train.py
14 frames
/content/focal_phi_loss_mmsegmentation/mmseg/apis/train.py in train_segmentor(model, dataset, cfg, distributed, validate, timestamp, meta)
114 elif cfg.load_from:
115 runner.load_checkpoint(cfg.load_from)
--> 116 runner.run(data_loaders, cfg.workflow)
/usr/local/lib/python3.7/dist-packages/mmcv/runner/iter_based_runner.py in run(self, data_loaders, workflow, max_iters, **kwargs)
128 if mode == 'train' and self.iter >= self._max_iters:
129 break
--> 130 iter_runner(iter_loaders[i], **kwargs)
131
132 time.sleep(1) # wait for some hooks like loggers to finish
/usr/local/lib/python3.7/dist-packages/mmcv/runner/iter_based_runner.py in train(self, data_loader, **kwargs)
58 self.call_hook('before_train_iter')
59 data_batch = next(data_loader)
---> 60 outputs = self.model.train_step(data_batch, self.optimizer, **kwargs)
61 if not isinstance(outputs, dict):
62 raise TypeError('model.train_step() must return a dict')
/usr/local/lib/python3.7/dist-packages/mmcv/parallel/data_parallel.py in train_step(self, *inputs, **kwargs)
65
66 inputs, kwargs = self.scatter(inputs, kwargs, self.device_ids)
---> 67 return self.module.train_step(*inputs[0], **kwargs[0])
68
69 def val_step(self, *inputs, **kwargs):
/content/focal_phi_loss_mmsegmentation/mmseg/models/segmentors/base.py in train_step(self, data_batch, optimizer, **kwargs)
150 averaging the logs.
151 """
--> 152 losses = self(**data_batch)
153 loss, log_vars = self._parse_losses(losses)
154
/usr/local/lib/python3.7/dist-packages/torch/nn/modules/module.py in _call_impl(self, *input, **kwargs)
725 result = self._slow_forward(*input, **kwargs)
726 else:
--> 727 result = self.forward(*input, **kwargs)
728 for hook in itertools.chain(
729 _global_forward_hooks.values(),
/usr/local/lib/python3.7/dist-packages/mmcv/runner/fp16_utils.py in new_func(*args, **kwargs)
82 'method of nn.Module')
83 if not (hasattr(args[0], 'fp16_enabled') and args[0].fp16_enabled):
---> 84 return old_func(*args, **kwargs)
85 # get the arg spec of the decorated method
86 args_info = getfullargspec(old_func)
/content/focal_phi_loss_mmsegmentation/mmseg/models/segmentors/base.py in forward(self, img, img_metas, return_loss, **kwargs)
120 """
121 if return_loss:
--> 122 return self.forward_train(img, img_metas, **kwargs)
123 else:
124 return self.forward_test(img, img_metas, **kwargs)
/content/focal_phi_loss_mmsegmentation/mmseg/models/segmentors/encoder_decoder.py in forward_train(self, img, img_metas, gt_semantic_seg)
156
157 loss_decode = self._decode_head_forward_train(x, img_metas,
--> 158 gt_semantic_seg)
159 losses.update(loss_decode)
160
/content/focal_phi_loss_mmsegmentation/mmseg/models/segmentors/encoder_decoder.py in _decode_head_forward_train(self, x, img_metas, gt_semantic_seg)
100 loss_decode = self.decode_head.forward_train(x, img_metas,
101 gt_semantic_seg,
--> 102 self.train_cfg)
103
104 losses.update(add_prefix(loss_decode, 'decode'))
/content/focal_phi_loss_mmsegmentation/mmseg/models/decode_heads/decode_head.py in forward_train(self, inputs, img_metas, gt_semantic_seg, train_cfg)
185 """
186 seg_logits = self.forward(inputs)
--> 187 losses = self.losses(seg_logits, gt_semantic_seg)
188 return losses
189
/usr/local/lib/python3.7/dist-packages/mmcv/runner/fp16_utils.py in new_func(*args, **kwargs)
162 'method of nn.Module')
163 if not (hasattr(args[0], 'fp16_enabled') and args[0].fp16_enabled):
--> 164 return old_func(*args, **kwargs)
165 # get the arg spec of the decorated method
166 args_info = getfullargspec(old_func)
/content/focal_phi_loss_mmsegmentation/mmseg/models/decode_heads/decode_head.py in losses(self, seg_logit, seg_label)
220 size=seg_label.shape[2:],
221 mode='bilinear',
--> 222 align_corners=self.align_corners)
223 if self.sampler is not None:
224 seg_weight = self.sampler.sample(seg_logit, seg_label)
/content/focal_phi_loss_mmsegmentation/mmseg/ops/wrappers.py in resize(input, size, scale_factor, mode, align_corners, warning)
27 if isinstance(size, torch.Size):
28 size = tuple(int(x) for x in size)
---> 29 return F.interpolate(input, size, scale_factor, mode, align_corners)
30
31
/usr/local/lib/python3.7/dist-packages/torch/nn/functional.py in interpolate(input, size, scale_factor, mode, align_corners, recompute_scale_factor)
3078 if len(size) != dim:
3079 raise ValueError('size shape must match input shape. '
-> 3080 'Input is {}D, size is {}'.format(dim, len(size)))
3081 output_size = size
3082 else:
ValueError: size shape must match input shape. Input is 2D, size is 3
Thank you.
If the classes are [background, road, car], then the label's pixel values for those classes are 0, 1, and 2, respectively. By the way, the label should have only one channel, with shape h*w — not 3 channels.
Here is my self-defined dataset class: import os import os.path as osp from collections import OrderedDict from functools import reduce
import mmcv import numpy as np from mmcv.utils import print_log from prettytable import PrettyTable from torch.utils.data import Dataset
from mmseg.core import eval_metrics from mmseg.utils import get_root_logger from .builder import DATASETS from .pipelines import Compose
@DATASETS.register_module() class CellDataset(Dataset): """Custom dataset for semantic segmentation. An example of the file structure is as follows.
I changed "CLASSES" and "PALETTE" in custom.py. The error suggests that the ground-truth segmentation image has not been converted to a grayscale (single-channel) image and is still a color image. How can I fix it? Thank you.