Open hannachoum opened 1 year ago
My file is `mmseg/datasets/milvue.py`, and I get the following error:
`ValueError: need at least one array to concatenate` — raised from class `IterBasedTrainLoop`
in `mmengine/runner/loops.py`, via class `milvueDataset` in `mmseg/datasets/milvue.py`.
I am facing the same error when using BaseSegDataset.
Here's my config file:
# Base configs merged by mmengine: SETR-MLA model, ADE20K data pipeline,
# default runtime, and the 160k-iteration schedule.
_base_ = [
    '../_base_/models/setr_mla.py', '../_base_/datasets/ade20k.py',
    '../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py'
]
crop_size = (512, 512)
data_preprocessor = dict(size=crop_size)
norm_cfg = dict(type='SyncBN', requires_grad=True)
num_classes = 1
# BUG FIX: ('test') is just the string 'test' — parentheses alone do not
# create a tuple; the trailing comma does. With a bare string, the dataset
# treats each character as a class name, breaking class/palette handling.
metainfo = dict(classes=('test',))
dataset_type = 'BaseSegDataset'
data_root = 'path_to_dataset_folder'
# NOTE(review): these suffixes must also be passed into each dataset dict
# below; BaseSegDataset defaults to img_suffix='.jpg', so omitting them on
# a .png dataset yields an empty file list and the
# "need at least one array to concatenate" error.
img_suffix = '.png'
seg_map_suffix = '.png'
pre_trained_weights_path = 'path_to_weights/weights.pth'
reduce_zero_label = True
# SETR-MLA model overrides on top of the `_base_` model config.
# The four auxiliary FCN heads are identical except for `in_index`
# (which backbone MLA feature level each head consumes), so they are
# generated with a comprehension instead of being spelled out four times.
model = dict(
    data_preprocessor=data_preprocessor,
    pretrained=None,  # weights come via backbone.init_cfg instead
    backbone=dict(
        img_size=(512, 512),
        drop_rate=0.,
        init_cfg=dict(
            type='Pretrained', checkpoint=pre_trained_weights_path)),
    decode_head=dict(num_classes=num_classes),
    auxiliary_head=[
        dict(
            type='FCNHead',
            in_channels=256,
            channels=256,
            in_index=feature_level,
            dropout_ratio=0,
            norm_cfg=norm_cfg,
            act_cfg=dict(type='ReLU'),
            num_convs=0,
            kernel_size=1,
            concat_input=False,
            num_classes=num_classes,
            align_corners=False,
            loss_decode=dict(
                type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4))
        for feature_level in range(4)
    ],
    # Sliding-window inference with overlapping 512x512 crops.
    test_cfg=dict(mode='slide', crop_size=(512, 512), stride=(341, 341)),
)
# Only the learning rate and weight decay are overridden here; the
# optimizer type and remaining hyper-parameters come from the schedule
# inherited via `_base_`.
optimizer = dict(lr=0.001, weight_decay=0.0)
# Train the heads with a 10x learning-rate multiplier relative to the backbone.
optim_wrapper = dict(
    type='OptimWrapper',
    optimizer=optimizer,
    paramwise_cfg=dict(custom_keys=dict(head=dict(lr_mult=10.0))))
# Training dataloader: infinite sampling for IterBasedTrainLoop.
train_dataloader = dict(
    batch_size=8,
    num_workers=8,
    persistent_workers=True,
    sampler=dict(type='InfiniteSampler', shuffle=True),
    dataset=dict(
        type=dataset_type,
        data_root=data_root,
        metainfo=metainfo,
        # BUG FIX: BaseSegDataset defaults to img_suffix='.jpg'. Without
        # passing the '.png' suffixes, the dataset scans zero files and
        # training fails with "need at least one array to concatenate".
        img_suffix=img_suffix,
        seg_map_suffix=seg_map_suffix,
        # Set on the dataset (the modern mmseg convention) so it stays
        # consistent with the pipeline's LoadAnnotations flag.
        reduce_zero_label=reduce_zero_label,
        data_prefix=dict(
            img_path='images/training',
            seg_map_path='annotations/training'),
        pipeline=[
            dict(type='LoadImageFromFile'),
            dict(type='LoadAnnotations', reduce_zero_label=True),
            dict(
                type='RandomResize',
                scale=(2048, 512),
                ratio_range=(0.5, 2.0),
                keep_ratio=True),
            dict(type='RandomCrop', crop_size=(512, 512), cat_max_ratio=0.75),
            dict(type='RandomFlip', prob=0.5),
            dict(type='PhotoMetricDistortion'),
            dict(type='PackSegInputs')
        ]))
# Validation dataloader: deterministic single pass over the validation split.
val_dataloader = dict(
    batch_size=8,
    num_workers=8,
    persistent_workers=True,
    sampler=dict(type='DefaultSampler', shuffle=False),
    dataset=dict(
        type=dataset_type,
        data_root=data_root,
        metainfo=metainfo,
        # BUG FIX: pass the '.png' suffixes explicitly — BaseSegDataset
        # defaults to '.jpg' and would otherwise find no samples
        # ("need at least one array to concatenate").
        img_suffix=img_suffix,
        seg_map_suffix=seg_map_suffix,
        reduce_zero_label=reduce_zero_label,
        data_prefix=dict(
            img_path='images/validation',
            seg_map_path='annotations/validation'),
        pipeline=[
            dict(type='LoadImageFromFile'),
            dict(type='Resize', scale=(2048, 512), keep_ratio=True),
            dict(type='LoadAnnotations', reduce_zero_label=True),
            dict(type='PackSegInputs')
        ]))
# Test dataloader: deterministic single pass over the test split.
test_dataloader = dict(
    batch_size=8,
    num_workers=8,
    persistent_workers=True,
    sampler=dict(type='DefaultSampler', shuffle=False),
    dataset=dict(
        type=dataset_type,
        data_root=data_root,
        metainfo=metainfo,
        # BUG FIX: pass the '.png' suffixes explicitly — BaseSegDataset
        # defaults to '.jpg' and would otherwise find no samples
        # ("need at least one array to concatenate").
        img_suffix=img_suffix,
        seg_map_suffix=seg_map_suffix,
        reduce_zero_label=reduce_zero_label,
        data_prefix=dict(
            img_path='images/test',
            seg_map_path='annotations/test'),
        pipeline=[
            dict(type='LoadImageFromFile'),
            dict(type='Resize', scale=(2048, 512), keep_ratio=True),
            dict(type='LoadAnnotations', reduce_zero_label=True),
            dict(type='PackSegInputs')
        ]))
你好,这个问题你解决了吗,是哪里的原因呀
You may try setting the package up again — run `pip install -v -e .` as described in the installation instructions — since it needs to be reinstalled whenever you add or change dataset files. It works for me.
Hello there, I think I may have a problem in my dataset config file; I have the feeling that I missed some settings, but I don't know what to add.
import os.path as osp import numpy as np from mmseg.registry import DATASETS from .basesegdataset import BaseSegDataset
@DATASETS.register_module() class milvueDataset(BaseSegDataset): palette = [ .... ]