yassouali / pytorch-segmentation

:art: Semantic segmentation models, datasets and losses implemented in PyTorch.
MIT License
1.66k stars 379 forks source link

Can't load the CityScapes dataset #64

Closed HUAHUOO closed 4 years ago

HUAHUOO commented 4 years ago

Hi, I tried to run SegNet using CityScapes as the dataset, but I got an AssertionError. Could you give me some help? Thanks a lot. Here are my config.json and the error report.

Traceback (most recent call last): File "train.py", line 61, in main(config, args.resume) File "train.py", line 22, in main train_loader = get_instance(dataloaders, 'train_loader', config) File "train.py", line 16, in get_instance return getattr(module, config[name]['type'])(*args, config[name]['args']) File "/home/hudh/zzy/pytorch_segmentation/dataloaders/cityscapes.py", line 82, in init self.dataset = CityScapesDataset(mode=mode, kwargs) File "/home/hudh/zzy/pytorch_segmentation/dataloaders/cityscapes.py", line 26, in init super(CityScapesDataset, self).init(**kwargs) File "/home/hudh/zzy/pytorch_segmentation/base/base_dataset.py", line 27, in init self._set_files() File "/home/hudh/zzy/pytorch_segmentation/dataloaders/cityscapes.py", line 40, in _set_files assert os.listdir(image_path) == os.listdir(label_path) AssertionError

{ "name": "SegNet", "n_gpu": 2, "use_synch_bn": true,

  "arch": {
    "type": "SegNet",
    "args": {
        "backbone": "resnet50",
        "freeze_bn": false,
        "freeze_backbone": false
    }
},

"train_loader": {
    "type": "CityScapes",
    "args":{
        "data_dir": "/home/hudh/zzy/cityscapes",
        "batch_size": 8,
        "base_size": 1024,
        "crop_size": 512,
        "augment": true,
        "shuffle": true,
        "scale": true,
        "flip": true,
        "rotate": true,
        "blur": false,
        "split": "train",
        "mode": "fine",
        "num_workers": 8
    }
},

"val_loader": {
    "type": "CityScapes",
    "args":{
        "data_dir": "/home/hudh/zzy/cityscapes",
        "batch_size": 1,
        "val": true,
        "split": "val",
        "mode": "fine",
        "num_workers": 4
    }
},

"optimizer": {
    "type": "SGD",
    "differential_lr": true,
    "args":{
        "lr": 0.01,
        "weight_decay": 1e-4,
        "momentum": 0.9
    }
},

"loss": "CrossEntropyLoss2d",
"ignore_index": 255,
"lr_scheduler": {
    "type": "Poly",
    "args": {}
},

"trainer": {
    "epochs": 50,
    "save_dir": "saved/",
    "save_period": 10,

    "monitor": "max Mean_IoU",
    "early_stop": 10,

    "tensorboard": false,
    "log_dir": "saved/runs",
    "log_per_iter": 20,

    "val": true,
    "val_per_epochs": 10
}

}

HUAHUOO commented 4 years ago

I've fixed it. There was a file named .DS_Store in the gtFine directory. After I deleted it, the error no longer occurs.