keenJMS opened this issue 3 years ago
Hi Dr. Ge. I am training on 4x 2080Ti GPUs with PyTorch 1.7 + CUDA 10.1 + Python 3.8.5. With SpCL+, the mAP on duke->msmt is only 22.0, and on market->msmt only 23.3. Below is my log file:
==========
Args:Namespace(config='SpCL/config_duke_msmt.yaml', launcher='pytorch', resume_from=None, set_cfgs=None, tcp_port='10010', work_dir='SpCL/duke_msmt/4gpu_16per/800iter')
==========
cfg.LOCAL_RANK: 0
cfg.DATA_ROOT: ../datasets
cfg.LOGS_ROOT: /data/OpenUnlogs/logs
cfg.MODEL = edict()
cfg.MODEL.backbone: resnet50
cfg.MODEL.pooling: gem
cfg.MODEL.embed_feat: 0
cfg.MODEL.dropout: 0.0
cfg.MODEL.dsbn: True
cfg.MODEL.sync_bn: True
cfg.MODEL.samples_per_bn: 16
cfg.MODEL.mean_net: False
cfg.MODEL.alpha: 0.999
cfg.MODEL.imagenet_pretrained: True
cfg.MODEL.source_pretrained: None
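For context on the MODEL block above, `pooling: gem` is generalized-mean (GeM) pooling over the final feature map. A minimal PyTorch sketch of the idea (my own illustration, not the repo's exact module; the learnable exponent `p` and its default value are assumptions):

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

class GeMPool(nn.Module):
    """Generalized-mean pooling: average-pool x**p, then take the p-th root.
    p = 1 recovers average pooling; a large p approaches max pooling."""
    def __init__(self, p=3.0, eps=1e-6):
        super().__init__()
        self.p = nn.Parameter(torch.tensor(p))  # learnable exponent (assumed default)
        self.eps = eps

    def forward(self, x):                        # x: (N, C, H, W) feature map
        x = x.clamp(min=self.eps).pow(self.p)
        x = F.adaptive_avg_pool2d(x, 1)          # (N, C, 1, 1)
        return x.pow(1.0 / self.p).flatten(1)    # (N, C) pooled descriptor
```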
cfg.DATA = edict()
cfg.DATA.height: 256
cfg.DATA.width: 128
cfg.DATA.norm_mean: [0.485, 0.456, 0.406]
cfg.DATA.norm_std: [0.229, 0.224, 0.225]
cfg.DATA.TRAIN = edict()
cfg.DATA.TRAIN.is_autoaug: False
cfg.DATA.TRAIN.is_flip: True
cfg.DATA.TRAIN.flip_prob: 0.5
cfg.DATA.TRAIN.is_pad: True
cfg.DATA.TRAIN.pad_size: 10
cfg.DATA.TRAIN.is_blur: False
cfg.DATA.TRAIN.blur_prob: 0.5
cfg.DATA.TRAIN.is_erase: True
cfg.DATA.TRAIN.erase_prob: 0.5
cfg.DATA.TRAIN.is_mutual_transform: False
cfg.DATA.TRAIN.mutual_times: 2
cfg.TRAIN = edict()
cfg.TRAIN.seed: 1
cfg.TRAIN.deterministic: True
cfg.TRAIN.amp: False
cfg.TRAIN.datasets = edict()
cfg.TRAIN.datasets.msmt17: trainval
cfg.TRAIN.datasets.dukemtmcreid: trainval
cfg.TRAIN.unsup_dataset_indexes: [0]
cfg.TRAIN.epochs: 50
cfg.TRAIN.iters: 800
cfg.TRAIN.LOSS = edict()
cfg.TRAIN.LOSS.losses = edict()
cfg.TRAIN.LOSS.losses.hybrid_memory: 1.0
cfg.TRAIN.LOSS.temp: 0.05
cfg.TRAIN.LOSS.momentum: 0.2
cfg.TRAIN.val_dataset: msmt17
cfg.TRAIN.val_freq: 5
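As I understand it, the `hybrid_memory` loss with `temp: 0.05` and `momentum: 0.2` is a contrastive loss computed against a feature memory that is updated with a momentum rule. A rough, simplified sketch of that mechanism (function and variable names are my own, the momentum convention is an assumption, and this is not SpCL's actual implementation):

```python
import torch
import torch.nn.functional as F

def memory_contrastive_loss(feats, targets, memory, temp=0.05, momentum=0.2):
    """Contrastive loss against an L2-normalized feature memory.

    feats:   (B, D) L2-normalized batch features
    targets: (B,)   index of each sample's entry (centroid or instance) in the memory
    memory:  (M, D) L2-normalized, gradient-free memory buffer (updated in place)
    """
    logits = feats @ memory.t() / temp       # (B, M) temperature-scaled similarities
    loss = F.cross_entropy(logits, targets)  # pull each sample toward its own entry

    # momentum update of the memory entries seen in this batch (one common convention)
    with torch.no_grad():
        memory[targets] = momentum * memory[targets] + (1.0 - momentum) * feats
        memory[targets] = F.normalize(memory[targets], dim=1)
    return loss
```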
cfg.TRAIN.SAMPLER = edict()
cfg.TRAIN.SAMPLER.num_instances: 4
cfg.TRAIN.SAMPLER.is_shuffle: True
cfg.TRAIN.LOADER = edict()
cfg.TRAIN.LOADER.samples_per_gpu: 16
cfg.TRAIN.LOADER.workers_per_gpu: 2
cfg.TRAIN.PSEUDO_LABELS = edict()
cfg.TRAIN.PSEUDO_LABELS.freq: 1
cfg.TRAIN.PSEUDO_LABELS.use_outliers: True
cfg.TRAIN.PSEUDO_LABELS.norm_feat: True
cfg.TRAIN.PSEUDO_LABELS.norm_center: True
cfg.TRAIN.PSEUDO_LABELS.cluster: dbscan
cfg.TRAIN.PSEUDO_LABELS.eps: [0.58, 0.6, 0.62]
cfg.TRAIN.PSEUDO_LABELS.min_samples: 4
cfg.TRAIN.PSEUDO_LABELS.dist_metric: jaccard
cfg.TRAIN.PSEUDO_LABELS.k1: 30
cfg.TRAIN.PSEUDO_LABELS.k2: 6
cfg.TRAIN.PSEUDO_LABELS.search_type: 0
cfg.TRAIN.PSEUDO_LABELS.cluster_num: None
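For reference, the pseudo-label step with this config boils down to running DBSCAN on a precomputed k-reciprocal Jaccard distance matrix, sweeping the three `eps` radii (SpCL uses the tight/loose radii for its self-paced reliability criterion). A minimal sketch using scikit-learn (my own illustration with a random placeholder matrix, not OpenUnReID's clustering code):

```python
import numpy as np
from sklearn.cluster import DBSCAN

def cluster_pseudo_labels(dist, eps=0.6, min_samples=4):
    """dist: (N, N) precomputed Jaccard distance matrix in [0, 1].
    Returns one pseudo label per sample; -1 marks DBSCAN outliers."""
    return DBSCAN(eps=eps, min_samples=min_samples,
                  metric="precomputed").fit_predict(dist)

for eps in [0.58, 0.60, 0.62]:
    dist = np.random.rand(100, 100)   # placeholder distance matrix
    dist = np.minimum(dist, dist.T)   # symmetrize
    np.fill_diagonal(dist, 0.0)       # self-distance is zero
    labels = cluster_pseudo_labels(dist, eps=eps)
    n_clusters = len(set(labels)) - (1 if -1 in labels else 0)
    print(f"eps={eps}: {n_clusters} clusters")
```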
cfg.TRAIN.OPTIM = edict()
cfg.TRAIN.OPTIM.optim: adam
cfg.TRAIN.OPTIM.lr: 0.00035
cfg.TRAIN.OPTIM.weight_decay: 0.0005
cfg.TRAIN.SCHEDULER = edict()
cfg.TRAIN.SCHEDULER.lr_scheduler: single_step
cfg.TRAIN.SCHEDULER.stepsize: 20
cfg.TRAIN.SCHEDULER.gamma: 0.1
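The OPTIM and SCHEDULER blocks correspond to a plain Adam optimizer with a single-step learning-rate decay; in standard PyTorch that is roughly the following (the model here is only a placeholder module):

```python
import torch

model = torch.nn.Linear(2048, 512)  # placeholder standing in for the ReID network

optimizer = torch.optim.Adam(model.parameters(), lr=0.00035, weight_decay=0.0005)
# single_step scheduler: multiply the LR by gamma=0.1 once every 20 epochs
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=20, gamma=0.1)

for epoch in range(50):
    # ... one training epoch of 800 iterations would run here ...
    scheduler.step()
```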
cfg.TEST = edict()
cfg.TEST.datasets: ['msmt17']
cfg.TEST.LOADER = edict()
cfg.TEST.LOADER.samples_per_gpu: 32
cfg.TEST.LOADER.workers_per_gpu: 2
cfg.TEST.dist_metric: euclidean
cfg.TEST.norm_feat: True
cfg.TEST.dist_cuda: True
cfg.TEST.rerank: False
cfg.TEST.search_type: 0
cfg.TEST.k1: 20
cfg.TEST.k2: 6
cfg.TEST.lambda_value: 0.3
cfg.launcher: pytorch
cfg.tcp_port: 10010
cfg.work_dir: /data/OpenUnlogs/logs/SpCL/duke_msmt/4gpu_16per/800iter
cfg.rank: 0
cfg.ngpus_per_node: 4
cfg.gpu: 0
cfg.total_gpus: 4
cfg.world_size: 4
The training is in a un/semi-supervised manner with 2 dataset(s) (['msmt17', 'dukemtmcreid']), where ['msmt17'] have no labels.
Mean AP: 22.0%
CMC Scores:
  top-1   46.6%
  top-5   59.3%
  top-10  64.6%
Testing time: 0:03:25.443005
Finished testing
Total running time: 5:10:33.865417