NVIDIA / partialconv

A New Padding Scheme: Partial Convolution based Padding

How to test the code with masks of different ratios? #29

Open renhaha123 opened 3 years ago

renhaha123 commented 3 years ago

Thanks for your awesome work. I downloaded the test mask file from your project (https://www.dropbox.com/s/01dfayns9s0kevy/test_mask.zip?dl=0). However, all the masks with different ratios sit in a single directory. Are they in a fixed order? For example, are the masks with ratio (0.01, 0.1] the files 0.png through 999.png? If not, how can I find the masks with a specific ratio?
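One workaround, in case the file order is not fixed: compute each mask's hole-to-image area ratio directly and bucket the files yourself. Below is a minimal sketch, assuming the masks are single-channel PNGs in which pixel value 0 marks the hole (invert the comparison if the convention is the opposite); `bucket_masks_by_ratio` and its parameters are just illustrative names.

```python
import os

import numpy as np
from PIL import Image

def bucket_masks_by_ratio(mask_dir, bucket_width=0.1, n_buckets=6):
    """Group mask filenames by hole-to-image area ratio."""
    buckets = {i: [] for i in range(n_buckets)}
    for fname in sorted(os.listdir(mask_dir)):
        if not fname.lower().endswith('.png'):
            continue
        mask = np.array(Image.open(os.path.join(mask_dir, fname)).convert('L'))
        ratio = float((mask == 0).mean())  # fraction of hole pixels, assuming 0 = hole
        idx = min(int(ratio / bucket_width), n_buckets - 1)
        buckets[idx].append(fname)  # bucket i covers ratios in [i*0.1, (i+1)*0.1)
    return buckets

if __name__ == '__main__':
    for i, files in sorted(bucket_masks_by_ratio('test_mask').items()):
        print('[%.1f, %.1f): %d masks' % (i * 0.1, (i + 1) * 0.1, len(files)))
```

The partial-convolution inpainting paper describes six ratio categories, (0.01, 0.1] through (0.5, 0.6], each split into masks with and without holes touching the image border, so the printed per-bucket counts should make it easy to check whether the directory follows a fixed ordering.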

sankalp2K commented 1 year ago

```python
import argparse
import os
import random
import time
import warnings

import torch
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import torchvision.models as models_baseline  # networks with zero padding
import models as models_partial  # networks with partial-conv-based padding

model_baseline_names = sorted(
    name for name in models_baseline.__dict__
    if name.islower() and not name.startswith("__")
    and callable(models_baseline.__dict__[name]))

model_partial_names = sorted(
    name for name in models_partial.__dict__
    if name.islower() and not name.startswith("__")
    and callable(models_partial.__dict__[name]))

model_names = model_baseline_names + model_partial_names

parser = argparse.ArgumentParser(description='PyTorch ImageNet Testing')
parser.add_argument('--data_test', metavar='DIRTEST',
                    help='path to test dataset')
parser.add_argument('--arch', '-a', metavar='ARCH', default='resnet50',
                    choices=model_names,
                    help='model architecture: ' + ' | '.join(model_names) +
                         ' (default: resnet50)')
parser.add_argument('-j', '--workers', default=4, type=int, metavar='N',
                    help='number of data loading workers (default: 4)')
parser.add_argument('-b', '--batch-size', default=1, type=int, metavar='N',
                    help='mini-batch size (default: 1)')
parser.add_argument('--pretrained', dest='pretrained', action='store_true',
                    help='use pre-trained model')
parser.add_argument('--seed', default=None, type=int,
                    help='seed for initializing training')
parser.add_argument('--world-size', default=1, type=int,
                    help='number of distributed processes')
parser.add_argument('--dist-url', default='tcp://224.66.41.62:23456', type=str,
                    help='url used to set up distributed training')
parser.add_argument('--dist-backend', default='gloo', type=str,
                    help='distributed backend')
parser.add_argument('--gpu', default=None, type=int,
                    help='GPU id to use.')
parser.add_argument('--prefix', default='', type=str)
parser.add_argument('--ckptdirprefix', default='', type=str)

best_prec1 = 0


def main():
    global args, best_prec1
    args = parser.parse_args()

    checkpoint_dir = args.ckptdirprefix + 'checkpoint_' + args.arch + '_' + args.prefix + '/'
    if not os.path.exists(checkpoint_dir):
        os.makedirs(checkpoint_dir)
    args.logger_fname = os.path.join(checkpoint_dir, 'loss.txt')

    with open(args.logger_fname, "a") as log_file:
        now = time.strftime("%c")
        log_file.write('================ Testing Log (%s) ================\n' % now)
        log_file.write('world size: %d\n' % args.world_size)

    if args.seed is not None:
        random.seed(args.seed)
        torch.manual_seed(args.seed)
        cudnn.deterministic = True
        warnings.warn('You have chosen to seed training. '
                      'This will turn on the CUDNN deterministic setting, '
                      'which can slow down your training considerably! '
                      'You may see unexpected behavior when restarting '
                      'from checkpoints.')

    if args.gpu is not None:
        warnings.warn('You have chosen a specific GPU. This will completely '
                      'disable data parallelism.')

    args.distributed = args.world_size > 1

    if args.distributed:
        dist.init_process_group(backend=args.dist_backend, init_method=args.dist_url,
                                world_size=args.world_size)

    # create model: use the torchvision (zero-padding) version if the arch
    # name matches a baseline model, otherwise the partial-conv-padding one
    if args.pretrained:
        print("=> using pre-trained model '{}'".format(args.arch))
        if args.arch in models_baseline.__dict__:
            model = models_baseline.__dict__[args.arch](pretrained=True)
        else:
            model = models_partial.__dict__[args.arch](pretrained=True)
    else:
        print("=> creating model '{}'".format(args.arch))
        if args.arch in models_baseline.__dict__:
            model = models_baseline.__dict__[args.arch]()
        else:
            model = models_partial.__dict__[args.arch]()

    # logging
    with open(args.logger_fname, "a") as log_file:
        log_file.write('model created\n')

    if args.gpu is not None:
        model = model.cuda(args.gpu)
    elif args.distributed:
        model.cuda()
        model = torch.nn.parallel.DistributedDataParallel(model)
    else:
        if args.arch.startswith('alexnet') or 'vgg' in args.arch:
            model.features = torch.nn.DataParallel(model.features)
            model.cuda()
        else:
            model = torch.nn.DataParallel(model).cuda()

    test_dir = args.data_test
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])

    # deterministic evaluation transforms: no random crop/flip at test time
    test_dataset = datasets.ImageFolder(
        test_dir,
        transforms.Compose([
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            normalize,
        ]))

    if args.distributed:
        test_sampler = torch.utils.data.distributed.DistributedSampler(test_dataset)
    else:
        test_sampler = None

    # pass the dataset (not the directory path) to the DataLoader,
    # and keep the order deterministic for evaluation
    test_loader = torch.utils.data.DataLoader(
        test_dataset, batch_size=args.batch_size, shuffle=False,
        num_workers=args.workers, pin_memory=True, sampler=test_sampler)

    # logging
    with open(args.logger_fname, "a") as log_file:
        log_file.write('test dataset created\n')
        log_file.write('started testing\n')

    for epoch in range(1):
        if args.distributed:
            test_sampler.set_epoch(epoch)

        # run one pass over the test set
        test(test_loader, model, epoch)

def test(test_loader, model, epoch):
    # switch to evaluation mode so batch norm uses its running statistics
    # and dropout is disabled
    model.eval()

    with torch.no_grad():
        for i, (input, target) in enumerate(test_loader):
            if args.gpu is not None:
                input = input.cuda(args.gpu, non_blocking=True)

            # forward pass only; this snippet does not accumulate accuracy
            output = model(input)


if __name__ == '__main__':
    main()
```
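If saved as a standalone script, it would be invoked along these lines: `python main_test.py --data_test /path/to/test_images -a pdresnet50 --pretrained --gpu 0`. The script filename here is just a placeholder, and `pdresnet50` assumes one of the partial-conv architectures exported by this repo's `models` package; any name listed by `--arch`'s choices will do.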