imtiazziko / SLK-few-shot

Clustering for Few-shot Learning

Single Prediction #1

Closed: asimniazi63 closed this issue 2 years ago

asimniazi63 commented 3 years ago

How can I make a prediction for a single image using your pre-trained model?

asimniazi63 commented 3 years ago

I am trying to predict a single image, e.g. a lion, against the ImageNet labels. Given an input of shape [1, 3, 84, 84], the model returns two outputs. One of them is fc_outputs, of shape [1, 64], which I took to be the output probabilities over the 64 classes your models are trained on, and I read off the prediction with argmax (please refer to the code). I tried inputs like school_bus, guitar, etc., but the output label barely changes; it is sometimes 40 or 44, which does not seem correct.

Since I am trying to predict a single image, please let me know if I am doing something wrong.

import torch
import torch.backends.cudnn as cudnn
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.parallel
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
from torch.optim.lr_scheduler import MultiStepLR, StepLR, CosineAnnealingLR
import tqdm
from scipy.stats import mode
from src.utils import configuration_SLK as configuration
# from src.bound_update import bound_update
# from src.SLK import SLK
from numpy import linalg as LA
import src.datasets
import src.models as models
from scipy import sparse
import numpy as np
from sklearn.neighbors import NearestNeighbors
import copy
best_prec1 = -1

from PIL import Image
import random
import os
# import numpy as np

global args, best_prec1
# args = configuration.parser_args()
### initial logger
# filepath = os.path.join(args.save_path,args.log_file)
# log = setup_logger(filepath)
# for key, value in sorted(vars(args).items()):
#     log.info(str(key) + ': ' + str(value))

random.seed(1)
torch.manual_seed(1)

# cudnn.deterministic = True
# create model
print("=> creating model '{}'".format('wideres'))
model = models.__dict__['wideres'](num_classes=64, remove_linear=False)

print('Number of model parameters: {}'.format(sum([p.data.nelement() for p in model.parameters()])))

model = torch.nn.DataParallel(model).cuda()

def get_optimizer(module):
    OPTIMIZER = {'SGD': torch.optim.SGD(module.parameters(), lr=0.1, momentum=0.9, weight_decay=1e-4,
                                        nesterov=True),
                 'Adam': torch.optim.Adam(module.parameters(), lr=0.1)}
    return OPTIMIZER['SGD']

# define loss function (criterion) and optimizer
# if args.label_smooth > 0:
#     criterion = SmoothCrossEntropy(epsilon=0.1).cpu()

# else:
criterion = nn.CrossEntropyLoss().cuda()

optimizer = get_optimizer(model)
pretrain = True
if pretrain:
    # pretrain = "./results/mini/softmax/resnet18/checkpoint.pth.tar"
    pretrain = "./results/mini/softmax/wideres/model_best.pth.tar"
    if os.path.isfile(pretrain):
        print("=> loading pretrained weight '{}'".format(pretrain))
        checkpoint = torch.load(pretrain)
        model_dict = model.state_dict()
        params = checkpoint['state_dict']
        params = {k: v for k, v in params.items() if k in model_dict}
        model_dict.update(params)
        model.load_state_dict(model_dict)
    else:
      print("No pretrained found")
        # print('[Attention]: Do not find pretrained model {}'.format("/results/mini/softmax/wideres"))

# resume from an exist checkpoint
# do_extract_and_evaluate(model, log)

# load checkpoint
def load_checkpoint(model, type):
    if type == 'best':
        checkpoint = torch.load('{}/model_best.pth.tar'.format("./results/mini/softmax/wideres"))
    elif type == 'last':
        checkpoint = torch.load('{}/checkpoint.pth.tar'.format("./results/mini/softmax/wideres"))
    else:
        assert False, 'type should be in [best, or last], but got {}'.format(type)
    model.load_state_dict(checkpoint['state_dict'])

load_checkpoint(model, 'best')
model.eval()
with torch.no_grad():
  # compute output
  img = Image.open("../carousel.jpg").convert('RGB')
  img = img.resize((84, 84))
  img = np.array(img)
  img = np.moveaxis(img, -1, 0)  # HWC -> CHW, shape [3, 84, 84]
  img = np.expand_dims(img, 0)   # add batch dimension, shape [1, 3, 84, 84]
  inputs = torch.from_numpy(img).float()
  # prediction: the model returns the feature embedding and the 64-way classifier logits
  outputs, fc_outputs = model(inputs, True)
  outputs = outputs.cpu().data.numpy()
  fc_outputs = fc_outputs.cpu().data.numpy()  # [1, 64]
  print("\n\n", fc_outputs.shape)
  print(np.argmax(fc_outputs, axis=1))  # predicted class index (argmax over the 64 logits)
imtiazziko commented 3 years ago

Hello @asimniazi63, this is a transductive few-shot learning setting, which means it assumes more than one query sample to be predicted. For a single prediction, it is more a matter of extracting features for the support samples and the single query sample and then finding the nearest support class (see the sketch below). The paper only covers the transductive few-shot setting.

Thanks
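
Below is a minimal sketch of that nearest-support-class idea, not the SLK procedure from the paper. It assumes the model is called as in the script above, i.e. model(x, True) returns (features, fc_outputs), and that support_images / support_labels are tensors you supply for the few-shot episode (e.g. 5 classes x 5 shots); those names are placeholders.

import torch
import torch.nn.functional as F

@torch.no_grad()
def nearest_support_predict(model, support_images, support_labels, query_image):
    # Classify one query image by the nearest class prototype in feature space.
    model.eval()
    support_feats, _ = model(support_images, True)         # [N_support, D]
    query_feat, _ = model(query_image.unsqueeze(0), True)  # [1, D]

    # L2-normalize so nearest neighbour matches cosine similarity.
    support_feats = F.normalize(support_feats, dim=1)
    query_feat = F.normalize(query_feat, dim=1)

    # One prototype per class: the mean of that class's support features.
    classes = torch.unique(support_labels)
    prototypes = torch.stack(
        [support_feats[support_labels == c].mean(dim=0) for c in classes]
    )                                                      # [N_classes, D]
    prototypes = F.normalize(prototypes, dim=1)

    # Predict the class whose prototype is most similar to the query.
    sims = query_feat @ prototypes.t()                     # [1, N_classes]
    return classes[sims.argmax(dim=1)].item()

The returned index is one of the episode's support labels, so the prediction is only over the classes you provide as support, not over the 64 base classes of the pre-trained head.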