WangWenhao0716 / DomainMix

[BMVC 2021] The official implementation of "DomainMix: Learning Generalizable Person Re-Identification Without Human Annotations"
MIT License
20 stars · 1 fork

same features for different images #5

Open smsver2 opened 1 year ago

smsver2 commented 1 year ago

I wrote the code below:

import os.path as osp
import time

import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset
from PIL import Image
from torchvision import transforms as T

from dg import models
from dg.evaluators import Evaluator
from dg.utils.serialization import load_checkpoint, copy_state_dict

from sklearn.metrics.pairwise import cosine_similarity

def normalize_embedding(embedding):
    norm = np.linalg.norm(embedding)
    return embedding / norm

def preprocess_single_image(image_path, height, width):
    normalizer = T.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225])
    test_transformer = T.Compose([
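        # interpolation=3 is PIL.Image.BICUBIC (integer code from the older torchvision API)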
        T.Resize((height, width), interpolation=3),
        T.ToTensor(),
        normalizer
    ])
    img = Image.open(image_path).convert('RGB')
    img = test_transformer(img)
    img = img.unsqueeze(0)  # add a batch dimension
    return img

def main():
    image_paths = [
        r"D:\Saeed\Data\Re-ID\market1501\bounding_box_train\0002_c1s1_000451_03.jpg",
        r"D:\Saeed\Data\Re-ID\market1501\bounding_box_train\0002_c1s1_000551_01.jpg",
        r"D:\Saeed\Data\Re-ID\market1501\bounding_box_train\0011_c1s6_027271_01.jpg",
        # Add more image paths here
    ]

    try:
        args = {
            'arch': 'resnet50',
            'resume': r'D:\Saeed\DEVS\DomainMix\logs\trained\model_best_435.pth.tar',
            'height': 256,
            'width': 128,
        }

        print(models.names())

        # Create model
        model = models.create(args['arch'], pretrained=False, num_features=0, dropout=0, num_classes=0)
        model.cuda()

        # Load from checkpoint
        checkpoint = load_checkpoint(args['resume'])
        copy_state_dict(checkpoint['state_dict'], model)

        model = model.eval()

        features = []
        for image_path in image_paths:
            # Preprocess the image
            img = preprocess_single_image(image_path, args['height'], args['width']).cuda()
            feature = normalize_embedding(model(img).cpu().data.numpy())
            features.append(feature)
            print(f"Processing {image_path}")

        # cross similarity check
        for i in range(len(features)):
            for j in range(len(features)):
                if j > i:
                    similarity = cosine_similarity(features[i].reshape(1, -1), features[j].reshape(1, -1))
                    img_name1 = osp.basename(image_paths[i])
                    img_name2 = osp.basename(image_paths[j])
                    print(f'{img_name1} - {img_name2}: {similarity}')

    except Exception as e:
        print(f'Error while extracting features: {e}')

if __name__ == '__main__':
    main()

And it always returns the same features for different images:

Processing D:\Saeed\Data\Re-ID\market1501\bounding_box_train\0002_c1s1_000451_03.jpg
Processing D:\Saeed\Data\Re-ID\market1501\bounding_box_train\0002_c1s1_000551_01.jpg
Processing D:\Saeed\Data\Re-ID\market1501\bounding_box_train\0011_c1s6_027271_01.jpg
0002_c1s1_000451_03.jpg - 0002_c1s1_000551_01.jpg: [[0.99988645]]
0002_c1s1_000451_03.jpg - 0011_c1s6_027271_01.jpg: [[0.9997024]]
0002_c1s1_000551_01.jpg - 0011_c1s6_027271_01.jpg: [[0.99968207]]
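
A cosine similarity of ~0.999 between L2-normalized features does not by itself prove the embeddings are identical; it can also happen when the checkpoint keys do not match the model and the backbone is silently left at its random initialization, in which case globally pooled ReLU features tend to point in very similar directions. The sketch below is a diagnostic only: it reuses image_paths, args and the preprocess_single_image helper from the script above, assumes the dg helpers behave as they do in this repo, and checks two things, whether loading the checkpoint actually changed the weights and whether the raw, unnormalized embeddings differ at all.

import numpy as np
import torch

from dg import models
from dg.utils.serialization import load_checkpoint, copy_state_dict

def diagnose(image_paths, args):
    # Build the model exactly as in the script above.
    model = models.create(args['arch'], pretrained=False,
                          num_features=0, dropout=0, num_classes=0).cuda()

    # Snapshot one parameter so we can tell whether loading the checkpoint
    # changed anything at all (copy_state_dict may skip mismatched keys).
    ref_name, ref_tensor = next(iter(model.state_dict().items()))
    before = ref_tensor.clone()

    checkpoint = load_checkpoint(args['resume'])
    copy_state_dict(checkpoint['state_dict'], model)
    after = model.state_dict()[ref_name]
    print(f'{ref_name} modified by checkpoint:', not torch.equal(before, after))

    model.eval()
    feats = []
    with torch.no_grad():  # plain inference, no autograd graph
        for path in image_paths:
            # preprocess_single_image is the helper defined in the script above
            img = preprocess_single_image(path, args['height'], args['width']).cuda()
            feats.append(model(img).cpu().numpy().ravel())

    # Pairwise Euclidean distance on the raw embeddings: exactly 0.0 means the
    # features really are identical; small but nonzero means they only look
    # alike after L2 normalization.
    for i in range(len(feats)):
        for j in range(i + 1, len(feats)):
            print(i, j, float(np.linalg.norm(feats[i] - feats[j])))
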
WangWenhao0716 commented 1 year ago

Sorry, this repo is no longer maintained.
