neuralchen / SimSwap

An arbitrary face-swapping framework on images and videos with one single trained model!

How to use insightface library directly [HELP] #427

Closed · harisreedhar closed this 1 year ago

harisreedhar commented 1 year ago

Hi, I'm trying to use SimSwap with the insightface library installed directly through pip, but the result looks totally weird. How do I use the model correctly? Are latent_id and the insightface embedding different things? What am I missing? 🤔

Here is the entire code:

# running from simswap directory

import cv2
import PIL
import torch
import torch.nn as nn
import numpy as np
import insightface
from torchvision import transforms
from models.fs_networks import Generator_Adain_Upsample

def resize_with_padding(image, target_size):
    # letterbox: scale the image to fit inside target_size, then pad with black to the exact size
    original_height, original_width = image.shape[:2]
    target_width, target_height = target_size
    aspect_ratio = original_width / original_height
    target_aspect_ratio = target_width / target_height
    if target_aspect_ratio > aspect_ratio:
        new_width = target_height * aspect_ratio
        new_height = target_height
    else:
        new_width = target_width
        new_height = target_width / aspect_ratio
    resized_image = cv2.resize(image, (int(new_width), int(new_height)))
    top_padding = (target_height - resized_image.shape[0]) // 2
    bottom_padding = target_height - resized_image.shape[0] - top_padding
    left_padding = (target_width - resized_image.shape[1]) // 2
    right_padding = target_width - resized_image.shape[1] - left_padding
    padded_image = cv2.copyMakeBorder(
        resized_image,
        top_padding,
        bottom_padding,
        left_padding,
        right_padding,
        cv2.BORDER_CONSTANT,
        value=(0, 0, 0)
    )
    return padded_image

## insightface analyser (buffalo_l bundles detection plus a 512-d recognition model)
face_analyser = insightface.app.FaceAnalysis(name='buffalo_l')
face_analyser.prepare(ctx_id=0, det_size=(640, 640), det_thresh=0.5)

def get_face_info(img):
    # take the first detected face; return a padded 224x224 crop and its (1, 512) embedding
    crop = lambda img, bb: img[int(bb[1]):int(bb[3]), int(bb[0]):int(bb[2])]
    face = face_analyser.get(img)[0]
    embedding = np.array([face['embedding']])
    cropped = crop(img, face['bbox'])
    return resize_with_padding(cropped, (224, 224)), embedding

## read images
img1 = cv2.imread("./demo_file/Iron_man.jpg")
img2 = cv2.imread("./demo_file/specific3.png")

## cropped face and embedding
face1, embed1 = get_face_info(img1)
face2, embed2 = get_face_info(img2)

## load simswap model (224 checkpoint) and switch to inference mode
device = torch.device("cuda:0")
net = Generator_Adain_Upsample(input_nc=3, output_nc=3, latent_size=512, n_blocks=9, deep=False)
net.to(device)
net.load_state_dict(torch.load("./checkpoints/simswap_224_latest_net_G.pth", map_location=device))
net.eval()

with torch.no_grad():
    transform = transforms.ToTensor()
    # keep the crop as uint8 so ToTensor scales it to [0, 1]; reverse channels BGR -> RGB
    tensor1 = transform(face1[:, :, ::-1].copy())
    tensor1 = tensor1.unsqueeze(0)

    # L2-normalise the insightface embedding before using it as latent_id
    embed2 = torch.from_numpy(embed2)
    embed2 /= torch.linalg.norm(embed2, dim=1, keepdim=True)

    tensor1 = tensor1.to(device)
    embed2 = embed2.to(device)

    img_fake = net(tensor1, embed2)

# CHW -> HWC, RGB -> BGR, rescale to 0-255 for cv2
cv2_img = img_fake.squeeze(0).permute(1, 2, 0).cpu().numpy()[..., ::-1]
cv2.imwrite("test_swap.jpg", np.clip(cv2_img * 255, 0, 255).astype('uint8'))

img1: Iron_man

img2: specific3

result: test_swap
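
For comparison, here is roughly how I read the repo's own test script (test_wholeimage_swapsingle.py) building latent_id: it does not use the raw insightface embedding at all, but runs a landmark-aligned source crop through the ArcFace checkpoint that SimSwap ships separately, then L2-normalises the result before handing it to the generator. The sketch below follows my reading of that script and reuses names from my snippet above; the checkpoint path and the way the checkpoint unwraps differ between repo versions, so treat those lines as assumptions rather than a verified drop-in.

import torch
import torch.nn.functional as F
from torchvision import transforms

# SimSwap ships its own ArcFace weights; depending on the repo version the loaded
# object is either the model itself or a dict wrapping it (see models/fs_model.py),
# so this unwrapping and the path below are assumptions
arc_ckpt = torch.load("./arcface_model/arcface_checkpoint.tar", map_location="cpu")
netArc = arc_ckpt['model'].module if isinstance(arc_ckpt, dict) else arc_ckpt
netArc = netArc.to(device).eval()

# the normalisation the test scripts apply before ArcFace
transformer_Arcface = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
])

with torch.no_grad():
    # face2 is the 224x224 BGR source crop from the snippet above
    src = transformer_Arcface(face2[:, :, ::-1].copy()).unsqueeze(0).to(device)
    src_112 = F.interpolate(src, size=(112, 112))    # ArcFace expects 112x112 input
    latent_id = netArc(src_112)                      # 512-d identity code
    latent_id = F.normalize(latent_id, p=2, dim=1)   # L2-normalised, as in the test scripts

    # feed this latent_id to the generator instead of the buffalo_l embedding
    img_fake = net(tensor1, latent_id)

If that reading is right, the weird output would come from the fact that buffalo_l's embedding is produced by a different recognition network than the ArcFace model SimSwap was trained against, so even after normalisation the two latent spaces don't line up; the repo also aligns faces by landmarks (insightface_func/face_detect_crop_*) rather than taking a plain bbox crop with padding, which probably matters as well.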

jasan-s commented 1 year ago

@harisreedhar why did you close this? I'm also looking for a way to get better than 128x128 resolution out of insightface.
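
For context, the swap model bundled with insightface (inswapper_128.onnx) works on fixed 128x128 crops, while the SimSwap 224 checkpoint loaded above works at 224x224. A quick shape check, assuming the snippet above has already run:

# the 224 checkpoint should give a 224x224 swapped crop (vs. 128x128 from inswapper_128)
print(img_fake.shape)   # expected: torch.Size([1, 3, 224, 224])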