[Open] alpercanberk opened this issue 10 months ago
You could consider this approach:
# Run the default TorchScript LaMa inpainting model on one image/mask pair
# and write the inpainted result to out_dir.
default_lama_path = '/LaMa_models/lama-places/lama-fourier/lama-model-last.pt'
default_lama = torch.jit.load(default_lama_path, map_location=torch.device('cpu'))
default_lama.eval()

in_img_path = ''   # path to the input image -- fill in
in_mask_path = ''  # path to the mask image (white = region to inpaint) -- fill in

# cv2 loads BGR; [:, :, ::-1] flips to RGB but produces a negative-stride
# view, which torch.from_numpy() rejects ("some of the strides ... are
# negative") -- .copy() materializes a contiguous array first.
in_img = cv2.imread(in_img_path)[:, :, ::-1].copy() / 255.0
# NOTE(review): cv2.imread returns 3 channels here, so the concatenated
# input below has 6 channels; most LaMa JIT exports expect 3 (image) + 1
# (mask) = 4. If the model complains about channel count, load the mask
# with cv2.IMREAD_GRAYSCALE and add a trailing channel axis -- confirm
# against the specific .pt export.
in_mask = cv2.imread(in_mask_path) / 255.0

# HWC float64 numpy -> NCHW float32 tensors.
in_img = torch.from_numpy(in_img).unsqueeze(0).permute(0, 3, 1, 2).type(torch.float)
in_mask = torch.from_numpy(in_mask).unsqueeze(0).permute(0, 3, 1, 2).type(torch.float)
# Zero out the masked region; the model receives [masked image, mask].
in_img = in_img * (1 - in_mask)

with torch.no_grad():
    default_input = torch.cat([in_img, in_mask], dim=1)
    default_lama_result = default_lama(default_input)

# NCHW -> HWC numpy, back to BGR for cv2. Clip to [0, 1] before scaling so
# out-of-range model outputs don't wrap around when cast to uint8 on write.
default_lama_result = default_lama_result.permute(0, 2, 3, 1).squeeze().cpu().numpy()
out_img = (default_lama_result[:, :, ::-1].clip(0.0, 1.0) * 255.0).round().astype('uint8')
# NOTE(review): out_dir is not defined in this snippet -- set it before running.
cv2.imwrite(os.path.join(out_dir, 'default_lama.png'), out_img)
You could consider this approach:
# Run the default TorchScript LaMa inpainting model on one image/mask pair
# and write the inpainted result to out_dir.
default_lama_path = '/LaMa_models/lama-places/lama-fourier/lama-model-last.pt'
default_lama = torch.jit.load(default_lama_path, map_location=torch.device('cpu'))
default_lama.eval()

in_img_path = ''   # path to the input image -- fill in
in_mask_path = ''  # path to the mask image (white = region to inpaint) -- fill in

# cv2 loads BGR; [:, :, ::-1] flips to RGB but produces a negative-stride
# view, which torch.from_numpy() rejects ("some of the strides ... are
# negative") -- .copy() materializes a contiguous array first.
in_img = cv2.imread(in_img_path)[:, :, ::-1].copy() / 255.0
# NOTE(review): cv2.imread returns 3 channels here, so the concatenated
# input below has 6 channels; most LaMa JIT exports expect 3 (image) + 1
# (mask) = 4. If the model complains about channel count, load the mask
# with cv2.IMREAD_GRAYSCALE and add a trailing channel axis -- confirm
# against the specific .pt export.
in_mask = cv2.imread(in_mask_path) / 255.0

# HWC float64 numpy -> NCHW float32 tensors.
in_img = torch.from_numpy(in_img).unsqueeze(0).permute(0, 3, 1, 2).type(torch.float)
in_mask = torch.from_numpy(in_mask).unsqueeze(0).permute(0, 3, 1, 2).type(torch.float)
# Zero out the masked region; the model receives [masked image, mask].
in_img = in_img * (1 - in_mask)

with torch.no_grad():
    default_input = torch.cat([in_img, in_mask], dim=1)
    default_lama_result = default_lama(default_input)

# NCHW -> HWC numpy, back to BGR for cv2. Clip to [0, 1] before scaling so
# out-of-range model outputs don't wrap around when cast to uint8 on write.
default_lama_result = default_lama_result.permute(0, 2, 3, 1).squeeze().cpu().numpy()
out_img = (default_lama_result[:, :, ::-1].clip(0.0, 1.0) * 255.0).round().astype('uint8')
# NOTE(review): out_dir is not defined in this snippet -- set it before running.
cv2.imwrite(os.path.join(out_dir, 'default_lama.png'), out_img)