tgxs002 / HPSv2

Human Preference Score v2: A Solid Benchmark for Evaluating Human Preferences of Text-to-Image Synthesis
Apache License 2.0

simple script #1

Closed. NicholasCao closed this issue 1 year ago.

NicholasCao commented 1 year ago

Is there a script that can be used simply, like HPS?

Enderfga commented 1 year ago
import torch
from PIL import Image
from src.open_clip import create_model_and_transforms, get_tokenizer

# Setup the args and prepare the model and tokenizer
args = {
    'model': 'ViT-H-14',
    'precision': 'amp',
    'checkpoint': 'your_checkpoint_path'  # replace with your checkpoint path
}

device = 'cuda' if torch.cuda.is_available() else 'cpu'
model, preprocess_train, preprocess_val = create_model_and_transforms(
    args['model'],
    'laion2B-s32B-b79K',
    precision=args['precision'],
    device=device,
    jit=False,
    force_quick_gelu=False,
    force_custom_text=False,
    force_patch_dropout=False,
    force_image_size=None,
    pretrained_image=False,
    image_mean=None,
    image_std=None,
    light_augmentation=True,
    aug_cfg={},
    output_dict=True,
    with_score_predictor=False,
    with_region_predictor=False
)

checkpoint = torch.load(args['checkpoint'], map_location=device)  # map_location lets this load on CPU-only machines
model.load_state_dict(checkpoint['state_dict'])
tokenizer = get_tokenizer(args['model'])
model.eval()

# Load your image and prompt
image_path = 'path_to_your_image.jpg'  # replace with your image path
prompt = 'your prompt here'  # replace with your prompt

# Process the image
image = preprocess_val(Image.open(image_path)).unsqueeze(0).to(device)

# Process the prompt
text = tokenizer([prompt]).to(device)  # open_clip tokenizers are called directly and return a token tensor

# Calculate the HPS
with torch.no_grad():
    outputs = model(image, text)
    image_features, text_features = outputs["image_features"], outputs["text_features"]
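    # HPS is the logit-scaled cosine similarity between the image and text embeddings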
    logits_per_image = outputs["logit_scale"] * image_features @ text_features.T

hps_score = torch.diagonal(logits_per_image).cpu().numpy()
print('HPS score:', hps_score)

Maybe you can give this a try; it's something I wrote myself.
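If you want to rank several candidate images for one prompt, here is a minimal sketch that builds on the snippet above (it reuses model, tokenizer, preprocess_val, prompt, and device from there; the image paths are placeholders):

# Rank a batch of candidate images against a single prompt
image_paths = ['image_0.jpg', 'image_1.jpg', 'image_2.jpg']  # replace with your image paths

images = torch.stack([preprocess_val(Image.open(p)) for p in image_paths]).to(device)
text = tokenizer([prompt]).to(device)

with torch.no_grad():
    outputs = model(images, text)
    # [num_images, 1] similarity matrix: one column because there is a single prompt
    logits = outputs["logit_scale"] * outputs["image_features"] @ outputs["text_features"].T
    scores = logits.squeeze(-1).cpu().tolist()

# A higher score means the model predicts stronger human preference
for path, score in sorted(zip(image_paths, scores), key=lambda x: x[1], reverse=True):
    print(f'{score:.2f}  {path}')

Scoring all images in one forward pass amortizes the model call; the diagonal trick above is only needed when each image is paired with its own prompt.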