zengqunzhao / EfficientFace

[AAAI'21] Robust Lightweight Facial Expression Recognition Network with Label Distribution Training
MIT License
187 stars 32 forks source link

about visualization code #24

Open study-clever opened 1 year ago

EHuba94 commented 4 months ago

The code didn't create the "Vis" folder. I wrote a working version of the visualization code, shown below.

import glob
import os

import cv2
import matplotlib.pyplot as plt
import numpy as np
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
import torchvision.transforms as transforms
from PIL import Image

from models import EfficientFace
from models import resnet

# Print probabilities like 0.123 instead of scientific notation when arrays
# are rendered (used for the prediction printout in main()).
np.set_printoptions(precision=3, suppress=True)

def print_directory_contents(dir_path):
    """Print a tree-like listing of *dir_path* for debugging.

    Each directory is printed as ``name/`` indented 4 spaces per nesting
    level; files are indented one level deeper than their directory.
    """
    for root, dirs, files in os.walk(dir_path):
        # Depth of `root` relative to `dir_path`, measured in path separators.
        level = root.replace(dir_path, '').count(os.sep)
        indent = ' ' * 4 * level  # markdown paste had eaten the `*` operators
        print('{}{}/'.format(indent, os.path.basename(root)))
        subindent = ' ' * 4 * (level + 1)
        for f in files:
            print('{}{}'.format(subindent, f))

def main():
    """Run EfficientFace on every .png in ./test_data and save visualizations.

    For each image, saves to ./Vis:
      * ``<name>.png``       — a bar chart of the 7-class probability distribution
      * ``final_<name>.png`` — the chart stacked on top of the (resized) input
        image, with the rounded probabilities overlaid as text.

    Requires a CUDA device and the pretrained checkpoint at
    ./checkpoint/EfficientFace_Trained_on_CAERS.pth.tar.
    """
    os.environ["CUDA_VISIBLE_DEVICES"] = '0'
    vis_dir = os.path.join('.', 'Vis')

    # The original script assumed Vis/ already existed; create it if missing.
    if not os.path.exists(vis_dir):
        print("Creating Vis directory...")
        os.makedirs(vis_dir)

    print("Starting the main function.")

    # create model
    ## EfficientFace
    model_cla = EfficientFace.efficient_face()
    model_cla.fc = nn.Linear(1024, 7)  # 7 expression classes
    model_cla = torch.nn.DataParallel(model_cla).cuda()
    checkpoint = torch.load('./checkpoint/EfficientFace_Trained_on_CAERS.pth.tar')
    pre_trained_dict = checkpoint['state_dict']
    model_cla.load_state_dict(pre_trained_dict)
    print("Model loaded successfully.")

    # Data loading code
    data_dir = './test_data'
    print(f"Data directory: {data_dir}")

    # Print directory contents for debugging
    print("Contents of data directory:")
    print_directory_contents(data_dir)

    # Modify to look for .png files instead of .jpg
    image_dir = glob.glob(os.path.join(data_dir, '*.png'))
    print(f"Found {len(image_dir)} images.")
    print("Image paths:", image_dir)

    normalize = transforms.Normalize(mean=[0.5, 0.5, 0.5],
                                     std=[0.5, 0.5, 0.5])
    transforms_com = transforms.Compose([transforms.Resize((224, 224)),
                                         transforms.ToTensor(),
                                         normalize])

    softmax = nn.Softmax(dim=1)

    # Class order must match the checkpoint's training label order.
    label = ('Neutral', 'Happy', 'Sad', 'Surprise', 'Fear', 'Disgust', 'Anger')

    # Composite canvas: 250px chart on top, 500x500 input image below.
    final_img = np.zeros((750, 500, 3), np.uint8)

    model_cla.eval()
    with torch.no_grad():
        for img in image_dir:
            print(f"Processing image: {img}")
            img_t = Image.open(img).convert('RGB')
            img_t = transforms_com(img_t)
            img_t = img_t.unsqueeze(0)  # add batch dimension
            img_t = img_t.cuda()
            output = model_cla(img_t)
            output = softmax(output)
            output = output.cpu().numpy()[0]
            print(f"Prediction: {output}")

            u = list(img.split(os.sep))  # u[-1] is the bare filename
            text = np.round(output, 2)
            # Bar chart of the predicted distribution, saved under Vis/.
            plt.figure(figsize=(5, 2.5))
            plt.bar(range(len(text)), text, tick_label=label)
            plt.title('Distribution of the Prediction', color='red')
            plt.ylim(0, 1)
            vis_path = os.path.join(vis_dir, f'{u[-1]}')
            plt.savefig(vis_path)
            print(f"Saved bar chart to: {vis_path}")

            # Stack the saved chart above the input image and annotate it.
            img_pad = cv2.imread(vis_path)
            text = str(text)
            img = cv2.imread(img)
            img = cv2.resize(img, (500, 500))
            cv2.putText(img, text, (0, 30), cv2.FONT_HERSHEY_DUPLEX, 0.6, (255, 255, 255))
            final_img[0:250, 0:500] = img_pad  # H,W
            final_img[250:750, 0:500] = img
            final_img_path = os.path.join(vis_dir, f'final_{u[-1]}')
            cv2.imwrite(final_img_path, final_img)
            print(f"Saved final image to: {final_img_path}")

    print("Finished processing all images.")

class RecorderMeter(object):
    """Computes and stores the minimum loss value and its epoch index.

    Placeholder: kept so that checkpoints pickled with a RecorderMeter
    instance can be unpickled by ``torch.load`` — TODO confirm that is why
    it is needed here.
    """
    pass

# Markdown had stripped the dunder underscores from the original paste.
if __name__ == '__main__':
    main()