JDAI-CV / FaceX-Zoo

A PyTorch Toolbox for Face Recognition
https://arxiv.org/pdf/2101.04407.pdf

Training and Testing Image Format - Was the training done on BGR images and testing on RGB? #171


veb-101 commented 11 months ago

The ImageDataset class in train.py, in the conventional training folder:

import os
import random

import cv2
import numpy as np
import torch
from torch.utils.data import Dataset

class ImageDataset(Dataset):
    def __init__(self, data_root, train_file, crop_eye=False):
        self.data_root = data_root
        self.train_list = []
        train_file_buf = open(train_file)
        line = train_file_buf.readline().strip()
        while line:
            image_path, image_label = line.split(' ')
            self.train_list.append((image_path, int(image_label)))
            line = train_file_buf.readline().strip()
        self.crop_eye = crop_eye
    def __len__(self):
        return len(self.train_list)
    def __getitem__(self, index):
        image_path, image_label = self.train_list[index]
        image_path = os.path.join(self.data_root, image_path)
        image = cv2.imread(image_path)  # OpenCV decodes color images in BGR order
        if self.crop_eye:
            image = image[:60, :]
        #image = cv2.resize(image, (128, 128)) #128 * 128
        if random.random() > 0.5:
            image = cv2.flip(image, 1)  # random horizontal flip
        if image.ndim == 2:
            image = image[:, :, np.newaxis]  # add a channel axis for grayscale images
        # HWC -> CHW, then scale [0, 255] to roughly [-1, 1] (0.0078125 = 1/128)
        image = (image.transpose((2, 0, 1)) - 127.5) * 0.0078125
        image = torch.from_numpy(image.astype(np.float32))
        return image, image_label

The image format is never converted to RGB. It would need image = image[:, :, ::-1] either before the transpose or before the conversion to a tensor.

Or are the images grayscale?
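For reference, a minimal sketch of the fix I have in mind, written as a standalone helper (the function name load_rgb_tensor is hypothetical, not code from this repo):

import cv2
import numpy as np
import torch

def load_rgb_tensor(image_path):
    # Hypothetical helper: same normalization as ImageDataset, but with a
    # BGR -> RGB reorder before the transpose.
    image = cv2.imread(image_path)  # OpenCV decodes color images as BGR
    image = image[:, :, ::-1]       # BGR -> RGB (a reversed, negative-stride view)
    # the subtraction below allocates a fresh contiguous array, so
    # torch.from_numpy is safe afterwards
    image = (image.transpose((2, 0, 1)) - 127.5) * 0.0078125
    return torch.from_numpy(image.astype(np.float32))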

For testing, the CommonTestDataset class is used. Am I right to assume that the images loaded by cv2.imdecode(...) were already in RGB format, or were they in BGR format as well?

import os

import cv2
import numpy as np
import torch
from torch.utils.data import Dataset

class CommonTestDataset(Dataset):
    """ Data processor for model evaluation.

    Attributes:
        image_root(str): root directory of the test set.
        image_list_file(str): path of the image list file.
        crop_eye(bool): whether to crop the eye (upper face) region as input.
    """
    def __init__(self, image_root, image_list_file, crop_eye=False):
        self.image_root = image_root
        self.image_list = []
        image_list_buf = open(image_list_file)
        line = image_list_buf.readline().strip()
        while line:
            self.image_list.append(line)
            line = image_list_buf.readline().strip()
        self.mean = 127.5
        self.std = 128.0
        self.crop_eye = crop_eye
    def __len__(self):
        return len(self.image_list)
    def __getitem__(self, index):
        short_image_path = self.image_list[index]
        image_path = os.path.join(self.image_root, short_image_path)
        # np.fromfile + cv2.imdecode handles paths that cv2.imread cannot;
        # color images are decoded in BGR order, same as cv2.imread
        image = cv2.imdecode(np.fromfile(image_path, dtype=np.uint8), cv2.IMREAD_UNCHANGED)
        #image = cv2.resize(image, (128, 128))
        if self.crop_eye:
            image = image[:60, :]
        # HWC -> CHW, then normalize with (x - 127.5) / 128, matching training
        image = (image.transpose((2, 0, 1)) - self.mean) / self.std
        image = torch.from_numpy(image.astype(np.float32))
        return image, short_image_path

Can you please clarify?
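For context, this is the quick check I used to convince myself that OpenCV decodes in BGR (the file name sample_face.png is hypothetical; a PNG avoids lossy-decoder differences between libraries):

import cv2
import numpy as np
from PIL import Image

path = 'sample_face.png'  # hypothetical test image
bgr = cv2.imread(path)                           # OpenCV: BGR order
rgb = np.array(Image.open(path).convert('RGB'))  # PIL: RGB order
print(np.array_equal(bgr[:, :, ::-1], rgb))      # True if they differ only in channel order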