Turns out this is all you need to get the 128-dim vector:
# Convert to RGB
print "Converting frame to RGB"
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)

# Align face
print "Aligning face"
alignedFace = self.aligner.align(96, frame, skipMulti=True)  # skip the frame if more than one face is detected

# Extract features
if alignedFace is not None:
    print "Initializing neural net"
    with openface.TorchNeuralNet(model="./data/nn4.small2.v1.t7") as net:
        print "Starting forward pass"
        features = net.forward(alignedFace)
        print features
else:
    print "No face found"
Now I just need to put it through the cascade....
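
If "the cascade" means a downstream classifier over these 128-dim embeddings, the OpenFace classification demo handles that step with a scikit-learn classifier pickled alongside a LabelEncoder. Assuming a similar setup (the classifier path and the (label_encoder, clf) pickle layout here are my guesses), the step after the forward pass might look roughly like:

import pickle

# Hypothetical classifier trained on labelled 128-dim embeddings,
# pickled as (label_encoder, clf) like the OpenFace classifier demo
with open("./data/classifier.pkl", "rb") as f:
    label_encoder, clf = pickle.load(f)

# features is the 128-dim vector from net.forward above
probs = clf.predict_proba(features.reshape(1, -1))[0]
best = probs.argmax()
print("%s (%.2f)" % (label_encoder.inverse_transform([best])[0], probs[best]))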