import os
# Hide the GPU so the conversion runs on CPU (must be set before TF initializes CUDA)
os.environ['CUDA_VISIBLE_DEVICES'] = '-1'

import tensorflow as tf
from keras.models import load_model

# Load the trained Keras model (architecture + weights, no optimizer state)
model = load_model('trained_models/emotion_models/fer2013_mini_XCEPTION.102-0.66.hdf5', compile=False)

# Track the model in the checkpoint object, otherwise tf.train.Checkpoint() saves nothing
saver = tf.train.Checkpoint(model=model)
save_path = saver.save('extractedCheckPoints/fer2013_mini_XCEPTION.102-0.66.ckpt')
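Since the model_meta entry below needs the graph's real input and output tensor names, one way to discover candidates is to query the loaded Keras model directly (a minimal sketch; the printed names vary with how the model was built):

# Inspect the loaded model to find candidate values for 'input_name' and
# 'output_names' in the model_meta entry below (names vary per model)
print(model.inputs[0].name)    # e.g. 'input_1:0'
print(model.outputs[0].name)   # e.g. 'predictions/Softmax:0'
model.summary()                # layer-by-layer view of the architecture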
emotion_labels = get_labels('fer2013')  # get_labels comes from utils.datasets in the face_classification repo
emotion_model_path = 'trained_models/emotion_models/fer2013_mini_XCEPTION.102-0.66.hdf5'
emotion_classifier = load_model(emotion_model_path, compile=False)

# Entry for the NETS dict in tf_to_trt_image_classification/scripts/model_meta.py
'emotions': {
    'model': emotion_classifier,
    'arg_scope': emotion_arg_scope,  # what should this be changed to?
    'num_classes': 7,
    'input_name': 'input',  # what should this be changed to?
    'output_names': ['InceptionResnetV2/Logits/Logits/BiasAdd'],  # what should this be changed to?
    'input_width': 64,
    'input_height': 64,
    'input_channels': 1,
    'preprocess_fn': preprocess_emotion,    # preprocessing
    'postprocess_fn': postprocess_emotion,  # postprocessing
    'checkpoint_filename': CHECKPOINT_DIR + 'emotions.ckpt',
    'frozen_graph_filename': FROZEN_GRAPHS_DIR + 'emotions.pb',
    'trt_convert_status': "works",  # what should this be changed to?
    'plan_filename': PLAN_DIR + 'inception_resnet_v2.plan'  # what should this be changed to?
}
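For reference, preprocess_emotion and postprocess_emotion do not exist yet; below is a plausible sketch with hypothetical names, assuming the (x/255 - 0.5) * 2 normalization that the face_classification training code applies to fer2013 inputs:

import numpy as np

def preprocess_emotion(image):
    # image: 64x64 grayscale array; normalize to [-1, 1] as during training
    x = image.astype(np.float32) / 255.0
    x = (x - 0.5) * 2.0
    return x.reshape(1, 64, 64, 1)  # add batch and channel dimensions

def postprocess_emotion(output):
    # output: (1, 7) score vector; map the argmax to its label string
    return emotion_labels[int(np.argmax(output))]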
# 'slim' below is tensorflow.contrib.slim (TF 1.x) or the tf_slim package
def inception_resnet_v2_arg_scope(
        weight_decay=0.00004,
        batch_norm_decay=0.9997,
        batch_norm_epsilon=0.001,
        activation_fn=tf.nn.relu,
        batch_norm_updates_collections=tf.GraphKeys.UPDATE_OPS,
        batch_norm_scale=False):
    """Returns the scope with the default parameters for inception_resnet_v2.

    Args:
      weight_decay: the weight decay for weights variables.
      batch_norm_decay: decay for the moving average of batch_norm momentums.
      batch_norm_epsilon: small float added to variance to avoid dividing by zero.
      activation_fn: Activation function for conv2d.
      batch_norm_updates_collections: Collection for the update ops for batch norm.
      batch_norm_scale: If True, uses an explicit `gamma` multiplier to scale the
        activations in the batch normalization layer.

    Returns:
      an arg_scope with the parameters needed for inception_resnet_v2.
    """
    # Set weight_decay for weights in conv2d and fully_connected layers.
    with slim.arg_scope([slim.conv2d, slim.fully_connected],
                        weights_regularizer=slim.l2_regularizer(weight_decay),
                        biases_regularizer=slim.l2_regularizer(weight_decay)):
        batch_norm_params = {
            'decay': batch_norm_decay,
            'epsilon': batch_norm_epsilon,
            'updates_collections': batch_norm_updates_collections,
            'fused': None,  # Use fused batch norm if possible.
            'scale': batch_norm_scale,
        }
        # Set activation_fn and parameters for batch_norm.
        with slim.arg_scope([slim.conv2d], activation_fn=activation_fn,
                            normalizer_fn=slim.batch_norm,
                            normalizer_params=batch_norm_params) as scope:
            return scope
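For context, an arg_scope only matters for slim-built networks: it is applied when the graph is constructed, so every slim layer call inside it inherits the defaults defined above. The standard usage pattern, shown here for illustration, is:

# Build the network inside the scope so every slim.conv2d /
# slim.fully_connected call picks up the regularizer and batch-norm defaults
with slim.arg_scope(inception_resnet_v2_arg_scope()):
    logits, end_points = inception_resnet_v2(
        images, num_classes=1001, is_training=False)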
To summarize: I am trying to run emotion detection on a Jetson Nano and want to increase the model's inference speed by converting it to TensorRT. With the guidance of tf_to_trt_image_classification, I was able to convert fer2013_mini_XCEPTION.102-0.66.hdf5 into checkpoints using the first script above, which targets TensorFlow 2.x (source: "how to convert hd5 file to ckpt").

Next, I have to extract the graph from the model. I am using tf_to_trt_image_classification/scripts/models_to_frozen_graphs.py as a guide. That script requires metadata about the model being converted, for example the inception_resnet_v2 entry (source: tf_to_trt_image_classification/scripts/model_meta.py). The 'emotions' dict above is what I was able to fill in for the emotion model so far, and the inception_resnet_v2_arg_scope above is quoted from models/research/slim/nets/inception_resnet_v2.py.

The values I still need to work out are:
- arg_scope: inception_resnet_v2_arg_scope is shown above; what is its equivalent emotion_arg_scope for this Keras model?
- output_names: what should this be for fer2013_mini_XCEPTION?

Please guide.
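In case it helps frame the question: since the model is plain Keras rather than slim, I am also considering freezing it directly in TensorFlow 2.x and skipping the slim-specific metadata entirely. A rough, untested sketch (the output directory and file name are placeholders):

from tensorflow.python.framework.convert_to_constants import convert_variables_to_constants_v2

# Wrap the Keras model in a concrete function with a fixed input signature
full_model = tf.function(lambda x: model(x))
concrete = full_model.get_concrete_function(
    tf.TensorSpec([1, 64, 64, 1], tf.float32))

# Inline the variables as constants and serialize the frozen GraphDef
frozen = convert_variables_to_constants_v2(concrete)
tf.io.write_graph(frozen.graph.as_graph_def(),
                  'frozen_graphs', 'emotions.pb', as_text=False)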