fabriziosalmi / tensorflow-poc

MNIST PoC using TensorFlow
GNU General Public License v3.0
0 stars 0 forks source link

poc improvements #1

Open fabriziosalmi opened 11 months ago

fabriziosalmi commented 11 months ago
import argparse
import logging
import tensorflow as tf
from tensorflow.keras import datasets, layers, models
import matplotlib.pyplot as plt
import datetime

# ---------------------------------------------------------------------------
# Command-line interface and logging configuration (module-level side effects).
# ---------------------------------------------------------------------------
parser = argparse.ArgumentParser(description='TensorFlow PoC Script for MNIST Dataset')
parser.add_argument('--epochs', type=int, default=10, help='Number of epochs for training the model')
parser.add_argument('--batch-size', type=int, default=32, help='Batch size for training')
parser.add_argument('--save-model', action='store_true', help='Flag to save the trained model')
parser.add_argument('--learning-rate', type=float, default=0.001, help='Learning rate for the optimizer')
parser.add_argument('--optimizer', type=str, default='adam', help='Type of optimizer to use')
parser.add_argument('--validation-split', type=float, default=0.2, help='Fraction of the training data to be used as validation data')
# Restrict to the names logging actually understands: previously any string was
# accepted here and an invalid one only failed later, deep inside basicConfig().
parser.add_argument('--log-level', type=str, default='INFO',
                    choices=['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'],
                    help='Logging level')

args = parser.parse_args()

# Timestamped log file name so repeated runs never clobber each other's logs.
log_file = f"mnist_poc_{datetime.datetime.now().strftime('%Y%m%d_%H%M%S')}.log"
logging.basicConfig(filename=log_file, level=args.log_level, format='%(asctime)s - %(levelname)s - %(message)s')

def load_and_preprocess_data():
    """Load the MNIST dataset and normalise pixel values to [0, 1].

    Returns:
        ``((train_images, train_labels), (test_images, test_labels))`` where
        images are float arrays of shape ``(N, 28, 28, 1)`` (channel axis
        added for Conv2D layers) and labels are integer digit classes 0-9.

    NOTE(review): the original body was a ``pass`` placeholder ("implementation
    remains the same"); this is the conventional MNIST preprocessing — confirm
    against the original implementation.
    """
    (train_images, train_labels), (test_images, test_labels) = datasets.mnist.load_data()
    # uint8 pixels in 0-255 -> floats in [0, 1] for stable training.
    train_images = train_images / 255.0
    test_images = test_images / 255.0
    # Add the trailing channel dimension expected by convolutional layers.
    train_images = train_images.reshape((-1, 28, 28, 1))
    test_images = test_images.reshape((-1, 28, 28, 1))
    return (train_images, train_labels), (test_images, test_labels)

def create_model():
    """Build a small CNN classifier for 28x28x1 MNIST images.

    Returns:
        An *uncompiled* ``tf.keras`` Sequential model with a 10-way softmax
        output (one unit per digit class). The softmax head matches the
        ``sparse_categorical_crossentropy`` loss used at compile time, which
        expects probabilities rather than logits.

    NOTE(review): the original body was a ``pass`` placeholder; this is a
    standard MNIST CNN — confirm against the original implementation.
    """
    model = models.Sequential([
        layers.Conv2D(32, (3, 3), activation='relu', input_shape=(28, 28, 1)),
        layers.MaxPooling2D((2, 2)),
        layers.Conv2D(64, (3, 3), activation='relu'),
        layers.MaxPooling2D((2, 2)),
        layers.Flatten(),
        layers.Dense(64, activation='relu'),
        layers.Dense(10, activation='softmax'),
    ])
    return model

def compile_and_train_model(model, train_images, train_labels):
    """Compile *model* and fit it on the given training data.

    Hyperparameters (optimizer name, learning rate, epochs, batch size and
    validation split) are read from the module-level ``args`` namespace.

    Args:
        model: An uncompiled ``tf.keras`` model.
        train_images: Training inputs accepted by ``model.fit``.
        train_labels: Integer class labels (sparse categorical loss).

    Returns:
        The ``tf.keras.callbacks.History`` produced by ``model.fit``.
    """
    # Dispatch known optimizer names to constructors so that
    # args.learning_rate is always honoured. Previously any name other than
    # 'adam'/'sgd' fell through as a plain string to model.compile(), which
    # silently ignored the requested --learning-rate.
    optimizer_factories = {
        'adam': tf.keras.optimizers.Adam,
        'sgd': tf.keras.optimizers.SGD,
        'rmsprop': tf.keras.optimizers.RMSprop,
    }
    factory = optimizer_factories.get(args.optimizer.lower())
    if factory is not None:
        optimizer = factory(learning_rate=args.learning_rate)
    else:
        # Fall back to Keras' built-in string lookup, but make the dropped
        # learning rate explicit instead of silent.
        logging.warning(
            "Unrecognized optimizer %r: deferring to Keras defaults; "
            "--learning-rate is ignored", args.optimizer)
        optimizer = args.optimizer

    model.compile(optimizer=optimizer,
                  loss='sparse_categorical_crossentropy',
                  metrics=['accuracy'])

    history = model.fit(train_images, train_labels,
                        epochs=args.epochs,
                        validation_split=args.validation_split,
                        batch_size=args.batch_size)

    return history

def main():
    """Run the full PoC pipeline: load data, build, train, evaluate, save.

    NOTE(review): the original body was a ``pass`` placeholder; this wiring
    follows the CLI flags defined above — confirm against the original
    implementation.
    """
    logging.info("Loading and preprocessing MNIST data")
    (train_images, train_labels), (test_images, test_labels) = load_and_preprocess_data()

    logging.info("Building model")
    model = create_model()

    logging.info("Training for %d epoch(s)", args.epochs)
    compile_and_train_model(model, train_images, train_labels)

    test_loss, test_acc = model.evaluate(test_images, test_labels, verbose=2)
    logging.info("Test accuracy: %.4f (loss %.4f)", test_acc, test_loss)

    # Only persist the model when explicitly requested via --save-model.
    if args.save_model:
        model_path = f"mnist_model_{datetime.datetime.now().strftime('%Y%m%d_%H%M%S')}.h5"
        model.save(model_path)
        logging.info("Saved trained model to %s", model_path)

# Run the pipeline only when executed as a script (not when imported).
if __name__ == "__main__":
    main()
fabriziosalmi commented 11 months ago

Convert a Keras H5 model to TensorFlow Lite:

Create a model using high-level tf.keras.* APIs

model = tf.keras.models.Sequential([
    tf.keras.layers.Dense(units=1, input_shape=[1]),
    tf.keras.layers.Dense(units=16, activation='relu'),
    tf.keras.layers.Dense(units=1)
])
model.compile(optimizer='sgd', loss='mean_squared_error')  # compile the model
model.fit(x=[-1, 0, 1], y=[-3, -1, 1], epochs=5)  # train the model

To generate a SavedModel:

tf.saved_model.save(model, "saved_model_keras_dir")

Convert the model.

converter = tf.lite.TFLiteConverter.from_keras_model(model)
tflite_model = converter.convert()

Save the model.

with open('model.tflite', 'wb') as f:
    f.write(tflite_model)