Hello, I have followed the examples and converted the Keras model to an NNoM model. For my application, I need the probability of each class as an output. When I run the model, the outputs have a very small magnitude regardless of the inputs. Below is my setup. Any help or guidance you can give would be greatly appreciated.
python setup:
from tensorflow.keras.layers import Dense, Input
from tqdm.keras import TqdmCallback
from tensorflow.keras.callbacks import EarlyStopping
from tensorflow.keras.optimizers import SGD
from tensorflow.math import tanh
from tensorflow.keras.losses import MeanSquaredError as MSE
from tensorflow.keras import Model
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
#
# define functions and train model
def train_model(xtrain, ytrain, learnRate=.01):
    numFeatures = xtrain.shape[1]
    inputsize = [numFeatures]
    numClasses = ytrain.shape[1]
    inp = Input(shape=inputsize)
    h1 = Dense(numFeatures*2)(inp)
    h1 = tanh(h1)
    h2 = Dense(numFeatures*3)(h1)
    h2 = tanh(h2)
    h3 = Dense(numFeatures*3)(h2)
    h3 = tanh(h3)
    h4 = Dense(numFeatures*2)(h3)
    h4 = tanh(h4)
    outs = Dense(numClasses)(h4)
    opti = SGD(learning_rate=learnRate, momentum=0.9)
    model = Model(inputs=inp, outputs=outs)
    model.compile(optimizer=opti, loss=MSE())
    history = model.fit(xtrain, ytrain, epochs=5, batch_size=128, shuffle=True,
                        callbacks=[TqdmCallback(verbose=1)])
    plt.semilogy(history.history['loss'], label='loss')
    plt.title('Loss History')
    plt.legend()
    plt.show()
    return model
def scale(x, out_min, out_max):  # scale a single column of data
    return (x - np.min(x)) * (out_max - out_min) / (np.max(x) - np.min(x)) + out_min

def scale_data(data1, minout=-1, maxout=1):  # scale a full matrix/dataframe
    data = data1.copy()
    for i in range(data.shape[1]):
        data[:, i] = scale(data[:, i], minout, maxout)
    return data

def quantize_data(data1):  # quantize scaled data
    data = data1.copy()
    data = (data*128).clip(-128, 127)
    return data.astype('byte')
traindata = np.load('Features.npy')
labels = np.array(pd.read_csv('Kinematics.csv',header=None))[1:,:]
traindata = scale_data(traindata)
traindata = quantize_data(traindata)
labels = scale_data(labels)
model = train_model(traindata,labels)
# convert model and weights
import sys
sys.path.append(r'nnom\scripts')
from nnom import *
head = 'weights_mockup.h'
generate_model(model,traindata[::10],format='hwc', name=head)
# I manually fix the tanh layers in the generated weights_mockup.h (a possible Keras-side alternative is sketched just below)
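# (Hypothetical sketch, not part of my original script: if I build the model with
# Keras Activation('tanh') layers instead of tf.math.tanh, the nnom converter
# should be able to emit the tanh activations itself, so the manual header edit
# may not be needed. The helper name below is just for illustration and the
# layer sizes mirror train_model() above.)
from tensorflow.keras.layers import Activation
def build_model_with_activation_layers(numFeatures, numClasses):
    inp = Input(shape=(numFeatures,))
    x = Dense(numFeatures*2)(inp)
    x = Activation('tanh')(x)   # a Keras layer the converter can see
    x = Dense(numFeatures*3)(x)
    x = Activation('tanh')(x)
    x = Dense(numFeatures*3)(x)
    x = Activation('tanh')(x)
    x = Dense(numFeatures*2)(x)
    x = Activation('tanh')(x)
    outs = Dense(numClasses)(x)
    return Model(inputs=inp, outputs=outs)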
# generate binary testing data
testdata = quantize_data(scale_data(np.load('FeaturesTest.npy')))
testdata.tofile('testdata_q.bin')
#main.c is built and run in Visual Studio. Output is saved to result.csv
#read C results
output_file = r"result.csv"
cpredin = pd.read_csv(output_file, delimiter=' ')
# format results
cpred = pd.DataFrame()
cpred[0] = cpredin[cpredin['DOF'] == 0]['Value'].values
cpred[1] = cpredin[cpredin['DOF'] == 1]['Value'].values
cpred = cpred.values
predictions_python = model.predict(testdata[::10]) # downsample for speed
numDOF = 2
testlabels = np.array(pd.read_csv('Kinematicstest.csv'))
fig,axes = plt.subplots(numDOF,1,figsize=(15,5))
for i in range(numDOF):
    ax = axes[i]
    ax.plot(predictions_python[:, i], label='Python', linewidth=3)
    ax.plot(cpred[:, i], label='cpred', linewidth=5)
    ax.legend()
plt.show()
main.c:
#ifdef __cplusplus
extern "C" {
#endif
#include <stdint.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <assert.h>
#include "nnom.h"
#include "weights_mockup.h"
#define num_DOF 2
float instantaneous_output[num_DOF] = {0};
int8_t* load(const char* file, size_t* size) // loads binary file
{
    size_t sz;
    FILE* fp; // = fopen_s(file, "rb");
    fopen_s(&fp, file, "rb");
    int8_t* input;
    assert(fp);
    fseek(fp, 0, SEEK_END);
    sz = ftell(fp);
    fseek(fp, 0, SEEK_SET);
    input = malloc(sz);
    fread(input, 1, sz, fp);
    fclose(fp);
    *size = sz;
    return input;
}
#ifdef NNOM_USING_STATIC_MEMORY
uint8_t static_buf[1024 * 500];
#endif
int main(int argc, char* argv[]) {
    nnom_model_t* model;
    //nnom_predict_t* pre;
    int8_t* input;
    float prob;
    uint32_t label;
    int numfeat;
    size_t size = 0;
    input = load("testdata_q.bin", &size); // load binary testing data written by the python setup above
    float true_estimate;
    FILE *op;
    fopen_s(&op, "result.csv", "w"); // open csv file for output
    fprintf(op, "DOF Value\n");      // header for csv output file
    printf("Validation Size: %u\n", (uint32_t)size);
#ifdef NNOM_USING_STATIC_MEMORY
    // when using a static memory buffer, it must be set before creating the model
    nnom_set_static_buf(static_buf, sizeof(static_buf));
#endif
    model = nnom_model_create(); // creates model using data from weights_mockup.h
    for (size_t seek = 0; seek < size;) {
        // copy one frame of features into the model input buffer
        memcpy(nnom_input_data, input + seek, sizeof(nnom_input_data));
        seek += sizeof(nnom_input_data);
        numfeat = sizeof(nnom_input_data) / sizeof(*nnom_input_data);
        // run model
        model_run(model);
        // save results: convert the q7 outputs back to float using the output shift
        for (int ii = 0; ii < num_DOF; ii++) {
            fprintf(op, "%d %f\n", ii, (float)nnom_output_data[ii] / (1 << DENSE_16_OUTPUT_DEC));
        }
        printf("Processing %u%%\n", (uint32_t)(seek * 100 / size));
    }
    fclose(op);
    // model
    model_stat(model);
    model_delete(model);
    free(input);
    return 0;
}
#ifdef __cplusplus
}
#endif