ezkl is an engine for doing inference for deep learning models and other computational graphs in a zk-SNARK (ZKML). It can be used from Python, JavaScript, or the command line.
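For context, a full proving run with the Python bindings typically chains a handful of calls after gen_settings. The sketch below follows the function names used in the official ezkl notebooks; exact argument lists and which calls are async differ between releases, so treat it as an outline rather than the definitive API:
# outline of the usual ezkl Python pipeline (paths are placeholders)
ezkl.gen_settings("network.onnx", "settings.json")
ezkl.calibrate_settings("input.json", "network.onnx", "settings.json", "resources")
ezkl.compile_circuit("network.onnx", "network.compiled", "settings.json")
ezkl.setup("network.compiled", "vk.key", "pk.key")
ezkl.gen_witness("input.json", "network.compiled", "witness.json")
ezkl.prove("witness.json", "network.compiled", "pk.key", "proof.json")
ezkl.verify("proof.json", "settings.json", "vk.key")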
Below is the code to reproduce the error. Briefly, I generate random data with a fixed shape, then define and train an RNN model with LSTM layers.
import ezkl
import os
import json
import numpy as np
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import LSTM, Dropout, Dense
from tensorflow.keras.utils import to_categorical
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
import tf2onnx
# Define the shape of the data (number of samples, sequence length, number of features)
shape = (1000, 32, 40)
# Generate random data and labels
data_0 = np.random.rand(*shape).astype(np.float32)
labels_0 = np.zeros(shape[0], dtype=int) # Label 0 for data_0
data_1 = np.random.rand(*shape).astype(np.float32)
labels_1 = np.ones(shape[0], dtype=int) # Label 1 for data_1
# Combine data
data_combined = np.concatenate((data_0, data_1), axis=0)
labels_combined = np.concatenate((labels_0, labels_1), axis=0)
# Fit the scaler on the combined data
scaler = StandardScaler()
data_combined_flattened = data_combined.reshape(-1, data_combined.shape[-1])
data_combined_normalized = scaler.fit_transform(data_combined_flattened).reshape(data_combined.shape)
# Split data into training and testing sets
X_train, X_test, y_train, y_test = train_test_split(data_combined_normalized, labels_combined, test_size=0.2, random_state=42)
# Reshape data for RNN (sequence_length, num_features)
X_train = X_train.reshape(X_train.shape[0], shape[1], shape[2])
X_test = X_test.reshape(X_test.shape[0], shape[1], shape[2])
# Convert labels to categorical (one-hot encoding)
y_train_categorical = to_categorical(y_train, num_classes=2)
y_test_categorical = to_categorical(y_test, num_classes=2)
# Define the RNN model using LSTM layers
model = Sequential([
    LSTM(64, input_shape=(shape[1], shape[2]), return_sequences=True),
    Dropout(0.5),
    LSTM(64, return_sequences=False),
    Dropout(0.5),
    Dense(128, activation='relu'),
    Dropout(0.5),
    Dense(2, activation='softmax')  # 2 classes
])
# Compile
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
# Train
model.fit(X_train, y_train_categorical, epochs=5, batch_size=32, validation_split=0.2)
# Save
model.save('model_rnn.keras')
model_path = os.path.join('model.onnx')
settings_path = os.path.join('settings.json')
data_path = os.path.join('input.json')
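# give the output tensor an explicit name so tf2onnx picks it up in the export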
model.output_names = ['output']
# random input from the training set
random_index = np.random.randint(0, X_train.shape[0])
x = X_train[random_index]
# export model to onnx (model.onnx)
spec = tf.TensorSpec([None, X_train.shape[1], X_train.shape[2]], tf.float32, name='input_0')
tf2onnx.convert.from_keras(model, input_signature=[spec], opset=15, output_path=model_path)
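# NOTE: depending on the TensorFlow / tf2onnx versions, the Keras LSTM layers may be
# exported as an ONNX Loop subgraph rather than a fused LSTM node; that Loop node is
# what ezkl later rejects as unimplemented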
# create data file from the random input
data_array = x.reshape([-1]).tolist()
data = dict(input_data = [data_array])
# Serialize data into file:
json.dump(data, open(data_path, 'w'))
py_run_args = ezkl.PyRunArgs()
py_run_args.input_visibility = "public"
py_run_args.output_visibility = "public"
py_run_args.param_visibility = "fixed" # "fixed" means the committed-to params are reused across all proofs
res = ezkl.gen_settings(model_path, settings_path, py_run_args=py_run_args)
assert res == True
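To confirm which operator gen_settings is rejecting, the exported graph can be inspected with the onnx package (not part of the original snippet; assumes onnx is installed):
import onnx

onnx_model = onnx.load(model_path)
# print every op type in the exported graph; a Loop (or Scan) node here is the
# operator behind the unimplemented(Loop) error
print(sorted({node.op_type for node in onnx_model.graph.node}))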
Hello, I built an RNN model with LSTM layers, but when I run ezkl.gen_settings() on the exported ONNX model it fails with an unimplemented(Loop) error. How can I fix this kind of error? Do I need to use a lower-level framework like PyTorch to design the model? There may be a similar issue here, https://github.com/zkonduit/ezkl/issues/411, but I have no idea how to overcome it.
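On the PyTorch question: below is a minimal sketch of an equivalent classifier exported with torch.onnx.export. The model and names are hypothetical stand-ins for the Keras network above, and whether the resulting graph avoids the Loop node (and is then accepted by ezkl) depends on the exporter and ezkl versions, so this is a direction to try rather than a confirmed fix:
import torch
import torch.nn as nn

class LSTMClassifier(nn.Module):
    # rough PyTorch counterpart of the Keras model: two LSTM layers, then a dense head
    def __init__(self, num_features=40, hidden=64, num_classes=2):
        super().__init__()
        self.lstm = nn.LSTM(num_features, hidden, num_layers=2, batch_first=True, dropout=0.5)
        self.head = nn.Sequential(nn.Linear(hidden, 128), nn.ReLU(), nn.Linear(128, num_classes))

    def forward(self, x):
        out, _ = self.lstm(x)         # (batch, seq_len, hidden)
        return self.head(out[:, -1])  # last time step; softmax left out of the graph

torch_model = LSTMClassifier().eval()
dummy = torch.randn(1, 32, 40)        # (batch, seq_len, num_features), matching the shape above
torch.onnx.export(torch_model, dummy, "model.onnx",
                  input_names=["input_0"], output_names=["output"], opset_version=15)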