```python
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import tensorflow as tf
from sklearn import metrics
import random
from random import randint
import time
import os

LABELS = [
    "CORRECT POSTURE",    # changed
    "INCORRECT POSTURE",
]
DATASET_PATH = "/home/tushar/datamodel/RNN-HAR-2D-Pose-database/"

X_train_path = DATASET_PATH + "X_train.txt"
X_test_path = DATASET_PATH + "X_test.txt"
y_train_path = DATASET_PATH + "Y_train.txt"
y_test_path = DATASET_PATH + "Y_test.txt"

n_steps = 16  # timesteps per series

def load_X(X_path):
    # Load one comma-separated keypoint frame per row, then group the rows
    # into fixed-length blocks of n_steps frames each.
    file = open(X_path, 'r')
    X_ = np.array(
        [elem for elem in [
            row.split(',') for row in file
        ]],
        dtype=np.float32
    )
    file.close()
    blocks = int(len(X_) / n_steps)
    X_ = np.array(np.split(X_, blocks))
    return X_

def load_y(y_path):
    # Load one integer label per row, shifted to be 0-indexed.
    file = open(y_path, 'r')
    y_ = np.array(
        [elem for elem in [
            row.replace('  ', ' ').strip().split(' ') for row in file
        ]],
        dtype=np.int32
    )
    file.close()
    return y_ - 1

X_train = load_X(X_train_path)
X_test = load_X(X_test_path)
y_train = load_y(y_train_path)
y_test = load_y(y_test_path)

training_data_count = len(X_train)
test_data_count = len(X_test)
n_input = len(X_train[0][0])  # num input parameters per timestep

n_hidden = 34
n_classes = 2    # changed

decaying_learning_rate = True
learning_rate = 0.0025
init_learning_rate = 0.005
decay_rate = 0.96
decay_steps = 100000

global_step = tf.Variable(0, trainable=False)
lambda_loss_amount = 0.0015
training_iters = training_data_count * 200    # changed
batch_size = 256    # changed
display_iter = batch_size * 8

print("(X shape, y shape, every X's mean, every X's standard deviation)")
print(X_train.shape, y_test.shape, np.mean(X_test), np.std(X_test))
print("\nThe dataset has not been preprocessed, is not normalised etc")

def LSTM_RNN(_X, _weights, _biases):
    ...  # body unchanged from the original script

def extract_batch_size(_train, _labels, _unsampled, batch_size):
    ...  # body unchanged from the original script

def one_hot(y_):
    ...  # body unchanged from the original script

x = tf.placeholder(tf.float32, [None, n_steps, n_input])
y = tf.placeholder(tf.float32, [None, n_classes])

weights = {
    'hidden': tf.Variable(tf.random_normal([n_input, n_hidden])),  # Hidden layer weights
    'out': tf.Variable(tf.random_normal([n_hidden, n_classes], mean=1.0))
}
biases = {
    'hidden': tf.Variable(tf.random_normal([n_hidden])),
    'out': tf.Variable(tf.random_normal([n_classes]))
}

pred = LSTM_RNN(x, weights, biases)

# L2 regularisation over all trainable variables, added to the softmax loss
l2 = lambda_loss_amount * sum(
    tf.nn.l2_loss(tf_var) for tf_var in tf.trainable_variables()
)
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=pred)) + l2  # Softmax loss
if decaying_learning_rate:
    learning_rate = tf.train.exponential_decay(
        init_learning_rate, global_step * batch_size, decay_steps, decay_rate, staircase=True
    )
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost, global_step=global_step)  # Adam Optimizer

correct_pred = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))

test_losses = []
test_accuracies = []
train_losses = []
train_accuracies = []
sess = tf.InteractiveSession(config=tf.ConfigProto(log_device_placement=True))
init = tf.global_variables_initializer()
sess.run(init)

step = 1
time_start = time.time()
unsampled_indices = range(0, len(X_train))

while step * batch_size <= training_iters:
    if len(unsampled_indices) < batch_size:
        unsampled_indices = range(0, len(X_train))
    batch_xs, raw_labels, unsampled_indices = extract_batch_size(X_train, y_train, unsampled_indices, batch_size)
    batch_ys = one_hot(raw_labels)
    # Pad the one-hot matrix if a batch happens to be missing some classes
    if len(batch_ys[0]) < n_classes:
        temp_ys = np.zeros((batch_size, n_classes))
        temp_ys[:batch_ys.shape[0], :batch_ys.shape[1]] = batch_ys
        batch_ys = temp_ys
    ...  # (optimisation step, logging, and step increment as in the original script)

print("Optimization Finished!")

one_hot_predictions, accuracy, final_loss = sess.run(
    [pred, accuracy, cost],
    feed_dict={
        x: X_test,
        y: one_hot(y_test)
    }
)
test_losses.append(final_loss)
test_accuracies.append(accuracy)
print("FINAL RESULT: " + "Batch Loss = {}".format(final_loss) + ", Accuracy = {}".format(accuracy))
time_stop = time.time()
print("TOTAL TIME: {}".format(time_stop - time_start))

get_ipython().run_line_magic('matplotlib', 'inline')
font = {'family': 'Bitstream Vera Sans', 'weight': 'bold', 'size': 18}
matplotlib.rc('font', **font)

width = 12
height = 12
plt.figure(figsize=(width, height))
indep_train_axis = np.array(range(batch_size, (len(train_losses) + 1) * batch_size, batch_size))
plt.plot(indep_train_axis, np.array(train_accuracies), "g--", label="Train accuracies")
indep_test_axis = np.append(
    np.array(range(batch_size, len(test_losses) * display_iter, display_iter)[:-1]),
    [training_iters]
)
plt.plot(indep_test_axis, np.array(test_accuracies), "b-", linewidth=2.0, label="Test accuracies")
print(len(test_accuracies))
print(len(train_accuracies))
plt.title("Training session's Accuracy over Iterations")
plt.legend(loc='lower right', shadow=True)
plt.ylabel('Training Accuracy')
plt.xlabel('Training Iteration')
plt.show()

predictions = one_hot_predictions.argmax(1)
print("Testing Accuracy: {}%".format(100 * accuracy))
print("")
print("Precision: {}%".format(100 * metrics.precision_score(y_test, predictions, average="weighted")))
print("Recall: {}%".format(100 * metrics.recall_score(y_test, predictions, average="weighted")))
print("f1_score: {}%".format(100 * metrics.f1_score(y_test, predictions, average="weighted")))
print("")
print("Confusion Matrix:")
print("Created using test set of {} datapoints, normalised to % of each class in the test dataset".format(len(y_test)))
confusion_matrix = metrics.confusion_matrix(y_test, predictions)
normalised_confusion_matrix = np.array(confusion_matrix, dtype=np.float32) / np.sum(confusion_matrix) * 100

width = 12
height = 12
plt.figure(figsize=(width, height))
plt.imshow(
    normalised_confusion_matrix,
    interpolation='nearest',
    cmap=plt.cm.Blues
)
plt.title("Confusion matrix \n(normalised to % of total test data)")
plt.colorbar()
tick_marks = np.arange(n_classes)
plt.xticks(tick_marks, LABELS, rotation=90)
plt.yticks(tick_marks, LABELS)
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
plt.show()

print(test_accuracies)
```
Sir, I used the above code with only a few changes, because I am using my own dataset, made with OpenPose ("https://github.com/CMU-Perceptual-Computing-Lab/openpose"). OpenPose gives me JSON output, which I converted into text with the help of the following script:
```python
import pandas as pd
import numpy as np
import os
import ijson
import json
import itertools

rootdir = r'C:\Users\TUSHAR\Desktop\final json files\0.test'
arr = []
krr = []  # unused
sum = 0
for subdir, dirs, files in os.walk(rootdir):
    for file in files:
        t = json.load(open(os.path.join(subdir, file)))
        for child in t["people"]:
            i = 0
            # Take the first 54 values (18 keypoints x 3) and drop every
            # third one (the confidence score), keeping only x and y.
            for j in itertools.islice(child["pose_keypoints_2d"], 0, 54, 1):
                i += 1
                if i == 3:
                    i = 0
                    continue
                else:
                    arr.append(j)
            print(*arr, sep=',')
            sum += 1
            arr.clear()
print('total :', sum)
```
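(A note on usage: the script prints one comma-separated line of 36 values per detected person to standard output, so the text files were presumably produced with a redirect such as `python convert_json.py > X_train.txt`; the script filename here is just a placeholder.)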
> You see that error because you are trying to create an array from a list that is not properly multi-dimensional in shape. Another possible reason is related to the type of the contents of the array. Work on your input shape.
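For illustration, here is a minimal sketch (separate from the scripts above) that reproduces the exact error the quoted reply refers to: NumPy cannot build a float32 array from a ragged list, i.e. from rows of unequal length.

```python
import numpy as np

# Rows of unequal length make the list "not properly multi-dimensional":
rows = [[1.0, 2.0, 3.0], [4.0, 5.0]]
np.array(rows, dtype=np.float32)
# ValueError: setting an array element with a sequence.
```

In `load_X` above, the same thing happens as soon as two lines of X_train.txt contain a different number of comma-separated values.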
Now I have text files with the "x" and "y" coordinates. I am attaching the x_train.txt file: "https://drive.google.com/drive/folders/1G8_eRccP_qCjkTqmmMXzcDvo5N-SPDE0?usp=sharing"

The problem is that I do not have a big dataset, so I changed some variables in the code, but I am still getting an error:

"dtype=np.float32 ValueError: setting an array element with a sequence"

Please help me out, sir; I would be highly obliged if you could help me with this issue.
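For reference, a quick sanity check along the lines of the quoted advice. This is only a sketch, under two assumptions taken from the scripts above: every line of the text file should hold the same number of comma-separated values (36 with the 18-keypoint conversion shown earlier), and the line count should be a multiple of n_steps so that `load_X` can split it into blocks; the file path is just an example.

```python
import numpy as np
from collections import Counter

n_steps = 16            # must match the training script
X_path = "X_train.txt"  # example path; adjust as needed

with open(X_path) as f:
    rows = [line.strip().split(',') for line in f if line.strip()]

# 1) Every row needs the same number of values, otherwise
#    np.array(..., dtype=np.float32) raises the "sequence" ValueError.
lengths = Counter(len(row) for row in rows)
print("row lengths found:", dict(lengths))

# 2) The row count must be divisible by n_steps for np.split to work.
print("rows:", len(rows), "| divisible by n_steps:", len(rows) % n_steps == 0)

if len(lengths) == 1:
    X_ = np.array(rows, dtype=np.float32)
    print("OK, shape:", X_.shape)
else:
    expected = lengths.most_common(1)[0][0]
    bad = [i + 1 for i, row in enumerate(rows) if len(row) != expected]
    print("offending line numbers:", bad[:10])
```

Note that the conversion script writes one line per detected person: frames where OpenPose finds nobody contribute no line, and frames with two people contribute two, so the per-video line counts can drift away from a multiple of n_steps; such frames may need padding or filtering before training.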