import logging
import pickle

import numpy as np
import tensorflow as tf

# NOTE: `feature_length` (the total one-hot feature dimension) and `saver`
# (a tf.train.Saver) are assumed to be defined elsewhere in FM.py.


def train_model(sess, model, epochs=10, print_every=50):
    """Train the model, logging summaries, loss, and accuracy."""
    # Merge all the summaries and write them out to train_logs
    merged = tf.summary.merge_all()
    train_writer = tf.summary.FileWriter('train_logs', sess.graph)
    # get sparse training data
    with open('../avazu_CTR/train_sparse_data_frac_0.01.pkl', 'rb') as f:
        sparse_data_fraction = pickle.load(f)
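    # Assumption (inferred from the file name and the loop below, not stated in
    # the repository): the pickle is produced by a separate preprocessing step
    # that sparse-encodes a 1% fraction of the Avazu CTR training set. Each
    # element appears to be one pre-built mini-batch: a dict with 'labels'
    # (click labels) and 'indexes' ([row, column] coordinates of the non-zero
    # features).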
    # get number of batches
    num_batches = len(sparse_data_fraction)
    for e in range(epochs):
        num_samples = 0
        losses = []
        for ibatch in range(num_batches):
            # labels and sparse coordinates for this pre-built mini-batch
            batch_y = np.array(sparse_data_fraction[ibatch]['labels'])
            actual_batch_size = len(batch_y)
            batch_indexes = np.array(sparse_data_fraction[ibatch]['indexes'], dtype=np.int64)
            batch_shape = np.array([actual_batch_size, feature_length], dtype=np.int64)
            batch_values = np.ones(len(batch_indexes), dtype=np.float32)
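            # every non-zero entry is 1.0 because the features are assumed to
            # be binary one-hot indicators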
            # create a feed dictionary for this batch; model.X is presumably a
            # tf.sparse_placeholder, which accepts an
            # (indices, values, dense_shape) tuple directly
            feed_dict = {model.X: (batch_indexes, batch_values, batch_shape),
                         model.y: batch_y,
                         model.keep_prob: 1.0}
            loss, accuracy, summary, global_step, _ = sess.run(
                [model.loss, model.accuracy, merged, model.global_step, model.train_op],
                feed_dict=feed_dict)
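            # `model.train_op` presumably increments `model.global_step` (the
            # usual optimizer.minimize(..., global_step=...) pattern), which
            # drives the logging/checkpoint cadence below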
            # aggregate performance stats
            losses.append(loss * actual_batch_size)
            num_samples += actual_batch_size
            # record summaries and training-set accuracy
            train_writer.add_summary(summary, global_step=global_step)
            # periodically log minibatch loss/accuracy and checkpoint the model
            if global_step % print_every == 0:
                logging.info("Iteration {0}: with minibatch training loss = {1} and accuracy of {2}"
                             .format(global_step, loss, accuracy))
                saver.save(sess, "checkpoints/model", global_step=global_step)
        # print the sample-weighted average loss over the epoch
        total_loss = np.sum(losses) / num_samples
        print("Epoch {1}, Overall loss = {0:.3g}".format(total_loss, e + 1))