alexander34ro / AML-Replicating-Novel-Deep-Learning-with-CNN-and-Bi-Directional-LSTM-for-Improved-Index-Prediction

Apache License 2.0
0 stars 0 forks source link

Grid search for better hyperparameters (rho for Adadelta, learning rate) #20

Open alexander34ro opened 3 years ago

amir-souri commented 3 years ago

# Grid search over Adadelta hyperparameters: train the model once per
# (learning_rate, rho) pair and collect per-epoch validation losses.
# NOTE(review): assumes `tf`, `loss`, placeholders `x`/`t`, the train/val
# arrays and `n_epoch` are defined earlier in the notebook — TODO confirm.
l_rates = [1, 0.1, 0.01, 0.001, 0.0001]
rhos = [0.95, 0.9]
grid_loss_list = []

for l_rate, rh in product(l_rates, rhos):
    # The optimizer, initializer and the whole session must live INSIDE the
    # loop — otherwise only the last (l_rate, rh) pair is ever trained.
    # NOTE(review): each minimize() call adds new ops to the default graph;
    # consider tf.reset_default_graph() + rebuilding the model per config.
    optimizer = tf.train.AdadeltaOptimizer(learning_rate=l_rate, rho=rh).minimize(loss)
    init = tf.global_variables_initializer()

    with tf.Session() as session:
        session.run(init)

        for epoch in range(n_epoch):
            # One training step on the full training set, then a validation pass.
            _, loss_value = session.run([optimizer, loss],
                                        feed_dict={x: x_train, t: t_train})
            val_loss_value = session.run(loss, feed_dict={x: x_val, t: t_val})

            # NOTE(review): this flat list does not record which (l_rate, rh)
            # produced each loss; see the later revision that groups per config.
            grid_loss_list.append(val_loss_value)

print("Grid search done")

amir-souri commented 3 years ago

# Revised grid search: narrower learning-rate range, and validation losses are
# grouped per hyperparameter configuration so results can be compared.
# NOTE(review): assumes `tf`, `loss`, placeholders `x`/`t`, the train/val
# arrays and `n_epoch_grid` are defined earlier in the notebook — TODO confirm.
l_rates = [0.01, 0.001, 0.0001]
rhos = [0.95, 0.9]
grid_loss_list = []

for l_rate, rh in product(l_rates, rhos):
    # The optimizer, initializer, session, per-config result list and the
    # progress print must all be INSIDE this loop — otherwise only the last
    # (l_rate, rh) pair is trained and only one result is recorded.
    optimizer = tf.train.AdadeltaOptimizer(learning_rate=l_rate, rho=rh).minimize(loss)
    init = tf.global_variables_initializer()

    with tf.Session() as session:
        session.run(init)
        grid = []  # per-epoch validation losses for this configuration
        for epoch in range(n_epoch_grid):
            _, loss_value = session.run([optimizer, loss], feed_dict={x: x_train, t: t_train})
            val_loss_value = session.run(loss, feed_dict={x: x_val, t: t_val})
            grid.append(val_loss_value)

    # One entry per configuration, in product(l_rates, rhos) order.
    grid_loss_list.append(grid)
    print(f'{l_rate, rh} done')

print("Grid search done")