deepchem / deepchem

Democratizing Deep-Learning for Drug Discovery, Quantum Chemistry, Materials Science and Biology
https://deepchem.io/
MIT License
5.35k stars 1.65k forks source link

Draft hyperparameter optimization for DNNs #92

Closed rbharath closed 8 years ago

rbharath commented 8 years ago

Only for reference for now

import itertools
import shutil
import tempfile
from functools import reduce
from operator import mul

import numpy as np
import numpy.random

from deepchem.models.deep import SingleTaskDNN

# Fixed (non-searched) model settings shared by every candidate model.
model_params = {"activation": "relu",
                "momentum": .9,
                "batch_size": 64,
                "nb_epoch": 30,
                "dropout": .5,
                "data_shape": train_dataset.get_data_shape()}

# Candidate values for each searched hyperparameter.  Learning rate and
# weight decay are sampled log-uniformly (4 points each); every other
# setting is pinned to a single value in this draft search.
lr_list = np.power(10., np.random.uniform(-3, -1, size=4))
decay_list = np.power(10., np.random.uniform(-6, -2, size=4))
nb_hidden_list = [1000]
nb_epoch_list = [30]
nesterov_list = [False]
dropout_list = [.5]
nb_layers_list = [1]
init_list = ["glorot_uniform"]
batchnorm_list = [False]
hyperparameters = [lr_list, decay_list, nb_hidden_list, nb_epoch_list,
                   nesterov_list, dropout_list, nb_layers_list,
                   init_list, batchnorm_list]
# Size of the full grid: product of the per-hyperparameter list lengths.
# Computed with an explicit loop instead of reduce(mul, ...): `reduce` is
# not a builtin on Python 3, so the original expression raised NameError.
num_combinations = 1
for _options in hyperparameters:
    num_combinations *= len(_options)

# Running record of the best configuration seen so far.
best_validation_score = -np.inf
best_hyperparams = None
best_model, best_model_dir = None, None
# Grid search: train one model per hyperparameter combination, score it on
# the validation split, and keep only the best-scoring checkpoint on disk.
param_keys = ("learning_rate", "decay", "nb_hidden", "nb_epoch",
              "nesterov", "dropout", "nb_layers", "init", "batchnorm")
for ind, hyperparameter_tuple in enumerate(itertools.product(*hyperparameters)):
    print("Testing %s" % str(hyperparameter_tuple))
    print("Combo %d/%d" % (ind, num_combinations))
    # Bind each sampled value into model_params, and to local names for
    # the progress report below.
    model_params.update(zip(param_keys, hyperparameter_tuple))
    (lr, decay, nb_hidden, nb_epoch, nesterov, dropout,
     nb_layers, init, batchnorm) = hyperparameter_tuple

    # Train this candidate and checkpoint it to a scratch directory.
    model_dir = tempfile.mkdtemp()
    model = SingleTaskDNN(task_types, model_params)
    model.fit(train_dataset)
    model.save(model_dir)

    # Score the candidate on the validation split.
    evaluator = Evaluator(model, valid_dataset)
    df, r2score = evaluator.compute_model_performance(
        valid_csv_out, valid_stats_out)
    valid_r2_score = r2score.iloc[0]["r2_score"]
    print("learning_rate %f, nb_hidden %d, nb_epoch %d, nesterov %s, dropout %f => Validation set R^2 %f" %
          (lr, nb_hidden, nb_epoch, str(nesterov), dropout, valid_r2_score))

    if valid_r2_score > best_validation_score:
        # New leader: remember it and drop the previous leader's checkpoint.
        best_validation_score = valid_r2_score
        best_hyperparams = hyperparameter_tuple
        if best_model_dir is not None:
            shutil.rmtree(best_model_dir)
        best_model_dir = model_dir
        best_model = model
    else:
        # Not an improvement; discard this candidate's checkpoint.
        shutil.rmtree(model_dir)
    print("Best hyperparameters so-far: %s" % str(best_hyperparams))
    print("best_validation_score so-far: %f" % best_validation_score)

print("Best hyperparameters: %s" % str(best_hyperparams))
print("best_validation_score: %f" % best_validation_score)
best_dnn = best_model
rbharath commented 8 years ago
# Score the winning DNN on the held-out test split.
dnn_test_csv_out = tempfile.NamedTemporaryFile()
dnn_test_stats_out = tempfile.NamedTemporaryFile()
dnn_test_df, dnn_test_r2score = Evaluator(best_dnn, test_dataset).compute_model_performance(
    dnn_test_csv_out, dnn_test_stats_out)
dnn_test_r2_score = dnn_test_r2score.iloc[0]["r2_score"]
print("DNN Test set R^2 %f" % (dnn_test_r2_score))
rbharath commented 8 years ago
# Scatter the DNN's predicted vs. measured test-set log-solubilities; the
# diagonal line marks perfect prediction.
task = "measured log solubility in mols per litre"
predicted = np.array(dnn_test_df[task + "_pred"])
measured = np.array(dnn_test_df[task])
plt.scatter(predicted, measured)
plt.xlabel('Predicted log-solubility in mols/liter')
plt.ylabel('True log-solubility in mols/liter')
plt.title(r'DNN predicted vs. true log-solubilities')
plt.xlim([-3, 3])
plt.ylim([-3, 3])
plt.plot([-3, 3], [-3, 3], marker=".", color='k')
plt.show()
rbharath commented 8 years ago

This was only used to copy some code around conveniently. Will close.