tensorflow / probability

Probabilistic reasoning and statistical analysis in TensorFlow
https://www.tensorflow.org/probability/
Apache License 2.0

Classification problem with Bayesian networks #1564

Closed · caliari-italo closed this issue 2 years ago

caliari-italo commented 2 years ago

Greetings everyone.

I've been having some trouble with TensorFlow Probability over the last few days.

I trained the frequentist version of this network and reached individual accuracies above 0.99, but the Bayesian version performs no better than a dummy model. This is odd, as I expected the results not to differ much.

As I'm new to Bayesian approaches, I would like to know if I'm missing something here. I apologize in advance if this issue is out of scope, but I haven't found much information or many examples that suit my case.

In this model I'm predicting the presence (1) or absence (0) of 3 properties (Y), which may or may not occur simultaneously.

I would really appreciate some insights.

Thank you all in advance.

import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp
from main import get_data

#%% Configuration
config = {"ID_prefix" : "Bayesian_CNN_Flipout",
          "mode" : "classification",
          "optimizer" : "Adam",
          "loss" : "binary_crossentropy",
          "monitor" : "val_loss",
          "patience" : 10,
          "lr" : 0.001,
          "repetitions" : 3,
          "X_reshape" : True}

#%% Get data
data = get_data("dataset.csv", config)

My data has the following dimensions:

data["X_train"].shape
Out[8]: (39375, 1024, 1)

data["Y_train"].shape
Out[9]: (39375, 3)

data["X_val"].shape
Out[10]: (13125, 1024, 1)

data["Y_val"].shape
Out[11]: (13125, 3)

data["X_test"].shape
Out[13]: (17500, 1024, 1)

data["Y_test"].shape
Out[14]: (17500, 3)

The structure of the network is:

#%% Model structure
config["inputs"] = tf.keras.Input(shape=(data["X_train"].shape[1], data["X_train"].shape[2]))

layer = config["inputs"]
layer = tfp.layers.Convolution1DFlipout(filters=10, kernel_size=5, strides=1, activation="relu")(layer)
layer = tf.keras.layers.MaxPooling1D(pool_size=2)(layer)
layer = tfp.layers.Convolution1DFlipout(filters=10, kernel_size=5, strides=1, activation="relu")(layer)
layer = tf.keras.layers.MaxPooling1D(pool_size=2)(layer)
layer = tf.keras.layers.Flatten()(layer)

config["outputs"] = tfp.layers.DenseFlipout(units=3, activation="sigmoid")(layer)

model = tf.keras.Model(inputs=config["inputs"], outputs=config["outputs"])

model.compile(optimizer=config["optimizer"], loss=config["loss"])

tf.keras.backend.set_value(model.optimizer.learning_rate, config["lr"])

earlystopping = tf.keras.callbacks.EarlyStopping(monitor=config["monitor"],
                                                 patience=config["patience"],
                                                 restore_best_weights=True)

#%% Fit model
history = model.fit(data["X_train"], data["Y_train"],
                    validation_data=(data["X_val"], data["Y_val"]),
                    epochs=999999,
                    callbacks=[earlystopping])

#%% Classification metrics
pred_train = np.zeros([config["repetitions"], data["Y_train"].shape[0], data["Y_train"].shape[1]])
pred_val = np.zeros([config["repetitions"], data["Y_val"].shape[0], data["Y_val"].shape[1]])
pred_test = np.zeros([config["repetitions"], data["Y_test"].shape[0], data["Y_test"].shape[1]])
accuracy_train = np.zeros([config["repetitions"], 1, data["Y_train"].shape[1]])
accuracy_val = np.zeros([config["repetitions"], 1, data["Y_val"].shape[1]])
accuracy_test = np.zeros([config["repetitions"], 1, data["Y_test"].shape[1]])
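# Each call to model.predict draws a fresh set of weights from the
# variational posterior, so every repetition is one stochastic forward pass.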
for i in range(config["repetitions"]):
    pred_train[i] = model.predict(data["X_train"]).round()
    pred_val[i] = model.predict(data["X_val"]).round()
    pred_test[i] = model.predict(data["X_test"]).round()
    accuracy_train[i] = (data["Y_train"]==pred_train[i]).mean(0)
    accuracy_val[i] = (data["Y_val"]==pred_val[i]).mean(0)
    accuracy_test[i] = (data["Y_test"]==pred_test[i]).mean(0)
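
As a side note, with a Flipout model it is common to average the predicted probabilities over the stochastic passes and threshold the mean, rather than rounding each pass separately. A minimal sketch, assuming the same data dict and config as above:

#%% Monte Carlo averaged test predictions (sketch)
probs_test = np.stack([model.predict(data["X_test"])
                       for _ in range(config["repetitions"])])  # (reps, N, 3)
mean_probs = probs_test.mean(axis=0)                    # Monte Carlo mean of p(y=1|x)
pred_mc = (mean_probs > 0.5).astype(int)                # threshold the averaged probabilities
accuracy_mc = (data["Y_test"] == pred_mc).mean(axis=0)  # one accuracy per property
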
caliari-italo commented 2 years ago

The question was answered elsewhere, and the code below works properly. The key change is passing a divergence function that scales each layer's KL term by the number of training examples. By default the Flipout layers add the full, unscaled KL divergence to the loss, so with a large dataset the prior term swamps the batch-averaged likelihood and pushes the posterior back toward the prior, which explains the dummy-level accuracies. Scaling by 1/N recovers the usual minibatch ELBO, loss ≈ NLL + KL/N:

#%% Initial Imports
import shutil
import tensorflow as tf
import tensorflow_probability as tfp
from main import get_data, update_results, main

#%% Configuration
config = {"Folder_ID" : "Bayesian_CNN_Flipout",
          "mode" : "classification",
          "optimizer" : "Adam",
          "loss" : "binary_crossentropy",
          "metrics" : "binary_accuracy",
          "monitor" : "val_binary_accuracy",
          "patience" : 10,
          "repetitions" : 5,
          "X_reshape" : True}

#%% Get data
data = get_data("dataset.csv", config)

My data has the following dimensions:

data["X_train"].shape
Out[8]: (39375, 1024, 1)

data["Y_train"].shape
Out[9]: (39375, 3)

data["X_val"].shape
Out[10]: (13125, 1024, 1)

data["Y_val"].shape
Out[11]: (13125, 3)

data["X_test"].shape
Out[13]: (17500, 1024, 1)

data["Y_test"].shape
Out[14]: (17500, 3)

The structure of the network is:


#%% Model structure
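# Scale each layer's KL divergence by the number of training examples so the
# prior term matches the scale of the batch-averaged likelihood (minibatch ELBO).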
divergence_fn = lambda q, p, ignore: tfp.distributions.kl_divergence(q, p)/data['X_train'].shape[0]

config["inputs"] = tf.keras.Input(shape=(data["X_train"].shape[1], data["X_train"].shape[2]))

layer  = config["inputs"]

layer = tfp.layers.Convolution1DFlipout(filters=2, kernel_size=5, strides=1, activation="relu", kernel_divergence_fn=divergence_fn, bias_divergence_fn=divergence_fn)(layer)
layer = tf.keras.layers.MaxPooling1D(pool_size=2)(layer)
layer = tfp.layers.Convolution1DFlipout(filters=2, kernel_size=5, strides=1, activation="relu", kernel_divergence_fn=divergence_fn, bias_divergence_fn=divergence_fn)(layer)
layer = tf.keras.layers.MaxPooling1D(pool_size=2)(layer)

layer = tf.keras.layers.Flatten()(layer)
config["outputs"] = tfp.layers.DenseFlipout(3, activation='sigmoid', kernel_divergence_fn=divergence_fn, bias_divergence_fn=divergence_fn)(layer)

model = tf.keras.Model(inputs=config["inputs"], outputs=config["outputs"])

# The metric must be compiled for EarlyStopping to see "val_binary_accuracy"
model.compile(optimizer=config["optimizer"],
              loss=config["loss"],
              metrics=[config["metrics"]])

earlystopping = tf.keras.callbacks.EarlyStopping(monitor=config["monitor"],
                                                 patience=config["patience"],
                                                 restore_best_weights=True)

#%% Fit model
history = model.fit(data["X_train"], data["Y_train"],
                    validation_data=(data["X_val"], data["Y_val"]),
                    epochs=999999,
                    callbacks=[earlystopping])
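
As a quick sanity check, the scaled KL terms that the Flipout layers add to the loss can be inspected through the standard Keras model.losses attribute; after the division by the dataset size they should be small relative to the data term. A minimal sketch, assuming TF2 eager execution:

#%% Inspect the scaled KL terms (sketch)
_ = model(data["X_train"][:1])                 # one forward pass so the loss tensors are concrete
kl_terms = [float(kl) for kl in model.losses]  # one entry per kernel/bias divergence
print("Per-term KL contributions:", kl_terms)
print("Total KL added to the loss:", sum(kl_terms))

Predictions can then be made exactly as in the first version, by averaging several stochastic forward passes.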