keras-team / keras-tuner

A Hyperparameter Tuning Library for Keras
https://keras.io/keras_tuner/
Apache License 2.0

FileNotFoundError for TPU custom loop training #985

Closed h4ck4l1 closed 5 months ago

h4ck4l1 commented 9 months ago

Describe the bug

I am training with a custom training loop under a TPUStrategy:

from typing import Optional

import keras
import keras_tuner as kt
import tensorflow as tf
from keras.utils import Progbar

# `strategy` (a tf.distribute.TPUStrategy) and the `Mnist` model class are
# defined earlier in the notebook.


class CustomHypermodel(kt.HyperModel):

    def build(self, hp):
        # Create the model's variables inside the strategy scope so they are
        # placed on the TPU replicas.
        with strategy.scope():
            model = Mnist(
                first_dense_units=hp.Int(name="first_dense_units", min_value=300, max_value=500, step=100),
                second_dense_units=hp.Int(name="second_dense_units", min_value=100, max_value=300, step=100),
                third_dense_units=hp.Int(name="third_dense_units", min_value=50, max_value=150, step=50),
            )
        return model

    def fit(self,
            hp,
            model,
            train_data,
            validation_data,
            epochs: Optional[int] = None,
            batch_size: Optional[int] = None,
            steps_per_epoch: Optional[int] = None,
            validation_steps: Optional[int] = None,
            callbacks: Optional[list[keras.callbacks.Callback]] = None,
            **kwargs):

        # The optimizer and metrics must also be created under the strategy
        # scope so their variables live on the TPU.
        with strategy.scope():
            optimizer = keras.optimizers.Adam(
                learning_rate=hp.Float("l_rate", min_value=1e-5, max_value=1e-2, sampling="log", step=10, default=1e-5)
            )
            training_loss = keras.metrics.Mean(name="train_loss", dtype=tf.float32)
            training_metric = keras.metrics.SparseCategoricalAccuracy(name="training_accuracy", dtype=tf.float32)
            validation_loss = keras.metrics.Mean(name="valid_loss", dtype=tf.float32)
            validation_metric = keras.metrics.SparseCategoricalAccuracy(name="validation_accuracy", dtype=tf.float32)

        @tf.function
        def train_step(iterator):

            def train_step_fn(inputs):
                images, y_true = inputs

                with tf.GradientTape() as tape:
                    y_pred = model(images, training=True)
                    per_example_loss = keras.losses.sparse_categorical_crossentropy(y_true, y_pred, from_logits=True)
                    # Average over the global batch so gradients are scaled
                    # correctly when summed across replicas.
                    loss = tf.nn.compute_average_loss(per_example_loss)

                grads = tape.gradient(loss, model.trainable_variables)
                optimizer.apply_gradients(zip(grads, model.trainable_variables))
                # Undo the 1/num_replicas scaling so the reported loss is the
                # per-example mean.
                training_loss.update_state(loss * strategy.num_replicas_in_sync)
                training_metric.update_state(y_true, y_pred)

            strategy.run(train_step_fn, args=(next(iterator),))

        @tf.function
        def valid_step(iterator):

            def valid_step_fn(inputs):
                images, y_true = inputs
                y_pred = model(images, training=False)
                # Per-example loss; the Mean metric averages it directly, so
                # no replica rescaling is needed here.
                loss = keras.losses.sparse_categorical_crossentropy(y_true, y_pred, from_logits=True)
                validation_loss.update_state(loss)
                validation_metric.update_state(y_true, y_pred)

            strategy.run(valid_step_fn, args=(next(iterator),))

        # KerasTuner passes its checkpointing callback in `callbacks`; attach
        # the model so it can save weights at the end of each epoch.
        for callback in callbacks or []:
            callback.set_model(model)

        best_validation_accuracy = 0.0

        train_iterator = iter(train_data)
        valid_iterator = iter(validation_data)

        for epoch in range(epochs):
            print(f"EPOCH: {epoch + 1}/{epochs}")

            train_prog_bar = Progbar(steps_per_epoch * batch_size, stateful_metrics=["Training Loss", "Training Accuracy"])
            valid_prog_bar = Progbar(validation_steps * batch_size, stateful_metrics=["Validation Loss", "Validation Accuracy"])

            for step in range(steps_per_epoch):
                train_step(train_iterator)
                values = [("Training Loss", training_loss.result().numpy()), ("Training Accuracy", training_metric.result().numpy())]
                train_prog_bar.add(batch_size, values=values)

            print(f"Total training loss for epoch {epoch}: {training_loss.result().numpy()}")
            print(f"Total training metric for epoch {epoch}: {training_metric.result().numpy()}")
            training_loss.reset_state()
            training_metric.reset_state()

            for step in range(validation_steps):
                valid_step(valid_iterator)
                values = [("Validation Loss", validation_loss.result().numpy()), ("Validation Accuracy", validation_metric.result().numpy())]
                valid_prog_bar.add(batch_size, values=values)

            print(f"Total validation loss for epoch {epoch}: {validation_loss.result().numpy()}")
            print(f"Total validation metric for epoch {epoch}: {validation_metric.result().numpy()}")

            validation_accuracy = float(validation_metric.result().numpy())

            # This on_epoch_end call is where the tuner saves the trial
            # checkpoint, and where the FileNotFoundError below is raised.
            for callback in callbacks or []:
                callback.on_epoch_end(epoch, logs={"Validation Accuracy": validation_accuracy})

            validation_loss.reset_state()
            validation_metric.reset_state()

            best_validation_accuracy = max(best_validation_accuracy, validation_accuracy)

        # Return the objective value for the tuner to compare across trials.
        return best_validation_accuracy

tuner = kt.Hyperband(
    objective=kt.Objective("Validation Accuracy", "max"),
    max_epochs=100,
    factor=2,
    hypermodel=CustomHypermodel(),
    hyperband_iterations=2,
    directory="gs://stanfordrna/hyperband",  # results written to a GCS bucket
    project_name="mnist_hyperband",
    overwrite=True,
)

# BATCH_SIZE, create_train_ds, create_valid_ds, train_raw, valid_raw,
# steps_per_epoch, and validation_steps are defined earlier in the notebook.
per_sample_batch_size = BATCH_SIZE // strategy.num_replicas_in_sync
train_ds = strategy.distribute_datasets_from_function(lambda _: create_train_ds(train_raw, per_sample_batch_size))
valid_ds = strategy.distribute_datasets_from_function(lambda _: create_valid_ds(valid_raw, per_sample_batch_size))
tuner.search(
    train_data=train_ds,
    validation_data=valid_ds,
    epochs=10,
    batch_size=BATCH_SIZE,
    steps_per_epoch=steps_per_epoch,
    validation_steps=validation_steps,
)

I get this error:

WARNING: All log messages before absl::InitializeLog() is called are written to STDERR
I0000 00:00:1707676103.624328      13 device_compiler.h:186] Compiled cluster using XLA!  This line is logged at most once for the lifetime of the process.
EPOCH: 1/2
157440/157440 ━━━━━━━━━━━━━━━━━━━━ 68s 432us/step - Training Loss: 2.0163 - Training Accuracy: 0.3426
Total training loss for epoch 0: 2.016324520111084
Total training metric for epoch 0: 0.34263211488723755
31744/31744 ━━━━━━━━━━━━━━━━━━━━ 87s 3ms/step - Validation Loss: 16.8528 - Validation Accuracy: 0.3579
Total validation loss for epoch 0: 16.85284996032715
Total validation metric for epoch 0: 0.3578628897666931
Traceback (most recent call last):
  File "/usr/local/lib/python3.10/site-packages/keras_tuner/src/engine/base_tuner.py", line 273, in _try_run_and_update_trial
    self._run_and_update_trial(trial, *fit_args, **fit_kwargs)
  File "/usr/local/lib/python3.10/site-packages/keras_tuner/src/engine/base_tuner.py", line 238, in _run_and_update_trial
    results = self.run_trial(trial, *fit_args, **fit_kwargs)
  File "/usr/local/lib/python3.10/site-packages/keras_tuner/src/tuners/hyperband.py", line 427, in run_trial
    return super().run_trial(trial, *fit_args, **fit_kwargs)
  File "/usr/local/lib/python3.10/site-packages/keras_tuner/src/engine/tuner.py", line 314, in run_trial
    obj_value = self._build_and_fit_model(trial, *args, **copied_kwargs)
  File "/usr/local/lib/python3.10/site-packages/keras_tuner/src/engine/tuner.py", line 233, in _build_and_fit_model
    results = self.hypermodel.fit(hp, model, *args, **kwargs)
  File "/tmp/ipykernel_13/2945555014.py", line 110, in fit
    callback.on_epoch_end(epoch,logs={"Validation Accuracy":validation_accuracy})
  File "/usr/local/lib/python3.10/site-packages/keras_tuner/src/engine/tuner_utils.py", line 76, in on_epoch_end
    self._save_model()
  File "/usr/local/lib/python3.10/site-packages/keras_tuner/src/engine/tuner_utils.py", line 86, in _save_model
    self.model.save_weights(write_filepath)
  File "/usr/local/lib/python3.10/site-packages/keras/src/utils/traceback_utils.py", line 123, in error_handler
    raise e.with_traceback(filtered_tb) from None
  File "/usr/local/lib/python3.10/site-packages/h5py/_hl/files.py", line 562, in __init__
    fid = make_fid(name, mode, userblock_size, fapl, fcpl, swmr=swmr)
  File "/usr/local/lib/python3.10/site-packages/h5py/_hl/files.py", line 241, in make_fid
    fid = h5f.create(name, h5f.ACC_TRUNC, fapl=fapl, fcpl=fcpl)
  File "h5py/_objects.pyx", line 54, in h5py._objects.with_phil.wrapper
  File "h5py/_objects.pyx", line 55, in h5py._objects.with_phil.wrapper
  File "h5py/h5f.pyx", line 122, in h5py.h5f.create
FileNotFoundError: [Errno 2] Unable to synchronously create file (unable to open file: name = 'gs://stanfordrna/hyperband/mnist_hyperband/trial_0000/checkpoint.weights.h5', errno = 2, error message = 'No such file or directory', flags = 13, o_flags = 242)
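
As far as I can tell, the failing call is save_weights on a gs:// path: h5py opens files through the local POSIX filesystem, so it treats "gs://stanfordrna/..." as a literal (nonexistent) local path, while TensorFlow's own filesystem layer does understand GCS paths. A minimal sketch of the difference, assuming valid GCS credentials and the bucket from the code above:

import h5py
import tensorflow as tf

# tf.io.gfile routes gs:// through TensorFlow's GCS filesystem support.
print(tf.io.gfile.exists("gs://stanfordrna/hyperband"))

# h5py only understands local paths, so this line reproduces the
# FileNotFoundError from the traceback above.
h5py.File("gs://stanfordrna/hyperband/test.weights.h5", "w")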

To Reproduce

Colab Notebook, which reproduces the bug.

Expected behavior

The error should not occur; training should proceed smoothly, with KerasTuner reading from and writing to the storage bucket.

Additional context

This code also doesn't work in Colab:

from google.colab import auth
auth.authenticate_user()
!gcloud config set project project-id  # placeholder project id

but on Kaggle there is the option to attach the notebook to a GCP account(?):

from google.cloud import storage
from kaggle_secrets import UserSecretsClient

client = storage.Client("my-project-id")  # placeholder project id
user_secrets = UserSecretsClient()
user_credential = user_secrets.get_gcloud_credential()
user_secrets.set_tensorflow_credential(user_credential)
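
To check whether the credential actually reaches TensorFlow, a quick sanity test against the same bucket (assuming it exists and the credential has read access) would be:

import tensorflow as tf

# Should print the bucket's top-level entries instead of raising an error.
print(tf.io.gfile.listdir("gs://stanfordrna/"))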

I have the .tfrecords stored in my storage bucket and I am unable to access them in both Colab and Kaggle. I would like to know why this is happening and what the possible solutions are. Thank you.

Would you like to help us fix it?

h4ck4l1 commented 9 months ago

I am sorry, I don't know the relevant labels, so I ended up using the bug label.

h4ck4l1 commented 9 months ago

Guys, is this actually a bug? I've concluded that you can't run KerasTuner when the directory is given as a GCP/AWS storage bucket. I thought it was an h5py error about failing to create files, but it turns out to be a cloud-path error(?). I need suggestions on what can be done so that I can save the KerasTuner results directly to the cloud; one workaround sketch I'm considering is below.
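
The sketch assumes TensorFlow's GCS support (tf.io.gfile handles gs:// paths even though h5py does not): let the tuner write to a local directory, then copy the results to the bucket afterwards. copy_tree_to_gcs is a hypothetical helper, not part of KerasTuner:

import os
import tensorflow as tf

def copy_tree_to_gcs(local_dir, gcs_dir):
    # Hypothetical helper: recursively copy a local directory to a gs:// path.
    for root, _, files in os.walk(local_dir):
        rel = os.path.relpath(root, local_dir)
        dest = gcs_dir if rel == "." else f"{gcs_dir}/{rel}"
        tf.io.gfile.makedirs(dest)
        for fname in files:
            tf.io.gfile.copy(os.path.join(root, fname), f"{dest}/{fname}", overwrite=True)

# Run the tuner against a local directory (which h5py can write to),
# then sync the finished results to the bucket:
# tuner = kt.Hyperband(..., directory="/tmp/hyperband", ...)
# tuner.search(...)
copy_tree_to_gcs("/tmp/hyperband", "gs://stanfordrna/hyperband")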

h4ck4l1 commented 5 months ago

The new Colab TPU runtime is a v2-8 TPU VM instance, so this is now solved.