davidADSP / GDL_code

The official code repository for examples in the O'Reilly book 'Generative Deep Learning'
GNU General Public License v3.0
1.47k stars 739 forks source link

03_03_vae_digits_train ValueError: The model cannot be compiled because it has no loss to optimize. #82

Open MasaharuUno opened 4 years ago

MasaharuUno commented 4 years ago

I could not run this cell

vae.train(     
    x_train
    , batch_size = BATCH_SIZE
    , epochs = EPOCHS
    , run_folder = RUN_FOLDER
    , print_every_n_batches = PRINT_EVERY_N_BATCHES
    , initial_epoch = INITIAL_EPOCH
)

The error is as follows: WARNING:tensorflow:Output output_1 missing from loss dictionary. We assume this was done on purpose. The fit and evaluate APIs will not be expecting any data to be passed to output_1.

ValueError                                Traceback (most recent call last)
<ipython-input-12-a0cdb3ff19b5> in <module>
      5     , run_folder = RUN_FOLDER
      6     , print_every_n_batches = PRINT_EVERY_N_BATCHES
----> 7     , initial_epoch = INITIAL_EPOCH
      8 )

~\Python\GDL_code\models\VAE.py in train(self, x_train, batch_size, epochs, run_folder, print_every_n_batches, initial_epoch, lr_decay)
    224             , epochs = epochs
    225             , initial_epoch = initial_epoch
--> 226             , callbacks = callbacks_list
    227         )
    228 

~\anaconda3\envs\generative\lib\site-packages\tensorflow_core\python\keras\engine\training.py in fit(self, x, y, batch_size, epochs, verbose, callbacks, validation_split, validation_data, shuffle, class_weight, sample_weight, initial_epoch, steps_per_epoch, validation_steps, validation_freq, max_queue_size, workers, use_multiprocessing, **kwargs)
    726         max_queue_size=max_queue_size,
    727         workers=workers,
--> 728         use_multiprocessing=use_multiprocessing)
    729 
    730   def evaluate(self,

~\anaconda3\envs\generative\lib\site-packages\tensorflow_core\python\keras\engine\training_v2.py in fit(self, model, x, y, batch_size, epochs, verbose, callbacks, validation_split, validation_data, shuffle, class_weight, sample_weight, initial_epoch, steps_per_epoch, validation_steps, validation_freq, **kwargs)
    222           validation_data=validation_data,
    223           validation_steps=validation_steps,
--> 224           distribution_strategy=strategy)
    225 
    226       total_samples = _get_total_number_of_samples(training_data_adapter)

~\anaconda3\envs\generative\lib\site-packages\tensorflow_core\python\keras\engine\training_v2.py in _process_training_inputs(model, x, y, batch_size, epochs, sample_weights, class_weights, steps_per_epoch, validation_split, validation_data, validation_steps, shuffle, distribution_strategy, max_queue_size, workers, use_multiprocessing)
    545         max_queue_size=max_queue_size,
    546         workers=workers,
--> 547         use_multiprocessing=use_multiprocessing)
    548     val_adapter = None
    549     if validation_data:

~\anaconda3\envs\generative\lib\site-packages\tensorflow_core\python\keras\engine\training_v2.py in _process_inputs(model, x, y, batch_size, epochs, sample_weights, class_weights, shuffle, steps, distribution_strategy, max_queue_size, workers, use_multiprocessing)
    592         batch_size=batch_size,
    593         check_steps=False,
--> 594         steps=steps)
    595   adapter = adapter_cls(
    596       x,

~\anaconda3\envs\generative\lib\site-packages\tensorflow_core\python\keras\engine\training.py in _standardize_user_data(self, x, y, sample_weight, class_weight, batch_size, check_steps, steps_name, steps, validation_split, shuffle, extract_tensors_from_dataset)
   2431     is_compile_called = False
   2432     if not self._is_compiled and self.optimizer:
-> 2433       self._compile_from_inputs(all_inputs, y_input, x, y)
   2434       is_compile_called = True
   2435 

~\anaconda3\envs\generative\lib\site-packages\tensorflow_core\python\keras\engine\training.py in _compile_from_inputs(self, all_inputs, target, orig_inputs, orig_target)
   2666         sample_weight_mode=self.sample_weight_mode,
   2667         run_eagerly=self.run_eagerly,
-> 2668         experimental_run_tf_function=self._experimental_run_tf_function)
   2669 
   2670   # TODO(omalleyt): Consider changing to a more descriptive function name.

~\anaconda3\envs\generative\lib\site-packages\tensorflow_core\python\training\tracking\base.py in _method_wrapper(self, *args, **kwargs)
    455     self._self_setattr_tracking = False  # pylint: disable=protected-access
    456     try:
--> 457       result = method(self, *args, **kwargs)
    458     finally:
    459       self._self_setattr_tracking = previous_value  # pylint: disable=protected-access

~\anaconda3\envs\generative\lib\site-packages\tensorflow_core\python\keras\engine\training.py in compile(self, optimizer, loss, metrics, loss_weights, sample_weight_mode, weighted_metrics, target_tensors, distribute, **kwargs)
    371 
    372       # Creates the model loss and weighted metrics sub-graphs.
--> 373       self._compile_weights_loss_and_weighted_metrics()
    374 
    375       # Functions for train, test and predict will

~\anaconda3\envs\generative\lib\site-packages\tensorflow_core\python\training\tracking\base.py in _method_wrapper(self, *args, **kwargs)
    455     self._self_setattr_tracking = False  # pylint: disable=protected-access
    456     try:
--> 457       result = method(self, *args, **kwargs)
    458     finally:
    459       self._self_setattr_tracking = previous_value  # pylint: disable=protected-access

~\anaconda3\envs\generative\lib\site-packages\tensorflow_core\python\keras\engine\training.py in _compile_weights_loss_and_weighted_metrics(self, sample_weights)
   1651       #                   loss_weight_2 * output_2_loss_fn(...) +
   1652       #                   layer losses.
-> 1653       self.total_loss = self._prepare_total_loss(masks)
   1654 
   1655   def _prepare_skip_target_masks(self):

~\anaconda3\envs\generative\lib\site-packages\tensorflow_core\python\keras\engine\training.py in _prepare_total_loss(self, masks)
   1750       if total_loss is None:
   1751         if not self.losses:
-> 1752           raise ValueError('The model cannot be compiled '
   1753                            'because it has no loss to optimize.')
   1754         else:

ValueError: The model cannot be compiled because it has no loss to optimize.

vpunia-dev commented 3 years ago

@MasaharuUno were you able to resolve this?

MasaharuUno commented 3 years ago

No. I still have the same trouble.

jhtop1972 commented 1 year ago

Add the following two lines to the first cell of the notebook:

from tensorflow.python.framework.ops import disable_eager_execution
disable_eager_execution()