Open brccabral opened 3 years ago
What do you mean?
Without importing scipy, could you please paste the full traceback you get?
I've just run it again but didn't get any error. But in the previous run I didn't have scipy installed, I guess I had to import it to make it available, but the second time it was already installed and there was no error. Thanks
We don't use Scipy in the tutorial. Could you do this though?
I created a clean virtual environment, installed canaro (which installs caer, opencv, tensorflow, numpy, and others), installed matplotlib
python -m venv .venvclean
source .venvclean/bin/activate
pip install canaro
pip install matplotlib
Executed the code. When `model.fit()` is called, this is the traceback:
---------------------------------------------------------------------------
ImportError Traceback (most recent call last)
/tmp/ipykernel_63315/3468149526.py in <module>
----> 1 training = model.fit(train_gen,
2 steps_per_epoch=len(x_train)//BATCH_SIZE,
3 epochs=EPOCHS,
4 validation_data=(x_val, y_val),
5 validation_steps=len(y_val)//BATCH_SIZE,
~/PythonProjects/ImageAndVideo/.venvclean/lib/python3.8/site-packages/keras/engine/training.py in fit(self, x, y, batch_size, epochs, verbose, callbacks, validation_split, validation_data, shuffle, class_weight, sample_weight, initial_epoch, steps_per_epoch, validation_steps, validation_batch_size, validation_freq, max_queue_size, workers, use_multiprocessing)
1132 training_utils.RespectCompiledTrainableState(self):
1133 # Creates a `tf.data.Dataset` and handles batch and epoch iteration.
-> 1134 data_handler = data_adapter.get_data_handler(
1135 x=x,
1136 y=y,
~/PythonProjects/ImageAndVideo/.venvclean/lib/python3.8/site-packages/keras/engine/data_adapter.py in get_data_handler(*args, **kwargs)
1381 if getattr(kwargs["model"], "_cluster_coordinator", None):
1382 return _ClusterCoordinatorDataHandler(*args, **kwargs)
-> 1383 return DataHandler(*args, **kwargs)
1384
1385
~/PythonProjects/ImageAndVideo/.venvclean/lib/python3.8/site-packages/keras/engine/data_adapter.py in __init__(self, x, y, sample_weight, batch_size, steps_per_epoch, initial_epoch, epochs, shuffle, class_weight, max_queue_size, workers, use_multiprocessing, model, steps_per_execution, distribute)
1136
1137 adapter_cls = select_data_adapter(x, y)
-> 1138 self._adapter = adapter_cls(
1139 x,
1140 y,
~/PythonProjects/ImageAndVideo/.venvclean/lib/python3.8/site-packages/keras/engine/data_adapter.py in __init__(self, x, y, sample_weights, shuffle, workers, use_multiprocessing, max_queue_size, model, **kwargs)
915 self._keras_sequence = x
916 self._enqueuer = None
--> 917 super(KerasSequenceAdapter, self).__init__(
918 x,
919 shuffle=False, # Shuffle is handed in the _make_callable override.
~/PythonProjects/ImageAndVideo/.venvclean/lib/python3.8/site-packages/keras/engine/data_adapter.py in __init__(self, x, y, sample_weights, workers, use_multiprocessing, max_queue_size, model, **kwargs)
792 # Since we have to know the dtype of the python generator when we build the
793 # dataset, we have to look at a batch to infer the structure.
--> 794 peek, x = self._peek_and_restore(x)
795 peek = self._standardize_batch(peek)
796 peek = _process_tensorlike(peek)
~/PythonProjects/ImageAndVideo/.venvclean/lib/python3.8/site-packages/keras/engine/data_adapter.py in _peek_and_restore(x)
926 @staticmethod
927 def _peek_and_restore(x):
--> 928 return x[0], x
929
930 def _handle_multiprocessing(self, x, workers, use_multiprocessing,
~/PythonProjects/ImageAndVideo/.venvclean/lib/python3.8/site-packages/keras_preprocessing/image/iterator.py in __getitem__(self, idx)
63 index_array = self.index_array[self.batch_size * idx:
64 self.batch_size * (idx + 1)]
---> 65 return self._get_batches_of_transformed_samples(index_array)
66
67 def __len__(self):
~/PythonProjects/ImageAndVideo/.venvclean/lib/python3.8/site-packages/keras_preprocessing/image/numpy_array_iterator.py in _get_batches_of_transformed_samples(self, index_array)
159 x = self.x[j]
160 params = self.image_data_generator.get_random_transform(x.shape)
--> 161 x = self.image_data_generator.apply_transform(
162 x.astype(self.dtype), params)
163 x = self.image_data_generator.standardize(x)
~/PythonProjects/ImageAndVideo/.venvclean/lib/python3.8/site-packages/keras_preprocessing/image/image_data_generator.py in apply_transform(self, x, transform_parameters)
861 img_channel_axis = self.channel_axis - 1
862
--> 863 x = apply_affine_transform(x, transform_parameters.get('theta', 0),
864 transform_parameters.get('tx', 0),
865 transform_parameters.get('ty', 0),
~/PythonProjects/ImageAndVideo/.venvclean/lib/python3.8/site-packages/keras_preprocessing/image/affine_transformations.py in apply_affine_transform(x, theta, tx, ty, shear, zx, zy, row_axis, col_axis, channel_axis, fill_mode, cval, order)
279 """
280 if scipy is None:
--> 281 raise ImportError('Image transformations require SciPy. '
282 'Install SciPy.')
283 transform_matrix = None
ImportError: Image transformations require SciPy. Install SciPy.
Seems to be an issue here: https://github.com/apple/tensorflow_macos/issues/87
If you install (but not import) Scipy in your Virtual Environment, does this error go away?
Yes, it does go away.
I've found this, I commented but it is closed, https://github.com/tensorflow/tensorflow/issues/51736
I have reinstalled scipy and reopened the Jupyter notebook. It worked. Thank you
I had to explicitly import scipy to run model.fit()