Hi, when I tried to run the tutorial locally, I ran into some issues with Torch and CUDA: https://colab.research.google.com/github/MouseLand/rastermap/blob/main/notebooks/tutorial.ipynb

The error occurs in the "Predicting with a 1D convolution layer" section:
from neuropop import nn_prediction
import torch

# ideally we have a GPU we can use ("cuda" option)
device = torch.device("cuda")

# declare the model
pred_model = nn_prediction.PredictionNetwork(n_in=beh.shape[-1], n_kp=22, identity=False,
                                             n_filt=10, n_latents=0,
                                             n_out=Vsv.shape[-1], n_core_layers=1,
                                             relu_wavelets=False, relu_latents=False)
# put model on the GPU
pred_model.to(device);

print(pred_model)
AssertionError                            Traceback (most recent call last)
Cell In[20], line 13
      8 pred_model = nn_prediction.PredictionNetwork(n_in=beh.shape[-1], n_kp=22, identity=False,
      9                                              n_filt=10, n_latents=0,
     10                                              n_out=Vsv.shape[-1], n_core_layers=1,
     11                                              relu_wavelets=False, relu_latents=False)
     12 # put model on the GPU
---> 13 pred_model.to(device);
     15 print(pred_model)

File ~\miniconda3\envs\rastermap\lib\site-packages\torch\nn\modules\module.py:1152, in Module.to(self, *args, **kwargs)
   1148         return t.to(device, dtype if t.is_floating_point() or t.is_complex() else None,
   1149                     non_blocking, memory_format=convert_to_format)
   1150     return t.to(device, dtype if t.is_floating_point() or t.is_complex() else None, non_blocking)
-> 1152 return self._apply(convert)

File ~\miniconda3\envs\rastermap\lib\site-packages\torch\nn\modules\module.py:802, in Module._apply(self, fn, recurse)
    800 if recurse:
    801     for module in self.children():
--> 802         module._apply(fn)
    804 def compute_should_use_set_data(tensor, tensor_applied):
    805     if torch._has_compatible_shallow_copy_type(tensor, tensor_applied):
    806         # If the new tensor has compatible tensor type as the existing tensor,
    807         # the current behavior is to change the tensor in-place using .data =,
   (...)
    812         # global flag to let the user control whether they want the future
    813         # behavior of overwriting the existing tensor or not.

File ~\miniconda3\envs\rastermap\lib\site-packages\torch\nn\modules\module.py:802, in Module._apply(self, fn, recurse)
    800 if recurse:
    801     for module in self.children():
--> 802         module._apply(fn)
    804 def compute_should_use_set_data(tensor, tensor_applied):
    805     if torch._has_compatible_shallow_copy_type(tensor, tensor_applied):
    806         # If the new tensor has compatible tensor type as the existing tensor,
    807         # the current behavior is to change the tensor in-place using .data =,
   (...)
    812         # global flag to let the user control whether they want the future
    813         # behavior of overwriting the existing tensor or not.

    [... skipping similar frames: Module._apply at line 802 (1 times)]

File ~\miniconda3\envs\rastermap\lib\site-packages\torch\nn\modules\module.py:802, in Module._apply(self, fn, recurse)
    800 if recurse:
    801     for module in self.children():
--> 802         module._apply(fn)
    804 def compute_should_use_set_data(tensor, tensor_applied):
    805     if torch._has_compatible_shallow_copy_type(tensor, tensor_applied):
    806         # If the new tensor has compatible tensor type as the existing tensor,
    807         # the current behavior is to change the tensor in-place using .data =,
   (...)
    812         # global flag to let the user control whether they want the future
    813         # behavior of overwriting the existing tensor or not.

File ~\miniconda3\envs\rastermap\lib\site-packages\torch\nn\modules\module.py:825, in Module._apply(self, fn, recurse)
    821 # Tensors stored in modules are graph leaves, and we don't want to
    822 # track autograd history of param_applied, so we have to use
    823 # with torch.no_grad():
    824 with torch.no_grad():
--> 825     param_applied = fn(param)
    826 should_use_set_data = compute_should_use_set_data(param, param_applied)
    827 if should_use_set_data:

File ~\miniconda3\envs\rastermap\lib\site-packages\torch\nn\modules\module.py:1150, in Module.to.<locals>.convert(t)
   1147 if convert_to_format is not None and t.dim() in (4, 5):
   1148     return t.to(device, dtype if t.is_floating_point() or t.is_complex() else None,
   1149                 non_blocking, memory_format=convert_to_format)
-> 1150 return t.to(device, dtype if t.is_floating_point() or t.is_complex() else None, non_blocking)

File ~\miniconda3\envs\rastermap\lib\site-packages\torch\cuda\__init__.py:293, in _lazy_init()
    288     raise RuntimeError(
    289         "Cannot re-initialize CUDA in forked subprocess. To use CUDA with "
    290         "multiprocessing, you must use the 'spawn' start method"
    291     )
    292 if not hasattr(torch._C, "_cuda_getDeviceCount"):
--> 293     raise AssertionError("Torch not compiled with CUDA enabled")
    294 if _cudart is None:
    295     raise AssertionError(
    296         "libcudart functions unavailable. It looks like you have a broken build?"
    297     )

AssertionError: Torch not compiled with CUDA enabled

D:\repos\Rastermap>nvcc --version
nvcc: NVIDIA (R) Cuda compiler driver
Copyright (c) 2005-2019 NVIDIA Corporation
Built on Wed_Oct_23_19:32:27_Pacific_Daylight_Time_2019
Cuda compilation tools, release 10.2, V10.2.89
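From the nvcc output it looks like my system CUDA toolkit is 10.2, so my guess is that the torch wheel installed in the rastermap env is a CPU-only build. For reference, this small check should show whether the installed torch build has CUDA support (a minimal sketch; on CPU-only wheels torch.version.cuda is None):

import torch

# a CPU-only wheel typically reports a version like "2.x.y+cpu"
print(torch.__version__)
# None when torch was built without CUDA support
print(torch.version.cuda)
# False is consistent with the AssertionError above
print(torch.cuda.is_available())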
Attached: conda list and CUDA version.txt
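In the meantime, would it be reasonable to work around this by choosing the device conditionally instead of hardcoding "cuda"? A sketch of what I mean (falls back to CPU, which should run, just slower):

import torch

# fall back to CPU when the installed torch build has no CUDA support
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
pred_model.to(device)  # places the model on whichever device is available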
Many thanks. Looking forward to hearing from you!