File /opt/conda/lib/python3.10/site-packages/torch/nn/modules/module.py:1527, in Module._call_impl(self, *args, **kwargs)
1522 # If we don't have any hooks, we want to skip the rest of the logic in
1523 # this function, and just call forward.
1524 if not (self._backward_hooks or self._backward_pre_hooks or self._forward_hooks or self._forward_pre_hooks
1525 or _global_backward_pre_hooks or _global_backward_hooks
1526 or _global_forward_hooks or _global_forward_pre_hooks):
-> 1527 return forward_call(*args, **kwargs)
1529 try:
1530 result = None
File /opt/conda/lib/python3.10/site-packages/kan/KANLayer.py:178, in KANLayer.forward(self, x)
176 y = y.permute(1,0) # shape (batch, size)
177 postspline = y.clone().reshape(batch, self.out_dim, self.in_dim)
--> 178 y = self.scale_base.unsqueeze(dim=0) * base + self.scale_sp.unsqueeze(dim=0) * y
179 y = self.mask[None,:] * y
180 postacts = y.clone().reshape(batch, self.out_dim, self.in_dim)
RuntimeError: Expected all tensors to be on the same device, but found at least two devices, cuda:0 and cpu!
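The RuntimeError points at line 178 of KANLayer.forward, where scale_base, scale_sp and the spline output are multiplied together, so at least one of those tensors is still on the CPU while the input sits on cuda:0. Below is a minimal sketch for checking where each tensor lives; it assumes the pykan import path shown in the traceback and that KANLayer registers its tensors as ordinary nn.Module parameters/buffers.

```python
import torch
from kan.KANLayer import KANLayer  # module path taken from the traceback above

# Rebuild the failing layer and list the device of every registered tensor.
# Anything reported as "cpu" here is what triggers the mixed-device RuntimeError.
layer = KANLayer(2, 2, device='cuda')
for name, p in layer.named_parameters():
    print('param :', name, p.device)
for name, b in layer.named_buffers():
    print('buffer:', name, b.device)

x = torch.tensor([[0.2, 0.32]], device='cuda')
print('input :', x.device)
```

The same device mismatch also shows up when the layer is trained through pytorch_lightning; the relevant part of that traceback is: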
File /opt/conda/lib/python3.10/site-packages/pytorch_lightning/loops/fit_loop.py:363, in _FitLoop.advance(self)
361 with self.trainer.profiler.profile("run_training_epoch"):
362 assert self._data_fetcher is not None
--> 363 self.epoch_loop.run(self._data_fetcher)
File /opt/conda/lib/python3.10/site-packages/pytorch_lightning/loops/training_epoch_loop.py:140, in _TrainingEpochLoop.run(self, data_fetcher)
138 while not self.done:
139 try:
--> 140 self.advance(data_fetcher)
141 self.on_advance_end(data_fetcher)
142 self._restarting = False
File /opt/conda/lib/python3.10/site-packages/pytorch_lightning/loops/training_epoch_loop.py:250, in _TrainingEpochLoop.advance(self, data_fetcher)
247 with trainer.profiler.profile("run_training_batch"):
248 if trainer.lightning_module.automatic_optimization:
249 # in automatic optimization, there can only be one optimizer
--> 250 batch_output = self.automatic_optimization.run(trainer.optimizers[0], batch_idx, kwargs)
251 else:
252 batch_output = self.manual_optimization.run(kwargs)
File /opt/conda/lib/python3.10/site-packages/pytorch_lightning/loops/optimization/automatic.py:190, in _AutomaticOptimization.run(self, optimizer, batch_idx, kwargs)
183 closure()
185 # ------------------------------
186 # BACKWARD PASS
187 # ------------------------------
188 # gradient update with accumulated gradients
189 else:
--> 190 self._optimizer_step(batch_idx, closure)
192 result = closure.consume_result()
193 if result.loss is None:
File /opt/conda/lib/python3.10/site-packages/pytorch_lightning/loops/optimization/automatic.py:268, in _AutomaticOptimization._optimizer_step(self, batch_idx, train_step_and_backward_closure)
265 self.optim_progress.optimizer.step.increment_ready()
267 # model hook
--> 268 call._call_lightning_module_hook(
269 trainer,
270 "optimizer_step",
271 trainer.current_epoch,
272 batch_idx,
273 optimizer,
274 train_step_and_backward_closure,
275 )
277 if not should_accumulate:
278 self.optim_progress.optimizer.step.increment_completed()
File /opt/conda/lib/python3.10/site-packages/pytorch_lightning/core/module.py:1303, in LightningModule.optimizer_step(self, epoch, batch_idx, optimizer, optimizer_closure)
1264 def optimizer_step(
1265 self,
1266 epoch: int,
(...)
1269 optimizer_closure: Optional[Callable[[], Any]] = None,
1270 ) -> None:
1271 r"""Override this method to adjust the default way the :class:~pytorch_lightning.trainer.trainer.Trainer calls
1272 the optimizer.
1273
(...)
1301
1302 """
-> 1303 optimizer.step(closure=optimizer_closure)
File /opt/conda/lib/python3.10/site-packages/pytorch_lightning/core/optimizer.py:152, in LightningOptimizer.step(self, closure, **kwargs)
149 raise MisconfigurationException("When optimizer.step(closure) is called, the closure should be callable")
151 assert self._strategy is not None
--> 152 step_output = self._strategy.optimizer_step(self._optimizer, closure, **kwargs)
154 self._on_after_step()
156 return step_output
File /opt/conda/lib/python3.10/site-packages/pytorch_lightning/strategies/strategy.py:239, in Strategy.optimizer_step(self, optimizer, closure, model, **kwargs)
237 # TODO(fabric): remove assertion once strategy's optimizer_step typing is fixed
238 assert isinstance(model, pl.LightningModule)
--> 239 return self.precision_plugin.optimizer_step(optimizer, model=model, closure=closure, **kwargs)
File /opt/conda/lib/python3.10/site-packages/pytorch_lightning/plugins/precision/precision.py:122, in Precision.optimizer_step(self, optimizer, model, closure, **kwargs)
120 """Hook to run the optimizer step."""
121 closure = partial(self._wrap_closure, model, optimizer, closure)
--> 122 return optimizer.step(closure=closure, **kwargs)
File /opt/conda/lib/python3.10/site-packages/torch/optim/optimizer.py:373, in Optimizer.profile_hook_step.<locals>.wrapper(*args, **kwargs)
368 else:
369 raise RuntimeError(
370 f"{func} must return None or a tuple of (new_args, new_kwargs), but got {result}."
371 )
--> 373 out = func(*args, **kwargs)
374 self._optimizer_step_code()
376 # call optimizer step post hooks
I tried to use KANLayer with CUDA, but I get an error when I run:

test = KANLayer(2, 2, device='cuda')
test(torch.tensor([[0.2, 0.32]]).cuda())

This produces the first traceback above; the failure is at line 178 of KANLayer.forward, where scale_base and scale_sp are combined with the spline output.
After some debugging, running the layer on CUDA works, but the optimizer step during training then fails with the same error (the pytorch_lightning traceback above).
Unfortunately, I don't know how to track the mistake down from there. How can I solve this?
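For reference, here is a minimal sketch of the kind of training setup that hits the pytorch_lightning traceback above. The LitKAN module, the MSE loss, the Adam optimizer and the batch layout are hypothetical placeholders for the real model; the assumption that KANLayer.forward returns a tuple whose first element is the layer output follows the pykan code shown in the traceback.

```python
import torch
import pytorch_lightning as pl
from kan.KANLayer import KANLayer

class LitKAN(pl.LightningModule):
    """Hypothetical minimal stand-in for the real model; only the KANLayer matters here."""

    def __init__(self):
        super().__init__()
        self.layer = KANLayer(2, 2, device='cuda')

    def on_fit_start(self):
        # Diagnostic: after Lightning has moved the module to its device,
        # report any registered tensor that is still on the CPU.
        for name, p in self.layer.named_parameters():
            if p.device.type != 'cuda':
                print('still on CPU:', name)

    def training_step(self, batch, batch_idx):
        x, target = batch
        y = self.layer(x)[0]  # KANLayer.forward returns a tuple; the first element is the output
        return torch.nn.functional.mse_loss(y, target)

    def configure_optimizers(self):
        return torch.optim.Adam(self.parameters(), lr=1e-3)
```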