trainer.fit(model) # Training the model
LOCAL_RANK: 0 - CUDA_VISIBLE_DEVICES: [0]
  | Name | Type           | Params
----------------------------------------
0 | enc  | RNNEncoder     | 888 K
1 | dec  | DilConvDecoder | 596 K
2 | lp   | LP             | 1.6 M
----------------------------------------
3.1 M     Trainable params
0         Non-trainable params
3.1 M     Total params
12.473    Total estimated model params size (MB)
Epoch 0: 0% 0/5333 [00:00<?, ?it/s]
KeyError                                  Traceback (most recent call last)
/tmp/ipykernel_679832/1077659130.py in <module>
----> 1 trainer.fit(model) # Training the model
~/anaconda3/lib/python3.9/site-packages/pytorch_lightning/trainer/trainer.py in fit(self, model, train_dataloaders, val_dataloaders, datamodule, ckpt_path)
    518         model = _maybe_unwrap_optimized(model)
    519         self.strategy._lightning_module = model
--> 520         call._call_and_handle_interrupt(
    521             self, self._fit_impl, model, train_dataloaders, val_dataloaders, datamodule, ckpt_path
    522         )

~/anaconda3/lib/python3.9/site-packages/pytorch_lightning/trainer/call.py in _call_and_handle_interrupt(trainer, trainer_fn, *args, **kwargs)
     42             return trainer.strategy.launcher.launch(trainer_fn, *args, trainer=trainer, **kwargs)
     43         else:
---> 44             return trainer_fn(*args, **kwargs)
     45
     46     except _TunerExitException:

~/anaconda3/lib/python3.9/site-packages/pytorch_lightning/trainer/trainer.py in _fit_impl(self, model, train_dataloaders, val_dataloaders, datamodule, ckpt_path)
    557             model_connected=self.lightning_module is not None,
    558         )
--> 559         self._run(model, ckpt_path=ckpt_path)
    560
    561         assert self.state.stopped

~/anaconda3/lib/python3.9/site-packages/pytorch_lightning/trainer/trainer.py in _run(self, model, ckpt_path)
    933         # RUN THE TRAINER
    934         # ----------------------------
--> 935         results = self._run_stage()
    936
    937         # ----------------------------

~/anaconda3/lib/python3.9/site-packages/pytorch_lightning/trainer/trainer.py in _run_stage(self)
    976             self._run_sanity_check()
    977             with torch.autograd.set_detect_anomaly(self._detect_anomaly):
--> 978                 self.fit_loop.run()
    979             return None
    980         raise RuntimeError(f"Unexpected state {self.state}")

~/anaconda3/lib/python3.9/site-packages/pytorch_lightning/loops/fit_loop.py in run(self)
    199         try:
    200             self.on_advance_start()
--> 201             self.advance()
    202             self.on_advance_end()
    203             self._restarting = False
~/anaconda3/lib/python3.9/site-packages/pytorch_lightning/loops/fit_loop.py in advance(self)
    352             self._data_fetcher.setup(combined_loader)
    353             with self.trainer.profiler.profile("run_training_epoch"):
--> 354                 self.epoch_loop.run(self._data_fetcher)
    355
    356     def on_advance_end(self) -> None:

~/anaconda3/lib/python3.9/site-packages/pytorch_lightning/loops/training_epoch_loop.py in run(self, data_fetcher)
    131         while not self.done:
    132             try:
--> 133                 self.advance(data_fetcher)
    134                 self.on_advance_end()
    135                 self._restarting = False

~/anaconda3/lib/python3.9/site-packages/pytorch_lightning/loops/training_epoch_loop.py in advance(self, data_fetcher)
    216             if trainer.lightning_module.automatic_optimization:
    217                 # in automatic optimization, there can only be one optimizer
--> 218                 batch_output = self.automatic_optimization.run(trainer.optimizers[0], kwargs)
    219             else:
    220                 batch_output = self.manual_optimization.run(kwargs)

~/anaconda3/lib/python3.9/site-packages/pytorch_lightning/loops/optimization/automatic.py in run(self, optimizer, kwargs)
    183         # gradient update with accumulated gradients
    184         else:
--> 185             self._optimizer_step(kwargs.get("batch_idx", 0), closure)
    186
    187         result = closure.consume_result()

~/anaconda3/lib/python3.9/site-packages/pytorch_lightning/loops/optimization/automatic.py in _optimizer_step(self, batch_idx, train_step_and_backward_closure)
    259
    260         # model hook
--> 261         call._call_lightning_module_hook(
    262             trainer,
    263             "optimizer_step",
~/anaconda3/lib/python3.9/site-packages/pytorch_lightning/trainer/call.py in _call_lightning_module_hook(trainer, hook_name, pl_module, *args, **kwargs)
    140
    141     with trainer.profiler.profile(f"[LightningModule]{pl_module.__class__.__name__}.{hook_name}"):
--> 142         output = fn(*args, **kwargs)
    143
    144     # restore current_fx when nested context

~/anaconda3/lib/python3.9/site-packages/pytorch_lightning/core/module.py in optimizer_step(self, epoch, batch_idx, optimizer, optimizer_closure)
   1263                     pg["lr"] = lr_scale * self.learning_rate
   1264         """
-> 1265         optimizer.step(closure=optimizer_closure)
   1266
   1267     def optimizer_zero_grad(self, epoch: int, batch_idx: int, optimizer: Optimizer) -> None:

~/anaconda3/lib/python3.9/site-packages/pytorch_lightning/core/optimizer.py in step(self, closure, **kwargs)
    156
    157         assert self._strategy is not None
--> 158         step_output = self._strategy.optimizer_step(self._optimizer, closure, **kwargs)
    159
    160         self._on_after_step()

~/anaconda3/lib/python3.9/site-packages/pytorch_lightning/strategies/strategy.py in optimizer_step(self, optimizer, closure, model, **kwargs)
    222         # TODO(fabric): remove assertion once strategy's optimizer_step typing is fixed
    223         assert isinstance(model, pl.LightningModule)
--> 224         return self.precision_plugin.optimizer_step(optimizer, model=model, closure=closure, **kwargs)
    225
    226     def _setup_model_and_optimizers(self, model: Module, optimizers: List[Optimizer]) -> Tuple[Module, List[Optimizer]]:
~/anaconda3/lib/python3.9/site-packages/pytorch_lightning/plugins/precision/precision_plugin.py in optimizer_step(self, optimizer, model, closure, **kwargs)
    112         """Hook to run the optimizer step."""
    113         closure = partial(self._wrap_closure, model, optimizer, closure)
--> 114         return optimizer.step(closure=closure, **kwargs)
    115
    116     def _clip_gradients(

~/anaconda3/lib/python3.9/site-packages/torch/optim/optimizer.py in wrapper(*args, **kwargs)
    278                                 f"but got {result}.")
    279
--> 280                 out = func(*args, **kwargs)
    281                 self._optimizer_step_code()
    282

~/anaconda3/lib/python3.9/site-packages/torch/optim/optimizer.py in _use_grad(self, *args, **kwargs)
     31         try:
     32             torch.set_grad_enabled(self.defaults['differentiable'])
---> 33             ret = func(self, *args, **kwargs)
     34         finally:
     35             torch.set_grad_enabled(prev_grad)

~/anaconda3/lib/python3.9/site-packages/torch/optim/adam.py in step(self, closure)
    119         if closure is not None:
    120             with torch.enable_grad():
--> 121                 loss = closure()
    122
    123         for group in self.param_groups:
~/anaconda3/lib/python3.9/site-packages/pytorch_lightning/plugins/precision/precision_plugin.py in _wrap_closure(self, model, optimizer, closure)
     99         consistent with the ``PrecisionPlugin`` subclasses that cannot pass ``optimizer.step(closure)`` directly.
    100         """
--> 101         closure_result = closure()
    102         self._after_closure(model, optimizer)
    103         return closure_result

~/anaconda3/lib/python3.9/site-packages/pytorch_lightning/loops/optimization/automatic.py in __call__(self, *args, **kwargs)
    138
    139     def __call__(self, *args: Any, **kwargs: Any) -> Optional[Tensor]:
--> 140         self._result = self.closure(*args, **kwargs)
    141         return self._result.loss
    142
~/anaconda3/lib/python3.9/site-packages/pytorch_lightning/loops/optimization/automatic.py in closure(self, *args, **kwargs)
    124
    125     def closure(self, *args: Any, **kwargs: Any) -> ClosureResult:
--> 126         step_output = self._step_fn()
    127
    128         if step_output.closure_loss is None:

~/anaconda3/lib/python3.9/site-packages/pytorch_lightning/loops/optimization/automatic.py in _training_step(self, kwargs)
    306
    307         # manually capture logged metrics
--> 308         training_step_output = call._call_strategy_hook(trainer, "training_step", *kwargs.values())
    309         self.trainer.strategy.post_training_step()
    310

~/anaconda3/lib/python3.9/site-packages/pytorch_lightning/trainer/call.py in _call_strategy_hook(trainer, hook_name, *args, **kwargs)
    286
    287     with trainer.profiler.profile(f"[Strategy]{trainer.strategy.__class__.__name__}.{hook_name}"):
--> 288         output = fn(*args, **kwargs)
    289
    290     # restore current_fx when nested context

~/anaconda3/lib/python3.9/site-packages/pytorch_lightning/strategies/strategy.py in training_step(self, *args, **kwargs)
    364         with self.precision_plugin.train_step_context():
    365             assert isinstance(self.model, TrainingStep)
--> 366             return self.model.training_step(*args, **kwargs)
    367
    368     def post_training_step(self) -> None:
~/Desktop/user/new_molecule/progentrl/gen_vaelp.py in training_step(self, batch, batch_idx)
    133     def training_step(self, batch, batch_idx):
    134         if self.current_epoch in [0, 1, 5] and batch_idx==0:
--> 135             self.reinit_from_data()
    136
    137         x_batch, y_batch = batch

~/Desktop/user/new_molecule/progentrl/gen_vaelp.py in reinit_from_data(self)
    102
    103         if (self.buf is None) or (self.buf.shape[0] < 5000):
--> 104             enc_out = self.enc(x_batch)
    105             means, log_stds = torch.split(enc_out,
    106                                           len(self.latent_descr),

~/anaconda3/lib/python3.9/site-packages/torch/nn/modules/module.py in _call_impl(self, *args, **kwargs)
   1499                 or _global_backward_pre_hooks or _global_backward_hooks
   1500                 or _global_forward_hooks or _global_forward_pre_hooks):
-> 1501             return forward_call(*args, **kwargs)
   1502         # Do not call functions when jit is used
   1503         full_backward_hooks, non_full_backward_hooks = [], []

~/Desktop/user/new_molecule/progentrl/encoder.py in forward(self, sm_list)
     24         """
     25
---> 26         tokens, lens = encode(sm_list)
     27         to_feed = tokens.transpose(1, 0).to(self.embs.weight.device)
     28

~/Desktop/user/new_molecule/progentrl/tokenizer.py in encode(sm_list, pad_size)
     62     lens = []
     63     for s in sm_list:
---> 64         tokens = ([1] + [__t2i[tok]
     65                          for tok in smiles_tokenizer(s)])[:pad_size - 1]
     66         lens.append(len(tokens))
~/Desktop/user/new_molecule/progentrl/tokenizer.py in <listcomp>(.0)
62 lens = []
63 for s in sm_list:
---> 64 tokens = ([1] + [__t2i[tok]
65 for tok in smiles_tokenizer(s)])[:pad_size - 1]
66 lens.append(len(tokens))
KeyError: '7'
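For context, the failure happens in the `__t2i[tok]` lookup inside the list comprehension at `tokenizer.py` line 64: the SMILES tokenizer emits a token `'7'` (most likely a ring-closure digit in one of the input SMILES) that has no entry in the token-to-index vocabulary, so the dict access raises `KeyError: '7'`. Below is a minimal, hypothetical sketch of a tolerant encoder that maps out-of-vocabulary tokens to an `<unk>` index instead of crashing. The vocabulary contents, the regex, and the names `_t2i`, `smiles_tokens`, `encode_tolerant`, and `unk_idx` are illustrative assumptions, not the actual code in `progentrl/tokenizer.py` (which uses `__t2i` and `smiles_tokenizer` and returns padded tensors plus lengths).

# Hypothetical sketch only: illustrates handling unknown SMILES tokens such as '7'
# with dict.get() rather than a raising dict lookup. Not the project's tokenizer.py.
import re

# Assumed toy vocabulary; the real __t2i in progentrl/tokenizer.py differs.
_t2i = {'<pad>': 0, '<bos>': 1, '<eos>': 2, '<unk>': 3,
        'C': 4, 'c': 5, 'O': 6, 'N': 7, '(': 8, ')': 9, '=': 10,
        '1': 11, '2': 12, '3': 13, '4': 14, '5': 15, '6': 16}

# Simplified SMILES tokenization regex (bracket atoms, two-letter elements,
# ring-closure digits, bonds, branches).
_SMILES_RE = re.compile(
    r"(\[[^\]]+\]|Br|Cl|Si|@@|@|%\d{2}|[A-Za-z]|\d|[=#\-\+\\/\(\)\.])"
)

def smiles_tokens(smiles: str):
    """Split a SMILES string into tokens (simplified tokenizer)."""
    return _SMILES_RE.findall(smiles)

def encode_tolerant(sm_list, pad_size=50, unk_idx=_t2i['<unk>']):
    """Encode SMILES strings, mapping unknown tokens to <unk> instead of failing."""
    batch = []
    for s in sm_list:
        # .get(tok, unk_idx) is the key difference from __t2i[tok]:
        # an out-of-vocabulary token no longer raises KeyError.
        ids = [_t2i['<bos>']] + [_t2i.get(tok, unk_idx) for tok in smiles_tokens(s)]
        ids = ids[:pad_size - 1] + [_t2i['<eos>']]
        ids += [_t2i['<pad>']] * (pad_size - len(ids))  # right-pad to pad_size
        batch.append(ids)
    return batch

# '7' is not in the assumed vocabulary, so it becomes <unk> rather than a crash.
print(encode_tolerant(['C1CC1', 'C7CC7'], pad_size=10))

In practice the fix for this error is usually one of: extend the vocabulary in `tokenizer.py` so it covers every token the dataset can produce, filter out SMILES strings containing unsupported tokens before training, or apply a `.get()`-style fallback like the sketch above inside `encode`.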