You are using an old version of the checkpointing format that is deprecated (We will also silently ignore `gradient_checkpointing_kwargs` in case you passed it). Please update to the new format on your modeling file. To use the new format, you need to completely remove the definition of the method `_set_gradient_checkpointing` in your model.
0%| | 0/19 [00:00<?, ?it/s]Traceback (most recent call last):
File "F:\xiazai\MedicalGPT-main\reward_modeling.py", line 652, in
main()
File "F:\xiazai\MedicalGPT-main\reward_modeling.py", line 620, in main
train_result = trainer.train(resume_from_checkpoint=checkpoint)
File "C:\Users\admin.conda\envs\newrlhf\lib\site-packages\transformers\trainer.py", line 1624, in train
return inner_training_loop(
File "C:\Users\admin.conda\envs\newrlhf\lib\site-packages\transformers\trainer.py", line 1961, in _inner_training_loop
tr_loss_step = self.training_step(model, inputs)
File "C:\Users\admin.conda\envs\newrlhf\lib\site-packages\transformers\trainer.py", line 2902, in training_step
loss = self.compute_loss(model, inputs)
File "F:\xiazai\MedicalGPT-main\reward_modeling.py", line 245, in compute_loss
rewards_chosen = model(input_ids=inputs["input_ids_chosen"],
File "C:\Users\admin.conda\envs\newrlhf\lib\site-packages\torch\nn\modules\module.py", line 1511, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "C:\Users\admin.conda\envs\newrlhf\lib\site-packages\torch\nn\modules\module.py", line 1520, in _call_impl
return forward_call(*args, **kwargs)
File "C:\Users\admin.conda\envs\newrlhf\lib\site-packages\peft\peft_model.py", line 902, in forward
return self.base_model(
File "C:\Users\admin.conda\envs\newrlhf\lib\site-packages\torch\nn\modules\module.py", line 1511, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "C:\Users\admin.conda\envs\newrlhf\lib\site-packages\torch\nn\modules\module.py", line 1520, in _call_impl
return forward_call(*args, **kwargs)
File "C:\Users\admin.conda\envs\newrlhf\lib\site-packages\peft\tuners\tuners_utils.py", line 160, in forward
return self.model.forward(*args, **kwargs)
TypeError: ChatGLMForSequenceClassification.forward() got an unexpected keyword argument 'output_attentions'
0%| | 0/19 [00:00<?, ?it/s]
You are using an old version of the checkpointing format that is deprecated (We will also silently ignore
main()
File "F:\xiazai\MedicalGPT-main\reward_modeling.py", line 620, in main
train_result = trainer.train(resume_from_checkpoint=checkpoint)
File "C:\Users\admin.conda\envs\newrlhf\lib\site-packages\transformers\trainer.py", line 1624, in train
return inner_training_loop(
File "C:\Users\admin.conda\envs\newrlhf\lib\site-packages\transformers\trainer.py", line 1961, in _inner_training_loop
tr_loss_step = self.training_step(model, inputs)
File "C:\Users\admin.conda\envs\newrlhf\lib\site-packages\transformers\trainer.py", line 2902, in training_step
loss = self.compute_loss(model, inputs)
File "F:\xiazai\MedicalGPT-main\reward_modeling.py", line 245, in compute_loss
rewards_chosen = model(input_ids=inputs["input_ids_chosen"],
File "C:\Users\admin.conda\envs\newrlhf\lib\site-packages\torch\nn\modules\module.py", line 1511, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "C:\Users\admin.conda\envs\newrlhf\lib\site-packages\torch\nn\modules\module.py", line 1520, in _call_impl
return forward_call(*args, **kwargs)
File "C:\Users\admin.conda\envs\newrlhf\lib\site-packages\peft\peft_model.py", line 902, in forward
return self.base_model(
File "C:\Users\admin.conda\envs\newrlhf\lib\site-packages\torch\nn\modules\module.py", line 1511, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "C:\Users\admin.conda\envs\newrlhf\lib\site-packages\torch\nn\modules\module.py", line 1520, in _call_impl
return forward_call(*args, **kwargs)
File "C:\Users\admin.conda\envs\newrlhf\lib\site-packages\peft\tuners\tuners_utils.py", line 160, in forward
return self.model.forward(*args, **kwargs)
TypeError: ChatGLMForSequenceClassification.forward() got an unexpected keyword argument 'output_attentions'
0%| | 0/19 [00:00<?, ?it/s]
`gradient_checkpointing_kwargs`
in case you passed it). Please update to the new format on your modeling file. To use the new format, you need to completely remove the definition of the method `_set_gradient_checkpointing`
in your model. 0%| | 0/19 [00:00<?, ?it/s]Traceback (most recent call last): File "F:\xiazai\MedicalGPT-main\reward_modeling.py", line 652, in