Traceback (most recent call last):
  File "/cfs/cfs-1vhb8svx/hkj/InternLM-XComposer/finetune/finetune.py", line 313, in <module>
    train()
  File "/cfs/cfs-1vhb8svx/hkj/InternLM-XComposer/finetune/finetune.py", line 303, in train
    trainer.train()
  File "/data/miniconda3/envs/intern_clean/lib/python3.9/site-packages/transformers/trainer.py", line 1885, in train
    return inner_training_loop(
  File "/data/miniconda3/envs/intern_clean/lib/python3.9/site-packages/transformers/trainer.py", line 2216, in _inner_training_loop
    tr_loss_step = self.training_step(model, inputs)
  File "/data/miniconda3/envs/intern_clean/lib/python3.9/site-packages/transformers/trainer.py", line 3238, in training_step
    loss = self.compute_loss(model, inputs)
  File "/data/miniconda3/envs/intern_clean/lib/python3.9/site-packages/transformers/trainer.py", line 3264, in compute_loss
    outputs = model(**inputs)
  File "/data/miniconda3/envs/intern_clean/lib/python3.9/site-packages/torch/nn/modules/module.py", line 1501, in _call_impl
    return forward_call(*args, **kwargs)
  File "/data/miniconda3/envs/intern_clean/lib/python3.9/site-packages/deepspeed/utils/nvtx.py", line 15, in wrapped_fn
    ret_val = func(*args, **kwargs)
  File "/data/miniconda3/envs/intern_clean/lib/python3.9/site-packages/deepspeed/runtime/engine.py", line 1852, in forward
    loss = self.module(*inputs, **kwargs)
  File "/data/miniconda3/envs/intern_clean/lib/python3.9/site-packages/torch/nn/modules/module.py", line 1501, in _call_impl
    return forward_call(*args, **kwargs)
  File "/root/.cache/huggingface/modules/transformers_modules/internlm-xcomposer2-vl-7b/modeling_internlm_xcomposer2.py", line 337, in forward
    to_regress_embeds, attention_mask, targets, im_mask = self.interleav_wrap(
  File "/root/.cache/huggingface/modules/transformers_modules/internlm-xcomposer2-vl-7b/modeling_internlm_xcomposer2.py", line 213, in interleav_wrap
    img_embeds, atts_img, img_target = self.img2emb(image)
  File "/root/.cache/huggingface/modules/transformers_modules/internlm-xcomposer2-vl-7b/modeling_internlm_xcomposer2.py", line 122, in img2emb
    img_embeds = self.vision_proj(self.vit(image.to(self.device)))
  File "/data/miniconda3/envs/intern_clean/lib/python3.9/site-packages/torch/nn/modules/module.py", line 1501, in _call_impl
    return forward_call(*args, **kwargs)
  File "/root/.cache/huggingface/modules/transformers_modules/internlm-xcomposer2-vl-7b/build_mlp.py", line 132, in forward
    image_forward_outs = self.vision_tower(
  File "/data/miniconda3/envs/intern_clean/lib/python3.9/site-packages/torch/nn/modules/module.py", line 1501, in _call_impl
    return forward_call(*args, **kwargs)
  File "/data/miniconda3/envs/intern_clean/lib/python3.9/site-packages/transformers/models/clip/modeling_clip.py", line 926, in forward
    return self.vision_model(
  File "/data/miniconda3/envs/intern_clean/lib/python3.9/site-packages/torch/nn/modules/module.py", line 1501, in _call_impl
    return forward_call(*args, **kwargs)
  File "/data/miniconda3/envs/intern_clean/lib/python3.9/site-packages/transformers/models/clip/modeling_clip.py", line 853, in forward
    encoder_outputs = self.encoder(
  File "/data/miniconda3/envs/intern_clean/lib/python3.9/site-packages/torch/nn/modules/module.py", line 1501, in _call_impl
    return forward_call(*args, **kwargs)
  File "/data/miniconda3/envs/intern_clean/lib/python3.9/site-packages/transformers/models/clip/modeling_clip.py", line 630, in forward
    layer_outputs = self._gradient_checkpointing_func(
  File "/data/miniconda3/envs/intern_clean/lib/python3.9/site-packages/torch/nn/modules/module.py", line 1614, in __getattr__
    raise AttributeError("'{}' object has no attribute '{}'".format(
AttributeError: 'CLIPEncoder' object has no attribute '_gradient_checkpointing_func'
我使用 finetune.sh 全参微调的时候，报如下错误。
Traceback (most recent call last):
  File "/cfs/cfs-1vhb8svx/hkj/InternLM-XComposer/finetune/finetune.py", line 313, in <module>
    train()
  File "/cfs/cfs-1vhb8svx/hkj/InternLM-XComposer/finetune/finetune.py", line 303, in train
    trainer.train()
  File "/data/miniconda3/envs/intern_clean/lib/python3.9/site-packages/transformers/trainer.py", line 1885, in train
    return inner_training_loop(
  File "/data/miniconda3/envs/intern_clean/lib/python3.9/site-packages/transformers/trainer.py", line 2216, in _inner_training_loop
    tr_loss_step = self.training_step(model, inputs)
  File "/data/miniconda3/envs/intern_clean/lib/python3.9/site-packages/transformers/trainer.py", line 3238, in training_step
    loss = self.compute_loss(model, inputs)
  File "/data/miniconda3/envs/intern_clean/lib/python3.9/site-packages/transformers/trainer.py", line 3264, in compute_loss
    outputs = model(**inputs)
  File "/data/miniconda3/envs/intern_clean/lib/python3.9/site-packages/torch/nn/modules/module.py", line 1501, in _call_impl
    return forward_call(*args, **kwargs)
  File "/data/miniconda3/envs/intern_clean/lib/python3.9/site-packages/deepspeed/utils/nvtx.py", line 15, in wrapped_fn
    ret_val = func(*args, **kwargs)
  File "/data/miniconda3/envs/intern_clean/lib/python3.9/site-packages/deepspeed/runtime/engine.py", line 1852, in forward
    loss = self.module(*inputs, **kwargs)
  File "/data/miniconda3/envs/intern_clean/lib/python3.9/site-packages/torch/nn/modules/module.py", line 1501, in _call_impl
    return forward_call(*args, **kwargs)
  File "/root/.cache/huggingface/modules/transformers_modules/internlm-xcomposer2-vl-7b/modeling_internlm_xcomposer2.py", line 337, in forward
    to_regress_embeds, attention_mask, targets, im_mask = self.interleav_wrap(
  File "/root/.cache/huggingface/modules/transformers_modules/internlm-xcomposer2-vl-7b/modeling_internlm_xcomposer2.py", line 213, in interleav_wrap
    img_embeds, atts_img, img_target = self.img2emb(image)
  File "/root/.cache/huggingface/modules/transformers_modules/internlm-xcomposer2-vl-7b/modeling_internlm_xcomposer2.py", line 122, in img2emb
    img_embeds = self.vision_proj(self.vit(image.to(self.device)))
  File "/data/miniconda3/envs/intern_clean/lib/python3.9/site-packages/torch/nn/modules/module.py", line 1501, in _call_impl
    return forward_call(*args, **kwargs)
  File "/root/.cache/huggingface/modules/transformers_modules/internlm-xcomposer2-vl-7b/build_mlp.py", line 132, in forward
    image_forward_outs = self.vision_tower(
  File "/data/miniconda3/envs/intern_clean/lib/python3.9/site-packages/torch/nn/modules/module.py", line 1501, in _call_impl
    return forward_call(*args, **kwargs)
  File "/data/miniconda3/envs/intern_clean/lib/python3.9/site-packages/transformers/models/clip/modeling_clip.py", line 926, in forward
    return self.vision_model(
  File "/data/miniconda3/envs/intern_clean/lib/python3.9/site-packages/torch/nn/modules/module.py", line 1501, in _call_impl
    return forward_call(*args, **kwargs)
  File "/data/miniconda3/envs/intern_clean/lib/python3.9/site-packages/transformers/models/clip/modeling_clip.py", line 853, in forward
    encoder_outputs = self.encoder(
  File "/data/miniconda3/envs/intern_clean/lib/python3.9/site-packages/torch/nn/modules/module.py", line 1501, in _call_impl
    return forward_call(*args, **kwargs)
  File "/data/miniconda3/envs/intern_clean/lib/python3.9/site-packages/transformers/models/clip/modeling_clip.py", line 630, in forward
    layer_outputs = self._gradient_checkpointing_func(
  File "/data/miniconda3/envs/intern_clean/lib/python3.9/site-packages/torch/nn/modules/module.py", line 1614, in __getattr__
    raise AttributeError("'{}' object has no attribute '{}'".format(
AttributeError: 'CLIPEncoder' object has no attribute '_gradient_checkpointing_func'