File "web_demo.py", line 58, in request_model
answer = generate_text_with_image(input_text, image, result_text.copy(), request_para, is_zh)
File "web_demo.py", line 31, in generate_text_with_image
answer, history, _ = chat(None, model, tokenizer, input_text, history=history, image=input_image, \
File "D:\projects\XrayGLM-main\model\chat.py", line 141, in chat
output = filling_sequence(
File "E:\Anaconda3\envs\xglm\lib\site-packages\sat\generation\autoregressive_sampling.py", line 108, in filling_sequence
logits, *output_per_layers = model(
File "E:\Anaconda3\envs\xglm\lib\site-packages\torch\nn\modules\module.py", line 1102, in _call_impl
return forward_call(*input, **kwargs)
File "E:\Anaconda3\envs\xglm\lib\site-packages\sat\model\official\chatglm_model.py", line 192, in forward
return super().forward(input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, **kwargs)
File "E:\Anaconda3\envs\xglm\lib\site-packages\sat\model\base_model.py", line 144, in forward
return self.transformer(*args, **kwargs)
File "E:\Anaconda3\envs\xglm\lib\site-packages\torch\nn\modules\module.py", line 1102, in _call_impl
return forward_call(*input, **kwargs)
File "E:\Anaconda3\envs\xglm\lib\site-packages\sat\model\transformer.py", line 451, in forward
hidden_states = self.hooks['word_embedding_forward'](input_ids, output_cross_layer=output_cross_layer, **kw_args)
File "D:\projects\XrayGLM-main\model\visualglm.py", line 20, in word_embedding_forward
image_emb = self.model(**kw_args)
File "E:\Anaconda3\envs\xglm\lib\site-packages\torch\nn\modules\module.py", line 1102, in _call_impl
return forward_call(*input, **kwargs)
File "D:\projects\XrayGLM-main\model\blip2.py", line 65, in forward
enc = self.vit(image)[0]
File "E:\Anaconda3\envs\xglm\lib\site-packages\torch\nn\modules\module.py", line 1102, in _call_impl
return forward_call(*input, **kwargs)
File "D:\projects\XrayGLM-main\model\blip2.py", line 29, in forward
return super().forward(input_ids=input_ids, position_ids=None, attention_mask=attention_mask, image=image)
File "E:\Anaconda3\envs\xglm\lib\site-packages\sat\model\base_model.py", line 144, in forward
return self.transformer(*args, **kwargs)
File "E:\Anaconda3\envs\xglm\lib\site-packages\torch\nn\modules\module.py", line 1102, in _call_impl
return forward_call(*input, **kwargs)
File "E:\Anaconda3\envs\xglm\lib\site-packages\sat\model\transformer.py", line 451, in forward
hidden_states = self.hooks['word_embedding_forward'](input_ids, output_cross_layer=output_cross_layer, **kw_args)
File "E:\Anaconda3\envs\xglm\lib\site-packages\sat\model\official\vit_model.py", line 54, in word_embedding_forward
embeddings = self.proj(images)
File "E:\Anaconda3\envs\xglm\lib\site-packages\torch\nn\modules\module.py", line 1102, in _call_impl
return forward_call(*input, **kwargs)
File "E:\Anaconda3\envs\xglm\lib\site-packages\torch\nn\modules\conv.py", line 446, in forward
return self._conv_forward(input, self.weight, self.bias)
File "E:\Anaconda3\envs\xglm\lib\site-packages\torch\nn\modules\conv.py", line 442, in _conv_forward
return F.conv2d(input, weight, bias, self.stride,
RuntimeError: "unfolded2d_copy" not implemented for 'Half'
报错详情如上(完整 traceback 见前文)。
环境:Windows 10,Python 3.8.5。已尝试的方案:网上的说法是 PyTorch 在 CPU 上不支持 fp16(半精度)卷积运算,把模型的 `half()` 调用改成 `float()` 即可;但按此修改后仍然出现同样的 `"unfolded2d_copy" not implemented for 'Half'` 报错。请问应如何解决?