跑web_demo.py时,提交图片后什么都没出来,检测窗口只有error
报错:
To create a public link, set share=True in launch().
[!] image path: /tmp/tmptscxx1ly.png
[!] normal image path: /tmp/tmpg0dmfofa.png
Traceback (most recent call last):
File "/home/shawuxin/dat01/soft/new/anaconda3/envs/py38_env/lib/python3.8/site-packages/gradio/routes.py", line 394, in run_predict
output = await app.get_blocks().process_api(
File "/home/shawuxin/dat01/soft/new/anaconda3/envs/py38_env/lib/python3.8/site-packages/gradio/blocks.py", line 1075, in process_api
result = await self.call_function(
File "/home/shawuxin/dat01/soft/new/anaconda3/envs/py38_env/lib/python3.8/site-packages/gradio/blocks.py", line 884, in call_function
prediction = await anyio.to_thread.run_sync(
File "/home/shawuxin/dat01/soft/new/anaconda3/envs/py38_env/lib/python3.8/site-packages/anyio/to_thread.py", line 33, in run_sync
return await get_asynclib().run_sync_in_worker_thread(
File "/home/shawuxin/dat01/soft/new/anaconda3/envs/py38_env/lib/python3.8/site-packages/anyio/_backends/_asyncio.py", line 877, in run_sync_in_worker_thread
return await future
File "/home/shawuxin/dat01/soft/new/anaconda3/envs/py38_env/lib/python3.8/site-packages/anyio/_backends/_asyncio.py", line 807, in run
result = context.run(func, *args)
File "web_demo.py", line 115, in predict
response, pixel_output = model.generate({
File "/project/shawuxin/soft/AnomalyGPT/code/model/openllama.py", line 712, in generate
input_embeds, pixel_output = self.prepare_generation_embedding(inputs, web_demo)
File "/project/shawuxin/soft/AnomalyGPT/code/model/openllama.py", line 658, in prepare_generation_embedding
feature_embeds, anomaly_map = self.extract_multimodal_feature(inputs, web_demo)
File "/project/shawuxin/soft/AnomalyGPT/code/model/openllama.py", line 599, in extract_multimodal_feature
image_embeds, _, patch_tokens = self.encode_image_for_web_demo(inputs['image_paths'])
File "/project/shawuxin/soft/AnomalyGPT/code/model/openllama.py", line 278, in encode_image_for_web_demo
embeddings = self.visual_encoder(inputs)
File "/home/shawuxin/dat01/soft/new/anaconda3/envs/py38_env/lib/python3.8/site-packages/torch/nn/modules/module.py", line 1194, in _call_impl
return forward_call(*input, **kwargs)
File "/project/shawuxin/soft/AnomalyGPT/code/model/ImageBind/models/imagebind_model.py", line 462, in forward
modality_value = self.modality_preprocessors[modality_key](
File "/home/shawuxin/dat01/soft/new/anaconda3/envs/py38_env/lib/python3.8/site-packages/torch/nn/modules/module.py", line 1194, in _call_impl
return forward_call(*input, **kwargs)
File "/project/shawuxin/soft/AnomalyGPT/code/model/ImageBind/models/multimodal_preprocessors.py", line 278, in forward
vision_tokens = self.tokenize_input_and_cls_pos(
File "/project/shawuxin/soft/AnomalyGPT/code/model/ImageBind/models/multimodal_preprocessors.py", line 257, in tokenize_input_and_cls_pos
tokens = stem(input)
File "/home/shawuxin/dat01/soft/new/anaconda3/envs/py38_env/lib/python3.8/site-packages/torch/nn/modules/module.py", line 1194, in _call_impl
return forward_call(*input, **kwargs)
File "/project/shawuxin/soft/AnomalyGPT/code/model/ImageBind/models/multimodal_preprocessors.py", line 152, in forward
x = self.proj(x)
File "/home/shawuxin/dat01/soft/new/anaconda3/envs/py38_env/lib/python3.8/site-packages/torch/nn/modules/module.py", line 1194, in _call_impl
return forward_call(*input, **kwargs)
File "/home/shawuxin/dat01/soft/new/anaconda3/envs/py38_env/lib/python3.8/site-packages/torch/nn/modules/container.py", line 204, in forward
input = module(input)
File "/home/shawuxin/dat01/soft/new/anaconda3/envs/py38_env/lib/python3.8/site-packages/torch/nn/modules/module.py", line 1194, in _call_impl
return forward_call(*input, **kwargs)
File "/home/shawuxin/dat01/soft/new/anaconda3/envs/py38_env/lib/python3.8/site-packages/torch/nn/modules/conv.py", line 613, in forward
return self._conv_forward(input, self.weight, self.bias)
File "/home/shawuxin/dat01/soft/new/anaconda3/envs/py38_env/lib/python3.8/site-packages/torch/nn/modules/conv.py", line 608, in _conv_forward
return F.conv3d(
RuntimeError: cuDNN error: CUDNN_STATUS_NOT_INITIALIZED
跑web_demo.py时,提交图片后什么都没出来,检测窗口只有error 报错: To create a public link, set share=True in launch()
. [!] image path: /tmp/tmptscxx1ly.png [!] normal image path: /tmp/tmpg0dmfofa.pngTraceback (most recent call last): File "/home/shawuxin/dat01/soft/new/anaconda3/envs/py38_env/lib/python3.8/site-packages/gradio/routes.py", line 394, in run_predict output = await app.get_blocks().process_api( File "/home/shawuxin/dat01/soft/new/anaconda3/envs/py38_env/lib/python3.8/site-packages/gradio/blocks.py", line 1075, in process_api result = await self.call_function( File "/home/shawuxin/dat01/soft/new/anaconda3/envs/py38_env/lib/python3.8/site-packages/gradio/blocks.py", line 884, in call_function prediction = await anyio.to_thread.run_sync( File "/home/shawuxin/dat01/soft/new/anaconda3/envs/py38_env/lib/python3.8/site-packages/anyio/to_thread.py", line 33, in run_sync return await get_asynclib().run_sync_in_worker_thread( File "/home/shawuxin/dat01/soft/new/anaconda3/envs/py38_env/lib/python3.8/site-packages/anyio/_backends/_asyncio.py", line 877, in run_sync_in_worker_thread return await future File "/home/shawuxin/dat01/soft/new/anaconda3/envs/py38_env/lib/python3.8/site-packages/anyio/_backends/_asyncio.py", line 807, in run result = context.run(func, args) File "web_demo.py", line 115, in predict response, pixel_output = model.generate({ File "/project/shawuxin/soft/AnomalyGPT/code/model/openllama.py", line 712, in generate input_embeds, pixel_output = self.prepare_generation_embedding(inputs, web_demo) File "/project/shawuxin/soft/AnomalyGPT/code/model/openllama.py", line 658, in prepare_generation_embedding feature_embeds, anomaly_map = self.extract_multimodal_feature(inputs, web_demo) File "/project/shawuxin/soft/AnomalyGPT/code/model/openllama.py", line 599, in extract_multimodal_feature imageembeds, , patch_tokens = self.encode_image_for_web_demo(inputs['image_paths']) File "/project/shawuxin/soft/AnomalyGPT/code/model/openllama.py", line 278, in encode_image_for_web_demo embeddings = self.visual_encoder(inputs) File 
"/home/shawuxin/dat01/soft/new/anaconda3/envs/py38_env/lib/python3.8/site-packages/torch/nn/modules/module.py", line 1194, in _call_impl return forward_call(input, kwargs) File "/project/shawuxin/soft/AnomalyGPT/code/model/ImageBind/models/imagebind_model.py", line 462, in forward modality_value = self.modality_preprocessors[modality_key]( File "/home/shawuxin/dat01/soft/new/anaconda3/envs/py38_env/lib/python3.8/site-packages/torch/nn/modules/module.py", line 1194, in _call_impl return forward_call(*input, *kwargs) File "/project/shawuxin/soft/AnomalyGPT/code/model/ImageBind/models/multimodal_preprocessors.py", line 278, in forward vision_tokens = self.tokenize_input_and_cls_pos( File "/project/shawuxin/soft/AnomalyGPT/code/model/ImageBind/models/multimodal_preprocessors.py", line 257, in tokenize_input_and_cls_pos tokens = stem(input) File "/home/shawuxin/dat01/soft/new/anaconda3/envs/py38_env/lib/python3.8/site-packages/torch/nn/modules/module.py", line 1194, in _call_impl return forward_call(input, kwargs) File "/project/shawuxin/soft/AnomalyGPT/code/model/ImageBind/models/multimodal_preprocessors.py", line 152, in forward x = self.proj(x) File "/home/shawuxin/dat01/soft/new/anaconda3/envs/py38_env/lib/python3.8/site-packages/torch/nn/modules/module.py", line 1194, in _call_impl return forward_call(*input, *kwargs) File "/home/shawuxin/dat01/soft/new/anaconda3/envs/py38_env/lib/python3.8/site-packages/torch/nn/modules/container.py", line 204, in forward input = module(input) File "/home/shawuxin/dat01/soft/new/anaconda3/envs/py38_env/lib/python3.8/site-packages/torch/nn/modules/module.py", line 1194, in _call_impl return forward_call(input, **kwargs) File "/home/shawuxin/dat01/soft/new/anaconda3/envs/py38_env/lib/python3.8/site-packages/torch/nn/modules/conv.py", line 613, in forward return self._conv_forward(input, self.weight, self.bias) File "/home/shawuxin/dat01/soft/new/anaconda3/envs/py38_env/lib/python3.8/site-packages/torch/nn/modules/conv.py", line 608, 
in _conv_forward return F.conv3d( RuntimeError: cuDNN error: CUDNN_STATUS_NOT_INITIALIZED