Traceback (most recent call last):
File "/root/miniconda3/envs/HunyuanDiT/lib/python3.8/site-packages/gradio/queueing.py", line 527, in process_events
response = await route_utils.call_process_api(
File "/root/miniconda3/envs/HunyuanDiT/lib/python3.8/site-packages/gradio/route_utils.py", line 270, in call_process_api
output = await app.get_blocks().process_api(
File "/root/miniconda3/envs/HunyuanDiT/lib/python3.8/site-packages/gradio/blocks.py", line 1887, in process_api
result = await self.call_function(
File "/root/miniconda3/envs/HunyuanDiT/lib/python3.8/site-packages/gradio/blocks.py", line 1472, in call_function
prediction = await anyio.to_thread.run_sync(
File "/root/miniconda3/envs/HunyuanDiT/lib/python3.8/site-packages/anyio/to_thread.py", line 56, in run_sync
return await get_async_backend().run_sync_in_worker_thread(
File "/root/miniconda3/envs/HunyuanDiT/lib/python3.8/site-packages/anyio/_backends/_asyncio.py", line 2144, in run_sync_in_worker_thread
return await future
File "/root/miniconda3/envs/HunyuanDiT/lib/python3.8/site-packages/anyio/_backends/_asyncio.py", line 851, in run
result = context.run(func, *args)
File "/root/miniconda3/envs/HunyuanDiT/lib/python3.8/site-packages/gradio/utils.py", line 808, in wrapper
response = f(*args, **kwargs)
File "app/hydit_app.py", line 42, in infer
success, enhanced_prompt = enhancer(prompt)
File "/root/autodl-tmp/HunyuanDiT/./dialoggen/dialoggen_demo.py", line 145, in __call__
enhanced_prompt = eval_model(
File "/root/autodl-tmp/HunyuanDiT/./dialoggen/dialoggen_demo.py", line 113, in eval_model
output_ids = models["model"].generate(
File "/root/miniconda3/envs/HunyuanDiT/lib/python3.8/site-packages/torch/autograd/grad_mode.py", line 27, in decorate_context
return func(*args, **kwargs)
File "/root/autodl-tmp/HunyuanDiT/dialoggen/llava/model/language_model/llava_mistral.py", line 125, in generate
) = self.prepare_inputs_labels_for_multimodal(
File "/root/autodl-tmp/HunyuanDiT/dialoggen/llava/model/llava_arch.py", line 157, in prepare_inputs_labels_for_multimodal
image_features = self.encode_images(concat_images)
File "/root/autodl-tmp/HunyuanDiT/dialoggen/llava/model/llava_arch.py", line 141, in encode_images
image_features = self.get_model().get_vision_tower()(images)
File "/root/miniconda3/envs/HunyuanDiT/lib/python3.8/site-packages/torch/nn/modules/module.py", line 1194, in _call_impl
return forward_call(*input, **kwargs)
File "/root/miniconda3/envs/HunyuanDiT/lib/python3.8/site-packages/accelerate/hooks.py", line 166, in new_forward
output = module._old_forward(*args, **kwargs)
File "/root/miniconda3/envs/HunyuanDiT/lib/python3.8/site-packages/torch/autograd/grad_mode.py", line 27, in decorate_context
return func(*args, **kwargs)
File "/root/autodl-tmp/HunyuanDiT/dialoggen/llava/model/multimodal_encoder/clip_encoder.py", line 54, in forward
image_forward_outs = self.vision_tower(images.to(device=self.device, dtype=self.dtype), output_hidden_states=True)
File "/root/miniconda3/envs/HunyuanDiT/lib/python3.8/site-packages/torch/nn/modules/module.py", line 1194, in _call_impl
return forward_call(*input, **kwargs)
File "/root/miniconda3/envs/HunyuanDiT/lib/python3.8/site-packages/accelerate/hooks.py", line 161, in new_forward
args, kwargs = module._hf_hook.pre_forward(module, *args, **kwargs)
File "/root/miniconda3/envs/HunyuanDiT/lib/python3.8/site-packages/accelerate/hooks.py", line 356, in pre_forward
return send_to_device(args, self.execution_device), send_to_device(
File "/root/miniconda3/envs/HunyuanDiT/lib/python3.8/site-packages/accelerate/utils/operations.py", line 180, in send_to_device
return honor_type(
File "/root/miniconda3/envs/HunyuanDiT/lib/python3.8/site-packages/accelerate/utils/operations.py", line 81, in honor_type
return type(obj)(generator)
File "/root/miniconda3/envs/HunyuanDiT/lib/python3.8/site-packages/accelerate/utils/operations.py", line 181, in <genexpr>
tensor, (send_to_device(t, device, non_blocking=non_blocking, skip_keys=skip_keys) for t in tensor)
File "/root/miniconda3/envs/HunyuanDiT/lib/python3.8/site-packages/accelerate/utils/operations.py", line 174, in send_to_device
raise error
File "/root/miniconda3/envs/HunyuanDiT/lib/python3.8/site-packages/accelerate/utils/operations.py", line 158, in send_to_device
return tensor.to(device, non_blocking=non_blocking)
NotImplementedError: Cannot copy out of meta tensor; no data!
==================================================================
运行以下命令,修改了 server_port=8192,share=False
python app/hydit_app.py
==================================================================
环境
sys.platform: linux Python: 3.8.12 (default, Oct 12 2021, 13:49:34) [GCC 7.5.0] CUDA available: True MUSA available: False numpy_random_seed: 2147483648 GPU 0: NVIDIA GeForce RTX 3090 CUDA_HOME: /usr/local/cuda NVCC: Cuda compilation tools, release 11.8, V11.8.89 GCC: gcc (Ubuntu 11.3.0-1ubuntu1~22.04) 11.3.0 PyTorch: 1.13.1 PyTorch compiling details: PyTorch built with:
TorchVision: 0.14.1+cu117
==================================================================
错误
Traceback (most recent call last): File "/root/miniconda3/envs/HunyuanDiT/lib/python3.8/site-packages/gradio/queueing.py", line 527, in process_events response = await route_utils.call_process_api( File "/root/miniconda3/envs/HunyuanDiT/lib/python3.8/site-packages/gradio/route_utils.py", line 270, in call_process_api output = await app.get_blocks().process_api( File "/root/miniconda3/envs/HunyuanDiT/lib/python3.8/site-packages/gradio/blocks.py", line 1887, in process_api result = await self.call_function( File "/root/miniconda3/envs/HunyuanDiT/lib/python3.8/site-packages/gradio/blocks.py", line 1472, in call_function prediction = await anyio.to_thread.run_sync( File "/root/miniconda3/envs/HunyuanDiT/lib/python3.8/site-packages/anyio/to_thread.py", line 56, in run_sync return await get_async_backend().run_sync_in_worker_thread( File "/root/miniconda3/envs/HunyuanDiT/lib/python3.8/site-packages/anyio/_backends/_asyncio.py", line 2144, in run_sync_in_worker_thread return await future File "/root/miniconda3/envs/HunyuanDiT/lib/python3.8/site-packages/anyio/_backends/_asyncio.py", line 851, in run result = context.run(func, args) File "/root/miniconda3/envs/HunyuanDiT/lib/python3.8/site-packages/gradio/utils.py", line 808, in wrapper response = f(args, kwargs) File "app/hydit_app.py", line 42, in infer success, enhanced_prompt = enhancer(prompt) File "/root/autodl-tmp/HunyuanDiT/./dialoggen/dialoggen_demo.py", line 145, in call enhanced_prompt = eval_model( File "/root/autodl-tmp/HunyuanDiT/./dialoggen/dialoggen_demo.py", line 113, in eval_model output_ids = models["model"].generate( File "/root/miniconda3/envs/HunyuanDiT/lib/python3.8/site-packages/torch/autograd/grad_mode.py", line 27, in decorate_context return func(*args, *kwargs) File "/root/autodl-tmp/HunyuanDiT/dialoggen/llava/model/language_model/llava_mistral.py", line 125, in generate ) = self.prepare_inputs_labels_for_multimodal( File "/root/autodl-tmp/HunyuanDiT/dialoggen/llava/model/llava_arch.py", line 
157, in prepare_inputs_labels_for_multimodal image_features = self.encode_images(concat_images) File "/root/autodl-tmp/HunyuanDiT/dialoggen/llava/model/llava_arch.py", line 141, in encode_images image_features = self.get_model().get_vision_tower()(images) File "/root/miniconda3/envs/HunyuanDiT/lib/python3.8/site-packages/torch/nn/modules/module.py", line 1194, in _call_impl return forward_call(input, kwargs) File "/root/miniconda3/envs/HunyuanDiT/lib/python3.8/site-packages/accelerate/hooks.py", line 166, in new_forward output = module._old_forward(*args, kwargs) File "/root/miniconda3/envs/HunyuanDiT/lib/python3.8/site-packages/torch/autograd/grad_mode.py", line 27, in decorate_context return func(*args, *kwargs) File "/root/autodl-tmp/HunyuanDiT/dialoggen/llava/model/multimodal_encoder/clip_encoder.py", line 54, in forward image_forward_outs = self.vision_tower(images.to(device=self.device, dtype=self.dtype), output_hidden_states=True) File "/root/miniconda3/envs/HunyuanDiT/lib/python3.8/site-packages/torch/nn/modules/module.py", line 1194, in _call_impl return forward_call(input, kwargs) File "/root/miniconda3/envs/HunyuanDiT/lib/python3.8/site-packages/accelerate/hooks.py", line 161, in new_forward args, kwargs = module._hf_hook.pre_forward(module, *args, **kwargs) File "/root/miniconda3/envs/HunyuanDiT/lib/python3.8/site-packages/accelerate/hooks.py", line 356, in pre_forward return send_to_device(args, self.execution_device), send_to_device( File "/root/miniconda3/envs/HunyuanDiT/lib/python3.8/site-packages/accelerate/utils/operations.py", line 180, in send_to_device return honor_type( File "/root/miniconda3/envs/HunyuanDiT/lib/python3.8/site-packages/accelerate/utils/operations.py", line 81, in honor_type return type(obj)(generator) File "/root/miniconda3/envs/HunyuanDiT/lib/python3.8/site-packages/accelerate/utils/operations.py", line 181, in
tensor, (send_to_device(t, device, non_blocking=non_blocking, skip_keys=skip_keys) for t in tensor)
File "/root/miniconda3/envs/HunyuanDiT/lib/python3.8/site-packages/accelerate/utils/operations.py", line 174, in send_to_device
raise error
File "/root/miniconda3/envs/HunyuanDiT/lib/python3.8/site-packages/accelerate/utils/operations.py", line 158, in send_to_device
return tensor.to(device, non_blocking=non_blocking)
NotImplementedError: Cannot copy out of meta tensor; no data!