Hangover3832 / ComfyUI-Hangover-Moondream

Moondream is a lightweight multimodal large language model
https://github.com/Hangover3832/ComfyUI-Hangover-Moondream
Apache License 2.0

Last UPDATE comfyui 20.03.24 #7

Closed: ffdown closed this issue 5 months ago

ffdown commented 6 months ago

CPU

Error occurred when executing Moondream:

The size of tensor a (750) must match the size of tensor b (751) at non-singleton dimension 1

File "E:\AI\ComfyUI_windows_portable\ComfyUI\execution.py", line 151, in recursive_execute
output_data, output_ui = get_output_data(obj, input_data_all)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "E:\AI\ComfyUI_windows_portable\ComfyUI\execution.py", line 81, in get_output_data
return_values = map_node_over_list(obj, input_data_all, obj.FUNCTION, allow_interrupt=True)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "E:\AI\ComfyUI_windows_portable\ComfyUI\custom_nodes\ComfyUI-0246\utils.py", line 381, in new_func
res_value = old_func(*final_args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "E:\AI\ComfyUI_windows_portable\ComfyUI\execution.py", line 65, in map_node_over_list
results.append(getattr(obj, func)(**input_data_all))
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "E:\AI\ComfyUI_windows_portable\ComfyUI\custom_nodes\comfyui-moondream\nodes\MoondreamNode.py", line 121, in run
res=self.moondream.answer_question(image_embeds, question,self.tokenizer)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "E:\AI\ComfyUI_windows_portable\ComfyUI\custom_nodes\comfyui-moondream\nodes\MoondreamNode.py\../..\moondream\moondream.py", line 93, in answer_question
answer = self.generate(
^^^^^^^^^^^^^^
File "E:\AI\ComfyUI_windows_portable\ComfyUI\custom_nodes\comfyui-moondream\nodes\MoondreamNode.py\../..\moondream\moondream.py", line 77, in generate
output_ids = self.text_model.generate(
^^^^^^^^^^^^^^^^^^^^^^^^^
File "E:\AI\ComfyUI_windows_portable\python_embeded\Lib\site-packages\torch\utils\_contextlib.py", line 115, in decorate_context
return func(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^
File "E:\AI\ComfyUI_windows_portable\python_embeded\Lib\site-packages\transformers\generation\utils.py", line 1544, in generate
device=inputs_tensor.device,
^^^^^^^^^^^^^^^^^^^^^^^^^
File "E:\AI\ComfyUI_windows_portable\python_embeded\Lib\site-packages\transformers\generation\utils.py", line 2404, in greedy_search
if this_peer_finished and not synced_gpus:
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "E:\AI\ComfyUI_windows_portable\python_embeded\Lib\site-packages\torch\nn\modules\module.py", line 1511, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "E:\AI\ComfyUI_windows_portable\python_embeded\Lib\site-packages\torch\nn\modules\module.py", line 1520, in _call_impl
return forward_call(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "E:\AI\ComfyUI_windows_portable\ComfyUI\custom_nodes\comfyui-moondream\nodes\MoondreamNode.py\../..\moondream\modeling_phi.py", line 709, in forward
hidden_states = self.transformer(
^^^^^^^^^^^^^^^^^
File "E:\AI\ComfyUI_windows_portable\python_embeded\Lib\site-packages\torch\nn\modules\module.py", line 1511, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "E:\AI\ComfyUI_windows_portable\python_embeded\Lib\site-packages\torch\nn\modules\module.py", line 1520, in _call_impl
return forward_call(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "E:\AI\ComfyUI_windows_portable\ComfyUI\custom_nodes\comfyui-moondream\nodes\MoondreamNode.py\../..\moondream\modeling_phi.py", line 675, in forward
else func(*args)
^^^^^^^^^^^
File "E:\AI\ComfyUI_windows_portable\python_embeded\Lib\site-packages\torch\nn\modules\module.py", line 1511, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "E:\AI\ComfyUI_windows_portable\python_embeded\Lib\site-packages\torch\nn\modules\module.py", line 1520, in _call_impl
return forward_call(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "E:\AI\ComfyUI_windows_portable\ComfyUI\custom_nodes\comfyui-moondream\nodes\MoondreamNode.py\../..\moondream\modeling_phi.py", line 541, in forward
attn_outputs = self.mixer(
^^^^^^^^^^^
File "E:\AI\ComfyUI_windows_portable\python_embeded\Lib\site-packages\torch\nn\modules\module.py", line 1511, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "E:\AI\ComfyUI_windows_portable\python_embeded\Lib\site-packages\torch\nn\modules\module.py", line 1520, in _call_impl
return forward_call(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "E:\AI\ComfyUI_windows_portable\ComfyUI\custom_nodes\comfyui-moondream\nodes\MoondreamNode.py\../..\moondream\modeling_phi.py", line 514, in forward
attn_output_function(x, past_key_values, attention_mask)
File "E:\AI\ComfyUI_windows_portable\ComfyUI\custom_nodes\comfyui-moondream\nodes\MoondreamNode.py\../..\moondream\modeling_phi.py", line 494, in _forward_cross_attn
return attn_func(
^^^^^^^^^^
File "E:\AI\ComfyUI_windows_portable\ComfyUI\custom_nodes\comfyui-moondream\nodes\MoondreamNode.py\../..\moondream\modeling_phi.py", line 491, in
else lambda fn, *args, **kwargs: fn(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^
File "E:\AI\ComfyUI_windows_portable\python_embeded\Lib\site-packages\torch\nn\modules\module.py", line 1511, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "E:\AI\ComfyUI_windows_portable\python_embeded\Lib\site-packages\torch\nn\modules\module.py", line 1520, in _call_impl
return forward_call(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "E:\AI\ComfyUI_windows_portable\python_embeded\Lib\site-packages\torch\amp\autocast_mode.py", line 16, in decorate_autocast
return func(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^
File "E:\AI\ComfyUI_windows_portable\python_embeded\Lib\site-packages\torch\amp\autocast_mode.py", line 16, in decorate_autocast
return func(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^
File "E:\AI\ComfyUI_windows_portable\ComfyUI\custom_nodes\comfyui-moondream\nodes\MoondreamNode.py\../..\moondream\modeling_phi.py", line 318, in forward
padding_mask.masked_fill_(key_padding_mask, 0.0)

CUDA

Error occurred when executing Moondream:

The expanded size of the tensor (750) must match the existing size (751) at non-singleton dimension 1. Target sizes: [1, 750]. Tensor sizes: [1, 751]

File "E:\AI\ComfyUI_windows_portable\ComfyUI\execution.py", line 151, in recursive_execute
output_data, output_ui = get_output_data(obj, input_data_all)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "E:\AI\ComfyUI_windows_portable\ComfyUI\execution.py", line 81, in get_output_data
return_values = map_node_over_list(obj, input_data_all, obj.FUNCTION, allow_interrupt=True)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "E:\AI\ComfyUI_windows_portable\ComfyUI\custom_nodes\ComfyUI-0246\utils.py", line 381, in new_func
res_value = old_func(*final_args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "E:\AI\ComfyUI_windows_portable\ComfyUI\execution.py", line 65, in map_node_over_list
results.append(getattr(obj, func)(**input_data_all))
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "E:\AI\ComfyUI_windows_portable\ComfyUI\custom_nodes\comfyui-moondream\nodes\MoondreamNode.py", line 121, in run
res=self.moondream.answer_question(image_embeds, question,self.tokenizer)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "E:\AI\ComfyUI_windows_portable\ComfyUI\custom_nodes\comfyui-moondream\nodes\MoondreamNode.py\../..\moondream\moondream.py", line 93, in answer_question
answer = self.generate(
^^^^^^^^^^^^^^
File "E:\AI\ComfyUI_windows_portable\ComfyUI\custom_nodes\comfyui-moondream\nodes\MoondreamNode.py\../..\moondream\moondream.py", line 77, in generate
output_ids = self.text_model.generate(
^^^^^^^^^^^^^^^^^^^^^^^^^
File "E:\AI\ComfyUI_windows_portable\python_embeded\Lib\site-packages\torch\utils\_contextlib.py", line 115, in decorate_context
return func(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^
File "E:\AI\ComfyUI_windows_portable\python_embeded\Lib\site-packages\transformers\generation\utils.py", line 1544, in generate
device=inputs_tensor.device,
^^^^^^^^^^^^^^^^^^^^^^^^^
File "E:\AI\ComfyUI_windows_portable\python_embeded\Lib\site-packages\transformers\generation\utils.py", line 2404, in greedy_search
if this_peer_finished and not synced_gpus:
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "E:\AI\ComfyUI_windows_portable\python_embeded\Lib\site-packages\torch\nn\modules\module.py", line 1511, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "E:\AI\ComfyUI_windows_portable\python_embeded\Lib\site-packages\torch\nn\modules\module.py", line 1520, in _call_impl
return forward_call(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "E:\AI\ComfyUI_windows_portable\ComfyUI\custom_nodes\comfyui-moondream\nodes\MoondreamNode.py\../..\moondream\modeling_phi.py", line 709, in forward
hidden_states = self.transformer(
^^^^^^^^^^^^^^^^^
File "E:\AI\ComfyUI_windows_portable\python_embeded\Lib\site-packages\torch\nn\modules\module.py", line 1511, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "E:\AI\ComfyUI_windows_portable\python_embeded\Lib\site-packages\torch\nn\modules\module.py", line 1520, in _call_impl
return forward_call(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "E:\AI\ComfyUI_windows_portable\ComfyUI\custom_nodes\comfyui-moondream\nodes\MoondreamNode.py\../..\moondream\modeling_phi.py", line 675, in forward
else func(*args)
^^^^^^^^^^^
File "E:\AI\ComfyUI_windows_portable\python_embeded\Lib\site-packages\torch\nn\modules\module.py", line 1511, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "E:\AI\ComfyUI_windows_portable\python_embeded\Lib\site-packages\torch\nn\modules\module.py", line 1520, in _call_impl
return forward_call(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "E:\AI\ComfyUI_windows_portable\ComfyUI\custom_nodes\comfyui-moondream\nodes\MoondreamNode.py\../..\moondream\modeling_phi.py", line 541, in forward
attn_outputs = self.mixer(
^^^^^^^^^^^
File "E:\AI\ComfyUI_windows_portable\python_embeded\Lib\site-packages\torch\nn\modules\module.py", line 1511, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "E:\AI\ComfyUI_windows_portable\python_embeded\Lib\site-packages\torch\nn\modules\module.py", line 1520, in _call_impl
return forward_call(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "E:\AI\ComfyUI_windows_portable\ComfyUI\custom_nodes\comfyui-moondream\nodes\MoondreamNode.py\../..\moondream\modeling_phi.py", line 514, in forward
attn_output_function(x, past_key_values, attention_mask)
File "E:\AI\ComfyUI_windows_portable\ComfyUI\custom_nodes\comfyui-moondream\nodes\MoondreamNode.py\../..\moondream\modeling_phi.py", line 494, in _forward_cross_attn
return attn_func(
^^^^^^^^^^
File "E:\AI\ComfyUI_windows_portable\ComfyUI\custom_nodes\comfyui-moondream\nodes\MoondreamNode.py\../..\moondream\modeling_phi.py", line 491, in
else lambda fn, *args, **kwargs: fn(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^
File "E:\AI\ComfyUI_windows_portable\python_embeded\Lib\site-packages\torch\nn\modules\module.py", line 1511, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "E:\AI\ComfyUI_windows_portable\python_embeded\Lib\site-packages\torch\nn\modules\module.py", line 1520, in _call_impl
return forward_call(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "E:\AI\ComfyUI_windows_portable\python_embeded\Lib\site-packages\torch\amp\autocast_mode.py", line 16, in decorate_autocast
return func(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^
File "E:\AI\ComfyUI_windows_portable\python_embeded\Lib\site-packages\torch\amp\autocast_mode.py", line 16, in decorate_autocast
return func(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^
File "E:\AI\ComfyUI_windows_portable\ComfyUI\custom_nodes\comfyui-moondream\nodes\MoondreamNode.py\../..\moondream\modeling_phi.py", line 318, in forward
padding_mask.masked_fill_(key_padding_mask, 0.0)
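
For reference, both tracebacks end at the same `padding_mask.masked_fill_(key_padding_mask, 0.0)` call in modeling_phi.py: the key padding mask carries one more position (751) than the tensor it is applied to (750), so PyTorch cannot broadcast one onto the other. The sketch below is only a minimal reproduction of that shape mismatch using the sizes reported in the tracebacks; it is not taken from the node's code, and the exact wording of the raised error can differ between CPU and CUDA builds.

```python
import torch

# Hypothetical shapes lifted from the traceback: the padding mask covers
# 750 positions, but the key padding mask carries 751 positions.
padding_mask = torch.zeros(1, 750)
key_padding_mask = torch.ones(1, 751, dtype=torch.bool)

try:
    # masked_fill_ has to broadcast the mask to the tensor's shape;
    # 751 cannot be broadcast to 750, so this raises a size-mismatch
    # RuntimeError like the ones reported above.
    padding_mask.masked_fill_(key_padding_mask, 0.0)
except RuntimeError as e:
    print(e)
```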
Hangover3832 commented 6 months ago

Which model do you use?