RuntimeError: Input type (torch.FloatTensor) and weight type (torch.HalfTensor) should be the same or input should be a MKLDNN tensor and weight is a dense tensor #45
Prompt executed in 1.56 seconds
got prompt
!!! Exception during processing !!! Input type (torch.FloatTensor) and weight type (torch.HalfTensor) should be the same or input should be a MKLDNN tensor and weight is a dense tensor
Traceback (most recent call last):
File "L:\AIgongju\ComfyUI-aki-v1.1\execution.py", line 323, in execute
output_data, output_ui, has_subgraph = get_output_data(obj, input_data_all, execution_block_cb=execution_block_cb, pre_execute_cb=pre_execute_cb)
File "L:\AIgongju\ComfyUI-aki-v1.1\execution.py", line 198, in get_output_data
return_values = _map_node_over_list(obj, input_data_all, obj.FUNCTION, allow_interrupt=True, execution_block_cb=execution_block_cb, pre_execute_cb=pre_execute_cb)
File "L:\AIgongju\ComfyUI-aki-v1.1\execution.py", line 169, in _map_node_over_list
process_inputs(input_dict, i)
File "L:\AIgongju\ComfyUI-aki-v1.1\execution.py", line 158, in process_inputs
results.append(getattr(obj, func)(**inputs))
File "L:\AIgongju\ComfyUI-aki-v1.1\custom_nodes\ComfyUI_SLK_joy_caption_two\joy_caption_two_node.py", line 341, in generate
vision_outputs = joy_two_pipeline.clip_model.encode_image(pixel_values)
File "L:\AIgongju\ComfyUI-aki-v1.1\custom_nodes\ComfyUI_SLK_joy_caption_two\joy_caption_two_node.py", line 68, in encode_image
vision_outputs = self.model(pixel_values=pixel_values, output_hidden_states=True)
File "L:\AIgongju\ComfyUI-aki-v1.1\python\lib\site-packages\torch\nn\modules\module.py", line 1553, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "L:\AIgongju\ComfyUI-aki-v1.1\python\lib\site-packages\torch\nn\modules\module.py", line 1562, in _call_impl
return forward_call(*args, **kwargs)
File "L:\AIgongju\ComfyUI-aki-v1.1\python\lib\site-packages\transformers\models\siglip\modeling_siglip.py", line 1088, in forward
hidden_states = self.embeddings(pixel_values, interpolate_pos_encoding=interpolate_pos_encoding)
File "L:\AIgongju\ComfyUI-aki-v1.1\python\lib\site-packages\torch\nn\modules\module.py", line 1553, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "L:\AIgongju\ComfyUI-aki-v1.1\python\lib\site-packages\torch\nn\modules\module.py", line 1562, in _call_impl
return forward_call(*args, **kwargs)
File "L:\AIgongju\ComfyUI-aki-v1.1\python\lib\site-packages\transformers\models\siglip\modeling_siglip.py", line 311, in forward
patch_embeds = self.patch_embedding(pixel_values)  # shape = [*, width, grid, grid]
File "L:\AIgongju\ComfyUI-aki-v1.1\python\lib\site-packages\torch\nn\modules\module.py", line 1553, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "L:\AIgongju\ComfyUI-aki-v1.1\python\lib\site-packages\torch\nn\modules\module.py", line 1562, in _call_impl
return forward_call(*args, **kwargs)
File "L:\AIgongju\ComfyUI-aki-v1.1\python\lib\site-packages\torch\nn\modules\conv.py", line 458, in forward
return self._conv_forward(input, self.weight, self.bias)
File "L:\AIgongju\ComfyUI-aki-v1.1\python\lib\site-packages\torch\nn\modules\conv.py", line 454, in _conv_forward
return F.conv2d(input, weight, bias, self.stride,
RuntimeError: Input type (torch.FloatTensor) and weight type (torch.HalfTensor) should be the same or input should be a MKLDNN tensor and weight is a dense tensor
Prompt executed in 1.56 seconds got prompt !!! Exception during processing !!! Input type (torch.FloatTensor) and weight type (torch.HalfTensor) should be the same or input should be a MKLDNN tensor and weight is a dense tensor Traceback (most recent call last): File "L:\AIgongju\ComfyUI-aki-v1.1\execution.py", line 323, in execute output_data, output_ui, has_subgraph = get_output_data(obj, input_data_all, execution_block_cb=execution_block_cb, pre_execute_cb=pre_execute_cb) File "L:\AIgongju\ComfyUI-aki-v1.1\execution.py", line 198, in get_output_data return_values = _map_node_over_list(obj, input_data_all, obj.FUNCTION, allow_interrupt=True, execution_block_cb=execution_block_cb, pre_execute_cb=pre_execute_cb) File "L:\AIgongju\ComfyUI-aki-v1.1\execution.py", line 169, in _map_node_over_list process_inputs(input_dict, i) File "L:\AIgongju\ComfyUI-aki-v1.1\execution.py", line 158, in process_inputs results.append(getattr(obj, func)(**inputs)) File "L:\AIgongju\ComfyUI-aki-v1.1\custom_nodes\ComfyUI_SLK_joy_caption_two\joy_caption_two_node.py", line 341, in generate vision_outputs = joy_two_pipeline.clip_model.encode_image(pixel_values) File "L:\AIgongju\ComfyUI-aki-v1.1\custom_nodes\ComfyUI_SLK_joy_caption_two\joy_caption_two_node.py", line 68, in encode_image vision_outputs = self.model(pixel_values=pixel_values, output_hidden_states=True) File "L:\AIgongju\ComfyUI-aki-v1.1\python\lib\site-packages\torch\nn\modules\module.py", line 1553, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File "L:\AIgongju\ComfyUI-aki-v1.1\python\lib\site-packages\torch\nn\modules\module.py", line 1562, in _call_impl return forward_call(*args, **kwargs) File "L:\AIgongju\ComfyUI-aki-v1.1\python\lib\site-packages\transformers\models\siglip\modeling_siglip.py", line 1088, in forward hidden_states = self.embeddings(pixel_values, interpolate_pos_encoding=interpolate_pos_encoding) File "L:\AIgongju\ComfyUI-aki-v1.1\python\lib\site-packages\torch\nn\modules\module.py", line 1553, in
_wrapped_call_impl return self._call_impl(*args, **kwargs) File "L:\AIgongju\ComfyUI-aki-v1.1\python\lib\site-packages\torch\nn\modules\module.py", line 1562, in _call_impl return forward_call(*args, **kwargs) File "L:\AIgongju\ComfyUI-aki-v1.1\python\lib\site-packages\transformers\models\siglip\modeling_siglip.py", line 311, in forward patch_embeds = self.patch_embedding(pixel_values) # shape = [*, width, grid, grid] File "L:\AIgongju\ComfyUI-aki-v1.1\python\lib\site-packages\torch\nn\modules\module.py", line 1553, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File "L:\AIgongju\ComfyUI-aki-v1.1\python\lib\site-packages\torch\nn\modules\module.py", line 1562, in _call_impl return forward_call(*args, **kwargs) File "L:\AIgongju\ComfyUI-aki-v1.1\python\lib\site-packages\torch\nn\modules\conv.py", line 458, in forward return self._conv_forward(input, self.weight, self.bias) File "L:\AIgongju\ComfyUI-aki-v1.1\python\lib\site-packages\torch\nn\modules\conv.py", line 454, in _conv_forward return F.conv2d(input, weight, bias, self.stride, RuntimeError: Input type (torch.FloatTensor) and weight type (torch.HalfTensor) should be the same or input should be a MKLDNN tensor and weight is a dense tensor