Custom ComfyUI nodes for Vision Language Models, Large Language Models, Image to Music, Text to Music, Consistent and Random Creative Prompt Generation
Apache License 2.0
308
stars
24
forks
source link
Solved: After the update got an error with LLaVA #19
[WinError 2] The system cannot find the file specified: 'W:\ComfyUI_4ALL\ComfyUI\custom_nodes\ComfyUI-Frame-Interpolation\nvrtc_dlls\bin'
File "W:\ComfyUI_4ALL\ComfyUI\execution.py", line 152, in recursive_execute
output_data, output_ui = get_output_data(obj, input_data_all)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "W:\ComfyUI_4ALL\ComfyUI\execution.py", line 82, in get_output_data
return_values = map_node_over_list(obj, input_data_all, obj.FUNCTION, allow_interrupt=True)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "W:\ComfyUI_4ALL\ComfyUI\execution.py", line 75, in map_node_over_list
results.append(getattr(obj, func)(**slice_dict(input_data_all, i)))
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "W:\ComfyUI_4ALL\ComfyUI\custom_nodes\ComfyUI_VLM_nodes\nodes\llavaloader.py", line 57, in load_clip_checkpoint
clip = Llava15ChatHandler(clip_model_path = clip_path, verbose=False)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "C:\Users\derec\miniconda3\envs\comfyui\Lib\site-packages\llama_cpp\llama_chat_format.py", line 1064, in init
import llama_cpp.llava_cpp as llava_cpp
File "C:\Users\derec\miniconda3\envs\comfyui\Lib\site-packages\llama_cpp\llava_cpp.py", line 83, in <module>
_libllava = _load_shared_library(_libllava_base_name)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "C:\Users\derec\miniconda3\envs\comfyui\Lib\site-packages\llama_cpp\llava_cpp.py", line 62, in _load_shared_library
os.add_dll_directory(os.path.join(os.environ["CUDA_PATH"], "bin"))
File "<frozen os>", line 1119, in add_dll_directory
Error occurred when executing LlavaClipLoader:
[WinError 2] The system cannot find the file specified: 'W:\ComfyUI_4ALL\ComfyUI\custom_nodes\ComfyUI-Frame-Interpolation\nvrtc_dlls\bin'
File "W:\ComfyUI_4ALL\ComfyUI\execution.py", line 152, in recursive_execute output_data, output_ui = get_output_data(obj, input_data_all) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "W:\ComfyUI_4ALL\ComfyUI\execution.py", line 82, in get_output_data return_values = map_node_over_list(obj, input_data_all, obj.FUNCTION, allow_interrupt=True) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "W:\ComfyUI_4ALL\ComfyUI\execution.py", line 75, in map_node_over_list results.append(getattr(obj, func)(**slice_dict(input_data_all, i))) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "W:\ComfyUI_4ALL\ComfyUI\custom_nodes\ComfyUI_VLM_nodes\nodes\llavaloader.py", line 57, in load_clip_checkpoint clip = Llava15ChatHandler(clip_model_path = clip_path, verbose=False) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "C:\Users\derec\miniconda3\envs\comfyui\Lib\site-packages\llama_cpp\llama_chat_format.py", line 1064, in init import llama_cpp.llava_cpp as llava_cpp File "C:\Users\derec\miniconda3\envs\comfyui\Lib\site-packages\llama_cpp\llava_cpp.py", line 83, in <module> _libllava = _load_shared_library(_libllava_base_name) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "C:\Users\derec\miniconda3\envs\comfyui\Lib\site-packages\llama_cpp\llava_cpp.py", line 62, in _load_shared_library os.add_dll_directory(os.path.join(os.environ["CUDA_PATH"], "bin")) File "<frozen os>", line 1119, in add_dll_directory

![llava](https://github.com/gokayfem/ComfyUI_VLM_nodes/assets/80190186/af1159ee-e696-44dd-bfef-81c803898635)