```
Error occurred when executing DiffusersCLIPLoader:
Allocation on device 0 would exceed allowed memory. (out of memory)
Currently allocated : 7.27 GiB
Requested : 16.00 MiB
Device limit : 8.00 GiB
Free (according to CUDA): 0 bytes
PyTorch limit (set by user-supplied memory fraction) : 17179869184.00 GiB
File "E:\AI\Fooocus\fooocuspy\SimpleSDXL_dev\SimpleSDXL2_win\SimpleSDXL\comfy\execution.py", line 151, in recursive_execute
output_data, output_ui = get_output_data(obj, input_data_all)
File "E:\AI\Fooocus\fooocuspy\SimpleSDXL_dev\SimpleSDXL2_win\SimpleSDXL\comfy\execution.py", line 81, in get_output_data
return_values = map_node_over_list(obj, input_data_all, obj.FUNCTION, allow_interrupt=True)
File "E:\AI\Fooocus\fooocuspy\SimpleSDXL_dev\SimpleSDXL2_win\SimpleSDXL\comfy\execution.py", line 74, in map_node_over_list
results.append(getattr(obj, func)(*slice_dict(input_data_all, i)))
File "E:\AI\Fooocus\fooocuspy\SimpleSDXL_dev\SimpleSDXL2_win\SimpleSDXL\comfy\custom_nodes\Comfyui-HunyuanDiT\nodes.py", line 163, in load_clip
out = CLIP(False, root, CLIP_PATH, t5_file)
File "E:\AI\Fooocus\fooocuspy\SimpleSDXL_dev\SimpleSDXL2_win\SimpleSDXL\comfy\custom_nodes\Comfyui-HunyuanDiT\clip.py", line 54, in init
embedder_t5 = MT5Embedder(t5_text_encoder_path, torch_dtype=torch.float16, max_length=256, ksampler = True)
File "E:\AI\Fooocus\fooocuspy\SimpleSDXL_dev\SimpleSDXL2_win\SimpleSDXL\comfy\custom_nodes\Comfyui-HunyuanDiT\hydit_v1_1\modules\text_encoder.py", line 69, in init
self.model.eval().to(self.device)
File "E:\AI\Fooocus\fooocuspy\SimpleSDXL_dev\SimpleSDXL2_win\python_embeded\lib\site-packages\transformers\modeling_utils.py", line 2595, in to
return super().to(*args, **kwargs)
File "E:\AI\Fooocus\fooocuspy\SimpleSDXL_dev\SimpleSDXL2_win\python_embeded\lib\site-packages\torch\nn\modules\module.py", line 1152, in to
return self._apply(convert)
File "E:\AI\Fooocus\fooocuspy\SimpleSDXL_dev\SimpleSDXL2_win\python_embeded\lib\site-packages\torch\nn\modules\module.py", line 802, in _apply
module._apply(fn)
File "E:\AI\Fooocus\fooocuspy\SimpleSDXL_dev\SimpleSDXL2_win\python_embeded\lib\site-packages\torch\nn\modules\module.py", line 802, in _apply
module._apply(fn)
File "E:\AI\Fooocus\fooocuspy\SimpleSDXL_dev\SimpleSDXL2_win\python_embeded\lib\site-packages\torch\nn\modules\module.py", line 802, in _apply
module._apply(fn)
[Previous line repeated 4 more times]
File "E:\AI\Fooocus\fooocuspy\SimpleSDXL_dev\SimpleSDXL2_win\python_embeded\lib\site-packages\torch\nn\modules\module.py", line 825, in _apply
param_applied = fn(param)
File "E:\AI\Fooocus\fooocuspy\SimpleSDXL_dev\SimpleSDXL2_win\python_embeded\lib\site-packages\torch\nn\modules\module.py", line 1150, in convert
return t.to(device, dtype if t.is_floating_point() or t.is_complex() else None, non_blocking)
```
Could an option be added to load the text encoder onto the CPU, the way ComfyUI_ExtraModels allows?
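Something along these lines is what I have in mind. This is only a minimal sketch: the `MT5EmbedderSketch` class name, the `device` parameter, and the fp32-on-CPU fallback are my own assumptions, not the actual Comfyui-HunyuanDiT code.

```python
import torch
from transformers import AutoTokenizer, MT5EncoderModel


class MT5EmbedderSketch:
    def __init__(self, model_dir, torch_dtype=torch.float16, max_length=256,
                 device="cuda"):
        # Hypothetical `device` argument so the encoder can stay in system RAM.
        self.device = torch.device(device)
        # fp16 is slow / poorly supported on CPU, so fall back to fp32 there.
        self.torch_dtype = torch_dtype if self.device.type == "cuda" else torch.float32
        self.max_length = max_length
        self.tokenizer = AutoTokenizer.from_pretrained(model_dir)
        self.model = MT5EncoderModel.from_pretrained(model_dir, torch_dtype=self.torch_dtype)
        # This is the step that currently OOMs because the device is always the GPU.
        self.model.eval().to(self.device)

    @torch.no_grad()
    def encode(self, texts):
        tokens = self.tokenizer(
            texts,
            padding="max_length",
            truncation=True,
            max_length=self.max_length,
            return_tensors="pt",
        ).to(self.device)
        # The resulting embeddings are small and can be moved to the GPU afterwards.
        return self.model(**tokens).last_hidden_state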
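```

With `device="cpu"` the multi-gigabyte mT5 weights would never touch VRAM, and only the small embedding tensor would need to reach the GPU, which should leave enough headroom on an 8 GB card.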