ComfyUI Error Report
Error Details
Exception Message: Input image size (352*352) doesn't match model (224*224).
Stack Trace
File "C:\StabilityMatrix\Data\Packages\ComfyUI\execution.py", line 323, in execute
output_data, output_ui, has_subgraph = get_output_data(obj, input_data_all, execution_block_cb=execution_block_cb, pre_execute_cb=pre_execute_cb)
File "C:\StabilityMatrix\Data\Packages\ComfyUI\execution.py", line 198, in get_output_data
return_values = _map_node_over_list(obj, input_data_all, obj.FUNCTION, allow_interrupt=True, execution_block_cb=execution_block_cb, pre_execute_cb=pre_execute_cb)
File "C:\StabilityMatrix\Data\Packages\ComfyUI\execution.py", line 169, in _map_node_over_list
process_inputs(input_dict, i)
File "C:\StabilityMatrix\Data\Packages\ComfyUI\execution.py", line 158, in process_inputs
results.append(getattr(obj, func)(**inputs))
File "C:\StabilityMatrix\Data\Packages\ComfyUI\custom_nodes\clipseg.py", line 127, in segment_image
outputs = model(**input_prc)
File "C:\StabilityMatrix\Data\Packages\ComfyUI\venv\lib\site-packages\torch\nn\modules\module.py", line 1736, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "C:\StabilityMatrix\Data\Packages\ComfyUI\venv\lib\site-packages\torch\nn\modules\module.py", line 1747, in _call_impl
return forward_call(*args, **kwargs)
File "C:\StabilityMatrix\Data\Packages\ComfyUI\venv\lib\site-packages\transformers\models\clipseg\modeling_clipseg.py", line 1436, in forward
vision_outputs = self.clip.vision_model(
File "C:\StabilityMatrix\Data\Packages\ComfyUI\venv\lib\site-packages\torch\nn\modules\module.py", line 1736, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "C:\StabilityMatrix\Data\Packages\ComfyUI\venv\lib\site-packages\torch\nn\modules\module.py", line 1747, in _call_impl
return forward_call(*args, **kwargs)
File "C:\StabilityMatrix\Data\Packages\ComfyUI\venv\lib\site-packages\transformers\models\clipseg\modeling_clipseg.py", line 870, in forward
hidden_states = self.embeddings(pixel_values, interpolate_pos_encoding=interpolate_pos_encoding)
File "C:\StabilityMatrix\Data\Packages\ComfyUI\venv\lib\site-packages\torch\nn\modules\module.py", line 1736, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "C:\StabilityMatrix\Data\Packages\ComfyUI\venv\lib\site-packages\torch\nn\modules\module.py", line 1747, in _call_impl
return forward_call(*args, **kwargs)
File "C:\StabilityMatrix\Data\Packages\ComfyUI\venv\lib\site-packages\transformers\models\clipseg\modeling_clipseg.py", line 211, in forward
raise ValueError(
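The failing call is inside the CLIPSeg custom node: clipseg.py line 127 runs model(**input_prc) after the CLIPSeg processor has already resized the image to 352*352, while the vision model's configured input size is 224*224. The traceback shows this transformers build threading an interpolate_pos_encoding flag down to the vision embeddings (modeling_clipseg.py line 870), which suggests one possible workaround, sketched below under the assumption that the installed CLIPSegForImageSegmentation.forward accepts that keyword; pinning transformers to an older release that interpolated position encodings automatically is the other option.

# Sketch of a possible local edit in custom_nodes\clipseg.py, around line 127.
# "model" and "input_prc" are the names visible in the traceback above; whether the extra keyword
# is accepted depends on the installed transformers version (it is the same flag seen being passed
# through modeling_clipseg.py line 870 above). This is a workaround sketch, not the node author's fix.
outputs = model(**input_prc, interpolate_pos_encoding=True)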
Logs
2024-10-29 11:21:52,277 - root - INFO - Total VRAM 4096 MB, total RAM 7919 MB
2024-10-29 11:21:52,277 - root - INFO - pytorch version: 2.5.1+cu124
2024-10-29 11:21:52,279 - root - INFO - Set vram state to: NORMAL_VRAM
2024-10-29 11:21:52,279 - root - INFO - Device: cuda:0 NVIDIA GeForce RTX 3050 Laptop GPU : cudaMallocAsync
2024-10-29 11:21:54,167 - root - INFO - Using pytorch cross attention
2024-10-29 11:21:55,934 - root - INFO - [Prompt Server] web root: C:\StabilityMatrix\Data\Packages\ComfyUI\web
2024-10-29 11:21:55,939 - root - INFO - Adding extra search path checkpoints C:\StabilityMatrix\Data\Models\StableDiffusion
2024-10-29 11:21:55,940 - root - INFO - Adding extra search path vae C:\StabilityMatrix\Data\Models\VAE
2024-10-29 11:21:55,940 - root - INFO - Adding extra search path loras C:\StabilityMatrix\Data\Models\Lora
2024-10-29 11:21:55,940 - root - INFO - Adding extra search path loras C:\StabilityMatrix\Data\Models\LyCORIS
2024-10-29 11:21:55,940 - root - INFO - Adding extra search path upscale_models C:\StabilityMatrix\Data\Models\ESRGAN
2024-10-29 11:21:55,940 - root - INFO - Adding extra search path upscale_models C:\StabilityMatrix\Data\Models\RealESRGAN
2024-10-29 11:21:55,940 - root - INFO - Adding extra search path upscale_models C:\StabilityMatrix\Data\Models\SwinIR
2024-10-29 11:21:55,940 - root - INFO - Adding extra search path embeddings C:\StabilityMatrix\Data\Models\TextualInversion
2024-10-29 11:21:55,940 - root - INFO - Adding extra search path hypernetworks C:\StabilityMatrix\Data\Models\Hypernetwork
2024-10-29 11:21:55,940 - root - INFO - Adding extra search path controlnet C:\StabilityMatrix\Data\Models\ControlNet
2024-10-29 11:21:55,940 - root - INFO - Adding extra search path controlnet C:\StabilityMatrix\Data\Models\T2IAdapter
2024-10-29 11:21:55,940 - root - INFO - Adding extra search path clip C:\StabilityMatrix\Data\Models\CLIP
2024-10-29 11:21:55,940 - root - INFO - Adding extra search path clip_vision C:\StabilityMatrix\Data\Models\InvokeClipVision
2024-10-29 11:21:55,941 - root - INFO - Adding extra search path diffusers C:\StabilityMatrix\Data\Models\Diffusers
2024-10-29 11:21:55,941 - root - INFO - Adding extra search path gligen C:\StabilityMatrix\Data\Models\GLIGEN
2024-10-29 11:21:55,941 - root - INFO - Adding extra search path vae_approx C:\StabilityMatrix\Data\Models\ApproxVAE
2024-10-29 11:21:55,941 - root - INFO - Adding extra search path ipadapter C:\StabilityMatrix\Data\Models\IpAdapter
2024-10-29 11:21:55,941 - root - INFO - Adding extra search path ipadapter C:\StabilityMatrix\Data\Models\InvokeIpAdapters15
2024-10-29 11:21:55,941 - root - INFO - Adding extra search path ipadapter C:\StabilityMatrix\Data\Models\InvokeIpAdaptersXl
2024-10-29 11:21:55,941 - root - INFO - Adding extra search path prompt_expansion C:\StabilityMatrix\Data\Models\PromptExpansion
2024-10-29 11:21:55,941 - root - INFO - Adding extra search path ultralytics C:\StabilityMatrix\Data\Models\Ultralytics
2024-10-29 11:21:55,941 - root - INFO - Adding extra search path ultralytics_bbox C:\StabilityMatrix\Data\Models\Ultralytics\bbox
2024-10-29 11:21:55,941 - root - INFO - Adding extra search path ultralytics_segm C:\StabilityMatrix\Data\Models\Ultralytics\segm
2024-10-29 11:21:55,941 - root - INFO - Adding extra search path sams C:\StabilityMatrix\Data\Models\Sams
2024-10-29 11:21:55,941 - root - INFO - Adding extra search path diffusion_models C:\StabilityMatrix\Data\Models\unet
2024-10-29 11:22:01,781 - root - INFO - Total VRAM 4096 MB, total RAM 7919 MB
2024-10-29 11:22:01,781 - root - INFO - pytorch version: 2.5.1+cu124
2024-10-29 11:22:01,782 - root - INFO - Set vram state to: NORMAL_VRAM
2024-10-29 11:22:01,783 - root - INFO - Device: cuda:0 NVIDIA GeForce RTX 3050 Laptop GPU : cudaMallocAsync
2024-10-29 11:22:03,461 - root - INFO - --------------
2024-10-29 11:22:03,461 - root - INFO - ### Mixlab Nodes: Loaded
2024-10-29 11:22:03,464 - root - INFO - ChatGPT.available False
2024-10-29 11:22:03,465 - root - INFO - editmask.available True
2024-10-29 11:22:04,501 - root - INFO - ClipInterrogator.available True
2024-10-29 11:22:04,752 - root - INFO - PromptGenerate.available True
2024-10-29 11:22:04,752 - root - INFO - ChinesePrompt.available True
2024-10-29 11:22:04,752 - root - INFO - RembgNode.available True
2024-10-29 11:22:05,363 - root - INFO - TripoSR.available
2024-10-29 11:22:05,364 - root - INFO - MiniCPMNode.available
2024-10-29 11:22:05,367 - root - INFO - Scenedetect.available False
2024-10-29 11:22:05,510 - root - INFO - FishSpeech.available False
2024-10-29 11:22:05,514 - root - INFO - SenseVoice.available False
2024-10-29 11:22:05,518 - root - INFO - Whisper.available False
2024-10-29 11:22:05,538 - root - INFO - FalVideo.available
2024-10-29 11:22:05,538 - root - INFO - --------------
2024-10-29 11:22:11,311 - root - INFO - Import times for custom nodes:
2024-10-29 11:22:11,312 - root - INFO - 0.0 seconds: C:\StabilityMatrix\Data\Packages\ComfyUI\custom_nodes\websocket_image_save.py
2024-10-29 11:22:11,312 - root - INFO - 0.0 seconds: C:\StabilityMatrix\Data\Packages\ComfyUI\custom_nodes\ComfyUI_IPAdapter_plus
2024-10-29 11:22:11,312 - root - INFO - 0.0 seconds: C:\StabilityMatrix\Data\Packages\ComfyUI\custom_nodes\sdxl_prompt_styler
2024-10-29 11:22:11,312 - root - INFO - 0.0 seconds: C:\StabilityMatrix\Data\Packages\ComfyUI\custom_nodes\stability-ComfyUI-nodes
2024-10-29 11:22:11,312 - root - INFO - 0.0 seconds: C:\StabilityMatrix\Data\Packages\ComfyUI\custom_nodes\efficiency-nodes-comfyui
2024-10-29 11:22:11,312 - root - INFO - 0.0 seconds: C:\StabilityMatrix\Data\Packages\ComfyUI\custom_nodes\cg-use-everywhere
2024-10-29 11:22:11,312 - root - INFO - 0.0 seconds: C:\StabilityMatrix\Data\Packages\ComfyUI\custom_nodes\ComfyUI-Universal-Styler
2024-10-29 11:22:11,312 - root - INFO - 0.0 seconds: C:\StabilityMatrix\Data\Packages\ComfyUI\custom_nodes\ComfyUI_Gemini_Flash
2024-10-29 11:22:11,313 - root - INFO - 0.0 seconds: C:\StabilityMatrix\Data\Packages\ComfyUI\custom_nodes\masquerade-nodes-comfyui
2024-10-29 11:22:11,313 - root - INFO - 0.0 seconds: C:\StabilityMatrix\Data\Packages\ComfyUI\custom_nodes\ComfyUi_PromptStylers
2024-10-29 11:22:11,313 - root - INFO - 0.0 seconds: C:\StabilityMatrix\Data\Packages\ComfyUI\custom_nodes\ComfyUI-YOLO
2024-10-29 11:22:11,313 - root - INFO - 0.0 seconds: C:\StabilityMatrix\Data\Packages\ComfyUI\custom_nodes\ComfyUI_IPAdapter_plus.old
2024-10-29 11:22:11,313 - root - INFO - 0.0 seconds: C:\StabilityMatrix\Data\Packages\ComfyUI\custom_nodes\ComfyUI-WD14-Tagger
2024-10-29 11:22:11,313 - root - INFO - 0.0 seconds: C:\StabilityMatrix\Data\Packages\ComfyUI\custom_nodes\comfyui-photoshop
2024-10-29 11:22:11,313 - root - INFO - 0.0 seconds: C:\StabilityMatrix\Data\Packages\ComfyUI\custom_nodes\ComfyLiterals
2024-10-29 11:22:11,313 - root - INFO - 0.0 seconds: C:\StabilityMatrix\Data\Packages\ComfyUI\custom_nodes\comfyui_controlnet_aux
2024-10-29 11:22:11,313 - root - INFO - 0.0 seconds: C:\StabilityMatrix\Data\Packages\ComfyUI\custom_nodes\comfyui-copilot
2024-10-29 11:22:11,313 - root - INFO - 0.0 seconds: C:\StabilityMatrix\Data\Packages\ComfyUI\custom_nodes\ComfyUI_JPS-Nodes
2024-10-29 11:22:11,313 - root - INFO - 0.0 seconds: C:\StabilityMatrix\Data\Packages\ComfyUI\custom_nodes\comfyui-stablesr
2024-10-29 11:22:11,313 - root - INFO - 0.0 seconds: C:\StabilityMatrix\Data\Packages\ComfyUI\custom_nodes\ComfyUI-Impact-Pack
2024-10-29 11:22:11,314 - root - INFO - 0.0 seconds: C:\StabilityMatrix\Data\Packages\ComfyUI\custom_nodes\ComfyUI-Custom-Scripts
2024-10-29 11:22:11,314 - root - INFO - 0.0 seconds: C:\StabilityMatrix\Data\Packages\ComfyUI\custom_nodes\ComfyMath
2024-10-29 11:22:11,314 - root - INFO - 0.0 seconds: C:\StabilityMatrix\Data\Packages\ComfyUI\custom_nodes\Derfuu_ComfyUI_ModdedNodes
2024-10-29 11:22:11,314 - root - INFO - 0.0 seconds: C:\StabilityMatrix\Data\Packages\ComfyUI\custom_nodes\comfyui-various
2024-10-29 11:22:11,314 - root - INFO - 0.0 seconds: C:\StabilityMatrix\Data\Packages\ComfyUI\custom_nodes\ComfyUI_essentials
2024-10-29 11:22:11,314 - root - INFO - 0.0 seconds: C:\StabilityMatrix\Data\Packages\ComfyUI\custom_nodes\ComfyUI_UltimateSDUpscale
2024-10-29 11:22:11,314 - root - INFO - 0.0 seconds: C:\StabilityMatrix\Data\Packages\ComfyUI\custom_nodes\rgthree-comfy
2024-10-29 11:22:11,314 - root - INFO - 0.0 seconds: C:\StabilityMatrix\Data\Packages\ComfyUI\custom_nodes\ComfyUI-KJNodes-main
2024-10-29 11:22:11,314 - root - INFO - 0.0 seconds: C:\StabilityMatrix\Data\Packages\ComfyUI\custom_nodes\ComfyUI-GGUF
2024-10-29 11:22:11,314 - root - INFO - 0.0 seconds: C:\StabilityMatrix\Data\Packages\ComfyUI\custom_nodes\ComfyUI-Advanced-ControlNet
2024-10-29 11:22:11,314 - root - INFO - 0.0 seconds: C:\StabilityMatrix\Data\Packages\ComfyUI\custom_nodes\ComfyUI_Comfyroll_CustomNodes
2024-10-29 11:22:11,314 - root - INFO - 0.1 seconds: C:\StabilityMatrix\Data\Packages\ComfyUI\custom_nodes\ComfyUI-KJNodes
2024-10-29 11:22:11,314 - root - INFO - 0.1 seconds: C:\StabilityMatrix\Data\Packages\ComfyUI\custom_nodes\comfy_mtb
2024-10-29 11:22:11,314 - root - INFO - 0.1 seconds: C:\StabilityMatrix\Data\Packages\ComfyUI\custom_nodes\Comfyui-ergouzi-Nodes
2024-10-29 11:22:11,315 - root - INFO - 0.1 seconds: C:\StabilityMatrix\Data\Packages\ComfyUI\custom_nodes\ComfyUI_LayerStyle
2024-10-29 11:22:11,315 - root - INFO - 0.2 seconds: C:\StabilityMatrix\Data\Packages\ComfyUI\custom_nodes\eden_comfy_pipelines
2024-10-29 11:22:11,315 - root - INFO - 0.4 seconds: C:\StabilityMatrix\Data\Packages\ComfyUI\custom_nodes\ComfyUI-Gemini
2024-10-29 11:22:11,315 - root - INFO - 0.5 seconds: C:\StabilityMatrix\Data\Packages\ComfyUI\custom_nodes\ComfyUI-Crystools
2024-10-29 11:22:11,315 - root - INFO - 0.8 seconds: C:\StabilityMatrix\Data\Packages\ComfyUI\custom_nodes\ComfyUI-Manager
2024-10-29 11:22:11,315 - root - INFO - 0.8 seconds: C:\StabilityMatrix\Data\Packages\ComfyUI\custom_nodes\clipseg.py
2024-10-29 11:22:11,315 - root - INFO - 0.8 seconds: C:\StabilityMatrix\Data\Packages\ComfyUI\custom_nodes\ComfyUI-Easy-Use
2024-10-29 11:22:11,315 - root - INFO - 1.0 seconds: C:\StabilityMatrix\Data\Packages\ComfyUI\custom_nodes\ComfyUI-SUPIR
2024-10-29 11:22:11,315 - root - INFO - 1.1 seconds: C:\StabilityMatrix\Data\Packages\ComfyUI\custom_nodes\comfyui-art-venture
2024-10-29 11:22:11,315 - root - INFO - 1.1 seconds: C:\StabilityMatrix\Data\Packages\ComfyUI\custom_nodes\ComfyUI-Allor
2024-10-29 11:22:11,315 - root - INFO - 2.1 seconds: C:\StabilityMatrix\Data\Packages\ComfyUI\custom_nodes\ComfyUI-YoloWorld-EfficientSAM
2024-10-29 11:22:11,315 - root - INFO - 2.2 seconds: C:\StabilityMatrix\Data\Packages\ComfyUI\custom_nodes\was-node-suite-comfyui
2024-10-29 11:22:11,315 - root - INFO - 2.9 seconds: C:\StabilityMatrix\Data\Packages\ComfyUI\custom_nodes\comfyui-mixlab-nodes
2024-10-29 11:22:11,315 - root - INFO -
2024-10-29 11:22:11,348 - root - INFO - Starting server
2024-10-29 11:22:11,349 - root - INFO - To see the GUI go to: http://127.0.0.1:8188
2024-10-29 11:23:20,514 - root - INFO - got prompt
2024-10-29 11:23:36,196 - root - INFO - model weight dtype torch.float16, manual cast: None
2024-10-29 11:23:36,263 - root - INFO - model_type EPS
2024-10-29 11:23:40,782 - root - INFO - Using pytorch attention in VAE
2024-10-29 11:23:40,814 - root - INFO - Using pytorch attention in VAE
2024-10-29 11:23:42,538 - root - INFO - loaded straight to GPU
2024-10-29 11:23:42,538 - root - INFO - Requested to load BaseModel
2024-10-29 11:23:42,538 - root - INFO - Loading 1 new model
2024-10-29 11:23:42,594 - root - INFO - loaded completely 0.0 1639.406135559082 True
2024-10-29 11:23:42,804 - root - INFO - Requested to load SD1ClipModel
2024-10-29 11:23:42,804 - root - INFO - Loading 1 new model
2024-10-29 11:23:43,179 - root - INFO - loaded completely 0.0 235.84423828125 True
2024-10-29 11:23:44,218 - root - INFO - Using pytorch attention in VAE
2024-10-29 11:23:44,222 - root - INFO - Using pytorch attention in VAE
2024-10-29 11:23:49,892 - root - INFO - Requested to load CLIPVisionModelProjection
2024-10-29 11:23:49,893 - root - INFO - Loading 1 new model
2024-10-29 11:24:06,321 - root - INFO - loaded completely 0.0 1208.09814453125 True
2024-10-29 11:24:19,982 - root - INFO - Requested to load AutoencoderKL
2024-10-29 11:24:19,999 - root - INFO - Loading 1 new model
2024-10-29 11:24:23,251 - root - INFO - loaded completely 0.0 159.55708122253418 True
2024-10-29 11:24:28,836 - root - ERROR - !!! Exception during processing !!! Input image size (352*352) doesn't match model (224*224).
2024-10-29 11:24:28,878 - root - ERROR - Traceback (most recent call last):
File "C:\StabilityMatrix\Data\Packages\ComfyUI\execution.py", line 323, in execute
output_data, output_ui, has_subgraph = get_output_data(obj, input_data_all, execution_block_cb=execution_block_cb, pre_execute_cb=pre_execute_cb)
File "C:\StabilityMatrix\Data\Packages\ComfyUI\execution.py", line 198, in get_output_data
return_values = _map_node_over_list(obj, input_data_all, obj.FUNCTION, allow_interrupt=True, execution_block_cb=execution_block_cb, pre_execute_cb=pre_execute_cb)
File "C:\StabilityMatrix\Data\Packages\ComfyUI\execution.py", line 169, in _map_node_over_list
process_inputs(input_dict, i)
File "C:\StabilityMatrix\Data\Packages\ComfyUI\execution.py", line 158, in process_inputs
results.append(getattr(obj, func)(**inputs))
File "C:\StabilityMatrix\Data\Packages\ComfyUI\custom_nodes\clipseg.py", line 127, in segment_image
outputs = model(**input_prc)
File "C:\StabilityMatrix\Data\Packages\ComfyUI\venv\lib\site-packages\torch\nn\modules\module.py", line 1736, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "C:\StabilityMatrix\Data\Packages\ComfyUI\venv\lib\site-packages\torch\nn\modules\module.py", line 1747, in _call_impl
return forward_call(*args, **kwargs)
File "C:\StabilityMatrix\Data\Packages\ComfyUI\venv\lib\site-packages\transformers\models\clipseg\modeling_clipseg.py", line 1436, in forward
vision_outputs = self.clip.vision_model(
File "C:\StabilityMatrix\Data\Packages\ComfyUI\venv\lib\site-packages\torch\nn\modules\module.py", line 1736, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "C:\StabilityMatrix\Data\Packages\ComfyUI\venv\lib\site-packages\torch\nn\modules\module.py", line 1747, in _call_impl
return forward_call(*args, **kwargs)
File "C:\StabilityMatrix\Data\Packages\ComfyUI\venv\lib\site-packages\transformers\models\clipseg\modeling_clipseg.py", line 870, in forward
hidden_states = self.embeddings(pixel_values, interpolate_pos_encoding=interpolate_pos_encoding)
File "C:\StabilityMatrix\Data\Packages\ComfyUI\venv\lib\site-packages\torch\nn\modules\module.py", line 1736, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "C:\StabilityMatrix\Data\Packages\ComfyUI\venv\lib\site-packages\torch\nn\modules\module.py", line 1747, in _call_impl
return forward_call(*args, **kwargs)
File "C:\StabilityMatrix\Data\Packages\ComfyUI\venv\lib\site-packages\transformers\models\clipseg\modeling_clipseg.py", line 211, in forward
raise ValueError(
ValueError: Input image size (352*352) doesn't match model (224*224).
2024-10-29 11:24:28,899 - root - INFO - Prompt executed in 68.34 seconds
2024-10-29 11:25:42,810 - root - INFO - got prompt
2024-10-29 11:25:53,158 - root - INFO - Requested to load SD1ClipModel
2024-10-29 11:25:53,161 - root - INFO - Loading 1 new model
2024-10-29 11:25:54,069 - root - INFO - loaded completely 0.0 235.84423828125 True
2024-10-29 11:25:55,048 - root - INFO - loaded completely 1386.798677253723 1208.09814453125 True
2024-10-29 11:25:59,724 - root - ERROR - !!! Exception during processing !!! Input image size (352*352) doesn't match model (224*224).
2024-10-29 11:25:59,724 - root - ERROR - Traceback (most recent call last):
File "C:\StabilityMatrix\Data\Packages\ComfyUI\execution.py", line 323, in execute
output_data, output_ui, has_subgraph = get_output_data(obj, input_data_all, execution_block_cb=execution_block_cb, pre_execute_cb=pre_execute_cb)
File "C:\StabilityMatrix\Data\Packages\ComfyUI\execution.py", line 198, in get_output_data
return_values = _map_node_over_list(obj, input_data_all, obj.FUNCTION, allow_interrupt=True, execution_block_cb=execution_block_cb, pre_execute_cb=pre_execute_cb)
File "C:\StabilityMatrix\Data\Packages\ComfyUI\execution.py", line 169, in _map_node_over_list
process_inputs(input_dict, i)
File "C:\StabilityMatrix\Data\Packages\ComfyUI\execution.py", line 158, in process_inputs
results.append(getattr(obj, func)(**inputs))
File "C:\StabilityMatrix\Data\Packages\ComfyUI\custom_nodes\clipseg.py", line 127, in segment_image
outputs = model(**input_prc)
File "C:\StabilityMatrix\Data\Packages\ComfyUI\venv\lib\site-packages\torch\nn\modules\module.py", line 1736, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "C:\StabilityMatrix\Data\Packages\ComfyUI\venv\lib\site-packages\torch\nn\modules\module.py", line 1747, in _call_impl
return forward_call(*args, **kwargs)
File "C:\StabilityMatrix\Data\Packages\ComfyUI\venv\lib\site-packages\transformers\models\clipseg\modeling_clipseg.py", line 1436, in forward
vision_outputs = self.clip.vision_model(
File "C:\StabilityMatrix\Data\Packages\ComfyUI\venv\lib\site-packages\torch\nn\modules\module.py", line 1736, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "C:\StabilityMatrix\Data\Packages\ComfyUI\venv\lib\site-packages\torch\nn\modules\module.py", line 1747, in _call_impl
return forward_call(*args, **kwargs)
File "C:\StabilityMatrix\Data\Packages\ComfyUI\venv\lib\site-packages\transformers\models\clipseg\modeling_clipseg.py", line 870, in forward
hidden_states = self.embeddings(pixel_values, interpolate_pos_encoding=interpolate_pos_encoding)
File "C:\StabilityMatrix\Data\Packages\ComfyUI\venv\lib\site-packages\torch\nn\modules\module.py", line 1736, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "C:\StabilityMatrix\Data\Packages\ComfyUI\venv\lib\site-packages\torch\nn\modules\module.py", line 1747, in _call_impl
return forward_call(*args, **kwargs)
File "C:\StabilityMatrix\Data\Packages\ComfyUI\venv\lib\site-packages\transformers\models\clipseg\modeling_clipseg.py", line 211, in forward
raise ValueError(
ValueError: Input image size (352*352) doesn't match model (224*224).
2024-10-29 11:25:59,732 - root - INFO - Prompt executed in 16.87 seconds
2024-10-29 11:26:39,031 - root - INFO - got prompt
2024-10-29 11:26:39,434 - root - INFO - got prompt
2024-10-29 11:27:00,370 - root - INFO - Requested to load SD1ClipModel
2024-10-29 11:27:00,379 - root - INFO - Loading 1 new model
2024-10-29 11:27:02,082 - root - INFO - loaded completely 0.0 235.84423828125 True
2024-10-29 11:27:02,420 - root - INFO - Requested to load CLIPVisionModelProjection
2024-10-29 11:27:02,420 - root - INFO - Loading 1 new model
2024-10-29 11:27:04,563 - root - INFO - loaded completely 0.0 1208.09814453125 True
2024-10-29 11:27:09,787 - root - ERROR - !!! Exception during processing !!! Input image size (352*352) doesn't match model (224*224).
2024-10-29 11:27:09,803 - root - ERROR - Traceback (most recent call last):
File "C:\StabilityMatrix\Data\Packages\ComfyUI\execution.py", line 323, in execute
output_data, output_ui, has_subgraph = get_output_data(obj, input_data_all, execution_block_cb=execution_block_cb, pre_execute_cb=pre_execute_cb)
File "C:\StabilityMatrix\Data\Packages\ComfyUI\execution.py", line 198, in get_output_data
return_values = _map_node_over_list(obj, input_data_all, obj.FUNCTION, allow_interrupt=True, execution_block_cb=execution_block_cb, pre_execute_cb=pre_execute_cb)
File "C:\StabilityMatrix\Data\Packages\ComfyUI\execution.py", line 169, in _map_node_over_list
process_inputs(input_dict, i)
File "C:\StabilityMatrix\Data\Packages\ComfyUI\execution.py", line 158, in process_inputs
results.append(getattr(obj, func)(**inputs))
File "C:\StabilityMatrix\Data\Packages\ComfyUI\custom_nodes\clipseg.py", line 127, in segment_image
outputs = model(**input_prc)
File "C:\StabilityMatrix\Data\Packages\ComfyUI\venv\lib\site-packages\torch\nn\modules\module.py", line 1736, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "C:\StabilityMatrix\Data\Packages\ComfyUI\venv\lib\site-packages\torch\nn\modules\module.py", line 1747, in _call_impl
return forward_call(*args, **kwargs)
File "C:\StabilityMatrix\Data\Packages\ComfyUI\venv\lib\site-packages\transformers\models\clipseg\modeling_clipseg.py", line 1436, in forward
vision_outputs = self.clip.vision_model(
File "C:\StabilityMatrix\Data\Packages\ComfyUI\venv\lib\site-packages\torch\nn\modules\module.py", line 1736, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "C:\StabilityMatrix\Data\Packages\ComfyUI\venv\lib\site-packages\torch\nn\modules\module.py", line 1747, in _call_impl
return forward_call(*args, **kwargs)
File "C:\StabilityMatrix\Data\Packages\ComfyUI\venv\lib\site-packages\transformers\models\clipseg\modeling_clipseg.py", line 870, in forward
hidden_states = self.embeddings(pixel_values, interpolate_pos_encoding=interpolate_pos_encoding)
File "C:\StabilityMatrix\Data\Packages\ComfyUI\venv\lib\site-packages\torch\nn\modules\module.py", line 1736, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "C:\StabilityMatrix\Data\Packages\ComfyUI\venv\lib\site-packages\torch\nn\modules\module.py", line 1747, in _call_impl
return forward_call(*args, **kwargs)
File "C:\StabilityMatrix\Data\Packages\ComfyUI\venv\lib\site-packages\transformers\models\clipseg\modeling_clipseg.py", line 211, in forward
raise ValueError(
ValueError: Input image size (352*352) doesn't match model (224*224).
2024-10-29 11:27:09,819 - root - INFO - Prompt executed in 30.75 seconds
2024-10-29 11:27:16,011 - root - ERROR - !!! Exception during processing !!! Input image size (352*352) doesn't match model (224*224).
2024-10-29 11:27:16,011 - root - ERROR - Traceback (most recent call last):
File "C:\StabilityMatrix\Data\Packages\ComfyUI\execution.py", line 323, in execute
output_data, output_ui, has_subgraph = get_output_data(obj, input_data_all, execution_block_cb=execution_block_cb, pre_execute_cb=pre_execute_cb)
File "C:\StabilityMatrix\Data\Packages\ComfyUI\execution.py", line 198, in get_output_data
return_values = _map_node_over_list(obj, input_data_all, obj.FUNCTION, allow_interrupt=True, execution_block_cb=execution_block_cb, pre_execute_cb=pre_execute_cb)
File "C:\StabilityMatrix\Data\Packages\ComfyUI\execution.py", line 169, in _map_node_over_list
process_inputs(input_dict, i)
File "C:\StabilityMatrix\Data\Packages\ComfyUI\execution.py", line 158, in process_inputs
results.append(getattr(obj, func)(**inputs))
File "C:\StabilityMatrix\Data\Packages\ComfyUI\custom_nodes\clipseg.py", line 127, in segment_image
outputs = model(**input_prc)
File "C:\StabilityMatrix\Data\Packages\ComfyUI\venv\lib\site-packages\torch\nn\modules\module.py", line 1736, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "C:\StabilityMatrix\Data\Packages\ComfyUI\venv\lib\site-packages\torch\nn\modules\module.py", line 1747, in _call_impl
return forward_call(*args, **kwargs)
File "C:\StabilityMatrix\Data\Packages\ComfyUI\venv\lib\site-packages\transformers\models\clipseg\modeling_clipseg.py", line 1436, in forward
vision_outputs = self.clip.vision_model(
File "C:\StabilityMatrix\Data\Packages\ComfyUI\venv\lib\site-packages\torch\nn\modules\module.py", line 1736, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "C:\StabilityMatrix\Data\Packages\ComfyUI\venv\lib\site-packages\torch\nn\modules\module.py", line 1747, in _call_impl
return forward_call(*args, **kwargs)
File "C:\StabilityMatrix\Data\Packages\ComfyUI\venv\lib\site-packages\transformers\models\clipseg\modeling_clipseg.py", line 870, in forward
hidden_states = self.embeddings(pixel_values, interpolate_pos_encoding=interpolate_pos_encoding)
File "C:\StabilityMatrix\Data\Packages\ComfyUI\venv\lib\site-packages\torch\nn\modules\module.py", line 1736, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "C:\StabilityMatrix\Data\Packages\ComfyUI\venv\lib\site-packages\torch\nn\modules\module.py", line 1747, in _call_impl
return forward_call(*args, **kwargs)
File "C:\StabilityMatrix\Data\Packages\ComfyUI\venv\lib\site-packages\transformers\models\clipseg\modeling_clipseg.py", line 211, in forward
raise ValueError(
ValueError: Input image size (352*352) doesn't match model (224*224).
2024-10-29 11:27:16,011 - root - INFO - Prompt executed in 2.62 seconds
2024-10-29 11:28:06,490 - root - INFO - got prompt
2024-10-29 11:28:13,565 - root - INFO - Requested to load SD1ClipModel
2024-10-29 11:28:13,568 - root - INFO - Loading 1 new model
2024-10-29 11:28:14,369 - root - INFO - loaded completely 0.0 235.84423828125 True
2024-10-29 11:28:14,894 - root - INFO - loaded completely 1386.798677253723 1208.09814453125 True
2024-10-29 11:28:19,253 - root - ERROR - !!! Exception during processing !!! Input image size (352*352) doesn't match model (224*224).
2024-10-29 11:28:19,257 - root - ERROR - Traceback (most recent call last):
File "C:\StabilityMatrix\Data\Packages\ComfyUI\execution.py", line 323, in execute
output_data, output_ui, has_subgraph = get_output_data(obj, input_data_all, execution_block_cb=execution_block_cb, pre_execute_cb=pre_execute_cb)
File "C:\StabilityMatrix\Data\Packages\ComfyUI\execution.py", line 198, in get_output_data
return_values = _map_node_over_list(obj, input_data_all, obj.FUNCTION, allow_interrupt=True, execution_block_cb=execution_block_cb, pre_execute_cb=pre_execute_cb)
File "C:\StabilityMatrix\Data\Packages\ComfyUI\execution.py", line 169, in _map_node_over_list
process_inputs(input_dict, i)
File "C:\StabilityMatrix\Data\Packages\ComfyUI\execution.py", line 158, in process_inputs
results.append(getattr(obj, func)(**inputs))
File "C:\StabilityMatrix\Data\Packages\ComfyUI\custom_nodes\clipseg.py", line 127, in segment_image
outputs = model(**input_prc)
File "C:\StabilityMatrix\Data\Packages\ComfyUI\venv\lib\site-packages\torch\nn\modules\module.py", line 1736, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "C:\StabilityMatrix\Data\Packages\ComfyUI\venv\lib\site-packages\torch\nn\modules\module.py", line 1747, in _call_impl
return forward_call(*args, **kwargs)
File "C:\StabilityMatrix\Data\Packages\ComfyUI\venv\lib\site-packages\transformers\models\clipseg\modeling_clipseg.py", line 1436, in forward
vision_outputs = self.clip.vision_model(
File "C:\StabilityMatrix\Data\Packages\ComfyUI\venv\lib\site-packages\torch\nn\modules\module.py", line 1736, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "C:\StabilityMatrix\Data\Packages\ComfyUI\venv\lib\site-packages\torch\nn\modules\module.py", line 1747, in _call_impl
return forward_call(*args, **kwargs)
File "C:\StabilityMatrix\Data\Packages\ComfyUI\venv\lib\site-packages\transformers\models\clipseg\modeling_clipseg.py", line 870, in forward
hidden_states = self.embeddings(pixel_values, interpolate_pos_encoding=interpolate_pos_encoding)
File "C:\StabilityMatrix\Data\Packages\ComfyUI\venv\lib\site-packages\torch\nn\modules\module.py", line 1736, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "C:\StabilityMatrix\Data\Packages\ComfyUI\venv\lib\site-packages\torch\nn\modules\module.py", line 1747, in _call_impl
return forward_call(*args, **kwargs)
File "C:\StabilityMatrix\Data\Packages\ComfyUI\venv\lib\site-packages\transformers\models\clipseg\modeling_clipseg.py", line 211, in forward
raise ValueError(
ValueError: Input image size (352*352) doesn't match model (224*224).
2024-10-29 11:28:19,259 - root - INFO - Prompt executed in 12.73 seconds
Workflow too large. Please manually upload the workflow from local file system.
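To check whether the installed transformers build alone reproduces this, independent of the workflow, a minimal standalone sketch is below. It assumes the custom node uses the CIDAS/clipseg-rd64-refined checkpoint (the checkpoint name does not appear in this report) and should be run in the same venv that appears in the traceback.

from PIL import Image
import torch
from transformers import CLIPSegProcessor, CLIPSegForImageSegmentation

# Hypothetical reproduction script; the checkpoint name is an assumption, not taken from the report.
processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
model = CLIPSegForImageSegmentation.from_pretrained("CIDAS/clipseg-rd64-refined")

image = Image.new("RGB", (512, 512))  # any image; the processor rescales it (to 352*352 here, per the error above)
inputs = processor(text=["a cat"], images=[image], return_tensors="pt")
print("pixel_values:", tuple(inputs["pixel_values"].shape))

with torch.no_grad():
    try:
        model(**inputs)  # on affected transformers versions this raises the same ValueError as in the report
    except ValueError as err:
        print("plain forward failed:", err)
    # Workaround attempt: only valid if this transformers build exposes interpolate_pos_encoding
    outputs = model(**inputs, interpolate_pos_encoding=True)

print("logits:", tuple(outputs.logits.shape))

If the plain forward raises the same ValueError and the interpolate_pos_encoding call succeeds, the one-line edit sketched after the stack trace above (or pinning transformers to an earlier release) should unblock the CLIPSeg node.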