got prompt
model weight dtype torch.float16, manual cast: None
model_type EPS
Using split attention in VAE
Using split attention in VAE
Requested to load SD1ClipModel
Loading 1 new model
loaded completely 0.0 235.84423828125 True
INFO: Clip Vision model loaded from K:\ComfyUI-aki-v1.4\models\clip_vision\CLIP-ViT-bigG-14-laion2B-39B-b160k.safetensors
Warning torch.load doesn't support weights_only on this pytorch version, loading unsafely.
INFO: IPAdapter model loaded from K:\ComfyUI-aki-v1.4\models\ipadapter\models\ip-adapter_sd15_vit-G.bin
model_path is K:\ComfyUI-aki-v1.4\custom_nodes\comfyui_controlnet_aux\ckpts\yzd-v/DWPose\yolox_l.onnx
model_path is K:\ComfyUI-aki-v1.4\custom_nodes\comfyui_controlnet_aux\ckpts\yzd-v/DWPose\dw-ll_ucoco_384.onnx
DWPose: Using yolox_l.onnx for bbox detection and dw-ll_ucoco_384.onnx for pose estimation
DWPose: Caching OpenCV DNN module yolox_l.onnx on cv2.DNN...
DWPose: Caching OpenCV DNN module dw-ll_ucoco_384.onnx on cv2.DNN...
DWPose: Bbox 1453.33ms
DWPose: Pose 324.07ms on 1 people
Warning torch.load doesn't support weights_only on this pytorch version, loading unsafely.
Requested to load CLIPVisionModelProjection
Loading 1 new model
loaded completely 0.0 3522.953369140625 True
Warning torch.load doesn't support weights_only on this pytorch version, loading unsafely.
Requested to load BaseModel
Requested to load ControlNet
Loading 2 new models
loaded completely 0.0 1639.406135559082 True
loaded completely 0.0 689.0852355957031 True
!!! Exception during processing !!! The size of tensor a (8) must match the size of tensor b (16) at non-singleton dimension 3
Traceback (most recent call last):
File "K:\ComfyUI-aki-v1.4\execution.py", line 317, in execute
output_data, output_ui, has_subgraph = get_output_data(obj, input_data_all, execution_block_cb=execution_block_cb, pre_execute_cb=pre_execute_cb)
File "K:\ComfyUI-aki-v1.4\execution.py", line 192, in get_output_data
return_values = _map_node_over_list(obj, input_data_all, obj.FUNCTION, allow_interrupt=True, execution_block_cb=execution_block_cb, pre_execute_cb=pre_execute_cb)
File "K:\ComfyUI-aki-v1.4\execution.py", line 169, in _map_node_over_list
process_inputs(input_dict, i)
File "K:\ComfyUI-aki-v1.4\execution.py", line 158, in process_inputs
results.append(getattr(obj, func)(**inputs))
File "K:\ComfyUI-aki-v1.4\custom_nodes\ComfyUI_tinyterraNodes\ttNpy\tinyterraNodes.py", line 1475, in adv_sample
return ttN_pipeKSampler_v2.sample(self, pipe, lora_name, lora_strength, steps, cfg, sampler_name, scheduler, image_output, save_prefix, file_type, embed_workflow, noise,
File "K:\ComfyUI-aki-v1.4\custom_nodes\ComfyUI_tinyterraNodes\ttNpy\tinyterraNodes.py", line 1390, in sample
return process_sample_state(samp_model, samp_images, samp_clip, samp_samples, samp_vae, samp_seed, samp_positive, samp_negative, lora_name, lora_strength, lora_strength,
File "K:\ComfyUI-aki-v1.4\custom_nodes\ComfyUI_tinyterraNodes\ttNpy\tinyterraNodes.py", line 1314, in process_sample_state
samp_samples = sampler.common_ksampler(samp_model, samp_seed, steps, cfg, sampler_name, scheduler, samp_positive, samp_negative, samp_samples, denoise=denoise, preview_latent=preview_latent, start_step=start_step, last_step=last_step, force_full_denoise=force_full_denoise, disable_noise=disable_noise)
File "K:\ComfyUI-aki-v1.4\custom_nodes\ComfyUI_tinyterraNodes\ttNpy\tinyterraNodes.py", line 426, in common_ksampler
samples = comfy.sample.sample(model, noise, steps, cfg, sampler_name, scheduler, positive, negative, latent_image,
File "K:\ComfyUI-aki-v1.4\custom_nodes\ComfyUI-Impact-Pack\modules\impact\sample_error_enhancer.py", line 22, in informative_sample
raise e
File "K:\ComfyUI-aki-v1.4\custom_nodes\ComfyUI-Impact-Pack\modules\impact\sample_error_enhancer.py", line 9, in informative_sample
return original_sample(*args, **kwargs) # This code helps interpret error messages that occur within exceptions but does not have any impact on other operations.
File "K:\ComfyUI-aki-v1.4\custom_nodes\ComfyUI-AnimateDiff-Evolved\animatediff\sampling.py", line 420, in motion_sample
return orig_comfy_sample(model, noise, *args, **kwargs)
File "K:\ComfyUI-aki-v1.4\custom_nodes\ComfyUI-Advanced-ControlNet\adv_control\sampling.py", line 116, in acn_sample
return orig_comfy_sample(model, *args, **kwargs)
File "K:\ComfyUI-aki-v1.4\custom_nodes\ComfyUI-Advanced-ControlNet\adv_control\utils.py", line 117, in uncond_multiplier_check_cn_sample
return orig_comfy_sample(model, *args, **kwargs)
File "K:\ComfyUI-aki-v1.4\comfy\sample.py", line 43, in sample
samples = sampler.sample(noise, positive, negative, cfg=cfg, latent_image=latent_image, start_step=start_step, last_step=last_step, force_full_denoise=force_full_denoise, denoise_mask=noise_mask, sigmas=sigmas, callback=callback, disable_pbar=disable_pbar, seed=seed)
File "K:\ComfyUI-aki-v1.4\comfy\samplers.py", line 829, in sample
return sample(self.model, noise, positive, negative, cfg, self.device, sampler, sigmas, self.model_options, latent_image=latent_image, denoise_mask=denoise_mask, callback=callback, disable_pbar=disable_pbar, seed=seed)
File "K:\ComfyUI-aki-v1.4\comfy\samplers.py", line 729, in sample
return cfg_guider.sample(noise, latent_image, sampler, sigmas, denoise_mask, callback, disable_pbar, seed)
File "K:\ComfyUI-aki-v1.4\comfy\samplers.py", line 716, in sample
output = self.inner_sample(noise, latent_image, device, sampler, sigmas, denoise_mask, callback, disable_pbar, seed)
File "K:\ComfyUI-aki-v1.4\comfy\samplers.py", line 695, in inner_sample
samples = sampler.sample(self, sigmas, extra_args, callback, noise, latent_image, denoise_mask, disable_pbar)
File "K:\ComfyUI-aki-v1.4\comfy\samplers.py", line 600, in sample
samples = self.sampler_function(model_k, noise, sigmas, extra_args=extra_args, callback=k_callback, disable=disable_pbar, **self.extra_options)
File "K:\ComfyUI-aki-v1.4\python\lib\site-packages\torch\utils_contextlib.py", line 115, in decorate_context
return func(*args, **kwargs)
File "K:\ComfyUI-aki-v1.4\comfy\k_diffusion\sampling.py", line 144, in sample_euler
denoised = model(x, sigma_hat * s_in, **extra_args)
File "K:\ComfyUI-aki-v1.4\comfy\samplers.py", line 299, in call
out = self.inner_model(x, sigma, model_options=model_options, seed=seed)
File "K:\ComfyUI-aki-v1.4\comfy\samplers.py", line 682, in call
return self.predict_noise(*args, **kwargs)
File "K:\ComfyUI-aki-v1.4\comfy\samplers.py", line 685, in predict_noise
return sampling_function(self.inner_model, x, timestep, self.conds.get("negative", None), self.conds.get("positive", None), self.cfg, model_options=model_options, seed=seed)
File "K:\ComfyUI-aki-v1.4\comfy\samplers.py", line 279, in sampling_function
out = calc_cond_batch(model, conds, x, timestep, model_options)
File "K:\ComfyUI-aki-v1.4\comfy\samplers.py", line 202, in calc_cond_batch
c['control'] = control.get_control(input_x, timestep, c, len(cond_or_uncond))
File "K:\ComfyUI-aki-v1.4\comfy\controlnet.py", line 649, in get_control
return self.control_merge(control_input, control_prev, x_noisy.dtype)
File "K:\ComfyUI-aki-v1.4\comfy\controlnet.py", line 168, in control_merge
o[i] = prev_val + o[i]
RuntimeError: The size of tensor a (8) must match the size of tensor b (16) at non-singleton dimension 3
Prompt executed in 10.91 seconds
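
For reference, the failing operation is the element-wise add o[i] = prev_val + o[i] in control_merge (comfy/controlnet.py): a previously computed ControlNet output whose width at dimension 3 is 8 is added to a feature map whose width is 16. The snippet below is a minimal, hypothetical reproduction of that shape mismatch, not ComfyUI's actual code; one plausible trigger is that the latent being sampled and the control hint were prepared at different resolutions.

import torch

# Hypothetical shapes only: a stored residual 8 wide (dim 3) added to a map 16 wide,
# mirroring the mismatch reported in the traceback above.
prev_val = torch.randn(2, 320, 8, 8)   # stored ControlNet output (width 8)
o_i = torch.randn(2, 320, 8, 16)       # current feature map (width 16)

try:
    o_i = prev_val + o_i               # same add as control_merge's o[i] = prev_val + o[i]
except RuntimeError as e:
    print(e)
    # -> The size of tensor a (8) must match the size of tensor b (16) at non-singleton dimension 3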