ComfyUI-IF_AI_tools is a set of custom nodes for ComfyUI that allows you to generate prompts using a local Large Language Model (LLM) via Ollama. This tool enables you to enhance your image generation workflow by leveraging the power of language models.
Sampling error after node "omost layout cond(omostdensediffusion)" :
Requested to load SDXL
Loading 1 new model
Requested to load SDXL
Loading 1 new model
!!! Exception during processing!!! The expanded size of the tensor (2304) must match the existing size (2256) at non-singleton dimension 2. Target sizes: [1, 10, 2304, 1309]. Tensor sizes: [1, 10, 2256, 1309]
Traceback (most recent call last):
File "D:\AI\ComfyUI-aki-v1\execution.py", line 151, in recursive_execute
output_data, output_ui = get_output_data(obj, input_data_all)
File "D:\AI\ComfyUI-aki-v1\execution.py", line 81, in get_output_data
return_values = map_node_over_list(obj, input_data_all, obj.FUNCTION, allow_interrupt=True)
File "D:\AI\ComfyUI-aki-v1\execution.py", line 74, in map_node_over_list
results.append(getattr(obj, func)(slice_dict(input_data_all, i)))
File "D:\AI\ComfyUI-aki-v1\nodes.py", line 1373, in sample
return common_ksampler(model, seed, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, denoise=denoise)
File "D:\AI\ComfyUI-aki-v1\nodes.py", line 1343, in common_ksampler
samples = comfy.sample.sample(model, noise, steps, cfg, sampler_name, scheduler, positive, negative, latent_image,
File "D:\AI\ComfyUI-aki-v1\custom_nodes\ComfyUI-Impact-Pack\modules\impact\sample_error_enhancer.py", line 22, in informative_sample
raise e
File "D:\AI\ComfyUI-aki-v1\custom_nodes\ComfyUI-Impact-Pack\modules\impact\sample_error_enhancer.py", line 9, in informative_sample
return original_sample(*args, **kwargs) # This code helps interpret error messages that occur within exceptions but does not have any impact on other operations.
File "D:\AI\ComfyUI-aki-v1\custom_nodes\ComfyUI-AnimateLCM\animatediff\sampling.py", line 241, in motion_sample
return orig_comfy_sample(model, noise, *args, **kwargs)
File "D:\AI\ComfyUI-aki-v1\custom_nodes\ComfyUI-AnimateDiff-Evolved\animatediff\sampling.py", line 410, in motion_sample
return orig_comfy_sample(model, noise, *args, **kwargs)
File "D:\AI\ComfyUI-aki-v1\custom_nodes\ComfyUI-Advanced-ControlNet\adv_control\control_reference.py", line 47, in refcn_sample
return orig_comfy_sample(model, *args, **kwargs)
File "D:\AI\ComfyUI-aki-v1\custom_nodes\ComfyUI-Advanced-ControlNet\adv_control\utils.py", line 112, in uncond_multiplier_check_cn_sample
return orig_comfy_sample(model, *args, **kwargs)
File "D:\AI\ComfyUI-aki-v1\comfy\sample.py", line 43, in sample
samples = sampler.sample(noise, positive, negative, cfg=cfg, latent_image=latent_image, start_step=start_step, last_step=last_step, force_full_denoise=force_full_denoise, denoise_mask=noise_mask, sigmas=sigmas, callback=callback, disable_pbar=disable_pbar, seed=seed)
File "D:\AI\ComfyUI-aki-v1\custom_nodes\ComfyUI_smZNodes\smZNodes.py", line 1447, in KSampler_sample
return _KSampler_sample(*args, **kwargs)
File "D:\AI\ComfyUI-aki-v1\comfy\samplers.py", line 801, in sample
return sample(self.model, noise, positive, negative, cfg, self.device, sampler, sigmas, self.model_options, latent_image=latent_image, denoise_mask=denoise_mask, callback=callback, disable_pbar=disable_pbar, seed=seed)
File "D:\AI\ComfyUI-aki-v1\custom_nodes\ComfyUI_smZNodes\smZNodes.py", line 1470, in sample
return _sample(*args, **kwargs)
File "D:\AI\ComfyUI-aki-v1\comfy\samplers.py", line 703, in sample
return cfg_guider.sample(noise, latent_image, sampler, sigmas, denoise_mask, callback, disable_pbar, seed)
File "D:\AI\ComfyUI-aki-v1\comfy\samplers.py", line 690, in sample
output = self.inner_sample(noise, latent_image, device, sampler, sigmas, denoise_mask, callback, disable_pbar, seed)
File "D:\AI\ComfyUI-aki-v1\comfy\samplers.py", line 669, in inner_sample
samples = sampler.sample(self, sigmas, extra_args, callback, noise, latent_image, denoise_mask, disable_pbar)
File "D:\AI\ComfyUI-aki-v1\comfy\samplers.py", line 574, in sample
samples = self.sampler_function(model_k, noise, sigmas, extra_args=extra_args, callback=k_callback, disable=disable_pbar, **self.extra_options)
File "D:\AI\ComfyUI-aki-v1\python\lib\site-packages\torch\utils\_contextlib.py", line 115, in decorate_context
return func(*args, **kwargs)
File "D:\AI\ComfyUI-aki-v1\comfy\k_diffusion\sampling.py", line 732, in sample_dpmpp_2m_sde_gpu
return sample_dpmpp_2m_sde(model, x, sigmas, extra_args=extra_args, callback=callback, disable=disable, eta=eta, s_noise=s_noise, noise_sampler=noise_sampler, solver_type=solver_type)
File "D:\AI\ComfyUI-aki-v1\python\lib\site-packages\torch\utils\_contextlib.py", line 115, in decorate_context
return func(*args, **kwargs)
File "D:\AI\ComfyUI-aki-v1\comfy\k_diffusion\sampling.py", line 635, in sample_dpmpp_2m_sde
denoised = model(x, sigmas[i] * s_in, **extra_args)
File "D:\AI\ComfyUI-aki-v1\comfy\samplers.py", line 297, in __call__
out = self.inner_model(x, sigma, model_options=model_options, seed=seed)
File "D:\AI\ComfyUI-aki-v1\custom_nodes\ComfyUI_smZNodes\smZNodes.py", line 993, in __call__
return self.predict_noise(*args, **kwargs)
File "D:\AI\ComfyUI-aki-v1\custom_nodes\ComfyUI_smZNodes\smZNodes.py", line 1043, in predict_noise
out = super().predict_noise(*args, **kwargs)
File "D:\AI\ComfyUI-aki-v1\comfy\samplers.py", line 659, in predict_noise
return sampling_function(self.inner_model, x, timestep, self.conds.get("negative", None), self.conds.get("positive", None), self.cfg, model_options=model_options, seed=seed)
File "D:\AI\ComfyUI-aki-v1\comfy\samplers.py", line 277, in sampling_function
out = calc_cond_batch(model, conds, x, timestep, model_options)
File "D:\AI\ComfyUI-aki-v1\comfy\samplers.py", line 226, in calc_cond_batch
output = model.apply_model(input_x, timestep_, **c).chunk(batch_chunks)
File "D:\AI\ComfyUI-aki-v1\custom_nodes\ComfyUI-Advanced-ControlNet\adv_control\utils.py", line 64, in apply_model_uncond_cleanup_wrapper
return orig_apply_model(self, *args, **kwargs)
File "D:\AI\ComfyUI-aki-v1\comfy\model_base.py", line 114, in apply_model
model_output = self.diffusion_model(xc, t, context=context, control=control, transformer_options=transformer_options, **extra_conds).float()
File "D:\AI\ComfyUI-aki-v1\python\lib\site-packages\torch\nn\modules\module.py", line 1532, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "D:\AI\ComfyUI-aki-v1\python\lib\site-packages\torch\nn\modules\module.py", line 1541, in _call_impl
return forward_call(*args, **kwargs)
File "D:\AI\ComfyUI-aki-v1\comfy\ldm\modules\diffusionmodules\openaimodel.py", line 852, in forward
h = forward_timestep_embed(module, h, emb, context, transformer_options, time_context=time_context, num_video_frames=num_video_frames, image_only_indicator=image_only_indicator)
File "D:\AI\ComfyUI-aki-v1\comfy\ldm\modules\diffusionmodules\openaimodel.py", line 44, in forward_timestep_embed
x = layer(x, context, transformer_options)
File "D:\AI\ComfyUI-aki-v1\python\lib\site-packages\torch\nn\modules\module.py", line 1532, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "D:\AI\ComfyUI-aki-v1\python\lib\site-packages\torch\nn\modules\module.py", line 1541, in _call_impl
return forward_call(*args, **kwargs)
File "D:\AI\ComfyUI-aki-v1\comfy\ldm\modules\attention.py", line 694, in forward
x = block(x, context=context[i], transformer_options=transformer_options)
File "D:\AI\ComfyUI-aki-v1\python\lib\site-packages\torch\nn\modules\module.py", line 1532, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "D:\AI\ComfyUI-aki-v1\python\lib\site-packages\torch\nn\modules\module.py", line 1541, in _call_impl
return forward_call(*args, **kwargs)
File "D:\AI\ComfyUI-aki-v1\custom_nodes\ComfyUI-layerdiffuse\lib_layerdiffusion\attention_sharing.py", line 253, in forward
return func(self, x, context, transformer_options)
File "D:\AI\ComfyUI-aki-v1\custom_nodes\ComfyUI-Easy-Use\py\layer_diffuse\attension_sharing.py", line 253, in forward
return func(self, x, context, transformer_options)
File "D:\AI\ComfyUI-aki-v1\comfy\ldm\modules\attention.py", line 618, in forward
n = attn2_replace_patch[block_attn2](n, context_attn2, value_attn2, extra_options)
File "D:\AI\ComfyUI-aki-v1\python\lib\site-packages\torch\nn\modules\module.py", line 1532, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "D:\AI\ComfyUI-aki-v1\python\lib\site-packages\torch\nn\modules\module.py", line 1541, in _call_impl
return forward_call(*args, **kwargs)
File "D:\AI\ComfyUI-aki-v1\custom_nodes\ComfyUI_densediffusion\densediffusion_node.py", line 179, in forward
out = self.scaled_dot_product_attention(
File "D:\AI\ComfyUI-aki-v1\custom_nodes\ComfyUI_densediffusion\densediffusion_node.py", line 152, in scaled_dot_product_attention
attn_weight.masked_fill_(mask_bool.logical_not(), float("-inf"))
RuntimeError: The expanded size of the tensor (2304) must match the existing size (2256) at non-singleton dimension 2. Target sizes: [1, 10, 2304, 1309]. Tensor sizes: [1, 10, 2256, 1309]
Sampling error after node "omost layout cond(omostdensediffusion)" :
Requested to load SDXL Loading 1 new model Requested to load SDXL Loading 1 new model !!! Exception during processing!!! The expanded size of the tensor (2304) must match the existing size (2256) at non-singleton dimension 2. Target sizes: [1, 10, 2304, 1309]. Tensor sizes: [1, 10, 2256, 1309] Traceback (most recent call last): File "D:\AI\ComfyUI-aki-v1\execution.py", line 151, in recursive_execute output_data, output_ui = get_output_data(obj, input_data_all) File "D:\AI\ComfyUI-aki-v1\execution.py", line 81, in get_output_data return_values = map_node_over_list(obj, input_data_all, obj.FUNCTION, allow_interrupt=True) File "D:\AI\ComfyUI-aki-v1\execution.py", line 74, in map_node_over_list results.append(getattr(obj, func)(slice_dict(input_data_all, i))) File "D:\AI\ComfyUI-aki-v1\nodes.py", line 1373, in sample return common_ksampler(model, seed, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, denoise=denoise) File "D:\AI\ComfyUI-aki-v1\nodes.py", line 1343, in common_ksampler samples = comfy.sample.sample(model, noise, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, File "D:\AI\ComfyUI-aki-v1\custom_nodes\ComfyUI-Impact-Pack\modules\impact\sample_error_enhancer.py", line 22, in informative_sample raise e File "D:\AI\ComfyUI-aki-v1\custom_nodes\ComfyUI-Impact-Pack\modules\impact\sample_error_enhancer.py", line 9, in informative_sample return original_sample(*args, *kwargs) # This code helps interpret error messages that occur within exceptions but does not have any impact on other operations. 
File "D:\AI\ComfyUI-aki-v1\custom_nodes\ComfyUI-AnimateLCM\animatediff\sampling.py", line 241, in motion_sample return orig_comfy_sample(model, noise, args, kwargs) File "D:\AI\ComfyUI-aki-v1\custom_nodes\ComfyUI-AnimateDiff-Evolved\animatediff\sampling.py", line 410, in motion_sample return orig_comfy_sample(model, noise, *args, kwargs) File "D:\AI\ComfyUI-aki-v1\custom_nodes\ComfyUI-Advanced-ControlNet\adv_control\control_reference.py", line 47, in refcn_sample return orig_comfy_sample(model, *args, *kwargs) File "D:\AI\ComfyUI-aki-v1\custom_nodes\ComfyUI-Advanced-ControlNet\adv_control\utils.py", line 112, in uncond_multiplier_check_cn_sample return orig_comfy_sample(model, args, kwargs) File "D:\AI\ComfyUI-aki-v1\comfy\sample.py", line 43, in sample samples = sampler.sample(noise, positive, negative, cfg=cfg, latent_image=latent_image, start_step=start_step, last_step=last_step, force_full_denoise=force_full_denoise, denoise_mask=noise_mask, sigmas=sigmas, callback=callback, disable_pbar=disable_pbar, seed=seed) File "D:\AI\ComfyUI-aki-v1\custom_nodes\ComfyUI_smZNodes\smZNodes.py", line 1447, in KSampler_sample return _KSampler_sample(*args, kwargs) File "D:\AI\ComfyUI-aki-v1\comfy\samplers.py", line 801, in sample return sample(self.model, noise, positive, negative, cfg, self.device, sampler, sigmas, self.model_options, latent_image=latent_image, denoise_mask=denoise_mask, callback=callback, disable_pbar=disable_pbar, seed=seed) File "D:\AI\ComfyUI-aki-v1\custom_nodes\ComfyUI_smZNodes\smZNodes.py", line 1470, in sample return _sample(args, kwargs) File "D:\AI\ComfyUI-aki-v1\comfy\samplers.py", line 703, in sample return cfg_guider.sample(noise, latent_image, sampler, sigmas, denoise_mask, callback, disable_pbar, seed) File "D:\AI\ComfyUI-aki-v1\comfy\samplers.py", line 690, in sample output = self.inner_sample(noise, latent_image, device, sampler, sigmas, denoise_mask, callback, disable_pbar, seed) File "D:\AI\ComfyUI-aki-v1\comfy\samplers.py", line 669, in 
inner_sample samples = sampler.sample(self, sigmas, extra_args, callback, noise, latent_image, denoise_mask, disable_pbar) File "D:\AI\ComfyUI-aki-v1\comfy\samplers.py", line 574, in sample samples = self.sampler_function(model_k, noise, sigmas, extra_args=extra_args, callback=k_callback, disable=disable_pbar, self.extra_options) File "D:\AI\ComfyUI-aki-v1\python\lib\site-packages\torch\utils_contextlib.py", line 115, in decorate_context return func(args, kwargs) File "D:\AI\ComfyUI-aki-v1\comfy\k_diffusion\sampling.py", line 732, in sample_dpmpp_2m_sde_gpu return sample_dpmpp_2m_sde(model, x, sigmas, extra_args=extra_args, callback=callback, disable=disable, eta=eta, s_noise=s_noise, noise_sampler=noise_sampler, solver_type=solver_type) File "D:\AI\ComfyUI-aki-v1\python\lib\site-packages\torch\utils_contextlib.py", line 115, in decorate_context return func(*args, kwargs) File "D:\AI\ComfyUI-aki-v1\comfy\k_diffusion\sampling.py", line 635, in sample_dpmpp_2m_sde denoised = model(x, sigmas[i] * s_in, *extra_args) File "D:\AI\ComfyUI-aki-v1\comfy\samplers.py", line 297, in call out = self.inner_model(x, sigma, model_options=model_options, seed=seed) File "D:\AI\ComfyUI-aki-v1\custom_nodes\ComfyUI_smZNodes\smZNodes.py", line 993, in call return self.predict_noise(args, kwargs) File "D:\AI\ComfyUI-aki-v1\custom_nodes\ComfyUI_smZNodes\smZNodes.py", line 1043, in predict_noise out = super().predict_noise(args, kwargs) File "D:\AI\ComfyUI-aki-v1\comfy\samplers.py", line 659, in predict_noise return sampling_function(self.inner_model, x, timestep, self.conds.get("negative", None), self.conds.get("positive", None), self.cfg, model_options=model_options, seed=seed) File "D:\AI\ComfyUI-aki-v1\comfy\samplers.py", line 277, in sampling_function out = calc_cond_batch(model, conds, x, timestep, model_options) File "D:\AI\ComfyUI-aki-v1\comfy\samplers.py", line 226, in calc_cond_batch output = model.apply_model(inputx, timestep, c).chunk(batch_chunks) File 
"D:\AI\ComfyUI-aki-v1\custom_nodes\ComfyUI-Advanced-ControlNet\adv_control\utils.py", line 64, in apply_model_uncond_cleanup_wrapper return orig_apply_model(self, args, kwargs) File "D:\AI\ComfyUI-aki-v1\comfy\model_base.py", line 114, in apply_model model_output = self.diffusion_model(xc, t, context=context, control=control, transformer_options=transformer_options, extra_conds).float() File "D:\AI\ComfyUI-aki-v1\python\lib\site-packages\torch\nn\modules\module.py", line 1532, in _wrapped_call_impl return self._call_impl(*args, kwargs) File "D:\AI\ComfyUI-aki-v1\python\lib\site-packages\torch\nn\modules\module.py", line 1541, in _call_impl return forward_call(*args, *kwargs) File "D:\AI\ComfyUI-aki-v1\comfy\ldm\modules\diffusionmodules\openaimodel.py", line 852, in forward h = forward_timestep_embed(module, h, emb, context, transformer_options, time_context=time_context, num_video_frames=num_video_frames, image_only_indicator=image_only_indicator) File "D:\AI\ComfyUI-aki-v1\comfy\ldm\modules\diffusionmodules\openaimodel.py", line 44, in forward_timestep_embed x = layer(x, context, transformer_options) File "D:\AI\ComfyUI-aki-v1\python\lib\site-packages\torch\nn\modules\module.py", line 1532, in _wrapped_call_impl return self._call_impl(args, kwargs) File "D:\AI\ComfyUI-aki-v1\python\lib\site-packages\torch\nn\modules\module.py", line 1541, in _call_impl return forward_call(*args, kwargs) File "D:\AI\ComfyUI-aki-v1\comfy\ldm\modules\attention.py", line 694, in forward x = block(x, context=context[i], transformer_options=transformer_options) File "D:\AI\ComfyUI-aki-v1\python\lib\site-packages\torch\nn\modules\module.py", line 1532, in _wrapped_call_impl return self._call_impl(*args, *kwargs) File "D:\AI\ComfyUI-aki-v1\python\lib\site-packages\torch\nn\modules\module.py", line 1541, in _call_impl return forward_call(args, kwargs) File "D:\AI\ComfyUI-aki-v1\custom_nodes\ComfyUI-layerdiffuse\lib_layerdiffusion\attention_sharing.py", line 253, in forward return 
func(self, x, context, transformer_options) File "D:\AI\ComfyUI-aki-v1\custom_nodes\ComfyUI-Easy-Use\py\layer_diffuse\attension_sharing.py", line 253, in forward return func(self, x, context, transformer_options) File "D:\AI\ComfyUI-aki-v1\comfy\ldm\modules\attention.py", line 618, in forward n = attn2_replace_patch[block_attn2](n, context_attn2, value_attn2, extra_options) File "D:\AI\ComfyUI-aki-v1\python\lib\site-packages\torch\nn\modules\module.py", line 1532, in _wrapped_call_impl return self._call_impl(*args, *kwargs) File "D:\AI\ComfyUI-aki-v1\python\lib\site-packages\torch\nn\modules\module.py", line 1541, in _call_impl return forward_call(args, **kwargs) File "D:\AI\ComfyUI-aki-v1\custom_nodes\ComfyUI_densediffusion\densediffusion_node.py", line 179, in forward out = self.scaled_dot_product_attention( File "D:\AI\ComfyUI-aki-v1\custom_nodes\ComfyUI_densediffusion\densediffusion_node.py", line 152, in scaled_dot_product_attention attn_weight.maskedfill(mask_bool.logical_not(), float("-inf")) RuntimeError: The expanded size of the tensor (2304) must match the existing size (2256) at non-singleton dimension 2. Target sizes: [1, 10, 2304, 1309]. Tensor sizes: [1, 10, 2256, 1309]