# ComfyUI Error Report
## Error Details
- **Exception Message:** Given groups=1, weight of size [320, 4, 3, 3], expected input[2, 8, 96, 64] to have 4 channels, but got 8 channels instead
## Stack Trace
```
File "/root/ComfyUI/execution.py", line 317, in execute
output_data, output_ui, has_subgraph = get_output_data(obj, input_data_all, execution_block_cb=execution_block_cb, pre_execute_cb=pre_execute_cb)
File "/root/ComfyUI/execution.py", line 192, in get_output_data
return_values = _map_node_over_list(obj, input_data_all, obj.FUNCTION, allow_interrupt=True, execution_block_cb=execution_block_cb, pre_execute_cb=pre_execute_cb)
File "/root/ComfyUI/execution.py", line 169, in _map_node_over_list
process_inputs(input_dict, i)
File "/root/ComfyUI/execution.py", line 158, in process_inputs
results.append(getattr(obj, func)(**inputs))
File "/root/ComfyUI/custom_nodes/ComfyUI-Easy-Use/py/easyNodes.py", line 5414, in simple
return super().run(pipe, None, None, None, None, None, image_output, link_id, save_prefix,
File "/root/ComfyUI/custom_nodes/ComfyUI-Easy-Use/py/easyNodes.py", line 5383, in run
return process_sample_state(pipe, samp_model, samp_clip, samp_samples, samp_vae, samp_seed, samp_positive, samp_negative, steps, start_step, last_step, cfg, sampler_name, scheduler, denoise, image_output, link_id, save_prefix, tile_size, prompt, extra_pnginfo, my_unique_id, preview_latent, force_full_denoise, disable_noise, samp_custom)
File "/root/ComfyUI/custom_nodes/ComfyUI-Easy-Use/py/easyNodes.py", line 5167, in process_sample_state
samp_samples = sampler.common_ksampler(samp_model, samp_seed, steps, cfg, sampler_name, scheduler, samp_positive, samp_negative, samp_samples, denoise=denoise, preview_latent=preview_latent, start_step=start_step, last_step=last_step, force_full_denoise=force_full_denoise, disable_noise=disable_noise)
File "/root/ComfyUI/custom_nodes/ComfyUI-Easy-Use/py/libs/sampler.py", line 115, in common_ksampler
samples = comfy.sample.sample(model, noise, steps, cfg, sampler_name, scheduler, positive, negative,
File "/root/ComfyUI/custom_nodes/ComfyUI-AnimateDiff-Evolved/animatediff/sampling.py", line 420, in motion_sample
return orig_comfy_sample(model, noise, *args, **kwargs)
File "/root/ComfyUI/custom_nodes/ComfyUI-Advanced-ControlNet/adv_control/sampling.py", line 116, in acn_sample
return orig_comfy_sample(model, *args, **kwargs)
File "/root/ComfyUI/custom_nodes/ComfyUI-Advanced-ControlNet/adv_control/utils.py", line 116, in uncond_multiplier_check_cn_sample
return orig_comfy_sample(model, *args, **kwargs)
File "/root/ComfyUI/custom_nodes/ComfyUI-Impact-Pack/modules/impact/sample_error_enhancer.py", line 22, in informative_sample
raise e
File "/root/ComfyUI/custom_nodes/ComfyUI-Impact-Pack/modules/impact/sample_error_enhancer.py", line 9, in informative_sample
return original_sample(*args, **kwargs) # This code helps interpret error messages that occur within exceptions but does not have any impact on other operations.
File "/root/ComfyUI/comfy/sample.py", line 43, in sample
samples = sampler.sample(noise, positive, negative, cfg=cfg, latent_image=latent_image, start_step=start_step, last_step=last_step, force_full_denoise=force_full_denoise, denoise_mask=noise_mask, sigmas=sigmas, callback=callback, disable_pbar=disable_pbar, seed=seed)
File "/root/ComfyUI/custom_nodes/ComfyUI_smZNodes/smZNodes.py", line 1451, in KSampler_sample
return _KSampler_sample(*args, **kwargs)
File "/root/ComfyUI/comfy/samplers.py", line 829, in sample
return sample(self.model, noise, positive, negative, cfg, self.device, sampler, sigmas, self.model_options, latent_image=latent_image, denoise_mask=denoise_mask, callback=callback, disable_pbar=disable_pbar, seed=seed)
File "/root/ComfyUI/custom_nodes/ComfyUI_smZNodes/smZNodes.py", line 1474, in sample
return _sample(*args, **kwargs)
File "/root/ComfyUI/comfy/samplers.py", line 729, in sample
return cfg_guider.sample(noise, latent_image, sampler, sigmas, denoise_mask, callback, disable_pbar, seed)
File "/root/ComfyUI/comfy/samplers.py", line 716, in sample
output = self.inner_sample(noise, latent_image, device, sampler, sigmas, denoise_mask, callback, disable_pbar, seed)
File "/root/ComfyUI/comfy/samplers.py", line 695, in inner_sample
samples = sampler.sample(self, sigmas, extra_args, callback, noise, latent_image, denoise_mask, disable_pbar)
File "/root/ComfyUI/comfy/samplers.py", line 600, in sample
samples = self.sampler_function(model_k, noise, sigmas, extra_args=extra_args, callback=k_callback, disable=disable_pbar, **self.extra_options)
File "/root/miniconda3/lib/python3.10/site-packages/torch/utils/_contextlib.py", line 115, in decorate_context
return func(*args, **kwargs)
File "/root/ComfyUI/comfy/k_diffusion/sampling.py", line 225, in sample_dpm_2
denoised = model(x, sigma_hat * s_in, **extra_args)
File "/root/ComfyUI/comfy/samplers.py", line 299, in __call__
out = self.inner_model(x, sigma, model_options=model_options, seed=seed)
File "/root/ComfyUI/custom_nodes/ComfyUI_smZNodes/smZNodes.py", line 997, in __call__
return self.predict_noise(*args, **kwargs)
File "/root/ComfyUI/custom_nodes/ComfyUI_smZNodes/smZNodes.py", line 1047, in predict_noise
out = super().predict_noise(*args, **kwargs)
File "/root/ComfyUI/comfy/samplers.py", line 685, in predict_noise
return sampling_function(self.inner_model, x, timestep, self.conds.get("negative", None), self.conds.get("positive", None), self.cfg, model_options=model_options, seed=seed)
File "/root/ComfyUI/comfy/samplers.py", line 279, in sampling_function
out = calc_cond_batch(model, conds, x, timestep, model_options)
File "/root/ComfyUI/custom_nodes/ComfyUI-TiledDiffusion/.patches.py", line 4, in calc_cond_batch
return calc_cond_batch_original_tiled_diffusion_8c4bae96(model, conds, x_in, timestep, model_options)
File "/root/ComfyUI/comfy/samplers.py", line 226, in calc_cond_batch
output = model_options['model_function_wrapper'](model.apply_model, {"input": input_x, "timestep": timestep_, "c": c, "cond_or_uncond": cond_or_uncond}).chunk(batch_chunks)
File "/root/ComfyUI/custom_nodes/ComfyUI-Easy-Use/py/ic_light/__init__.py", line 173, in wrapper_func
return existing_wrapper(unet_apply, params=self.apply_c_concat(params, concat_conds))
File "/root/ComfyUI/custom_nodes/ComfyUI-Easy-Use/py/ic_light/__init__.py", line 166, in unet_dummy_apply
return unet_apply(x=params["input"], t=params["timestep"], **params["c"])
File "/root/ComfyUI/custom_nodes/ComfyUI-Advanced-ControlNet/adv_control/utils.py", line 68, in apply_model_uncond_cleanup_wrapper
return orig_apply_model(self, *args, **kwargs)
File "/root/ComfyUI/comfy/model_base.py", line 142, in apply_model
model_output = self.diffusion_model(xc, t, context=context, control=control, transformer_options=transformer_options, **extra_conds).float()
File "/root/miniconda3/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1532, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "/root/miniconda3/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1541, in _call_impl
return forward_call(*args, **kwargs)
File "/root/ComfyUI/comfy/ldm/modules/diffusionmodules/openaimodel.py", line 852, in forward
h = forward_timestep_embed(module, h, emb, context, transformer_options, time_context=time_context, num_video_frames=num_video_frames, image_only_indicator=image_only_indicator)
File "/root/ComfyUI/comfy/ldm/modules/diffusionmodules/openaimodel.py", line 50, in forward_timestep_embed
x = layer(x)
File "/root/miniconda3/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1532, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "/root/miniconda3/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1541, in _call_impl
return forward_call(*args, **kwargs)
File "/root/ComfyUI/comfy/ops.py", line 102, in forward
return super().forward(*args, **kwargs)
File "/root/miniconda3/lib/python3.10/site-packages/torch/nn/modules/conv.py", line 460, in forward
return self._conv_forward(input, self.weight, self.bias)
File "/root/miniconda3/lib/python3.10/site-packages/torch/nn/modules/conv.py", line 456, in _conv_forward
return F.conv2d(input, weight, bias, self.stride,
```
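The trace bottoms out in `F.conv2d`: the sampled UNet's first convolution still has a weight of shape `[320, 4, 3, 3]` (4 input channels), while the latent reaching it has 8 channels, consistent with the `apply_c_concat` frame in `ComfyUI-Easy-Use/py/ic_light/__init__.py` concatenating an extra 4-channel conditioning latent. A minimal sketch that reproduces the same mismatch outside ComfyUI (tensor shapes copied from the exception message above; everything else is illustrative):

```python
import torch
import torch.nn.functional as F

# Shapes copied from the exception message above.
weight = torch.randn(320, 4, 3, 3)   # conv_in weight: 320 out, 4 in, 3x3 kernel
latent = torch.randn(2, 8, 96, 64)   # input after c_concat: 8 channels instead of 4

try:
    F.conv2d(latent, weight, stride=1, padding=1)
except RuntimeError as err:
    # Prints: "Given groups=1, weight of size [320, 4, 3, 3], expected
    # input[2, 8, 96, 64] to have 4 channels, but got 8 channels instead"
    print(err)
```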
## System Information
- **OS:** posix
- **Python Version:** 3.10.6 (main, Oct 24 2022, 16:07:47) [GCC 11.2.0]
- **Embedded Python:** false
## Devices
- **Name:** cuda:0 Tesla T4 : cudaMallocAsync
- **Type:** cuda
- **VRAM Total:** 15653470208
- **VRAM Free:** 7350087948
- **Torch VRAM Total:** 5771362304
- **Torch VRAM Free:** 61108492
## Attached Workflow
Please make sure that workflow does not contain any sensitive information such as API keys or passwords.
Workflow too large. Please manually upload the workflow from local file system.
## Additional Context
(Please add any additional context or steps to reproduce the error here)
After updating EasyUse to the latest version, every workflow that previously used EasyUse ICLight now fails with this error.
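If it helps narrow things down (this is an assumption, not verified against the updated EasyUse code): IC-Light expects the UNet's `conv_in` to be widened from 4 to 8 input channels before the concatenated latent is fed in, and this error suggests the model reaching the sampler still has the stock 4-channel conv. A hypothetical sketch of that kind of patch, assuming ComfyUI's SD1.5 `UNetModel` layout where `input_blocks[0][0]` is the first `Conv2d`; the function name and zero-fill strategy are illustrative, not the EasyUse implementation:

```python
import torch

def ensure_8ch_conv_in(diffusion_model):
    """Hypothetical sketch (not the EasyUse code): widen a stock SD1.5 conv_in
    from 4 to 8 input channels, zero-filling the new channels, so the UNet can
    accept IC-Light's concatenated latent.  Assumes ComfyUI's UNetModel layout
    where input_blocks[0][0] is the first Conv2d."""
    conv_in = diffusion_model.input_blocks[0][0]
    if conv_in.weight.shape[1] == 8:
        return                          # already patched; this error would not occur
    old_w = conv_in.weight              # [320, 4, 3, 3] in the failing run
    new_w = torch.zeros(old_w.shape[0], 8, *old_w.shape[2:],
                        dtype=old_w.dtype, device=old_w.device)
    new_w[:, :old_w.shape[1]] = old_w   # keep the original 4-channel weights
    conv_in.weight = torch.nn.Parameter(new_w)
    conv_in.in_channels = 8
```

Printing `conv_in.weight.shape` right before sampling in the failing run should show `[320, 4, 3, 3]`, which would explain why every previously working IC-Light workflow now hits the same message.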