MinusZoneAI / ComfyUI-Kolors-MZ

Kolors ComfyUI Native Sampler Implementation
GNU General Public License v3.0

IPAdapter works fine on an Intel GPU, but FaceID throws an error #71

Open cunkai opened 1 month ago

cunkai commented 1 month ago

Using the default workflow provided by the ip-plus author, after loading with KolorsUNETLoaderV2, the sampling step fails with the following error:

Error occurred when executing KSampler:

Input and gamma should be of the same datatype.

File "D:\Apps\ComfyUI\execution.py", line 152, in recursive_execute output_data, output_ui = get_output_data(obj, input_data_all) File "D:\Apps\ComfyUI\execution.py", line 82, in get_output_data return_values = map_node_over_list(obj, input_data_all, obj.FUNCTION, allow_interrupt=True) File "D:\Apps\ComfyUI\execution.py", line 75, in map_node_over_list results.append(getattr(obj, func)(slice_dict(input_data_all, i))) File "D:\Apps\ComfyUI\nodes.py", line 1382, in sample return common_ksampler(model, seed, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, denoise=denoise) File "D:\Apps\ComfyUI\nodes.py", line 1352, in common_ksampler samples = comfy.sample.sample(model, noise, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, File "D:\Apps\ComfyUI\comfy\sample.py", line 43, in sample samples = sampler.sample(noise, positive, negative, cfg=cfg, latent_image=latent_image, start_step=start_step, last_step=last_step, force_full_denoise=force_full_denoise, denoise_mask=noise_mask, sigmas=sigmas, callback=callback, disable_pbar=disable_pbar, seed=seed) File "D:\Apps\ComfyUI\comfy\samplers.py", line 829, in sample return sample(self.model, noise, positive, negative, cfg, self.device, sampler, sigmas, self.model_options, latent_image=latent_image, denoise_mask=denoise_mask, callback=callback, disable_pbar=disable_pbar, seed=seed) File "D:\Apps\ComfyUI\comfy\samplers.py", line 729, in sample return cfg_guider.sample(noise, latent_image, sampler, sigmas, denoise_mask, callback, disable_pbar, seed) File "D:\Apps\ComfyUI\comfy\samplers.py", line 716, in sample output = self.inner_sample(noise, latent_image, device, sampler, sigmas, denoise_mask, callback, disable_pbar, seed) File "D:\Apps\ComfyUI\comfy\samplers.py", line 695, in inner_sample samples = sampler.sample(self, sigmas, extra_args, callback, noise, latent_image, denoise_mask, disable_pbar) File "D:\Apps\ComfyUI\comfy\samplers.py", line 600, in sample samples = self.sampler_function(model_k, noise, sigmas, extra_args=extra_args, callback=k_callback, disable=disable_pbar, self.extra_options) File "D:\Apps\AnaConda3\envs\ComfyUI\lib\site-packages\torch\utils_contextlib.py", line 115, in decorate_context return func(*args, kwargs) File "D:\Apps\ComfyUI\comfy\k_diffusion\sampling.py", line 600, in sample_dpmpp_2m denoised = model(x, sigmas[i] * s_in, *extra_args) File "D:\Apps\ComfyUI\comfy\samplers.py", line 299, in call out = self.inner_model(x, sigma, model_options=model_options, seed=seed) File "D:\Apps\ComfyUI\comfy\samplers.py", line 682, in call return self.predict_noise(args, kwargs) File "D:\Apps\ComfyUI\comfy\samplers.py", line 685, in predict_noise return sampling_function(self.inner_model, x, timestep, self.conds.get("negative", None), self.conds.get("positive", None), self.cfg, model_options=model_options, seed=seed) File "D:\Apps\ComfyUI\comfy\samplers.py", line 279, in sampling_function out = calc_cond_batch(model, conds, x, timestep, model_options) File "D:\Apps\ComfyUI\comfy\samplers.py", line 228, in calc_cond_batch output = model.apply_model(inputx, timestep, c).chunk(batch_chunks) File "D:\Apps\ComfyUI\comfy\model_base.py", line 122, in apply_model model_output = self.diffusion_model(xc, t, context=context, control=control, transformer_options=transformer_options, extra_conds).float() File "D:\Apps\AnaConda3\envs\ComfyUI\lib\site-packages\torch\nn\modules\module.py", line 1518, in _wrapped_call_impl return self._call_impl(*args, kwargs) File 
"D:\Apps\AnaConda3\envs\ComfyUI\lib\site-packages\torch\nn\modules\module.py", line 1527, in _call_impl return forward_call(*args, *kwargs) File "D:\Apps\ComfyUI\custom_nodes\ComfyUI-Kolors-MZ\hook_comfyui_kolors_v2.py", line 71, in forward result = super().forward(args, kwargs) File "D:\Apps\ComfyUI\comfy\ldm\modules\diffusionmodules\openaimodel.py", line 852, in forward h = forward_timestep_embed(module, h, emb, context, transformer_options, time_context=time_context, num_video_frames=num_video_frames, image_only_indicator=image_only_indicator) File "D:\Apps\ComfyUI\comfy\ldm\modules\diffusionmodules\openaimodel.py", line 38, in forward_timestep_embed x = layer(x, emb) File "D:\Apps\AnaConda3\envs\ComfyUI\lib\site-packages\torch\nn\modules\module.py", line 1518, in _wrapped_call_impl return self._call_impl(*args, kwargs) File "D:\Apps\AnaConda3\envs\ComfyUI\lib\site-packages\torch\nn\modules\module.py", line 1527, in _call_impl return forward_call(*args, kwargs) File "D:\Apps\ComfyUI\comfy\ldm\modules\diffusionmodules\openaimodel.py", line 230, in forward return checkpoint( File "D:\Apps\ComfyUI\comfy\ldm\modules\diffusionmodules\util.py", line 191, in checkpoint return func(inputs) File "D:\Apps\ComfyUI\comfy\ldm\modules\diffusionmodules\openaimodel.py", line 243, in _forward h = self.in_layers(x) File "D:\Apps\AnaConda3\envs\ComfyUI\lib\site-packages\torch\nn\modules\module.py", line 1518, in _wrapped_call_impl return self._call_impl(args, kwargs) File "D:\Apps\AnaConda3\envs\ComfyUI\lib\site-packages\torch\nn\modules\module.py", line 1527, in _call_impl return forward_call(*args, kwargs) File "D:\Apps\AnaConda3\envs\ComfyUI\lib\site-packages\torch\nn\modules\container.py", line 215, in forward input = module(input) File "D:\Apps\AnaConda3\envs\ComfyUI\lib\site-packages\torch\nn\modules\module.py", line 1518, in _wrapped_call_impl return self._call_impl(*args, kwargs) File "D:\Apps\AnaConda3\envs\ComfyUI\lib\site-packages\torch\nn\modules\module.py", line 1527, in _call_impl return forward_call(*args, *kwargs) File "D:\Apps\ComfyUI\comfy\ops.py", line 121, in forward return super().forward(args, kwargs) File "D:\Apps\AnaConda3\envs\ComfyUI\lib\site-packages\torch\nn\modules\normalization.py", line 279, in forward return F.group_norm( File "D:\Apps\AnaConda3\envs\ComfyUI\lib\site-packages\torch\nn\functional.py", line 2558, in group_norm return torch.group_norm(input, num_groups, weight, bias, eps, torch.backends.cudnn.enabled)

If I load with the previous Kolors UNET loader instead, the error is:

Error occurred when executing KSampler:

mat1 and mat2 shapes cannot be multiplied (512x4096 and 2048x640)

File "D:\Apps\ComfyUI\execution.py", line 152, in recursive_execute output_data, output_ui = get_output_data(obj, input_data_all) File "D:\Apps\ComfyUI\execution.py", line 82, in get_output_data return_values = map_node_over_list(obj, input_data_all, obj.FUNCTION, allow_interrupt=True) File "D:\Apps\ComfyUI\execution.py", line 75, in map_node_over_list results.append(getattr(obj, func)(slice_dict(input_data_all, i))) File "D:\Apps\ComfyUI\nodes.py", line 1382, in sample return common_ksampler(model, seed, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, denoise=denoise) File "D:\Apps\ComfyUI\nodes.py", line 1352, in common_ksampler samples = comfy.sample.sample(model, noise, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, File "D:\Apps\ComfyUI\comfy\sample.py", line 43, in sample samples = sampler.sample(noise, positive, negative, cfg=cfg, latent_image=latent_image, start_step=start_step, last_step=last_step, force_full_denoise=force_full_denoise, denoise_mask=noise_mask, sigmas=sigmas, callback=callback, disable_pbar=disable_pbar, seed=seed) File "D:\Apps\ComfyUI\comfy\samplers.py", line 829, in sample return sample(self.model, noise, positive, negative, cfg, self.device, sampler, sigmas, self.model_options, latent_image=latent_image, denoise_mask=denoise_mask, callback=callback, disable_pbar=disable_pbar, seed=seed) File "D:\Apps\ComfyUI\comfy\samplers.py", line 729, in sample return cfg_guider.sample(noise, latent_image, sampler, sigmas, denoise_mask, callback, disable_pbar, seed) File "D:\Apps\ComfyUI\comfy\samplers.py", line 716, in sample output = self.inner_sample(noise, latent_image, device, sampler, sigmas, denoise_mask, callback, disable_pbar, seed) File "D:\Apps\ComfyUI\comfy\samplers.py", line 695, in inner_sample samples = sampler.sample(self, sigmas, extra_args, callback, noise, latent_image, denoise_mask, disable_pbar) File "D:\Apps\ComfyUI\comfy\samplers.py", line 600, in sample samples = self.sampler_function(model_k, noise, sigmas, extra_args=extra_args, callback=k_callback, disable=disable_pbar, self.extra_options) File "D:\Apps\AnaConda3\envs\ComfyUI\lib\site-packages\torch\utils_contextlib.py", line 115, in decorate_context return func(*args, kwargs) File "D:\Apps\ComfyUI\comfy\k_diffusion\sampling.py", line 600, in sample_dpmpp_2m denoised = model(x, sigmas[i] * s_in, *extra_args) File "D:\Apps\ComfyUI\comfy\samplers.py", line 299, in call out = self.inner_model(x, sigma, model_options=model_options, seed=seed) File "D:\Apps\ComfyUI\comfy\samplers.py", line 682, in call return self.predict_noise(args, kwargs) File "D:\Apps\ComfyUI\comfy\samplers.py", line 685, in predict_noise return sampling_function(self.inner_model, x, timestep, self.conds.get("negative", None), self.conds.get("positive", None), self.cfg, model_options=model_options, seed=seed) File "D:\Apps\ComfyUI\comfy\samplers.py", line 279, in sampling_function out = calc_cond_batch(model, conds, x, timestep, model_options) File "D:\Apps\ComfyUI\comfy\samplers.py", line 228, in calc_cond_batch output = model.apply_model(inputx, timestep, c).chunk(batch_chunks) File "D:\Apps\ComfyUI\comfy\model_base.py", line 122, in apply_model model_output = self.diffusion_model(xc, t, context=context, control=control, transformer_options=transformer_options, extra_conds).float() File "D:\Apps\AnaConda3\envs\ComfyUI\lib\site-packages\torch\nn\modules\module.py", line 1518, in _wrapped_call_impl return self._call_impl(*args, kwargs) File 
"D:\Apps\AnaConda3\envs\ComfyUI\lib\site-packages\torch\nn\modules\module.py", line 1527, in _call_impl return forward_call(*args, *kwargs) File "D:\Apps\ComfyUI\comfy\ldm\modules\diffusionmodules\openaimodel.py", line 852, in forward h = forward_timestep_embed(module, h, emb, context, transformer_options, time_context=time_context, num_video_frames=num_video_frames, image_only_indicator=image_only_indicator) File "D:\Apps\ComfyUI\comfy\ldm\modules\diffusionmodules\openaimodel.py", line 44, in forward_timestep_embed x = layer(x, context, transformer_options) File "D:\Apps\AnaConda3\envs\ComfyUI\lib\site-packages\torch\nn\modules\module.py", line 1518, in _wrapped_call_impl return self._call_impl(args, kwargs) File "D:\Apps\AnaConda3\envs\ComfyUI\lib\site-packages\torch\nn\modules\module.py", line 1527, in _call_impl return forward_call(*args, kwargs) File "D:\Apps\ComfyUI\comfy\ldm\modules\attention.py", line 694, in forward x = block(x, context=context[i], transformer_options=transformer_options) File "D:\Apps\AnaConda3\envs\ComfyUI\lib\site-packages\torch\nn\modules\module.py", line 1518, in _wrapped_call_impl return self._call_impl(*args, *kwargs) File "D:\Apps\AnaConda3\envs\ComfyUI\lib\site-packages\torch\nn\modules\module.py", line 1527, in _call_impl return forward_call(args, kwargs) File "D:\Apps\ComfyUI\comfy\ldm\modules\attention.py", line 616, in forward context_attn2 = self.attn2.to_k(context_attn2) File "D:\Apps\AnaConda3\envs\ComfyUI\lib\site-packages\torch\nn\modules\module.py", line 1518, in _wrapped_call_impl return self._call_impl(*args, kwargs) File "D:\Apps\AnaConda3\envs\ComfyUI\lib\site-packages\torch\nn\modules\module.py", line 1527, in _call_impl return forward_call(*args, *kwargs) File "D:\Apps\ComfyUI\comfy\ops.py", line 65, in forward return super().forward(args, kwargs) File "D:\Apps\AnaConda3\envs\ComfyUI\lib\site-packages\torch\nn\modules\linear.py", line 114, in forward return F.linear(input, self.weight, self.bias)

KINGLIFER commented 1 month ago

Curious how to use FaceID; the node in the examples says it is legacy.

cunkai commented 1 month ago

https://github.com/MinusZoneAI/ComfyUI-Kolors-MZ/blob/main/examples/workflow_ipa_faceid.png

https://github.com/cubiq/ComfyUI_IPAdapter_plus/blob/main/examples/IPAdapter_FaceIDv2_Kolors.json