Closed Gushousekai195 closed 1 year ago
It seems like you're using ControlNet (and potentially other plugins) in conjunction with FABRIC, is that accurate? I haven't tested FABRIC's compatibility with other plugins, so it would be helpful if you could try turning off all other plugins to see whether this is an issue with FABRIC itself or a compatibility issue between it and another plugin. Also, if you can figure out which plugin is incompatible with FABRIC, please let me know so I can investigate.
Keep in mind that this is an alpha version of the plugin, so issues like this are not unexpected.
I've just tested compatibility with ControlNet and as far as I can tell, it seems to work well with canny
but does not work with reference
(which is expected as this mode also works through attention injection). Make sure to update all your plugins to the latest version and let me know if you find any incompatibilities.
I disabled every single extension and still...
Error completing request
Arguments: ('task(fl205u9ussi8xsw)', 'xxxxx', 'xxxxx', [], 20, 16, False, False, 1, 1, 7, -1.0, -1.0, 0, 0, 0, False, 768, 512, False, 0.7, 2, 'Latent', 0, 0, 0, 0, '', '', [], 0, [<PIL.Image.Image image mode=RGB size=944x1200 at 0x14BEDE4CDC0>], [], False, 4, 0, 0.8, 0, 0.8, 0.5, False, '0', False, False, 'positive', 'comma', 0, False, False, '', 1, '', [], 0, '', [], 0, '', [], True, False, False, False, 0) {}
Traceback (most recent call last):
File "C:\Users\mattb\stable-diffusion-webui\modules\call_queue.py", line 57, in f
res = list(func(*args, **kwargs))
File "C:\Users\mattb\stable-diffusion-webui\modules\call_queue.py", line 37, in f
res = func(*args, **kwargs)
File "C:\Users\mattb\stable-diffusion-webui\modules\txt2img.py", line 57, in txt2img
processed = processing.process_images(p)
File "C:\Users\mattb\stable-diffusion-webui\modules\processing.py", line 610, in process_images
res = process_images_inner(p)
File "C:\Users\mattb\stable-diffusion-webui\modules\processing.py", line 728, in process_images_inner
samples_ddim = p.sample(conditioning=p.c, unconditional_conditioning=p.uc, seeds=p.seeds, subseeds=p.subseeds, subseed_strength=p.subseed_strength, prompts=p.prompts)
File "C:\Users\mattb\stable-diffusion-webui\modules\processing.py", line 976, in sample
samples = self.sampler.sample(self, x, conditioning, unconditional_conditioning, image_conditioning=self.txt2img_image_conditioning(x))
File "C:\Users\mattb\stable-diffusion-webui\modules\sd_samplers_kdiffusion.py", line 383, in sample
samples = self.launch_sampling(steps, lambda: self.func(self.model_wrap_cfg, x, extra_args={
File "C:\Users\mattb\stable-diffusion-webui\modules\sd_samplers_kdiffusion.py", line 257, in launch_sampling
return func()
File "C:\Users\mattb\stable-diffusion-webui\modules\sd_samplers_kdiffusion.py", line 383, in <lambda>
samples = self.launch_sampling(steps, lambda: self.func(self.model_wrap_cfg, x, extra_args={
File "C:\Users\mattb\stable-diffusion-webui\venv\lib\site-packages\torch\utils\_contextlib.py", line 115, in decorate_context
return func(*args, **kwargs)
File "C:\Users\mattb\stable-diffusion-webui\repositories\k-diffusion\k_diffusion\sampling.py", line 594, in sample_dpmpp_2m
denoised = model(x, sigmas[i] * s_in, **extra_args)
File "C:\Users\mattb\stable-diffusion-webui\venv\lib\site-packages\torch\nn\modules\module.py", line 1501, in _call_impl
return forward_call(*args, **kwargs)
File "C:\Users\mattb\stable-diffusion-webui\modules\sd_samplers_kdiffusion.py", line 156, in forward
x_out[a:b] = self.inner_model(x_in[a:b], sigma_in[a:b], cond=make_condition_dict(c_crossattn, image_cond_in[a:b]))
File "C:\Users\mattb\stable-diffusion-webui\venv\lib\site-packages\torch\nn\modules\module.py", line 1501, in _call_impl
return forward_call(*args, **kwargs)
File "C:\Users\mattb\stable-diffusion-webui\repositories\k-diffusion\k_diffusion\external.py", line 112, in forward
eps = self.get_eps(input * c_in, self.sigma_to_t(sigma), **kwargs)
File "C:\Users\mattb\stable-diffusion-webui\repositories\k-diffusion\k_diffusion\external.py", line 138, in get_eps
return self.inner_model.apply_model(*args, **kwargs)
File "C:\Users\mattb\stable-diffusion-webui\modules\sd_hijack_utils.py", line 17, in <lambda>
setattr(resolved_obj, func_path[-1], lambda *args, **kwargs: self(*args, **kwargs))
File "C:\Users\mattb\stable-diffusion-webui\modules\sd_hijack_utils.py", line 28, in __call__
return self.__orig_func(*args, **kwargs)
File "C:\Users\mattb\stable-diffusion-webui\repositories\stable-diffusion-stability-ai\ldm\models\diffusion\ddpm.py", line 858, in apply_model
x_recon = self.model(x_noisy, t, **cond)
File "C:\Users\mattb\stable-diffusion-webui\venv\lib\site-packages\torch\nn\modules\module.py", line 1501, in _call_impl
return forward_call(*args, **kwargs)
File "C:\Users\mattb\stable-diffusion-webui\repositories\stable-diffusion-stability-ai\ldm\models\diffusion\ddpm.py", line 1335, in forward
out = self.diffusion_model(x, t, context=cc)
File "C:\Users\mattb\stable-diffusion-webui\venv\lib\site-packages\torch\nn\modules\module.py", line 1501, in _call_impl
return forward_call(*args, **kwargs)
File "C:\Users\mattb\stable-diffusion-webui\extensions\sd-webui-fabric\scripts\patching.py", line 57, in new_forward
ctx = context[batch_size:][:1].expand(all_zs.size(0), -1, -1) # (n_pos + n_neg, p_seq, p_dim)
RuntimeError: The expanded size of the tensor (1) must match the existing size (0) at non-singleton dimension 0. Target sizes: [1, -1, -1]. Tensor sizes: [0, 77, 768]
Latest update has fixed the issue.
Just installed the plugin and now the webui will not work at all, feedback disabled or not.
Good feelings gone