Closed matrix4767 closed 1 year ago
Are you using another plugin alongside FABRIC? It's possible that there's some compatibility issue. If not, what settings did you use?
I think I have found the issue. The BREAK command in a prompt caused the error, and I suspect the Regional Prompter would too, since it also tinkers with tokens.
I see, can you give me a prompt that doesn't work for you?
Unfortunately loras break it as well. Any prompt that has loras in it would.
OK, it's not BREAK or loras by themselves, but rather it's not supporting more than 75 tokens.
Oh, I see. Yeah that sounds like something that could be an issue, I'll look into it.
Should be fixed now.
Attempting to use Fabric will lead to the first error, then the second one when feedback is cleared and/or the extension is disabled. GPU memory will also not clear.
Traceback (most recent call last): File "G:\stable-webui\modules\call_queue.py", line 55, in f res = list(func(*args, **kwargs)) File "G:\stable-webui\modules\call_queue.py", line 35, in f res = func(*args, **kwargs) File "G:\stable-webui\modules\txt2img.py", line 57, in txt2img processed = processing.process_images(p) File "G:\stable-webui\modules\processing.py", line 620, in process_images res = process_images_inner(p) File "G:\stable-webui\extensions\sd-webui-controlnet\scripts\batch_hijack.py", line 42, in processing_process_images_hijack return getattr(processing, '__controlnet_original_process_images_inner')(p, *args, **kwargs) File "G:\stable-webui\modules\processing.py", line 739, in process_images_inner samples_ddim = p.sample(conditioning=p.c, unconditional_conditioning=p.uc, seeds=p.seeds, subseeds=p.subseeds, subseed_strength=p.subseed_strength, prompts=p.prompts) File "G:\stable-webui\modules\processing.py", line 992, in sample samples = self.sampler.sample(self, x, conditioning, unconditional_conditioning, image_conditioning=self.txt2img_image_conditioning(x)) File "G:\stable-webui\modules\sd_samplers_kdiffusion.py", line 439, in sample samples = self.launch_sampling(steps, lambda: self.func(self.model_wrap_cfg, x, extra_args={ File "G:\stable-webui\modules\sd_samplers_kdiffusion.py", line 278, in launch_sampling return func() File "G:\stable-webui\modules\sd_samplers_kdiffusion.py", line 439, in
samples = self.launch_sampling(steps, lambda: self.func(self.model_wrap_cfg, x, extra_args={
File "G:\stable-webui\venv\lib\site-packages\torch\utils_contextlib.py", line 115, in decorate_context
return func(*args, **kwargs)
File "G:\stable-webui\repositories\k-diffusion\k_diffusion\sampling.py", line 145, in sample_euler_ancestral
denoised = model(x, sigmas[i] * s_in, **extra_args)
File "G:\stable-webui\venv\lib\site-packages\torch\nn\modules\module.py", line 1501, in _call_impl
return forward_call(*args, **kwargs)
File "G:\stable-webui\modules\sd_samplers_kdiffusion.py", line 177, in forward
x_out[a:b] = self.inner_model(x_in[a:b], sigma_in[a:b], cond=make_condition_dict(c_crossattn, image_cond_in[a:b]))
File "G:\stable-webui\venv\lib\site-packages\torch\nn\modules\module.py", line 1501, in _call_impl
return forward_call(*args, **kwargs)
File "G:\stable-webui\repositories\k-diffusion\k_diffusion\external.py", line 112, in forward
eps = self.get_eps(input * c_in, self.sigma_to_t(sigma), **kwargs)
File "G:\stable-webui\repositories\k-diffusion\k_diffusion\external.py", line 138, in get_eps
return self.inner_model.apply_model(*args, **kwargs)
File "G:\stable-webui\modules\sd_hijack_utils.py", line 17, in
setattr(resolved_obj, func_path[-1], lambda *args, **kwargs: self(*args, **kwargs))
File "G:\stable-webui\modules\sd_hijack_utils.py", line 28, in call
return self.__orig_func(*args, **kwargs)
File "G:\stable-webui\repositories\stable-diffusion-stability-ai\ldm\models\diffusion\ddpm.py", line 858, in apply_model
x_recon = self.model(x_noisy, t, **cond)
File "G:\stable-webui\venv\lib\site-packages\torch\nn\modules\module.py", line 1501, in _call_impl
return forward_call(*args, **kwargs)
File "G:\stable-webui\repositories\stable-diffusion-stability-ai\ldm\models\diffusion\ddpm.py", line 1335, in forward
out = self.diffusion_model(x, t, context=cc)
File "G:\stable-webui\venv\lib\site-packages\torch\nn\modules\module.py", line 1501, in _call_impl
return forward_call(*args, **kwargs)
File "G:\stable-webui\extensions\sd-webui-fabric\scripts\patching.py", line 50, in newforward
= self._fabric_old_forward(all_zs, ts, ctx)
File "G:\stable-webui\modules\sd_unet.py", line 91, in UNetModel_forward
return ldm.modules.diffusionmodules.openaimodel.copy_of_UNetModel_forward_for_webui(self, x, timesteps, context, *args, **kwargs)
File "G:\stable-webui\repositories\stable-diffusion-stability-ai\ldm\modules\diffusionmodules\openaimodel.py", line 797, in forward
h = module(h, emb, context)
File "G:\stable-webui\venv\lib\site-packages\torch\nn\modules\module.py", line 1501, in _call_impl
return forward_call(*args, **kwargs)
File "G:\stable-webui\repositories\stable-diffusion-stability-ai\ldm\modules\diffusionmodules\openaimodel.py", line 84, in forward
x = layer(x, context)
File "G:\stable-webui\venv\lib\site-packages\torch\nn\modules\module.py", line 1501, in _call_impl
return forward_call(*args, **kwargs)
File "G:\stable-webui\repositories\stable-diffusion-stability-ai\ldm\modules\attention.py", line 334, in forward
x = block(x, context=context[i])
File "G:\stable-webui\venv\lib\site-packages\torch\nn\modules\module.py", line 1501, in _call_impl
return forward_call(*args, **kwargs)
File "G:\stable-webui\repositories\stable-diffusion-stability-ai\ldm\modules\attention.py", line 269, in forward
return checkpoint(self._forward, (x, context), self.parameters(), self.checkpoint)
File "G:\stable-webui\repositories\stable-diffusion-stability-ai\ldm\modules\diffusionmodules\util.py", line 121, in checkpoint
return CheckpointFunction.apply(func, len(inputs), *args)
File "G:\stable-webui\venv\lib\site-packages\torch\autograd\function.py", line 506, in apply
return super().apply(*args, **kwargs)  # type: ignore[misc]
File "G:\stable-webui\repositories\stable-diffusion-stability-ai\ldm\modules\diffusionmodules\util.py", line 136, in forward
output_tensors = ctx.run_function(*ctx.input_tensors)
File "G:\stable-webui\repositories\stable-diffusion-stability-ai\ldm\modules\attention.py", line 272, in _forward
x = self.attn1(self.norm1(x), context=context if self.disable_self_attn else None) + x
File "G:\stable-webui\venv\lib\site-packages\torch\nn\modules\module.py", line 1501, in _call_impl
return forward_call(*args, **kwargs)
File "G:\stable-webui\extensions\sd-webui-fabric\scripts\patching.py", line 37, in patched_attn1_forward
out = attn1._fabric_old_forward(x, **kwargs)
File "G:\stable-webui\extensions\sd-webui-fabric\scripts\patching.py", line 68, in patched_attn1_forward
ctx_uncond = torch.cat([context[batch_size:], neg_hs], dim=1) # (bs, seq * (1 + n_neg), dim)
RuntimeError: Sizes of tensors must match except in dimension 1. Expected size 0 but got size 1 for tensor number 1 in the list.