TheLastBen / fast-stable-diffusion


Error using control net with Google Colab Premium instance #1630

xanderjm opened this issue 1 year ago (status: Open)

xanderjm commented 1 year ago

I'm trying to use ControlNet on Colab. It works fine on 'standard' GPU instances, but when I try to use a faster premium instance I get the following errors:

```
Error completing request
Arguments: ('task(7vzw93lwi91y0ks)', 0, 'Photography of a sandy rocky desert.', [], <PIL.Image.Image image mode=RGBA size=1200x1920 at 0x7FF81A5D9430>, None, None, None, None, None, None, 80, 0, 4, 0, 1, False, False, 1, 1, 20, 1.5, 0.45, -1.0, -1.0, 0, 0, 0, False, 1920, 1280, 1, 0, 32, 0, '', '', '', [], 0, True, True, 'depth', 'control_depth-fp16 [400750f6]', 1, None, False, 'Scale to Fit (Inner Fit)', False, False, 384, 64, 64, 1, False, False, 'none', 'None', 1, None, False, 'Scale to Fit (Inner Fit)', False, False, 64, 64, 64, 1, False, False, 'none', 'None', 1, None, False, 'Scale to Fit (Inner Fit)', False, False, 64, 64, 64, 1, False, '<ul>\n<li><code>CFG Scale</code> should be 2 or lower.</li>\n</ul>\n', True, True, '', '', True, 50, True, 1, 0, False, 4, 1, 'None', '<p style="margin-bottom:0.75em">Recommended settings: Sampling Steps: 80-100, Sampler: Euler a, Denoising strength: 0.8</p>', 128, 8, ['left', 'right', 'up', 'down'], 1, 0.05, 128, 4, 0, ['left', 'right', 'up', 'down'], False, False, 'positive', 'comma', 0, False, False, '', '<p style="margin-bottom:0.75em">Will upscale the image by the selected scale factor; use width and height sliders to set tile size</p>', 64, 0, 2, 1, '', 0, '', 0, '', True, False, False, False, 0) {}
Traceback (most recent call last):
  File "/content/gdrive/MyDrive/sd/stable-diffusion-webui/modules/call_queue.py", line 56, in f
    res = list(func(*args, **kwargs))
  File "/content/gdrive/MyDrive/sd/stable-diffusion-webui/modules/call_queue.py", line 37, in f
    res = func(*args, **kwargs)
  File "/content/gdrive/MyDrive/sd/stable-diffusion-webui/modules/img2img.py", line 171, in img2img
    processed = process_images(p)
  File "/content/gdrive/MyDrive/sd/stable-diffusion-webui/modules/processing.py", line 486, in process_images
    res = process_images_inner(p)
  File "/content/gdrive/MyDrive/sd/stable-diffusion-webui/modules/processing.py", line 632, in process_images_inner
    samples_ddim = p.sample(conditioning=c, unconditional_conditioning=uc, seeds=seeds, subseeds=subseeds, subseed_strength=p.subseed_strength, prompts=prompts)
  File "/content/gdrive/MyDrive/sd/stable-diffusion-webui/modules/processing.py", line 1048, in sample
    samples = self.sampler.sample_img2img(self, self.init_latent, x, conditioning, unconditional_conditioning, image_conditioning=self.image_conditioning)
  File "/content/gdrive/MyDrive/sd/stable-diffusion-webui/modules/sd_samplers_kdiffusion.py", line 322, in sample_img2img
    samples = self.launch_sampling(t_enc + 1, lambda: self.func(self.model_wrap_cfg, xi, extra_args=extra_args, disable=False, callback=self.callback_state, **extra_params_kwargs))
  File "/content/gdrive/MyDrive/sd/stable-diffusion-webui/modules/sd_samplers_kdiffusion.py", line 225, in launch_sampling
    return func()
  File "/content/gdrive/MyDrive/sd/stable-diffusion-webui/modules/sd_samplers_kdiffusion.py", line 322, in <lambda>
    samples = self.launch_sampling(t_enc + 1, lambda: self.func(self.model_wrap_cfg, xi, extra_args=extra_args, disable=False, callback=self.callback_state, **extra_params_kwargs))
  File "/usr/local/lib/python3.8/dist-packages/torch/autograd/grad_mode.py", line 27, in decorate_context
    return func(*args, **kwargs)
  File "/content/gdrive/MyDrive/sd/stablediffusion/src/k-diffusion/k_diffusion/sampling.py", line 145, in sample_euler_ancestral
    denoised = model(x, sigmas[i] * s_in, **extra_args)
  File "/usr/local/lib/python3.8/dist-packages/torch/nn/modules/module.py", line 1194, in _call_impl
    return forward_call(*input, **kwargs)
  File "/content/gdrive/MyDrive/sd/stable-diffusion-webui/modules/sd_samplers_kdiffusion.py", line 136, in forward
    x_out[a:b] = self.inner_model(x_in[a:b], sigma_in[a:b], cond={"c_crossattn": c_crossattn, "c_concat": [image_cond_in[a:b]]})
  File "/usr/local/lib/python3.8/dist-packages/torch/nn/modules/module.py", line 1194, in _call_impl
    return forward_call(*input, **kwargs)
  File "/content/gdrive/MyDrive/sd/stablediffusion/src/k-diffusion/k_diffusion/external.py", line 167, in forward
    return self.get_v(input * c_in, self.sigma_to_t(sigma), **kwargs) * c_out + input * c_skip
  File "/content/gdrive/MyDrive/sd/stablediffusion/src/k-diffusion/k_diffusion/external.py", line 177, in get_v
    return self.inner_model.apply_model(x, t, cond)
  File "/content/gdrive/MyDrive/sd/stable-diffusion-webui/modules/sd_hijack_utils.py", line 17, in <lambda>
    setattr(resolved_obj, func_path[-1], lambda *args, **kwargs: self(*args, **kwargs))
  File "/content/gdrive/MyDrive/sd/stable-diffusion-webui/modules/sd_hijack_utils.py", line 28, in __call__
    return self.__orig_func(*args, **kwargs)
  File "/content/gdrive/MyDrive/sd/stablediffusion/ldm/models/diffusion/ddpm.py", line 858, in apply_model
    x_recon = self.model(x_noisy, t, **cond)
  File "/usr/local/lib/python3.8/dist-packages/torch/nn/modules/module.py", line 1194, in _call_impl
    return forward_call(*input, **kwargs)
  File "/content/gdrive/MyDrive/sd/stablediffusion/ldm/models/diffusion/ddpm.py", line 1329, in forward
    out = self.diffusion_model(x, t, context=cc)
  File "/usr/local/lib/python3.8/dist-packages/torch/nn/modules/module.py", line 1194, in _call_impl
    return forward_call(*input, **kwargs)
  File "/content/gdrive/MyDrive/sd/stable-diffusion-webui/extensions/sd-webui-controlnet/scripts/hook.py", line 190, in forward2
    return forward(*args, **kwargs)
  File "/content/gdrive/MyDrive/sd/stable-diffusion-webui/extensions/sd-webui-controlnet/scripts/hook.py", line 133, in forward
    control = param.control_model(x=x_in, hint=param.hint_cond, timesteps=timesteps, context=context)
  File "/usr/local/lib/python3.8/dist-packages/torch/nn/modules/module.py", line 1194, in _call_impl
    return forward_call(*input, **kwargs)
  File "/content/gdrive/MyDrive/sd/stable-diffusion-webui/extensions/sd-webui-controlnet/scripts/cldm.py", line 115, in forward
    return self.control_model(*args, **kwargs)
  File "/usr/local/lib/python3.8/dist-packages/torch/nn/modules/module.py", line 1194, in _call_impl
    return forward_call(*input, **kwargs)
  File "/content/gdrive/MyDrive/sd/stable-diffusion-webui/extensions/sd-webui-controlnet/scripts/cldm.py", line 383, in forward
    h = module(h, emb, context)
  File "/usr/local/lib/python3.8/dist-packages/torch/nn/modules/module.py", line 1194, in _call_impl
    return forward_call(*input, **kwargs)
  File "/content/gdrive/MyDrive/sd/stablediffusion/ldm/modules/diffusionmodules/openaimodel.py", line 84, in forward
    x = layer(x, context)
  File "/usr/local/lib/python3.8/dist-packages/torch/nn/modules/module.py", line 1194, in _call_impl
    return forward_call(*input, **kwargs)
  File "/content/gdrive/MyDrive/sd/stablediffusion/ldm/modules/attention.py", line 334, in forward
    x = block(x, context=context[i])
  File "/usr/local/lib/python3.8/dist-packages/torch/nn/modules/module.py", line 1194, in _call_impl
    return forward_call(*input, **kwargs)
  File "/content/gdrive/MyDrive/sd/stablediffusion/ldm/modules/attention.py", line 269, in forward
    return checkpoint(self._forward, (x, context), self.parameters(), self.checkpoint)
  File "/content/gdrive/MyDrive/sd/stablediffusion/ldm/modules/diffusionmodules/util.py", line 114, in checkpoint
    return CheckpointFunction.apply(func, len(inputs), *args)
  File "/content/gdrive/MyDrive/sd/stablediffusion/ldm/modules/diffusionmodules/util.py", line 129, in forward
    output_tensors = ctx.run_function(*ctx.input_tensors)
  File "/content/gdrive/MyDrive/sd/stablediffusion/ldm/modules/attention.py", line 273, in _forward
    x = self.attn2(self.norm2(x), context=context) + x
  File "/usr/local/lib/python3.8/dist-packages/torch/nn/modules/module.py", line 1194, in _call_impl
    return forward_call(*input, **kwargs)
  File "/content/gdrive/MyDrive/sd/stable-diffusion-webui/modules/sd_hijack_optimizations.py", line 332, in xformers_attention_forward
    k_in = self.to_k(context_k)
  File "/usr/local/lib/python3.8/dist-packages/torch/nn/modules/module.py", line 1194, in _call_impl
    return forward_call(*input, **kwargs)
  File "/content/gdrive/MyDrive/sd/stable-diffusion-webui/extensions-builtin/Lora/lora.py", line 178, in lora_Linear_forward
    return lora_forward(self, input, torch.nn.Linear_forward_before_lora(self, input))
  File "/usr/local/lib/python3.8/dist-packages/torch/nn/modules/linear.py", line 114, in forward
    return F.linear(input, self.weight, self.bias)
RuntimeError: mat1 and mat2 shapes cannot be multiplied (154x1024 and 768x320)
```

Taikakim commented 1 year ago

ControlNet is not compatible with V2 AFAIK. The shapes in the error (a 1024-wide context hitting a 768-wide projection) are what you get when an SD 2.x checkpoint, which uses 1024-dim OpenCLIP text embeddings, is run with a ControlNet model trained for SD 1.5, whose cross-attention layers expect 768-dim embeddings.
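
If you want to verify which family a checkpoint belongs to, a hypothetical standalone sketch (not part of the webui; the key name follows the standard LDM state dict layout) that reads the cross-attention width:

```python
import torch

def context_dim(ckpt_path: str) -> int:
    """Return the text-context width of a .ckpt file:
    768 for SD 1.x (CLIP ViT-L/14), 1024 for SD 2.x (OpenCLIP)."""
    sd = torch.load(ckpt_path, map_location="cpu")
    sd = sd.get("state_dict", sd)  # some checkpoints nest under 'state_dict'
    key = ("model.diffusion_model.input_blocks.1.1."
           "transformer_blocks.0.attn2.to_k.weight")
    return sd[key].shape[1]

# Hypothetical usage: ControlNet models released for SD 1.5 will only
# match checkpoints that report 768 here.
# print(context_dim("model.ckpt"))
```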