Will upscale the image depending on the selected target size type
', 512, 0, 8, 32, 64, 0.35, 32, 0, True, 0, False, 8, 0, 0, 2048, 2048, 2) {}
Traceback (most recent call last):
File "D:\sd-webui-aki-v4.4\modules\call_queue.py", line 57, in f
res = list(func(args, kwargs))
File "D:\sd-webui-aki-v4.4\modules\call_queue.py", line 36, in f
res = func(*args, kwargs)
File "D:\sd-webui-aki-v4.4\modules\img2img.py", line 208, in img2img
processed = process_images(p)
File "D:\sd-webui-aki-v4.4\modules\processing.py", line 732, in process_images
res = process_images_inner(p)
File "D:\sd-webui-aki-v4.4\extensions\sd-webui-controlnet\scripts\batch_hijack.py", line 42, in processing_process_images_hijack
return getattr(processing, '__controlnet_original_process_images_inner')(p, *args, kwargs)
File "D:\sd-webui-aki-v4.4\modules\processing.py", line 867, in process_images_inner
samples_ddim = p.sample(conditioning=p.c, unconditional_conditioning=p.uc, seeds=p.seeds, subseeds=p.subseeds, subseed_strength=p.subseed_strength, prompts=p.prompts)
File "D:\sd-webui-aki-v4.4\modules\processing.py", line 1528, in sample
samples = self.sampler.sample_img2img(self, self.init_latent, x, conditioning, unconditional_conditioning, image_conditioning=self.image_conditioning)
File "D:\sd-webui-aki-v4.4\modules\sd_samplers_kdiffusion.py", line 188, in sample_img2img
samples = self.launch_sampling(t_enc + 1, lambda: self.func(self.model_wrap_cfg, xi, extra_args=self.sampler_extra_args, disable=False, callback=self.callback_state, extra_params_kwargs))
File "D:\sd-webui-aki-v4.4\modules\sd_samplers_common.py", line 261, in launch_sampling
return func()
File "D:\sd-webui-aki-v4.4\modules\sd_samplers_kdiffusion.py", line 188, in
samples = self.launch_sampling(t_enc + 1, lambda: self.func(self.model_wrap_cfg, xi, extra_args=self.sampler_extra_args, disable=False, callback=self.callback_state, *extra_params_kwargs))
File "D:\sd-webui-aki-v4.4\python\lib\site-packages\torch\utils_contextlib.py", line 115, in decorate_context
return func(args, kwargs)
File "D:\sd-webui-aki-v4.4\modules\sd_samplers_extra.py", line 71, in restart_sampler
x = heun_step(x, old_sigma, new_sigma)
File "D:\sd-webui-aki-v4.4\modules\sd_samplers_extra.py", line 19, in heun_step
denoised = model(x, old_sigma * s_in, extra_args)
File "D:\sd-webui-aki-v4.4\python\lib\site-packages\torch\nn\modules\module.py", line 1501, in _call_impl
return forward_call(*args, *kwargs)
File "D:\sd-webui-aki-v4.4\modules\sd_samplers_cfg_denoiser.py", line 169, in forward
x_out = self.inner_model(x_in, sigma_in, cond=make_condition_dict(cond_in, image_cond_in))
File "D:\sd-webui-aki-v4.4\python\lib\site-packages\torch\nn\modules\module.py", line 1501, in _call_impl
return forward_call(args, kwargs)
File "D:\sd-webui-aki-v4.4\python\lib\site-packages\torch\utils_contextlib.py", line 115, in decorate_context
return func(*args, kwargs)
File "D:\sd-webui-aki-v4.4\extensions\multidiffusion-upscaler-for-automatic1111\tile_utils\utils.py", line 249, in wrapper
return fn(*args, *kwargs)
File "D:\sd-webui-aki-v4.4\extensions\multidiffusion-upscaler-for-automatic1111\tile_methods\multidiffusion.py", line 70, in kdiff_forward
return self.sample_one_step(x_in, org_func, repeat_func, custom_func)
File "D:\sd-webui-aki-v4.4\extensions\multidiffusion-upscaler-for-automatic1111\tile_methods\multidiffusion.py", line 165, in sample_one_step
x_tile_out = repeat_func(x_tile, bboxes)
File "D:\sd-webui-aki-v4.4\extensions\multidiffusion-upscaler-for-automatic1111\tile_methods\multidiffusion.py", line 65, in repeat_func
return self.sampler_forward(x_tile, sigma_tile, cond=cond_tile)
File "D:\sd-webui-aki-v4.4\repositories\k-diffusion\k_diffusion\external.py", line 112, in forward
eps = self.get_eps(input c_in, self.sigma_to_t(sigma), kwargs)
File "D:\sd-webui-aki-v4.4\repositories\k-diffusion\k_diffusion\external.py", line 138, in get_eps
return self.inner_model.apply_model(*args, kwargs)
File "D:\sd-webui-aki-v4.4\modules\sd_hijack_utils.py", line 17, in
setattr(resolved_obj, func_path[-1], lambda *args, *kwargs: self(args, kwargs))
File "D:\sd-webui-aki-v4.4\modules\sd_hijack_utils.py", line 28, in call
return self.__orig_func(args, kwargs)
File "D:\sd-webui-aki-v4.4\repositories\stable-diffusion-stability-ai\ldm\models\diffusion\ddpm.py", line 858, in apply_model
x_recon = self.model(x_noisy, t, cond)
File "D:\sd-webui-aki-v4.4\python\lib\site-packages\torch\nn\modules\module.py", line 1501, in _call_impl
return forward_call(args, kwargs)
File "D:\sd-webui-aki-v4.4\repositories\stable-diffusion-stability-ai\ldm\models\diffusion\ddpm.py", line 1335, in forward
out = self.diffusion_model(x, t, context=cc)
File "D:\sd-webui-aki-v4.4\python\lib\site-packages\torch\nn\modules\module.py", line 1538, in _call_impl
result = forward_call(*args, *kwargs)
File "D:\sd-webui-aki-v4.4\modules\sd_unet.py", line 91, in UNetModel_forward
return ldm.modules.diffusionmodules.openaimodel.copy_of_UNetModel_forward_for_webui(self, x, timesteps, context, args, kwargs)
File "D:\sd-webui-aki-v4.4\repositories\stable-diffusion-stability-ai\ldm\modules\diffusionmodules\openaimodel.py", line 797, in forward
h = module(h, emb, context)
File "D:\sd-webui-aki-v4.4\python\lib\site-packages\torch\nn\modules\module.py", line 1501, in _call_impl
return forward_call(*args, kwargs)
File "D:\sd-webui-aki-v4.4\repositories\stable-diffusion-stability-ai\ldm\modules\diffusionmodules\openaimodel.py", line 84, in forward
x = layer(x, context)
File "D:\sd-webui-aki-v4.4\python\lib\site-packages\torch\nn\modules\module.py", line 1501, in _call_impl
return forward_call(*args, *kwargs)
File "D:\sd-webui-aki-v4.4\repositories\stable-diffusion-stability-ai\ldm\modules\attention.py", line 334, in forward
x = block(x, context=context[i])
File "D:\sd-webui-aki-v4.4\python\lib\site-packages\torch\nn\modules\module.py", line 1501, in _call_impl
return forward_call(args, kwargs)
File "D:\sd-webui-aki-v4.4\repositories\stable-diffusion-stability-ai\ldm\modules\attention.py", line 269, in forward
return checkpoint(self._forward, (x, context), self.parameters(), self.checkpoint)
File "D:\sd-webui-aki-v4.4\repositories\stable-diffusion-stability-ai\ldm\modules\diffusionmodules\util.py", line 121, in checkpoint
return CheckpointFunction.apply(func, len(inputs), args)
File "D:\sd-webui-aki-v4.4\python\lib\site-packages\torch\autograd\function.py", line 506, in apply
return super().apply(args, kwargs) # type: ignore[misc]
File "D:\sd-webui-aki-v4.4\repositories\stable-diffusion-stability-ai\ldm\modules\diffusionmodules\util.py", line 136, in forward
output_tensors = ctx.run_function(ctx.input_tensors)
File "D:\sd-webui-aki-v4.4\python\lib\site-packages\tomesd\patch.py", line 63, in _forward
x = u_c(self.attn2(m_c(self.norm2(x)), context=context)) + x
File "D:\sd-webui-aki-v4.4\python\lib\site-packages\torch\nn\modules\module.py", line 1501, in _call_impl
return forward_call(args, kwargs)
File "D:\sd-webui-aki-v4.4\extensions\sd-webui-negpip\scripts\negpip.py", line 330, in forward
return sub_forward(x, context, mask, additional_tokens, n_times_crossframe_attn_in_self,self.conds[0],self.contokens[0],self.unconds[0],self.untokens[0])
File "D:\sd-webui-aki-v4.4\extensions\sd-webui-negpip\scripts\negpip.py", line 311, in sub_forward
context = torch.cat([context,conds],1)
RuntimeError: Sizes of tensors must match except in dimension 1. Expected size 8 but got size 1 for tensor number 1 in the list.
When negpip and multidiffusion (the Tiled Diffusion feature) are enabled at the same time, the error above is reported. System environment: Windows 11. WebUI version: aki (秋叶) integrated package v4.4, A1111 WebUI 1.6.
Testing confirms that after disabling negpip, the Tiled Diffusion feature works normally with no error.
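For context, here is a minimal PyTorch sketch of why the final `torch.cat` call fails (the shapes are hypothetical, chosen only to mirror the numbers in the traceback; this is not the extensions' actual code). Tiled Diffusion's `repeat_func` batches several tile crops together (a batch of 8 in this run), while negpip concatenates its extra prompt conditioning, still sized for the original batch of 1, onto `context` along dimension 1. `torch.cat` requires every dimension except the concatenation dimension to match, so batch 8 vs. batch 1 raises exactly this RuntimeError.

import torch

# Hypothetical shapes mirroring the failing call in negpip.py (sub_forward):
# Tiled Diffusion has repeated the cond to a per-tile batch of 8,
# but negpip's extra conds were prepared for the original batch of 1.
context = torch.randn(8, 77, 768)   # (batch, tokens, channels) after tile batching
conds   = torch.randn(1, 77, 768)   # negpip's extra tokens, still batch 1

try:
    torch.cat([context, conds], dim=1)  # concatenate along the token dimension
except RuntimeError as e:
    # "Sizes of tensors must match except in dimension 1. Expected size 8 but got size 1 ..."
    print(e)

# A possible workaround (an assumption, not a tested fix for either extension):
# expand the batch-1 conds to the tiled batch size before concatenating.
conds_expanded = conds.expand(context.shape[0], -1, -1)
merged = torch.cat([context, conds_expanded], dim=1)
print(merged.shape)  # torch.Size([8, 154, 768])

If this reading is right, the incompatibility would need to be resolved on the negpip side (or via a compatibility hook in Tiled Diffusion) by repeating its extra conds to match the per-tile batch, the same way Tiled Diffusion already repeats the main conditioning; until then, disabling negpip when using Tiled Diffusion, as tested above, is the practical workaround.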