Closed sd2530615 closed 1 year ago
Thank you for reminding me. I will fix it very soon because this is very simple.
Same problem here. It just crashes with this error.
When I try with Highres Fix enabled (since recent updates):
Error completing request
Arguments: ('task(pnu2dtxsswv0011)', 'close-up to a nymph , <lora:LeahLora:1>, with curly golden hair , and leafy dress , she comb her hair , on a mossy log , near a crystal-clear lake , in a magical forest , with glowing mushrooms , and fairies, stockings, lace, nostalgia, sexy, a small pendant, majestic oil painting by Ed Blinkey, Atey Ghailan, Studio Ghibli, by Jeremy Mann, Greg Manchess, Antonio Moro, trending on ArtStation, trending on CGSociety, Intricate, High Detail, Sharp focus, dramatic, photorealistic painting art by midjourney and greg rutkowski\n', 'cartoon, 3d, ((disfigured)), ((bad art)), ((deformed)),((extra limbs)),((close up)),((b&w)), wierd colors, blurry, (((duplicate))), ((morbid)), ((mutilated)), out of frame, extra fingers, mutated hands, ((poorly drawn hands)), ((poorly drawn face)), (((mutation))), (((deformed))), ((ugly)), blurry, ((bad anatomy)), (((bad proportions))), ((extra limbs)), cloned face, (((disfigured))), out of frame, ugly, extra limbs, (bad anatomy), gross proportions, (malformed limbs), ((missing arms)), ((missing legs)), (((extra arms))), (((extra legs))), mutated hands, (fused fingers), (too many fingers), (((long neck))), Photoshop, video game, ugly, tiling, poorly drawn hands, poorly drawn feet, poorly drawn face, out of frame, mutation, mutated, extra limbs, extra legs, extra arms, disfigured, deformed, cross-eye, body out of frame, blur, bad art, bad anatomy, 3d render, freckles, watermark, hat', [], 50, 0, False, False, 1, 1, 13, -1.0, -1.0, 0, 0, 0, False, 512, 512, True, 0.2, 2.95, '4x-UltraSharp', 50, 0, 0, ['Model hash: c35782bad8'], 0, True, 'MultiDiffusion', False, True, 1024, 1024, 96, 96, 48, 4, 'None', 2, False, False, False, False, 0.4, 0.4, 0.2, 0.2, '', '', 'Background', 0.2, False, 0.4, 0.4, 0.2, 0.2, '', '', 'Background', 0.2, False, 0.4, 0.4, 0.2, 0.2, '', '', 'Background', 0.2, False, 0.4, 0.4, 0.2, 0.2, '', '', 'Background', 0.2, False, 0.4, 0.4, 0.2, 0.2, '', '', 'Background', 0.2, False, 0.4, 0.4, 
0.2, 0.2, '', '', 'Background', 0.2, False, 0.4, 0.4, 0.2, 0.2, '', '', 'Background', 0.2, False, 0.4, 0.4, 0.2, 0.2, '', '', 'Background', 0.2, True, True, True, True, 0, 1600, 192, False, '', 0, False, False, 'LoRA', 'None', 0, 0, 'LoRA', 'None', 0, 0, 'LoRA', 'None', 0, 0, 'LoRA', 'None', 0, 0, 'LoRA', 'None', 0, 0, None, 'Refresh models', <scripts.external_code.ControlNetUnit object at 0x0000014993843430>, <scripts.external_code.ControlNetUnit object at 0x0000014993842290>, <scripts.external_code.ControlNetUnit object at 0x0000014993841930>, False, '1:1,1:2,1:2', '0:0,0:0,0:1', '0.2,0.8,0.8', 20, False, False, 'positive', 'comma', 0, False, False, '', 1, '', 0, '', 0, '', True, False, False, False, 0, None, False, None, False, None, False, 50) {}
Traceback (most recent call last):
File "C:\Users\Seb\Desktop\Stable Diffusion\stable-diffusion-webui\modules\call_queue.py", line 56, in f
res = list(func(*args, **kwargs))
File "C:\Users\Seb\Desktop\Stable Diffusion\stable-diffusion-webui\modules\call_queue.py", line 37, in f
res = func(*args, **kwargs)
File "C:\Users\Seb\Desktop\Stable Diffusion\stable-diffusion-webui\modules\txt2img.py", line 56, in txt2img
processed = process_images(p)
File "C:\Users\Seb\Desktop\Stable Diffusion\stable-diffusion-webui\modules\processing.py", line 486, in process_images
res = process_images_inner(p)
File "C:\Users\Seb\Desktop\Stable Diffusion\stable-diffusion-webui\modules\processing.py", line 636, in process_images_inner
samples_ddim = p.sample(conditioning=c, unconditional_conditioning=uc, seeds=seeds, subseeds=subseeds, subseed_strength=p.subseed_strength, prompts=prompts)
File "C:\Users\Seb\Desktop\Stable Diffusion\stable-diffusion-webui\modules\processing.py", line 924, in sample
samples = self.sampler.sample_img2img(self, samples, noise, conditioning, unconditional_conditioning, steps=self.hr_second_pass_steps or self.steps, image_conditioning=image_conditioning)
File "C:\Users\Seb\Desktop\Stable Diffusion\stable-diffusion-webui\modules\sd_samplers_kdiffusion.py", line 324, in sample_img2img
samples = self.launch_sampling(t_enc + 1, lambda: self.func(self.model_wrap_cfg, xi, extra_args=extra_args, disable=False, callback=self.callback_state, **extra_params_kwargs))
File "C:\Users\Seb\Desktop\Stable Diffusion\stable-diffusion-webui\modules\sd_samplers_kdiffusion.py", line 227, in launch_sampling
return func()
File "C:\Users\Seb\Desktop\Stable Diffusion\stable-diffusion-webui\modules\sd_samplers_kdiffusion.py", line 324, in <lambda>
samples = self.launch_sampling(t_enc + 1, lambda: self.func(self.model_wrap_cfg, xi, extra_args=extra_args, disable=False, callback=self.callback_state, **extra_params_kwargs))
File "C:\Users\Seb\Desktop\Stable Diffusion\stable-diffusion-webui\venv\lib\site-packages\torch\autograd\grad_mode.py", line 27, in decorate_context
return func(*args, **kwargs)
File "C:\Users\Seb\Desktop\Stable Diffusion\stable-diffusion-webui\repositories\k-diffusion\k_diffusion\sampling.py", line 145, in sample_euler_ancestral
denoised = model(x, sigmas[i] * s_in, **extra_args)
File "C:\Users\Seb\Desktop\Stable Diffusion\stable-diffusion-webui\venv\lib\site-packages\torch\nn\modules\module.py", line 1194, in _call_impl
return forward_call(*input, **kwargs)
File "C:\Users\Seb\Desktop\Stable Diffusion\stable-diffusion-webui\modules\sd_samplers_kdiffusion.py", line 138, in forward
x_out[a:b] = self.inner_model(x_in[a:b], sigma_in[a:b], cond={"c_crossattn": c_crossattn, "c_concat": [image_cond_in[a:b]]})
File "C:\Users\Seb\Desktop\Stable Diffusion\stable-diffusion-webui\venv\lib\site-packages\torch\nn\modules\module.py", line 1194, in _call_impl
return forward_call(*input, **kwargs)
File "C:\Users\Seb\Desktop\Stable Diffusion\stable-diffusion-webui\venv\lib\site-packages\torch\autograd\grad_mode.py", line 27, in decorate_context
return func(*args, **kwargs)
File "C:\Users\Seb\Desktop\Stable Diffusion\stable-diffusion-webui\extensions\multidiffusion-upscaler-for-automatic1111\methods\multidiffusion.py", line 79, in kdiff_repeat
return self.compute_x_tile(x_in, repeat_func, custom_func)
File "C:\Users\Seb\Desktop\Stable Diffusion\stable-diffusion-webui\extensions\multidiffusion-upscaler-for-automatic1111\methods\multidiffusion.py", line 118, in compute_x_tile
assert H == self.h and W == self.w
AssertionError
Yes, this assertion error suggests that I have to re-split the tiles when using highres fix. This is really weird because they use two different sets of height and width, and this also brings many difficulties for me. I need time to fix it.
Please update and check again. I now silently disable MultiDiffusion at the highres stage, so it should work as smoothly as it did two days ago.
According to another user's feedback, the problem has been solved, so I will close the issue. If you find anything wrong, you can reopen it here.
It works fine until it goes to upscale; then SD stops working and I get these errors on the CMD panel:
Error completing request Arguments: ('task(d58rlok9n0wo2vx)', 'masterpiece,best quality,loli,\n\n1girl,japanese clothes,white hair,long hair,geta,calm,(looking at viewer:1.2),rim lighting,happy,light particals,shining eyes,(outdoors:1.5),dancing,geta,hair buns,forehead mark, light smile,yellow eyes,\n\n(on an vast ocean:1.2),sunbeam,scenery,(ripplings under feet:1.1),clouds,(sunrise:1.2),waterscape,(a lot of light sparkles:1.2),(sunset glow:1.3),god light,ripplings,countless laterns floating on sea surface,burning sky,sea wave,water splash,torri,japanese temple on sea surface at background,\n\n(serene:1.1), (pure:1.1), (graceful:1.1), (spiritual:1.1), (mystical:1.1), (tranquil:1.1), (holy:1.1), (sacred:1.1), (devotion:0.8),\n\nsurrounded by flames,fire,\n\nyys,\n,\n\n,', '[EasyNegative:0.5], bad_prompt_version2, (worst quality, low quality:1.5), bad anatomy, fewer digits, text, old, signature, watermark, username, artist name, bad proportions,lowres, polar lowres, bad anatomy, bad face, bad hands, bad body, bad shose, bad feet, bad proportions, {bad leg}, {{more legs}}, worst quality, low quality, normal quality, gross proportions, blurry, poorly drawn asymmetric eyes, text,error, missing fingers, missing arms, missing legs, short legs, extra digit,vivid color,bag,bad-hands-5,', [], 30, 15, False, False, 1, 1, 7.5, -1.0, -1.0, 0, 0, 0, False, 512, 768, True, 0.45, 2, 'R-ESRGAN 4x+', 10, 0, 0, [], 0, True, 'MultiDiffusion', False, True, 1024, 1024, 96, 96, 48, 1, 'None', 2, False, False, 1, False, 1, 0.4, 0.4, 0.2, 0.2, '', '', False, 1, 0.4, 0.4, 0.2, 0.2, '', '', False, 1, 0.4, 0.4, 0.2, 0.2, '', '', False, 1, 0.4, 0.4, 0.2, 0.2, '', '', False, 1, 0.4, 0.4, 0.2, 0.2, '', '', False, 1, 0.4, 0.4, 0.2, 0.2, '', '', False, 1, 0.4, 0.4, 0.2, 0.2, '', '', False, 1, 0.4, 0.4, 0.2, 0.2, '', '', False, False, True, True, 0, 3072, 192, False, '', 0, <scripts.external_code.ControlNetUnit object at 0x0000027880A14CA0>, <scripts.external_code.ControlNetUnit object at 
0x0000027880A143A0>, <scripts.external_code.ControlNetUnit object at 0x0000027880A17400>, <scripts.external_code.ControlNetUnit object at 0x00000278808CE1A0>, False, '', 0.5, True, False, '', 'Lerp', False, False, 1, 0.15, False, 'OUT', ['OUT'], 5, 0, 'Bilinear', False, 'Pooling Max', False, 'Lerp', '', '', False, 'NONE:0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0\nALL:1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1\nINS:1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0\nIND:1,0,0,0,1,1,1,0,0,0,0,0,0,0,0,0,0\nINALL:1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0\nMIDD:1,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0\nOUTD:1,0,0,0,0,0,0,0,1,1,1,1,0,0,0,0,0\nOUTS:1,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1\nOUTALL:1,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1\nALL0.5:0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5\nKEEPFACE_STRONG:1,1,1,1,1,1,1,1,0,0,0,1,1,1,1,1,1\nKEEPFACE_WEAK:1,1,1,1,1,1,0.2,1,0.2,0,0,0.8,1,1,1,1,1\nCHANGEFACE_STRONG:1,0,0,0,0,0,0,0,1,1,1,0,0,0,0,0,0\nCHANGEFACE_WEAK:1,0,0,0,0,0,0,0,0.8,1,1,0.2,0,0,0,0,0\nCLOTHES:1,1,1,1,1,0,0.2,0,0.8,1,1,0.2,0,0,0,0,0\nPOSES:1,0,0,0,0,0,0.2,1,1,1,0,0,0,0,0,0,0\nARTSTYLE:1,0,0,0,0,0,0,0,0,0,0,0.8,1,1,1,1,1\nCHARACTER_DESTYLE:1,1,1,1,1,0,0,0,1,1,1,1,1,1,1,0,0\nBACKGROUND_DESTYLE:1,1,1,1,1,1,0.2,1,0.2,0,0,0.8,1,1,1,0,0\nTEST:1,0,0,0,0,0.15,0.25,0,1,1,1,1,1,1,1,1,1\n', False, 0, 'values', '0,0.25,0.5,0.75,1', 'Block ID', 'IN05-OUT05', 'none', '', '0.5,1', 'BASE,IN00,IN01,IN02,IN03,IN04,IN05,IN06,IN07,IN08,IN09,IN10,IN11,M00,OUT00,OUT01,OUT02,OUT03,OUT04,OUT05,OUT06,OUT07,OUT08,OUT09,OUT10,OUT11', 'black', '20', False, False, False, False, False, '1:1,1:2,1:2', '0:0,0:0,0:1', '0.2,0.8,0.8', 20, False, False, 'positive', 'comma', 0, False, False, '', 1, '', 0, '', 0, '', True, False, False, False, 0, None, False, None, False, None, False, None, False, 50) {}
Traceback (most recent call last):
File "F:\AI_SD\modules\call_queue.py", line 56, in f
res = list(func(*args, **kwargs))
File "F:\AI_SD\modules\call_queue.py", line 37, in f
res = func(*args, **kwargs)
File "F:\AI_SD\modules\txt2img.py", line 56, in txt2img
processed = process_images(p)
File "F:\AI_SD\modules\processing.py", line 486, in process_images
res = process_images_inner(p)
File "F:\AI_SD\modules\processing.py", line 636, in process_images_inner
samples_ddim = p.sample(conditioning=c, unconditional_conditioning=uc, seeds=seeds, subseeds=subseeds, subseed_strength=p.subseed_strength, prompts=prompts)
File "F:\AI_SD\modules\processing.py", line 908, in sample
samples = self.sampler.sample_img2img(self, samples, noise, conditioning, unconditional_conditioning, steps=self.hr_second_pass_steps or self.steps, image_conditioning=image_conditioning)
File "F:\AI_SD\modules\sd_samplers_kdiffusion.py", line 324, in sample_img2img
samples = self.launch_sampling(t_enc + 1, lambda: self.func(self.model_wrap_cfg, xi, extra_args=extra_args, disable=False, callback=self.callback_state, **extra_params_kwargs))
File "F:\AI_SD\modules\sd_samplers_kdiffusion.py", line 227, in launch_sampling
return func()
File "F:\AI_SD\modules\sd_samplers_kdiffusion.py", line 324, in <lambda>
samples = self.launch_sampling(t_enc + 1, lambda: self.func(self.model_wrap_cfg, xi, extra_args=extra_args, disable=False, callback=self.callback_state, **extra_params_kwargs))
File "F:\AI_SD\python\lib\site-packages\torch\utils_contextlib.py", line 115, in decorate_context
return func(*args, **kwargs)
File "F:\AI_SD\repositories\k-diffusion\k_diffusion\sampling.py", line 594, in sample_dpmpp_2m
denoised = model(x, sigmas[i] * s_in, **extra_args)
File "F:\AI_SD\python\lib\site-packages\torch\nn\modules\module.py", line 1501, in _call_impl
return forward_call(*args, *kwargs)
File "F:\AI_SD\modules\sd_samplers_kdiffusion.py", line 138, in forward
x_out[a:b] = self.inner_model(x_in[a:b], sigma_in[a:b], cond={"c_crossattn": c_crossattn, "c_concat": [image_cond_in[a:b]]})
File "F:\AI_SD\python\lib\site-packages\torch\nn\modules\module.py", line 1501, in _call_impl
return forward_call(*args, **kwargs)
File "F:\AI_SD\python\lib\site-packages\torch\utils_contextlib.py", line 115, in decorate_context
return func(*args, **kwargs)
File "F:\AI_SD\extensions\multidiffusion-upscaler-for-automatic1111\methods\multidiffusion.py", line 78, in kdiff_repeat
return self.compute_x_tile(x_in, repeat_func, custom_func)
File "F:\AI_SD\extensions\multidiffusion-upscaler-for-automatic1111\methods\multidiffusion.py", line 117, in compute_x_tile
assert H == self.h and W == self.w
AssertionError