AndreyRGW opened this issue 7 months ago
*** Error completing request | 0/4 [00:00<?, ?it/s] *** Arguments: ('task(7th7oq4pc64mwcp)', <gradio.routes.Request object at 0x000001F3F28C6C80>, 'perfect artwork, abandoned city, post-apocalypse, anime,', '(worst quality, low quality, blurry:1.2), (bad teeth, deformed teeth, deformed lips), (bad anatomy, bad proportions:1.1), (deformed iris, deformed pupils), (deformed eyes, bad eyes), (deformed face, ugly face, bad face), (deformed hands, bad hands, fused fingers), morbid, mutilated, mutation, disfigured', [], 1, 1, 2, 512, 912, True, 0.501, 2.5, 'Latent (nearest)', 4, 0, 0, 'Use same checkpoint', 'DPM++ 2M', 'Automatic', '', '', [], 0, 8, 'Euler Dy', 'Karras', False, '', 0.8, 3050637858, False, -1, 0, 0, 0, False, False, {'ad_model': 'face_yolov8n.pt', 'ad_model_classes': '', 'ad_prompt': '', 'ad_negative_prompt': '', 'ad_confidence': 0.3, 'ad_mask_k_largest': 0, 'ad_mask_min_ratio': 0, 'ad_mask_max_ratio': 1, 'ad_x_offset': 0, 'ad_y_offset': 0, 'ad_dilate_erode': 4, 'ad_mask_merge_invert': 'None', 'ad_mask_blur': 4, 'ad_denoising_strength': 0.4, 'ad_inpaint_only_masked': True, 'ad_inpaint_only_masked_padding': 32, 'ad_use_inpaint_width_height': False, 'ad_inpaint_width': 512, 'ad_inpaint_height': 512, 'ad_use_steps': False, 'ad_steps': 28, 'ad_use_cfg_scale': False, 'ad_cfg_scale': 7, 'ad_use_checkpoint': False, 'ad_checkpoint': 'Use same checkpoint', 'ad_use_vae': False, 'ad_vae': 'Use same VAE', 'ad_use_sampler': False, 'ad_sampler': 'DPM++ 2M', 'ad_scheduler': 'Use same scheduler', 'ad_use_noise_multiplier': False, 'ad_noise_multiplier': 1, 'ad_use_clip_skip': False, 'ad_clip_skip': 1, 'ad_restore_face': False, 'ad_controlnet_model': 'None', 'ad_controlnet_module': 'None', 'ad_controlnet_weight': 1, 'ad_controlnet_guidance_start': 0, 'ad_controlnet_guidance_end': 1, 'is_api': ()}, {'ad_model': 'None', 'ad_model_classes': '', 'ad_prompt': '', 'ad_negative_prompt': '', 'ad_confidence': 0.3, 'ad_mask_k_largest': 0, 'ad_mask_min_ratio': 0, 'ad_mask_max_ratio': 1, 'ad_x_offset': 0, 'ad_y_offset': 0, 'ad_dilate_erode': 4, 'ad_mask_merge_invert': 'None', 'ad_mask_blur': 4, 'ad_denoising_strength': 0.4, 'ad_inpaint_only_masked': True, 'ad_inpaint_only_masked_padding': 32, 'ad_use_inpaint_width_height': False, 'ad_inpaint_width': 512, 'ad_inpaint_height': 512, 'ad_use_steps': False, 'ad_steps': 28, 'ad_use_cfg_scale': False, 'ad_cfg_scale': 7, 'ad_use_checkpoint': False, 'ad_checkpoint': 'Use same checkpoint', 'ad_use_vae': False, 'ad_vae': 'Use same VAE', 'ad_use_sampler': False, 'ad_sampler': 'DPM++ 2M', 'ad_scheduler': 'Use same scheduler', 'ad_use_noise_multiplier': False, 'ad_noise_multiplier': 1, 'ad_use_clip_skip': False, 'ad_clip_skip': 1, 'ad_restore_face': False, 'ad_controlnet_model': 'None', 'ad_controlnet_module': 'None', 'ad_controlnet_weight': 1, 'ad_controlnet_guidance_start': 0, 'ad_controlnet_guidance_end': 1, 'is_api': ()}, False, 'MultiDiffusion', False, True, 1024, 1024, 96, 96, 48, 4, 'None', 2, False, 10, 1, 1, 64, False, False, False, False, False, 0.4, 0.4, 0.2, 0.2, '', '', 'Background', 0.2, -1.0, False, 0.4, 0.4, 0.2, 0.2, '', '', 'Background', 0.2, -1.0, False, 0.4, 0.4, 0.2, 0.2, '', '', 'Background', 0.2, -1.0, False, 0.4, 0.4, 0.2, 0.2, '', '', 'Background', 0.2, -1.0, False, 0.4, 0.4, 0.2, 0.2, '', '', 'Background', 0.2, -1.0, False, 0.4, 0.4, 0.2, 0.2, '', '', 'Background', 0.2, -1.0, False, 0.4, 0.4, 0.2, 0.2, '', '', 'Background', 0.2, -1.0, False, 0.4, 0.4, 0.2, 0.2, '', '', 'Background', 0.2, -1.0, False, 'DemoFusion', False, 128, 64, 4, 2, False, 10, 
1, 1, 64, False, True, 3, 1, 1, True, 0.85, 0.6, 4, False, False, 2048, 128, True, True, True, False, False, 1.6, 0.97, 0.4, 0, 20, 0, 12, '', True, False, False, False, 512, False, True, ['Face'], False, '{\n "face_detector": "RetinaFace",\n "rules": {\n "then": {\n "face_processor": "img2img",\n "mask_generator": {\n "name": "BiSeNet",\n "params": {\n "fallback_ratio": 0.1\n }\n }\n }\n }\n}', 'None', 40, False, False, 20, 4, 4, 0.4, 0.95, 2, 2, 0.4, 0.5, False, 1, False, False, 0.6, 0.9, 0.25, 1, True, False, UiControlNetUnit(enabled=False, module='none', model='None', weight=1, image=None, resize_mode='Crop and Resize', low_vram=False, processor_res=-1, threshold_a=-1, threshold_b=-1, guidance_start=0, guidance_end=1, pixel_perfect=False, control_mode='Balanced', inpaint_crop_input_image=False, hr_option='Both', save_detected_map=True, advanced_weighting=None), UiControlNetUnit(enabled=False, module='none', model='None', weight=1, image=None, resize_mode='Crop and Resize', low_vram=False, processor_res=-1, threshold_a=-1, threshold_b=-1, guidance_start=0, guidance_end=1, pixel_perfect=False, control_mode='Balanced', inpaint_crop_input_image=False, hr_option='Both', save_detected_map=True, advanced_weighting=None), UiControlNetUnit(enabled=False, module='none', model='None', weight=1, image=None, resize_mode='Crop and Resize', low_vram=False, processor_res=-1, threshold_a=-1, threshold_b=-1, guidance_start=0, guidance_end=1, pixel_perfect=False, control_mode='Balanced', inpaint_crop_input_image=False, hr_option='Both', save_detected_map=True, advanced_weighting=None), False, '1.5', 0, False, 0.01, 0.5, -0.13, 0, 0, 0, 0, True, 0, 1, 0, 'Version 2', 1.1, 0.6, 0, 0.5, 0, 1, 1.2, 0.4, 0, 0.5, 0, 1, 1, 1, 0, 0.5, 0, 1, 0, False, 'Default', 'Default', 1, False, 0, False, 0, 0, 0, 0, False, True, 3, 4, 0.15, 0.3, 'bicubic', 0.5, 2, True, False, 0, None, False, '0', '0', 'inswapper_128.onnx', 'CodeFormer', 1, True, 'None', 1, 1, False, True, 1, 0, 0, False, 0.5, True, False, 'CUDA', False, 0, 'None', '', None, False, False, 0.5, 0, False, 0, 0, False, False, 0, 0, 1, 0, 0, 0, False, False, 'Straight Abs.', 'Flat', False, False, 'positive', 'comma', 0, False, False, 'start', '', 1, '', [], 0, '', [], 0, '', [], True, False, False, False, False, False, False, 0, False, 'General', True, False, '', '', '', '', '', 'Reset ALL General', 'Not set', 'Not set', 'Not set', 1, 'Not set', 'Disabled', 1.3, 'Not set', 1, 'Not set', 'Not set', 1.3, 1.3, 'Not set', False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, 'Not set', 'Disabled', 1.3, 'Not set', 'Not set', 1.3, 'Not set', 'Disabled', 1, 'Not set', 1.3, 'Not set', 'Not set', 1, 1.3, 'Not set', 'Not set', 'Not set', 'Not set', 'Not set', 'Not set', 'Disabled', 'Not set', 1.3, 1.3, 1.3, 1.3, 'Not set', 'Not set', 1, True, True, 'Not set', 'Not set', 1, 'Not set', 'Disabled', 'Not set', 'Disabled', 1, 1, 1, 1, 'Not set', 'Not set', 1, 'Not set', 'Not set', 1, 'Not set', 'Not set', 'Not set', 'Not set', 'Not set', 'Not set', 1, 'Not set', 'Not set', 1, 'Not set', 'Not set', 1.3, 'Not set', 'Not set', 1, 'Not set', 'Not set', 1, 'Not set', 'Not set', 1, 1, 'Not set', 'Not set', 1, 'Not set', 'Not set', 1, 'Not set', 'Not set', 1, 'Not set', 1, 'Not set', 1, 'Not set', 'Not set', 1, 'Not set', 'Not set', 1, 'Not set', 'Not set', 1, 'Not set', 'Disabled', 1, 'Not set', 'Disabled', 1, 'Not set', 'Not set', 1, 'Not set', 'Disabled', 'Not set', 
'Disabled', 1, 1, 'Not set', 1, 'Not set', 1, 'Not set', 'Not set', 1, 'Not set', 'Not set', 1, 'Not set', 'Not set', 1, 'Not set', 'Not set', 1, 'Not set', 'Not set', 1, 'Not set', 'Disabled', 1, 'Not set', 'Disabled', 1, 'Not set', 'Not set', 1, 'Not set', 'Not set', 1, 'Not set', 'Not set', 1, 'Not set', 'Not set', 1, 'Not set', 'Not set', 1, 'Not set', 'Not set', 1, 'Not set', 'Not set', 1, 'Not set', 1, 'Not set', 1, 'Not set', 'Not set', 1, 'Not set', 'Not set', 1, 'Not set', 'Not set', 1, 'Not set', 'Disabled', 1, 'Not set', 1, 'Not set', 1, 'Not set', 'Not set', 1, 'Not set', 'Not set', 1, 'Not set', 'Not set', 1, 'Not set', 'Disabled', 1, 'Not set', 1, 1.6, 0.97, 0.4, 0, 20, 0, 12, '', True, False, False, False, 512, False, True, ['Face'], False, '{\n "face_detector": "RetinaFace",\n "rules": {\n "then": {\n "face_processor": "img2img",\n "mask_generator": {\n "name": "BiSeNet",\n "params": {\n "fallback_ratio": 0.1\n }\n }\n }\n }\n}', 'None', 40, None, None, False, None, None, False, None, None, False, 50, 7, 1.5, True, '16bpc', '.tiff', 1.2) {}
Traceback (most recent call last):
  File "F:\WBC\automatic1111_dev\modules\call_queue.py", line 57, in f
    res = list(func(*args, **kwargs))
  File "F:\WBC\automatic1111_dev\modules\call_queue.py", line 36, in f
    res = func(*args, **kwargs)
  File "F:\WBC\automatic1111_dev\modules\txt2img.py", line 109, in txt2img
    processed = processing.process_images(p)
  File "F:\WBC\automatic1111_dev\modules\processing.py", line 845, in process_images
    res = process_images_inner(p)
  File "F:\WBC\automatic1111_dev\extensions\sd-webui-controlnet\scripts\batch_hijack.py", line 59, in processing_process_images_hijack
    return getattr(processing, '__controlnet_original_process_images_inner')(p, *args, **kwargs)
  File "F:\WBC\automatic1111_dev\modules\processing.py", line 981, in process_images_inner
    samples_ddim = p.sample(conditioning=p.c, unconditional_conditioning=p.uc, seeds=p.seeds, subseeds=p.subseeds, subseed_strength=p.subseed_strength, prompts=p.prompts)
  File "F:\WBC\automatic1111_dev\modules\processing.py", line 1328, in sample
    samples = self.sampler.sample(self, x, conditioning, unconditional_conditioning, image_conditioning=self.txt2img_image_conditioning(x))
  File "F:\WBC\automatic1111_dev\modules\sd_samplers_kdiffusion.py", line 218, in sample
    samples = self.launch_sampling(steps, lambda: self.func(self.model_wrap_cfg, x, extra_args=self.sampler_extra_args, disable=False, callback=self.callback_state, **extra_params_kwargs))
  File "F:\WBC\automatic1111_dev\modules\sd_samplers_common.py", line 272, in launch_sampling
    return func()
  File "F:\WBC\automatic1111_dev\modules\sd_samplers_kdiffusion.py", line 218, in <lambda>
    samples = self.launch_sampling(steps, lambda: self.func(self.model_wrap_cfg, x, extra_args=self.sampler_extra_args, disable=False, callback=self.callback_state, **extra_params_kwargs))
  File "F:\WBC\automatic1111_dev\venv\lib\site-packages\torch\utils\_contextlib.py", line 115, in decorate_context
    return func(*args, **kwargs)
  File "F:\WBC\automatic1111_dev\extensions\sd-webui-smea\scripts\sd-webui-smea.py", line 185, in sample_euler_dy
    denoised = model(x, sigma_hat * s_in, **extra_args)
  File "F:\WBC\automatic1111_dev\venv\lib\site-packages\torch\nn\modules\module.py", line 1518, in _wrapped_call_impl
    return self._call_impl(*args, **kwargs)
  File "F:\WBC\automatic1111_dev\venv\lib\site-packages\torch\nn\modules\module.py", line 1527, in _call_impl
    return forward_call(*args, **kwargs)
  File "F:\WBC\automatic1111_dev\modules\sd_samplers_cfg_denoiser.py", line 237, in forward
    x_out = self.inner_model(x_in, sigma_in, cond=make_condition_dict(cond_in, image_cond_in))
  File "F:\WBC\automatic1111_dev\venv\lib\site-packages\torch\nn\modules\module.py", line 1518, in _wrapped_call_impl
    return self._call_impl(*args, **kwargs)
  File "F:\WBC\automatic1111_dev\venv\lib\site-packages\torch\nn\modules\module.py", line 1527, in _call_impl
    return forward_call(*args, **kwargs)
  File "F:\WBC\automatic1111_dev\repositories\k-diffusion\k_diffusion\external.py", line 112, in forward
    eps = self.get_eps(input * c_in, self.sigma_to_t(sigma), **kwargs)
  File "F:\WBC\automatic1111_dev\repositories\k-diffusion\k_diffusion\external.py", line 138, in get_eps
    return self.inner_model.apply_model(*args, **kwargs)
  File "F:\WBC\automatic1111_dev\modules\sd_models_xl.py", line 44, in apply_model
    return self.model(x, t, cond)
  File "F:\WBC\automatic1111_dev\venv\lib\site-packages\torch\nn\modules\module.py", line 1518, in _wrapped_call_impl
    return self._call_impl(*args, **kwargs)
  File "F:\WBC\automatic1111_dev\venv\lib\site-packages\torch\nn\modules\module.py", line 1527, in _call_impl
    return forward_call(*args, **kwargs)
  File "F:\WBC\automatic1111_dev\modules\sd_hijack_utils.py", line 18, in <lambda>
    setattr(resolved_obj, func_path[-1], lambda *args, **kwargs: self(*args, **kwargs))
  File "F:\WBC\automatic1111_dev\modules\sd_hijack_utils.py", line 32, in __call__
    return self.__orig_func(*args, **kwargs)
  File "F:\WBC\automatic1111_dev\repositories\generative-models\sgm\modules\diffusionmodules\wrappers.py", line 28, in forward
    return self.diffusion_model(
  File "F:\WBC\automatic1111_dev\venv\lib\site-packages\torch\nn\modules\module.py", line 1518, in _wrapped_call_impl
    return self._call_impl(*args, **kwargs)
  File "F:\WBC\automatic1111_dev\venv\lib\site-packages\torch\nn\modules\module.py", line 1568, in _call_impl
    result = forward_call(*args, **kwargs)
  File "F:\WBC\automatic1111_dev\modules\sd_unet.py", line 91, in UNetModel_forward
    return original_forward(self, x, timesteps, context, *args, **kwargs)
  File "F:\WBC\automatic1111_dev\repositories\generative-models\sgm\modules\diffusionmodules\openaimodel.py", line 993, in forward
    h = module(h, emb, context)
  File "F:\WBC\automatic1111_dev\venv\lib\site-packages\torch\nn\modules\module.py", line 1518, in _wrapped_call_impl
    return self._call_impl(*args, **kwargs)
  File "F:\WBC\automatic1111_dev\venv\lib\site-packages\torch\nn\modules\module.py", line 1527, in _call_impl
    return forward_call(*args, **kwargs)
  File "F:\WBC\automatic1111_dev\repositories\generative-models\sgm\modules\diffusionmodules\openaimodel.py", line 100, in forward
    x = layer(x, context)
  File "F:\WBC\automatic1111_dev\venv\lib\site-packages\torch\nn\modules\module.py", line 1518, in _wrapped_call_impl
    return self._call_impl(*args, **kwargs)
  File "F:\WBC\automatic1111_dev\venv\lib\site-packages\torch\nn\modules\module.py", line 1527, in _call_impl
    return forward_call(*args, **kwargs)
  File "F:\WBC\automatic1111_dev\repositories\generative-models\sgm\modules\attention.py", line 627, in forward
    x = block(x, context=context[i])
  File "F:\WBC\automatic1111_dev\venv\lib\site-packages\torch\nn\modules\module.py", line 1518, in _wrapped_call_impl
    return self._call_impl(*args, **kwargs)
  File "F:\WBC\automatic1111_dev\venv\lib\site-packages\torch\nn\modules\module.py", line 1527, in _call_impl
    return forward_call(*args, **kwargs)
  File "F:\WBC\automatic1111_dev\repositories\generative-models\sgm\modules\attention.py", line 459, in forward
    return checkpoint(
  File "F:\WBC\automatic1111_dev\repositories\generative-models\sgm\modules\diffusionmodules\util.py", line 165, in checkpoint
    return CheckpointFunction.apply(func, len(inputs), *args)
  File "F:\WBC\automatic1111_dev\venv\lib\site-packages\torch\autograd\function.py", line 539, in apply
    return super().apply(*args, **kwargs)  # type: ignore[misc]
  File "F:\WBC\automatic1111_dev\repositories\generative-models\sgm\modules\diffusionmodules\util.py", line 182, in forward
    output_tensors = ctx.run_function(*ctx.input_tensors)
  File "F:\WBC\automatic1111_dev\repositories\generative-models\sgm\modules\attention.py", line 467, in _forward
    self.attn1(
  File "F:\WBC\automatic1111_dev\venv\lib\site-packages\torch\nn\modules\module.py", line 1518, in _wrapped_call_impl
    return self._call_impl(*args, **kwargs)
  File "F:\WBC\automatic1111_dev\venv\lib\site-packages\torch\nn\modules\module.py", line 1568, in _call_impl
    result = forward_call(*args, **kwargs)
  File "F:\WBC\automatic1111_dev\extensions-builtin\hypertile\hypertile.py", line 307, in wrapper
    out = params.forward(x, *args[1:], **kwargs)
  File "F:\WBC\automatic1111_dev\modules\sd_hijack_optimizations.py", line 523, in scaled_dot_product_attention_forward
    k = k_in.view(batch_size, -1, h, head_dim).transpose(1, 2)
RuntimeError: shape '[12, -1, 10, 64]' is invalid for input of size 573440
---
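The crash itself is the k_in.view(batch_size, -1, h, head_dim) call at the bottom of the trace: 573440 elements cannot be split that way because batch_size * h * head_dim = 12 * 10 * 64 = 7680 does not divide 573440. A standalone snippet that hits the same RuntimeError (the 896 x 640 shape below is just one tensor with that element count, not the real k_in):

```python
import torch

# batch_size = 12, h = 10 heads, head_dim = 64 are taken from the error message itself;
# the 896 x 640 tensor is only a stand-in with the same total element count.
batch_size, h, head_dim = 12, 10, 64
k_in = torch.zeros(896, 640)  # 896 * 640 = 573440 elements

# 573440 is not divisible by 12 * 10 * 64 = 7680, so this raises:
# RuntimeError: shape '[12, -1, 10, 64]' is invalid for input of size 573440
k = k_in.view(batch_size, -1, h, head_dim).transpose(1, 2)
```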
I don't know why, but smea just isn't compatible with HyperTile at some resolutions. So far I have found at least two incompatible resolutions: 912x512 and 1000x768.
I'm not sure how exactly HyperTile works, so I don't know how to fix this for now.
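To illustrate the kind of mismatch that could make only certain resolutions break: HyperTile folds spatial tiles into the batch axis using a split chosen from one token grid, and a split that divides one grid cleanly will not divide another. Everything in the sketch below (the rearrange pattern, the grids, the 2x3 split) is illustrative and not taken from hypertile.py or sd-webui-smea:

```python
import torch
from einops import rearrange

# Toy version of HyperTile-style tiling: fold an (nh, nw) grid of tiles into the batch axis.
def fold_tiles(x, h, w, nh, nw):
    # (B, H*W, C) -> (B*nh*nw, (H/nh)*(W/nw), C)
    return rearrange(x, "b (nh h nw w) c -> (b nh nw) (h w) c",
                     h=h // nh, w=w // nw, nh=nh, nw=nw)

B, C = 2, 640
h0, w0 = 64, 114          # latent grid for 512x912 (512/8 = 64, 912/8 = 114)
nh, nw = 2, 3             # a split that divides this grid evenly

x0 = torch.zeros(B, h0 * w0, C)
print(fold_tiles(x0, h0, w0, nh, nw).shape)   # torch.Size([12, 1216, 640]) -- works

# If a later call sees a different token grid (hypothetical numbers) while the
# same split is still assumed, the reshape no longer works out:
h1, w1 = 32, 28
x1 = torch.zeros(B, h1 * w1, C)
try:
    fold_tiles(x1, h1, w1, nh, nw)            # 28 is not divisible by nw = 3
except Exception as e:
    print(type(e).__name__)                   # shape mismatch error
```

If something along these lines happens when Euler Dy changes the latent during sampling, that would explain why only some resolutions trip it up, but I haven't confirmed this in the extension code.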