Closed: Neytiri7 closed this issue 1 year ago.
Oh, nice, they renamed a class upstream. Fixed.
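For the record, the rename shows up in the extension as nothing more than the base class of its denoiser subclass changing. Roughly like this, where the pre-rename name is my assumption about older webui builds rather than something quoted in this thread:

```python
# Sketch of the change, not the actual commit; the old attribute name is assumed.
from modules import sd_samplers_kdiffusion

# older webui builds (assumed): sd_samplers_kdiffusion.CFGDenoiser
# newer webui builds:           sd_samplers_kdiffusion.CFGDenoiserKDiffusion
class CustomCFGDenoiser(sd_samplers_kdiffusion.CFGDenoiserKDiffusion):
    # the real extension layers its dynamic-thresholding overrides on top
    pass
```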
FYI, that broke it for webui v1.5.1:
*** Error loading script: dynamic_thresholding.py
Traceback (most recent call last):
  File "/content/stable-diffusion-webui/modules/scripts.py", line 319, in load_scripts
    script_module = script_loading.load_module(scriptfile.path)
  File "/content/stable-diffusion-webui/modules/script_loading.py", line 10, in load_module
    module_spec.loader.exec_module(module)
  File "<frozen importlib._bootstrap_external>", line 883, in exec_module
  File "<frozen importlib._bootstrap>", line 241, in _call_with_frames_removed
  File "/content/stable-diffusion-webui/extensions/sd-dynamic-thresholding/scripts/dynamic_thresholding.py", line 192, in <module>
    class CustomCFGDenoiser(sd_samplers_kdiffusion.CFGDenoiserKDiffusion):
AttributeError: module 'modules.sd_samplers_kdiffusion' has no attribute 'CFGDenoiserKDiffusion'
I had to git checkout c02d806cac2a280bbcc90b586fc37bd560cd3274 to get it working again.
it, uh, probably works on both now?
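If anyone needs to stay compatible with both layouts, the usual trick is to resolve the base class at import time and fall back when the new name is missing. This is only a sketch of that pattern, not the extension's actual code, and the old CFGDenoiser name is an assumption about pre-1.6 webui:

```python
# Hedged sketch of a dual-version base-class lookup, not the extension's code.
# Assumes newer webui exposes CFGDenoiserKDiffusion and older webui exposed
# CFGDenoiser in the same module; verify against the webui version you target.
from modules import sd_samplers_kdiffusion

BaseDenoiser = getattr(
    sd_samplers_kdiffusion, "CFGDenoiserKDiffusion",        # newer webui
    getattr(sd_samplers_kdiffusion, "CFGDenoiser", None),   # webui 1.5.x and earlier (assumed)
)
if BaseDenoiser is None:
    raise RuntimeError("No known CFG denoiser class found in sd_samplers_kdiffusion")

class CustomCFGDenoiser(BaseDenoiser):
    # extension-specific behaviour (e.g. dynamic thresholding) goes here
    pass
```

That keeps a single extension revision working whether the new attribute exists or not, which is presumably what the "works on both" fix does in spirit.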
I'm getting the same error after updating A1111 to the latest version:
File "/content/sdw/modules/sd_samplers_cfg_denoiser.py", line 63, in inner_model
raise NotImplementedError()
Error completing request
Arguments: ('task(5r1za5e5hh3gugb)', '', '', [], 73, 'DPM++ 3M SDE Karras', 1, 1, 7.5, 768, 512, True, 0.56, 2, 'R-ESRGAN 4x+', 23, 0, 0, 'Use same checkpoint', 'Use same sampler', '', '', [], <gradio.routes.Request object at 0x0000028D3117B130>, 0, False, '', 0.8, -1, False, -1, 0, 0, 0, 0.01, True,
{'ad_model': 'face_yolov8n.pt', 'ad_prompt': '', 'ad_negative_prompt': '', 'ad_confidence': 0.3, 'ad_mask_k_largest': 0, 'ad_mask_min_ratio': 0, 'ad_mask_max_ratio': 1, 'ad_x_offset': 0, 'ad_y_offset': 0, 'ad_dilate_erode': 4, 'ad_mask_merge_invert': 'None', 'ad_mask_blur': 4, 'ad_denoising_strength': 0.4, 'ad_inpaint_only_masked': True, 'ad_inpaint_only_masked_padding': 32, 'ad_use_inpaint_width_height': False, 'ad_inpaint_width': 512, 'ad_inpaint_height': 512, 'ad_use_steps': False, 'ad_steps': 28, 'ad_use_cfg_scale': False, 'ad_cfg_scale': 7, 'ad_use_sampler': False, 'ad_sampler': 'DPM++ 2M Karras', 'ad_use_noise_multiplier': False, 'ad_noise_multiplier': 1, 'ad_use_clip_skip': False, 'ad_clip_skip': 1, 'ad_restore_face': False, 'ad_controlnet_model': 'None', 'ad_controlnet_module': 'inpaint_global_harmonious', 'ad_controlnet_weight': 1, 'ad_controlnet_guidance_start': 0, 'ad_controlnet_guidance_end': 1, 'is_api': ()},
{'ad_model': 'hand_yolov8n.pt', 'ad_prompt': '', 'ad_negative_prompt': '', 'ad_confidence': 0.3, 'ad_mask_k_largest': 0, 'ad_mask_min_ratio': 0, 'ad_mask_max_ratio': 1, 'ad_x_offset': 0, 'ad_y_offset': 0, 'ad_dilate_erode': 4, 'ad_mask_merge_invert': 'None', 'ad_mask_blur': 4, 'ad_denoising_strength': 0.4, 'ad_inpaint_only_masked': True, 'ad_inpaint_only_masked_padding': 32, 'ad_use_inpaint_width_height': False, 'ad_inpaint_width': 512, 'ad_inpaint_height': 512, 'ad_use_steps': False, 'ad_steps': 28, 'ad_use_cfg_scale': False, 'ad_cfg_scale': 7, 'ad_use_sampler': False, 'ad_sampler': 'DPM++ 2M Karras', 'ad_use_noise_multiplier': False, 'ad_noise_multiplier': 1, 'ad_use_clip_skip': False, 'ad_clip_skip': 1, 'ad_restore_face': False, 'ad_controlnet_model': 'None', 'ad_controlnet_module': 'inpaint_global_harmonious', 'ad_controlnet_weight': 1, 'ad_controlnet_guidance_start': 0, 'ad_controlnet_guidance_end': 1, 'is_api': ()},
{'ad_model': 'person_yolov8n-seg.pt', 'ad_prompt': '', 'ad_negative_prompt': '', 'ad_confidence': 0.3, 'ad_mask_k_largest': 0, 'ad_mask_min_ratio': 0, 'ad_mask_max_ratio': 1, 'ad_x_offset': 0, 'ad_y_offset': 0, 'ad_dilate_erode': 4, 'ad_mask_merge_invert': 'None', 'ad_mask_blur': 4, 'ad_denoising_strength': 0.4, 'ad_inpaint_only_masked': True, 'ad_inpaint_only_masked_padding': 32, 'ad_use_inpaint_width_height': False, 'ad_inpaint_width': 512, 'ad_inpaint_height': 512, 'ad_use_steps': False, 'ad_steps': 28, 'ad_use_cfg_scale': False, 'ad_cfg_scale': 7, 'ad_use_sampler': False, 'ad_sampler': 'DPM++ 2M Karras', 'ad_use_noise_multiplier': False, 'ad_noise_multiplier': 1, 'ad_use_clip_skip': False, 'ad_clip_skip': 1, 'ad_restore_face': False, 'ad_controlnet_model': 'None', 'ad_controlnet_module': 'inpaint_global_harmonious', 'ad_controlnet_weight': 1, 'ad_controlnet_guidance_start': 0, 'ad_controlnet_guidance_end': 1, 'is_api': ()},
{'ad_model': 'mediapipe_face_mesh_eyes_only', 'ad_prompt': '', 'ad_negative_prompt': '', 'ad_confidence': 0.3, 'ad_mask_k_largest': 0, 'ad_mask_min_ratio': 0, 'ad_mask_max_ratio': 1, 'ad_x_offset': 0, 'ad_y_offset': 0, 'ad_dilate_erode': 4, 'ad_mask_merge_invert': 'None', 'ad_mask_blur': 4, 'ad_denoising_strength': 0.4, 'ad_inpaint_only_masked': True, 'ad_inpaint_only_masked_padding': 32, 'ad_use_inpaint_width_height': False, 'ad_inpaint_width': 512, 'ad_inpaint_height': 512, 'ad_use_steps': False, 'ad_steps': 28, 'ad_use_cfg_scale': False, 'ad_cfg_scale': 7, 'ad_use_sampler': False, 'ad_sampler': 'DPM++ 2M Karras', 'ad_use_noise_multiplier': False, 'ad_noise_multiplier': 1, 'ad_use_clip_skip': False, 'ad_clip_skip': 1, 'ad_restore_face': False, 'ad_controlnet_model': 'None', 'ad_controlnet_module': 'inpaint_global_harmonious', 'ad_controlnet_weight': 1, 'ad_controlnet_guidance_start': 0, 'ad_controlnet_guidance_end': 1, 'is_api': ()},
False, 'MultiDiffusion', False, True, 1024, 1024, 96, 96, 48, 4, 'None', 2, False, 10, 1, 1, 64, False, False, False, False, False, 0.4, 0.4, 0.2, 0.2, '', '', 'Background', 0.2, -1.0, False, 0.4, 0.4, 0.2, 0.2, '', '', 'Background', 0.2, -1.0, False, 0.4, 0.4, 0.2, 0.2, '', '', 'Background', 0.2, -1.0, False, 0.4, 0.4, 0.2, 0.2, '', '', 'Background', 0.2, -1.0, False, 0.4, 0.4, 0.2, 0.2, '', '', 'Background', 0.2, -1.0, False, 0.4, 0.4, 0.2, 0.2, '', '', 'Background', 0.2, -1.0, False, 0.4, 0.4, 0.2, 0.2, '', '', 'Background', 0.2, -1.0, False, 0.4, 0.4, 0.2, 0.2, '', '', 'Background', 0.2, -1.0, False, 960, 64, True, True, True, False, False, '', 0, True, 13.5, 100, 'Constant', 0, 'Constant', 0, 4, True, 'MEAN', 'AD', 1, <scripts.controlnet_ui.controlnet_ui_group.UiControlNetUnit object at 0x0000028DF9383D90>, <scripts.controlnet_ui.controlnet_ui_group.UiControlNetUnit object at 0x0000028DF9381B40>, <scripts.controlnet_ui.controlnet_ui_group.UiControlNetUnit object at 0x0000028D707C2B00>, False, False, 'Matrix', 'Horizontal', 'Mask', 'Prompt', '1,1', '0.2', False, False, False, 'Attention', False, '0', '0', '0.4', None, '0', '0', False, False, 'positive', 'comma', 0, False, False, '', 1, '', [], 0, '', [], 0, '', [], True, False, False, False, 0, False, None, None, False, None, None, False, None, None, False, 50) {}
Traceback (most recent call last):
  File "O:\AI\SynologyDrive\stable-diffusion-webui\modules\call_queue.py", line 58, in f
    res = list(func(*args, **kwargs))
  File "O:\AI\SynologyDrive\stable-diffusion-webui\modules\call_queue.py", line 37, in f
    res = func(*args, **kwargs)
  File "O:\AI\SynologyDrive\stable-diffusion-webui\modules\txt2img.py", line 55, in txt2img
    processed = processing.process_images(p)
  File "O:\AI\SynologyDrive\stable-diffusion-webui\modules\processing.py", line 725, in process_images
    res = process_images_inner(p)
  File "O:\AI\SynologyDrive\stable-diffusion-webui\extensions\sd-webui-controlnet\scripts\batch_hijack.py", line 42, in processing_process_images_hijack
    return getattr(processing, '__controlnet_original_process_images_inner')(p, *args, **kwargs)
  File "O:\AI\SynologyDrive\stable-diffusion-webui\modules\processing.py", line 860, in process_images_inner
    samples_ddim = p.sample(conditioning=p.c, unconditional_conditioning=p.uc, seeds=p.seeds, subseeds=p.subseeds, subseed_strength=p.subseed_strength, prompts=p.prompts)
  File "O:\AI\SynologyDrive\stable-diffusion-webui\modules\processing.py", line 1133, in sample
    samples = self.sampler.sample(self, x, conditioning, unconditional_conditioning, image_conditioning=self.txt2img_image_conditioning(x))
  File "O:\AI\SynologyDrive\stable-diffusion-webui\modules\sd_samplers_kdiffusion.py", line 227, in sample
    samples = self.launch_sampling(steps, lambda: self.func(self.model_wrap_cfg, x, extra_args=self.sampler_extra_args, disable=False, callback=self.callback_state, **extra_params_kwargs))
  File "O:\AI\SynologyDrive\stable-diffusion-webui\modules\sd_samplers_common.py", line 244, in launch_sampling
    return func()
  File "O:\AI\SynologyDrive\stable-diffusion-webui\modules\sd_samplers_kdiffusion.py", line 227, in <lambda>
    samples = self.launch_sampling(steps, lambda: self.func(self.model_wrap_cfg, x, extra_args=self.sampler_extra_args, disable=False, callback=self.callback_state, **extra_params_kwargs))
  File "O:\AI\SynologyDrive\stable-diffusion-webui\venv\lib\site-packages\torch\utils\_contextlib.py", line 115, in decorate_context
    return func(*args, **kwargs)
  File "O:\AI\SynologyDrive\stable-diffusion-webui\repositories\k-diffusion\k_diffusion\sampling.py", line 668, in sample_dpmpp_3m_sde
    denoised = model(x, sigmas[i] * s_in, **extra_args)
  File "O:\AI\SynologyDrive\stable-diffusion-webui\venv\lib\site-packages\torch\nn\modules\module.py", line 1501, in _call_impl
    return forward_call(*args, **kwargs)
  File "O:\AI\SynologyDrive\stable-diffusion-webui\modules\sd_samplers_cfg_denoiser.py", line 175, in forward
    x_out[a:b] = self.inner_model(x_in[a:b], sigma_in[a:b], cond=make_condition_dict(subscript_cond(cond_in, a, b), image_cond_in[a:b]))
  File "O:\AI\SynologyDrive\stable-diffusion-webui\modules\sd_samplers_cfg_denoiser.py", line 63, in inner_model
    raise NotImplementedError()
NotImplementedError
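For what it's worth, this second traceback is not the original AttributeError: it dies inside webui's own sd_samplers_cfg_denoiser.py, where the base CFGDenoiser raises NotImplementedError from inner_model unless a subclass (the k-diffusion one) supplies the wrapped model. Reaching that raise during sampling means the denoiser object actually being called is effectively the bare base class, which usually comes down to the extension and webui being on mismatched versions. A simplified paraphrase of that contract, not the upstream source:

```python
# Simplified paraphrase of the pattern the traceback points at; this is not
# copied from stable-diffusion-webui, and the factory argument is hypothetical.
import torch


class CFGDenoiser(torch.nn.Module):
    """Base CFG denoiser: the wrapped model must come from a subclass."""

    @property
    def inner_model(self):
        # The line the traceback ends on: the base class has no model of its own.
        raise NotImplementedError()

    def forward(self, x, sigma, **kwargs):
        # Heavily simplified: the real forward batches cond/uncond chunks
        # and routes each chunk through inner_model.
        return self.inner_model(x, sigma, **kwargs)


class CFGDenoiserKDiffusion(CFGDenoiser):
    """k-diffusion flavour: lazily wraps the loaded SD model."""

    def __init__(self, make_denoiser):
        super().__init__()
        self.model_wrap = None
        self._make_denoiser = make_denoiser  # hypothetical factory callable

    @property
    def inner_model(self):
        if self.model_wrap is None:
            self.model_wrap = self._make_denoiser()
        return self.model_wrap
```

So the first thing worth checking is that sd-dynamic-thresholding is updated to a build that targets your webui version (or checked out to a matching commit, as above).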