Provide large guidance scale correction for Stable Diffusion web UI (AUTOMATIC1111), implementing the paper "Characteristic Guidance: Non-linear Correction for Diffusion Model at Large Guidance Scale"
The extension seems to work on SDXL and plain SD1.5 versions, but seems to have some issue with 1.5 models that are trained with VPRED (an uncommon, but very useful feature). At least that's the only common denominator I've found while testing it on various models.
Characteristic Guidance injecting the CFGDenoiser
Characteristic Guidance sampling:
0%| | 0/20 [00:00<?, ?it/s]
Characteristic Guidance recorded iterations info for 0 steps
Characteristic Guidance recovering the CFGDenoiser
Error completing request
Arguments: ('task(e7npzgzvnog4o3q)', 'ww', '', [], 20, 'DPM++ 2M Karras', 1, 1, 7, 512, 512, False, 0.7, 2, 'Latent', 0, 0, 0, 'Use same checkpoint', 'Use same sampler', '', '', [], <gradio.routes.Request object at 0x0000024E27FC1300>, 0, False, '', 0.8, -1, False, -1, 0, 0, 0, 1, 1, 30, 1, 0, -4, 1, 0.4, 0.5, 2, True, 'How to set parameters? Check our github!', 'More ControlNet', True, False, 1, False, False, False, 1.1, 1.5, 100, 0.7, False, False, True, False, False, 0, 'Gustavosta/MagicPrompt-Stable-Diffusion', '', False, 7, 100, 'Constant', 0, 'Constant', 0, 4, True, 'MEAN', 'AD', 1, True, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, False, -1, -1, 0, '1,1', 'Horizontal', '', 2, 1, UiControlNetUnit(enabled=False, module='none', model='None', weight=1, image=None, resize_mode='Crop and Resize', low_vram=False, processor_res=-1, threshold_a=-1, threshold_b=-1, guidance_start=0, guidance_end=1, pixel_perfect=False, control_mode='Balanced', inpaint_crop_input_image=False, hr_option='Both', save_detected_map=True, advanced_weighting=None), UiControlNetUnit(enabled=False, module='none', model='None', weight=1, image=None, resize_mode='Crop and Resize', low_vram=False, processor_res=-1, threshold_a=-1, threshold_b=-1, guidance_start=0, guidance_end=1, pixel_perfect=False, control_mode='Balanced', inpaint_crop_input_image=False, hr_option='Both', save_detected_map=True, advanced_weighting=None), UiControlNetUnit(enabled=False, module='none', model='None', weight=1, image=None, resize_mode='Crop and Resize', low_vram=False, processor_res=-1, threshold_a=-1, threshold_b=-1, guidance_start=0, guidance_end=1, pixel_perfect=False, control_mode='Balanced', inpaint_crop_input_image=False, hr_option='Both', save_detected_map=True, advanced_weighting=None), False, 0, 1, 0, 'Version 2', 1.2, 0.9, 0, 0.5, 0, 1, 1.4, 0.2, 0, 0.5, 0, 1, 1, 1, 0, 0.5, 0, 1, False, True, 3, 4, 0.15, 0.3, 'bicubic', 0.5, 2, True, False, False, 0.75, 1, 1, False, False, 'positive', 'comma', 0, False, False, 'start', 
'', 1, '', [], 0, '', [], 0, '', [], True, False, False, False, 0, False, None, None, False, None, None, False, None, None, False, 50) {}
Traceback (most recent call last):
File "C:\SDV_17\stable-diffusion-webui\modules\call_queue.py", line 57, in f
res = list(func(*args, **kwargs))
File "C:\SDV_17\stable-diffusion-webui\modules\call_queue.py", line 36, in f
res = func(*args, **kwargs)
File "C:\SDV_17\stable-diffusion-webui\modules\txt2img.py", line 55, in txt2img
processed = processing.process_images(p)
File "C:\SDV_17\stable-diffusion-webui\modules\processing.py", line 734, in process_images
res = process_images_inner(p)
File "C:\SDV_17\stable-diffusion-webui\extensions\sd-webui-controlnet\scripts\batch_hijack.py", line 42, in processing_process_images_hijack
return getattr(processing, '__controlnet_original_process_images_inner')(p, *args, **kwargs)
File "C:\SDV_17\stable-diffusion-webui\modules\processing.py", line 868, in process_images_inner
samples_ddim = p.sample(conditioning=p.c, unconditional_conditioning=p.uc, seeds=p.seeds, subseeds=p.subseeds, subseed_strength=p.subseed_strength, prompts=p.prompts)
File "C:\SDV_17\stable-diffusion-webui\extensions\CharacteristicGuidanceWebUI\scripts\CHGextension.py", line 770, in wrapper
raise e
File "C:\SDV_17\stable-diffusion-webui\extensions\CharacteristicGuidanceWebUI\scripts\CHGextension.py", line 766, in wrapper
result = sample(conditioning, unconditional_conditioning, seeds, subseeds, subseed_strength,
File "C:\SDV_17\stable-diffusion-webui\modules\processing.py", line 1142, in sample
samples = self.sampler.sample(self, x, conditioning, unconditional_conditioning, image_conditioning=self.txt2img_image_conditioning(x))
File "C:\SDV_17\stable-diffusion-webui\modules\sd_samplers_kdiffusion.py", line 235, in sample
samples = self.launch_sampling(steps, lambda: self.func(self.model_wrap_cfg, x, extra_args=self.sampler_extra_args, disable=False, callback=self.callback_state, **extra_params_kwargs))
File "C:\SDV_17\stable-diffusion-webui\modules\sd_samplers_common.py", line 261, in launch_sampling
return func()
File "C:\SDV_17\stable-diffusion-webui\modules\sd_samplers_kdiffusion.py", line 235, in <lambda>
samples = self.launch_sampling(steps, lambda: self.func(self.model_wrap_cfg, x, extra_args=self.sampler_extra_args, disable=False, callback=self.callback_state, **extra_params_kwargs))
File "C:\SDV_17\stable-diffusion-webui\venv\lib\site-packages\torch\utils_contextlib.py", line 115, in decorate_context
return func(*args, **kwargs)
File "C:\SDV_17\stable-diffusion-webui\repositories\k-diffusion\k_diffusion\sampling.py", line 594, in sample_dpmpp_2m
denoised = model(x, sigmas[i] * s_in, **extra_args)
File "C:\SDV_17\stable-diffusion-webui\venv\lib\site-packages\torch\nn\modules\module.py", line 1501, in _call_impl
return forward_call(*args, **kwargs)
File "C:\SDV_17\stable-diffusion-webui\extensions\CharacteristicGuidanceWebUI\scripts\CHGextension.py", line 128, in forward
x_out = self.Chara_iteration(None, x_in, sigma_in, uncond, cond_scale, conds_list,
File "C:\SDV_17\stable-diffusion-webui\extensions\CharacteristicGuidanceWebUI\scripts\CHGextension.py", line 226, in Chara_iteration
c_out, c_in = [utils.append_dims(x, x_in.ndim) for x in self.inner_model.get_scalings(sigma_in)]
ValueError: too many values to unpack (expected 2)
The extension seems to work on SDXL and plain SD1.5 versions, but seems to have some issue with 1.5 models that are trained with VPRED (an uncommon, but very useful feature). At least that's the only common denominator I've found while testing it on various models.
Characteristic Guidance injecting the CFGDenoiser Characteristic Guidance sampling: 0%| | 0/20 [00:00<?, ?it/s] Characteristic Guidance recorded iterations info for 0 steps Characteristic Guidance recovering the CFGDenoiser Error completing request Arguments: ('task(e7npzgzvnog4o3q)', 'ww', '', [], 20, 'DPM++ 2M Karras', 1, 1, 7, 512, 512, False, 0.7, 2, 'Latent', 0, 0, 0, 'Use same checkpoint', 'Use same sampler', '', '', [], <gradio.routes.Request object at 0x0000024E27FC1300>, 0, False, '', 0.8, -1, False, -1, 0, 0, 0, 1, 1, 30, 1, 0, -4, 1, 0.4, 0.5, 2, True, 'How to set parameters? Check our github!', 'More ControlNet', True, False, 1, False, False, False, 1.1, 1.5, 100, 0.7, False, False, True, False, False, 0, 'Gustavosta/MagicPrompt-Stable-Diffusion', '', False, 7, 100, 'Constant', 0, 'Constant', 0, 4, True, 'MEAN', 'AD', 1, True, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, False, -1, -1, 0, '1,1', 'Horizontal', '', 2, 1, UiControlNetUnit(enabled=False, module='none', model='None', weight=1, image=None, resize_mode='Crop and Resize', low_vram=False, processor_res=-1, threshold_a=-1, threshold_b=-1, guidance_start=0, guidance_end=1, pixel_perfect=False, control_mode='Balanced', inpaint_crop_input_image=False, hr_option='Both', save_detected_map=True, advanced_weighting=None), UiControlNetUnit(enabled=False, module='none', model='None', weight=1, image=None, resize_mode='Crop and Resize', low_vram=False, processor_res=-1, threshold_a=-1, threshold_b=-1, guidance_start=0, guidance_end=1, pixel_perfect=False, control_mode='Balanced', inpaint_crop_input_image=False, hr_option='Both', save_detected_map=True, advanced_weighting=None), UiControlNetUnit(enabled=False, module='none', model='None', weight=1, image=None, resize_mode='Crop and Resize', low_vram=False, processor_res=-1, threshold_a=-1, threshold_b=-1, guidance_start=0, guidance_end=1, pixel_perfect=False, control_mode='Balanced', inpaint_crop_input_image=False, hr_option='Both', save_detected_map=True, 
advanced_weighting=None), False, 0, 1, 0, 'Version 2', 1.2, 0.9, 0, 0.5, 0, 1, 1.4, 0.2, 0, 0.5, 0, 1, 1, 1, 0, 0.5, 0, 1, False, True, 3, 4, 0.15, 0.3, 'bicubic', 0.5, 2, True, False, False, 0.75, 1, 1, False, False, 'positive', 'comma', 0, False, False, 'start', '', 1, '', [], 0, '', [], 0, '', [], True, False, False, False, 0, False, None, None, False, None, None, False, None, None, False, 50) {} Traceback (most recent call last): File "C:\SDV_17\stable-diffusion-webui\modules\call_queue.py", line 57, in f res = list(func(*args, kwargs)) File "C:\SDV_17\stable-diffusion-webui\modules\call_queue.py", line 36, in f res = func(*args, *kwargs) File "C:\SDV_17\stable-diffusion-webui\modules\txt2img.py", line 55, in txt2img processed = processing.process_images(p) File "C:\SDV_17\stable-diffusion-webui\modules\processing.py", line 734, in process_images res = process_images_inner(p) File "C:\SDV_17\stable-diffusion-webui\extensions\sd-webui-controlnet\scripts\batch_hijack.py", line 42, in processing_process_images_hijack return getattr(processing, '__controlnet_original_process_images_inner')(p, args, kwargs) File "C:\SDV_17\stable-diffusion-webui\modules\processing.py", line 868, in process_images_inner samples_ddim = p.sample(conditioning=p.c, unconditional_conditioning=p.uc, seeds=p.seeds, subseeds=p.subseeds, subseed_strength=p.subseed_strength, prompts=p.prompts) File "C:\SDV_17\stable-diffusion-webui\extensions\CharacteristicGuidanceWebUI\scripts\CHGextension.py", line 770, in wrapper raise e File "C:\SDV_17\stable-diffusion-webui\extensions\CharacteristicGuidanceWebUI\scripts\CHGextension.py", line 766, in wrapper result = sample(conditioning, unconditional_conditioning, seeds, subseeds, subseed_strength, File "C:\SDV_17\stable-diffusion-webui\modules\processing.py", line 1142, in sample samples = self.sampler.sample(self, x, conditioning, unconditional_conditioning, image_conditioning=self.txt2img_image_conditioning(x)) File 
"C:\SDV_17\stable-diffusion-webui\modules\sd_samplers_kdiffusion.py", line 235, in sample samples = self.launch_sampling(steps, lambda: self.func(self.model_wrap_cfg, x, extra_args=self.sampler_extra_args, disable=False, callback=self.callback_state, extra_params_kwargs)) File "C:\SDV_17\stable-diffusion-webui\modules\sd_samplers_common.py", line 261, in launch_sampling return func() File "C:\SDV_17\stable-diffusion-webui\modules\sd_samplers_kdiffusion.py", line 235, in
samples = self.launch_sampling(steps, lambda: self.func(self.model_wrap_cfg, x, extra_args=self.sampler_extra_args, disable=False, callback=self.callback_state, **extra_params_kwargs))
File "C:\SDV_17\stable-diffusion-webui\venv\lib\site-packages\torch\utils_contextlib.py", line 115, in decorate_context
return func(*args, **kwargs)
File "C:\SDV_17\stable-diffusion-webui\repositories\k-diffusion\k_diffusion\sampling.py", line 594, in sample_dpmpp_2m
denoised = model(x, sigmas[i] * s_in, **extra_args)
File "C:\SDV_17\stable-diffusion-webui\venv\lib\site-packages\torch\nn\modules\module.py", line 1501, in _call_impl
return forward_call(*args, **kwargs)
File "C:\SDV_17\stable-diffusion-webui\extensions\CharacteristicGuidanceWebUI\scripts\CHGextension.py", line 128, in forward
x_out = self.Chara_iteration(None, x_in, sigma_in, uncond, cond_scale, conds_list,
File "C:\SDV_17\stable-diffusion-webui\extensions\CharacteristicGuidanceWebUI\scripts\CHGextension.py", line 226, in Chara_iteration
c_out, c_in = [utils.append_dims(x, x_in.ndim) for x in self.inner_model.get_scalings(sigma_in)]
ValueError: too many values to unpack (expected 2)