feynlee / latent-upscale

Provides more options for latent upscale in img2img for Stable Diffusion Automatic1111.
MIT License

Error when trying to upscale, A1111 v1.9.3 #1

Open · kalle07 opened 1 month ago

kalle07 commented 1 month ago

Any ideas?

....


set Upscale method in run: nearest
scheduler: exponential
*** Error completing request
*** Arguments: ('task(9f3vdnrm8rnz8eu)', <gradio.routes.Request object at 0x0000024B88017490>, 0, 'portrait, young girl, brown hair, dark eyes, hand on chin', '', [], <PIL.Image.Image image mode=RGBA size=256x379 at 0x24B853B7D90>, None, None, None, None, None, None, 4, 0, 1, 1, 1, 4, 1.5, 0.5, 1.0, 379, 256, 2, 3, 0, 32, 0, '', '', '', [], False, [], '', 9, False, 1, 0.5, 4, 0, 0.5, 2, 20, 'DPM++ 3M SDE', 'Karras', False, '', 0.8, -1, False, -1, 0, 0, 0, False, False, {'ad_model': 'face_yolov8n.pt', 'ad_model_classes': '', 'ad_tap_enable': True, 'ad_prompt': '', 'ad_negative_prompt': '', 'ad_confidence': 0.3, 'ad_mask_k_largest': 0, 'ad_mask_min_ratio': 0, 'ad_mask_max_ratio': 1, 'ad_x_offset': 0, 'ad_y_offset': 0, 'ad_dilate_erode': 4, 'ad_mask_merge_invert': 'None', 'ad_mask_blur': 8, 'ad_denoising_strength': 0.4, 'ad_inpaint_only_masked': True, 'ad_inpaint_only_masked_padding': 32, 'ad_use_inpaint_width_height': False, 'ad_inpaint_width': 512, 'ad_inpaint_height': 512, 'ad_use_steps': False, 'ad_steps': 28, 'ad_use_cfg_scale': False, 'ad_cfg_scale': 5, 'ad_use_checkpoint': False, 'ad_checkpoint': 'Use same checkpoint', 'ad_use_vae': False, 'ad_vae': 'Use same VAE', 'ad_use_sampler': False, 'ad_sampler': 'DPM++ 2M', 'ad_scheduler': 'Use same scheduler', 'ad_use_noise_multiplier': False, 'ad_noise_multiplier': 1, 'ad_use_clip_skip': False, 'ad_clip_skip': 1, 'ad_restore_face': False, 'ad_controlnet_model': 'None', 'ad_controlnet_module': 'None', 'ad_controlnet_weight': 1, 'ad_controlnet_guidance_start': 0, 'ad_controlnet_guidance_end': 1, 'is_api': ()}, {'ad_model': 'None', 'ad_model_classes': '', 'ad_tap_enable': True, 'ad_prompt': '', 'ad_negative_prompt': '', 'ad_confidence': 0.3, 'ad_mask_k_largest': 0, 'ad_mask_min_ratio': 0, 'ad_mask_max_ratio': 1, 'ad_x_offset': 0, 'ad_y_offset': 0, 'ad_dilate_erode': 4, 'ad_mask_merge_invert': 'None', 'ad_mask_blur': 4, 'ad_denoising_strength': 0.4, 'ad_inpaint_only_masked': True, 'ad_inpaint_only_masked_padding': 32, 'ad_use_inpaint_width_height': False, 'ad_inpaint_width': 512, 'ad_inpaint_height': 512, 'ad_use_steps': False, 'ad_steps': 28, 'ad_use_cfg_scale': False, 'ad_cfg_scale': 7, 'ad_use_checkpoint': False, 'ad_checkpoint': 'Use same checkpoint', 'ad_use_vae': False, 'ad_vae': 'Use same VAE', 'ad_use_sampler': False, 'ad_sampler': 'DPM++ 2M', 'ad_scheduler': 'Use same scheduler', 'ad_use_noise_multiplier': False, 'ad_noise_multiplier': 1, 'ad_use_clip_skip': False, 'ad_clip_skip': 1, 'ad_restore_face': False, 'ad_controlnet_model': 'None', 'ad_controlnet_module': 'None', 'ad_controlnet_weight': 1, 'ad_controlnet_guidance_start': 0, 'ad_controlnet_guidance_end': 1, 'is_api': ()}, False, False, 'LoRA', 'None', 1, 1, 'LoRA', 'None', 1, 1, 'LoRA', 'None', 1, 1, 'LoRA', 'None', 1, 1, 'LoRA', 'None', 1, 1, None, 'Refresh models', ControlNetUnit(is_ui=True, input_mode=<InputMode.SIMPLE: 'simple'>, batch_images='', output_dir='', loopback=False, enabled=False, module='none', model='None', weight=1.0, image=None, resize_mode=<ResizeMode.INNER_FIT: 'Crop and Resize'>, low_vram=False, processor_res=512, threshold_a=-1.0, threshold_b=-1.0, guidance_start=0.0, guidance_end=1.0, pixel_perfect=False, control_mode=<ControlMode.BALANCED: 'Balanced'>, inpaint_crop_input_image=False, hr_option=<HiResFixOption.BOTH: 'Both'>, save_detected_map=True, advanced_weighting=None, effective_region_mask=None, pulid_mode=<PuLIDMode.FIDELITY: 'Fidelity'>, ipadapter_input=None, mask=None, batch_mask_dir=None, animatediff_batch=False, batch_modifiers=[], batch_image_files=[], batch_keyframe_idx=None), ControlNetUnit(is_ui=True, input_mode=<InputMode.SIMPLE: 'simple'>, batch_images='', output_dir='', loopback=False, enabled=False, module='none', model='None', weight=1.0, image=None, resize_mode=<ResizeMode.INNER_FIT: 'Crop and Resize'>, low_vram=False, processor_res=512, threshold_a=-1.0, threshold_b=-1.0, guidance_start=0.0, guidance_end=1.0, pixel_perfect=False, control_mode=<ControlMode.BALANCED: 'Balanced'>, inpaint_crop_input_image=False, hr_option=<HiResFixOption.BOTH: 'Both'>, save_detected_map=True, advanced_weighting=None, effective_region_mask=None, pulid_mode=<PuLIDMode.FIDELITY: 'Fidelity'>, ipadapter_input=None, mask=None, batch_mask_dir=None, animatediff_batch=False, batch_modifiers=[], batch_image_files=[], batch_keyframe_idx=None), ControlNetUnit(is_ui=True, input_mode=<InputMode.SIMPLE: 'simple'>, batch_images='', output_dir='', loopback=False, enabled=False, module='none', model='None', weight=1.0, image=None, resize_mode=<ResizeMode.INNER_FIT: 'Crop and Resize'>, low_vram=False, processor_res=512, threshold_a=-1.0, threshold_b=-1.0, guidance_start=0.0, guidance_end=1.0, pixel_perfect=False, control_mode=<ControlMode.BALANCED: 'Balanced'>, inpaint_crop_input_image=False, hr_option=<HiResFixOption.BOTH: 'Both'>, save_detected_map=True, advanced_weighting=None, effective_region_mask=None, pulid_mode=<PuLIDMode.FIDELITY: 'Fidelity'>, ipadapter_input=None, mask=None, batch_mask_dir=None, animatediff_batch=False, batch_modifiers=[], batch_image_files=[], batch_keyframe_idx=None), False, 0, 1, 0, 'Version 2', 1.2, 0.9, 0, 0.5, 0, 1, 1.4, 0.2, 0, 0.5, 0, 1, 1, 1, 0, 0.5, 0, 1, True, False, None, False, '0', '0', 'inswapper_128.onnx', 'CodeFormer', 1, True, 'None', 1, 1, False, True, 1, 0, 0, False, 0.5, True, False, 'CUDA', False, 0, 'None', '', None, False, False, 0.5, 0, False, 'None', 20, False, False, 0, None, [], 0, False, [], [], False, 0, 1, False, False, 0, None, [], -2, False, [], False, 0, None, None, '* CFG Scale should be 2 or lower.', True, True, '', '', True, 50, True, 1, 0, False, 4, 0.5, 'Linear', 'None', 'Recommended settings: Sampling Steps: 80-100, Sampler: Euler a, Denoising strength: 0.8', 128, 8, ['left', 'right', 'up', 'down'], 1, 0.05, 128, 4, 0, ['left', 'right', 'up', 'down'], False, False, 'positive', 'comma', 0, False, False, 'start', '', 'Will upscale the image by the selected scale factor; use width and height sliders to set tile size', 64, 0, 2, 1, '', [], 0, '', [], 0, '', [], True, False, False, False, False, False, False, 0, False, 'nearest', 'exponential', None, None, False, None, None, False, None, None, False, 50, 'Will upscale the image depending on the selected target size type', 512, 0, 8, 32, 64, 0.35, 32, 0, True, 0, False, 8, 0, 0, 2048, 2048, 2) {}
Traceback (most recent call last):
  File "D:\stable-diffusion\webui\modules\call_queue.py", line 57, in f
    res = list(func(*args, **kwargs))
  File "D:\stable-diffusion\webui\modules\call_queue.py", line 36, in f
    res = func(*args, **kwargs)
  File "D:\stable-diffusion\webui\modules\img2img.py", line 232, in img2img
    processed = process_images(p)
  File "D:\stable-diffusion\webui\modules\processing.py", line 845, in process_images
    res = process_images_inner(p)
  File "D:\stable-diffusion\webui\extensions\sd-webui-controlnet\scripts\batch_hijack.py", line 59, in processing_process_images_hijack
    return getattr(processing, '__controlnet_original_process_images_inner')(p, *args, **kwargs)
  File "D:\stable-diffusion\webui\modules\processing.py", line 915, in process_images_inner
    p.init(p.all_prompts, p.all_seeds, p.all_subseeds)
  File "D:\stable-diffusion\webui\extensions\latent-upscale\scripts\latent_upscale.py", line 67, in <lambda>
    p.init = lambda all_prompts, all_seeds, all_subseeds, **kwargs: init(
  File "D:\stable-diffusion\webui\extensions\latent-upscale\latent_upscale_overrides\init.py", line 147, in init
    p.sd_model.encode_first_stage(image))
  File "d:\stable-diffusion\system\python\lib\site-packages\torch\utils\_contextlib.py", line 115, in decorate_context
    return func(*args, **kwargs)
  File "D:\stable-diffusion\webui\repositories\generative-models\sgm\models\diffusion.py", line 127, in encode_first_stage
    z = self.first_stage_model.encode(x)
  File "D:\stable-diffusion\webui\repositories\generative-models\sgm\models\autoencoder.py", line 321, in encode
    return super().encode(x).sample()
  File "D:\stable-diffusion\webui\repositories\generative-models\sgm\models\autoencoder.py", line 308, in encode
    h = self.encoder(x)
  File "d:\stable-diffusion\system\python\lib\site-packages\torch\nn\modules\module.py", line 1501, in _call_impl
    return forward_call(*args, **kwargs)
  File "D:\stable-diffusion\webui\repositories\generative-models\sgm\modules\diffusionmodules\model.py", line 576, in forward
    hs = [self.conv_in(x)]
  File "d:\stable-diffusion\system\python\lib\site-packages\torch\nn\modules\module.py", line 1501, in _call_impl
    return forward_call(*args, **kwargs)
  File "D:\stable-diffusion\webui\extensions-builtin\Lora\networks.py", line 518, in network_Conv2d_forward
    return originals.Conv2d_forward(self, input)
  File "d:\stable-diffusion\system\python\lib\site-packages\torch\nn\modules\conv.py", line 463, in forward
    return self._conv_forward(input, self.weight, self.bias)
  File "d:\stable-diffusion\system\python\lib\site-packages\torch\nn\modules\conv.py", line 459, in _conv_forward
    return F.conv2d(input, weight, bias, self.stride,
RuntimeError: Input type (float) and bias type (struct c10::Half) should be the same
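
For context, the RuntimeError at the bottom is a precision mismatch: the extension's overridden init passes the float32 image tensor straight into the checkpoint's half-precision VAE encoder, so F.conv2d receives a float input against c10::Half weights and bias. A minimal sketch of the kind of cast that would avoid it, assuming `image` and `p` are the objects visible in the traceback and using A1111's modules.devices helpers (this is an illustrative guess, not a verified patch to the extension):

    # Hypothetical fix sketch near latent_upscale_overrides/init.py line 147:
    # cast the image tensor to the configured device and the VAE's dtype
    # before encoding, so input and fp16 encoder weights agree.
    from modules import devices  # A1111 helper; devices.dtype_vae is the VAE dtype

    image = image.to(device=devices.device, dtype=devices.dtype_vae)
    latents = p.sd_model.encode_first_stage(image)

A1111's own img2img path performs an equivalent cast before calling encode_first_stage, which would explain why plain img2img works while this override fails.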


GeneralJarrett97 commented 1 month ago

Adding --no-half or --no-half-vae to COMMANDLINE_ARGS= might help you. I'm still getting a memory error, but it did remove the "RuntimeError:" and mostly generates the image before CUDA runs out of memory, so it might work for you.
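
For anyone else landing here: on a Windows install like the one in the log above, those flags go on the COMMANDLINE_ARGS line of webui-user.bat, e.g.

    set COMMANDLINE_ARGS=--no-half-vae

--no-half-vae keeps only the VAE in float32, which targets exactly the encode_first_stage call that failed; --no-half runs the whole model in float32 and uses noticeably more VRAM, which may be where the remaining memory error comes from.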