Closed: elfgoose closed this issue 1 year ago
*** Error completing request *** Arguments: (prompt) ', [], <PIL.Image.Image image mode=RGBA size=1024x1024 at 0x26A63DB9DB0>, None, None, None, None, None, None, 25, 'DPM++ 2M Karras', 4, 0, 1, 1, 1, 11, 1.5, 0.55, 0, 1024, 1024, 1, 0, 0, 32, 0, '', '', '', [], False, [], '', <gradio.routes.Request object at 0x0000026A57B202E0>, 2, True, 'sd_xl_refiner_1.0.safetensors [7440042bbd]', 0.8, -1, False, -1, 0, 0, 0, False, {'ad_model': 'face_yolov8n.pt', 'ad_prompt': '', 'ad_negative_prompt': '', 'ad_confidence': 0.3, 'ad_mask_k_largest': 0, 'ad_mask_min_ratio': 0, 'ad_mask_max_ratio': 1, 'ad_x_offset': 0, 'ad_y_offset': 0, 'ad_dilate_erode': 4, 'ad_mask_merge_invert': 'None', 'ad_mask_blur': 4, 'ad_denoising_strength': 0.4, 'ad_inpaint_only_masked': True, 'ad_inpaint_only_masked_padding': 32, 'ad_use_inpaint_width_height': False, 'ad_inpaint_width': 512, 'ad_inpaint_height': 512, 'ad_use_steps': False, 'ad_steps': 28, 'ad_use_cfg_scale': False, 'ad_cfg_scale': 7, 'ad_use_checkpoint': False, 'ad_checkpoint': 'Use same checkpoint', 'ad_use_sampler': False, 'ad_sampler': 'DPM++ 2M Karras', 'ad_use_noise_multiplier': False, 'ad_noise_multiplier': 1, 'ad_use_clip_skip': False, 'ad_clip_skip': 1, 'ad_restore_face': False, 'ad_controlnet_model': 'None', 'ad_controlnet_module': 'inpaint_global_harmonious', 'ad_controlnet_weight': 1, 'ad_controlnet_guidance_start': 0, 'ad_controlnet_guidance_end': 1, 'is_api': ()}, {'ad_model': 'None', 'ad_prompt': '', 'ad_negative_prompt': '', 'ad_confidence': 0.3, 'ad_mask_k_largest': 0, 'ad_mask_min_ratio': 0, 'ad_mask_max_ratio': 1, 'ad_x_offset': 0, 'ad_y_offset': 0, 'ad_dilate_erode': 4, 'ad_mask_merge_invert': 'None', 'ad_mask_blur': 4, 'ad_denoising_strength': 0.4, 'ad_inpaint_only_masked': True, 'ad_inpaint_only_masked_padding': 32, 'ad_use_inpaint_width_height': False, 'ad_inpaint_width': 512, 'ad_inpaint_height': 512, 'ad_use_steps': False, 'ad_steps': 28, 'ad_use_cfg_scale': False, 'ad_cfg_scale': 7, 'ad_use_checkpoint': False, 'ad_checkpoint': 'Use same checkpoint', 'ad_use_sampler': False, 'ad_sampler': 'DPM++ 2M Karras', 'ad_use_noise_multiplier': False, 'ad_noise_multiplier': 1, 'ad_use_clip_skip': False, 'ad_clip_skip': 1, 'ad_restore_face': False, 'ad_controlnet_model': 'None', 'ad_controlnet_module': 'inpaint_global_harmonious', 'ad_controlnet_weight': 1, 'ad_controlnet_guidance_start': 0, 'ad_controlnet_guidance_end': 1, 'is_api': ()}, <scripts.controlnet_ui.controlnet_ui_group.UiControlNetUnit object at 0x0000026A57B22560>, <scripts.controlnet_ui.controlnet_ui_group.UiControlNetUnit object at 0x0000026A561AA200>, <scripts.controlnet_ui.controlnet_ui_group.UiControlNetUnit object at 0x0000026A561AB100>, None, False, '0', '0', 'inswapper_128.onnx', 'CodeFormer', 1, True, '', 1, 1, False, True, 1, 0, 0, False, '* `CFG Scale` should be 2 or lower.', True, True, '', '', True, 50, True, 1, 0, False, 4, 0.75, 'Linear', 'None', '<p style="margin-bottom:0.75em">Recommended settings: Sampling Steps: 80-100, Sampler: Euler a, Denoising strength: 0.8</p>', 128, 8, ['left', 'right', 'up', 'down'], 1, 0.05, 128, 4, 0, ['left', 'right', 'up', 'down'], False, False, 'positive', 'comma', 0, False, False, '', '<p style="margin-bottom:0.75em">Will upscale the image by the selected scale factor; use width and height sliders to set tile size</p>', 64, 0, 2, 1, '', [], 0, '', [], 0, '', [], True, False, False, False, 0, False, None, None, False, None, None, False, None, None, False, 50) {} Traceback (most recent call last): File "C:\AI\Image 
AI\Stable-diffusion\webui\modules\call_queue.py", line 57, in f
    res = list(func(*args, **kwargs))
  File "C:\AI\Image AI\Stable-diffusion\webui\modules\call_queue.py", line 36, in f
    res = func(*args, **kwargs)
  File "C:\AI\Image AI\Stable-diffusion\webui\modules\img2img.py", line 206, in img2img
    processed = modules.scripts.scripts_img2img.run(p, *args)
  File "C:\AI\Image AI\Stable-diffusion\webui\modules\scripts.py", line 601, in run
    processed = script.run(p, *script_args)
  File "C:\AI\Image AI\Stable-diffusion\webui\scripts\loopback.py", line 95, in run
    processed = processing.process_images(p)
  File "C:\AI\Image AI\Stable-diffusion\webui\modules\processing.py", line 732, in process_images
    res = process_images_inner(p)
  File "C:\AI\Image AI\Stable-diffusion\webui\extensions\sd-webui-controlnet\scripts\batch_hijack.py", line 42, in processing_process_images_hijack
    return getattr(processing, '__controlnet_original_process_images_inner')(p, *args, **kwargs)
  File "C:\AI\Image AI\Stable-diffusion\webui\modules\processing.py", line 803, in process_images_inner
    p.init(p.all_prompts, p.all_seeds, p.all_subseeds)
  File "C:\AI\Image AI\Stable-diffusion\webui\modules\processing.py", line 1496, in init
    self.init_latent = images_tensor_to_samples(image, approximation_indexes.get(opts.sd_vae_encode_method), self.sd_model)
  File "C:\AI\Image AI\Stable-diffusion\webui\modules\sd_samplers_common.py", line 110, in images_tensor_to_samples
    x_latent = model.get_first_stage_encoding(model.encode_first_stage(image))
  File "C:\AI\Image AI\Stable-diffusion\system\python\lib\site-packages\torch\utils\_contextlib.py", line 115, in decorate_context
    return func(*args, **kwargs)
  File "C:\AI\Image AI\Stable-diffusion\webui\repositories\generative-models\sgm\models\diffusion.py", line 127, in encode_first_stage
    z = self.first_stage_model.encode(x)
  File "C:\AI\Image AI\Stable-diffusion\webui\modules\lowvram.py", line 66, in first_stage_model_encode_wrap
    send_me_to_gpu(first_stage_model, None)
  File "C:\AI\Image AI\Stable-diffusion\webui\modules\lowvram.py", line 52, in send_me_to_gpu
    module_in_gpu.to(cpu)
  File "C:\AI\Image AI\Stable-diffusion\system\python\lib\site-packages\lightning_fabric\utilities\device_dtype_mixin.py", line 54, in to
    return super().to(*args, **kwargs)
  File "C:\AI\Image AI\Stable-diffusion\system\python\lib\site-packages\torch\nn\modules\module.py", line 1145, in to
    return self._apply(convert)
  File "C:\AI\Image AI\Stable-diffusion\system\python\lib\site-packages\torch\nn\modules\module.py", line 797, in _apply
    module._apply(fn)
  File "C:\AI\Image AI\Stable-diffusion\system\python\lib\site-packages\torch\nn\modules\module.py", line 797, in _apply
    module._apply(fn)
  File "C:\AI\Image AI\Stable-diffusion\system\python\lib\site-packages\torch\nn\modules\module.py", line 797, in _apply
    module._apply(fn)
  File "C:\AI\Image AI\Stable-diffusion\system\python\lib\site-packages\torch\nn\modules\module.py", line 820, in _apply
    param_applied = fn(param)
  File "C:\AI\Image AI\Stable-diffusion\system\python\lib\site-packages\torch\nn\modules\module.py", line 1143, in convert
    return t.to(device, dtype if t.is_floating_point() or t.is_complex() else None, non_blocking)
NotImplementedError: Cannot copy out of meta tensor; no data!
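For context, that final NotImplementedError is the generic error PyTorch raises whenever .to() is called on a tensor that lives on the "meta" device: a meta tensor only records shape and dtype with no backing storage, so there is nothing to copy when the lowvram code tries to shuffle the module between GPU and CPU. A minimal sketch that reproduces just the error message in plain PyTorch (none of this is webui code):

import torch
from torch import nn

# A module created on the "meta" device has parameters with shape/dtype only, no data.
layer = nn.Linear(4, 4, device="meta")

try:
    layer.to("cpu")  # moving parameters requires copying real data, which meta tensors lack
except NotImplementedError as err:
    print(err)  # torch 2.0.x prints: Cannot copy out of meta tensor; no data!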
Actually, it looks like it was either the DWPose extension or the openpose editor extension. I disabled them and now it's working.
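For anyone who wants to try the same workaround without uninstalling anything: besides unticking the two extensions on the Extensions tab (which writes them into "disabled_extensions" in config.json), moving their folders out of the extensions directory keeps the webui from loading them on the next launch. A rough sketch using the install paths from the sysinfo below; the "extensions-disabled" holding folder is just an illustrative name:

import pathlib
import shutil

# Paths taken from the sysinfo in this report; adjust for your own install.
ext_dir = pathlib.Path(r"C:\AI\Image AI\Stable-diffusion\webui\extensions")
parked = ext_dir.parent / "extensions-disabled"  # hypothetical holding folder
parked.mkdir(exist_ok=True)

# Park the two suspect extensions so they are not loaded on the next launch.
for name in ("DWPose", "openpose-editor"):
    src = ext_dir / name
    if src.is_dir():
        shutil.move(str(src), str(parked / name))
        print(f"moved {name} -> {parked / name}")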
Is there an existing issue for this?
What happened?
img2img generates fine, but trying to generate anything from the result in img2img causes an error
Steps to reproduce the problem
What should have happened?
It should generate a new image
Sysinfo
{ "Platform": "Windows-10-10.0.19045-SP0", "Python": "3.10.6", "Version": "v1.6.0", "Commit": "5ef669de080814067961f28357256e8fe27544f4", "Script path": "C:\AI\Image AI\Stable-diffusion\webui", "Data path": "C:\AI\Image AI\Stable-diffusion\webui", "Extensions dir": "C:\AI\Image AI\Stable-diffusion\webui\extensions", "Checksum": "3609579b82d745a1dc5f2aa35fb6635f3779da905557cacbd0432a3ac1a57221", "Commandline": [ "launch.py", "--listen", "--medvram", "--reinstall-xformers", "--xformers", "--theme", "dark", "--opt-sdp-no-mem-attention", "--enable-insecure-extension-access" ], "Torch env info": "'NoneType' object has no attribute 'splitlines'", "Exceptions": [ { "exception": "Cannot copy out of meta tensor; no data!", "traceback": [ [ "C:\AI\Image AI\Stable-diffusion\webui\modules\call_queue.py, line 57, f", "res = list(func(*args, kwargs))" ], [ "C:\AI\Image AI\Stable-diffusion\webui\modules\call_queue.py, line 36, f", "res = func(*args, *kwargs)" ], [ "C:\AI\Image AI\Stable-diffusion\webui\modules\img2img.py, line 208, img2img", "processed = process_images(p)" ], [ "C:\AI\Image AI\Stable-diffusion\webui\modules\processing.py, line 719, process_images", "sd_models.reload_model_weights()" ], [ "C:\AI\Image AI\Stable-diffusion\webui\modules\sd_models.py, line 732, reload_model_weights", "sd_model = reuse_model_from_already_loaded(sd_model, checkpoint_info, timer)" ], [ "C:\AI\Image AI\Stable-diffusion\webui\modules\sd_models.py, line 681, reuse_model_from_already_loaded", "send_model_to_cpu(sd_model)" ], [ "C:\AI\Image AI\Stable-diffusion\webui\modules\sd_models.py, line 542, send_model_to_cpu", "lowvram.send_everything_to_cpu()" ], [ "C:\AI\Image AI\Stable-diffusion\webui\modules\lowvram.py, line 12, send_everything_to_cpu", "module_in_gpu.to(cpu)" ], [ "C:\AI\Image AI\Stable-diffusion\system\python\lib\site-packages\lightning_fabric\utilities\device_dtype_mixin.py, line 54, to", "return super().to(args, kwargs)" ], [ "C:\AI\Image AI\Stable-diffusion\system\python\lib\site-packages\torch\nn\modules\module.py, line 1145, to", "return self._apply(convert)" ], [ "C:\AI\Image AI\Stable-diffusion\system\python\lib\site-packages\torch\nn\modules\module.py, line 797, _apply", "module._apply(fn)" ], [ "C:\AI\Image AI\Stable-diffusion\system\python\lib\site-packages\torch\nn\modules\module.py, line 797, _apply", "module._apply(fn)" ], [ "C:\AI\Image AI\Stable-diffusion\system\python\lib\site-packages\torch\nn\modules\module.py, line 797, _apply", "module._apply(fn)" ], [ "C:\AI\Image AI\Stable-diffusion\system\python\lib\site-packages\torch\nn\modules\module.py, line 820, _apply", "param_applied = fn(param)" ], [ "C:\AI\Image AI\Stable-diffusion\system\python\lib\site-packages\torch\nn\modules\module.py, line 1143, convert", "return t.to(device, dtype if t.is_floating_point() or t.is_complex() else None, non_blocking)" ] ] }, { "exception": "Cannot copy out of meta tensor; no data!", "traceback": [ [ "C:\AI\Image AI\Stable-diffusion\webui\modules\call_queue.py, line 57, f", "res = list(func(*args, kwargs))" ], [ "C:\AI\Image AI\Stable-diffusion\webui\modules\call_queue.py, line 36, f", "res = func(*args, *kwargs)" ], [ "C:\AI\Image AI\Stable-diffusion\webui\modules\img2img.py, line 206, img2img", "processed = modules.scripts.scripts_img2img.run(p, args)" ], [ "C:\AI\Image AI\Stable-diffusion\webui\modules\scripts.py, line 601, run", "processed = script.run(p, script_args)" ], [ "C:\AI\Image AI\Stable-diffusion\webui\scripts\loopback.py, line 95, run", "processed = processing.process_images(p)" ], [ 
"C:\AI\Image AI\Stable-diffusion\webui\modules\processing.py, line 732, process_images", "res = process_images_inner(p)" ], [ "C:\AI\Image AI\Stable-diffusion\webui\extensions\sd-webui-controlnet\scripts\batch_hijack.py, line 42, processing_process_images_hijack", "return getattr(processing, '__controlnet_original_process_images_inner')(p, args, kwargs)" ], [ "C:\AI\Image AI\Stable-diffusion\webui\modules\processing.py, line 803, process_images_inner", "p.init(p.all_prompts, p.all_seeds, p.all_subseeds)" ], [ "C:\AI\Image AI\Stable-diffusion\webui\modules\processing.py, line 1496, init", "self.init_latent = images_tensor_to_samples(image, approximation_indexes.get(opts.sd_vae_encode_method), self.sd_model)" ], [ "C:\AI\Image AI\Stable-diffusion\webui\modules\sd_samplers_common.py, line 110, images_tensor_to_samples", "x_latent = model.get_first_stage_encoding(model.encode_first_stage(image))" ], [ "C:\AI\Image AI\Stable-diffusion\system\python\lib\site-packages\torch\utils\_contextlib.py, line 115, decorate_context", "return func(*args, kwargs)" ], [ "C:\AI\Image AI\Stable-diffusion\webui\repositories\generative-models\sgm\models\diffusion.py, line 127, encode_first_stage", "z = self.first_stage_model.encode(x)" ], [ "C:\AI\Image AI\Stable-diffusion\webui\modules\lowvram.py, line 66, first_stage_model_encode_wrap", "send_me_to_gpu(first_stage_model, None)" ], [ "C:\AI\Image AI\Stable-diffusion\webui\modules\lowvram.py, line 52, send_me_to_gpu", "module_in_gpu.to(cpu)" ], [ "C:\AI\Image AI\Stable-diffusion\system\python\lib\site-packages\lightning_fabric\utilities\device_dtype_mixin.py, line 54, to", "return super().to(*args, *kwargs)" ], [ "C:\AI\Image AI\Stable-diffusion\system\python\lib\site-packages\torch\nn\modules\module.py, line 1145, to", "return self._apply(convert)" ], [ "C:\AI\Image AI\Stable-diffusion\system\python\lib\site-packages\torch\nn\modules\module.py, line 797, _apply", "module._apply(fn)" ], [ "C:\AI\Image AI\Stable-diffusion\system\python\lib\site-packages\torch\nn\modules\module.py, line 797, _apply", "module._apply(fn)" ], [ "C:\AI\Image AI\Stable-diffusion\system\python\lib\site-packages\torch\nn\modules\module.py, line 797, _apply", "module._apply(fn)" ], [ "C:\AI\Image AI\Stable-diffusion\system\python\lib\site-packages\torch\nn\modules\module.py, line 820, _apply", "param_applied = fn(param)" ], [ "C:\AI\Image AI\Stable-diffusion\system\python\lib\site-packages\torch\nn\modules\module.py, line 1143, convert", "return t.to(device, dtype if t.is_floating_point() or t.is_complex() else None, non_blocking)" ] ] }, { "exception": "Cannot copy out of meta tensor; no data!", "traceback": [ [ "C:\AI\Image AI\Stable-diffusion\webui\modules\options.py, line 140, set", "option.onchange()" ], [ "C:\AI\Image AI\Stable-diffusion\webui\modules\call_queue.py, line 13, f", "res = func(args, kwargs)" ], [ "C:\AI\Image AI\Stable-diffusion\webui\modules\initialize_util.py, line 170,",
"shared.opts.onchange(\"sd_model_checkpoint\", wrap_queued_call(lambda: sd_models.reload_model_weights()), call=False)"
],
[
"C:\AI\Image AI\Stable-diffusion\webui\modules\sd_models.py, line 751, reload_model_weights",
"load_model(checkpoint_info, already_loaded_state_dict=state_dict)"
],
[
"C:\AI\Image AI\Stable-diffusion\webui\modules\sd_models.py, line 649, load_model",
"sd_model.cond_stage_model_empty_prompt = get_empty_cond(sd_model)"
],
[
"C:\AI\Image AI\Stable-diffusion\webui\modules\sd_models.py, line 534, get_empty_cond",
"d = sd_model.get_learned_conditioning([\"\"])"
],
[
"C:\AI\Image AI\Stable-diffusion\webui\modules\sd_models_xl.py, line 31, get_learned_conditioning",
"c = self.conditioner(sdxl_conds, force_zero_embeddings=['txt'] if force_zero_negative_prompt else [])"
],
[
"C:\AI\Image AI\Stable-diffusion\system\python\lib\site-packages\torch\nn\modules\module.py, line 1527, _call_impl",
"result = hook(self, args)"
],
[
"C:\AI\Image AI\Stable-diffusion\webui\modules\lowvram.py, line 52, send_me_to_gpu",
"module_in_gpu.to(cpu)"
],
[
"C:\AI\Image AI\Stable-diffusion\system\python\lib\site-packages\lightning_fabric\utilities\device_dtype_mixin.py, line 54, to",
"return super().to(*args, kwargs)"
],
[
"C:\AI\Image AI\Stable-diffusion\system\python\lib\site-packages\torch\nn\modules\module.py, line 1145, to",
"return self._apply(convert)"
],
[
"C:\AI\Image AI\Stable-diffusion\system\python\lib\site-packages\torch\nn\modules\module.py, line 797, _apply",
"module._apply(fn)"
],
[
"C:\AI\Image AI\Stable-diffusion\system\python\lib\site-packages\torch\nn\modules\module.py, line 797, _apply",
"module._apply(fn)"
],
[
"C:\AI\Image AI\Stable-diffusion\system\python\lib\site-packages\torch\nn\modules\module.py, line 797, _apply",
"module._apply(fn)"
],
[
"C:\AI\Image AI\Stable-diffusion\system\python\lib\site-packages\torch\nn\modules\module.py, line 820, _apply",
"param_applied = fn(param)"
],
[
"C:\AI\Image AI\Stable-diffusion\system\python\lib\site-packages\torch\nn\modules\module.py, line 1143, convert",
"return t.to(device, dtype if t.is_floating_point() or t.is_complex() else None, non_blocking)"
]
]
},
{
"exception": "[WinError 2] The system cannot find the file specified: 'C:\\AI\\Image AI\\Stable-diffusion\\webui\\models\\Stable-diffusion\\juggernautXL_version1.safetensors'",
"traceback": [
[
"C:\AI\Image AI\Stable-diffusion\webui\modules\options.py, line 140, set",
"option.onchange()"
],
[
"C:\AI\Image AI\Stable-diffusion\webui\modules\call_queue.py, line 13, f",
"res = func(*args, *kwargs)"
],
[
"C:\AI\Image AI\Stable-diffusion\webui\modules\initialize_util.py, line 170, ",
"shared.opts.onchange(\"sd_model_checkpoint\", wrap_queued_call(lambda: sd_models.reload_model_weights()), call=False)"
],
[
"C:\AI\Image AI\Stable-diffusion\webui\modules\sd_models.py, line 741, reload_model_weights",
"state_dict = get_checkpoint_state_dict(checkpoint_info, timer)"
],
[
"C:\AI\Image AI\Stable-diffusion\webui\modules\sd_models.py, line 306, get_checkpoint_state_dict",
"sd_model_hash = checkpoint_info.calculate_shorthash()"
],
[
"C:\AI\Image AI\Stable-diffusion\webui\modules\sd_models.py, line 99, calculate_shorthash",
"self.sha256 = hashes.sha256(self.filename, f\"checkpoint/{self.name}\")"
],
[
"C:\AI\Image AI\Stable-diffusion\webui\modules\hashes.py, line 41, sha256",
"sha256_value = sha256_from_cache(filename, title, use_addnet_hash)"
],
[
"C:\AI\Image AI\Stable-diffusion\webui\modules\hashes.py, line 24, sha256_from_cache",
"ondisk_mtime = os.path.getmtime(filename)"
],
[
"genericpath.py, line 55, getmtime",
""
]
]
},
{
"exception": "A tensor with all NaNs was produced in Unet. This could be either because there's not enough precision to represent the picture, or because your video card does not support half type. Try setting the \"Upcast cross attention layer to float32\" option in Settings > Stable Diffusion or using the --no-half commandline argument to fix this. Use --disable-nan-check commandline argument to disable this check.",
"traceback": [
[
"C:\AI\Image AI\Stable-diffusion\webui\modules\call_queue.py, line 57, f",
"res = list(func( args, kwargs))"
],
[
"C:\AI\Image AI\Stable-diffusion\webui\modules\call_queue.py, line 36, f",
"res = func(*args, kwargs)"
],
[
"C:\AI\Image AI\Stable-diffusion\webui\modules\img2img.py, line 206, img2img",
"processed = modules.scripts.scripts_img2img.run(p, args)"
],
[
"C:\AI\Image AI\Stable-diffusion\webui\modules\scripts.py, line 601, run",
"processed = script.run(p, script_args)"
],
[
"C:\AI\Image AI\Stable-diffusion\webui\scripts\loopback.py, line 95, run",
"processed = processing.process_images(p)"
],
[
"C:\AI\Image AI\Stable-diffusion\webui\modules\processing.py, line 732, process_images",
"res = process_images_inner(p)"
],
[
"C:\AI\Image AI\Stable-diffusion\webui\extensions\sd-webui-controlnet\scripts\batch_hijack.py, line 42, processing_process_images_hijack",
"return getattr(processing, '__controlnet_original_process_images_inner')(p, *args, kwargs)"
],
[
"C:\AI\Image AI\Stable-diffusion\webui\modules\processing.py, line 867, process_images_inner",
"samples_ddim = p.sample(conditioning=p.c, unconditional_conditioning=p.uc, seeds=p.seeds, subseeds=p.subseeds, subseed_strength=p.subseed_strength, prompts=p.prompts)"
],
[
"C:\AI\Image AI\Stable-diffusion\webui\modules\processing.py, line 1528, sample",
"samples = self.sampler.sample_img2img(self, self.init_latent, x, conditioning, unconditional_conditioning, image_conditioning=self.image_conditioning)"
],
[
"C:\AI\Image AI\Stable-diffusion\webui\modules\sd_samplers_kdiffusion.py, line 188, sample_img2img",
"samples = self.launch_sampling(t_enc + 1, lambda: self.func(self.model_wrap_cfg, xi, extra_args=self.sampler_extra_args, disable=False, callback=self.callback_state, extra_params_kwargs))"
],
[
"C:\AI\Image AI\Stable-diffusion\webui\modules\sd_samplers_common.py, line 261, launch_sampling",
"return func()"
],
[
"C:\AI\Image AI\Stable-diffusion\webui\modules\sd_samplers_kdiffusion.py, line 188, ",
"samples = self.launch_sampling(t_enc + 1, lambda: self.func(self.model_wrap_cfg, xi, extra_args=self.sampler_extra_args, disable=False, callback=self.callback_state, *extra_params_kwargs))"
],
[
"C:\AI\Image AI\Stable-diffusion\system\python\lib\site-packages\torch\utils\_contextlib.py, line 115, decorate_context",
"return func(args, kwargs)"
],
[
"C:\AI\Image AI\Stable-diffusion\webui\repositories\k-diffusion\k_diffusion\sampling.py, line 594, sample_dpmpp_2m",
"denoised = model(x, sigmas[i] * s_in, *extra_args)"
],
[
"C:\AI\Image AI\Stable-diffusion\system\python\lib\site-packages\torch\nn\modules\module.py, line 1501, _call_impl",
"return forward_call(args, **kwargs)"
],
[
"C:\AI\Image AI\Stable-diffusion\webui\modules\sd_samplers_cfg_denoiser.py, line 201, forward",
"devices.test_for_nans(x_out, \"unet\")"
],
[
"C:\AI\Image AI\Stable-diffusion\webui\modules\devices.py, line 136, test_for_nans",
"raise NansException(message)"
]
]
}
],
"CPU": {
"model": "Intel64 Family 6 Model 94 Stepping 3, GenuineIntel",
"count logical": 8,
"count physical": 4
},
"RAM": {
"total": "32GB",
"used": "23GB",
"free": "9GB"
},
"Extensions": [
{
"name": "DWPose",
"path": "C:\AI\Image AI\Stable-diffusion\webui\extensions\DWPose",
"version": "c6af7ac1",
"branch": "onnx",
"remote": "https://github.com/IDEA-Research/DWPose"
},
{
"name": "a1111-sd-webui-tagcomplete",
"path": "C:\AI\Image AI\Stable-diffusion\webui\extensions\a1111-sd-webui-tagcomplete",
"version": "e23bb6d4",
"branch": "main",
"remote": "https://github.com/DominikDoom/a1111-sd-webui-tagcomplete.git"
},
{
"name": "adetailer",
"path": "C:\AI\Image AI\Stable-diffusion\webui\extensions\adetailer",
"version": "451b0985",
"branch": "main",
"remote": "https://github.com/Bing-su/adetailer.git"
},
{
"name": "clip-interrogator-ext",
"path": "C:\AI\Image AI\Stable-diffusion\webui\extensions\clip-interrogator-ext",
"version": "489da6fb",
"branch": "main",
"remote": "https://github.com/pharmapsychotic/clip-interrogator-ext.git"
},
{
"name": "openpose-editor",
"path": "C:\AI\Image AI\Stable-diffusion\webui\extensions\openpose-editor",
"version": "722bca6f",
"branch": "master",
"remote": "https://github.com/fkunn1326/openpose-editor.git"
},
{
"name": "sd-webui-controlnet",
"path": "C:\AI\Image AI\Stable-diffusion\webui\extensions\sd-webui-controlnet",
"version": "e2cd3b91",
"branch": "main",
"remote": "https://github.com/Mikubill/sd-webui-controlnet.git"
},
{
"name": "sd-webui-openpose-editor",
"path": "C:\AI\Image AI\Stable-diffusion\webui\extensions\sd-webui-openpose-editor",
"version": "870f9113",
"branch": "main",
"remote": "https://github.com/huchenlei/sd-webui-openpose-editor.git"
},
{
"name": "sd-webui-prompt-all-in-one",
"path": "C:\AI\Image AI\Stable-diffusion\webui\extensions\sd-webui-prompt-all-in-one",
"version": "d294e83e",
"branch": "main",
"remote": "https://github.com/Physton/sd-webui-prompt-all-in-one"
},
{
"name": "sd-webui-reactor",
"path": "C:\AI\Image AI\Stable-diffusion\webui\extensions\sd-webui-reactor",
"version": "0bdc069f",
"branch": "main",
"remote": "https://github.com/Gourieff/sd-webui-reactor"
}
],
"Inactive extensions": [],
"Environment": {
"COMMANDLINE_ARGS": " --listen --medvram --reinstall-xformers --xformers --theme dark --opt-sdp-no-mem-attention --enable-insecure-extension-access",
"GRADIO_ANALYTICS_ENABLED": "False"
},
"Config": {
"samples_save": true,
"samples_format": "jpg",
"samples_filename_pattern": "",
"save_images_add_number": true,
"grid_save": false,
"grid_format": "png",
"grid_extended_filename": false,
"grid_only_if_multiple": true,
"grid_prevent_empty_spots": false,
"grid_zip_filename_pattern": "",
"n_rows": -1,
"font": "",
"grid_text_active_color": "#000000",
"grid_text_inactive_color": "#999999",
"grid_background_color": "#ffffff",
"enable_pnginfo": true,
"save_txt": true,
"save_images_before_face_restoration": false,
"save_images_before_highres_fix": false,
"save_images_before_color_correction": false,
"save_mask": false,
"save_mask_composite": false,
"jpeg_quality": 80,
"webp_lossless": false,
"export_for_4chan": true,
"img_downscale_threshold": 4.0,
"target_side_length": 4000,
"img_max_size_mp": 200,
"use_original_name_batch": true,
"use_upscaler_name_as_suffix": false,
"save_selected_only": true,
"save_init_img": false,
"temp_dir": "",
"clean_temp_dir_at_start": false,
"save_incomplete_images": false,
"outdir_samples": "P:\imgsrc\SD images",
"outdir_txt2img_samples": "outputs/txt2img-images",
"outdir_img2img_samples": "outputs/img2img-images",
"outdir_extras_samples": "outputs/extras-images",
"outdir_grids": "",
"outdir_txt2img_grids": "outputs/txt2img-grids",
"outdir_img2img_grids": "outputs/img2img-grids",
"outdir_save": "log/images",
"outdir_init_images": "outputs/init-images",
"save_to_dirs": true,
"grid_save_to_dirs": true,
"use_save_to_dirs_for_ui": false,
"directories_filename_pattern": "[date]",
"directories_max_prompt_words": 8,
"ESRGAN_tile": 192,
"ESRGAN_tile_overlap": 8,
"realesrgan_enabled_models": [
"R-ESRGAN 4x+",
"R-ESRGAN 4x+ Anime6B"
],
"upscaler_for_img2img": null,
"face_restoration": true,
"face_restoration_model": "CodeFormer",
"code_former_weight": 0.5,
"face_restoration_unload": false,
"auto_launch_browser": "Local",
"show_warnings": false,
"show_gradio_deprecation_warnings": true,
"memmon_poll_rate": 8,
"samples_log_stdout": false,
"multiple_tqdm": true,
"print_hypernet_extra": false,
"list_hidden_files": true,
"disable_mmap_load_safetensors": false,
"hide_ldm_prints": true,
"api_enable_requests": true,
"api_forbid_local_requests": true,
"api_useragent": "",
"unload_models_when_training": false,
"pin_memory": false,
"save_optimizer_state": false,
"save_training_settings_to_txt": true,
"dataset_filename_word_regex": "",
"dataset_filename_join_string": " ",
"training_image_repeats_per_epoch": 1,
"training_write_csv_every": 500,
"training_xattention_optimizations": false,
"training_enable_tensorboard": false,
"training_tensorboard_save_images": false,
"training_tensorboard_flush_every": 120,
"sd_model_checkpoint": "revAnimated_v122.safetensors [4199bcdd14]",
"sd_checkpoints_limit": 1,
"sd_checkpoints_keep_in_cpu": true,
"sd_checkpoint_cache": 0,
"sd_unet": "Automatic",
"enable_quantization": true,
"enable_emphasis": true,
"enable_batch_seeds": true,
"comma_padding_backtrack": 20,
"CLIP_stop_at_last_layers": 1,
"upcast_attn": false,
"randn_source": "GPU",
"tiling": false,
"hires_fix_refiner_pass": "second pass",
"sdxl_crop_top": 0,
"sdxl_crop_left": 0,
"sdxl_refiner_low_aesthetic_score": 2.5,
"sdxl_refiner_high_aesthetic_score": 6.0,
"sd_vae_explanation": "VAE is a neural network that transforms a standard RGB\nimage into latent space representation and back. Latent space representation is what stable diffusion is working on during sampling\n(i.e. when the progress bar is between empty and full). For txt2img, VAE is used to create a resulting image after the sampling is finished.\nFor img2img, VAE is used to process user's input image before the sampling, and to create an image after sampling.",
"sd_vae_checkpoint_cache": 0,
"sd_vae": "sdxl_vae.safetensors",
"sd_vae_overrides_per_model_preferences": true,
"auto_vae_precision": true,
"sd_vae_encode_method": "Full",
"sd_vae_decode_method": "Full",
"inpainting_mask_weight": 1.0,
"initial_noise_multiplier": 1.0,
"img2img_extra_noise": 0.0,
"img2img_color_correction": false,
"img2img_fix_steps": false,
"img2img_background_color": "#ffffff",
"img2img_editor_height": 720,
"img2img_sketch_default_brush_color": "#ffffff",
"img2img_inpaint_mask_brush_color": "#ffffff",
"img2img_inpaint_sketch_default_brush_color": "#ffffff",
"return_mask": false,
"return_mask_composite": false,
"cross_attention_optimization": "Automatic",
"s_min_uncond": 0.0,
"token_merging_ratio": 0.0,
"token_merging_ratio_img2img": 0.0,
"token_merging_ratio_hr": 0.0,
"pad_cond_uncond": false,
"persistent_cond_cache": true,
"batch_cond_uncond": true,
"use_old_emphasis_implementation": false,
"use_old_karras_scheduler_sigmas": false,
"no_dpmpp_sde_batch_determinism": false,
"use_old_hires_fix_width_height": false,
"dont_fix_second_order_samplers_schedule": false,
"hires_fix_use_firstpass_conds": false,
"use_old_scheduling": false,
"interrogate_keep_models_in_memory": false,
"interrogate_return_ranks": false,
"interrogate_clip_num_beams": 1,
"interrogate_clip_min_length": 24,
"interrogate_clip_max_length": 48,
"interrogate_clip_dict_limit": 1500,
"interrogate_clip_skip_categories": [],
"interrogate_deepbooru_score_threshold": 0.5,
"deepbooru_sort_alpha": true,
"deepbooru_use_spaces": true,
"deepbooru_escape": true,
"deepbooru_filter_tags": "",
"extra_networks_show_hidden_directories": true,
"extra_networks_hidden_models": "When searched",
"extra_networks_default_multiplier": 1.0,
"extra_networks_card_width": 0,
"extra_networks_card_height": 0,
"extra_networks_card_text_scale": 1.0,
"extra_networks_card_show_desc": true,
"extra_networks_add_text_separator": " ",
"ui_extra_networks_tab_reorder": "",
"textual_inversion_print_at_load": false,
"textual_inversion_add_hashes_to_infotext": true,
"sd_hypernetwork": "None",
"localization": "None",
"gradio_theme": "Default",
"gradio_themes_cache": true,
"gallery_height": "",
"return_grid": true,
"do_not_show_images": false,
"send_seed": true,
"send_size": true,
"js_modal_lightbox": true,
"js_modal_lightbox_initially_zoomed": true,
"js_modal_lightbox_gamepad": false,
"js_modal_lightbox_gamepad_repeat": 250,
"show_progress_in_title": true,
"samplers_in_dropdown": true,
"dimensions_and_batch_together": true,
"keyedit_precision_attention": 0.1,
"keyedit_precision_extra": 0.05,
"keyedit_delimiters": ".,\/!?%^*;:{}=`~()",
"keyedit_move": true,
"quicksettings_list": [
"sd_model_checkpoint",
"CLIP_stop_at_last_layers",
"sd_vae"
],
"ui_tab_order": [],
"hidden_tabs": [],
"ui_reorder_list": [],
"hires_fix_show_sampler": true,
"hires_fix_show_prompts": false,
"disable_token_counters": false,
"add_model_hash_to_info": true,
"add_model_name_to_info": true,
"add_user_name_to_info": false,
"add_version_to_infotext": true,
"disable_weights_auto_swap": true,
"infotext_styles": "Apply if any",
"show_progressbar": true,
"live_previews_enable": true,
"live_previews_image_format": "jpeg",
"show_progress_grid": true,
"show_progress_every_n_steps": 5,
"show_progress_type": "Approx NN",
"live_preview_allow_lowvram_full": false,
"live_preview_content": "Prompt",
"live_preview_refresh_period": 1000,
"live_preview_fast_interrupt": false,
"hide_samplers": [],
"eta_ddim": 0.0,
"eta_ancestral": 1.0,
"ddim_discretize": "uniform",
"s_churn": 0.0,
"s_tmin": 0.0,
"s_tmax": 0.0,
"s_noise": 1.0,
"k_sched_type": "Automatic",
"sigma_min": 0.0,
"sigma_max": 0.0,
"rho": 0.0,
"eta_noise_seed_delta": 0,
"always_discard_next_to_last_sigma": false,
"sgm_noise_multiplier": false,
"uni_pc_variant": "bh1",
"uni_pc_skip_type": "time_uniform",
"uni_pc_order": 3,
"uni_pc_lower_order_final": true,
"postprocessing_enable_in_main_ui": [],
"postprocessing_operation_order": [],
"upscaling_max_images_in_cache": 5,
"disabled_extensions": [],
"disable_all_extensions": "none",
"restore_config_state_file": "",
"sd_checkpoint_hash": "700528894b5ec7f8709d5a911c3c0b4e84877a0103126e5ea5cc2b003921acea",
"ldsr_steps": 100,
"ldsr_cached": false,
"SCUNET_tile": 256,
"SCUNET_tile_overlap": 8,
"SWIN_tile": 192,
"SWIN_tile_overlap": 8,
"lora_functional": false,
"sd_lora": "None",
"lora_preferred_name": "Alias from file",
"lora_add_hashes_to_infotext": true,
"lora_show_all": false,
"lora_hide_unknown_for_versions": [],
"lora_in_memory_limit": 0,
"extra_options_txt2img": [],
"extra_options_img2img": [],
"extra_options_cols": 1,
"extra_options_accordion": false,
"canvas_hotkey_zoom": "Alt",
"canvas_hotkey_adjust": "Ctrl",
"canvas_hotkey_move": "F",
"canvas_hotkey_fullscreen": "S",
"canvas_hotkey_reset": "R",
"canvas_hotkey_overlap": "O",
"canvas_show_tooltip": true,
"canvas_auto_expand": true,
"canvas_blur_prompt": false,
"canvas_disabled_functions": [
"Overlap"
],
"tac_tagFile": "danbooru.csv",
"tac_active": true,
"tac_activeIn.txt2img": true,
"tac_activeIn.img2img": true,
"tac_activeIn.negativePrompts": true,
"tac_activeIn.thirdParty": true,
"tac_activeIn.modelList": "",
"tac_activeIn.modelListMode": "Blacklist",
"tac_slidingPopup": true,
"tac_maxResults": 5.0,
"tac_showAllResults": false,
"tac_resultStepLength": 100.0,
"tac_delayTime": 100.0,
"tac_useWildcards": true,
"tac_sortWildcardResults": true,
"tac_useEmbeddings": true,
"tac_useHypernetworks": true,
"tac_useLoras": true,
"tac_useLycos": true,
"tac_showWikiLinks": false,
"tac_showExtraNetworkPreviews": true,
"tac_replaceUnderscores": true,
"tac_escapeParentheses": true,
"tac_appendComma": true,
"tac_appendSpace": true,
"tac_alwaysSpaceAtEnd": true,
"tac_modelKeywordCompletion": "Never",
"tac_wildcardCompletionMode": "To next folder level",
"tac_alias.searchByAlias": true,
"tac_alias.onlyShowAlias": false,
"tac_translation.translationFile": "None",
"tac_translation.oldFormat": false,
"tac_translation.searchByTranslation": true,
"tac_translation.liveTranslation": false,
"tac_extra.extraFile": "extra-quality-tags.csv",
"tac_extra.addMode": "Insert before",
"tac_chantFile": "demo-chants.json",
"tac_keymap": "{\n \"MoveUp\": \"ArrowUp\",\n \"MoveDown\": \"ArrowDown\",\n \"JumpUp\": \"PageUp\",\n \"JumpDown\": \"PageDown\",\n \"JumpToStart\": \"Home\",\n \"JumpToEnd\": \"End\",\n \"ChooseSelected\": \"Enter\",\n \"ChooseFirstOrSelected\": \"Tab\",\n \"Close\": \"Escape\"\n}",
"tac_colormap": "{\n \"danbooru\": {\n \"-1\": [\"red\", \"maroon\"],\n \"0\": [\"lightblue\", \"dodgerblue\"],\n \"1\": [\"indianred\", \"firebrick\"],\n \"3\": [\"violet\", \"darkorchid\"],\n \"4\": [\"lightgreen\", \"darkgreen\"],\n \"5\": [\"orange\", \"darkorange\"]\n },\n \"e621\": {\n \"-1\": [\"red\", \"maroon\"],\n \"0\": [\"lightblue\", \"dodgerblue\"],\n \"1\": [\"gold\", \"goldenrod\"],\n \"3\": [\"violet\", \"darkorchid\"],\n \"4\": [\"lightgreen\", \"darkgreen\"],\n \"5\": [\"tomato\", \"darksalmon\"],\n \"6\": [\"red\", \"maroon\"],\n \"7\": [\"whitesmoke\", \"black\"],\n \"8\": [\"seagreen\", \"darkseagreen\"]\n }\n}",
"tac_refreshTempFiles": "Refresh TAC temp files",
"ad_max_models": 2,
"ad_save_previews": false,
"ad_save_images_before": false,
"ad_only_seleted_scripts": true,
"ad_script_names": "dynamic_prompting,dynamic_thresholding,wildcard_recursive,wildcards,lora_block_weight",
"ad_bbox_sortby": "None",
"control_net_model_config": "models\cldm_v15.yaml",
"control_net_model_adapter_config": "models\t2iadapter_sketch_sd14v1.yaml",
"control_net_detectedmap_dir": "detected_maps",
"control_net_models_path": "",
"control_net_modules_path": "",
"control_net_max_models_num": 3,
"control_net_model_cache_size": 1,
"control_net_inpaint_blur_sigma": 7,
"control_net_no_high_res_fix": false,
"control_net_no_detectmap": false,
"control_net_detectmap_autosaving": false,
"control_net_allow_script_control": false,
"control_net_sync_field_args": true,
"controlnet_show_batch_images_in_ui": false,
"controlnet_increment_seed_during_batch": false,
"controlnet_disable_control_type": false,
"controlnet_disable_openpose_edit": false,
"controlnet_ignore_noninpaint_mask": false
},
"Startup": {
"total": 38.792277336120605,
"records": {
"initial startup": 0.0010004043579101562,
"prepare environment/checks": 0.04799938201904297,
"prepare environment/git version info": 0.05100059509277344,
"prepare environment/torch GPU test": 2.32013201713562,
"prepare environment/install xformers": 4.488053798675537,
"prepare environment/clone repositores": 0.36800122261047363,
"prepare environment/run extensions installers/a1111-sd-webui-tagcomplete": 0.0,
"prepare environment/run extensions installers/adetailer": 0.20000123977661133,
"prepare environment/run extensions installers/clip-interrogator-ext": 3.456998586654663,
"prepare environment/run extensions installers/DWPose": 0.0,
"prepare environment/run extensions installers/openpose-editor": 0.0,
"prepare environment/run extensions installers/sd-webui-controlnet": 0.4199995994567871,
"prepare environment/run extensions installers/sd-webui-openpose-editor": 0.0,
"prepare environment/run extensions installers/sd-webui-prompt-all-in-one": 0.14400029182434082,
"prepare environment/run extensions installers/sd-webui-reactor": 7.77902364730835,
"prepare environment/run extensions installers": 12.000023365020752,
"prepare environment": 19.3402099609375,
"launcher": 0.004001140594482422,
"import torch": 0.0,
"import gradio": 0.0,
"setup paths": 0.0009992122650146484,
"import ldm": 0.0030002593994140625,
"import sgm": 0.0,
"initialize shared": 0.13000059127807617,
"other imports": 0.8380126953125,
"opts onchange": 0.0009882450103759766,
"setup SD model": 0.003997325897216797,
"setup codeformer": 0.13400006294250488,
"setup gfpgan": 0.02500128746032715,
"set samplers": 0.0,
"list extensions": 0.0009987354278564453,
"restore config state file": 0.0,
"list SD models": 0.14600181579589844,
"list localizations": 0.0009984970092773438,
"load scripts/custom_code.py": 0.007999897003173828,
"load scripts/img2imgalt.py": 0.0,
"load scripts/loopback.py": 0.0009984970092773438,
"load scripts/outpainting_mk_2.py": 0.0,
"load scripts/poor_mans_outpainting.py": 0.0,
"load scripts/postprocessing_codeformer.py": 0.0010004043579101562,
"load scripts/postprocessing_gfpgan.py": 0.0,
"load scripts/postprocessing_upscale.py": 0.0,
"load scripts/prompt_matrix.py": 0.0,
"load scripts/prompts_from_file.py": 0.00099945068359375,
"load scripts/refiner.py": 0.0,
"load scripts/sd_upscale.py": 0.0,
"load scripts/seed.py": 0.0,
"load scripts/xyz_grid.py": 0.0030062198638916016,
"load scripts/model_keyword_support.py": 0.0049974918365478516,
"load scripts/shared_paths.py": 0.0009992122650146484,
"load scripts/tag_autocomplete_helper.py": 1.4869987964630127,
"load scripts/!adetailer.py": 1.6630668640136719,
"load scripts/clip_interrogator_ext.py": 0.1490027904510498,
"load scripts/main.py": 0.04199624061584473,
"load scripts/adapter.py": 0.0009999275207519531,
"load scripts/api.py": 0.26399946212768555,
"load scripts/batch_hijack.py": 0.0010025501251220703,
"load scripts/cldm.py": 0.0,
"load scripts/controlnet.py": 0.20000386238098145,
"load scripts/controlnet_version.py": 0.0009958744049072266,
"load scripts/external_code.py": 0.0,
"load scripts/global_state.py": 0.0010023117065429688,
"load scripts/hook.py": 0.0010008811950683594,
"load scripts/infotext.py": 0.0,
"load scripts/logging.py": 0.0009989738464355469,
"load scripts/lvminthin.py": 0.0,
"load scripts/movie2movie.py": 0.0010004043579101562,
"load scripts/processor.py": 0.0,
"load scripts/utils.py": 0.0,
"load scripts/xyz_grid_support.py": 0.0009989738464355469,
"load scripts/openpose_editor.py": 0.033998727798461914,
"load scripts/on_app_started.py": 0.06600069999694824,
"load scripts/console_log_patch.py": 0.3380007743835449,
"load scripts/reactor_api.py": 0.14099931716918945,
"load scripts/reactor_faceswap.py": 0.004004240036010742,
"load scripts/reactor_globals.py": 0.0,
"load scripts/reactor_helpers.py": 0.0,
"load scripts/reactor_logger.py": 0.0009984970092773438,
"load scripts/reactor_swapper.py": 0.0009982585906982422,
"load scripts/reactor_version.py": 0.0,
"load scripts/ldsr_model.py": 0.0449979305267334,
"load scripts/lora_script.py": 0.3549997806549072,
"load scripts/scunet_model.py": 0.045000553131103516,
"load scripts/swinir_model.py": 0.0449986457824707,
"load scripts/hotkey_config.py": 0.0,
"load scripts/extra_options_section.py": 0.0,
"load scripts": 4.908066511154175,
"load upscalers": 0.0029993057250976562,
"refresh VAE": 0.005004167556762695,
"refresh textual inversion templates": 0.0,
"scripts list_optimizers": 0.0019991397857666016,
"scripts list_unets": 0.0,
"reload hypernetworks": 0.007998228073120117,
"initialize extra networks": 0.03600144386291504,
"scripts before_ui_callback": 0.005001544952392578,
"create ui": 0.9709973335266113,
"gradio launch": 6.328001499176025,
"add APIs": 0.008998870849609375,
"app_started_callback/tag_autocomplete_helper.py": 0.0019991397857666016,
"app_started_callback/clip_interrogator_ext.py": 0.0,
"app_started_callback/api.py": 0.004004001617431641,
"app_started_callback/openpose_editor.py": 0.0019986629486083984,
"app_started_callback/on_app_started.py": 5.942998886108398,
"app_started_callback/reactor_api.py": 0.0019979476928710938,
"app_started_callback/lora_script.py": 0.0,
"app_started_callback": 5.952998638153076
}
},
"Packages": [
"-rotobuf==3.20.0",
"absl-py==1.4.0",
"accelerate==0.21.0",
"addict==2.4.0",
"aenum==3.1.15",
"aiofiles==23.2.1",
"aiohttp==3.8.5",
"aiosignal==1.3.1",
"albumentations==1.3.1",
"aliyun-python-sdk-alimt==3.2.0",
"aliyun-python-sdk-core==2.13.10",
"altair==5.1.1",
"antlr4-python3-runtime==4.9.3",
"anyio==3.7.1",
"async-timeout==4.0.3",
"attrs==23.1.0",
"basicsr==1.4.2",
"beautifulsoup4==4.12.2",
"blendmodes==2022",
"blip-ci==0.0.5",
"boltons==23.0.0",
"boto3==1.28.39",
"botocore==1.31.39",
"cachetools==5.3.1",
"certifi==2023.7.22",
"cffi==1.15.1",
"chardet==5.2.0",
"charset-normalizer==3.2.0",
"clean-fid==0.1.35",
"click==8.1.7",
"clip-interrogator==0.5.4",
"clip==1.0",
"colorama==0.4.6",
"coloredlogs==15.0.1",
"contourpy==1.1.0",
"cryptography==41.0.3",
"cssselect2==0.7.0",
"cycler==0.11.0",
"cython==3.0.2",
"deprecation==2.1.0",
"dill==0.3.7",
"easydict==1.10",
"einops==0.4.1",
"exceptiongroup==1.1.3",
"facexlib==0.3.0",
"fairscale==0.4.4",
"fastapi==0.94.0",
"ffmpy==0.3.1",
"filelock==3.12.3",
"filterpy==1.4.5",
"flatbuffers==23.5.26",
"fonttools==4.42.1",
"frozenlist==1.4.0",
"fsspec==2023.6.0",
"ftfy==6.1.1",
"future==0.18.3",
"fvcore==0.1.5.post20221221",
"gdown==4.7.1",
"gfpgan==1.3.8",
"gitdb==4.0.10",
"gitpython==3.1.32",
"google-auth-oauthlib==1.0.0",
"google-auth==2.22.0",
"gradio-client==0.5.0",
"gradio==3.41.2",
"grpcio==1.57.0",
"h11==0.12.0",
"httpcore==0.15.0",
"httpx==0.24.1",
"huggingface-hub==0.16.4",
"humanfriendly==10.0",
"idna==3.4",
"imageio==2.31.2",
"importlib-metadata==6.8.0",
"importlib-resources==6.0.1",
"inflection==0.5.1",
"insightface==0.7.3",
"iopath==0.1.9",
"jinja2==3.1.2",
"jmespath==0.10.0",
"joblib==1.3.2",
"jsonmerge==1.8.0",
"jsonschema-specifications==2023.7.1",
"jsonschema==4.19.0",
"kiwisolver==1.4.5",
"kornia==0.6.7",
"lark==1.1.2",
"lazy-loader==0.3",
"lightning-utilities==0.9.0",
"llvmlite==0.40.1",
"lmdb==1.4.1",
"lpips==0.1.4",
"lxml==4.9.3",
"markdown-it-py==3.0.0",
"markdown==3.4.4",
"markupsafe==2.1.3",
"matplotlib==3.7.2",
"mdurl==0.1.2",
"mediapipe==0.10.3",
"mpmath==1.3.0",
"multidict==6.0.4",
"multiprocess==0.70.15",
"networkx==3.1",
"numba==0.57.1",
"numpy==1.23.5",
"oauthlib==3.2.2",
"omegaconf==2.2.3",
"onnx==1.14.0",
"onnxruntime==1.15.0",
"open-clip-torch==2.20.0",
"openai==0.28.0",
"opencv-contrib-python==4.8.0.76",
"opencv-python-headless==4.8.0.76",
"opencv-python==4.8.0.76",
"orjson==3.9.5",
"packaging==23.1",
"pandas==2.1.0",
"pathos==0.3.1",
"piexif==1.1.3",
"pillow==9.5.0",
"pip==23.2.1",
"platformdirs==3.10.0",
"portalocker==2.7.0",
"pox==0.3.3",
"ppft==1.7.6.7",
"prettytable==3.8.0",
"protobuf==3.20.3",
"psutil==5.9.5",
"py-cpuinfo==9.0.0",
"pyasn1-modules==0.3.0",
"pyasn1==0.5.0",
"pycparser==2.21",
"pydantic==1.10.12",
"pydub==0.25.1",
"pyexecjs==1.5.1",
"pygments==2.16.1",
"pyparsing==3.0.9",
"pyreadline3==3.4.1",
"pysocks==1.7.1",
"python-dateutil==2.8.2",
"python-multipart==0.0.6",
"pytorch-lightning==1.9.4",
"pytz==2023.3",
"pywavelets==1.4.1",
"pywin32==306",
"pyyaml==6.0.1",
"qudida==0.0.4",
"realesrgan==0.3.0",
"referencing==0.30.2",
"regex==2023.8.8",
"reportlab==4.0.4",
"requests-oauthlib==1.3.1",
"requests==2.31.0",
"resize-right==0.0.2",
"rich==13.5.2",
"rpds-py==0.10.0",
"rsa==4.9",
"s3transfer==0.6.2",
"safetensors==0.3.1",
"scikit-image==0.21.0",
"scikit-learn==1.3.0",
"scipy==1.11.2",
"seaborn==0.12.2",
"semantic-version==2.10.0",
"sentencepiece==0.1.99",
"setuptools==68.1.2",
"six==1.16.0",
"smmap==5.0.0",
"sniffio==1.3.0",
"sounddevice==0.4.6",
"soupsieve==2.4.1",
"starlette==0.26.1",
"svglib==1.5.1",
"sympy==1.12",
"tabulate==0.9.0",
"tb-nightly==2.15.0a20230901",
"tensorboard-data-server==0.7.1",
"termcolor==2.3.0",
"threadpoolctl==3.2.0",
"tifffile==2023.8.30",
"timm==0.9.2",
"tinycss2==1.2.1",
"tokenizers==0.13.3",
"tomesd==0.1.3",
"tomli==2.0.1",
"toolz==0.12.0",
"torch==2.0.1+cu118",
"torchdiffeq==0.2.3",
"torchmetrics==1.1.1",
"torchsde==0.2.5",
"torchvision==0.15.2+cu118",
"tqdm==4.66.1",
"trampoline==0.1.2",
"transformers==4.30.2",
"typing-extensions==4.7.1",
"tzdata==2023.3",
"ultralytics==8.0.168",
"urllib3==1.26.16",
"uvicorn==0.23.2",
"wcwidth==0.2.6",
"webencodings==0.5.1",
"websockets==11.0.3",
"werkzeug==2.3.7",
"wheel==0.41.2",
"xformers==0.0.20",
"yacs==0.1.8",
"yapf==0.40.1",
"yarl==1.9.2",
"zipp==3.16.2"
]
}
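The NansException above already names the things to try: the "Upcast cross attention layer to float32" setting, --no-half, or --disable-nan-check to silence the check. Before reaching for those flags, a quick standalone test can show whether the card produces NaNs in half precision at all. This is only a rough sketch, not the webui's own check, and it assumes the torch==2.0.1+cu118 build from the package list plus a working CUDA device:

```python
import torch

def fp16_smoke_test() -> bool:
    # Tiny half-precision matmul on the GPU; NaNs here point at a genuine fp16 problem.
    if not torch.cuda.is_available():
        print("CUDA not available; half precision is not the culprit here")
        return False
    x = torch.randn(256, 256, device="cuda", dtype=torch.float16)
    y = x @ x
    has_nans = torch.isnan(y).any().item()
    print("fp16 matmul produced NaNs" if has_nans else "fp16 matmul looks fine")
    return not has_nans

if __name__ == "__main__":
    fp16_smoke_test()
```

If the matmul comes back clean, the NaNs are more likely coming from the model/VAE pairing in the config above (sd_vae is sdxl_vae.safetensors while the active checkpoint, revAnimated_v122, appears to be an SD 1.5 model), so switching the VAE or trying --no-half-vae may be a smaller hammer than --no-half.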
What browsers do you use to access the UI?
No response
Console logs
Additional information
No response
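One more thing worth ruling out: the traceback that ends in genericpath.py's getmtime dies while re-hashing the checkpoint after the settings change, which typically means the checkpoint path did not resolve at that moment (file moved, renamed, or the drive dropped out). A minimal check along these lines, with a hypothetical path that assumes the default models/Stable-diffusion layout and the checkpoint name from the config:

```python
import os

# Hypothetical location; adjust to wherever revAnimated_v122.safetensors actually lives.
checkpoint = r"C:\AI\Image AI\Stable-diffusion\webui\models\Stable-diffusion\revAnimated_v122.safetensors"

if os.path.isfile(checkpoint):
    print("found; mtime:", os.path.getmtime(checkpoint))
else:
    print("missing or unreadable:", checkpoint)
```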