AUTOMATIC1111 / stable-diffusion-webui

Stable Diffusion web UI
GNU Affero General Public License v3.0
143.63k stars 27.03k forks source link

[Bug]: Issue with /progress endpoint #15509

Open monsieurpooh opened 7 months ago

monsieurpooh commented 7 months ago

Checklist

What happened?

/progress now takes up to 2-3 seconds to respond per query. It used to be almost instant. Using a highly outdated version of this code base, I was able to query progress very rapidly. Using a more recent version, it's very slow.

Steps to reproduce the problem

Call the /progress endpoint and count how many milliseconds elapsed

What should have happened?

We expect a very fast response for snappy progress bar updating (under 250 ms), but it takes about 2-3 seconds to respond

What browsers do you use to access the UI?

No response

Sysinfo

{ "Platform": "Windows-10-10.0.19045-SP0", "Python": "3.10.6", "Version": "1.8.0-RC", "Commit": "", "Script path": "C:\Max\stable-diffusion-webui-master", "Data path": "C:\Max\stable-diffusion-webui-master", "Extensions dir": "C:\Max\stable-diffusion-webui-master\extensions", "Checksum": "fdad020d36f25d89efa333f36e71bbe032b6b5fd1f2d4c043354e52fcfc2477e", "Commandline": [ "launch.py", "--api" ], "Torch env info": { "torch_version": "2.1.2+cu121", "is_debug_build": "False", "cuda_compiled_version": "12.1", "gcc_version": null, "clang_version": null, "cmake_version": null, "os": "Microsoft Windows 10 Pro", "libc_version": "N/A", "python_version": "3.10.6 (tags/v3.10.6:9c7b4bd, Aug 1 2022, 21:53:49) [MSC v.1932 64 bit (AMD64)] (64-bit runtime)", "python_platform": "Windows-10-10.0.19045-SP0", "is_cuda_available": "True", "cuda_runtime_version": "11.8.89\r", "cuda_module_loading": "LAZY", "nvidia_driver_version": "546.01", "nvidia_gpu_models": "GPU 0: NVIDIA GeForce RTX 2060 SUPER", "cudnn_version": null, "pip_version": "pip3", "pip_packages": [ "numpy==1.26.2", "open-clip-torch==2.20.0", "pytorch-lightning==1.9.4", "torch==2.1.2+cu121", "torchdiffeq==0.2.3", "torchmetrics==1.3.2", "torchsde==0.2.6", "torchvision==0.16.2+cu121" ], "conda_packages": null, "hip_compiled_version": "N/A", "hip_runtime_version": "N/A", "miopen_runtime_version": "N/A", "caching_allocator_config": "", "is_xnnpack_available": "True", "cpu_info": [ "Architecture=9", "CurrentClockSpeed=3500", "DeviceID=CPU0", "Family=107", "L2CacheSize=3072", "L2CacheSpeed=", "Manufacturer=AuthenticAMD", "MaxClockSpeed=3500", "Name=AMD Ryzen 5 3600 6-Core Processor ", "ProcessorType=3", "Revision=28928" ] }, "Exceptions": [], "CPU": { "model": "AMD64 Family 23 Model 113 Stepping 0, AuthenticAMD", "count logical": 12, "count physical": 6 }, "RAM": { "total": "64GB", "used": "33GB", "free": "31GB" }, "Extensions": [], "Inactive extensions": [], "Environment": { "COMMANDLINE_ARGS": "--api", 
"GRADIO_ANALYTICS_ENABLED": "False" }, "Config": { "ldsr_steps": 100, "ldsr_cached": false, "SCUNET_tile": 256, "SCUNET_tile_overlap": 8, "SWIN_tile": 192, "SWIN_tile_overlap": 8, "SWIN_torch_compile": false, "hypertile_enable_unet": false, "hypertile_enable_unet_secondpass": false, "hypertile_max_depth_unet": 3, "hypertile_max_tile_unet": 256, "hypertile_swap_size_unet": 3, "hypertile_enable_vae": false, "hypertile_max_depth_vae": 3, "hypertile_max_tile_vae": 128, "hypertile_swap_size_vae": 3, "sd_model_checkpoint": "v1-5-pruned-emaonly.safetensors [6ce0161689]", "sd_checkpoint_hash": "6ce0161689b3853acaa03779ec93eafe75a02f4ced659bee03f50797806fa2fa" }, "Startup": { "total": 12.934616565704346, "records": { "initial startup": 0.02251911163330078, "prepare environment/checks": 0.008507728576660156, "prepare environment/git version info": 0.03803229331970215, "prepare environment/torch GPU test": 2.395559549331665, "prepare environment/clone repositores": 0.15162944793701172, "prepare environment/run extensions installers": 0.0, "prepare environment": 2.630760908126831, "launcher": 0.0025022029876708984, "import torch": 4.942748308181763, "import gradio": 1.000359296798706, "setup paths": 1.1354761123657227, "import ldm": 0.006005048751831055, "import sgm": 0.0, "initialize shared": 0.2747361660003662, "other imports": 0.608522891998291, "opts onchange": 0.0005004405975341797, "setup SD model": 0.0005002021789550781, "setup codeformer": 0.001501321792602539, "setup gfpgan": 0.014012336730957031, "set samplers": 0.0, "list extensions": 0.0015010833740234375, "restore config state file": 0.0, "list SD models": 0.0010008811950683594, "list localizations": 0.0, "load scripts/custom_code.py": 0.003503084182739258, "load scripts/img2imgalt.py": 0.0005006790161132812, "load scripts/loopback.py": 0.0005002021789550781, "load scripts/outpainting_mk_2.py": 0.0, "load scripts/poor_mans_outpainting.py": 0.0005006790161132812, "load scripts/postprocessing_caption.py": 
0.0005002021789550781, "load scripts/postprocessing_codeformer.py": 0.0, "load scripts/postprocessing_create_flipped_copies.py": 0.0005004405975341797, "load scripts/postprocessing_focal_crop.py": 0.001001119613647461, "load scripts/postprocessing_gfpgan.py": 0.0, "load scripts/postprocessing_split_oversized.py": 0.0005002021789550781, "load scripts/postprocessing_upscale.py": 0.0005006790161132812, "load scripts/processing_autosized_crop.py": 0.0, "load scripts/prompt_matrix.py": 0.0005002021789550781, "load scripts/prompts_from_file.py": 0.0005004405975341797, "load scripts/sd_upscale.py": 0.0, "load scripts/xyz_grid.py": 0.0020017623901367188, "load scripts/ldsr_model.py": 0.759652853012085, "load scripts/lora_script.py": 0.1301114559173584, "load scripts/scunet_model.py": 0.0245211124420166, "load scripts/swinir_model.py": 0.025524139404296875, "load scripts/hotkey_config.py": 0.0004982948303222656, "load scripts/extra_options_section.py": 0.0005002021789550781, "load scripts/hypertile_script.py": 0.047541141510009766, "load scripts/hypertile_xyz.py": 0.0005004405975341797, "load scripts/soft_inpainting.py": 0.0005004405975341797, "load scripts/comments.py": 0.022018909454345703, "load scripts/refiner.py": 0.0005006790161132812, "load scripts/seed.py": 0.0005002021789550781, "load scripts": 1.0233795642852783, "load upscalers": 0.004503726959228516, "refresh VAE": 0.0015015602111816406, "refresh textual inversion templates": 0.0, "scripts list_optimizers": 0.0010006427764892578, "scripts list_unets": 0.0, "reload hypernetworks": 0.0010008811950683594, "initialize extra networks": 0.011510133743286133, "scripts before_ui_callback": 0.0020012855529785156, "create ui": 0.5604817867279053, "gradio launch": 0.47290897369384766, "add APIs": 0.2502129077911377, "app_started_callback/lora_script.py": 0.0005006790161132812, "app_started_callback": 0.0005006790161132812 } }, "Packages": [ "accelerate==0.21.0", "aenum==3.1.15", "aiofiles==23.2.1", "aiohttp==3.9.3", 
"aiosignal==1.3.1", "altair==5.3.0", "antlr4-python3-runtime==4.9.3", "anyio==3.7.1", "async-timeout==4.0.3", "attrs==23.2.0", "blendmodes==2022", "certifi==2024.2.2", "charset-normalizer==3.3.2", "clean-fid==0.1.35", "click==8.1.7", "clip==1.0", "colorama==0.4.6", "contourpy==1.2.1", "cycler==0.12.1", "deprecation==2.1.0", "einops==0.4.1", "exceptiongroup==1.2.0", "facexlib==0.3.0", "fastapi==0.94.0", "ffmpy==0.3.2", "filelock==3.13.3", "filterpy==1.4.5", "fonttools==4.51.0", "frozenlist==1.4.1", "fsspec==2024.3.1", "ftfy==6.2.0", "gitdb==4.0.11", "gitpython==3.1.32", "gradio-client==0.5.0", "gradio==3.41.2", "h11==0.12.0", "httpcore==0.15.0", "httpx==0.24.1", "huggingface-hub==0.22.2", "idna==3.6", "imageio==2.34.0", "importlib-resources==6.4.0", "inflection==0.5.1", "jinja2==3.1.3", "jsonmerge==1.8.0", "jsonschema-specifications==2023.12.1", "jsonschema==4.21.1", "kiwisolver==1.4.5", "kornia==0.6.7", "lark==1.1.2", "lazy-loader==0.4", "lightning-utilities==0.11.2", "llvmlite==0.42.0", "markupsafe==2.1.5", "matplotlib==3.8.4", "mpmath==1.3.0", "multidict==6.0.5", "networkx==3.2.1", "numba==0.59.1", "numpy==1.26.2", "omegaconf==2.2.3", "open-clip-torch==2.20.0", "opencv-python==4.9.0.80", "orjson==3.10.0", "packaging==24.0", "pandas==2.2.1", "piexif==1.1.3", "pillow==9.5.0", "pip==22.2.1", "protobuf==3.20.0", "psutil==5.9.5", "pydantic==1.10.15", "pydub==0.25.1", "pyparsing==3.1.2", "python-dateutil==2.9.0.post0", "python-multipart==0.0.9", "pytorch-lightning==1.9.4", "pytz==2024.1", "pywavelets==1.6.0", "pyyaml==6.0.1", "referencing==0.34.0", "regex==2023.12.25", "requests==2.31.0", "resize-right==0.0.2", "rpds-py==0.18.0", "safetensors==0.4.2", "scikit-image==0.21.0", "scipy==1.13.0", "semantic-version==2.10.0", "sentencepiece==0.2.0", "setuptools==63.2.0", "six==1.16.0", "smmap==5.0.1", "sniffio==1.3.1", "spandrel==0.1.6", "starlette==0.26.1", "sympy==1.12", "tifffile==2024.2.12", "timm==0.9.16", "tokenizers==0.13.3", "tomesd==0.1.3", "toolz==0.12.1", 
"torch==2.1.2+cu121", "torchdiffeq==0.2.3", "torchmetrics==1.3.2", "torchsde==0.2.6", "torchvision==0.16.2+cu121", "tqdm==4.66.2", "trampoline==0.1.2", "transformers==4.30.2", "typing-extensions==4.11.0", "tzdata==2024.1", "urllib3==2.2.1", "uvicorn==0.29.0", "wcwidth==0.2.13", "websockets==11.0.3", "yarl==1.9.4" ] }

Console logs

venv "C:\Max\stable-diffusion-webui-master\venv\Scripts\Python.exe"
fatal: not a git repository (or any of the parent directories): .git
fatal: not a git repository (or any of the parent directories): .git
Python 3.10.6 (tags/v3.10.6:9c7b4bd, Aug  1 2022, 21:53:49) [MSC v.1932 64 bit (AMD64)]
Version: 1.8.0-RC
Commit hash: <none>
Launching Web UI with arguments: --api
no module 'xformers'. Processing without...
no module 'xformers'. Processing without...
No module 'xformers'. Proceeding without it.
Loading weights [6ce0161689] from C:\Max\stable-diffusion-webui-master\models\Stable-diffusion\v1-5-pruned-emaonly.safetensors
Creating model from config: C:\Max\stable-diffusion-webui-master\configs\v1-inference.yaml
Running on local URL:  http://127.0.0.1:7860

To create a public link, set `share=True` in `launch()`.
Startup time: 12.9s (prepare environment: 2.6s, import torch: 4.9s, import gradio: 1.0s, setup paths: 1.1s, initialize shared: 0.3s, other imports: 0.6s, load scripts: 1.0s, create ui: 0.6s, gradio launch: 0.5s, add APIs: 0.3s).
Applying attention optimization: Doggettx... done.
Model loaded in 4.9s (load weights from disk: 0.7s, create model: 0.6s, apply weights to model: 2.8s, apply dtype to VAE: 0.2s, load textual inversion embeddings: 0.3s, calculate empty prompt: 0.3s).
100%|████████████████████████████████████████████████████████████████████████████████| 100/100 [00:13<00:00,  7.64it/s]
Total progress: 100%|████████████████████████████████████████████████████████████████| 100/100 [00:12<00:00,  7.97it/s]
Total progress: 100%|████████████████████████████████████████████████████████████████| 100/100 [00:12<00:00,  7.94it/s]

Additional information

No response

monsieurpooh commented 7 months ago

I think the issue is /progress is now blocking until the next "preview" image is reached (e.g. preview the img gen after every 10 steps). This wasn't how it behaved before

missionfloyd commented 7 months ago

I think the issue is /progress is now blocking until the next "preview" image is reached (e.g. preview the img gen after every 10 steps). This wasn't how it behaved before

It doesn't wait for the next preview.

In this example, I've set previews to every 10 steps and call /sdapi/v1/progress every 100ms.

https://github.com/AUTOMATIC1111/stable-diffusion-webui/assets/4073789/d729eef1-d40e-48ec-a2f5-4b1e5a95f952

Code for the above ```html
``` ```js document.querySelector("#btn_generate").addEventListener("click", txt2img); var progressInterval; async function txt2img() { const payload = { prompt: document.querySelector("#prompt").value, negative_prompt: document.querySelector("#neg_prompt").value, sampler_name: "Euler a", steps: 20, width: 512, height: 768, cfg_scale: 4, }; progressInterval = setInterval(progress, 100); const response = await fetch("http://192.168.1.171:7860/sdapi/v1/txt2img", { headers: { "Content-Type": "application/json" }, method: "POST", body: JSON.stringify(payload) }); const json = await response.json(); document.querySelector("#output").src = "data:image/png;base64," + json.images[0]; document.querySelector("#info").textContent = JSON.parse(json.info).infotexts[0]; clearInterval(progressInterval); progressInterval = null; } async function progress() { const response = await fetch("http://192.168.1.171:7860/sdapi/v1/progress"); const json = await response.json(); document.querySelector("#progress").value = json.progress; if (json.current_image && progressInterval) { document.querySelector("#output").src = "data:image/png;base64," + json.current_image; } } ```
Python example ```python import threading import time import requests import base64 url = "http://192.168.1.171:7860" payload = { "prompt": "cat", "steps": 20, } response = None def post(): global response response = requests.post(url=f"{url}/sdapi/v1/txt2img", json=payload) t = threading.Thread(target=post) t.start() while t.is_alive(): progress = requests.get(f"{url}/sdapi/v1/progress").json() print(progress["progress"]) time.sleep(0.1) r = response.json() with open("output.png", "wb") as f: f.write(base64.b64decode(r["images"][0])) ```