matyhtf opened 4 months ago
The failure occurs very often, around 20% of the time. Reverting to an older version works fine.
```
commit a2c759409fe793ff3fd9f93cab5d63729191168c (master)
Author: lizhuang <lizhuang@stumail.neu.edu.cn>
Date:   Fri Jul 5 23:42:11 2024 +0800

    Update README_CN.md
```
The latest commit may have introduced some bugs.
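For reference, "reverting" here just means pinning the repo to an earlier commit. A minimal sketch; the report does not name the known-good commit, so `<older-commit>` is a placeholder:

```
git log --oneline            # pick a commit before a2c7594
git checkout <older-commit>
```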
Which Python script have you run?
The code is copied from scripts/sample.py; the only change is that the parameters come in through a FastAPI HTTP endpoint.
```python
from fastapi import FastAPI, Form
from fastapi.staticfiles import StaticFiles
import os, torch
import signal
import random
from datetime import datetime
# from PIL import Image
from kolors.pipelines.pipeline_stable_diffusion_xl_chatglm_256 import StableDiffusionXLPipeline
from kolors.models.modeling_chatglm import ChatGLMModel
from kolors.models.tokenization_chatglm import ChatGLMTokenizer
from diffusers import UNet2DConditionModel, AutoencoderKL
from diffusers import EulerDiscreteScheduler


# Exit the process when SIGUSR1 is received, so gunicorn respawns a fresh
# worker (used below to recover from a CUDA OOM).
def handler(signum, frame):
    os._exit(0)


signal.signal(signal.SIGUSR1, handler)

root_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

app = FastAPI()
# Serve generated images from the outputs directory.
app.mount("/static", StaticFiles(directory=f'{root_dir}/outputs'), name="static")

# Load the Kolors weights the same way scripts/sample.py does.
ckpt_dir = f'{root_dir}/weights/Kolors'
text_encoder = ChatGLMModel.from_pretrained(
    f'{ckpt_dir}/text_encoder',
    torch_dtype=torch.float16).half()
tokenizer = ChatGLMTokenizer.from_pretrained(f'{ckpt_dir}/text_encoder')
vae = AutoencoderKL.from_pretrained(f"{ckpt_dir}/vae", revision=None).half()
scheduler = EulerDiscreteScheduler.from_pretrained(f"{ckpt_dir}/scheduler")
unet = UNet2DConditionModel.from_pretrained(f"{ckpt_dir}/unet", revision=None).half()

pipe = StableDiffusionXLPipeline(
    vae=vae,
    text_encoder=text_encoder,
    tokenizer=tokenizer,
    unet=unet,
    scheduler=scheduler,
    force_zeros_for_empty_prompt=False)
pipe = pipe.to("cuda")
pipe.enable_model_cpu_offload()


@app.post("/imagine")
def imagine(prompt: str = Form(), width: int = Form(default=1024),
            height: int = Form(default=1024), n: int = Form(default=4)):
    random_number = random.randint(1, 2 << 30)
    now = datetime.now()
    formatted_date_time = now.strftime('%Y%m%d%H%M')
    # Clamp batch size and resolution to sane limits.
    n = min(n, 4)
    width = min(width, 1280)
    height = min(height, 1280)
    try:
        images = pipe(prompt=prompt,
                      height=height,
                      width=width,
                      num_inference_steps=50,
                      guidance_scale=5.0,
                      num_images_per_prompt=n,
                      generator=torch.Generator(pipe.device).manual_seed(random_number)).images
    except torch.cuda.OutOfMemoryError as e:
        # On OOM, signal the process so the handler above exits it and
        # gunicorn restarts a clean worker.
        os.kill(os.getpid(), signal.SIGUSR1)
        return {'data': None, 'code': 5000, 'message': f"Error: {e}"}
    outputs = []
    for i in range(n):
        image = images[i]
        file = f'{formatted_date_time}-{random_number}-{i}.png'
        image.save(f'{root_dir}/outputs/{file}')
        outputs.append(file)
    return {'data': {'images': outputs, 'seed': random_number}, 'code': 0}
```
The service is started with:

```
gunicorn -w 1 -k uvicorn.workers.UvicornWorker -b 0.0.0.0:9380 main:app
```
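For context, a hypothetical test call against the endpoint (not part of the original report; the form fields match the `/imagine` handler above, and FastAPI's `Form` parsing assumes `python-multipart` is installed):

```
curl -X POST http://127.0.0.1:9380/imagine \
  -F prompt="a photo of a cat" -F width=1024 -F height=1024 -F n=2
```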
Environment

Error message