thebestannie opened this issue 1 year ago (status: Open)
I ran into this issue when running:

python gen_img_diffusers.py --network_module="networks.lora" --network_weight="./chase/last.safetensors" --network_mul=0.7 --ckpt="XpucT/Deliberate" --outdir="./output/chase" --fp16 --W=512 --H=768 --scale=7 --sampler="ddim" --steps=28 --max_embeddings_multiples=3 --batch_size=1 --images_per_prompt=1 --clip_skip=2 --prompt="masterpiece, best quality, 1girl, aqua eyes, baseball cap, blonde hair, closed mouth, earrings, green background, hat, hoop earrings, jewelry, looking at viewer, shirt, short hair, simple background, solo, upper body, yellow shirt --n lowres, bad anatomy, bad hands, text, error, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality, normal quality, jpeg artifacts, signature, watermark, username, blurry"

What am I doing wrong? QAQ
File "gen_img_diffusers.py", line 2800, in
main(args)
File "gen_img_diffusers.py", line 2667, in main
prev_image = process_batch(batch_data, highres_fix)[0]
File "gen_img_diffusers.py", line 2460, in process_batch
clip_prompts=clip_prompts, clip_guide_images=guide_images)[0]
File "/workspace/anaconda3/lib/python3.7/site-packages/torch/autograd/grad_mode.py", line 27, in decorate_context
return func(*args, kwargs)
File "gen_img_diffusers.py", line 917, in call
noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample
File "/workspace/anaconda3/lib/python3.7/site-packages/torch/nn/modules/module.py", line 1130, in _call_impl
return forward_call(*input, *kwargs)
File "/workspace/anaconda3/lib/python3.7/site-packages/diffusers/models/unet_2d_condition.py", line 585, in forward
cross_attention_kwargs=cross_attention_kwargs,
File "/workspace/anaconda3/lib/python3.7/site-packages/torch/nn/modules/module.py", line 1130, in _call_impl
return forward_call(input, kwargs)
File "/workspace/anaconda3/lib/python3.7/site-packages/diffusers/models/unet_2d_blocks.py", line 840, in forward
cross_attention_kwargs=cross_attention_kwargs,
File "/workspace/anaconda3/lib/python3.7/site-packages/torch/nn/modules/module.py", line 1130, in _call_impl
return forward_call(*input, kwargs)
File "/workspace/anaconda3/lib/python3.7/site-packages/diffusers/models/transformer_2d.py", line 270, in forward
class_labels=class_labels,
File "/workspace/anaconda3/lib/python3.7/site-packages/torch/nn/modules/module.py", line 1130, in _call_impl
return forward_call(input, kwargs)
File "/workspace/anaconda3/lib/python3.7/site-packages/diffusers/models/attention.py", line 295, in forward
cross_attention_kwargs,
File "/workspace/anaconda3/lib/python3.7/site-packages/torch/nn/modules/module.py", line 1130, in _call_impl
return forward_call(input, kwargs)
TypeError: forward_flash_attn() got an unexpected keyword argument 'encoder_hidden_states'
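For context, the error message itself suggests that the script's flash-attention patch has replaced the attention module's forward with a function written for an older diffusers call convention (a positional input plus a context keyword), while the installed diffusers version calls the module with encoder_hidden_states as a keyword. Below is a minimal, self-contained sketch of that mechanism; the class and function names are illustrative stand-ins, not the actual sd-scripts or diffusers code:

    class PatchedAttention:
        # Hypothetical stand-in for a monkey-patched attention forward that
        # only understands the old-style 'context' keyword.
        def forward_flash_attn(self, x, context=None, mask=None):
            return x  # attention math omitted

    # The patch rebinds forward to the old-signature function.
    PatchedAttention.forward = PatchedAttention.forward_flash_attn

    attn = PatchedAttention()
    try:
        # Newer diffusers blocks pass encoder_hidden_states as a keyword,
        # which the patched function does not accept.
        attn.forward(None, encoder_hidden_states=None)
    except TypeError as e:
        print(e)  # forward_flash_attn() got an unexpected keyword argument 'encoder_hidden_states'

If that is what is happening here, the usual remedies would be to run without the flash-attention/xformers patch or to match the diffusers version to the one the script expects; I have not verified which applies to this setup.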