Open Fqlox opened 1 month ago
I installed diffusers
from the source and this seems to work:
import torch
from diffusers import DiffusionPipeline, ControlNetModel
from diffusers import UniPCMultistepScheduler

# Local path to the InstantID ControlNet ("IdentityNet") weights.
# FIX: the original used an f-string with no placeholders (f'...'); a plain
# string literal is equivalent and avoids the misleading f prefix.
controlnet_path = 'path/to/instant/id'
# load IdentityNet in half precision to reduce VRAM usage
identityNet = ControlNetModel.from_pretrained(controlnet_path, torch_dtype=torch.float16)

# Build the SDXL "reference" community pipeline on the base SDXL checkpoint.
pipe = DiffusionPipeline.from_pretrained(
"stabilityai/stable-diffusion-xl-base-1.0",
custom_pipeline="stable_diffusion_xl_reference",
torch_dtype=torch.float16,
use_safetensors=True,
variant="fp16").to('cuda')
# Swap in UniPC as the sampler, reusing the existing scheduler config.
pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)

# Build the InstantID community pipeline, sharing the already-loaded
# components from `pipe` so the models are not loaded twice.
pipe_instant = DiffusionPipeline.from_pretrained(
"stabilityai/stable-diffusion-xl-base-1.0",
custom_pipeline="pipeline_stable_diffusion_xl_instantid",
#vae = pipe.vae, # I tried both witout and with the VAE
text_encoder = pipe.text_encoder,
text_encoder_2 = pipe.text_encoder_2,
tokenizer = pipe.tokenizer,
tokenizer_2 = pipe.tokenizer_2,
unet = pipe.unet,
scheduler = pipe.scheduler,
feature_extractor = pipe.feature_extractor,
controlnet = identityNet,
)
You can create your InstantID pipeline from the SDXL reference pipeline with this script:
import torch
from diffusers import DiffusionPipeline, ControlNetModel
from diffusers import UniPCMultistepScheduler
# load IdentityNet
# Download the InstantID ControlNet weights from the Hub (half precision).
identityNet = ControlNetModel.from_pretrained("InstantX/InstantID", subfolder ="ControlNetModel", torch_dtype=torch.float16)
# Build the SDXL "reference" community pipeline on the base SDXL checkpoint.
pipe = DiffusionPipeline.from_pretrained(
"stabilityai/stable-diffusion-xl-base-1.0",
custom_pipeline="stable_diffusion_xl_reference",
torch_dtype=torch.float16,
use_safetensors=True,
variant="fp16").to('cuda')
# Swap in UniPC as the sampler, reusing the existing scheduler config.
pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)
# Derive the InstantID pipeline from the reference pipeline, reusing its
# components; only the controlnet is supplied fresh.
# NOTE(review): `from_pipe` requires a recent diffusers version — the issue
# reports it does not work on 0.26.3.
pipe_instant = DiffusionPipeline.from_pipe(
pipe,
custom_pipeline="pipeline_stable_diffusion_xl_instantid",
controlnet = identityNet,
)
@standardAI I did install diffusers from source, but then I had to downgrade due to an error with InstantID. I generated both with the reference and InstantID arguments together, and with only the InstantID arguments — it does not impact the generation.
# Generate with BOTH the InstantID inputs and the reference-image inputs.
# NOTE(review): `prompt`, `negative_prompt`, `face_emb`, `face_kps`, and
# `ref_image` are defined elsewhere in the user's session — not shown here.
image_plus_ref = pipe_instant(
prompt,
negative_prompt=negative_prompt,
num_inference_steps=4,  # low step count — a Lightning-style model is used
guidance_scale=1.2,
image_proj_model_in_features=face_emb,
image_embeds=face_emb,  # face identity embedding
image=face_kps,  # facial keypoints image for the ControlNet
controlnet_conditioning_scale=0.8,
seed = 42,
reference_attn=True,  # reference-pipeline arguments
reference_adain=True,
ref_image = ref_image
).images[0]
And
# Same call WITHOUT the reference-image arguments, for comparison.
# NOTE(review): `prompt`, `negative_prompt`, `face_emb`, and `face_kps`
# are defined elsewhere in the user's session — not shown here.
image = pipe_instant(
prompt,
negative_prompt=negative_prompt,
num_inference_steps=4,  # low step count — a Lightning-style model is used
guidance_scale=1.2,
image_proj_model_in_features=face_emb,
image_embeds=face_emb,  # face identity embedding
image=face_kps,  # facial keypoints image for the ControlNet
controlnet_conditioning_scale=0.8,
seed = 42,
).images[0]
[Note that I use a Lightning-distilled diffusion model.]
@yiyixuxu Since I'm on diffusers==0.26.3, the `from_pipe` method does not seem to work.
Describe the bug
I cannot use both stable diffusion XL reference and Instant ID in the same pipeline. I get
'FrozenDict' object has no attribute 'block_out_channels'
Reproduction
Logs
System Info
diffusers
version: 0.25.0

Who can help?
@yiyixuxu @sayakpaul @DN6 @stevhliu