I converted "stablediffusionapi/juggernaut-xl-v5" this model to a single safetensors file using this (https://github.com/huggingface/diffusers/blob/main/scripts/convert_diffusers_to_original_sdxl.py). Conversion happened properly but unable to load it using StableDiffusionXLPipeline.from_single_file()
Below is the error I am facing:
ValueError                                Traceback (most recent call last)
/home/example_notebook.ipynb Cell 3 line 1
----> 1 pipe = StableDiffusionXLPipeline.from_single_file(model_path,local_files_only = True)

File ~/diff_test_env/lib/python3.8/site-packages/diffusers/loaders.py:2308, in FromSingleFileMixin.from_single_file(cls, pretrained_model_link_or_path, **kwargs)
   2294 file_path = file_path[len("main/") :]
   2296 pretrained_model_link_or_path = hf_hub_download(
   2297     repo_id,
   2298     filename=file_path,
   (...)
   2305     force_download=force_download,
   2306 )
-> 2308 pipe = download_from_original_stable_diffusion_ckpt(
   2309     pretrained_model_link_or_path,
   2310     pipeline_class=cls,
   2311     model_type=model_type,
   2312     stable_unclip=stable_unclip,
   2313     controlnet=controlnet,
   2314     from_safetensors=from_safetensors,
   2315     extract_ema=extract_ema,
   2316     image_size=image_size,
   2317     scheduler_type=scheduler_type,
   2318     num_in_channels=num_in_channels,
   2319     upcast_attention=upcast_attention,
   2320     load_safety_checker=load_safety_checker,
   2321     prediction_type=prediction_type,
   2322     text_encoder=text_encoder,
   2323     vae=vae,
   2324     tokenizer=tokenizer,
   2325     original_config_file=original_config_file,
   2326     config_files=config_files,
   2327 )
   2329 if torch_dtype is not None:
   2330     pipe.to(torch_dtype=torch_dtype)

File ~/diff_test_env/lib/python3.8/site-packages/diffusers/pipelines/stable_diffusion/convert_from_ckpt.py:1605, in download_from_original_stable_diffusion_ckpt(checkpoint_path_or_dict, original_config_file, image_size, prediction_type, model_type, extract_ema, scheduler_type, num_in_channels, upcast_attention, device, from_safetensors, stable_unclip, stable_unclip_prior, clip_stats_path, controlnet, load_safety_checker, pipeline_class, local_files_only, vae_path, vae, text_encoder, tokenizer, config_files)
   1603 config_name = "laion/CLIP-ViT-bigG-14-laion2B-39B-b160k"
   1604 config_kwargs = {"projection_dim": 1280}
-> 1605 text_encoder_2 = convert_open_clip_checkpoint(
   1606     checkpoint, config_name, prefix="conditioner.embedders.1.model.", has_projection=True, **config_kwargs
   1607 )
   1609 if is_accelerate_available():  # SBM Now move model to cpu.
   1610     if model_type in ["SDXL", "SDXL-Refiner"]:

File ~/diff_test_env/lib/python3.8/site-packages/diffusers/pipelines/stable_diffusion/convert_from_ckpt.py:971, in convert_open_clip_checkpoint(checkpoint, config_name, prefix, has_projection, local_files_only, **config_kwargs)
    969 if is_accelerate_available():
    970     for param_name, param in text_model_dict.items():
--> 971         set_module_tensor_to_device(text_model, param_name, "cpu", value=param)
    972 else:
    973     if not (hasattr(text_model, "embeddings") and hasattr(text_model.embeddings.position_ids)):

File ~/diff_test_env/lib/python3.8/site-packages/accelerate/utils/modeling.py:285, in set_module_tensor_to_device(module, tensor_name, device, value, dtype, fp16_statistics)
    283 if value is not None:
    284     if old_value.shape != value.shape:
--> 285         raise ValueError(
    286             f'Trying to set a tensor of shape {value.shape} in "{tensor_name}" (which has shape {old_value.shape}), this look incorrect.'
    287         )
    289 if dtype is None:
    290     # For compatibility with PyTorch load_state_dict which converts state dict dtype to existing dtype in model
    291     value = value.to(old_value.dtype)
ValueError: Trying to set a tensor of shape torch.Size([1024]) in "bias" (which has shape torch.Size([1280])), this look incorrect.
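In case it helps narrow this down: the failing frame is the conversion of the second text encoder (prefix conditioner.embedders.1.model., loaded against the laion/CLIP-ViT-bigG-14-laion2B-39B-b160k config with projection_dim=1280), while the tensors read from my converted file under that prefix appear to be 1024-dim. Here is a minimal sketch I can use to dump those shapes from the converted file (the file path is a placeholder):

```python
from safetensors import safe_open

CKPT = "/path/to/juggernaut-xl-v5.safetensors"  # placeholder path to the converted file

# List the shapes stored under the second text encoder's prefix so the
# 1024-vs-1280 mismatch can be traced back to the conversion output.
with safe_open(CKPT, framework="pt") as f:
    for key in f.keys():
        if key.startswith("conditioner.embedders.1.model."):
            print(key, tuple(f.get_tensor(key).shape))
```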