KohakuBlueleaf / LyCORIS

Lora beYond Conventional methods, Other Rank adaptation Implementations for Stable diffusion.
Apache License 2.0

ValueError: Attempting to unscale FP16 gradients when using algo=full #149

Closed · TPreece101 closed this issue 9 months ago

TPreece101 commented 9 months ago

Hi, I'm having trouble training a LyCORIS using the "full" algorithm. This is the error I get:

Traceback (most recent call last):
  File "/content/kohya-trainer/train_network.py", line 1033, in <module>
    trainer.train(args)
  File "/content/kohya-trainer/train_network.py", line 849, in train
    accelerator.clip_grad_norm_(params_to_clip, args.max_grad_norm)
  File "/usr/local/lib/python3.10/dist-packages/accelerate/accelerator.py", line 2040, in clip_grad_norm_
    self.unscale_gradients()
  File "/usr/local/lib/python3.10/dist-packages/accelerate/accelerator.py", line 2003, in unscale_gradients
    self.scaler.unscale_(opt)
  File "/usr/local/lib/python3.10/dist-packages/torch/cuda/amp/grad_scaler.py", line 307, in unscale_
    optimizer_state["found_inf_per_device"] = self._unscale_grads_(
  File "/usr/local/lib/python3.10/dist-packages/torch/cuda/amp/grad_scaler.py", line 229, in _unscale_grads_
    raise ValueError("Attempting to unscale FP16 gradients.")
ValueError: Attempting to unscale FP16 gradients.
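
For reference, the ValueError comes from torch.cuda.amp.GradScaler: unscale_() refuses to handle fp16 gradients, so it fires whenever any trainable parameter (and therefore its gradient) ends up in half precision. A minimal standalone sketch (plain PyTorch, not the trainer's code, needs a CUDA device) that reproduces the same error:

import torch

# A trainable parameter stored in fp16 produces fp16 gradients,
# which GradScaler.unscale_() rejects.
param = torch.nn.Parameter(torch.randn(4, 4, device="cuda", dtype=torch.float16))
optimizer = torch.optim.SGD([param], lr=1e-3)
scaler = torch.cuda.amp.GradScaler()

loss = (param * param).sum()
scaler.scale(loss).backward()   # param.grad is created in fp16, matching the weight dtype
scaler.unscale_(optimizer)      # -> ValueError: Attempting to unscale FP16 gradients.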

I'm wondering if there is anything obviously wrong in my config file (below). Training works fine with other algorithms such as lora and lokr, which is strange.

[Basics]
pretrained_model_name_or_path = "/content/drive/MyDrive/ComfyUI/models/checkpoints/SD15/sd-v1-5-pruned-noema-fp16.safetensors"
train_data_dir = "/content/drive/MyDrive/Loras/AveryCharacter/dataset"
resolution = "768"
seed = 23
max_train_steps = 10000 # This is overwritten by max_train_epochs anyway
max_train_epochs = 10
clip_skip = 1

[Save]
output_dir = "/content/drive/MyDrive/Loras/AveryCharacter/output"
output_name = "AveryCharacter"
save_precision = "fp16"
save_model_as = "safetensors"
save_every_n_epochs = 1
save_every_n_steps = 5000
save_state = false
save_last_n_steps_state = 1 # basically saves the last + final state if save_state is set to true
# save_last_n_epochs_state = 1
# save_n_epoch_ratio = 10
# save_last_n_epochs = 10
# save_last_n_steps = 100000

[SDv2]
v2 = false
v_parameterization = false
scale_v_pred_loss_like_noise_pred = false

[Network_setup]
network_dim = 64
network_alpha = 1
dim_from_weights = false
network_dropout = 0
network_train_unet_only = false
network_train_text_encoder_only = false
resume = false
# network_weights = 'path/to/network_weights'
# base_weights = 'path/to/base_weights'
# base_weights_multiplier = 1

[LyCORIS]
network_module = "lycoris.kohya"
network_args = [ "preset=attn-mlp", "algo=full", "train_norm=True", ]

[Optimizer]
train_batch_size = 1
gradient_checkpointing = false
gradient_accumulation_steps = 1
optimizer_type = "AdamW8bit"
unet_lr = 0.0004
text_encoder_lr = 0.0004
max_grad_norm = 1.0
optimizer_args = [ "weight_decay=0.1", "betas=0.9,0.99",]

[Lr_scheduler]
lr_scheduler_type = ""
lr_scheduler = "cosine"
lr_warmup_steps = 5
lr_scheduler_num_cycles = 1
lr_scheduler_power = 1.0    # Polynomial power for polynomial scheduler
# lr_scheduler_args = ...

[Training_precision]
mixed_precision = "fp16"

[Further_improvement]
min_snr_gamma = 0
# noise_offset = 0.05   # cannot be set with multires_noise
# adaptive_noise_scale = 0
multires_noise_discount = 0
multires_noise_iterations = 0
# scale_weight_norms = 1

[ARB]
enable_bucket = true
min_bucket_reso = 320
max_bucket_reso = 960
bucket_reso_steps = 64
bucket_no_upscale = true

[Captions]
shuffle_caption = false
caption_extension = ".txt"
keep_tokens = 0
caption_dropout_rate = 0
caption_dropout_every_n_epochs = 0
caption_tag_dropout_rate = 0.0
max_token_length = 150
weighted_captions = false
token_warmup_min = 1
token_warmup_step = 0

[Attention]
mem_eff_attn = false
xformers = true

[Data_augmentation]
color_aug = false
flip_aug = false
random_crop = false

[Cache_latents]
cache_latents = true
vae_batch_size = 1
cache_latents_to_disk = true

[Sampling_during_training]
sample_sampler = "ddim"
# sample_every_n_steps = 5000   # overwritten by n_epochs
# sample_every_n_epochs = 1
# sample_prompts = "sample_prompts.txt"

[Logging]
logging_dir = "logs_training"
log_with = "tensorboard"
log_prefix = "AveryCharacter_"
# log_tracker_name = ?
# wandb_api_key = ?

[Dataset]
max_data_loader_n_workers = 8
persistent_data_loader_workers = true
dataset_repeats = 1 # Not sure how this is used 
# dataset_class = package.module.Class
# dataset_config = ...

[Regularization]
# This is not really needed because you can do regularization by putting everything in train
# reg_data_dir = "/path/to/reg"
prior_loss_weight = 1.0

[Huggingface]
save_state_to_huggingface = false
resume_from_huggingface = false
async_upload = false
# There are more arguments

[Debugging]
debug_dataset = false

[Deprecated]
use_8bit_adam = false
use_lion_optimizer = false
learning_rate = 0.0004

[Others]
lowram = false
# in_json = "/path/to/json_metadata"
# face_crop_aug_range = 2.0
# vae = "/path/to/vae"
training_comment = ""

Extra Info

!pip install --upgrade accelerate==0.25.0 transformers==4.26.0 ftfy==6.1.1 albumentations==1.3.1 opencv-python==4.8.0.76 einops==0.6.0 diffusers[torch]==0.25.0 pytorch-lightning==1.9.0 bitsandbytes==0.41.3.post2 tensorflow==2.14.0 safetensors==0.4.1 toml==0.10.2 voluptuous==0.13.1 lion_pytorch==0.0.6 dadaptation==3.1 prodigyopt==1.0 lycoris_lora xformers==0.0.22.post7

Let me know if you need any extra info. I'm quite new to training LoRAs / LyCORIS, so I might be doing something silly.

KohakuBlueleaf commented 9 months ago

This should be a fault in kohya.
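
For context, here is a generic sketch of the pattern GradScaler expects (not kohya's actual code): trainable weights stay in fp32 and autocast handles the fp16 compute, so unscale_ and gradient clipping only ever see fp32 gradients. My assumption is that with algo=full the trainable parameters are the full module weights, and if those get cast to fp16 along with the rest of the base model, the unscale step fails exactly as in the traceback above.

import torch

# Standard fp16 mixed-precision loop (sketch): weights and optimizer state in fp32,
# forward pass in fp16 via autocast, gradients unscaled before clipping.
model = torch.nn.Linear(768, 768).cuda()             # weights stay fp32
optimizer = torch.optim.AdamW(model.parameters(), lr=4e-4)
scaler = torch.cuda.amp.GradScaler()

x = torch.randn(1, 768, device="cuda")
with torch.cuda.amp.autocast(dtype=torch.float16):   # compute runs in fp16
    loss = model(x).pow(2).mean()

scaler.scale(loss).backward()
scaler.unscale_(optimizer)                            # fine: gradients are fp32
torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)
scaler.step(optimizer)
scaler.update()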