kohya-ss / sd-scripts


ValueError: Attempting to unscale FP16 gradients when using algo=full #1107

Open TPreece101 opened 9 months ago

TPreece101 commented 9 months ago

Hi, I'm having trouble training a LyCORIS using the "full" algorithm. This is the error I get:

Traceback (most recent call last):
  File "/content/kohya-trainer/train_network.py", line 1033, in <module>
    trainer.train(args)
  File "/content/kohya-trainer/train_network.py", line 849, in train
    accelerator.clip_grad_norm_(params_to_clip, args.max_grad_norm)
  File "/usr/local/lib/python3.10/dist-packages/accelerate/accelerator.py", line 2040, in clip_grad_norm_
    self.unscale_gradients()
  File "/usr/local/lib/python3.10/dist-packages/accelerate/accelerator.py", line 2003, in unscale_gradients
    self.scaler.unscale_(opt)
  File "/usr/local/lib/python3.10/dist-packages/torch/cuda/amp/grad_scaler.py", line 307, in unscale_
    optimizer_state["found_inf_per_device"] = self._unscale_grads_(
  File "/usr/local/lib/python3.10/dist-packages/torch/cuda/amp/grad_scaler.py", line 229, in _unscale_grads_
    raise ValueError("Attempting to unscale FP16 gradients.")
ValueError: Attempting to unscale FP16 gradients.

It works fine with other algorithms such as lora and lokr, which is strange. I'm wondering if there is anything obviously wrong in my config file, which is included below.
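For what it's worth, the raise itself seems to come from PyTorch's GradScaler: accelerate's unscale_gradients() calls scaler.unscale_(), which refuses to touch gradients that are already stored in fp16. Here is a minimal sketch outside the trainer that triggers the same ValueError whenever the trained parameters themselves are fp16 (assuming a CUDA device; the tensor shape and learning rate are made up for illustration):

import torch

# GradScaler.unscale_ raises as soon as any optimizer parameter holds
# fp16 gradients, regardless of their values; this is the dtype check in
# grad_scaler.py's _unscale_grads_ shown in the traceback above.
scaler = torch.cuda.amp.GradScaler()
param = torch.nn.Parameter(torch.zeros(4, device="cuda", dtype=torch.float16))
opt = torch.optim.SGD([param], lr=1e-3)

loss = scaler.scale(param.sum())  # scale the loss as AMP training does
loss.backward()                   # gradients come out fp16, like the parameter
scaler.unscale_(opt)              # ValueError: Attempting to unscale FP16 gradients.

My guess is that algo=full trains the base model weights directly, and since my checkpoint is an fp16-pruned one those parameters come out as fp16, whereas lora and lokr create fresh fp32 parameters of their own. I may be wrong about that, though.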

[Basics]
pretrained_model_name_or_path = "/content/drive/MyDrive/ComfyUI/models/checkpoints/SD15/sd-v1-5-pruned-noema-fp16.safetensors"
train_data_dir = "/content/drive/MyDrive/Loras/AveryCharacter/dataset"
resolution = "768"
seed = 23
max_train_steps = 10000 # This is overwritten by max_train_epochs anyway
max_train_epochs = 10
clip_skip = 1

[Save]
output_dir = "/content/drive/MyDrive/Loras/AveryCharacter/output"
output_name = "AveryCharacter"
save_precision = "fp16"
save_model_as = "safetensors"
save_every_n_epochs = 1
save_every_n_steps = 5000
save_state = false
save_last_n_steps_state = 1 # keeps only the most recent state (plus the final one) if save_state is set to true
# save_last_n_epochs_state = 1
# save_n_epoch_ratio = 10
# save_last_n_epochs = 10
# save_last_n_steps = 100000

[SDv2]
v2 = false
v_parameterization = false
scale_v_pred_loss_like_noise_pred = false

[Network_setup]
network_dim = 64
network_alpha = 1
dim_from_weights = false
network_dropout = 0
network_train_unet_only = false
network_train_text_encoder_only = false
resume = false
# network_weights = 'path/to/network_weights'
# base_weights = 'path/to/base_weights'
# base_weights_multiplier = 1

[LyCORIS]
network_module = "lycoris.kohya"
network_args = [ "preset=attn-mlp", "algo=full", "train_norm=True", ]

[Optimizer]
train_batch_size = 1
gradient_checkpointing = false
gradient_accumulation_steps = 1
optimizer_type = "AdamW8bit"
unet_lr = 0.0004
text_encoder_lr = 0.0004
max_grad_norm = 1.0
optimizer_args = [ "weight_decay=0.1", "betas=0.9,0.99",]

[Lr_scheduler]
lr_scheduler_type = ""
lr_scheduler = "cosine"
lr_warmup_steps = 5
lr_scheduler_num_cycles = 1
lr_scheduler_power = 1.0    # Power for the polynomial scheduler
# lr_scheduler_args = ...

[Training_precision]
mixed_precision = "fp16"

[Further_improvement]
min_snr_gamma = 0
# noise_offset = 0.05   # cannot be set with multires_noise
# adaptive_noise_scale = 0
multires_noise_discount = 0
multires_noise_iterations = 0
# scale_weight_norms = 1

[ARB]
enable_bucket = true
min_bucket_reso = 320
max_bucket_reso = 960
bucket_reso_steps = 64
bucket_no_upscale = true

[Captions]
shuffle_caption = false
caption_extension = ".txt"
keep_tokens = 0
caption_dropout_rate = 0
caption_dropout_every_n_epochs = 0
caption_tag_dropout_rate = 0.0
max_token_length = 150
weighted_captions = false
token_warmup_min = 1
token_warmup_step = 0

[Attention]
mem_eff_attn = false
xformers = true

[Data_augmentation]
color_aug = false
flip_aug = false
random_crop = false

[Cache_latents]
cache_latents = true
vae_batch_size = 1
cache_latents_to_disk = true

[Sampling_during_training]
sample_sampler = "ddim"
# sample_every_n_steps = 5000   # overridden by sample_every_n_epochs
# sample_every_n_epochs = 1
# sample_prompts = "sample_prompts.txt"

[Logging]
logging_dir = "logs_training"
log_with = "tensorboard"
log_prefix = "AveryCharacter_"
# log_tracker_name = ?
# wandb_api_key = ?

[Dataset]
max_data_loader_n_workers = 8
persistent_data_loader_workers = true
dataset_repeats = 1 # Not sure how this is used 
# dataset_class = package.module.Class
# dataset_config = ...

[Regularization]
# Not really needed, since you can get the same regularization effect by putting everything in the train set
# reg_data_dir = "/path/to/reg"
prior_loss_weight = 1.0

[Huggingface]
save_state_to_huggingface = false
resume_from_huggingface = false
async_upload = false
# There are more arguments

[Debugging]
debug_dataset = false

[Deprecated]
use_8bit_adam = false
use_lion_optimizer = false
learning_rate = 0.0004

[Others]
lowram = false
# in_json = "/path/to/json_metadata"
# face_crop_aug_range = 2.0
# vae = "/path/to/vae"
training_comment = ""

Extra Info

Let me know if you need any extra info. I'm quite new to training LoRAs / LyCORIS, so I might be doing something silly.

kuzman123 commented 9 months ago

Have you tried setting max_grad_norm = 0.0? This is the command I used and it worked; just change the training script, paths, and filenames:

accelerate launch --num_cpu_threads_per_process=4 "./sdxl_train_network.py" \
  --enable_bucket --min_bucket_reso=256 --max_bucket_reso=2048 \
  --pretrained_model_name_or_path="/workspace/stable-diffusion-webui/models/Stable-diffusion/sd_xl_base_1.0.safetensors" \
  --train_data_dir="/workspace/dataset/img" --resolution="1024,1024" \
  --output_dir="/workspace/dataset/model" --logging_dir="workspace/dataset/log" \
  --save_model_as=safetensors --network_module=lycoris.kohya \
  --network_args "preset=attn-mlp" "algo=full" "train_norm=True" "rank_dropout=0" "module_dropout=0" "use_tucker=True" "use_scalar=False" "rank_dropout_scale=False" \
  --network_dropout="0" --text_encoder_lr=1.0 --unet_lr=1.0 \
  --output_name="Mstult" --lr_scheduler_num_cycles="100" \
  --no_half_vae --full_bf16 --learning_rate="1.0" --lr_scheduler="cosine" \
  --train_batch_size="2" --max_train_steps="8700" --save_every_n_epochs="10" \
  --mixed_precision="bf16" --save_precision="bf16" \
  --caption_extension=".txt" --cache_latents --cache_latents_to_disk \
  --optimizer_type="Prodigy" --optimizer_args decouple=True weight_decay=0.01 d_coef=2 use_bias_correction=True safeguard_warmup=False betas=0.9,0.999 \
  --max_grad_norm="0" \
  --max_data_loader_n_workers="1" --keep_tokens="1" --vae_batch_size="2" \
  --bucket_reso_steps=32 --min_snr_gamma=5 --shuffle_caption \
  --gradient_checkpointing --persistent_data_loader_workers \
  --noise_offset=0.0357 \
  --vae="/workspace/stable-diffusion-webui/models/VAE/sdxl_vae.safetensors" \
  --sample_sampler=euler_a --sample_prompts="/workspace/dataset/model/sample/prompt.txt" \
  --sample_every_n_steps="870"
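As far as I can tell, two things in that command matter for this error: --max_grad_norm="0" makes train_network.py skip the accelerator.clip_grad_norm_ call from your traceback, and --mixed_precision="bf16" (with --full_bf16) means accelerate never creates a GradScaler in the first place, because bf16 has the same exponent range as fp32 and does not need loss scaling. A rough sketch of that second point in plain PyTorch (assuming a CUDA device; names and values are just for illustration):

import torch

# bf16 keeps fp32's exponent range, so training runs without a
# GradScaler and there is no unscale step left to fail.
param = torch.nn.Parameter(torch.zeros(4, device="cuda", dtype=torch.bfloat16))
opt = torch.optim.SGD([param], lr=1e-3)

param.sum().backward()                                 # plain backward, no scaler
torch.nn.utils.clip_grad_norm_([param], max_norm=1.0)  # clipping bf16 grads is fine
opt.step()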

kuzman123 commented 9 months ago

Make sure you have enough RAM, because I was getting an error when saving the checkpoint (RTX 3090). I had to use an RTX A6000 on RunPod to make it work.