bmaltais / kohya_ss


RuntimeError: "cat_cuda" not implemented for 'Float8_e4m3fn' #2733

Open axelblaze88 opened 1 month ago

axelblaze88 commented 1 month ago

Hello, I am unable to start my training. I am encountering the following error:

[rank1]: Traceback (most recent call last):
[rank1]:   File "/kaggle/tmp/kohya_ss/sd-scripts/flux_train_network.py", line 411, in <module>
[rank1]:     trainer.train(args)
[rank1]:   File "/kaggle/tmp/kohya_ss/sd-scripts/train_network.py", line 342, in train
[rank1]:     model_version, text_encoder, vae, unet = self.load_target_model(args, weight_dtype, accelerator)
[rank1]:   File "/kaggle/tmp/kohya_ss/sd-scripts/flux_train_network.py", line 65, in load_target_model
[rank1]:     model = self.prepare_split_model(model, weight_dtype, accelerator)
[rank1]:   File "/kaggle/tmp/kohya_ss/sd-scripts/flux_train_network.py", line 103, in prepare_split_model
[rank1]:     flux_upper = accelerator.prepare(flux_upper)
[rank1]:   File "/opt/conda/lib/python3.10/site-packages/accelerate/accelerator.py", line 1311, in prepare
[rank1]:     result = tuple(
[rank1]:   File "/opt/conda/lib/python3.10/site-packages/accelerate/accelerator.py", line 1312, in <genexpr>
[rank1]:     self._prepare_one(obj, first_pass=True, device_placement=d) for obj, d in zip(args, device_placement)
[rank1]:   File "/opt/conda/lib/python3.10/site-packages/accelerate/accelerator.py", line 1188, in _prepare_one
[rank1]:     return self.prepare_model(obj, device_placement=device_placement)
[rank1]:   File "/opt/conda/lib/python3.10/site-packages/accelerate/accelerator.py", line 1452, in prepare_model
[rank1]:     model = torch.nn.parallel.DistributedDataParallel(
[rank1]:   File "/opt/conda/lib/python3.10/site-packages/torch/nn/parallel/distributed.py", line 824, in __init__
[rank1]:     _sync_module_states(
[rank1]:   File "/opt/conda/lib/python3.10/site-packages/torch/distributed/utils.py", line 315, in _sync_module_states
[rank1]:     _sync_params_and_buffers(process_group, module_states, broadcast_bucket_size, src)
[rank1]:   File "/opt/conda/lib/python3.10/site-packages/torch/distributed/utils.py", line 326, in _sync_params_and_buffers
[rank1]:     dist._broadcast_coalesced(
[rank1]: RuntimeError: "cat_cuda" not implemented for 'Float8_e4m3fn'
[rank0]: Traceback (most recent call last):
[rank0]:   File "/kaggle/tmp/kohya_ss/sd-scripts/flux_train_network.py", line 411, in <module>
[rank0]:     trainer.train(args)
[rank0]:   File "/kaggle/tmp/kohya_ss/sd-scripts/train_network.py", line 342, in train
[rank0]:     model_version, text_encoder, vae, unet = self.load_target_model(args, weight_dtype, accelerator)
[rank0]:   File "/kaggle/tmp/kohya_ss/sd-scripts/flux_train_network.py", line 65, in load_target_model
[rank0]:     model = self.prepare_split_model(model, weight_dtype, accelerator)
[rank0]:   File "/kaggle/tmp/kohya_ss/sd-scripts/flux_train_network.py", line 103, in prepare_split_model
[rank0]:     flux_upper = accelerator.prepare(flux_upper)
[rank0]:   File "/opt/conda/lib/python3.10/site-packages/accelerate/accelerator.py", line 1311, in prepare
[rank0]:     result = tuple(
[rank0]:   File "/opt/conda/lib/python3.10/site-packages/accelerate/accelerator.py", line 1312, in <genexpr>
[rank0]:     self._prepare_one(obj, first_pass=True, device_placement=d) for obj, d in zip(args, device_placement)
[rank0]:   File "/opt/conda/lib/python3.10/site-packages/accelerate/accelerator.py", line 1188, in _prepare_one
[rank0]:     return self.prepare_model(obj, device_placement=device_placement)
[rank0]:   File "/opt/conda/lib/python3.10/site-packages/accelerate/accelerator.py", line 1452, in prepare_model
[rank0]:     model = torch.nn.parallel.DistributedDataParallel(
[rank0]:   File "/opt/conda/lib/python3.10/site-packages/torch/nn/parallel/distributed.py", line 824, in __init__
[rank0]:     _sync_module_states(
[rank0]:   File "/opt/conda/lib/python3.10/site-packages/torch/distributed/utils.py", line 315, in _sync_module_states
[rank0]:     _sync_params_and_buffers(process_group, module_states, broadcast_bucket_size, src)
[rank0]:   File "/opt/conda/lib/python3.10/site-packages/torch/distributed/utils.py", line 326, in _sync_params_and_buffers
[rank0]:     dist._broadcast_coalesced(
[rank0]: RuntimeError: "cat_cuda" not implemented for 'Float8_e4m3fn'
src/tcmalloc.cc:283] Attempt to free invalid pointer 0x5849089ec380 
src/tcmalloc.cc:283] Attempt to free invalid pointer 0x5a395fa62380 
W0823 00:46:13.809000 136886560066496 torch/distributed/elastic/multiprocessing/api.py:858] Sending process 1285 closing signal SIGTERM
E0823 00:46:14.024000 136886560066496 torch/distributed/elastic/multiprocessing/api.py:833] failed (exitcode: -6) local_rank: 1 (pid: 1286) of binary: /opt/conda/bin/python3.10
Traceback (most recent call last):
  File "/opt/conda/bin/accelerate", line 8, in <module>
    sys.exit(main())
  File "/opt/conda/lib/python3.10/site-packages/accelerate/commands/accelerate_cli.py", line 48, in main
    args.func(args)
  File "/opt/conda/lib/python3.10/site-packages/accelerate/commands/launch.py", line 1097, in launch_command
    multi_gpu_launcher(args)
  File "/opt/conda/lib/python3.10/site-packages/accelerate/commands/launch.py", line 734, in multi_gpu_launcher
    distrib_run.run(args)
  File "/opt/conda/lib/python3.10/site-packages/torch/distributed/run.py", line 892, in run
    elastic_launch(
  File "/opt/conda/lib/python3.10/site-packages/torch/distributed/launcher/api.py", line 133, in __call__
    return launch_agent(self._config, self._entrypoint, list(args))
  File "/opt/conda/lib/python3.10/site-packages/torch/distributed/launcher/api.py", line 264, in launch_agent
    raise ChildFailedError(
torch.distributed.elastic.multiprocessing.errors.ChildFailedError: 
============================================================
/kaggle/tmp/kohya_ss/sd-scripts/flux_train_network.py FAILED
------------------------------------------------------------
Failures:
  <NO_OTHER_FAILURES>
------------------------------------------------------------
Root Cause (first observed failure):
[0]:
  time      : 2024-08-23_00:46:13
  host      : 117cde5323ca
  rank      : 1 (local_rank: 1)
  exitcode  : -6 (pid: 1286)
  error_file: <N/A>
  traceback : Signal 6 (SIGABRT) received by PID 1286
============================================================
src/tcmalloc.cc:283] Attempt to free invalid pointer 0x56e3bde08380 
00:46:15-566999 INFO     Training has ended.
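
If I am reading the traceback right (not confirmed), both ranks fail while accelerate wraps the split Flux model in DistributedDataParallel: DDP's _sync_module_states coalesces (concatenates) the module parameters before broadcasting them, and torch.cat has no CUDA kernel for the float8_e4m3fn dtype on this PyTorch build. A minimal sketch that reproduces just that underlying PyTorch limitation, independent of kohya_ss:

import torch

# Tiny fp8 tensor on the GPU (torch.float8_e4m3fn exists in PyTorch >= 2.1).
x = torch.zeros(4, dtype=torch.float8_e4m3fn, device="cuda")

# On the affected builds this raises the same error as in the log above:
# RuntimeError: "cat_cuda" not implemented for 'Float8_e4m3fn'
try:
    torch.cat([x, x])
except RuntimeError as err:
    print(err)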

This is my .toml config:

ae = "/kaggle/tmp/models/flux/ae.safetensors"
bucket_reso_steps = 32
cache_latents = true
cache_latents_to_disk = true
cache_text_encoder_outputs = true
cache_text_encoder_outputs_to_disk = true
caption_extension = ".txt"
clip_l = "/kaggle/tmp/models/flux/clip_l.safetensors"
clip_skip = 1
discrete_flow_shift = 1.0
dynamo_backend = "no"
enable_bucket = true
epoch = 5
gradient_accumulation_steps = 1
gradient_checkpointing = true
guidance_scale = 1.0
huber_c = 0.1
huber_schedule = "snr"
logging_dir = "/kaggle/tmp/mylora/logs"
loss_type = "l2"
lowvram = true
lr_scheduler = "cosine_with_restarts"
lr_scheduler_args = []
lr_scheduler_num_cycles = 5
lr_scheduler_power = 1
max_bucket_reso = 2048
max_data_loader_n_workers = 0
max_grad_norm = 1
max_timestep = 1000
max_train_steps = 7500
min_bucket_reso = 256
min_snr_gamma = 10
mixed_precision = "fp16"
model_prediction_type = "raw"
network_alpha = 8
network_args = [ "train_blocks=single",]
network_dim = 8
network_module = "networks.lora_flux"
network_train_unet_only = true
noise_offset_type = "Original"
optimizer_args = [ "relative_step=False", "scale_parameter=False", "warmup_init=False",]
optimizer_type = "Adafactor"
output_dir = "/kaggle/tmp/mylora/outputs"
output_name = "flux1-dev"
pretrained_model_name_or_path = "/kaggle/tmp/models/flux/flux1-dev.safetensors"
prior_loss_weight = 1
resolution = "1024,1024"
sample_every_n_epochs = 1
sample_prompts = "/kaggle/tmp/mylora/outputs/sample/prompt.txt"
sample_sampler = "euler"
save_every_n_epochs = 1
save_model_as = "safetensors"
save_precision = "fp16"
sdpa = true
seed = 42
split_mode = true
t5xxl = "/kaggle/tmp/models/flux/t5xxl_fp16.safetensors"
t5xxl_max_token_length = 512
timestep_sampling = "sigma"
train_batch_size = 1
train_data_dir = "/kaggle/tmp/mylora/dataset/prep/img"
unet_lr = 0.0001
wandb_run_name = "flux1-dev"
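
What I do not understand is where the fp8 dtype comes from, since the config asks for fp16 mixed precision. One thing worth checking (a sketch, using the checkpoint path from the config above) is the dtype the weights are actually stored in:

from safetensors import safe_open

# Inspect the stored dtype of the first tensor in the checkpoint. If it
# reports torch.float8_e4m3fn, that is the dtype DDP later tries to broadcast.
with safe_open("/kaggle/tmp/models/flux/flux1-dev.safetensors", framework="pt", device="cpu") as f:
    key = next(iter(f.keys()))
    print(key, f.get_tensor(key).dtype)
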
ronghuaxueleng commented 2 weeks ago

Is this problem solved?

axelblaze88 commented 2 weeks ago

Is this problem solved?

I don't know. I was testing this on Kaggle, and I figured out that the T4 they give you isn't compatible with this kind of training. I haven't tried again on Kaggle since then...
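
For anyone else trying this on Kaggle, a quick way to see what the runtime actually provides (a sketch; the T4 is compute capability 7.5, and native FP8 support only arrives with the newer sm_89/sm_90 generations such as L4 and H100). That said, the "cat_cuda" error above comes from a missing PyTorch kernel rather than the GPU itself, so I am not sure whether a newer PyTorch build behaves differently:

import torch

print(torch.__version__)
print(torch.cuda.get_device_name(0))        # e.g. "Tesla T4" on Kaggle
print(torch.cuda.get_device_capability(0))  # (7, 5) for the T4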