Downloading: 87% 146M/167M [00:01<00:00, 91.6MB/s]
Downloading: 93% 156M/167M [00:02<00:00, 92.9MB/s]
Downloading: 100% 167M/167M [00:02<00:00, 76.7MB/s]
Fetching 15 files: 100% 15/15 [00:34<00:00, 2.31s/it]
/usr/local/lib/python3.8/dist-packages/diffusers/models/attention.py:435: UserWarning: Could not enable memory efficient attention. Make sure xformers is installed correctly and a GPU is available: No such operator xformers::efficient_attention_forward_cutlass - did you forget to build xformers with python setup.py develop?
warnings.warn(
Generating class images: 0% 0/13 [00:06<?, ?it/s]
Traceback (most recent call last):
File "train_dreambooth.py", line 822, in <module>
main(args)
File "train_dreambooth.py", line 475, in main
images = pipeline(example["prompt"]).images
File "/usr/local/lib/python3.8/dist-packages/torch/autograd/grad_mode.py", line 27, in decorate_context
return func(*args, **kwargs)
File "/usr/local/lib/python3.8/dist-packages/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion.py", line 532, in __call__
noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample
File "/usr/local/lib/python3.8/dist-packages/torch/nn/modules/module.py", line 1190, in _call_impl
return forward_call(*input, **kwargs)
File "/usr/local/lib/python3.8/dist-packages/diffusers/models/unet_2d_condition.py", line 341, in forward
sample, res_samples = downsample_block(
File "/usr/local/lib/python3.8/dist-packages/torch/nn/modules/module.py", line 1190, in _call_impl
return forward_call(*input, **kwargs)
File "/usr/local/lib/python3.8/dist-packages/diffusers/models/unet_2d_blocks.py", line 644, in forward
hidden_states = attn(hidden_states, encoder_hidden_states=encoder_hidden_states).sample
File "/usr/local/lib/python3.8/dist-packages/torch/nn/modules/module.py", line 1190, in _call_impl
return forward_call(*input, **kwargs)
File "/usr/local/lib/python3.8/dist-packages/diffusers/models/attention.py", line 221, in forward
hidden_states = block(hidden_states, context=encoder_hidden_states, timestep=timestep)
File "/usr/local/lib/python3.8/dist-packages/torch/nn/modules/module.py", line 1190, in _call_impl
return forward_call(*input, **kwargs)
File "/usr/local/lib/python3.8/dist-packages/diffusers/models/attention.py", line 479, in forward
hidden_states = self.attn1(norm_hidden_states) + hidden_states
File "/usr/local/lib/python3.8/dist-packages/torch/nn/modules/module.py", line 1190, in _call_impl
return forward_call(*input, **kwargs)
File "/usr/local/lib/python3.8/dist-packages/diffusers/models/attention.py", line 568, in forward
hidden_states = self._memory_efficient_attention_xformers(query, key, value)
File "/usr/local/lib/python3.8/dist-packages/diffusers/models/attention.py", line 624, in _memory_efficient_attention_xformers
hidden_states = xformers.ops.memory_efficient_attention(query, key, value, attn_bias=None)
File "/usr/local/lib/python3.8/dist-packages/xformers/ops/memory_efficient_attention.py", line 967, in memory_efficient_attention
return op.forward_no_grad(
File "/usr/local/lib/python3.8/dist-packages/xformers/ops/memory_efficient_attention.py", line 343, in forward_no_grad
return cls.FORWARD_OPERATOR(
File "/usr/local/lib/python3.8/dist-packages/xformers/ops/common.py", line 11, in no_such_operator
raise RuntimeError(
RuntimeError: No such operator xformers::efficient_attention_forward_cutlass - did you forget to build xformers with python setup.py develop?
Traceback (most recent call last):
File "/usr/local/bin/accelerate", line 8, in <module>
sys.exit(main())
File "/usr/local/lib/python3.8/dist-packages/accelerate/commands/accelerate_cli.py", line 43, in main
args.func(args)
File "/usr/local/lib/python3.8/dist-packages/accelerate/commands/launch.py", line 837, in launch_command
simple_launcher(args)
File "/usr/local/lib/python3.8/dist-packages/accelerate/commands/launch.py", line 354, in simple_launcher
raise subprocess.CalledProcessError(returncode=process.returncode, cmd=cmd)
subprocess.CalledProcessError: Command '['/usr/bin/python3', 'train_dreambooth.py', '--pretrained_model_name_or_path=runwayml/stable-diffusion-v1-5', '--pretrained_vae_name_or_path=stabilityai/sd-vae-ft-mse', '--output_dir=/content/drive/MyDrive/stable_diffusion_weights/zwx', '--revision=fp16', '--with_prior_preservation', '--prior_loss_weight=1.0', '--seed=1337', '--resolution=512', '--train_batch_size=1', '--train_text_encoder', '--mixed_precision=fp16', '--use_8bit_adam', '--gradient_accumulation_steps=1', '--learning_rate=1e-6', '--lr_scheduler=constant', '--lr_warmup_steps=0', '--num_class_images=50', '--sample_batch_size=4', '--max_train_steps=800', '--save_interval=10000', '--save_sample_prompt=photo of zwx dog', '--concepts_list=concepts_list.json']' returned non-zero exit status 1.
This error started appearing a few days ago; apparently Google updated the Colab libraries, leaving some components incompatible. Until this problem is solved, I recommend you use the fast-DreamBooth notebook instead.
Downloading: 87% 146M/167M [00:01<00:00, 91.6MB/s] Downloading: 93% 156M/167M [00:02<00:00, 92.9MB/s] Downloading: 100% 167M/167M [00:02<00:00, 76.7MB/s] Fetching 15 files: 100% 15/15 [00:34<00:00, 2.31s/it] /usr/local/lib/python3.8/dist-packages/diffusers/models/attention.py:435: UserWarning: Could not enable memory efficient attention. Make sure xformers is installed correctly and a GPU is available: No such operator xformers::efficient_attention_forward_cutlass - did you forget to build xformers with
main(args)
File "train_dreambooth.py", line 475, in main
images = pipeline(example["prompt"]).images
File "/usr/local/lib/python3.8/dist-packages/torch/autograd/grad_mode.py", line 27, in decorate_context
return func(*args, **kwargs)
File "/usr/local/lib/python3.8/dist-packages/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion.py", line 532, in __call__
noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample
File "/usr/local/lib/python3.8/dist-packages/torch/nn/modules/module.py", line 1190, in _call_impl
return forward_call(*input, **kwargs)
File "/usr/local/lib/python3.8/dist-packages/diffusers/models/unet_2d_condition.py", line 341, in forward
sample, res_samples = downsample_block(
File "/usr/local/lib/python3.8/dist-packages/torch/nn/modules/module.py", line 1190, in _call_impl
return forward_call(*input, **kwargs)
File "/usr/local/lib/python3.8/dist-packages/diffusers/models/unet_2d_blocks.py", line 644, in forward
hidden_states = attn(hidden_states, encoder_hidden_states=encoder_hidden_states).sample
File "/usr/local/lib/python3.8/dist-packages/torch/nn/modules/module.py", line 1190, in _call_impl
return forward_call(*input, **kwargs)
File "/usr/local/lib/python3.8/dist-packages/diffusers/models/attention.py", line 221, in forward
hidden_states = block(hidden_states, context=encoder_hidden_states, timestep=timestep)
File "/usr/local/lib/python3.8/dist-packages/torch/nn/modules/module.py", line 1190, in _call_impl
return forward_call(*input, **kwargs)
File "/usr/local/lib/python3.8/dist-packages/diffusers/models/attention.py", line 479, in forward
hidden_states = self.attn1(norm_hidden_states) + hidden_states
File "/usr/local/lib/python3.8/dist-packages/torch/nn/modules/module.py", line 1190, in _call_impl
return forward_call(*input, **kwargs)
File "/usr/local/lib/python3.8/dist-packages/diffusers/models/attention.py", line 568, in forward
hidden_states = self._memory_efficient_attention_xformers(query, key, value)
File "/usr/local/lib/python3.8/dist-packages/diffusers/models/attention.py", line 624, in _memory_efficient_attention_xformers
hidden_states = xformers.ops.memory_efficient_attention(query, key, value, attn_bias=None)
File "/usr/local/lib/python3.8/dist-packages/xformers/ops/memory_efficient_attention.py", line 967, in memory_efficient_attention
return op.forward_no_grad(
File "/usr/local/lib/python3.8/dist-packages/xformers/ops/memory_efficient_attention.py", line 343, in forward_no_grad
return cls.FORWARD_OPERATOR(
File "/usr/local/lib/python3.8/dist-packages/xformers/ops/common.py", line 11, in no_such_operator
raise RuntimeError(
RuntimeError: No such operator xformers::efficient_attention_forward_cutlass - did you forget to build xformers with python setup.py develop?
sys.exit(main())
File "/usr/local/lib/python3.8/dist-packages/accelerate/commands/accelerate_cli.py", line 43, in main
args.func(args)
File "/usr/local/lib/python3.8/dist-packages/accelerate/commands/launch.py", line 837, in launch_command
simple_launcher(args)
File "/usr/local/lib/python3.8/dist-packages/accelerate/commands/launch.py", line 354, in simple_launcher
raise subprocess.CalledProcessError(returncode=process.returncode, cmd=cmd)
subprocess.CalledProcessError: Command '['/usr/bin/python3', 'train_dreambooth.py', '--pretrained_model_name_or_path=runwayml/stable-diffusion-v1-5', '--pretrained_vae_name_or_path=stabilityai/sd-vae-ft-mse', '--output_dir=/content/drive/MyDrive/stable_diffusion_weights/zwx', '--revision=fp16', '--with_prior_preservation', '--prior_loss_weight=1.0', '--seed=1337', '--resolution=512', '--train_batch_size=1', '--train_text_encoder', '--mixed_precision=fp16', '--use_8bit_adam', '--gradient_accumulation_steps=1', '--learning_rate=1e-6', '--lr_scheduler=constant', '--lr_warmup_steps=0', '--num_class_images=50', '--sample_batch_size=4', '--max_train_steps=800', '--save_interval=10000', '--save_sample_prompt=photo of zwx dog', '--concepts_list=concepts_list.json']' returned non-zero exit status 1.
python setup.py develop
? warnings.warn( Generating class images: 0% 0/13 [00:06<?, ?it/s] Traceback (most recent call last): File "train_dreambooth.py", line 822, inpython setup.py develop
? Traceback (most recent call last): File "/usr/local/bin/accelerate", line 8, in