Closed Signorlimone closed 1 year ago
As per title, my resulting images are more or less all like:
prompt is: a man in a red coat
My settings: { "training_method": "LORA", "model_type": "STABLE_DIFFUSION_15", "debug_mode": false, "debug_dir": "debug", "workspace_dir": "C:/Users/one/Pictures/LoRa_Training/Loratry", "cache_dir": "C:/Users/one/Pictures/LoRa_Training/Loratry/cache", "tensorboard": true, "base_model_name": "C:/AI/stable-diffusion-webui/models/Stable-diffusion/v1-5-pruned.ckpt", "extra_model_name": "", "weight_dtype": "FLOAT_32", "output_dtype": "FLOAT_16", "output_model_format": "SAFETENSORS", "output_model_destination": "C:/Users/one/Pictures/LoRa_Training/Loratry/models/onetrainer/supa3d_oneT", "concept_file_name": "training_concepts/concepts.json", "circular_mask_generation": false, "random_rotate_and_crop": true, "aspect_ratio_bucketing": true, "latent_caching": true, "latent_caching_epochs": 1, "optimizer": "ADAMW_8BIT", "learning_rate_scheduler": "COSINE_WITH_RESTARTS", "learning_rate": 0.0002, "learning_rate_warmup_steps": 200, "learning_rate_cycles": 10, "weight_decay": 0.01, "epochs": 100, "batch_size": 1, "gradient_accumulation_steps": 1, "ema": "OFF", "ema_decay": 0.999, "ema_update_step_interval": 5, "train_text_encoder": true, "train_text_encoder_epochs": 30, "text_encoder_learning_rate": 5e-05, "text_encoder_layer_skip": 1, "train_unet": true, "train_unet_epochs": 100000, "unet_learning_rate": 0.0002, "offset_noise_weight": 0.05, "rescale_noise_scheduler_to_zero_terminal_snr": false, "force_v_prediction": false, "force_epsilon_prediction": false, "train_device": "cuda", "temp_device": "cpu", "train_dtype": "FLOAT_16", "only_cache": false, "resolution": 512, "masked_training": false, "unmasked_probability": 0.1, "unmasked_weight": 0.1, "normalize_masked_area_loss": false, "max_noising_strength": 1.0, "token_count": 1, "initial_embedding_text": "*", "lora_rank": 128, "lora_alpha": 128.0, "attention_mechanism": "XFORMERS", "sample_definition_file_name": "training_samples/samples.json", "sample_after": 2, "sample_after_unit": "MINUTE", "backup_after": 30, 
"backup_after_unit": "MINUTE", "backup_before_save": true }
What could it be?
Can you test if this was fixed by #24?
Sure, I'll try and let you know! Thanks for the answer.
As per title, my resulting images are more or less all like:
prompt is: a man in a red coat
My settings: { "training_method": "LORA", "model_type": "STABLE_DIFFUSION_15", "debug_mode": false, "debug_dir": "debug", "workspace_dir": "C:/Users/one/Pictures/LoRa_Training/Loratry", "cache_dir": "C:/Users/one/Pictures/LoRa_Training/Loratry/cache", "tensorboard": true, "base_model_name": "C:/AI/stable-diffusion-webui/models/Stable-diffusion/v1-5-pruned.ckpt", "extra_model_name": "", "weight_dtype": "FLOAT_32", "output_dtype": "FLOAT_16", "output_model_format": "SAFETENSORS", "output_model_destination": "C:/Users/one/Pictures/LoRa_Training/Loratry/models/onetrainer/supa3d_oneT", "concept_file_name": "training_concepts/concepts.json", "circular_mask_generation": false, "random_rotate_and_crop": true, "aspect_ratio_bucketing": true, "latent_caching": true, "latent_caching_epochs": 1, "optimizer": "ADAMW_8BIT", "learning_rate_scheduler": "COSINE_WITH_RESTARTS", "learning_rate": 0.0002, "learning_rate_warmup_steps": 200, "learning_rate_cycles": 10, "weight_decay": 0.01, "epochs": 100, "batch_size": 1, "gradient_accumulation_steps": 1, "ema": "OFF", "ema_decay": 0.999, "ema_update_step_interval": 5, "train_text_encoder": true, "train_text_encoder_epochs": 30, "text_encoder_learning_rate": 5e-05, "text_encoder_layer_skip": 1, "train_unet": true, "train_unet_epochs": 100000, "unet_learning_rate": 0.0002, "offset_noise_weight": 0.05, "rescale_noise_scheduler_to_zero_terminal_snr": false, "force_v_prediction": false, "force_epsilon_prediction": false, "train_device": "cuda", "temp_device": "cpu", "train_dtype": "FLOAT_16", "only_cache": false, "resolution": 512, "masked_training": false, "unmasked_probability": 0.1, "unmasked_weight": 0.1, "normalize_masked_area_loss": false, "max_noising_strength": 1.0, "token_count": 1, "initial_embedding_text": "*", "lora_rank": 128, "lora_alpha": 128.0, "attention_mechanism": "XFORMERS", "sample_definition_file_name": "training_samples/samples.json", "sample_after": 2, "sample_after_unit": "MINUTE", "backup_after": 30, 
"backup_after_unit": "MINUTE", "backup_before_save": true }
What could it be?