diff --git a/library/common_gui.py b/library/common_gui.py
index 7977c26..862e216 100644
--- a/library/common_gui.py
+++ b/library/common_gui.py
@@ -453,6 +453,7 @@ def gradio_training(
         lr_scheduler = gr.Dropdown(
             label='LR Scheduler',
             choices=[
+                'adafactor',
                 'constant',
                 'constant_with_warmup',
                 'cosine',
diff --git a/presets/finetune/adafactor.json b/presets/finetune/adafactor.json
new file mode 100644
index 0000000..0e0149d
--- /dev/null
+++ b/presets/finetune/adafactor.json
@@ -0,0 +1,61 @@
+{
+    "pretrained_model_name_or_path": "runwayml/stable-diffusion-v1-5",
+    "v2": false,
+    "v_parameterization": false,
+    "train_dir": "D:/dataset/paige_spiranac/ft",
+    "image_folder": "D:\\dataset\\paige_spiranac\\lora\\img4_g8\\16_paige_spiranac",
+    "output_dir": "D:/models/test",
+    "logging_dir": "D:/dataset/paige_spiranac/ft/logs",
+    "max_resolution": "512,512",
+    "min_bucket_reso": "256",
+    "max_bucket_reso": "1024",
+    "batch_size": "1",
+    "flip_aug": false,
+    "caption_metadata_filename": "meta_cap.json",
+    "latent_metadata_filename": "meta_lat.json",
+    "full_path": true,
+    "learning_rate": "1e-6",
+    "lr_scheduler": "adafactor",
+    "lr_warmup": "10",
+    "dataset_repeats": "10",
+    "train_batch_size": 4,
+    "epoch": "2",
+    "save_every_n_epochs": "1",
+    "mixed_precision": "bf16",
+    "save_precision": "fp16",
+    "seed": "1234",
+    "num_cpu_threads_per_process": 2,
+    "train_text_encoder": true,
+    "create_caption": true,
+    "create_buckets": false,
+    "save_model_as": "safetensors",
+    "caption_extension": ".txt",
+    "use_8bit_adam": false,
+    "xformers": true,
+    "clip_skip": 1,
+    "save_state": false,
+    "resume": "",
+    "gradient_checkpointing": false,
+    "gradient_accumulation_steps": 1.0,
+    "mem_eff_attn": false,
+    "shuffle_caption": true,
+    "output_name": "paige_spiranac_v1.5e",
+    "max_token_length": "150",
+    "max_train_epochs": "",
+    "max_data_loader_n_workers": "0",
+    "full_fp16": false,
+    "color_aug": false,
+    "model_list": "runwayml/stable-diffusion-v1-5",
+    "cache_latents": true,
+    "use_latent_files": "No",
+    "keep_tokens": 1,
+    "persistent_data_loader_workers": false,
+    "bucket_no_upscale": true,
+    "random_crop": false,
+    "bucket_reso_steps": 1.0,
+    "caption_dropout_every_n_epochs": 0.0,
+    "caption_dropout_rate": 0.1,
+    "optimizer": "Adafactor",
+    "optimizer_args": "scale_parameter=True relative_step=True warmup_init=True weight_decay=2",
+    "noise_offset": ""
+}
\ No newline at end of file
diff --git a/presets/finetune/lion.json b/presets/finetune/lion.json
new file mode 100644
index 0000000..982c8a8
--- /dev/null
+++ b/presets/finetune/lion.json
@@ -0,0 +1,61 @@
+{
+    "pretrained_model_name_or_path": "runwayml/stable-diffusion-v1-5",
+    "v2": false,
+    "v_parameterization": false,
+    "train_dir": "D:/dataset/paige_spiranac/ft",
+    "image_folder": "D:\\dataset\\paige_spiranac\\lora\\img4_g8\\16_paige_spiranac",
+    "output_dir": "D:/models/test",
+    "logging_dir": "D:/dataset/paige_spiranac/ft/logs",
+    "max_resolution": "512,512",
+    "min_bucket_reso": "256",
+    "max_bucket_reso": "1024",
+    "batch_size": "1",
+    "flip_aug": false,
+    "caption_metadata_filename": "meta_cap.json",
+    "latent_metadata_filename": "meta_lat.json",
+    "full_path": true,
+    "learning_rate": "0.0000166666666",
+    "lr_scheduler": "cosine",
+    "lr_warmup": "10",
+    "dataset_repeats": "10",
+    "train_batch_size": 4,
+    "epoch": "2",
+    "save_every_n_epochs": "1",
+    "mixed_precision": "bf16",
+    "save_precision": "fp16",
+    "seed": "1234",
+    "num_cpu_threads_per_process": 2,
+    "train_text_encoder": true,
+    "create_caption": true,
+    "create_buckets": false,
+    "save_model_as": "safetensors",
+    "caption_extension": ".txt",
+    "use_8bit_adam": false,
+    "xformers": true,
+    "clip_skip": 1,
+    "save_state": false,
+    "resume": "",
+    "gradient_checkpointing": false,
+    "gradient_accumulation_steps": 1.0,
+    "mem_eff_attn": false,
+    "shuffle_caption": true,
+    "output_name": "paige_spiranac_v1.5e",
+    "max_token_length": "150",
+    "max_train_epochs": "",
+    "max_data_loader_n_workers": "0",
+    "full_fp16": false,
+    "color_aug": false,
+    "model_list": "runwayml/stable-diffusion-v1-5",
+    "cache_latents": true,
+    "use_latent_files": "No",
+    "keep_tokens": 1,
+    "persistent_data_loader_workers": false,
+    "bucket_no_upscale": true,
+    "random_crop": false,
+    "bucket_reso_steps": 1.0,
+    "caption_dropout_every_n_epochs": 0.0,
+    "caption_dropout_rate": 0.1,
+    "optimizer": "Lion",
+    "optimizer_args": "",
+    "noise_offset": ""
+}
\ No newline at end of file
diff --git a/presets/lion_optimizer.json b/presets/lora/lion_optimizer.json
similarity index 100%
rename from presets/lion_optimizer.json
rename to presets/lora/lion_optimizer.json