
Stylized Setting (Isometric) SDXL & SD1.5

SDXL
v1.0
Last updated: 2024-03-19 00:18:50
Creations: 2.2K
Favorites: 157
Downloads: 408
Source model:
Type: LORA
Base models: SDXL 1.0
Trigger words: Isometric_Setting
License scope:
Creative license scope
Online generation
Allow merging
Allow download
Commercial license scope
Generated images may be sold or used for commercial purposes
Models may be resold, or sold after merging
Rating: 5
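
For reference, a minimal inference sketch with the diffusers library, assuming the downloaded LoRA file is the Stylized_Setting_SDXL.safetensors produced by the training config below and stored locally (the local path is hypothetical). The trigger word Isometric_Setting goes in the prompt, and the Euler a scheduler mirrors the sample_sampler used during training.

import torch
from diffusers import StableDiffusionXLPipeline, EulerAncestralDiscreteScheduler

# Base model matches the "Base models: SDXL 1.0" entry above.
pipe = StableDiffusionXLPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0",
    torch_dtype=torch.float16,
    variant="fp16",
).to("cuda")

# Euler a, matching sample_sampler = "euler_a" in the config below.
pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)

# Hypothetical local path to the downloaded LoRA weights.
pipe.load_lora_weights("./Stylized_Setting_SDXL.safetensors")

# The trigger word activates the isometric style.
prompt = "Isometric_Setting, a cozy isometric coffee shop, detailed, stylized"
image = pipe(prompt, num_inference_steps=30, guidance_scale=7.0).images[0]
image.save("isometric_setting.png")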
Model Parameters:

[sdxl_arguments]
cache_text_encoder_outputs = true
no_half_vae = true
min_timestep = 0
max_timestep = 1000
shuffle_caption = false
lowram = true

[model_arguments]
pretrained_model_name_or_path = "stabilityai/stable-diffusion-xl-base-1.0"
vae = "/content/vae/sdxl_vae.safetensors"

[dataset_arguments]
debug_dataset = false
in_json = "/content/LoRA/meta_lat.json"
train_data_dir = "/content/drive/MyDrive/lora_training/datasets/Stylized_Setting_SDXL"
dataset_repeats = 2
keep_tokens = 1
resolution = "1024,1024"
color_aug = false
token_warmup_min = 1
token_warmup_step = 0

[training_arguments]
output_dir = "/content/drive/MyDrive/kohya-trainer/output/Stylized_Setting_SDXL"
output_name = "Stylized_Setting_SDXL"
save_precision = "fp16"
save_every_n_epochs = 1
train_batch_size = 4
max_token_length = 225
mem_eff_attn = false
sdpa = true
xformers = false
max_train_epochs = 10
max_data_loader_n_workers = 8
persistent_data_loader_workers = true
gradient_checkpointing = true
gradient_accumulation_steps = 1
mixed_precision = "fp16"

[logging_arguments]
log_with = "tensorboard"
logging_dir = "/content/LoRA/logs"
log_prefix = "Stylized_Setting_SDXL"

[sample_prompt_arguments]
sample_sampler = "euler_a"

[saving_arguments]
save_model_as = "safetensors"

[optimizer_arguments]
optimizer_type = "AdaFactor"
learning_rate = 0.0001
max_grad_norm = 0
optimizer_args = [ "scale_parameter=False", "relative_step=False", "warmup_init=False",]
lr_scheduler = "constant_with_warmup"
lr_warmup_steps = 100

[additional_network_arguments]
no_metadata = false
network_module = "networks.lora"
network_dim = 32
network_alpha = 16
network_args = []
network_train_unet_only = true
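
A short sketch of how a config like this is typically consumed, assuming it has been saved locally as Stylized_Setting_SDXL_config.toml (a hypothetical filename): the values can be inspected with Python's standard tomllib, and kohya-ss sd-scripts can read such a TOML via its config-file option (flag name assumed; check your sd-scripts version).

import tomllib  # standard library in Python 3.11+

# Hypothetical local copy of the parameters listed above.
with open("Stylized_Setting_SDXL_config.toml", "rb") as f:
    cfg = tomllib.load(f)

train = cfg["training_arguments"]
net = cfg["additional_network_arguments"]
print(train["train_batch_size"], train["max_train_epochs"])  # 4, 10
print(net["network_dim"], net["network_alpha"])              # 32, 16

# Typical launch with kohya-ss sd-scripts (assumed invocation):
#   accelerate launch sdxl_train_network.py --config_file Stylized_Setting_SDXL_config.toml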
