{
"snr_gamma": 5.0,
"use_soft_min_snr": false,
"soft_min_snr_sigma_data": null,
"model_family": "qwen_image",
"model_flavour": "v1.0",
"model_type": "lora",
"loss_type": "l2",
"huber_schedule": "snr",
"huber_c": 0.1,
"hidream_use_load_balancing_loss": false,
"hidream_load_balancing_loss_weight": null,
"flux_lora_target": "all",
"flow_sigmoid_scale": 1.0,
"flux_fast_schedule": false,
"flow_use_uniform_schedule": false,
"flow_use_beta_schedule": false,
"flow_beta_schedule_alpha": 2.0,
"flow_beta_schedule_beta": 2.0,
"flow_schedule_shift": 3,
"flow_schedule_auto_shift": false,
"flux_guidance_mode": "constant",
"flux_guidance_value": 1.0,
"flux_guidance_min": 0.0,
"flux_guidance_max": 4.0,
"flux_attention_masked_training": false,
"ltx_train_mode": "i2v",
"ltx_i2v_prob": 0.1,
"ltx_protect_first_frame": false,
"ltx_partial_noise_fraction": 0.05,
"t5_padding": "unmodified",
"sd3_clip_uncond_behaviour": "empty_string",
"sd3_t5_uncond_behaviour": null,
"lora_type": "lycoris",
"peft_lora_mode": "standard",
"singlora_ramp_up_steps": 0,
"lora_init_type": "default",
"init_lora": null,
"lora_rank": 16,
"lora_alpha": null,
"lora_dropout": 0.1,
"lycoris_config": "config/lycoris_config.json",
"init_lokr_norm": null,
"conditioning_multidataset_sampling": "random",
"control": false,
"controlnet": false,
"controlnet_custom_config": null,
"tread_config": null,
"controlnet_model_name_or_path": null,
"pretrained_model_name_or_path": "Qwen/Qwen-Image",
"pretrained_transformer_model_name_or_path": null,
"pretrained_transformer_subfolder": "transformer",
"pretrained_unet_model_name_or_path": null,
"pretrained_unet_subfolder": "unet",
"pretrained_vae_model_name_or_path": "Qwen/Qwen-Image",
"pretrained_t5_model_name_or_path": null,
"prediction_type": "flow_matching",
"snr_weight": 1.0,
"training_scheduler_timestep_spacing": "trailing",
"inference_scheduler_timestep_spacing": "trailing",
"refiner_training": false,
"refiner_training_invert_schedule": false,
"refiner_training_strength": 0.2,
"timestep_bias_strategy": "none",
"timestep_bias_multiplier": 1.0,
"timestep_bias_begin": 0,
"timestep_bias_end": 1000,
"timestep_bias_portion": 0.25,
"disable_segmented_timestep_sampling": false,
"rescale_betas_zero_snr": false,
"vae_dtype": "bf16",
"vae_batch_size": 4,
"vae_enable_tiling": false,
"vae_enable_slicing": false,
"vae_cache_scan_behaviour": "recreate",
"vae_cache_ondemand": false,
"compress_disk_cache": false,
"aspect_bucket_disable_rebuild": false,
"keep_vae_loaded": false,
"skip_file_discovery": "",
"revision": null,
"variant": null,
"preserve_data_backend_cache": false,
"use_dora": false,
"override_dataset_config": false,
"cache_dir_text": "cache",
"cache_dir_vae": "",
"data_backend_config": "config/multidatabackend-dlay.json",
"data_backend_sampling": "auto-weighting",
"ignore_missing_files": false,
"write_batch_size": 128,
"read_batch_size": 25,
"image_processing_batch_size": 32,
"enable_multiprocessing": false,
"max_workers": 32,
"aws_max_pool_connections": 128,
"torch_num_threads": 8,
"dataloader_prefetch": false,
"dataloader_prefetch_qlen": 10,
"aspect_bucket_worker_count": 12,
"cache_dir": "output/dlay-qwen-lycoris-a6000-final/cache",
"cache_clear_validation_prompts": false,
"caption_strategy": "filename",
"parquet_caption_column": null,
"parquet_filename_column": null,
"instance_prompt": null,
"output_dir": "output/dlay-qwen-lycoris-a6000-final",
"seed": 42,
"seed_for_each_device": true,
"framerate": null,
"resolution": 1024,
"resolution_type": "pixel_area",
"aspect_bucket_rounding": null,
"aspect_bucket_alignment": 32,
"minimum_image_size": null,
"maximum_image_size": null,
"target_downsample_size": null,
"train_text_encoder": false,
"tokenizer_max_length": null,
"train_batch_size": 2,
"num_train_epochs": 3,
"max_train_steps": 4000,
"ignore_final_epochs": false,
"checkpointing_steps": 250,
"checkpointing_rolling_steps": 0,
"checkpointing_use_tempdir": false,
"checkpoints_total_limit": 5,
"checkpoints_rolling_total_limit": 1,
"resume_from_checkpoint": null,
"gradient_accumulation_steps": 2,
"gradient_checkpointing": true,
"gradient_checkpointing_interval": null,
"learning_rate": 0.0001,
"text_encoder_lr": null,
"lr_scale": false,
"lr_scale_sqrt": false,
"lr_scheduler": "cosine",
"lr_warmup_steps": 250,
"lr_num_cycles": 1,
"lr_power": 0.8,
"distillation_method": null,
"distillation_config": null,
"use_ema": true,
"ema_device": "cpu",
"ema_validation": "comparison",
"ema_cpu_only": false,
"ema_foreach_disable": false,
"ema_update_interval": null,
"ema_decay": 0.999,
"non_ema_revision": null,
"offload_during_startup": false,
"offload_param_path": null,
"optimizer": "adamw_bf16",
"optimizer_config": null,
"optimizer_cpu_offload_method": "none",
"optimizer_offload_gradients": false,
"fuse_optimizer": false,
"optimizer_beta1": null,
"optimizer_beta2": null,
"optimizer_release_gradients": false,
"adam_beta1": 0.9,
"adam_beta2": 0.999,
"adam_weight_decay": 0.01,
"adam_epsilon": 1e-08,
"prodigy_steps": null,
"max_grad_norm": 1.0,
"grad_clip_method": "value",
"push_to_hub": true,
"push_checkpoints_to_hub": false,
"hub_model_id": "ThatDustyGuy/QwenImageLoKR-Personal",
"model_card_note": null,
"model_card_safe_for_work": false,
"logging_dir": "output/dlay-qwen-lycoris-a6000-final/logs",
"disable_benchmark": false,
"evaluation_type": null,
"eval_dataset_pooling": false,
"pretrained_evaluation_model_name_or_path": "openai/clip-vit-large-patch14-336",
"validation_on_startup": false,
"validation_seed_source": "cpu",
"validation_lycoris_strength": 1.0,
"validation_torch_compile": false,
"validation_torch_compile_mode": "max-autotune",
"validation_guidance_skip_layers": null,
"validation_guidance_skip_layers_start": 0.01,
"validation_guidance_skip_layers_stop": 0.2,
"validation_guidance_skip_scale": 2.8,
"sana_complex_human_instruction": [
"Given a user prompt, generate an 'Enhanced prompt' that provides detailed visual descriptions suitable for image generation. Evaluate the level of detail in the user prompt:",
"- If the prompt is simple, focus on adding specifics about colors, shapes, sizes, textures, and spatial relationships to create vivid and concrete scenes.",
"- Do not add detail that is not in the original prompt! Do not mention colors if the User Prompt does not!",
"- If the prompt is already detailed, refine and enhance the existing details slightly without overcomplicating.",
"- If the prompt contains explicit material, do not remove this information from the output, which would damage the performance of downstream classifiers.",
"Here are examples of how to transform or refine prompts:",
"- User Prompt: A cat sleeping -> Enhanced: A cat sleeping peacefully, showcasing the joy of pet ownership. Cute floof kitty cat gatto.",
"- User Prompt: A busy city street -> Enhanced: A bustling city street scene featuring a crowd of people.",
"Please generate only the enhanced description for the prompt below and avoid including any additional commentary or evaluations:",
"User Prompt: "
],
"disable_tf32": false,
"validation_using_datasets": null,
"webhook_config": null,
"webhook_reporting_interval": null,
"report_to": "wandb",
"tracker_run_name": "simpletuner-testing",
"tracker_project_name": "simpletuner",
"tracker_image_layout": "gallery",
"validation_prompt": null,
"validation_prompt_library": false,
"user_prompt_library": null,
"validation_negative_prompt": "blurry, cropped, ugly",
"num_validation_images": 1,
"validation_disable": true,
"validation_steps": 100,
"validation_stitch_input_location": "left",
"eval_steps_interval": null,
"eval_timesteps": 28,
"num_eval_images": 4,
"eval_dataset_id": null,
"validation_num_inference_steps": 30,
"validation_num_video_frames": null,
"validation_resolution": 256,
"validation_noise_scheduler": null,
"validation_disable_unconditional": false,
"enable_watermark": false,
"mixed_precision": "bf16",
"gradient_precision": null,
"quantize_via": "accelerator",
"base_model_precision": "no_change",
"quantize_activations": false,
"base_model_default_dtype": "bf16",
"text_encoder_1_precision": "no_change",
"text_encoder_2_precision": "no_change",
"text_encoder_3_precision": "no_change",
"text_encoder_4_precision": "no_change",
"local_rank": -1,
"fuse_qkv_projections": false,
"attention_mechanism": "diffusers",
"sageattention_usage": "inference",
"set_grads_to_none": false,
"noise_offset": 0.05,
"noise_offset_probability": 0.25,
"masked_loss_probability": 1.0,
"validation_guidance": 7.5,
"validation_guidance_real": 1.0,
"validation_no_cfg_until_timestep": 2,
"validation_guidance_rescale": 0.0,
"validation_randomize": false,
"validation_seed": null,
"fully_unload_text_encoder": false,
"freeze_encoder_before": 12,
"freeze_encoder_after": 17,
"freeze_encoder_strategy": "after",
"layer_freeze_strategy": "none",
"unet_attention_slice": false,
"print_filenames": false,
"print_sampler_statistics": false,
"metadata_update_interval": 3600,
"debug_aspect_buckets": false,
"debug_dataset_loader": false,
"freeze_encoder": true,
"save_text_encoder": false,
"text_encoder_limit": 25,
"prepend_instance_prompt": false,
"only_instance_prompt": false,
"data_aesthetic_score": 7.0,
"sdxl_refiner_uses_full_range": false,
"caption_dropout_probability": null,
"delete_unwanted_images": false,
"delete_problematic_images": false,
"disable_bucket_pruning": false,
"offset_noise": false,
"input_perturbation": 0.0,
"input_perturbation_steps": 0,
"lr_end": "5e-6",
"i_know_what_i_am_doing": false,
"accelerator_cache_clear_interval": null,
"vae_path": "Qwen/Qwen-Image",
"accelerator_project_config": {
"project_dir": "output/dlay-qwen-lycoris-a6000-final",
"logging_dir": "output/dlay-qwen-lycoris-a6000-final/logs",
"automatic_checkpoint_naming": false,
"total_limit": null,
"iteration": 15,
"save_on_each_node": false
},
"process_group_kwargs": {
"backend": "nccl",
"init_method": null,
"timeout": "1:30:00"
},
"is_quantized": false,
"weight_dtype": "torch.bfloat16",
"disable_accelerator": false,
"model_type_label": "Qwen-Image",
"use_deepspeed_optimizer": false,
"use_deepspeed_scheduler": false,
"base_weight_dtype": "torch.bfloat16",
"is_quanto": false,
"is_torchao": false,
"is_bnb": false,
"flow_matching": true,
"vae_kwargs": {
"pretrained_model_name_or_path": "Qwen/Qwen-Image",
"subfolder": "vae",
"revision": null,
"force_upcast": false,
"variant": null
},
"enable_adamw_bf16": true,
"overrode_max_train_steps": false,
"total_num_batches": 3088,
"num_update_steps_per_epoch": 1544,
"total_batch_size": 4,
"is_schedulefree": false,
"is_lr_scheduler_disabled": false,
"total_steps_remaining_at_start": 4000
}