# torchtitan Config.toml
# NOTE: this toml config is a preset for 64 A100 GPUs.
[job]
dump_folder = "./llama3_3b_output"
description = "Llama 3 3B training"
[profiling]
enable_profiling = true
save_traces_folder = "profile_trace"
profile_freq = 100
[metrics]
log_freq = 10
enable_tensorboard = true
save_tb_folder = "tb"
[model]
name = "llama3"
flavor = "3B"
tokenizer_path = "./assets/tokenizer/Llama-3.1-8B"
# converters = ["float8"]
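
# NOTE on [model]: the "3B" flavor is assumed to be registered in torchtitan's
# llama3 model definitions; if your torchtitan checkout does not ship a 3B flavor,
# it has to be added before this config will load. Uncommenting
# converters = ["float8"] enables the float8 model converter, which is configured
# by the [float8] section at the bottom of this file.
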
[optimizer]
name = "AdamW"
lr = 3e-4
eps = 1e-8
[lr_scheduler]
warmup_steps = 6000 # lr scheduler warm up
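
# NOTE on [optimizer]/[lr_scheduler]: warmup_steps = 6000 is 10% of the 60,000
# training steps below, so the learning rate ramps up to 3e-4 over the first tenth
# of the run before the scheduler's decay takes over (assuming torchtitan's default
# scheduler behavior; adjust warmup_steps if you change training.steps).
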
[training]
local_batch_size = 2
global_batch_size = 128
seq_len = 8192
max_norm = 1.0 # grad norm clipping
steps = 60000
compile = false
dataset = "fineweb_edu_100bt"
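
# NOTE on [training]: with the 64-GPU preset and all 64 ranks in data parallel
# (see [parallelism] below), local_batch_size = 2 gives 2 * 64 = 128 samples per
# step, which matches global_batch_size = 128, i.e. no gradient accumulation.
# Each step then covers 128 * 8192 = 1,048,576 tokens, so 60,000 steps is roughly
# 63B tokens of fineweb_edu_100bt.
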
[parallelism]
data_parallel_replicate_degree = 1
data_parallel_shard_degree = -1
tensor_parallel_degree = 1
pipeline_parallel_degree = 1
context_parallel_degree = 1
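
# NOTE on [parallelism]: data_parallel_shard_degree = -1 is taken to mean "shard
# over all remaining GPUs"; with replicate, tensor, pipeline, and context parallel
# degrees all at 1, the 64-GPU preset runs as plain 64-way FSDP.
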
[checkpoint]
enable_checkpoint = true
folder = "./llama3_3b_output/checkpoint"
interval = 500
last_save_model_only = true
export_dtype = "float32"
async_mode = "disabled" # ["disabled", "async", "async_with_pinned_mem"]
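
# NOTE on [checkpoint]: interval = 500 writes a resumable training-state checkpoint
# (model plus optimizer/dataloader state) every 500 steps, about 120 times over a
# 60,000-step run, while last_save_model_only = true keeps only the model weights
# (exported as float32) in the final save. This reading follows the option names;
# check your torchtitan version's docs for the exact semantics.
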
[activation_checkpoint]
mode = "selective" # ["none", "selective", "full"]
selective_ac_option = "op" # a positive int N = ac every Nth layer, "op" = ac based on the op-level policy
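
# NOTE on [activation_checkpoint]: an integer string selects layer-level selective
# ac instead of the op-level policy, e.g. (hypothetical alternative setting):
# selective_ac_option = "2"  # checkpoint every 2nd transformer block
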
[float8]
enable_fsdp_float8_all_gather = false
precompute_float8_dynamic_scale_for_fsdp = false
filter_fqns = ["output"]
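
# NOTE on [float8]: these options only take effect once "float8" is added to
# model.converters above; filter_fqns = ["output"] is understood to keep the final
# output projection out of float8. Float8 training also assumes hardware float8
# support (H100-class GPUs), which is why it is left disabled in this A100 preset.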