ShuaiYang03 committed
Commit d243e0d · verified · 1 Parent(s): e6c9810

Upload folder using huggingface_hub

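The run folder was pushed with the huggingface_hub client. A minimal sketch of such an upload (the repo id and local path below are placeholders, not values taken from this commit):

```python
# Sketch of an "Upload folder using huggingface_hub" commit.
# repo_id and folder_path are placeholders, not values from this commit.
from huggingface_hub import HfApi

api = HfApi()  # picks up the token from `huggingface-cli login` or HF_TOKEN
api.upload_folder(
    folder_path="outputs/libero_wrist/<run_id>",  # local run directory
    repo_id="<user>/<repo>",                      # target model repo on the Hub
    commit_message="Upload folder using huggingface_hub",
)
```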
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ sys12_meta_query_action_only_sync_pretraining_v2_query_64_mlp_lora_libero_10_wrist--image_augstage1.jsonl filter=lfs diff=lfs merge=lfs -text
checkpoints/step-025500-epoch-64-loss=0.0361.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:78b77bc4c34e8e7a698c8d13cce44c7067c73093eb34d218669bf0ef566dbdfa
+ size 11322825962
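Only this LFS pointer is stored in git; the checkpoint itself (about 11.3 GB) lives in LFS storage. A minimal sketch of fetching and inspecting it with huggingface_hub and PyTorch, assuming a placeholder repo id:

```python
# Sketch: download the LFS-backed checkpoint and peek at its contents.
# "<user>/<repo>" is a placeholder repo id, not taken from this page.
import torch
from huggingface_hub import hf_hub_download

ckpt_path = hf_hub_download(
    repo_id="<user>/<repo>",
    filename="checkpoints/step-025500-epoch-64-loss=0.0361.pt",
)
state = torch.load(ckpt_path, map_location="cpu")  # ~11.3 GB on disk
print(type(state), list(state)[:5] if isinstance(state, dict) else state)
```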
config.json ADDED
@@ -0,0 +1,59 @@
+ {
+   "action_dim": 7,
+   "action_model_type": "DiT-B",
+   "data_root_dir": "/mnt/inspurfs/efm_t/robot_data/cache/LIBERO/dataset",
+   "debug": false,
+   "disable_instruction": false,
+   "fix_system1": false,
+   "future_action_window_size": 7,
+   "hf_token": "hf_token",
+   "image_aug": true,
+   "is_resume": false,
+   "load_all_data_for_training": true,
+   "num_of_meta_query": 64,
+   "past_action_window_size": 0,
+   "pretrained_checkpoint": null,
+   "repeated_diffusion_steps": 4,
+   "resume_epoch": null,
+   "resume_step": null,
+   "run_id": "sys12_meta_query_action_only_sync_pretraining_v2_query_64_mlp_lora_libero_10_wrist--image_augstage1",
+   "run_id_note": null,
+   "run_root_dir": "outputs/libero_wrist",
+   "save_interval": 1500,
+   "seed": 42,
+   "stage": "stage1",
+   "trackers": [
+     "jsonl",
+     "wandb"
+   ],
+   "use_ema": false,
+   "use_mm": false,
+   "vla": {
+     "action_tokenizer": "extra_action_tokenizer",
+     "base_vlm": "/mnt/petrelfs/yangshuai1/yangshuai1/share_mllm/Eagle2-2B",
+     "data_mix": "libero_10_no_noops",
+     "enable_gradient_checkpointing": true,
+     "enable_mixed_precision_training": true,
+     "epochs": 100,
+     "expected_world_size": 8,
+     "freeze_llm_backbone": false,
+     "freeze_vision_backbone": false,
+     "global_batch_size": 256,
+     "learning_rate": 5e-05,
+     "lr_scheduler_type": "constant",
+     "max_grad_norm": 1.0,
+     "max_steps": null,
+     "per_device_batch_size": 32,
+     "reduce_in_full_precision": true,
+     "shuffle_buffer_size": 250000,
+     "train_strategy": "fsdp-full-shard",
+     "type": "prism-qwen25-dinosiglip-224px+0_5b",
+     "unfreeze_last_llm_layer": false,
+     "vla_id": "prism-qwen25-dinosiglip-224px+0_5b",
+     "warmup_ratio": 0.0,
+     "weight_decay": 0.0
+   },
+   "wandb_entity": "shuaiyang2003",
+   "wandb_project": "dual_sys_libero",
+   "with_pointing": true
+ }
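The training config is internally consistent: global_batch_size equals per_device_batch_size × expected_world_size (256 = 32 × 8). A minimal sketch of loading the file and checking that invariant (the path is assumed to be relative to the repo root):

```python
# Sketch: load config.json and sanity-check the batch-size arithmetic.
import json

with open("config.json") as f:
    cfg = json.load(f)

vla = cfg["vla"]
# 256 == 32 * 8 for this run
assert vla["global_batch_size"] == vla["per_device_batch_size"] * vla["expected_world_size"]
print(cfg["run_id"], vla["learning_rate"], vla["train_strategy"])
```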
config.yaml ADDED
@@ -0,0 +1,55 @@
+ action_dim: 7
+ action_model_type: DiT-B
+ data_root_dir: /mnt/inspurfs/efm_t/robot_data/cache/LIBERO/dataset
+ debug: false
+ disable_instruction: false
+ fix_system1: false
+ future_action_window_size: 7
+ hf_token: hf_token
+ image_aug: true
+ is_resume: false
+ load_all_data_for_training: true
+ num_of_meta_query: 64
+ past_action_window_size: 0
+ pretrained_checkpoint: null
+ repeated_diffusion_steps: 4
+ resume_epoch: null
+ resume_step: null
+ run_id: sys12_meta_query_action_only_sync_pretraining_v2_query_64_mlp_lora_libero_10_wrist--image_augstage1
+ run_id_note: null
+ run_root_dir: outputs/libero_wrist
+ save_interval: 1500
+ seed: 42
+ stage: stage1
+ trackers:
+ - jsonl
+ - wandb
+ use_ema: false
+ use_mm: false
+ vla:
+   action_tokenizer: extra_action_tokenizer
+   base_vlm: /mnt/petrelfs/yangshuai1/yangshuai1/share_mllm/Eagle2-2B
+   data_mix: libero_10_no_noops
+   enable_gradient_checkpointing: true
+   enable_mixed_precision_training: true
+   epochs: 100
+   expected_world_size: 8
+   freeze_llm_backbone: false
+   freeze_vision_backbone: false
+   global_batch_size: 256
+   learning_rate: 5.0e-05
+   lr_scheduler_type: constant
+   max_grad_norm: 1.0
+   max_steps: null
+   per_device_batch_size: 32
+   reduce_in_full_precision: true
+   shuffle_buffer_size: 250000
+   train_strategy: fsdp-full-shard
+   type: prism-qwen25-dinosiglip-224px+0_5b
+   unfreeze_last_llm_layer: false
+   vla_id: prism-qwen25-dinosiglip-224px+0_5b
+   warmup_ratio: 0.0
+   weight_decay: 0.0
+ wandb_entity: shuaiyang2003
+ wandb_project: dual_sys_libero
+ with_pointing: true
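config.yaml mirrors config.json key for key. A quick sketch (assuming PyYAML is installed) of verifying that the two files stay in sync:

```python
# Sketch: confirm config.yaml and config.json carry the same hyperparameters.
import json
import yaml  # requires PyYAML

with open("config.json") as f:
    cfg_json = json.load(f)
with open("config.yaml") as f:
    cfg_yaml = yaml.safe_load(f)

print(cfg_json == cfg_yaml)  # expected True when the two files are in sync
```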
dataset_statistics.json ADDED
@@ -0,0 +1,133 @@
+ {
+   "libero_10_no_noops": {
+     "action": {
+       "mean": [
+         0.01820324920117855,
+         0.05858374014496803,
+         -0.05592384561896324,
+         0.004626928828656673,
+         0.00289608770981431,
+         -0.007673131301999092,
+         0.5457824468612671
+       ],
+       "std": [
+         0.2825464606285095,
+         0.35904666781425476,
+         0.3673802614212036,
+         0.03770702704787254,
+         0.05429719388484955,
+         0.08725254982709885,
+         0.49815231561660767
+       ],
+       "max": [
+         0.9375,
+         0.9375,
+         0.9375,
+         0.30000001192092896,
+         0.29357144236564636,
+         0.375,
+         1.0
+       ],
+       "min": [
+         -0.9375,
+         -0.9375,
+         -0.9375,
+         -0.23642857372760773,
+         -0.3053571283817291,
+         -0.3675000071525574,
+         0.0
+       ],
+       "q01": [
+         -0.6348214149475098,
+         -0.7741071581840515,
+         -0.7633928656578064,
+         -0.09749999642372131,
+         -0.14819999992847435,
+         -0.2742857038974762,
+         0.0
+       ],
+       "q99": [
+         0.7714285850524902,
+         0.8464285731315613,
+         0.9375,
+         0.13928571343421936,
+         0.15964286029338837,
+         0.3246428668498993,
+         1.0
+       ],
+       "mask": [
+         true,
+         true,
+         true,
+         true,
+         true,
+         true,
+         false
+       ]
+     },
+     "proprio": {
+       "mean": [
+         -0.04190658777952194,
+         0.03539430722594261,
+         0.8257141709327698,
+         2.908308267593384,
+         -0.5562185049057007,
+         -0.16649018228054047,
+         0.028316624462604523,
+         -0.028561657294631004
+       ],
+       "std": [
+         0.10743364691734314,
+         0.14424669742584229,
+         0.2572328448295593,
+         0.3441362977027893,
+         1.234421730041504,
+         0.3579835891723633,
+         0.013308707624673843,
+         0.013174631632864475
+       ],
+       "max": [
+         0.21031762659549713,
+         0.39128610491752625,
+         1.3332009315490723,
+         3.6714255809783936,
+         3.560650587081909,
+         1.386339545249939,
+         0.04160946607589722,
+         0.0013633022317662835
+       ],
+       "min": [
+         -0.4828203022480011,
+         -0.3255046010017395,
+         0.445506751537323,
+         1.1321442127227783,
+         -3.641430377960205,
+         -1.842738389968872,
+         -0.0010040868073701859,
+         -0.04111652821302414
+       ],
+       "q01": [
+         -0.3899900782108307,
+         -0.2838300323486328,
+         0.44795057058334353,
+         1.8810229921340942,
+         -2.886677579879761,
+         -1.1599004411697387,
+         0.002066459748893976,
+         -0.04001387819647789
+       ],
+       "q99": [
+         0.1530261474847791,
+         0.32915401458740223,
+         1.2546923208236693,
+         3.303542451858519,
+         2.7496529006957933,
+         0.6893712210655194,
+         0.040048558115959164,
+         -0.0017598449345678235
+       ]
+     },
+     "num_transitions": 101469,
+     "num_trajectories": 379
+   }
+ }
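These per-dataset statistics, with q01/q99 bounds and a mask that excludes the gripper dimension, are the kind used for quantile-based action normalization in OpenVLA-style pipelines. A minimal sketch of that normalization, assuming the usual convention; the helper names are illustrative, not this repo's API:

```python
# Sketch of the common q01/q99 action normalization these statistics support.
# The masking convention (gripper dim left untouched) is inferred from the
# "mask" field; exact behaviour depends on the training code.
import json
import numpy as np

with open("dataset_statistics.json") as f:
    stats = json.load(f)["libero_10_no_noops"]["action"]

q01 = np.array(stats["q01"])
q99 = np.array(stats["q99"])
mask = np.array(stats["mask"])

def normalize_action(action: np.ndarray) -> np.ndarray:
    """Map masked dims from [q01, q99] to [-1, 1]; leave unmasked dims as-is."""
    scaled = 2.0 * (action - q01) / (q99 - q01 + 1e-8) - 1.0
    return np.where(mask, np.clip(scaled, -1.0, 1.0), action)

def unnormalize_action(action: np.ndarray) -> np.ndarray:
    """Inverse mapping, typically applied to model outputs at inference time."""
    raw = 0.5 * (action + 1.0) * (q99 - q01 + 1e-8) + q01
    return np.where(mask, raw, action)
```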
eval/EVAL-libero_10-instruct_vla-2025_09_08-15_48_26-step-025500-epoch-64-loss=0.0361.txt ADDED
The diff for this file is too large to render. See raw diff
 
eval/EVAL-libero_10-instruct_vla-2025_09_08-15_48_32-step-025500-epoch-64-loss=0.0361.txt ADDED
The diff for this file is too large to render. See raw diff
 
eval/EVAL-libero_10-instruct_vla-2025_09_08-15_48_50-step-025500-epoch-64-loss=0.0361.txt ADDED
The diff for this file is too large to render. See raw diff
 
run-metrics.jsonl ADDED
@@ -0,0 +1 @@
+ {"hparams": {"action_dim": 7, "action_model_type": "DiT-B", "data_root_dir": "/mnt/inspurfs/efm_t/robot_data/cache/LIBERO/dataset", "debug": false, "disable_instruction": false, "fix_system1": false, "future_action_window_size": 7, "hf_token": "hf_token", "image_aug": true, "is_resume": false, "load_all_data_for_training": true, "num_of_meta_query": 64, "past_action_window_size": 0, "pretrained_checkpoint": null, "repeated_diffusion_steps": 4, "resume_epoch": null, "resume_step": null, "run_id": "sys12_meta_query_action_only_sync_pretraining_v2_query_64_mlp_lora_libero_10_wrist--image_augstage1", "run_id_note": null, "run_root_dir": "outputs/libero_wrist", "save_interval": 1500, "seed": 42, "stage": "stage1", "trackers": ["jsonl", "wandb"], "use_ema": false, "use_mm": false, "vla": {"action_tokenizer": "extra_action_tokenizer", "base_vlm": "/mnt/petrelfs/yangshuai1/yangshuai1/share_mllm/Eagle2-2B", "data_mix": "libero_10_no_noops", "enable_gradient_checkpointing": true, "enable_mixed_precision_training": true, "epochs": 100, "expected_world_size": 8, "freeze_llm_backbone": false, "freeze_vision_backbone": false, "global_batch_size": 256, "learning_rate": 5e-05, "lr_scheduler_type": "constant", "max_grad_norm": 1.0, "max_steps": null, "per_device_batch_size": 32, "reduce_in_full_precision": true, "shuffle_buffer_size": 250000, "train_strategy": "fsdp-full-shard", "type": "prism-qwen25-dinosiglip-224px+0_5b", "unfreeze_last_llm_layer": false, "vla_id": "prism-qwen25-dinosiglip-224px+0_5b", "warmup_ratio": 0.0, "weight_decay": 0.0}, "wandb_entity": "shuaiyang2003", "wandb_project": "dual_sys_libero", "with_pointing": true}, "run_id": "sys12_meta_query_action_only_sync_pretraining_v2_query_64_mlp_lora_libero_10_wrist--image_augstage1"}
sys12_meta_query_action_only_sync_pretraining_v2_query_64_mlp_lora_libero_10_wrist--image_augstage1.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9a21b5cdac4b388dab1303fe9db2481b6b5487c63f90e24b9d097cdd83803f54
+ size 15938619
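This ~16 MB .jsonl file is also stored via LFS; given "trackers": ["jsonl", "wandb"] in the config, it presumably holds the run's training metrics as one JSON object per line. A minimal sketch of streaming it after pulling from LFS (field names are not guaranteed):

```python
# Sketch: stream the run's JSONL metrics log (one JSON object per line).
# Assumes the file has been pulled from LFS; the record schema is an assumption.
import json

path = ("sys12_meta_query_action_only_sync_pretraining_v2_query_64_mlp_lora_"
        "libero_10_wrist--image_augstage1.jsonl")

with open(path) as f:
    records = [json.loads(line) for line in f if line.strip()]

print(len(records), records[0].keys() if records else None)
```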