Sakits committed on
Commit 0b24c25 · verified · 1 Parent(s): 22763ba

Upload folder using huggingface_hub

config.json ADDED
@@ -0,0 +1,80 @@
+ {
+ "type": "pi05",
+ "n_obs_steps": 1,
+ "input_features": {
+ "observation.state": {
+ "type": "STATE",
+ "shape": [
+ 6
+ ]
+ },
+ "observation.images.wrist": {
+ "type": "VISUAL",
+ "shape": [
+ 3,
+ 480,
+ 640
+ ]
+ }
+ },
+ "output_features": {
+ "action": {
+ "type": "ACTION",
+ "shape": [
+ 6
+ ]
+ }
+ },
+ "device": "cpu",
+ "use_amp": false,
+ "push_to_hub": false,
+ "repo_id": null,
+ "private": null,
+ "tags": null,
+ "license": null,
+ "pretrained_path": "lerobot/pi05_base",
+ "paligemma_variant": "gemma_2b",
+ "action_expert_variant": "gemma_300m",
+ "dtype": "bfloat16",
+ "chunk_size": 50,
+ "n_action_steps": 50,
+ "max_state_dim": 32,
+ "max_action_dim": 32,
+ "state_cond": true,
+ "num_inference_steps": 10,
+ "time_sampling_beta_alpha": 1.5,
+ "time_sampling_beta_beta": 1.0,
+ "time_sampling_scale": 0.999,
+ "time_sampling_offset": 0.001,
+ "min_period": 0.004,
+ "max_period": 4.0,
+ "image_resolution": [
+ 224,
+ 224
+ ],
+ "empty_cameras": 0,
+ "tokenizer_max_length": 200,
+ "normalization_mapping": {
+ "VISUAL": "IDENTITY",
+ "STATE": "MEAN_STD",
+ "ACTION": "MEAN_STD"
+ },
+ "gradient_checkpointing": false,
+ "compile_model": false,
+ "compile_mode": "max-autotune",
+ "fuse_qkv": false,
+ "fuse_gate_up": false,
+ "optimizer_lr": 2.5e-05,
+ "optimizer_betas": [
+ 0.9,
+ 0.95
+ ],
+ "optimizer_eps": 1e-08,
+ "optimizer_weight_decay": 0.01,
+ "optimizer_grad_clip_norm": 1.0,
+ "scheduler_warmup_steps": 1000,
+ "scheduler_decay_steps": 30000,
+ "scheduler_decay_lr": 2.5e-06,
+ "vlm_config": {},
+ "action_expert_config": {}
+ }
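
The exported `config.json` above fully specifies the π0.5 policy interface (6-dim state and action, one 480×640 wrist camera, 50-step action chunks). A minimal sanity-check sketch using only the standard library, assuming a local clone of this repo:

```python
import json

# Load the exported policy config and print the I/O feature shapes.
with open("config.json") as f:
    cfg = json.load(f)

print(cfg["type"])                                                  # pi05
print(cfg["input_features"]["observation.state"]["shape"])         # [6]
print(cfg["input_features"]["observation.images.wrist"]["shape"])  # [3, 480, 640]
print(cfg["output_features"]["action"]["shape"])                   # [6]
print(cfg["chunk_size"], cfg["n_action_steps"])                    # 50 50
```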
lora_adapters/README.md ADDED
@@ -0,0 +1,203 @@
+ ---
+ library_name: peft
+ tags:
+ - lora
+ ---
+
+ # Model Card for Model ID
+
+ <!-- Provide a quick summary of what the model is/does. -->
+
+
+
+ ## Model Details
+
+ ### Model Description
+
+ <!-- Provide a longer summary of what this model is. -->
+
+
+
+ - **Developed by:** [More Information Needed]
+ - **Funded by [optional]:** [More Information Needed]
+ - **Shared by [optional]:** [More Information Needed]
+ - **Model type:** [More Information Needed]
+ - **Language(s) (NLP):** [More Information Needed]
+ - **License:** [More Information Needed]
+ - **Finetuned from model [optional]:** [More Information Needed]
+
+ ### Model Sources [optional]
+
+ <!-- Provide the basic links for the model. -->
+
+ - **Repository:** [More Information Needed]
+ - **Paper [optional]:** [More Information Needed]
+ - **Demo [optional]:** [More Information Needed]
+
+ ## Uses
+
+ <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. -->
+
+ ### Direct Use
+
+ <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. -->
+
+ [More Information Needed]
+
+ ### Downstream Use [optional]
+
+ <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app -->
+
+ [More Information Needed]
+
+ ### Out-of-Scope Use
+
+ <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. -->
+
+ [More Information Needed]
+
+ ## Bias, Risks, and Limitations
+
+ <!-- This section is meant to convey both technical and sociotechnical limitations. -->
+
+ [More Information Needed]
+
+ ### Recommendations
+
+ <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->
+
+ Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.
+
+ ## How to Get Started with the Model
+
+ Use the code below to get started with the model.
+
+ [More Information Needed]
+
+ ## Training Details
+
+ ### Training Data
+
+ <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. -->
+
+ [More Information Needed]
+
+ ### Training Procedure
+
+ <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. -->
+
+ #### Preprocessing [optional]
+
+ [More Information Needed]
+
+
+ #### Training Hyperparameters
+
+ - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision -->
+
+ #### Speeds, Sizes, Times [optional]
+
+ <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. -->
+
+ [More Information Needed]
+
+ ## Evaluation
+
+ <!-- This section describes the evaluation protocols and provides the results. -->
+
+ ### Testing Data, Factors & Metrics
+
+ #### Testing Data
+
+ <!-- This should link to a Dataset Card if possible. -->
+
+ [More Information Needed]
+
+ #### Factors
+
+ <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. -->
+
+ [More Information Needed]
+
+ #### Metrics
+
+ <!-- These are the evaluation metrics being used, ideally with a description of why. -->
+
+ [More Information Needed]
+
+ ### Results
+
+ [More Information Needed]
+
+ #### Summary
+
+
+
+ ## Model Examination [optional]
+
+ <!-- Relevant interpretability work for the model goes here -->
+
+ [More Information Needed]
+
+ ## Environmental Impact
+
+ <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly -->
+
+ Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).
+
+ - **Hardware Type:** [More Information Needed]
+ - **Hours used:** [More Information Needed]
+ - **Cloud Provider:** [More Information Needed]
+ - **Compute Region:** [More Information Needed]
+ - **Carbon Emitted:** [More Information Needed]
+
+ ## Technical Specifications [optional]
+
+ ### Model Architecture and Objective
+
+ [More Information Needed]
+
+ ### Compute Infrastructure
+
+ [More Information Needed]
+
+ #### Hardware
+
+ [More Information Needed]
+
+ #### Software
+
+ [More Information Needed]
+
+ ## Citation [optional]
+
+ <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. -->
+
+ **BibTeX:**
+
+ [More Information Needed]
+
+ **APA:**
+
+ [More Information Needed]
+
+ ## Glossary [optional]
+
+ <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. -->
+
+ [More Information Needed]
+
+ ## More Information [optional]
+
+ [More Information Needed]
+
+ ## Model Card Authors [optional]
+
+ [More Information Needed]
+
+ ## Model Card Contact
+
+ [More Information Needed]
+ ### Framework versions
+
+ - PEFT 0.18.0
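
The README is still the unfilled PEFT template, but `library_name: peft` and the PEFT 0.18.0 version pin indicate that the adapter in `lora_adapters/` is meant to be loaded with PEFT. A hedged sketch, assuming `base_policy` is the π0.5 policy module already instantiated elsewhere (the exact lerobot class and constructor are not part of this commit):

```python
from peft import PeftModel

# base_policy: an already-built torch.nn.Module for the pi05 policy; its
# construction depends on your lerobot version and is not shown here.
policy_with_lora = PeftModel.from_pretrained(base_policy, "lora_adapters")
policy_with_lora.eval()
```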
lora_adapters/adapter_config.json ADDED
@@ -0,0 +1,132 @@
+ {
+ "alora_invocation_tokens": null,
+ "alpha_pattern": {},
+ "arrow_config": null,
+ "auto_mapping": null,
+ "base_model_name_or_path": null,
+ "bias": "none",
+ "corda_config": null,
+ "ensure_weight_tying": false,
+ "eva_config": null,
+ "exclude_modules": null,
+ "fan_in_fan_out": false,
+ "inference_mode": true,
+ "init_lora_weights": true,
+ "layer_replication": null,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "loftq_config": {},
+ "lora_alpha": 16,
+ "lora_bias": false,
+ "lora_dropout": 0.0,
+ "megatron_config": null,
+ "megatron_core": "megatron.core",
+ "modules_to_save": [
+ "action_out_proj",
+ "vlm.model.language_model.layers.15.input_layernorm",
+ "suffix_embedder.action_in_proj",
+ "action_expert.model.layers.13.post_attention_layernorm.dense",
+ "action_expert.model.layers.15.input_layernorm.dense",
+ "vlm.model.language_model.layers.8.input_layernorm",
+ "action_expert.model.layers.17.post_attention_layernorm.dense",
+ "vlm.model.language_model.layers.11.input_layernorm",
+ "suffix_embedder.state_mlp_in",
+ "action_expert.model.layers.7.input_layernorm.dense",
+ "action_expert.model.layers.11.post_attention_layernorm.dense",
+ "vlm.model.language_model.layers.4.input_layernorm",
+ "action_expert.model.layers.2.post_attention_layernorm.dense",
+ "action_expert.model.layers.14.input_layernorm.dense",
+ "vlm.model.language_model.layers.5.input_layernorm",
+ "action_expert.model.layers.4.input_layernorm.dense",
+ "action_expert.model.layers.11.input_layernorm.dense",
+ "action_expert.model.layers.8.input_layernorm.dense",
+ "action_expert.model.layers.17.input_layernorm.dense",
+ "action_expert.model.layers.7.post_attention_layernorm.dense",
+ "action_expert.model.layers.15.post_attention_layernorm.dense",
+ "vlm.model.language_model.layers.16.input_layernorm",
+ "action_expert.model.layers.5.post_attention_layernorm.dense",
+ "action_expert.model.layers.0.post_attention_layernorm.dense",
+ "action_expert.model.layers.3.input_layernorm.dense",
+ "action_expert.model.layers.16.post_attention_layernorm.dense",
+ "action_expert.model.layers.12.input_layernorm.dense",
+ "vlm.model.language_model.layers.10.post_attention_layernorm",
+ "action_expert.model.layers.13.input_layernorm.dense",
+ "vlm.model.language_model.layers.10.input_layernorm",
+ "vlm.model.language_model.layers.14.post_attention_layernorm",
+ "vlm.model.language_model.layers.13.input_layernorm",
+ "action_expert.model.layers.12.post_attention_layernorm.dense",
+ "action_expert.model.layers.16.input_layernorm.dense",
+ "vlm.model.language_model.layers.12.input_layernorm",
+ "vlm.model.language_model.layers.17.post_attention_layernorm",
+ "action_expert.model.layers.9.post_attention_layernorm.dense",
+ "action_expert.model.layers.10.input_layernorm.dense",
+ "vlm.model.language_model.layers.11.post_attention_layernorm",
+ "vlm.model.language_model.layers.17.input_layernorm",
+ "vlm.model.language_model.layers.5.post_attention_layernorm",
+ "suffix_embedder.state_proj",
+ "vlm.model.language_model.layers.3.post_attention_layernorm",
+ "vlm.model.language_model.layers.9.input_layernorm",
+ "vlm.model.language_model.layers.1.input_layernorm",
+ "vlm.model.language_model.layers.6.input_layernorm",
+ "vlm.model.language_model.layers.9.post_attention_layernorm",
+ "action_expert.model.layers.9.input_layernorm.dense",
+ "vlm.model.language_model.layers.3.input_layernorm",
+ "vlm.model.language_model.layers.1.post_attention_layernorm",
+ "vlm.model.language_model.layers.0.input_layernorm",
+ "vlm.model.vision_tower.vision_model.embeddings.patch_embedding",
+ "action_expert.model.layers.0.input_layernorm.dense",
+ "vlm.model.language_model.layers.14.input_layernorm",
+ "vlm.model.language_model.layers.13.post_attention_layernorm",
+ "action_expert.model.layers.14.post_attention_layernorm.dense",
+ "action_expert.model.layers.3.post_attention_layernorm.dense",
+ "vlm.model.language_model.layers.4.post_attention_layernorm",
+ "vlm.model.language_model.layers.7.input_layernorm",
+ "action_expert.model.layers.8.post_attention_layernorm.dense",
+ "vlm.model.language_model.layers.8.post_attention_layernorm",
+ "vlm.model.vision_tower.vision_model.embeddings.position_embedding",
+ "action_expert.model.layers.10.post_attention_layernorm.dense",
+ "suffix_embedder.state_mlp_out",
+ "action_expert.model.layers.1.post_attention_layernorm.dense",
+ "vlm.model.language_model.layers.12.post_attention_layernorm",
+ "vlm.model.language_model.layers.16.post_attention_layernorm",
+ "action_expert.model.layers.6.post_attention_layernorm.dense",
+ "action_expert.model.layers.2.input_layernorm.dense",
+ "vlm.model.language_model.layers.6.post_attention_layernorm",
+ "vlm.model.language_model.layers.15.post_attention_layernorm",
+ "suffix_embedder.time_mlp_in",
+ "vlm.model.language_model.layers.0.post_attention_layernorm",
+ "action_expert.model.layers.4.post_attention_layernorm.dense",
+ "action_expert.model.layers.6.input_layernorm.dense",
+ "vlm.model.language_model.layers.7.post_attention_layernorm",
+ "suffix_embedder.time_mlp_out",
+ "action_expert.model.layers.5.input_layernorm.dense",
+ "vlm.model.language_model.layers.2.post_attention_layernorm",
+ "action_expert.model.layers.1.input_layernorm.dense",
+ "vlm.model.language_model.layers.2.input_layernorm"
+ ],
+ "peft_type": "LORA",
+ "peft_version": "0.18.0",
+ "qalora_group_size": 16,
+ "r": 16,
+ "rank_pattern": {},
+ "revision": null,
+ "target_modules": [
+ "down_proj",
+ "out_proj",
+ "k_proj",
+ "up_proj",
+ "o_proj",
+ "fc2",
+ "q_proj",
+ "v_proj",
+ "fc1",
+ "dense",
+ "gate_proj"
+ ],
+ "target_parameters": null,
+ "task_type": "FEATURE_EXTRACTION",
+ "trainable_token_indices": null,
+ "use_dora": false,
+ "use_qalora": false,
+ "use_rslora": false
+ }
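
With `r` equal to `lora_alpha` (both 16), the effective LoRA scaling `lora_alpha / r` is 1.0; besides the low-rank updates on the attention/MLP projections listed in `target_modules`, the `modules_to_save` entries (layernorms, vision embeddings, and the state/time/action projection heads) are trained in full. A quick inspection sketch, assuming a local clone of the repo:

```python
import json

with open("lora_adapters/adapter_config.json") as f:
    adapter_cfg = json.load(f)

print(adapter_cfg["r"], adapter_cfg["lora_alpha"])  # 16 16 -> scaling alpha/r = 1.0
print(len(adapter_cfg["target_modules"]))           # 11 projection/MLP name patterns
print(len(adapter_cfg["modules_to_save"]))          # 81 fully trained (non-LoRA) modules
```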
lora_adapters/adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f88a2d4d80ed6ddaf5f837e84f1b86ed8826a9e5a7d796820c9b862be7f65d8f
+ size 722267392
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fc8d34f402c086469ea5aaee13cb1c0c5b9eac42d942727ed280d848b736dae2
+ size 7481485688
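
Both `.safetensors` entries are git-lfs pointer files; the actual weights (~722 MB for the LoRA adapter, ~7.5 GB for the full model) live in LFS storage and are resolved automatically by the Hub. A hedged download sketch with `huggingface_hub` (the repo id below is a placeholder, substitute the repo this commit belongs to):

```python
from huggingface_hub import hf_hub_download

repo_id = "Sakits/<this-model-repo>"  # placeholder, not the actual repo id

# Resolves the git-lfs pointers above to the real weight files on disk.
adapter_path = hf_hub_download(repo_id, "lora_adapters/adapter_model.safetensors")
model_path = hf_hub_download(repo_id, "model.safetensors")
```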
train_config.json ADDED
@@ -0,0 +1,246 @@
+ {
+ "dataset": {
+ "repo_id": "Sakits/so101_pickplace_calibration/",
+ "root": null,
+ "episodes": null,
+ "image_transforms": {
+ "enable": false,
+ "max_num_transforms": 3,
+ "random_order": false,
+ "tfs": {
+ "brightness": {
+ "weight": 1.0,
+ "type": "ColorJitter",
+ "kwargs": {
+ "brightness": [
+ 0.8,
+ 1.2
+ ]
+ }
+ },
+ "contrast": {
+ "weight": 1.0,
+ "type": "ColorJitter",
+ "kwargs": {
+ "contrast": [
+ 0.8,
+ 1.2
+ ]
+ }
+ },
+ "saturation": {
+ "weight": 1.0,
+ "type": "ColorJitter",
+ "kwargs": {
+ "saturation": [
+ 0.5,
+ 1.5
+ ]
+ }
+ },
+ "hue": {
+ "weight": 1.0,
+ "type": "ColorJitter",
+ "kwargs": {
+ "hue": [
+ -0.05,
+ 0.05
+ ]
+ }
+ },
+ "sharpness": {
+ "weight": 1.0,
+ "type": "SharpnessJitter",
+ "kwargs": {
+ "sharpness": [
+ 0.5,
+ 1.5
+ ]
+ }
+ },
+ "affine": {
+ "weight": 1.0,
+ "type": "RandomAffine",
+ "kwargs": {
+ "degrees": [
+ -5.0,
+ 5.0
+ ],
+ "translate": [
+ 0.05,
+ 0.05
+ ]
+ }
+ }
+ }
+ },
+ "revision": null,
+ "use_imagenet_stats": true,
+ "video_backend": "torchcodec",
+ "streaming": false
+ },
+ "env": null,
+ "policy": {
+ "type": "pi05",
+ "n_obs_steps": 1,
+ "input_features": {
+ "observation.state": {
+ "type": "STATE",
+ "shape": [
+ 6
+ ]
+ },
+ "observation.images.wrist": {
+ "type": "VISUAL",
+ "shape": [
+ 3,
+ 480,
+ 640
+ ]
+ }
+ },
+ "output_features": {
+ "action": {
+ "type": "ACTION",
+ "shape": [
+ 6
+ ]
+ }
+ },
+ "device": "cuda",
+ "use_amp": false,
+ "push_to_hub": false,
+ "repo_id": null,
+ "private": null,
+ "tags": null,
+ "license": null,
+ "pretrained_path": "lerobot/pi05_base",
+ "paligemma_variant": "gemma_2b",
+ "action_expert_variant": "gemma_300m",
+ "dtype": "bfloat16",
+ "chunk_size": 50,
+ "n_action_steps": 50,
+ "max_state_dim": 32,
+ "max_action_dim": 32,
+ "state_cond": true,
+ "num_inference_steps": 10,
+ "time_sampling_beta_alpha": 1.5,
+ "time_sampling_beta_beta": 1.0,
+ "time_sampling_scale": 0.999,
+ "time_sampling_offset": 0.001,
+ "min_period": 0.004,
+ "max_period": 4.0,
+ "image_resolution": [
+ 224,
+ 224
+ ],
+ "empty_cameras": 0,
+ "tokenizer_max_length": 200,
+ "normalization_mapping": {
+ "VISUAL": "IDENTITY",
+ "STATE": "MEAN_STD",
+ "ACTION": "MEAN_STD"
+ },
+ "gradient_checkpointing": false,
+ "compile_model": false,
+ "compile_mode": "max-autotune",
+ "fuse_qkv": false,
+ "fuse_gate_up": false,
+ "optimizer_lr": 2.5e-05,
+ "optimizer_betas": [
+ 0.9,
+ 0.95
+ ],
+ "optimizer_eps": 1e-08,
+ "optimizer_weight_decay": 0.01,
+ "optimizer_grad_clip_norm": 1.0,
+ "scheduler_warmup_steps": 1000,
+ "scheduler_decay_steps": 30000,
+ "scheduler_decay_lr": 2.5e-06,
+ "vlm_config": {},
+ "action_expert_config": {}
+ },
+ "output_dir": "outputs/train/pi05_async8_lora16_wo_emb",
+ "job_name": "pi05_async8_lora16_wo_emb",
+ "resume": false,
+ "seed": 1000,
+ "num_workers": 4,
+ "batch_size": 8,
+ "steps": 50000,
+ "eval_freq": 10000,
+ "log_freq": 200,
+ "save_checkpoint": true,
+ "save_freq": 10000,
+ "use_policy_training_preset": false,
+ "optimizer": {
+ "type": "adamw",
+ "lr": 5e-05,
+ "weight_decay": 1e-10,
+ "grad_clip_norm": 10.0,
+ "betas": [
+ 0.9,
+ 0.95
+ ],
+ "eps": 1e-08
+ },
+ "scheduler": {
+ "type": "cosine_decay_with_warmup",
+ "num_warmup_steps": 1000,
+ "num_decay_steps": 50000,
+ "peak_lr": 5e-05,
+ "decay_lr": 2.5e-06
+ },
+ "eval": {
+ "n_episodes": 50,
+ "batch_size": 50,
+ "use_async_envs": false
+ },
+ "wandb": {
+ "enable": true,
+ "disable_artifact": true,
+ "project": "vlash",
+ "entity": null,
+ "notes": null,
+ "run_id": "caagoah2",
+ "mode": null
+ },
+ "checkpoint_path": null,
+ "rename_map": {},
+ "max_delay_steps": 8,
+ "grad_accum_steps": 1,
+ "lora": {
+ "enable": true,
+ "backend": "peft",
+ "r": 16,
+ "alpha": 16,
+ "dropout": 0.0,
+ "extra_trainable_modules": [
+ "action_in_proj",
+ "action_out_proj",
+ "time_mlp_in",
+ "time_mlp_out",
+ "state_proj",
+ "state_mlp_in",
+ "state_mlp_out",
+ "embeddings",
+ "input_layernorm",
+ "post_attention_layernorm"
+ ],
+ "target_modules": [
+ "q_proj",
+ "k_proj",
+ "v_proj",
+ "o_proj",
+ "gate_proj",
+ "up_proj",
+ "down_proj",
+ "out_proj",
+ "fc1",
+ "fc2",
+ "dense"
+ ],
+ "use_qlora": false,
+ "qlora_quant_type": "nf4",
+ "qlora_compute_dtype": "bfloat16"
+ }
+ }
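
The training run uses a `cosine_decay_with_warmup` schedule: 1,000 warmup steps up to a 5e-05 peak, decaying over 50,000 steps (the full run length at batch size 8) down to 2.5e-06. A rough sketch of that curve, assuming linear warmup followed by cosine decay (the exact shape is defined by lerobot's scheduler implementation and may differ in details):

```python
import math

PEAK_LR, DECAY_LR = 5e-05, 2.5e-06
WARMUP_STEPS, DECAY_STEPS = 1_000, 50_000

def lr_at(step: int) -> float:
    """Approximate learning rate at a given training step."""
    if step < WARMUP_STEPS:
        return PEAK_LR * step / WARMUP_STEPS  # linear warmup
    progress = min((step - WARMUP_STEPS) / (DECAY_STEPS - WARMUP_STEPS), 1.0)
    return DECAY_LR + 0.5 * (PEAK_LR - DECAY_LR) * (1 + math.cos(math.pi * progress))

for s in (0, 1_000, 25_000, 50_000):
    print(s, f"{lr_at(s):.2e}")
```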