David Roy Nelson committed on
Commit 2c7e469 · verified · 1 parent: 9576728

Upload 17 files

README.md CHANGED
@@ -1,3 +1,24 @@
- ---
- license: apache-2.0
- ---
+ ---
+ library_name: peft
+ ---
+ ## Training procedure
+
+
+ The following `bitsandbytes` quantization config was used during training:
+ - quant_method: QuantizationMethod.BITS_AND_BYTES
+ - _load_in_8bit: False
+ - _load_in_4bit: False
+ - llm_int8_threshold: 6.0
+ - llm_int8_skip_modules: None
+ - llm_int8_enable_fp32_cpu_offload: False
+ - llm_int8_has_fp16_weight: False
+ - bnb_4bit_quant_type: nf4
+ - bnb_4bit_use_double_quant: True
+ - bnb_4bit_compute_dtype: float32
+ - bnb_4bit_quant_storage: uint8
+ - load_in_4bit: False
+ - load_in_8bit: False
+ ### Framework versions
+
+
+ - PEFT 0.4.0
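The list above is the `bitsandbytes` config that PEFT recorded at training time. For reference only, here is a minimal sketch, not part of the uploaded files, of how those values map onto a `BitsAndBytesConfig` and how this adapter could be attached to the base model named in adapter_config.json; the adapter path is a placeholder.

```python
import torch
from transformers import AutoModelForCausalLM, BitsAndBytesConfig
from peft import PeftModel

# Training-time quantization settings transcribed from the list above,
# constructed here only to mirror the recorded values. Both load_in_8bit and
# load_in_4bit are False, so the nf4 / double-quant fields were effectively
# inactive for this run.
bnb_config = BitsAndBytesConfig(
    load_in_8bit=False,
    load_in_4bit=False,
    llm_int8_threshold=6.0,
    llm_int8_enable_fp32_cpu_offload=False,
    llm_int8_has_fp16_weight=False,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_use_double_quant=True,
    bnb_4bit_compute_dtype=torch.float32,
)

# Load the base model named in adapter_config.json and attach the adapter.
# "path/to/this/adapter" is a placeholder, not a path from the commit.
base = AutoModelForCausalLM.from_pretrained(
    "mistralai/Mistral-7B-v0.1",
    device_map="auto",
)
model = PeftModel.from_pretrained(base, "path/to/this/adapter")
```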
adapter_config.json ADDED
@@ -0,0 +1,26 @@
+ {
+   "auto_mapping": null,
+   "base_model_name_or_path": "mistralai/Mistral-7B-v0.1",
+   "bias": "none",
+   "fan_in_fan_out": false,
+   "inference_mode": true,
+   "init_lora_weights": true,
+   "layers_pattern": null,
+   "layers_to_transform": null,
+   "lora_alpha": 10,
+   "lora_dropout": 0.0,
+   "modules_to_save": null,
+   "peft_type": "LORA",
+   "r": 8,
+   "revision": null,
+   "target_modules": [
+     "k_proj",
+     "q_proj",
+     "o_proj",
+     "down_proj",
+     "up_proj",
+     "gate_proj",
+     "v_proj"
+   ],
+   "task_type": "CAUSAL_LM"
+ }
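For readers who prefer code, a hedged sketch (not included in this commit) of the equivalent `peft.LoraConfig`; the JSON above remains the authoritative definition.

```python
from peft import LoraConfig

# Mirrors adapter_config.json: rank-8 LoRA on all attention and MLP projections.
lora_config = LoraConfig(
    r=8,
    lora_alpha=10,
    lora_dropout=0.0,
    bias="none",
    task_type="CAUSAL_LM",
    target_modules=["k_proj", "q_proj", "o_proj", "down_proj", "up_proj", "gate_proj", "v_proj"],
)
```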
adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f290c65998013f2cccfa91465d0342a4a50f2045ed90756f8a3f21678401748b
+ size 83945296
added_tokens.json ADDED
@@ -0,0 +1,3 @@
+ {
+   "[PAD]": 32000
+ }
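added_tokens.json registers a `[PAD]` token at index 32000, one slot past the base Mistral vocabulary. A small illustrative sketch (not code from this commit) of how such a token is typically added and the embedding matrix resized to match:

```python
from transformers import AutoTokenizer, AutoModelForCausalLM

# Add [PAD] as the pad token; with Mistral's 32000-entry vocab it takes id 32000.
tokenizer = AutoTokenizer.from_pretrained("mistralai/Mistral-7B-v0.1")
tokenizer.add_special_tokens({"pad_token": "[PAD]"})

# Grow the embedding matrix to cover the new token (32000 -> 32001 rows).
model = AutoModelForCausalLM.from_pretrained("mistralai/Mistral-7B-v0.1")
model.resize_token_embeddings(len(tokenizer))
```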
config.json ADDED
The diff for this file is too large to render. See raw diff
 
eval-results_Algal_checkpoint-1000-Mistral7B-b96_noAst ADDED
The diff for this file is too large to render. See raw diff
 
eval-results_Bact_checkpoint-1000-Mistral7B-b96_noAst ADDED
The diff for this file is too large to render. See raw diff
 
merge.py ADDED
@@ -0,0 +1,23 @@
+ import json
+
+ # List of JSON files to merge
+ json_files = [
+     "tokenizer.json",
+     "tokenizer_config.json",
+     "special_tokens_map.json",
+     "adapter_config.json",
+     "trainer_state.json"
+ ]
+
+ # Initialize an empty dictionary to store the merged data
+ merged_data = {}
+
+ # Iterate over the JSON files and merge their contents
+ for file_path in json_files:
+     with open(file_path) as file:
+         data = json.load(file)
+         merged_data.update(data)
+
+ # Write the merged data to the output file
+ with open("config.json", "w") as output_file:
+     json.dump(merged_data, output_file, indent=2)
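Note on behavior (an observation, not text from the commit): because the script merges with `dict.update`, keys appearing in more than one listed file are overwritten by whichever file comes later in the list, and the output replaces any existing `config.json` in the working directory. It is meant to be run from the repository root, e.g. `python merge.py`.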
optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4d4ef39cd1d3e5b1ec0f24abd717051b22f43e397da6b9787f93a6af462c6c67
+ size 168039994
rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3af285a76b3712b289951955fed90d44c8f005858287a3848975dedb5ec20e4f
+ size 14244
scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f57e061ee75fd72eedf3a5fbb98a912d7e5358b32dc8141bb4ec089618ca8320
+ size 1064
special_tokens_map.json ADDED
@@ -0,0 +1,30 @@
+ {
+   "bos_token": {
+     "content": "<s>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "eos_token": {
+     "content": "</s>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": {
+     "content": "[PAD]",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "unk_token": {
+     "content": "<unk>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:dadfd56d766715c61d2ef780a525ab43b8e6da4de6865bda3d95fdef5e134055
+ size 493443
tokenizer_config.json ADDED
@@ -0,0 +1,50 @@
+ {
+   "add_bos_token": true,
+   "add_eos_token": false,
+   "added_tokens_decoder": {
+     "0": {
+       "content": "<unk>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "1": {
+       "content": "<s>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "2": {
+       "content": "</s>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "32000": {
+       "content": "[PAD]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "additional_special_tokens": [],
+   "bos_token": "<s>",
+   "clean_up_tokenization_spaces": false,
+   "eos_token": "</s>",
+   "model_max_length": 1000000000000000019884624838656,
+   "pad_token": "[PAD]",
+   "padding_side": "right",
+   "sp_model_kwargs": {},
+   "spaces_between_special_tokens": false,
+   "tokenizer_class": "LlamaTokenizer",
+   "unk_token": "<unk>",
+   "use_default_system_prompt": false
+ }
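A short illustrative sketch (not shipped in this commit) of loading the tokenizer defined by tokenizer_config.json and tokenizer.model from a local checkout and checking the padding setup; the path is a placeholder.

```python
from transformers import AutoTokenizer

# Placeholder path to a local checkout of this repository.
tok = AutoTokenizer.from_pretrained("path/to/this/repo")
print(tok.pad_token, tok.pad_token_id)  # expected: [PAD] 32000
print(tok.padding_side)                 # expected: right
```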
trainer_state.json ADDED
@@ -0,0 +1,733 @@
1
+ {
2
+ "best_metric": null,
3
+ "best_model_checkpoint": null,
4
+ "epoch": 0.029098898061104047,
5
+ "eval_steps": 500,
6
+ "global_step": 1000,
7
+ "is_hyper_param_search": false,
8
+ "is_local_process_zero": true,
9
+ "is_world_process_zero": true,
10
+ "log_history": [
11
+ {
12
+ "epoch": 0.0002909889806110405,
13
+ "grad_norm": 3.3640072345733643,
14
+ "learning_rate": 0.0002,
15
+ "loss": 1.2361,
16
+ "step": 10
17
+ },
18
+ {
19
+ "epoch": 0.000581977961222081,
20
+ "grad_norm": 0.2830420136451721,
21
+ "learning_rate": 0.0002,
22
+ "loss": 0.1232,
23
+ "step": 20
24
+ },
25
+ {
26
+ "epoch": 0.0008729669418331215,
27
+ "grad_norm": 0.3788599669933319,
28
+ "learning_rate": 0.0002,
29
+ "loss": 0.0874,
30
+ "step": 30
31
+ },
32
+ {
33
+ "epoch": 0.001163955922444162,
34
+ "grad_norm": 0.9566423892974854,
35
+ "learning_rate": 0.0002,
36
+ "loss": 0.1099,
37
+ "step": 40
38
+ },
39
+ {
40
+ "epoch": 0.0014549449030552023,
41
+ "grad_norm": 0.5953503847122192,
42
+ "learning_rate": 0.0002,
43
+ "loss": 0.0727,
44
+ "step": 50
45
+ },
46
+ {
47
+ "epoch": 0.001745933883666243,
48
+ "grad_norm": 0.3039131164550781,
49
+ "learning_rate": 0.0002,
50
+ "loss": 0.0575,
51
+ "step": 60
52
+ },
53
+ {
54
+ "epoch": 0.0020369228642772835,
55
+ "grad_norm": 0.27436479926109314,
56
+ "learning_rate": 0.0002,
57
+ "loss": 0.0511,
58
+ "step": 70
59
+ },
60
+ {
61
+ "epoch": 0.002327911844888324,
62
+ "grad_norm": 0.22409753501415253,
63
+ "learning_rate": 0.0002,
64
+ "loss": 0.0495,
65
+ "step": 80
66
+ },
67
+ {
68
+ "epoch": 0.0026189008254993646,
69
+ "grad_norm": 0.5258105397224426,
70
+ "learning_rate": 0.0002,
71
+ "loss": 0.0447,
72
+ "step": 90
73
+ },
74
+ {
75
+ "epoch": 0.0029098898061104047,
76
+ "grad_norm": 0.3420485854148865,
77
+ "learning_rate": 0.0002,
78
+ "loss": 0.0447,
79
+ "step": 100
80
+ },
81
+ {
82
+ "epoch": 0.0032008787867214453,
83
+ "grad_norm": 0.18883126974105835,
84
+ "learning_rate": 0.0002,
85
+ "loss": 0.0437,
86
+ "step": 110
87
+ },
88
+ {
89
+ "epoch": 0.003491867767332486,
90
+ "grad_norm": 0.09849688410758972,
91
+ "learning_rate": 0.0002,
92
+ "loss": 0.0428,
93
+ "step": 120
94
+ },
95
+ {
96
+ "epoch": 0.0037828567479435264,
97
+ "grad_norm": 0.5861080884933472,
98
+ "learning_rate": 0.0002,
99
+ "loss": 0.0416,
100
+ "step": 130
101
+ },
102
+ {
103
+ "epoch": 0.004073845728554567,
104
+ "grad_norm": 0.8478333353996277,
105
+ "learning_rate": 0.0002,
106
+ "loss": 0.0415,
107
+ "step": 140
108
+ },
109
+ {
110
+ "epoch": 0.004364834709165607,
111
+ "grad_norm": 0.6563957333564758,
112
+ "learning_rate": 0.0002,
113
+ "loss": 0.0416,
114
+ "step": 150
115
+ },
116
+ {
117
+ "epoch": 0.004655823689776648,
118
+ "grad_norm": 0.1496465653181076,
119
+ "learning_rate": 0.0002,
120
+ "loss": 0.04,
121
+ "step": 160
122
+ },
123
+ {
124
+ "epoch": 0.004946812670387688,
125
+ "grad_norm": 0.7356327176094055,
126
+ "learning_rate": 0.0002,
127
+ "loss": 0.0406,
128
+ "step": 170
129
+ },
130
+ {
131
+ "epoch": 0.005237801650998729,
132
+ "grad_norm": 0.5485235452651978,
133
+ "learning_rate": 0.0002,
134
+ "loss": 0.0415,
135
+ "step": 180
136
+ },
137
+ {
138
+ "epoch": 0.005528790631609769,
139
+ "grad_norm": 0.28617948293685913,
140
+ "learning_rate": 0.0002,
141
+ "loss": 0.04,
142
+ "step": 190
143
+ },
144
+ {
145
+ "epoch": 0.005819779612220809,
146
+ "grad_norm": 0.7351231575012207,
147
+ "learning_rate": 0.0002,
148
+ "loss": 0.0462,
149
+ "step": 200
150
+ },
151
+ {
152
+ "epoch": 0.00611076859283185,
153
+ "grad_norm": 0.6597175598144531,
154
+ "learning_rate": 0.0002,
155
+ "loss": 0.042,
156
+ "step": 210
157
+ },
158
+ {
159
+ "epoch": 0.0064017575734428905,
160
+ "grad_norm": 0.5418401956558228,
161
+ "learning_rate": 0.0002,
162
+ "loss": 0.0425,
163
+ "step": 220
164
+ },
165
+ {
166
+ "epoch": 0.0066927465540539315,
167
+ "grad_norm": 0.4611615836620331,
168
+ "learning_rate": 0.0002,
169
+ "loss": 0.0409,
170
+ "step": 230
171
+ },
172
+ {
173
+ "epoch": 0.006983735534664972,
174
+ "grad_norm": 0.039530955255031586,
175
+ "learning_rate": 0.0002,
176
+ "loss": 0.0414,
177
+ "step": 240
178
+ },
179
+ {
180
+ "epoch": 0.007274724515276012,
181
+ "grad_norm": 0.03446557745337486,
182
+ "learning_rate": 0.0002,
183
+ "loss": 0.0393,
184
+ "step": 250
185
+ },
186
+ {
187
+ "epoch": 0.007565713495887053,
188
+ "grad_norm": 0.7747415900230408,
189
+ "learning_rate": 0.0002,
190
+ "loss": 0.0419,
191
+ "step": 260
192
+ },
193
+ {
194
+ "epoch": 0.007856702476498093,
195
+ "grad_norm": 0.3428023159503937,
196
+ "learning_rate": 0.0002,
197
+ "loss": 0.0423,
198
+ "step": 270
199
+ },
200
+ {
201
+ "epoch": 0.008147691457109134,
202
+ "grad_norm": 0.2668132781982422,
203
+ "learning_rate": 0.0002,
204
+ "loss": 0.0404,
205
+ "step": 280
206
+ },
207
+ {
208
+ "epoch": 0.008438680437720175,
209
+ "grad_norm": 0.06787655502557755,
210
+ "learning_rate": 0.0002,
211
+ "loss": 0.0373,
212
+ "step": 290
213
+ },
214
+ {
215
+ "epoch": 0.008729669418331214,
216
+ "grad_norm": 0.17109806835651398,
217
+ "learning_rate": 0.0002,
218
+ "loss": 0.0371,
219
+ "step": 300
220
+ },
221
+ {
222
+ "epoch": 0.009020658398942255,
223
+ "grad_norm": 0.1489395946264267,
224
+ "learning_rate": 0.0002,
225
+ "loss": 0.0377,
226
+ "step": 310
227
+ },
228
+ {
229
+ "epoch": 0.009311647379553296,
230
+ "grad_norm": 0.20444560050964355,
231
+ "learning_rate": 0.0002,
232
+ "loss": 0.0385,
233
+ "step": 320
234
+ },
235
+ {
236
+ "epoch": 0.009602636360164335,
237
+ "grad_norm": 1.251767873764038,
238
+ "learning_rate": 0.0002,
239
+ "loss": 0.0395,
240
+ "step": 330
241
+ },
242
+ {
243
+ "epoch": 0.009893625340775376,
244
+ "grad_norm": 0.17964421212673187,
245
+ "learning_rate": 0.0002,
246
+ "loss": 0.0403,
247
+ "step": 340
248
+ },
249
+ {
250
+ "epoch": 0.010184614321386417,
251
+ "grad_norm": 0.20001742243766785,
252
+ "learning_rate": 0.0002,
253
+ "loss": 0.0374,
254
+ "step": 350
255
+ },
256
+ {
257
+ "epoch": 0.010475603301997458,
258
+ "grad_norm": 0.07885689288377762,
259
+ "learning_rate": 0.0002,
260
+ "loss": 0.0366,
261
+ "step": 360
262
+ },
263
+ {
264
+ "epoch": 0.010766592282608498,
265
+ "grad_norm": 0.1755530834197998,
266
+ "learning_rate": 0.0002,
267
+ "loss": 0.0366,
268
+ "step": 370
269
+ },
270
+ {
271
+ "epoch": 0.011057581263219539,
272
+ "grad_norm": 0.24720287322998047,
273
+ "learning_rate": 0.0002,
274
+ "loss": 0.036,
275
+ "step": 380
276
+ },
277
+ {
278
+ "epoch": 0.01134857024383058,
279
+ "grad_norm": 0.13627253472805023,
280
+ "learning_rate": 0.0002,
281
+ "loss": 0.0384,
282
+ "step": 390
283
+ },
284
+ {
285
+ "epoch": 0.011639559224441619,
286
+ "grad_norm": 0.10238471627235413,
287
+ "learning_rate": 0.0002,
288
+ "loss": 0.037,
289
+ "step": 400
290
+ },
291
+ {
292
+ "epoch": 0.01193054820505266,
293
+ "grad_norm": 0.1790493279695511,
294
+ "learning_rate": 0.0002,
295
+ "loss": 0.036,
296
+ "step": 410
297
+ },
298
+ {
299
+ "epoch": 0.0122215371856637,
300
+ "grad_norm": 0.8142262101173401,
301
+ "learning_rate": 0.0002,
302
+ "loss": 0.0366,
303
+ "step": 420
304
+ },
305
+ {
306
+ "epoch": 0.012512526166274742,
307
+ "grad_norm": 0.3142533600330353,
308
+ "learning_rate": 0.0002,
309
+ "loss": 0.0373,
310
+ "step": 430
311
+ },
312
+ {
313
+ "epoch": 0.012803515146885781,
314
+ "grad_norm": 0.5743248462677002,
315
+ "learning_rate": 0.0002,
316
+ "loss": 0.0364,
317
+ "step": 440
318
+ },
319
+ {
320
+ "epoch": 0.013094504127496822,
321
+ "grad_norm": 0.10562433302402496,
322
+ "learning_rate": 0.0002,
323
+ "loss": 0.0357,
324
+ "step": 450
325
+ },
326
+ {
327
+ "epoch": 0.013385493108107863,
328
+ "grad_norm": 0.1285354048013687,
329
+ "learning_rate": 0.0002,
330
+ "loss": 0.0348,
331
+ "step": 460
332
+ },
333
+ {
334
+ "epoch": 0.013676482088718902,
335
+ "grad_norm": 0.31671035289764404,
336
+ "learning_rate": 0.0002,
337
+ "loss": 0.0367,
338
+ "step": 470
339
+ },
340
+ {
341
+ "epoch": 0.013967471069329943,
342
+ "grad_norm": 0.10281776636838913,
343
+ "learning_rate": 0.0002,
344
+ "loss": 0.0367,
345
+ "step": 480
346
+ },
347
+ {
348
+ "epoch": 0.014258460049940984,
349
+ "grad_norm": 0.10469332337379456,
350
+ "learning_rate": 0.0002,
351
+ "loss": 0.0354,
352
+ "step": 490
353
+ },
354
+ {
355
+ "epoch": 0.014549449030552023,
356
+ "grad_norm": 0.04262165352702141,
357
+ "learning_rate": 0.0002,
358
+ "loss": 0.0358,
359
+ "step": 500
360
+ },
361
+ {
362
+ "epoch": 0.014840438011163064,
363
+ "grad_norm": 0.08256979286670685,
364
+ "learning_rate": 0.0002,
365
+ "loss": 0.0352,
366
+ "step": 510
367
+ },
368
+ {
369
+ "epoch": 0.015131426991774105,
370
+ "grad_norm": 0.2147534042596817,
371
+ "learning_rate": 0.0002,
372
+ "loss": 0.0352,
373
+ "step": 520
374
+ },
375
+ {
376
+ "epoch": 0.015422415972385146,
377
+ "grad_norm": 0.7227026224136353,
378
+ "learning_rate": 0.0002,
379
+ "loss": 0.0367,
380
+ "step": 530
381
+ },
382
+ {
383
+ "epoch": 0.015713404952996186,
384
+ "grad_norm": 0.3186182975769043,
385
+ "learning_rate": 0.0002,
386
+ "loss": 0.0366,
387
+ "step": 540
388
+ },
389
+ {
390
+ "epoch": 0.01600439393360723,
391
+ "grad_norm": 0.5129309296607971,
392
+ "learning_rate": 0.0002,
393
+ "loss": 0.036,
394
+ "step": 550
395
+ },
396
+ {
397
+ "epoch": 0.016295382914218268,
398
+ "grad_norm": 0.3547574281692505,
399
+ "learning_rate": 0.0002,
400
+ "loss": 0.0363,
401
+ "step": 560
402
+ },
403
+ {
404
+ "epoch": 0.016586371894829307,
405
+ "grad_norm": 0.29844892024993896,
406
+ "learning_rate": 0.0002,
407
+ "loss": 0.0369,
408
+ "step": 570
409
+ },
410
+ {
411
+ "epoch": 0.01687736087544035,
412
+ "grad_norm": 0.25678157806396484,
413
+ "learning_rate": 0.0002,
414
+ "loss": 0.0352,
415
+ "step": 580
416
+ },
417
+ {
418
+ "epoch": 0.01716834985605139,
419
+ "grad_norm": 0.07419384270906448,
420
+ "learning_rate": 0.0002,
421
+ "loss": 0.0344,
422
+ "step": 590
423
+ },
424
+ {
425
+ "epoch": 0.017459338836662428,
426
+ "grad_norm": 0.15620607137680054,
427
+ "learning_rate": 0.0002,
428
+ "loss": 0.0343,
429
+ "step": 600
430
+ },
431
+ {
432
+ "epoch": 0.01775032781727347,
433
+ "grad_norm": 0.17426913976669312,
434
+ "learning_rate": 0.0002,
435
+ "loss": 0.0326,
436
+ "step": 610
437
+ },
438
+ {
439
+ "epoch": 0.01804131679788451,
440
+ "grad_norm": 0.18652600049972534,
441
+ "learning_rate": 0.0002,
442
+ "loss": 0.0358,
443
+ "step": 620
444
+ },
445
+ {
446
+ "epoch": 0.01833230577849555,
447
+ "grad_norm": 0.5866808295249939,
448
+ "learning_rate": 0.0002,
449
+ "loss": 0.0364,
450
+ "step": 630
451
+ },
452
+ {
453
+ "epoch": 0.018623294759106592,
454
+ "grad_norm": 0.5470107793807983,
455
+ "learning_rate": 0.0002,
456
+ "loss": 0.0385,
457
+ "step": 640
458
+ },
459
+ {
460
+ "epoch": 0.01891428373971763,
461
+ "grad_norm": 0.4430047273635864,
462
+ "learning_rate": 0.0002,
463
+ "loss": 0.0355,
464
+ "step": 650
465
+ },
466
+ {
467
+ "epoch": 0.01920527272032867,
468
+ "grad_norm": 0.44153594970703125,
469
+ "learning_rate": 0.0002,
470
+ "loss": 0.0374,
471
+ "step": 660
472
+ },
473
+ {
474
+ "epoch": 0.019496261700939713,
475
+ "grad_norm": 0.2025349885225296,
476
+ "learning_rate": 0.0002,
477
+ "loss": 0.0361,
478
+ "step": 670
479
+ },
480
+ {
481
+ "epoch": 0.019787250681550753,
482
+ "grad_norm": 0.05006701499223709,
483
+ "learning_rate": 0.0002,
484
+ "loss": 0.0352,
485
+ "step": 680
486
+ },
487
+ {
488
+ "epoch": 0.020078239662161792,
489
+ "grad_norm": 0.15291444957256317,
490
+ "learning_rate": 0.0002,
491
+ "loss": 0.0339,
492
+ "step": 690
493
+ },
494
+ {
495
+ "epoch": 0.020369228642772835,
496
+ "grad_norm": 0.20080982148647308,
497
+ "learning_rate": 0.0002,
498
+ "loss": 0.034,
499
+ "step": 700
500
+ },
501
+ {
502
+ "epoch": 0.020660217623383874,
503
+ "grad_norm": 0.1934683471918106,
504
+ "learning_rate": 0.0002,
505
+ "loss": 0.0338,
506
+ "step": 710
507
+ },
508
+ {
509
+ "epoch": 0.020951206603994917,
510
+ "grad_norm": 0.2093890905380249,
511
+ "learning_rate": 0.0002,
512
+ "loss": 0.033,
513
+ "step": 720
514
+ },
515
+ {
516
+ "epoch": 0.021242195584605956,
517
+ "grad_norm": 0.17411717772483826,
518
+ "learning_rate": 0.0002,
519
+ "loss": 0.0329,
520
+ "step": 730
521
+ },
522
+ {
523
+ "epoch": 0.021533184565216995,
524
+ "grad_norm": 0.06554729491472244,
525
+ "learning_rate": 0.0002,
526
+ "loss": 0.0328,
527
+ "step": 740
528
+ },
529
+ {
530
+ "epoch": 0.021824173545828038,
531
+ "grad_norm": 0.3035508692264557,
532
+ "learning_rate": 0.0002,
533
+ "loss": 0.0345,
534
+ "step": 750
535
+ },
536
+ {
537
+ "epoch": 0.022115162526439077,
538
+ "grad_norm": 0.1284075379371643,
539
+ "learning_rate": 0.0002,
540
+ "loss": 0.0344,
541
+ "step": 760
542
+ },
543
+ {
544
+ "epoch": 0.022406151507050116,
545
+ "grad_norm": 0.06972914189100266,
546
+ "learning_rate": 0.0002,
547
+ "loss": 0.0326,
548
+ "step": 770
549
+ },
550
+ {
551
+ "epoch": 0.02269714048766116,
552
+ "grad_norm": 0.2625221908092499,
553
+ "learning_rate": 0.0002,
554
+ "loss": 0.0343,
555
+ "step": 780
556
+ },
557
+ {
558
+ "epoch": 0.0229881294682722,
559
+ "grad_norm": 0.2056276947259903,
560
+ "learning_rate": 0.0002,
561
+ "loss": 0.0341,
562
+ "step": 790
563
+ },
564
+ {
565
+ "epoch": 0.023279118448883238,
566
+ "grad_norm": 0.06602438539266586,
567
+ "learning_rate": 0.0002,
568
+ "loss": 0.0331,
569
+ "step": 800
570
+ },
571
+ {
572
+ "epoch": 0.02357010742949428,
573
+ "grad_norm": 0.1302807331085205,
574
+ "learning_rate": 0.0002,
575
+ "loss": 0.0328,
576
+ "step": 810
577
+ },
578
+ {
579
+ "epoch": 0.02386109641010532,
580
+ "grad_norm": 0.07038327306509018,
581
+ "learning_rate": 0.0002,
582
+ "loss": 0.0338,
583
+ "step": 820
584
+ },
585
+ {
586
+ "epoch": 0.02415208539071636,
587
+ "grad_norm": 0.3151911199092865,
588
+ "learning_rate": 0.0002,
589
+ "loss": 0.0353,
590
+ "step": 830
591
+ },
592
+ {
593
+ "epoch": 0.0244430743713274,
594
+ "grad_norm": 0.2942112982273102,
595
+ "learning_rate": 0.0002,
596
+ "loss": 0.0336,
597
+ "step": 840
598
+ },
599
+ {
600
+ "epoch": 0.02473406335193844,
601
+ "grad_norm": 0.09775586426258087,
602
+ "learning_rate": 0.0002,
603
+ "loss": 0.0331,
604
+ "step": 850
605
+ },
606
+ {
607
+ "epoch": 0.025025052332549483,
608
+ "grad_norm": 0.06825686991214752,
609
+ "learning_rate": 0.0002,
610
+ "loss": 0.0338,
611
+ "step": 860
612
+ },
613
+ {
614
+ "epoch": 0.025316041313160523,
615
+ "grad_norm": 0.08698020130395889,
616
+ "learning_rate": 0.0002,
617
+ "loss": 0.0329,
618
+ "step": 870
619
+ },
620
+ {
621
+ "epoch": 0.025607030293771562,
622
+ "grad_norm": 0.29394668340682983,
623
+ "learning_rate": 0.0002,
624
+ "loss": 0.0328,
625
+ "step": 880
626
+ },
627
+ {
628
+ "epoch": 0.025898019274382605,
629
+ "grad_norm": 0.2138691395521164,
630
+ "learning_rate": 0.0002,
631
+ "loss": 0.0327,
632
+ "step": 890
633
+ },
634
+ {
635
+ "epoch": 0.026189008254993644,
636
+ "grad_norm": 0.22760023176670074,
637
+ "learning_rate": 0.0002,
638
+ "loss": 0.0349,
639
+ "step": 900
640
+ },
641
+ {
642
+ "epoch": 0.026479997235604683,
643
+ "grad_norm": 0.07050047069787979,
644
+ "learning_rate": 0.0002,
645
+ "loss": 0.0327,
646
+ "step": 910
647
+ },
648
+ {
649
+ "epoch": 0.026770986216215726,
650
+ "grad_norm": 0.0632275640964508,
651
+ "learning_rate": 0.0002,
652
+ "loss": 0.0332,
653
+ "step": 920
654
+ },
655
+ {
656
+ "epoch": 0.027061975196826765,
657
+ "grad_norm": 0.2537945508956909,
658
+ "learning_rate": 0.0002,
659
+ "loss": 0.0334,
660
+ "step": 930
661
+ },
662
+ {
663
+ "epoch": 0.027352964177437804,
664
+ "grad_norm": 0.17872551083564758,
665
+ "learning_rate": 0.0002,
666
+ "loss": 0.0339,
667
+ "step": 940
668
+ },
669
+ {
670
+ "epoch": 0.027643953158048847,
671
+ "grad_norm": 0.1240101158618927,
672
+ "learning_rate": 0.0002,
673
+ "loss": 0.0328,
674
+ "step": 950
675
+ },
676
+ {
677
+ "epoch": 0.027934942138659886,
678
+ "grad_norm": 0.24408769607543945,
679
+ "learning_rate": 0.0002,
680
+ "loss": 0.0337,
681
+ "step": 960
682
+ },
683
+ {
684
+ "epoch": 0.028225931119270926,
685
+ "grad_norm": 0.06075837463140488,
686
+ "learning_rate": 0.0002,
687
+ "loss": 0.0327,
688
+ "step": 970
689
+ },
690
+ {
691
+ "epoch": 0.02851692009988197,
692
+ "grad_norm": 0.09202170372009277,
693
+ "learning_rate": 0.0002,
694
+ "loss": 0.0326,
695
+ "step": 980
696
+ },
697
+ {
698
+ "epoch": 0.028807909080493008,
699
+ "grad_norm": 0.09207413345575333,
700
+ "learning_rate": 0.0002,
701
+ "loss": 0.0316,
702
+ "step": 990
703
+ },
704
+ {
705
+ "epoch": 0.029098898061104047,
706
+ "grad_norm": 0.056632377207279205,
707
+ "learning_rate": 0.0002,
708
+ "loss": 0.0331,
709
+ "step": 1000
710
+ }
711
+ ],
712
+ "logging_steps": 10,
713
+ "max_steps": 40000,
714
+ "num_input_tokens_seen": 0,
715
+ "num_train_epochs": 2,
716
+ "save_steps": 250,
717
+ "stateful_callbacks": {
718
+ "TrainerControl": {
719
+ "args": {
720
+ "should_epoch_stop": false,
721
+ "should_evaluate": false,
722
+ "should_log": false,
723
+ "should_save": true,
724
+ "should_training_stop": false
725
+ },
726
+ "attributes": {}
727
+ }
728
+ },
729
+ "total_flos": 5.102264300163564e+18,
730
+ "train_batch_size": 96,
731
+ "trial_name": null,
732
+ "trial_params": null
733
+ }
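trainer_state.json logs the loss every 10 steps up to step 1000 (of a planned 40000); the loss drops from 1.24 at step 10 to roughly 0.033 at step 1000 with a constant learning rate of 2e-4 and a train batch size of 96. A hedged sketch, not part of the commit, of pulling that curve out of log_history:

```python
import json

# Read the trainer state written by transformers.Trainer and extract the loss curve.
with open("trainer_state.json") as f:
    state = json.load(f)

points = [(e["step"], e["loss"]) for e in state["log_history"] if "loss" in e]
print(f"{len(points)} logged points; final loss at step {points[-1][0]}: {points[-1][1]}")
```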
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3a0015a84881583a785c56dacfa73a1e2f0b4798a0add8a62f044199e95ad41c
+ size 7224