bigsmoke05 committed on
Commit e08b6b5 · verified · 1 Parent(s): 2c3733a

Upload 13 files

README.md CHANGED
@@ -1,3 +1,60 @@
- ---
- license: apache-2.0
- ---
+ ---
+ license: other
+ library_name: peft
+ tags:
+ - llama-factory
+ - lora
+ - generated_from_trainer
+ base_model: Qwen/CodeQwen1.5-7B
+ model-index:
+ - name: train_scopai1
+   results: []
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ # train_scopai1
+
+ This model is a fine-tuned version of [Qwen/CodeQwen1.5-7B](https://huggingface.co/Qwen/CodeQwen1.5-7B) on the scopai dataset.
+
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 0.0002
+ - train_batch_size: 2
+ - eval_batch_size: 8
+ - seed: 42
+ - gradient_accumulation_steps: 4
+ - total_train_batch_size: 8
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+ - lr_scheduler_type: linear
+ - lr_scheduler_warmup_steps: 5
+ - num_epochs: 2.0
+ - mixed_precision_training: Native AMP
+
+ ### Training results
+
+
+
+ ### Framework versions
+
+ - PEFT 0.10.1.dev0
+ - Transformers 4.41.0.dev0
+ - Pytorch 2.1.2+cu121
+ - Datasets 2.19.0
+ - Tokenizers 0.19.1
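A minimal usage sketch, assuming the adapter files in this commit are available locally or under this repository's Hub id (the adapter path below is a placeholder), and mirroring the 4-bit quantization used during training; the Solidity prompt simply reflects the dataset named in the training log:

```python
# Sketch: load Qwen/CodeQwen1.5-7B in 4-bit and attach the LoRA adapter from this commit.
# "path/to/train_scopai1" is a placeholder for the Hub repo id or a local directory
# containing adapter_config.json and adapter_model.safetensors.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig
from peft import PeftModel

base_id = "Qwen/CodeQwen1.5-7B"
adapter_path = "path/to/train_scopai1"  # placeholder

bnb = BitsAndBytesConfig(load_in_4bit=True, bnb_4bit_compute_dtype=torch.float16)
tokenizer = AutoTokenizer.from_pretrained(base_id)
model = AutoModelForCausalLM.from_pretrained(base_id, quantization_config=bnb, device_map="auto")
model = PeftModel.from_pretrained(model, adapter_path)  # applies the rank-4 LoRA to q_proj/v_proj

prompt = "// SPDX-License-Identifier: MIT\npragma solidity ^0.8.19;\ncontract Counter {"
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
out = model.generate(**inputs, max_new_tokens=64)
print(tokenizer.decode(out[0], skip_special_tokens=True))
```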
adapter_config.json ADDED
@@ -0,0 +1,29 @@
+ {
+   "alpha_pattern": {},
+   "auto_mapping": null,
+   "base_model_name_or_path": "Qwen/CodeQwen1.5-7B",
+   "bias": "none",
+   "fan_in_fan_out": false,
+   "inference_mode": true,
+   "init_lora_weights": true,
+   "layer_replication": null,
+   "layers_pattern": null,
+   "layers_to_transform": null,
+   "loftq_config": {},
+   "lora_alpha": 8,
+   "lora_dropout": 0.05,
+   "megatron_config": null,
+   "megatron_core": "megatron.core",
+   "modules_to_save": null,
+   "peft_type": "LORA",
+   "r": 4,
+   "rank_pattern": {},
+   "revision": null,
+   "target_modules": [
+     "q_proj",
+     "v_proj"
+   ],
+   "task_type": "CAUSAL_LM",
+   "use_dora": false,
+   "use_rslora": false
+ }
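For reference, the same settings expressed as a PEFT `LoraConfig`; a sketch with values taken from adapter_config.json above:

```python
from peft import LoraConfig, TaskType

# Values mirror adapter_config.json above; rank 4 with alpha 8 gives a scaling of alpha/r = 2.
lora_config = LoraConfig(
    task_type=TaskType.CAUSAL_LM,
    r=4,
    lora_alpha=8,
    lora_dropout=0.05,
    bias="none",
    target_modules=["q_proj", "v_proj"],  # only the attention query and value projections are adapted
)
```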
adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e48cbadaf8b2bbc623e76624a745d30b09e817abeae778bbefe535f09722c692
+ size 6570424
all_results.json ADDED
@@ -0,0 +1,8 @@
+ {
+   "epoch": 2.0,
+   "total_flos": 1341465195577344.0,
+   "train_loss": 0.8099669218063354,
+   "train_runtime": 334.6224,
+   "train_samples_per_second": 0.233,
+   "train_steps_per_second": 0.03
+ }
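A quick sanity check on these metrics, using the run size recorded in running_log.txt below (39 examples, 2 epochs, 10 optimizer steps):

```python
# Sketch: reproduce train_samples_per_second and train_steps_per_second from the run size.
examples, epochs, steps = 39, 2, 10   # from running_log.txt
runtime_s = 334.6224                  # from all_results.json

print(round(examples * epochs / runtime_s, 3))  # 0.233 samples/s
print(round(steps / runtime_s, 3))              # 0.03 steps/s
```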
running_log.txt ADDED
@@ -0,0 +1,170 @@
+ 05/15/2024 23:19:53 - INFO - transformers.tokenization_utils_base - loading file tokenizer.json from cache at /home/f200309/.cache/huggingface/hub/models--Qwen--CodeQwen1.5-7B/snapshots/5ce5a1554e50a9e3bb236de7c0b8a2a1746186e4/tokenizer.json
+
+ 05/15/2024 23:19:53 - INFO - transformers.tokenization_utils_base - loading file added_tokens.json from cache at None
+
+ 05/15/2024 23:19:53 - INFO - transformers.tokenization_utils_base - loading file special_tokens_map.json from cache at None
+
+ 05/15/2024 23:19:53 - INFO - transformers.tokenization_utils_base - loading file tokenizer_config.json from cache at /home/f200309/.cache/huggingface/hub/models--Qwen--CodeQwen1.5-7B/snapshots/5ce5a1554e50a9e3bb236de7c0b8a2a1746186e4/tokenizer_config.json
+
+ 05/15/2024 23:19:55 - INFO - llmtuner.data.loader - Loading dataset bigsmoke05/optimized-solidity-dataset...
+
+ 05/15/2024 23:20:04 - INFO - transformers.configuration_utils - loading configuration file config.json from cache at /home/f200309/.cache/huggingface/hub/models--Qwen--CodeQwen1.5-7B/snapshots/5ce5a1554e50a9e3bb236de7c0b8a2a1746186e4/config.json
+
+ 05/15/2024 23:20:04 - INFO - transformers.configuration_utils - Model config Qwen2Config {
+   "_name_or_path": "Qwen/CodeQwen1.5-7B",
+   "architectures": [
+     "Qwen2ForCausalLM"
+   ],
+   "attention_dropout": 0.0,
+   "bos_token_id": 2,
+   "eos_token_id": 2,
+   "hidden_act": "silu",
+   "hidden_size": 4096,
+   "initializer_range": 0.02,
+   "intermediate_size": 13440,
+   "max_position_embeddings": 65536,
+   "max_window_layers": 28,
+   "model_type": "qwen2",
+   "num_attention_heads": 32,
+   "num_hidden_layers": 32,
+   "num_key_value_heads": 4,
+   "rms_norm_eps": 1e-05,
+   "rope_theta": 1000000,
+   "rotary_emb_base": 1000000,
+   "seq_length": 65536,
+   "sliding_window": 65536,
+   "tie_word_embeddings": false,
+   "torch_dtype": "bfloat16",
+   "transformers_version": "4.41.0.dev0",
+   "use_cache": true,
+   "use_sliding_window": false,
+   "vocab_size": 92416
+ }
+
+
+ 05/15/2024 23:20:04 - INFO - llmtuner.model.utils.quantization - Quantizing model to 4 bit.
+
+ 05/15/2024 23:20:04 - INFO - transformers.modeling_utils - loading weights file model.safetensors from cache at /home/f200309/.cache/huggingface/hub/models--Qwen--CodeQwen1.5-7B/snapshots/5ce5a1554e50a9e3bb236de7c0b8a2a1746186e4/model.safetensors.index.json
+
+ 05/15/2024 23:20:04 - INFO - transformers.modeling_utils - Instantiating Qwen2ForCausalLM model under default dtype torch.float16.
+
+ 05/15/2024 23:20:04 - INFO - transformers.generation.configuration_utils - Generate config GenerationConfig {
+   "bos_token_id": 2,
+   "eos_token_id": 2,
+   "use_cache": false
+ }
+
+
+ 05/15/2024 23:20:18 - INFO - transformers.modeling_utils - All model checkpoint weights were used when initializing Qwen2ForCausalLM.
+
+
+ 05/15/2024 23:20:18 - INFO - transformers.modeling_utils - All the weights of Qwen2ForCausalLM were initialized from the model checkpoint at Qwen/CodeQwen1.5-7B.
+ If your task is similar to the task the model of the checkpoint was trained on, you can already use Qwen2ForCausalLM for predictions without further training.
+
+ 05/15/2024 23:20:18 - INFO - transformers.generation.configuration_utils - loading configuration file generation_config.json from cache at /home/f200309/.cache/huggingface/hub/models--Qwen--CodeQwen1.5-7B/snapshots/5ce5a1554e50a9e3bb236de7c0b8a2a1746186e4/generation_config.json
+
+ 05/15/2024 23:20:18 - INFO - transformers.generation.configuration_utils - Generate config GenerationConfig {
+   "bos_token_id": 2,
+   "eos_token_id": [
+     4,
+     2
+   ],
+   "pad_token_id": 92298,
+   "top_p": 0.95
+ }
+
+
+ 05/15/2024 23:20:19 - INFO - llmtuner.model.utils.checkpointing - Gradient checkpointing enabled.
+
+ 05/15/2024 23:20:19 - INFO - llmtuner.model.utils.attention - Using torch SDPA for faster training and inference.
+
+ 05/15/2024 23:20:19 - INFO - llmtuner.model.adapter - Fine-tuning method: LoRA
+
+ 05/15/2024 23:20:19 - INFO - llmtuner.model.loader - trainable params: 1638400 || all params: 7251922944 || trainable%: 0.0226
+
+ 05/15/2024 23:20:19 - INFO - transformers.trainer - Using auto half precision backend
+
+ 05/15/2024 23:20:20 - INFO - transformers.trainer - ***** Running training *****
+
+ 05/15/2024 23:20:20 - INFO - transformers.trainer - Num examples = 39
+
+ 05/15/2024 23:20:20 - INFO - transformers.trainer - Num Epochs = 2
+
+ 05/15/2024 23:20:20 - INFO - transformers.trainer - Instantaneous batch size per device = 2
+
+ 05/15/2024 23:20:20 - INFO - transformers.trainer - Total train batch size (w. parallel, distributed & accumulation) = 8
+
+ 05/15/2024 23:20:20 - INFO - transformers.trainer - Gradient Accumulation steps = 4
+
+ 05/15/2024 23:20:20 - INFO - transformers.trainer - Total optimization steps = 10
+
+ 05/15/2024 23:20:20 - INFO - transformers.trainer - Number of trainable parameters = 1,638,400
+
+ 05/15/2024 23:20:59 - INFO - llmtuner.extras.callbacks - {'loss': 0.7110, 'learning_rate': 4.0000e-05, 'epoch': 0.20}
+
+ 05/15/2024 23:21:31 - INFO - llmtuner.extras.callbacks - {'loss': 0.7668, 'learning_rate': 8.0000e-05, 'epoch': 0.40}
+
+ 05/15/2024 23:22:00 - INFO - llmtuner.extras.callbacks - {'loss': 0.9731, 'learning_rate': 1.2000e-04, 'epoch': 0.60}
+
+ 05/15/2024 23:22:33 - INFO - llmtuner.extras.callbacks - {'loss': 0.8388, 'learning_rate': 1.6000e-04, 'epoch': 0.80}
+
+ 05/15/2024 23:23:02 - INFO - llmtuner.extras.callbacks - {'loss': 0.8452, 'learning_rate': 2.0000e-04, 'epoch': 1.00}
+
+ 05/15/2024 23:23:46 - INFO - llmtuner.extras.callbacks - {'loss': 0.6138, 'learning_rate': 1.6000e-04, 'epoch': 1.20}
+
+ 05/15/2024 23:24:28 - INFO - llmtuner.extras.callbacks - {'loss': 0.5858, 'learning_rate': 1.2000e-04, 'epoch': 1.40}
+
+ 05/15/2024 23:25:07 - INFO - llmtuner.extras.callbacks - {'loss': 0.7195, 'learning_rate': 8.0000e-05, 'epoch': 1.60}
+
+ 05/15/2024 23:25:26 - INFO - llmtuner.extras.callbacks - {'loss': 1.0479, 'learning_rate': 4.0000e-05, 'epoch': 1.80}
+
+ 05/15/2024 23:25:55 - INFO - llmtuner.extras.callbacks - {'loss': 0.9978, 'learning_rate': 0.0000e+00, 'epoch': 2.00}
+
+ 05/15/2024 23:25:55 - INFO - transformers.trainer -
+
+ Training completed. Do not forget to share your model on huggingface.co/models =)
+
+
+
+ 05/15/2024 23:25:55 - INFO - transformers.trainer - Saving model checkpoint to saves/Qwen1.5-Code-7B/lora/train_scopai1
+
+ 05/15/2024 23:25:57 - INFO - transformers.configuration_utils - loading configuration file config.json from cache at /home/f200309/.cache/huggingface/hub/models--Qwen--CodeQwen1.5-7B/snapshots/5ce5a1554e50a9e3bb236de7c0b8a2a1746186e4/config.json
+
+ 05/15/2024 23:25:57 - INFO - transformers.configuration_utils - Model config Qwen2Config {
+   "architectures": [
+     "Qwen2ForCausalLM"
+   ],
+   "attention_dropout": 0.0,
+   "bos_token_id": 2,
+   "eos_token_id": 2,
+   "hidden_act": "silu",
+   "hidden_size": 4096,
+   "initializer_range": 0.02,
+   "intermediate_size": 13440,
+   "max_position_embeddings": 65536,
+   "max_window_layers": 28,
+   "model_type": "qwen2",
+   "num_attention_heads": 32,
+   "num_hidden_layers": 32,
+   "num_key_value_heads": 4,
+   "rms_norm_eps": 1e-05,
+   "rope_theta": 1000000,
+   "rotary_emb_base": 1000000,
+   "seq_length": 65536,
+   "sliding_window": 65536,
+   "tie_word_embeddings": false,
+   "torch_dtype": "bfloat16",
+   "transformers_version": "4.41.0.dev0",
+   "use_cache": true,
+   "use_sliding_window": false,
+   "vocab_size": 92416
+ }
+
+
+ 05/15/2024 23:25:57 - INFO - transformers.tokenization_utils_base - tokenizer config file saved in saves/Qwen1.5-Code-7B/lora/train_scopai1/tokenizer_config.json
+
+ 05/15/2024 23:25:57 - INFO - transformers.tokenization_utils_base - Special tokens file saved in saves/Qwen1.5-Code-7B/lora/train_scopai1/special_tokens_map.json
+
+ 05/15/2024 23:25:57 - INFO - transformers.modelcard - Dropping the following result as it does not have all the necessary fields:
+ {'task': {'name': 'Causal Language Modeling', 'type': 'text-generation'}}
+
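The trainable-parameter and step counts reported in this log follow from the model config and batch settings; a sketch of the arithmetic:

```python
import math

# LoRA adds an (r x in) matrix A and an (out x r) matrix B per adapted projection.
hidden, heads, kv_heads, layers, r = 4096, 32, 4, 32, 4   # from the Qwen2 config and adapter_config.json
head_dim = hidden // heads                                  # 128
q_out, v_out = heads * head_dim, kv_heads * head_dim        # 4096 and 512 output features

per_layer = r * (hidden + q_out) + r * (hidden + v_out)     # 32768 + 18432
print(per_layer * layers)                                   # 1638400 trainable parameters, as logged

# 39 examples, micro-batch size 2, gradient accumulation 4, 2 epochs.
micro_batches_per_epoch = math.ceil(39 / 2)                 # 20
steps_per_epoch = math.ceil(micro_batches_per_epoch / 4)    # 5
print(steps_per_epoch * 2)                                  # 10 total optimization steps, as logged
```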
special_tokens_map.json ADDED
@@ -0,0 +1,38 @@
+ {
+   "additional_special_tokens": [
+     "<|im_start|>",
+     "<|im_end|>",
+     "<fim_prefix>",
+     "<fim_middle>",
+     "<fim_suffix>",
+     "<fim_pad>"
+   ],
+   "bos_token": {
+     "content": "<|endoftext|>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "eos_token": {
+     "content": "<|endoftext|>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": {
+     "content": "<fim_pad>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "unk_token": {
+     "content": "<unk>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
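A small sketch for inspecting this mapping, mainly to note that padding uses the dedicated `<fim_pad>` token rather than EOS (assumes a local copy of the file):

```python
import json

# Assumes the special_tokens_map.json shown above is in the current directory.
with open("special_tokens_map.json") as f:
    specials = json.load(f)

print(specials["eos_token"]["content"])       # <|endoftext|>
print(specials["pad_token"]["content"])       # <fim_pad>
print(specials["additional_special_tokens"])  # chat and fill-in-the-middle markers
```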
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
The diff for this file is too large to render. See raw diff
 
train_results.json ADDED
@@ -0,0 +1,8 @@
+ {
+   "epoch": 2.0,
+   "total_flos": 1341465195577344.0,
+   "train_loss": 0.8099669218063354,
+   "train_runtime": 334.6224,
+   "train_samples_per_second": 0.233,
+   "train_steps_per_second": 0.03
+ }
trainer_config.yaml ADDED
@@ -0,0 +1,29 @@
+ cutoff_len: 512
+ dataset: scopai
+ dataset_dir: data
+ do_train: true
+ finetuning_type: lora
+ flash_attn: auto
+ fp16: true
+ gradient_accumulation_steps: 4
+ learning_rate: 0.0002
+ logging_steps: 1
+ lora_alpha: 8
+ lora_dropout: 0.05
+ lora_rank: 4
+ lora_target: q_proj,v_proj
+ lr_scheduler_type: linear
+ max_grad_norm: 0.5
+ max_samples: 100
+ model_name_or_path: Qwen/CodeQwen1.5-7B
+ num_train_epochs: 2.0
+ optim: adamw_torch
+ output_dir: saves/Qwen1.5-Code-7B/lora/train_scopai1
+ packing: false
+ per_device_train_batch_size: 2
+ quantization_bit: 4
+ report_to: none
+ save_steps: 100
+ stage: sft
+ template: default
+ warmup_steps: 5
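This is the LLaMA-Factory (llmtuner) run configuration. A sketch of reading it back and recovering the effective batch size reported in the README, assuming a local copy named trainer_config.yaml and PyYAML installed:

```python
import yaml

with open("trainer_config.yaml") as f:
    cfg = yaml.safe_load(f)

effective_batch = cfg["per_device_train_batch_size"] * cfg["gradient_accumulation_steps"]
print(effective_batch)                       # 8, the total_train_batch_size in the README
print(cfg["lora_rank"], cfg["lora_target"])  # 4, "q_proj,v_proj" -- matches adapter_config.json
```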
trainer_log.jsonl ADDED
@@ -0,0 +1,11 @@
+ {"current_steps": 1, "total_steps": 10, "loss": 0.711, "learning_rate": 4e-05, "epoch": 0.2, "percentage": 10.0, "elapsed_time": "0:00:39", "remaining_time": "0:05:54"}
+ {"current_steps": 2, "total_steps": 10, "loss": 0.7668, "learning_rate": 8e-05, "epoch": 0.4, "percentage": 20.0, "elapsed_time": "0:01:10", "remaining_time": "0:04:42"}
+ {"current_steps": 3, "total_steps": 10, "loss": 0.9731, "learning_rate": 0.00012, "epoch": 0.6, "percentage": 30.0, "elapsed_time": "0:01:39", "remaining_time": "0:03:52"}
+ {"current_steps": 4, "total_steps": 10, "loss": 0.8388, "learning_rate": 0.00016, "epoch": 0.8, "percentage": 40.0, "elapsed_time": "0:02:12", "remaining_time": "0:03:19"}
+ {"current_steps": 5, "total_steps": 10, "loss": 0.8452, "learning_rate": 0.0002, "epoch": 1.0, "percentage": 50.0, "elapsed_time": "0:02:42", "remaining_time": "0:02:42"}
+ {"current_steps": 6, "total_steps": 10, "loss": 0.6138, "learning_rate": 0.00016, "epoch": 1.2, "percentage": 60.0, "elapsed_time": "0:03:26", "remaining_time": "0:02:17"}
+ {"current_steps": 7, "total_steps": 10, "loss": 0.5858, "learning_rate": 0.00012, "epoch": 1.4, "percentage": 70.0, "elapsed_time": "0:04:08", "remaining_time": "0:01:46"}
+ {"current_steps": 8, "total_steps": 10, "loss": 0.7195, "learning_rate": 8e-05, "epoch": 1.6, "percentage": 80.0, "elapsed_time": "0:04:46", "remaining_time": "0:01:11"}
+ {"current_steps": 9, "total_steps": 10, "loss": 1.0479, "learning_rate": 4e-05, "epoch": 1.8, "percentage": 90.0, "elapsed_time": "0:05:05", "remaining_time": "0:00:33"}
+ {"current_steps": 10, "total_steps": 10, "loss": 0.9978, "learning_rate": 0.0, "epoch": 2.0, "percentage": 100.0, "elapsed_time": "0:05:34", "remaining_time": "0:00:00"}
+ {"current_steps": 10, "total_steps": 10, "epoch": 2.0, "percentage": 100.0, "elapsed_time": "0:05:34", "remaining_time": "0:00:00"}
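The per-step records can be read back directly; a sketch (assuming a local copy of the file) that also confirms the reported train_loss is the mean of the ten step losses:

```python
import json

# Assumes the trainer_log.jsonl shown above is in the current directory.
with open("trainer_log.jsonl") as f:
    steps = [json.loads(line) for line in f if line.strip()]

losses = [s["loss"] for s in steps if "loss" in s]  # the final summary record has no loss field
print(len(losses))                                  # 10
print(sum(losses) / len(losses))                    # ~0.80997, matching train_loss in all_results.json
```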
trainer_state.json ADDED
@@ -0,0 +1,100 @@
+ {
+   "best_metric": null,
+   "best_model_checkpoint": null,
+   "epoch": 2.0,
+   "eval_steps": 500,
+   "global_step": 10,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 0.2,
+       "grad_norm": 0.0990709736943245,
+       "learning_rate": 4e-05,
+       "loss": 0.711,
+       "step": 1
+     },
+     {
+       "epoch": 0.4,
+       "grad_norm": 0.120853953063488,
+       "learning_rate": 8e-05,
+       "loss": 0.7668,
+       "step": 2
+     },
+     {
+       "epoch": 0.6,
+       "grad_norm": 0.13565421104431152,
+       "learning_rate": 0.00012,
+       "loss": 0.9731,
+       "step": 3
+     },
+     {
+       "epoch": 0.8,
+       "grad_norm": 0.14744164049625397,
+       "learning_rate": 0.00016,
+       "loss": 0.8388,
+       "step": 4
+     },
+     {
+       "epoch": 1.0,
+       "grad_norm": 0.16943834722042084,
+       "learning_rate": 0.0002,
+       "loss": 0.8452,
+       "step": 5
+     },
+     {
+       "epoch": 1.2,
+       "grad_norm": 0.09526122361421585,
+       "learning_rate": 0.00016,
+       "loss": 0.6138,
+       "step": 6
+     },
+     {
+       "epoch": 1.4,
+       "grad_norm": 0.11567643284797668,
+       "learning_rate": 0.00012,
+       "loss": 0.5858,
+       "step": 7
+     },
+     {
+       "epoch": 1.6,
+       "grad_norm": 0.20003917813301086,
+       "learning_rate": 8e-05,
+       "loss": 0.7195,
+       "step": 8
+     },
+     {
+       "epoch": 1.8,
+       "grad_norm": 0.4086407721042633,
+       "learning_rate": 4e-05,
+       "loss": 1.0479,
+       "step": 9
+     },
+     {
+       "epoch": 2.0,
+       "grad_norm": 0.2869514226913452,
+       "learning_rate": 0.0,
+       "loss": 0.9978,
+       "step": 10
+     },
+     {
+       "epoch": 2.0,
+       "step": 10,
+       "total_flos": 1341465195577344.0,
+       "train_loss": 0.8099669218063354,
+       "train_runtime": 334.6224,
+       "train_samples_per_second": 0.233,
+       "train_steps_per_second": 0.03
+     }
+   ],
+   "logging_steps": 1,
+   "max_steps": 10,
+   "num_input_tokens_seen": 0,
+   "num_train_epochs": 2,
+   "save_steps": 100,
+   "total_flos": 1341465195577344.0,
+   "train_batch_size": 2,
+   "trial_name": null,
+   "trial_params": null
+ }
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6e58748cd2f7b71f16f9a8755f1a3ba8bc7b8165c0f7a390639ad9a463cde2c7
+ size 5176