SubhrajitSain committed
Commit 5f6065d (verified)
1 parent: 13c087c

Training in progress, step 100

adapter_config.json ADDED
@@ -0,0 +1,28 @@
+{
+  "alpha_pattern": {},
+  "auto_mapping": null,
+  "base_model_name_or_path": "SubhrajitSain/anwgpt2-355m",
+  "bias": "none",
+  "fan_in_fan_out": false,
+  "inference_mode": true,
+  "init_lora_weights": true,
+  "layer_replication": null,
+  "layers_pattern": null,
+  "layers_to_transform": null,
+  "loftq_config": {},
+  "lora_alpha": 32,
+  "lora_dropout": 0.05,
+  "megatron_config": null,
+  "megatron_core": "megatron.core",
+  "modules_to_save": null,
+  "peft_type": "LORA",
+  "r": 16,
+  "rank_pattern": {},
+  "revision": null,
+  "target_modules": [
+    "c_attn"
+  ],
+  "task_type": "CAUSAL_LM",
+  "use_dora": false,
+  "use_rslora": false
+}
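The adapter_config.json above is a standard PEFT LoRA configuration. A minimal sketch of how an equivalent setup could be built with the peft library follows; the hyperparameters (r=16, lora_alpha=32, lora_dropout=0.05, target_modules=["c_attn"]) come from the file, while the surrounding code is an assumption and not necessarily the author's training script.

```python
# Hypothetical reconstruction of the LoRA setup described by adapter_config.json.
from transformers import AutoModelForCausalLM
from peft import LoraConfig, get_peft_model

base = AutoModelForCausalLM.from_pretrained("SubhrajitSain/anwgpt2-355m")

lora_config = LoraConfig(
    r=16,                       # LoRA rank, as in the config above
    lora_alpha=32,              # scaling factor
    lora_dropout=0.05,
    bias="none",
    target_modules=["c_attn"],  # GPT-2's fused attention projection
    task_type="CAUSAL_LM",
)

model = get_peft_model(base, lora_config)
model.print_trainable_parameters()  # only the LoRA matrices are trainable
```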
adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c5e3da487c9677fe0914e685dfde5cb00a079bb29c722a1bc87636a3f5ffb100
+size 418019808
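adapter_model.safetensors holds only the trained adapter weights. A hedged sketch of attaching them to the base model for inference with peft; "path/to/this/repo" is a placeholder for this repository's local checkout or Hub id, which is not named in the diff.

```python
# Sketch: load the base model, then attach the saved LoRA adapter on top of it.
from transformers import AutoModelForCausalLM
from peft import PeftModel

base = AutoModelForCausalLM.from_pretrained("SubhrajitSain/anwgpt2-355m")
model = PeftModel.from_pretrained(base, "path/to/this/repo")  # placeholder path

# Optionally fold the adapter into the base weights for faster inference.
merged = model.merge_and_unload()
```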
added_tokens.json ADDED
@@ -0,0 +1,4 @@
+{
+  "<|im_end|>": 50258,
+  "<|im_start|>": 50257
+}
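added_tokens.json registers the two ChatML-style markers at ids 50257 and 50258, and the special_tokens_map.json changes below record them as additional special tokens. A sketch of the tokenizer call that typically produces both files; the exact call in the author's script and the output directory are assumptions.

```python
# Sketch: register <|im_start|> / <|im_end|> as special tokens on the GPT-2
# tokenizer and grow the embedding matrix to match. Saving the tokenizer then
# writes added_tokens.json and special_tokens_map.json like those in this commit.
from transformers import AutoTokenizer, AutoModelForCausalLM

tokenizer = AutoTokenizer.from_pretrained("SubhrajitSain/anwgpt2-355m")
tokenizer.add_special_tokens(
    {"additional_special_tokens": ["<|im_start|>", "<|im_end|>"]}
)

model = AutoModelForCausalLM.from_pretrained("SubhrajitSain/anwgpt2-355m")
model.resize_token_embeddings(len(tokenizer))

print(tokenizer.convert_tokens_to_ids(["<|im_start|>", "<|im_end|>"]))
# expected: [50257, 50258], matching added_tokens.json
tokenizer.save_pretrained("output_dir")  # placeholder output directory
```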
special_tokens_map.json CHANGED
@@ -1,12 +1,46 @@
 {
-  "bos_token": "<|endoftext|>",
-  "eos_token": "<|endoftext|>",
+  "additional_special_tokens": [
+    {
+      "content": "<|im_start|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false
+    },
+    {
+      "content": "<|im_end|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false
+    }
+  ],
+  "bos_token": {
+    "content": "<|endoftext|>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  },
+  "eos_token": {
+    "content": "<|endoftext|>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  },
   "pad_token": {
     "content": "<|endoftext|>",
     "lstrip": false,
-    "normalized": false,
+    "normalized": true,
     "rstrip": false,
     "single_word": false
   },
-  "unk_token": "<|endoftext|>"
+  "unk_token": {
+    "content": "<|endoftext|>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  }
 }
tokenizer_config.json CHANGED
@@ -1,22 +1,52 @@
 {
+  "add_bos_token": false,
   "add_prefix_space": false,
   "added_tokens_decoder": {
     "50256": {
       "content": "<|endoftext|>",
       "lstrip": false,
+      "normalized": true,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "50257": {
+      "content": "<|im_start|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "50258": {
+      "content": "<|im_end|>",
+      "lstrip": false,
       "normalized": false,
       "rstrip": false,
       "single_word": false,
       "special": true
     }
   },
+  "additional_special_tokens": [
+    "<|im_start|>",
+    "<|im_end|>"
+  ],
   "bos_token": "<|endoftext|>",
+  "chat_template": "{% for message in messages %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}",
   "clean_up_tokenization_spaces": false,
   "eos_token": "<|endoftext|>",
+  "errors": "replace",
   "extra_special_tokens": {},
-  "fast": false,
+  "max_length": 128,
   "model_max_length": 1024,
+  "pad_to_multiple_of": null,
   "pad_token": "<|endoftext|>",
+  "pad_token_type_id": 0,
+  "padding_side": "right",
+  "stride": 0,
   "tokenizer_class": "GPT2Tokenizer",
+  "truncation_side": "right",
+  "truncation_strategy": "longest_first",
+  "unk_token": "<|endoftext|>",
+  "use_fast": false
 }
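The new chat_template in tokenizer_config.json is a ChatML-style Jinja template. A small sketch of how it renders a conversation via apply_chat_template; the messages are made up for illustration and "path/to/this/repo" is a placeholder for this repository.

```python
# Sketch: render messages with the chat_template added in this commit.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("path/to/this/repo")  # placeholder

messages = [
    {"role": "user", "content": "Hello!"},
    {"role": "assistant", "content": "Hi there."},
]

text = tokenizer.apply_chat_template(messages, tokenize=False)
print(text)
# <|im_start|>user
# Hello!<|im_end|>
# <|im_start|>assistant
# Hi there.<|im_end|>
```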
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:18b5a51f90c4015d536a9c4f9f2a5d0ab745e0cc30bf0d837e67a25d990917e7
-size 5432
+oid sha256:926e3d635f94e2c2a5b53dbb62882929c4c9a95fd069c3b24cce0c02b01b17d2
+size 5112
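training_args.bin is the pickled transformers TrainingArguments object that the Trainer saves alongside checkpoints. A sketch of inspecting it; the attributes printed are standard TrainingArguments fields, not values read from this file.

```python
# Sketch: load the serialized TrainingArguments. weights_only=False is required
# because the file is a Python pickle, not a tensor file; only load files you trust.
import torch

args = torch.load("training_args.bin", weights_only=False)
print(args.learning_rate, args.per_device_train_batch_size, args.num_train_epochs)
```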
vocab.json CHANGED
The diff for this file is too large to render.