ncgc committed on
Commit dadf14e · verified · 1 Parent(s): 0d7ed60

Training in progress, step 125

Files changed (4):
  1. README.md +2 -2
  2. config.json +31 -0
  3. model.safetensors +3 -0
  4. training_args.bin +1 -1
README.md CHANGED
@@ -4,8 +4,8 @@ library_name: transformers
 model_name: pythia_410m_hh_ga_lowest10k.fadam
 tags:
 - generated_from_trainer
-- trl
 - sft
+- trl
 licence: license
 ---
 
@@ -27,7 +27,7 @@ print(output["generated_text"])
 
 ## Training procedure
 
-[<img src="https://raw.githubusercontent.com/wandb/assets/main/wandb-github-badge-28.svg" alt="Visualize in Weights & Biases" width="150" height="24"/>](https://wandb.ai/representation_learning_lab_iisc/huggingface/runs/5761g4sd)
+[<img src="https://raw.githubusercontent.com/wandb/assets/main/wandb-github-badge-28.svg" alt="Visualize in Weights & Biases" width="150" height="24"/>](https://wandb.ai/representation_learning_lab_iisc/huggingface/runs/zow48r4l)
 
 
 This model was trained with SFT.
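The card's usage snippet ends with `print(output["generated_text"])`; for context, a minimal sketch of that standard TRL-style usage via the transformers `pipeline` API. The repo id is an assumption (committer name plus the card's `model_name`), and the prompt is a placeholder:

```python
# Sketch of the TRL model-card usage pattern; the repo id and prompt
# are assumptions, not copied from this commit.
from transformers import pipeline

generator = pipeline(
    "text-generation",
    model="ncgc/pythia_410m_hh_ga_lowest10k.fadam",  # assumed user/model_name
)
output = generator("What is supervised fine-tuning?", max_new_tokens=64)[0]
print(output["generated_text"])
```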
config.json ADDED
@@ -0,0 +1,31 @@
+{
+  "architectures": [
+    "GPTNeoXForCausalLM"
+  ],
+  "attention_bias": true,
+  "attention_dropout": 0.0,
+  "bos_token_id": 0,
+  "classifier_dropout": 0.1,
+  "eos_token_id": 0,
+  "hidden_act": "gelu",
+  "hidden_dropout": 0.0,
+  "hidden_size": 1024,
+  "initializer_range": 0.02,
+  "intermediate_size": 4096,
+  "layer_norm_eps": 1e-05,
+  "max_position_embeddings": 2048,
+  "model_type": "gpt_neox",
+  "num_attention_heads": 16,
+  "num_hidden_layers": 24,
+  "partial_rotary_factor": 0.25,
+  "rope_scaling": null,
+  "rope_theta": 10000,
+  "rotary_emb_base": 10000,
+  "rotary_pct": 0.25,
+  "tie_word_embeddings": false,
+  "torch_dtype": "float32",
+  "transformers_version": "4.53.0",
+  "use_cache": true,
+  "use_parallel_residual": true,
+  "vocab_size": 50304
+}
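The committed config declares a GPT-NeoX model at Pythia-410M dimensions. A minimal sketch of loading and sanity-checking it, assuming a local clone of the repo in the current directory:

```python
# Sketch: load the committed config.json and check the GPT-NeoX
# (Pythia-410M-sized) geometry it declares.
from transformers import AutoConfig

config = AutoConfig.from_pretrained(".")  # directory holding config.json
assert config.model_type == "gpt_neox"
assert config.hidden_size == 1024 and config.num_hidden_layers == 24
# 16 heads over a 1024-dim hidden state -> 64-dim heads, 25% of which rotate.
head_dim = config.hidden_size // config.num_attention_heads
print(head_dim, config.rotary_pct)  # 64 0.25
```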
model.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d3244e56a47586752bcaf73587614e9287aea21383d7837ce68b67170cdcd40c
+size 1621370224
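Those three lines are a Git LFS pointer, not the weights themselves: the real 1,621,370,224-byte file (consistent with roughly 405M float32 parameters at 4 bytes each) is stored by content hash. A sketch of verifying a downloaded `model.safetensors` against the pointer, assuming the resolved file sits in the working directory:

```python
# Sketch: verify a downloaded model.safetensors against the LFS pointer.
# The oid and size below are copied from the pointer committed above.
import hashlib
import os

EXPECTED_OID = "d3244e56a47586752bcaf73587614e9287aea21383d7837ce68b67170cdcd40c"
EXPECTED_SIZE = 1621370224  # bytes; ~405M float32 params * 4 bytes each

path = "model.safetensors"  # assumed local path to the resolved LFS object
assert os.path.getsize(path) == EXPECTED_SIZE

digest = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
        digest.update(chunk)
assert digest.hexdigest() == EXPECTED_OID
print("pointer matches local file")
```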
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:79e40f354a9941ec0df82c5c289f5d99086014a487ef158305825dbe8d8e8ba2
+oid sha256:308f39bba9080387ac690094c44bf9f41d1d8b3bb3d7957312d52359d88f17ae
 size 6225
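`training_args.bin` is the pickled training-arguments object (a transformers `TrainingArguments`, or a TRL `SFTConfig` for SFT runs) that the Trainer saves alongside checkpoints. A sketch of inspecting it after resolving the LFS pointer; `weights_only=False` is required because this is a general pickle, so only unpickle files from repositories you trust:

```python
# Sketch: inspect the pickled training arguments saved by the Trainer.
# This executes a pickle, so only run it on trusted repositories.
import torch

args = torch.load("training_args.bin", weights_only=False)
print(type(args).__name__)  # e.g. TrainingArguments or SFTConfig
print(args.learning_rate, args.per_device_train_batch_size)
```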