dima806 committed
Commit 0d31143 · verified · 1 Parent(s): d35d999

Upload folder using huggingface_hub

checkpoint-6000/config.json ADDED
@@ -0,0 +1,52 @@
+{
+  "_name_or_path": "dima806/garbage_types_image_detection",
+  "architectures": [
+    "ViTForImageClassification"
+  ],
+  "attention_probs_dropout_prob": 0.0,
+  "encoder_stride": 16,
+  "hidden_act": "gelu",
+  "hidden_dropout_prob": 0.0,
+  "hidden_size": 768,
+  "id2label": {
+    "0": "battery",
+    "1": "biological",
+    "2": "brown-glass",
+    "3": "cardboard",
+    "4": "clothes",
+    "5": "green-glass",
+    "6": "metal",
+    "7": "paper",
+    "8": "plastic",
+    "9": "shoes",
+    "10": "trash",
+    "11": "white-glass"
+  },
+  "image_size": 224,
+  "initializer_range": 0.02,
+  "intermediate_size": 3072,
+  "label2id": {
+    "battery": 0,
+    "biological": 1,
+    "brown-glass": 2,
+    "cardboard": 3,
+    "clothes": 4,
+    "green-glass": 5,
+    "metal": 6,
+    "paper": 7,
+    "plastic": 8,
+    "shoes": 9,
+    "trash": 10,
+    "white-glass": 11
+  },
+  "layer_norm_eps": 1e-12,
+  "model_type": "vit",
+  "num_attention_heads": 12,
+  "num_channels": 3,
+  "num_hidden_layers": 12,
+  "patch_size": 16,
+  "problem_type": "single_label_classification",
+  "qkv_bias": true,
+  "torch_dtype": "float32",
+  "transformers_version": "4.44.0"
+}
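The config above describes a standard ViT-Base classifier (patch size 16, 224×224 input, 12 hidden layers) with a 12-class garbage-type head. As a minimal sketch, assuming the checkpoint-6000 folder from this commit has been downloaded locally (the path below is illustrative, not part of the commit), it can be loaded with transformers:

```python
# Minimal sketch: load the checkpoint's config and weights with transformers.
# "./checkpoint-6000" is an assumed local download location of the folder added here.
from transformers import ViTForImageClassification

model = ViTForImageClassification.from_pretrained("./checkpoint-6000")

# The id2label mapping from config.json is carried on the loaded config.
print(model.config.id2label[0])   # "battery"
print(model.config.num_labels)    # 12
```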
checkpoint-6000/model.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b9d9dec6fe465c468dd9f4cd986e633c5ad770d2a5603c22a74cfeaf6c8327b4
+size 343254736
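The weights file is stored through Git LFS, so the diff records only the pointer text (spec version, sha256 oid, byte size), not the 343 MB of weights. A small sketch, assuming the real file has already been fetched (e.g. via `git lfs pull`), for checking that a local copy matches the oid recorded above:

```python
# Sketch: verify a downloaded LFS object against the pointer's sha256 oid.
# The file path is illustrative; adjust it to where the weights were fetched.
import hashlib

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            h.update(chunk)
    return h.hexdigest()

expected = "b9d9dec6fe465c468dd9f4cd986e633c5ad770d2a5603c22a74cfeaf6c8327b4"
assert sha256_of("checkpoint-6000/model.safetensors") == expected
```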
checkpoint-6000/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3735b930d00392f3c04fc6e4f3f5ef4b82bbe13a4838fd90c20ca12d17883a2d
+size 686630330
checkpoint-6000/preprocessor_config.json ADDED
@@ -0,0 +1,22 @@
+{
+  "do_normalize": true,
+  "do_rescale": true,
+  "do_resize": true,
+  "image_mean": [
+    0.5,
+    0.5,
+    0.5
+  ],
+  "image_processor_type": "ViTImageProcessor",
+  "image_std": [
+    0.5,
+    0.5,
+    0.5
+  ],
+  "resample": 2,
+  "rescale_factor": 0.00392156862745098,
+  "size": {
+    "height": 224,
+    "width": 224
+  }
+}
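This preprocessor config resizes images to 224×224, rescales pixel values by 1/255, and normalizes every channel with mean and std 0.5, matching the usual ViT-Base setup. A sketch of applying it, assuming the checkpoint folder is local and Pillow is available (the image path is illustrative):

```python
# Sketch: preprocess an image exactly as preprocessor_config.json specifies.
from PIL import Image
from transformers import ViTImageProcessor

processor = ViTImageProcessor.from_pretrained("./checkpoint-6000")
image = Image.open("some_waste_item.jpg").convert("RGB")

inputs = processor(images=image, return_tensors="pt")
print(inputs["pixel_values"].shape)  # torch.Size([1, 3, 224, 224])
```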
checkpoint-6000/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a00186650a8a74b9765b1faef1b2a983e8905d0b61542b58d9e1ac92cb048513
+size 14244
checkpoint-6000/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2bfca70c0e914fec799e5ecd522f22687b6bedb332fca95a109a349ad5e40a62
+size 1064
checkpoint-6000/trainer_state.json ADDED
@@ -0,0 +1,217 @@
+{
+  "best_metric": 1.0335067510604858,
+  "best_model_checkpoint": "garbage_types_image_detection/checkpoint-6000",
+  "epoch": 10.0,
+  "eval_steps": 500,
+  "global_step": 6000,
+  "is_hyper_param_search": false,
+  "is_local_process_zero": true,
+  "is_world_process_zero": true,
+  "log_history": [
+    {
+      "epoch": 0.8333333333333334,
+      "grad_norm": 1.107461929321289,
+      "learning_rate": 9.243697478991597e-07,
+      "loss": 2.3762,
+      "step": 500
+    },
+    {
+      "epoch": 1.0,
+      "eval_accuracy": 0.6217918622848201,
+      "eval_loss": 2.261021137237549,
+      "eval_model_preparation_time": 0.0049,
+      "eval_runtime": 230.6368,
+      "eval_samples_per_second": 110.824,
+      "eval_steps_per_second": 3.464,
+      "step": 600
+    },
+    {
+      "epoch": 1.6666666666666665,
+      "grad_norm": 1.2842817306518555,
+      "learning_rate": 8.403361344537815e-07,
+      "loss": 2.1389,
+      "step": 1000
+    },
+    {
+      "epoch": 2.0,
+      "eval_accuracy": 0.84358372456964,
+      "eval_loss": 1.9786875247955322,
+      "eval_model_preparation_time": 0.0049,
+      "eval_runtime": 231.1495,
+      "eval_samples_per_second": 110.578,
+      "eval_steps_per_second": 3.457,
+      "step": 1200
+    },
+    {
+      "epoch": 2.5,
+      "grad_norm": 1.3137625455856323,
+      "learning_rate": 7.563025210084033e-07,
+      "loss": 1.871,
+      "step": 1500
+    },
+    {
+      "epoch": 3.0,
+      "eval_accuracy": 0.8843896713615024,
+      "eval_loss": 1.6969841718673706,
+      "eval_model_preparation_time": 0.0049,
+      "eval_runtime": 229.6174,
+      "eval_samples_per_second": 111.316,
+      "eval_steps_per_second": 3.48,
+      "step": 1800
+    },
+    {
+      "epoch": 3.3333333333333335,
+      "grad_norm": 1.404272198677063,
+      "learning_rate": 6.722689075630252e-07,
+      "loss": 1.62,
+      "step": 2000
+    },
+    {
+      "epoch": 4.0,
+      "eval_accuracy": 0.8964006259780908,
+      "eval_loss": 1.4749500751495361,
+      "eval_model_preparation_time": 0.0049,
+      "eval_runtime": 229.017,
+      "eval_samples_per_second": 111.607,
+      "eval_steps_per_second": 3.489,
+      "step": 2400
+    },
+    {
+      "epoch": 4.166666666666667,
+      "grad_norm": 1.3687832355499268,
+      "learning_rate": 5.88235294117647e-07,
+      "loss": 1.4226,
+      "step": 2500
+    },
+    {
+      "epoch": 5.0,
+      "grad_norm": 4.501117706298828,
+      "learning_rate": 5.042016806722689e-07,
+      "loss": 1.272,
+      "step": 3000
+    },
+    {
+      "epoch": 5.0,
+      "eval_accuracy": 0.9044992175273865,
+      "eval_loss": 1.3128881454467773,
+      "eval_model_preparation_time": 0.0049,
+      "eval_runtime": 227.3834,
+      "eval_samples_per_second": 112.409,
+      "eval_steps_per_second": 3.514,
+      "step": 3000
+    },
+    {
+      "epoch": 5.833333333333333,
+      "grad_norm": 1.4435608386993408,
+      "learning_rate": 4.2016806722689076e-07,
+      "loss": 1.1605,
+      "step": 3500
+    },
+    {
+      "epoch": 6.0,
+      "eval_accuracy": 0.9131846635367762,
+      "eval_loss": 1.2000573873519897,
+      "eval_model_preparation_time": 0.0049,
+      "eval_runtime": 229.3389,
+      "eval_samples_per_second": 111.451,
+      "eval_steps_per_second": 3.484,
+      "step": 3600
+    },
+    {
+      "epoch": 6.666666666666667,
+      "grad_norm": 1.3652788400650024,
+      "learning_rate": 3.361344537815126e-07,
+      "loss": 1.0793,
+      "step": 4000
+    },
+    {
+      "epoch": 7.0,
+      "eval_accuracy": 0.9211658841940532,
+      "eval_loss": 1.1223890781402588,
+      "eval_model_preparation_time": 0.0049,
+      "eval_runtime": 228.3054,
+      "eval_samples_per_second": 111.955,
+      "eval_steps_per_second": 3.5,
+      "step": 4200
+    },
+    {
+      "epoch": 7.5,
+      "grad_norm": 1.9194189310073853,
+      "learning_rate": 2.5210084033613445e-07,
+      "loss": 1.0186,
+      "step": 4500
+    },
+    {
+      "epoch": 8.0,
+      "eval_accuracy": 0.9250782472613458,
+      "eval_loss": 1.0719075202941895,
+      "eval_model_preparation_time": 0.0049,
+      "eval_runtime": 228.4622,
+      "eval_samples_per_second": 111.878,
+      "eval_steps_per_second": 3.497,
+      "step": 4800
+    },
+    {
+      "epoch": 8.333333333333334,
+      "grad_norm": 1.7687724828720093,
+      "learning_rate": 1.680672268907563e-07,
+      "loss": 0.9775,
+      "step": 5000
+    },
+    {
+      "epoch": 9.0,
+      "eval_accuracy": 0.9281298904538341,
+      "eval_loss": 1.0426900386810303,
+      "eval_model_preparation_time": 0.0049,
+      "eval_runtime": 233.5981,
+      "eval_samples_per_second": 109.419,
+      "eval_steps_per_second": 3.42,
+      "step": 5400
+    },
+    {
+      "epoch": 9.166666666666666,
+      "grad_norm": 1.763899564743042,
+      "learning_rate": 8.403361344537815e-08,
+      "loss": 0.9562,
+      "step": 5500
+    },
+    {
+      "epoch": 10.0,
+      "grad_norm": 3.2325987815856934,
+      "learning_rate": 0.0,
+      "loss": 0.9383,
+      "step": 6000
+    },
+    {
+      "epoch": 10.0,
+      "eval_accuracy": 0.928169014084507,
+      "eval_loss": 1.0335067510604858,
+      "eval_model_preparation_time": 0.0049,
+      "eval_runtime": 233.555,
+      "eval_samples_per_second": 109.439,
+      "eval_steps_per_second": 3.421,
+      "step": 6000
+    }
+  ],
+  "logging_steps": 500,
+  "max_steps": 6000,
+  "num_input_tokens_seen": 0,
+  "num_train_epochs": 10,
+  "save_steps": 500,
+  "stateful_callbacks": {
+    "TrainerControl": {
+      "args": {
+        "should_epoch_stop": false,
+        "should_evaluate": false,
+        "should_log": false,
+        "should_save": true,
+        "should_training_stop": true
+      },
+      "attributes": {}
+    }
+  },
+  "total_flos": 2.97130916699947e+19,
+  "train_batch_size": 64,
+  "trial_name": null,
+  "trial_params": null
+}
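The trainer state records the full 10-epoch run: eval accuracy climbs from about 0.62 after epoch 1 to about 0.928 at epoch 10, and the recorded best_metric (1.0335) matches the final eval_loss, with checkpoint-6000 marked as the best checkpoint. A sketch, assuming the checkpoint folder is available locally, for pulling the per-epoch evaluation numbers out of this file:

```python
# Sketch: extract per-epoch eval accuracy and loss from trainer_state.json.
import json

with open("checkpoint-6000/trainer_state.json") as f:
    state = json.load(f)

# Evaluation entries are the log_history items that carry eval metrics.
eval_entries = [e for e in state["log_history"] if "eval_accuracy" in e]
for e in eval_entries:
    print(f"epoch {e['epoch']:>4}: accuracy={e['eval_accuracy']:.4f} loss={e['eval_loss']:.4f}")
```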
checkpoint-6000/training_args.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:bbc0c982884ddde220b2d119f9fb47fdaddccb089e8ef201a0cf5a3daaebc02b
+size 5112
config.json CHANGED
@@ -1,5 +1,5 @@
 {
-  "_name_or_path": "google/vit-base-patch16-224-in21k",
+  "_name_or_path": "dima806/garbage_types_image_detection",
   "architectures": [
     "ViTForImageClassification"
   ],
model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:6dd8fe7131b97c1a831dda21343aa9c31cf015568ad90a080bc55a1193d58352
+oid sha256:b9d9dec6fe465c468dd9f4cd986e633c5ad770d2a5603c22a74cfeaf6c8327b4
 size 343254736
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:f29d01e98b321d6bc09b19af4a6fa0a48a4bb82d4b942c03e76fd51e739844ef
+oid sha256:bbc0c982884ddde220b2d119f9fb47fdaddccb089e8ef201a0cf5a3daaebc02b
 size 5112
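With the top-level config.json, model.safetensors, and training_args.bin now pointing at the new weights, the published repo serves the checkpoint-6000 model directly. A sketch of end-to-end inference against the repo id referenced in "_name_or_path", assuming that repo resolves publicly on the Hub (the image path is illustrative):

```python
# Sketch: classify an image with the published model via the transformers pipeline.
from transformers import pipeline

classifier = pipeline("image-classification", model="dima806/garbage_types_image_detection")
print(classifier("some_waste_item.jpg"))  # e.g. [{"label": "plastic", "score": ...}, ...]
```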