quantaRoche committed on
Commit
0da512c
verified
1 Parent(s): e880ba9

Upload folder using huggingface_hub

Browse files
config.json ADDED
@@ -0,0 +1,29 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "architectures": [
3
+ "RobertaForQuestionAnswering"
4
+ ],
5
+ "attention_probs_dropout_prob": 0.1,
6
+ "bos_token_id": 0,
7
+ "classifier_dropout": null,
8
+ "dtype": "float32",
9
+ "eos_token_id": 2,
10
+ "gradient_checkpointing": false,
11
+ "hidden_act": "gelu",
12
+ "hidden_dropout_prob": 0.1,
13
+ "hidden_size": 768,
14
+ "initializer_range": 0.02,
15
+ "intermediate_size": 3072,
16
+ "language": "english",
17
+ "layer_norm_eps": 1e-05,
18
+ "max_position_embeddings": 514,
19
+ "model_type": "roberta",
20
+ "name": "Roberta",
21
+ "num_attention_heads": 12,
22
+ "num_hidden_layers": 12,
23
+ "pad_token_id": 1,
24
+ "position_embedding_type": "absolute",
25
+ "transformers_version": "4.57.0",
26
+ "type_vocab_size": 1,
27
+ "use_cache": true,
28
+ "vocab_size": 50265
29
+ }
model.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ada7b33c7a257e4b70f2736c3d8c1a4c673baab1519752d6b9e401e9a580b0d7
3
+ size 496250232
special_tokens_map.json ADDED
@@ -0,0 +1,51 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "bos_token": {
3
+ "content": "<s>",
4
+ "lstrip": false,
5
+ "normalized": true,
6
+ "rstrip": false,
7
+ "single_word": false
8
+ },
9
+ "cls_token": {
10
+ "content": "<s>",
11
+ "lstrip": false,
12
+ "normalized": true,
13
+ "rstrip": false,
14
+ "single_word": false
15
+ },
16
+ "eos_token": {
17
+ "content": "</s>",
18
+ "lstrip": false,
19
+ "normalized": true,
20
+ "rstrip": false,
21
+ "single_word": false
22
+ },
23
+ "mask_token": {
24
+ "content": "<mask>",
25
+ "lstrip": true,
26
+ "normalized": true,
27
+ "rstrip": false,
28
+ "single_word": false
29
+ },
30
+ "pad_token": {
31
+ "content": "<pad>",
32
+ "lstrip": false,
33
+ "normalized": true,
34
+ "rstrip": false,
35
+ "single_word": false
36
+ },
37
+ "sep_token": {
38
+ "content": "</s>",
39
+ "lstrip": false,
40
+ "normalized": true,
41
+ "rstrip": false,
42
+ "single_word": false
43
+ },
44
+ "unk_token": {
45
+ "content": "<unk>",
46
+ "lstrip": false,
47
+ "normalized": true,
48
+ "rstrip": false,
49
+ "single_word": false
50
+ }
51
+ }
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1,60 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "add_prefix_space": false,
3
+ "added_tokens_decoder": {
4
+ "0": {
5
+ "content": "<s>",
6
+ "lstrip": false,
7
+ "normalized": true,
8
+ "rstrip": false,
9
+ "single_word": false,
10
+ "special": true
11
+ },
12
+ "1": {
13
+ "content": "<pad>",
14
+ "lstrip": false,
15
+ "normalized": true,
16
+ "rstrip": false,
17
+ "single_word": false,
18
+ "special": true
19
+ },
20
+ "2": {
21
+ "content": "</s>",
22
+ "lstrip": false,
23
+ "normalized": true,
24
+ "rstrip": false,
25
+ "single_word": false,
26
+ "special": true
27
+ },
28
+ "3": {
29
+ "content": "<unk>",
30
+ "lstrip": false,
31
+ "normalized": true,
32
+ "rstrip": false,
33
+ "single_word": false,
34
+ "special": true
35
+ },
36
+ "50264": {
37
+ "content": "<mask>",
38
+ "lstrip": true,
39
+ "normalized": true,
40
+ "rstrip": false,
41
+ "single_word": false,
42
+ "special": true
43
+ }
44
+ },
45
+ "bos_token": "<s>",
46
+ "clean_up_tokenization_spaces": false,
47
+ "cls_token": "<s>",
48
+ "do_lower_case": false,
49
+ "eos_token": "</s>",
50
+ "errors": "replace",
51
+ "extra_special_tokens": {},
52
+ "full_tokenizer_file": null,
53
+ "mask_token": "<mask>",
54
+ "model_max_length": 512,
55
+ "pad_token": "<pad>",
56
+ "sep_token": "</s>",
57
+ "tokenizer_class": "RobertaTokenizer",
58
+ "trim_offsets": true,
59
+ "unk_token": "<unk>"
60
+ }
trainer_state.json ADDED
@@ -0,0 +1,268 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "best_global_step": 70,
3
+ "best_metric": 80.06465424867399,
4
+ "best_model_checkpoint": "./finetuned_models/roberta-base-squad2-nq-nasa\\checkpoint-70",
5
+ "epoch": 4.5,
6
+ "eval_steps": 10,
7
+ "global_step": 90,
8
+ "is_hyper_param_search": false,
9
+ "is_local_process_zero": true,
10
+ "is_world_process_zero": true,
11
+ "log_history": [
12
+ {
13
+ "epoch": 0.25,
14
+ "grad_norm": 78.77288818359375,
15
+ "learning_rate": 2.88e-05,
16
+ "loss": 2.4514,
17
+ "step": 5
18
+ },
19
+ {
20
+ "epoch": 0.5,
21
+ "grad_norm": 38.788047790527344,
22
+ "learning_rate": 2.7300000000000003e-05,
23
+ "loss": 2.5046,
24
+ "step": 10
25
+ },
26
+ {
27
+ "epoch": 0.5,
28
+ "eval_HasAns_f1": 72.05322567724542,
29
+ "eval_NoAns_f1": 0.0,
30
+ "eval_exact": 58.0,
31
+ "eval_f1": 72.05322567724542,
32
+ "eval_loss": 1.7548620700836182,
33
+ "eval_runtime": 0.281,
34
+ "eval_samples_per_second": 177.92,
35
+ "eval_steps_per_second": 14.234,
36
+ "step": 10
37
+ },
38
+ {
39
+ "epoch": 0.75,
40
+ "grad_norm": 39.58235168457031,
41
+ "learning_rate": 2.58e-05,
42
+ "loss": 2.161,
43
+ "step": 15
44
+ },
45
+ {
46
+ "epoch": 1.0,
47
+ "grad_norm": 27.151378631591797,
48
+ "learning_rate": 2.43e-05,
49
+ "loss": 1.5071,
50
+ "step": 20
51
+ },
52
+ {
53
+ "epoch": 1.0,
54
+ "eval_HasAns_f1": 79.20518851115948,
55
+ "eval_NoAns_f1": 0.0,
56
+ "eval_exact": 66.0,
57
+ "eval_f1": 79.20518851115948,
58
+ "eval_loss": 1.5741324424743652,
59
+ "eval_runtime": 0.2825,
60
+ "eval_samples_per_second": 176.976,
61
+ "eval_steps_per_second": 14.158,
62
+ "step": 20
63
+ },
64
+ {
65
+ "epoch": 1.25,
66
+ "grad_norm": 22.792789459228516,
67
+ "learning_rate": 2.2800000000000002e-05,
68
+ "loss": 1.1212,
69
+ "step": 25
70
+ },
71
+ {
72
+ "epoch": 1.5,
73
+ "grad_norm": 16.58112907409668,
74
+ "learning_rate": 2.13e-05,
75
+ "loss": 1.0362,
76
+ "step": 30
77
+ },
78
+ {
79
+ "epoch": 1.5,
80
+ "eval_HasAns_f1": 72.06465424867399,
81
+ "eval_NoAns_f1": 0.0,
82
+ "eval_exact": 58.0,
83
+ "eval_f1": 72.06465424867399,
84
+ "eval_loss": 1.6612951755523682,
85
+ "eval_runtime": 0.2746,
86
+ "eval_samples_per_second": 182.093,
87
+ "eval_steps_per_second": 14.567,
88
+ "step": 30
89
+ },
90
+ {
91
+ "epoch": 1.75,
92
+ "grad_norm": 28.810911178588867,
93
+ "learning_rate": 1.98e-05,
94
+ "loss": 1.0579,
95
+ "step": 35
96
+ },
97
+ {
98
+ "epoch": 2.0,
99
+ "grad_norm": 18.168163299560547,
100
+ "learning_rate": 1.83e-05,
101
+ "loss": 0.7972,
102
+ "step": 40
103
+ },
104
+ {
105
+ "epoch": 2.0,
106
+ "eval_HasAns_f1": 71.7789399629597,
107
+ "eval_NoAns_f1": 0.0,
108
+ "eval_exact": 56.0,
109
+ "eval_f1": 71.7789399629597,
110
+ "eval_loss": 1.7468838691711426,
111
+ "eval_runtime": 0.2855,
112
+ "eval_samples_per_second": 175.114,
113
+ "eval_steps_per_second": 14.009,
114
+ "step": 40
115
+ },
116
+ {
117
+ "epoch": 2.25,
118
+ "grad_norm": 12.084095001220703,
119
+ "learning_rate": 1.6800000000000002e-05,
120
+ "loss": 0.7772,
121
+ "step": 45
122
+ },
123
+ {
124
+ "epoch": 2.5,
125
+ "grad_norm": 15.065386772155762,
126
+ "learning_rate": 1.53e-05,
127
+ "loss": 0.3204,
128
+ "step": 50
129
+ },
130
+ {
131
+ "epoch": 2.5,
132
+ "eval_HasAns_f1": 78.35036853438828,
133
+ "eval_NoAns_f1": 0.0,
134
+ "eval_exact": 66.0,
135
+ "eval_f1": 78.35036853438828,
136
+ "eval_loss": 1.749760389328003,
137
+ "eval_runtime": 0.2837,
138
+ "eval_samples_per_second": 176.263,
139
+ "eval_steps_per_second": 14.101,
140
+ "step": 50
141
+ },
142
+ {
143
+ "epoch": 2.75,
144
+ "grad_norm": 14.943965911865234,
145
+ "learning_rate": 1.3800000000000002e-05,
146
+ "loss": 0.5864,
147
+ "step": 55
148
+ },
149
+ {
150
+ "epoch": 3.0,
151
+ "grad_norm": 69.38665771484375,
152
+ "learning_rate": 1.2299999999999999e-05,
153
+ "loss": 0.4019,
154
+ "step": 60
155
+ },
156
+ {
157
+ "epoch": 3.0,
158
+ "eval_HasAns_f1": 76.06465424867399,
159
+ "eval_NoAns_f1": 0.0,
160
+ "eval_exact": 62.0,
161
+ "eval_f1": 76.06465424867399,
162
+ "eval_loss": 1.8023927211761475,
163
+ "eval_runtime": 0.2736,
164
+ "eval_samples_per_second": 182.767,
165
+ "eval_steps_per_second": 14.621,
166
+ "step": 60
167
+ },
168
+ {
169
+ "epoch": 3.25,
170
+ "grad_norm": 4.20263671875,
171
+ "learning_rate": 1.08e-05,
172
+ "loss": 0.3497,
173
+ "step": 65
174
+ },
175
+ {
176
+ "epoch": 3.5,
177
+ "grad_norm": 19.26348876953125,
178
+ "learning_rate": 9.3e-06,
179
+ "loss": 0.4772,
180
+ "step": 70
181
+ },
182
+ {
183
+ "epoch": 3.5,
184
+ "eval_HasAns_f1": 80.06465424867399,
185
+ "eval_NoAns_f1": 0.0,
186
+ "eval_exact": 66.0,
187
+ "eval_f1": 80.06465424867399,
188
+ "eval_loss": 1.7305716276168823,
189
+ "eval_runtime": 0.2777,
190
+ "eval_samples_per_second": 180.029,
191
+ "eval_steps_per_second": 14.402,
192
+ "step": 70
193
+ },
194
+ {
195
+ "epoch": 3.75,
196
+ "grad_norm": 14.338303565979004,
197
+ "learning_rate": 7.8e-06,
198
+ "loss": 0.268,
199
+ "step": 75
200
+ },
201
+ {
202
+ "epoch": 4.0,
203
+ "grad_norm": 2.6691396236419678,
204
+ "learning_rate": 6.3e-06,
205
+ "loss": 0.3622,
206
+ "step": 80
207
+ },
208
+ {
209
+ "epoch": 4.0,
210
+ "eval_HasAns_f1": 78.06465424867399,
211
+ "eval_NoAns_f1": 0.0,
212
+ "eval_exact": 64.0,
213
+ "eval_f1": 78.06465424867399,
214
+ "eval_loss": 1.7988979816436768,
215
+ "eval_runtime": 0.276,
216
+ "eval_samples_per_second": 181.159,
217
+ "eval_steps_per_second": 14.493,
218
+ "step": 80
219
+ },
220
+ {
221
+ "epoch": 4.25,
222
+ "grad_norm": 7.720816135406494,
223
+ "learning_rate": 4.800000000000001e-06,
224
+ "loss": 0.1835,
225
+ "step": 85
226
+ },
227
+ {
228
+ "epoch": 4.5,
229
+ "grad_norm": 13.70312786102295,
230
+ "learning_rate": 3.3e-06,
231
+ "loss": 0.4416,
232
+ "step": 90
233
+ },
234
+ {
235
+ "epoch": 4.5,
236
+ "eval_HasAns_f1": 78.16991740656873,
237
+ "eval_NoAns_f1": 0.0,
238
+ "eval_exact": 66.0,
239
+ "eval_f1": 78.16991740656873,
240
+ "eval_loss": 1.8446077108383179,
241
+ "eval_runtime": 0.2905,
242
+ "eval_samples_per_second": 172.105,
243
+ "eval_steps_per_second": 13.768,
244
+ "step": 90
245
+ }
246
+ ],
247
+ "logging_steps": 5,
248
+ "max_steps": 100,
249
+ "num_input_tokens_seen": 0,
250
+ "num_train_epochs": 5,
251
+ "save_steps": 10,
252
+ "stateful_callbacks": {
253
+ "TrainerControl": {
254
+ "args": {
255
+ "should_epoch_stop": false,
256
+ "should_evaluate": false,
257
+ "should_log": false,
258
+ "should_save": true,
259
+ "should_training_stop": false
260
+ },
261
+ "attributes": {}
262
+ }
263
+ },
264
+ "total_flos": 141100248637440.0,
265
+ "train_batch_size": 8,
266
+ "trial_name": null,
267
+ "trial_params": null
268
+ }
training_args.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4cc9b93f54aee0cf1f95e663d30fcd4fcc3cd4f5822d07561328a691ac9055b8
3
+ size 5905
vocab.json ADDED
The diff for this file is too large to render. See raw diff