abarbosa committed
Commit 96dc152 · verified · 1 Parent(s): 1c9075d

Pushing fine-tuned model to Hugging Face Hub

README.md ADDED
@@ -0,0 +1,48 @@
+
+---
+language:
+- pt
+- en
+tags:
+- aes
+datasets:
+- kamel-usp/aes_enem_dataset
+base_model: neuralmind/bert-large-portuguese-cased
+metrics:
+- accuracy
+- qwk
+library_name: transformers
+model-index:
+- name: bert-large-portuguese-cased-encoder_classification-C5-essay_only
+  results:
+  - task:
+      type: text-classification
+      name: Automated Essay Score
+    dataset:
+      name: Automated Essay Score ENEM Dataset
+      type: kamel-usp/aes_enem_dataset
+      config: JBCS2025
+      split: test
+    metrics:
+    - name: Macro F1
+      type: f1
+      value: 0.3013676792556103
+    - name: QWK
+      type: qwk
+      value: 0.4591540693235608
+    - name: Weighted Macro F1
+      type: f1
+      value: 0.3597310868375336
+---
+# Model ID: bert-large-portuguese-cased-encoder_classification-C5-essay_only
+## Results
+|                  |   test_data |
+|:-----------------|------------:|
+| eval_accuracy    |    0.413043 |
+| eval_RMSE        |   59.758    |
+| eval_QWK         |    0.459154 |
+| eval_Macro_F1    |    0.301368 |
+| eval_Weighted_F1 |    0.359731 |
+| eval_Micro_F1    |    0.413043 |
+| eval_HDIV       |    0.144928 |
+
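The card above reports held-out metrics only. A minimal inference sketch, assuming the model is pulled from this repository (the repo id below is a placeholder, not the actual Hub path) and that predictions are decoded through the `id2label` map in the `config.json` added by this same commit:

```python
# Hedged sketch: repo_id is a placeholder; substitute the real Hub path.
import torch
from transformers import AutoModelForSequenceClassification, AutoTokenizer

repo_id = "<org>/bert-large-portuguese-cased-encoder_classification-C5-essay_only"

tokenizer = AutoTokenizer.from_pretrained(repo_id)
model = AutoModelForSequenceClassification.from_pretrained(repo_id)
model.eval()

essay = "Texto da redação..."  # placeholder essay text
inputs = tokenizer(essay, truncation=True, max_length=512, return_tensors="pt")

with torch.no_grad():
    logits = model(**inputs).logits

# config.json maps class indices 0-5 to the ENEM C5 grade scale (0, 40, ..., 200).
predicted_grade = model.config.id2label[logits.argmax(dim=-1).item()]
print(predicted_grade)
```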
config.json ADDED
@@ -0,0 +1,47 @@
+{
+  "architectures": [
+    "BertForSequenceClassification"
+  ],
+  "attention_probs_dropout_prob": 0.1,
+  "classifier_dropout": null,
+  "directionality": "bidi",
+  "hidden_act": "gelu",
+  "hidden_dropout_prob": 0.1,
+  "hidden_size": 1024,
+  "id2label": {
+    "0": 0,
+    "1": 40,
+    "2": 80,
+    "3": 120,
+    "4": 160,
+    "5": 200
+  },
+  "initializer_range": 0.02,
+  "intermediate_size": 4096,
+  "label2id": {
+    "0": 0,
+    "40": 1,
+    "80": 2,
+    "120": 3,
+    "160": 4,
+    "200": 5
+  },
+  "layer_norm_eps": 1e-12,
+  "max_position_embeddings": 512,
+  "model_type": "bert",
+  "num_attention_heads": 16,
+  "num_hidden_layers": 24,
+  "output_past": true,
+  "pad_token_id": 0,
+  "pooler_fc_size": 768,
+  "pooler_num_attention_heads": 12,
+  "pooler_num_fc_layers": 3,
+  "pooler_size_per_head": 128,
+  "pooler_type": "first_token_transform",
+  "position_embedding_type": "absolute",
+  "torch_dtype": "float32",
+  "transformers_version": "4.53.1",
+  "type_vocab_size": 2,
+  "use_cache": true,
+  "vocab_size": 29794
+}
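The `id2label`/`label2id` pair encodes the modeling choice: competence C5 is treated as 6-way classification over the discrete ENEM grade scale rather than as regression. A small sketch of the round trip between classifier indices and grades:

```python
# Round trip between classifier indices and ENEM C5 grades, as in config.json.
id2label = {0: 0, 1: 40, 2: 80, 3: 120, 4: 160, 5: 200}
label2id = {grade: idx for idx, grade in id2label.items()}

assert label2id[120] == 3              # grade 120 is class index 3
assert id2label[label2id[200]] == 200  # mapping is invertible
```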
emissions.csv ADDED
@@ -0,0 +1,2 @@
+timestamp,project_name,run_id,experiment_id,duration,emissions,emissions_rate,cpu_power,gpu_power,ram_power,cpu_energy,gpu_energy,ram_energy,energy_consumed,country_name,country_iso_code,region,cloud_provider,cloud_region,os,python_version,codecarbon_version,cpu_count,cpu_model,gpu_count,gpu_model,longitude,latitude,ram_total_size,tracking_mode,on_cloud,pue
+2025-07-09T17:01:24,jbcs2025,eeac1237-10cc-489f-9fff-9d2b32dc2447,bert-large-portuguese-cased-encoder_classification-C5-essay_only,218.28439827199327,0.004794613344546399,2.1964984133094436e-05,53.71,151.8616928158688,58.0,0.0027177277048807927,0.013759523507610005,0.003451893176540842,0.01992914438903164,Romania,ROU,gorj county,,,Linux-5.15.0-143-generic-x86_64-with-glibc2.35,3.12.11,3.0.2,36,Intel(R) Xeon(R) Gold 6248R CPU @ 3.00GHz,1,1 x NVIDIA RTX A6000,23.2904,45.0489,393.6063117980957,machine,N,1.0
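The columns follow CodeCarbon's CSV schema (codecarbon 3.0.2 per the row above). A hedged sketch of how such a file is typically produced; the actual tracker setup is not part of this commit, and `train()` is a stand-in for the fine-tuning entry point:

```python
# Hedged sketch of CodeCarbon usage consistent with the columns above.
from codecarbon import EmissionsTracker

tracker = EmissionsTracker(
    project_name="jbcs2025",  # matches the project_name column
    experiment_id="bert-large-portuguese-cased-encoder_classification-C5-essay_only",
)
tracker.start()
try:
    train()  # placeholder for the actual fine-tuning run
finally:
    emissions_kg = tracker.stop()  # appends a row like the one above to emissions.csv
    print(f"Total emissions: {emissions_kg:.4f} kg CO2eq")
```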
evaluation_results.csv ADDED
@@ -0,0 +1,4 @@
+eval_loss,eval_model_preparation_time,eval_accuracy,eval_RMSE,eval_QWK,eval_HDIV,eval_Macro_F1,eval_Micro_F1,eval_Weighted_F1,eval_TP_0,eval_TN_0,eval_FP_0,eval_FN_0,eval_TP_1,eval_TN_1,eval_FP_1,eval_FN_1,eval_TP_2,eval_TN_2,eval_FP_2,eval_FN_2,eval_TP_3,eval_TN_3,eval_FP_3,eval_FN_3,eval_TP_4,eval_TN_4,eval_FP_4,eval_FN_4,eval_TP_5,eval_TN_5,eval_FP_5,eval_FN_5,eval_runtime,eval_samples_per_second,eval_steps_per_second,epoch,reference,timestamp,id
+1.9279780387878418,0.0037,0.13636363636363635,100.24213110506405,-0.014913745130773481,0.43181818181818177,0.05408522170328086,0.13636363636363635,0.053618698662808675,16,7,108,1,0,96,0,36,0,112,0,20,2,93,6,31,0,108,0,24,0,130,0,2,1.0644,124.016,8.456,-1,validation_before_training,2025-07-09 16:57:57,bert-large-portuguese-cased-encoder_classification-C5-essay_only
+1.7058095932006836,0.0037,0.3106060606060606,54.49492609130661,0.5028286803591193,0.08333333333333337,0.27143120393120396,0.3106060606060606,0.3049726999727,6,113,2,11,7,91,5,29,7,79,33,13,6,83,16,27,15,73,35,9,0,130,0,2,0.8907,148.192,10.104,11.0,validation_after_training,2025-07-09 16:57:57,bert-large-portuguese-cased-encoder_classification-C5-essay_only
+1.5725419521331787,0.0037,0.41304347826086957,59.75796593554388,0.45915406932356084,0.14492753623188404,0.3013676792556103,0.41304347826086957,0.3597310868375336,5,115,1,17,6,94,12,26,14,82,32,10,3,112,1,22,29,71,35,3,0,135,0,3,0.9202,149.972,9.781,11.0,test_results,2025-07-09 16:57:57,bert-large-portuguese-cased-encoder_classification-C5-essay_only
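The `eval_QWK` column is quadratic-weighted Cohen's kappa, the model-selection metric in the run log below. Assuming the standard definition, it can be reproduced with scikit-learn:

```python
# Minimal QWK sketch with hypothetical labels (not the experiment's data).
from sklearn.metrics import cohen_kappa_score

y_true = [160, 120, 80, 200, 0, 160]   # hypothetical gold C5 grades
y_pred = [160, 80, 80, 160, 40, 160]   # hypothetical predictions

print(cohen_kappa_score(y_true, y_pred, weights="quadratic"))
```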
model.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d7410bfd9a65bc8d306395c2d870b60f27dfdd30867aa59ae6f638ce4bab09a8
+size 1337657272
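The three lines above are a Git LFS pointer, not the weights themselves; the `oid` records the SHA-256 of the real ~1.34 GB file. A sketch for verifying a downloaded copy against it:

```python
# Verify a downloaded model.safetensors against the LFS pointer's oid.
import hashlib

def sha256_of(path: str) -> str:
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):  # read in 1 MiB chunks
            h.update(chunk)
    return h.hexdigest()

expected = "d7410bfd9a65bc8d306395c2d870b60f27dfdd30867aa59ae6f638ce4bab09a8"
assert sha256_of("model.safetensors") == expected
```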
run_experiment.log ADDED
@@ -0,0 +1,403 @@
+[2025-07-09 16:57:42,869][__main__][INFO] - cache_dir: /tmp/
+dataset:
+  name: kamel-usp/aes_enem_dataset
+  split: JBCS2025
+training_params:
+  seed: 42
+  num_train_epochs: 20
+  logging_steps: 100
+  metric_for_best_model: QWK
+  bf16: true
+bootstrap:
+  enabled: true
+  n_bootstrap: 10000
+  bootstrap_seed: 42
+  metrics:
+  - QWK
+  - Macro_F1
+  - Weighted_F1
+post_training_results:
+  model_path: /workspace/jbcs2025/outputs/2025-03-24/20-42-59
+experiments:
+  model:
+    name: neuralmind/bert-large-portuguese-cased
+    type: encoder_classification
+    num_labels: 6
+    output_dir: ./results/bertimbau_large/C5
+    logging_dir: ./logs/bertimbau_large/C5
+    best_model_dir: ./results/bertimbau_large/C5/best_model
+  tokenizer:
+    name: neuralmind/bert-large-portuguese-cased
+  dataset:
+    grade_index: 4
+    use_full_context: false
+  training_params:
+    weight_decay: 0.01
+    warmup_ratio: 0.1
+    learning_rate: 5.0e-05
+    train_batch_size: 16
+    eval_batch_size: 16
+    gradient_accumulation_steps: 1
+    gradient_checkpointing: false
+
+[2025-07-09 16:57:46,732][__main__][INFO] - GPU 0: NVIDIA RTX A6000 | TDP ≈ 300 W
+[2025-07-09 16:57:46,733][__main__][INFO] - Starting the Fine Tuning training process.
+[2025-07-09 16:57:52,183][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /tmp/models--neuralmind--bert-large-portuguese-cased/snapshots/aa302f6ea73b759f7df9cad58bd272127b67ec28/config.json
+[2025-07-09 16:57:52,184][transformers.configuration_utils][INFO] - Model config BertConfig {
+  "architectures": [
+    "BertForMaskedLM"
+  ],
+  "attention_probs_dropout_prob": 0.1,
+  "classifier_dropout": null,
+  "directionality": "bidi",
+  "hidden_act": "gelu",
+  "hidden_dropout_prob": 0.1,
+  "hidden_size": 1024,
+  "initializer_range": 0.02,
+  "intermediate_size": 4096,
+  "layer_norm_eps": 1e-12,
+  "max_position_embeddings": 512,
+  "model_type": "bert",
+  "num_attention_heads": 16,
+  "num_hidden_layers": 24,
+  "output_past": true,
+  "pad_token_id": 0,
+  "pooler_fc_size": 768,
+  "pooler_num_attention_heads": 12,
+  "pooler_num_fc_layers": 3,
+  "pooler_size_per_head": 128,
+  "pooler_type": "first_token_transform",
+  "position_embedding_type": "absolute",
+  "transformers_version": "4.53.1",
+  "type_vocab_size": 2,
+  "use_cache": true,
+  "vocab_size": 29794
+}
+
+[2025-07-09 16:57:52,386][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /tmp/models--neuralmind--bert-large-portuguese-cased/snapshots/aa302f6ea73b759f7df9cad58bd272127b67ec28/config.json
+[2025-07-09 16:57:52,630][transformers.tokenization_utils_base][INFO] - loading file vocab.txt from cache at /tmp/models--neuralmind--bert-large-portuguese-cased/snapshots/aa302f6ea73b759f7df9cad58bd272127b67ec28/vocab.txt
+[2025-07-09 16:57:52,631][transformers.tokenization_utils_base][INFO] - loading file tokenizer.json from cache at None
+[2025-07-09 16:57:52,631][transformers.tokenization_utils_base][INFO] - loading file added_tokens.json from cache at /tmp/models--neuralmind--bert-large-portuguese-cased/snapshots/aa302f6ea73b759f7df9cad58bd272127b67ec28/added_tokens.json
+[2025-07-09 16:57:52,631][transformers.tokenization_utils_base][INFO] - loading file special_tokens_map.json from cache at /tmp/models--neuralmind--bert-large-portuguese-cased/snapshots/aa302f6ea73b759f7df9cad58bd272127b67ec28/special_tokens_map.json
+[2025-07-09 16:57:52,631][transformers.tokenization_utils_base][INFO] - loading file tokenizer_config.json from cache at /tmp/models--neuralmind--bert-large-portuguese-cased/snapshots/aa302f6ea73b759f7df9cad58bd272127b67ec28/tokenizer_config.json
+[2025-07-09 16:57:52,631][transformers.tokenization_utils_base][INFO] - loading file chat_template.jinja from cache at None
+[2025-07-09 16:57:52,631][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /tmp/models--neuralmind--bert-large-portuguese-cased/snapshots/aa302f6ea73b759f7df9cad58bd272127b67ec28/config.json
+[2025-07-09 16:57:52,662][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /tmp/models--neuralmind--bert-large-portuguese-cased/snapshots/aa302f6ea73b759f7df9cad58bd272127b67ec28/config.json
+[2025-07-09 16:57:52,682][__main__][INFO] - Tokenizer function parameters- Padding:longest; Truncation: True; Use Full Context: False
+[2025-07-09 16:57:53,334][__main__][INFO] -
+Token statistics for 'train' split:
+[2025-07-09 16:57:53,334][__main__][INFO] - Total examples: 500
+[2025-07-09 16:57:53,334][__main__][INFO] - Min tokens: 512
+[2025-07-09 16:57:53,334][__main__][INFO] - Max tokens: 512
+[2025-07-09 16:57:53,334][__main__][INFO] - Avg tokens: 512.00
+[2025-07-09 16:57:53,335][__main__][INFO] - Std tokens: 0.00
+[2025-07-09 16:57:53,430][__main__][INFO] -
+Token statistics for 'validation' split:
+[2025-07-09 16:57:53,431][__main__][INFO] - Total examples: 132
+[2025-07-09 16:57:53,431][__main__][INFO] - Min tokens: 512
+[2025-07-09 16:57:53,431][__main__][INFO] - Max tokens: 512
+[2025-07-09 16:57:53,431][__main__][INFO] - Avg tokens: 512.00
+[2025-07-09 16:57:53,431][__main__][INFO] - Std tokens: 0.00
+[2025-07-09 16:57:53,529][__main__][INFO] -
+Token statistics for 'test' split:
+[2025-07-09 16:57:53,529][__main__][INFO] - Total examples: 138
+[2025-07-09 16:57:53,529][__main__][INFO] - Min tokens: 512
+[2025-07-09 16:57:53,529][__main__][INFO] - Max tokens: 512
+[2025-07-09 16:57:53,529][__main__][INFO] - Avg tokens: 512.00
+[2025-07-09 16:57:53,529][__main__][INFO] - Std tokens: 0.00
+[2025-07-09 16:57:53,529][__main__][INFO] - If token statistics are the same (max, avg, min) keep in mind that this is due to batched tokenization and padding.
+[2025-07-09 16:57:53,529][__main__][INFO] - Model max length: 512. If it is the same as stats, then there is a high chance that sequences are being truncated.
+[2025-07-09 16:57:53,738][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /tmp/models--neuralmind--bert-large-portuguese-cased/snapshots/aa302f6ea73b759f7df9cad58bd272127b67ec28/config.json
+[2025-07-09 16:57:53,738][transformers.configuration_utils][INFO] - Model config BertConfig {
+  "architectures": [
+    "BertForMaskedLM"
+  ],
+  "attention_probs_dropout_prob": 0.1,
+  "classifier_dropout": null,
+  "directionality": "bidi",
+  "hidden_act": "gelu",
+  "hidden_dropout_prob": 0.1,
+  "hidden_size": 1024,
+  "id2label": {
+    "0": 0,
+    "1": 40,
+    "2": 80,
+    "3": 120,
+    "4": 160,
+    "5": 200
+  },
+  "initializer_range": 0.02,
+  "intermediate_size": 4096,
+  "label2id": {
+    "0": 0,
+    "40": 1,
+    "80": 2,
+    "120": 3,
+    "160": 4,
+    "200": 5
+  },
+  "layer_norm_eps": 1e-12,
+  "max_position_embeddings": 512,
+  "model_type": "bert",
+  "num_attention_heads": 16,
+  "num_hidden_layers": 24,
+  "output_past": true,
+  "pad_token_id": 0,
+  "pooler_fc_size": 768,
+  "pooler_num_attention_heads": 12,
+  "pooler_num_fc_layers": 3,
+  "pooler_size_per_head": 128,
+  "pooler_type": "first_token_transform",
+  "position_embedding_type": "absolute",
+  "transformers_version": "4.53.1",
+  "type_vocab_size": 2,
+  "use_cache": true,
+  "vocab_size": 29794
+}
+
+[2025-07-09 16:57:54,136][transformers.modeling_utils][INFO] - loading weights file pytorch_model.bin from cache at /tmp/models--neuralmind--bert-large-portuguese-cased/snapshots/aa302f6ea73b759f7df9cad58bd272127b67ec28/pytorch_model.bin
+[2025-07-09 16:57:54,377][transformers.safetensors_conversion][INFO] - Attempting to create safetensors variant
+[2025-07-09 16:57:55,046][transformers.modeling_utils][INFO] - Since the `torch_dtype` attribute can't be found in model's config object, will use torch_dtype={torch_dtype} as derived from model's weights
+[2025-07-09 16:57:55,046][transformers.modeling_utils][INFO] - Instantiating BertForSequenceClassification model under default dtype torch.float32.
+[2025-07-09 16:57:55,324][transformers.safetensors_conversion][INFO] - Safetensors PR exists
+[2025-07-09 16:57:56,969][transformers.modeling_utils][INFO] - Some weights of the model checkpoint at neuralmind/bert-large-portuguese-cased were not used when initializing BertForSequenceClassification: ['cls.predictions.bias', 'cls.predictions.decoder.weight', 'cls.predictions.transform.LayerNorm.bias', 'cls.predictions.transform.LayerNorm.weight', 'cls.predictions.transform.dense.bias', 'cls.predictions.transform.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']
+- This IS expected if you are initializing BertForSequenceClassification from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).
+- This IS NOT expected if you are initializing BertForSequenceClassification from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).
+[2025-07-09 16:57:56,970][transformers.modeling_utils][WARNING] - Some weights of BertForSequenceClassification were not initialized from the model checkpoint at neuralmind/bert-large-portuguese-cased and are newly initialized: ['classifier.bias', 'classifier.weight']
+You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.
+[2025-07-09 16:57:56,979][transformers.training_args][INFO] - PyTorch: setting up devices
+[2025-07-09 16:57:57,001][__main__][INFO] - Total steps: 620. Number of warmup steps: 62
+[2025-07-09 16:57:57,009][transformers.trainer][INFO] - You have loaded a model on multiple GPUs. `is_model_parallel` attribute will be force-set to `True` to avoid any unexpected behavior such as device placement mismatching.
+[2025-07-09 16:57:57,035][transformers.trainer][INFO] - Using auto half precision backend
+[2025-07-09 16:57:57,038][transformers.trainer][INFO] - The following columns in the Evaluation set don't have a corresponding argument in `BertForSequenceClassification.forward` and have been ignored: grades, essay_year, id_prompt, essay_text, reference, id, supporting_text, prompt. If grades, essay_year, id_prompt, essay_text, reference, id, supporting_text, prompt are not expected by `BertForSequenceClassification.forward`, you can safely ignore this message.
+[2025-07-09 16:57:57,044][transformers.trainer][INFO] -
+***** Running Evaluation *****
+[2025-07-09 16:57:57,044][transformers.trainer][INFO] - Num examples = 132
+[2025-07-09 16:57:57,045][transformers.trainer][INFO] - Batch size = 16
+[2025-07-09 16:57:58,469][transformers.trainer][INFO] - The following columns in the Training set don't have a corresponding argument in `BertForSequenceClassification.forward` and have been ignored: grades, essay_year, id_prompt, essay_text, reference, id, supporting_text, prompt. If grades, essay_year, id_prompt, essay_text, reference, id, supporting_text, prompt are not expected by `BertForSequenceClassification.forward`, you can safely ignore this message.
+[2025-07-09 16:57:58,484][transformers.trainer][INFO] - ***** Running training *****
+[2025-07-09 16:57:58,484][transformers.trainer][INFO] - Num examples = 500
+[2025-07-09 16:57:58,484][transformers.trainer][INFO] - Num Epochs = 20
+[2025-07-09 16:57:58,484][transformers.trainer][INFO] - Instantaneous batch size per device = 16
+[2025-07-09 16:57:58,484][transformers.trainer][INFO] - Total train batch size (w. parallel, distributed & accumulation) = 16
+[2025-07-09 16:57:58,484][transformers.trainer][INFO] - Gradient Accumulation steps = 1
+[2025-07-09 16:57:58,484][transformers.trainer][INFO] - Total optimization steps = 640
+[2025-07-09 16:57:58,485][transformers.trainer][INFO] - Number of trainable parameters = 334,402,566
+[2025-07-09 16:58:10,840][transformers.trainer][INFO] - The following columns in the Evaluation set don't have a corresponding argument in `BertForSequenceClassification.forward` and have been ignored: grades, essay_year, id_prompt, essay_text, reference, id, supporting_text, prompt. If grades, essay_year, id_prompt, essay_text, reference, id, supporting_text, prompt are not expected by `BertForSequenceClassification.forward`, you can safely ignore this message.
+[2025-07-09 16:58:10,842][transformers.trainer][INFO] -
+***** Running Evaluation *****
+[2025-07-09 16:58:10,843][transformers.trainer][INFO] - Num examples = 132
+[2025-07-09 16:58:10,843][transformers.trainer][INFO] - Batch size = 16
+[2025-07-09 16:58:11,763][transformers.trainer][INFO] - Saving model checkpoint to /workspace/jbcs2025/outputs/2025-07-09/16-57-42/results/bertimbau_large/C5/checkpoint-32
+[2025-07-09 16:58:11,765][transformers.configuration_utils][INFO] - Configuration saved in /workspace/jbcs2025/outputs/2025-07-09/16-57-42/results/bertimbau_large/C5/checkpoint-32/config.json
+[2025-07-09 16:58:14,552][transformers.modeling_utils][INFO] - Model weights saved in /workspace/jbcs2025/outputs/2025-07-09/16-57-42/results/bertimbau_large/C5/checkpoint-32/model.safetensors
+[2025-07-09 16:58:29,916][transformers.trainer][INFO] - The following columns in the Evaluation set don't have a corresponding argument in `BertForSequenceClassification.forward` and have been ignored: grades, essay_year, id_prompt, essay_text, reference, id, supporting_text, prompt. If grades, essay_year, id_prompt, essay_text, reference, id, supporting_text, prompt are not expected by `BertForSequenceClassification.forward`, you can safely ignore this message.
+[2025-07-09 16:58:29,920][transformers.trainer][INFO] -
+***** Running Evaluation *****
+[2025-07-09 16:58:29,920][transformers.trainer][INFO] - Num examples = 132
+[2025-07-09 16:58:29,920][transformers.trainer][INFO] - Batch size = 16
+[2025-07-09 16:58:30,836][transformers.trainer][INFO] - Saving model checkpoint to /workspace/jbcs2025/outputs/2025-07-09/16-57-42/results/bertimbau_large/C5/checkpoint-64
+[2025-07-09 16:58:30,837][transformers.configuration_utils][INFO] - Configuration saved in /workspace/jbcs2025/outputs/2025-07-09/16-57-42/results/bertimbau_large/C5/checkpoint-64/config.json
+[2025-07-09 16:58:33,080][transformers.modeling_utils][INFO] - Model weights saved in /workspace/jbcs2025/outputs/2025-07-09/16-57-42/results/bertimbau_large/C5/checkpoint-64/model.safetensors
+[2025-07-09 16:58:48,106][transformers.trainer][INFO] - The following columns in the Evaluation set don't have a corresponding argument in `BertForSequenceClassification.forward` and have been ignored: grades, essay_year, id_prompt, essay_text, reference, id, supporting_text, prompt. If grades, essay_year, id_prompt, essay_text, reference, id, supporting_text, prompt are not expected by `BertForSequenceClassification.forward`, you can safely ignore this message.
+[2025-07-09 16:58:48,110][transformers.trainer][INFO] -
+***** Running Evaluation *****
+[2025-07-09 16:58:48,110][transformers.trainer][INFO] - Num examples = 132
+[2025-07-09 16:58:48,110][transformers.trainer][INFO] - Batch size = 16
+[2025-07-09 16:58:49,026][transformers.trainer][INFO] - Saving model checkpoint to /workspace/jbcs2025/outputs/2025-07-09/16-57-42/results/bertimbau_large/C5/checkpoint-96
+[2025-07-09 16:58:49,028][transformers.configuration_utils][INFO] - Configuration saved in /workspace/jbcs2025/outputs/2025-07-09/16-57-42/results/bertimbau_large/C5/checkpoint-96/config.json
+[2025-07-09 16:58:50,997][transformers.modeling_utils][INFO] - Model weights saved in /workspace/jbcs2025/outputs/2025-07-09/16-57-42/results/bertimbau_large/C5/checkpoint-96/model.safetensors
+[2025-07-09 16:58:53,331][transformers.trainer][INFO] - Deleting older checkpoint [/workspace/jbcs2025/outputs/2025-07-09/16-57-42/results/bertimbau_large/C5/checkpoint-32] due to args.save_total_limit
+[2025-07-09 16:58:53,540][transformers.trainer][INFO] - Deleting older checkpoint [/workspace/jbcs2025/outputs/2025-07-09/16-57-42/results/bertimbau_large/C5/checkpoint-64] due to args.save_total_limit
+[2025-07-09 16:59:06,189][transformers.trainer][INFO] - The following columns in the Evaluation set don't have a corresponding argument in `BertForSequenceClassification.forward` and have been ignored: grades, essay_year, id_prompt, essay_text, reference, id, supporting_text, prompt. If grades, essay_year, id_prompt, essay_text, reference, id, supporting_text, prompt are not expected by `BertForSequenceClassification.forward`, you can safely ignore this message.
+[2025-07-09 16:59:06,193][transformers.trainer][INFO] -
+***** Running Evaluation *****
+[2025-07-09 16:59:06,193][transformers.trainer][INFO] - Num examples = 132
+[2025-07-09 16:59:06,193][transformers.trainer][INFO] - Batch size = 16
+[2025-07-09 16:59:07,110][transformers.trainer][INFO] - Saving model checkpoint to /workspace/jbcs2025/outputs/2025-07-09/16-57-42/results/bertimbau_large/C5/checkpoint-128
+[2025-07-09 16:59:07,112][transformers.configuration_utils][INFO] - Configuration saved in /workspace/jbcs2025/outputs/2025-07-09/16-57-42/results/bertimbau_large/C5/checkpoint-128/config.json
+[2025-07-09 16:59:09,263][transformers.modeling_utils][INFO] - Model weights saved in /workspace/jbcs2025/outputs/2025-07-09/16-57-42/results/bertimbau_large/C5/checkpoint-128/model.safetensors
+[2025-07-09 16:59:23,878][transformers.trainer][INFO] - The following columns in the Evaluation set don't have a corresponding argument in `BertForSequenceClassification.forward` and have been ignored: grades, essay_year, id_prompt, essay_text, reference, id, supporting_text, prompt. If grades, essay_year, id_prompt, essay_text, reference, id, supporting_text, prompt are not expected by `BertForSequenceClassification.forward`, you can safely ignore this message.
+[2025-07-09 16:59:23,880][transformers.trainer][INFO] -
+***** Running Evaluation *****
+[2025-07-09 16:59:23,881][transformers.trainer][INFO] - Num examples = 132
+[2025-07-09 16:59:23,881][transformers.trainer][INFO] - Batch size = 16
+[2025-07-09 16:59:24,800][transformers.trainer][INFO] - Saving model checkpoint to /workspace/jbcs2025/outputs/2025-07-09/16-57-42/results/bertimbau_large/C5/checkpoint-160
+[2025-07-09 16:59:24,801][transformers.configuration_utils][INFO] - Configuration saved in /workspace/jbcs2025/outputs/2025-07-09/16-57-42/results/bertimbau_large/C5/checkpoint-160/config.json
+[2025-07-09 16:59:27,049][transformers.modeling_utils][INFO] - Model weights saved in /workspace/jbcs2025/outputs/2025-07-09/16-57-42/results/bertimbau_large/C5/checkpoint-160/model.safetensors
+[2025-07-09 16:59:29,822][transformers.trainer][INFO] - Deleting older checkpoint [/workspace/jbcs2025/outputs/2025-07-09/16-57-42/results/bertimbau_large/C5/checkpoint-128] due to args.save_total_limit
+[2025-07-09 16:59:42,490][transformers.trainer][INFO] - The following columns in the Evaluation set don't have a corresponding argument in `BertForSequenceClassification.forward` and have been ignored: grades, essay_year, id_prompt, essay_text, reference, id, supporting_text, prompt. If grades, essay_year, id_prompt, essay_text, reference, id, supporting_text, prompt are not expected by `BertForSequenceClassification.forward`, you can safely ignore this message.
+[2025-07-09 16:59:42,494][transformers.trainer][INFO] -
+***** Running Evaluation *****
+[2025-07-09 16:59:42,494][transformers.trainer][INFO] - Num examples = 132
+[2025-07-09 16:59:42,494][transformers.trainer][INFO] - Batch size = 16
+[2025-07-09 16:59:43,446][transformers.trainer][INFO] - Saving model checkpoint to /workspace/jbcs2025/outputs/2025-07-09/16-57-42/results/bertimbau_large/C5/checkpoint-192
+[2025-07-09 16:59:43,448][transformers.configuration_utils][INFO] - Configuration saved in /workspace/jbcs2025/outputs/2025-07-09/16-57-42/results/bertimbau_large/C5/checkpoint-192/config.json
+[2025-07-09 16:59:45,659][transformers.modeling_utils][INFO] - Model weights saved in /workspace/jbcs2025/outputs/2025-07-09/16-57-42/results/bertimbau_large/C5/checkpoint-192/model.safetensors
+[2025-07-09 16:59:47,967][transformers.trainer][INFO] - Deleting older checkpoint [/workspace/jbcs2025/outputs/2025-07-09/16-57-42/results/bertimbau_large/C5/checkpoint-96] due to args.save_total_limit
+[2025-07-09 16:59:48,167][transformers.trainer][INFO] - Deleting older checkpoint [/workspace/jbcs2025/outputs/2025-07-09/16-57-42/results/bertimbau_large/C5/checkpoint-160] due to args.save_total_limit
+[2025-07-09 17:00:00,675][transformers.trainer][INFO] - The following columns in the Evaluation set don't have a corresponding argument in `BertForSequenceClassification.forward` and have been ignored: grades, essay_year, id_prompt, essay_text, reference, id, supporting_text, prompt. If grades, essay_year, id_prompt, essay_text, reference, id, supporting_text, prompt are not expected by `BertForSequenceClassification.forward`, you can safely ignore this message.
+[2025-07-09 17:00:00,679][transformers.trainer][INFO] -
+***** Running Evaluation *****
+[2025-07-09 17:00:00,679][transformers.trainer][INFO] - Num examples = 132
+[2025-07-09 17:00:00,679][transformers.trainer][INFO] - Batch size = 16
+[2025-07-09 17:00:01,594][transformers.trainer][INFO] - Saving model checkpoint to /workspace/jbcs2025/outputs/2025-07-09/16-57-42/results/bertimbau_large/C5/checkpoint-224
+[2025-07-09 17:00:01,596][transformers.configuration_utils][INFO] - Configuration saved in /workspace/jbcs2025/outputs/2025-07-09/16-57-42/results/bertimbau_large/C5/checkpoint-224/config.json
+[2025-07-09 17:00:04,095][transformers.modeling_utils][INFO] - Model weights saved in /workspace/jbcs2025/outputs/2025-07-09/16-57-42/results/bertimbau_large/C5/checkpoint-224/model.safetensors
+[2025-07-09 17:00:18,859][transformers.trainer][INFO] - The following columns in the Evaluation set don't have a corresponding argument in `BertForSequenceClassification.forward` and have been ignored: grades, essay_year, id_prompt, essay_text, reference, id, supporting_text, prompt. If grades, essay_year, id_prompt, essay_text, reference, id, supporting_text, prompt are not expected by `BertForSequenceClassification.forward`, you can safely ignore this message.
+[2025-07-09 17:00:18,863][transformers.trainer][INFO] -
+***** Running Evaluation *****
+[2025-07-09 17:00:18,863][transformers.trainer][INFO] - Num examples = 132
+[2025-07-09 17:00:18,863][transformers.trainer][INFO] - Batch size = 16
+[2025-07-09 17:00:19,813][transformers.trainer][INFO] - Saving model checkpoint to /workspace/jbcs2025/outputs/2025-07-09/16-57-42/results/bertimbau_large/C5/checkpoint-256
+[2025-07-09 17:00:19,816][transformers.configuration_utils][INFO] - Configuration saved in /workspace/jbcs2025/outputs/2025-07-09/16-57-42/results/bertimbau_large/C5/checkpoint-256/config.json
+[2025-07-09 17:00:22,407][transformers.modeling_utils][INFO] - Model weights saved in /workspace/jbcs2025/outputs/2025-07-09/16-57-42/results/bertimbau_large/C5/checkpoint-256/model.safetensors
+[2025-07-09 17:00:25,011][transformers.trainer][INFO] - Deleting older checkpoint [/workspace/jbcs2025/outputs/2025-07-09/16-57-42/results/bertimbau_large/C5/checkpoint-224] due to args.save_total_limit
+[2025-07-09 17:00:37,587][transformers.trainer][INFO] - The following columns in the Evaluation set don't have a corresponding argument in `BertForSequenceClassification.forward` and have been ignored: grades, essay_year, id_prompt, essay_text, reference, id, supporting_text, prompt. If grades, essay_year, id_prompt, essay_text, reference, id, supporting_text, prompt are not expected by `BertForSequenceClassification.forward`, you can safely ignore this message.
+[2025-07-09 17:00:37,589][transformers.trainer][INFO] -
+***** Running Evaluation *****
+[2025-07-09 17:00:37,589][transformers.trainer][INFO] - Num examples = 132
+[2025-07-09 17:00:37,589][transformers.trainer][INFO] - Batch size = 16
+[2025-07-09 17:00:38,507][transformers.trainer][INFO] - Saving model checkpoint to /workspace/jbcs2025/outputs/2025-07-09/16-57-42/results/bertimbau_large/C5/checkpoint-288
+[2025-07-09 17:00:38,509][transformers.configuration_utils][INFO] - Configuration saved in /workspace/jbcs2025/outputs/2025-07-09/16-57-42/results/bertimbau_large/C5/checkpoint-288/config.json
+[2025-07-09 17:00:40,642][transformers.modeling_utils][INFO] - Model weights saved in /workspace/jbcs2025/outputs/2025-07-09/16-57-42/results/bertimbau_large/C5/checkpoint-288/model.safetensors
+[2025-07-09 17:00:42,927][transformers.trainer][INFO] - Deleting older checkpoint [/workspace/jbcs2025/outputs/2025-07-09/16-57-42/results/bertimbau_large/C5/checkpoint-256] due to args.save_total_limit
+[2025-07-09 17:00:55,513][transformers.trainer][INFO] - The following columns in the Evaluation set don't have a corresponding argument in `BertForSequenceClassification.forward` and have been ignored: grades, essay_year, id_prompt, essay_text, reference, id, supporting_text, prompt. If grades, essay_year, id_prompt, essay_text, reference, id, supporting_text, prompt are not expected by `BertForSequenceClassification.forward`, you can safely ignore this message.
+[2025-07-09 17:00:55,516][transformers.trainer][INFO] -
+***** Running Evaluation *****
+[2025-07-09 17:00:55,516][transformers.trainer][INFO] - Num examples = 132
+[2025-07-09 17:00:55,516][transformers.trainer][INFO] - Batch size = 16
+[2025-07-09 17:00:56,443][transformers.trainer][INFO] - Saving model checkpoint to /workspace/jbcs2025/outputs/2025-07-09/16-57-42/results/bertimbau_large/C5/checkpoint-320
+[2025-07-09 17:00:56,445][transformers.configuration_utils][INFO] - Configuration saved in /workspace/jbcs2025/outputs/2025-07-09/16-57-42/results/bertimbau_large/C5/checkpoint-320/config.json
+[2025-07-09 17:00:58,820][transformers.modeling_utils][INFO] - Model weights saved in /workspace/jbcs2025/outputs/2025-07-09/16-57-42/results/bertimbau_large/C5/checkpoint-320/model.safetensors
+[2025-07-09 17:01:01,234][transformers.trainer][INFO] - Deleting older checkpoint [/workspace/jbcs2025/outputs/2025-07-09/16-57-42/results/bertimbau_large/C5/checkpoint-288] due to args.save_total_limit
+[2025-07-09 17:01:13,788][transformers.trainer][INFO] - The following columns in the Evaluation set don't have a corresponding argument in `BertForSequenceClassification.forward` and have been ignored: grades, essay_year, id_prompt, essay_text, reference, id, supporting_text, prompt. If grades, essay_year, id_prompt, essay_text, reference, id, supporting_text, prompt are not expected by `BertForSequenceClassification.forward`, you can safely ignore this message.
+[2025-07-09 17:01:13,791][transformers.trainer][INFO] -
+***** Running Evaluation *****
+[2025-07-09 17:01:13,792][transformers.trainer][INFO] - Num examples = 132
+[2025-07-09 17:01:13,792][transformers.trainer][INFO] - Batch size = 16
+[2025-07-09 17:01:14,713][transformers.trainer][INFO] - Saving model checkpoint to /workspace/jbcs2025/outputs/2025-07-09/16-57-42/results/bertimbau_large/C5/checkpoint-352
+[2025-07-09 17:01:14,715][transformers.configuration_utils][INFO] - Configuration saved in /workspace/jbcs2025/outputs/2025-07-09/16-57-42/results/bertimbau_large/C5/checkpoint-352/config.json
+[2025-07-09 17:01:16,746][transformers.modeling_utils][INFO] - Model weights saved in /workspace/jbcs2025/outputs/2025-07-09/16-57-42/results/bertimbau_large/C5/checkpoint-352/model.safetensors
+[2025-07-09 17:01:19,071][transformers.trainer][INFO] - Deleting older checkpoint [/workspace/jbcs2025/outputs/2025-07-09/16-57-42/results/bertimbau_large/C5/checkpoint-320] due to args.save_total_limit
+[2025-07-09 17:01:19,334][transformers.trainer][INFO] -
+
+Training completed. Do not forget to share your model on huggingface.co/models =)
+
+
+[2025-07-09 17:01:19,334][transformers.trainer][INFO] - Loading best model from /workspace/jbcs2025/outputs/2025-07-09/16-57-42/results/bertimbau_large/C5/checkpoint-192 (score: 0.5028286803591193).
+[2025-07-09 17:01:19,785][transformers.trainer][INFO] - Deleting older checkpoint [/workspace/jbcs2025/outputs/2025-07-09/16-57-42/results/bertimbau_large/C5/checkpoint-352] due to args.save_total_limit
+[2025-07-09 17:01:20,049][transformers.trainer][INFO] - The following columns in the Evaluation set don't have a corresponding argument in `BertForSequenceClassification.forward` and have been ignored: grades, essay_year, id_prompt, essay_text, reference, id, supporting_text, prompt. If grades, essay_year, id_prompt, essay_text, reference, id, supporting_text, prompt are not expected by `BertForSequenceClassification.forward`, you can safely ignore this message.
+[2025-07-09 17:01:20,052][transformers.trainer][INFO] -
+***** Running Evaluation *****
+[2025-07-09 17:01:20,052][transformers.trainer][INFO] - Num examples = 132
+[2025-07-09 17:01:20,052][transformers.trainer][INFO] - Batch size = 16
+[2025-07-09 17:01:20,950][__main__][INFO] - Training completed successfully.
+[2025-07-09 17:01:20,950][__main__][INFO] - Running on Test
+[2025-07-09 17:01:20,951][transformers.trainer][INFO] - The following columns in the Evaluation set don't have a corresponding argument in `BertForSequenceClassification.forward` and have been ignored: grades, essay_year, id_prompt, essay_text, reference, id, supporting_text, prompt. If grades, essay_year, id_prompt, essay_text, reference, id, supporting_text, prompt are not expected by `BertForSequenceClassification.forward`, you can safely ignore this message.
+[2025-07-09 17:01:20,953][transformers.trainer][INFO] -
+***** Running Evaluation *****
+[2025-07-09 17:01:20,953][transformers.trainer][INFO] - Num examples = 138
+[2025-07-09 17:01:20,953][transformers.trainer][INFO] - Batch size = 16
+[2025-07-09 17:01:21,879][__main__][INFO] - Test metrics: {'eval_loss': 1.5725419521331787, 'eval_model_preparation_time': 0.0037, 'eval_accuracy': 0.41304347826086957, 'eval_RMSE': 59.75796593554388, 'eval_QWK': 0.45915406932356084, 'eval_HDIV': 0.14492753623188404, 'eval_Macro_F1': 0.3013676792556103, 'eval_Micro_F1': 0.41304347826086957, 'eval_Weighted_F1': 0.3597310868375336, 'eval_TP_0': 5, 'eval_TN_0': 115, 'eval_FP_0': 1, 'eval_FN_0': 17, 'eval_TP_1': 6, 'eval_TN_1': 94, 'eval_FP_1': 12, 'eval_FN_1': 26, 'eval_TP_2': 14, 'eval_TN_2': 82, 'eval_FP_2': 32, 'eval_FN_2': 10, 'eval_TP_3': 3, 'eval_TN_3': 112, 'eval_FP_3': 1, 'eval_FN_3': 22, 'eval_TP_4': 29, 'eval_TN_4': 71, 'eval_FP_4': 35, 'eval_FN_4': 3, 'eval_TP_5': 0, 'eval_TN_5': 135, 'eval_FP_5': 0, 'eval_FN_5': 3, 'eval_runtime': 0.9202, 'eval_samples_per_second': 149.972, 'eval_steps_per_second': 9.781, 'epoch': 11.0}
+[2025-07-09 17:01:21,880][transformers.trainer][INFO] - Saving model checkpoint to ./results/bertimbau_large/C5/best_model
+[2025-07-09 17:01:21,881][transformers.configuration_utils][INFO] - Configuration saved in ./results/bertimbau_large/C5/best_model/config.json
+[2025-07-09 17:01:23,984][transformers.modeling_utils][INFO] - Model weights saved in ./results/bertimbau_large/C5/best_model/model.safetensors
+[2025-07-09 17:01:23,987][transformers.tokenization_utils_base][INFO] - tokenizer config file saved in ./results/bertimbau_large/C5/best_model/tokenizer_config.json
+[2025-07-09 17:01:23,987][transformers.tokenization_utils_base][INFO] - Special tokens file saved in ./results/bertimbau_large/C5/best_model/special_tokens_map.json
+[2025-07-09 17:01:23,999][__main__][INFO] - Model and tokenizer saved to ./results/bertimbau_large/C5/best_model
+[2025-07-09 17:01:24,003][__main__][INFO] - Fine Tuning Finished.
+[2025-07-09 17:01:24,515][__main__][INFO] - Total emissions: 0.0048 kg CO2eq
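The log pins down the key hyperparameters: learning rate 5e-5, batch size 16, 20 epochs (best model from epoch 6, checkpoint-192, early-stopped by epoch 11), warmup ratio 0.1, weight decay 0.01, bf16, and QWK-based model selection. A `TrainingArguments` sketch consistent with them follows; the actual Hydra-driven script is not part of this commit, so values such as the eval/save strategies and `save_total_limit` are inferences from the log:

```python
# Sketch reconstructed from the logged hyperparameters; not the original script.
from transformers import TrainingArguments

training_args = TrainingArguments(
    output_dir="./results/bertimbau_large/C5",
    logging_dir="./logs/bertimbau_large/C5",
    num_train_epochs=20,
    per_device_train_batch_size=16,
    per_device_eval_batch_size=16,
    learning_rate=5e-5,
    weight_decay=0.01,
    warmup_ratio=0.1,
    logging_steps=100,
    eval_strategy="epoch",       # the log shows one evaluation per 32-step epoch
    save_strategy="epoch",
    save_total_limit=2,          # assumption: inferred from the checkpoint deletions
    load_best_model_at_end=True,
    metric_for_best_model="QWK",
    greater_is_better=True,
    bf16=True,
    seed=42,
)
```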
special_tokens_map.json ADDED
@@ -0,0 +1,7 @@
+{
+  "cls_token": "[CLS]",
+  "mask_token": "[MASK]",
+  "pad_token": "[PAD]",
+  "sep_token": "[SEP]",
+  "unk_token": "[UNK]"
+}
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1,58 @@
+{
+  "added_tokens_decoder": {
+    "0": {
+      "content": "[PAD]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "100": {
+      "content": "[UNK]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "101": {
+      "content": "[CLS]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "102": {
+      "content": "[SEP]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "103": {
+      "content": "[MASK]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    }
+  },
+  "clean_up_tokenization_spaces": true,
+  "cls_token": "[CLS]",
+  "do_basic_tokenize": true,
+  "do_lower_case": false,
+  "extra_special_tokens": {},
+  "mask_token": "[MASK]",
+  "model_max_length": 512,
+  "never_split": null,
+  "pad_token": "[PAD]",
+  "sep_token": "[SEP]",
+  "strip_accents": null,
+  "tokenize_chinese_chars": true,
+  "tokenizer_class": "BertTokenizer",
+  "unk_token": "[UNK]"
+}
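The tokenizer is a standard cased `BertTokenizer` capped at 512 positions. Per the run log ("Padding: longest; Truncation: True"), batches are padded to their longest member and truncated at the model limit, which is why every split reports flat 512-token statistics. A sketch of that call with placeholder texts:

```python
# Batch tokenization as described in the run log (padding/truncation settings).
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("neuralmind/bert-large-portuguese-cased")

batch = tokenizer(
    ["primeira redação de exemplo...", "segunda redação de exemplo..."],  # placeholders
    padding="longest",
    truncation=True,
    max_length=512,
)
print([len(ids) for ids in batch["input_ids"]])  # equal lengths within the batch
```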
training_args.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e692aea3147ca3a6411ab7e83b5922a32c56013007aeeb6a47a368b4527625cb
+size 5841
vocab.txt ADDED
The diff for this file is too large to render. See raw diff