jialicheng committed
Commit 840d640 · verified · 1 Parent(s): 06b7fb2

Upload folder using huggingface_hub

README.md ADDED
@@ -0,0 +1,70 @@
+ ---
+ library_name: transformers
+ license: apache-2.0
+ base_model: dandelin/vilt-b32-finetuned-nlvr2
+ tags:
+ - image-text-classification
+ - generated_from_trainer
+ metrics:
+ - accuracy
+ model-index:
+ - name: '42'
+ results: []
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ # 42
+
+ This model is a fine-tuned version of [dandelin/vilt-b32-finetuned-nlvr2](https://huggingface.co/dandelin/vilt-b32-finetuned-nlvr2) on the nlvr2 dataset.
+ It achieves the following results on the evaluation set:
+ - Loss: 0.9443
+ - Accuracy: 0.7327
+ - Dt Accuracy: 0.7327
+ - Df Accuracy: 0.4959
+ - Unlearn Overall Accuracy: 0.8978
+ - Unlearn Time: None
+
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 1e-05
+ - train_batch_size: 32
+ - eval_batch_size: 64
+ - seed: 42
+ - optimizer: adamw_torch with betas=(0.9, 0.999), epsilon=1e-08, and no additional optimizer arguments
+ - lr_scheduler_type: linear
+ - num_epochs: 5
+
+ ### Training results
+
+ | Training Loss | Epoch | Step  | Validation Loss | Df Accuracy | Overall Accuracy | Unlearn Overall Accuracy | Unlearn Time |
+ |:-------------:|:-----:|:-----:|:---------------:|:-----------:|:----------------:|:------------------------:|:------------:|
+ | 0.3079        | 1.0   | 2700  | 0.5663          | 0.8542      | 0.6449           | 0.6449                   | None         |
+ | 0.2432        | 2.0   | 5400  | 0.6403          | 0.7017      | 0.7676           | 0.7676                   | None         |
+ | 0.1905        | 3.0   | 8100  | 0.7980          | 0.5862      | 0.8458           | 0.8458                   | None         |
+ | 0.1569        | 4.0   | 10800 | 0.8834          | 0.5176      | 0.8863           | 0.8863                   | None         |
+ | 0.1273        | 5.0   | 13500 | 0.9443          | 0.4959      | 0.8978           | 0.8978                   | None         |
+
+
+ ### Framework versions
+
+ - Transformers 4.48.0
+ - Pytorch 2.3.0+cu121
+ - Datasets 2.18.0
+ - Tokenizers 0.21.0
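
The card above ships without a usage snippet, so here is a minimal inference sketch for a two-image/one-sentence ViLT classifier of this type. It assumes the checkpoint is cloned locally at the placeholder path `./42`, reuses the base model's processor, and uses illustrative NLVR example image URLs; none of these specifics come from this repository.

```python
# Minimal sketch, assuming a local clone at ./42 and a processor compatible with the base model.
import requests
import torch
from PIL import Image
from transformers import ViltProcessor, ViltForImagesAndTextClassification

processor = ViltProcessor.from_pretrained("dandelin/vilt-b32-finetuned-nlvr2")
model = ViltForImagesAndTextClassification.from_pretrained("./42")  # hypothetical local path
model.eval()

# NLVR2-style input: one sentence grounded in a pair of images (illustrative URLs).
image1 = Image.open(requests.get("https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg", stream=True).raw)
image2 = Image.open(requests.get("https://lil.nlp.cornell.edu/nlvr/exs/ex0_1.jpg", stream=True).raw)
text = "The left image contains twice the number of dogs as the right image."

encoding = processor([image1, image2], text, return_tensors="pt")
with torch.no_grad():
    outputs = model(
        input_ids=encoding.input_ids,
        # the processor returns pixel_values as (num_images, C, H, W); add a batch dimension
        pixel_values=encoding.pixel_values.unsqueeze(0),
    )
pred = outputs.logits.argmax(-1).item()
print("Predicted label:", model.config.id2label[pred])  # "True" or "False"
```

The `unsqueeze(0)` turns the processor's `(num_images, channels, height, width)` tensor into the `(batch, num_images, ...)` layout that `ViltForImagesAndTextClassification` expects for its two-image inputs (`num_images: 2` in config.json).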
all_results.json ADDED
@@ -0,0 +1,22 @@
+ {
+ "df_accuracy": 0.49593967517401394,
+ "df_loss": 1.892191767692566,
+ "dr_accuracy": 0.9968333333333333,
+ "dr_loss": 0.02866685576736927,
+ "dt_accuracy": 0.7440792306588202,
+ "epoch": 5.0,
+ "eval_unlearn_overall_accuracy": 0.8977844146136806,
+ "knowledge_gap": 0.7504383217324052,
+ "ood_accuracy": 0.7559540889526543,
+ "ood_loss": 0.8654005527496338,
+ "test_accuracy": 0.7440792306588202,
+ "test_loss": 0.9116904735565186,
+ "total_flos": 8453448251631240.0,
+ "train_loss": 0.20511870829264323,
+ "train_runtime": 11255.3092,
+ "train_samples_per_second": 38.37,
+ "train_steps_per_second": 1.199,
+ "unlearn_overall_accuracy": 0.9026347180426603,
+ "unlearn_time": 11256.28268122673,
+ "zrf": 0.8546327662809815
+ }
config.json ADDED
@@ -0,0 +1,37 @@
+ {
+ "_name_or_path": "dandelin/vilt-b32-finetuned-nlvr2",
+ "architectures": [
+ "ViltForImagesAndTextClassification"
+ ],
+ "attention_probs_dropout_prob": 0.0,
+ "hidden_act": "gelu",
+ "hidden_dropout_prob": 0.0,
+ "hidden_size": 768,
+ "id2label": {
+ "0": "False",
+ "1": "True"
+ },
+ "image_size": 384,
+ "initializer_range": 0.02,
+ "intermediate_size": 3072,
+ "label2id": {
+ "False": 0,
+ "True": 1
+ },
+ "layer_norm_eps": 1e-12,
+ "max_image_length": -1,
+ "max_position_embeddings": 40,
+ "modality_type_vocab_size": 3,
+ "model_type": "vilt",
+ "num_attention_heads": 12,
+ "num_channels": 3,
+ "num_hidden_layers": 12,
+ "num_images": 2,
+ "patch_size": 32,
+ "qkv_bias": true,
+ "tie_word_embeddings": false,
+ "torch_dtype": "float32",
+ "transformers_version": "4.48.0",
+ "type_vocab_size": 2,
+ "vocab_size": 30522
+ }
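
As a quick sanity check of the architecture fields above, the config can be inspected directly with transformers; a sketch, where `./42` is a placeholder for a local clone of this repo:

```python
# Sketch: inspect the ViLT config shipped with this checkpoint.
from transformers import AutoConfig

config = AutoConfig.from_pretrained("./42")  # hypothetical local clone of this repo
print(config.model_type)               # "vilt"
print(config.num_images)               # 2 -> the classifier scores an image pair per example
print(config.id2label)                 # {0: "False", 1: "True"}
print(config.max_position_embeddings)  # 40 text positions
```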
df_results.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "df_accuracy": 0.49593967517401394,
+ "df_loss": 1.892191767692566
+ }
dr_results.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "dr_accuracy": 0.9968333333333333,
+ "dr_loss": 0.02866685576736927
+ }
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b9739c21920e00d85ac9f6d8733f06ca10d3790f1f0fdd6f5f30e985b6472836
+ size 455875832
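
The entry above is a Git LFS pointer, not the weights themselves. After fetching the real file (for example with `git lfs pull`), its size and SHA-256 can be checked against the pointer; a sketch, assuming the file lands at the placeholder path `./42/model.safetensors`:

```python
# Sketch: verify a downloaded LFS object against the oid/size shown in its pointer.
import hashlib
from pathlib import Path

path = Path("./42/model.safetensors")  # hypothetical local path after `git lfs pull`
expected_oid = "b9739c21920e00d85ac9f6d8733f06ca10d3790f1f0fdd6f5f30e985b6472836"
expected_size = 455875832

digest = hashlib.sha256()
with path.open("rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
        digest.update(chunk)

assert path.stat().st_size == expected_size, "size mismatch"
assert digest.hexdigest() == expected_oid, "sha256 mismatch"
print("model.safetensors matches its LFS pointer")
```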
ood_results.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "ood_accuracy": 0.7559540889526543,
+ "ood_loss": 0.8654005527496338
+ }
pred_logit_df.npy ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:304aa91662f5dfb030a91382e3ca642293a96dc680b84089b1e8947c5ca06c3f
+ size 41504
pred_logit_dr.npy ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6c1ace116a1cae36e18cd8e2fa379d2be939e624df8673df3f6fc0ba95a5f847
+ size 48128
pred_logit_eval.npy ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:82a8cdbb7a2e993748265d79ecad4bfdb1237e4c226ef4aeadb5980f69bc0049
+ size 55984
pred_logit_ood.npy ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:bb43d998bcff0c7c81ef3aa0c4daa79a3e8d7044f08bf53fadf1d81fdf27adab
+ size 55888
pred_logit_test.npy ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1ce0a01622d323f272960fc4691beeea35bc7b3a4ccfec25da9c9e9d2446cd57
+ size 55864
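
The `pred_logit_*.npy` files appear to hold the raw classifier outputs for each evaluation split (df, dr, eval, ood, test). Their exact dtype and layout are not documented in this repo, so the following is only a sketch of how one might turn them into probabilities and hard predictions after pulling the LFS objects; the assumed layout (one row per example, one column per class in `id2label` order) is not confirmed by the source.

```python
# Sketch: load saved logits and derive predictions.
# Assumption: shape (num_examples, 2) with columns ordered as [False, True] per config.json.
import numpy as np

logits = np.load("./42/pred_logit_test.npy")  # hypothetical local path
print(logits.shape)

# softmax over the class dimension
exp = np.exp(logits - logits.max(axis=-1, keepdims=True))
probs = exp / exp.sum(axis=-1, keepdims=True)

preds = logits.argmax(axis=-1)  # 0 -> "False", 1 -> "True"
print(preds[:10], probs[:10].round(3))
```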
sal_mask_with_0.5.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:bacd1deb9b7898b1d187d06515e10fbe2d20eb364a12b26f303ca81d9a92662b
+ size 911772538
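
`sal_mask_with_0.5.pt` is presumably the weight-saliency mask used by the SalUn-style unlearning run referenced in trainer_state.json (the checkpoint path contains `.../salun/...`), thresholded at 0.5. Assuming it is a torch-saved mapping from parameter names to binary mask tensors (a guess, not stated anywhere in this repo), its coverage could be inspected like this:

```python
# Sketch only: the file format is an assumption (dict of parameter name -> 0/1 mask tensor).
import torch

mask = torch.load("./42/sal_mask_with_0.5.pt", map_location="cpu")

total = 0
kept = 0
for name, m in mask.items():
    total += m.numel()
    kept += (m != 0).sum().item()
print(f"mask entries: {total:,}; fraction selected at the 0.5 threshold: {kept / total:.3f}")
```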
test_results.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "test_accuracy": 0.7440792306588202,
+ "test_loss": 0.9116904735565186
+ }
train_results.json ADDED
@@ -0,0 +1,8 @@
+ {
+ "epoch": 5.0,
+ "total_flos": 8453448251631240.0,
+ "train_loss": 0.20511870829264323,
+ "train_runtime": 11255.3092,
+ "train_samples_per_second": 38.37,
+ "train_steps_per_second": 1.199
+ }
trainer_state.json ADDED
@@ -0,0 +1,301 @@
+ {
+ "best_metric": 0.8977844146136806,
+ "best_model_checkpoint": "../../checkpoint/unlearn/nlvr2/vilt/salun/6.0/42/checkpoint-13500",
+ "epoch": 5.0,
+ "eval_steps": 500,
+ "global_step": 13500,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 0.18518518518518517,
+ "grad_norm": 3.2584547996520996,
+ "learning_rate": 9.62962962962963e-06,
+ "loss": 0.3282,
+ "step": 500
+ },
+ {
+ "epoch": 0.37037037037037035,
+ "grad_norm": 4.812547206878662,
+ "learning_rate": 9.25925925925926e-06,
+ "loss": 0.3197,
+ "step": 1000
+ },
+ {
+ "epoch": 0.5555555555555556,
+ "grad_norm": 4.050705909729004,
+ "learning_rate": 8.888888888888888e-06,
+ "loss": 0.3219,
+ "step": 1500
+ },
+ {
+ "epoch": 0.7407407407407407,
+ "grad_norm": 3.562553882598877,
+ "learning_rate": 8.518518518518519e-06,
+ "loss": 0.3194,
+ "step": 2000
+ },
+ {
+ "epoch": 0.9259259259259259,
+ "grad_norm": 5.053567409515381,
+ "learning_rate": 8.148148148148148e-06,
+ "loss": 0.3079,
+ "step": 2500
+ },
+ {
+ "df_accuracy": 0.854215003866976,
+ "dt_accuracy": 0.7513606416499571,
+ "epoch": 1.0,
+ "eval_accuracy": 0.7513606416499571,
+ "eval_loss": 0.5662743449211121,
+ "eval_runtime": 113.7346,
+ "eval_samples_per_second": 61.389,
+ "eval_steps_per_second": 0.967,
+ "eval_unlearn_overall_accuracy": 0.6449160678015313,
+ "step": 2700,
+ "unlearn_overall_accuracy": 0.6449160678015313,
+ "unlearn_time": null
+ },
+ {
+ "epoch": 1.1111111111111112,
+ "grad_norm": 1.7998104095458984,
+ "learning_rate": 7.77777777777778e-06,
+ "loss": 0.2708,
+ "step": 3000
+ },
+ {
+ "epoch": 1.2962962962962963,
+ "grad_norm": 2.322847366333008,
+ "learning_rate": 7.4074074074074075e-06,
+ "loss": 0.2454,
+ "step": 3500
+ },
+ {
+ "epoch": 1.4814814814814814,
+ "grad_norm": 2.8383140563964844,
+ "learning_rate": 7.0370370370370375e-06,
+ "loss": 0.2361,
+ "step": 4000
+ },
+ {
+ "epoch": 1.6666666666666665,
+ "grad_norm": 2.5046894550323486,
+ "learning_rate": 6.666666666666667e-06,
+ "loss": 0.2486,
+ "step": 4500
+ },
+ {
+ "epoch": 1.8518518518518519,
+ "grad_norm": 4.192043781280518,
+ "learning_rate": 6.296296296296297e-06,
+ "loss": 0.2432,
+ "step": 5000
+ },
+ {
+ "df_accuracy": 0.7016627996906419,
+ "dt_accuracy": 0.7433400171870524,
+ "epoch": 2.0,
+ "eval_accuracy": 0.7433400171870524,
+ "eval_loss": 0.6402536630630493,
+ "eval_runtime": 108.1015,
+ "eval_samples_per_second": 64.587,
+ "eval_steps_per_second": 1.018,
+ "eval_unlearn_overall_accuracy": 0.7676276016043365,
+ "step": 5400,
+ "unlearn_overall_accuracy": 0.7676276016043365,
+ "unlearn_time": null
+ },
+ {
+ "epoch": 2.037037037037037,
+ "grad_norm": 4.436009407043457,
+ "learning_rate": 5.925925925925926e-06,
+ "loss": 0.2304,
+ "step": 5500
+ },
+ {
+ "epoch": 2.2222222222222223,
+ "grad_norm": 3.5263144969940186,
+ "learning_rate": 5.555555555555557e-06,
+ "loss": 0.1807,
+ "step": 6000
+ },
+ {
+ "epoch": 2.4074074074074074,
+ "grad_norm": 3.2321693897247314,
+ "learning_rate": 5.185185185185185e-06,
+ "loss": 0.188,
+ "step": 6500
+ },
+ {
+ "epoch": 2.5925925925925926,
+ "grad_norm": 3.6127853393554688,
+ "learning_rate": 4.814814814814815e-06,
+ "loss": 0.1931,
+ "step": 7000
+ },
+ {
+ "epoch": 2.7777777777777777,
+ "grad_norm": 4.856908798217773,
+ "learning_rate": 4.444444444444444e-06,
+ "loss": 0.1923,
+ "step": 7500
+ },
+ {
+ "epoch": 2.962962962962963,
+ "grad_norm": 8.728678703308105,
+ "learning_rate": 4.074074074074074e-06,
+ "loss": 0.1905,
+ "step": 8000
+ },
+ {
+ "df_accuracy": 0.5862335653518949,
+ "dt_accuracy": 0.7396161558292753,
+ "epoch": 3.0,
+ "eval_accuracy": 0.7396161558292753,
+ "eval_loss": 0.7980125546455383,
+ "eval_runtime": 102.947,
+ "eval_samples_per_second": 67.821,
+ "eval_steps_per_second": 1.069,
+ "eval_unlearn_overall_accuracy": 0.845758066951303,
+ "step": 8100,
+ "unlearn_overall_accuracy": 0.845758066951303,
+ "unlearn_time": null
+ },
+ {
+ "epoch": 3.148148148148148,
+ "grad_norm": 0.8636759519577026,
+ "learning_rate": 3.7037037037037037e-06,
+ "loss": 0.1548,
+ "step": 8500
+ },
+ {
+ "epoch": 3.3333333333333335,
+ "grad_norm": 4.8238606452941895,
+ "learning_rate": 3.3333333333333333e-06,
+ "loss": 0.1483,
+ "step": 9000
+ },
+ {
+ "epoch": 3.5185185185185186,
+ "grad_norm": 3.636707067489624,
+ "learning_rate": 2.962962962962963e-06,
+ "loss": 0.1477,
+ "step": 9500
+ },
+ {
+ "epoch": 3.7037037037037037,
+ "grad_norm": 0.8585140705108643,
+ "learning_rate": 2.5925925925925925e-06,
+ "loss": 0.1434,
+ "step": 10000
+ },
+ {
+ "epoch": 3.888888888888889,
+ "grad_norm": 3.986074209213257,
+ "learning_rate": 2.222222222222222e-06,
+ "loss": 0.1569,
+ "step": 10500
+ },
+ {
+ "df_accuracy": 0.5175947409126064,
+ "dt_accuracy": 0.7354626181609853,
+ "epoch": 4.0,
+ "eval_accuracy": 0.7354626181609853,
+ "eval_loss": 0.8833993673324585,
+ "eval_runtime": 109.8215,
+ "eval_samples_per_second": 63.576,
+ "eval_steps_per_second": 1.002,
+ "eval_unlearn_overall_accuracy": 0.8863248174250627,
+ "step": 10800,
+ "unlearn_overall_accuracy": 0.8863248174250627,
+ "unlearn_time": null
+ },
+ {
+ "epoch": 4.074074074074074,
+ "grad_norm": 2.4611103534698486,
+ "learning_rate": 1.8518518518518519e-06,
+ "loss": 0.1421,
+ "step": 11000
+ },
+ {
+ "epoch": 4.2592592592592595,
+ "grad_norm": 3.0824742317199707,
+ "learning_rate": 1.4814814814814815e-06,
+ "loss": 0.1283,
+ "step": 11500
+ },
+ {
+ "epoch": 4.444444444444445,
+ "grad_norm": 2.9113352298736572,
+ "learning_rate": 1.111111111111111e-06,
+ "loss": 0.1198,
+ "step": 12000
+ },
+ {
+ "epoch": 4.62962962962963,
+ "grad_norm": 0.5261613130569458,
+ "learning_rate": 7.407407407407407e-07,
+ "loss": 0.1196,
+ "step": 12500
+ },
+ {
+ "epoch": 4.814814814814815,
+ "grad_norm": 4.700664520263672,
+ "learning_rate": 3.7037037037037036e-07,
+ "loss": 0.1336,
+ "step": 13000
+ },
+ {
+ "epoch": 5.0,
+ "grad_norm": 4.13147497177124,
+ "learning_rate": 0.0,
+ "loss": 0.1273,
+ "step": 13500
+ },
+ {
+ "df_accuracy": 0.49593967517401394,
+ "dt_accuracy": 0.7327413348610713,
+ "epoch": 5.0,
+ "eval_accuracy": 0.7327413348610713,
+ "eval_loss": 0.9442616701126099,
+ "eval_runtime": 113.4763,
+ "eval_samples_per_second": 61.528,
+ "eval_steps_per_second": 0.969,
+ "eval_unlearn_overall_accuracy": 0.8977844146136806,
+ "step": 13500,
+ "unlearn_overall_accuracy": 0.8977844146136806,
+ "unlearn_time": null
+ },
+ {
+ "epoch": 5.0,
+ "step": 13500,
+ "total_flos": 8453448251631240.0,
+ "train_loss": 0.20511870829264323,
+ "train_runtime": 11255.3092,
+ "train_samples_per_second": 38.37,
+ "train_steps_per_second": 1.199
+ }
+ ],
+ "logging_steps": 500,
+ "max_steps": 13500,
+ "num_input_tokens_seen": 0,
+ "num_train_epochs": 5,
+ "save_steps": 500,
+ "stateful_callbacks": {
+ "TrainerControl": {
+ "args": {
+ "should_epoch_stop": false,
+ "should_evaluate": false,
+ "should_log": false,
+ "should_save": true,
+ "should_training_stop": true
+ },
+ "attributes": {}
+ }
+ },
+ "total_flos": 8453448251631240.0,
+ "train_batch_size": 32,
+ "trial_name": null,
+ "trial_params": null
+ }
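
The per-epoch evaluation records in `log_history` above are what the README's training-results table is built from. A small sketch for extracting them (the path `./42/trainer_state.json` is a placeholder for a local clone):

```python
# Sketch: pull the epoch-level eval entries out of trainer_state.json.
import json

with open("./42/trainer_state.json") as f:  # hypothetical local path
    state = json.load(f)

for entry in state["log_history"]:
    if "eval_loss" in entry:  # epoch-level evaluation records (not the step-level loss logs)
        print(
            f"epoch {entry['epoch']:.0f}: "
            f"eval_loss={entry['eval_loss']:.4f}  "
            f"df_acc={entry['df_accuracy']:.4f}  "
            f"dt_acc={entry['dt_accuracy']:.4f}  "
            f"unlearn_overall={entry['unlearn_overall_accuracy']:.4f}"
        )
```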
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:81965d692c249bb4108c107b1b67353e93c98fba6cdf56f792b8e784b024ba74
+ size 5432
unlearn_final_results.json ADDED
@@ -0,0 +1,11 @@
+ {
+ "df_accuracy": 0.49593967517401394,
+ "dr_accuracy": 0.9968333333333333,
+ "dt_accuracy": 0.7440792306588202,
+ "eval_unlearn_overall_accuracy": 0.8977844146136806,
+ "knowledge_gap": 0.7504383217324052,
+ "ood_accuracy": 0.7559540889526543,
+ "unlearn_overall_accuracy": 0.9026347180426603,
+ "unlearn_time": 11256.28268122673,
+ "zrf": 0.8546327662809815
+ }