AlIshaq committed
Commit cff6efa · verified · 1 parent: f335ef0

Upload 8 files

config.json ADDED
@@ -0,0 +1,68 @@
+ {
+   "_num_labels": 3,
+   "activation_dropout": 0.0,
+   "activation_function": "gelu",
+   "add_final_layer_norm": false,
+   "architectures": [
+     "BartForConditionalGeneration"
+   ],
+   "attention_dropout": 0.0,
+   "bos_token_id": 0,
+   "classif_dropout": 0.0,
+   "classifier_dropout": 0.0,
+   "d_model": 1024,
+   "decoder_attention_heads": 16,
+   "decoder_ffn_dim": 4096,
+   "decoder_layerdrop": 0.0,
+   "decoder_layers": 12,
+   "decoder_start_token_id": 2,
+   "dropout": 0.1,
+   "early_stopping": null,
+   "encoder_attention_heads": 16,
+   "encoder_ffn_dim": 4096,
+   "encoder_layerdrop": 0.0,
+   "encoder_layers": 12,
+   "eos_token_id": 2,
+   "force_bos_token_to_be_generated": true,
+   "forced_eos_token_id": 2,
+   "gradient_checkpointing": false,
+   "id2label": {
+     "0": "LABEL_0",
+     "1": "LABEL_1",
+     "2": "LABEL_2"
+   },
+   "init_std": 0.02,
+   "is_encoder_decoder": true,
+   "label2id": {
+     "LABEL_0": 0,
+     "LABEL_1": 1,
+     "LABEL_2": 2
+   },
+   "length_penalty": null,
+   "max_length": null,
+   "max_position_embeddings": 1024,
+   "min_length": null,
+   "model_type": "bart",
+   "no_repeat_ngram_size": null,
+   "normalize_before": false,
+   "num_beams": null,
+   "num_hidden_layers": 12,
+   "output_past": true,
+   "pad_token_id": 1,
+   "prefix": " ",
+   "scale_embedding": false,
+   "task_specific_params": {
+     "summarization": {
+       "early_stopping": true,
+       "length_penalty": 2.0,
+       "max_length": 142,
+       "min_length": 56,
+       "no_repeat_ngram_size": 3,
+       "num_beams": 4
+     }
+   },
+   "torch_dtype": "float32",
+   "transformers_version": "4.52.4",
+   "use_cache": true,
+   "vocab_size": 50264
+ }
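
config.json declares a BART-large encoder-decoder (12 encoder and 12 decoder layers, d_model 1024, vocab_size 50264) exposed as BartForConditionalGeneration, with summarization defaults under task_specific_params that match the stock facebook/bart-large-cnn settings. As a minimal sketch only, assuming the eight files in this commit sit in a local directory (the repository id is not visible on this page, so the path below is a placeholder), the checkpoint could be loaded roughly like this:

# Minimal sketch, assuming a local directory holding config.json and
# model.safetensors. "path/to/checkpoint" is a placeholder, not a path
# taken from this commit.
from transformers import BartForConditionalGeneration

checkpoint_dir = "path/to/checkpoint"  # placeholder
model = BartForConditionalGeneration.from_pretrained(checkpoint_dir)

print(model.config.model_type)       # "bart"
print(model.config.d_model)          # 1024
print(model.config.encoder_layers)   # 12
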
generation_config.json ADDED
@@ -0,0 +1,16 @@
+ {
+   "_from_model_config": true,
+   "bos_token_id": 0,
+   "decoder_start_token_id": 2,
+   "early_stopping": true,
+   "eos_token_id": 2,
+   "forced_bos_token_id": 0,
+   "forced_eos_token_id": 2,
+   "length_penalty": 2.0,
+   "max_length": 142,
+   "min_length": 56,
+   "no_repeat_ngram_size": 3,
+   "num_beams": 4,
+   "pad_token_id": 1,
+   "transformers_version": "4.52.4"
+ }
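
generation_config.json fixes the default decoding strategy: 4-beam search with length_penalty 2.0, tri-gram repetition blocking, and outputs constrained to 56-142 tokens. A hedged sketch of how these defaults are picked up by generate() follows; the tokenizer and the input text are assumptions, since no tokenizer files are part of this commit.

# Hedged sketch: defaults from generation_config.json are read automatically
# by generate(). The tokenizer below is an assumption (standard BART tokenizer),
# and "path/to/checkpoint" is a placeholder.
from transformers import AutoTokenizer, BartForConditionalGeneration

checkpoint_dir = "path/to/checkpoint"                                 # placeholder
tokenizer = AutoTokenizer.from_pretrained("facebook/bart-large-cnn")  # assumption
model = BartForConditionalGeneration.from_pretrained(checkpoint_dir)

inputs = tokenizer("Some long article text ...", return_tensors="pt",
                   truncation=True, max_length=1024)

# With no decoding arguments, generate() uses num_beams=4, length_penalty=2.0,
# max_length=142, min_length=56 and no_repeat_ngram_size=3 from this file.
summary_ids = model.generate(**inputs)

# The defaults can also be overridden per call:
short_ids = model.generate(**inputs, max_length=80, num_beams=2)

print(tokenizer.decode(summary_ids[0], skip_special_tokens=True))
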
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:93bf5548f27cb6754e36250e3273557ebd91847327dcdce779aa6d477875c877
+ size 1625422896
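
model.safetensors, like the other binary files in this commit, is stored through Git LFS, so only the pointer above is versioned; the actual ~1.6 GB weights live in LFS storage. One hedged way to fetch the real file is shown below, with a placeholder repo_id because the repository name is not shown on this page; cloning with git-lfs installed pulls the same blob.

# Hedged sketch: download the weights behind this LFS pointer via huggingface_hub.
# The repo_id is a placeholder, not taken from the commit.
from huggingface_hub import hf_hub_download

weights_path = hf_hub_download(
    repo_id="AlIshaq/your-repo-name",   # placeholder
    filename="model.safetensors",
)
print(weights_path)  # local cache path of the downloaded file
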
optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:071bea63f0f8fab1b882502a437c06a11e3b4f646097bc86d693fb6ea3bb7c62
+ size 3250737487
rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f2583f7d3ee31da95fb193a42187527b1cff0bcddc035524ed22be74c2c13726
+ size 13990
scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:dd13b12a6803515a8d6b971dc56ad7fdb95ef75685b586afd852c1981561c65c
+ size 1064
trainer_state.json ADDED
@@ -0,0 +1,174 @@
+ {
+   "best_global_step": null,
+   "best_metric": null,
+   "best_model_checkpoint": null,
+   "epoch": 0.4444444444444444,
+   "eval_steps": 500,
+   "global_step": 2000,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 0.022222222222222223,
+       "grad_norm": 6.082106113433838,
+       "learning_rate": 4.89e-05,
+       "loss": 1.0349,
+       "step": 100
+     },
+     {
+       "epoch": 0.044444444444444446,
+       "grad_norm": 4.056931972503662,
+       "learning_rate": 4.778888888888889e-05,
+       "loss": 0.8961,
+       "step": 200
+     },
+     {
+       "epoch": 0.06666666666666667,
+       "grad_norm": 3.6765899658203125,
+       "learning_rate": 4.6677777777777785e-05,
+       "loss": 0.8295,
+       "step": 300
+     },
+     {
+       "epoch": 0.08888888888888889,
+       "grad_norm": 6.516704082489014,
+       "learning_rate": 4.556666666666667e-05,
+       "loss": 0.7245,
+       "step": 400
+     },
+     {
+       "epoch": 0.1111111111111111,
+       "grad_norm": 4.329707145690918,
+       "learning_rate": 4.445555555555555e-05,
+       "loss": 0.6863,
+       "step": 500
+     },
+     {
+       "epoch": 0.13333333333333333,
+       "grad_norm": 3.380208730697632,
+       "learning_rate": 4.334444444444445e-05,
+       "loss": 0.6626,
+       "step": 600
+     },
+     {
+       "epoch": 0.15555555555555556,
+       "grad_norm": 4.52927827835083,
+       "learning_rate": 4.2233333333333334e-05,
+       "loss": 0.6594,
+       "step": 700
+     },
+     {
+       "epoch": 0.17777777777777778,
+       "grad_norm": 6.694725513458252,
+       "learning_rate": 4.112222222222222e-05,
+       "loss": 0.5613,
+       "step": 800
+     },
+     {
+       "epoch": 0.2,
+       "grad_norm": 5.4532623291015625,
+       "learning_rate": 4.001111111111111e-05,
+       "loss": 0.5501,
+       "step": 900
+     },
+     {
+       "epoch": 0.2222222222222222,
+       "grad_norm": 4.265353679656982,
+       "learning_rate": 3.8900000000000004e-05,
+       "loss": 0.515,
+       "step": 1000
+     },
+     {
+       "epoch": 0.24444444444444444,
+       "grad_norm": 3.881925344467163,
+       "learning_rate": 3.778888888888889e-05,
+       "loss": 0.4319,
+       "step": 1100
+     },
+     {
+       "epoch": 0.26666666666666666,
+       "grad_norm": 4.1854095458984375,
+       "learning_rate": 3.667777777777778e-05,
+       "loss": 0.4552,
+       "step": 1200
+     },
+     {
+       "epoch": 0.28888888888888886,
+       "grad_norm": 6.0221757888793945,
+       "learning_rate": 3.556666666666667e-05,
+       "loss": 0.4155,
+       "step": 1300
+     },
+     {
+       "epoch": 0.3111111111111111,
+       "grad_norm": 3.2372546195983887,
+       "learning_rate": 3.445555555555556e-05,
+       "loss": 0.3693,
+       "step": 1400
+     },
+     {
+       "epoch": 0.3333333333333333,
+       "grad_norm": 3.5173611640930176,
+       "learning_rate": 3.334444444444445e-05,
+       "loss": 0.3579,
+       "step": 1500
+     },
+     {
+       "epoch": 0.35555555555555557,
+       "grad_norm": 2.6714348793029785,
+       "learning_rate": 3.2233333333333335e-05,
+       "loss": 0.3441,
+       "step": 1600
+     },
+     {
+       "epoch": 0.37777777777777777,
+       "grad_norm": 2.825218439102173,
+       "learning_rate": 3.112222222222222e-05,
+       "loss": 0.2726,
+       "step": 1700
+     },
+     {
+       "epoch": 0.4,
+       "grad_norm": 4.0160040855407715,
+       "learning_rate": 3.0011111111111114e-05,
+       "loss": 0.2673,
+       "step": 1800
+     },
+     {
+       "epoch": 0.4222222222222222,
+       "grad_norm": 2.8447208404541016,
+       "learning_rate": 2.8899999999999998e-05,
+       "loss": 0.2423,
+       "step": 1900
+     },
+     {
+       "epoch": 0.4444444444444444,
+       "grad_norm": 4.038785934448242,
+       "learning_rate": 2.7788888888888892e-05,
+       "loss": 0.218,
+       "step": 2000
+     }
+   ],
+   "logging_steps": 100,
+   "max_steps": 4500,
+   "num_input_tokens_seen": 0,
+   "num_train_epochs": 1,
+   "save_steps": 1000,
+   "stateful_callbacks": {
+     "TrainerControl": {
+       "args": {
+         "should_epoch_stop": false,
+         "should_evaluate": false,
+         "should_log": false,
+         "should_save": true,
+         "should_training_stop": false
+       },
+       "attributes": {}
+     }
+   },
+   "total_flos": 541776150528000.0,
+   "train_batch_size": 2,
+   "trial_name": null,
+   "trial_params": null
+ }
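
trainer_state.json records the Trainer log history for this checkpoint: step 2000 of 4500, about 0.44 of one epoch, with the training loss falling from roughly 1.03 at step 100 to roughly 0.22 at step 2000. A minimal standard-library sketch for tabulating that curve from the file:

# Minimal sketch (standard library only): print loss and learning rate per
# logged step from trainer_state.json.
import json

with open("trainer_state.json") as f:
    state = json.load(f)

print(f"checkpoint at step {state['global_step']} / {state['max_steps']}")
for entry in state["log_history"]:
    print(f"step {entry['step']:>5}  loss {entry['loss']:.4f}  lr {entry['learning_rate']:.2e}")
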
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6a6f08c2777e927e01e8086ca2f5d70067aa3402a937b0c540eb695ebed4be95
+ size 5240
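
training_args.bin is the pickled TrainingArguments object the Trainer saves alongside each checkpoint. As an assumption-labelled sketch (the actual argument values are not visible on this page), it can be inspected with torch.load; together with optimizer.pt, scheduler.pt, rng_state.pth and trainer_state.json, this directory is what Trainer.train(resume_from_checkpoint=...) consumes to continue the run from step 2000.

# Hedged sketch: inspect the saved TrainingArguments. The file is a full Python
# pickle, so recent PyTorch needs weights_only=False and it should only be
# loaded from a trusted source. The printed attributes are illustrative; their
# values are not shown anywhere in this commit.
import torch

args = torch.load("training_args.bin", weights_only=False)
print(args.per_device_train_batch_size)
print(args.learning_rate)
print(args.num_train_epochs)
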