Update README.md
README.md CHANGED
@@ -85,7 +85,82 @@ print(tokenizer.decode(outputs[0]))
## 🏆 Evaluation

### AGIEVAL

| Task                               | Version | Metric    | Value |   | StdErr |
|------------------------------------|---------|-----------|-------|---|--------|
| agieval\_aqua\_rat                 | 0       | acc       | 24.02 | ± | 2.69   |
| agieval\_aqua\_rat                 | 0       | acc\_norm | 24.02 | ± | 2.69   |
| agieval\_logiqa\_en                | 0       | acc       | 23.20 | ± | 1.66   |
| agieval\_logiqa\_en                | 0       | acc\_norm | 24.42 | ± | 1.69   |
| agieval\_lsat\_ar                  | 0       | acc       | 18.26 | ± | 2.55   |
| agieval\_lsat\_ar                  | 0       | acc\_norm | 18.70 | ± | 2.58   |
| agieval\_lsat\_lr                  | 0       | acc       | 22.35 | ± | 1.85   |
| agieval\_lsat\_lr                  | 0       | acc\_norm | 23.53 | ± | 1.88   |
| agieval\_lsat\_rc                  | 0       | acc       | 20.82 | ± | 2.48   |
| agieval\_lsat\_rc                  | 0       | acc\_norm | 20.07 | ± | 2.45   |
| agieval\_sat\_en                   | 0       | acc       | 32.52 | ± | 3.27   |
| agieval\_sat\_en                   | 0       | acc\_norm | 32.52 | ± | 3.27   |
| agieval\_sat\_en\_without\_passage | 0       | acc       | 25.73 | ± | 3.05   |
| agieval\_sat\_en\_without\_passage | 0       | acc\_norm | 24.27 | ± | 2.99   |
| agieval\_sat\_math                 | 0       | acc       | 25.00 | ± | 2.93   |
| agieval\_sat\_math                 | 0       | acc\_norm | 20.91 | ± | 2.75   |

Average: 23.8

### GPT4ALL

| Task           | Version | Metric    | Value |   | StdErr |
|----------------|---------|-----------|-------|---|--------|
| arc\_challenge | 0       | acc       | 21.77 | ± | 1.21   |
| arc\_challenge | 0       | acc\_norm | 24.15 | ± | 1.25   |
| arc\_easy      | 0       | acc       | 37.37 | ± | 0.99   |
| arc\_easy      | 0       | acc\_norm | 36.95 | ± | 0.99   |
| boolq          | 1       | acc       | 65.60 | ± | 0.83   |
| hellaswag      | 0       | acc       | 34.54 | ± | 0.47   |
| hellaswag      | 0       | acc\_norm | 40.54 | ± | 0.49   |
| openbookqa     | 0       | acc       | 15.00 | ± | 1.59   |
| openbookqa     | 0       | acc\_norm | 27.40 | ± | 2.00   |
| piqa           | 0       | acc       | 60.88 | ± | 1.14   |
| piqa           | 0       | acc\_norm | 60.55 | ± | 1.14   |
| winogrande     | 0       | acc       | 50.91 | ± | 1.41   |

Average: 39.9

### BIGBENCH

| Task                                 | Version | Metric | Value | Std Err |
|--------------------------------------|---------|--------|-------|---------|
| bigbench\_causal\_judgement          | 0       | MCG    | 50    | 2.26    |
| bigbench\_date\_understanding        | 0       | MCG    | 49.14 | 2.18    |
| bigbench\_disambiguation\_qa         | 0       | MCG    | 49.31 | 2.74    |
| bigbench\_geometric\_shapes          | 0       | MCG    | 14.18 | 1.37    |
| bigbench\_logical\_deduction\_5objs  | 0       | MCG    | 49.41 | 2.73    |
| bigbench\_logical\_deduction\_7objs  | 0       | MCG    | 41.48 | 2.46    |
| bigbench\_logical\_deduction\_3objs  | 0       | MCG    | 69.33 | 2.75    |
| bigbench\_movie\_recommendation      | 0       | MCG    | 51.71 | 2.25    |
| bigbench\_navigate                   | 0       | MCG    | 50    | 1.58    |
| bigbench\_reasoning\_colored\_obj    | 0       | MCG    | 51.92 | 0.99    |
| bigbench\_ruin\_names                | 0       | MCG    | 48.14 | 2.01    |
| bigbench\_salient\_trans\_err\_detec | 0       | MCG    | 39.92 | 1.2     |
| bigbench\_snarks                     | 0       | MCG    | 64.14 | 3.71    |
| bigbench\_sports\_understanding      | 0       | MCG    | 55.31 | 1.59    |
| bigbench\_temporal\_sequences        | 0       | MCG    | 46.92 | 1.4     |
| bigbench\_tsk\_shuff\_objs\_5        | 0       | MCG    | 25.04 | 1.01    |
| bigbench\_tsk\_shuff\_objs\_7        | 0       | MCG    | 15.04 | 0.72    |
| bigbench\_tsk\_shuff\_objs\_3        | 0       | MCG    | 55.33 | 2.75    |

Average: 44.7

### TRUTHFULQA

| Task           | Version | Metric | Value | Std Err |
|----------------|---------|--------|-------|---------|
| truthfulqa\_mc | 1       | mc1    | 30.11 | 1.61    |
| truthfulqa\_mc | 1       | mc2    | 47.69 | 1.61    |

Average: 38.9

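The task names and metrics above (`acc`/`acc_norm`, `mc1`/`mc2`, multiple-choice grade) follow the output format of EleutherAI's lm-evaluation-harness. As a rough sketch of how comparable zero-shot numbers could be obtained — the exact harness fork/version and the model repository are not stated in this section, so both are assumptions or placeholders below — the harness's Python API can be driven like this:

```python
# Reproduction sketch only. Assumes lm-eval-harness >= 0.4; the model id is a
# placeholder, and task names differ between harness versions/forks (e.g. older
# forks expose "truthfulqa_mc" rather than "truthfulqa_mc1"/"truthfulqa_mc2").
# BigBench subtask identifiers vary the most across versions and are omitted here.
from lm_eval import simple_evaluate

results = simple_evaluate(
    model="hf",                               # Hugging Face causal-LM backend
    model_args="pretrained=<model-repo-id>",  # placeholder: substitute the actual model id
    tasks=[
        "agieval_aqua_rat", "agieval_logiqa_en", "agieval_lsat_ar",  # AGIEval subset
        "arc_challenge", "arc_easy", "boolq", "hellaswag",           # GPT4All-style subset
        "openbookqa", "piqa", "winogrande",
        "truthfulqa_mc1", "truthfulqa_mc2",
    ],
    num_fewshot=0,  # zero-shot, as assumed for the tables above
    batch_size=8,
)

# Per-task metrics (acc, acc_norm, stderr, ...) keyed by task name.
for task, metrics in results["results"].items():
    print(task, metrics)
```

Note that the harness's results dict typically reports metrics as fractions in [0, 1]; the tables above show them as percentages.
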
### Training hyperparameters