```yaml
slices:
  - sources:
      - model: Sao10K/L3-8B-Stheno-v3.2
        layer_range: [0, 32]
      - model: princeton-nlp/Llama-3-Instruct-8B-SimPO
        layer_range: [0, 32]
merge_method: slerp
base_model: Sao10K/L3-8B-Stheno-v3.2
parameters:
  t:
    - filter: self_attn
      value: [0.2, 0.4, 0.4, 0.6]
    - filter: mlp
      value: [0.8, 0.6, 0.6, 0.4]
    - value: 0.4
dtype: bfloat16
```
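For reference, a list-valued `t` acts as a gradient across the 32 layers: the `self_attn` tensors ramp from roughly 0.2 near the input layers to 0.6 near the output, the `mlp` tensors run the opposite way, and all remaining tensors use the flat default of 0.4 (with `t` close to 0 staying near the base model and close to 1 leaning toward SimPO). The sketch below only illustrates that scheme and is not mergekit's implementation; the `gradient_value` and `slerp` helpers and the tensor shapes are assumptions made for the example.

```python
import numpy as np

def gradient_value(anchors, layer_idx, num_layers):
    """Spread a list of anchor values across the layer stack by linear
    interpolation (illustrative stand-in for a list-valued `t`)."""
    if len(anchors) == 1:
        return anchors[0]
    pos = layer_idx / max(num_layers - 1, 1) * (len(anchors) - 1)
    lo = int(np.floor(pos))
    hi = min(lo + 1, len(anchors) - 1)
    frac = pos - lo
    return anchors[lo] * (1 - frac) + anchors[hi] * frac

def slerp(t, v0, v1, eps=1e-8):
    """Spherical linear interpolation between two flattened weight tensors;
    falls back to plain lerp when the vectors are nearly parallel."""
    v0_n = v0 / (np.linalg.norm(v0) + eps)
    v1_n = v1 / (np.linalg.norm(v1) + eps)
    dot = np.clip(np.dot(v0_n, v1_n), -1.0, 1.0)
    if abs(dot) > 0.9995:
        return (1 - t) * v0 + t * v1
    theta = np.arccos(dot)
    return (np.sin((1 - t) * theta) * v0 + np.sin(t * theta) * v1) / np.sin(theta)

# Example: effective t for the self_attn tensors of layer 10 of 32,
# using the [0.2, 0.4, 0.4, 0.6] gradient from the config above.
t_attn = gradient_value([0.2, 0.4, 0.4, 0.6], layer_idx=10, num_layers=32)
w_base = np.random.randn(4096).astype(np.float32)   # stand-in for a Stheno tensor
w_other = np.random.randn(4096).astype(np.float32)  # stand-in for a SimPO tensor
merged = slerp(t_attn, w_base, w_other)
print(f"t for layer 10 self_attn: {t_attn:.3f}")
```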