Qwen-3-4b-ultimate-reasoning / mergekit_config.yml
ertghiu256's picture
Upload folder using huggingface_hub
fd4b150 verified
raw
history blame contribute delete
654 Bytes
---
# mergekit configuration: linear (weighted-average) merge of four Qwen3-4B
# reasoning fine-tunes across all 36 transformer layers.
base_model: ertghiu256/qwen3-multi-reasoner
dtype: float16
merge_method: linear
modules:
  default:
    slices:
      # Single slice covering layers [0, 36) of every source model.
      - sources:
          - layer_range: [0, 36]
            model: ertghiu256/qwen3-multi-reasoner
            parameters:
              weight: 0.7
          - layer_range: [0, 36]
            model: ertghiu256/qwen-3-4b-mixture-of-thought
            parameters:
              weight: 0.9
          - layer_range: [0, 36]
            model: ertghiu256/qwen3-4b-code-reasoning
            parameters:
              weight: 0.8
          - layer_range: [0, 36]
            model: ertghiu256/qwen3-math-reasoner
            parameters:
              weight: 0.6
# Global merge parameters.
parameters:
  # NOTE(review): int8_mask is a ties/dare-family option; presumably a no-op
  # under merge_method: linear — confirm against mergekit docs before relying on it.
  int8_mask: 1.0
  # normalize rescales the weights (0.7+0.9+0.8+0.6) so they sum to 1.
  normalize: 1.0