dengcao committed
Commit b4c0065 · verified · 1 Parent(s): 751ffc0

Upload folder using huggingface_hub
.gitattributes CHANGED
@@ -1,35 +1,48 @@
- *.7z filter=lfs diff=lfs merge=lfs -text
- *.arrow filter=lfs diff=lfs merge=lfs -text
- *.bin filter=lfs diff=lfs merge=lfs -text
- *.bz2 filter=lfs diff=lfs merge=lfs -text
- *.ckpt filter=lfs diff=lfs merge=lfs -text
- *.ftz filter=lfs diff=lfs merge=lfs -text
- *.gz filter=lfs diff=lfs merge=lfs -text
- *.h5 filter=lfs diff=lfs merge=lfs -text
- *.joblib filter=lfs diff=lfs merge=lfs -text
- *.lfs.* filter=lfs diff=lfs merge=lfs -text
- *.mlmodel filter=lfs diff=lfs merge=lfs -text
- *.model filter=lfs diff=lfs merge=lfs -text
- *.msgpack filter=lfs diff=lfs merge=lfs -text
- *.npy filter=lfs diff=lfs merge=lfs -text
- *.npz filter=lfs diff=lfs merge=lfs -text
- *.onnx filter=lfs diff=lfs merge=lfs -text
- *.ot filter=lfs diff=lfs merge=lfs -text
- *.parquet filter=lfs diff=lfs merge=lfs -text
- *.pb filter=lfs diff=lfs merge=lfs -text
- *.pickle filter=lfs diff=lfs merge=lfs -text
- *.pkl filter=lfs diff=lfs merge=lfs -text
- *.pt filter=lfs diff=lfs merge=lfs -text
- *.pth filter=lfs diff=lfs merge=lfs -text
- *.rar filter=lfs diff=lfs merge=lfs -text
- *.safetensors filter=lfs diff=lfs merge=lfs -text
- saved_model/**/* filter=lfs diff=lfs merge=lfs -text
- *.tar.* filter=lfs diff=lfs merge=lfs -text
- *.tar filter=lfs diff=lfs merge=lfs -text
- *.tflite filter=lfs diff=lfs merge=lfs -text
- *.tgz filter=lfs diff=lfs merge=lfs -text
- *.wasm filter=lfs diff=lfs merge=lfs -text
- *.xz filter=lfs diff=lfs merge=lfs -text
- *.zip filter=lfs diff=lfs merge=lfs -text
- *.zst filter=lfs diff=lfs merge=lfs -text
- *tfevents* filter=lfs diff=lfs merge=lfs -text
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bin.* filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zstandard filter=lfs diff=lfs merge=lfs -text
+ *.tfevents* filter=lfs diff=lfs merge=lfs -text
+ *.db* filter=lfs diff=lfs merge=lfs -text
+ *.ark* filter=lfs diff=lfs merge=lfs -text
+ **/*ckpt*data* filter=lfs diff=lfs merge=lfs -text
+ **/*ckpt*.meta filter=lfs diff=lfs merge=lfs -text
+ **/*ckpt*.index filter=lfs diff=lfs merge=lfs -text
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
+ *.gguf* filter=lfs diff=lfs merge=lfs -text
+ *.ggml filter=lfs diff=lfs merge=lfs -text
+ *.llamafile* filter=lfs diff=lfs merge=lfs -text
+ *.pt2 filter=lfs diff=lfs merge=lfs -text
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
+ *.npy filter=lfs diff=lfs merge=lfs -text
+ *.npz filter=lfs diff=lfs merge=lfs -text
+ *.pickle filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
+ *.tar filter=lfs diff=lfs merge=lfs -text
+ *.wasm filter=lfs diff=lfs merge=lfs -text
+ *.zst filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
+ imatrix_unsloth.dat filter=lfs diff=lfs merge=lfs -text
ERNIE-4.5-21B-A3B-PT-Q4_K_M.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:150e2463d7644d52ce88e116115ed1e664854edc5cbe619134793e72b019ba1d
+ size 13245828608
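
The three lines above are a Git LFS pointer: the actual artifact, a ~13.2 GB Q4_K_M GGUF, lives in LFS storage and is addressed by its SHA-256. As a minimal sketch, downloading it through `huggingface_hub` (the tool named in the commit message) resolves the pointer automatically; note the `repo_id` below is an assumption inferred from the uploader and file name, not confirmed by this page:

```python
from huggingface_hub import hf_hub_download

# Resolves the LFS pointer and fetches the real ~13.2 GB blob into the local cache.
path = hf_hub_download(
    repo_id="dengcao/ERNIE-4.5-21B-A3B-PT-GGUF",  # hypothetical repo id
    filename="ERNIE-4.5-21B-A3B-PT-Q4_K_M.gguf",
)
print(path)  # local path to the downloaded GGUF
```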
README.md CHANGED
@@ -1,3 +1,135 @@
- ---
- license: apache-2.0
- ---
+ ---
+ base_model:
+ - baidu/ERNIE-4.5-21B-A3B-PT
+ license: apache-2.0
+ language:
+ - en
+ - zh
+ pipeline_tag: text-generation
+ tags:
+ - ERNIE4.5
+ - unsloth
+ library_name: transformers
+ ---
+
+ <div align="center" style="line-height: 1;">
+ <a href="https://ernie.baidu.com/" target="_blank" style="margin: 2px;">
+ <img alt="Chat" src="https://img.shields.io/badge/🤖_Chat-ERNIE_Bot-blue" style="display: inline-block; vertical-align: middle;"/>
+ </a>
+ <a href="https://huggingface.co/baidu" target="_blank" style="margin: 2px;">
+ <img alt="Hugging Face" src="https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-Baidu-ffc107?color=ffc107&logoColor=white" style="display: inline-block; vertical-align: middle;"/>
+ </a>
+ <a href="https://github.com/PaddlePaddle/ERNIE" target="_blank" style="margin: 2px;">
+ <img alt="Github" src="https://img.shields.io/badge/GitHub-ERNIE-000?logo=github&color=0000FF" style="display: inline-block; vertical-align: middle;"/>
+ </a>
+ <a href="https://ernie.baidu.com/blog/ernie4.5" target="_blank" style="margin: 2px;">
+ <img alt="Blog" src="https://img.shields.io/badge/🖖_Blog-ERNIE4.5-A020A0" style="display: inline-block; vertical-align: middle;"/>
+ </a>
+ </div>
+
+ <div align="center" style="line-height: 1;">
+ <a href="#license" style="margin: 2px;">
+ <img alt="License" src="https://img.shields.io/badge/License-Apache2.0-A5de54" style="display: inline-block; vertical-align: middle;"/>
+ </a>
+ </div>
+
+ # ERNIE-4.5-21B
+
+ > [!NOTE]
+ > Note: "**-Paddle**" models use [PaddlePaddle](https://github.com/PaddlePaddle/Paddle) weights, while "**-PT**" models use Transformer-style PyTorch weights.
+
+ ## ERNIE 4.5 Highlights
+
+ The advanced capabilities of the ERNIE 4.5 models, particularly the MoE-based A47B and A3B series, are underpinned by several key technical innovations:
+
+ 1. **Multimodal Heterogeneous MoE Pre-Training:** Our models are jointly trained on both textual and visual modalities to better capture the nuances of multimodal information and improve performance on tasks involving text understanding and generation, image understanding, and cross-modal reasoning. To achieve this without one modality hindering the learning of another, we designed a *heterogeneous MoE structure*, incorporated *modality-isolated routing*, and employed *router orthogonal loss* and *multimodal token-balanced loss*. These architectural choices ensure that both modalities are effectively represented, allowing for mutual reinforcement during training.
+ 2. **Scaling-Efficient Infrastructure:** We propose a novel heterogeneous hybrid parallelism and hierarchical load balancing strategy for efficient training of ERNIE 4.5 models. By using intra-node expert parallelism, memory-efficient pipeline scheduling, FP8 mixed-precision training, and fine-grained recomputation methods, we achieve remarkable pre-training throughput. For inference, we propose a *multi-expert parallel collaboration* method and a *convolutional code quantization* algorithm to achieve 4-bit/2-bit lossless quantization. Furthermore, we introduce PD disaggregation with dynamic role switching for effective resource utilization to enhance inference performance for ERNIE 4.5 MoE models. Built on [PaddlePaddle](https://github.com/PaddlePaddle/Paddle), ERNIE 4.5 delivers high-performance inference across a wide range of hardware platforms.
+
+ 3. **Modality-Specific Post-Training:** To meet the diverse requirements of real-world applications, we fine-tuned variants of the pre-trained model for specific modalities. Our LLMs are optimized for general-purpose language understanding and generation. The VLMs focus on visual-language understanding and support both thinking and non-thinking modes. Each model employed a combination of *Supervised Fine-tuning (SFT)*, *Direct Preference Optimization (DPO)*, or a modified reinforcement learning method named *Unified Preference Optimization (UPO)* for post-training.
+
+ ## Model Overview
+
+ ERNIE-4.5-21B-A3B is a text MoE post-trained model with 21B total parameters, of which 3B are activated per token. The model configuration details are as follows:
+
+ | Key | Value |
+ | ---------------------------------- | ------------- |
+ | Modality | Text |
+ | Training Stage | Post-training |
+ | Params (Total / Activated) | 21B / 3B |
+ | Layers | 28 |
+ | Heads (Q / KV) | 20 / 4 |
+ | Text Experts (Total / Activated) | 64 / 6 |
+ | Vision Experts (Total / Activated) | 64 / 6 |
+ | Shared Experts | 2 |
+ | Context Length | 131072 |
+
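+ The two expert rows describe the usual MoE routing pattern: every token activates the top 6 of the 64 routed experts, plus the 2 always-on shared experts. As a minimal, illustrative sketch of that top-k routing step (dimensions and names here are hypothetical, not the model's actual implementation):
+
+ ```python
+ import torch
+
+ def route_tokens(hidden, router_weight, top_k=6):
+     """Select top_k of the routed experts per token (cf. the table above)."""
+     scores = (hidden @ router_weight).softmax(dim=-1)  # [tokens, num_experts]
+     gate, expert_ids = torch.topk(scores, top_k, dim=-1)
+     gate = gate / gate.sum(dim=-1, keepdim=True)       # renormalize the mixture
+     return expert_ids, gate
+
+ hidden = torch.randn(4, 2560)          # 4 tokens; hidden width is hypothetical
+ router_weight = torch.randn(2560, 64)  # 64 routed experts, as in the table
+ expert_ids, gate = route_tokens(hidden, router_weight)
+ print(expert_ids.shape)                # torch.Size([4, 6]) -> 6 experts per token
+ ```
+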
+ ## Quickstart
+
+ ### Using the `transformers` library
+
+ **Note**: Before using the model, please ensure you have the `transformers` library installed (version 4.50.0 or higher).
+
+ The following code snippet illustrates how to use the model to generate content from given inputs.
+
+ ```python
+ from transformers import AutoModelForCausalLM, AutoTokenizer
+
+ model_name = "baidu/ERNIE-4.5-21B-A3B-PT"
+
+ # load the tokenizer and the model
+ tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
+ model = AutoModelForCausalLM.from_pretrained(model_name, trust_remote_code=True)
+
+ # prepare the model input
+ prompt = "Give me a short introduction to large language model."
+ messages = [
+     {"role": "user", "content": prompt}
+ ]
+ text = tokenizer.apply_chat_template(
+     messages,
+     tokenize=False,
+     add_generation_prompt=True
+ )
+ model_inputs = tokenizer([text], add_special_tokens=False, return_tensors="pt").to(model.device)
+
+ # conduct text completion
+ generated_ids = model.generate(
+     model_inputs.input_ids,
+     max_new_tokens=1024
+ )
+ output_ids = generated_ids[0][len(model_inputs.input_ids[0]):].tolist()
+
+ # decode the generated ids
+ generate_text = tokenizer.decode(output_ids, skip_special_tokens=True).strip("\n")
+ print("generate_text:", generate_text)
+ ```
+
+ ### vLLM inference
+
+ Install [vLLM](https://github.com/vllm-project/vllm/tree/main) from GitHub; a Python-only [build](https://docs.vllm.ai/en/latest/getting_started/installation/gpu.html#set-up-using-python-only-build-without-compilation) (without compilation) is sufficient. Then serve the model:
+
+ ```bash
+ vllm serve baidu/ERNIE-4.5-21B-A3B-PT --trust-remote-code
+ ```
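+
+ Once the server is running, it exposes an OpenAI-compatible API (on port 8000 by default). As a minimal sketch of querying it with the `openai` Python client (the placeholder API key is vLLM's convention for an unauthenticated server):
+
+ ```python
+ from openai import OpenAI
+
+ # vLLM's default local endpoint; no real key is required out of the box.
+ client = OpenAI(base_url="http://localhost:8000/v1", api_key="EMPTY")
+ resp = client.chat.completions.create(
+     model="baidu/ERNIE-4.5-21B-A3B-PT",
+     messages=[{"role": "user", "content": "Give me a short introduction to large language models."}],
+ )
+ print(resp.choices[0].message.content)
+ ```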
+
+ ## License
+
+ The ERNIE 4.5 models are provided under the Apache License 2.0. This license permits commercial use, subject to its terms and conditions. Copyright (c) 2025 Baidu, Inc. All Rights Reserved.
+
+ ## Citation
+
+ If you find ERNIE 4.5 useful or wish to use it in your projects, please cite our technical report:
+
+ ```bibtex
+ @misc{ernie2025technicalreport,
+   title={ERNIE 4.5 Technical Report},
+   author={Baidu ERNIE Team},
+   year={2025},
+   eprint={},
+   archivePrefix={arXiv},
+   primaryClass={cs.CL},
+   url={}
+ }
+ ```
configuration.json ADDED
@@ -0,0 +1 @@
+ {"framework":"Pytorch","task":"text-generation"}
imatrix_unsloth.dat ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0ad7d92645df843c07534de2417c94eb21c6012aea1707f96440330da34e3088
+ size 48395091
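
The `imatrix_unsloth.dat` pointer above is importance-matrix data of the kind typically fed to llama.cpp's quantizer when producing quants such as Q4_K_M. For local inference with the quantized GGUF, here is a minimal sketch using the `llama-cpp-python` bindings, assuming the file was downloaded as shown earlier; `n_ctx` is illustrative (the model's full context is 131072):

```python
from llama_cpp import Llama

# Load the Q4_K_M quant; context size kept small here for modest hardware.
llm = Llama(model_path="ERNIE-4.5-21B-A3B-PT-Q4_K_M.gguf", n_ctx=8192)

out = llm.create_chat_completion(
    messages=[{"role": "user", "content": "Introduce ERNIE 4.5 in one sentence."}]
)
print(out["choices"][0]["message"]["content"])
```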