liushaowei committed · Commit 7264a5e · Parent(s): 2e02d65
update readme
README.md CHANGED
@@ -98,14 +98,14 @@ For our pretrained model (Moonlight-16B-A3B):
 ```python
 from transformers import AutoModelForCausalLM, AutoTokenizer
 
-
+model_name = "moonshotai/Moonlight-16B-A3B"
 model = AutoModelForCausalLM.from_pretrained(
-
+    model_name,
     torch_dtype="auto",
     device_map="auto",
     trust_remote_code=True,
 )
-tokenizer = AutoTokenizer.from_pretrained(
+tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
 
 prompt = "1+1=2, 1+2="
 inputs = tokenizer(prompt, return_tensors="pt", padding=True, truncation=True).to(model.device)

@@ -118,14 +118,14 @@ For our instruct model (Moonlight-16B-A3B-Instruct):
 ```python
 from transformers import AutoModelForCausalLM, AutoTokenizer
 
-
+model_name = "moonshotai/Moonlight-16B-A3B-Instruct"
 model = AutoModelForCausalLM.from_pretrained(
-
+    model_name,
     torch_dtype="auto",
     device_map="auto",
     trust_remote_code=True
 )
-tokenizer = AutoTokenizer.from_pretrained(
+tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
 
 prompt = "Give me a short introduction to large language model."
 messages = [