Update README.md
Browse files

README.md — CHANGED

@@ -34,7 +34,7 @@ Both the combined model and adapter weights are available.

 The combined model can be loaded and used right out of the box:
 ```
-BASE_MODEL = "…(truncated in scrape)…"
+BASE_MODEL = "starfishmedical/SFDocumentOracle-open_llama_7b_700bt_lora"

 model = LlamaForCausalLM.from_pretrained(
     BASE_MODEL,

@@ -55,7 +55,7 @@ bmodel = LlamaForCausalLM.from_pretrained(
     device_map="sequential"
 )

-peft_model_id = "starfishmedical/SFDocumentOracle-…(truncated in scrape)…"
+peft_model_id = "starfishmedical/SFDocumentOracle-open_llama_7b_700bt_lora"
 tokenizer = LlamaTokenizer.from_pretrained(peft_model_id)

 model = PeftModel.from_pretrained(bmodel, peft_model_id)