Upload Modelfile
Modelfile (CHANGED)
@@ -1,16 +1,16 @@
 # Modelfile for Ollama (GGUF)
 # Auto-generated using Unsloth's template mapper
-# This uses the
+# This uses the Llama-3.2-1B-Instruct-bnb-4bit-lima-Q4_K_M.gguf quantization
 #
 # Note: You can change the FROM line to use a different quantization
 # Available quantizations in this directory:
-# -
-# -
-# -
-# -
+# - Llama-3.2-1B-Instruct-bnb-4bit-lima-F16.gguf
+# - Llama-3.2-1B-Instruct-bnb-4bit-lima-Q8_0.gguf
+# - Llama-3.2-1B-Instruct-bnb-4bit-lima-Q6_K.gguf
+# - Llama-3.2-1B-Instruct-bnb-4bit-lima-Q4_K_M.gguf


-FROM ./
+FROM ./Llama-3.2-1B-Instruct-bnb-4bit-lima-Q4_K_M.gguf
 TEMPLATE """{{ if .Messages }}
 {{- if or .System .Tools }}<|start_header_id|>system<|end_header_id|>
 {{- if .System }}
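A minimal usage sketch for this Modelfile, assuming it sits in the same directory as the listed GGUF files (the model name llama32-lima is a placeholder, not from the repository):

ollama create llama32-lima -f Modelfile
ollama run llama32-lima

To use a different quantization, point the FROM line at one of the other files listed in the comments, e.g. FROM ./Llama-3.2-1B-Instruct-bnb-4bit-lima-Q8_0.gguf, then re-run ollama create.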