# MODELFILE for Qwen3Guard-Stream-8B
# Used by LM Studio, OpenWebUI, etc.
context_length: 8192
embedding: false
f16: cpu
# Prompt template for real-time streaming classification
prompt_template: >-
  AnalyzeStream: {prompt}
# Output format: {"safe": true/false, "categories": [...], "partial": bool, "confidence": float}
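# Illustrative example of a single classification result (category name is hypothetical, not from the model card):
#   {"safe": false, "categories": ["violence"], "partial": false, "confidence": 0.92}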
# Default parameters for reliable streaming classification
temperature: 0.0
top_p: 1.0
top_k: 40
repeat_penalty: 1.0
num_keep: 1
max_tokens: 128
# Stop tokens (optional)
stop: "{"
stop: "}"