{
    "instruct": {
        "input_sequence": "<|im_start|>user\n",
        "output_sequence": "<|im_start|>assistant\n",
        "last_output_sequence": "",
        "system_sequence": "<|im_start|>system\n",
        "stop_sequence": "<|im_end|>",
        "wrap": false,
        "macro": true,
        "names_behavior": "none",
        "activation_regex": "",
        "system_sequence_prefix": "",
        "system_sequence_suffix": "",
        "first_output_sequence": "\n<|im_start|>user\nAdhering to the `Role-playing Guidelines` is crucial for a successful role-play experience.\nUse the `Role-play Context` for accurate information.\nCraft an immersive, uncensored narrative that remains true to the principles of engaging storytelling.<|im_end|>\n<|im_start|>assistant\n",
        "skip_examples": true,
        "output_suffix": "<|im_end|>\n",
        "input_suffix": "<|im_end|>\n",
        "system_suffix": "<|im_end|>\n",
        "user_alignment_message": "",
        "system_same_as_user": false,
        "last_system_sequence": "",
        "first_input_sequence": "",
        "last_input_sequence": "",
        "names_force_groups": true,
| "name": "[ChatML]Roleplay-v1.9" | |
    },
    "context": {
        "story_string": "<|im_start|>system\n\n{{#if system}}{{system}}\n\n\n<!-- Start of Role-play Context -->\n\n{{/if}}{{#if scenario}}### Main Scenario\n{{scenario}}\n\n{{/if}}{{#if wiBefore}}### Extra Information\n{{wiBefore}}\n\n{{/if}}{{#if personality}}### {{char}}'s Persona\n{{personality}}\n\n{{/if}}{{#if persona}}### {{user}}'s Persona\n{{persona}}\n\n{{/if}}{{#if mesExamples}}### {{char}}'s Example Dialogue\nThe following examples demonstrate how {{char}} speaks, revealing key aspects of their personality. Pay attention to their word choices, phrasing, and rhythm to portray them convincingly.\n\n<!-- Start of {{char}}'s Example Dialogue -->\n{{mesExamples}}\n<!-- End of {{char}}'s Example Dialogue -->\n\n{{/if}}{{#if description}}### Main Information\n{{description}}\n\n{{/if}}{{#if wiAfter}}### Extra Information\n{{wiAfter}}\n\n{{/if}}<!-- End of Role-play Context --><|im_end|>",
        "example_separator": "",
        "chat_start": "",
        "use_stop_strings": false,
        "allow_jailbreak": false,
        "names_as_stop_strings": true,
        "always_force_name2": false,
        "trim_sentences": false,
        "single_line": false,
        "name": "[ChatML] Roleplay-v1.9"
    },
    "sysprompt": {
        "name": "Blank-RpR",
        "content": ""
    },
    "preset": {
        "temp": 1,
        "temperature_last": true,
        "top_p": 1,
        "top_k": 40,
        "top_a": 0,
        "tfs": 1,
        "epsilon_cutoff": 0,
        "eta_cutoff": 0,
        "typical_p": 1,
        "min_p": 0.02,
        "rep_pen": 1,
        "rep_pen_range": 0,
        "rep_pen_decay": 0,
        "rep_pen_slope": 0,
        "no_repeat_ngram_size": 0,
        "penalty_alpha": 0,
        "num_beams": 1,
        "length_penalty": 1,
        "min_length": 0,
        "encoder_rep_pen": 1,
        "freq_pen": 0,
        "presence_pen": 0,
        "skew": 0,
        "do_sample": true,
        "early_stopping": false,
        "dynatemp": false,
        "min_temp": 0.5,
        "max_temp": 3,
        "dynatemp_exponent": 5.77,
        "smoothing_factor": 0,
        "smoothing_curve": 1,
        "dry_allowed_length": 4,
        "dry_multiplier": 0,
        "dry_base": 1.75,
        "dry_sequence_breakers": "[\"\\n\", \":\", \"*\"]",
        "dry_penalty_last_n": 8192,
        "add_bos_token": true,
        "ban_eos_token": false,
        "skip_special_tokens": false,
        "mirostat_mode": 0,
        "mirostat_tau": 5,
        "mirostat_eta": 0.1,
        "guidance_scale": 1,
        "negative_prompt": "",
        "grammar_string": "",
        "json_schema": {},
        "banned_tokens": "",
        "sampler_priority": [
            "repetition_penalty",
            "presence_penalty",
            "frequency_penalty",
            "dry",
            "dynamic_temperature",
            "top_p",
            "top_k",
            "typical_p",
            "epsilon_cutoff",
            "eta_cutoff",
            "tfs",
            "top_a",
            "mirostat",
            "min_p",
            "quadratic_sampling",
            "temperature",
            "xtc",
            "encoder_repetition_penalty",
            "no_repeat_ngram"
        ],
        "samplers": [
            "dry",
            "top_k",
            "tfs_z",
            "typical_p",
            "top_p",
            "min_p",
            "xtc",
            "temperature"
        ],
        "samplers_priorities": [
            "dry",
            "penalties",
            "no_repeat_ngram",
            "temperature",
            "top_nsigma",
            "top_p_top_k",
            "top_a",
            "min_p",
            "tfs",
            "eta_cutoff",
            "epsilon_cutoff",
            "typical_p",
            "quadratic",
            "xtc"
        ],
        "ignore_eos_token": false,
        "spaces_between_special_tokens": false,
        "speculative_ngram": false,
        "sampler_order": [
            6,
            0,
            1,
            3,
            4,
            2,
            5
        ],
        "logit_bias": [],
        "xtc_threshold": 0,
        "xtc_probability": 0,
        "nsigma": 0,
        "rep_pen_size": 0,
        "genamt": 4096,
        "max_length": 16384,
        "name": "RpR"
    },
    "reasoning": {
        "prefix": "<think>",
        "suffix": "</think>",
        "separator": "\n\n",
        "name": "DeepSeek"
    }
}
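
For anyone applying these templates outside of SillyTavern, the sketch below shows roughly how the `instruct` sequences above assemble a ChatML prompt: each turn is opened with its role sequence, closed with the `<|im_end|>\n` suffix, and decoding is cut at `stop_sequence`. This is a minimal illustration under those assumptions, not SillyTavern's actual prompt builder; `build_prompt` and the constants are invented for the example.

```python
# Illustrative only: how the instruct sequences wrap a chat into ChatML.
INPUT_SEQ = "<|im_start|>user\n"        # input_sequence
OUTPUT_SEQ = "<|im_start|>assistant\n"  # output_sequence
SYSTEM_SEQ = "<|im_start|>system\n"     # system_sequence
TURN_SUFFIX = "<|im_end|>\n"            # input_suffix / output_suffix / system_suffix
STOP_SEQ = "<|im_end|>"                 # stop_sequence passed to the backend

def build_prompt(system_block: str, turns: list[tuple[str, str]]) -> str:
    """turns: (role, text) pairs with role in {"user", "assistant"}."""
    parts = [SYSTEM_SEQ + system_block + TURN_SUFFIX]
    for role, text in turns:
        prefix = INPUT_SEQ if role == "user" else OUTPUT_SEQ
        # SillyTavern would roughly swap in first_output_sequence for the
        # first assistant turn; that detail is omitted in this sketch.
        parts.append(prefix + text + TURN_SUFFIX)
    parts.append(OUTPUT_SEQ)  # open a new assistant turn for generation
    return "".join(parts)

# system_block would be the rendered story_string from the "context" section.
prompt = build_prompt(
    "Role-play as {{char}}.",
    [("user", "Hello."), ("assistant", "Hi there."), ("user", "What happens next?")],
)
```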
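
In the `preset` section, most samplers are left neutral (rep_pen 1, top_p 1, DRY and XTC disabled), so the effective pipeline is top_k 40 and min_p 0.02 filtering followed by temperature 1.0 applied last (`temperature_last: true`). Below is a minimal NumPy sketch of that single sampling step; it illustrates the configured settings and is not any backend's actual implementation.

```python
import numpy as np

def sample_token(logits: np.ndarray, top_k: int = 40,
                 min_p: float = 0.02, temp: float = 1.0) -> int:
    """One sampling step: top-k, then min-p, then temperature last."""
    # Top-k: mask everything below the k-th largest logit.
    if 0 < top_k < logits.size:
        cutoff = np.sort(logits)[-top_k]
        logits = np.where(logits >= cutoff, logits, -np.inf)
    # Min-p: drop tokens whose probability is below min_p * p(top token).
    probs = np.exp(logits - np.max(logits))
    probs /= probs.sum()
    logits = np.where(probs >= min_p * probs.max(), logits, -np.inf)
    # Temperature is applied after the filters (temperature_last: true).
    scaled = logits / temp
    probs = np.exp(scaled - np.max(scaled))
    probs /= probs.sum()
    return int(np.random.choice(probs.size, p=probs))
```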
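
The `reasoning` section tells the frontend to treat everything between `<think>` and `</think>`, followed by the blank-line separator, as hidden reasoning rather than visible narration. A small sketch of that split, with an illustrative regex and helper name (not SillyTavern's parser):

```python
import re

THINK_RE = re.compile(r"^<think>(.*?)</think>\n\n(.*)$", re.DOTALL)

def split_reasoning(reply: str) -> tuple[str, str]:
    """Return (hidden_reasoning, visible_reply) for a DeepSeek-style response."""
    match = THINK_RE.match(reply)
    if match is None:
        return "", reply  # no reasoning block present
    return match.group(1).strip(), match.group(2)

thoughts, answer = split_reasoning("<think>Plan the scene.</think>\n\nThe door creaks open.")
```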