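"""Jade Heavy: a multi-model "council" agent.

Four agents (Kimi, Mistral, Llama, Qwen) run a four-phase pipeline:

1. STRATEGY  - each model proposes a high-level plan.
2. PRUNING   - a judge model (on Groq) selects the single best plan.
3. EXPANSION - each model drafts a response following that plan.
4. VERDICT   - the judge synthesizes the drafts into the final answer.
"""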
import os
import asyncio
import random
import re
import logging

from groq import AsyncGroq, RateLimitError
from mistralai import Mistral
from openai import AsyncOpenAI
# Configure module-local logger
logger = logging.getLogger("JadeHeavy")
logger.setLevel(logging.INFO)

class JadeHeavyAgent:
    def __init__(self):
        self.groq_api_key = os.getenv("GROQ_API_KEY")
        self.mistral_api_key = os.getenv("MISTRAL_API_KEY")
        self.openrouter_api_key = os.getenv("OPENROUTER_API_KEY")

        if not self.groq_api_key:
            logger.warning("GROQ_API_KEY not set. Jade Heavy may fail.")
        self.groq_client = AsyncGroq(api_key=self.groq_api_key)

        self.mistral = None
        if self.mistral_api_key:
            self.mistral = Mistral(api_key=self.mistral_api_key)
        else:
            logger.warning("MISTRAL_API_KEY not set. Mistral model will be skipped or substituted.")

        self.openrouter = None
        if self.openrouter_api_key:
            self.openrouter = AsyncOpenAI(
                base_url="https://openrouter.ai/api/v1",
                api_key=self.openrouter_api_key,
            )
        else:
            logger.warning("OPENROUTER_API_KEY not set. Qwen/OpenRouter models will be skipped.")

        # Model map for generalist chat. Note: the proposal phase uses
        # qwen/qwen3-235b-a22b:free on OpenRouter when a key is available;
        # without an OpenRouter key, Qwen falls back to a Groq-hosted model.
        self.models = {
            "Kimi": "moonshotai/kimi-k2-instruct-0905",  # Groq (logic/reasoning)
            "Mistral": "mistral-large-latest",           # Mistral API
            "Llama": "openai/gpt-oss-120b",              # Groq
            "Qwen": "qwen/qwen3-coder:free",             # OpenRouter
        }

        # Judge model (Groq is fast and cheap)
        self.judge_id = "moonshotai/kimi-k2-instruct-0905"
    async def _safe_propose(self, model_name, history_text):
        """Phase 1: strategic planning."""
        # Stagger the start of each call (plus jitter) to avoid rate limits
        delay_map = {"Kimi": 0, "Mistral": 1.0, "Llama": 2.0, "Qwen": 3.0}
        await asyncio.sleep(delay_map.get(model_name, 1) + random.uniform(0.1, 0.5))

        sys_prompt = (
            "You are a Strategic Architect. Create a high-level roadmap to answer the user's request comprehensively.\n"
            "DO NOT write the final response yet. Just plan the structure and key points.\n"
            "FORMAT: 1. [INTENT ANALYSIS] 2. [KEY POINTS] 3. [STRUCTURE PROPOSAL]"
        )
        messages = [{"role": "system", "content": sys_prompt}, {"role": "user", "content": history_text}]

        try:
            content = ""
            if model_name == "Mistral" and self.mistral:
                resp = await self.mistral.chat.complete_async(model=self.models["Mistral"], messages=messages)
                content = resp.choices[0].message.content
            elif model_name == "Qwen" and self.openrouter:
                # Use the larger free Qwen on OpenRouter when available
                resp = await self.openrouter.chat.completions.create(
                    model="qwen/qwen3-235b-a22b:free", messages=messages
                )
                content = resp.choices[0].message.content
            else:
                # Default to Groq (Kimi, Llama). Mistral/Qwen also land here
                # when their key is missing, so substitute a Groq-hosted
                # model instead of sending a foreign model ID.
                target_model = self.models.get(model_name)
                if not target_model or model_name in ("Mistral", "Qwen"):
                    target_model = "openai/gpt-oss-120b"  # Fallback
                resp = await self.groq_client.chat.completions.create(
                    model=target_model,
                    messages=messages,
                    temperature=0.7,
                )
                content = resp.choices[0].message.content
            if content:
                return f"--- {model_name} Plan ---\n{content}"
        except RateLimitError:
            logger.warning(f"Propose ({model_name}) hit a rate limit; skipping this agent.")
        except Exception as e:
            logger.error(f"Error in propose ({model_name}): {e}")
        return ""
    async def _safe_expand(self, model_name, history_text, strategy):
        """Phase 3: execution/expansion."""
        delay_map = {"Kimi": 0, "Mistral": 1.5, "Llama": 3.0, "Qwen": 4.5}
        await asyncio.sleep(delay_map.get(model_name, 1))

        sys_prompt = (
            f"You are a Precision Engine. Execute the following plan to answer the user request:\n\n{strategy}\n\n"
            "Write a detailed, natural, and high-quality response following this plan.\n"
            "Do not output internal reasoning like '[DECOMPOSITION]', just the final response text."
        )
        messages = [{"role": "system", "content": sys_prompt}, {"role": "user", "content": history_text}]

        try:
            content = ""
            if model_name == "Mistral" and self.mistral:
                resp = await self.mistral.chat.complete_async(model=self.models["Mistral"], messages=messages)
                content = resp.choices[0].message.content
            elif model_name == "Qwen" and self.openrouter:
                resp = await self.openrouter.chat.completions.create(
                    model="qwen/qwen3-coder:free", messages=messages
                )
                content = resp.choices[0].message.content
            else:
                target_model = self.models.get(model_name)
                if not target_model or model_name in ("Mistral", "Qwen"):
                    target_model = "openai/gpt-oss-120b"  # Fallback to Groq
                resp = await self.groq_client.chat.completions.create(
                    model=target_model,
                    messages=messages,
                    temperature=0.7,
                )
                content = resp.choices[0].message.content
            if content:
                return f"[{model_name} Draft]:\n{content}"
        except RateLimitError:
            logger.warning(f"Expand ({model_name}) hit a rate limit; skipping this agent.")
        except Exception as e:
            logger.error(f"Error in expand ({model_name}): {e}")
        return ""
    async def respond(self, history, user_input, user_id="default", vision_context=None):
        """
        Main entry point for the Heavy Agent.

        `history` is a list of dicts: [{"role": "user", "content": "..."}, ...]
        Returns (final_answer, audio_path, updated_history).
        """
        # Prepare context; limit to the last few turns to avoid huge prompts
        full_context = ""
        for msg in history[-6:]:
            full_context += f"{msg['role'].upper()}: {msg['content']}\n"
        if vision_context:
            full_context += f"SYSTEM (Vision): {vision_context}\n"
        full_context += f"USER: {user_input}\n"

        agents = ["Kimi", "Mistral", "Llama", "Qwen"]

        # --- PHASE 1: STRATEGY ---
        logger.info("Jade Heavy: Phase 1 - Planning...")
        tasks = [self._safe_propose(m, full_context) for m in agents]
        results = await asyncio.gather(*tasks)
        valid_strats = [s for s in results if s]
        if not valid_strats:
            return "Failed to generate a plan.", None, history

        # --- PHASE 2: PRUNING (select the best plan) ---
        logger.info("Jade Heavy: Phase 2 - Pruning...")
        prune_prompt = (
            f"User Request Context:\n{full_context}\n\nProposed Plans:\n"
            + "\n".join(valid_strats)
            + "\n\nTASK: SELECT THE SINGLE MOST ROBUST AND HELPFUL PLAN. Return ONLY the content of the best plan."
        )
        try:
            best_strat_resp = await self.groq_client.chat.completions.create(
                model=self.judge_id,
                messages=[{"role": "user", "content": prune_prompt}],
                temperature=0.5,
            )
            best_strat = best_strat_resp.choices[0].message.content
        except Exception as e:
            logger.error(f"Pruning failed: {e}")
            best_strat = valid_strats[0]  # Fallback to the first plan

        # --- PHASE 3: EXPANSION (drafting responses) ---
        logger.info("Jade Heavy: Phase 3 - Expansion...")
        tasks_exp = [self._safe_expand(m, full_context, best_strat) for m in agents]
        results_exp = await asyncio.gather(*tasks_exp)
        valid_sols = [s for s in results_exp if s]
        if not valid_sols:
            return "Failed to generate drafts.", None, history

        # --- PHASE 4: VERDICT (synthesis) ---
        logger.info("Jade Heavy: Phase 4 - Verdict...")
        council_prompt = (
            f"User Request:\n{full_context}\n\nCandidate Responses:\n"
            + "\n".join(valid_sols)
            + "\n\nTASK: Synthesize the best parts of these drafts into a FINAL, PERFECT RESPONSE. "
            "The response should be natural, helpful, and high-quality. Do not mention the agents or the process."
        )
        final_answer = ""
        try:
            resp = await self.groq_client.chat.completions.create(
                model=self.judge_id,
                messages=[
                    {"role": "system", "content": "You are the Chief Editor."},
                    {"role": "user", "content": council_prompt},
                ],
                temperature=0.5,
            )
            final_answer = resp.choices[0].message.content
        except Exception as e:
            logger.error(f"Verdict failed: {e}")
            # Fallback: first draft, with its "[Name Draft]:" prefix stripped.
            # (valid_sols[0] is not necessarily from agents[0], so match any name.)
            final_answer = re.sub(r"^\[\w+ Draft\]:\n", "", valid_sols[0])

        # Update history
        history.append({"role": "user", "content": user_input})
        history.append({"role": "assistant", "content": final_answer})

        # TTS is skipped in Heavy mode for speed; the regular JadeAgent
        # handles audio, so return None as the audio path for now.
        return final_answer, None, history
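

# Minimal usage sketch (illustrative, not part of the original module).
# Assumes GROQ_API_KEY (and optionally MISTRAL_API_KEY / OPENROUTER_API_KEY)
# are set in the environment; the question below is a hypothetical example.
if __name__ == "__main__":
    async def _demo():
        agent = JadeHeavyAgent()
        answer, _audio, _history = await agent.respond(
            history=[], user_input="Explain how a hash map works."
        )
        print(answer)

    asyncio.run(_demo())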