import os
import asyncio
import random
import re
import json
import logging
from colorama import Fore, Style
from groq import AsyncGroq, RateLimitError
from mistralai import Mistral
from openai import AsyncOpenAI
import traceback
from .web_search import WebSearchHandler

# Configure local logger
logger = logging.getLogger("JadeHeavy")
logger.setLevel(logging.INFO)


class JadeHeavyAgent:
    def __init__(self):
        self.groq_api_key = os.getenv("GROQ_API_KEY")
        self.mistral_api_key = os.getenv("MISTRAL_API_KEY")
        self.openrouter_api_key = os.getenv("OPENROUTER_API_KEY")

        if not self.groq_api_key:
            logger.warning("GROQ_API_KEY not set. Jade Heavy may fail.")

        self.groq_client = AsyncGroq(api_key=self.groq_api_key)

        self.mistral = None
        if self.mistral_api_key:
            self.mistral = Mistral(api_key=self.mistral_api_key)
        else:
            logger.warning("MISTRAL_API_KEY not set. Mistral model will be skipped or substituted.")

        self.openrouter = None
        if self.openrouter_api_key:
            self.openrouter = AsyncOpenAI(
                base_url="https://openrouter.ai/api/v1",
                api_key=self.openrouter_api_key,
            )
        else:
            logger.warning("OPENROUTER_API_KEY not set. Qwen/OpenRouter models will be skipped.")

        # Updated model map for generalist chat
        self.models = {
            "Kimi": "moonshotai/kimi-k2-instruct-0905",  # Groq (logic/reasoning)
            "Mistral": "mistral-large-latest",           # Mistral API
            "Llama": "openai/gpt-oss-120b",              # Groq
            "Qwen": "qwen/qwen3-coder:free",             # OpenRouter (used only if the key exists)
            # Note: The original script used qwen/qwen3-235b... on OpenRouter.
            # If no OpenRouter key is set, the request falls back to a Groq model below.
        }

        # Judge model (Groq is fast and cheap)
        self.judge_id = "moonshotai/kimi-k2-instruct-0905"

        # Web search handler
        self.web_search_handler = WebSearchHandler()

    async def _safe_propose(self, model_name, history_text):
        """Phase 1: Strategic Planning"""
        # Staggered start to avoid rate limits
        delay_map = {"Kimi": 0, "Mistral": 1.0, "Llama": 2.0, "Qwen": 3.0}
        await asyncio.sleep(delay_map.get(model_name, 1) + random.uniform(0.1, 0.5))

        sys_prompt = (
            "You are a Strategic Architect. Create a high-level roadmap to answer the user's request comprehensively.\n"
            "DO NOT write the final response yet. Just plan the structure and key points.\n"
            "FORMAT: 1. [INTENT ANALYSIS] 2. [KEY POINTS] 3. [STRUCTURE PROPOSAL]"
        )
        messages = [{"role": "system", "content": sys_prompt}, {"role": "user", "content": history_text}]

        try:
            content = ""
            if model_name == "Mistral" and self.mistral:
                resp = await self.mistral.chat.complete_async(model=self.models["Mistral"], messages=messages)
                content = resp.choices[0].message.content
            elif model_name == "Qwen" and self.openrouter:
                # Use OpenRouter if available (the larger free Qwen variant for planning)
                resp = await self.openrouter.chat.completions.create(model="qwen/qwen3-235b-a22b:free", messages=messages)
                content = resp.choices[0].message.content
            else:
                # Default to Groq (Kimi, Llama, or fallback for the others).
                # If the Mistral/OpenRouter key is missing, route the request to a Groq-hosted default model.
                target_model = self.models.get(model_name)
                if not target_model or (model_name == "Mistral" and not self.mistral) or (model_name == "Qwen" and not self.openrouter):
                    target_model = "openai/gpt-oss-120b"  # Fallback
                resp = await self.groq_client.chat.completions.create(
                    model=target_model, messages=messages, temperature=0.7
                )
                content = resp.choices[0].message.content

            if content:
                return f"--- {model_name} Plan ---\n{content}"
        except Exception as e:
            logger.error(f"Error in propose ({model_name}): {e}")
            return ""
        return ""

    async def _safe_expand(self, model_name, history_text, strategy):
        """Phase 3: Execution/Expansion"""
        delay_map = {"Kimi": 0, "Mistral": 1.5, "Llama": 3.0, "Qwen": 4.5}
        await asyncio.sleep(delay_map.get(model_name, 1))

        sys_prompt = (
            f"You are a Precision Engine. Execute the following plan to answer the user request:\n\n{strategy}\n\n"
            "Write a detailed, natural, and high-quality response following this plan.\n"
            "Do not output internal reasoning like '[DECOMPOSITION]', just the final response text."
        )
        messages = [{"role": "system", "content": sys_prompt}, {"role": "user", "content": history_text}]

        try:
            content = ""
            if model_name == "Mistral" and self.mistral:
                resp = await self.mistral.chat.complete_async(model=self.models["Mistral"], messages=messages)
                content = resp.choices[0].message.content
            elif model_name == "Qwen" and self.openrouter:
                resp = await self.openrouter.chat.completions.create(model="qwen/qwen3-coder:free", messages=messages)
                content = resp.choices[0].message.content
            else:
                target_model = self.models.get(model_name)
                if not target_model or (model_name == "Mistral" and not self.mistral) or (model_name == "Qwen" and not self.openrouter):
                    target_model = "openai/gpt-oss-120b"
                resp = await self.groq_client.chat.completions.create(
                    model=target_model, messages=messages, temperature=0.7
                )
                content = resp.choices[0].message.content

            if content:
                return f"[{model_name} Draft]:\n{content}"
        except Exception as e:
            logger.error(f"Error in expand ({model_name}): {e}")
            return ""
        return ""

    async def _safe_criticize(self, model_name, draft, original_context):
        """Phase 3.5: Self-Criticism - Each model reviews and improves its own draft"""
        await asyncio.sleep(random.uniform(0.5, 1.5))  # Stagger

        sys_prompt = (
            "You are a Critical Reviewer. You wrote the draft below. Now critically review it.\n"
            "Fix any errors, add missing important information, improve clarity and flow.\n"
            "Return the IMPROVED version of the response. Keep the same general structure.\n"
            "Do not add meta-commentary, just return the improved text."
        )
        user_prompt = f"Original Request Context:\n{original_context}\n\nYour Draft to Improve:\n{draft}"
        messages = [{"role": "system", "content": sys_prompt}, {"role": "user", "content": user_prompt}]

        try:
            content = ""
            if model_name == "Mistral" and self.mistral:
                resp = await self.mistral.chat.complete_async(model=self.models["Mistral"], messages=messages)
                content = resp.choices[0].message.content
            elif model_name == "Qwen" and self.openrouter:
                resp = await self.openrouter.chat.completions.create(model="qwen/qwen3-coder:free", messages=messages)
                content = resp.choices[0].message.content
            else:
                # Same fallback rule as propose/expand: missing provider keys route to Groq.
                target_model = self.models.get(model_name)
                if not target_model or (model_name == "Mistral" and not self.mistral) or (model_name == "Qwen" and not self.openrouter):
                    target_model = "openai/gpt-oss-120b"
                resp = await self.groq_client.chat.completions.create(
                    model=target_model, messages=messages, temperature=0.5
                )
                content = resp.choices[0].message.content

            if content:
                return f"[{model_name} Refined]:\n{content}"
        except Exception as e:
            logger.error(f"Error in criticize ({model_name}): {e}")
            return draft  # Return original draft if criticism fails
        return draft

    async def respond(self, history, user_input, user_id="default", vision_context=None, web_search=False):
        """
        Main entry point for the Heavy Agent.
        History is a list of dicts: [{"role": "user", "content": "..."}, ...]
        """
        # Prepare context
        full_context = ""
        for msg in history[-6:]:  # Limit context to the last few turns to avoid huge prompts
            full_context += f"{msg['role'].upper()}: {msg['content']}\n"

        if vision_context:
            full_context += f"SYSTEM (Vision): {vision_context}\n"

        # --- WEB SEARCH (if enabled) ---
        if web_search and self.web_search_handler.is_available():
            logger.info("Jade Heavy: Performing web search...")
            search_results = self.web_search_handler.search(user_input)
            if search_results:
                full_context = f"[WEB SEARCH RESULTS]\n{search_results}\n\n" + full_context

        full_context += f"USER: {user_input}\n"

        agents = ["Kimi", "Mistral", "Llama", "Qwen"]

        # --- PHASE 1: STRATEGY ---
        logger.info("Jade Heavy: Phase 1 - Planning...")
        tasks = [self._safe_propose(m, full_context) for m in agents]
        results = await asyncio.gather(*tasks)
        valid_strats = [s for s in results if s]

        if not valid_strats:
            return "Failed to generate a plan.", None, history

        # --- PHASE 2: PRUNING (Select Best Plan) ---
        logger.info("Jade Heavy: Phase 2 - Pruning...")
        prune_prompt = (
            f"User Request Context:\n{full_context}\n\nProposed Plans:\n" + "\n".join(valid_strats) +
            "\n\nTASK: SELECT THE SINGLE MOST ROBUST AND HELPFUL PLAN. Return ONLY the content of the best plan."
        )
        try:
            best_strat_resp = await self.groq_client.chat.completions.create(
                model=self.judge_id,
                messages=[{"role": "user", "content": prune_prompt}],
                temperature=0.5
            )
            best_strat = best_strat_resp.choices[0].message.content
        except Exception as e:
            logger.error(f"Pruning failed: {e}")
            best_strat = valid_strats[0]  # Fallback to first plan

        # --- PHASE 3: EXPANSION (Drafting Responses) ---
        logger.info("Jade Heavy: Phase 3 - Expansion...")
        tasks_exp = [self._safe_expand(m, full_context, best_strat) for m in agents]
        results_exp = await asyncio.gather(*tasks_exp)
        valid_sols = [s for s in results_exp if s]

        if not valid_sols:
            return "Failed to generate drafts.", None, history

        # --- PHASE 3.5: SELF-CRITICISM ---
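        # Each drafting agent re-reads its own draft and returns an improved version (see _safe_criticize).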
        logger.info("Jade Heavy: Phase 3.5 - Self-Criticism...")
        # Pair each agent with its own draft; zip against results_exp so the pairing stays
        # aligned even when some agents failed and returned an empty string.
        agent_draft_pairs = [(m, d) for m, d in zip(agents, results_exp) if d]
        tasks_crit = [self._safe_criticize(m, d, full_context) for m, d in agent_draft_pairs]
        results_crit = await asyncio.gather(*tasks_crit)
        refined_sols = [s for s in results_crit if s]

        # Use refined solutions if available, otherwise fall back to the original drafts
        final_drafts = refined_sols if refined_sols else valid_sols

        # --- PHASE 4: VERDICT (Synthesis) ---
        logger.info("Jade Heavy: Phase 4 - Verdict...")
        council_prompt = (
            f"User Request:\n{full_context}\n\nCandidate Responses:\n" + "\n".join(final_drafts) +
            "\n\nTASK: Synthesize the best parts of these drafts into a FINAL, PERFECT RESPONSE. "
            "The response should be natural, helpful, and high-quality. Do not mention the agents or the process."
        )
        final_answer = ""
        try:
            resp = await self.groq_client.chat.completions.create(
                model=self.judge_id,
                messages=[
                    {"role": "system", "content": "You are the Chief Editor."},
                    {"role": "user", "content": council_prompt},
                ],
                temperature=0.5
            )
            final_answer = resp.choices[0].message.content
        except Exception as e:
            logger.error(f"Verdict failed: {e}")
            final_answer = final_drafts[0].split(":\n", 1)[-1] if final_drafts else "Error generating response."

        # Update history
        history.append({"role": "user", "content": user_input})
        history.append({"role": "assistant", "content": final_answer})

        return final_answer, None, history
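

# --- Usage sketch (illustrative only, not called anywhere in this module) ---
# A minimal sketch of how a caller might drive the pipeline, assuming GROQ_API_KEY is set
# (MISTRAL_API_KEY / OPENROUTER_API_KEY are optional) and that this module is imported as
# part of its package, since the relative import of WebSearchHandler prevents running the
# file directly. The helper name `_example_usage` is hypothetical and not part of the agent.
async def _example_usage():
    agent = JadeHeavyAgent()
    history = []
    # respond() returns (final_answer, None, updated_history)
    answer, _, history = await agent.respond(
        history,
        "Compare the trade-offs of the four pipeline phases.",
        web_search=False,
    )
    print(answer)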