Madras1 committed on
Commit
23d2c72
·
verified ·
1 Parent(s): 98438f3

Upload 12 files

Browse files
Files changed (4) hide show
  1. app.py +2 -1
  2. jade/config.json +7 -7
  3. jade/core.py +168 -168
  4. jade/heavy_mode.py +63 -9
app.py CHANGED
@@ -95,7 +95,8 @@ async def handle_chat(request: UserRequest):
95
  history=current_history,
96
  user_input=final_user_input,
97
  user_id=user_id,
98
- vision_context=vision_context
 
99
  )
100
  user_sessions[user_id]["heavy"] = updated_history
101
 
 
95
  history=current_history,
96
  user_input=final_user_input,
97
  user_id=user_id,
98
+ vision_context=vision_context,
99
+ web_search=request.web_search # Passa web search para Heavy Mode
100
  )
101
  user_sessions[user_id]["heavy"] = updated_history
102
 
jade/config.json CHANGED
@@ -1,8 +1,8 @@
1
- {
2
- "groq_model": "moonshotai/kimi-k2-instruct-0905",
3
- "audio_model": "whisper-large-v3",
4
- "caption_model": "microsoft/Florence-2-base-ft",
5
- "max_context": 12,
6
- "language": "pt",
7
- "local_mode": false
8
  }
 
1
+ {
2
+ "groq_model": "meta-llama/llama-4-maverick-17b-128e-instruct",
3
+ "audio_model": "whisper-large-v3",
4
+ "caption_model": "microsoft/Florence-2-base-ft",
5
+ "max_context": 12,
6
+ "language": "pt",
7
+ "local_mode": false
8
  }
jade/core.py CHANGED
@@ -1,168 +1,168 @@
1
- import json
2
- import logging
3
- import os
4
- import sys
5
- import time
6
- import uuid
7
-
8
- from groq import Groq
9
-
10
- # Importa nossos módulos customizados
11
- from .handlers import ImageHandler
12
- from .tts import TTSPlayer
13
- from .utils import slim_history
14
- from .shorestone import ShoreStoneMemory
15
- from .curator_heuristic import MemoryCuratorHeuristic
16
- from .web_search import WebSearchHandler
17
-
18
- # Configura o logger principal
19
- logging.basicConfig(level=logging.INFO, format="%(asctime)s - JADE - %(levelname)s - %(message)s")
20
-
21
- class JadeAgent:
22
- def __init__(self, config_path="jade/config.json"):
23
- # Carrega configurações
24
- # Try to load from absolute path first, then relative
25
- try:
26
- with open(config_path) as f:
27
- self.cfg = json.load(f)
28
- except FileNotFoundError:
29
- # Fallback: try to find it relative to this file
30
- base_dir = os.path.dirname(os.path.abspath(__file__))
31
- config_path = os.path.join(base_dir, "config.json")
32
- with open(config_path) as f:
33
- self.cfg = json.load(f)
34
-
35
- # --- Configuração da API Groq ---
36
- logging.info("Iniciando J.A.D.E. em modo API (Groq)...")
37
- self.api_key = self._get_api_key()
38
- self.client = Groq(api_key=self.api_key)
39
- self.model_name = self.cfg.get("groq_model", "moonshotai/kimi-k2-instruct-0905")
40
-
41
- # System Prompt Base
42
- self.system_prompt = {"role": "system", "content": "Você é J.A.D.E., uma IA multimodal calma e inteligente. Seja direta. Responda de forma concisa e natural. NÃO explique seu processo de pensamento. Apenas responda à pergunta."}
43
-
44
- # --- Inicialização dos Módulos ---
45
- logging.info("Carregando módulos de percepção e memória...")
46
-
47
- # Visão e Fala
48
- self.image_handler = ImageHandler(self.cfg.get("caption_model", "Salesforce/blip-image-captioning-large"))
49
- self.tts = TTSPlayer(lang=self.cfg.get("language", "pt"))
50
-
51
- # 1. Memória ShoreStone (Persistente)
52
- self.memory = ShoreStoneMemory()
53
- # Inicializa com sessão padrão, mas será trocada dinamicamente no respond()
54
- self.memory.load_or_create_session("sessao_padrao_gabriel")
55
-
56
- # 2. Curador Heurístico (Manutenção Automática)
57
- self.curator = MemoryCuratorHeuristic(shorestone_memory=self.memory)
58
- self.response_count = 0
59
- self.maintenance_interval = 10 # Executar a manutenção a cada 10 interações
60
-
61
- # 3. Web Search (Tavily)
62
- self.web_search_handler = WebSearchHandler()
63
-
64
- logging.info(f"J.A.D.E. pronta e conectada ao modelo {self.model_name}.")
65
-
66
- def _get_api_key(self):
67
- """Recupera a chave da API do ambiente de forma segura."""
68
- key = os.getenv("GROQ_API_KEY")
69
- if not key:
70
- logging.error("Chave GROQ_API_KEY não encontrada nas variáveis de ambiente.")
71
- # For development, try to warn but not crash if possible, but Groq needs it.
72
- # raise RuntimeError("❌ GROQ_API_KEY não encontrada. Defina a variável de ambiente.")
73
- print("WARNING: GROQ_API_KEY not found.")
74
- return key
75
-
76
- def _chat(self, messages):
77
- """Envia as mensagens para a Groq e retorna a resposta."""
78
- try:
79
- chat = self.client.chat.completions.create(
80
- messages=messages,
81
- model=self.model_name,
82
- temperature=0.7, # Criatividade balanceada
83
- max_tokens=1024 # Limite de resposta razoável
84
- )
85
- return chat.choices[0].message.content.strip()
86
- except Exception as e:
87
- logging.error(f"Erro na comunicação com a Groq: {e}")
88
- return "Desculpe, tive um problema ao me conectar com meu cérebro na nuvem."
89
-
90
- def respond(self, history, user_input, user_id="default", vision_context=None, web_search=False, thinking_mode=False):
91
- """Processo principal de raciocínio: Buscar -> Lembrar -> Ver -> Pensar -> Responder -> Memorizar -> Manter."""
92
-
93
- # TROCA A SESSÃO DA MEMÓRIA PARA O USUÁRIO ATUAL
94
- session_name = f"user_{user_id}"
95
- self.memory.load_or_create_session(session_name)
96
-
97
- messages = history[:]
98
-
99
- # 0. Thinking Mode - Adiciona instrução de CoT
100
- if thinking_mode:
101
- thinking_prompt = {
102
- "role": "system",
103
- "content": """MODO THINKING ATIVADO: Antes de dar sua resposta final, pense passo a passo.
104
- Coloque todo seu raciocínio dentro de tags <thinking>...</thinking>.
105
- Após fechar a tag </thinking>, dê sua resposta final de forma clara e direta.
106
- Exemplo:
107
- <thinking>
108
- 1. Primeiro, vou analisar...
109
- 2. Considerando que...
110
- 3. Portanto...
111
- </thinking>
112
-
113
- [Sua resposta final aqui]"""
114
- }
115
- messages.append(thinking_prompt)
116
-
117
- # 0. Buscar na Web (se habilitado)
118
- if web_search and self.web_search_handler.is_available():
119
- search_results = self.web_search_handler.search(user_input)
120
- if search_results:
121
- search_context = f"--- RESULTADOS DA BUSCA WEB ---\n{search_results}\n--- FIM DA BUSCA ---"
122
- messages.append({"role": "system", "content": search_context})
123
-
124
- # 1. Lembrar (Recuperação de Contexto)
125
- memories = self.memory.remember(user_input)
126
- if memories:
127
- memory_context = f"--- MEMÓRIAS RELEVANTES (ShoreStone) ---\n{memories}\n--- FIM DAS MEMÓRIAS ---"
128
- # Inserimos as memórias como contexto de sistema para guiar a resposta
129
- messages.append({"role": "system", "content": memory_context})
130
-
131
- # 2. Ver (Contexto Visual)
132
- if vision_context:
133
- messages.append({"role": "system", "content": f"Contexto visual da imagem que o usuário enviou: {vision_context}"})
134
-
135
- # Adiciona a pergunta atual ao histórico temporário e ao prompt
136
- history.append({"role": "user", "content": user_input})
137
- messages.append({"role": "user", "content": user_input})
138
-
139
- # 3. Responder (Geração)
140
- resposta = self._chat(messages)
141
-
142
- # Atualiza histórico
143
- history.append({"role": "assistant", "content": resposta})
144
- history = slim_history(history, keep=self.cfg.get("max_context", 12))
145
-
146
- # 4. Memorizar (Armazenamento Persistente)
147
- self.memory.memorize(user_input, resposta)
148
-
149
- print(f"\n🤖 J.A.D.E.: {resposta}")
150
-
151
- # Falar (TTS) - Modified for Backend compatibility
152
- audio_path = None
153
- try:
154
- # Uses the TTSPlayer from tts.py which has save_audio_to_file
155
- audio_path = self.tts.save_audio_to_file(resposta)
156
- except Exception as e:
157
- logging.warning(f"TTS falhou (silenciado): {e}")
158
-
159
- # 5. Manter (Ciclo de Curadoria Automática)
160
- self.response_count += 1
161
- if self.response_count % self.maintenance_interval == 0:
162
- logging.info(f"Ciclo de manutenção agendado (interação {self.response_count}). Verificando saúde da memória...")
163
- try:
164
- self.curator.run_maintenance_cycle()
165
- except Exception as e:
166
- logging.error(f"Erro no Curador de Memória: {e}")
167
-
168
- return resposta, audio_path, history
 
1
+ import json
2
+ import logging
3
+ import os
4
+ import sys
5
+ import time
6
+ import uuid
7
+
8
+ from groq import Groq
9
+
10
+ # Importa nossos módulos customizados
11
+ from .handlers import ImageHandler
12
+ from .tts import TTSPlayer
13
+ from .utils import slim_history
14
+ from .shorestone import ShoreStoneMemory
15
+ from .curator_heuristic import MemoryCuratorHeuristic
16
+ from .web_search import WebSearchHandler
17
+
18
+ # Configura o logger principal
19
+ logging.basicConfig(level=logging.INFO, format="%(asctime)s - JADE - %(levelname)s - %(message)s")
20
+
21
class JadeAgent:
    """Groq-backed multimodal agent.

    Wires together vision captioning, TTS, persistent ShoreStone memory,
    heuristic memory curation, and optional Tavily web search, and exposes a
    single `respond()` entry point used by the backend.
    """

    def __init__(self, config_path="jade/config.json"):
        # Load configuration: try the given (CWD-relative) path first, then
        # fall back to a config.json located next to this module.
        try:
            with open(config_path) as f:
                self.cfg = json.load(f)
        except FileNotFoundError:
            base_dir = os.path.dirname(os.path.abspath(__file__))
            config_path = os.path.join(base_dir, "config.json")
            with open(config_path) as f:
                self.cfg = json.load(f)

        # --- Groq API configuration ---
        logging.info("Iniciando J.A.D.E. em modo API (Groq)...")
        self.api_key = self._get_api_key()
        self.client = Groq(api_key=self.api_key)
        self.model_name = self.cfg.get("groq_model", "meta-llama/llama-4-maverick-17b-128e-instruct")

        # Base system prompt.
        # NOTE(review): this prompt is never appended to `messages` inside
        # respond() — presumably the caller prepends it to `history`; confirm.
        self.system_prompt = {"role": "system", "content": "Você é J.A.D.E., uma IA multimodal calma e inteligente. Seja direta. Responda de forma concisa e natural. NÃO explique seu processo de pensamento. Apenas responda à pergunta."}

        # --- Module initialisation ---
        logging.info("Carregando módulos de percepção e memória...")

        # Vision and speech.
        self.image_handler = ImageHandler(self.cfg.get("caption_model", "Salesforce/blip-image-captioning-large"))
        self.tts = TTSPlayer(lang=self.cfg.get("language", "pt"))

        # 1. ShoreStone memory (persistent). Starts on a default session;
        #    respond() switches to a per-user session on every call.
        self.memory = ShoreStoneMemory()
        self.memory.load_or_create_session("sessao_padrao_gabriel")

        # 2. Heuristic curator: automatic memory maintenance every
        #    `maintenance_interval` interactions.
        self.curator = MemoryCuratorHeuristic(shorestone_memory=self.memory)
        self.response_count = 0
        self.maintenance_interval = 10  # run maintenance every 10 interactions

        # 3. Web search (Tavily).
        self.web_search_handler = WebSearchHandler()

        logging.info(f"J.A.D.E. pronta e conectada ao modelo {self.model_name}.")

    def _get_api_key(self):
        """Safely retrieve the Groq API key from the environment.

        Returns the key string, or None when GROQ_API_KEY is unset. We warn
        instead of raising so development setups can still import the agent;
        actual Groq calls will fail later without a key.
        """
        key = os.getenv("GROQ_API_KEY")
        if not key:
            logging.error("Chave GROQ_API_KEY não encontrada nas variáveis de ambiente.")
            # Use the module logger (not a bare print) for consistency with
            # every other diagnostic in this class.
            logging.warning("GROQ_API_KEY not found.")
        return key

    def _chat(self, messages):
        """Send `messages` to Groq chat completions and return the reply text.

        On any API error, logs it and returns a friendly Portuguese fallback
        sentence instead of raising.
        """
        try:
            chat = self.client.chat.completions.create(
                messages=messages,
                model=self.model_name,
                temperature=0.7,  # balanced creativity
                max_tokens=1024,  # reasonable response cap
            )
            return chat.choices[0].message.content.strip()
        except Exception as e:
            logging.error(f"Erro na comunicação com a Groq: {e}")
            return "Desculpe, tive um problema ao me conectar com meu cérebro na nuvem."

    def respond(self, history, user_input, user_id="default", vision_context=None, web_search=False, thinking_mode=False):
        """Main reasoning pipeline: Search -> Remember -> See -> Think -> Answer -> Memorize -> Maintain.

        Args:
            history: mutable list of chat dicts; mutated in place AND returned
                (trimmed) — callers should use the returned value.
            user_input: the user's current message.
            user_id: selects the per-user memory session.
            vision_context: optional caption/description of an uploaded image.
            web_search: when True (and Tavily is configured), prepend web results.
            thinking_mode: when True, instruct the model to emit <thinking> CoT.

        Returns:
            (response_text, audio_path_or_None, trimmed_history)
        """
        # Switch the memory session to the current user.
        session_name = f"user_{user_id}"
        self.memory.load_or_create_session(session_name)

        messages = history[:]

        # 0a. Thinking mode - add a chain-of-thought instruction.
        if thinking_mode:
            thinking_prompt = {
                "role": "system",
                "content": """MODO THINKING ATIVADO: Antes de dar sua resposta final, pense passo a passo.
Coloque todo seu raciocínio dentro de tags <thinking>...</thinking>.
Após fechar a tag </thinking>, dê sua resposta final de forma clara e direta.
Exemplo:
<thinking>
1. Primeiro, vou analisar...
2. Considerando que...
3. Portanto...
</thinking>

[Sua resposta final aqui]"""
            }
            messages.append(thinking_prompt)

        # 0b. Web search (if enabled and the handler has an API key).
        if web_search and self.web_search_handler.is_available():
            search_results = self.web_search_handler.search(user_input)
            if search_results:
                search_context = f"--- RESULTADOS DA BUSCA WEB ---\n{search_results}\n--- FIM DA BUSCA ---"
                messages.append({"role": "system", "content": search_context})

        # 1. Remember (context retrieval): inject relevant memories as a
        #    system message to guide the answer.
        memories = self.memory.remember(user_input)
        if memories:
            memory_context = f"--- MEMÓRIAS RELEVANTES (ShoreStone) ---\n{memories}\n--- FIM DAS MEMÓRIAS ---"
            messages.append({"role": "system", "content": memory_context})

        # 2. See (visual context from an uploaded image, if any).
        if vision_context:
            messages.append({"role": "system", "content": f"Contexto visual da imagem que o usuário enviou: {vision_context}"})

        # Add the current question to both the running history and the prompt.
        history.append({"role": "user", "content": user_input})
        messages.append({"role": "user", "content": user_input})

        # 3. Answer (generation).
        resposta = self._chat(messages)

        # Update and trim the history.
        history.append({"role": "assistant", "content": resposta})
        history = slim_history(history, keep=self.cfg.get("max_context", 12))

        # 4. Memorize (persistent storage).
        self.memory.memorize(user_input, resposta)

        print(f"\n🤖 J.A.D.E.: {resposta}")

        # Speak (TTS) - modified for backend compatibility; failures are
        # logged and silenced so the text response still goes out.
        audio_path = None
        try:
            # Uses the TTSPlayer from tts.py which has save_audio_to_file.
            audio_path = self.tts.save_audio_to_file(resposta)
        except Exception as e:
            logging.warning(f"TTS falhou (silenciado): {e}")

        # 5. Maintain (automatic curation cycle every N interactions).
        self.response_count += 1
        if self.response_count % self.maintenance_interval == 0:
            logging.info(f"Ciclo de manutenção agendado (interação {self.response_count}). Verificando saúde da memória...")
            try:
                self.curator.run_maintenance_cycle()
            except Exception as e:
                logging.error(f"Erro no Curador de Memória: {e}")

        return resposta, audio_path, history
jade/heavy_mode.py CHANGED
@@ -10,6 +10,7 @@ from groq import AsyncGroq, RateLimitError
10
  from mistralai import Mistral
11
  from openai import AsyncOpenAI
12
  import traceback
 
13
 
14
  # Configura logger local
15
  logger = logging.getLogger("JadeHeavy")
@@ -54,6 +55,9 @@ class JadeHeavyAgent:
54
  # Judge model (Groq is fast and cheap)
55
  self.judge_id = "moonshotai/kimi-k2-instruct-0905"
56
 
 
 
 
57
  async def _safe_propose(self, model_name, history_text):
58
  """Phase 1: Strategic Planning"""
59
  # Staggering to avoid rate limits
@@ -138,7 +142,45 @@ class JadeHeavyAgent:
138
  return ""
139
  return ""
140
 
141
- async def respond(self, history, user_input, user_id="default", vision_context=None):
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
142
  """
143
  Main entry point for the Heavy Agent.
144
  History is a list of dicts: [{"role": "user", "content": "..."}...]
@@ -151,6 +193,13 @@ class JadeHeavyAgent:
151
 
152
  if vision_context:
153
  full_context += f"SYSTEM (Vision): {vision_context}\n"
 
 
 
 
 
 
 
154
 
155
  full_context += f"USER: {user_input}\n"
156
 
@@ -192,11 +241,22 @@ class JadeHeavyAgent:
192
  if not valid_sols:
193
  return "Failed to generate drafts.", None, history
194
 
 
 
 
 
 
 
 
 
 
 
 
195
  # --- PHASE 4: VERDICT (Synthesis) ---
196
  logger.info("Jade Heavy: Phase 4 - Verdict...")
197
  council_prompt = (
198
  f"User Request:\n{full_context}\n\nCandidate Responses:\n" +
199
- "\n".join(valid_sols) +
200
  "\n\nTASK: Synthesize the best parts of these drafts into a FINAL, PERFECT RESPONSE."
201
  "The response should be natural, helpful, and high-quality. Do not mention the agents or the process."
202
  )
@@ -211,16 +271,10 @@ class JadeHeavyAgent:
211
  final_answer = resp.choices[0].message.content
212
  except Exception as e:
213
  logger.error(f"Verdict failed: {e}")
214
- final_answer = valid_sols[0].replace(f"[{agents[0]} Draft]:\n", "") # Fallback
215
 
216
  # Update History
217
  history.append({"role": "user", "content": user_input})
218
  history.append({"role": "assistant", "content": final_answer})
219
 
220
- # Audio (Optional/Placeholder - returning None for now or use TTS if needed)
221
- # The user said "backend focuses on request", so we can skip TTS generation here
222
- # or implement it if JadeAgent does it. The original code uses `jade_agent.tts`.
223
- # For Heavy mode, maybe we skip audio for speed, or add it later.
224
- # I'll return None for audio path.
225
-
226
  return final_answer, None, history
 
10
  from mistralai import Mistral
11
  from openai import AsyncOpenAI
12
  import traceback
13
+ from .web_search import WebSearchHandler
14
 
15
  # Configura logger local
16
  logger = logging.getLogger("JadeHeavy")
 
55
  # Judge model (Groq is fast and cheap)
56
  self.judge_id = "moonshotai/kimi-k2-instruct-0905"
57
 
58
+ # Web Search Handler
59
+ self.web_search_handler = WebSearchHandler()
60
+
61
  async def _safe_propose(self, model_name, history_text):
62
  """Phase 1: Strategic Planning"""
63
  # Staggering to avoid rate limits
 
142
  return ""
143
  return ""
144
 
145
+ async def _safe_criticize(self, model_name, draft, original_context):
146
+ """Phase 3.5: Self-Criticism - Each model reviews and improves its own draft"""
147
+ await asyncio.sleep(random.uniform(0.5, 1.5)) # Stagger
148
+
149
+ sys_prompt = (
150
+ "You are a Critical Reviewer. You wrote the draft below. Now critically review it.\n"
151
+ "Fix any errors, add missing important information, improve clarity and flow.\n"
152
+ "Return the IMPROVED version of the response. Keep the same general structure.\n"
153
+ "Do not add meta-commentary, just return the improved text."
154
+ )
155
+
156
+ user_prompt = f"Original Request Context:\n{original_context}\n\nYour Draft to Improve:\n{draft}"
157
+ messages = [{"role": "system", "content": sys_prompt}, {"role": "user", "content": user_prompt}]
158
+
159
+ try:
160
+ content = ""
161
+ if model_name == "Mistral" and self.mistral:
162
+ resp = await self.mistral.chat.complete_async(model=self.models["Mistral"], messages=messages)
163
+ content = resp.choices[0].message.content
164
+ elif model_name == "Qwen" and self.openrouter:
165
+ resp = await self.openrouter.chat.completions.create(model="qwen/qwen3-coder:free", messages=messages)
166
+ content = resp.choices[0].message.content
167
+ else:
168
+ target_model = self.models.get(model_name, "openai/gpt-oss-120b")
169
+ resp = await self.groq_client.chat.completions.create(
170
+ model=target_model,
171
+ messages=messages,
172
+ temperature=0.5
173
+ )
174
+ content = resp.choices[0].message.content
175
+
176
+ if content:
177
+ return f"[{model_name} Refined]:\n{content}"
178
+ except Exception as e:
179
+ logger.error(f"Error in criticize ({model_name}): {e}")
180
+ return draft # Return original draft if criticism fails
181
+ return draft
182
+
183
+ async def respond(self, history, user_input, user_id="default", vision_context=None, web_search=False):
184
  """
185
  Main entry point for the Heavy Agent.
186
  History is a list of dicts: [{"role": "user", "content": "..."}...]
 
193
 
194
  if vision_context:
195
  full_context += f"SYSTEM (Vision): {vision_context}\n"
196
+
197
+ # --- WEB SEARCH (if enabled) ---
198
+ if web_search and self.web_search_handler.is_available():
199
+ logger.info("Jade Heavy: Performing web search...")
200
+ search_results = self.web_search_handler.search(user_input)
201
+ if search_results:
202
+ full_context = f"[WEB SEARCH RESULTS]\n{search_results}\n\n" + full_context
203
 
204
  full_context += f"USER: {user_input}\n"
205
 
 
241
  if not valid_sols:
242
  return "Failed to generate drafts.", None, history
243
 
244
+ # --- PHASE 3.5: SELF-CRITICISM (NEW!) ---
245
+ logger.info("Jade Heavy: Phase 3.5 - Self-Criticism...")
246
+ # Pair each agent with its draft for self-criticism
247
+ agent_draft_pairs = list(zip(agents[:len(valid_sols)], valid_sols))
248
+ tasks_crit = [self._safe_criticize(m, d, full_context) for m, d in agent_draft_pairs]
249
+ results_crit = await asyncio.gather(*tasks_crit)
250
+ refined_sols = [s for s in results_crit if s]
251
+
252
+ # Use refined solutions if available, otherwise fallback to original drafts
253
+ final_drafts = refined_sols if refined_sols else valid_sols
254
+
255
  # --- PHASE 4: VERDICT (Synthesis) ---
256
  logger.info("Jade Heavy: Phase 4 - Verdict...")
257
  council_prompt = (
258
  f"User Request:\n{full_context}\n\nCandidate Responses:\n" +
259
+ "\n".join(final_drafts) +
260
  "\n\nTASK: Synthesize the best parts of these drafts into a FINAL, PERFECT RESPONSE."
261
  "The response should be natural, helpful, and high-quality. Do not mention the agents or the process."
262
  )
 
271
  final_answer = resp.choices[0].message.content
272
  except Exception as e:
273
  logger.error(f"Verdict failed: {e}")
274
+ final_answer = final_drafts[0].split(":\n", 1)[-1] if final_drafts else "Error generating response."
275
 
276
  # Update History
277
  history.append({"role": "user", "content": user_input})
278
  history.append({"role": "assistant", "content": final_answer})
279
 
 
 
 
 
 
 
280
  return final_answer, None, history