Madras1 committed on
Commit
f10c580
verified
1 Parent(s): 2c4dbbc

Upload 18 files

Browse files
Files changed (3) hide show
  1. app.py +50 -2
  2. code_jade/config.json +1 -1
  3. code_jade/core.py +26 -1
app.py CHANGED
@@ -2,7 +2,8 @@
2
  import os
3
  import base64
4
  import io
5
- from fastapi import FastAPI
 
6
  from fastapi.middleware.cors import CORSMiddleware
7
  from pydantic import BaseModel
8
  from PIL import Image
@@ -27,18 +28,65 @@ class UserRequest(BaseModel):
27
  class CodeRequest(BaseModel):
28
  user_input: str
29
 
 
 
 
 
 
 
30
  @app.post("/code/chat")
31
  def handle_code_chat(request: CodeRequest):
32
  try:
 
 
 
 
33
  response = code_agent.chat_loop(request.user_input)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
34
  return {
35
  "success": True,
36
- "bot_response": response
 
37
  }
38
  except Exception as e:
39
  print(f"Erro cr铆tico no endpoint /code/chat: {e}")
40
  return {"success": False, "error": str(e)}
41
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
42
  @app.post("/chat")
43
  def handle_chat(request: UserRequest):
44
  try:
 
2
  import os
3
  import base64
4
  import io
5
+ from fastapi import FastAPI, HTTPException
6
+ from fastapi.responses import FileResponse
7
  from fastapi.middleware.cors import CORSMiddleware
8
  from pydantic import BaseModel
9
  from PIL import Image
 
28
  class CodeRequest(BaseModel):
29
  user_input: str
30
 
31
def get_workspace_files():
    """Return the set of filenames currently present in the agent workspace.

    The workspace directory is taken from the agent config (``work_dir``,
    defaulting to ``./workspace``). If the directory does not exist yet,
    an empty set is returned instead of raising.
    """
    workspace_dir = code_agent.cfg.get("work_dir", "./workspace")
    if not os.path.exists(workspace_dir):
        return set()
    return {entry for entry in os.listdir(workspace_dir)}
36
+
37
@app.post("/code/chat")
def handle_code_chat(request: CodeRequest):
    """Run the code agent on the user's input and return its text reply.

    Image files (.png/.jpg/.jpeg/.gif) that appear in the workspace during
    the agent run are detected by diffing directory listings before and
    after ``chat_loop`` and are returned base64-encoded so the frontend can
    render them inline.

    Returns a JSON-serializable dict:
        {"success": True, "bot_response": str, "generated_images": [...]}
    or {"success": False, "error": str} on failure.
    """
    try:
        # Snapshot the workspace so files created by the agent can be detected.
        files_before = get_workspace_files()

        # Run the agent.
        response = code_agent.chat_loop(request.user_input)

        # Anything new in the workspace was produced by this run.
        new_files = get_workspace_files() - files_before

        generated_images = []
        workspace = code_agent.cfg.get("work_dir", "./workspace")

        for filename in new_files:
            if filename.lower().endswith(('.png', '.jpg', '.jpeg', '.gif')):
                file_path = os.path.join(workspace, filename)
                try:
                    with open(file_path, "rb") as img_file:
                        b64_data = base64.b64encode(img_file.read()).decode('utf-8')
                    generated_images.append({
                        "filename": filename,
                        "b64": b64_data
                    })
                except Exception as e:
                    # BUG FIX: the original message printed a placeholder
                    # instead of naming the file that failed to read.
                    print(f"Error reading generated image {filename}: {e}")

        return {
            "success": True,
            "bot_response": response,
            "generated_images": generated_images
        }
    except Exception as e:
        # BUG FIX: log string was mojibake ("cr铆tico"); restored UTF-8 "crítico".
        print(f"Erro crítico no endpoint /code/chat: {e}")
        return {"success": False, "error": str(e)}
74
 
75
@app.get("/download/{filename}")
def download_file(filename: str):
    """Serve a file from the agent workspace as a download.

    Args:
        filename: Name of the file inside the workspace (FastAPI path
            parameter — BUG FIX: the route previously contained a literal
            placeholder instead of the ``{filename}`` parameter, so the
            endpoint could never match and ``filename`` was never bound).

    Raises:
        HTTPException: 403 if the resolved path escapes the workspace
            (path traversal attempt), 404 if the file does not exist.
    """
    workspace = code_agent.cfg.get("work_dir", "./workspace")
    workspace_abs = os.path.realpath(workspace)
    file_path = os.path.realpath(os.path.join(workspace, filename))

    # Path traversal protection. A plain startswith() on abspath also
    # accepts sibling directories such as "workspace_evil" and does not
    # resolve symlinks; commonpath on realpaths only passes paths that are
    # genuinely inside the workspace.
    if os.path.commonpath([workspace_abs, file_path]) != workspace_abs:
        raise HTTPException(status_code=403, detail="Access denied")

    if os.path.isfile(file_path):
        return FileResponse(file_path, filename=filename)
    raise HTTPException(status_code=404, detail="File not found")
89
+
90
  @app.post("/chat")
91
  def handle_chat(request: UserRequest):
92
  try:
code_jade/config.json CHANGED
@@ -1,5 +1,5 @@
1
  {
2
- "groq_model": "moonshotai/kimi-k2-instruct-0905",
3
  "max_context": 20,
4
  "safe_mode": false,
5
  "work_dir": "./workspace"
 
1
  {
2
+ "groq_model": "llama3-70b-8192",
3
  "max_context": 20,
4
  "safe_mode": false,
5
  "work_dir": "./workspace"
code_jade/core.py CHANGED
@@ -12,7 +12,7 @@ class CodeJadeAgent:
12
  self.cfg = json.load(f)
13
  except FileNotFoundError:
14
  # Fallback se n茫o achar, mas idealmente deve existir
15
- self.cfg = {"groq_model": "moonshotai/kimi-k2-instruct-0905", "safe_mode": True, "work_dir": "./workspace", "max_context": 20}
16
 
17
  self.client = Groq(api_key=self._get_api_key())
18
  self.tools = ToolManager(safe_mode=self.cfg.get("safe_mode", True), work_dir=self.cfg.get("work_dir", "."))
@@ -169,6 +169,9 @@ CodeJade: "Arquivo criado. Quer que eu execute?"
169
 
170
  final_response = ""
171
 
 
 
 
172
  while turn < max_turns:
173
  # 2. Chama o modelo
174
  response = self._chat(self.history)
@@ -177,6 +180,21 @@ CodeJade: "Arquivo criado. Quer que eu execute?"
177
  tool_data = self.process_tool_call(response)
178
 
179
  if tool_data:
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
180
  # 脡 uma ferramenta -> Executa
181
  tool_result = self.run_tool(tool_data)
182
 
@@ -192,6 +210,13 @@ CodeJade: "Arquivo criado. Quer que eu execute?"
192
  self.history.append({"role": "assistant", "content": final_response})
193
  break
194
 
 
 
 
 
 
 
 
195
  # Gerencia a mem贸ria ao fim do ciclo
196
  self._manage_memory()
197
 
 
12
  self.cfg = json.load(f)
13
  except FileNotFoundError:
14
  # Fallback se n茫o achar, mas idealmente deve existir
15
+ self.cfg = {"groq_model": "llama3-70b-8192", "safe_mode": True, "work_dir": "./workspace", "max_context": 20}
16
 
17
  self.client = Groq(api_key=self._get_api_key())
18
  self.tools = ToolManager(safe_mode=self.cfg.get("safe_mode", True), work_dir=self.cfg.get("work_dir", "."))
 
169
 
170
  final_response = ""
171
 
172
+ last_tool_call = None
173
+ consecutive_tool_failures = 0
174
+
175
  while turn < max_turns:
176
  # 2. Chama o modelo
177
  response = self._chat(self.history)
 
180
  tool_data = self.process_tool_call(response)
181
 
182
  if tool_data:
183
+ # Loop detection
184
+ current_call_signature = f"{tool_data['tool']}:{json.dumps(tool_data['args'], sort_keys=True)}"
185
+ if last_tool_call == current_call_signature:
186
+ consecutive_tool_failures += 1
187
+ if consecutive_tool_failures >= 2:
188
+ error_msg = f"TOOL_ERROR: Detected repetitive loop with same arguments. Stopping tool execution."
189
+ self.history.append({"role": "system", "content": error_msg})
190
+ print(f"馃洃 Loop detectado: {error_msg}")
191
+ # Force model to explain
192
+ continue
193
+ else:
194
+ consecutive_tool_failures = 0
195
+
196
+ last_tool_call = current_call_signature
197
+
198
  # 脡 uma ferramenta -> Executa
199
  tool_result = self.run_tool(tool_data)
200
 
 
210
  self.history.append({"role": "assistant", "content": final_response})
211
  break
212
 
213
+ # Se saiu do loop sem resposta final (max_turns reached)
214
+ if not final_response and turn >= max_turns:
215
+ print("鈿狅笍 Max turns reached without final response. Forcing summary.")
216
+ self.history.append({"role": "system", "content": "SYSTEM: Max tool turns reached. Please summarize what was done and what failed."})
217
+ final_response = self._chat(self.history)
218
+ self.history.append({"role": "assistant", "content": final_response})
219
+
220
  # Gerencia a mem贸ria ao fim do ciclo
221
  self._manage_memory()
222