Madras1 committed on
Commit
88bdc06
·
verified ·
1 Parent(s): e805067

Upload 14 files

Browse files
Files changed (5)
  1. Dockerfile +1 -4
  2. app.py +33 -128
  3. jade/core.py +66 -2
  4. jade/scholar.py +545 -0
  5. jade/tests/test_scholar.py +88 -0
Dockerfile CHANGED
@@ -1,9 +1,6 @@
1
  # Usa uma imagem Python leve e moderna
2
  FROM python:3.10-slim
3
 
4
- # Instala dependências do sistema (FFmpeg para áudio, Graphviz para mapas mentais)
5
- RUN apt-get update && apt-get install -y ffmpeg graphviz && rm -rf /var/lib/apt/lists/*
6
-
7
  # Define a pasta de trabalho dentro do container
8
  WORKDIR /app
9
 
@@ -22,4 +19,4 @@ RUN mkdir -p /app/jade_memory_db && chmod 777 /app/jade_memory_db
22
 
23
  # Comando para ligar o servidor
24
  # O Hugging Face sempre espera a porta 7860
25
- CMD ["uvicorn", "app:app", "--host", "0.0.0.0", "--port", "7860"]
 
1
  # Usa uma imagem Python leve e moderna
2
  FROM python:3.10-slim
3
 
 
 
 
4
  # Define a pasta de trabalho dentro do container
5
  WORKDIR /app
6
 
 
19
 
20
  # Comando para ligar o servidor
21
  # O Hugging Face sempre espera a porta 7860
22
+ CMD ["uvicorn", "app:app", "--host", "0.0.0.0", "--port", "7860"]
app.py CHANGED
@@ -2,15 +2,12 @@
2
  import os
3
  import base64
4
  import io
5
- import shutil
6
- from fastapi import FastAPI, HTTPException
7
- from fastapi.middleware.cors import CORSMiddleware
8
- from fastapi.responses import FileResponse
9
  from fastapi.staticfiles import StaticFiles
 
10
  from pydantic import BaseModel
11
  from PIL import Image
12
- from jade.core import JadeAgent
13
- from jade.scholar_agent import ScholarAgent
14
 
15
  print("Iniciando a J.A.D.E. com FastAPI...")
16
  agent = JadeAgent()
@@ -22,34 +19,22 @@ app.add_middleware(
22
  allow_origins=["*"], allow_credentials=True, allow_methods=["*"], allow_headers=["*"],
23
  )
24
 
 
 
 
 
 
 
 
 
25
  # Dicionário global para armazenar sessões de usuários
26
  user_sessions = {}
27
- scholar_sessions = {} # Armazena instâncias de ScholarAgent por usuário
28
 
29
  class UserRequest(BaseModel):
30
  user_input: str
31
  image_base64: str | None = None
32
  user_id: str | None = None
33
-
34
- class ScholarRequest(BaseModel):
35
- user_id: str
36
- target: str | None = None # URL, Tópico ou Texto
37
- action: str | None = None # ingest, summarize, mindmap, podcast, debate, quiz, flashcards, handout
38
- mode: str | None = "lecture" # Para podcast/debate
39
-
40
- def get_scholar_agent(user_id: str):
41
- if user_id not in scholar_sessions:
42
- print(f"Criando novo Agente Scholar para: {user_id}")
43
- scholar_sessions[user_id] = ScholarAgent()
44
- return scholar_sessions[user_id]
45
-
46
- def encode_file_base64(filepath):
47
- if filepath and os.path.exists(filepath):
48
- print(f"Codificando arquivo: {filepath}")
49
- with open(filepath, "rb") as f:
50
- encoded = base64.b64encode(f.read()).decode('utf-8')
51
- return encoded
52
- return None
53
 
54
  @app.post("/chat")
55
  def handle_chat(request: UserRequest):
@@ -76,127 +61,47 @@ def handle_chat(request: UserRequest):
76
 
77
  final_user_input = request.user_input if request.user_input else "Descreva a imagem em detalhes."
78
 
79
- bot_response_text, audio_path, updated_history = agent.respond(
 
80
  history=current_history,
81
  user_input=final_user_input,
82
  user_id=user_id,
83
- vision_context=vision_context
 
84
  )
85
 
86
  # Atualiza o histórico da sessão
87
  user_sessions[user_id] = updated_history
88
 
89
  # LÓGICA DO ÁUDIO: Converte o arquivo MP3 gerado para Base64
 
 
 
 
90
  audio_base64 = None
91
  if audio_path and os.path.exists(audio_path):
92
- audio_base64 = encode_file_base64(audio_path)
93
- os.remove(audio_path) # Limpa arquivo
 
 
 
 
 
 
94
 
95
  return {
96
  "success": True,
97
  "bot_response": bot_response_text,
98
- "audio_base64": audio_base64
 
99
  }
100
  except Exception as e:
101
  print(f"Erro crítico no endpoint /chat: {e}")
102
  return {"success": False, "error": str(e)}
103
 
104
- @app.post("/scholar")
105
- def handle_scholar(request: ScholarRequest):
106
- try:
107
- user_id = request.user_id
108
- scholar = get_scholar_agent(user_id)
109
-
110
- response = {"success": True, "message": "", "data": None, "file_base64": None, "file_type": None}
111
-
112
- if request.action == "ingest":
113
- if not request.target:
114
- raise HTTPException(status_code=400, detail="Target is required for ingest.")
115
- success = scholar.ingest(request.target)
116
- if success:
117
- response["message"] = f"Conteúdo sobre '{request.target}' processado com sucesso!"
118
- else:
119
- response["success"] = False
120
- response["message"] = "Falha ao processar conteúdo. Tente outro link ou tópico."
121
-
122
- elif request.action == "summarize":
123
- summary = scholar.summarize()
124
- response["message"] = "Resumo gerado."
125
- response["data"] = summary
126
-
127
- elif request.action == "mindmap":
128
- path = scholar.mindmap()
129
- if path:
130
- response["message"] = "Mapa Mental gerado."
131
- response["file_base64"] = encode_file_base64(path)
132
- response["file_type"] = "image/png"
133
- os.remove(path) # Clean up file
134
- else:
135
- response["success"] = False
136
- response["message"] = "Erro ao gerar Mapa Mental."
137
-
138
- elif request.action == "podcast" or request.action == "debate":
139
- mode = "debate" if request.action == "debate" else "lecture"
140
- path = scholar.podcast(mode=mode)
141
- if path:
142
- response["message"] = f"Áudio ({mode}) gerado."
143
- response["file_base64"] = encode_file_base64(path)
144
- response["file_type"] = "audio/mp3"
145
- os.remove(path) # Clean up file
146
- else:
147
- response["success"] = False
148
- response["message"] = "Erro ao gerar áudio."
149
-
150
- elif request.action == "quiz":
151
- quiz = scholar.quiz()
152
- response["message"] = "Quiz gerado."
153
- response["data"] = quiz
154
-
155
- elif request.action == "flashcards":
156
- path = scholar.flashcards()
157
- if path:
158
- response["message"] = "Flashcards (.apkg) gerados."
159
- response["file_base64"] = encode_file_base64(path)
160
- response["file_type"] = "application/octet-stream"
161
- response["filename"] = path # Enviar nome do arquivo para download
162
- os.remove(path) # Clean up file
163
- else:
164
- response["success"] = False
165
- response["message"] = "Erro ao gerar Flashcards."
166
-
167
- elif request.action == "handout":
168
- path = scholar.handout()
169
- if path:
170
- response["message"] = "Apostila PDF gerada."
171
- response["file_base64"] = encode_file_base64(path)
172
- response["file_type"] = "application/pdf"
173
- os.remove(path) # Clean up file
174
- else:
175
- response["success"] = False
176
- response["message"] = "Erro ao gerar Apostila (gere o Resumo primeiro)."
177
-
178
- else:
179
- response["success"] = False
180
- response["message"] = "Ação inválida."
181
-
182
- return response
183
-
184
- except Exception as e:
185
- print(f"Erro no Scholar Agent: {e}")
186
- return {"success": False, "error": str(e)}
187
-
188
- # Mount frontend directory
189
- # IMPORTANT: This must be the last route/mount to avoid shadowing API endpoints
190
- frontend_path = os.path.join(os.path.dirname(__file__), "frontend")
191
- if os.path.exists(frontend_path):
192
- print(f"Montando frontend estático em: {frontend_path}")
193
- # Mount at root "/" to serve index.html and assets directly
194
- app.mount("/", StaticFiles(directory=frontend_path, html=True), name="frontend")
195
- else:
196
- print(f"⚠️ Frontend não encontrado em: {frontend_path}")
197
- @app.get("/")
198
- def root():
199
- return {"message": "Servidor J.A.D.E. com FastAPI está online. Frontend não encontrado."}
200
 
201
  if __name__ == "__main__":
202
  import uvicorn
 
2
  import os
3
  import base64
4
  import io
5
+ from fastapi import FastAPI
 
 
 
6
  from fastapi.staticfiles import StaticFiles
7
+ from fastapi.middleware.cors import CORSMiddleware
8
  from pydantic import BaseModel
9
  from PIL import Image
10
+ from jade.core import JadeAgent
 
11
 
12
  print("Iniciando a J.A.D.E. com FastAPI...")
13
  agent = JadeAgent()
 
19
  allow_origins=["*"], allow_credentials=True, allow_methods=["*"], allow_headers=["*"],
20
  )
21
 
22
+ # Ensure generated directory exists
23
+ GENERATED_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), "generated")
24
+ if not os.path.exists(GENERATED_DIR):
25
+ os.makedirs(GENERATED_DIR)
26
+
27
+ # Mount static files
28
+ app.mount("/generated", StaticFiles(directory=GENERATED_DIR), name="generated")
29
+
30
  # Dicionário global para armazenar sessões de usuários
31
  user_sessions = {}
 
32
 
33
  class UserRequest(BaseModel):
34
  user_input: str
35
  image_base64: str | None = None
36
  user_id: str | None = None
37
+ agent_mode: str | None = "jade" # "jade" or "scholar"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
38
 
39
  @app.post("/chat")
40
  def handle_chat(request: UserRequest):
 
61
 
62
  final_user_input = request.user_input if request.user_input else "Descreva a imagem em detalhes."
63
 
64
+ # Pass agent_mode to respond
65
+ bot_response_text, audio_path, updated_history, attachments = agent.respond(
66
  history=current_history,
67
  user_input=final_user_input,
68
  user_id=user_id,
69
+ vision_context=vision_context,
70
+ agent_mode=request.agent_mode
71
  )
72
 
73
  # Atualiza o histórico da sessão
74
  user_sessions[user_id] = updated_history
75
 
76
  # LÓGICA DO ÁUDIO: Converte o arquivo MP3 gerado para Base64
77
+ # Note: If Scholar returns an audio attachment URL, we might still want to return audio_base64 for auto-play?
78
+ # Or let frontend handle attachments differently.
79
+ # JadeAgent.respond returns audio_path for TTS. Scholar might return it via attachments.
80
+
81
  audio_base64 = None
82
  if audio_path and os.path.exists(audio_path):
83
+ print(f"Codificando arquivo de áudio: {audio_path}")
84
+ with open(audio_path, "rb") as audio_file:
85
+ audio_bytes = audio_file.read()
86
+ audio_base64 = base64.b64encode(audio_bytes).decode('utf-8')
87
+ # Only remove if it's a temp file. Scholar generated files might persist.
88
+ # Jade TTS creates temp files. Scholar creates persistent files in /generated.
89
+ if "/generated/" not in audio_path:
90
+ os.remove(audio_path)
91
 
92
  return {
93
  "success": True,
94
  "bot_response": bot_response_text,
95
+ "audio_base64": audio_base64, # Envia o áudio como texto para o front-end
96
+ "attachments": attachments
97
  }
98
  except Exception as e:
99
  print(f"Erro crítico no endpoint /chat: {e}")
100
  return {"success": False, "error": str(e)}
101
 
102
+ @app.get("/")
103
+ def root():
104
+ return {"message": "Servidor J.A.D.E. com FastAPI está online."}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
105
 
106
  if __name__ == "__main__":
107
  import uvicorn
jade/core.py CHANGED
@@ -13,6 +13,7 @@ from .tts import TTSPlayer
13
  from .utils import slim_history
14
  from .shorestone import ShoreStoneMemory
15
  from .curator_heuristic import MemoryCuratorHeuristic
 
16
 
17
  # Configura o logger principal
18
  logging.basicConfig(level=logging.INFO, format="%(asctime)s - JADE - %(levelname)s - %(message)s")
@@ -56,6 +57,14 @@ class JadeAgent:
56
  self.curator = MemoryCuratorHeuristic(shorestone_memory=self.memory)
57
  self.response_count = 0
58
  self.maintenance_interval = 10 # Executar a manutenção a cada 10 interações
 
 
 
 
 
 
 
 
59
 
60
  logging.info(f"J.A.D.E. pronta e conectada ao modelo {self.model_name}.")
61
 
@@ -83,9 +92,64 @@ class JadeAgent:
83
  logging.error(f"Erro na comunicação com a Groq: {e}")
84
  return "Desculpe, tive um problema ao me conectar com meu cérebro na nuvem."
85
 
86
- def respond(self, history, user_input, user_id="default", vision_context=None):
87
  """Processo principal de raciocínio: Lembrar -> Ver -> Responder -> Memorizar -> Manter."""
88
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
89
  # TROCA A SESSÃO DA MEMÓRIA PARA O USUÁRIO ATUAL
90
  session_name = f"user_{user_id}"
91
  self.memory.load_or_create_session(session_name)
@@ -136,4 +200,4 @@ class JadeAgent:
136
  except Exception as e:
137
  logging.error(f"Erro no Curador de Memória: {e}")
138
 
139
- return resposta, audio_path, history
 
13
  from .utils import slim_history
14
  from .shorestone import ShoreStoneMemory
15
  from .curator_heuristic import MemoryCuratorHeuristic
16
+ from .scholar import ScholarAgent # Import Scholar Agent
17
 
18
  # Configura o logger principal
19
  logging.basicConfig(level=logging.INFO, format="%(asctime)s - JADE - %(levelname)s - %(message)s")
 
57
  self.curator = MemoryCuratorHeuristic(shorestone_memory=self.memory)
58
  self.response_count = 0
59
  self.maintenance_interval = 10 # Executar a manutenção a cada 10 interações
60
+
61
+ # 3. Scholar Agent
62
+ try:
63
+ self.scholar = ScholarAgent(api_key=self.api_key)
64
+ logging.info("Scholar Agent inicializado com sucesso.")
65
+ except Exception as e:
66
+ logging.error(f"Erro ao inicializar Scholar Agent: {e}")
67
+ self.scholar = None
68
 
69
  logging.info(f"J.A.D.E. pronta e conectada ao modelo {self.model_name}.")
70
 
 
92
  logging.error(f"Erro na comunicação com a Groq: {e}")
93
  return "Desculpe, tive um problema ao me conectar com meu cérebro na nuvem."
94
 
95
+ def respond(self, history, user_input, user_id="default", vision_context=None, agent_mode="jade"):
96
  """Processo principal de raciocínio: Lembrar -> Ver -> Responder -> Memorizar -> Manter."""
97
 
98
+ # Attachments list to return
99
+ attachments = []
100
+
101
+ # SCHOLAR AGENT ROUTING
102
+ if agent_mode == "scholar" and self.scholar:
103
+ scholar_response = self.scholar.process_request(user_input, user_id)
104
+ resposta = scholar_response.get("text", "")
105
+ attachments = scholar_response.get("attachments", [])
106
+
107
+ # Add to history
108
+ history.append({"role": "user", "content": user_input})
109
+ history.append({"role": "assistant", "content": resposta})
110
+
111
+ # Check for audio attachment to set as primary audio response for autoplay
112
+ audio_path = None
113
+ for att in attachments:
114
+ if att.get("type") == "audio":
115
+ # Attachments have URL /generated/filename.mp3
116
+ # We need the full file path for TTSPlayer logic in app.py if we wanted to read bytes
117
+ # But app.py logic reads file at `audio_path`.
118
+ # So we need to convert URL back to path or change app.py logic.
119
+ # App.py expects a file path.
120
+
121
+ # Convert /generated/foo.mp3 -> backend/generated/foo.mp3
122
+ url = att.get("url", "")
123
+ filename = os.path.basename(url)
124
+ # We know where generated dir is relative to this file?
125
+ # scholar.py defines generated dir.
126
+ # It's better if app.py handles the URL if provided.
127
+
128
+ # Hack: app.py expects audio_path to read bytes.
129
+ # Let's reconstruct path for now.
130
+ # Assuming cwd is repo root
131
+ possible_path = os.path.join("backend", "generated", filename)
132
+ if os.path.exists(possible_path):
133
+ audio_path = possible_path
134
+ else:
135
+ # Try absolute
136
+ possible_path = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), "generated", filename)
137
+ if os.path.exists(possible_path):
138
+ audio_path = possible_path
139
+
140
+ # If no audio generated by scholar, maybe TTS the text?
141
+ # Scholar usually generates its own audio for podcasts/debates.
142
+ # If it's just text response, maybe we should TTS it?
143
+ if not audio_path and resposta:
144
+ try:
145
+ audio_path = self.tts.save_audio_to_file(resposta)
146
+ except Exception as e:
147
+ logging.warning(f"TTS falhou: {e}")
148
+
149
+ return resposta, audio_path, history, attachments
150
+
151
+ # NORMAL JADE AGENT FLOW
152
+
153
  # TROCA A SESSÃO DA MEMÓRIA PARA O USUÁRIO ATUAL
154
  session_name = f"user_{user_id}"
155
  self.memory.load_or_create_session(session_name)
 
200
  except Exception as e:
201
  logging.error(f"Erro no Curador de Memória: {e}")
202
 
203
+ return resposta, audio_path, history, attachments
jade/scholar.py ADDED
@@ -0,0 +1,545 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # backend/jade/scholar.py
2
+ import os
3
+ import json
4
+ import re
5
+ import random
6
+ import logging
7
+ from io import BytesIO
8
+ from typing import List, Dict, Any, Optional
9
+ import numpy as np
10
+
11
+ # Third-party imports
12
+ import groq
13
+ import pypdf
14
+ import faiss
15
+ import graphviz
16
+ import genanki
17
+ import requests
18
+ from bs4 import BeautifulSoup
19
+ from youtube_transcript_api import YouTubeTranscriptApi
20
+ from sentence_transformers import SentenceTransformer
21
+ from fpdf import FPDF
22
+ from duckduckgo_search import DDGS
23
+ from gtts import gTTS
24
+ from pydub import AudioSegment
25
+
26
+ # Setup logging
27
+ logger = logging.getLogger(__name__)
28
+
29
+ # Constants
30
+ GENERATED_DIR = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), "generated")
31
+ if not os.path.exists(GENERATED_DIR):
32
+ os.makedirs(GENERATED_DIR)
33
+
34
+ class ToolBox:
35
+ """Caixa de ferramentas para os agentes."""
36
+
37
+ @staticmethod
38
+ def get_file_path(filename: str) -> str:
39
+ return os.path.join(GENERATED_DIR, filename)
40
+
41
+ @staticmethod
42
+ def read_pdf(filepath: str) -> str:
43
+ try:
44
+ logger.info(f"📄 [Ferramenta] Lendo PDF: {filepath}...")
45
+ reader = pypdf.PdfReader(filepath)
46
+ text = "".join([p.extract_text() or "" for p in reader.pages])
47
+ return re.sub(r'\s+', ' ', text).strip()
48
+ except Exception as e:
49
+ return f"Erro ao ler PDF: {str(e)}"
50
+
51
+ @staticmethod
52
+ def scrape_web(url: str) -> str:
53
+ try:
54
+ logger.info(f"🌐 [Ferramenta] Acessando URL: {url}...")
55
+ headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64)'}
56
+ response = requests.get(url, headers=headers, timeout=10)
57
+ soup = BeautifulSoup(response.content, 'html.parser')
58
+ for script in soup(["script", "style", "header", "footer", "nav"]):
59
+ script.extract()
60
+ text = soup.get_text()
61
+ return re.sub(r'\s+', ' ', text).strip()[:40000]
62
+ except Exception as e:
63
+ logger.error(f"Erro ao acessar {url}: {e}")
64
+ return ""
65
+
66
+ @staticmethod
67
+ def search_topic(topic: str) -> List[str]:
68
+ """Pesquisa no DuckDuckGo e retorna URLs."""
69
+ logger.info(f"🔎 [Ferramenta] Pesquisando na Web sobre: '{topic}'...")
70
+ urls = []
71
+ try:
72
+ with DDGS() as ddgs:
73
+ results = list(ddgs.text(topic, max_results=3))
74
+ for r in results:
75
+ urls.append(r['href'])
76
+ except Exception as e:
77
+ logger.error(f"Erro na busca: {e}")
78
+ return urls
79
+
80
+ @staticmethod
81
+ def get_youtube_transcript(url: str) -> str:
82
+ try:
83
+ logger.info(f"📺 [Ferramenta] Extraindo legendas do YouTube: {url}...")
84
+ video_id = url.split("v=")[-1].split("&")[0]
85
+ transcript = YouTubeTranscriptApi.get_transcript(video_id, languages=['pt', 'en'])
86
+ text = " ".join([t['text'] for t in transcript])
87
+ return text
88
+ except Exception as e:
89
+ return f"Erro ao pegar legendas do YouTube: {str(e)}"
90
+
91
+ @staticmethod
92
+ def generate_audio_mix(script: List[Dict], filename="aula_podcast.mp3"):
93
+ logger.info("🎙️ [Estúdio] Produzindo áudio imersivo...")
94
+ combined = AudioSegment.silent(duration=500)
95
+
96
+ for line in script:
97
+ speaker = line.get("speaker", "Narrador").upper()
98
+ text = line.get("text", "")
99
+
100
+ lang = 'pt'
101
+ tld = 'com.br'
102
+
103
+ if "BERTA" in speaker or "PROFESSORA" in speaker or "AGENT B" in speaker:
104
+ tld = 'pt' # Portuguese accent
105
+
106
+ try:
107
+ tts = gTTS(text=text, lang=lang, tld=tld, slow=False)
108
+ fp = BytesIO()
109
+ tts.write_to_fp(fp)
110
+ fp.seek(0)
111
+
112
+ segment = AudioSegment.from_file(fp, format="mp3")
113
+ combined += segment
114
+ combined += AudioSegment.silent(duration=300)
115
+ except Exception as e:
116
+ logger.error(f"Error generating audio segment: {e}")
117
+
118
+ filepath = ToolBox.get_file_path(filename)
119
+ combined.export(filepath, format="mp3")
120
+ return filepath
121
+
122
+ @staticmethod
123
+ def generate_mindmap_image(dot_code: str, filename="mapa_mental"):
124
+ try:
125
+ logger.info("🗺️ [Design] Renderizando Mapa Mental...")
126
+ clean_dot = dot_code.replace("```dot", "").replace("```", "").strip()
127
+ filepath = ToolBox.get_file_path(filename)
128
+ # Graphviz adds extension automatically, so we remove it from filename if present
129
+ # But Source.render expects filename without extension if we want exact control or with extension?
130
+ # actually render(filename=...) saves as filename.format
131
+
132
+ src = graphviz.Source(clean_dot)
133
+ src.format = 'png'
134
+ output_path = src.render(filename=filepath, view=False, cleanup=True)
135
+ return output_path
136
+ except Exception as e:
137
+ logger.error(f"Erro ao gerar gráfico: {e}")
138
+ return None
139
+
140
+ @staticmethod
141
+ def generate_anki_deck(qa_pairs: List[Dict], deck_name="ScholarGraph Deck"):
142
+ logger.info("🧠 [Anki] Criando arquivo de Flashcards (.apkg)...")
143
+ try:
144
+ model_id = random.randrange(1 << 30, 1 << 31)
145
+ deck_id = random.randrange(1 << 30, 1 << 31)
146
+
147
+ my_model = genanki.Model(
148
+ model_id,
149
+ 'Simple Model',
150
+ fields=[{'name': 'Question'}, {'name': 'Answer'}],
151
+ templates=[{
152
+ 'name': 'Card 1',
153
+ 'qfmt': '{{Question}}',
154
+ 'afmt': '{{FrontSide}}<hr id="answer">{{Answer}}',
155
+ }]
156
+ )
157
+
158
+ my_deck = genanki.Deck(deck_id, deck_name)
159
+
160
+ for item in qa_pairs:
161
+ my_deck.add_note(genanki.Note(
162
+ model=my_model,
163
+ fields=[item['question'], item['answer']]
164
+ ))
165
+
166
+ filename = f"flashcards_{deck_id}.apkg"
167
+ filepath = ToolBox.get_file_path(filename)
168
+ genanki.Package(my_deck).write_to_file(filepath)
169
+ return filepath
170
+ except Exception as e:
171
+ logger.error(f"Erro ao criar Anki deck: {e}")
172
+ return None
173
+
174
+ class VectorMemory:
175
+ def __init__(self):
176
+ logger.info("🧠 [Memória] Inicializando Banco de Vetores (RAG)...")
177
+ # Modelo leve para embeddings
178
+ self.model = SentenceTransformer('all-MiniLM-L6-v2')
179
+ self.index = None
180
+ self.chunks = []
181
+
182
+ def ingest(self, text: str, chunk_size=500):
183
+ words = text.split()
184
+ # Cria chunks sobrepostos para melhor contexto
185
+ self.chunks = [' '.join(words[i:i+chunk_size]) for i in range(0, len(words), int(chunk_size*0.8))]
186
+
187
+ logger.info(f"🧠 [Memória] Vetorizando {len(self.chunks)} fragmentos...")
188
+ if not self.chunks: return
189
+
190
+ embeddings = self.model.encode(self.chunks)
191
+ dimension = embeddings.shape[1]
192
+ self.index = faiss.IndexFlatL2(dimension)
193
+ self.index.add(np.array(embeddings).astype('float32'))
194
+ logger.info("🧠 [Memória] Indexação concluída.")
195
+
196
+ def retrieve(self, query: str, k=3) -> str:
197
+ if not self.index: return ""
198
+ query_vec = self.model.encode([query])
199
+ D, I = self.index.search(np.array(query_vec).astype('float32'), k)
200
+
201
+ results = [self.chunks[i] for i in I[0] if i < len(self.chunks)]
202
+ return "\n\n".join(results)
203
+
204
+ class GraphState:
205
+ def __init__(self):
206
+ self.raw_content: str = ""
207
+ self.summary: str = ""
208
+ self.script: List[Dict] = []
209
+ self.quiz_data: List[Dict] = []
210
+ self.mindmap_path: str = ""
211
+ self.flashcards: List[Dict] = []
212
+
213
+ class LLMEngine:
214
+ def __init__(self, api_key: str):
215
+ self.client = groq.Groq(api_key=api_key)
216
+ self.model = "llama-3.3-70b-versatile"
217
+
218
+ def chat(self, messages: List[Dict], json_mode=False) -> str:
219
+ try:
220
+ kwargs = {"messages": messages, "model": self.model, "temperature": 0.6}
221
+ if json_mode: kwargs["response_format"] = {"type": "json_object"}
222
+ return self.client.chat.completions.create(**kwargs).choices[0].message.content
223
+ except Exception as e:
224
+ return f"Erro na IA: {e}"
225
+
226
+ # --- Agentes ---
227
+
228
+ class ResearcherAgent:
229
+ def deep_research(self, topic: str) -> str:
230
+ logger.info(f"🕵️ [Pesquisador] Iniciando Deep Research sobre: {topic}")
231
+ urls = ToolBox.search_topic(topic)
232
+ if not urls:
233
+ return f"Não encontrei informações sobre {topic}."
234
+
235
+ full_text = ""
236
+ for url in urls:
237
+ content = ToolBox.scrape_web(url)
238
+ if content:
239
+ full_text += f"\n\n--- Fonte: {url} ---\n{content[:10000]}"
240
+
241
+ return full_text
242
+
243
+ class FlashcardAgent:
244
+ def __init__(self, llm: LLMEngine):
245
+ self.llm = llm
246
+
247
+ def create_deck(self, content: str) -> List[Dict]:
248
+ logger.info("🃏 [Flashcard] Gerando pares Pergunta-Resposta...")
249
+ prompt = f"""
250
+ Crie 10 Flashcards (Pergunta e Resposta) sobre o conteúdo para memorização.
251
+ SAÍDA JSON: {{ "cards": [ {{ "question": "...", "answer": "..." }} ] }}
252
+ Conteúdo: {content[:15000]}
253
+ """
254
+ try:
255
+ resp = self.llm.chat([{"role": "user", "content": prompt}], json_mode=True)
256
+ return json.loads(resp).get("cards", [])
257
+ except: return []
258
+
259
+ class IngestAgent:
260
+ def __init__(self, researcher: ResearcherAgent):
261
+ self.researcher = researcher
262
+
263
+ def process(self, user_input: str) -> str:
264
+ # Se for arquivo PDF (assume path local se existir, mas no contexto web pode ser mais complexo)
265
+ # Aqui vamos simplificar: se for URL ou topico.
266
+ # Se o backend salvar arquivos de upload, poderiamos passar o path aqui.
267
+
268
+ if user_input.lower().endswith(".pdf") and os.path.exists(user_input):
269
+ return ToolBox.read_pdf(user_input)
270
+ elif "youtube.com" in user_input or "youtu.be" in user_input:
271
+ return ToolBox.get_youtube_transcript(user_input)
272
+ elif user_input.startswith("http"):
273
+ return ToolBox.scrape_web(user_input)
274
+ else:
275
+ logger.info("🔍 Entrada detectada como Tópico. Ativando ResearcherAgent...")
276
+ return self.researcher.deep_research(user_input)
277
+
278
+ class ProfessorAgent:
279
+ def __init__(self, llm: LLMEngine):
280
+ self.llm = llm
281
+
282
+ def summarize(self, full_text: str) -> str:
283
+ logger.info("🧠 [Professor] Gerando resumo estratégico...")
284
+ prompt = f"""
285
+ Você é um Professor Universitário. Crie um resumo estruturado e profundo.
286
+ Texto: {full_text[:25000]}
287
+ Formato: # Título / ## Introdução / ## Pontos Chave / ## Conclusão
288
+ """
289
+ return self.llm.chat([{"role": "user", "content": prompt}])
290
+
291
+ class VisualizerAgent:
292
+ def __init__(self, llm: LLMEngine):
293
+ self.llm = llm
294
+
295
+ def create_mindmap(self, text: str) -> str:
296
+ logger.info("🎨 [Visualizador] Projetando Mapa Mental...")
297
+ prompt = f"""
298
+ Crie um código GRAPHVIZ (DOT) para um mapa mental deste conteúdo.
299
+ Use formas coloridas. NÃO explique, apenas dê o código DOT dentro de ```dot ... ```.
300
+ Texto: {text[:15000]}
301
+ """
302
+ response = self.llm.chat([{"role": "user", "content": prompt}])
303
+ match = re.search(r'```dot(.*?)```', response, re.DOTALL)
304
+ if match: return match.group(1).strip()
305
+ return response
306
+
307
+ class ScriptwriterAgent:
308
+ def __init__(self, llm: LLMEngine):
309
+ self.llm = llm
310
+
311
+ def create_script(self, content: str, mode="lecture") -> List[Dict]:
312
+ if mode == "debate":
313
+ logger.info("🔥 [Roteirista] Criando DEBATE INTENSO...")
314
+ prompt = f"""
315
+ Crie um DEBATE acalorado mas intelectual entre dois agentes (8 falas).
316
+ Personagens:
317
+ - AGENT A (Gabriel): A favor / Otimista / Pragmático.
318
+ - AGENT B (Berta): Contra / Cética / Filosófica.
319
+
320
+ SAÍDA JSON: {{ "dialogue": [ {{"speaker": "Agent A", "text": "..."}}, {{"speaker": "Agent B", "text": "..."}} ] }}
321
+ Tema Base: {content[:15000]}
322
+ """
323
+ else:
324
+ logger.info("✍️ [Roteirista] Escrevendo roteiro de aula...")
325
+ prompt = f"""
326
+ Crie um roteiro de podcast (8 falas).
327
+ Personagens: GABRIEL (Aluno BR) e BERTA (Professora PT).
328
+ SAÍDA JSON: {{ "dialogue": [ {{"speaker": "Gabriel", "text": "..."}}, ...] }}
329
+ Base: {content[:15000]}
330
+ """
331
+
332
+ try:
333
+ resp = self.llm.chat([{"role": "user", "content": prompt}], json_mode=True)
334
+ return json.loads(resp).get("dialogue", [])
335
+ except: return []
336
+
337
+ class ExaminerAgent:
338
+ def __init__(self, llm: LLMEngine):
339
+ self.llm = llm
340
+
341
+ def generate_quiz(self, content: str) -> List[Dict]:
342
+ logger.info("📝 [Examinador] Criando Prova Gamificada...")
343
+ prompt = f"""
344
+ Crie 5 perguntas de múltipla escolha (Difíceis).
345
+ SAÍDA JSON: {{ "quiz": [ {{ "question": "...", "options": ["A)..."], "correct_option": "A", "explanation": "..." }} ] }}
346
+ Base: {content[:15000]}
347
+ """
348
+ try:
349
+ resp = self.llm.chat([{"role": "user", "content": prompt}], json_mode=True)
350
+ return json.loads(resp).get("quiz", [])
351
+ except: return []
352
+
353
+ class PublisherAgent:
354
+ def create_handout(self, state: GraphState, filename="Apostila_Estudos.pdf"):
355
+ logger.info("📚 [Editora] Diagramando Apostila PDF...")
356
+ pdf = FPDF()
357
+ pdf.add_page()
358
+ pdf.set_font("Arial", size=12)
359
+ pdf.set_font("Arial", 'B', 16)
360
+ pdf.cell(0, 10, "Apostila de Estudos - Scholar Graph", ln=True, align='C')
361
+ pdf.ln(10)
362
+ pdf.set_font("Arial", size=11)
363
+ safe_summary = state.summary.encode('latin-1', 'replace').decode('latin-1')
364
+ pdf.multi_cell(0, 7, safe_summary)
365
+ if state.mindmap_path and os.path.exists(state.mindmap_path):
366
+ pdf.add_page()
367
+ # FPDF expects path to image
368
+ pdf.image(state.mindmap_path, x=10, y=30, w=190)
369
+
370
+ filepath = ToolBox.get_file_path(filename)
371
+ pdf.output(filepath)
372
+ return filepath
373
+
374
class ScholarAgent:
    """Multi-agent study assistant.

    Ingests a topic or URL into per-user session state, then serves a numbered
    menu of study artifacts: summary, mind map, podcast/debate audio, quiz,
    Anki flashcards and a PDF handout.
    """

    def __init__(self, api_key: Optional[str] = None):
        """Build the agent and all sub-agents.

        Args:
            api_key: Groq API key; falls back to the ``GROQ_API_KEY`` env var.

        Raises:
            ValueError: If no API key is available.
        """
        self.api_key = api_key or os.getenv("GROQ_API_KEY")
        if not self.api_key:
            raise ValueError("GROQ_API_KEY is required for ScholarAgent")

        self.llm = LLMEngine(self.api_key)
        self.memory = VectorMemory()

        self.researcher = ResearcherAgent()
        self.ingestor = IngestAgent(self.researcher)

        self.professor = ProfessorAgent(self.llm)
        self.visualizer = VisualizerAgent(self.llm)
        self.scriptwriter = ScriptwriterAgent(self.llm)
        self.examiner = ExaminerAgent(self.llm)
        self.flashcarder = FlashcardAgent(self.llm)
        self.publisher = PublisherAgent()

        # NOTE(review): in-process session store keyed by user_id. Adequate for
        # a single worker; a real multi-user deployment should externalize this
        # (it is lost on restart and not shared across workers).
        self.sessions: Dict[str, GraphState] = {}

    def get_or_create_state(self, user_id: str) -> GraphState:
        """Return the session state for ``user_id``, creating it on first use."""
        if user_id not in self.sessions:
            self.sessions[user_id] = GraphState()
        return self.sessions[user_id]

    def process_request(self, user_input: str, user_id: str = "default") -> Dict[str, Any]:
        """
        Process user input and return a dictionary with response text and optional attachments.
        Structure:
        {
            "text": "...",
            "attachments": [
                {"type": "image", "url": "...", "title": "..."},
                {"type": "audio", "url": "...", "title": "..."},
                {"type": "file", "url": "...", "title": "..."}
            ]
        }
        """
        state = self.get_or_create_state(user_id)

        # Simple command parsing logic
        cmd = user_input.lower().strip()

        if not state.raw_content and not cmd.startswith("scholar:"):
            # Assume it's a topic or URL to ingest
            content = self.ingestor.process(user_input)
            if not content or len(content) < 50:
                return {"text": f"Não consegui encontrar conteúdo suficiente sobre '{user_input}'. Tente ser mais específico ou fornecer uma URL válida."}

            state.raw_content = content
            self.memory.ingest(content)

            return {
                "text": (
                    f"🎓 Conteúdo sobre '{user_input}' processado com sucesso!\n\n"
                    "**Menu Scholar Graph:**\n"
                    "1. Resumo Estratégico\n"
                    "2. Mapa Mental\n"
                    "3. Podcast (Aula)\n"
                    "4. Debate (Prós/Contras)\n"
                    "5. Quiz Gamificado\n"
                    "6. Flashcards (Anki)\n"
                    "7. Gerar Apostila PDF\n\n"
                    "Digite o número ou o nome da opção."
                )
            }

        # Menu handling
        if "resumo" in cmd or cmd == "1":
            state.summary = self.professor.summarize(state.raw_content)
            return {"text": f"## 📝 Resumo Estratégico\n\n{state.summary}"}

        elif "mapa" in cmd or "mental" in cmd or cmd == "2":
            dot = self.visualizer.create_mindmap(state.raw_content)
            path = ToolBox.generate_mindmap_image(dot)
            if path:
                state.mindmap_path = path
                filename = os.path.basename(path)
                return {
                    "text": "Aqui está o mapa mental do conteúdo:",
                    "attachments": [{
                        "type": "image",
                        # BUGFIX: the basename must be interpolated into the URL
                        # (it was previously a literal placeholder string).
                        "url": f"/generated/{filename}",
                        "title": "Mapa Mental"
                    }]
                }
            return {"text": "Desculpe, não consegui gerar o mapa mental."}

        elif "podcast" in cmd or "aula" in cmd or cmd == "3":
            script = self.scriptwriter.create_script(state.raw_content, mode="lecture")
            path = ToolBox.generate_audio_mix(script, filename=f"podcast_{user_id}.mp3")
            filename = os.path.basename(path)
            return {
                "text": "🎙️ Aula preparada! Ouça abaixo:",
                "attachments": [{
                    "type": "audio",
                    "url": f"/generated/{filename}",
                    "title": "Aula Podcast"
                }]
            }

        elif "debate" in cmd or cmd == "4":
            script = self.scriptwriter.create_script(state.raw_content, mode="debate")
            path = ToolBox.generate_audio_mix(script, filename=f"debate_{user_id}.mp3")
            filename = os.path.basename(path)
            return {
                "text": "🔥 Debate gerado! Ouça abaixo:",
                "attachments": [{
                    "type": "audio",
                    "url": f"/generated/{filename}",
                    "title": "Debate Intenso"
                }]
            }

        elif "quiz" in cmd or cmd == "5":
            quiz = self.examiner.generate_quiz(state.raw_content)
            # generate_quiz returns [] on failure; avoid returning a bare header.
            if not quiz:
                return {"text": "Desculpe, não consegui gerar o quiz. Tente novamente."}
            state.quiz_data = quiz
            text = "## 🎮 Quiz Gamificado\n\n"
            for i, q in enumerate(quiz):
                text += f"**{i+1}. {q['question']}**\n"
                for opt in q['options']:
                    text += f"- {opt}\n"
                text += f"\n*(Resposta correta: {q['correct_option']})*\n\n"
            return {"text": text}

        elif "flashcard" in cmd or "anki" in cmd or cmd == "6":
            cards = self.flashcarder.create_deck(state.raw_content)
            if cards:
                path = ToolBox.generate_anki_deck(cards)
                filename = os.path.basename(path)
                return {
                    "text": "✅ Deck do Anki criado com sucesso!",
                    "attachments": [{
                        "type": "file",
                        "url": f"/generated/{filename}",
                        "title": "Flashcards.apkg"
                    }]
                }
            return {"text": "Falha ao criar flashcards."}

        elif "apostila" in cmd or "pdf" in cmd or cmd == "7":
            if not state.summary:
                state.summary = self.professor.summarize(state.raw_content)

            path = self.publisher.create_handout(state)
            filename = os.path.basename(path)
            return {
                "text": "📚 Apostila completa gerada!",
                "attachments": [{
                    "type": "file",
                    "url": f"/generated/{filename}",
                    "title": "Apostila de Estudos.pdf"
                }]
            }

        elif cmd == "sair" or cmd == "8":
            # Reset this user's session so the next message starts a new topic.
            self.sessions[user_id] = GraphState()
            return {"text": "Saindo do modo Scholar. Envie um novo tópico para começar."}

        else:
            # New topic? Or unrecognized command
            return {
                "text": (
                    "Não entendi o comando. Se quiser iniciar um novo tópico, apenas digite o tema ou URL.\n"
                    "Se estiver tentando acessar o menu, use os números (1-7)."
                )
            }
jade/tests/test_scholar.py ADDED
@@ -0,0 +1,88 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
import unittest
import os
import sys
from unittest.mock import MagicMock, patch

# Mock dependencies that are heavy, need API keys, or may be absent in CI.
# `fpdf` is included because jade.scholar's PublisherAgent instantiates FPDF.
for _mod in (
    'groq', 'sentence_transformers', 'faiss', 'pypdf', 'genanki',
    'youtube_transcript_api', 'gtts', 'pydub', 'graphviz',
    'duckduckgo_search', 'fpdf',
):
    sys.modules[_mod] = MagicMock()

# Import after mocking so jade.scholar picks up the stubs.
# NOTE: the package lives at jade/scholar.py (no `backend.` prefix in this repo).
from jade.scholar import ScholarAgent, ToolBox, GraphState
22
+
23
class TestScholarAgent(unittest.TestCase):
    """Unit tests for ScholarAgent session handling and request routing."""

    def setUp(self):
        self.mock_api_key = "test_key"
        with patch.dict(os.environ, {"GROQ_API_KEY": self.mock_api_key}):
            self.agent = ScholarAgent(api_key=self.mock_api_key)

        # Make sure the output directory for generated artifacts exists.
        tests_dir = os.path.dirname(os.path.abspath(__file__))
        self.generated_dir = os.path.join(os.path.dirname(tests_dir), "generated")
        if not os.path.exists(self.generated_dir):
            os.makedirs(self.generated_dir)

    def test_initialization(self):
        """The agent and its LLM engine are constructed."""
        self.assertIsNotNone(self.agent)
        self.assertIsNotNone(self.agent.llm)

    def test_get_or_create_state(self):
        """Same user gets the same state object; different users get distinct ones."""
        first = self.agent.get_or_create_state("user1")
        self.assertIsInstance(first, GraphState)

        again = self.agent.get_or_create_state("user1")
        self.assertEqual(first, again)

        other = self.agent.get_or_create_state("user2")
        self.assertNotEqual(first, other)

    @patch.object(ToolBox, 'search_topic')
    @patch.object(ToolBox, 'scrape_web')
    def test_process_request_new_topic(self, mock_scrape, mock_search):
        """A fresh topic is ingested and the Scholar menu is returned."""
        mock_search.return_value = ["http://example.com"]
        mock_scrape.return_value = "Content about topic"

        reply = self.agent.process_request("Physics", "user1")

        self.assertIn("text", reply)
        self.assertIn("Conteúdo sobre 'Physics' processado", reply["text"])
        self.assertEqual(
            self.agent.sessions["user1"].raw_content,
            "\n\n--- Fonte: http://example.com ---\nContent about topic",
        )

    def test_process_request_menu_command(self):
        """Menu option '1' produces a summary and stores it on the state."""
        state = self.agent.get_or_create_state("user1")
        state.raw_content = "Some content"

        # Stub the professor so no LLM call is made.
        self.agent.professor.summarize = MagicMock(return_value="Summary of content")

        reply = self.agent.process_request("1", "user1")

        self.assertIn("text", reply)
        self.assertIn("Resumo Estratégico", reply["text"])
        self.assertIn("Summary of content", reply["text"])
        self.assertEqual(state.summary, "Summary of content")

    def test_process_request_unknown_command(self):
        """With content already loaded, an unrecognized command yields the fallback text."""
        state = self.agent.get_or_create_state("user1")
        state.raw_content = "Some content"

        reply = self.agent.process_request("unknown command", "user1")
        self.assertIn("text", reply)
        self.assertIn("Não entendi o comando", reply["text"])

    def tearDown(self):
        # Nothing to clean up; generated artifacts are left in place.
        pass
86
+
87
# Allow running this test module directly: `python test_scholar.py`.
if __name__ == '__main__':
    unittest.main()