Spaces:
Runtime error
Runtime error
Actualizar orchestrator.py
Browse files- orchestrator.py +76 -63
orchestrator.py
CHANGED
|
@@ -1,89 +1,102 @@
|
|
| 1 |
-
import
|
|
|
|
| 2 |
from typing import List
|
| 3 |
-
|
| 4 |
-
|
| 5 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 6 |
|
| 7 |
class SuperConfig:
|
| 8 |
-
def __init__(self,
|
| 9 |
-
self.openai_api_key = openai_api_key
|
| 10 |
self.deepseek_api_key = deepseek_api_key
|
| 11 |
-
self.
|
|
|
|
| 12 |
self.enable_local_models = enable_local_models
|
| 13 |
self.debug_mode = debug_mode
|
|
|
|
| 14 |
|
| 15 |
class SuperResponse:
|
| 16 |
-
def __init__(self, content
|
| 17 |
self.content = content
|
| 18 |
self.task_type = task_type
|
| 19 |
self.source = source
|
| 20 |
self.confidence = confidence
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 21 |
|
| 22 |
class SuperOrchestrator:
|
| 23 |
def __init__(self, config: SuperConfig):
|
| 24 |
self.config = config
|
| 25 |
-
self.
|
| 26 |
-
self.
|
| 27 |
-
self.
|
|
|
|
|
|
|
|
|
|
| 28 |
|
| 29 |
-
|
| 30 |
-
|
| 31 |
-
|
| 32 |
-
|
| 33 |
-
|
| 34 |
-
|
| 35 |
-
"resumen": ["openai","gemini","deepseek"],
|
| 36 |
-
"generaci贸n_imagen": ["gemini","openai","deepseek"],
|
| 37 |
-
"investigaci贸n": ["openai","gemini","deepseek"],
|
| 38 |
-
"escritura_creativa": ["gemini","openai","deepseek"],
|
| 39 |
-
"conversaci贸n": ["gemini","openai","deepseek"],
|
| 40 |
-
}
|
| 41 |
|
| 42 |
-
|
| 43 |
-
|
| 44 |
-
self.
|
|
|
|
|
|
|
| 45 |
|
| 46 |
-
|
| 47 |
-
|
| 48 |
|
| 49 |
-
|
| 50 |
-
|
| 51 |
-
|
| 52 |
-
|
| 53 |
-
|
| 54 |
-
|
| 55 |
-
|
| 56 |
-
|
| 57 |
-
return self.deepseek.chat(message, self._build_context(), task_type)
|
| 58 |
-
elif model_name == "gemini":
|
| 59 |
-
if not self.gemini:
|
| 60 |
-
raise Exception("Gemini API Key no configurada")
|
| 61 |
-
return self.gemini.chat(message, self._build_context(), task_type)
|
| 62 |
-
else:
|
| 63 |
-
raise Exception(f"Modelo desconocido: {model_name}")
|
| 64 |
|
| 65 |
-
|
| 66 |
-
|
| 67 |
-
|
| 68 |
-
|
| 69 |
-
|
| 70 |
-
|
|
|
|
| 71 |
|
| 72 |
-
|
| 73 |
try:
|
| 74 |
-
|
| 75 |
-
|
| 76 |
-
elapsed = time.time() - start
|
| 77 |
-
self.usage_stats[model_name] += 1
|
| 78 |
-
if response_text and isinstance(response_text, str) and len(response_text) > 0:
|
| 79 |
-
self.conversation_context.append(f"Asistente: {response_text}")
|
| 80 |
-
return SuperResponse(response_text, task_type, model_name, 0.95)
|
| 81 |
except Exception as e:
|
| 82 |
-
|
| 83 |
-
|
| 84 |
-
|
|
|
|
|
|
|
|
|
|
| 85 |
|
| 86 |
-
|
| 87 |
-
|
| 88 |
-
return SuperResponse(fallback, task_type, "ninguno", 0.0)
|
| 89 |
|
|
|
|
| 1 |
+
import logging
|
| 2 |
+
from datetime import datetime
|
| 3 |
from typing import List
|
| 4 |
+
import os
|
| 5 |
+
|
| 6 |
+
class TaskType:
    """String constants naming the task categories the orchestrator routes on.

    Values are the Spanish task labels used throughout the app. The previous
    revision's values were mojibake (UTF-8 accented characters misdecoded as
    GBK, e.g. "conversaci贸n"); they are restored to proper Spanish here.
    """

    CONVERSATION = "conversación"
    CODE_GENERATION = "código"
    CODE_EXPLANATION = "explicación_código"
    CREATIVE_WRITING = "escritura_creativa"
    TECHNICAL_EXPLANATION = "explicación_técnica"
    TRANSLATION = "traducción"
    SUMMARIZATION = "resumen"
    MATH_REASONING = "razonamiento_matemático"
    IMAGE_GENERATION = "generación_imagen"
    RESEARCH = "investigación"
    DEBUGGING = "depuración"
|
| 18 |
|
| 19 |
class SuperConfig:
    """Configuration bundle for the orchestrator.

    Plain attribute holder: API credentials for the remote providers plus a
    few runtime switches. Every argument has a safe default, so
    ``SuperConfig()`` yields a usable local-only configuration.
    """

    def __init__(self, deepseek_api_key="", openai_api_key="", hf_token="",
                 enable_local_models=True, debug_mode=False, max_history=20):
        # Provider credentials (empty string means "not configured").
        self.deepseek_api_key = deepseek_api_key
        self.openai_api_key = openai_api_key
        self.hf_token = hf_token
        # Runtime switches.
        self.enable_local_models = enable_local_models
        self.debug_mode = debug_mode
        # Upper bound on the orchestrator's message history length.
        self.max_history = max_history
|
| 27 |
|
| 28 |
class SuperResponse:
    """Value object describing one orchestrator answer.

    Attributes:
        content: the generated text (or image payload) returned to the caller.
        task_type: the task-type string the request was processed as.
        source: which backend produced the answer (e.g. "local_chat", "fallback").
        confidence: heuristic confidence score assigned by the orchestrator.
        processing_time: wall-clock seconds spent producing the answer.
    """

    def __init__(self, content, task_type, source, confidence, processing_time):
        self.content = content
        self.task_type = task_type
        self.source = source
        self.confidence = confidence
        self.processing_time = processing_time

    def __repr__(self):
        # Debug-friendly summary; content is elided to keep logs compact.
        return (f"{type(self).__name__}(source={self.source!r}, "
                f"task_type={self.task_type!r}, confidence={self.confidence!r}, "
                f"processing_time={self.processing_time!r})")
|
| 35 |
+
|
| 36 |
+
# Optional local model backends. These come from the project-local ``models``
# package, which may be absent in some deployments; when the import fails the
# names are set to None, and SuperOrchestrator.__init__ then skips creating
# the corresponding backend instead of crashing at import time.
try:
    from models.local_chat_model import ChatLocal
    from models.local_code_model import CodeLocal
    from models.local_image_model import ImageLocal
except ImportError:
    ChatLocal = None
    CodeLocal = None
    ImageLocal = None
|
| 44 |
|
| 45 |
class SuperOrchestrator:
    """Routes incoming messages to the available local model backends.

    Tries the chat model for conversational tasks, the code model as the
    general fallback, and the image model for image-generation requests.
    When every backend fails (or none is available) it returns a canned
    Spanish apology with confidence 0.0.
    """

    def __init__(self, config: SuperConfig):
        self.config = config
        self.logger = logging.getLogger("super_chatbot")
        self._setup_logger()
        # Rolling transcript of user messages, bounded by config.max_history.
        self.history: List[str] = []
        # Backends are optional: the module-level imports of ChatLocal /
        # CodeLocal / ImageLocal may have yielded None, and local models can
        # be disabled wholesale via config.enable_local_models.
        self.local_chat = ChatLocal() if (ChatLocal and config.enable_local_models) else None
        self.local_code = CodeLocal() if (CodeLocal and config.enable_local_models) else None
        self.local_image = ImageLocal() if (ImageLocal and config.enable_local_models) else None

    def _setup_logger(self):
        """Attach a stream handler once; level follows config.debug_mode."""
        # NOTE(review): if the logger already has handlers, the level is never
        # (re)applied — presumably deliberate so an embedding app's logging
        # config is not clobbered; confirm before changing.
        if not self.logger.handlers:
            self.logger.setLevel(logging.DEBUG if self.config.debug_mode else logging.INFO)
            ch = logging.StreamHandler()
            ch.setLevel(logging.DEBUG)
            self.logger.addHandler(ch)

    def process(self, message: str, task_type: str = TaskType.CONVERSATION) -> SuperResponse:
        """Answer *message*, trying the local backends in priority order.

        Returns a SuperResponse whose ``source`` names the backend that
        produced the answer ("fallback" when all of them failed).
        """
        start = datetime.now()
        self.history.append(message)
        if len(self.history) > self.config.max_history:
            self.history = self.history[-self.config.max_history:]

        content = None
        source = "local"

        # Conversational tasks go to the chat model first.
        if task_type in [TaskType.CONVERSATION, TaskType.CREATIVE_WRITING]:
            if self.local_chat:
                try:
                    content = self.local_chat.generate(message, task_type)
                    source = "local_chat"
                except Exception as e:
                    self.logger.error(f"Fallo local_chat: {str(e)}")
                    content = None

        # The code model is the general fallback — but never for image
        # requests: previously an IMAGE_GENERATION task reached this branch
        # first and was answered with generated code, so the image branch
        # below could never run (bug fix).
        if not content and self.local_code and task_type != TaskType.IMAGE_GENERATION:
            try:
                content = self.local_code.generate_code(message, task_type)
                source = "local_code"
            except Exception as e:
                self.logger.error(f"Fallo local_code: {str(e)}")
                content = None

        if not content and self.local_image and task_type == TaskType.IMAGE_GENERATION:
            try:
                content = self.local_image.generate(message)
                source = "local_image"
            except Exception as e:
                self.logger.error(f"Fallo local_image: {str(e)}")
                content = None

        if not content:
            content = "Lo siento, no pude generar una respuesta con los modelos locales disponibles."
            source = "fallback"

        duration = (datetime.now() - start).total_seconds()
        # Bug fix: the fallback apology must not carry the same 0.85
        # confidence as a real answer — the previous revision of this file
        # reported 0.0 for its fallback response, restored here.
        confidence = 0.0 if source == "fallback" else 0.85
        return SuperResponse(content, task_type, source, confidence, duration)
|
|
|
|
| 102 |
|