ivanoctaviogaitansantos committed
Commit 5bb7c4b (verified)
1 Parent(s): 7d38fbd

Update orchestrator.py

Files changed (1)
  1. orchestrator.py +76 -63
orchestrator.py CHANGED
@@ -1,89 +1,102 @@
-import time
 from typing import List
-from models.openai_api import OpenAIClient
-from models.deepseek_api import DeepSeekClient
-from models.gemini_api import GeminiClient
 
 class SuperConfig:
-    def __init__(self, openai_api_key="", deepseek_api_key="", gemini_api_key="", enable_local_models=False, debug_mode=False):
-        self.openai_api_key = openai_api_key
         self.deepseek_api_key = deepseek_api_key
-        self.gemini_api_key = gemini_api_key
         self.enable_local_models = enable_local_models
         self.debug_mode = debug_mode
 
 class SuperResponse:
-    def __init__(self, content: str, task_type: str, source: str, confidence: float):
         self.content = content
         self.task_type = task_type
         self.source = source
         self.confidence = confidence
 
 class SuperOrchestrator:
     def __init__(self, config: SuperConfig):
         self.config = config
-        self.usage_stats = {"openai": 0, "deepseek": 0, "gemini": 0}
-        self.load_times = {"openai": "0.3s", "deepseek": "0.4s", "gemini": "0.5s"}
-        self.conversation_context: List[str] = []
-
-        self.model_priority = {
-            "código": ["deepseek","openai","gemini"],
-            "depuración": ["deepseek","openai","gemini"],
-            "explicación_código": ["deepseek","openai","gemini"],
-            "razonamiento_matemático": ["openai","gemini","deepseek"],
-            "traducción": ["openai","gemini","deepseek"],
-            "resumen": ["openai","gemini","deepseek"],
-            "generación_imagen": ["gemini","openai","deepseek"],
-            "investigación": ["openai","gemini","deepseek"],
-            "escritura_creativa": ["gemini","openai","deepseek"],
-            "conversación": ["gemini","openai","deepseek"],
-        }
 
-        self.openai = OpenAIClient(config.openai_api_key) if config.openai_api_key else None
-        self.deepseek = DeepSeekClient(config.deepseek_api_key) if config.deepseek_api_key else None
-        self.gemini = GeminiClient(config.gemini_api_key) if config.gemini_api_key else None
 
-    def _build_context(self) -> List[str]:
-        return self.conversation_context[-10:]
 
-    def _call_model(self, model_name: str, message: str, task_type: str) -> str:
-        if model_name == "openai":
-            if not self.openai:
-                raise Exception("OpenAI API Key no configurada")
-            return self.openai.chat(message, self._build_context(), task_type)
-        elif model_name == "deepseek":
-            if not self.deepseek:
-                raise Exception("DeepSeek API Key no configurada")
-            return self.deepseek.chat(message, self._build_context(), task_type)
-        elif model_name == "gemini":
-            if not self.gemini:
-                raise Exception("Gemini API Key no configurada")
-            return self.gemini.chat(message, self._build_context(), task_type)
-        else:
-            raise Exception(f"Modelo desconocido: {model_name}")
 
-    def process(self, message: str, task_type: str) -> SuperResponse:
-        self.conversation_context.append(f"Usuario: {message}")
-        if len(self.conversation_context) > 20:
-            self.conversation_context = self.conversation_context[-20:]
-
-        candidates = self.model_priority.get(task_type, ["gemini","openai","deepseek"])
 
-        for model_name in candidates:
             try:
-                start = time.time()
-                response_text = self._call_model(model_name, message, task_type)
-                elapsed = time.time() - start
-                self.usage_stats[model_name] += 1
-                if response_text and isinstance(response_text, str) and len(response_text) > 0:
-                    self.conversation_context.append(f"Asistente: {response_text}")
-                    return SuperResponse(response_text, task_type, model_name, 0.95)
             except Exception as e:
-                if self.config.debug_mode:
-                    print(f"Error llamando a {model_name}: {str(e)}")
-                continue
 
-        fallback = "Lo siento, no pude procesar tu solicitud con los modelos disponibles."
-        self.conversation_context.append(f"Asistente: {fallback}")
-        return SuperResponse(fallback, task_type, "ninguno", 0.0)
 
+import logging
+from datetime import datetime
 from typing import List
+import os
+
+class TaskType:
+    CONVERSATION = "conversación"
+    CODE_GENERATION = "código"
+    CODE_EXPLANATION = "explicación_código"
+    CREATIVE_WRITING = "escritura_creativa"
+    TECHNICAL_EXPLANATION = "explicación_técnica"
+    TRANSLATION = "traducción"
+    SUMMARIZATION = "resumen"
+    MATH_REASONING = "razonamiento_matemático"
+    IMAGE_GENERATION = "generación_imagen"
+    RESEARCH = "investigación"
+    DEBUGGING = "depuración"
 
 class SuperConfig:
+    def __init__(self, deepseek_api_key="", openai_api_key="", hf_token="", enable_local_models=True, debug_mode=False, max_history=20):
         self.deepseek_api_key = deepseek_api_key
+        self.openai_api_key = openai_api_key
+        self.hf_token = hf_token
         self.enable_local_models = enable_local_models
         self.debug_mode = debug_mode
+        self.max_history = max_history
 
 class SuperResponse:
+    def __init__(self, content, task_type, source, confidence, processing_time):
         self.content = content
         self.task_type = task_type
         self.source = source
         self.confidence = confidence
+        self.processing_time = processing_time
+
+try:
+    from models.local_chat_model import ChatLocal
+    from models.local_code_model import CodeLocal
+    from models.local_image_model import ImageLocal
+except ImportError:
+    ChatLocal = None
+    CodeLocal = None
+    ImageLocal = None
 
 class SuperOrchestrator:
     def __init__(self, config: SuperConfig):
         self.config = config
+        self.logger = logging.getLogger("super_chatbot")
+        self._setup_logger()
+        self.history: List[str] = []
+        self.local_chat = ChatLocal() if (ChatLocal and config.enable_local_models) else None
+        self.local_code = CodeLocal() if (CodeLocal and config.enable_local_models) else None
+        self.local_image = ImageLocal() if (ImageLocal and config.enable_local_models) else None
 
+    def _setup_logger(self):
+        if not self.logger.handlers:
+            self.logger.setLevel(logging.DEBUG if self.config.debug_mode else logging.INFO)
+            ch = logging.StreamHandler()
+            ch.setLevel(logging.DEBUG)
+            self.logger.addHandler(ch)
 
+    def process(self, message: str, task_type: str = TaskType.CONVERSATION) -> SuperResponse:
+        start = datetime.now()
+        self.history.append(message)
+        if len(self.history) > self.config.max_history:
+            self.history = self.history[-self.config.max_history:]
 
+        content = None
+        source = "local"
 
+        if task_type in [TaskType.CONVERSATION, TaskType.CREATIVE_WRITING]:
+            if self.local_chat:
+                try:
+                    content = self.local_chat.generate(message, task_type)
+                    source = "local_chat"
+                except Exception as e:
+                    self.logger.error(f"Fallo local_chat: {str(e)}")
+                    content = None
 
+        if not content and self.local_code:
+            try:
+                content = self.local_code.generate_code(message, task_type)
+                source = "local_code"
+            except Exception as e:
+                self.logger.error(f"Fallo local_code: {str(e)}")
+                content = None
 
+        if not content and self.local_image and task_type == TaskType.IMAGE_GENERATION:
             try:
+                content = self.local_image.generate(message)
+                source = "local_image"
             except Exception as e:
+                self.logger.error(f"Fallo local_image: {str(e)}")
+                content = None
+
+        if not content:
+            content = "Lo siento, no pude generar una respuesta con los modelos locales disponibles."
+            source = "fallback"
 
+        duration = (datetime.now() - start).total_seconds()
+        return SuperResponse(content, task_type, source, 0.85, duration)
 
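
For quick reference, a minimal usage sketch of the new orchestrator API (not part of the commit itself). It assumes orchestrator.py is importable and that the models package with the local model classes may be missing, in which case process() returns the fallback response:

# Hypothetical usage sketch: exercises the TaskType / SuperConfig /
# SuperOrchestrator API introduced in this revision.
from orchestrator import SuperConfig, SuperOrchestrator, TaskType

config = SuperConfig(enable_local_models=True, debug_mode=True, max_history=10)
orchestrator = SuperOrchestrator(config)

# CREATIVE_WRITING is routed to the local chat model when ChatLocal imported
# successfully; otherwise the orchestrator returns its fallback message.
response = orchestrator.process("Escribe un haiku sobre el mar", TaskType.CREATIVE_WRITING)
print(response.source)           # "local_chat" or "fallback"
print(response.processing_time)  # elapsed seconds measured with datetime.now()
print(response.content)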