# NOTE(review): the original lines here read "Spaces: Runtime error / Runtime
# error / Runtime error" — paste residue from the Hugging Face Spaces status
# page, not source code. Converted to a comment so the file parses.
"""BATUTO Chatbot — Gradio app entry point for Hugging Face Spaces."""

# Standard library
import json
import logging
import os
import threading  # hoisted from mid-file (PEP 8: imports at top)
from datetime import datetime

# Third-party
import gradio as gr

# Local project modules
from model_manager import ModelManager
from api_agent import APIAgent
from prompt_generator import PromptGenerator

# Logging configuration
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

# Shared singletons used by the chatbot class and the Gradio handlers below.
model_manager = ModelManager()
api_agent = APIAgent()
prompt_generator = PromptGenerator()
class BATUTOChatbot:
    """Conversation state and orchestration between external APIs and local models.

    Keeps the per-session history and runtime configuration, and decides
    whether a reply comes from an external API (DeepSeek/OpenAI) or from the
    local fallback model managed by ``model_manager``.
    """

    def __init__(self):
        # One dict per exchange: timestamp, user, bot, source, intent.
        self.conversation_history = []
        # Runtime configuration, editable from the UI via update_config().
        self.config = {
            "deepseek_api_key": "",
            "openai_api_key": "",
            "max_tokens": 400,
            "temperature": 0.7,
        }

    def update_config(self, deepseek_key, openai_key, max_tokens, temperature):
        """Update the configuration from UI widgets and push it to both agents.

        Returns a short status string for the config output textbox.
        """
        updated = False
        if deepseek_key:
            self.config["deepseek_api_key"] = deepseek_key
            updated = True
        if openai_key:
            self.config["openai_api_key"] = openai_key
            updated = True
        # BUGFIX: explicit None checks instead of truthiness — a numeric 0
        # is falsy but would still be a deliberate value from a slider.
        if max_tokens is not None:
            self.config["max_tokens"] = int(max_tokens)
            updated = True
        if temperature is not None:
            self.config["temperature"] = float(temperature)
            updated = True
        # Keep both agents in sync with the current configuration.
        model_manager.set_config(self.config)
        api_agent.set_config(self.config)
        return "✅ Configuración actualizada" if updated else "ℹ️ Sin cambios"

    def get_system_status(self):
        """Return an HTML snippet describing the current system status."""
        has_deepseek = bool(self.config.get("deepseek_api_key"))
        has_openai = bool(self.config.get("openai_api_key"))
        models_loaded = model_manager.loaded
        status_html = f"""
        <div style='padding: 15px; border-radius: 10px; background: #f8f9fa; border: 2px solid #e9ecef;'>
        <h4 style='margin-top: 0;'>🔧 Estado del Sistema</h4>
        <p><strong>Modelos locales:</strong> {'✅ Cargados' if models_loaded else '🔄 Cargando...'}</p>
        <p><strong>DeepSeek API:</strong> {'✅ Configurada' if has_deepseek else '❌ No configurada'}</p>
        <p><strong>OpenAI API:</strong> {'✅ Configurada' if has_openai else '❌ No configurada'}</p>
        <p><strong>Mensajes en sesión:</strong> {len(self.conversation_history)}</p>
        </div>
        """
        return status_html

    def chat_response(self, message, history):
        """Yield the bot reply (generator, so the UI can show progress).

        Tries the external APIs first and falls back to the local model.
        """
        if not message.strip():
            # BUGFIX: this is a generator, so a bare ``return ""`` yielded
            # nothing at all; yield the empty reply instead.
            yield ""
            return
        # Progress indicator while the real answer is being produced.
        yield "🔄 Procesando..."
        try:
            # Classify the message and build a richer prompt from it.
            intent = prompt_generator.detect_intent(message)
            enhanced_prompt = prompt_generator.enhance_prompt(message, intent)
            # External APIs first; local model only as fallback.
            api_result = api_agent.generate_response(enhanced_prompt, intent["is_code"])
            if api_result["response"]:
                response_text = api_result["response"]
                source = api_result["source"]
            else:
                # Short max_length to fit the HF Spaces CPU quota.
                response_text = model_manager.generate_local_response(
                    enhanced_prompt,
                    intent["is_code"],
                    max_length=200,
                )
                source = "local"
            # Append provenance metadata to the visible answer.
            metadata = f"\n\n---\n*🔧 Fuente: {source.upper()}*"
            if intent["is_code"]:
                metadata += " | 💻 *Tipo: Código*"
            else:
                metadata += " | 💬 *Tipo: Conversación*"
            full_response = response_text + metadata
            # Persist the exchange for the status panel / later inspection.
            self.conversation_history.append({
                "timestamp": datetime.now().isoformat(),
                "user": message,
                "bot": response_text,
                "source": source,
                "intent": intent,
            })
            yield full_response
        except Exception as e:
            # Surface the error in the chat instead of crashing the stream.
            error_msg = f"❌ Error: {str(e)}"
            logger.exception("Error en chat_response: %s", e)
            yield error_msg

    def clear_conversation(self):
        """Clear the session history; returns reset values for (msg, chat)."""
        self.conversation_history.clear()
        return None, []
# Single chatbot instance shared by every Gradio event handler.
chatbot = BATUTOChatbot()


def load_models_async():
    """Load the local models in the background so start-up is not blocked."""
    logger.info("🔄 Cargando modelos en segundo plano...")
    model_manager.load_models()
    logger.info("✅ Modelos cargados exitosamente")


# Daemon thread: model loading must neither delay serving the UI nor keep
# the process alive at shutdown.
import threading

model_loader = threading.Thread(target=load_models_async, daemon=True)
model_loader.start()
# ---------------------------------------------------------------------------
# Gradio UI (tuned for Hugging Face Spaces)
# ---------------------------------------------------------------------------
with gr.Blocks(
    title="BATUTO Chatbot - Asistente Educativo",
    theme=gr.themes.Soft(),
    css="""
    .gradio-container {
        max-width: 1000px !important;
        margin: auto;
    }
    .chat-container {
        height: 500px;
    }
    .status-panel {
        background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
        padding: 20px;
        border-radius: 10px;
        color: white;
    }
    """,
) as demo:
    gr.Markdown("""
    # 🤖 BATUTO Chatbot - Asistente Educativo
    **Sistema inteligente con modelos locales y APIs externas**
    *Desplegado en Hugging Face Spaces - Versión Optimizada*
    """)

    with gr.Row():
        with gr.Column(scale=2):
            # Chat area
            gr.Markdown("### 💬 Conversación")
            chatbot_interface = gr.Chatbot(
                label="Chat con BATUTO",
                height=400,
                show_copy_button=True,
                container=True,
            )
            msg = gr.Textbox(
                label="Escribe tu mensaje",
                placeholder="Pregunta sobre programación, explica conceptos, pide ejemplos...",
                lines=2,
                max_lines=4,
            )
            with gr.Row():
                submit_btn = gr.Button("🚀 Enviar", variant="primary")
                clear_btn = gr.Button("🧹 Limpiar", variant="secondary")

        with gr.Column(scale=1):
            # System status panel
            gr.Markdown("### 📊 Estado del Sistema")
            status_display = gr.HTML()

            # Quick configuration
            with gr.Accordion("⚙️ Configuración Rápida", open=False):
                with gr.Group():
                    deepseek_key = gr.Textbox(
                        label="DeepSeek API Key",
                        type="password",
                        placeholder="sk-...",
                        info="Opcional - para respuestas mejoradas",
                    )
                    openai_key = gr.Textbox(
                        label="OpenAI API Key",
                        type="password",
                        placeholder="sk-...",
                        info="Opcional - alternativa",
                    )
                    with gr.Row():
                        max_tokens = gr.Slider(
                            label="Tokens máx",
                            minimum=100,
                            maximum=800,
                            value=400,
                            step=50,
                        )
                        temperature = gr.Slider(
                            label="Temperatura",
                            minimum=0.1,
                            maximum=1.0,
                            value=0.7,
                            step=0.1,
                        )
                    save_config_btn = gr.Button("💾 Guardar Config", size="sm")
                    config_output = gr.Textbox(label="Estado", interactive=False)

            # Usage help
            with gr.Accordion("ℹ️ Cómo usar", open=True):
                gr.Markdown("""
                **Ejemplos:**
                - 💻 *"Muéstrame una función Python para ordenar listas"*
                - 💬 *"Explica qué es machine learning"*
                - 🔧 *"Corrige este código: [tu código]"*
                **Fuentes:**
                1. 🚀 DeepSeek API (si se configura)
                2. ⚡ OpenAI API (si se configura)
                3. 🤖 Modelos locales (fallback)
                """)

    # ------------------------------------------------------------------
    # Event handlers
    # ------------------------------------------------------------------
    def handle_submit(message, history):
        """Append the user turn to the chat history and clear the textbox."""
        if not message.strip():
            return "", history
        return "", history + [[message, None]]

    def respond(history):
        """Stream the bot answer into the last (pending) chat turn.

        BUGFIX: the original chain re-used the textbox as input to
        chat_response, but handle_submit had already cleared it, so the bot
        always received an empty message — and the generator yielded a bare
        string into a gr.Chatbot output that expects a list of message pairs.
        Read the pending user message from the history instead and yield the
        updated history.
        """
        if not history or history[-1][1] is not None:
            # Nothing pending (e.g. empty submission) — no-op.
            yield history
            return
        user_message = history[-1][0]
        for partial in chatbot.chat_response(user_message, history):
            history[-1][1] = partial
            yield history

    # Send button
    submit_btn.click(
        handle_submit,
        inputs=[msg, chatbot_interface],
        outputs=[msg, chatbot_interface],
    ).then(
        respond,
        inputs=[chatbot_interface],
        outputs=[chatbot_interface],
    )

    # Enter submits too
    msg.submit(
        handle_submit,
        inputs=[msg, chatbot_interface],
        outputs=[msg, chatbot_interface],
    ).then(
        respond,
        inputs=[chatbot_interface],
        outputs=[chatbot_interface],
    )

    # Clear chat (resets textbox and history)
    clear_btn.click(
        chatbot.clear_conversation,
        outputs=[msg, chatbot_interface],
    )

    # Save configuration, then refresh the status panel
    save_config_btn.click(
        chatbot.update_config,
        inputs=[deepseek_key, openai_key, max_tokens, temperature],
        outputs=[config_output],
    ).then(
        chatbot.get_system_status,
        outputs=[status_display],
    )

    # Initial status on page load
    demo.load(
        chatbot.get_system_status,
        outputs=[status_display],
    )
# Launch settings for Hugging Face Spaces.
if __name__ == "__main__":
    # NOTE(review): share=True is reportedly ignored (with a warning) when
    # running on Spaces itself — confirm; kept for parity with local runs.
    launch_options = {
        "server_name": "0.0.0.0",
        "server_port": 7860,
        "share": True,
        "show_error": True,
        "debug": False,
        "favicon_path": None,
    }
    demo.launch(**launch_options)