# NOTE(review): the original lines here were Hugging Face Spaces file-viewer
# residue (status banner, blob hashes, line-number gutter), not Python source.
import gradio as gr
import requests
from transformers import pipeline
import logging
import threading
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
class SimpleChatbot:
    """Chatbot backed by a local DialoGPT text-generation pipeline.

    The model is loaded lazily (and optionally warmed up by a background
    thread started at module level).  ``chat_response`` is a generator
    meant to stream into a ``gr.Chatbot`` component, so it yields chat
    *histories* (lists of ``[user, bot]`` pairs), not bare strings —
    yielding strings to a Chatbot output is what crashed the original.
    """

    def __init__(self):
        # Running log of {"user": ..., "bot": ...} exchanges.
        self.conversation_history = []
        self.models_loaded = False
        self.chat_model = None
        # Serializes load_models(): the background warm-up thread and a
        # lazy load from chat_response() may race otherwise.
        self._load_lock = threading.Lock()

    def load_models(self):
        """Load the DialoGPT pipeline once; safe to call from any thread.

        Returns True when the model is (already) loaded, False on failure.
        """
        with self._load_lock:
            if self.models_loaded:
                return True
            try:
                logger.info('Loading DialoGPT model...')
                self.chat_model = pipeline(
                    "text-generation",
                    model="microsoft/DialoGPT-small",
                    device="cpu",
                )
                self.models_loaded = True
                logger.info('Model loaded successfully')
                return True
            except Exception as e:
                logger.error(f'Error loading model: {e}')
                return False

    def chat_response(self, message, history):
        """Stream bot replies as updated chat histories.

        Parameters
        ----------
        message : str | None
            The user's message.  The UI wiring clears the textbox in the
            first event stage, so this is usually empty here; the pending
            user turn is then recovered from ``history[-1]``.
        history : list | None
            Chat history as ``[user, bot]`` pairs (gr.Chatbot format).

        Yields fresh copies of the history — first with a
        "Procesando..." placeholder, then with the final reply (or an
        error message if generation failed).
        """
        # Work on a copy so yielded snapshots are independent objects.
        history = [list(pair) for pair in history] if history else []
        text = (message or "").strip()
        if text:
            # Direct call with a fresh message: queue the pending turn
            # unless the submit handler already appended it.
            if not history or history[-1][0] != message or history[-1][1] is not None:
                history.append([message, None])
        elif history and history[-1][1] is None:
            # Textbox was cleared before this stage ran; recover the
            # pending user message from the last history entry.
            text = (history[-1][0] or "").strip()
        if not text:
            # Nothing to answer: emit the history unchanged and stop.
            yield history
            return
        user_turn = history[-1][0]
        yield history[:-1] + [[user_turn, "Procesando..."]]
        try:
            if not self.models_loaded:
                self.load_models()
            # Generate a reply with the local model.
            result = self.chat_model(
                text,
                max_length=150,
                num_return_sequences=1,
                temperature=0.7,
                do_sample=True,
            )
            response = result[0]['generated_text']
            # DialoGPT echoes the prompt; strip it from the reply.
            if response.startswith(text):
                response = response[len(text):].strip()
            full_response = response + "\n\n---\nFuente: Modelo Local"
            self.conversation_history.append({
                "user": text,
                "bot": response,
            })
            yield history[:-1] + [[user_turn, full_response]]
        except Exception as e:
            yield history[:-1] + [[user_turn, f"Error: {str(e)}"]]
# Single shared chatbot instance used by every Gradio event handler.
chatbot = SimpleChatbot()


def load_models_async():
    """Warm up the model off the main thread so the UI starts instantly."""
    chatbot.load_models()


# Daemon thread: the process may exit without waiting for the load.
model_loader = threading.Thread(target=load_models_async, daemon=True)
model_loader.start()
# Simple interface.  NOTE: the original labels contained mojibake
# (UTF-8 text decoded as GBK, e.g. "Conversaci贸n"); the accented
# Spanish strings are restored here.
with gr.Blocks(title="BATUTO Chatbot") as demo:
    gr.Markdown("# BATUTO Chatbot - Asistente Educativo")
    with gr.Row():
        with gr.Column(scale=2):
            # Main conversation panel plus the message entry controls.
            chatbot_interface = gr.Chatbot(label="Conversación", height=400)
            msg = gr.Textbox(
                label="Escribe tu mensaje",
                placeholder="Pregunta sobre programación...",
                lines=2,
            )
            with gr.Row():
                submit_btn = gr.Button("Enviar", variant="primary")
                clear_btn = gr.Button("Limpiar", variant="secondary")
        with gr.Column(scale=1):
            # Static help sidebar.
            gr.Markdown("### Información")
            gr.Markdown("""
**Ejemplos:**
- Explica qué es Python
- Muestra función para ordenar listas
- Corrige código Python
""")

    # Event handlers
    def handle_submit(message, history):
        """Clear the textbox and append the pending [user, None] turn."""
        if not message.strip():
            return "", history
        return "", history + [[message, None]]

    # Two-stage wiring: handle_submit queues the user turn and clears the
    # textbox, then chat_response streams the reply into the Chatbot
    # (msg is already empty by then, so chat_response recovers the
    # pending message from the history).
    submit_btn.click(
        handle_submit,
        inputs=[msg, chatbot_interface],
        outputs=[msg, chatbot_interface],
    ).then(
        chatbot.chat_response,
        inputs=[msg, chatbot_interface],
        outputs=[chatbot_interface],
    )
    msg.submit(
        handle_submit,
        inputs=[msg, chatbot_interface],
        outputs=[msg, chatbot_interface],
    ).then(
        chatbot.chat_response,
        inputs=[msg, chatbot_interface],
        outputs=[chatbot_interface],
    )
    # Reset both the textbox and the conversation panel.
    clear_btn.click(
        lambda: (None, []),
        outputs=[msg, chatbot_interface],
    )
if __name__ == "__main__":
demo.launch() |