File size: 3,569 Bytes
382d4ec b4c05f8 6ec8388 89200b0 1b88f4f 6ec8388 54da337 6ec8388 54da337 6ec8388 1b88f4f 6ec8388 1b88f4f 9c7c393 48bdb91 9c7c393 9fac1a2 54da337 a983381 54da337 9fac1a2 54da337 9fac1a2 54da337 9c7c393 6ec8388 9c7c393 1b88f4f 9c7c393 48a6e27 9c7c393 48a6e27 1b88f4f 48a6e27 9c7c393 1b88f4f 9c7c393 1b88f4f 48a6e27 1b88f4f 9c7c393 48a6e27 9c7c393 54da337 48bdb91 28a469c 9c7c393 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 |
import gradio as gr
import requests
import json
import os
import base64
import io
from PIL import Image
# Encode an uploaded image as base64 for the API payload
def process_image(image):
    """Serialize a PIL image to PNG and return it base64-encoded (str).

    Returns None when no image was provided.
    """
    if image is None:
        return None
    png_buffer = io.BytesIO()
    image.save(png_buffer, format="PNG")
    raw_png = png_buffer.getvalue()
    return base64.b64encode(raw_png).decode("utf-8")
# Generate a text reply with the Sambanova chat-completions API
def generar_respuesta_api(mensaje_usuario):
    """Send *mensaje_usuario* to the Sambanova chat API and return the reply text.

    Returns the assistant's message content on success, or an error/warning
    string on failure (the caller shows whatever is returned in the chat).
    """
    API_KEY = os.getenv("SAMBANOVA_API_KEY")
    # Fail fast when the key is missing — same warning the image helper returns.
    if not API_KEY:
        return "⚠️ SAMBANOVA_API_KEY no configurada."
    API_URL = "https://api.sambanova.ai/v1/chat/completions"
    headers = {"Authorization": f"Bearer {API_KEY}", "Content-Type": "application/json"}
    messages = [
        {"role": "system", "content": "Eres un asistente útil y detallado."},
        {"role": "user", "content": mensaje_usuario},
    ]
    json_data = {"model": "Llama-4-Maverick-17B-128E-Instruct", "messages": messages, "stream": False}
    try:
        # Timeout keeps a stalled request from hanging the Gradio callback forever.
        response = requests.post(API_URL, headers=headers, json=json_data, timeout=60)
        response.raise_for_status()
        return response.json()["choices"][0]["message"]["content"]
    except Exception as e:
        # Surface the error in the chat instead of crashing the UI handler.
        return f"Error en la generación de respuesta: {str(e)}"
# Describe an uploaded image to produce an English prompt
def analizar_imagen_y_generar_prompt(image_base64):
    """Ask the Sambanova vision model for a detailed English description.

    *image_base64* is a base64-encoded PNG (as produced by process_image).
    Returns the model's description on success, or an error/warning string.
    """
    API_KEY = os.getenv("SAMBANOVA_API_KEY")
    # Check the key before building any request state.
    if not API_KEY:
        return "⚠️ SAMBANOVA_API_KEY no configurada."
    API_URL = "https://api.sambanova.ai/v1/chat/completions"
    headers = {"Authorization": f"Bearer {API_KEY}", "Content-Type": "application/json"}
    messages = [
        {"role": "system", "content": "Describe images in detailed English."},
        {
            "role": "user",
            "content": [
                {"type": "image_url", "image_url": {"url": f"data:image/png;base64,{image_base64}"}},
                {"type": "text", "text": "Provide a detailed English description suitable for a prompt."},
            ],
        },
    ]
    json_data = {"model": "Llama-4-Maverick-17B-128E-Instruct", "messages": messages, "stream": False}
    try:
        # Timeout keeps a stalled request from hanging the Gradio callback forever.
        response = requests.post(API_URL, headers=headers, json=json_data, timeout=60)
        response.raise_for_status()
        return response.json()["choices"][0]["message"]["content"]
    except Exception as e:
        return f"Error al analizar imagen: {str(e)}"
# Main handler for one chat turn (text and/or image)
def chat_mode(user_message, image_input, chat_history, auto_mode):
    """Process one chat submission and return the updated history twice
    (once for the Chatbot component, once for the State).

    When *auto_mode* is on and an image is attached, the image is described;
    otherwise the text message is answered. An image-only submission is also
    handled in auto mode.
    """
    if chat_history is None:
        chat_history = []
    image_b64 = process_image(image_input) if image_input else None
    has_text = user_message.strip() != ""
    if has_text or (auto_mode and image_b64):
        if auto_mode and image_b64:
            respuesta = analizar_imagen_y_generar_prompt(image_b64)
        else:
            respuesta = generar_respuesta_api(user_message)
        # gr.Chatbot expects [user_text, bot_text] pairs — not role labels.
        chat_history.append([user_message, respuesta])
    return chat_history, chat_history
# Gradio UI: dynamic chat with optional image upload
with gr.Blocks() as demo:
    gr.Markdown("# ⚡ BATUTO / Prompt Studio — Chat IA + Imagen")

    # Chat display plus the inputs of one turn.
    chat_box = gr.Chatbot()
    msg_input = gr.Textbox(placeholder="Escribe tu mensaje...")
    upload_img = gr.Image(label="Sube una imagen (opcional)", type="pil")
    auto_mode = gr.Checkbox(label="Modo automático: analizar imagen para prompt", value=False)
    send_button = gr.Button("Enviar")

    # Server-side conversation history, mirrored into the Chatbot on each turn.
    chat_state = gr.State(value=[])

    send_button.click(
        chat_mode,
        inputs=[msg_input, upload_img, chat_state, auto_mode],
        outputs=[chat_box, chat_state],
    )

if __name__ == "__main__":
    demo.launch()
|