|
|
import gradio as gr |
|
|
import requests |
|
|
import json |
|
|
import os |
|
|
import base64 |
|
|
import io |
|
|
from PIL import Image |
|
|
|
|
|
|
|
|
def process_image(image):
    """Serialize an image to a base64-encoded PNG string.

    Returns None when no image was supplied, so callers can pass the
    Gradio image input straight through without their own guard.
    """
    if image is None:
        return None
    raw = io.BytesIO()
    image.save(raw, format="PNG")
    encoded = base64.b64encode(raw.getvalue())
    return encoded.decode("utf-8")
|
|
|
|
|
|
|
|
def generar_respuesta_api(mensaje_usuario):
    """Send *mensaje_usuario* to the SambaNova chat-completions API and return the reply.

    Never raises: on a missing key or any request/parse failure it returns a
    human-readable error string instead, which the chat UI displays as the
    assistant message.
    """
    API_KEY = os.getenv("SAMBANOVA_API_KEY")
    # Fail fast with the same warning string the image-analysis helper uses;
    # the original sent the request with "Authorization: Bearer None".
    if not API_KEY:
        return "⚠️ SAMBANOVA_API_KEY no configurada."

    API_URL = "https://api.sambanova.ai/v1/chat/completions"
    headers = {"Authorization": f"Bearer {API_KEY}", "Content-Type": "application/json"}

    messages = [
        {"role": "system", "content": "Eres un asistente útil y detallado."},
        {"role": "user", "content": mensaje_usuario},
    ]
    json_data = {"model": "Llama-4-Maverick-17B-128E-Instruct", "messages": messages, "stream": False}

    try:
        # timeout keeps the Gradio handler from hanging forever on a stalled connection
        response = requests.post(API_URL, headers=headers, json=json_data, timeout=60)
        response.raise_for_status()
        return response.json()["choices"][0]["message"]["content"]
    except Exception as e:
        return f"Error en la generación de respuesta: {str(e)}"
|
|
|
|
|
|
|
|
def analizar_imagen_y_generar_prompt(image_base64):
    """Ask the vision model for a detailed English description of a base64 PNG.

    *image_base64* is the raw base64 payload (no "data:" prefix); the data URL
    is built here. Returns the description text, or a warning/error string on
    failure — never raises.
    """
    API_KEY = os.getenv("SAMBANOVA_API_KEY")
    # Guard first: the original built the "Bearer None" auth header before
    # checking the key, which was dead work on the failure path.
    if not API_KEY:
        return "⚠️ SAMBANOVA_API_KEY no configurada."

    API_URL = "https://api.sambanova.ai/v1/chat/completions"
    headers = {"Authorization": f"Bearer {API_KEY}", "Content-Type": "application/json"}

    messages = [
        {"role": "system", "content": "Describe images in detailed English."},
        {
            "role": "user",
            "content": [
                {"type": "image_url", "image_url": {"url": f"data:image/png;base64,{image_base64}"}},
                {"type": "text", "text": "Provide a detailed English description suitable for a prompt."},
            ],
        },
    ]
    json_data = {"model": "Llama-4-Maverick-17B-128E-Instruct", "messages": messages, "stream": False}

    try:
        # timeout keeps the Gradio handler from hanging forever on a stalled connection
        response = requests.post(API_URL, headers=headers, json=json_data, timeout=60)
        response.raise_for_status()
        return response.json()["choices"][0]["message"]["content"]
    except Exception as e:
        return f"Error al analizar imagen: {str(e)}"
|
|
|
|
|
|
|
|
def chat_mode(user_message, image_input, chat_history, auto_mode):
    """Handle one chat turn for the Gradio UI.

    When *auto_mode* is on and an image was uploaded, the reply is the image
    analysis; otherwise it is a plain text completion of *user_message*.
    Returns ``(history, history)`` so one handler refreshes both the Chatbot
    component and the gr.State copy.
    """
    if chat_history is None:
        chat_history = []

    # process_image already handles None; `is not None` avoids relying on
    # image truthiness.
    image_b64 = process_image(image_input) if image_input is not None else None

    if auto_mode and image_b64:
        respuesta = analizar_imagen_y_generar_prompt(image_b64)
    else:
        respuesta = generar_respuesta_api(user_message)

    # Gradio's Chatbot expects [user_text, assistant_text] pairs. The original
    # appended ["Usuario", msg] and ["IA", resp] as two separate rows, which
    # rendered the literal labels as chat messages.
    chat_history.append([user_message, respuesta])

    return chat_history, chat_history
|
|
|
|
|
|
|
|
with gr.Blocks() as demo:
    # --- layout (creation order defines on-screen order) ---
    gr.Markdown("# ⚡ BATUTO / Prompt Studio — Chat IA + Imagen")
    chatbot_view = gr.Chatbot()
    message_box = gr.Textbox(placeholder="Escribe tu mensaje...")
    image_upload = gr.Image(label="Sube una imagen (opcional)", type="pil")
    auto_checkbox = gr.Checkbox(label="Modo automático: analizar imagen para prompt", value=False)
    submit_btn = gr.Button("Enviar")
    # Server-side copy of the conversation, fed back into every turn.
    history_state = gr.State(value=[])

    # --- wiring: one click handler updates both the chatbot and the state ---
    submit_btn.click(
        chat_mode,
        inputs=[message_box, image_upload, history_state, auto_checkbox],
        outputs=[chatbot_view, history_state],
    )


if __name__ == "__main__":
    demo.launch()
|
|
|