Update app.py
app.py CHANGED
@@ -108,7 +108,8 @@ She is {pose}, in a {setting}. {atmosphere}.
 
 gen = HyperrealisticPromptGenerator()
 
-
+# Asegúrate de configurar SAMBANOVA_API_KEY en tu entorno
+API_KEY = os.getenv("SAMBANOVA_API_KEY")
 API_URL = "https://api.sambanova.ai/v1/chat/completions"
 headers = {"Authorization": f"Bearer {API_KEY}", "Content-Type": "application/json"}
 
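One caveat in this hunk: `headers` is still built at import time, so a missing key silently produces `Authorization: Bearer None` and is only caught later by the `if not API_KEY` guards. A sketch of a lazier alternative, with an illustrative helper name that is not part of this commit:

import os

API_URL = "https://api.sambanova.ai/v1/chat/completions"

def build_headers() -> dict:
    # Read the key at call time and fail loudly instead of sending "Bearer None".
    api_key = os.getenv("SAMBANOVA_API_KEY")
    if not api_key:
        raise RuntimeError("SAMBANOVA_API_KEY no configurada")
    return {"Authorization": f"Bearer {api_key}", "Content-Type": "application/json"}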
@@ -123,18 +124,20 @@ def process_image(image):
 
 def analizar_imagen_y_generar_prompt(image_base64):
     if not API_KEY:
-        return "⚠️ SAMBANOVA_API_KEY
+        return "```\n⚠️ SAMBANOVA_API_KEY no configurada.\n```"
+
+    # Formato de contenido para el análisis de imagen (puede variar)
     messages = [
         {"role": "system", "content": "Describe images in detailed English."},
         {
             "role": "user",
-            "content":
-                "type": "image",
-            "
-
-            }
+            "content": [
+                {"type": "image_url", "image_url": {"url": f"data:image/png;base64,{image_base64}"}},
+                {"type": "text", "text": "Provide a detailed English description suitable for a prompt."}
+            ]
         }
     ]
+
     json_data = {"model": "Llama-4-Maverick-17B-128E-Instruct", "messages": messages, "stream": False}
     try:
         response = requests.post(API_URL, headers=headers, json=json_data)
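The rewritten payload uses the OpenAI-style `image_url` content-part format; the added comment itself warns the format "puede variar". A self-contained sketch of the same non-streaming call under that assumption (`describe_image` is an illustrative name, not in the commit):

import base64
import os
import requests

API_URL = "https://api.sambanova.ai/v1/chat/completions"
HEADERS = {"Authorization": f"Bearer {os.getenv('SAMBANOVA_API_KEY')}",
           "Content-Type": "application/json"}

def describe_image(png_path: str) -> str:
    # Base64-encode the image and embed it as a data URL, as the hunk does.
    with open(png_path, "rb") as f:
        image_base64 = base64.b64encode(f.read()).decode("utf-8")
    payload = {
        "model": "Llama-4-Maverick-17B-128E-Instruct",
        "messages": [
            {"role": "system", "content": "Describe images in detailed English."},
            {"role": "user", "content": [
                {"type": "image_url",
                 "image_url": {"url": f"data:image/png;base64,{image_base64}"}},
                {"type": "text",
                 "text": "Provide a detailed English description suitable for a prompt."},
            ]},
        ],
        "stream": False,
    }
    resp = requests.post(API_URL, headers=HEADERS, json=payload, timeout=60)
    resp.raise_for_status()
    return resp.json()["choices"][0]["message"]["content"]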
@@ -142,40 +145,46 @@ def analizar_imagen_y_generar_prompt(image_base64):
         text_resp = response.json()["choices"][0]["message"]["content"]
         return f"```\n{text_resp}\n```"
     except Exception as e:
-        return f"
+        return f"```\nError analizando imagen: {str(e)}\n```"
 
 
 # ==============================================================
-# CHAT
+# FUNCIÓN PRINCIPAL DE CHAT Y PROMPT (CORREGIDA)
 # ==============================================================
 
 def chat_sambanova(user_message, image_input, auto_mode, chat_history):
     updated_history = chat_history[:] if chat_history else []
     image_base64 = process_image(image_input) if image_input else None
+
+    # 1. Yield inicial para mostrar "Procesando..." y borrar la entrada del usuario
+    yield "", updated_history, "", "Procesando..."
 
     if not API_KEY:
         error_msg = "Error: SAMBANOVA_API_KEY no configurada."
         updated_history.append((user_message, error_msg))
-
+        # 2. Yield de error
+        yield user_message, updated_history, error_msg, ""
         return
 
     if auto_mode and image_base64:
         prompt = analizar_imagen_y_generar_prompt(image_base64)
         updated_history.append((user_message or "Análisis automático (Imagen)", f"IA - Prompt generado:\n{prompt}"))
+        # 3. Yield de finalización de análisis
         yield "", updated_history, "", ""
         return
 
+    # Preparar el historial de chat para la API
     messages = [{"role": "system", "content": "Eres un asistente útil"}]
     for user_msg, ai_msg in updated_history:
-
+        # Los mensajes del historial deben ser solo de texto para el rol 'user'
+        # Esto previene errores de formato en mensajes pasados.
+        messages.append({"role": "user", "content": [{"type": "text", "text": user_msg}]})
        messages.append({"role": "assistant", "content": ai_msg})
 
-
+    # Preparar el mensaje actual (puede ser texto o multimodal)
+    user_content = [{"type": "text", "text": user_message}]
     if image_base64:
-        user_content = [
-            {"type": "text", "text": user_message},
-            {"type": "image", "image_data": f"data:image/png;base64,{image_base64}"}
-        ]
+        user_content.append({"type": "image_url", "image_url": {"url": f"data:image/png;base64,{image_base64}"}})
     messages.append({"role": "user", "content": user_content})
 
     json_data = {"model": "Llama-4-Maverick-17B-128E-Instruct", "messages": messages, "stream": True}
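All of the numbered yields return the same 4-tuple because the handler is wired to `outputs=[msg, chatbot, error_display, loading_state]` further down the diff, and a Gradio generator must emit one value per output component on every yield. A stripped-down sketch of that contract (the fake streaming body is illustrative, not the commit's code):

import time
import gradio as gr

def fake_stream(user_message, chat_history):
    # Mirrors chat_sambanova's contract: every yield is
    # (msg, chatbot, error_display, loading_state).
    history = chat_history[:] if chat_history else []
    history.append((user_message, ""))
    for i in range(1, 4):
        time.sleep(0.2)
        history[-1] = (user_message, "palabra " * i)
        yield "", history, "", "Procesando..."
    yield "", history, "", ""

with gr.Blocks() as demo:
    chatbot = gr.Chatbot()
    msg = gr.Textbox()
    error_display = gr.Textbox(interactive=False)
    loading_state = gr.Textbox(interactive=False)
    state = gr.State([])  # note: like the diff, this sketch never writes history back
    msg.submit(fake_stream, [msg, state], [msg, chatbot, error_display, loading_state])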
@@ -184,7 +193,8 @@ def chat_sambanova(user_message, image_input, auto_mode, chat_history):
         response = requests.post(API_URL, headers=headers, json=json_data, stream=True)
         response.raise_for_status()
         collected_text = ""
-        updated_history.append((user_message, ""))
+        updated_history.append((user_message, ""))
+
         for line in response.iter_lines(decode_unicode=True):
             if line.startswith("data: "):
                 json_str = line[len("data: "):]
@@ -196,9 +206,11 @@ def chat_sambanova(user_message, image_input, auto_mode, chat_history):
                     text_fragment = delta.get("content", "")
                     collected_text += text_fragment
                     updated_history[-1] = (user_message, collected_text)
+                    # 4. Yield de streaming
                     yield "", updated_history, "", "Procesando..."
                 except json.JSONDecodeError:
                     continue
+        # 5. Yield final
         yield "", updated_history, "", ""
     except Exception as e:
         error_msg = f"Error inesperado de la API: {str(e)}"
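The `except json.JSONDecodeError: continue` branch is what skips the stream's `data: [DONE]` sentinel, since that line is not valid JSON. A self-contained sketch of the same SSE parse, assuming the OpenAI-style `delta` chunks the hunk reads:

import json
import requests

def stream_chat(api_url, headers, payload):
    # Yields text fragments as they arrive on the SSE stream.
    resp = requests.post(api_url, headers=headers,
                         json={**payload, "stream": True}, stream=True)
    resp.raise_for_status()
    for line in resp.iter_lines(decode_unicode=True):
        if not line or not line.startswith("data: "):
            continue
        chunk = line[len("data: "):]
        if chunk.strip() == "[DONE]":  # end-of-stream sentinel, handled explicitly
            break
        try:
            delta = json.loads(chunk)["choices"][0].get("delta", {})
        except (json.JSONDecodeError, KeyError, IndexError):
            continue
        if delta.get("content"):
            yield delta["content"]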
@@ -206,15 +218,16 @@ def chat_sambanova(user_message, image_input, auto_mode, chat_history):
             updated_history[-1] = (user_message, error_msg)
         else:
             updated_history.append((user_message, error_msg))
-
-
+        # 6. Yield de error final
+        yield user_message, updated_history, error_msg, ""
 
 def generar_prompt_interno():
-    prompt, _ = gen.
+    prompt, _ = gen.generate_single_prompt(), ""
+    # Retorna el prompt como texto de salida y una cadena vacía para el display de error
     return prompt, ""
 
 # ==============================================================
-# INTERFAZ GRADIO
+# INTERFAZ GRADIO (CORREGIDA)
 # ==============================================================
 
 css_batuto = """
@@ -227,25 +240,18 @@ button:hover {background-color: #1B335F !important;}
 input, textarea {background-color: #0B101A !important; color: #DDE8FF !important;}
 """
 
-# Verificar que Gradio esté disponible
-try:
-    import gradio as gr
-    print("✅ Gradio importado correctamente")
-except ImportError as e:
-    print(f"❌ Error importando Gradio: {e}")
-    print("Instala Gradio con: pip install gradio")
-    exit(1)
-
 with gr.Blocks(css=css_batuto, theme=gr.themes.Soft()) as demo:
     gr.Markdown("# ⚡ BATUTO / Prompt Studio — Hyperrealistic Generator")
 
     chat_history = gr.State([])
     error_display = gr.Textbox(label="System messages", value="", visible=True, interactive=False)
-
+
     chatbot = gr.Chatbot(label="💬 BATUTO Assistant (SambaNova - Llama-4 Maverick)", type='messages')
-
+    # Cambiado a Textbox para mejor funcionalidad de copia.
+    prompt_output = gr.Textbox(label="🎨 Prompt generado", elem_classes=["prompt-output"], lines=5, max_lines=10)
 
     with gr.Row():
+        # La salida msg se usa para limpiar la caja de texto después del envío
         msg = gr.Textbox(label="Tu mensaje", scale=4, placeholder="Escribe tu mensaje o usa el modo automático...")
         img_input = gr.Image(label="Sube una imagen (opcional)", type="pil", scale=2)
 
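One mismatch this hunk leaves in place: the Chatbot is declared with `type='messages'`, while `chat_sambanova` still yields tuple-style `(user, assistant)` history, which recent Gradio versions reject for that type. A hedged bridging sketch, in case the tuples need converting before display:

def tuples_to_messages(history):
    # [(user, assistant), ...] -> the role/content dicts that
    # gr.Chatbot(type='messages') expects.
    messages = []
    for user_msg, ai_msg in history:
        if user_msg:
            messages.append({"role": "user", "content": user_msg})
        if ai_msg:
            messages.append({"role": "assistant", "content": ai_msg})
    return messages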
@@ -255,9 +261,10 @@ with gr.Blocks(css=css_batuto, theme=gr.themes.Soft()) as demo:
         btn_gen_prompt = gr.Button("🎲 Generar prompt automático", variant="secondary")
         copy_button = gr.Button("📋 Copiar Prompt")
 
-
+    # Componente para mostrar el estado de carga (Procesando...)
+    loading_state = gr.Textbox(value="", label="Estado", interactive=False)
 
-    #
+    # Asignación de outputs corregida para coincidir con la función chat_sambanova
     btn_send.click(
         fn=chat_sambanova,
         inputs=[msg, img_input, auto_mode, chat_history],
@@ -270,35 +277,22 @@ with gr.Blocks(css=css_batuto, theme=gr.themes.Soft()) as demo:
         outputs=[msg, chatbot, error_display, loading_state]
     )
 
+    # La salida del prompt va al Textbox y la segunda salida es para limpiar errores
     btn_gen_prompt.click(
         fn=generar_prompt_interno,
         inputs=[],
         outputs=[prompt_output, error_display]
     )
 
+    # CORRECCIÓN DE ERROR: Cambiado _js a js
     copy_button.click(
         None,
         [],
         [],
         js="""() => {
-
-
-
-        // Buscar el textarea del prompt
-        for (let textarea of textareas) {
-            if (textarea.value && textarea.value.includes('Highly detailed hyperrealistic')) {
-                promptText = textarea.value;
-                break;
-            }
-        }
-
-        if (!promptText) {
-            // Si no se encuentra, buscar en los elementos de texto
-            const promptElements = document.querySelectorAll('.prompt-output');
-            if (promptElements.length > 0) {
-                promptText = promptElements[0].value || promptElements[0].textContent || promptElements[0].innerText;
-            }
-        }
+        // Usa el ID o selector de clase del Textbox
+        const promptBox = document.querySelector('.prompt-output textarea');
+        const promptText = promptBox ? promptBox.value : '';
 
         if (promptText) {
             navigator.clipboard.writeText(promptText).then(() => {
@@ -307,14 +301,14 @@ with gr.Blocks(css=css_batuto, theme=gr.themes.Soft()) as demo:
                 alert('❌ Error al copiar: ' + err);
             });
         } else {
-            alert('❌ No se encontró el prompt para copiar');
+            alert('❌ No se encontró el prompt para copiar. Genera uno primero.');
         }
     }"""
     )
 
 if __name__ == "__main__":
     try:
-
-        demo.launch(server_name="0.0.0.0", server_port=7860, share=False)
+        demo.launch()
     except Exception as e:
-        print(f"
+        print(f"Error al iniciar Gradio: {str(e)}")
+
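`process_image` is called throughout the diff but its body sits outside these hunks. Assuming it converts the PIL image from `gr.Image(type='pil')` into the bare base64 string that the callers wrap in a `data:image/png;base64,` URL, a typical implementation would be:

import base64
import io

def process_image(image):
    # PIL image -> PNG bytes -> base64 text, without the data-URL prefix
    # (the calling code adds "data:image/png;base64," itself).
    buffer = io.BytesIO()
    image.save(buffer, format="PNG")
    return base64.b64encode(buffer.getvalue()).decode("utf-8")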