import gradio as gr
import requests
import json
import os
import random
import base64
import io
from PIL import Image
from typing import List, Optional

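
# Builds randomized, highly detailed text-to-image prompts by sampling one
# option from each attribute pool and appending fixed technical details.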
class HyperrealisticPromptGenerator:
    def __init__(self):
        self.ROLES = [
            "nurse", "nun", "maid", "flight attendant", "secretary", "teacher", "schoolgirl", "lawyer",
            "doctor", "boudoir model", "fitness model", "elegant judge", "seductive librarian",
            "business executive", "policewoman", "female military officer", "WWII-era secretary",
            "1960s flight attendant", "seductive maid", "mysterious nurse", "captivating schoolgirl"
        ]
        self.AGES = [
            "early 20s youthful vibrance",
            "early 20s fresh and vibrant",
            "mid 20s graceful confidence",
            "mid 20s elegant and fresh",
            "early 20s natural glow"
        ]
        self.HAIR_COLORS = [
            "deep sapphire blue", "silver platinum", "vibrant ruby red", "glossy jet black",
            "luxurious chestnut brown", "emerald green", "vivid amethyst purple",
            "chocolate brown", "honey blonde", "burgundy red"
        ]
        self.EYE_COLORS = [
            "intense brown", "bright sapphire blue", "emerald green", "golden amber",
            "fascinating hazel", "deep violet", "piercing emerald", "mysterious gray",
            "vibrant violet", "intense amber"
        ]
        self.HAIR_STYLES = [
            "long flowing chestnut hair styled in soft waves",
            "sleek straight long black hair",
            "luxurious long blonde curls",
            "elegant updo with loose cascading strands",
            "glossy long brunette hair parted in the middle",
            "voluminous curls",
            "thick braid over the shoulder",
            "loose and silky layers",
            "messy chic bun"
        ]
        self.POSES = [
            "standing with one leg slightly forward, natural elegance",
            "seated on a chair edge, legs crossed, professional expression",
            "leaning against a desk, confident look",
            "walking with subtle grace, light movement",
            "adjusting hair gently, natural body language"
        ]
        self.SETTINGS = [
            "modern office with elegant decor and warm ambient light",
            "luxury hotel suite with velvet furnishings and city view",
            "classic library with wooden shelves and soft reading lamps",
            "outdoor balcony at sunset with urban skyline",
            "high-end photo studio with professional soft lighting"
        ]
        self.ATMOSPHERES = [
            "soft professional lighting with smooth skin shadows, perfect color balance",
            "warm golden hour sunlight creating rich highlights and depth",
            "moody cinematic lighting with subtle shadow play",
            "gentle romantic candlelight with warm glows",
            "sharp studio flash lighting with balanced illumination"
        ]
        self.TECHNICAL_DETAILS = (
            "Captured in ultra HD 16K (15360×8640) vertical 9:16 full body format. "
            "Canon EOS R5 Cine RAW camera and Canon RF 85mm f/1.2L USM lens at f/1.2 aperture for creamy bokeh. "
            "ARRI SkyPanel S360-C soft lighting, Path Tracing, PBR, SSS for lifelike skin, and Ray Tracing. "
            "Photogrammetry-based textures, displacement maps for skin pores, delicate fabric weave. "
            "Natural hair strand flow, low-angle (knee to head) composition."
        )
        self.CONDITION_FIXED = (
            "Wearing elegant professional attire matching the role, natural posture, confident expression. "
            "Full body portrait, cinematic tone, vertical 9:16 framing."
        )

    def _choose_random(self, options: List[str]) -> str:
        return random.choice(options)

    def generate_single_prompt(self, role: Optional[str] = None) -> str:
        selected_role = role if role else self._choose_random(self.ROLES)
        age = self._choose_random(self.AGES)
        hair_color = self._choose_random(self.HAIR_COLORS)
        eye_color = self._choose_random(self.EYE_COLORS)
        hair_style = self._choose_random(self.HAIR_STYLES)
        pose = self._choose_random(self.POSES)
        setting = self._choose_random(self.SETTINGS)
        atmosphere = self._choose_random(self.ATMOSPHERES)

        # Template wording is an assumption: stitch the sampled attributes
        # together with the fixed condition and technical detail blocks.
        prompt = (
            f"Hyperrealistic full body portrait of a {selected_role}, {age}, "
            f"{hair_color} hair with {eye_color} eyes, {hair_style}. "
            f"Pose: {pose}. Setting: {setting}. Lighting: {atmosphere}. "
            f"{self.CONDITION_FIXED} {self.TECHNICAL_DETAILS}"
        )
        return prompt

    def generate_prompt_automatic(self):
        return self.generate_single_prompt()


gen = HyperrealisticPromptGenerator()

API_KEY = os.getenv("SAMBANOVA_API_KEY")
API_URL = "https://api.sambanova.ai/v1/chat/completions"
headers = {"Authorization": f"Bearer {API_KEY}", "Content-Type": "application/json"}

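
# Encode an uploaded PIL image as a base64 PNG so it can be embedded in an
# OpenAI-style "image_url" data URI.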
def process_image(image):
    if image is None:
        return None
    buffered = io.BytesIO()
    image.save(buffered, format="PNG")
    return base64.b64encode(buffered.getvalue()).decode("utf-8")


def analizar_imagen_y_generar_prompt(image_base64):
    """Ask the vision model for a detailed English description of the image."""
    if not API_KEY:
        return "Error: SAMBANOVA_API_KEY is not set."
    messages = [
        {"role": "system", "content": "Describe images in detailed English."},
        {
            "role": "user",
            "content": [
                {"type": "image_url", "image_url": {"url": f"data:image/png;base64,{image_base64}"}},
                {"type": "text", "text": "Provide a detailed English description for a prompt."}
            ]
        }
    ]
    json_data = {"model": "Llama-4-Maverick-17B-128E-Instruct", "messages": messages, "stream": False}
    try:
        response = requests.post(API_URL, headers=headers, json=json_data)
        response.raise_for_status()
        text_resp = response.json()["choices"][0]["message"]["content"]
        return text_resp
    except Exception as e:
        return f"Error while analyzing the image: {e}"

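
# Streaming chat handler. Each yield supplies the five outputs wired up below
# (msg, chatbot, chat_history, error_display, loading), so the UI updates
# incrementally while the SambaNova response streams in as SSE "data:" lines.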
def chat_sambanova(user_message, image_input, auto_mode, chat_history, loading_state):
    updated_history = chat_history[:] if chat_history else []
    image_base64 = process_image(image_input) if image_input else None

    loading_state = "Processing..."
    yield "", updated_history, updated_history, "", loading_state

    if not API_KEY:
        error_msg = "Error: SAMBANOVA_API_KEY is not set."
        updated_history.append((user_message, error_msg))
        yield "", updated_history, updated_history, error_msg, ""
        return

    if auto_mode and image_base64:
        prompt = analizar_imagen_y_generar_prompt(image_base64)
        updated_history.append((user_message or "Automatic analysis", f"AI - Generated prompt:\n{prompt}"))
        yield "", updated_history, updated_history, "", ""
        return

    messages = [{"role": "system", "content": "You are a helpful assistant"}]
    for user_msg, ai_msg in updated_history:
        messages.append({"role": "user", "content": [{"type": "text", "text": user_msg}]})
        messages.append({"role": "assistant", "content": ai_msg})

    user_content = [{"type": "text", "text": user_message}]
    if image_base64:
        user_content.append({"type": "image_url", "image_url": {"url": f"data:image/png;base64,{image_base64}"}})
    messages.append({"role": "user", "content": user_content})

    json_data = {"model": "Llama-4-Maverick-17B-128E-Instruct", "messages": messages, "stream": True}

    try:
        response = requests.post(API_URL, headers=headers, json=json_data, stream=True)
        response.raise_for_status()
        collected_text = ""
        updated_history.append((user_message, ""))

        for line in response.iter_lines(decode_unicode=True):
            if line and line.startswith("data: "):
                json_str = line[len("data: "):]
                if json_str == "[DONE]":
                    break
                try:
                    data = json.loads(json_str)
                    delta = data.get("choices", [{}])[0].get("delta", {})
                    text_fragment = delta.get("content") or ""
                    collected_text += text_fragment
                    updated_history[-1] = (user_message, collected_text)
                    yield "", updated_history, updated_history, "", "Processing..."
                except json.JSONDecodeError:
                    continue
        yield "", updated_history, updated_history, "", ""
    except Exception as e:
        error_msg = f"Unexpected error: {str(e)}"
        # Replace the streaming placeholder if it was added; otherwise append,
        # e.g. when the request itself failed before any entry existed.
        if updated_history and updated_history[-1][0] == user_message:
            updated_history[-1] = (user_message, error_msg)
        else:
            updated_history.append((user_message, error_msg))
        yield "", updated_history, updated_history, error_msg, ""


def generar_prompt_interno():
    # Show the generated prompt both in the message box (ready to send) and
    # in the copyable Markdown panel targeted by the copy button.
    prompt = gen.generate_prompt_automatic()
    return prompt, prompt


css_batuto = """
body {background-color: #05070A; color: #B0C8FF; font-family: 'Poppins', sans-serif;}
h1, h2, h3, h4 {color: #5CA8FF; text-align: center;}
.gradio-container {background-color: #05070A !important;}
button {background-color: #0B1A33 !important; color: #B0C8FF !important; border-radius: 12px;}
button:hover {background-color: #1B335F !important;}
.prompt-output {background-color: #0A0F1A; color: #A8CFFF; border-radius: 10px; padding: 10px;}
input, textarea {background-color: #0B101A !important; color: #DDE8FF !important;}
"""

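
# Gradio UI: chat panel, copyable prompt panel, and controls for automatic
# image analysis and random prompt generation.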
with gr.Blocks(css=css_batuto, theme="gradio/soft") as demo:
    gr.Markdown("# ⚡ BATUTO / Prompt Studio — Hyperrealistic Generator")

    chat_history = gr.State([])
    error_display = gr.Textbox(label="System messages", interactive=False, visible=True)
    loading_state = gr.State("")

    # chat_sambanova builds the history as (user, assistant) tuples, so the
    # Chatbot must use the tuples format rather than type="messages".
    chatbot = gr.Chatbot(label="💬 BATUTO Assistant (SambaNova - Llama-4 Maverick)", type="tuples")
    prompt_output = gr.Markdown(label="🎨 Generated prompt", elem_classes=["prompt-output"])

    with gr.Row():
        msg = gr.Textbox(label="Your message", scale=4)
        img_input = gr.Image(label="Upload an image (optional)", type="pil", scale=2)

    with gr.Row():
        auto_mode = gr.Checkbox(label="Automatic mode (generate prompt from image)", value=False)
        btn_send = gr.Button("Send message", variant="primary")
        btn_gen_prompt = gr.Button("🎲 Generate random prompt", variant="secondary")
        copy_button = gr.Button("📋 Copy Prompt")

    with gr.Row():
        # Status area; chat_sambanova streams "Processing..." text into it.
        # Note: gr.Markdown cannot take a one-argument lambda as its value.
        loading = gr.Markdown("", label="Status")

    btn_send.click(
        fn=chat_sambanova,
        inputs=[msg, img_input, auto_mode, chat_history, loading_state],
        outputs=[msg, chatbot, chat_history, error_display, loading]
    )
    msg.submit(
        fn=chat_sambanova,
        inputs=[msg, img_input, auto_mode, chat_history, loading_state],
        outputs=[msg, chatbot, chat_history, error_display, loading]
    )
    btn_gen_prompt.click(
        fn=generar_prompt_interno,
        inputs=[],
        outputs=[msg, prompt_output]
    )

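    # Client-side copy: the js callback runs entirely in the browser and reads
    # the rendered text of the ".prompt-output" Markdown panel.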
    copy_button.click(
        None,
        [],
        [],
        js="""() => {
            const el = document.querySelector('.prompt-output');
            if (el) {
                navigator.clipboard.writeText(el.innerText);
                alert('✅ Prompt copied to clipboard');
            } else {
                alert('❌ No prompt found to copy');
            }
        }"""
    )


if __name__ == "__main__":
    try:
        demo.launch()
    except Exception as e:
        print(f"Error launching Gradio: {str(e)}")