ivanoctaviogaitansantos commited on
Commit
6ec8388
verified
1 Parent(s): 28a469c

Actualizar app.py

Browse files
Files changed (1) hide show
  1. app.py +117 -119
app.py CHANGED
@@ -3,9 +3,11 @@ import requests
3
  import json
4
  import os
5
  import random
 
 
 
6
  from typing import List, Optional
7
 
8
- # Tu clase generadora de prompts
9
  class HyperrealisticPromptGenerator:
10
  def __init__(self):
11
  self.ROLES = [
@@ -83,7 +85,6 @@ class HyperrealisticPromptGenerator:
83
  def _choose_random(self, options: List[str]) -> str:
84
  return random.choice(options)
85
 
86
- # CORRECCIÓN DE SYNTAX ERROR: Uso de f-string con triple comilla
87
  def generate_single_prompt(self, role: Optional[str] = None) -> str:
88
  selected_role = role if role else self._choose_random(self.ROLES)
89
  age = self._choose_random(self.AGES)
@@ -93,172 +94,169 @@ class HyperrealisticPromptGenerator:
93
  pose = self._choose_random(self.POSES)
94
  setting = self._choose_random(self.SETTINGS)
95
  atmosphere = self._choose_random(self.ATMOSPHERES)
96
-
97
- prompt = f"""```
98
- Role: {selected_role}
99
- Age: {age}
100
- Hair: {hair_style} in {hair_color}
101
- Eyes: {eye_color} with expressive, captivating gaze
102
- Pose: {pose}
103
- Environment: {setting}
104
- Atmosphere: {atmosphere}
105
- Outfit: {self.CONDITION_FIXED}
106
-
107
- Technical specs:
108
- {self.TECHNICAL_DETAILS}
109
- ```"""
110
- return prompt
111
 
112
  def generate_prompt_automatic(self):
113
- # Simple método para generación automática sin rol pasado
114
  return self.generate_single_prompt()
115
 
116
- # Instancia del generador
117
  gen = HyperrealisticPromptGenerator()
118
 
119
- # Configuración API SambaNova
120
  API_KEY = os.getenv("SAMBANOVA_API_KEY")
121
  API_URL = "https://api.sambanova.ai/v1/chat/completions"
122
- headers = {
123
- "Authorization": f"Bearer {API_KEY}",
124
- "Content-Type": "application/json",
125
- }
126
 
127
- def analizar_imagen_y_generar_prompt(image_url_or_base64):
128
- # Se envía la imagen para obtener una descripción en inglés
 
 
 
 
 
 
129
  if not API_KEY:
130
- return "Error: La variable de entorno SAMBANOVA_API_KEY no est谩 configurada."
131
-
132
  messages = [
133
- {"role": "system", "content": "You are an assistant that describes images in detailed English."},
134
- {"role": "user", "content": [
135
- {"type": "image_url", "image_url": {"url": image_url_or_base64}},
136
- {"type": "text", "text": "Provide a detailed English description of this image to use as a prompt."},
137
- ]}
 
 
138
  ]
139
- json_data = {
140
- "model": "Llama-4-Maverick-17B-128E-Instruct",
141
- "messages": messages,
142
- "stream": False,
143
- }
144
  try:
145
  response = requests.post(API_URL, headers=headers, json=json_data)
146
  response.raise_for_status()
147
- resp_json = response.json()
148
- text_resp = resp_json["choices"][0]["message"]["content"]
149
-
150
- # CORRECCIÓN DE SYNTAX ERROR: Envuelve la respuesta en un bloque de código markdown
151
- prompt = f"```\n{text_resp}\n```"
152
- return prompt
153
  except Exception as e:
154
- return f"Error al analizar imagen y generar prompt: {e}"
 
 
 
 
 
 
 
 
155
 
156
- def chat_sambanova(user_message, image_input, auto_mode, chat_history):
157
- updated_history = chat_history.copy()
158
-
159
  if not API_KEY:
160
- error_msg = "Error: La variable de entorno SAMBANOVA_API_KEY no est谩 configurada."
161
- updated_history.append((user_message if user_message else "Intento de interacci贸n", error_msg))
162
- yield "", updated_history
163
  return
164
 
165
- if auto_mode and image_input:
166
- # Modo automático: analizar imagen y generar prompt automáticamente
167
- prompt = analizar_imagen_y_generar_prompt(image_input)
168
-
169
- ai_msg = f"IA - Prompt autom谩tico generado:\n{prompt}"
170
- user_display = user_message if user_message else "An谩lisis de imagen autom谩tico"
171
- updated_history.append((user_display, ai_msg))
172
-
173
- yield "", updated_history
174
  return
175
 
176
- # Modo chat manual: enviar mensaje a SambaNova API
177
  messages = [{"role": "system", "content": "Eres un asistente 煤til"}]
178
  for user_msg, ai_msg in updated_history:
179
- # Si el mensaje es una tupla y es nuestro prompt interno, lo ignoramos para el contexto
180
- if isinstance(user_msg, str) and user_msg.startswith("IA - Prompt autom谩tico generado"):
181
- continue
182
-
183
  messages.append({"role": "user", "content": [{"type": "text", "text": user_msg}]})
184
  messages.append({"role": "assistant", "content": ai_msg})
185
 
186
  user_content = [{"type": "text", "text": user_message}]
187
- if image_input:
188
- user_content.append({"type": "image_url", "image_url": {"url": image_input}})
189
  messages.append({"role": "user", "content": user_content})
190
 
191
- json_data = {
192
- "model": "Llama-4-Maverick-17B-128E-Instruct",
193
- "messages": messages,
194
- "stream": True,
195
- }
196
 
197
  try:
198
  response = requests.post(API_URL, headers=headers, json=json_data, stream=True)
199
  response.raise_for_status()
200
-
201
  collected_text = ""
202
  updated_history.append((user_message, ""))
203
 
204
  for line in response.iter_lines(decode_unicode=True):
205
- if line:
206
- if line.startswith("data: "):
207
- json_str = line[len("data: "):]
208
- if json_str == "[DONE]":
209
- break
210
- try:
211
- data = json.loads(json_str)
212
- delta = data.get("choices", [{}])[0].get("delta", {})
213
- text_fragment = delta.get("content", "")
214
-
215
- collected_text += text_fragment
216
-
217
- if updated_history:
218
- updated_history[-1] = (updated_history[-1][0], collected_text)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
219
 
220
- yield "", updated_history
221
- except json.JSONDecodeError:
222
- continue
223
-
224
- yield "", updated_history
225
-
226
- except requests.exceptions.RequestException as err:
227
- error_msg = f"Error de conexi贸n con SambaNova: {err}"
228
- if not updated_history or updated_history[-1][0] != user_message:
229
- updated_history.append((user_message, error_msg))
230
- else:
231
- updated_history[-1] = (updated_history[-1][0], error_msg)
232
- yield "", updated_history
233
-
234
  def generar_prompt_interno():
235
- return gen.generate_prompt_automatic()
236
 
237
  with gr.Blocks() as demo:
238
  gr.Markdown("# Hyperrealistic Prompt Generator & Chatbot")
239
-
240
  chat_history = gr.State([])
241
- # CORRECCIÓN DE ADVERTENCIA: Se añade type='messages'
 
 
242
  chatbot = gr.Chatbot(label="Chatbot IA (SambaNova - Llama-4 Maverick)", type='messages')
243
-
244
  with gr.Row():
245
  msg = gr.Textbox(label="Escribe tu mensaje", scale=4)
246
- img_input = gr.Textbox(label="Imagen base64 o URL (opcional)", placeholder="Pega aqu铆 la imagen o URL", scale=2)
247
-
248
  with gr.Row():
249
- auto_mode = gr.Checkbox(label="Modo autom谩tico (generar prompt auto desde imagen)", value=False)
250
  btn_send = gr.Button("Enviar mensaje", variant="primary")
251
- # CORRECCIÓN DE FUNCIÓN INCOMPLETA: Se llama a generar_prompt_interno
252
- btn_gen_prompt = gr.Button("Generar prompt autom谩tico interno (al cuadro de texto)", variant="secondary")
253
-
254
- # Eventos
255
- btn_send.click(chat_sambanova, inputs=[msg, img_input, auto_mode, chat_history], outputs=[msg, chatbot, chat_history])
256
- msg.submit(chat_sambanova, inputs=[msg, img_input, auto_mode, chat_history], outputs=[msg, chatbot, chat_history])
257
 
258
- # El botón genera el prompt y lo coloca en el cuadro de texto 'msg'
259
- btn_gen_prompt.click(generar_prompt_interno, inputs=[], outputs=[msg])
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
260
 
261
- # CORRECCIÓN DE INICIALIZACIÓN: Esto inicia el servidor Gradio, esencial para el despliegue
262
  if __name__ == "__main__":
263
- demo.launch()
264
-
 
 
 
 
3
  import json
4
  import os
5
  import random
6
+ import base64
7
+ import io
8
+ from PIL import Image
9
  from typing import List, Optional
10
 
 
11
  class HyperrealisticPromptGenerator:
12
  def __init__(self):
13
  self.ROLES = [
 
85
  def _choose_random(self, options: List[str]) -> str:
86
  return random.choice(options)
87
 
 
88
  def generate_single_prompt(self, role: Optional[str] = None) -> str:
89
  selected_role = role if role else self._choose_random(self.ROLES)
90
  age = self._choose_random(self.AGES)
 
94
  pose = self._choose_random(self.POSES)
95
  setting = self._choose_random(self.SETTINGS)
96
  atmosphere = self._choose_random(self.ATMOSPHERES)
97
+ return (
98
+ f"```
99
+ f"Eyes: {eye_color}\nPose: {pose}\nEnvironment: {setting}\nAtmosphere: {atmosphere}\n"
100
+ f"Outfit: {self.CONDITION_FIXED}\nTechnical specs: {self.TECHNICAL_DETAILS}\n```"
101
+ )
 
 
 
 
 
 
 
 
 
 
102
 
103
  def generate_prompt_automatic(self):
 
104
  return self.generate_single_prompt()
105
 
 
106
  gen = HyperrealisticPromptGenerator()
107
 
 
108
  API_KEY = os.getenv("SAMBANOVA_API_KEY")
109
  API_URL = "https://api.sambanova.ai/v1/chat/completions"
110
+ headers = {"Authorization": f"Bearer {API_KEY}", "Content-Type": "application/json"}
 
 
 
111
 
112
def process_image(image):
    """Serialize a PIL image to a base64-encoded PNG string.

    Returns None when no image was supplied, so callers can pass the
    optional Gradio image input straight through.
    """
    if image is None:
        return None
    png_buffer = io.BytesIO()
    # Always re-encode as PNG so the data-URL mime type is predictable.
    image.save(png_buffer, format="PNG")
    encoded = base64.b64encode(png_buffer.getvalue())
    return encoded.decode("utf-8")
118
+
119
def analizar_imagen_y_generar_prompt(image_base64):
    """Ask the SambaNova vision model for a detailed English description of an image.

    The description is returned wrapped in a markdown code fence so the chat UI
    renders it as a copyable block; on failure an error string is returned instead.

    NOTE(review): the diff renderer stripped the contents of the triple-backtick
    string literals in this version ('return "``````"'); the strings below are
    reconstructed from the pre-commit version of this function.
    """
    if not API_KEY:
        return "Error: La variable de entorno SAMBANOVA_API_KEY no está configurada."
    messages = [
        {"role": "system", "content": "Describe images in detailed English."},
        {
            "role": "user",
            "content": [
                # The API expects a data URL, not bare base64.
                {"type": "image_url", "image_url": {"url": f"data:image/png;base64,{image_base64}"}},
                {"type": "text", "text": "Provide a detailed English description for a prompt."},
            ],
        },
    ]
    json_data = {"model": "Llama-4-Maverick-17B-128E-Instruct", "messages": messages, "stream": False}
    try:
        response = requests.post(API_URL, headers=headers, json=json_data)
        response.raise_for_status()
        text_resp = response.json()["choices"][0]["message"]["content"]
        # Wrap in a fenced block so the chatbot shows it verbatim.
        return f"```\n{text_resp}\n```"
    except Exception as e:
        return f"Error al analizar imagen y generar prompt: {e}"
139
+
140
def chat_sambanova(user_message, image_input, auto_mode, chat_history, loading_state):
    """Generator handling one chat turn against the SambaNova streaming API.

    Yields 5-tuples matching the Gradio outputs wired in the UI:
    (msg_textbox, chatbot, chat_history_state, error_display, loading_markdown).

    BUG FIX: the previous version yielded 4-tuples while the click/submit
    events declare 5 output components, which makes Gradio raise a
    value-count mismatch on every interaction; every yield now also carries
    the updated history for the ``chat_history`` State output.
    Also repairs mojibake in the user-facing Spanish strings.
    """
    updated_history = chat_history[:] if chat_history else []
    image_base64 = process_image(image_input) if image_input else None

    # Show a loading indicator while the request is in flight.
    loading_state = "Procesando..."
    yield "", updated_history, updated_history, "", loading_state

    if not API_KEY:
        error_msg = "Error: SAMBANOVA_API_KEY no configurada."
        updated_history.append((user_message, error_msg))
        yield "", updated_history, updated_history, error_msg, ""
        return

    # Automatic mode: describe the image and post the generated prompt.
    if auto_mode and image_base64:
        prompt = analizar_imagen_y_generar_prompt(image_base64)
        updated_history.append((user_message or "Análisis automático", f"IA - Prompt generado:\n{prompt}"))
        yield "", updated_history, updated_history, "", ""
        return

    # Manual chat mode: replay the prior turns as context for the model.
    messages = [{"role": "system", "content": "Eres un asistente útil"}]
    for user_msg, ai_msg in updated_history:
        messages.append({"role": "user", "content": [{"type": "text", "text": user_msg}]})
        messages.append({"role": "assistant", "content": ai_msg})

    user_content = [{"type": "text", "text": user_message}]
    if image_base64:
        user_content.append({"type": "image_url", "image_url": {"url": f"data:image/png;base64,{image_base64}"}})
    messages.append({"role": "user", "content": user_content})

    json_data = {"model": "Llama-4-Maverick-17B-128E-Instruct", "messages": messages, "stream": True}

    try:
        response = requests.post(API_URL, headers=headers, json=json_data, stream=True)
        response.raise_for_status()

        collected_text = ""
        updated_history.append((user_message, ""))

        # Server-sent-events stream: each payload line is "data: {json}".
        for line in response.iter_lines(decode_unicode=True):
            if line and line.startswith("data: "):
                json_str = line[len("data: "):]
                if json_str == "[DONE]":
                    break
                try:
                    data = json.loads(json_str)
                    delta = data.get("choices", [{}])[0].get("delta", {})
                    collected_text += delta.get("content", "")
                    updated_history[-1] = (user_message, collected_text)
                    yield "", updated_history, updated_history, "", "Procesando..."
                except json.JSONDecodeError:
                    # Skip keep-alive / partial frames.
                    continue
        yield "", updated_history, updated_history, "", ""
    except requests.exceptions.HTTPError as http_err:
        error_msg = f"Error HTTP {http_err.response.status_code}: {http_err.response.text}"
        if updated_history:
            updated_history[-1] = (user_message, error_msg)
        yield "", updated_history, updated_history, error_msg, ""
    except requests.exceptions.ConnectionError:
        error_msg = "Error: No se pudo conectar con la API de SambaNova. Verifica tu conexión."
        if updated_history:
            updated_history[-1] = (user_message, error_msg)
        yield "", updated_history, updated_history, error_msg, ""
    except requests.exceptions.Timeout:
        error_msg = "Error: La solicitud a la API timed out."
        if updated_history:
            updated_history[-1] = (user_message, error_msg)
        yield "", updated_history, updated_history, error_msg, ""
    except Exception as e:
        error_msg = f"Error inesperado: {str(e)}"
        if updated_history:
            updated_history[-1] = (user_message, error_msg)
        yield "", updated_history, updated_history, error_msg, ""
213
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
214
def generar_prompt_interno():
    """Generate a random hyperrealistic prompt for the UI.

    Returns a pair feeding the two wired outputs: the message textbox and the
    ``prompt_output`` Markdown panel.

    BUG FIX: the previous version returned ``(prompt, "")``, leaving the
    Markdown panel empty — so the "Copiar Prompt" button (which reads that
    panel's text via clipboard JS) always copied nothing. The prompt is now
    returned for both outputs.
    """
    prompt = gen.generate_prompt_automatic()
    return prompt, prompt
216
 
217
with gr.Blocks() as demo:
    gr.Markdown("# Hyperrealistic Prompt Generator & Chatbot")
    chat_history = gr.State([])
    error_display = gr.Textbox(label="Mensajes de error", interactive=False, visible=True)
    loading_state = gr.State("")

    # BUG FIX: the chat handlers build history as (user, assistant) tuples,
    # but the chatbot was declared type='messages' (which expects role/content
    # dicts) — the formats must agree or Gradio raises on every update.
    chatbot = gr.Chatbot(label="Chatbot IA (SambaNova - Llama-4 Maverick)", type="tuples")
    prompt_output = gr.Markdown(label="Prompt Generado", elem_classes=["prompt-output"])
    with gr.Row():
        msg = gr.Textbox(label="Escribe tu mensaje", scale=4)
        img_input = gr.Image(label="Subir imagen (opcional)", type="pil", scale=2)
    with gr.Row():
        auto_mode = gr.Checkbox(label="Modo automático (generar prompt desde imagen)", value=False)
        btn_send = gr.Button("Enviar mensaje", variant="primary")
        btn_gen_prompt = gr.Button("Generar prompt automático", variant="secondary")
        copy_button = gr.Button("Copiar Prompt")
    with gr.Row():
        # BUG FIX: value was a lambda taking an argument it never receives;
        # a plain string that the handlers stream status text into suffices.
        loading = gr.Markdown(value="", label="Estado")

    btn_send.click(
        fn=chat_sambanova,
        inputs=[msg, img_input, auto_mode, chat_history, loading_state],
        outputs=[msg, chatbot, chat_history, error_display, loading],
    )
    msg.submit(
        fn=chat_sambanova,
        inputs=[msg, img_input, auto_mode, chat_history, loading_state],
        outputs=[msg, chatbot, chat_history, error_display, loading],
    )
    btn_gen_prompt.click(
        fn=generar_prompt_interno,
        inputs=[],
        outputs=[msg, prompt_output],
    )
    # NOTE(review): `_js` was the pre-Gradio-4 private kwarg; since
    # type='messages' was accepted this app targets Gradio >= 4, where the
    # supported name is `js` — confirm against the pinned gradio version.
    copy_button.click(
        fn=None,
        js="() => { navigator.clipboard.writeText(document.querySelector('.prompt-output').innerText); }",
        outputs=None,
    )

if __name__ == "__main__":
    try:
        demo.launch()
    except Exception as e:
        print(f"Error al iniciar Gradio: {str(e)}")