Update app.py
app.py CHANGED
@@ -8,7 +8,7 @@ from openai import OpenAI
 from transformers import pipeline
 from diffusers import DiffusionPipeline
 
-# ---------- Load OpenAI Key from
+# ---------- Load OpenAI Key from Hugging Face Secret ----------
 client = OpenAI(api_key=os.getenv("OPENAI_KEY"))
 
 # ---------- Configuration ----------
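The new comment spells out where the key comes from: on Hugging Face Spaces, a secret defined in the Space settings is exposed to the app as an environment variable at runtime. A minimal sketch of the pattern, assuming the secret is named OPENAI_KEY as in the diff; the explicit missing-key check is an addition for illustration, not something app.py does:

import os
from openai import OpenAI

# Space secrets appear as environment variables at runtime
api_key = os.getenv("OPENAI_KEY")
if not api_key:
    # fail fast with a clear message instead of a late auth error
    raise RuntimeError("OPENAI_KEY secret is not set for this Space")
client = OpenAI(api_key=api_key)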
@@ -55,8 +55,9 @@ except Exception as e:
     video_pipeline = None
     video_enabled = False
 
-# ---------- Main Terminal with
-def codette_terminal_limited(prompt, model_name, generate_image, generate_video,
+# ---------- Main Terminal with Rate Limits ----------
+def codette_terminal_limited(prompt, model_name, generate_image, generate_video,
+                             session_id, batch_size, video_steps, fps):
     if session_id not in chat_memory:
         chat_memory[session_id] = []
 
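The widened signature threads the UI state (session id, batch size, video steps, fps) into the handler, and chat history is kept per session in a module-level dict keyed by the hidden session_id textbox. A sketch of that pattern; the type annotation and the remember() helper are illustrative, not part of app.py:

# One history list per session key; the hidden session_id Textbox
# in the UI supplies the key on every call.
chat_memory: dict[str, list[str]] = {}

def remember(session_id: str, line: str) -> None:
    chat_memory.setdefault(session_id, []).append(line)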
@@ -87,7 +88,7 @@ def codette_terminal_limited(prompt, model_name, generate_image, generate_video,
             )
             output = response.choices[0].message.content.strip()
         except Exception as e:
-            yield f"[OpenAI
+            yield f"[OpenAI error]: {e}", None, None
             return
     else:
         if model_name not in text_model_cache:
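The fix makes the error path yield a full three-tuple (text, images, video), matching the three output components the UI binds to; yielding a bare string from a handler with three outputs would leave Gradio unable to map the result. A sketch of the call-and-recover shape, where the model name is a placeholder rather than the one app.py selects from its dropdown:

try:
    response = client.chat.completions.create(
        model="gpt-4.1-mini",  # placeholder; app.py takes the model from a dropdown
        messages=[{"role": "user", "content": prompt}],
    )
    output = response.choices[0].message.content.strip()
except Exception as e:
    # every yield must match outputs=[text, gallery, video]
    yield f"[OpenAI error]: {e}", None, None
    return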
@@ -108,7 +109,7 @@ def codette_terminal_limited(prompt, model_name, generate_image, generate_video,
             yield f"[Generation error]: {e}", None, None
             return
 
-    # Stream
+    # Stream text output
     response_so_far = ""
    for char in output:
        response_so_far += char
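Streaming works by yielding the growing string on every iteration, so the output Textbox redraws character by character. A self-contained sketch of the loop including the lines this hunk cuts off; the sleep value is a guess for pacing, since the actual delay in app.py is not shown here:

import time

def stream_text(output):
    # yield the growing string so the Textbox redraws on every step
    response_so_far = ""
    for char in output:
        response_so_far += char
        time.sleep(0.01)  # pacing delay; assumed, not taken from app.py
        yield response_so_far, None, None  # image/video slots stay empty while text streams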
@@ -121,8 +122,8 @@ def codette_terminal_limited(prompt, model_name, generate_image, generate_video,
     chat_memory[session_id].append(f"🗣️ You > {prompt}")
     chat_memory[session_id].append(f"🧠 Codette > {output}")
 
-
-
+    imgs, vid = None, None
+
     if generate_image and image_enabled:
         try:
             result = image_generator(prompt, num_images_per_prompt=batch_size)
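Initializing both names before the branches guards the final yield against an unbound imgs when the image toggle is off, and makes the standalone vid = None further down redundant (it is removed in the next hunk). A sketch of the image branch, under the assumption that image_generator is a diffusers text-to-image pipeline, whose call returns an object carrying an .images list of PIL images:

imgs, vid = None, None
if generate_image and image_enabled:
    try:
        result = image_generator(prompt, num_images_per_prompt=batch_size)
        imgs = result.images  # list of PIL.Image, consumable by gr.Gallery
    except Exception as e:
        response_so_far += f"\n[Image error]: {e}"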
@@ -130,8 +131,6 @@ def codette_terminal_limited(prompt, model_name, generate_image, generate_video,
         except Exception as e:
             response_so_far += f"\n[Image error]: {e}"
 
-    # Video Generation
-    vid = None
     if generate_video and video_enabled:
         try:
             result = video_pipeline(prompt, num_inference_steps=video_steps)
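The hunk cuts off before the generated frames are written out. A sketch of how such output is typically turned into a playable file, assuming video_pipeline is a diffusers text-to-video pipeline and using the export_to_video helper from diffusers.utils; the .frames[0] indexing follows the usual diffusers output shape and is an assumption here:

from diffusers.utils import export_to_video

if generate_video and video_enabled:
    try:
        result = video_pipeline(prompt, num_inference_steps=video_steps)
        # .frames[0] is the first generated clip; export returns a file path
        vid = export_to_video(result.frames[0], fps=fps)
    except Exception as e:
        response_so_far += f"\n[Video error]: {e}"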
@@ -147,7 +146,7 @@ def codette_terminal_limited(prompt, model_name, generate_image, generate_video,
 # ---------- Gradio UI ----------
 with gr.Blocks(title="🧬 Codette Terminal – Streamed AI Chat") as demo:
     gr.Markdown("## 🧬 Codette Terminal (Chat + Image + Video + Fine-Tuned AI)")
-    gr.Markdown("Type a prompt, choose a model, and generate
+    gr.Markdown("Type a prompt, choose a model, and generate responses. Type `'exit'` to reset the session.")
 
     with gr.Row():
         session_id = gr.Textbox(value="session_default", visible=False)
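The session key rides along in a hidden Textbox with a fixed default, which means every visitor shares "session_default" and therefore one history. A per-visitor key could be produced with a callable default, which Gradio re-evaluates on each page load; the uuid choice below is an illustration, not what app.py does (the line would replace the one above, inside the Blocks context):

import uuid

# a callable default is re-evaluated per page load, giving each visitor its own key
session_id = gr.Textbox(value=lambda: uuid.uuid4().hex, visible=False)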
@@ -162,11 +161,29 @@ with gr.Blocks(title="🧬 Codette Terminal – Streamed AI Chat") as demo:
         video_steps_slider = gr.Slider(label="Video Inference Steps", minimum=10, maximum=100, step=10, value=50)
         fps_slider = gr.Slider(label="Video FPS", minimum=4, maximum=24, step=2, value=8)
 
-
-
-
-
+    with gr.Row():
+        user_input = gr.Textbox(
+            label="Your Prompt",
+            placeholder="e.g. A robot dreaming on Mars",
+            lines=1
+        )
+
+    with gr.Row():
+        output_text = gr.Textbox(label="Codette Output", lines=15, interactive=False)
+
+    with gr.Row():
+        output_image = gr.Gallery(label="Generated Image(s)", columns=2)
+        output_video = gr.Video(label="Generated Video")
+
+    user_input.submit(
+        codette_terminal_limited,
+        inputs=[
+            user_input, model_dropdown, generate_image_toggle, generate_video_toggle,
+            session_id, batch_size_slider, video_steps_slider, fps_slider
+        ],
+        outputs=[output_text, output_image, output_video]
     )
-
-
-
+
+# ---------- Launch ----------
+if __name__ == "__main__":
+    demo.launch(mcp_server=True)
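The new wiring connects the prompt box to the handler via .submit(); because codette_terminal_limited is a generator, Gradio streams each yielded tuple into the three output components in turn. A self-contained miniature of the same shape, runnable on its own; note that mcp_server=True additionally exposes the app's functions over the Model Context Protocol and needs a Gradio version with MCP support (the gradio[mcp] extra):

import gradio as gr

def echo_stream(prompt):
    # generator handler: each yield re-renders the output component
    out = ""
    for ch in prompt:
        out += ch
        yield out

with gr.Blocks() as demo:
    box = gr.Textbox(label="Prompt")
    txt = gr.Textbox(label="Output", interactive=False)
    box.submit(echo_stream, inputs=box, outputs=txt)

if __name__ == "__main__":
    demo.launch(mcp_server=True)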