yingzhac-research committed
Commit 7fe20c3 · Parent: 8da5cdc

Update app to 4-image UI with extra controls

Files changed (2)
  1. app.py +69 -26
  2. zimage_pipeline.py +0 -0
app.py CHANGED
@@ -3,6 +3,8 @@ import spaces
 import gradio as gr
 from diffusers import DiffusionPipeline
 
+MAX_SEED = 2**32 - 1
+
 # Load the pipeline once at startup
 print("Loading Z-Image-Turbo pipeline...")
 pipe = DiffusionPipeline.from_pretrained(
@@ -19,22 +21,46 @@ spaces.aoti_blocks_load(pipe.transformer.layers, "zerogpu-aoti/Z-Image", variant
 print("Pipeline loaded!")
 
 @spaces.GPU
-def generate_image(prompt, height, width, num_inference_steps, seed, randomize_seed, progress=gr.Progress(track_tqdm=True)):
-    """Generate an image from the given prompt."""
+def generate_image(
+    prompt,
+    negative_prompt,
+    height,
+    width,
+    num_inference_steps,
+    guidance_scale,
+    seed,
+    randomize_seed,
+    progress=gr.Progress(track_tqdm=True),
+):
+    """Generate 4 images with seeds: seed, 2x, 3x, 4x (mod MAX_SEED)."""
     if randomize_seed:
-        seed = torch.randint(0, 2**32 - 1, (1,)).item()
-
-    generator = torch.Generator("cuda").manual_seed(int(seed))
-    image = pipe(
-        prompt=prompt,
-        height=int(height),
-        width=int(width),
-        num_inference_steps=int(num_inference_steps),
-        guidance_scale=0.0,  # Guidance should be 0 for Turbo models
-        generator=generator,
-    ).images[0]
-
-    return image, seed
+        seed = torch.randint(0, MAX_SEED, (1,)).item()
+
+    base_seed = int(seed) % MAX_SEED
+    if base_seed < 0:
+        base_seed += MAX_SEED
+
+    seeds = [(base_seed * i) % MAX_SEED for i in range(1, 5)]
+
+    neg_prompt = None
+    if isinstance(negative_prompt, str) and negative_prompt.strip():
+        neg_prompt = negative_prompt
+
+    images = []
+    for s in seeds:
+        generator = torch.Generator("cuda").manual_seed(int(s))
+        image = pipe(
+            prompt=prompt,
+            negative_prompt=neg_prompt,
+            height=int(height),
+            width=int(width),
+            num_inference_steps=int(num_inference_steps),
+            guidance_scale=float(guidance_scale),  # 0.0 is recommended default for Turbo
+            generator=generator,
+        ).images[0]
+        images.append(image)
+
+    return images, ", ".join(str(s) for s in seeds)
 
 
 # Example prompts
@@ -64,7 +90,13 @@ with gr.Blocks(title="Z-Image-Turbo Demo") as demo:
                 placeholder="Enter your image description...",
                 lines=4,
             )
-
+
+            negative_prompt = gr.Textbox(
+                label="Negative Prompt",
+                placeholder="Things you don't want in the image...",
+                lines=3,
+            )
+
             with gr.Row():
                 height = gr.Slider(
                     minimum=512,
@@ -90,6 +122,15 @@ with gr.Blocks(title="Z-Image-Turbo Demo") as demo:
                 label="Inference Steps",
                 info="9 steps results in 8 DiT forwards",
             )
+
+            guidance_scale = gr.Slider(
+                minimum=0.0,
+                maximum=7.0,
+                value=0.0,
+                step=0.1,
+                label="CFG Guidance Scale",
+                info="0 = no CFG (recommended for Turbo models)",
+            )
 
             with gr.Row():
                 seed = gr.Number(
@@ -105,12 +146,14 @@ with gr.Blocks(title="Z-Image-Turbo Demo") as demo:
             generate_btn = gr.Button("🚀 Generate", variant="primary", size="lg")
 
         with gr.Column(scale=1):
-            output_image = gr.Image(
-                label="Generated Image",
-                type="pil",
+            output_images = gr.Gallery(
+                label="Generated Images",
+                columns=2,
+                rows=2,
+                preview=True,
             )
-            used_seed = gr.Number(
-                label="Seed Used",
+            used_seeds = gr.Textbox(
+                label="Seeds Used (base, 2x, 3x, 4x)",
                 interactive=False,
             )
 
@@ -126,16 +169,16 @@ with gr.Blocks(title="Z-Image-Turbo Demo") as demo:
     # Connect the generate button
     generate_btn.click(
         fn=generate_image,
-        inputs=[prompt, height, width, num_inference_steps, seed, randomize_seed],
-        outputs=[output_image, used_seed],
+        inputs=[prompt, negative_prompt, height, width, num_inference_steps, guidance_scale, seed, randomize_seed],
+        outputs=[output_images, used_seeds],
     )
 
     # Also allow generating by pressing Enter in the prompt box
     prompt.submit(
         fn=generate_image,
-        inputs=[prompt, height, width, num_inference_steps, seed, randomize_seed],
-        outputs=[output_image, used_seed],
+        inputs=[prompt, negative_prompt, height, width, num_inference_steps, guidance_scale, seed, randomize_seed],
+        outputs=[output_images, used_seeds],
    )
 
 if __name__ == "__main__":
-    demo.launch(mcp_server=True)
+    demo.launch(mcp_server=True)
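
A minimal standalone sketch of the seed schedule the new generate_image uses (the helper name derive_seeds is illustrative only, not part of the app):

MAX_SEED = 2**32 - 1

def derive_seeds(seed: int, count: int = 4) -> list[int]:
    # Mirror generate_image: normalize the base seed, then take the
    # 1x, 2x, ... count-x multiples modulo MAX_SEED.
    base_seed = int(seed) % MAX_SEED
    if base_seed < 0:  # kept to mirror the app; Python's % is already non-negative here
        base_seed += MAX_SEED
    return [(base_seed * i) % MAX_SEED for i in range(1, count + 1)]

print(derive_seeds(12345))  # [12345, 24690, 37035, 49380]

When "Randomize" is checked, the base seed comes from torch.randint(0, MAX_SEED, (1,)), and the four derived seeds are echoed back in the "Seeds Used" textbox so a given grid can be reproduced later.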
zimage_pipeline.py ADDED
Binary file (52.7 kB).
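
Since the event signature changed (eight inputs; a gallery plus a seed string as outputs), here is a hedged sketch of calling the updated endpoint with gradio_client. The Space id is a placeholder, api_name="/generate_image" assumes Gradio's default of naming the endpoint after the wired function, and the exact shape of the returned gallery payload varies with the Gradio version:

from gradio_client import Client

client = Client("your-username/z-image-turbo-demo")  # placeholder Space id

gallery, used_seeds = client.predict(
    "A cozy cabin in a snowy forest at dusk",  # prompt
    "",            # negative_prompt (empty string disables it)
    1024,          # height
    1024,          # width
    9,             # num_inference_steps
    0.0,           # guidance_scale (0 = no CFG, the Turbo default)
    42,            # seed
    False,         # randomize_seed
    api_name="/generate_image",
)
print(used_seeds)  # e.g. "42, 84, 126, 168"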