Commit 1513fb8 · verified · committed by r3gm
Parent: affb9ad

Update app.py

Files changed (1): app.py (+103, -73)
app.py CHANGED

@@ -34,13 +34,16 @@ MIN_DURATION = round(MIN_FRAMES_MODEL/FIXED_FPS,1)
 MAX_DURATION = round(MAX_FRAMES_MODEL/FIXED_FPS,1)
 
 
-pipe = WanImageToVideoPipeline.from_pretrained(MODEL_ID,
-    transformer=WanTransformer3DModel.from_pretrained('cbensimon/Wan2.2-I2V-A14B-bf16-Diffusers',
+pipe = WanImageToVideoPipeline.from_pretrained(
+    MODEL_ID,
+    transformer=WanTransformer3DModel.from_pretrained(
+        'cbensimon/Wan2.2-I2V-A14B-bf16-Diffusers',
         subfolder='transformer',
         torch_dtype=torch.bfloat16,
         device_map='cuda',
     ),
-    transformer_2=WanTransformer3DModel.from_pretrained('cbensimon/Wan2.2-I2V-A14B-bf16-Diffusers',
+    transformer_2=WanTransformer3DModel.from_pretrained(
+        'cbensimon/Wan2.2-I2V-A14B-bf16-Diffusers',
         subfolder='transformer_2',
         torch_dtype=torch.bfloat16,
         device_map='cuda',
@@ -49,15 +52,15 @@ pipe = WanImageToVideoPipeline.from_pretrained(MODEL_ID,
 ).to('cuda')
 
 pipe.load_lora_weights(
-    "Kijai/WanVideo_comfy",
-    weight_name="Lightx2v/lightx2v_I2V_14B_480p_cfg_step_distill_rank128_bf16.safetensors",
+    "Kijai/WanVideo_comfy",
+    weight_name="Lightx2v/lightx2v_I2V_14B_480p_cfg_step_distill_rank128_bf16.safetensors",
     adapter_name="lightx2v"
 )
 kwargs_lora = {}
 kwargs_lora["load_into_transformer_2"] = True
 pipe.load_lora_weights(
-    "Kijai/WanVideo_comfy",
-    weight_name="Lightx2v/lightx2v_I2V_14B_480p_cfg_step_distill_rank128_bf16.safetensors",
+    "Kijai/WanVideo_comfy",
+    weight_name="Lightx2v/lightx2v_I2V_14B_480p_cfg_step_distill_rank128_bf16.safetensors",
     adapter_name="lightx2v_2", **kwargs_lora
 )
 
@@ -68,8 +71,8 @@ pipe.unload_lora_weights()
 
 # livewallpaper
 pipe.load_lora_weights(
-    "voxvici/flux-lora",
-    weight_name="livewallpaper_wan22_14b_i2v_low_model_0_1_e26.safetensors",
+    "voxvici/flux-lora",
+    weight_name="livewallpaper_wan22_14b_i2v_low_model_0_1_e26.safetensors",
     adapter_name="livewallpaper"
 )
 pipe.set_adapters(["livewallpaper"], adapter_weights=[1.])
@@ -87,6 +90,7 @@ aoti.aoti_blocks_load(pipe.transformer_2, 'zerogpu-aoti/Wan2', variant='fp8da')
 default_prompt_i2v = "make this image come alive, cinematic motion, smooth animation"
 default_negative_prompt = "色调艳丽, 过曝, 静态, 细节模糊不清, 字幕, 风格, 作品, 画作, 画面, 静止, 整体发灰, 最差质量, 低质量, JPEG压缩残留, 丑陋的, 残缺的, 多余的手指, 画得不好的手部, 画得不好的脸部, 畸形的, 毁容的, 形态畸形的肢体, 手指融合, 静止不动的画面, 杂乱的背景, 三条腿, 背景人很多, 倒着走"
 
+
 def resize_image(image: Image.Image) -> Image.Image:
     """
     Resizes an image to fit within the model's constraints, preserving aspect ratio as much as possible.
@@ -98,12 +102,12 @@ def resize_image(image: Image.Image) -> Image.Image:
         return image.resize((SQUARE_DIM, SQUARE_DIM), Image.LANCZOS)
 
     aspect_ratio = width / height
-
-    MAX_ASPECT_RATIO = MAX_DIM / MIN_DIM
-    MIN_ASPECT_RATIO = MIN_DIM / MAX_DIM
+
+    MAX_ASPECT_RATIO = MAX_DIM / MIN_DIM
+    MIN_ASPECT_RATIO = MIN_DIM / MAX_DIM
 
     image_to_resize = image
-
+
     if aspect_ratio > MAX_ASPECT_RATIO:
         # Very wide image -> crop width to fit 832x480 aspect ratio
         target_w, target_h = MAX_DIM, MIN_DIM
@@ -129,10 +133,21 @@ def resize_image(image: Image.Image) -> Image.Image:
 
     final_w = max(MIN_DIM, min(MAX_DIM, final_w))
     final_h = max(MIN_DIM, min(MAX_DIM, final_h))
-
+
     return image_to_resize.resize((final_w, final_h), Image.LANCZOS)
 
 
+def resize_and_crop_to_match(target_image, reference_image):
+    """Resizes and center-crops the target image to match the reference image's dimensions."""
+    ref_width, ref_height = reference_image.size
+    target_width, target_height = target_image.size
+    scale = max(ref_width / target_width, ref_height / target_height)
+    new_width, new_height = int(target_width * scale), int(target_height * scale)
+    resized = target_image.resize((new_width, new_height), Image.Resampling.LANCZOS)
+    left, top = (new_width - ref_width) // 2, (new_height - ref_height) // 2
+    return resized.crop((left, top, left + ref_width, top + ref_height))
+
+
 def get_num_frames(duration_seconds: float):
     return 1 + int(np.clip(
         int(round(duration_seconds * FIXED_FPS)),
@@ -141,52 +156,81 @@ def get_num_frames(duration_seconds: float):
     ))
 
 
-def get_duration(
-    input_image,
+def get_inference_duration(
+    resized_image,
+    processed_last_image,
     prompt,
     steps,
     negative_prompt,
-    duration_seconds,
+    num_frames,
     guidance_scale,
     guidance_scale_2,
-    seed,
-    randomize_seed,
-    progress,
+    current_seed,
+    progress
 ):
     BASE_FRAMES_HEIGHT_WIDTH = 81 * 832 * 624
     BASE_STEP_DURATION = 15
-    width, height = resize_image(input_image).size
-    frames = get_num_frames(duration_seconds)
-    factor = frames * width * height / BASE_FRAMES_HEIGHT_WIDTH
+    width, height = resized_image.size
+    factor = num_frames * width * height / BASE_FRAMES_HEIGHT_WIDTH
     step_duration = BASE_STEP_DURATION * factor ** 1.5
     return 5 + int(steps) * step_duration
 
-@spaces.GPU(duration=get_duration)
+
+@spaces.GPU(duration=get_inference_duration)
+def run_inference(
+    resized_image,
+    processed_last_image,
+    prompt,
+    steps,
+    negative_prompt,
+    num_frames,
+    guidance_scale,
+    guidance_scale_2,
+    current_seed,
+    progress=gr.Progress(track_tqdm=True),
+):
+    return pipe(
+        image=resized_image,
+        last_image=processed_last_image,
+        prompt=prompt,
+        negative_prompt=negative_prompt,
+        height=resized_image.height,
+        width=resized_image.width,
+        num_frames=num_frames,
+        guidance_scale=float(guidance_scale),
+        guidance_scale_2=float(guidance_scale_2),
+        num_inference_steps=int(steps),
+        generator=torch.Generator(device="cuda").manual_seed(current_seed),
+    ).frames[0]
+
+
 def generate_video(
     input_image,
+    last_image,
     prompt,
-    steps = 4,
+    steps=4,
     negative_prompt=default_negative_prompt,
-    duration_seconds = MAX_DURATION,
-    guidance_scale = 1,
-    guidance_scale_2 = 1,
-    seed = 42,
-    randomize_seed = False,
+    duration_seconds=MAX_DURATION,
+    guidance_scale=1,
+    guidance_scale_2=1,
+    seed=42,
+    randomize_seed=False,
     progress=gr.Progress(track_tqdm=True),
 ):
     """
     Generate a video from an input image using the Wan 2.2 14B I2V model with Lightning LoRA.
-
+
     This function takes an input image and generates a video animation based on the provided
     prompt and parameters. It uses an FP8 quantized Wan 2.2 14B Image-to-Video model with Lightning LoRA
     for fast generation in 4-8 steps.
-
+
     Args:
         input_image (PIL.Image): The input image to animate. Will be resized to target dimensions.
+        last_image (PIL.Image, optional): The optional last image for the video.
         prompt (str): Text prompt describing the desired animation or motion.
         steps (int, optional): Number of inference steps. More steps = higher quality but slower.
            Defaults to 4. Range: 1-30.
-        negative_prompt (str, optional): Negative prompt to avoid unwanted elements.
+        negative_prompt (str, optional): Negative prompt to avoid unwanted elements.
            Defaults to default_negative_prompt (contains unwanted visual artifacts).
        duration_seconds (float, optional): Duration of the generated video in seconds.
            Defaults to 2. Clamped between MIN_FRAMES_MODEL/FIXED_FPS and MAX_FRAMES_MODEL/FIXED_FPS.
@@ -199,15 +243,15 @@ def generate_video(
        randomize_seed (bool, optional): Whether to use a random seed instead of the provided seed.
            Defaults to False.
        progress (gr.Progress, optional): Gradio progress tracker. Defaults to gr.Progress(track_tqdm=True).
-
+
    Returns:
        tuple: A tuple containing:
            - video_path (str): Path to the generated video file (.mp4)
            - current_seed (int): The seed used for generation (useful when randomize_seed=True)
-
+
    Raises:
        gr.Error: If input_image is None (no image uploaded).
-
+
    Note:
        - Frame count is calculated as duration_seconds * FIXED_FPS (24)
        - Output dimensions are adjusted to be multiples of MOD_VALUE (32)
@@ -216,23 +260,27 @@ def generate_video(
     """
     if input_image is None:
         raise gr.Error("Please upload an input image.")
-
+
     num_frames = get_num_frames(duration_seconds)
     current_seed = random.randint(0, MAX_SEED) if randomize_seed else int(seed)
     resized_image = resize_image(input_image)
 
-    output_frames_list = pipe(
-        image=resized_image,
-        prompt=prompt,
-        negative_prompt=negative_prompt,
-        height=resized_image.height,
-        width=resized_image.width,
-        num_frames=num_frames,
-        guidance_scale=float(guidance_scale),
-        guidance_scale_2=float(guidance_scale_2),
-        num_inference_steps=int(steps),
-        generator=torch.Generator(device="cuda").manual_seed(current_seed),
-    ).frames[0]
+    processed_last_image = None
+    if last_image:
+        processed_last_image = resize_and_crop_to_match(last_image, resized_image)
+
+    output_frames_list = run_inference(
+        resized_image,
+        processed_last_image,
+        prompt,
+        steps,
+        negative_prompt,
+        num_frames,
+        guidance_scale,
+        guidance_scale_2,
+        current_seed,
+        progress,
+    )
 
     with tempfile.NamedTemporaryFile(suffix=".mp4", delete=False) as tmpfile:
         video_path = tmpfile.name
@@ -241,54 +289,36 @@ def generate_video(
 
     return video_path, current_seed
 
+
 with gr.Blocks() as demo:
     gr.Markdown("# Fast 4 steps Wan 2.2 I2V (14B) with Lightning LoRA + Live Wallpaper LoRA")
     gr.Markdown("run Wan 2.2 in just 4-8 steps, with [Lightning LoRA](https://huggingface.co/Kijai/WanVideo_comfy/tree/main/Wan22-Lightning), fp8 quantization & AoT compilation - compatible with 🧨 diffusers and ZeroGPU⚡️")
     with gr.Row():
         with gr.Column():
             input_image_component = gr.Image(type="pil", label="Input Image")
+            last_image_component = gr.Image(type="pil", label="Last Image (Optional)")
             prompt_input = gr.Textbox(label="Prompt", value=default_prompt_i2v)
             duration_seconds_input = gr.Slider(minimum=MIN_DURATION, maximum=MAX_DURATION, step=0.1, value=3.5, label="Duration (seconds)", info=f"Clamped to model's {MIN_FRAMES_MODEL}-{MAX_FRAMES_MODEL} frames at {FIXED_FPS}fps.")
-
+
             with gr.Accordion("Advanced Settings", open=False):
                 negative_prompt_input = gr.Textbox(label="Negative Prompt", value=default_negative_prompt, lines=3)
                 seed_input = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=42, interactive=True)
                 randomize_seed_checkbox = gr.Checkbox(label="Randomize seed", value=True, interactive=True)
-                steps_slider = gr.Slider(minimum=1, maximum=30, step=1, value=6, label="Inference Steps")
+                steps_slider = gr.Slider(minimum=1, maximum=30, step=1, value=6, label="Inference Steps")
                guidance_scale_input = gr.Slider(minimum=0.0, maximum=10.0, step=0.5, value=1, label="Guidance Scale - high noise stage")
                guidance_scale_2_input = gr.Slider(minimum=0.0, maximum=10.0, step=0.5, value=1, label="Guidance Scale 2 - low noise stage")
 
            generate_button = gr.Button("Generate Video", variant="primary")
        with gr.Column():
            video_output = gr.Video(label="Generated Video", autoplay=True, interactive=False)
-
+
    ui_inputs = [
-        input_image_component, prompt_input, steps_slider,
+        input_image_component, last_image_component, prompt_input, steps_slider,
        negative_prompt_input, duration_seconds_input,
        guidance_scale_input, guidance_scale_2_input, seed_input, randomize_seed_checkbox
    ]
    generate_button.click(fn=generate_video, inputs=ui_inputs, outputs=[video_output, seed_input])
 
-    gr.Examples(
-        examples=[
-            [
-                "wan_i2v_input.JPG",
-                "POV selfie video, white cat with sunglasses standing on surfboard, relaxed smile, tropical beach behind (clear water, green hills, blue sky with clouds). Surfboard tips, cat falls into ocean, camera plunges underwater with bubbles and sunlight beams. Brief underwater view of cat’s face, then cat resurfaces, still filming selfie, playful summer vacation mood.",
-                4,
-            ],
-            [
-                "wan22_input_2.jpg",
-                "A sleek lunar vehicle glides into view from left to right, kicking up moon dust as astronauts in white spacesuits hop aboard with characteristic lunar bouncing movements. In the distant background, a VTOL craft descends straight down and lands silently on the surface. Throughout the entire scene, ethereal aurora borealis ribbons dance across the star-filled sky, casting shimmering curtains of green, blue, and purple light that bathe the lunar landscape in an otherworldly, magical glow.",
-                4,
-            ],
-            [
-                "kill_bill.jpeg",
-                "Uma Thurman's character, Beatrix Kiddo, holds her razor-sharp katana blade steady in the cinematic lighting. Suddenly, the polished steel begins to soften and distort, like heated metal starting to lose its structural integrity. The blade's perfect edge slowly warps and droops, molten steel beginning to flow downward in silvery rivulets while maintaining its metallic sheen. The transformation starts subtly at first - a slight bend in the blade - then accelerates as the metal becomes increasingly fluid. The camera holds steady on her face as her piercing eyes gradually narrow, not with lethal focus, but with confusion and growing alarm as she watches her weapon dissolve before her eyes. Her breathing quickens slightly as she witnesses this impossible transformation. The melting intensifies, the katana's perfect form becoming increasingly abstract, dripping like liquid mercury from her grip. Molten droplets fall to the ground with soft metallic impacts. Her expression shifts from calm readiness to bewilderment and concern as her legendary instrument of vengeance literally liquefies in her hands, leaving her defenseless and disoriented.",
-                6,
-            ],
-        ],
-        inputs=[input_image_component, prompt_input, steps_slider], outputs=[video_output, seed_input], fn=generate_video, cache_examples="lazy"
-    )
 
 if __name__ == "__main__":
     demo.queue().launch(mcp_server=True)
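
The resize_and_crop_to_match helper added in this commit scales the optional last image until it covers the reference frame, then center-crops it to the reference's exact dimensions, so the last frame matches the resized first frame before being passed to the pipeline. Below is a minimal standalone sketch of that behavior; the function body is copied from the diff above, while the image sizes are hypothetical and only Pillow is assumed.

# Minimal sketch (not part of the commit): exercising the new helper with hypothetical sizes.
from PIL import Image

def resize_and_crop_to_match(target_image, reference_image):
    """Resizes and center-crops the target image to match the reference image's dimensions."""
    ref_width, ref_height = reference_image.size
    target_width, target_height = target_image.size
    scale = max(ref_width / target_width, ref_height / target_height)
    new_width, new_height = int(target_width * scale), int(target_height * scale)
    resized = target_image.resize((new_width, new_height), Image.Resampling.LANCZOS)
    left, top = (new_width - ref_width) // 2, (new_height - ref_height) // 2
    return resized.crop((left, top, left + ref_width, top + ref_height))

reference = Image.new("RGB", (832, 480))   # stand-in for the resized first frame
last = Image.new("RGB", (1024, 1024))      # stand-in for a user-supplied last image
print(resize_and_crop_to_match(last, reference).size)  # -> (832, 480)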