C4G-HKUST committed · Commit fc0b74d · Parent: a7b90fa

feat: time out check

Files changed (1):
  1. app.py +7 -7
app.py CHANGED
@@ -617,8 +617,8 @@ def run_graio_demo(args):
     # Reference: https://huggingface.co/spaces/KlingTeam/LivePortrait/blob/main/app.py
     # The @spaces.GPU decorator handles GPU initialization automatically; no manual init is needed
 
-    # Fast generation mode: 200 s, fixed 10-step denoising
-    @spaces.GPU(duration=200)
+    # Fast generation mode: 121 s, fixed 10-step denoising
+    @spaces.GPU(duration=121)
     def gpu_wrapped_generate_video_fast(*args, **kwargs):
         # Fixed number of denoising steps, passed as a keyword argument
         kwargs['fixed_steps'] = 8
@@ -835,7 +835,7 @@ def run_graio_demo(args):
 
             with gr.Row():
                 run_i2v_button_fast = gr.Button(
-                    "Generate Video (Fast - 200s, 8 steps)",
+                    "Generate Video (Fast - 121s, 8 steps)",
                     variant="secondary",
                     scale=1
                 )
@@ -846,10 +846,10 @@ def run_graio_demo(args):
                 )
             gr.Markdown("""
            **Generation Modes:**
-           - **Fast Mode (up to 200s GPU budget)**: Fixed 8 denoising steps for quick generation.
+           - **Fast Mode (up to 121s GPU budget)**: Fixed 8 denoising steps for quick generation.
            - **Quality Mode (up to 720s GPU budget)**: Custom denoising steps (adjustable via "Diffusion steps" slider, default: 25 steps).
 
-           *Note: The GPU duration (200s/720s) represents the maximum budget allocated, not the actual generation time. Multi-person videos generally require longer duration and more Usage Quota for better quality.*
+           *Note: The GPU duration (121s/720s) represents the maximum budget allocated, not the actual generation time. Multi-person videos generally require longer duration and more Usage Quota for better quality.*
            """)
 
         with gr.Column(scale=2):
@@ -859,7 +859,7 @@ def run_graio_demo(args):
            gr.Markdown("""
            ### Example Cases
 
-           *Note: Generation time (tested on NVIDIA H200 GPU with 40 denoising steps) may vary depending on GPU specifications and system load.*
+           *Note: Generation time (tested on NVIDIA H121 GPU with 40 denoising steps) may vary depending on GPU specifications and system load.*
            """)
 
            # Create a function to handle examples selection
@@ -933,7 +933,7 @@ def run_graio_demo(args):
        result = gpu_wrapped_generate_video_quality(*args)
        return result
 
-    # Fast generation button: 200 s, fixed 10 steps
+    # Fast generation button: 121 s, fixed 10 steps
    run_i2v_button_fast.click(
        fn=handle_fast_generation,
        inputs=[img2vid_image, img2vid_prompt, n_prompt, img2vid_audio_1, img2vid_audio_2, img2vid_audio_3, sd_steps, seed, guide_scale, person_num_selector, audio_mode_selector],
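For context, the change only adjusts the ZeroGPU time budget requested by the fast-generation path. Below is a minimal sketch of the pattern being tuned, assuming a ZeroGPU Space with the `spaces` package installed; `generate_video` is a placeholder stub, not the actual generator in app.py:

```python
import spaces  # ZeroGPU helper package available on Hugging Face Spaces


def generate_video(*args, **kwargs):
    # Placeholder for the real diffusion call in app.py; defined only so the sketch runs.
    return f"would run diffusion with {kwargs.get('fixed_steps')} denoising steps"


# `duration` is the maximum GPU time (in seconds) requested for each call.
# It is a budget cap, not the expected runtime: the GPU is released as soon as
# the function returns, but the call can be aborted if it exceeds the cap, and
# shorter requested durations generally get better queue priority and consume
# less of a user's ZeroGPU quota.
@spaces.GPU(duration=121)
def gpu_wrapped_generate_video_fast(*args, **kwargs):
    kwargs["fixed_steps"] = 8  # fast mode pins the step count; the steps slider is ignored
    return generate_video(*args, **kwargs)
```

Under this reading, trimming the fast-mode budget from 200 s to 121 s mainly affects scheduling, timeout behavior, and quota accounting; the 8-step generation itself is unchanged.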