r3gm committed
Commit e1fdb35 · verified · 1 Parent(s): 0ed8b82

Update app.py

Files changed (1): app.py (+14 -8)
app.py CHANGED
@@ -30,8 +30,8 @@ FIXED_FPS = 16
 MIN_FRAMES_MODEL = 8
 MAX_FRAMES_MODEL = 160
 
-MIN_DURATION = round(MIN_FRAMES_MODEL/FIXED_FPS,1)
-MAX_DURATION = round(MAX_FRAMES_MODEL/FIXED_FPS,1)
+MIN_DURATION = round(MIN_FRAMES_MODEL / FIXED_FPS, 1)
+MAX_DURATION = round(MAX_FRAMES_MODEL / FIXED_FPS, 1)
 
 
 pipe = WanImageToVideoPipeline.from_pretrained(
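
Note: the spacing change here is cosmetic; the computed bounds are unchanged (8/16 = 0.5 s and 160/16 = 10.0 s, the model's frame limits at the fixed 16 fps). A minimal sketch of the duration-to-frames clamping that the duration slider's info text refers to; the helper name is hypothetical and not part of this commit:

    # Hypothetical helper illustrating the clamping described by the
    # duration slider's info text; not taken from app.py.
    def duration_to_num_frames(duration_seconds: float) -> int:
        num_frames = int(round(duration_seconds * FIXED_FPS))  # e.g. 3.5 s * 16 fps = 56
        return max(MIN_FRAMES_MODEL, min(num_frames, MAX_FRAMES_MODEL))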
@@ -215,6 +215,7 @@ def generate_video(
     guidance_scale_2=1,
     seed=42,
     randomize_seed=False,
+    quality=5,
     progress=gr.Progress(track_tqdm=True),
 ):
     """
@@ -242,6 +243,8 @@ def generate_video(
             Range: 0 to MAX_SEED (2147483647).
         randomize_seed (bool, optional): Whether to use a random seed instead of the provided seed.
             Defaults to False.
+        quality (float, optional): Video output quality. Default is 5. Uses variable bit rate.
+            Highest quality is 10, lowest is 0.
         progress (gr.Progress, optional): Gradio progress tracker. Defaults to gr.Progress(track_tqdm=True).
 
     Returns:
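
Note: the 0-10 variable-bit-rate scale documented here matches the convention of imageio's ffmpeg writer, which diffusers' export_to_video can delegate to (an assumption about the backend, not stated in this diff). A self-contained sketch of the same knob on dummy frames:

    # Sketch of the 0-10 variable-bitrate "quality" knob, assuming the
    # imageio-ffmpeg backend; the frames are dummy data.
    import imageio
    import numpy as np

    frames = [np.zeros((480, 832, 3), dtype=np.uint8) for _ in range(16)]
    writer = imageio.get_writer("out.mp4", fps=16, quality=5)  # 10 = best, 0 = worst
    for frame in frames:
        writer.append_data(frame)
    writer.close()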
@@ -285,26 +288,28 @@ def generate_video(
     with tempfile.NamedTemporaryFile(suffix=".mp4", delete=False) as tmpfile:
         video_path = tmpfile.name
 
-    export_to_video(output_frames_list, video_path, fps=FIXED_FPS)
+    export_to_video(output_frames_list, video_path, fps=FIXED_FPS, quality=quality)
 
     return video_path, current_seed
 
 
 with gr.Blocks() as demo:
-    gr.Markdown("# Fast 4 steps Wan 2.2 I2V (14B) with Lightning LoRA + Live Wallpaper LoRA")
+    gr.Markdown("# Wan 2.2 I2V (14B)")
+    gr.Markdown("ℹ️ **A Note on Performance:** This version prioritizes a straightforward setup over maximum speed, so performance may vary.")
     gr.Markdown("run Wan 2.2 in just 4-8 steps, with [Lightning LoRA](https://huggingface.co/Kijai/WanVideo_comfy/tree/main/Wan22-Lightning), fp8 quantization & AoT compilation - compatible with 🧨 diffusers and ZeroGPU⚡️")
     with gr.Row():
         with gr.Column():
             input_image_component = gr.Image(type="pil", label="Input Image")
-            last_image_component = gr.Image(type="pil", label="Last Image (Optional)")
             prompt_input = gr.Textbox(label="Prompt", value=default_prompt_i2v)
             duration_seconds_input = gr.Slider(minimum=MIN_DURATION, maximum=MAX_DURATION, step=0.1, value=3.5, label="Duration (seconds)", info=f"Clamped to model's {MIN_FRAMES_MODEL}-{MAX_FRAMES_MODEL} frames at {FIXED_FPS}fps.")
+            steps_slider = gr.Slider(minimum=1, maximum=30, step=1, value=6, label="Inference Steps")
+            quality_slider = gr.Slider(minimum=0, maximum=10, step=1, value=5, label="Video Quality")
 
             with gr.Accordion("Advanced Settings", open=False):
-                negative_prompt_input = gr.Textbox(label="Negative Prompt", value=default_negative_prompt, lines=3)
+                last_image_component = gr.Image(type="pil", label="Last Image (Optional)")
+                negative_prompt_input = gr.Textbox(label="Negative Prompt", value=default_negative_prompt, info="Used if any Guidance Scale > 1.", lines=3)
                 seed_input = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=42, interactive=True)
                 randomize_seed_checkbox = gr.Checkbox(label="Randomize seed", value=True, interactive=True)
-                steps_slider = gr.Slider(minimum=1, maximum=30, step=1, value=6, label="Inference Steps")
                 guidance_scale_input = gr.Slider(minimum=0.0, maximum=10.0, step=0.5, value=1, label="Guidance Scale - high noise stage")
                 guidance_scale_2_input = gr.Slider(minimum=0.0, maximum=10.0, step=0.5, value=1, label="Guidance Scale 2 - low noise stage")
 
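Note: the new info text on the negative prompt reflects how classifier-free guidance behaves: the negative branch only contributes when a guidance scale exceeds 1. A sketch with illustrative names, not taken from app.py:

    # Classifier-free guidance blends the unconditional (negative-prompt)
    # and conditional noise predictions; names here are illustrative.
    def cfg_combine(noise_uncond, noise_cond, guidance_scale):
        return noise_uncond + guidance_scale * (noise_cond - noise_uncond)

    # At guidance_scale == 1 this returns noise_cond exactly, so the
    # negative prompt has no effect, matching the new slider hint.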
@@ -315,7 +320,8 @@ with gr.Blocks() as demo:
     ui_inputs = [
         input_image_component, last_image_component, prompt_input, steps_slider,
         negative_prompt_input, duration_seconds_input,
-        guidance_scale_input, guidance_scale_2_input, seed_input, randomize_seed_checkbox
+        guidance_scale_input, guidance_scale_2_input, seed_input, randomize_seed_checkbox,
+        quality_slider
     ]
     generate_button.click(fn=generate_video, inputs=ui_inputs, outputs=[video_output, seed_input])
 
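Note: gr.Button.click passes inputs to the callback positionally, so the order of ui_inputs must match generate_video's parameter order; appending quality_slider lines it up with the new quality=5 parameter, which sits just before the injected progress tracker. A hedged reconstruction of the pairing, with the parameter names before guidance_scale_2 inferred from the ui_inputs list rather than shown in this diff:

    # Reconstruction of how the component list pairs with the callback
    # signature; names before guidance_scale_2 are inferred, not confirmed.
    def generate_video(
        input_image, last_image, prompt, steps,
        negative_prompt, duration_seconds,
        guidance_scale, guidance_scale_2=1, seed=42, randomize_seed=False,
        quality=5,
        progress=gr.Progress(track_tqdm=True),
    ):
        ...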
 