Fabrice-TIERCELIN committed on
Commit
a11fc96
1 Parent(s): 74dd986

Compare with another AI to see where the error is

Files changed (1)
  1. GenVideo_app.py +100 -0
GenVideo_app.py ADDED
@@ -0,0 +1,100 @@
+ import uuid
+
+ import gradio as gr
+ import spaces
+ import torch
+ from diffusers import AnimateDiffPipeline, DDIMScheduler, MotionAdapter
+ from diffusers.utils import export_to_video
+
+ # Motion LoRA adapters selectable in the UI (Hugging Face Hub repo ids).
+ adapter_options = {
+     "zoom-out": "guoyww/animatediff-motion-lora-zoom-out",
+     "zoom-in": "guoyww/animatediff-motion-lora-zoom-in",
+     "pan-left": "guoyww/animatediff-motion-lora-pan-left",
+     "pan-right": "guoyww/animatediff-motion-lora-pan-right",
+     "roll-clockwise": "guoyww/animatediff-motion-lora-rolling-clockwise",
+     "roll-anticlockwise": "guoyww/animatediff-motion-lora-rolling-anticlockwise",
+     "tilt-up": "guoyww/animatediff-motion-lora-tilt-up",
+     "tilt-down": "guoyww/animatediff-motion-lora-tilt-down",
+ }
+
+
+ def load_cached_examples():
+     # Alternative example set (currently unused: gr.Interface below passes
+     # its own `examples` list).
+     examples = [
+         ["a cat playing with a ball of yarn", "blurry", 7.5, 12, ["zoom-in"]],
+         ["a dog running in a field", "dark, indoors", 8.0, 8, ["pan-left", "tilt-up"]],
+     ]
+     return examples
+
+
+ device = "cuda"
+ adapter = MotionAdapter.from_pretrained(
+     "guoyww/animatediff-motion-adapter-v1-5-2", torch_dtype=torch.float16
+ )
+ model_id = "SG161222/Realistic_Vision_V5.1_noVAE"
+
+ pipe = AnimateDiffPipeline.from_pretrained(
+     model_id, motion_adapter=adapter, torch_dtype=torch.float16
+ ).to(device)
+ # AnimateDiff expects a linear-beta DDIM schedule with linspace timesteps.
+ pipe.scheduler = DDIMScheduler.from_pretrained(
+     model_id,
+     subfolder="scheduler",
+     clip_sample=False,
+     timestep_spacing="linspace",
+     beta_schedule="linear",
+     steps_offset=1,
+ )
+
+ # Adapter names already registered on the pipeline; loading the same
+ # adapter_name twice raises an error, so track what has been loaded.
+ loaded_adapters = set()
+
+
+ @spaces.GPU
+ def generate_video(prompt, negative_prompt, guidance_scale, num_inference_steps, adapter_choices):
+     pipe.to(device)
+
+     # Activate the motion LoRAs selected by the user.
+     if adapter_choices:
+         for adapter_name in adapter_choices:
+             if adapter_name not in loaded_adapters:
+                 pipe.load_lora_weights(
+                     adapter_options[adapter_name], adapter_name=adapter_name
+                 )
+                 loaded_adapters.add(adapter_name)
+         pipe.set_adapters(adapter_choices, adapter_weights=[1.0] * len(adapter_choices))
+
+     output = pipe(
+         prompt=prompt,
+         negative_prompt=negative_prompt,
+         num_frames=16,
+         guidance_scale=guidance_scale,
+         num_inference_steps=num_inference_steps,
+     )
+     path = f"/tmp/{uuid.uuid4().hex}.mp4"
+     export_to_video(output.frames[0], path, fps=10)
+     return path
+
+
+ iface = gr.Interface(
+     theme=gr.themes.Soft(primary_hue="cyan", secondary_hue="teal"),
+     fn=generate_video,
+     inputs=[
+         gr.Textbox(label="Prompt"),
+         gr.Textbox(label="Negative Prompt"),
+         gr.Slider(minimum=0.5, maximum=10, value=7.5, label="Guidance Scale"),
+         gr.Slider(minimum=4, maximum=24, step=4, value=4, label="Inference Steps"),
+         gr.CheckboxGroup(list(adapter_options.keys()), label="Adapter Choice", type="value"),
+     ],
+     outputs=gr.Video(label="Generated Video"),
+     examples=[
+         ["Urban ambiance, man walking, neon lights, rain, wet floor, high quality", "bad quality", 7.5, 24, []],
+         ["Nature, farms, mountains in background, drone shot, high quality", "bad quality", 8.0, 24, []],
+     ],
+     cache_examples=True,
+ )
+
+ iface.launch()
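
Once deployed, the endpoint can also be called programmatically. Below is a minimal sketch using gradio_client, assuming the app runs as a Space (the repo id Fabrice-TIERCELIN/GenVideo is hypothetical) and that gr.Interface exposes its default /predict route:

from gradio_client import Client

# NOTE: the Space id below is hypothetical; substitute the real repo id.
client = Client("Fabrice-TIERCELIN/GenVideo")
video_path = client.predict(
    "a cat playing with a ball of yarn",  # prompt
    "blurry",                             # negative prompt
    7.5,                                  # guidance scale (0.5-10)
    12,                                   # inference steps (multiple of 4, 4-24)
    ["zoom-in"],                          # motion LoRA adapter choices
    api_name="/predict",
)
print(video_path)

The five positional arguments mirror the five inputs of the gr.Interface above, and the call returns a local path to the downloaded MP4.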