multimodalart (HF staff) committed
Commit c8cac13
Parent(s): eaf8b26

Update app.py

Files changed (1): app.py (+1 -34)
app.py CHANGED
@@ -28,40 +28,7 @@ pipe_schnell = DiffusionPipeline.from_pretrained(
     torch_dtype=torch.bfloat16
 )
 
-@spaces.GPU
-def run_dev_hyper(prompt):
-    print("dev_hyper")
-    pipe_dev.to("cuda")
-    print(hyper_lora)
-    pipe_dev.load_lora_weights(hyper_lora)
-    print("Loaded hyper lora!")
-    image = pipe_dev(prompt, num_inference_steps=8, joint_attention_kwargs={"scale": 0.125}).images[0]
-    print("Ran!")
-    pipe_dev.unload_lora_weights()
-    return image
-
-@spaces.GPU
-def run_dev_turbo(prompt):
-    print("dev_turbo")
-    pipe_dev.to("cuda")
-    print(turbo_lora)
-    pipe_dev.load_lora_weights(turbo_lora)
-    print("Loaded turbo lora!")
-    image = pipe_dev(prompt, num_inference_steps=8).images[0]
-    print("Ran!")
-    pipe_dev.unload_lora_weights()
-    return image
-
-@spaces.GPU
-def run_schnell(prompt):
-    print("schnell")
-    pipe_schnell.to("cuda")
-    print("schnell on gpu")
-    image = pipe_schnell(prompt, num_inference_steps=4).images[0]
-    print("Ran!")
-    return image
-
-@spaces.GPU
+@spaces.GPU(duration=75)
 def run_parallel_models(prompt):
     pipe_dev.load_lora_weights(hyper_lora)
     image = pipe_dev(prompt, num_inference_steps=8, joint_attention_kwargs={"scale": 0.125}).images[0]
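For context, the only entry point this commit keeps is run_parallel_models, now decorated with @spaces.GPU(duration=75) so the ZeroGPU scheduler grants the task a 75-second GPU allocation instead of the default window. The sketch below is not the Space's full app.py: the base model ID and LoRA repository are placeholders standing in for values defined earlier in the file, and the body combines the lines visible in this hunk with the GPU move and LoRA unload that the removed helper functions performed.

import torch
import spaces
from diffusers import DiffusionPipeline

# Placeholder IDs: the real values are set above the hunk shown in this diff.
pipe_dev = DiffusionPipeline.from_pretrained(
    "black-forest-labs/FLUX.1-dev",        # assumed base checkpoint
    torch_dtype=torch.bfloat16,
)
hyper_lora = "<hyper-flux-lora-repo>"      # hypothetical placeholder

@spaces.GPU(duration=75)                   # ask ZeroGPU for a 75 s allocation
def run_parallel_models(prompt):
    pipe_dev.to("cuda")
    pipe_dev.load_lora_weights(hyper_lora)
    image = pipe_dev(
        prompt,
        num_inference_steps=8,
        joint_attention_kwargs={"scale": 0.125},  # scale down the LoRA's effect in attention
    ).images[0]
    pipe_dev.unload_lora_weights()
    return image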