multimodalart HF staff committed on
Commit
d1118c6
1 Parent(s): 91dbe72

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +11 -6
app.py CHANGED
@@ -10,6 +10,14 @@ schnell_model = "black-forest-labs/FLUX.1-schnell"
10
 
11
  device = "cuda" if torch.cuda.is_available() else "cpu"
12
 
 
 
 
 
 
 
 
 
13
  pipe_dev = DiffusionPipeline.from_pretrained(dev_model, torch_dtype=torch.bfloat16)
14
  pipe_schnell = DiffusionPipeline.from_pretrained(
15
  schnell_model,
@@ -19,13 +27,12 @@ pipe_schnell = DiffusionPipeline.from_pretrained(
19
  tokenizer_2=pipe_dev.tokenizer_2,
20
  torch_dtype=torch.bfloat16
21
  )
 
22
  @spaces.GPU
23
  def run_dev_hyper(prompt):
24
  print("dev_hyper")
25
  pipe_dev.to("cuda")
26
- repo_name = "ByteDance/Hyper-SD"
27
- ckpt_name = "Hyper-FLUX.1-dev-8steps-lora.safetensors"
28
- pipe_dev.load_lora_weights(hf_hub_download(repo_name, ckpt_name))
29
  image = pipe_dev(prompt, num_inference_steps=8, joint_attention_kwargs={"scale": 0.125}).images[0]
30
  pipe_dev.unload_lora_weights()
31
  return image
@@ -34,9 +41,7 @@ def run_dev_hyper(prompt):
34
  def run_dev_turbo(prompt):
35
  print("dev_turbo")
36
  pipe_dev.to("cuda")
37
- repo_name = "alimama-creative/FLUX.1-Turbo-Alpha"
38
- ckpt_name = "diffusion_pytorch_model.safetensors"
39
- pipe_dev.load_lora_weights(hf_hub_download(repo_name, ckpt_name))
40
  image = pipe_dev(prompt, num_inference_steps=8).images[0]
41
  pipe_dev.unload_lora_weights()
42
  return image
 
10
 
11
  device = "cuda" if torch.cuda.is_available() else "cpu"
12
 
13
+ repo_name = "ByteDance/Hyper-SD"
14
+ ckpt_name = "Hyper-FLUX.1-dev-8steps-lora.safetensors"
15
+ hyper_lora = hf_hub_download(repo_name, ckpt_name)
16
+
17
+ repo_name = "alimama-creative/FLUX.1-Turbo-Alpha"
18
+ ckpt_name = "diffusion_pytorch_model.safetensors"
19
+ turbo_lora = hf_hub_download(repo_name, ckpt_name)
20
+
21
  pipe_dev = DiffusionPipeline.from_pretrained(dev_model, torch_dtype=torch.bfloat16)
22
  pipe_schnell = DiffusionPipeline.from_pretrained(
23
  schnell_model,
 
27
  tokenizer_2=pipe_dev.tokenizer_2,
28
  torch_dtype=torch.bfloat16
29
  )
30
+
31
  @spaces.GPU
32
  def run_dev_hyper(prompt):
33
  print("dev_hyper")
34
  pipe_dev.to("cuda")
35
+ pipe_dev.load_lora_weights(hyper_lora)
 
 
36
  image = pipe_dev(prompt, num_inference_steps=8, joint_attention_kwargs={"scale": 0.125}).images[0]
37
  pipe_dev.unload_lora_weights()
38
  return image
 
41
  def run_dev_turbo(prompt):
42
  print("dev_turbo")
43
  pipe_dev.to("cuda")
44
+ pipe_dev.load_lora_weights(turbo_lora)
 
 
45
  image = pipe_dev(prompt, num_inference_steps=8).images[0]
46
  pipe_dev.unload_lora_weights()
47
  return image