multimodalart HF staff committed on
Commit
eaf8b26
1 Parent(s): 003a39a

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +1 -2
app.py CHANGED
@@ -18,7 +18,7 @@ repo_name = "alimama-creative/FLUX.1-Turbo-Alpha"
18
  ckpt_name = "diffusion_pytorch_model.safetensors"
19
  turbo_lora = hf_hub_download(repo_name, ckpt_name)
20
 
21
- pipe_dev = DiffusionPipeline.from_pretrained(dev_model, torch_dtype=torch.bfloat16)
22
  pipe_schnell = DiffusionPipeline.from_pretrained(
23
  schnell_model,
24
  text_encoder=pipe_dev.text_encoder,
@@ -63,7 +63,6 @@ def run_schnell(prompt):
63
 
64
  @spaces.GPU
65
  def run_parallel_models(prompt):
66
- pipe_dev.to("cuda")
67
  pipe_dev.load_lora_weights(hyper_lora)
68
  image = pipe_dev(prompt, num_inference_steps=8, joint_attention_kwargs={"scale": 0.125}).images[0]
69
  pipe_dev.unload_lora_weights()
 
18
  ckpt_name = "diffusion_pytorch_model.safetensors"
19
  turbo_lora = hf_hub_download(repo_name, ckpt_name)
20
 
21
+ pipe_dev = DiffusionPipeline.from_pretrained(dev_model, torch_dtype=torch.bfloat16).to("cuda")
22
  pipe_schnell = DiffusionPipeline.from_pretrained(
23
  schnell_model,
24
  text_encoder=pipe_dev.text_encoder,
 
63
 
64
  @spaces.GPU
65
  def run_parallel_models(prompt):
 
66
  pipe_dev.load_lora_weights(hyper_lora)
67
  image = pipe_dev(prompt, num_inference_steps=8, joint_attention_kwargs={"scale": 0.125}).images[0]
68
  pipe_dev.unload_lora_weights()