import gradio as gr
import torch
from diffusers import DiffusionPipeline
from huggingface_hub import hf_hub_download
from concurrent.futures import ProcessPoolExecutor
import spaces
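# Load both FLUX base pipelines once at startup; the speed-up LoRAs are loaded on demand per request.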
dev_model = "black-forest-labs/FLUX.1-dev"
schnell_model = "black-forest-labs/FLUX.1-schnell"
device = "cuda" if torch.cuda.is_available() else "cpu"
pipe_dev = DiffusionPipeline.from_pretrained(dev_model, torch_dtype=torch.bfloat16).to(device)
pipe_schnell = DiffusionPipeline.from_pretrained(schnell_model, torch_dtype=torch.bfloat16).to(device)
@spaces.GPU
def run_dev_hyper(prompt):
    # FLUX.1 [dev] with the ByteDance Hyper-SD 8-step LoRA
    repo_name = "ByteDance/Hyper-SD"
    ckpt_name = "Hyper-FLUX.1-dev-8steps-lora.safetensors"
    pipe_dev.load_lora_weights(hf_hub_download(repo_name, ckpt_name))
    image = pipe_dev(prompt, num_inference_steps=8, joint_attention_kwargs={"scale": 0.125}).images[0]
    pipe_dev.unload_lora_weights()
    return image
@spaces.GPU
def run_dev_turbo(prompt):
    # FLUX.1 [dev] with the alimama-creative FLUX.1-Turbo-Alpha 8-step LoRA
    repo_name = "alimama-creative/FLUX.1-Turbo-Alpha"
    ckpt_name = "diffusion_pytorch_model.safetensors"
    pipe_dev.load_lora_weights(hf_hub_download(repo_name, ckpt_name))
    image = pipe_dev(prompt, num_inference_steps=8).images[0]
    pipe_dev.unload_lora_weights()
    return image
@spaces.GPU
def run_schnell(prompt):
    # FLUX.1 [schnell] baseline; 4 steps matches the image label in the UI below
    image = pipe_schnell(prompt, num_inference_steps=4).images[0]
    return image
def run_parallel_models(prompt):
    # Dispatch the three generations to worker processes so they run concurrently
    with ProcessPoolExecutor(3) as e:
        future_dev_hyper = e.submit(run_dev_hyper, prompt)
        future_dev_turbo = e.submit(run_dev_turbo, prompt)
        future_schnell = e.submit(run_schnell, prompt)
        image_dev_hyper = future_dev_hyper.result()
        image_dev_turbo = future_dev_turbo.result()
        image_schnell = future_schnell.result()
    # Return order must match the click() outputs: [schnell, hyper, turbo]
    return image_schnell, image_dev_hyper, image_dev_turbo
run_parallel_models.zerogpu = True
with gr.Blocks() as demo:
    gr.Markdown("# Fast Flux Comparison")
    with gr.Row():
        prompt = gr.Textbox(label="Prompt")
        submit = gr.Button()
    with gr.Row():
        schnell = gr.Image(label="FLUX Schnell (4 steps)")
        hyper = gr.Image(label="FLUX.1[dev] HyperFLUX (8 steps)")
        turbo = gr.Image(label="FLUX.1[dev]-Turbo-Alpha (8 steps)")
    submit.click(
        fn=run_parallel_models,
        inputs=[prompt],
        outputs=[schnell, hyper, turbo]
    )
demo.launch()