multimodalart HF staff commited on
Commit
fad334a
1 Parent(s): b1db469

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +13 -5
app.py CHANGED
@@ -62,22 +62,23 @@ def run_schnell(prompt):
62
  return image
63
 
64
  def run_parallel_models(prompt):
65
- print(prompt)
66
  with ProcessPoolExecutor(max_workers=3) as executor:
67
  future_dev_hyper = executor.submit(run_dev_hyper, prompt)
68
  future_dev_turbo = executor.submit(run_dev_turbo, prompt)
69
  future_schnell = executor.submit(run_schnell, prompt)
70
 
71
  res_dev_hyper = future_dev_hyper.result()
 
72
  res_dev_turbo = future_dev_turbo.result()
 
73
  res_schnell = future_schnell.result()
74
- return res_dev_hyper, res_dev_turbo, res_schnell
75
 
76
  run_parallel_models.zerogpu = True
77
 
78
  with gr.Blocks() as demo:
79
  gr.Markdown("# Low Step Flux Comparison")
80
- gr.Markdown("Compare the quality (not the speed) of FLUX Schnell (4 steps), FLUX.1[dev] HyperFLUX (8 steps), FLUX.1[dev]-Turbo-Alpha (8 steps)")
81
  with gr.Row():
82
  with gr.Column(scale=2):
83
  prompt = gr.Textbox(label="Prompt")
@@ -89,14 +90,21 @@ with gr.Blocks() as demo:
89
  turbo = gr.Image(label="FLUX.1[dev]-Turbo-Alpha (8 steps)")
90
 
91
  gr.Examples(
92
- examples=[["the spirit of a Tamagotchi wandering in the city of Vienna"], ["a photo of a lavender cat"], ["a tiny astronaut hatching from an egg on the moon"], ["A delicious ceviche cheesecake slice"], ["an insect robot preparing a delicious meal"], ["A Charmander fine dining with a view to la Sagrada Família"]],
 
 
 
 
 
 
93
  fn=run_parallel_models,
94
  inputs=[prompt],
95
  outputs=[schnell, hyper, turbo],
96
  cache_examples="lazy"
97
  )
98
 
99
- submit.click(
 
100
  fn=run_parallel_models,
101
  inputs=[prompt],
102
  outputs=[schnell, hyper, turbo]
 
62
  return image
63
 
64
  def run_parallel_models(prompt):
 
65
  with ProcessPoolExecutor(max_workers=3) as executor:
66
  future_dev_hyper = executor.submit(run_dev_hyper, prompt)
67
  future_dev_turbo = executor.submit(run_dev_turbo, prompt)
68
  future_schnell = executor.submit(run_schnell, prompt)
69
 
70
  res_dev_hyper = future_dev_hyper.result()
71
+ yield res_dev_hyper, gr.update(), gr.update()
72
  res_dev_turbo = future_dev_turbo.result()
73
+ yield gr.update(), res_dev_turbo, gr.update()
74
  res_schnell = future_schnell.result()
75
+ yield gr.update(), gr.update(), res_dev_turbo
76
 
77
  run_parallel_models.zerogpu = True
78
 
79
  with gr.Blocks() as demo:
80
  gr.Markdown("# Low Step Flux Comparison")
81
+ gr.Markdown("Compare the quality (not the speed) of FLUX Schnell (4 steps), FLUX.1[dev] HyperFLUX (8 steps), FLUX.1[dev]-Turbo-Alpha (8 steps). It runs a bit slow as it's inferencing the three models.")
82
  with gr.Row():
83
  with gr.Column(scale=2):
84
  prompt = gr.Textbox(label="Prompt")
 
90
  turbo = gr.Image(label="FLUX.1[dev]-Turbo-Alpha (8 steps)")
91
 
92
  gr.Examples(
93
+ examples=[
94
+ ["the spirit of a Tamagotchi wandering in the city of Vienna"],
95
+ ["a photo of a lavender cat"],
96
+ ["a tiny astronaut hatching from an egg on the moon"],
97
+ ["a delicious ceviche cheesecake slice"],
98
+ ["an insect robot preparing a delicious meal"],
99
+ ["a Charmander fine dining with a view to la Sagrada Família"]],
100
  fn=run_parallel_models,
101
  inputs=[prompt],
102
  outputs=[schnell, hyper, turbo],
103
  cache_examples="lazy"
104
  )
105
 
106
+ gr.on(
107
+ triggers=[submit.click, prompt.submit],
108
  fn=run_parallel_models,
109
  inputs=[prompt],
110
  outputs=[schnell, hyper, turbo]