AlekseyCalvin committed on
Commit 5511562
1 Parent(s): 6ebd799

Update app.py

Files changed (1)
  1. app.py +16 -13
app.py CHANGED
@@ -14,7 +14,7 @@ with open('loras.json', 'r') as f:
     loras = json.load(f)
 
 # Initialize the base model
-base_model = "AlekseyCalvin/MythsticColorV2FluxSoonr"
+base_model = "stabilityai/stable-diffusion-3.5-large-turbo"
 pipe = DiffusionPipeline.from_pretrained(base_model, torch_dtype=torch.bfloat16)
 
 MAX_SEED = 2**32-1
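Note: DiffusionPipeline.from_pretrained resolves this Stable Diffusion 3.5 repo id to the matching SD3 pipeline class from the repo's model_index.json. A minimal loading sketch under the same settings as the diff; the login step is an assumption, since the SD3.5 checkpoints are gated on the Hub and the Space presumably authenticates with its own token:

import torch
from diffusers import DiffusionPipeline

# Assumes access to the gated stabilityai/stable-diffusion-3.5-large-turbo repo
# has already been granted (e.g. after `huggingface-cli login`).
base_model = "stabilityai/stable-diffusion-3.5-large-turbo"
pipe = DiffusionPipeline.from_pretrained(base_model, torch_dtype=torch.bfloat16)
pipe.to("cuda")  # the Space defers this move to the @spaces.GPU-decorated function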
@@ -57,7 +57,7 @@ def update_selection(evt: gr.SelectData, width, height):
     )
 
 @spaces.GPU(duration=70)
-def generate_image(prompt, trigger_word, steps, seed, cfg_scale, width, height, lora_scale, progress):
+def infer(prompt, negative_prompt, trigger_word, steps, seed, cfg_scale, width, height, lora_scale, progress):
     pipe.to("cuda")
     generator = torch.Generator(device="cuda").manual_seed(seed)
 
@@ -65,6 +65,7 @@ def generate_image(prompt, trigger_word, steps, seed, cfg_scale, width, height,
     # Generate image
     image = pipe(
         prompt=f"{prompt} {trigger_word}",
+        negative_prompt=negative_prompt,
         num_inference_steps=steps,
         guidance_scale=cfg_scale,
         width=width,
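The new negative_prompt keyword passes straight through to the pipeline call. In diffusers' SD3 pipelines, classifier-free guidance (and therefore the negative prompt) generally only takes effect when guidance_scale is above 1, which fits the CFG default moving from 0 to 4 further down. A usage sketch with placeholder values, reusing the pipe object loaded above; none of these values are the app's defaults:

# Illustrative placeholder prompt and settings only.
image = pipe(
    prompt="HST style autochrome photo of a spring street market",
    negative_prompt="blurry, oversaturated, text, watermark",
    num_inference_steps=4,
    guidance_scale=4.0,
    width=1024,
    height=1024,
).images[0]
image.save("sample.png")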
@@ -74,7 +75,7 @@ def generate_image(prompt, trigger_word, steps, seed, cfg_scale, width, height,
     ).images[0]
     return image
 
-def run_lora(prompt, cfg_scale, steps, selected_index, randomize_seed, seed, width, height, lora_scale, progress=gr.Progress(track_tqdm=True)):
+def run_lora(prompt, negative_prompt, cfg_scale, steps, selected_index, randomize_seed, seed, width, height, lora_scale, progress=gr.Progress(track_tqdm=True)):
     if selected_index is None:
         raise gr.Error("You must select a LoRA before proceeding.")
 
@@ -94,7 +95,7 @@ def run_lora(prompt, cfg_scale, steps, selected_index, randomize_seed, seed, wid
     if randomize_seed:
         seed = random.randint(0, MAX_SEED)
 
-    image = generate_image(prompt, trigger_word, steps, seed, cfg_scale, width, height, lora_scale, progress)
+    image = infer(prompt, negative_prompt, trigger_word, steps, seed, cfg_scale, width, height, lora_scale, progress)
     pipe.to("cpu")
     pipe.unload_lora_weights()
     return image, seed
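run_lora moves the pipeline back to CPU and unloads the adapter after every request, so each selected LoRA starts from a clean pipeline. A hedged sketch of the surrounding lifecycle; load_lora_weights and unload_lora_weights are standard diffusers methods, but the repo id and weight file below are placeholders, since the actual loading line falls outside this hunk:

# Hypothetical per-request LoRA lifecycle (repo and file names are placeholders).
pipe.load_lora_weights("some-user/some-hst-lora", weight_name="lora.safetensors")
image = infer(prompt, negative_prompt, trigger_word, steps, seed,
              cfg_scale, width, height, lora_scale, progress)
pipe.to("cpu")              # release the GPU between Space requests
pipe.unload_lora_weights()  # drop the adapter before the next selection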
@@ -110,22 +111,24 @@ css = '''
 '''
 with gr.Blocks(theme=gr.themes.Soft(), css=css) as app:
     title = gr.HTML(
-        """<h1><img src="https://huggingface.co/spaces/multimodalart/flux-lora-the-explorer/resolve/main/flux_lora.png" alt="LoRA"> SOONfactory </h1>""",
+        """<h1><img src="https://huggingface.co/AlekseyCalvin/HSTklimbimOPENfluxLora/resolve/main/acs62iv.png" alt="LoRA">OpenFlux LoRAsoon®</h1>""",
         elem_id="title",
     )
     # Info blob stating what the app is running
     info_blob = gr.HTML(
-        """<div id="info_blob"> Img. Manufactory Running On: Our 'Mythstic Color Soonr V.2' 4-step(+/-) Flux Model (at AlekseyCalvin/AlekseyCalvin/MythsticColorV2FluxSoonr and AlekseyCalvin/Mythstic_Color_Soonr_Flux_V2 (for Safetensors version)). Now testing HST-triggerable historic photo-trained LoRAs (#s:2-8,11,12,14,16)for training-eval merging. </div>"""
+        """<div id="info_blob"> SOON®'s curated LoRa Gallery & Art Manufactory Space.|Runs on Stable Diffusion 3.5 Turbo. Now testing HST-triggerable historic photo-trained LoRAs. </div>"""
     )
 
     # Info blob stating what the app is running
     info_blob = gr.HTML(
-        """<div id="info_blob">Prephrase prompts w/: 1: RCA style || 2-thru-12: HST style analog film photo; HST autochrome photograph || 13: HST style in Peterhof || 14: LEN Vladimir Lenin || 15: SOTS style || 16: crisp photo || 17: TOK hybrid || 18: 2004 photo || 19: TOK portra || 20: flmft Kodachrome || 21: HST Austin Osman Spare style || 22: polaroid photo || 23: pficonics || 24: wh3r3sw4ld0 || 25: retrofuturism || 26: vintage cover || </div>"""
+        """<div id="info_blob">Prephrase prompts w/: "HST style autochrome photo" </div>"""
     )
     selected_index = gr.State(None)
     with gr.Row():
-        with gr.Column(scale=3):
+        with gr.Column(scale=2):
             prompt = gr.Textbox(label="Prompt", lines=1, placeholder="Select LoRa/Style & type prompt!")
+        with gr.Column(scale=2):
+            negative_prompt = gr.Textbox(label="Negative Prompt", lines=1, placeholder="What to exclude!")
         with gr.Column(scale=1, elem_id="gen_column"):
             generate_button = gr.Button("Generate", variant="primary", elem_id="gen_btn")
     with gr.Row():
@@ -146,17 +149,17 @@ with gr.Blocks(theme=gr.themes.Soft(), css=css) as app:
     with gr.Accordion("Advanced Settings", open=True):
         with gr.Column():
             with gr.Row():
-                cfg_scale = gr.Slider(label="CFG Scale", minimum=0, maximum=20, step=.5, value=0)
+                cfg_scale = gr.Slider(label="CFG Scale", minimum=0, maximum=20, step=.5, value=4)
                 steps = gr.Slider(label="Steps", minimum=1, maximum=50, step=1, value=4)
 
             with gr.Row():
-                width = gr.Slider(label="Width", minimum=256, maximum=1536, step=64, value=768)
-                height = gr.Slider(label="Height", minimum=256, maximum=1536, step=64, value=768)
+                width = gr.Slider(label="Width", minimum=256, maximum=1536, step=64, value=1024)
+                height = gr.Slider(label="Height", minimum=256, maximum=1536, step=64, value=1024)
 
             with gr.Row():
                 randomize_seed = gr.Checkbox(True, label="Randomize seed")
                 seed = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=0, randomize=True)
-                lora_scale = gr.Slider(label="LoRA Scale", minimum=0, maximum=3.0, step=0.01, value=0.5)
+                lora_scale = gr.Slider(label="LoRA Scale", minimum=0, maximum=3.0, step=0.01, value=1.0)
 
     gallery.select(
         update_selection,
@@ -167,7 +170,7 @@ with gr.Blocks(theme=gr.themes.Soft(), css=css) as app:
     gr.on(
         triggers=[generate_button.click, prompt.submit],
         fn=run_lora,
-        inputs=[prompt, cfg_scale, steps, selected_index, randomize_seed, seed, width, height, lora_scale],
+        inputs=[prompt, negative_prompt, cfg_scale, steps, selected_index, randomize_seed, seed, width, height, lora_scale],
         outputs=[result, seed]
     )
 
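Gradio passes the inputs list to fn positionally, so its order must track run_lora's parameter order exactly; that is why negative_prompt is inserted second both in the signature and in this list. Spelled out as the call this wiring produces:

# On each click/submit, Gradio effectively calls:
#   run_lora(prompt, negative_prompt, cfg_scale, steps, selected_index,
#            randomize_seed, seed, width, height, lora_scale)
# i.e. inputs[i] feeds the i-th positional parameter of run_lora.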
 
 