prithivMLmods committed
Commit 1727a4e
1 Parent(s): 4061b41

Update app.py

Files changed (1):
  1. app.py +25 -22
app.py CHANGED
@@ -2,7 +2,7 @@ import gradio as gr
 import spaces
 import numpy as np
 import random
-from diffusers import DiffusionPipeline, AutoencoderTiny, StableDiffusion3Pipeline, SD3Transformer2DModel, FlashFlowMatchEulerDiscreteScheduler
+from diffusers import DiffusionPipeline
 import torch
 from PIL import Image
 
@@ -10,25 +10,13 @@ device = "cuda" if torch.cuda.is_available() else "cpu"
 model_repo_id = "stabilityai/stable-diffusion-3.5-large-turbo"
 
 torch_dtype = torch.bfloat16 if torch.cuda.is_available() else torch.float32
-# Load Tiny Autoencoder
-taesd3 = AutoencoderTiny.from_pretrained("madebyollin/taesd3", torch_dtype=torch.float16).to(device)
-taesd3.decoder.layers = torch.compile(
-    taesd3.decoder.layers,
-    fullgraph=True,
-    dynamic=False,
-    mode="max-autotune-no-cudagraphs",
-)
-
-# Load main Stable Diffusion pipeline
-pipe = StableDiffusion3Pipeline.from_pretrained(
-    model_path,
-    transformer=SD3Transformer2DModel.from_pretrained(model_path, torch_dtype=torch.float16),
-    torch_dtype=torch_dtype,
-    vae=taesd3,
-).to(device)
-
-pipe.scheduler = FlashFlowMatchEulerDiscreteScheduler.from_pretrained(model_path, subfolder="scheduler")
-pipe.set_progress_bar_config(disable=True)
+
+pipe = DiffusionPipeline.from_pretrained(model_repo_id, torch_dtype=torch_dtype)
+pipe = pipe.to(device)
+
+pipe.load_lora_weights("prithivMLmods/SD3.5-Large-Turbo-HyperRealistic-LoRA", weight_name="SD3.5-4Step-Large-Turbo-HyperRealistic-LoRA.safetensors")
+trigger_word = "hyper realistic"  # Specify trigger word for LoRA
+pipe.fuse_lora(lora_scale=1.0)
 
 MAX_SEED = np.iinfo(np.int32).max
 MAX_IMAGE_SIZE = 1024
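The two hunks above drop the TAESD3 tiny-VAE, torch.compile and FlashFlowMatch scheduler setup (which also referenced an undefined model_path) in favor of a plain DiffusionPipeline load with the HyperRealistic LoRA fused in. The app's infer() is not part of this diff, so the snippet below is only a minimal standalone sketch of how the fused pipeline and trigger_word would typically be driven; the 4-step, zero-guidance settings are assumptions that match common SD3.5 Large Turbo usage, not values read from app.py.

    # Minimal sketch (not the app's infer()): drive the fused-LoRA Turbo pipeline.
    # Assumes a GPU with enough memory for SD3.5 Large; settings are illustrative.
    import torch
    from diffusers import DiffusionPipeline

    device = "cuda" if torch.cuda.is_available() else "cpu"
    torch_dtype = torch.bfloat16 if torch.cuda.is_available() else torch.float32

    pipe = DiffusionPipeline.from_pretrained(
        "stabilityai/stable-diffusion-3.5-large-turbo", torch_dtype=torch_dtype
    ).to(device)
    pipe.load_lora_weights(
        "prithivMLmods/SD3.5-Large-Turbo-HyperRealistic-LoRA",
        weight_name="SD3.5-4Step-Large-Turbo-HyperRealistic-LoRA.safetensors",
    )
    pipe.fuse_lora(lora_scale=1.0)

    trigger_word = "hyper realistic"
    prompt = f"{trigger_word}, portrait of an astronaut in golden-hour light"

    image = pipe(
        prompt=prompt,
        num_inference_steps=4,   # Turbo checkpoints are distilled for few steps (assumed)
        guidance_scale=0.0,      # distilled models are usually run without CFG (assumed)
        width=1024,
        height=1024,
        generator=torch.Generator(device).manual_seed(0),
    ).images[0]
    image.save("sample.png")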
 
@@ -218,12 +206,27 @@ with gr.Blocks(css=css, theme="prithivMLmods/Minecraft-Theme") as demo:
                 value=4,
             )
 
-    gr.Examples(examples=examples, inputs=[prompt], outputs=[result, seed], fn=infer, cache_examples=False)
+    gr.Examples(examples=examples,
+                inputs=[prompt],
+                outputs=[result, seed],
+                fn=infer,
+                cache_examples=False)
 
     gr.on(
         triggers=[run_button.click, prompt.submit],
         fn=infer,
-        inputs=[prompt, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps, style_selection, grid_size_selection],
+        inputs=[
+            prompt,
+            negative_prompt,
+            seed,
+            randomize_seed,
+            width,
+            height,
+            guidance_scale,
+            num_inference_steps,
+            style_selection,
+            grid_size_selection,
+        ],
         outputs=[result, seed],
     )
 
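The hunk above only reflows gr.Examples and the gr.on inputs list across multiple lines. For reference, this is a stripped-down sketch of the same wiring pattern with placeholder components and a stub infer(); the real app passes ten inputs (negative prompt, seed controls, size, guidance, steps, style and grid-size selectors) and uses its own theme and CSS.

    # Simplified sketch of the gr.Examples / gr.on pattern; components are placeholders.
    import gradio as gr

    def infer(prompt, seed=0):
        # Stub: the real infer() runs the SD3.5 Large Turbo pipeline built earlier.
        return f"(would render) {prompt}", seed

    examples = ["hyper realistic, portrait of an astronaut"]

    with gr.Blocks() as demo:
        prompt = gr.Textbox(label="Prompt")
        seed = gr.Slider(0, 2**31 - 1, step=1, value=0, label="Seed")
        run_button = gr.Button("Run")
        result = gr.Textbox(label="Result")

        # Clicking an example only fills the prompt box (cache_examples=False).
        gr.Examples(examples=examples,
                    inputs=[prompt],
                    outputs=[result, seed],
                    fn=infer,
                    cache_examples=False)

        # gr.on binds one callback to several triggers: pressing Run or hitting
        # Enter in the prompt box both call infer() with the same inputs/outputs.
        gr.on(
            triggers=[run_button.click, prompt.submit],
            fn=infer,
            inputs=[prompt, seed],
            outputs=[result, seed],
        )

    demo.launch()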
232