Manjushri committed on
Commit
51d2448
1 Parent(s): 907025f

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +2 -2
app.py CHANGED
@@ -256,7 +256,7 @@ def genie (Model, Prompt, negative_prompt, height, width, scale, steps, seed, re
256
  if Model == "SDXL 1.0":
257
  torch.cuda.empty_cache()
258
  torch.cuda.max_memory_allocated(device=device)
259
- sdxl = DiffusionPipeline.from_pretrained("stabilityai/stable-cascade", torch_dtype=torch.float16, variant="fp16", use_safetensors=True)
260
  sdxl.enable_xformers_memory_efficient_attention()
261
  sdxl = sdxl.to(device)
262
  torch.cuda.empty_cache()
@@ -266,7 +266,7 @@ def genie (Model, Prompt, negative_prompt, height, width, scale, steps, seed, re
266
  torch.cuda.empty_cache()
267
  image = sdxl(Prompt, negative_prompt=negative_prompt, height=height, width=width, num_inference_steps=steps, guidance_scale=scale, output_type="latent").images
268
  torch.cuda.empty_cache()
269
- sdxl = DiffusionPipeline.from_pretrained("stabilityai/stable-cascade-prior", use_safetensors=True, torch_dtype=torch.float16, variant="fp16") if torch.cuda.is_available() else DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-xl-refiner-1.0")
270
  sdxl.enable_xformers_memory_efficient_attention()
271
  sdxl = sdxl.to(device)
272
  torch.cuda.empty_cache()
 
256
  if Model == "SDXL 1.0":
257
  torch.cuda.empty_cache()
258
  torch.cuda.max_memory_allocated(device=device)
259
+ sdxl = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16, variant="fp16", use_safetensors=True)
260
  sdxl.enable_xformers_memory_efficient_attention()
261
  sdxl = sdxl.to(device)
262
  torch.cuda.empty_cache()
 
266
  torch.cuda.empty_cache()
267
  image = sdxl(Prompt, negative_prompt=negative_prompt, height=height, width=width, num_inference_steps=steps, guidance_scale=scale, output_type="latent").images
268
  torch.cuda.empty_cache()
269
+ sdxl = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-xl-refiner-1.0", use_safetensors=True, torch_dtype=torch.float16, variant="fp16") if torch.cuda.is_available() else DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-xl-refiner-1.0")
270
  sdxl.enable_xformers_memory_efficient_attention()
271
  sdxl = sdxl.to(device)
272
  torch.cuda.empty_cache()