Manjushri committed
Commit: 4db9aa5
Parent: d20dcba

Update app.py

Files changed (1): app.py (+4, -4)
app.py CHANGED
@@ -329,11 +329,11 @@ def genie (Model, Prompt, negative_prompt, height, width, scale, steps, seed, re
     else:
         if upscale == "Yes":
             image = pipe(Prompt, negative_prompt=negative_prompt, height=height, width=width, num_inference_steps=steps, guidance_scale=scale).images[0]
-            upscaler = DiffusionPipeline.from_pretrained("stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16, use_safetensors=True)
-            upscaler.enable_xformers_memory_efficient_attention()
-            upscaler = upscaler.to(device)
+            pipe = DiffusionPipeline.from_pretrained("stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16, use_safetensors=True)
+            pipe.enable_xformers_memory_efficient_attention()
+            pipe = pipe.to(device)
             torch.cuda.empty_cache()
-            upscaled = upscaler(prompt=Prompt, negative_prompt=negative_prompt, image=image, num_inference_steps=15, guidance_scale=0).images[0]
+            upscaled = pipe(prompt=Prompt, negative_prompt=negative_prompt, image=image, num_inference_steps=15, guidance_scale=0).images[0]
             torch.cuda.empty_cache()
             return upscaled
         else:
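After this change, the upscaling branch of genie() loads stabilityai/sd-x2-latent-upscaler into the same pipe variable that just produced the base image, rather than into a separate upscaler variable, presumably so only one pipeline reference is kept around while the upscaler moves to the GPU. A minimal standalone sketch of that flow follows; it is an illustration under stated assumptions, not the app's code: the base model ID, prompt, and output path are placeholders (in app.py the base pipeline depends on the Model argument), negative_prompt handling is omitted, and a CUDA GPU with the xformers package is assumed.

# Hypothetical sketch of the post-commit upscaling flow; model IDs and prompt
# are placeholders, not values from app.py.
import torch
from diffusers import DiffusionPipeline

device = "cuda" if torch.cuda.is_available() else "cpu"
prompt = "a photo of an astronaut riding a horse"

# Base text-to-image pass (mirrors the context line 331 of the diff).
pipe = DiffusionPipeline.from_pretrained(
    "stabilityai/stable-diffusion-2-1", torch_dtype=torch.float16
).to(device)
image = pipe(prompt, num_inference_steps=25, guidance_scale=7.5).images[0]

# Rebind `pipe` to the x2 latent upscaler, as the commit now does, instead of
# keeping a separate `upscaler` variable alongside the base pipeline.
pipe = DiffusionPipeline.from_pretrained(
    "stabilityai/sd-x2-latent-upscaler",
    torch_dtype=torch.float16,
    use_safetensors=True,
)
pipe.enable_xformers_memory_efficient_attention()  # requires the xformers package
pipe = pipe.to(device)
torch.cuda.empty_cache()

# The latent upscaler takes the generated image plus the same prompt;
# guidance_scale=0 matches the diff.
upscaled = pipe(
    prompt=prompt,
    image=image,
    num_inference_steps=15,
    guidance_scale=0,
).images[0]
torch.cuda.empty_cache()
upscaled.save("upscaled.png")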