Manjushri committed
Commit d41ad4b
1 Parent(s): 70733c7

Update app.py

Files changed (1)
  1. app.py +3 -2
app.py CHANGED
@@ -18,8 +18,9 @@ upscaler.enable_xformers_memory_efficient_attention()
 def genie (Prompt, negative_prompt, height, width, scale, steps, Seed, upscale):
     generator = torch.Generator(device=device).manual_seed(Seed)
     if upscale == "Yes":
-        low_res_latents = pipe(Prompt, negative_prompt=negative_prompt, height=height, width=width, num_inference_steps=steps, guidance_scale=scale, generator=generator).images[0]
-        image = upscaler(Prompt, negative_prompt=negative_prompt, image=low_res_latents, num_inference_steps=5, guidance_scale=0, generator=generator).images[0]
+        image = pipe(Prompt, negative_prompt=negative_prompt, height=height, width=width, num_inference_steps=steps, guidance_scale=scale, generator=generator).images[0]
+        upscaled = upscaler(Prompt, negative_prompt=negative_prompt, image=image, num_inference_steps=25, guidance_scale=0, generator=generator).images[0]
+        return (image, upscaled)
     else:
         image = pipe(Prompt, negative_prompt=negative_prompt, height=height, width=width, num_inference_steps=steps, guidance_scale=scale, generator=generator).images[0]
     return (image, image)
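
For context, below is a minimal sketch of how the setup around this hunk might look, with the post-change genie() included so the snippet is self-contained. The pipeline classes, model IDs, and Gradio wiring are assumptions inferred from the names pipe, upscaler, genie, and the call signatures in the diff; only enable_xformers_memory_efficient_attention() and the genie() body come from the hunk itself.

# Hypothetical reconstruction of the surrounding app -- pipeline classes,
# model IDs, and Gradio wiring are assumptions, not copied from the repo.
import torch
import gradio as gr
from diffusers import DiffusionPipeline, StableDiffusionLatentUpscalePipeline

device = "cuda" if torch.cuda.is_available() else "cpu"
dtype = torch.float16 if device == "cuda" else torch.float32

# Base text-to-image pipeline used by genie() (model ID is a placeholder).
pipe = DiffusionPipeline.from_pretrained(
    "stabilityai/stable-diffusion-2-1", torch_dtype=dtype
).to(device)

# Upscaler used on the "Yes" branch; the call signature in the diff
# (prompt, negative_prompt, image, num_inference_steps, guidance_scale,
# generator) matches diffusers' latent upscaler.
upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained(
    "stabilityai/sd-x2-latent-upscaler", torch_dtype=dtype
).to(device)
if device == "cuda":
    # Called unconditionally in the hunk header; needs the xformers package.
    upscaler.enable_xformers_memory_efficient_attention()

def genie(Prompt, negative_prompt, height, width, scale, steps, Seed, upscale):
    generator = torch.Generator(device=device).manual_seed(Seed)
    if upscale == "Yes":
        # New behavior from this commit: decode the base image first, then
        # upscale it with 25 steps and return both images.
        image = pipe(Prompt, negative_prompt=negative_prompt, height=height,
                     width=width, num_inference_steps=steps,
                     guidance_scale=scale, generator=generator).images[0]
        upscaled = upscaler(Prompt, negative_prompt=negative_prompt, image=image,
                            num_inference_steps=25, guidance_scale=0,
                            generator=generator).images[0]
        return (image, upscaled)
    else:
        image = pipe(Prompt, negative_prompt=negative_prompt, height=height,
                     width=width, num_inference_steps=steps,
                     guidance_scale=scale, generator=generator).images[0]
    return (image, image)

# Two image outputs, matching the (image, upscaled) / (image, image) tuples.
demo = gr.Interface(
    fn=genie,
    inputs=[
        gr.Textbox(label="Prompt"),
        gr.Textbox(label="Negative prompt"),
        gr.Slider(512, 1024, value=768, step=64, label="Height"),
        gr.Slider(512, 1024, value=768, step=64, label="Width"),
        gr.Slider(1, 15, value=7.5, label="Guidance scale"),
        gr.Slider(10, 50, value=25, step=1, label="Steps"),
        gr.Number(value=0, precision=0, label="Seed"),
        gr.Radio(["Yes", "No"], value="No", label="Upscale?"),
    ],
    outputs=[gr.Image(label="Generated image"), gr.Image(label="Upscaled image")],
)

if __name__ == "__main__":
    demo.launch()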