Manjushri committed on
Commit
d8c3206
1 Parent(s): 4db9aa5

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +13 -4
app.py CHANGED
@@ -287,8 +287,9 @@ def genie (Model, Prompt, negative_prompt, height, width, scale, steps, seed, re
287
  return refined
288
  else:
289
  if upscale == "Yes":
290
- image = sdxl(Prompt, negative_prompt=negative_prompt, height=height, width=width, num_inference_steps=steps, guidance_scale=scale).images[0]
291
-
 
292
  upscaler = DiffusionPipeline.from_pretrained("stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16, use_safetensors=True)
293
  upscaler.enable_xformers_memory_efficient_attention()
294
  upscaler = upscaler.to(device)
@@ -302,14 +303,16 @@ def genie (Model, Prompt, negative_prompt, height, width, scale, steps, seed, re
302
  torch.cuda.empty_cache()
303
 
304
  if Model == 'FusionXL':
305
-
 
306
  pipe = DiffusionPipeline.from_pretrained("circulus/canvers-fusionXL-v1", torch_dtype=torch.float16, safety_checker=None) if torch.cuda.is_available() else DiffusionPipeline.from_pretrained("circulus/canvers-real-v3.8.1")
307
  pipe.enable_xformers_memory_efficient_attention()
308
  pipe = pipe.to(device)
309
  torch.cuda.empty_cache()
310
  if refine == "Yes":
 
 
311
  int_image = pipe(Prompt, negative_prompt=negative_prompt, height=height, width=width, num_inference_steps=steps, guidance_scale=scale, output_type="latent").images
312
-
313
  pipe = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-xl-refiner-1.0", use_safetensors=True, torch_dtype=torch.float16, variant="fp16") if torch.cuda.is_available() else DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-xl-refiner-1.0")
314
  pipe.enable_xformers_memory_efficient_attention()
315
  pipe = pipe.to(device)
@@ -317,6 +320,8 @@ def genie (Model, Prompt, negative_prompt, height, width, scale, steps, seed, re
317
  image = pipe(Prompt, negative_prompt=negative_prompt, image=int_image, denoising_start=high_noise_frac).images[0]
318
  torch.cuda.empty_cache()
319
  if upscale == "Yes":
 
 
320
  pipe = DiffusionPipeline.from_pretrained("stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16, use_safetensors=True)
321
  pipe.enable_xformers_memory_efficient_attention()
322
  pipe = pipe.to(device)
@@ -328,7 +333,11 @@ def genie (Model, Prompt, negative_prompt, height, width, scale, steps, seed, re
328
  return image
329
  else:
330
  if upscale == "Yes":
 
331
  image = pipe(Prompt, negative_prompt=negative_prompt, height=height, width=width, num_inference_steps=steps, guidance_scale=scale).images[0]
 
 
 
332
  pipe = DiffusionPipeline.from_pretrained("stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16, use_safetensors=True)
333
  pipe.enable_xformers_memory_efficient_attention()
334
  pipe = pipe.to(device)
 
287
  return refined
288
  else:
289
  if upscale == "Yes":
290
+ image = sdxl(Prompt, negative_prompt=negative_prompt, height=height, width=width, num_inference_steps=steps, guidance_scale=scale).images[0]
291
+ torch.cuda.empty_cache()
292
+ torch.cuda.max_memory_allocated(device=device)
293
  upscaler = DiffusionPipeline.from_pretrained("stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16, use_safetensors=True)
294
  upscaler.enable_xformers_memory_efficient_attention()
295
  upscaler = upscaler.to(device)
 
303
  torch.cuda.empty_cache()
304
 
305
  if Model == 'FusionXL':
306
+ torch.cuda.empty_cache()
307
+ torch.cuda.max_memory_allocated(device=device)
308
  pipe = DiffusionPipeline.from_pretrained("circulus/canvers-fusionXL-v1", torch_dtype=torch.float16, safety_checker=None) if torch.cuda.is_available() else DiffusionPipeline.from_pretrained("circulus/canvers-real-v3.8.1")
309
  pipe.enable_xformers_memory_efficient_attention()
310
  pipe = pipe.to(device)
311
  torch.cuda.empty_cache()
312
  if refine == "Yes":
313
+ torch.cuda.empty_cache()
314
+ torch.cuda.max_memory_allocated(device=device)
315
  int_image = pipe(Prompt, negative_prompt=negative_prompt, height=height, width=width, num_inference_steps=steps, guidance_scale=scale, output_type="latent").images
 
316
  pipe = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-xl-refiner-1.0", use_safetensors=True, torch_dtype=torch.float16, variant="fp16") if torch.cuda.is_available() else DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-xl-refiner-1.0")
317
  pipe.enable_xformers_memory_efficient_attention()
318
  pipe = pipe.to(device)
 
320
  image = pipe(Prompt, negative_prompt=negative_prompt, image=int_image, denoising_start=high_noise_frac).images[0]
321
  torch.cuda.empty_cache()
322
  if upscale == "Yes":
323
+ torch.cuda.empty_cache()
324
+ torch.cuda.max_memory_allocated(device=device)
325
  pipe = DiffusionPipeline.from_pretrained("stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16, use_safetensors=True)
326
  pipe.enable_xformers_memory_efficient_attention()
327
  pipe = pipe.to(device)
 
333
  return image
334
  else:
335
  if upscale == "Yes":
336
+
337
  image = pipe(Prompt, negative_prompt=negative_prompt, height=height, width=width, num_inference_steps=steps, guidance_scale=scale).images[0]
338
+ torch.cuda.empty_cache()
339
+ torch.cuda.max_memory_allocated(device=device)
340
+
341
  pipe = DiffusionPipeline.from_pretrained("stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16, use_safetensors=True)
342
  pipe.enable_xformers_memory_efficient_attention()
343
  pipe = pipe.to(device)