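# Gradio demo: text-to-image generation with prompthero/openjourney-v4, producing
# low-resolution latents that are then passed through stabilityai/sd-x2-latent-upscaler
# for a 2x upscaled final image.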
import gradio as gr
import torch
import numpy as np
import modin.pandas as pd
from PIL import Image
from diffusers import DiffusionPipeline, StableDiffusionLatentUpscalePipeline
device = "cuda" if torch.cuda.is_available() else "cpu"
pipe = DiffusionPipeline.from_pretrained("prompthero/openjourney-v4", torch_dtype=torch.float16, safety_checker=None)
upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained("stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16)
upscaler = upscaler.to(device)
pipe = pipe.to(device)
def genie(Prompt, scale, steps, Seed):
    # Fixed seed so the base generation and the upscaling pass are reproducible.
    generator = torch.Generator(device=device).manual_seed(Seed)
    # Generate low-resolution latents instead of a decoded image...
    low_res_latents = pipe(Prompt, num_inference_steps=steps, guidance_scale=scale, generator=generator, output_type="latent").images
    # ...then let the latent upscaler decode them into a 2x upscaled image.
    upscaled_image = upscaler(prompt='', image=low_res_latents, num_inference_steps=10, guidance_scale=0, generator=generator).images[0]
    return upscaled_image
gr.Interface(fn=genie,
             inputs=[gr.Textbox(label='What you want the AI to generate. 77 Token Limit.'),
                     gr.Slider(minimum=1, maximum=15, value=10, step=.25, label='Prompt Guidance Scale:', interactive=True),
                     gr.Slider(minimum=1, maximum=100, value=50, step=1, label='Number of Iterations: 50 is typically fine.'),
                     gr.Slider(minimum=1, maximum=999999999999999999, step=10, randomize=True, interactive=True, label='Seed')],
             outputs=gr.Image(label='1024x1024 Generated Image'),
             title="OpenJourney V4 GPU with SD x2 Upscaler",
             description="OJ V4 GPU. Ultra Fast, now running on a T4.<br><br><b>Warning:</b> This Demo is capable of producing NSFW content.",
             article="Code Monkey: <a href=\"https://huggingface.co/Manjushri\">Manjushri</a>").launch(debug=True, max_threads=True)