# NewModel / app.py
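"""Gradio demo that turns a color, dress type, and design description into an
apparel mock-up image using the SDXL-Turbo diffusion pipeline."""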
import numpy as np
import random
import torch
import gradio as gr
from diffusers import DiffusionPipeline
from PIL import Image
import io
device = "cuda" if torch.cuda.is_available() else "cpu"
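# Load SDXL-Turbo: half precision with xformers memory-efficient attention on
# GPU, default precision on CPU.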
if torch.cuda.is_available():
    torch.cuda.max_memory_allocated(device=device)
    pipe = DiffusionPipeline.from_pretrained("stabilityai/sdxl-turbo", torch_dtype=torch.float16, variant="fp16", use_safetensors=True)
    pipe.enable_xformers_memory_efficient_attention()
    pipe = pipe.to(device)
else:
    pipe = DiffusionPipeline.from_pretrained("stabilityai/sdxl-turbo", use_safetensors=True)
    pipe = pipe.to(device)
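# Limits for the seed and image-size sliders in the UI.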
MAX_SEED = np.iinfo(np.int32).max
MAX_IMAGE_SIZE = 1024
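# Assemble the final prompt from the fixed prefix/suffix and the user-supplied
# color, dress type, and design, then run the pipeline once.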
def infer(prompt_part1, color, dress_type, design, prompt_part5, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps):
    prompt = f"{prompt_part1} {color} colored plain {dress_type} with {design} design, {prompt_part5}"

    if randomize_seed:
        seed = random.randint(0, MAX_SEED)

    generator = torch.Generator().manual_seed(seed)

    try:
        image = pipe(
            prompt=prompt,
            negative_prompt=negative_prompt,
            guidance_scale=guidance_scale,
            num_inference_steps=num_inference_steps,
            width=width,
            height=height,
            generator=generator,
        ).images[0]
        print("Image generated successfully.")  # Debug: confirm image generation
        return image
    except Exception as e:
        print(f"Error generating image: {e}")
        return None
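# Example (hypothetical, not executed): calling infer() directly with the same
# defaults the UI sliders use, for a quick smoke test outside Gradio.
#   image = infer("a single", "red", "t-shirt", "yellow stripes",
#                 "hanging on the plain grey wall", "", 0, True, 512, 512, 0.0, 2)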
examples = [
    ["red", "t-shirt", "yellow stripes"],
    ["blue", "hoodie", "minimalist"],
    ["red", "sweatshirt", "geometric design"],
]

css = """
#col-container {
    margin: 0 auto;
    max-width: 520px;
}
"""
power_device = "GPU" if torch.cuda.is_available() else "CPU"
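# UI: hidden fixed prompt parts, visible color/dress type/design fields,
# advanced settings (seed, size, guidance, steps), and example inputs.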
with gr.Blocks(css=css) as demo:
    with gr.Column(elem_id="col-container"):
        gr.Markdown(f"""
        # Text-to-Image Gradio Template
        Currently running on {power_device}.
        """)

        with gr.Row():
            prompt_part1 = gr.Textbox(value="a single", label="Prompt Part 1", show_label=False, interactive=False, container=False, elem_id="prompt_part1", visible=False)
            prompt_part2 = gr.Textbox(label="color", show_label=False, max_lines=1, placeholder="color (e.g., color category)", container=False)
            prompt_part3 = gr.Textbox(label="dress_type", show_label=False, max_lines=1, placeholder="dress_type (e.g., t-shirt, sweatshirt, shirt, hoodie)", container=False)
            prompt_part4 = gr.Textbox(label="design", show_label=False, max_lines=1, placeholder="design", container=False)
            prompt_part5 = gr.Textbox(value="hanging on the plain grey wall", label="Prompt Part 5", show_label=False, interactive=False, container=False, elem_id="prompt_part5", visible=False)
            run_button = gr.Button("Run", scale=0)

        result = gr.Image(label="Result", show_label=False)

        with gr.Accordion("Advanced Settings", open=False):
            negative_prompt = gr.Textbox(label="Negative prompt", max_lines=1, placeholder="Enter a negative prompt", visible=False)
            seed = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=0)
            randomize_seed = gr.Checkbox(label="Randomize seed", value=True)

            with gr.Row():
                width = gr.Slider(label="Width", minimum=256, maximum=MAX_IMAGE_SIZE, step=32, value=512)
                height = gr.Slider(label="Height", minimum=256, maximum=MAX_IMAGE_SIZE, step=32, value=512)

            with gr.Row():
                guidance_scale = gr.Slider(label="Guidance scale", minimum=0.0, maximum=10.0, step=0.1, value=0.0)
                num_inference_steps = gr.Slider(label="Number of inference steps", minimum=1, maximum=12, step=1, value=2)

        gr.Examples(examples=examples, inputs=[prompt_part2, prompt_part3, prompt_part4])
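    # Wire the Run button straight to infer(); Gradio passes the current
    # component values as positional arguments in the order listed below.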
    run_button.click(
        fn=infer,
        inputs=[
            prompt_part1,
            prompt_part2,
            prompt_part3,
            prompt_part4,
            prompt_part5,
            negative_prompt,
            seed,
            randomize_seed,
            width,
            height,
            guidance_scale,
            num_inference_steps,
        ],
        outputs=result,
    )
demo.queue().launch()