import numpy as np
import random
import torch
import gradio as gr
from diffusers import DiffusionPipeline

device = "cuda" if torch.cuda.is_available() else "cpu"

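# Load SDXL-Turbo: half precision with xformers memory-efficient attention when
# a GPU is available, default full precision on CPU.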
if torch.cuda.is_available():
    pipe = DiffusionPipeline.from_pretrained("stabilityai/sdxl-turbo", torch_dtype=torch.float16, variant="fp16", use_safetensors=True)
    pipe.enable_xformers_memory_efficient_attention()
else:
    pipe = DiffusionPipeline.from_pretrained("stabilityai/sdxl-turbo", use_safetensors=True)
pipe = pipe.to(device)

MAX_SEED = np.iinfo(np.int32).max
MAX_IMAGE_SIZE = 1024

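# Compose the full prompt from the fixed prefix/suffix and the user-supplied
# color, garment type, and design, then run a short SDXL-Turbo sampling pass.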
def infer(prompt_part1, color, dress_type, design, prompt_part5, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps):
    prompt = f"{prompt_part1} {color} colored plain {dress_type} with {design} design, {prompt_part5}"

    if randomize_seed:
        seed = random.randint(0, MAX_SEED)

    # Seed a CPU generator; int() guards against float values coming from the slider.
    generator = torch.Generator().manual_seed(int(seed))
    
    try:
        image = pipe(
            prompt=prompt, 
            negative_prompt=negative_prompt,
            guidance_scale=guidance_scale, 
            num_inference_steps=num_inference_steps, 
            width=width, 
            height=height,
            generator=generator
        ).images[0]
        print("Image generated successfully.")  # Debug: Confirm image generation
        return image
    except Exception as e:
        print(f"Error generating image: {e}")
        return None

examples = [
    ["red", "t-shirt", "yellow stripes"],
    ["blue", "hoodie", "minimalist"],
    ["red", "sweatshirt", "geometric design"],
]

css = """
#col-container {
    margin: 0 auto;
    max-width: 520px;
}
"""

power_device = "GPU" if torch.cuda.is_available() else "CPU"

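# Gradio UI: the first and last prompt parts are fixed, hidden textboxes; users
# only fill in the color, garment type, and design fields.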
with gr.Blocks(css=css) as demo:
    with gr.Column(elem_id="col-container"):
        gr.Markdown(f"""
        # Text-to-Image Gradio Template
        Currently running on {power_device}.
        """)
        
        with gr.Row():
            prompt_part1 = gr.Textbox(value="a single", label="Prompt Part 1", show_label=False, interactive=False, container=False, elem_id="prompt_part1", visible=False)
            prompt_part2 = gr.Textbox(label="color", show_label=False, max_lines=1, placeholder="color (e.g., color category)", container=False)
            prompt_part3 = gr.Textbox(label="dress_type", show_label=False, max_lines=1, placeholder="dress_type (e.g., t-shirt, sweatshirt, shirt, hoodie)", container=False)
            prompt_part4 = gr.Textbox(label="design", show_label=False, max_lines=1, placeholder="design", container=False)
            prompt_part5 = gr.Textbox(value="hanging on the plain grey wall", label="Prompt Part 5", show_label=False, interactive=False, container=False, elem_id="prompt_part5", visible=False)
            run_button = gr.Button("Run", scale=0)
        
        result = gr.Image(label="Result", show_label=False)

        with gr.Accordion("Advanced Settings", open=False):
            negative_prompt = gr.Textbox(label="Negative prompt", max_lines=1, placeholder="Enter a negative prompt", visible=False)
            seed = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=0)
            randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
            with gr.Row():
                width = gr.Slider(label="Width", minimum=256, maximum=MAX_IMAGE_SIZE, step=32, value=512)
                height = gr.Slider(label="Height", minimum=256, maximum=MAX_IMAGE_SIZE, step=32, value=512)
            with gr.Row():
                guidance_scale = gr.Slider(label="Guidance scale", minimum=0.0, maximum=10.0, step=0.1, value=0.0)
                num_inference_steps = gr.Slider(label="Number of inference steps", minimum=1, maximum=12, step=1, value=2)
        
        gr.Examples(examples=examples, inputs=[prompt_part2, prompt_part3, prompt_part4])

    # Wire the button directly to infer, passing the live component values as
    # inputs so user edits (not just the initial defaults) reach the pipeline.
    run_button.click(
        fn=infer,
        inputs=[
            prompt_part1, prompt_part2, prompt_part3, prompt_part4, prompt_part5,
            negative_prompt, seed, randomize_seed, width, height,
            guidance_scale, num_inference_steps,
        ],
        outputs=result,
    )

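# Enable the request queue before launching the app.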
demo.queue().launch()