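# Gradio demo: text-to-image generation with Freepik's flux.1-lite-8B-alpha
# on a Hugging Face ZeroGPU Space (GPU time is allocated per call via @spaces.GPU).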
import gradio as gr
import torch
from diffusers import FluxPipeline
import spaces

@spaces.GPU(duration=70)  # Request a ZeroGPU allocation of up to 70 seconds for this call
def initialize_model():
    model_id = "Freepik/flux.1-lite-8B-alpha"
    pipe = FluxPipeline.from_pretrained(
        model_id,
        torch_dtype=torch.bfloat16
    ).to("cuda")
    return pipe

@spaces.GPU(duration=70)
def generate_image(
    prompt,
    guidance_scale=3.5,
    num_steps=28,
    seed=11,
    width=1024,
    height=1024
):
    # Load the pipeline inside the GPU context (note: it is reloaded on every call)
    pipe = initialize_model()

    # Gradio sliders pass values as floats; cast to the integer types the pipeline expects
    num_steps, seed, width, height = int(num_steps), int(seed), int(width), int(height)

    with torch.inference_mode():
        image = pipe(
            prompt=prompt,
            generator=torch.Generator(device="cuda").manual_seed(seed),
            num_inference_steps=num_steps,
            guidance_scale=guidance_scale,
            height=height,
            width=width,
        ).images[0]
    
    return image

# Create the Gradio interface
demo = gr.Interface(
    fn=generate_image,
    inputs=[
        gr.Textbox(label="Prompt", placeholder="Enter your image description here..."),
        gr.Slider(minimum=1, maximum=20, value=3.5, label="Guidance Scale", step=0.5),
        gr.Slider(minimum=1, maximum=50, value=28, label="Number of Steps", step=1),
        gr.Slider(minimum=1, maximum=1000000, value=11, label="Seed", step=1),
        gr.Slider(minimum=128, maximum=1024, value=1024, label="Width", step=64),
        gr.Slider(minimum=128, maximum=1024, value=1024, label="Height", step=64)
    ],
    outputs=gr.Image(type="pil", label="Generated Image"),
    title="Flux Image Generator (Zero-GPU)",
    description="Generate images using Freepik's Flux model with Zero-GPU allocation",
    examples=[
        ["A close-up image of a green alien with fluorescent skin in the middle of a dark purple forest", 3.5, 28, 11, 1024, 1024],
        ["A serene landscape with mountains at sunset", 3.5, 28, 42, 1024, 1024],
    ]
)

# Launch the app
if __name__ == "__main__":
    demo.launch()