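"""Gradio app that generates images with Freepik's flux.1-lite-8B-alpha
via diffusers' FluxPipeline, running on a Hugging Face ZeroGPU Space."""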
import gradio as gr
import torch
from diffusers import FluxPipeline
import spaces


@spaces.GPU(duration=70)  # Request the GPU for up to 70 seconds per call
def initialize_model():
    model_id = "Freepik/flux.1-lite-8B-alpha"
    pipe = FluxPipeline.from_pretrained(
        model_id,
        torch_dtype=torch.bfloat16
    ).to("cuda")
    return pipe
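
# Note: initialize_model() is called from inside generate_image()'s GPU context,
# so the pipeline is re-loaded on every request. An alternative (not done here)
# would be to build the pipeline once at module level and reuse it across calls.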


@spaces.GPU(duration=70)
def generate_image(
    prompt,
    guidance_scale=3.5,
    num_steps=28,
    seed=11,
    width=1024,
    height=1024
):
    # Initialize the model within the GPU context
    pipe = initialize_model()
    with torch.inference_mode():
        image = pipe(
            prompt=prompt,
            generator=torch.Generator(device="cuda").manual_seed(seed),
            num_inference_steps=num_steps,
            guidance_scale=guidance_scale,
            height=height,
            width=width,
        ).images[0]
    return image


# Create the Gradio interface
demo = gr.Interface(
    fn=generate_image,
    inputs=[
        gr.Textbox(label="Prompt", placeholder="Enter your image description here..."),
        gr.Slider(minimum=1, maximum=20, value=3.5, label="Guidance Scale", step=0.5),
        gr.Slider(minimum=1, maximum=50, value=28, label="Number of Steps", step=1),
        gr.Slider(minimum=1, maximum=1000000, value=11, label="Seed", step=1),
        gr.Slider(minimum=128, maximum=1024, value=1024, label="Width", step=64),
        gr.Slider(minimum=128, maximum=1024, value=1024, label="Height", step=64)
    ],
    outputs=gr.Image(type="pil", label="Generated Image"),
    title="Flux Image Generator (Zero-GPU)",
    description="Generate images using Freepik's Flux model with Zero-GPU allocation",
    examples=[
        ["A close-up image of a green alien with fluorescent skin in the middle of a dark purple forest", 3.5, 28, 11, 1024, 1024],
        ["A serene landscape with mountains at sunset", 3.5, 28, 42, 1024, 1024],
    ]
)


# Launch the app
if __name__ == "__main__":
    demo.launch()