|
import gradio as gr |
|
from diffusers import StableDiffusionPipeline |
|
import torch |
|
|
|
|
|
def load_model(model_id): |
|
if torch.cuda.is_available(): |
|
device = "cuda" |
|
info = "Running on GPU (CUDA)" |
|
else: |
|
device = "cpu" |
|
info = "Running on CPU" |
|
|
|
|
|
pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16 if device == "cuda" else torch.float32) |
|
pipe = pipe.to(device) |
|
|
|
return pipe, info |
|
|
|
|
|
def generate_image(model_id, prompt): |
|
pipe, info = load_model(model_id) |
|
image = pipe(prompt).images[0] |
|
return image, info |
|
|
|
|
|
with gr.Blocks() as demo: |
|
gr.Markdown("## Custom Text-to-Image Generator") |
|
|
|
with gr.Row(): |
|
with gr.Column(): |
|
model_id = gr.Textbox(label="Enter Model ID (e.g., nevreal/vMurderDrones)", placeholder="Model ID") |
|
prompt = gr.Textbox(label="Enter your prompt", placeholder="Describe the image you want to generate") |
|
generate_btn = gr.Button("Generate Image") |
|
|
|
with gr.Column(): |
|
output_image = gr.Image(label="Generated Image") |
|
device_info = gr.Markdown() |
|
|
|
|
|
generate_btn.click(fn=generate_image, inputs=[model_id, prompt], outputs=[output_image, device_info]) |
|
|
|
|
|
demo.launch() |
|
|