import gradio as gr
import torch
from diffusers import StableDiffusionPipeline

# Cache of already-loaded pipelines keyed by model id, so repeated generations
# with the same model do not reload multi-GB weights on every button click.
_PIPELINE_CACHE = {}


def load_model(model_id):
    """Load (or reuse a cached) Stable Diffusion pipeline on the best device.

    Automatically selects CUDA when available, falling back to CPU. Uses
    float16 on GPU to halve memory, float32 on CPU (fp16 is poorly supported
    on CPU backends).

    Args:
        model_id: Hugging Face model identifier (e.g. "nevreal/vMurderDrones").

    Returns:
        (pipe, info): the ready-to-use pipeline and a human-readable string
        describing which device is in use.
    """
    if torch.cuda.is_available():
        device = "cuda"
        info = "Running on GPU (CUDA)"
    else:
        device = "cpu"
        info = "Running on CPU"

    pipe = _PIPELINE_CACHE.get(model_id)
    if pipe is None:
        # First request for this model: load weights and move to the device.
        pipe = StableDiffusionPipeline.from_pretrained(
            model_id,
            torch_dtype=torch.float16 if device == "cuda" else torch.float32,
        )
        pipe = pipe.to(device)
        _PIPELINE_CACHE[model_id] = pipe
    return pipe, info


def generate_image(model_id, prompt):
    """Generate one image from *prompt* using the pipeline for *model_id*.

    Args:
        model_id: Hugging Face model identifier entered by the user.
        prompt: text description of the desired image.

    Returns:
        (image, info): the generated PIL image (or None on bad input) and a
        status / device-info string shown in the UI.
    """
    # Guard against blank inputs: surface a message instead of raising
    # an unhandled exception inside the Gradio worker.
    if not model_id or not model_id.strip():
        return None, "Please enter a model ID."
    if not prompt or not prompt.strip():
        return None, "Please enter a prompt."

    pipe, info = load_model(model_id.strip())
    image = pipe(prompt).images[0]
    return image, info


# Create the Gradio interface
with gr.Blocks() as demo:
    gr.Markdown("## Custom Text-to-Image Generator")
    with gr.Row():
        with gr.Column():
            model_id = gr.Textbox(
                label="Enter Model ID (e.g., nevreal/vMurderDrones)",
                placeholder="Model ID",
            )
            prompt = gr.Textbox(
                label="Enter your prompt",
                placeholder="Describe the image you want to generate",
            )
            generate_btn = gr.Button("Generate Image")
        with gr.Column():
            output_image = gr.Image(label="Generated Image")
            device_info = gr.Markdown()  # To display if GPU or CPU is used

    # Link the button to the image generation function
    generate_btn.click(
        fn=generate_image,
        inputs=[model_id, prompt],
        outputs=[output_image, device_info],
    )

# Launch only when executed as a script, so the module stays importable
# (e.g. for tests or embedding the demo elsewhere).
if __name__ == "__main__":
    demo.launch()