"""Gradio web app that captions uploaded images using the BLIP model."""

from transformers import pipeline
import gradio as gr
from PIL import Image

# Load the captioning pipeline once at module import so every request
# reuses the same model instead of reloading it per call.
pipe = pipeline("image-to-text", model="Salesforce/blip-image-captioning-large")


def generate_caption(image):
    """Return a caption string for *image*.

    Parameters
    ----------
    image : PIL.Image.Image or None
        The uploaded image, delivered by the ``gr.Image(type="pil")`` input.
        Gradio passes ``None`` when the user submits without uploading.

    Returns
    -------
    str
        The model-generated caption text.

    Raises
    ------
    gr.Error
        If no image was provided, so the UI shows a clear message instead
        of an opaque pipeline traceback.
    """
    if image is None:
        raise gr.Error("Please upload an image first.")
    captions = pipe(image)
    # The pipeline returns a list of dicts; take the top caption.
    return captions[0]["generated_text"]


demo = gr.Interface(
    fn=generate_caption,
    inputs=gr.Image(type="pil", label="Upload an Image"),
    outputs=gr.Textbox(label="Generated Caption"),
    title="Image Caption Generator",
    description="Upload an image to generate a caption using the BLIP model.",
)

if __name__ == "__main__":
    # NOTE(review): share=True exposes a public tunnel URL — confirm that is intended.
    demo.launch(share=True)