import gradio as gr
from transformers import pipeline
import numpy as np

# Load the Whisper model once at import time so it is reused across requests.
pipe = pipeline("automatic-speech-recognition", model="openai/whisper-small")


def transcribe(audio):
    """Transcribe an audio clip using the loaded Whisper pipeline.

    Args:
        audio: Path to the recorded/uploaded audio file (Gradio passes a
            filepath string because the input component uses
            type="filepath"), or None when no audio was provided.

    Returns:
        str: The transcribed text, or a fallback message when no audio
        input was received.
    """
    # Guard clause: Gradio passes None when the user submits without audio.
    if audio is None:
        return "No audio input received."
    return pipe(audio)["text"]


iface = gr.Interface(
    fn=transcribe,
    inputs=gr.Audio(sources=["microphone", "upload"], type="filepath"),
    outputs="text",
    title="Whisper Small ASR",
    description="Transcribe audio using the OpenAI Whisper Small model.",
)

# Only start the web server when run as a script, not when imported.
if __name__ == "__main__":
    iface.launch()