MOM / app.py
import gradio as gr
from transformers import pipeline

# Load Whisper ASR model
transcriber = pipeline(model="openai/whisper-base")
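# Note: openai/whisper-base transcribes audio in roughly 30-second windows; for
# longer meeting recordings, chunked long-form transcription can be enabled,
# e.g. (assumed tweak, not part of the original app):
# transcriber = pipeline(model="openai/whisper-base", chunk_length_s=30)
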
# Load summarization model
summarization_model = pipeline("summarization")
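# Note: passing only the task name lets transformers pick its default checkpoint
# (a DistilBART model fine-tuned on CNN/DailyMail at the time of writing).
# Pinning a model explicitly is more reproducible, e.g. (assumed alternative):
# summarization_model = pipeline("summarization", model="sshleifer/distilbart-cnn-12-6")
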
def transcribe_and_summarize(audio):
    # Step 1: Transcribe the uploaded audio to text with Whisper
    transcription = transcriber(audio)
    print('transcription', transcription)
    # Step 2: Summarize the transcribed text
    summary = summarization_model(transcription['text'])
    print('summary', summary)
    return transcription['text'], summary[0]['summary_text']
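
# Quick local check without the UI (hypothetical audio path, not part of the original app):
# text, minutes = transcribe_and_summarize("meeting.wav")
# print(minutes)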

# Create Gradio interface
with gr.Blocks() as iface:
    gr.Markdown("# Audio Transcriber & Summarizer")
    with gr.Row():
        audio_input = gr.Audio(type="filepath", label="Upload Audio")
        transcription_output = gr.Textbox(
            label="Transcribed Text",
            info="Raw transcript of the uploaded audio")
        summary_output = gr.Textbox(
            label="Summary",
            info="Meeting minutes generated from the transcript")
    summarize_button = gr.Button("Transcribe & Summarize")
    summarize_button.click(
        transcribe_and_summarize,
        inputs=[audio_input],
        outputs=[transcription_output, summary_output]
    )

# Launch the app
iface.launch(share=True) # 'share=True' to get a public link