import gradio as gr
import torch
from transformers import pipeline
import numpy as np
import time
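
# ASR pipelines for the three fine-tuned checkpoints being compared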
pipe_base = pipeline("automatic-speech-recognition", model="aitor-medrano/lara-base-pushed")
pipe_small = pipeline("automatic-speech-recognition", model="aitor-medrano/whisper-small-lara")
pipe_base_1600 = pipeline("automatic-speech-recognition", model="aitor-medrano/whisper-base-lara-1600")
def greet(grabacion):
    inicio = time.time()
    sr, y = grabacion
    # Convert the sample array to 32-bit floats and normalise to [-1, 1]
    y = y.astype(np.float32)
    if y.ndim > 1:  # downmix stereo recordings to mono
        y = y.mean(axis=1)
    peak = np.max(np.abs(y))
    if peak > 0:
        y /= peak
    # Run each model on the same audio, timing each one individually
    inicio_base = time.time()
    result_base = "base:" + pipe_base({"sampling_rate": sr, "raw": y})["text"]
    fin_base = time.time()
    inicio_small = time.time()
    result_small = "small:" + pipe_small({"sampling_rate": sr, "raw": y})["text"]
    fin_small = time.time()
    inicio_1600 = time.time()
    result_base_1600 = "base_1600:" + pipe_base_1600({"sampling_rate": sr, "raw": y})["text"]
    fin_1600 = time.time()
    fin = time.time()
    return (result_base, fin_base - inicio_base,
            result_small, fin_small - inicio_small,
            result_base_1600, fin_1600 - inicio_1600,
            fin - inicio)
demo = gr.Interface(
    fn=greet,
    inputs=[
        gr.Audio(),
    ],
    outputs=[
        gr.Text(label="Output (Base)"),
        gr.Number(label="Time (Base)"),
        gr.Text(label="Output (Small)"),
        gr.Number(label="Time (Small)"),
        gr.Text(label="Output (Base 1600)"),
        gr.Number(label="Time (1600)"),
        gr.Number(label="Time (Total)"),
    ])
demo.launch()