import gradio as gr
import torch
from transformers import pipeline
import numpy as np
import time
# Load the three fine-tuned Whisper checkpoints being compared.
pipe_base = pipeline("automatic-speech-recognition", model="aitor-medrano/lara-base-pushed")
pipe_small = pipeline("automatic-speech-recognition", model="aitor-medrano/whisper-small-lara")
pipe_base_2000 = pipeline("automatic-speech-recognition", model="aitor-medrano/whisper-base-lara-2000")
def greet(grabacion):
    inicio = time.time()
    sr, y = grabacion
    # Cast the sample array to 32-bit float and normalize to [-1, 1].
    y = y.astype(np.float32)
    if y.ndim > 1:  # stereo input arrives as (samples, channels); downmix to mono
        y = y.mean(axis=1)
    peak = np.max(np.abs(y))
    if peak > 0:  # guard against division by zero on silent recordings
        y /= peak
    result_base = "base:" + pipe_base({"sampling_rate": sr, "raw": y})["text"]
    result_small = "small:" + pipe_small({"sampling_rate": sr, "raw": y})["text"]
    result_base_2000 = "base_2000:" + pipe_base_2000({"sampling_rate": sr, "raw": y})["text"]
    fin = time.time()
    return result_base, result_small, result_base_2000, fin - inicio
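# Optional smoke test (a sketch, not part of the original app): one second of
# synthetic 16 kHz audio exercises greet() and all three pipelines end to end.
# The 440 Hz tone is a hypothetical stand-in for real speech, so the transcripts
# will be empty or nonsensical; the point is only to confirm the code path runs.
#
# sr_test = 16000
# tone = np.sin(2 * np.pi * 440 * np.arange(sr_test) / sr_test).astype(np.float32)
# print(greet((sr_test, tone)))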
demo = gr.Interface(fn=greet,
                    inputs=[
                        gr.Audio(),
                    ],
                    outputs=[
                        gr.Text(label="Output (Base)"),
                        gr.Text(label="Output (Small)"),
                        gr.Text(label="Output (Base 2000)"),
                        gr.Number(label="Time (s)")
                    ])
demo.launch()