# Import all the necessary packages
import gradio as gr
import torch
import librosa
import torchaudio
from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor
from pyctcdecode import build_ctcdecoder


# Define the ASR model wrapper
class Speech2Text:
    def __init__(self):
        # Build a CTC beam-search decoder over the tokenizer vocabulary,
        # rescored with a KenLM language model
        self.vocab = list(processor.tokenizer.get_vocab().keys())
        self.decoder = build_ctcdecoder(self.vocab, kenlm_model_path="kenlm.scorer")

    def wav2feature(self, path):
        # Load the audio file and resample it to the model's expected sampling rate
        speech_array, sampling_rate = torchaudio.load(path)
        speech_array = librosa.resample(
            speech_array.squeeze().numpy(),
            orig_sr=sampling_rate,
            target_sr=processor.feature_extractor.sampling_rate,
        )
        return processor(
            speech_array,
            return_tensors="pt",
            sampling_rate=processor.feature_extractor.sampling_rate,
        )

    def feature2logits(self, features):
        # Run the acoustic model and return the per-frame logits as a NumPy array
        with torch.no_grad():
            return (
                wav2vec_model(features.input_values.to(device))
                .logits.cpu()
                .numpy()[0]
            )

    def __call__(self, path):
        logits = self.feature2logits(self.wav2feature(path))
        return self.decoder.decode(logits)


# Load the model and the processor (tokenizer + feature extractor)
model_name = "masoudmzb/wav2vec2-xlsr-multilingual-53-fa"
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
wav2vec_model = Wav2Vec2ForCTC.from_pretrained(model_name).to(device).eval()
processor = Wav2Vec2Processor.from_pretrained(model_name)
s2t = Speech2Text()


def asr(path):
    return s2t(path)


# themes: "default", "huggingface", "seafoam", "grass", "peach"
gr.Interface(
    asr,
    inputs=gr.inputs.Audio(
        source="microphone",
        type="filepath",
        optional=True,
        label="Record Your Beautiful Persian Voice",
    ),
    outputs=gr.outputs.Textbox(label="Output Text"),
    title="Persian ASR using Wav2Vec 2.0",
    description="This application transcribes the given audio input and displays the text.",
    examples=[["example.wav"]],
    theme="huggingface",
).launch()