# Import all the necessary packages
import gradio as gr
import torch, librosa, torchaudio
from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor
from pyctcdecode import build_ctcdecoder
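# Assumed environment (a sketch, not pinned requirements): gradio, torch,
# torchaudio, librosa, transformers, and pyctcdecode installed, with the
# `kenlm` package available so pyctcdecode can load the n-gram model, and
# a `kenlm.scorer` file present in the working directory.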

# Define the ASR model: a Wav2Vec2 acoustic model plus a CTC beam-search
# decoder backed by a KenLM n-gram language model
class Speech2Text:
    def __init__(self, model_name='masoudmzb/wav2vec2-xlsr-multilingual-53-fa'):
        self.model = Wav2Vec2ForCTC.from_pretrained(model_name).eval()
        self.processor = Wav2Vec2Processor.from_pretrained(model_name)
        # Sort tokens by id so the decoder labels line up with the logit columns
        vocab = self.processor.tokenizer.get_vocab()
        self.vocab = [token for token, _ in sorted(vocab.items(), key=lambda item: item[1])]
        self.decoder = build_ctcdecoder(self.vocab, kenlm_model_path='kenlm.scorer')

    def wav2feature(self, path):
        # Load the audio and resample it to the rate the model expects (16 kHz)
        speech_array, sampling_rate = torchaudio.load(path)
        target_sr = self.processor.feature_extractor.sampling_rate
        speech_array = librosa.resample(speech_array.squeeze().numpy(),
                                        orig_sr=sampling_rate, target_sr=target_sr)
        return self.processor(speech_array, return_tensors="pt", sampling_rate=target_sr)

    def feature2logits(self, features):
        # Run the acoustic model and return per-frame CTC logits as a numpy array
        with torch.no_grad():
            return self.model(features.input_values).logits.numpy()[0]

    def __call__(self, path):
        # Full pipeline: audio file -> features -> logits -> LM-rescored text
        logits = self.feature2logits(self.wav2feature(path))
        return self.decoder.decode(logits)
        
# Create an instance
s2t = Speech2Text()
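
# Quick offline sanity check; "sample.wav" is a hypothetical local file:
# print(s2t("sample.wav"))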
    
# Build and launch the Gradio demo
gr.Interface(fn=s2t,
             inputs=gr.Audio(sources=["microphone"], type="filepath",
                             label="Record Your Beautiful Persian Voice"),
             outputs=gr.Textbox(label="Output Text"),
             title="Persian ASR using Wav2Vec 2.0 & N-gram LM",
             description="A Persian speech-to-text demo").launch()