File size: 2,827 Bytes
757a712
 
 
 
 
 
 
4ffb03b
757a712
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
19fad68
757a712
 
 
 
 
 
 
 
 
 
 
 
 
 
 
19fad68
757a712
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
4ffb03b
 
757a712
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
import gradio as gr
import librosa
from asr import transcribe
from tts import synthesize, TTS_EXAMPLES

# Per-task language tables: task name -> {iso_code: human-readable name}.
# NOTE: despite the .tsv extension the files are space-delimited — the first
# space separates the ISO code from the language name (which may itself
# contain spaces, hence maxsplit=1).
ALL_LANGUAGES = {}

for task in ["tts", "asr", "lid"]:
    ALL_LANGUAGES.setdefault(task, {})
    with open(f"data/{task}/all_langs.tsv") as f:
        for line in f:
            iso, name = line.split(" ", 1)
            # Strip the trailing newline from the name: without this every
            # dropdown label would carry "\n", and the declared default
            # value "shn-script_latin: Shan" would never match a choice.
            ALL_LANGUAGES[task][iso] = name.strip()


def identify(microphone, file_upload):
    """Identify the spoken language of an audio clip.

    Gradio passes a file path (or None) for each of the microphone
    recording and the uploaded file. Returns a {label: score} mapping for
    the gr.Label output, or an error string when no audio was provided.
    """
    LID_SAMPLING_RATE = 16_000

    # Guard clause: nothing to work with.
    if microphone is None and file_upload is None:
        return "ERROR: You have to either use the microphone or upload an audio file"

    # NOTE(review): this warning is built but never surfaced — only the
    # score dict below reaches the gr.Label output.
    warn_output = ""
    if microphone is not None and file_upload is not None:
        warn_output = (
            "WARNING: You've uploaded an audio file and used the microphone. "
            "The recorded file from the microphone will be used and the uploaded audio will be discarded.\n"
        )

    # The microphone recording takes precedence over the uploaded file.
    audio_fp = microphone if microphone is not None else file_upload
    inputs = librosa.load(audio_fp, sr=LID_SAMPLING_RATE, mono=True)[0]

    # NOTE(review): hard-coded placeholder scores — `inputs` is decoded but
    # unused; presumably a real LID model call belongs here. TODO confirm.
    raw_output = {"eng": 0.9, "hin": 0.04, "heb": 0.03, "ara": 0.02, "fra": 0.01}
    return {f"{k}: {ALL_LANGUAGES['lid'][k]}": v for k, v in raw_output.items()}


demo = gr.Blocks()

# Speech-to-text tab: audio (microphone or upload) + target language -> text.
asr_language_choices = [
    f"{code}: {language}" for code, language in ALL_LANGUAGES["asr"].items()
]
mms_transcribe = gr.Interface(
    fn=transcribe,
    inputs=[
        gr.Audio(source="microphone", type="filepath"),
        gr.Audio(source="upload", type="filepath"),
        gr.Dropdown(
            asr_language_choices,
            label="Language",
            value="shn-script_latin: Shan",
        ),
    ],
    outputs="text",
    title="Speech-to-text",
    description="Transcribe audio!",
    allow_flagging="never",
)

# Text-to-speech tab: text + language + speed -> waveform and filtered text.
tts_language_choices = [
    f"{code}: {language}" for code, language in ALL_LANGUAGES["tts"].items()
]
mms_synthesize = gr.Interface(
    fn=synthesize,
    inputs=[
        gr.Text(label="Input text"),
        gr.Dropdown(
            tts_language_choices,
            label="Language",
            value="shn-script_latin: Shan",
        ),
        gr.Slider(minimum=0.1, maximum=4.0, value=1.0, step=0.1, label="Speed"),
    ],
    outputs=[
        gr.Audio(label="Generated Audio", type="numpy"),
        gr.Text(label="Filtered text after removing OOVs"),
    ],
    examples=TTS_EXAMPLES,
    title="Text-to-speech",
    description="Generate audio!",
    allow_flagging="never",
)

# Language-identification tab: audio (microphone or upload) -> top-10 scores.
mms_identify = gr.Interface(
    fn=identify,
    inputs=[
        gr.Audio(source="microphone", type="filepath"),
        gr.Audio(source="upload", type="filepath"),
    ],
    outputs=gr.Label(num_top_classes=10),
    title="Language Identification",
    # Fixed user-facing typo: "Identity" -> "Identify".
    description="Identify the language of audio!",
    allow_flagging="never",
)

# Assemble the three interfaces into a tabbed layout and start the app.
tab_interfaces = [mms_synthesize, mms_transcribe, mms_identify]
tab_titles = ["Text-to-speech", "Speech-to-text", "Language Identification"]

with demo:
    gr.TabbedInterface(tab_interfaces, tab_titles)

demo.launch()