import gradio as gr
import numpy as np
import scipy.io.wavfile
from whisperspeech.pipeline import Pipeline

def process_audio(audio_elem):
    # Gradio passes audio as a (sample_rate, numpy_array) tuple
    sample_rate, data = audio_elem

    # Save the recording as a WAV file (scipy.io.wavfile writes WAV data,
    # so use a .wav extension and the actual sample rate)
    scipy.io.wavfile.write('test.wav', sample_rate, data)

    # Load the WhisperSpeech pipeline (loading it once at module level
    # would avoid reloading the model on every call)
    pipe = Pipeline(s2a_ref='collabora/whisperspeech:s2a-q4-base-en+pl.model')

    # Extract the speaker embedding from the saved recording
    speaker = pipe.extract_spk_emb("test.wav")
    speaker = speaker.cpu().numpy()  # move the tensor from GPU to CPU and convert to a numpy array
    print(speaker)

    # Save the embedding locally and return it as a downloadable file
    np.savez_compressed("speaker", features=speaker)
    return "speaker.npz"


# Define and launch the Gradio interface: recorded/uploaded audio in, speaker embedding file out
iface = gr.Interface(fn=process_audio, inputs="audio", outputs="file")
iface.launch()
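
# A rough sketch of how the saved embedding could be reused for voice cloning
# (this assumes the installed WhisperSpeech version accepts a precomputed speaker
# embedding via the `speaker` argument of Pipeline.generate_to_file; the names and
# arguments below are illustrative, not taken from this repo):
#
#   import torch
#   emb = torch.from_numpy(np.load("speaker.npz")["features"])
#   pipe = Pipeline(s2a_ref='collabora/whisperspeech:s2a-q4-base-en+pl.model')
#   pipe.generate_to_file("cloned.wav", "Hello, this is my cloned voice.", speaker=emb)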