# Imports (only the packages used at inference time)
import pickle

import gradio as gr
import numpy as np
import tensorflow as tf
def get_waveform_label(file):
    """Read a WAV file and return its mono waveform as a 1-D float tensor."""
    # Gradio passes a file object; .name holds the path on disk.
    audio_binary = tf.io.read_file(file.name)
    audio, _ = tf.audio.decode_wav(audio_binary)
    # decode_wav returns shape (samples, 1) for mono input; drop the channel axis.
    waveform = tf.squeeze(audio, axis=-1)
    return waveform
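
# Note: tf.audio.decode_wav handles 16-bit PCM WAV data. For a stereo file the
# decoded tensor has shape (samples, 2) and the squeeze above would raise; a
# hedged down-mix step (an assumption, not part of the original pipeline) could be:
#
#     audio = tf.reduce_mean(audio, axis=1, keepdims=True)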
def get_spectrogram_label(audio):
    """Zero-pad the waveform to 300,000 samples and compute its STFT magnitude."""
    padding = tf.zeros([300000] - tf.shape(audio), dtype=tf.float32)
    wave = tf.cast(audio, tf.float32)
    eq_length = tf.concat([wave, padding], 0)
    spectrogram = tf.signal.stft(eq_length, frame_length=210, frame_step=110)
    spectrogram = tf.abs(spectrogram)
    # Add a trailing channel axis so the model sees an image-like input.
    spectrogram = tf.expand_dims(spectrogram, -1)
    return spectrogram
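
# Sanity check (optional): with 300,000 padded samples, frame_length=210 and
# frame_step=110, tf.signal.stft rounds the FFT length up to 256, so every
# spectrogram should come out as shape (2726, 129, 1). A quick check on a
# dummy one-second 16 kHz clip (an illustrative value, not from this repo):
#
#     dummy = tf.zeros([16000], dtype=tf.float32)
#     assert get_spectrogram_label(dummy).shape == (2726, 129, 1)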
# Load the saved model
with open('audio_classifier_model.pkl', 'rb') as f:
    model = pickle.load(f)
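
# Note: unpickling a Keras model is fragile across TensorFlow versions. If the
# model had instead been saved with model.save('audio_classifier_model.h5')
# (an assumption; this repo ships a .pkl), the equivalent load would be:
#
#     model = tf.keras.models.load_model('audio_classifier_model.h5')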
def get_audio(audio):
    """Classify an uploaded audio file as a cat or dog sound."""
    audio_waveform = get_waveform_label(audio)
    audio_spect = get_spectrogram_label(audio_waveform)
    final_feat = np.array([audio_spect])
    # predict() returns class scores; take the index of the largest one.
    pred = np.argmax(model.predict(final_feat), axis=1)[0]
    return "Dog Audio" if pred == 1 else "Cat Audio"
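
# Quick local smoke test (optional). With type="file", Gradio hands get_audio a
# tempfile-like object whose .name is the path on disk, so a simple namespace
# can stand in for it (dog_barking_102.wav is the bundled example clip):
#
#     from types import SimpleNamespace
#     print(get_audio(SimpleNamespace(name='dog_barking_102.wav')))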
# Gradio interface
inputs = gr.inputs.Audio(label="Input Audio", type="file")
outputs = "text"
title = "Cat/Dog Audio Classification"
description = "Gradio demo app for cat and dog audio classification with TensorFlow. To use it, upload an audio clip in .wav format, or try the sample clip in the Examples section below."
examples = [
['dog_barking_102.wav']
]
gr.Interface(get_audio, inputs, outputs, title=title, description=description, examples=examples).launch()