import json
import os
import tempfile
import uuid

import gradio as gr
import librosa
import soundfile as sf
from transformers import pipeline

SAMPLE_RATE = 16000  # Hz — rate the ASR model expects
MAX_AUDIO_SECS = 30  # Maximum duration of audio in seconds

# NOTE(review): these Canary task options are not consumed by the
# transformers pipeline call below — confirm whether they should be wired
# into a NeMo-style manifest instead.
src_lang = "en"
tgt_lang = "en"
pnc = "no"

# Load the ASR pipeline once at module import.
# NOTE(review): nvidia/canary-1b is a NeMo model; verify it is actually
# loadable through the transformers ASR pipeline.
asr_pipeline = pipeline("automatic-speech-recognition", model="nvidia/canary-1b")


def convert_audio(audio_filepath, tmpdir, utt_id):
    """
    Convert audio file to 16 kHz mono WAV format.

    Args:
        audio_filepath: path to the input audio (any format librosa reads).
        tmpdir: directory in which the converted WAV is written.
        utt_id: unique utterance id used as the output file stem.

    Returns:
        (out_filename, duration): path of the converted WAV and the clip
        duration in seconds.

    Raises:
        gr.Error: if the clip exceeds MAX_AUDIO_SECS.
    """
    data, sr = librosa.load(audio_filepath, sr=None, mono=True)

    duration = librosa.get_duration(y=data, sr=sr)
    if duration > MAX_AUDIO_SECS:
        raise gr.Error(
            f"Maximum audio duration exceeded. Please provide an audio file "
            f"of up to {MAX_AUDIO_SECS} seconds."
        )

    # Resample only when the source rate differs from the model's rate.
    if sr != SAMPLE_RATE:
        data = librosa.resample(data, orig_sr=sr, target_sr=SAMPLE_RATE)

    out_filename = os.path.join(tmpdir, f"{utt_id}.wav")
    sf.write(out_filename, data, SAMPLE_RATE)
    return out_filename, duration


def transcribe(audio_filepath):
    """
    Transcribe an uploaded or recorded audio file and return the text.

    Raises:
        gr.Error: if no audio input was provided.
    """
    if audio_filepath is None:
        raise gr.Error(
            "Please provide some input audio: either upload an audio file "
            "or use the microphone"
        )

    utt_id = uuid.uuid4()
    with tempfile.TemporaryDirectory() as tmpdir:
        converted_audio_filepath, _duration = convert_audio(
            audio_filepath, tmpdir, str(utt_id)
        )
        # BUG FIX: the transformers ASR pipeline returns a dict
        # {"text": ...}, not a list of {"transcription": ...} records;
        # it also takes no `sampling_rate` kwarg for a filepath input
        # (the file is decoded and resampled internally).
        transcribed_text = asr_pipeline(converted_audio_filepath)["text"]

    return transcribed_text


if __name__ == "__main__":
    # BUG FIX: `gr.inputs.Audio` is the removed pre-1.0 Gradio API; use
    # gr.Audio(type="filepath") so `transcribe` receives a file path.
    # Construct the Interface and launch it directly instead of the
    # `with gr.Interface(...) as iface:` context-manager misuse, and guard
    # with __main__ so importing this module does not start a server.
    iface = gr.Interface(
        fn=transcribe,
        inputs=gr.Audio(type="filepath"),
        outputs="text",
        title="ASR with NeMo Canary Model",
    )
    iface.launch()