# Quick timing check: transcribe one LibriSpeech dummy sample with faster-whisper.
# The commented-out lines are the equivalent transformers Whisper pipeline, kept
# here for comparison.
from time import time
from datasets import load_dataset
from faster_whisper import WhisperModel
# from transformers import WhisperForConditionalGeneration, WhisperProcessor

ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation", cache_dir=".")

# processor = WhisperProcessor.from_pretrained("openai/whisper-large-v3")
# model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-large-v3").to("mps")
model = WhisperModel("large-v3", device="cuda", compute_type="float16", download_root=".")

audio_sample = ds[0]["audio"]
# faster-whisper expects 16 kHz mono float32 audio; the datasets Audio feature
# decodes to float64, so cast before transcribing.
waveform = audio_sample["array"].astype("float32")
sampling_rate = audio_sample["sampling_rate"]  # LibriSpeech is already 16 kHz

tic = time()
# input_features = processor(
#     waveform, sampling_rate=sampling_rate, return_tensors="pt"
# ).input_features
segments, info = model.transcribe(waveform, beam_size=5)
# predicted_ids = model.generate(input_features.to("mps"))

# transcription = processor.batch_decode(predicted_ids, skip_special_tokens=True)

# transcribe() returns a lazy generator: decoding only runs while the segments
# are consumed, so materialize them before stopping the timer.
segments = list(segments)
toc = time()

# print(transcription[0])
for segment in segments:
    print("[%.2fs -> %.2fs] %s" % (segment.start, segment.end, segment.text))
print(toc - tic)