-
Notifications
You must be signed in to change notification settings - Fork 1k
Commit
This commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.
Use Silero VAD in Batched Mode (#936)
Replace Pyannote VAD with Silero to reduce code duplication and requirements
- Loading branch information
1 parent
574e256
commit 2dbca5e
Showing
12 changed files
with
277 additions
and
508 deletions.
There are no files selected for viewing
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -1,4 +1,4 @@ | ||
include faster_whisper/assets/silero_vad.onnx | ||
include faster_whisper/assets/silero_encoder_v5.onnx | ||
include faster_whisper/assets/silero_decoder_v5.onnx | ||
include requirements.txt | ||
include requirements.conversion.txt | ||
include faster_whisper/assets/pyannote_vad_model.bin |
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,83 @@ | ||
import argparse | ||
import json | ||
import os | ||
|
||
from io import BytesIO | ||
|
||
from datasets import load_dataset | ||
from evaluate import load | ||
from pytubefix import YouTube | ||
from torch.utils.data import DataLoader | ||
from tqdm import tqdm | ||
from transformers.models.whisper.english_normalizer import EnglishTextNormalizer | ||
|
||
from faster_whisper import BatchedInferencePipeline, WhisperModel, decode_audio | ||
|
||
|
||
def url_to_audio(row):
    """Attach decoded audio to a dataset row given its YouTube link.

    Downloads the highest-bitrate "audio/mp4" stream for ``row["link"]``
    into an in-memory buffer, decodes it, and stores the waveform under
    ``row["audio"]``.  The row mapping is mutated in place and returned.
    """
    # NOTE(review): performs network I/O per row; assumes row["link"] is a
    # valid, reachable YouTube URL — no error handling here (as in original).
    stream_buffer = BytesIO()
    audio_streams = YouTube(row["link"]).streams.filter(
        only_audio=True, mime_type="audio/mp4"
    )
    best_stream = audio_streams.order_by("bitrate").desc().first()
    best_stream.stream_to_buffer(stream_buffer)
    stream_buffer.seek(0)
    row["audio"] = decode_audio(stream_buffer)
    return row
|
||
|
||
# --- CLI ---------------------------------------------------------------
parser = argparse.ArgumentParser(description="WER benchmark")
parser.add_argument(
    "--audio_numb",
    type=int,
    default=None,
    help=(
        "Specify the number of validation audio files in the dataset."
        " Set to None to retrieve all audio files."
    ),
)
args = parser.parse_args()

# Evaluation metric: word error rate.
wer_metric = load("wer")

# English text normalizer, configured from the JSON file shipped alongside
# this script.
normalizer_path = os.path.join(os.path.dirname(__file__), "normalizer.json")
with open(normalizer_path, "r") as fp:
    normalizer = EnglishTextNormalizer(json.load(fp))

# Stream the evaluation set and lazily attach decoded audio to each row;
# the DataLoader prefetches rows in background workers.
streamed = load_dataset("mobiuslabsgmbh/youtube-commons-asr-eval", streaming=True)
dataset = streamed.map(url_to_audio)
dataset = iter(
    DataLoader(dataset["test"], batch_size=1, prefetch_factor=4, num_workers=2)
)

# Whisper model plus its batched inference wrapper (both on GPU).
model = WhisperModel("large-v3", device="cuda")
pipeline = BatchedInferencePipeline(model, device="cuda")
|
||
|
||
all_transcriptions = []
all_references = []

# Iterate over the streamed dataset and transcribe each clip.
for i, row in tqdm(enumerate(dataset), desc="Evaluating..."):
    # Stop once the requested number of files has been processed.  Checking
    # `is not None` (instead of truthiness) and `>=` (instead of equality on
    # i == audio_numb - 1) makes `--audio_numb 0` actually process zero files
    # rather than silently falling through to the full dataset; for positive
    # values the number of files processed is unchanged.
    if args.audio_numb is not None and i >= args.audio_numb:
        break

    # `result` is a lazy iterable of segments; the info object (language,
    # duration, ...) is not needed for WER.
    result, _info = pipeline.transcribe(
        row["audio"][0],
        batch_size=8,
        word_timestamps=False,
        without_timestamps=True,
    )

    # Segment texts carry their own leading whitespace, so a plain join
    # reconstructs the transcript — presumably; confirm against
    # faster-whisper segment output.
    all_transcriptions.append("".join(segment.text for segment in result))
    all_references.append(row["text"][0])

# Normalize predictions and references so WER ignores differences the
# normalizer is configured to fold away (casing, punctuation, spellings).
all_transcriptions = [normalizer(transcription) for transcription in all_transcriptions]
all_references = [normalizer(reference) for reference in all_references]

# Compute and report the word error rate as a percentage.
wer = 100 * wer_metric.compute(
    predictions=all_transcriptions, references=all_references
)
print("WER: %.3f" % wer)
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -4,3 +4,4 @@ evaluate | |
datasets | ||
memory_profiler | ||
py3nvml | ||
pytubefix |
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Oops, something went wrong.