Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

added mock testing and local model #8

Closed
wants to merge 4 commits into from
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
52 changes: 45 additions & 7 deletions src/stt_data_with_llm/audio_parser.py
Original file line number Diff line number Diff line change
@@ -1,10 +1,12 @@
import io
import json
import logging
import os

import librosa
import requests
import torchaudio
from dotenv import load_dotenv
from pyannote.audio import Pipeline
from pydub import AudioSegment

Expand All @@ -13,10 +15,13 @@
AUDIO_SEG_UPPER_LIMIT,
HEADERS,
HYPER_PARAMETERS,
USE_AUTH_TOKEN,
)
from stt_data_with_llm.util import setup_logging

# load the environment variable
load_dotenv()

USE_AUTH_TOKEN = os.getenv("use_auth_token")
# Call the setup_logging function at the beginning of your script
setup_logging("audio_parser.log")

Expand Down Expand Up @@ -62,15 +67,21 @@ def sec_to_frame(sec, sr):
def initialize_vad_pipeline():
    """
    Initializes the Voice Activity Detection (VAD) pipeline using Pyannote.

    Tries the hosted HuggingFace model first; if that fails (no network,
    missing/invalid auth token), falls back to the local model copy bundled
    under ``tests/pyannote_vad_model``.

    Returns:
        Pipeline: Initialized VAD pipeline
    """
    logging.info("Initializing Voice Activity Detection pipeline...")
    try:
        vad_pipeline = Pipeline.from_pretrained(
            "pyannote/voice-activity-detection",
            use_auth_token=USE_AUTH_TOKEN,
        )
    except Exception as e:
        # Offline / auth failure: fall back to the checked-in local model.
        logging.warning(f"Failed to load online model: {e}. Using local model.")
        vad_pipeline = Pipeline.from_pretrained(
            "tests/pyannote_vad_model",
            use_auth_token=False,
        )
    vad_pipeline.instantiate(HYPER_PARAMETERS)
    logging.info("VAD pipeline initialized successfully.")
    return vad_pipeline
Expand Down Expand Up @@ -287,6 +298,26 @@ def process_non_mute_segments(
return counter


def generate_vad_output(audio_file, output_json):
    """Run VAD on an audio file and save the detected speech timeline as JSON.

    Args:
        audio_file (str): Path to the input audio file.
        output_json (str): Path of the JSON file to write. The file contains
            ``{"timeline": [{"start": float, "end": float}, ...]}`` where each
            entry is one detected speech segment in seconds.
    """
    pipeline = initialize_vad_pipeline()
    vad = pipeline(audio_file)
    vad_output = {
        "timeline": [
            {"start": segment.start, "end": segment.end}
            for segment in vad.get_timeline().support()
        ]
    }

    # ensure_ascii=False keeps any non-ASCII metadata readable in the file
    with open(output_json, "w", encoding="utf-8") as file:
        json.dump(vad_output, file, ensure_ascii=False, indent=2)


def get_split_audio(
audio_data,
full_audio_id,
Expand Down Expand Up @@ -315,10 +346,17 @@ def get_split_audio(

if not os.path.exists(output_folder):
os.makedirs(output_folder)

vad_output_folder = "tests/data/vad_output"
if not os.path.exists(vad_output_folder):
os.makedirs(vad_output_folder)

# initialize vad pipeline
pipeline = initialize_vad_pipeline()
vad = pipeline(temp_audio_file)

generate_vad_output(
temp_audio_file, f"{vad_output_folder}/{full_audio_id}_vad_output.json"
)
original_audio_segment = AudioSegment.from_file(temp_audio_file)
original_audio_ndarray, sampling_rate = torchaudio.load(temp_audio_file)
original_audio_ndarray = original_audio_ndarray[0]
Expand Down
2 changes: 0 additions & 2 deletions src/stt_data_with_llm/config.py
Original file line number Diff line number Diff line change
Expand Up @@ -36,5 +36,3 @@
"upgrade-insecure-requests": "1",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/129.0.0.0 Safari/537.36 Edg/129.0.0.0", # noqa: E501
}

USE_AUTH_TOKEN = "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"
27 changes: 27 additions & 0 deletions tests/pyannote_vad_model/.gitattributes
Original file line number Diff line number Diff line change
@@ -0,0 +1,27 @@
*.7z filter=lfs diff=lfs merge=lfs -text
*.arrow filter=lfs diff=lfs merge=lfs -text
*.bin filter=lfs diff=lfs merge=lfs -text
*.bin.* filter=lfs diff=lfs merge=lfs -text
*.bz2 filter=lfs diff=lfs merge=lfs -text
*.ftz filter=lfs diff=lfs merge=lfs -text
*.gz filter=lfs diff=lfs merge=lfs -text
*.h5 filter=lfs diff=lfs merge=lfs -text
*.joblib filter=lfs diff=lfs merge=lfs -text
*.lfs.* filter=lfs diff=lfs merge=lfs -text
*.model filter=lfs diff=lfs merge=lfs -text
*.msgpack filter=lfs diff=lfs merge=lfs -text
*.onnx filter=lfs diff=lfs merge=lfs -text
*.ot filter=lfs diff=lfs merge=lfs -text
*.parquet filter=lfs diff=lfs merge=lfs -text
*.pb filter=lfs diff=lfs merge=lfs -text
*.pt filter=lfs diff=lfs merge=lfs -text
*.pth filter=lfs diff=lfs merge=lfs -text
*.rar filter=lfs diff=lfs merge=lfs -text
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
*.tar.* filter=lfs diff=lfs merge=lfs -text
*.tflite filter=lfs diff=lfs merge=lfs -text
*.tgz filter=lfs diff=lfs merge=lfs -text
*.xz filter=lfs diff=lfs merge=lfs -text
*.zip filter=lfs diff=lfs merge=lfs -text
*.zstandard filter=lfs diff=lfs merge=lfs -text
*tfevents* filter=lfs diff=lfs merge=lfs -text
70 changes: 70 additions & 0 deletions tests/pyannote_vad_model/README.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,70 @@
---
tags:
- pyannote
- pyannote-audio
- pyannote-audio-pipeline
- audio
- voice
- speech
- speaker
- voice-activity-detection
- automatic-speech-recognition
datasets:
- ami
- dihard
- voxconverse
license: mit
extra_gated_prompt: "The collected information will help acquire a better knowledge of pyannote.audio userbase and help its maintainers apply for grants to improve it further. If you are an academic researcher, please cite the relevant papers in your own publications using the model. If you work for a company, please consider contributing back to pyannote.audio development (e.g. through unrestricted gifts). We also provide scientific consulting services around speaker diarization and machine listening."
extra_gated_fields:
Company/university: text
Website: text
I plan to use this model for (task, type of audio data, etc): text
---

Using this open-source model in production?
Consider switching to [pyannoteAI](https://www.pyannote.ai) for better and faster options.

# 🎹 Voice activity detection

Relies on pyannote.audio 2.1: see [installation instructions](https://github.com/pyannote/pyannote-audio#installation).


```python
# 1. visit hf.co/pyannote/segmentation and accept user conditions
# 2. visit hf.co/settings/tokens to create an access token
# 3. instantiate pretrained voice activity detection pipeline

from pyannote.audio import Pipeline
pipeline = Pipeline.from_pretrained("pyannote/voice-activity-detection",
use_auth_token="ACCESS_TOKEN_GOES_HERE")
output = pipeline("audio.wav")

for speech in output.get_timeline().support():
# active speech between speech.start and speech.end
...
```


## Citation

```bibtex
@inproceedings{Bredin2021,
Title = {{End-to-end speaker segmentation for overlap-aware resegmentation}},
Author = {{Bredin}, Herv{\'e} and {Laurent}, Antoine},
Booktitle = {Proc. Interspeech 2021},
Address = {Brno, Czech Republic},
Month = {August},
Year = {2021},
}
```

```bibtex
@inproceedings{Bredin2020,
Title = {{pyannote.audio: neural building blocks for speaker diarization}},
Author = {{Bredin}, Herv{\'e} and {Yin}, Ruiqing and {Coria}, Juan Manuel and {Gelly}, Gregory and {Korshunov}, Pavel and {Lavechin}, Marvin and {Fustes}, Diego and {Titeux}, Hadrien and {Bouaziz}, Wassim and {Gill}, Marie-Philippe},
Booktitle = {ICASSP 2020, IEEE International Conference on Acoustics, Speech, and Signal Processing},
Address = {Barcelona, Spain},
Month = {May},
Year = {2020},
}
```
10 changes: 10 additions & 0 deletions tests/pyannote_vad_model/config.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,10 @@
pipeline:
name: pyannote.audio.pipelines.VoiceActivityDetection
params:
segmentation: pyannote/segmentation@Interspeech2021

params:
min_duration_off: 0.09791355693027545
min_duration_on: 0.05537587440407595
offset: 0.4806866463041527
onset: 0.8104268538848918
106 changes: 83 additions & 23 deletions tests/test_audio_parser.py
Original file line number Diff line number Diff line change
@@ -1,32 +1,92 @@
import json
import logging
from unittest import TestCase, mock

from stt_data_with_llm.audio_parser import get_audio, get_split_audio
from stt_data_with_llm.config import AUDIO_SEG_LOWER_LIMIT, AUDIO_SEG_UPPER_LIMIT


def test_get_split_audio():
    """
    Verify get_split_audio yields the expected number of segments per audio URL.
    """
    audio_urls = {
        "NW_001": "https://www.rfa.org/tibetan/sargyur/golok-china-religious-restriction-08202024054225.html/@@stream",  # noqa
        "NW_002": "https://vot.org/wp-content/uploads/2024/03/tc88888888888888.mp3",
        "NW_003": "https://voa-audio-ns.akamaized.net/vti/2024/04/13/01000000-0aff-0242-a7bb-08dc5bc45613.mp3",
    }
    # Download each audio, split it, and record the resulting segment count.
    segment_counts = {
        seg_id: len(
            get_split_audio(
                get_audio(url), seg_id, AUDIO_SEG_LOWER_LIMIT, AUDIO_SEG_UPPER_LIMIT
            )
        )
        for seg_id, url in audio_urls.items()
    }
    with open("tests/data/expected_audio_data.json", encoding="utf-8") as file:
        expected_counts = json.load(file)
    assert segment_counts == expected_counts
class TestGetSplitAudio(TestCase):
    # Decorator order: the bottom-most patch (Pipeline) is the first mock arg.
    @mock.patch("stt_data_with_llm.audio_parser.initialize_vad_pipeline")
    @mock.patch("stt_data_with_llm.audio_parser.Pipeline")
    def test_get_split_audio(self, mock_pipeline, mock_initialize_vad):
        """
        Test get_split_audio using pre-recorded VAD outputs, so the test does
        not need the pyannote model or a HuggingFace auth token.
        """
        # Pre-recorded VAD timelines, one JSON file per audio id.
        vad_outputs = {
            "NW_001": "./tests/vad_output/NW_001_vad_output.json",
            "NW_002": "./tests/vad_output/NW_002_vad_output.json",
            "NW_003": "./tests/vad_output/NW_003_vad_output.json",
        }
        # Load all VAD outputs dynamically
        mock_vad_results = {}
        for seg_id, vad_path in vad_outputs.items():
            with open(vad_path, encoding="utf-8") as file:
                mock_vad_results[seg_id] = json.load(file)

        class MockVADPipeline:
            # Stand-in for a pyannote Pipeline bound to one audio id.
            def __init__(self, seg_id):
                self.seg_id = seg_id

            def __call__(self, audio_file):
                return MockVADResult(self.seg_id)

        class MockVADResult:
            # Mimics the object returned by running the pipeline on a file.
            def __init__(self, seg_id):
                self.vad_output = mock_vad_results[seg_id]

            def get_timeline(self):
                class MockTimeline:
                    def __init__(self, timeline):
                        self.timeline = timeline

                    def support(self):
                        # Lightweight objects exposing .start / .end like
                        # pyannote Segment instances.
                        return [
                            type(
                                "Segment",
                                (),
                                {"start": seg["start"], "end": seg["end"]},
                            )
                            for seg in self.timeline
                        ]

                return MockTimeline(self.vad_output["timeline"])

        # Setup mock behavior
        def mock_initialize_pipeline(seg_id):
            try:
                return MockVADPipeline(seg_id)
            except Exception as e:
                logging.warning(
                    f"Mocking failed: {e}. Falling back to actual function."
                )
                return None

        audio_urls = {
            "NW_001": "https://www.rfa.org/tibetan/sargyur/golok-china-religious-restriction-08202024054225.html/@@stream",  # noqa
            "NW_002": "https://vot.org/wp-content/uploads/2024/03/tc88888888888888.mp3",
            "NW_003": "https://voa-audio-ns.akamaized.net/vti/2024/04/13/01000000-0aff-0242-a7bb-08dc5bc45613.mp3",
        }
        num_of_seg_in_audios = {}
        for seg_id, audio_url in audio_urls.items():
            # Renamed from `mock_pipeline` to avoid shadowing the patch
            # parameter of the same name.
            pipeline_stub = mock_initialize_pipeline(seg_id)
            if pipeline_stub:
                mock_initialize_vad.return_value = pipeline_stub
            else:
                # Clear any previously configured side effect; the patched
                # function then returns a default MagicMock.
                mock_initialize_vad.side_effect = None

            audio_data = get_audio(audio_url)
            split_audio_data = get_split_audio(
                audio_data, seg_id, AUDIO_SEG_LOWER_LIMIT, AUDIO_SEG_UPPER_LIMIT
            )
            num_split = len(split_audio_data)
            num_of_seg_in_audios[seg_id] = num_split
        expected_num_of_seg_in_audios = "tests/data/expected_audio_data.json"
        with open(expected_num_of_seg_in_audios, encoding="utf-8") as file:
            expected_num_split = json.load(file)
        assert num_of_seg_in_audios == expected_num_split


if __name__ == "__main__":
    # Allow running this test module directly without pytest. The stripped
    # diff left both the old function call and the new class-based call;
    # only the class-based test exists in the new file.
    TestGetSplitAudio().test_get_split_audio()
56 changes: 56 additions & 0 deletions tests/vad_output/NW_001_vad_output.json
Original file line number Diff line number Diff line change
@@ -0,0 +1,56 @@
{
"timeline": [
{
"start": 0.23346875,
"end": 2.5115937500000003
},
{
"start": 2.79846875,
"end": 25.86659375
},
{
"start": 26.18721875,
"end": 30.13596875
},
{
"start": 30.50721875,
"end": 34.540343750000005
},
{
"start": 34.77659375,
"end": 40.59846875
},
{
"start": 40.85159375,
"end": 46.43721875
},
{
"start": 46.84221875,
"end": 50.487218750000004
},
{
"start": 50.790968750000005,
"end": 53.001593750000005
},
{
"start": 53.28846875,
"end": 56.19096875
},
{
"start": 56.376593750000005,
"end": 68.35784375
},
{
"start": 68.67846875000001,
"end": 146.28659375
},
{
"start": 146.53971875000002,
"end": 161.86221875
},
{
"start": 162.21659375000002,
"end": 165.74346875
}
]
}
8 changes: 8 additions & 0 deletions tests/vad_output/NW_002_vad_output.json
Original file line number Diff line number Diff line change
@@ -0,0 +1,8 @@
{
"timeline": [
{
"start": 0.03096875,
"end": 119.26971875000001
}
]
}
Loading
Loading