fama-data/scripts/speech_only.py
spapi's picture
Add speech_only.py script
d2b34bb verified
raw
history blame
2.76 kB
import sys
from silero_vad import load_silero_vad
from silero_vad import read_audio , get_speech_timestamps , save_audio
from silero_vad import VADIterator , collect_chunks
import torch
import json
import os
import argparse
import logging
from pathlib import Path
# Configure root logging once at import time; the level can be overridden
# via the LOG_LEVEL environment variable (defaults to INFO).
logging.basicConfig(
    format="%(asctime)s | %(levelname)s | %(name)s | %(message)s",
    datefmt="%Y-%m-%d %H:%M:%S",
    level=os.environ.get("LOG_LEVEL", "INFO").upper())
# Module-level logger named after this module.
LOGGER = logging.getLogger(__name__)
def main(args):
    """Run Silero VAD over every audio file in a folder.

    For each file in --folder ending in ".<sfx>", speech segments are
    detected and appended as JSON lines to --out_file, one object per
    segment: {"audio_id", "offset", "duration"} with times in seconds.
    When --out_folder is given, the concatenated speech-only audio is
    also written there as "<audio_id>.wav".

    Args:
        args: command-line argument list, e.g. sys.argv (argv[0] is the
            program name and is skipped before parsing).
    """
    parser = argparse.ArgumentParser(
        description="Extract speech-only segments with Silero VAD")
    parser.add_argument("--folder", type=str, required=True, help="folder")
    parser.add_argument("--sfx", type=str, required=False, default="wav",
                        help="audio suffix")
    parser.add_argument("--sr", type=int, required=False, default=16000,
                        help="sampling rate")
    parser.add_argument("--out_folder", type=str, required=False, default=None,
                        help="output folder")
    parser.add_argument("--out_file", type=str, required=True, help="json output")
    parser.add_argument("--reverse", action="store_true",
                        help="reverse processing order in folder")
    # Bug fix: the original passed sys.argv to the ArgumentParser
    # constructor (silently consumed as `prog`) and then parsed sys.argv
    # implicitly; parse the supplied argument list explicitly instead.
    args = parser.parse_args(args[1:])

    model = load_silero_vad(onnx=True)  # ONNX backend: CPU-only inference
    sr = args.sr
    out_folder = args.out_folder
    # Bug fix: only save speech-only wavs when an output folder was
    # requested; previously save_wav was set unconditionally, so with no
    # --out_folder files were written under a literal "None/" directory.
    save_wav = out_folder is not None
    if save_wav:
        Path(out_folder).mkdir(parents=True, exist_ok=True)

    # Require the suffix to be a real extension ("x.wav", not "xwav").
    suffix = args.sfx if args.sfx.startswith(".") else f".{args.sfx}"
    audio_list = sorted(os.listdir(args.folder), reverse=args.reverse)
    with open(args.out_file, "w") as outfile:
        for audiofile in audio_list:
            if not audiofile.endswith(suffix):
                continue
            audiopath = os.path.join(args.folder, audiofile)
            audio_id = Path(audiofile).stem
            # Bug fix: str.replace(sfx, "wav") could corrupt names that
            # contain the suffix mid-string; rebuild the name from the
            # stem so only the final extension changes.
            out_audio = f"{out_folder}/{audio_id}.wav"
            if save_wav and os.path.isfile(out_audio):
                LOGGER.info(f"skipping {audiopath}")
                continue
            LOGGER.info(f"processing {audiopath}")
            wav = read_audio(audiopath, sampling_rate=sr)
            speech_timestamps = get_speech_timestamps(wav, model, sampling_rate=sr)
            if not speech_timestamps:
                continue
            LOGGER.info(
                f"vad-processed {audio_id}.wav: {len(speech_timestamps)} chunks")
            for turn in speech_timestamps:
                # Timestamps come back in samples; convert to seconds.
                entry = {"audio_id": audio_id,
                         "offset": turn['start'] / sr,
                         "duration": (turn['end'] - turn['start']) / sr}
                json.dump(entry, outfile)
                outfile.write('\n')
            if save_wav:
                save_audio(out_audio,
                           collect_chunks(speech_timestamps, wav),
                           sampling_rate=sr)
                LOGGER.info(f"vad-processed {audio_id}.wav written to {out_audio}")
if __name__ == '__main__':
    # Script entry point: forward the full argv (including the program
    # name at index 0) to main().
    main(sys.argv)