-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathspeaker_diarization_for_conversation.py
More file actions
56 lines (47 loc) · 1.46 KB
/
speaker_diarization_for_conversation.py
File metadata and controls
56 lines (47 loc) · 1.46 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
import argparse
import os
import re

import numpy as np
import soundfile as sf
import torch
from pyannote.audio import Pipeline

from concat_wavfile import concat_all_file
from m4a_to_wav import m4a_to_wav
def main(parser):
    """Diarize the input audio file and split it into one clip per speaker turn.

    Reads the .wav file given by ``--data_in``, runs the pyannote
    speaker-diarization pipeline on it, writes a plain-text summary of every
    turn to ./result.txt, and saves each turn as
    ``<data_out_dir>/<start>~<end>.wav``.

    Args:
        parser: an ``argparse.ArgumentParser`` already configured with
            ``--data_in`` and ``--data_out_dir``.
    """
    args = parser.parse_args()
    # NOTE(security): credentials should not live in source. Prefer setting the
    # HF_TOKEN environment variable; the hard-coded literal is kept only as a
    # backward-compatible fallback and should be revoked/rotated.
    pipeline = Pipeline.from_pretrained(
        "pyannote/speaker-diarization",
        use_auth_token=os.environ.get(
            "HF_TOKEN", "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"
        ),
    )
    # Run on GPU when one is available; fall back to CPU otherwise.
    device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
    pipeline.to(device)
    diarization = pipeline(args.data_in)
    y, sr = sf.read(args.data_in)
    # Context manager guarantees result.txt is flushed and closed even on error
    # (the original leaked the handle and then reopened the file for nothing).
    with open("./result.txt", "w") as f:
        for turn, _, speaker in diarization.itertracks(yield_label=True):
            print(
                f"start={turn.start:.1f}s stop={turn.end:.1f}s speaker_{speaker}",
                file=f,
            )
            # Convert turn boundaries (seconds) into sample indices.
            s1, s2 = int(turn.start * sr), int(turn.end * sr)
            sf.write(
                f"{args.data_out_dir}/{round(turn.start, 2)}~{round(turn.end, 2)}.wav",
                y[s1:s2],
                sr,  # was hard-coded 16000: wrong playback speed for other rates
                format="WAV",
            )
if __name__ == "__main__":
    # Build the command-line interface and hand it to main(), which parses it.
    cli = argparse.ArgumentParser()
    cli.add_argument(
        "--data_in",
        default="data_in",
        help="target .wav file of sound data",
    )
    cli.add_argument(
        "--data_out_dir",
        default="data_out",
        help="Directory containing concat .wav of sound data",
    )
    main(cli)