-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathspeaker_diarization_for_data.py
More file actions
92 lines (76 loc) · 2.61 KB
/
speaker_diarization_for_data.py
File metadata and controls
92 lines (76 loc) · 2.61 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
import argparse
import re
import numpy as np
import soundfile as sf
import torch
from pyannote.audio import Pipeline
from concat_wavfile import concat_all_file
from m4a_to_wav import m4a_to_wav
def main(parser):
    """Convert input .m4a files to .wav, concatenate them, diarize the result,
    and overwrite the concatenated file with only the audio of the speaker who
    talks the longest.

    Side effects:
      * writes one line per diarization turn to ./result.txt
      * overwrites the concatenated wav file produced by concat_all_file

    Args:
        parser: an argparse.ArgumentParser configured with --data_in_dir,
            --data_out_dir and --concat_filename.
    """
    args = parser.parse_args()

    # Convert every *.m4a in the input directory, then join all wavs into one file.
    m4a_to_wav(dir=args.data_in_dir)
    target_file = concat_all_file(
        input_dir=args.data_in_dir,
        output_dir=args.data_out_dir,
        filename=args.concat_filename,
    )

    # SECURITY: a real Hugging Face token is hard-coded and published with this
    # file — it should be rotated and read from an environment variable instead.
    pipeline = Pipeline.from_pretrained(
        "pyannote/speaker-diarization",
        use_auth_token="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",
    )
    device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
    pipeline.to(device)
    diarization = pipeline(target_file, num_speakers=6)

    y, sr = sf.read(target_file)

    # Log every turn and collect each speaker's sample slices directly from the
    # diarization result. (Previously the code re-parsed result.txt with a regex
    # and `line[-2]` digit matching, which is fragile and loses sub-0.1s
    # precision; it also left both file handles unclosed.)
    segments = {}
    with open("./result.txt", "w") as f:
        for turn, _, speaker in diarization.itertracks(yield_label=True):
            print(f"start={turn.start:.1f}s stop={turn.end:.1f}s speaker_{speaker}", file=f)
            s1, s2 = int(turn.start * sr), int(turn.end * sr)
            segments.setdefault(speaker, []).append(y[s1:s2])

    # Nothing to extract — leave the concatenated file untouched.
    if not segments:
        return

    # Keep the speaker with the most total samples. Tracks no longer start with
    # a spurious zero sample (the old code seeded each track with np.array([0])).
    speaker_audio = {spk: np.concatenate(chunks) for spk, chunks in segments.items()}
    longest = max(speaker_audio.values(), key=len)

    # BUG FIX: write at the file's actual sample rate, not hard-coded 16000,
    # so the selected audio plays back at the correct speed.
    sf.write(target_file, longest, sr, format="WAV")
if __name__ == "__main__":
    # Build the CLI from a table of (flag, default, help) triples and hand the
    # configured parser to main(), which performs its own parse_args().
    cli = argparse.ArgumentParser()
    option_table = (
        ("--data_in_dir", "data_in", "Directory containing *.m4a of sound data"),
        ("--data_out_dir", "data_out", "Directory containing concat .wav of sound data"),
        ("--concat_filename", "test_concat_file", "name of .wav sound data"),
        ("--data_collecting_method", "submit_recorded_files", "recording, submit_recorded_files"),
    )
    for flag, default_value, help_text in option_table:
        cli.add_argument(flag, default=default_value, help=help_text)
    main(cli)