Traceback (most recent call last):
File "/homes/eva/q/qdeegen/micromamba/envs/diarizen/lib/python3.10/runpy.py", line 196, in _run_module_as_main
return _run_code(code, main_globals, None,
File "/homes/eva/q/qdeegen/micromamba/envs/diarizen/lib/python3.10/runpy.py", line 86, in _run_code
exec(code, run_globals)
File "/mnt/matylda5/qdeegen/deploy/software/meeteval/meeteval/der/__main__.py", line 96, in <module>
cli()
File "/mnt/matylda5/qdeegen/deploy/software/meeteval/meeteval/der/__main__.py", line 92, in cli
cli.run()
File "/mnt/matylda5/qdeegen/deploy/software/meeteval/meeteval/wer/__main__.py", line 766, in run
return fn(**kwargs)
File "/mnt/matylda5/qdeegen/deploy/software/meeteval/meeteval/der/__main__.py", line 47, in dscore
results = dscore(
File "/mnt/matylda5/qdeegen/deploy/software/meeteval/meeteval/der/api.py", line 47, in dscore
results = dscore_multifile(
File "/mnt/matylda5/qdeegen/deploy/software/meeteval/meeteval/der/nryant_dscore.py", line 190, in dscore_multifile
result = md_eval_22_multifile(
File "/mnt/matylda5/qdeegen/deploy/software/meeteval/meeteval/der/md_eval.py", line 301, in md_eval_22_multifile
raise RuntimeError(
RuntimeError: The error rate of md-eval-22.pl on all recordings (0.15625)
does not match the average error rate of md-eval-22.pl applied to each recording (0.1563).
There seems to be only a slight difference between the two averaged values — note that 0.15625 rounded to four decimal places is 0.1563, so this looks like a rounding/precision artifact rather than a genuine mismatch. However, the RTTM files seem to be correct.
When computing the DER for multiple recordings I get the error shown in the traceback above: the overall error rate and the per-recording average differ only marginally, even though the RTTM files appear to be correct.