#!/usr/bin/env python
# encoding: utf-8
"""
Evaluation script.
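
Pairs each annotation file with its matching detection file and reports the
individual as well as the summarised evaluation results. A typical invocation
(a sketch; any evaluation option works, and annotations and detections may
also be searched in separate folders) looks like:

  $ evaluate onsets /path/to/files
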
"""

from __future__ import absolute_import, division, print_function

import argparse
import os
import sys
import warnings

from madmom.evaluation import beats, chords, key, notes, onsets, tempo
from madmom.utils import match_file, search_files


def main():
    """Evaluation script"""
    # define parser
    p = argparse.ArgumentParser(
        formatter_class=argparse.RawDescriptionHelpFormatter, description='''
    This program evaluates pairs of files containing annotations and
    detections.

    Please note that all available evaluation options have their individual
    help messages, e.g.

      $ evaluate onsets -h

    ''')
    # add version
    p.add_argument('--version', action='version', version='evaluate')
    # add subparsers
    sub_parsers = p.add_subparsers(title='available evaluation options')
    # onset evaluation
    onsets.add_parser(sub_parsers)
    # beat evaluation
    beats.add_parser(sub_parsers)
    # tempo evaluation
    tempo.add_parser(sub_parsers)
    # key evaluation
    key.add_parser(sub_parsers)
    # note evaluation
    notes.add_parser(sub_parsers)
    # chord evaluation
    chords.add_parser(sub_parsers)
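    # NOTE: each add_parser() call above is expected to set the defaults
    #       used below (eval, sum_eval, mean_eval, load_fn, outfile and
    #       output_formatter) for the respective evaluation option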
    # parse the args
    args = p.parse_args()
    # exit with the help message if no evaluation option was chosen (on
    # Python 3, argparse does not require a sub-command by default)
    if not hasattr(args, 'eval'):
        p.print_help()
        sys.exit(1)
    # print the arguments
    if args.verbose >= 2:
        print(args)
    # suppress warnings if requested
    if args.quiet:
        warnings.filterwarnings("ignore")
    # get detection and annotation files
    if args.det_dir is None:
        args.det_dir = args.files
    if args.ann_dir is None:
        args.ann_dir = args.files
    det_files = search_files(args.det_dir, args.det_suffix)
    ann_files = search_files(args.ann_dir, args.ann_suffix)
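    # (the suffixes determine which of the found files are treated as
    #  annotations and which as detections)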
    # quit if no annotation files were found
    if len(ann_files) == 0:
        print("no files to evaluate. exiting.")
        sys.exit()
    # list to collect the individual evaluation objects
    eval_objects = []
    # progress message (kept around to be able to overwrite it later)
    progress = ''
    # evaluate all files
    num_files = len(ann_files)
    for num_file, ann_file in enumerate(ann_files):
        # print progress
        progress_len = len(progress)
        if args.verbose >= 2:
            progress = 'evaluating %s' % os.path.basename(ann_file)
        else:
            progress = 'evaluating file %d of %d' % (num_file + 1, num_files)
        sys.stderr.write('\r%s' % progress.ljust(progress_len))
        sys.stderr.flush()
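        # (padding with ljust ensures that a shorter progress message fully
        #  overwrites a longer previous one on the same terminal line)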
        # get the matching detection files
        matches = match_file(ann_file, det_files,
                             args.ann_suffix, args.det_suffix)
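        # (roughly, match_file returns the detection files whose base name
        #  matches that of the annotation file, modulo the given suffixes)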
        if len(matches) > 1:
            # exit if multiple detections were found
            raise SystemExit("multiple detections for %s found" % ann_file)
        elif len(matches) == 0:
            # ignore non-existing detections
            if args.ignore_non_existing:
                continue
            # output a warning if no detections were found
            warnings.warn(" can't find detections for %s" % ann_file)
            # but continue and assume no detections
            det_file = None
        else:
            # use the first (and only) matched detection file
            det_file = matches[0]
        # load detections and annotations
        detections = args.load_fn(det_file)
        annotations = args.load_fn(ann_file)
        # evaluate them
        e = args.eval(detections, annotations,
                      name=os.path.basename(ann_file), **vars(args))
        # add this file's evaluation to the global evaluation list
        eval_objects.append(e)
    # clear progress
    sys.stderr.write('\r%s\r' % ' '.ljust(len(progress)))
    sys.stderr.flush()
    # output every evaluation object individually (if verbose)
    out_list = []
    if args.verbose:
        out_list.extend(eval_objects)
    # add sum/mean evaluation to output
    if args.sum_eval is not None:
        out_list.append(args.sum_eval(eval_objects))
    out_list.append(args.mean_eval(eval_objects))
    # output everything
    args.outfile.write(args.output_formatter(out_list, **vars(args)) + '\n')


if __name__ == '__main__':
main()