Diffstat (limited to 'benchtests/scripts')
-rwxr-xr-x  benchtests/scripts/bench.py                      22
-rw-r--r--  benchtests/scripts/benchout.schema.json           5
-rw-r--r--  benchtests/scripts/benchout_strings.schema.json   44
-rwxr-xr-x  benchtests/scripts/compare_bench.py               42
-rwxr-xr-x  benchtests/scripts/compare_strings.py            211
-rw-r--r--  benchtests/scripts/import_bench.py                 2
-rwxr-xr-x  benchtests/scripts/validate_benchout.py            2
7 files changed, 295 insertions, 33 deletions
diff --git a/benchtests/scripts/bench.py b/benchtests/scripts/bench.py
index 5a4fc94ba8..806aba6c90 100755
--- a/benchtests/scripts/bench.py
+++ b/benchtests/scripts/bench.py
@@ -1,5 +1,5 @@
#!/usr/bin/python
-# Copyright (C) 2014-2016 Free Software Foundation, Inc.
+# Copyright (C) 2014-2018 Free Software Foundation, Inc.
# This file is part of the GNU C Library.
#
# The GNU C Library is free software; you can redistribute it and/or
@@ -45,7 +45,7 @@ DEFINES_TEMPLATE = '''
# variant is represented by the _VARIANT structure. The ARGS structure
# represents a single set of arguments.
STRUCT_TEMPLATE = '''
-#define CALL_BENCH_FUNC(v, i) %(func)s (%(func_args)s)
+#define CALL_BENCH_FUNC(v, i, x) %(func)s (x %(func_args)s)
struct args
{
@@ -84,7 +84,9 @@ EPILOGUE = '''
#define RESULT(__v, __i) (variants[(__v)].in[(__i)].timing)
#define RESULT_ACCUM(r, v, i, old, new) \\
((RESULT ((v), (i))) = (RESULT ((v), (i)) * (old) + (r)) / ((new) + 1))
-#define BENCH_FUNC(i, j) ({%(getret)s CALL_BENCH_FUNC (i, j);})
+#define BENCH_FUNC(i, j) ({%(getret)s CALL_BENCH_FUNC (i, j, );})
+#define BENCH_FUNC_LAT(i, j) ({%(getret)s CALL_BENCH_FUNC (i, j, %(latarg)s);})
+#define BENCH_VARS %(defvar)s
#define FUNCNAME "%(func)s"
#include "bench-skeleton.c"'''
@@ -122,17 +124,23 @@ def gen_source(func, directives, all_vals):
# If we have a return value from the function, make sure it is
# assigned to prevent the compiler from optimizing out the
# call.
+ getret = ''
+ latarg = ''
+ defvar = ''
+
if directives['ret']:
print('static %s volatile ret;' % directives['ret'])
- getret = 'ret = '
- else:
- getret = ''
+ print('static %s zero __attribute__((used)) = 0;' % directives['ret'])
+ getret = 'ret = func_res = '
+ # Note this may not work if argument and result type are incompatible.
+ latarg = 'func_res * zero +'
+ defvar = '%s func_res = 0;' % directives['ret']
# Test initialization.
if directives['init']:
print('#define BENCH_INIT %s' % directives['init'])
- print(EPILOGUE % {'getret': getret, 'func': func})
+ print(EPILOGUE % {'getret': getret, 'func': func, 'latarg': latarg, 'defvar': defvar })
def _print_arg_data(func, directives, all_vals):
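The extra third parameter of CALL_BENCH_FUNC lets BENCH_FUNC_LAT thread the previous result back into the first argument (func_res * zero +), so each call depends on the one before it and a latency figure can be measured alongside the plain throughput loop. A minimal sketch of the substitution gen_source() now performs, using a hypothetical benchmark "foo" whose 'ret' directive is 'double' and an abridged EPILOGUE:

    # Sketch only: reproduces the template substitution added to bench.py
    # for a hypothetical function 'foo' returning double.
    EPILOGUE = '''
    #define BENCH_FUNC(i, j) ({%(getret)s CALL_BENCH_FUNC (i, j, );})
    #define BENCH_FUNC_LAT(i, j) ({%(getret)s CALL_BENCH_FUNC (i, j, %(latarg)s);})
    #define BENCH_VARS %(defvar)s
    #define FUNCNAME "%(func)s"'''

    ret = 'double'                      # hypothetical directives['ret']
    print(EPILOGUE % {'getret': 'ret = func_res = ',
                      'latarg': 'func_res * zero +',
                      'defvar': '%s func_res = 0;' % ret,
                      'func': 'foo'})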
diff --git a/benchtests/scripts/benchout.schema.json b/benchtests/scripts/benchout.schema.json
index affb7c11f4..0eca21b6bd 100644
--- a/benchtests/scripts/benchout.schema.json
+++ b/benchtests/scripts/benchout.schema.json
@@ -13,12 +13,13 @@
"title": "Function names",
"type": "object",
"patternProperties": {
- "^[_a-zA-Z0-9]*$": {
+ "^[_a-zA-Z0-9,=.-]*$": {
"title": "Function variants",
"type": "object",
"properties": {
"duration": {"type": "number"},
"iterations": {"type": "number"},
+ "throughput": {"type": "number"},
"max": {"type": "number"},
"min": {"type": "number"},
"mean": {"type": "number"},
@@ -27,7 +28,7 @@
"items": {"type": "number"}
}
},
- "required": ["duration", "iterations", "max", "min", "mean"],
+ "required": ["duration", "iterations"],
"additionalProperties": false
}
},
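The variant-name pattern is widened to also accept commas, equals signs, dots and dashes, and only duration and iterations remain mandatory per variant. A quick check of the relaxed regex (the variant names below are hypothetical):

    import re

    old = re.compile(r'^[_a-zA-Z0-9]*$')
    new = re.compile(r'^[_a-zA-Z0-9,=.-]*$')

    # A plain name passes both; an attribute-style name only passes the new one.
    for name in ('memcpy', 'workload-name,length=16.5'):
        print('%-28s old=%-5s new=%s' % (name, bool(old.match(name)), bool(new.match(name))))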
diff --git a/benchtests/scripts/benchout_strings.schema.json b/benchtests/scripts/benchout_strings.schema.json
new file mode 100644
index 0000000000..7c9a67134b
--- /dev/null
+++ b/benchtests/scripts/benchout_strings.schema.json
@@ -0,0 +1,44 @@
+{
+ "title": "string benchmark",
+ "type": "object",
+ "properties": {
+ "timing_type": {
+ "type": "string"
+ },
+ "functions": {
+ "title": "Associative array of functions",
+ "type": "object",
+ "patternProperties": {
+ "^[_a-zA-Z][_a-zA-Z0-9]+$": {
+ "title": "Function names",
+ "type": "object",
+ "properties": {
+ "bench-variant": {"type": "string"},
+ "ifuncs": {
+ "type": "array",
+ "items": {"type": "string"}
+ },
+ "results": {
+ "type": "array",
+ "items": {
+ "type": "object",
+ "properties": {
+ "timings": {
+ "type": "array",
+ "items": {"type": "number"}
+ }
+ },
+ "additionalProperties": {"type": "number"},
+ "minProperties": 2
+ }
+ }
+ },
+ "additionalProperties": false
+ }
+ },
+ "minProperties": 1
+ }
+ },
+ "required": ["timing_type", "functions"],
+ "additionalProperties": false
+}
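For reference, a minimal document accepted by this new schema looks like the sketch below (function, ifunc and attribute names are hypothetical, and the snippet assumes it is run from the glibc source root so the schema path resolves):

    import json
    import jsonschema

    sample = {
        "timing_type": "hp_timing",
        "functions": {
            "memcpy": {
                "bench-variant": "default",
                "ifuncs": ["generic_memcpy", "memcpy_variant1"],
                "results": [
                    {"length": 16, "align1": 0, "align2": 0,
                     "timings": [12.3, 10.1]}
                ]
            }
        }
    }

    with open('benchtests/scripts/benchout_strings.schema.json') as f:
        schema = json.load(f)

    jsonschema.validate(sample, schema)   # raises ValidationError if it does not conform
    print('sample validates')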
diff --git a/benchtests/scripts/compare_bench.py b/benchtests/scripts/compare_bench.py
index b14f9ddc3d..88e8911d81 100755
--- a/benchtests/scripts/compare_bench.py
+++ b/benchtests/scripts/compare_bench.py
@@ -1,5 +1,5 @@
#!/usr/bin/python
-# Copyright (C) 2015-2016 Free Software Foundation, Inc.
+# Copyright (C) 2015-2018 Free Software Foundation, Inc.
# This file is part of the GNU C Library.
#
# The GNU C Library is free software; you can redistribute it and/or
@@ -25,6 +25,7 @@ import sys
import os
import pylab
import import_bench as bench
+import argparse
def do_compare(func, var, tl1, tl2, par, threshold):
"""Compare one of the aggregate measurements
@@ -151,26 +152,9 @@ def plot_graphs(bench1, bench2):
print('Writing out %s' % filename)
pylab.savefig(filename)
-
-def main(args):
- """Program Entry Point
-
- Take two benchmark output files and compare their timings.
- """
- if len(args) > 4 or len(args) < 3:
- print('Usage: %s <schema> <file1> <file2> [threshold in %%]' % sys.argv[0])
- sys.exit(os.EX_USAGE)
-
- bench1 = bench.parse_bench(args[1], args[0])
- bench2 = bench.parse_bench(args[2], args[0])
- if len(args) == 4:
- threshold = float(args[3])
- else:
- threshold = 10.0
-
- if (bench1['timing_type'] != bench2['timing_type']):
- print('Cannot compare benchmark outputs: timing types are different')
- return
+def main(bench1, bench2, schema, threshold):
+ bench1 = bench.parse_bench(bench1, schema)
+ bench2 = bench.parse_bench(bench2, schema)
plot_graphs(bench1, bench2)
@@ -181,4 +165,18 @@ def main(args):
if __name__ == '__main__':
- main(sys.argv[1:])
+ parser = argparse.ArgumentParser(description='Take two benchmark and compare their timings.')
+
+ # Required parameters
+ parser.add_argument('bench1', help='First bench to compare')
+ parser.add_argument('bench2', help='Second bench to compare')
+
+ # Optional parameters
+ parser.add_argument('--schema',
+ default=os.path.join(os.path.dirname(os.path.realpath(__file__)),'benchout.schema.json'),
+ help='JSON file to validate source/dest files (default: %(default)s)')
+ parser.add_argument('--threshold', default=10.0, help='Only print those with equal or higher threshold (default: %(default)s)')
+
+ args = parser.parse_args()
+
+ main(args.bench1, args.bench2, args.schema, args.threshold)
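The argparse rewrite makes the two result files positional and turns the schema and threshold into options with defaults (the schema defaults to benchout.schema.json next to the script). A standalone sketch of the same parser, fed a hypothetical command line:

    import argparse
    import os

    parser = argparse.ArgumentParser(
        description='Take two benchmark results and compare their timings.')
    parser.add_argument('bench1', help='First bench to compare')
    parser.add_argument('bench2', help='Second bench to compare')
    parser.add_argument('--schema',
                        default=os.path.join(os.path.dirname(os.path.realpath(__file__)),
                                             'benchout.schema.json'))
    # As in the patch, no type= is given, so a threshold passed on the command
    # line arrives as a string while the default stays a float.
    parser.add_argument('--threshold', default=10.0)

    args = parser.parse_args(['before.out', 'after.out', '--threshold', '5'])
    print(args.bench1, args.bench2, args.schema, args.threshold)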
diff --git a/benchtests/scripts/compare_strings.py b/benchtests/scripts/compare_strings.py
new file mode 100755
index 0000000000..e3ad8ff058
--- /dev/null
+++ b/benchtests/scripts/compare_strings.py
@@ -0,0 +1,211 @@
+#!/usr/bin/python
+# Copyright (C) 2017-2018 Free Software Foundation, Inc.
+# This file is part of the GNU C Library.
+#
+# The GNU C Library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# The GNU C Library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with the GNU C Library; if not, see
+# <http://www.gnu.org/licenses/>.
+"""Compare results of string functions
+
+Given a string benchmark result file, print a table with comparisons with a
+baseline. The baseline is the first function, which typically is the builtin
+function.
+"""
+import matplotlib as mpl
+mpl.use('Agg')
+
+import sys
+import os
+import json
+import pylab
+import argparse
+
+try:
+ import jsonschema as validator
+except ImportError:
+ print('Could not find jsonschema module.')
+ raise
+
+
+def parse_file(filename, schema_filename):
+ try:
+ with open(schema_filename, 'r') as schemafile:
+ schema = json.load(schemafile)
+ with open(filename, 'r') as benchfile:
+ bench = json.load(benchfile)
+ validator.validate(bench, schema)
+ return bench
+ except FileNotFoundError:
+ sys.stderr.write('Invalid input file %s.\n' % filename)
+ sys.exit(os.EX_NOINPUT)
+
+def draw_graph(f, v, ifuncs, results):
+ """Plot graphs for functions
+
+ Plot line graphs for each of the ifuncs
+
+ Args:
+ f: Function name
+ v: Benchmark variant for the function.
+ ifuncs: List of ifunc names
+ results: Dictionary of results for each test criterion
+ """
+ print('Generating graph for %s, variant \'%s\'' % (f, v))
+ xkeys = results.keys()
+
+ pylab.clf()
+ fig = pylab.figure(frameon=False)
+ fig.set_size_inches(32, 18)
+ pylab.ylabel('Performance improvement from base')
+ X = range(len(xkeys))
+ pylab.xticks(X, xkeys)
+
+ i = 0
+
+ while i < len(ifuncs):
+ Y = [results[k][i] for k in xkeys]
+ lines = pylab.plot(X, Y, label=':'+ifuncs[i])
+ i = i + 1
+
+ pylab.legend()
+ pylab.grid()
+ pylab.savefig('%s-%s.png' % (f, v), bbox_inches='tight')
+
+
+def process_results(results, attrs, funcs, base_func, graph, no_diff, no_header):
+ """ Process results and print them
+
+ Args:
+ results: JSON dictionary of results
+ attrs: Attributes that form the test criteria
+ funcs: Functions that are selected
+ """
+
+ for f in results['functions'].keys():
+
+ v = results['functions'][f]['bench-variant']
+
+ selected = {}
+ index = 0
+ base_index = 0
+ if funcs:
+ ifuncs = []
+ first_func = True
+ for i in results['functions'][f]['ifuncs']:
+ if i in funcs:
+ if first_func:
+ base_index = index
+ first_func = False
+ selected[index] = 1
+ ifuncs.append(i)
+ else:
+ selected[index] = 0
+ index += 1
+ else:
+ ifuncs = results['functions'][f]['ifuncs']
+ for i in ifuncs:
+ selected[index] = 1
+ index += 1
+
+ if base_func:
+ try:
+ base_index = results['functions'][f]['ifuncs'].index(base_func)
+ except ValueError:
+ sys.stderr.write('Invalid -b "%s" parameter. Options: %s.\n' %
+ (base_func, ', '.join(results['functions'][f]['ifuncs'])))
+ sys.exit(os.EX_DATAERR)
+
+ if not no_header:
+ print('Function: %s' % f)
+ print('Variant: %s' % v)
+ print("%36s%s" % (' ', '\t'.join(ifuncs)))
+ print("=" * 120)
+
+ graph_res = {}
+ for res in results['functions'][f]['results']:
+ try:
+ attr_list = ['%s=%s' % (a, res[a]) for a in attrs]
+ except KeyError as ke:
+ sys.stderr.write('Invalid -a %s parameter. Options: %s.\n'
+ % (ke, ', '.join([a for a in res.keys() if a != 'timings'])))
+ sys.exit(os.EX_DATAERR)
+ i = 0
+ key = ', '.join(attr_list)
+ sys.stdout.write('%36s: ' % key)
+ graph_res[key] = res['timings']
+ for t in res['timings']:
+ if selected[i]:
+ sys.stdout.write ('%12.2f' % t)
+ if not no_diff:
+ if i != base_index:
+ base = res['timings'][base_index]
+ diff = (base - t) * 100 / base
+ sys.stdout.write (' (%6.2f%%)' % diff)
+ sys.stdout.write('\t')
+ i = i + 1
+ print('')
+
+ if graph:
+ draw_graph(f, v, results['functions'][f]['ifuncs'], graph_res)
+
+
+def main(args):
+ """Program Entry Point
+
+ Take a string benchmark output file and compare timings.
+ """
+
+ base_func = None
+ filename = args.input
+ schema_filename = args.schema
+ base_func = args.base
+ attrs = args.attributes.split(',')
+ if args.functions:
+ funcs = args.functions.split(',')
+ if base_func and not base_func in funcs:
+ print('Baseline function (%s) not found.' % base_func)
+ sys.exit(os.EX_DATAERR)
+ else:
+ funcs = None
+
+ results = parse_file(args.input, args.schema)
+ process_results(results, attrs, funcs, base_func, args.graph, args.no_diff, args.no_header)
+ return os.EX_OK
+
+
+if __name__ == '__main__':
+ parser = argparse.ArgumentParser()
+
+ # The required arguments.
+ req = parser.add_argument_group(title='required arguments')
+ req.add_argument('-a', '--attributes', required=True,
+ help='Comma separated list of benchmark attributes.')
+ req.add_argument('-i', '--input', required=True,
+ help='Input JSON benchmark result file.')
+ req.add_argument('-s', '--schema', required=True,
+ help='Schema file to validate the result file.')
+
+ # Optional arguments.
+ parser.add_argument('-f', '--functions',
+ help='Comma separated list of functions.')
+ parser.add_argument('-b', '--base',
+ help='IFUNC variant to set as baseline.')
+ parser.add_argument('-g', '--graph', action='store_true',
+ help='Generate a graph from results.')
+ parser.add_argument('--no-diff', action='store_true',
+ help='Do not print the difference from baseline.')
+ parser.add_argument('--no-header', action='store_true',
+ help='Do not print the header.')
+
+ args = parser.parse_args()
+ sys.exit(main(args))
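Each row printed by process_results() shows the selected ifuncs' timings followed by their improvement over the baseline column, diff = (base - t) * 100 / base. A tiny standalone sketch of that calculation with hypothetical names and timings:

    ifuncs = ['generic_memcpy', 'memcpy_variant1', 'memcpy_variant2']   # hypothetical names
    timings = [100.0, 80.0, 120.0]                                      # hypothetical timings
    base_index = 0
    base = timings[base_index]

    for i, (name, t) in enumerate(zip(ifuncs, timings)):
        if i == base_index:
            print('%-20s %12.2f (baseline)' % (name, t))
        else:
            diff = (base - t) * 100 / base          # positive means faster than baseline
            print('%-20s %12.2f (%6.2f%%)' % (name, t, diff))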
diff --git a/benchtests/scripts/import_bench.py b/benchtests/scripts/import_bench.py
index ff88056b8d..602b3f954d 100644
--- a/benchtests/scripts/import_bench.py
+++ b/benchtests/scripts/import_bench.py
@@ -1,5 +1,5 @@
#!/usr/bin/python
-# Copyright (C) 2015-2016 Free Software Foundation, Inc.
+# Copyright (C) 2015-2018 Free Software Foundation, Inc.
# This file is part of the GNU C Library.
#
# The GNU C Library is free software; you can redistribute it and/or
diff --git a/benchtests/scripts/validate_benchout.py b/benchtests/scripts/validate_benchout.py
index 12977ed1d0..6147f05bec 100755
--- a/benchtests/scripts/validate_benchout.py
+++ b/benchtests/scripts/validate_benchout.py
@@ -1,5 +1,5 @@
#!/usr/bin/python
-# Copyright (C) 2014-2016 Free Software Foundation, Inc.
+# Copyright (C) 2014-2018 Free Software Foundation, Inc.
# This file is part of the GNU C Library.
#
# The GNU C Library is free software; you can redistribute it and/or