
[COMMITTED,1/3] benchtests: Use argparse to parse arguments

Message ID 1505556666-3043-1-git-send-email-siddhesh@sourceware.org

Commit Message

Siddhesh Poyarekar Sept. 16, 2017, 10:11 a.m. UTC
Make the script more usable by adding proper command line options,
along with a way to query those options.  The script can already do
several useful things, such as choosing a base for comparison and
generating graphs, and these capabilities should be accessible through
command line switches.
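
For example, an invocation that previously passed positional arguments
(the input file, schema file and attribute names below are
illustrative):

    benchtests/scripts/compare_strings.py bench-memcpy.out \
        benchout_strings.schema.json -base=__memcpy_generic length align1

would now be written with named options, with the attributes given as a
comma separated list:

    benchtests/scripts/compare_strings.py -i bench-memcpy.out \
        -s benchout_strings.schema.json -b __memcpy_generic \
        -a length,align1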

	* benchtests/scripts/compare_strings.py: Use argparse.
	* benchtests/README: Document existence of compare_strings.py.
---
 ChangeLog                             |  5 +++++
 benchtests/README                     | 11 +++++++++++
 benchtests/scripts/compare_strings.py | 35 +++++++++++++++++++++++------------
 3 files changed, 39 insertions(+), 12 deletions(-)

Patch

diff --git a/ChangeLog b/ChangeLog
index 58acb54..fd9cc0c 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,8 @@ 
+2017-09-16  Siddhesh Poyarekar  <siddhesh@sourceware.org>
+
+	* benchtests/scripts/compare_strings.py: Use argparse.
+	* benchtests/README: Document existence of compare_strings.py.
+
 2017-09-15  Joseph Myers  <joseph@codesourcery.com>
 
 	* math/s_fma.c: Include <libm-alias-double.h>.
diff --git a/benchtests/README b/benchtests/README
index b015acf..9aa750a 100644
--- a/benchtests/README
+++ b/benchtests/README
@@ -122,3 +122,14 @@  To add a benchset for `foo':
 - Write your bench-foo.c that prints out the measurements to stdout.
 - On execution, a bench-foo.out is created in $(objpfx) with the contents of
   stdout.
+
+Reading String Benchmark Results:
+================================
+
+Some of the string benchmark results are now in JSON so that they are easier
+to process in scripts.  Use the benchtests/scripts/compare_strings.py script to
+show the results in a tabular format, generate graphs and more.  Run
+
+    benchtests/scripts/compare_strings.py -h
+
+for usage information.
diff --git a/benchtests/scripts/compare_strings.py b/benchtests/scripts/compare_strings.py
index b3c57e2..3ca9429 100755
--- a/benchtests/scripts/compare_strings.py
+++ b/benchtests/scripts/compare_strings.py
@@ -28,6 +28,7 @@  import sys
 import os
 import json
 import pylab
+import argparse
 
 try:
     import jsonschema as validator
@@ -118,22 +119,32 @@  def main(args):
 
     Take a string benchmark output file and compare timings.
     """
-    if len(args) < 3:
-        print('Usage: %s <input file> <schema file> [-base=ifunc_name] attr1 [attr2 ...]' % sys.argv[0])
-        sys.exit(os.EX_USAGE)
 
     base_func = None
-    filename = args[0]
-    schema_filename = args[1]
-    if args[2].find('-base=') == 0:
-        base_func = args[2][6:]
-        attrs = args[3:]
-    else:
-        attrs = args[2:]
+    filename = args.input
+    schema_filename = args.schema
+    base_func = args.base
+    attrs = args.attributes.split(',')
 
-    results = parse_file(filename, schema_filename)
+    results = parse_file(args.input, args.schema)
     process_results(results, attrs, base_func)
 
 
 if __name__ == '__main__':
-    main(sys.argv[1:])
+    parser = argparse.ArgumentParser()
+
+    # The required arguments.
+    req = parser.add_argument_group(title='required arguments')
+    req.add_argument('-a', '--attributes', required=True,
+                        help='Comma separated list of benchmark attributes.')
+    req.add_argument('-i', '--input', required=True,
+                        help='Input JSON benchmark result file.')
+    req.add_argument('-s', '--schema', required=True,
+                        help='Schema file to validate the result file.')
+
+    # Optional arguments.
+    parser.add_argument('-b', '--base',
+                        help='IFUNC variant to set as baseline.')
+
+    args = parser.parse_args()
+    main(args)
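
A note on the argparse pattern above: argparse lists dash-prefixed
options under its default optional-arguments heading in --help output
even when required=True is set, which is why the patch collects the
required options into an explicit 'required arguments' group.  A
minimal, self-contained sketch of the same idea (option names here are
illustrative, not part of the patch):

    import argparse

    # Group semantically required flags so that --help lists them under
    # a "required arguments" heading instead of the default optional
    # group.
    parser = argparse.ArgumentParser()
    req = parser.add_argument_group(title='required arguments')
    req.add_argument('-i', '--input', required=True,
                     help='Input JSON benchmark result file.')
    parser.add_argument('-b', '--base',
                        help='IFUNC variant to set as baseline.')
    args = parser.parse_args()
    print(args.input, args.base)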