#!/usr/bin/env python2
#
# Copyright 2017 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#
# pylint: disable=cros-logging-import

"""Script to help generate a JSON format report from raw benchmark data."""
from __future__ import print_function

import argparse
import config
import json
import logging
import sys

# Turn the logging level to INFO before importing other autotest
# code, to avoid having failed import logging messages confuse the
# test_droid user.
logging.basicConfig(level=logging.INFO)


def _parse_arguments_internal(argv):
    parser = argparse.ArgumentParser(
        description='Convert result to JSON format')

    parser.add_argument(
        '-b',
        '--bench',
        help='Name of the benchmark to generate the JSON report for.')

    parser.add_argument(
        '-i', '--input', help='Specify the input result file name.')

    parser.add_argument(
        '-o', '--output', help='Specify the output JSON format result file.')

    parser.add_argument(
        '-p',
        '--platform',
        help='Indicate the platform (experiment or device) name '
        'to be shown in JSON.')

    parser.add_argument(
        '--iterations',
        type=int,
        help='Number of iterations included in the result.')
    return parser.parse_args(argv)


# Collect data from one iteration's raw result file and parse it into a
# JSON-serializable object; iteration `it` is read from the file named
# infile + str(it).
def collect_data(infile, bench, it):
    result_dict = {}
    with open(infile + str(it)) as fin:
        if bench not in config.bench_parser_dict:
            logging.error('Please input the correct benchmark name.')
            raise ValueError('Wrong benchmark name: %s' % bench)
        parse = config.bench_parser_dict[bench]
        result_dict = parse(bench, fin)
    return result_dict

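# Note on collect_data above: the per-benchmark parsers live in
# config.bench_parser_dict, which is defined elsewhere and not shown in this
# file.  Judging from the `result += collect_data(...)` accumulation in
# main(), each parser is assumed to take the benchmark name and an open file
# object and return an iterable of result entries, along the lines of this
# hypothetical sketch:
#   def parse_my_bench(bench, fin):
#       return [{'metric': line.strip()} for line in fin]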

# If there is no original output file, create a new one and initialize it
# with an empty report skeleton: {'data': {<bench>: []}, 'platforms': []}.
def create_outfile(outfile, bench):
    with open(outfile, 'w') as fout:
        obj_null = {'data': {bench.lower(): []}, 'platforms': []}
        json.dump(obj_null, fout)


# Open the existing output file for reading; if it does not exist yet,
# create and initialize it first.
def get_outfile(outfile, bench):
    try:
        return open(outfile)
    except IOError:
        create_outfile(outfile, bench)
        return open(outfile)


def main(argv):
    arguments = _parse_arguments_internal(argv)

    bench = arguments.bench
    infile = arguments.input
    outfile = arguments.output
    platform = arguments.platform
    iteration = arguments.iterations

    # Gather the parsed results from every iteration's raw result file.
    result = []
    for i in xrange(iteration):
        result += collect_data(infile, bench, i)

    # Load the existing report (or a freshly initialized one), append this
    # run's platform and results, and write the report back.
    with get_outfile(outfile, bench) as fout:
        obj = json.load(fout)
    obj['platforms'].append(platform)
    obj['data'][bench.lower()].append(result)
    with open(outfile, 'w') as fout:
        json.dump(obj, fout)
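    # After N successful runs against the same output file, the report is
    # expected to look roughly like this (key names taken from the code
    # above, values illustrative):
    #   {"platforms": ["platform_1", ..., "platform_N"],
    #    "data": {"<bench>": [[results of run 1], ..., [results of run N]]}}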


if __name__ == '__main__':
    main(sys.argv[1:])