github.com/grumpyhome/grumpy@v0.3.1-0.20201208125205-7b775405bdf1/grumpy-tools-src/grumpy_tools/benchcmp.py (about)

     1  #!/usr/bin/env python
     2  
     3  # Copyright 2016 Google Inc. All Rights Reserved.
     4  #
     5  # Licensed under the Apache License, Version 2.0 (the "License");
     6  # you may not use this file except in compliance with the License.
     7  # You may obtain a copy of the License at
     8  #
     9  #     http://www.apache.org/licenses/LICENSE-2.0
    10  #
    11  # Unless required by applicable law or agreed to in writing, software
    12  # distributed under the License is distributed on an "AS IS" BASIS,
    13  # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    14  # See the License for the specific language governing permissions and
    15  # limitations under the License.
    16  
    17  """Runs two benchmark programs and compares their results."""
    18  
    19  from __future__ import print_function
    20  
    21  import argparse
    22  import subprocess
    23  import sys
    24  
# Compatibility shim: xrange exists only on Python 2. On Python 3 the
# NameError is caught and xrange is aliased to the (already lazy) range.
try:
  xrange          # Python 2
except NameError:
  xrange = range  # Python 3
    29  
# Command line: two positional benchmark programs to compare, plus an
# optional repetition count (the best result of the runs is kept).
parser = argparse.ArgumentParser()
parser.add_argument('prog1')
parser.add_argument('prog2')
parser.add_argument('--runs', default=1, type=int,
                    help='number of times to run each program')
    35  
    36  
    37  def main(args):
    38    results1 = _RunBenchmark(args.prog1)
    39    benchmarks = set(results1.keys())
    40    results2 = {}
    41    for _ in xrange(args.runs - 1):
    42      _MergeResults(results1, _RunBenchmark(args.prog1), benchmarks)
    43      _MergeResults(results2, _RunBenchmark(args.prog2), benchmarks)
    44    _MergeResults(results2, _RunBenchmark(args.prog2), benchmarks)
    45    for b in sorted(benchmarks):
    46      print(b, '{:+.1%}'.format(results2[b] / results1[b] - 1))
    47  
    48  
    49  def _MergeResults(merged, results, benchmarks):
    50    benchmarks = set(benchmarks)
    51    for k, v in results.iteritems():
    52      if k not in benchmarks:
    53        _Die('unmatched benchmark: {}', k)
    54      merged[k] = max(merged.get(k, 0), v)
    55      benchmarks.remove(k)
    56    if benchmarks:
    57      _Die('missing benchmark(s): {}', ', '.join(benchmarks))
    58  
    59  
    60  def _RunBenchmark(prog):
    61    """Executes prog and returns a dict mapping benchmark name -> result."""
    62    try:
    63      p = subprocess.Popen([prog], shell=True, stdout=subprocess.PIPE)
    64    except OSError as e:
    65      _Die(e)
    66    out, _ = p.communicate()
    67    if p.returncode:
    68      _Die('{} exited with status: {}', prog, p.returncode)
    69    results = {}
    70    for line in out.splitlines():
    71      line = line.strip()
    72      if not line:
    73        continue
    74      try:
    75        name, status, result = line.split()
    76      except ValueError:
    77        _Die('invalid benchmark output: {}', line)
    78      if status != 'PASSED':
    79        _Die('benchmark failed: {}', line)
    80      try:
    81        result = float(result)
    82      except ValueError:
    83        _Die('invalid benchmark result: {}', line)
    84      results[name] = result
    85    return results
    86  
    87  
    88  def _Die(msg, *args):
    89    if args:
    90      msg = msg.format(*args)
    91    print(msg, file=sys.stderr)
    92    sys.exit(1)
    93  
    94  
# Script entry point: parse CLI arguments and run the comparison.
if __name__ == '__main__':
  main(parser.parse_args())