test: use JSON instead of pickle for storing benchmarks cache
parent 736723cc98
commit a584923f1c

@@ -6,6 +6,7 @@
 import os
 import re
 import sys
+import json
 import argparse
 import subprocess
 from collections import defaultdict, namedtuple
@@ -72,27 +73,11 @@ class BenchmarkConfiguration(Settings):
 class BenchmarkResult:
     def __init__(self, config, output):
         self.config = config
+        self._output = output
         self.parse_output(output)
         # instantiate the benchmarked soc to check its configuration
         self.benchmark_soc = LiteDRAMBenchmarkSoC(**self.config._settings)
 
-    def parse_output(self, output):
-        bist_pattern = r'{stage}\s+{var}:\s+{value}'
-
-        def find(stage, var):
-            pattern = bist_pattern.format(
-                stage=stage,
-                var=var,
-                value=ng('value', '[0-9]+'),
-            )
-            result = re.search(pattern, output)
-            assert result is not None, 'Could not find pattern in output: %s, %s' % (pattern, output)
-            return int(result.group('value'))
-
-        self.generator_ticks = find('BIST-GENERATOR', 'ticks')
-        self.checker_errors = find('BIST-CHECKER', 'errors')
-        self.checker_ticks = find('BIST-CHECKER', 'ticks')
-
     def cmd_count(self):
         data_width = self.benchmark_soc.sdram.controller.interface.data_width
         return self.config.bist_length / (data_width // 8)
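
For reference, find() turns bist_pattern into a concrete regex and pulls one integer out of the simulation log. A minimal sketch of that matching step, assuming ng(name, pattern) builds a named group (?P<name>pattern) and assuming a log line such as 'BIST-GENERATOR ticks: 4096' (neither is shown in this diff):

    import re

    def ng(name, pattern):
        # assumed helper: wrap a pattern in a named regex group
        return r'(?P<{}>{})'.format(name, pattern)

    output = 'BIST-GENERATOR ticks: 4096'  # hypothetical log line
    pattern = r'{stage}\s+{var}:\s+{value}'.format(
        stage='BIST-GENERATOR', var='ticks', value=ng('value', '[0-9]+'))
    print(int(re.search(pattern, output).group('value')))  # -> 4096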
@@ -113,6 +98,41 @@ class BenchmarkResult:
     def read_efficiency(self):
         return self.cmd_count() / self.checker_ticks
 
+    def parse_output(self, output):
+        bist_pattern = r'{stage}\s+{var}:\s+{value}'
+
+        def find(stage, var):
+            pattern = bist_pattern.format(
+                stage=stage,
+                var=var,
+                value=ng('value', '[0-9]+'),
+            )
+            result = re.search(pattern, output)
+            assert result is not None, 'Could not find pattern in output: %s, %s' % (pattern, output)
+            return int(result.group('value'))
+
+        self.generator_ticks = find('BIST-GENERATOR', 'ticks')
+        self.checker_errors = find('BIST-CHECKER', 'errors')
+        self.checker_ticks = find('BIST-CHECKER', 'ticks')
+
+    @classmethod
+    def dump_results_json(cls, results, file):
+        """Save multiple results in a JSON file.
+
+        Only configurations and outputs are saved, as they can be used to reconstruct BenchmarkResult.
+        """
+        # simply use config._settings as it defines the BenchmarkConfiguration
+        results_raw = [(r.config._settings, r._output) for r in results]
+        with open(file, 'w') as f:
+            json.dump(results_raw, f)
+
+    @classmethod
+    def load_results_json(cls, file):
+        """Load results from a JSON file."""
+        with open(file, 'r') as f:
+            results_raw = json.load(f)
+        return [cls(BenchmarkConfiguration(**settings), output) for (settings, output) in results_raw]
+
 # Results summary ----------------------------------------------------------------------------------
 
 class ResultsSummary:
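
The cache is now plain JSON: a list of (settings, output) pairs, which is all that is needed to rebuild each BenchmarkResult. A minimal round-trip sketch with made-up settings and output (the real values come from BenchmarkConfiguration and the BIST log):

    import json

    # hypothetical cached data: one (settings, output) pair per benchmark
    results_raw = [
        ({'bist_length': 1024},
         'BIST-GENERATOR ticks: 4096\nBIST-CHECKER errors: 0\nBIST-CHECKER ticks: 4352'),
    ]

    with open('cache.json', 'w') as f:
        json.dump(results_raw, f)
    with open('cache.json', 'r') as f:
        loaded = json.load(f)

    # JSON stores the tuples as lists, but the (settings, output) content survives
    assert loaded == [list(pair) for pair in results_raw]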
@@ -243,22 +263,19 @@ def run_benchmark(cmd_args):
 
 
 def run_benchmarks(configurations):
-    benchmarks = []
+    results = []
     for name, config in configurations.items():
         cmd_args = config.as_args()
         print('{}: {}'.format(name, ' '.join(cmd_args)))
         output = run_benchmark(cmd_args)
-
-        # return raw outputs, not BenchmarkResult so that we can store them in a file
-        benchmarks.append((config, output))
-
         # exit if checker had any read error
         result = BenchmarkResult(config, output)
         if result.checker_errors != 0:
             print('Error during benchmark "{}": checker_errors = {}'.format(
                 name, result.checker_errors), file=sys.stderr)
             sys.exit(1)
-    return benchmarks
+        results.append(result)
+    return results
 
 
 def main(argv=None):
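
run_benchmarks() now returns BenchmarkResult objects directly; each still carries its raw output in _output, so the whole list can be serialized later. A usage sketch (the file name is illustrative):

    results = run_benchmarks(configurations)
    BenchmarkResult.dump_results_json(results, 'results_cache.json')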
@@ -274,7 +291,11 @@ def main(argv=None):
     parser.add_argument('--plot-transparent', action='store_true', help='Use transparent background when saving plots')
     parser.add_argument('--plot-output-dir', default='plots', help='Specify where to save the plots')
     parser.add_argument('--plot-theme', default='default', help='Use different matplotlib theme')
-    parser.add_argument('--output-cache', help='Cache benchmark outputs to given file if it exists, else load them from the file without running benchmarks. This allows to run the script multiple times to produce different outputs from the same run')
+    parser.add_argument('--results-cache', help="""Use given JSON file as results cache. If the file exists,
+                        it will be loaded instead of running actual benchmarks,
+                        else benchmarks will be run normally, and then saved
+                        to the given file. This allows rerunning the script
+                        to generate a different summary without having to rerun benchmarks.""")
     args = parser.parse_args(argv)
 
     # load and filter configurations
@@ -289,26 +310,21 @@ def main(argv=None):
     for f in filters:
         configurations = dict(filter(f, configurations.items()))
 
-    cache_exists = args.output_cache and os.path.isfile(args.output_cache)
+    cache_exists = args.results_cache and os.path.isfile(args.results_cache)
 
     # load outputs from cache if it exists
-    if args.output_cache and cache_exists:
-        import pickle
-        with open(args.output_cache, 'rb') as f:
-            cached_benchmarks = pickle.load(f)
+    if args.results_cache and cache_exists:
+        cached_results = BenchmarkResult.load_results_json(args.results_cache)
         # take only those that match configurations
-        benchmarks = [(c, o) for c, o in cached_benchmarks if c in configurations.values()]
+        results = [r for r in cached_results if r.config in configurations.values()]
     else: # run all the benchmarks normally
-        benchmarks = run_benchmarks(configurations)
+        results = run_benchmarks(configurations)
 
     # store outputs in cache
-    if args.output_cache and not cache_exists:
-        import pickle
-        with open(args.output_cache, 'wb') as f:
-            pickle.dump(benchmarks, f, pickle.HIGHEST_PROTOCOL)
+    if args.results_cache and not cache_exists:
+        BenchmarkResult.dump_results_json(results, args.results_cache)
 
     # display the summary
-    results = [BenchmarkResult(config, output) for config, output in benchmarks]
     summary = ResultsSummary(results)
     summary.print()
     if args.plot:
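
Taken together, a single flag drives the cache: if the file is missing, the benchmarks run and the results are written out; if it exists, it is loaded and the benchmarks are skipped. An illustrative invocation (the cache path is arbitrary, and --plot is inferred from args.plot above):

    # first call: runs the benchmarks and writes results_cache.json
    main(['--results-cache', 'results_cache.json'])
    # later call: reuses the cached results, e.g. just to regenerate the plots
    main(['--results-cache', 'results_cache.json', '--plot'])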