From f6973aa9d7b66d70e68e1c3b6e252c6f75038d61 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?J=C4=99drzej=20Boczar?=
Date: Thu, 30 Jan 2020 15:04:47 +0100
Subject: [PATCH] test: load benchmark configurations from YAML file

---
 test/benchmarks.yml    | 50 ++++++++++++++++++++++++++++++++
 test/run_benchmarks.py | 65 ++++++++++++++++++++++++++++--------------
 2 files changed, 94 insertions(+), 21 deletions(-)
 create mode 100644 test/benchmarks.yml

diff --git a/test/benchmarks.yml b/test/benchmarks.yml
new file mode 100644
index 0000000..8d9b43b
--- /dev/null
+++ b/test/benchmarks.yml
@@ -0,0 +1,50 @@
+{
+    "test_0": {
+        "sdram_module": 'MT48LC16M16',
+        "sdram_data_width": 32,
+        "bist_length": 4096,
+        "bist_random": True,
+    },
+    "test_1": {
+        "sdram_module": 'MT48LC16M16',
+        "sdram_data_width": 32,
+        "bist_length": 512,
+        "bist_random": False,
+    },
+    "test_2": {
+        "sdram_module": 'MT46V32M16',
+        "sdram_data_width": 32,
+        "bist_length": 512,
+        "bist_random": False,
+    },
+    "test_3": {
+        "sdram_module": 'MT46V32M16',
+        "sdram_data_width": 32,
+        "bist_length": 2048,
+        "bist_random": False,
+    },
+    "test_4": {
+        "sdram_module": 'MT47H64M16',
+        "sdram_data_width": 32,
+        "bist_length": 1024,
+        "bist_random": False,
+    },
+    "test_5": {
+        "sdram_module": 'MT47H64M16',
+        "sdram_data_width": 16,
+        "bist_length": 1024,
+        "bist_random": False,
+    },
+    "test_6": {
+        "sdram_module": 'MT41K128M16',
+        "sdram_data_width": 16,
+        "bist_length": 1024,
+        "bist_random": False,
+    },
+    "test_7": {
+        "sdram_module": 'MT41K128M16',
+        "sdram_data_width": 32,
+        "bist_length": 1024,
+        "bist_random": False,
+    },
+}
diff --git a/test/run_benchmarks.py b/test/run_benchmarks.py
index ee2dd19..0eb7440 100755
--- a/test/run_benchmarks.py
+++ b/test/run_benchmarks.py
@@ -5,6 +5,8 @@
 import os
 import re
+import yaml
+import argparse
 import subprocess
 
 from litedram.common import Settings
 
@@ -25,13 +27,7 @@ def human_readable(value):
         value /= 1024
     return value, prefix
 
-
-def run_benchmark(args):
-    benchmark_script = os.path.join(os.path.dirname(__file__), 'benchmark.py')
-    command = ['python3', benchmark_script, *args]
-    proc = subprocess.run(command, capture_output=True, text=True, check=True)
-    return proc.stdout
-
+# Benchmark configuration --------------------------------------------------------------------------
 
 class BenchmarkConfiguration(Settings):
     def __init__(self, sdram_module, sdram_data_width, bist_length, bist_random):
@@ -49,6 +45,14 @@ class BenchmarkConfiguration(Settings):
             args.extend([arg_string, str(value)])
         return args
 
+    @classmethod
+    def load_yaml(cls, yaml_file):
+        with open(yaml_file) as f:
+            description = yaml.safe_load(f)
+        configurations = {name: cls(**desc) for name, desc in description.items()}
+        return configurations
+
+# Benchmark results --------------------------------------------------------------------------------
 
 class BenchmarkResult:
     def __init__(self, config, output):
@@ -94,22 +98,12 @@ class BenchmarkResult:
     def read_efficiency(self):
         return self.cmd_count() / self.checker_ticks
 
-
-configurations = [
-    BenchmarkConfiguration('MT48LC16M16', 32, 4096, True),
-    BenchmarkConfiguration('MT48LC16M16', 32, 512, False),
-    BenchmarkConfiguration('MT46V32M16', 32, 512, False),
-    BenchmarkConfiguration('MT46V32M16', 32, 2048, False),
-    BenchmarkConfiguration('MT47H64M16', 32, 1024, False),
-    BenchmarkConfiguration('MT47H64M16', 16, 1024, False),
-    BenchmarkConfiguration('MT41K128M16', 16, 1024, False),
-    BenchmarkConfiguration('MT41K128M16', 32, 1024, False),
-]
-
+# Results summary ----------------------------------------------------------------------------------
 
 class ResultsSummary:
     def __init__(self, results):
         self.results = results
+        # convert results, which map config -> metrics, into a mapping metric -> (config -> result)
         self.write_bandwidth = self.collect('write_bandwidth')
         self.read_bandwidth = self.collect('read_bandwidth')
         self.write_efficiency = self.collect('write_efficiency')
@@ -145,12 +139,41 @@ class ResultsSummary:
     def plot(self):
         raise NotImplementedError()
 
+# Run ----------------------------------------------------------------------------------------------
+
+def run_benchmark(args):
+    benchmark_script = os.path.join(os.path.dirname(__file__), 'benchmark.py')
+    command = ['python3', benchmark_script, *args]
+    proc = subprocess.run(command, capture_output=True, text=True, check=True)
+    return proc.stdout
+
 
 def main():
+    parser = argparse.ArgumentParser(
+        description='Run LiteDRAM benchmarks and collect the results')
+    parser.add_argument('--yaml', required=True, help='Load benchmark configurations from YAML file')
+    parser.add_argument('--names', nargs='*', help='Limit benchmarks to given names')
+    parser.add_argument('--regex', help='Limit benchmarks to names matching the regex')
+    parser.add_argument('--not-regex', help='Limit benchmarks to names not matching the regex')
+    args = parser.parse_args()
+
+    # load and filter configurations
+    configurations = BenchmarkConfiguration.load_yaml(args.yaml)
+    filters = []
+    if args.regex:
+        filters.append(lambda name_value: re.search(args.regex, name_value[0]))
+    if args.not_regex:
+        filters.append(lambda name_value: not re.search(args.not_regex, name_value[0]))
+    if args.names:
+        filters.append(lambda name_value: name_value[0] in args.names)
+    for f in filters:
+        configurations = dict(filter(f, configurations.items()))
+
+    # run the benchmarks
    results = []
-    for config in configurations:
+    for name, config in configurations.items():
         args = config.as_args()
-        print('Benchmark: %s' % ' '.join(args))
+        print('{}: {}'.format(name, ' '.join(args)))
         result = BenchmarkResult(config, run_benchmark(args))
         results.append(result)
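
With the file and flags introduced above, the script can be invoked, for example, as:

    python3 run_benchmarks.py --yaml benchmarks.yml --regex 'test_[0-3]'
    python3 run_benchmarks.py --yaml benchmarks.yml --names test_0 test_7

The --regex, --not-regex and --names filters compose: each one is applied in turn to the configuration dict, so only benchmarks that pass all of the given filters are run.

Note that benchmarks.yml is written as a YAML flow mapping rather than block style; yaml.safe_load() still resolves the single-quoted module names and the capitalized True/False to plain Python str/bool values, which is what cls(**desc) in load_yaml() expects. A minimal standalone sketch of that behavior (the inline snippet string below is an assumption mirroring one entry of the file above, not code from the patch):

    import yaml

    # One entry written in the same flow style as benchmarks.yml.
    snippet = """
    {
        "test_0": {
            "sdram_module": 'MT48LC16M16',
            "sdram_data_width": 32,
            "bist_length": 4096,
            "bist_random": True,
        },
    }
    """

    # safe_load() returns a plain dict of dicts with native Python types,
    # exactly what BenchmarkConfiguration.load_yaml() expands via cls(**desc).
    description = yaml.safe_load(snippet)
    for name, desc in description.items():
        print(name, desc)
    # -> test_0 {'sdram_module': 'MT48LC16M16', 'sdram_data_width': 32,
    #            'bist_length': 4096, 'bist_random': True}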