test: load benchmark configurations from YAML file

Author: Jędrzej Boczar, 2020-01-30 15:04:47 +01:00
parent 096de78c63
commit f6973aa9d7
2 changed files with 94 additions and 21 deletions

test/benchmarks.yml (new file, 50 lines added)

@@ -0,0 +1,50 @@
{
    "test_0": {
        "sdram_module": 'MT48LC16M16',
        "sdram_data_width": 32,
        "bist_length": 4096,
        "bist_random": True,
    },
    "test_1": {
        "sdram_module": 'MT48LC16M16',
        "sdram_data_width": 32,
        "bist_length": 512,
        "bist_random": False,
    },
    "test_2": {
        "sdram_module": 'MT46V32M16',
        "sdram_data_width": 32,
        "bist_length": 512,
        "bist_random": False,
    },
    "test_3": {
        "sdram_module": 'MT46V32M16',
        "sdram_data_width": 32,
        "bist_length": 2048,
        "bist_random": False,
    },
    "test_4": {
        "sdram_module": 'MT47H64M16',
        "sdram_data_width": 32,
        "bist_length": 1024,
        "bist_random": False,
    },
    "test_5": {
        "sdram_module": 'MT47H64M16',
        "sdram_data_width": 16,
        "bist_length": 1024,
        "bist_random": False,
    },
    "test_6": {
        "sdram_module": 'MT41K128M16',
        "sdram_data_width": 16,
        "bist_length": 1024,
        "bist_random": False,
    },
    "test_7": {
        "sdram_module": 'MT41K128M16',
        "sdram_data_width": 32,
        "bist_length": 1024,
        "bist_random": False,
    },
}
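Although benchmarks.yml is written in YAML flow-mapping style (so it reads like a Python dict), yaml.safe_load parses it directly: the single-quoted values stay strings and True/False become booleans. A minimal loading sketch, with the path assumed relative to the repository root:

import yaml

with open('test/benchmarks.yml') as f:
    description = yaml.safe_load(f)

print(description['test_0'])
# -> {'sdram_module': 'MT48LC16M16', 'sdram_data_width': 32, 'bist_length': 4096, 'bist_random': True}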

test/run_benchmarks.py

@@ -5,6 +5,8 @@
import os
import re
import yaml
import argparse
import subprocess
from litedram.common import Settings
@@ -25,13 +27,7 @@ def human_readable(value):
        value /= 1024
    return value, prefix
def run_benchmark(args):
    benchmark_script = os.path.join(os.path.dirname(__file__), 'benchmark.py')
    command = ['python3', benchmark_script, *args]
    proc = subprocess.run(command, capture_output=True, text=True, check=True)
    return proc.stdout
# Benchmark configuration --------------------------------------------------------------------------
class BenchmarkConfiguration(Settings):
    def __init__(self, sdram_module, sdram_data_width, bist_length, bist_random):
@@ -49,6 +45,14 @@ class BenchmarkConfiguration(Settings):
            args.extend([arg_string, str(value)])
        return args
    @classmethod
    def load_yaml(cls, yaml_file):
        with open(yaml_file) as f:
            description = yaml.safe_load(f)
        configurations = {name: cls(**desc) for name, desc in description.items()}
        return configurations
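The new load_yaml classmethod builds one BenchmarkConfiguration per top-level YAML entry via cls(**desc) and returns a name-to-configuration dict. A short usage sketch (file path assumed):

configurations = BenchmarkConfiguration.load_yaml('test/benchmarks.yml')
for name, config in configurations.items():
    # each value is a full BenchmarkConfiguration; as_args() turns it back into CLI arguments
    print(name, config.as_args())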
# Benchmark results --------------------------------------------------------------------------------
class BenchmarkResult:
    def __init__(self, config, output):
@@ -94,22 +98,12 @@ class BenchmarkResult:
    def read_efficiency(self):
        return self.cmd_count() / self.checker_ticks
configurations = [
    BenchmarkConfiguration('MT48LC16M16', 32, 4096, True),
    BenchmarkConfiguration('MT48LC16M16', 32, 512, False),
    BenchmarkConfiguration('MT46V32M16', 32, 512, False),
    BenchmarkConfiguration('MT46V32M16', 32, 2048, False),
    BenchmarkConfiguration('MT47H64M16', 32, 1024, False),
    BenchmarkConfiguration('MT47H64M16', 16, 1024, False),
    BenchmarkConfiguration('MT41K128M16', 16, 1024, False),
    BenchmarkConfiguration('MT41K128M16', 32, 1024, False),
]
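The hardcoded list removed here corresponds entry-for-entry to test/benchmarks.yml; as a sketch, the first element and the "test_0" entry describe the same configuration (keyword names taken from __init__ above):

# equivalent constructions: old positional style vs. the keyword style used by load_yaml
BenchmarkConfiguration('MT48LC16M16', 32, 4096, True)
BenchmarkConfiguration(**{'sdram_module': 'MT48LC16M16', 'sdram_data_width': 32,
                          'bist_length': 4096, 'bist_random': True})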
# Results summary ----------------------------------------------------------------------------------
class ResultsSummary:
    def __init__(self, results):
        self.results = results
        # convert results, which map config->metrics to a mapping metric->(config->result)
        self.write_bandwidth = self.collect('write_bandwidth')
        self.read_bandwidth = self.collect('read_bandwidth')
        self.write_efficiency = self.collect('write_efficiency')
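collect() itself is outside this hunk; a hypothetical sketch of the inversion described by the comment above, assuming each BenchmarkResult keeps its config and exposes every metric as a method (as read_efficiency is):

def collect_sketch(results, metric):
    # hypothetical helper, not the actual collect(): metric name -> {config: value}
    return {result.config: getattr(result, metric)() for result in results}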
@@ -145,12 +139,41 @@ class ResultsSummary:
    def plot(self):
        raise NotImplementedError()
# Run ----------------------------------------------------------------------------------------------
def run_benchmark(args):
    benchmark_script = os.path.join(os.path.dirname(__file__), 'benchmark.py')
    command = ['python3', benchmark_script, *args]
    proc = subprocess.run(command, capture_output=True, text=True, check=True)
    return proc.stdout
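run_benchmark shells out to benchmark.py (located next to this script) and returns the captured stdout that BenchmarkResult later parses. A rough sketch for a single configuration; the exact option names are an assumption about what as_args produces:

config = BenchmarkConfiguration.load_yaml('test/benchmarks.yml')['test_0']
output = run_benchmark(config.as_args())
# roughly: python3 test/benchmark.py --sdram-module MT48LC16M16 --sdram-data-width 32 ... (assumed flag names)
result = BenchmarkResult(config, output)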
def main():
    parser = argparse.ArgumentParser(
        description='Run LiteDRAM benchmarks and collect the results')
    parser.add_argument('--yaml', required=True, help='Load benchmark configurations from YAML file')
    parser.add_argument('--names', nargs='*', help='Limit benchmarks to given names')
    parser.add_argument('--regex', help='Limit benchmarks to names matching the regex')
    parser.add_argument('--not-regex', help='Limit benchmarks to names not matching the regex')
    args = parser.parse_args()
    # load and filter configurations
    configurations = BenchmarkConfiguration.load_yaml(args.yaml)
    filters = []
    if args.regex:
        filters.append(lambda name_value: re.search(args.regex, name_value[0]))
    if args.not_regex:
        filters.append(lambda name_value: not re.search(args.not_regex, name_value[0]))
    if args.names:
        filters.append(lambda name_value: name_value[0] in args.names)
    for f in filters:
        configurations = dict(filter(f, configurations.items()))
    # run the benchmarks
    results = []
-    for config in configurations:
+    for name, config in configurations.items():
        args = config.as_args()
-        print('Benchmark: %s' % ' '.join(args))
+        print('{}: {}'.format(name, ' '.join(args)))
        result = BenchmarkResult(config, run_benchmark(args))
        results.append(result)
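A typical invocation, assuming the runner lives at test/run_benchmarks.py, would be 'python3 test/run_benchmarks.py --yaml test/benchmarks.yml --regex test_[0-3]': it loads all eight configurations, keeps only the matching names, and prints one 'name: args' line per benchmark. The name filtering is plain dict filtering over (name, config) pairs; a standalone sketch:

import re

configurations = {'test_0': ..., 'test_1': ..., 'test_5': ...}   # stand-ins for loaded configurations
filters = [
    lambda name_value: re.search(r'test_[0-3]', name_value[0]),    # --regex test_[0-3]
    lambda name_value: not re.search(r'test_1', name_value[0]),    # --not-regex test_1
]
for f in filters:
    configurations = dict(filter(f, configurations.items()))
print(sorted(configurations))   # ['test_0']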