test: keep benchmark failures in data frame and filter out when needed

Jędrzej Boczar 2020-02-14 17:17:22 +01:00
parent bba49f2df8
commit c6cc0e068d
1 changed file with 33 additions and 24 deletions
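
The change in a nutshell: instead of dropping failed runs in __init__ and tracking them in a separate failed_configs list, every run keeps its row in the data frame, and a failure shows up as NaN in the result columns, to be masked out on demand. A minimal standalone sketch of that pattern (column names follow the diff; the data itself is made up):

    import numpy as np
    import pandas as pd

    # Failed benchmarks keep their row; missing results become NaN.
    runs = pd.DataFrame({
        'name':            ['seq_write', 'rand_read'],
        'generator_ticks': [1024, None],  # None -> NaN marks a failed run
    })
    failed = pd.isna(runs['generator_ticks'])
    print(runs[~failed])  # successful runs, used for the regular summaries
    print(runs[failed])   # failures, still available for a failures table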

@@ -230,9 +230,12 @@ class ResultsSummary:
     def __init__(self, run_data, plots_dir='plots'):
         self.plots_dir = plots_dir
-        # filter out failures
-        self.failed_configs = [data.config for data in run_data if data.result is None]
-        run_data = [data for data in run_data if data.result is not None]
+        # because .sdram_controller_data_width may fail for unimplemented modules
+        def except_none(func):
+            try:
+                return func()
+            except Exception:
+                return None
         # gather results into tabular data
         column_mappings = {
@@ -246,14 +249,14 @@
             'bist_random': lambda d: getattr(d.config.access_pattern, 'bist_random', None),
             'pattern_file': lambda d: getattr(d.config.access_pattern, 'pattern_file', None),
             'length': lambda d: d.config.length,
-            'generator_ticks': lambda d: d.result.generator_ticks,
-            'checker_errors': lambda d: d.result.checker_errors,
-            'checker_ticks': lambda d: d.result.checker_ticks,
-            'ctrl_data_width': lambda d: d.config.sdram_controller_data_width,
+            'generator_ticks': lambda d: getattr(d.result, 'generator_ticks', None),  # None means benchmark failure
+            'checker_errors': lambda d: getattr(d.result, 'checker_errors', None),
+            'checker_ticks': lambda d: getattr(d.result, 'checker_ticks', None),
+            'ctrl_data_width': lambda d: except_none(lambda: d.config.sdram_controller_data_width),
             'clk_freq': lambda d: d.config.sdram_clk_freq,
         }
         columns = {name: [mapping(data) for data in run_data] for name, mapping in column_mappings.items()}
-        self.df = df = pd.DataFrame(columns)
+        self._df = df = pd.DataFrame(columns)
         # replace None with NaN
         df.fillna(value=np.nan, inplace=True)
@@ -300,6 +303,16 @@ class ResultsSummary:
             'read_latency': ScalarFormatter(),
         }

+    def df(self, ok=True, failures=False):
+        is_failure = lambda df: pd.isna(df['generator_ticks']) | pd.isna(df['checker_ticks']) | pd.isna(df['checker_errors'])
+        df = self._df
+        if not ok:  # keep only the failed runs
+            df = df[is_failure(df)]
+        if not failures:  # keep only the successful runs
+            df = df[~is_failure(df)]
+        return df
+
     def header(self, text):
         return '===> {}'.format(text)
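
A usage sketch for the accessor added above (hypothetical snippet; assumes a summary = ResultsSummary(run_data) instance as constructed in main()): the default call hides failed runs, and the two flags select which subset comes back.

    ok_runs    = summary.df()                         # default: failures filtered out
    failures   = summary.df(ok=False, failures=True)  # only the failed benchmarks
    everything = summary.df(ok=True, failures=True)   # the full, unfiltered frame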
@@ -309,9 +322,9 @@
         print(self.header(title + ':'))
         print(df)

-    def get_summary(self, mask=None, columns=None, column_formatting=None, sort_kwargs=None):
+    def get_summary(self, df, mask=None, columns=None, column_formatting=None, sort_kwargs=None):
         # work on a copy
-        df = self.df.copy()
+        df = df.copy()
         if sort_kwargs is not None:
             df = df.sort_values(**sort_kwargs)
@@ -360,7 +373,7 @@
         ))

     def groupped_results(self, formatters=None):
-        df = self.df
+        df = self.df()
         if formatters is None:
             formatters = self.text_formatters
@@ -368,27 +381,32 @@
         common_columns = ['name', 'sdram_module', 'sdram_data_width', 'bist_alternating', 'num_generators', 'num_checkers']
         latency_columns = ['write_latency', 'read_latency']
         performance_columns = ['write_bandwidth', 'read_bandwidth', 'write_efficiency', 'read_efficiency']
+        failure_columns = ['bist_length', 'bist_random', 'pattern_file', 'length', 'generator_ticks', 'checker_errors', 'checker_ticks']

-        yield 'Latency', self.get_summary(
+        yield 'Latency', self.get_summary(df,
             mask=df['is_latency'] == True,
             columns=common_columns + latency_columns,
             column_formatting=formatters,
         )
-        yield 'Custom access pattern', self.get_summary(
+        yield 'Custom access pattern', self.get_summary(df,
             mask=(df['is_latency'] == False) & (~pd.isna(df['pattern_file'])),
             columns=common_columns + ['length', 'pattern_file'] + performance_columns,
             column_formatting=formatters,
         ),
-        yield 'Sequential access pattern', self.get_summary(
+        yield 'Sequential access pattern', self.get_summary(df,
             mask=(df['is_latency'] == False) & (pd.isna(df['pattern_file'])) & (df['bist_random'] == False),
             columns=common_columns + ['bist_length'] + performance_columns,  # could be length
             column_formatting=formatters,
         ),
-        yield 'Random access pattern', self.get_summary(
+        yield 'Random access pattern', self.get_summary(df,
             mask=(df['is_latency'] == False) & (pd.isna(df['pattern_file'])) & (df['bist_random'] == True),
             columns=common_columns + ['bist_length'] + performance_columns,
             column_formatting=formatters,
         ),
+        yield 'Failures', self.get_summary(self.df(ok=False, failures=True),
+            columns=common_columns + failure_columns,
+            column_formatting=None,
+        ),

     def plot_summary(self, plots_dir='plots', backend='Agg', theme='default', save_format='png', **savefig_kw):
         matplotlib.use(backend)
@@ -441,14 +459,6 @@ class ResultsSummary:
         return axis

-    def failures_summary(self):
-        if len(self.failed_configs) > 0:
-            print(self.header('Failures:'))
-            for config in self.failed_configs:
-                print(' {}: {}'.format(config.name, config.as_args()))
-        else:
-            print(self.header('All benchmarks ok.'))

 # Run ----------------------------------------------------------------------------------------------

 class RunCache(list):
@@ -618,7 +628,6 @@ def main(argv=None):
     if _summary:
         summary = ResultsSummary(run_data)
         summary.text_summary()
-        summary.failures_summary()
         if args.html:
             summary.html_summary(args.html_output_dir)
         if args.plot:
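
For context, a sketch of the new calling convention in groupped_results: get_summary now takes the frame explicitly, so the 'Failures' group can be fed from self.df(ok=False, failures=True) while the other groups share the failure-free default. A simplified, self-contained version (illustrative data; get_summary reduced to just the mask/column selection it performs in the diff):

    import numpy as np
    import pandas as pd

    df = pd.DataFrame({
        'name':            ['latency_1', 'seq_1', 'broken_1'],
        'is_latency':      [True, False, False],
        'generator_ticks': [100.0, 2048.0, np.nan],  # NaN marks the failed run
    })

    def get_summary(df, mask=None, columns=None):
        df = df.copy()  # work on a copy, as in the diff
        if mask is not None:
            df = df[mask]
        return df[columns] if columns is not None else df

    ok = df[~pd.isna(df['generator_ticks'])]
    print(get_summary(ok, mask=ok['is_latency'] == True, columns=['name']))
    print(get_summary(df[pd.isna(df['generator_ticks'])], columns=['name']))  # failures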