author     Vladimir Sementsov-Ogievskiy  2020-10-21 16:58:52 +0200
committer  Max Reitz                     2020-12-18 12:35:55 +0100
commit     4a44554a65dfc5ccd5dad428981d0ab2959b4b8f (patch)
tree       823eb56b7d82f6d1a6c2b4b0510838fee91c51f7
parent     scripts/simplebench: fix grammar: s/successed/succeeded/ (diff)
scripts/simplebench: support iops
Support benchmarks that return iops rather than seconds. We'll use it
for a new test to be added next.

Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
Message-Id: <20201021145859.11201-15-vsementsov@virtuozzo.com>
Reviewed-by: Max Reitz <mreitz@redhat.com>
Signed-off-by: Max Reitz <mreitz@redhat.com>
-rw-r--r--  scripts/simplebench/simplebench.py | 38
1 file changed, 28 insertions(+), 10 deletions(-)
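For illustration only, not part of the patch: a minimal sketch of a test_func
written against the extended interface described in the diff below. The name
bench_write and the timed loop are hypothetical stand-ins.

    import time

    def bench_write(env, case):
        # Hypothetical test_func: run a fixed number of operations and
        # report the rate as 'iops' instead of a raw 'seconds' duration.
        n_ops = 1000
        start = time.time()
        for _ in range(n_ops):
            pass  # stand-in for one I/O operation using env and case
        seconds = time.time() - start
        if seconds == 0:
            return {'error': 'interval too short to measure'}
        # 'iops' is the main result; 'seconds' is extra information
        return {'iops': n_ops / seconds, 'seconds': seconds}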
diff --git a/scripts/simplebench/simplebench.py b/scripts/simplebench/simplebench.py
index 2445932fc2..2251cd34ea 100644
--- a/scripts/simplebench/simplebench.py
+++ b/scripts/simplebench/simplebench.py
@@ -24,9 +24,12 @@ def bench_one(test_func, test_env, test_case, count=5, initial_run=True):
 
     test_func -- benchmarking function with prototype
                  test_func(env, case), which takes test_env and test_case
-                 arguments and returns {'seconds': int} (which is benchmark
-                 result) on success and {'error': str} on error. Returned
-                 dict may contain any other additional fields.
+                 arguments and on success returns dict with 'seconds' or
+                 'iops' (or both) fields, specifying the benchmark result.
+                 If both 'iops' and 'seconds' provided, the 'iops' is
+                 considered the main, and 'seconds' is just an additional
+                 info. On failure test_func should return {'error': str}.
+                 Returned dict may contain any other additional fields.
     test_env -- test environment - opaque first argument for test_func
     test_case -- test case - opaque second argument for test_func
     count -- how many times to call test_func, to calculate average
@@ -34,8 +37,9 @@ def bench_one(test_func, test_env, test_case, count=5, initial_run=True):
 
     Returns dict with the following fields:
         'runs': list of test_func results
-        'average': average seconds per run (exists only if at least one run
-                   succeeded)
+        'dimension': dimension of results, may be 'seconds' or 'iops'
+        'average': average value (iops or seconds) per run (exists only if at
+                   least one run succeeded)
         'delta': maximum delta between test_func result and the average
                  (exists only if at least one run succeeded)
         'n-failed': number of failed runs (exists only if at least one run
@@ -54,11 +58,19 @@ def bench_one(test_func, test_env, test_case, count=5, initial_run=True):
 
     result = {'runs': runs}
 
-    succeeded = [r for r in runs if ('seconds' in r)]
+    succeeded = [r for r in runs if ('seconds' in r or 'iops' in r)]
     if succeeded:
-        avg = sum(r['seconds'] for r in succeeded) / len(succeeded)
+        if 'iops' in succeeded[0]:
+            assert all('iops' in r for r in succeeded)
+            dim = 'iops'
+        else:
+            assert all('seconds' in r for r in succeeded)
+            assert all('iops' not in r for r in succeeded)
+            dim = 'seconds'
+        avg = sum(r[dim] for r in succeeded) / len(succeeded)
+        result['dimension'] = dim
         result['average'] = avg
-        result['delta'] = max(abs(r['seconds'] - avg) for r in succeeded)
+        result['delta'] = max(abs(r[dim] - avg) for r in succeeded)
 
     if len(succeeded) < count:
         result['n-failed'] = count - len(succeeded)
@@ -118,11 +130,17 @@ def ascii(results):
     """Return ASCII representation of bench() returned dict."""
     from tabulate import tabulate
 
+    dim = None
     tab = [[""] + [c['id'] for c in results['envs']]]
     for case in results['cases']:
         row = [case['id']]
         for env in results['envs']:
-            row.append(ascii_one(results['tab'][case['id']][env['id']]))
+            res = results['tab'][case['id']][env['id']]
+            if dim is None:
+                dim = res['dimension']
+            else:
+                assert dim == res['dimension']
+            row.append(ascii_one(res))
         tab.append(row)
-    return tabulate(tab)
+    return f'All results are in {dim}\n\n' + tabulate(tab)
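A hedged usage sketch of the new result fields, assuming the hypothetical
bench_write above and that scripts/simplebench is importable:

    from simplebench import bench_one

    # env and case are opaque to bench_one, so None suffices here
    result = bench_one(bench_write, None, None, count=3)
    print(result['dimension'])  # 'iops', since bench_write reports iops
    print(result['average'], '+/-', result['delta'])

ascii() would then prefix its table with 'All results are in iops' and assert
that every cell of the table reports the same dimension.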