Diffstat (limited to 'scripts')
-rw-r--r--  scripts/simplebench/bench-example.py      3
-rwxr-xr-x  scripts/simplebench/bench_prealloc.py   132
-rwxr-xr-x  scripts/simplebench/bench_write_req.py    3
-rwxr-xr-x  scripts/simplebench/results_to_text.py  126
-rw-r--r--  scripts/simplebench/simplebench.py       66
5 files changed, 289 insertions, 41 deletions
diff --git a/scripts/simplebench/bench-example.py b/scripts/simplebench/bench-example.py
index c642a5b891..d9c7f7bc17 100644
--- a/scripts/simplebench/bench-example.py
+++ b/scripts/simplebench/bench-example.py
@@ -19,6 +19,7 @@
#
import simplebench
+from results_to_text import results_to_text
from bench_block_job import bench_block_copy, drv_file, drv_nbd
@@ -77,4 +78,4 @@ test_envs = [
]
result = simplebench.bench(bench_func, test_envs, test_cases, count=3)
-print(simplebench.ascii(result))
+print(results_to_text(result))
diff --git a/scripts/simplebench/bench_prealloc.py b/scripts/simplebench/bench_prealloc.py
new file mode 100755
index 0000000000..85f588c597
--- /dev/null
+++ b/scripts/simplebench/bench_prealloc.py
@@ -0,0 +1,132 @@
+#!/usr/bin/env python3
+#
+# Benchmark preallocate filter
+#
+# Copyright (c) 2020 Virtuozzo International GmbH.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+
+import sys
+import os
+import subprocess
+import re
+import json
+
+import simplebench
+from results_to_text import results_to_text
+
+
+def qemu_img_bench(args):
+    p = subprocess.run(args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
+                       universal_newlines=True)
+
+    if p.returncode == 0:
+        try:
+            m = re.search(r'Run completed in (\d+.\d+) seconds.', p.stdout)
+            return {'seconds': float(m.group(1))}
+        except Exception:
+            return {'error': f'failed to parse qemu-img output: {p.stdout}'}
+    else:
+        return {'error': f'qemu-img failed: {p.returncode}: {p.stdout}'}
+
+
+def bench_func(env, case):
+    fname = f"{case['dir']}/prealloc-test.qcow2"
+    try:
+        os.remove(fname)
+    except OSError:
+        pass
+
+    subprocess.run([env['qemu-img-binary'], 'create', '-f', 'qcow2', fname,
+                    '16G'], stdout=subprocess.DEVNULL,
+                   stderr=subprocess.DEVNULL, check=True)
+
+    args = [env['qemu-img-binary'], 'bench', '-c', str(case['count']),
+            '-d', '64', '-s', case['block-size'], '-t', 'none', '-n', '-w']
+    if env['prealloc']:
+        args += ['--image-opts',
+                 'driver=qcow2,file.driver=preallocate,file.file.driver=file,'
+                 f'file.file.filename={fname}']
+    else:
+        args += ['-f', 'qcow2', fname]
+
+    return qemu_img_bench(args)
+
+
+def auto_count_bench_func(env, case):
+    case['count'] = 100
+    while True:
+        res = bench_func(env, case)
+        if 'error' in res:
+            return res
+
+        if res['seconds'] >= 1:
+            break
+
+        case['count'] *= 10
+
+    if res['seconds'] < 5:
+        case['count'] = round(case['count'] * 5 / res['seconds'])
+        res = bench_func(env, case)
+        if 'error' in res:
+            return res
+
+    res['iops'] = case['count'] / res['seconds']
+    return res
+
+
+if __name__ == '__main__':
+    if len(sys.argv) < 2:
+        print(f'USAGE: {sys.argv[0]} <qemu-img binary> '
+              'DISK_NAME:DIR_PATH ...')
+        exit(1)
+
+    qemu_img = sys.argv[1]
+
+    envs = [
+        {
+            'id': 'no-prealloc',
+            'qemu-img-binary': qemu_img,
+            'prealloc': False
+        },
+        {
+            'id': 'prealloc',
+            'qemu-img-binary': qemu_img,
+            'prealloc': True
+        }
+    ]
+
+    aligned_cases = []
+    unaligned_cases = []
+
+    for disk in sys.argv[2:]:
+        name, path = disk.split(':')
+        aligned_cases.append({
+            'id': f'{name}, aligned sequential 16k',
+            'block-size': '16k',
+            'dir': path
+        })
+        unaligned_cases.append({
+            'id': f'{name}, unaligned sequential 64k',
+            'block-size': '16k',
+            'dir': path
+        })
+
+    result = simplebench.bench(auto_count_bench_func, envs,
+                               aligned_cases + unaligned_cases, count=5)
+    print(results_to_text(result))
+    with open('results.json', 'w') as f:
+        json.dump(result, f, indent=4)
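For reference, auto_count_bench_func() above does not use a fixed request count: it multiplies case['count'] by ten until a run lasts at least one second, then rescales it so the final measured run lasts roughly five seconds, and reports the resulting IOPS. A standalone sketch of that calibration logic (the calibrate() helper and its run callable are illustrative, not part of the patch):

# Illustrative sketch of the calibration performed by auto_count_bench_func().
# `run` stands in for a function that benchmarks `count` requests and returns
# the elapsed seconds.
def calibrate(run, count=100, target=5.0):
    seconds = run(count)
    while seconds < 1.0:        # too short to measure reliably
        count *= 10
        seconds = run(count)
    if seconds < target:        # rescale so the final run lasts ~target seconds
        count = round(count * target / seconds)
        seconds = run(count)
    return count, count / seconds    # calibrated count and resulting IOPS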
diff --git a/scripts/simplebench/bench_write_req.py b/scripts/simplebench/bench_write_req.py
index ca1178fd68..da601ea2fe 100755
--- a/scripts/simplebench/bench_write_req.py
+++ b/scripts/simplebench/bench_write_req.py
@@ -26,6 +26,7 @@ import sys
import os
import subprocess
import simplebench
+from results_to_text import results_to_text
def bench_func(env, case):
@@ -167,4 +168,4 @@ if __name__ == '__main__':
     result = simplebench.bench(bench_func, test_envs, test_cases, count=3,
                                initial_run=False)
-    print(simplebench.ascii(result))
+    print(results_to_text(result))
diff --git a/scripts/simplebench/results_to_text.py b/scripts/simplebench/results_to_text.py
new file mode 100755
index 0000000000..d561e5e2db
--- /dev/null
+++ b/scripts/simplebench/results_to_text.py
@@ -0,0 +1,126 @@
+#!/usr/bin/env python3
+#
+# Simple benchmarking framework
+#
+# Copyright (c) 2019 Virtuozzo International GmbH.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import math
+import tabulate
+
+# We want leading whitespace for difference row cells (see below)
+tabulate.PRESERVE_WHITESPACE = True
+
+
+def format_value(x, stdev):
+    stdev_pr = stdev / x * 100
+    if stdev_pr < 1.5:
+        # don't care too much
+        return f'{x:.2g}'
+    else:
+        return f'{x:.2g} ± {math.ceil(stdev_pr)}%'
+
+
+def result_to_text(result):
+ """Return text representation of bench_one() returned dict."""
+ if 'average' in result:
+ s = format_value(result['average'], result['stdev'])
+ if 'n-failed' in result:
+ s += '\n({} failed)'.format(result['n-failed'])
+ return s
+ else:
+ return 'FAILED'
+
+
+def results_dimension(results):
+    dim = None
+    for case in results['cases']:
+        for env in results['envs']:
+            res = results['tab'][case['id']][env['id']]
+            if dim is None:
+                dim = res['dimension']
+            else:
+                assert dim == res['dimension']
+
+    assert dim in ('iops', 'seconds')
+
+    return dim
+
+
+def results_to_text(results):
+ """Return text representation of bench() returned dict."""
+ n_columns = len(results['envs'])
+ named_columns = n_columns > 2
+ dim = results_dimension(results)
+ tab = []
+
+ if named_columns:
+ # Environment columns are named A, B, ...
+ tab.append([''] + [chr(ord('A') + i) for i in range(n_columns)])
+
+ tab.append([''] + [c['id'] for c in results['envs']])
+
+ for case in results['cases']:
+ row = [case['id']]
+ case_results = results['tab'][case['id']]
+ for env in results['envs']:
+ res = case_results[env['id']]
+ row.append(result_to_text(res))
+ tab.append(row)
+
+ # Add row of difference between columns. For each column starting from
+ # B we calculate difference with all previous columns.
+ row = ['', ''] # case name and first column
+ for i in range(1, n_columns):
+ cell = ''
+ env = results['envs'][i]
+ res = case_results[env['id']]
+
+ if 'average' not in res:
+ # Failed result
+ row.append(cell)
+ continue
+
+ for j in range(0, i):
+ env_j = results['envs'][j]
+ res_j = case_results[env_j['id']]
+ cell += ' '
+
+ if 'average' not in res_j:
+ # Failed result
+ cell += '--'
+ continue
+
+ col_j = tab[0][j + 1] if named_columns else ''
+ diff_pr = round((res['average'] - res_j['average']) /
+ res_j['average'] * 100)
+ cell += f' {col_j}{diff_pr:+}%'
+ row.append(cell)
+ tab.append(row)
+
+ return f'All results are in {dim}\n\n' + tabulate.tabulate(tab)
+
+
+if __name__ == '__main__':
+    import sys
+    import json
+
+    if len(sys.argv) < 2:
+        print(f'USAGE: {sys.argv[0]} results.json')
+        exit(1)
+
+    with open(sys.argv[1]) as f:
+        print(results_to_text(json.load(f)))
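As a usage note, results_to_text() consumes the dict produced by simplebench.bench(). A minimal hand-written sketch of that structure (environment/case ids and numbers are made up for illustration, and tabulate must be installed):

from results_to_text import results_to_text

# Hand-written example of a bench() result; all ids and numbers are invented.
results = {
    'envs': [{'id': 'master'}, {'id': 'patched'}],
    'cases': [{'id': 'copy 1G'}],
    'tab': {
        'copy 1G': {
            'master': {'dimension': 'seconds', 'average': 4.2, 'stdev': 0.3},
            'patched': {'dimension': 'seconds', 'average': 3.9, 'stdev': 0.2},
        }
    }
}

# Prints a tabulate-formatted table plus a difference row comparing the
# 'patched' column against 'master' (about -7% here).
print(results_to_text(results))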
diff --git a/scripts/simplebench/simplebench.py b/scripts/simplebench/simplebench.py
index 59e7314ff6..f61513af90 100644
--- a/scripts/simplebench/simplebench.py
+++ b/scripts/simplebench/simplebench.py
@@ -18,15 +18,20 @@
 # along with this program. If not, see <http://www.gnu.org/licenses/>.
 #
 
+import statistics
+
 
 def bench_one(test_func, test_env, test_case, count=5, initial_run=True):
     """Benchmark one test-case
 
     test_func   -- benchmarking function with prototype
                    test_func(env, case), which takes test_env and test_case
-                   arguments and returns {'seconds': int} (which is benchmark
-                   result) on success and {'error': str} on error. Returned
-                   dict may contain any other additional fields.
+                   arguments and on success returns a dict with 'seconds'
+                   or 'iops' (or both) fields specifying the result. If both
+                   'iops' and 'seconds' are provided, 'iops' is considered
+                   the main result and 'seconds' is just additional info.
+                   On failure test_func should return {'error': str}. The
+                   returned dict may contain any other additional fields.
     test_env    -- test environment - opaque first argument for test_func
     test_case   -- test case - opaque second argument for test_func
     count       -- how many times to call test_func, to calculate average
@@ -34,9 +39,10 @@ def bench_one(test_func, test_env, test_case, count=5, initial_run=True):
     Returns dict with the following fields:
 
         'runs':     list of test_func results
-        'average':  average seconds per run (exists only if at least one run
-                    succeeded)
-        'delta':    maximum delta between test_func result and the average
+        'dimension': dimension of results, may be 'seconds' or 'iops'
+        'average':  average value (iops or seconds) per run (exists only if at
+                    least one run succeeded)
+        'stdev':    standard deviation of results
                     (exists only if at least one run succeeded)
         'n-failed': number of failed runs (exists only if at least one run
                     failed)
@@ -54,29 +60,25 @@ def bench_one(test_func, test_env, test_case, count=5, initial_run=True):
     result = {'runs': runs}
 
-    successed = [r for r in runs if ('seconds' in r)]
-    if successed:
-        avg = sum(r['seconds'] for r in successed) / len(successed)
-        result['average'] = avg
-        result['delta'] = max(abs(r['seconds'] - avg) for r in successed)
-
-    if len(successed) < count:
-        result['n-failed'] = count - len(successed)
+    succeeded = [r for r in runs if ('seconds' in r or 'iops' in r)]
+    if succeeded:
+        if 'iops' in succeeded[0]:
+            assert all('iops' in r for r in succeeded)
+            dim = 'iops'
+        else:
+            assert all('seconds' in r for r in succeeded)
+            assert all('iops' not in r for r in succeeded)
+            dim = 'seconds'
+        result['dimension'] = dim
+        result['average'] = statistics.mean(r[dim] for r in succeeded)
+        result['stdev'] = statistics.stdev(r[dim] for r in succeeded)
+
+    if len(succeeded) < count:
+        result['n-failed'] = count - len(succeeded)
 
     return result
 
 
-def ascii_one(result):
-    """Return ASCII representation of bench_one() returned dict."""
-    if 'average' in result:
-        s = '{:.2f} +- {:.2f}'.format(result['average'], result['delta'])
-        if 'n-failed' in result:
-            s += '\n({} failed)'.format(result['n-failed'])
-        return s
-    else:
-        return 'FAILED'
-
-
 def bench(test_func, test_envs, test_cases, *args, **vargs):
     """Fill benchmark table
@@ -112,17 +114,3 @@ def bench(test_func, test_envs, test_cases, *args, **vargs):
     print('Done')
     return results
-
-
-def ascii(results):
-    """Return ASCII representation of bench() returned dict."""
-    from tabulate import tabulate
-
-    tab = [[""] + [c['id'] for c in results['envs']]]
-    for case in results['cases']:
-        row = [case['id']]
-        for env in results['envs']:
-            row.append(ascii_one(results['tab'][case['id']][env['id']]))
-        tab.append(row)
-
-    return tabulate(tab)
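For completeness, a small sketch of the updated bench_one() contract: test_func returns 'seconds' and/or 'iops' on success (or {'error': ...} on failure), and bench_one() now reports 'dimension', 'average' and 'stdev' instead of the old 'delta'. The toy test_func below is purely illustrative:

import random
import simplebench

def toy_test_func(env, case):
    # Pretend each run takes about a second; a real test_func would run the
    # actual benchmark and may also (or instead) return 'iops'.
    return {'seconds': 1.0 + random.uniform(0, 0.1)}

res = simplebench.bench_one(toy_test_func, {'id': 'env'}, {'id': 'case'},
                            count=5, initial_run=False)
print(res['dimension'], res['average'], res['stdev'])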