author     Vladimir Sementsov-Ogievskiy  2020-10-21 16:58:55 +0200
committer  Max Reitz                     2020-12-18 12:35:55 +0100
commit     8e979febb01222edb1e53fb61a93a4c803924869 (patch)
tree       b372c48742c3c553233777d47ae508117a23f5ec /scripts/simplebench
parent     simplebench: rename ascii() to results_to_text() (diff)
simplebench: move results_to_text() into separate file
Let's keep the view part separate: this way it will be easier to improve it in the following commits.

Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
Message-Id: <20201021145859.11201-18-vsementsov@virtuozzo.com>
Reviewed-by: Max Reitz <mreitz@redhat.com>
Signed-off-by: Max Reitz <mreitz@redhat.com>
Diffstat (limited to 'scripts/simplebench')
-rw-r--r--  scripts/simplebench/bench-example.py     3
-rwxr-xr-x  scripts/simplebench/bench_write_req.py   3
-rw-r--r--  scripts/simplebench/results_to_text.py  48
-rw-r--r--  scripts/simplebench/simplebench.py      31
4 files changed, 52 insertions(+), 33 deletions(-)
diff --git a/scripts/simplebench/bench-example.py b/scripts/simplebench/bench-example.py
index f24cf22fe9..d9c7f7bc17 100644
--- a/scripts/simplebench/bench-example.py
+++ b/scripts/simplebench/bench-example.py
@@ -19,6 +19,7 @@
#
import simplebench
+from results_to_text import results_to_text
from bench_block_job import bench_block_copy, drv_file, drv_nbd
@@ -77,4 +78,4 @@ test_envs = [
]
result = simplebench.bench(bench_func, test_envs, test_cases, count=3)
-print(simplebench.results_to_text(result))
+print(results_to_text(result))
diff --git a/scripts/simplebench/bench_write_req.py b/scripts/simplebench/bench_write_req.py
index e175bcd7a4..da601ea2fe 100755
--- a/scripts/simplebench/bench_write_req.py
+++ b/scripts/simplebench/bench_write_req.py
@@ -26,6 +26,7 @@ import sys
import os
import subprocess
import simplebench
+from results_to_text import results_to_text
def bench_func(env, case):
@@ -167,4 +168,4 @@ if __name__ == '__main__':
result = simplebench.bench(bench_func, test_envs, test_cases, count=3,
initial_run=False)
- print(simplebench.results_to_text(result))
+ print(results_to_text(result))
diff --git a/scripts/simplebench/results_to_text.py b/scripts/simplebench/results_to_text.py
new file mode 100644
index 0000000000..58d909ffd9
--- /dev/null
+++ b/scripts/simplebench/results_to_text.py
@@ -0,0 +1,48 @@
+# Simple benchmarking framework
+#
+# Copyright (c) 2019 Virtuozzo International GmbH.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+
+def result_to_text(result):
+ """Return text representation of bench_one() returned dict."""
+ if 'average' in result:
+ s = '{:.2f} +- {:.2f}'.format(result['average'], result['stdev'])
+ if 'n-failed' in result:
+ s += '\n({} failed)'.format(result['n-failed'])
+ return s
+ else:
+ return 'FAILED'
+
+
+def results_to_text(results):
+ """Return text representation of bench() returned dict."""
+ from tabulate import tabulate
+
+ dim = None
+ tab = [[""] + [c['id'] for c in results['envs']]]
+ for case in results['cases']:
+ row = [case['id']]
+ for env in results['envs']:
+ res = results['tab'][case['id']][env['id']]
+ if dim is None:
+ dim = res['dimension']
+ else:
+ assert dim == res['dimension']
+ row.append(result_to_text(res))
+ tab.append(row)
+
+ return f'All results are in {dim}\n\n' + tabulate(tab)
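For reference, a minimal usage sketch of the new module (not part of this commit): it hand-builds a dict with the same shape that results_to_text() reads above. The ids, numbers and the 'seconds' dimension are made up for illustration, and running it requires results_to_text.py on the import path plus the tabulate package.

    from results_to_text import results_to_text

    # 'envs' and 'cases' carry ids; 'tab' maps case id -> env id -> one result.
    results = {
        'envs': [{'id': 'env-a'}, {'id': 'env-b'}],
        'cases': [{'id': 'case-1'}],
        'tab': {
            'case-1': {
                'env-a': {'dimension': 'seconds', 'average': 12.3, 'stdev': 0.4},
                'env-b': {'dimension': 'seconds'},  # no 'average' -> rendered as FAILED
            },
        },
    }

    print(results_to_text(results))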
diff --git a/scripts/simplebench/simplebench.py b/scripts/simplebench/simplebench.py
index aa74b78a04..f61513af90 100644
--- a/scripts/simplebench/simplebench.py
+++ b/scripts/simplebench/simplebench.py
@@ -79,17 +79,6 @@ def bench_one(test_func, test_env, test_case, count=5, initial_run=True):
return result
-def result_to_text(result):
- """Return text representation of bench_one() returned dict."""
- if 'average' in result:
- s = '{:.2f} +- {:.2f}'.format(result['average'], result['stdev'])
- if 'n-failed' in result:
- s += '\n({} failed)'.format(result['n-failed'])
- return s
- else:
- return 'FAILED'
-
-
def bench(test_func, test_envs, test_cases, *args, **vargs):
"""Fill benchmark table
@@ -125,23 +114,3 @@ def bench(test_func, test_envs, test_cases, *args, **vargs):
print('Done')
return results
-
-
-def results_to_text(results):
- """Return text representation of bench() returned dict."""
- from tabulate import tabulate
-
- dim = None
- tab = [[""] + [c['id'] for c in results['envs']]]
- for case in results['cases']:
- row = [case['id']]
- for env in results['envs']:
- res = results['tab'][case['id']][env['id']]
- if dim is None:
- dim = res['dimension']
- else:
- assert dim == res['dimension']
- row.append(result_to_text(res))
- tab.append(row)
-
- return f'All results are in {dim}\n\n' + tabulate(tab)