From 749ad5e887e85fd51d83bf4d08ed72858f937fd9 Mon Sep 17 00:00:00 2001 From: John Snow Date: Thu, 5 Nov 2015 18:13:08 -0500 Subject: iotests: add transactional incremental backup test Test simple usage cases for using transactions to create and synchronize incremental backups. Signed-off-by: John Snow Reviewed-by: Max Reitz Reviewed-by: Stefan Hajnoczi Signed-off-by: Stefan Hajnoczi Reviewed-by: Fam Zheng Signed-off-by: Fam Zheng Message-id: 1446765200-3054-3-git-send-email-jsnow@redhat.com Signed-off-by: Stefan Hajnoczi Signed-off-by: Kevin Wolf --- tests/qemu-iotests/124 | 54 ++++++++++++++++++++++++++++++++++++++++++++++ tests/qemu-iotests/124.out | 4 ++-- 2 files changed, 56 insertions(+), 2 deletions(-) (limited to 'tests') diff --git a/tests/qemu-iotests/124 b/tests/qemu-iotests/124 index 9ccd11809f..9c1977ea82 100644 --- a/tests/qemu-iotests/124 +++ b/tests/qemu-iotests/124 @@ -36,6 +36,23 @@ def try_remove(img): pass +def transaction_action(action, **kwargs): + return { + 'type': action, + 'data': kwargs + } + + +def transaction_bitmap_clear(node, name, **kwargs): + return transaction_action('block-dirty-bitmap-clear', + node=node, name=name, **kwargs) + + +def transaction_drive_backup(device, target, **kwargs): + return transaction_action('drive-backup', device=device, target=target, + **kwargs) + + class Bitmap: def __init__(self, name, drive): self.name = name @@ -264,6 +281,43 @@ class TestIncrementalBackup(iotests.QMPTestCase): return self.do_incremental_simple(granularity=131072) + def test_incremental_transaction(self): + '''Test: Verify backups made from transactionally created bitmaps. + + Create a bitmap "before" VM execution begins, then create a second + bitmap AFTER writes have already occurred. Use transactions to create + a full backup and synchronize both bitmaps to this backup. + Create an incremental backup through both bitmaps and verify that + both backups match the current drive0 image. + ''' + + drive0 = self.drives[0] + bitmap0 = self.add_bitmap('bitmap0', drive0) + self.hmp_io_writes(drive0['id'], (('0xab', 0, 512), + ('0xfe', '16M', '256k'), + ('0x64', '32736k', '64k'))) + bitmap1 = self.add_bitmap('bitmap1', drive0) + + result = self.vm.qmp('transaction', actions=[ + transaction_bitmap_clear(bitmap0.drive['id'], bitmap0.name), + transaction_bitmap_clear(bitmap1.drive['id'], bitmap1.name), + transaction_drive_backup(drive0['id'], drive0['backup'], + sync='full', format=drive0['fmt']) + ]) + self.assert_qmp(result, 'return', {}) + self.wait_until_completed(drive0['id']) + self.files.append(drive0['backup']) + + self.hmp_io_writes(drive0['id'], (('0x9a', 0, 512), + ('0x55', '8M', '352k'), + ('0x78', '15872k', '1M'))) + # Both bitmaps should be correctly in sync. + self.create_incremental(bitmap0) + self.create_incremental(bitmap1) + self.vm.shutdown() + self.check_backups() + + def test_incremental_failure(self): '''Test: Verify backups made after a failure are correct. diff --git a/tests/qemu-iotests/124.out b/tests/qemu-iotests/124.out index 2f7d3902f2..594c16f49f 100644 --- a/tests/qemu-iotests/124.out +++ b/tests/qemu-iotests/124.out @@ -1,5 +1,5 @@ -....... +........ ---------------------------------------------------------------------- -Ran 7 tests +Ran 8 tests OK -- cgit v1.2.3-55-g7522 From fc6c796ff2b049303473702d1ade9196d1843f5d Mon Sep 17 00:00:00 2001 From: John Snow Date: Thu, 5 Nov 2015 18:13:19 -0500 Subject: iotests: 124 - transactional failure test Use a transaction to request an incremental backup across two drives. 
Coerce one of the jobs to fail, and then re-run the transaction. Verify that no bitmap data was lost due to the partial transaction failure. To support the 'err-cancel' QMP argument name it's necessary for transaction_action() to convert underscores in Python argument names to hyphens for QMP argument names. Signed-off-by: John Snow Reviewed-by: Max Reitz Signed-off-by: Stefan Hajnoczi Signed-off-by: Fam Zheng Message-id: 1446765200-3054-14-git-send-email-jsnow@redhat.com Signed-off-by: Stefan Hajnoczi Signed-off-by: Kevin Wolf --- tests/qemu-iotests/124 | 130 ++++++++++++++++++++++++++++++++++++++++++++- tests/qemu-iotests/124.out | 4 +- 2 files changed, 130 insertions(+), 4 deletions(-) (limited to 'tests') diff --git a/tests/qemu-iotests/124 b/tests/qemu-iotests/124 index 9c1977ea82..c928f0101b 100644 --- a/tests/qemu-iotests/124 +++ b/tests/qemu-iotests/124 @@ -39,7 +39,7 @@ def try_remove(img): def transaction_action(action, **kwargs): return { 'type': action, - 'data': kwargs + 'data': dict((k.replace('_', '-'), v) for k, v in kwargs.iteritems()) } @@ -139,9 +139,12 @@ class TestIncrementalBackup(iotests.QMPTestCase): def do_qmp_backup(self, error='Input/output error', **kwargs): res = self.vm.qmp('drive-backup', **kwargs) self.assert_qmp(res, 'return', {}) + return self.wait_qmp_backup(kwargs['device'], error) + + def wait_qmp_backup(self, device, error='Input/output error'): event = self.vm.event_wait(name="BLOCK_JOB_COMPLETED", - match={'data': {'device': kwargs['device']}}) + match={'data': {'device': device}}) self.assertNotEqual(event, None) try: @@ -156,6 +159,12 @@ class TestIncrementalBackup(iotests.QMPTestCase): return False + def wait_qmp_backup_cancelled(self, device): + event = self.vm.event_wait(name='BLOCK_JOB_CANCELLED', + match={'data': {'device': device}}) + self.assertNotEqual(event, None) + + def create_anchor_backup(self, drive=None): if drive is None: drive = self.drives[-1] @@ -375,6 +384,123 @@ class TestIncrementalBackup(iotests.QMPTestCase): self.check_backups() + def test_transaction_failure(self): + '''Test: Verify backups made from a transaction that partially fails. + + Add a second drive with its own unique pattern, and add a bitmap to each + drive. Use blkdebug to interfere with the backup on just one drive and + attempt to create a coherent incremental backup across both drives. + + verify a failure in one but not both, then delete the failed stubs and + re-run the same transaction. + + verify that both incrementals are created successfully. 
+ ''' + + # Create a second drive, with pattern: + drive1 = self.add_node('drive1') + self.img_create(drive1['file'], drive1['fmt']) + io_write_patterns(drive1['file'], (('0x14', 0, 512), + ('0x5d', '1M', '32k'), + ('0xcd', '32M', '124k'))) + + # Create a blkdebug interface to this img as 'drive1' + result = self.vm.qmp('blockdev-add', options={ + 'id': drive1['id'], + 'driver': drive1['fmt'], + 'file': { + 'driver': 'blkdebug', + 'image': { + 'driver': 'file', + 'filename': drive1['file'] + }, + 'set-state': [{ + 'event': 'flush_to_disk', + 'state': 1, + 'new_state': 2 + }], + 'inject-error': [{ + 'event': 'read_aio', + 'errno': 5, + 'state': 2, + 'immediately': False, + 'once': True + }], + } + }) + self.assert_qmp(result, 'return', {}) + + # Create bitmaps and full backups for both drives + drive0 = self.drives[0] + dr0bm0 = self.add_bitmap('bitmap0', drive0) + dr1bm0 = self.add_bitmap('bitmap0', drive1) + self.create_anchor_backup(drive0) + self.create_anchor_backup(drive1) + self.assert_no_active_block_jobs() + self.assertFalse(self.vm.get_qmp_events(wait=False)) + + # Emulate some writes + self.hmp_io_writes(drive0['id'], (('0xab', 0, 512), + ('0xfe', '16M', '256k'), + ('0x64', '32736k', '64k'))) + self.hmp_io_writes(drive1['id'], (('0xba', 0, 512), + ('0xef', '16M', '256k'), + ('0x46', '32736k', '64k'))) + + # Create incremental backup targets + target0 = self.prepare_backup(dr0bm0) + target1 = self.prepare_backup(dr1bm0) + + # Ask for a new incremental backup per-each drive, + # expecting drive1's backup to fail: + transaction = [ + transaction_drive_backup(drive0['id'], target0, sync='incremental', + format=drive0['fmt'], mode='existing', + bitmap=dr0bm0.name), + transaction_drive_backup(drive1['id'], target1, sync='incremental', + format=drive1['fmt'], mode='existing', + bitmap=dr1bm0.name) + ] + result = self.vm.qmp('transaction', actions=transaction, + properties={'completion-mode': 'grouped'} ) + self.assert_qmp(result, 'return', {}) + + # Observe that drive0's backup is cancelled and drive1 completes with + # an error. + self.wait_qmp_backup_cancelled(drive0['id']) + self.assertFalse(self.wait_qmp_backup(drive1['id'])) + error = self.vm.event_wait('BLOCK_JOB_ERROR') + self.assert_qmp(error, 'data', {'device': drive1['id'], + 'action': 'report', + 'operation': 'read'}) + self.assertFalse(self.vm.get_qmp_events(wait=False)) + self.assert_no_active_block_jobs() + + # Delete drive0's successful target and eliminate our record of the + # unsuccessful drive1 target. Then re-run the same transaction. + dr0bm0.del_target() + dr1bm0.del_target() + target0 = self.prepare_backup(dr0bm0) + target1 = self.prepare_backup(dr1bm0) + + # Re-run the exact same transaction. + result = self.vm.qmp('transaction', actions=transaction, + properties={'completion-mode':'grouped'}) + self.assert_qmp(result, 'return', {}) + + # Both should complete successfully this time. + self.assertTrue(self.wait_qmp_backup(drive0['id'])) + self.assertTrue(self.wait_qmp_backup(drive1['id'])) + self.make_reference_backup(dr0bm0) + self.make_reference_backup(dr1bm0) + self.assertFalse(self.vm.get_qmp_events(wait=False)) + self.assert_no_active_block_jobs() + + # And the images should of course validate. 
+ self.vm.shutdown() + self.check_backups() + + def test_sync_dirty_bitmap_missing(self): self.assert_no_active_block_jobs() self.files.append(self.err_img) diff --git a/tests/qemu-iotests/124.out b/tests/qemu-iotests/124.out index 594c16f49f..dae404e278 100644 --- a/tests/qemu-iotests/124.out +++ b/tests/qemu-iotests/124.out @@ -1,5 +1,5 @@ -........ +......... ---------------------------------------------------------------------- -Ran 8 tests +Ran 9 tests OK -- cgit v1.2.3-55-g7522 From 6c6f312dd752c74c124ecfc4c34e8aaf7254c3b8 Mon Sep 17 00:00:00 2001 From: Stefan Hajnoczi Date: Thu, 5 Nov 2015 18:13:20 -0500 Subject: tests: add BlockJobTxn unit test The BlockJobTxn unit test verifies that both single jobs and pairs of jobs behave as a transaction group. Either all jobs complete successfully or the group is cancelled. Signed-off-by: Stefan Hajnoczi Reviewed-by: Max Reitz Reviewed-by: John Snow Signed-off-by: Fam Zheng Signed-off-by: John Snow Message-id: 1446765200-3054-15-git-send-email-jsnow@redhat.com Signed-off-by: Stefan Hajnoczi Signed-off-by: Kevin Wolf --- tests/Makefile | 3 + tests/test-blockjob-txn.c | 250 ++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 253 insertions(+) create mode 100644 tests/test-blockjob-txn.c (limited to 'tests') diff --git a/tests/Makefile b/tests/Makefile index 92969e8288..6a6b1fc0a6 100644 --- a/tests/Makefile +++ b/tests/Makefile @@ -47,6 +47,8 @@ check-unit-y += tests/test-thread-pool$(EXESUF) gcov-files-test-thread-pool-y = thread-pool.c gcov-files-test-hbitmap-y = util/hbitmap.c check-unit-y += tests/test-hbitmap$(EXESUF) +gcov-files-test-hbitmap-y = blockjob.c +check-unit-y += tests/test-blockjob-txn$(EXESUF) check-unit-y += tests/test-x86-cpuid$(EXESUF) # all code tested by test-x86-cpuid is inside topology.h gcov-files-test-x86-cpuid-y = @@ -390,6 +392,7 @@ tests/test-coroutine$(EXESUF): tests/test-coroutine.o $(test-block-obj-y) tests/test-aio$(EXESUF): tests/test-aio.o $(test-block-obj-y) tests/test-rfifolock$(EXESUF): tests/test-rfifolock.o $(test-util-obj-y) tests/test-throttle$(EXESUF): tests/test-throttle.o $(test-block-obj-y) +tests/test-blockjob-txn$(EXESUF): tests/test-blockjob-txn.o $(test-block-obj-y) $(test-util-obj-y) tests/test-thread-pool$(EXESUF): tests/test-thread-pool.o $(test-block-obj-y) tests/test-iov$(EXESUF): tests/test-iov.o $(test-util-obj-y) tests/test-hbitmap$(EXESUF): tests/test-hbitmap.o $(test-util-obj-y) diff --git a/tests/test-blockjob-txn.c b/tests/test-blockjob-txn.c new file mode 100644 index 0000000000..34747e924d --- /dev/null +++ b/tests/test-blockjob-txn.c @@ -0,0 +1,250 @@ +/* + * Blockjob transactions tests + * + * Copyright Red Hat, Inc. 2015 + * + * Authors: + * Stefan Hajnoczi + * + * This work is licensed under the terms of the GNU LGPL, version 2 or later. + * See the COPYING.LIB file in the top-level directory. 
+ */ + +#include +#include "qapi/error.h" +#include "qemu/main-loop.h" +#include "block/blockjob.h" + +typedef struct { + BlockJob common; + unsigned int iterations; + bool use_timer; + int rc; + int *result; +} TestBlockJob; + +static const BlockJobDriver test_block_job_driver = { + .instance_size = sizeof(TestBlockJob), +}; + +static void test_block_job_complete(BlockJob *job, void *opaque) +{ + BlockDriverState *bs = job->bs; + int rc = (intptr_t)opaque; + + if (block_job_is_cancelled(job)) { + rc = -ECANCELED; + } + + block_job_completed(job, rc); + bdrv_unref(bs); +} + +static void coroutine_fn test_block_job_run(void *opaque) +{ + TestBlockJob *s = opaque; + BlockJob *job = &s->common; + + while (s->iterations--) { + if (s->use_timer) { + block_job_sleep_ns(job, QEMU_CLOCK_REALTIME, 0); + } else { + block_job_yield(job); + } + + if (block_job_is_cancelled(job)) { + break; + } + } + + block_job_defer_to_main_loop(job, test_block_job_complete, + (void *)(intptr_t)s->rc); +} + +typedef struct { + TestBlockJob *job; + int *result; +} TestBlockJobCBData; + +static void test_block_job_cb(void *opaque, int ret) +{ + TestBlockJobCBData *data = opaque; + if (!ret && block_job_is_cancelled(&data->job->common)) { + ret = -ECANCELED; + } + *data->result = ret; + g_free(data); +} + +/* Create a block job that completes with a given return code after a given + * number of event loop iterations. The return code is stored in the given + * result pointer. + * + * The event loop iterations can either be handled automatically with a 0 delay + * timer, or they can be stepped manually by entering the coroutine. + */ +static BlockJob *test_block_job_start(unsigned int iterations, + bool use_timer, + int rc, int *result) +{ + BlockDriverState *bs; + TestBlockJob *s; + TestBlockJobCBData *data; + + data = g_new0(TestBlockJobCBData, 1); + bs = bdrv_new(); + s = block_job_create(&test_block_job_driver, bs, 0, test_block_job_cb, + data, &error_abort); + s->iterations = iterations; + s->use_timer = use_timer; + s->rc = rc; + s->result = result; + s->common.co = qemu_coroutine_create(test_block_job_run); + data->job = s; + data->result = result; + qemu_coroutine_enter(s->common.co, s); + return &s->common; +} + +static void test_single_job(int expected) +{ + BlockJob *job; + BlockJobTxn *txn; + int result = -EINPROGRESS; + + txn = block_job_txn_new(); + job = test_block_job_start(1, true, expected, &result); + block_job_txn_add_job(txn, job); + + if (expected == -ECANCELED) { + block_job_cancel(job); + } + + while (result == -EINPROGRESS) { + aio_poll(qemu_get_aio_context(), true); + } + g_assert_cmpint(result, ==, expected); + + block_job_txn_unref(txn); +} + +static void test_single_job_success(void) +{ + test_single_job(0); +} + +static void test_single_job_failure(void) +{ + test_single_job(-EIO); +} + +static void test_single_job_cancel(void) +{ + test_single_job(-ECANCELED); +} + +static void test_pair_jobs(int expected1, int expected2) +{ + BlockJob *job1; + BlockJob *job2; + BlockJobTxn *txn; + int result1 = -EINPROGRESS; + int result2 = -EINPROGRESS; + + txn = block_job_txn_new(); + job1 = test_block_job_start(1, true, expected1, &result1); + block_job_txn_add_job(txn, job1); + job2 = test_block_job_start(2, true, expected2, &result2); + block_job_txn_add_job(txn, job2); + + if (expected1 == -ECANCELED) { + block_job_cancel(job1); + } + if (expected2 == -ECANCELED) { + block_job_cancel(job2); + } + + while (result1 == -EINPROGRESS || result2 == -EINPROGRESS) { + aio_poll(qemu_get_aio_context(), true); + 
} + + /* Failure or cancellation of one job cancels the other job */ + if (expected1 != 0) { + expected2 = -ECANCELED; + } else if (expected2 != 0) { + expected1 = -ECANCELED; + } + + g_assert_cmpint(result1, ==, expected1); + g_assert_cmpint(result2, ==, expected2); + + block_job_txn_unref(txn); +} + +static void test_pair_jobs_success(void) +{ + test_pair_jobs(0, 0); +} + +static void test_pair_jobs_failure(void) +{ + /* Test both orderings. The two jobs run for a different number of + * iterations so the code path is different depending on which job fails + * first. + */ + test_pair_jobs(-EIO, 0); + test_pair_jobs(0, -EIO); +} + +static void test_pair_jobs_cancel(void) +{ + test_pair_jobs(-ECANCELED, 0); + test_pair_jobs(0, -ECANCELED); +} + +static void test_pair_jobs_fail_cancel_race(void) +{ + BlockJob *job1; + BlockJob *job2; + BlockJobTxn *txn; + int result1 = -EINPROGRESS; + int result2 = -EINPROGRESS; + + txn = block_job_txn_new(); + job1 = test_block_job_start(1, true, -ECANCELED, &result1); + block_job_txn_add_job(txn, job1); + job2 = test_block_job_start(2, false, 0, &result2); + block_job_txn_add_job(txn, job2); + + block_job_cancel(job1); + + /* Now make job2 finish before the main loop kicks jobs. This simulates + * the race between a pending kick and another job completing. + */ + block_job_enter(job2); + block_job_enter(job2); + + while (result1 == -EINPROGRESS || result2 == -EINPROGRESS) { + aio_poll(qemu_get_aio_context(), true); + } + + g_assert_cmpint(result1, ==, -ECANCELED); + g_assert_cmpint(result2, ==, -ECANCELED); + + block_job_txn_unref(txn); +} + +int main(int argc, char **argv) +{ + qemu_init_main_loop(&error_abort); + + g_test_init(&argc, &argv, NULL); + g_test_add_func("/single/success", test_single_job_success); + g_test_add_func("/single/failure", test_single_job_failure); + g_test_add_func("/single/cancel", test_single_job_cancel); + g_test_add_func("/pair/success", test_pair_jobs_success); + g_test_add_func("/pair/failure", test_pair_jobs_failure); + g_test_add_func("/pair/cancel", test_pair_jobs_cancel); + g_test_add_func("/pair/fail-cancel-race", test_pair_jobs_fail_cancel_race); + return g_test_run(); +} -- cgit v1.2.3-55-g7522 From bd797fc15b4290e02c219b7cd6289a33cd6cd18b Mon Sep 17 00:00:00 2001 From: Alberto Garcia Date: Wed, 28 Oct 2015 17:33:01 +0200 Subject: util: Infrastructure for computing recent averages This module computes the average of a set of values within a time window, keeping also track of the minimum and maximum values. In order to produce more accurate results it works internally by creating two time windows of the same period, offsetted by half of that period. Values are accounted on both windows and the data is always returned from the oldest one. [Add missing util/replay.o to test-timed-average dependencies to fix the build. 
--Stefan] Signed-off-by: Alberto Garcia Message-id: 201b09c21bbc9c329779d2b2365ee2b9c80dceeb.1446044837.git.berto@igalia.com Signed-off-by: Stefan Hajnoczi Signed-off-by: Kevin Wolf --- include/qemu/timed-average.h | 63 +++++++++++++ tests/Makefile | 4 + tests/test-timed-average.c | 90 +++++++++++++++++++ util/Makefile.objs | 1 + util/timed-average.c | 210 +++++++++++++++++++++++++++++++++++++++++++ 5 files changed, 368 insertions(+) create mode 100644 include/qemu/timed-average.h create mode 100644 tests/test-timed-average.c create mode 100644 util/timed-average.c (limited to 'tests') diff --git a/include/qemu/timed-average.h b/include/qemu/timed-average.h new file mode 100644 index 0000000000..f1cdddc48b --- /dev/null +++ b/include/qemu/timed-average.h @@ -0,0 +1,63 @@ +/* + * QEMU timed average computation + * + * Copyright (C) Nodalink, EURL. 2014 + * Copyright (C) Igalia, S.L. 2015 + * + * Authors: + * Benoît Canet + * Alberto Garcia + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) version 3 or any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ + +#ifndef TIMED_AVERAGE_H +#define TIMED_AVERAGE_H + +#include + +#include "qemu/timer.h" + +typedef struct TimedAverageWindow TimedAverageWindow; +typedef struct TimedAverage TimedAverage; + +/* All fields of both structures are private */ + +struct TimedAverageWindow { + uint64_t min; /* minimum value accounted in the window */ + uint64_t max; /* maximum value accounted in the window */ + uint64_t sum; /* sum of all values */ + uint64_t count; /* number of values */ + int64_t expiration; /* the end of the current window in ns */ +}; + +struct TimedAverage { + uint64_t period; /* period in nanoseconds */ + TimedAverageWindow windows[2]; /* two overlapping windows of with + * an offset of period / 2 between them */ + unsigned current; /* the current window index: it's also the + * oldest window index */ + QEMUClockType clock_type; /* the clock used */ +}; + +void timed_average_init(TimedAverage *ta, QEMUClockType clock_type, + uint64_t period); + +void timed_average_account(TimedAverage *ta, uint64_t value); + +uint64_t timed_average_min(TimedAverage *ta); +uint64_t timed_average_avg(TimedAverage *ta); +uint64_t timed_average_max(TimedAverage *ta); + +#endif diff --git a/tests/Makefile b/tests/Makefile index 6a6b1fc0a6..90c4141ac5 100644 --- a/tests/Makefile +++ b/tests/Makefile @@ -83,6 +83,7 @@ check-unit-y += tests/test-crypto-cipher$(EXESUF) check-unit-$(CONFIG_GNUTLS) += tests/test-crypto-tlscredsx509$(EXESUF) check-unit-$(CONFIG_GNUTLS) += tests/test-crypto-tlssession$(EXESUF) check-unit-$(CONFIG_LINUX) += tests/test-qga$(EXESUF) +check-unit-y += tests/test-timed-average$(EXESUF) check-block-$(CONFIG_POSIX) += tests/qemu-iotests-quick.sh @@ -412,6 +413,9 @@ tests/test-vmstate$(EXESUF): tests/test-vmstate.o \ migration/vmstate.o migration/qemu-file.o migration/qemu-file-buf.o \ migration/qemu-file-unix.o qjson.o \ $(test-qom-obj-y) +tests/test-timed-average$(EXESUF): tests/test-timed-average.o qemu-timer.o \ + 
libqemuutil.a stubs/clock-warp.o stubs/cpu-get-icount.o \ + stubs/notify-event.o stubs/replay.o tests/test-qapi-types.c tests/test-qapi-types.h :\ $(SRC_PATH)/tests/qapi-schema/qapi-schema-test.json $(SRC_PATH)/scripts/qapi-types.py $(qapi-py) diff --git a/tests/test-timed-average.c b/tests/test-timed-average.c new file mode 100644 index 0000000000..a049799b80 --- /dev/null +++ b/tests/test-timed-average.c @@ -0,0 +1,90 @@ +/* + * Timed average computation tests + * + * Copyright Nodalink, EURL. 2014 + * + * Authors: + * Benoît Canet + * + * This work is licensed under the terms of the GNU LGPL, version 2 or later. + * See the COPYING.LIB file in the top-level directory. + */ + +#include +#include + +#include "qemu/timed-average.h" + +/* This is the clock for QEMU_CLOCK_VIRTUAL */ +static int64_t my_clock_value; + +int64_t cpu_get_clock(void) +{ + return my_clock_value; +} + +static void account(TimedAverage *ta) +{ + timed_average_account(ta, 1); + timed_average_account(ta, 5); + timed_average_account(ta, 2); + timed_average_account(ta, 4); + timed_average_account(ta, 3); +} + +static void test_average(void) +{ + TimedAverage ta; + uint64_t result; + int i; + + /* we will compute some average on a period of 1 second */ + timed_average_init(&ta, QEMU_CLOCK_VIRTUAL, NANOSECONDS_PER_SECOND); + + result = timed_average_min(&ta); + g_assert(result == 0); + result = timed_average_avg(&ta); + g_assert(result == 0); + result = timed_average_max(&ta); + g_assert(result == 0); + + for (i = 0; i < 100; i++) { + account(&ta); + result = timed_average_min(&ta); + g_assert(result == 1); + result = timed_average_avg(&ta); + g_assert(result == 3); + result = timed_average_max(&ta); + g_assert(result == 5); + my_clock_value += NANOSECONDS_PER_SECOND / 10; + } + + my_clock_value += NANOSECONDS_PER_SECOND * 100; + + result = timed_average_min(&ta); + g_assert(result == 0); + result = timed_average_avg(&ta); + g_assert(result == 0); + result = timed_average_max(&ta); + g_assert(result == 0); + + for (i = 0; i < 100; i++) { + account(&ta); + result = timed_average_min(&ta); + g_assert(result == 1); + result = timed_average_avg(&ta); + g_assert(result == 3); + result = timed_average_max(&ta); + g_assert(result == 5); + my_clock_value += NANOSECONDS_PER_SECOND / 10; + } +} + +int main(int argc, char **argv) +{ + /* tests in the same order as the header function declarations */ + g_test_init(&argc, &argv, NULL); + g_test_add_func("/timed-average/average", test_average); + return g_test_run(); +} + diff --git a/util/Makefile.objs b/util/Makefile.objs index d7cc39907f..89dd80ef86 100644 --- a/util/Makefile.objs +++ b/util/Makefile.objs @@ -29,3 +29,4 @@ util-obj-y += qemu-coroutine.o qemu-coroutine-lock.o qemu-coroutine-io.o util-obj-y += qemu-coroutine-sleep.o util-obj-y += coroutine-$(CONFIG_COROUTINE_BACKEND).o util-obj-y += buffer.o +util-obj-y += timed-average.o diff --git a/util/timed-average.c b/util/timed-average.c new file mode 100644 index 0000000000..98a1170204 --- /dev/null +++ b/util/timed-average.c @@ -0,0 +1,210 @@ +/* + * QEMU timed average computation + * + * Copyright (C) Nodalink, EURL. 2014 + * Copyright (C) Igalia, S.L. 2015 + * + * Authors: + * Benoît Canet + * Alberto Garcia + * + * This program is free sofware: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Sofware Foundation, either version 2 of the License, or + * (at your option) version 3 or any later version. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ + +#include + +#include "qemu/timed-average.h" + +/* This module computes an average of a set of values within a time + * window. + * + * Algorithm: + * + * - Create two windows with a certain expiration period, and + * offsetted by period / 2. + * - Each time you want to account a new value, do it in both windows. + * - The minimum / maximum / average values are always returned from + * the oldest window. + * + * Example: + * + * t=0 |t=0.5 |t=1 |t=1.5 |t=2 + * wnd0: [0,0.5)|wnd0: [0.5,1.5) | |wnd0: [1.5,2.5) | + * wnd1: [0,1) | |wnd1: [1,2) | | + * + * Values are returned from: + * + * wnd0---------|wnd1------------|wnd0---------|wnd1-------------| + */ + +/* Update the expiration of a time window + * + * @w: the window used + * @now: the current time in nanoseconds + * @period: the expiration period in nanoseconds + */ +static void update_expiration(TimedAverageWindow *w, int64_t now, + int64_t period) +{ + /* time elapsed since the last theoretical expiration */ + int64_t elapsed = (now - w->expiration) % period; + /* time remaininging until the next expiration */ + int64_t remaining = period - elapsed; + /* compute expiration */ + w->expiration = now + remaining; +} + +/* Reset a window + * + * @w: the window to reset + */ +static void window_reset(TimedAverageWindow *w) +{ + w->min = UINT64_MAX; + w->max = 0; + w->sum = 0; + w->count = 0; +} + +/* Get the current window (that is, the one with the earliest + * expiration time). + * + * @ta: the TimedAverage structure + * @ret: a pointer to the current window + */ +static TimedAverageWindow *current_window(TimedAverage *ta) +{ + return &ta->windows[ta->current]; +} + +/* Initialize a TimedAverage structure + * + * @ta: the TimedAverage structure + * @clock_type: the type of clock to use + * @period: the time window period in nanoseconds + */ +void timed_average_init(TimedAverage *ta, QEMUClockType clock_type, + uint64_t period) +{ + int64_t now = qemu_clock_get_ns(clock_type); + + /* Returned values are from the oldest window, so they belong to + * the interval [ta->period/2,ta->period). By adjusting the + * requested period by 4/3, we guarantee that they're in the + * interval [2/3 period,4/3 period), closer to the requested + * period on average */ + ta->period = (uint64_t) period * 4 / 3; + ta->clock_type = clock_type; + ta->current = 0; + + window_reset(&ta->windows[0]); + window_reset(&ta->windows[1]); + + /* Both windows are offsetted by half a period */ + ta->windows[0].expiration = now + ta->period / 2; + ta->windows[1].expiration = now + ta->period; +} + +/* Check if the time windows have expired, updating their counters and + * expiration time if that's the case. 
+ * + * @ta: the TimedAverage structure + */ +static void check_expirations(TimedAverage *ta) +{ + int64_t now = qemu_clock_get_ns(ta->clock_type); + int i; + + assert(ta->period != 0); + + /* Check if the windows have expired */ + for (i = 0; i < 2; i++) { + TimedAverageWindow *w = &ta->windows[i]; + if (w->expiration <= now) { + window_reset(w); + update_expiration(w, now, ta->period); + } + } + + /* Make ta->current point to the oldest window */ + if (ta->windows[0].expiration < ta->windows[1].expiration) { + ta->current = 0; + } else { + ta->current = 1; + } +} + +/* Account a value + * + * @ta: the TimedAverage structure + * @value: the value to account + */ +void timed_average_account(TimedAverage *ta, uint64_t value) +{ + int i; + check_expirations(ta); + + /* Do the accounting in both windows at the same time */ + for (i = 0; i < 2; i++) { + TimedAverageWindow *w = &ta->windows[i]; + + w->sum += value; + w->count++; + + if (value < w->min) { + w->min = value; + } + + if (value > w->max) { + w->max = value; + } + } +} + +/* Get the minimum value + * + * @ta: the TimedAverage structure + * @ret: the minimum value + */ +uint64_t timed_average_min(TimedAverage *ta) +{ + TimedAverageWindow *w; + check_expirations(ta); + w = current_window(ta); + return w->min < UINT64_MAX ? w->min : 0; +} + +/* Get the average value + * + * @ta: the TimedAverage structure + * @ret: the average value + */ +uint64_t timed_average_avg(TimedAverage *ta) +{ + TimedAverageWindow *w; + check_expirations(ta); + w = current_window(ta); + return w->count > 0 ? w->sum / w->count : 0; +} + +/* Get the maximum value + * + * @ta: the TimedAverage structure + * @ret: the maximum value + */ +uint64_t timed_average_max(TimedAverage *ta) +{ + check_expirations(ta); + return current_window(ta)->max; +} -- cgit v1.2.3-55-g7522 From 4214face09bed08279c68a67fa1a4b01be887346 Mon Sep 17 00:00:00 2001 From: Alberto Garcia Date: Wed, 28 Oct 2015 17:33:10 +0200 Subject: iotests: Add test for the block device statistics Signed-off-by: Alberto Garcia Message-id: 0fb8501bbf3666b3d5d3f67fa899729c88f21baf.1446044838.git.berto@igalia.com Signed-off-by: Stefan Hajnoczi Signed-off-by: Kevin Wolf --- tests/qemu-iotests/136 | 349 +++++++++++++++++++++++++++++++++++++++++++++ tests/qemu-iotests/136.out | 5 + tests/qemu-iotests/group | 1 + 3 files changed, 355 insertions(+) create mode 100644 tests/qemu-iotests/136 create mode 100644 tests/qemu-iotests/136.out (limited to 'tests') diff --git a/tests/qemu-iotests/136 b/tests/qemu-iotests/136 new file mode 100644 index 0000000000..f574d83ff7 --- /dev/null +++ b/tests/qemu-iotests/136 @@ -0,0 +1,349 @@ +#!/usr/bin/env python +# +# Tests for block device statistics +# +# Copyright (C) 2015 Igalia, S.L. +# Author: Alberto Garcia +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . 
+# + +import iotests +import os + +interval_length = 10 +nsec_per_sec = 1000000000 +op_latency = nsec_per_sec / 1000 # See qtest_latency_ns in accounting.c +bad_sector = 8192 +bad_offset = bad_sector * 512 +blkdebug_file = os.path.join(iotests.test_dir, 'blkdebug.conf') + +class BlockDeviceStatsTestCase(iotests.QMPTestCase): + test_img = "null-aio://" + total_rd_bytes = 0 + total_rd_ops = 0 + total_wr_bytes = 0 + total_wr_ops = 0 + total_wr_merged = 0 + total_flush_ops = 0 + failed_rd_ops = 0 + failed_wr_ops = 0 + invalid_rd_ops = 0 + invalid_wr_ops = 0 + wr_highest_offset = 0 + account_invalid = False + account_failed = False + + def blockstats(self, device): + result = self.vm.qmp("query-blockstats") + for r in result['return']: + if r['device'] == device: + return r['stats'] + raise Exception("Device not found for blockstats: %s" % device) + + def create_blkdebug_file(self): + file = open(blkdebug_file, 'w') + file.write(''' +[inject-error] +event = "read_aio" +errno = "5" +sector = "%d" + +[inject-error] +event = "write_aio" +errno = "5" +sector = "%d" +''' % (bad_sector, bad_sector)) + file.close() + + def setUp(self): + drive_args = [] + drive_args.append("stats-intervals=%d" % interval_length) + drive_args.append("stats-account-invalid=%s" % + (self.account_invalid and "on" or "off")) + drive_args.append("stats-account-failed=%s" % + (self.account_failed and "on" or "off")) + self.create_blkdebug_file() + self.vm = iotests.VM().add_drive('blkdebug:%s:%s ' % + (blkdebug_file, self.test_img), + ','.join(drive_args)) + self.vm.launch() + # Set an initial value for the clock + self.vm.qtest("clock_step %d" % nsec_per_sec) + + def tearDown(self): + self.vm.shutdown() + os.remove(blkdebug_file) + + def accounted_ops(self, read = False, write = False, flush = False): + ops = 0 + if write: + ops += self.total_wr_ops + if self.account_failed: + ops += self.failed_wr_ops + if self.account_invalid: + ops += self.invalid_wr_ops + if read: + ops += self.total_rd_ops + if self.account_failed: + ops += self.failed_rd_ops + if self.account_invalid: + ops += self.invalid_rd_ops + if flush: + ops += self.total_flush_ops + return ops + + def accounted_latency(self, read = False, write = False, flush = False): + latency = 0 + if write: + latency += self.total_wr_ops * op_latency + if self.account_failed: + latency += self.failed_wr_ops * op_latency + if read: + latency += self.total_rd_ops * op_latency + if self.account_failed: + latency += self.failed_rd_ops * op_latency + if flush: + latency += self.total_flush_ops * op_latency + return latency + + def check_values(self): + stats = self.blockstats('drive0') + + # Check that the totals match with what we have calculated + self.assertEqual(self.total_rd_bytes, stats['rd_bytes']) + self.assertEqual(self.total_wr_bytes, stats['wr_bytes']) + self.assertEqual(self.total_rd_ops, stats['rd_operations']) + self.assertEqual(self.total_wr_ops, stats['wr_operations']) + self.assertEqual(self.total_flush_ops, stats['flush_operations']) + self.assertEqual(self.wr_highest_offset, stats['wr_highest_offset']) + self.assertEqual(self.failed_rd_ops, stats['failed_rd_operations']) + self.assertEqual(self.failed_wr_ops, stats['failed_wr_operations']) + self.assertEqual(self.invalid_rd_ops, stats['invalid_rd_operations']) + self.assertEqual(self.invalid_wr_ops, stats['invalid_wr_operations']) + self.assertEqual(self.account_invalid, stats['account_invalid']) + self.assertEqual(self.account_failed, stats['account_failed']) + self.assertEqual(self.total_wr_merged, 
stats['wr_merged']) + + # Check that there's exactly one interval with the length we defined + self.assertEqual(1, len(stats['timed_stats'])) + timed_stats = stats['timed_stats'][0] + self.assertEqual(interval_length, timed_stats['interval_length']) + + total_rd_latency = self.accounted_latency(read = True) + if (total_rd_latency != 0): + self.assertEqual(total_rd_latency, stats['rd_total_time_ns']) + self.assertEqual(op_latency, timed_stats['min_rd_latency_ns']) + self.assertEqual(op_latency, timed_stats['max_rd_latency_ns']) + self.assertEqual(op_latency, timed_stats['avg_rd_latency_ns']) + self.assertLess(0, timed_stats['avg_rd_queue_depth']) + else: + self.assertEqual(0, stats['rd_total_time_ns']) + self.assertEqual(0, timed_stats['min_rd_latency_ns']) + self.assertEqual(0, timed_stats['max_rd_latency_ns']) + self.assertEqual(0, timed_stats['avg_rd_latency_ns']) + self.assertEqual(0, timed_stats['avg_rd_queue_depth']) + + # min read latency <= avg read latency <= max read latency + self.assertLessEqual(timed_stats['min_rd_latency_ns'], + timed_stats['avg_rd_latency_ns']) + self.assertLessEqual(timed_stats['avg_rd_latency_ns'], + timed_stats['max_rd_latency_ns']) + + total_wr_latency = self.accounted_latency(write = True) + if (total_wr_latency != 0): + self.assertEqual(total_wr_latency, stats['wr_total_time_ns']) + self.assertEqual(op_latency, timed_stats['min_wr_latency_ns']) + self.assertEqual(op_latency, timed_stats['max_wr_latency_ns']) + self.assertEqual(op_latency, timed_stats['avg_wr_latency_ns']) + self.assertLess(0, timed_stats['avg_wr_queue_depth']) + else: + self.assertEqual(0, stats['wr_total_time_ns']) + self.assertEqual(0, timed_stats['min_wr_latency_ns']) + self.assertEqual(0, timed_stats['max_wr_latency_ns']) + self.assertEqual(0, timed_stats['avg_wr_latency_ns']) + self.assertEqual(0, timed_stats['avg_wr_queue_depth']) + + # min write latency <= avg write latency <= max write latency + self.assertLessEqual(timed_stats['min_wr_latency_ns'], + timed_stats['avg_wr_latency_ns']) + self.assertLessEqual(timed_stats['avg_wr_latency_ns'], + timed_stats['max_wr_latency_ns']) + + total_flush_latency = self.accounted_latency(flush = True) + if (total_flush_latency != 0): + self.assertEqual(total_flush_latency, stats['flush_total_time_ns']) + self.assertEqual(op_latency, timed_stats['min_flush_latency_ns']) + self.assertEqual(op_latency, timed_stats['max_flush_latency_ns']) + self.assertEqual(op_latency, timed_stats['avg_flush_latency_ns']) + else: + self.assertEqual(0, stats['flush_total_time_ns']) + self.assertEqual(0, timed_stats['min_flush_latency_ns']) + self.assertEqual(0, timed_stats['max_flush_latency_ns']) + self.assertEqual(0, timed_stats['avg_flush_latency_ns']) + + # min flush latency <= avg flush latency <= max flush latency + self.assertLessEqual(timed_stats['min_flush_latency_ns'], + timed_stats['avg_flush_latency_ns']) + self.assertLessEqual(timed_stats['avg_flush_latency_ns'], + timed_stats['max_flush_latency_ns']) + + # idle_time_ns must be > 0 if we have performed any operation + if (self.accounted_ops(read = True, write = True, flush = True) != 0): + self.assertLess(0, stats['idle_time_ns']) + else: + self.assertFalse(stats.has_key('idle_time_ns')) + + # This test does not alter these, so they must be all 0 + self.assertEqual(0, stats['rd_merged']) + self.assertEqual(0, stats['failed_flush_operations']) + self.assertEqual(0, stats['invalid_flush_operations']) + + def do_test_stats(self, rd_size = 0, rd_ops = 0, wr_size = 0, wr_ops = 0, + flush_ops = 0, 
invalid_rd_ops = 0, invalid_wr_ops = 0, + failed_rd_ops = 0, failed_wr_ops = 0, wr_merged = 0): + # The 'ops' list will contain all the requested I/O operations + ops = [] + for i in range(rd_ops): + ops.append("aio_read %d %d" % (i * rd_size, rd_size)) + + for i in range(wr_ops): + ops.append("aio_write %d %d" % (i * wr_size, wr_size)) + + for i in range(flush_ops): + ops.append("aio_flush") + + highest_offset = wr_ops * wr_size + + # Two types of invalid operations: unaligned length and unaligned offset + for i in range(invalid_rd_ops / 2): + ops.append("aio_read 0 511") + + for i in range(invalid_rd_ops / 2, invalid_rd_ops): + ops.append("aio_read 13 512") + + for i in range(invalid_wr_ops / 2): + ops.append("aio_write 0 511") + + for i in range(invalid_wr_ops / 2, invalid_wr_ops): + ops.append("aio_write 13 512") + + for i in range(failed_rd_ops): + ops.append("aio_read %d 512" % bad_offset) + + for i in range(failed_wr_ops): + ops.append("aio_write %d 512" % bad_offset) + + if failed_wr_ops > 0: + highest_offset = max(highest_offset, bad_offset + 512) + + for i in range(wr_merged): + first = i * wr_size * 2 + second = first + wr_size + ops.append("multiwrite %d %d ; %d %d" % + (first, wr_size, second, wr_size)) + + highest_offset = max(highest_offset, wr_merged * wr_size * 2) + + # Now perform all operations + for op in ops: + self.vm.hmp_qemu_io("drive0", op) + + # Update the expected totals + self.total_rd_bytes += rd_ops * rd_size + self.total_rd_ops += rd_ops + self.total_wr_bytes += wr_ops * wr_size + self.total_wr_ops += wr_ops + self.total_wr_merged += wr_merged + self.total_flush_ops += flush_ops + self.invalid_rd_ops += invalid_rd_ops + self.invalid_wr_ops += invalid_wr_ops + self.failed_rd_ops += failed_rd_ops + self.failed_wr_ops += failed_wr_ops + + self.wr_highest_offset = max(self.wr_highest_offset, highest_offset) + + # Advance the clock so idle_time_ns has a meaningful value + self.vm.qtest("clock_step %d" % nsec_per_sec) + + # And check that the actual statistics match the expected ones + self.check_values() + + def test_read_only(self): + test_values = [[512, 1], + [65536, 1], + [512, 12], + [65536, 12]] + for i in test_values: + self.do_test_stats(rd_size = i[0], rd_ops = i[1]) + + def test_write_only(self): + test_values = [[512, 1], + [65536, 1], + [512, 12], + [65536, 12]] + for i in test_values: + self.do_test_stats(wr_size = i[0], wr_ops = i[1]) + + def test_invalid(self): + self.do_test_stats(invalid_rd_ops = 7) + self.do_test_stats(invalid_wr_ops = 3) + self.do_test_stats(invalid_rd_ops = 4, invalid_wr_ops = 5) + + def test_failed(self): + self.do_test_stats(failed_rd_ops = 8) + self.do_test_stats(failed_wr_ops = 6) + self.do_test_stats(failed_rd_ops = 5, failed_wr_ops = 12) + + def test_flush(self): + self.do_test_stats(flush_ops = 8) + + def test_merged(self): + for i in range(5): + self.do_test_stats(wr_merged = i * 3) + + def test_all(self): + # rd_size, rd_ops, wr_size, wr_ops, flush_ops + # invalid_rd_ops, invalid_wr_ops, + # failed_rd_ops, failed_wr_ops + # wr_merged + test_values = [[512, 1, 512, 1, 1, 4, 7, 5, 2, 1], + [65536, 1, 2048, 12, 7, 7, 5, 2, 5, 5], + [32768, 9, 8192, 1, 4, 3, 2, 4, 6, 4], + [16384, 11, 3584, 16, 9, 8, 6, 7, 3, 4]] + for i in test_values: + self.do_test_stats(*i) + + def test_no_op(self): + # All values must be sane before doing any I/O + self.check_values() + + +class BlockDeviceStatsTestAccountInvalid(BlockDeviceStatsTestCase): + account_invalid = True + account_failed = False + +class 
BlockDeviceStatsTestAccountFailed(BlockDeviceStatsTestCase): + account_invalid = False + account_failed = True + +class BlockDeviceStatsTestAccountBoth(BlockDeviceStatsTestCase): + account_invalid = True + account_failed = True + +class BlockDeviceStatsTestCoroutine(BlockDeviceStatsTestCase): + test_img = "null-co://" + +if __name__ == '__main__': + iotests.main(supported_fmts=["raw"]) diff --git a/tests/qemu-iotests/136.out b/tests/qemu-iotests/136.out new file mode 100644 index 0000000000..0a5e9583a4 --- /dev/null +++ b/tests/qemu-iotests/136.out @@ -0,0 +1,5 @@ +........................................ +---------------------------------------------------------------------- +Ran 40 tests + +OK diff --git a/tests/qemu-iotests/group b/tests/qemu-iotests/group index c69265decd..5a0880893a 100644 --- a/tests/qemu-iotests/group +++ b/tests/qemu-iotests/group @@ -136,6 +136,7 @@ 132 rw auto quick 134 rw auto quick 135 rw auto +136 rw auto 137 rw auto 138 rw auto quick 139 rw auto quick -- cgit v1.2.3-55-g7522
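
A footnote on the transaction helpers added by the first two patches in this series: the underscore-to-hyphen conversion in transaction_action() exists so that QMP argument names containing hyphens can still be passed as Python keyword arguments. The snippet below is a small standalone sketch of that behaviour, rewritten in Python 3 syntax (the iotest itself is Python 2 and uses iteritems()); the device name, target path, bitmap name and the on_source_error argument are chosen here purely for illustration and are not taken from the patches.

    def transaction_action(action, **kwargs):
        # QMP argument names may contain '-', which Python keyword arguments
        # cannot, so underscores in the keyword names are translated to hyphens.
        return {
            'type': action,
            'data': {key.replace('_', '-'): value for key, value in kwargs.items()}
        }

    def transaction_drive_backup(device, target, **kwargs):
        return transaction_action('drive-backup', device=device, target=target,
                                  **kwargs)

    # Hypothetical values, purely for illustration:
    action = transaction_drive_backup('drive0', '/tmp/drive0.inc0.qcow2',
                                      sync='incremental', bitmap='bitmap0',
                                      mode='existing', on_source_error='report')
    print(action)
    # {'type': 'drive-backup', 'data': {'device': 'drive0',
    #  'target': '/tmp/drive0.inc0.qcow2', 'sync': 'incremental',
    #  'bitmap': 'bitmap0', 'mode': 'existing', 'on-source-error': 'report'}}

In the iotest itself, a list of such actions is passed to the 'transaction' QMP command together with properties={'completion-mode': 'grouped'}, which is what makes the paired drive-backup jobs complete or cancel as a group.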