author     Peter Maydell   2019-08-16 17:43:46 +0200
committer  Peter Maydell   2019-08-16 17:43:46 +0200
commit     e018ccb3fbfa1d446ca7b49266c8a80dce40612d
tree       335595d6245022743679088f2de7b0d6d5c8a6d6 /tests
parent     Merge remote-tracking branch 'remotes/ericb/tags/pull-nbd-2019-08-15' into st...
parent     file-posix: Handle undetectable alignment
Merge remote-tracking branch 'remotes/kevin/tags/for-upstream' into staging
Block layer patches:
- file-posix: Fix O_DIRECT alignment detection
- Fixes for concurrent block jobs
- block-backend: Queue requests while drained (fix IDE vs. job crashes)
- qemu-img convert: Deprecate using -n and -o together
- iotests: Migration tests with filter nodes
- iotests: More media change tests
# gpg: Signature made Fri 16 Aug 2019 10:29:18 BST
# gpg: using RSA key 7F09B272C88F2FD6
# gpg: Good signature from "Kevin Wolf <kwolf@redhat.com>" [full]
# Primary key fingerprint: DC3D EB15 9A9A F95D 3D74 56FE 7F09 B272 C88F 2FD6
* remotes/kevin/tags/for-upstream:
file-posix: Handle undetectable alignment
qemu-img convert: Deprecate using -n and -o together
block-backend: Queue requests while drained
mirror: Keep mirror_top_bs drained after dropping permissions
block: Remove blk_pread_unthrottled()
iotests: Add test for concurrent stream/commit
tests: Test mid-drain bdrv_replace_child_noperm()
tests: Test polling in bdrv_drop_intermediate()
block: Reduce (un)drains when replacing a child
block: Keep subtree drained in drop_intermediate
block: Simplify bdrv_filter_default_perms()
iotests: Test migration with all kinds of filter nodes
iotests: Move migration helpers to iotests.py
iotests/118: Add -blockdev based tests
iotests/118: Create test classes dynamically
iotests/118: Test media change for scsi-cd
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
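
Background on the qemu-img deprecation listed above: -n tells 'qemu-img convert' to skip creating the target image, so any creation options passed with -o can never take effect, which is why using both together now triggers a deprecation warning. A minimal illustrative sketch (hypothetical image paths, not taken from this merge) of the combination in question:

    import subprocess

    # Hypothetical paths; with '-n' the destination must already exist,
    # because qemu-img skips creating it.
    src, dst = 'src.qcow2', 'dst.qcow2'

    # '-o preallocation=metadata' is a creation option, but '-n' means no
    # image is created, so the option is silently ignored -- exactly the
    # combination this merge deprecates.
    subprocess.run(['qemu-img', 'convert', '-n',
                    '-o', 'preallocation=metadata',
                    '-f', 'qcow2', '-O', 'qcow2',
                    src, dst],
                   check=True)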
Diffstat (limited to 'tests')
-rwxr-xr-x  tests/qemu-iotests/118          84
-rw-r--r--  tests/qemu-iotests/118.out       4
-rwxr-xr-x  tests/qemu-iotests/234          30
-rwxr-xr-x  tests/qemu-iotests/258         163
-rw-r--r--  tests/qemu-iotests/258.out      33
-rwxr-xr-x  tests/qemu-iotests/262          82
-rw-r--r--  tests/qemu-iotests/262.out      17
-rw-r--r--  tests/qemu-iotests/group         2
-rw-r--r--  tests/qemu-iotests/iotests.py   16
-rw-r--r--  tests/test-bdrv-drain.c        476
10 files changed, 845 insertions, 62 deletions
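
The iotests/118 refactoring below replaces the hand-written per-interface test classes with dynamically generated ones. A standalone sketch of the same type()/globals() pattern (class and attribute names here are illustrative, not from the test itself):

    import unittest

    class BaseCase(unittest.TestCase):
        # Parameters filled in by the generated subclasses.
        interface = None
        use_drive = False

        def test_something(self):
            if self.interface is None:
                self.skipTest('abstract base class')
            # A real test would exercise the configured interface here.
            self.assertIn(self.interface, ('ide', 'scsi', 'floppy'))

    def create_test_classes():
        # Build one concrete class per parameter combination and publish
        # it in the module namespace so unittest discovery picks it up.
        for interface in ('ide', 'scsi', 'floppy'):
            for use_drive in (True, False):
                name = 'Test_%s_%s' % (interface,
                                       'drive' if use_drive else 'blockdev')
                attrs = {'interface': interface, 'use_drive': use_drive}
                globals()[name] = type(name, (BaseCase,), attrs)

    create_test_classes()

    if __name__ == '__main__':
        unittest.main()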
diff --git a/tests/qemu-iotests/118 b/tests/qemu-iotests/118 index 499c5f0901..6f45779ee9 100755 --- a/tests/qemu-iotests/118 +++ b/tests/qemu-iotests/118 @@ -33,6 +33,8 @@ def interface_to_device_name(interface): return 'ide-cd' elif interface == 'floppy': return 'floppy' + elif interface == 'scsi': + return 'scsi-cd' else: return None @@ -40,10 +42,14 @@ class ChangeBaseClass(iotests.QMPTestCase): has_opened = False has_closed = False + device_name = 'qdev0' + use_drive = False + def process_events(self): for event in self.vm.get_qmp_events(wait=False): if (event['event'] == 'DEVICE_TRAY_MOVED' and - event['data']['device'] == 'drive0'): + (event['data']['device'] == 'drive0' or + event['data']['id'] == self.device_name)): if event['data']['tray-open'] == False: self.has_closed = True else: @@ -67,9 +73,11 @@ class ChangeBaseClass(iotests.QMPTestCase): class GeneralChangeTestsBaseClass(ChangeBaseClass): - device_name = 'qdev0' - def test_change(self): + # 'change' requires a drive name, so skip the test for blockdev + if not self.use_drive: + return + result = self.vm.qmp('change', device='drive0', target=new_img, arg=iotests.imgfmt) self.assert_qmp(result, 'return', {}) @@ -292,13 +300,21 @@ class GeneralChangeTestsBaseClass(ChangeBaseClass): class TestInitiallyFilled(GeneralChangeTestsBaseClass): was_empty = False - def setUp(self, media, interface): + def setUp(self): qemu_img('create', '-f', iotests.imgfmt, old_img, '1440k') qemu_img('create', '-f', iotests.imgfmt, new_img, '1440k') self.vm = iotests.VM() - self.vm.add_drive(old_img, 'media=%s' % media, 'none') + if self.use_drive: + self.vm.add_drive(old_img, 'media=%s' % self.media, 'none') + else: + self.vm.add_blockdev([ 'node-name=drive0', + 'driver=%s' % iotests.imgfmt, + 'file.driver=file', + 'file.filename=%s' % old_img ]) + if self.interface == 'scsi': + self.vm.add_device('virtio-scsi-pci') self.vm.add_device('%s,drive=drive0,id=%s' % - (interface_to_device_name(interface), + (interface_to_device_name(self.interface), self.device_name)) self.vm.launch() @@ -327,11 +343,16 @@ class TestInitiallyFilled(GeneralChangeTestsBaseClass): class TestInitiallyEmpty(GeneralChangeTestsBaseClass): was_empty = True - def setUp(self, media, interface): + def setUp(self): qemu_img('create', '-f', iotests.imgfmt, new_img, '1440k') - self.vm = iotests.VM().add_drive(None, 'media=%s' % media, 'none') - self.vm.add_device('%s,drive=drive0,id=%s' % - (interface_to_device_name(interface), + self.vm = iotests.VM() + if self.use_drive: + self.vm.add_drive(None, 'media=%s' % self.media, 'none') + if self.interface == 'scsi': + self.vm.add_device('virtio-scsi-pci') + self.vm.add_device('%s,%sid=%s' % + (interface_to_device_name(self.interface), + 'drive=drive0,' if self.use_drive else '', self.device_name)) self.vm.launch() @@ -349,36 +370,25 @@ class TestInitiallyEmpty(GeneralChangeTestsBaseClass): # Should be a no-op self.assert_qmp(result, 'return', {}) -class TestCDInitiallyFilled(TestInitiallyFilled): - TestInitiallyFilled = TestInitiallyFilled - has_real_tray = True - - def setUp(self): - self.TestInitiallyFilled.setUp(self, 'cdrom', 'ide') +# Do this in a function to avoid leaking variables like case into the global +# name space (otherwise tests would be run for the abstract base classes) +def create_basic_test_classes(): + for (media, interface, has_real_tray) in [ ('cdrom', 'ide', True), + ('cdrom', 'scsi', True), + ('disk', 'floppy', False) ]: -class TestCDInitiallyEmpty(TestInitiallyEmpty): - TestInitiallyEmpty = TestInitiallyEmpty - 
has_real_tray = True - - def setUp(self): - self.TestInitiallyEmpty.setUp(self, 'cdrom', 'ide') + for case in [ TestInitiallyFilled, TestInitiallyEmpty ]: + for use_drive in [ True, False ]: + attr = { 'media': media, + 'interface': interface, + 'has_real_tray': has_real_tray, + 'use_drive': use_drive } -class TestFloppyInitiallyFilled(TestInitiallyFilled): - TestInitiallyFilled = TestInitiallyFilled - has_real_tray = False + name = '%s_%s_%s_%s' % (case.__name__, media, interface, + 'drive' if use_drive else 'blockdev') + globals()[name] = type(name, (case, ), attr) - def setUp(self): - self.TestInitiallyFilled.setUp(self, 'disk', 'floppy') - -class TestFloppyInitiallyEmpty(TestInitiallyEmpty): - TestInitiallyEmpty = TestInitiallyEmpty - has_real_tray = False - - def setUp(self): - self.TestInitiallyEmpty.setUp(self, 'disk', 'floppy') - # FDDs not having a real tray and there not being a medium inside the - # tray at startup means the tray will be considered open - self.has_opened = True +create_basic_test_classes() class TestChangeReadOnly(ChangeBaseClass): device_name = 'qdev0' diff --git a/tests/qemu-iotests/118.out b/tests/qemu-iotests/118.out index 4823c113d5..bf5bfd5aca 100644 --- a/tests/qemu-iotests/118.out +++ b/tests/qemu-iotests/118.out @@ -1,5 +1,5 @@ -............................................................... +....................................................................................................................................................................... ---------------------------------------------------------------------- -Ran 63 tests +Ran 167 tests OK diff --git a/tests/qemu-iotests/234 b/tests/qemu-iotests/234 index c4c26bc21e..34c818c485 100755 --- a/tests/qemu-iotests/234 +++ b/tests/qemu-iotests/234 @@ -26,22 +26,6 @@ import os iotests.verify_image_format(supported_fmts=['qcow2']) iotests.verify_platform(['linux']) -def enable_migration_events(vm, name): - iotests.log('Enabling migration QMP events on %s...' % name) - iotests.log(vm.qmp('migrate-set-capabilities', capabilities=[ - { - 'capability': 'events', - 'state': True - } - ])) - -def wait_migration(vm): - while True: - event = vm.event_wait('MIGRATION') - iotests.log(event, filters=[iotests.filter_qmp_event]) - if event['data']['status'] == 'completed': - break - with iotests.FilePath('img') as img_path, \ iotests.FilePath('backing') as backing_path, \ iotests.FilePath('mig_fifo_a') as fifo_a, \ @@ -62,7 +46,7 @@ with iotests.FilePath('img') as img_path, \ .add_blockdev('%s,file=drive0-backing-file,node-name=drive0-backing' % (iotests.imgfmt)) .launch()) - enable_migration_events(vm_a, 'A') + vm_a.enable_migration_events('A') iotests.log('Launching destination VM...') (vm_b.add_blockdev('file,filename=%s,node-name=drive0-file' % (img_path)) @@ -72,7 +56,7 @@ with iotests.FilePath('img') as img_path, \ .add_incoming("exec: cat '%s'" % (fifo_a)) .launch()) - enable_migration_events(vm_b, 'B') + vm_b.enable_migration_events('B') # Add a child node that was created after the parent node. The reverse case # is covered by the -blockdev options above. 
@@ -85,9 +69,9 @@ with iotests.FilePath('img') as img_path, \ iotests.log(vm_a.qmp('migrate', uri='exec:cat >%s' % (fifo_a))) with iotests.Timeout(3, 'Migration does not complete'): # Wait for the source first (which includes setup=setup) - wait_migration(vm_a) + vm_a.wait_migration() # Wait for the destination second (which does not) - wait_migration(vm_b) + vm_b.wait_migration() iotests.log(vm_a.qmp('query-migrate')['return']['status']) iotests.log(vm_b.qmp('query-migrate')['return']['status']) @@ -105,7 +89,7 @@ with iotests.FilePath('img') as img_path, \ .add_incoming("exec: cat '%s'" % (fifo_b)) .launch()) - enable_migration_events(vm_a, 'A') + vm_a.enable_migration_events('A') iotests.log(vm_a.qmp('blockdev-snapshot', node='drive0-backing', overlay='drive0')) @@ -114,9 +98,9 @@ with iotests.FilePath('img') as img_path, \ iotests.log(vm_b.qmp('migrate', uri='exec:cat >%s' % (fifo_b))) with iotests.Timeout(3, 'Migration does not complete'): # Wait for the source first (which includes setup=setup) - wait_migration(vm_b) + vm_b.wait_migration() # Wait for the destination second (which does not) - wait_migration(vm_a) + vm_a.wait_migration() iotests.log(vm_a.qmp('query-migrate')['return']['status']) iotests.log(vm_b.qmp('query-migrate')['return']['status']) diff --git a/tests/qemu-iotests/258 b/tests/qemu-iotests/258 new file mode 100755 index 0000000000..b84cf02254 --- /dev/null +++ b/tests/qemu-iotests/258 @@ -0,0 +1,163 @@ +#!/usr/bin/env python +# +# Very specific tests for adjacent commit/stream block jobs +# +# Copyright (C) 2019 Red Hat, Inc. +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see <http://www.gnu.org/licenses/>. 
+# +# Creator/Owner: Max Reitz <mreitz@redhat.com> + +import iotests +from iotests import log, qemu_img, qemu_io_silent, \ + filter_qmp_testfiles, filter_qmp_imgfmt + +# Need backing file and change-backing-file support +iotests.verify_image_format(supported_fmts=['qcow2', 'qed']) +iotests.verify_platform(['linux']) + + +# Returns a node for blockdev-add +def node(node_name, path, backing=None, fmt=None, throttle=None): + if fmt is None: + fmt = iotests.imgfmt + + res = { + 'node-name': node_name, + 'driver': fmt, + 'file': { + 'driver': 'file', + 'filename': path + } + } + + if backing is not None: + res['backing'] = backing + + if throttle: + res['file'] = { + 'driver': 'throttle', + 'throttle-group': throttle, + 'file': res['file'] + } + + return res + +# Finds a node in the debug block graph +def find_graph_node(graph, node_id): + return next(node for node in graph['nodes'] if node['id'] == node_id) + + +def test_concurrent_finish(write_to_stream_node): + log('') + log('=== Commit and stream finish concurrently (letting %s write) ===' % \ + ('stream' if write_to_stream_node else 'commit')) + log('') + + # All chosen in such a way that when the commit job wants to + # finish, it polls and thus makes stream finish concurrently -- + # and the other way around, depending on whether the commit job + # is finalized before stream completes or not. + + with iotests.FilePath('node4.img') as node4_path, \ + iotests.FilePath('node3.img') as node3_path, \ + iotests.FilePath('node2.img') as node2_path, \ + iotests.FilePath('node1.img') as node1_path, \ + iotests.FilePath('node0.img') as node0_path, \ + iotests.VM() as vm: + + # It is important to use raw for the base layer (so that + # permissions are just handed through to the protocol layer) + assert qemu_img('create', '-f', 'raw', node0_path, '64M') == 0 + + stream_throttle=None + commit_throttle=None + + for path in [node1_path, node2_path, node3_path, node4_path]: + assert qemu_img('create', '-f', iotests.imgfmt, path, '64M') == 0 + + if write_to_stream_node: + # This is what (most of the time) makes commit finish + # earlier and then pull in stream + assert qemu_io_silent(node2_path, + '-c', 'write %iK 64K' % (65536 - 192), + '-c', 'write %iK 64K' % (65536 - 64)) == 0 + + stream_throttle='tg' + else: + # And this makes stream finish earlier + assert qemu_io_silent(node1_path, + '-c', 'write %iK 64K' % (65536 - 64)) == 0 + + commit_throttle='tg' + + vm.launch() + + vm.qmp_log('object-add', + qom_type='throttle-group', + id='tg', + props={ + 'x-iops-write': 1, + 'x-iops-write-max': 1 + }) + + vm.qmp_log('blockdev-add', + filters=[filter_qmp_testfiles, filter_qmp_imgfmt], + **node('node4', node4_path, throttle=stream_throttle, + backing=node('node3', node3_path, + backing=node('node2', node2_path, + backing=node('node1', node1_path, + backing=node('node0', node0_path, throttle=commit_throttle, + fmt='raw')))))) + + vm.qmp_log('block-commit', + job_id='commit', + device='node4', + filter_node_name='commit-filter', + top_node='node1', + base_node='node0', + auto_finalize=False) + + vm.qmp_log('block-stream', + job_id='stream', + device='node3', + base_node='commit-filter') + + if write_to_stream_node: + vm.run_job('commit', auto_finalize=False, auto_dismiss=True) + vm.run_job('stream', auto_finalize=True, auto_dismiss=True) + else: + # No, the jobs do not really finish concurrently here, + # the stream job does complete strictly before commit. + # But still, this is close enough for what we want to + # test. 
+ vm.run_job('stream', auto_finalize=True, auto_dismiss=True) + vm.run_job('commit', auto_finalize=False, auto_dismiss=True) + + # Assert that the backing node of node3 is node 0 now + graph = vm.qmp('x-debug-query-block-graph')['return'] + for edge in graph['edges']: + if edge['name'] == 'backing' and \ + find_graph_node(graph, edge['parent'])['name'] == 'node3': + assert find_graph_node(graph, edge['child'])['name'] == 'node0' + break + + +def main(): + log('Running tests:') + test_concurrent_finish(True) + test_concurrent_finish(False) + +if __name__ == '__main__': + main() diff --git a/tests/qemu-iotests/258.out b/tests/qemu-iotests/258.out new file mode 100644 index 0000000000..ce6e9ba3e5 --- /dev/null +++ b/tests/qemu-iotests/258.out @@ -0,0 +1,33 @@ +Running tests: + +=== Commit and stream finish concurrently (letting stream write) === + +{"execute": "object-add", "arguments": {"id": "tg", "props": {"x-iops-write": 1, "x-iops-write-max": 1}, "qom-type": "throttle-group"}} +{"return": {}} +{"execute": "blockdev-add", "arguments": {"backing": {"backing": {"backing": {"backing": {"driver": "raw", "file": {"driver": "file", "filename": "TEST_DIR/PID-node0.img"}, "node-name": "node0"}, "driver": "IMGFMT", "file": {"driver": "file", "filename": "TEST_DIR/PID-node1.img"}, "node-name": "node1"}, "driver": "IMGFMT", "file": {"driver": "file", "filename": "TEST_DIR/PID-node2.img"}, "node-name": "node2"}, "driver": "IMGFMT", "file": {"driver": "file", "filename": "TEST_DIR/PID-node3.img"}, "node-name": "node3"}, "driver": "IMGFMT", "file": {"driver": "throttle", "file": {"driver": "file", "filename": "TEST_DIR/PID-node4.img"}, "throttle-group": "tg"}, "node-name": "node4"}} +{"return": {}} +{"execute": "block-commit", "arguments": {"auto-finalize": false, "base-node": "node0", "device": "node4", "filter-node-name": "commit-filter", "job-id": "commit", "top-node": "node1"}} +{"return": {}} +{"execute": "block-stream", "arguments": {"base-node": "commit-filter", "device": "node3", "job-id": "stream"}} +{"return": {}} +{"execute": "job-finalize", "arguments": {"id": "commit"}} +{"return": {}} +{"data": {"id": "commit", "type": "commit"}, "event": "BLOCK_JOB_PENDING", "timestamp": {"microseconds": "USECS", "seconds": "SECS"}} +{"data": {"device": "commit", "len": 67108864, "offset": 67108864, "speed": 0, "type": "commit"}, "event": "BLOCK_JOB_COMPLETED", "timestamp": {"microseconds": "USECS", "seconds": "SECS"}} +{"data": {"device": "stream", "len": 67108864, "offset": 67108864, "speed": 0, "type": "stream"}, "event": "BLOCK_JOB_COMPLETED", "timestamp": {"microseconds": "USECS", "seconds": "SECS"}} + +=== Commit and stream finish concurrently (letting commit write) === + +{"execute": "object-add", "arguments": {"id": "tg", "props": {"x-iops-write": 1, "x-iops-write-max": 1}, "qom-type": "throttle-group"}} +{"return": {}} +{"execute": "blockdev-add", "arguments": {"backing": {"backing": {"backing": {"backing": {"driver": "raw", "file": {"driver": "throttle", "file": {"driver": "file", "filename": "TEST_DIR/PID-node0.img"}, "throttle-group": "tg"}, "node-name": "node0"}, "driver": "IMGFMT", "file": {"driver": "file", "filename": "TEST_DIR/PID-node1.img"}, "node-name": "node1"}, "driver": "IMGFMT", "file": {"driver": "file", "filename": "TEST_DIR/PID-node2.img"}, "node-name": "node2"}, "driver": "IMGFMT", "file": {"driver": "file", "filename": "TEST_DIR/PID-node3.img"}, "node-name": "node3"}, "driver": "IMGFMT", "file": {"driver": "file", "filename": "TEST_DIR/PID-node4.img"}, "node-name": "node4"}} 
+{"return": {}} +{"execute": "block-commit", "arguments": {"auto-finalize": false, "base-node": "node0", "device": "node4", "filter-node-name": "commit-filter", "job-id": "commit", "top-node": "node1"}} +{"return": {}} +{"execute": "block-stream", "arguments": {"base-node": "commit-filter", "device": "node3", "job-id": "stream"}} +{"return": {}} +{"data": {"device": "stream", "len": 67108864, "offset": 67108864, "speed": 0, "type": "stream"}, "event": "BLOCK_JOB_COMPLETED", "timestamp": {"microseconds": "USECS", "seconds": "SECS"}} +{"execute": "job-finalize", "arguments": {"id": "commit"}} +{"return": {}} +{"data": {"id": "commit", "type": "commit"}, "event": "BLOCK_JOB_PENDING", "timestamp": {"microseconds": "USECS", "seconds": "SECS"}} +{"data": {"device": "commit", "len": 67108864, "offset": 67108864, "speed": 0, "type": "commit"}, "event": "BLOCK_JOB_COMPLETED", "timestamp": {"microseconds": "USECS", "seconds": "SECS"}} diff --git a/tests/qemu-iotests/262 b/tests/qemu-iotests/262 new file mode 100755 index 0000000000..398f63587e --- /dev/null +++ b/tests/qemu-iotests/262 @@ -0,0 +1,82 @@ +#!/usr/bin/env python +# +# Copyright (C) 2019 Red Hat, Inc. +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see <http://www.gnu.org/licenses/>. +# +# Creator/Owner: Kevin Wolf <kwolf@redhat.com> +# +# Test migration with filter drivers present. Keep everything in an +# iothread just for fun. 
+ +import iotests +import os + +iotests.verify_image_format(supported_fmts=['qcow2']) +iotests.verify_platform(['linux']) + +with iotests.FilePath('img') as img_path, \ + iotests.FilePath('mig_fifo') as fifo, \ + iotests.VM(path_suffix='a') as vm_a, \ + iotests.VM(path_suffix='b') as vm_b: + + def add_opts(vm): + vm.add_object('iothread,id=iothread0') + vm.add_object('throttle-group,id=tg0,x-bps-total=65536') + vm.add_blockdev('file,filename=%s,node-name=drive0-file' % (img_path)) + vm.add_blockdev('%s,file=drive0-file,node-name=drive0-fmt' % (iotests.imgfmt)) + vm.add_blockdev('copy-on-read,file=drive0-fmt,node-name=drive0-cor') + vm.add_blockdev('throttle,file=drive0-cor,node-name=drive0-throttle,throttle-group=tg0') + vm.add_blockdev('blkdebug,image=drive0-throttle,node-name=drive0-dbg') + vm.add_blockdev('null-co,node-name=null,read-zeroes=on') + vm.add_blockdev('blkverify,test=drive0-dbg,raw=null,node-name=drive0-verify') + + if iotests.supports_quorum(): + vm.add_blockdev('quorum,children.0=drive0-verify,vote-threshold=1,node-name=drive0-quorum') + root = "drive0-quorum" + else: + root = "drive0-verify" + + vm.add_device('virtio-blk,drive=%s,iothread=iothread0' % root) + + iotests.qemu_img_pipe('create', '-f', iotests.imgfmt, img_path, '64M') + + os.mkfifo(fifo) + + iotests.log('Launching source VM...') + add_opts(vm_a) + vm_a.launch() + + vm_a.enable_migration_events('A') + + iotests.log('Launching destination VM...') + add_opts(vm_b) + vm_b.add_incoming("exec: cat '%s'" % (fifo)) + vm_b.launch() + + vm_b.enable_migration_events('B') + + iotests.log('Starting migration to B...') + iotests.log(vm_a.qmp('migrate', uri='exec:cat >%s' % (fifo))) + with iotests.Timeout(3, 'Migration does not complete'): + # Wait for the source first (which includes setup=setup) + vm_a.wait_migration() + # Wait for the destination second (which does not) + vm_b.wait_migration() + + iotests.log(vm_a.qmp('query-migrate')['return']['status']) + iotests.log(vm_b.qmp('query-migrate')['return']['status']) + + iotests.log(vm_a.qmp('query-status')) + iotests.log(vm_b.qmp('query-status')) diff --git a/tests/qemu-iotests/262.out b/tests/qemu-iotests/262.out new file mode 100644 index 0000000000..5a58e5e9f8 --- /dev/null +++ b/tests/qemu-iotests/262.out @@ -0,0 +1,17 @@ +Launching source VM... +Enabling migration QMP events on A... +{"return": {}} +Launching destination VM... +Enabling migration QMP events on B... +{"return": {}} +Starting migration to B... 
+{"return": {}} +{"data": {"status": "setup"}, "event": "MIGRATION", "timestamp": {"microseconds": "USECS", "seconds": "SECS"}} +{"data": {"status": "active"}, "event": "MIGRATION", "timestamp": {"microseconds": "USECS", "seconds": "SECS"}} +{"data": {"status": "completed"}, "event": "MIGRATION", "timestamp": {"microseconds": "USECS", "seconds": "SECS"}} +{"data": {"status": "active"}, "event": "MIGRATION", "timestamp": {"microseconds": "USECS", "seconds": "SECS"}} +{"data": {"status": "completed"}, "event": "MIGRATION", "timestamp": {"microseconds": "USECS", "seconds": "SECS"}} +completed +completed +{"return": {"running": false, "singlestep": false, "status": "postmigrate"}} +{"return": {"running": true, "singlestep": false, "status": "running"}} diff --git a/tests/qemu-iotests/group b/tests/qemu-iotests/group index f13e5f2e23..5a37839e35 100644 --- a/tests/qemu-iotests/group +++ b/tests/qemu-iotests/group @@ -271,3 +271,5 @@ 254 rw backing quick 255 rw quick 256 rw quick +258 rw quick +262 rw quick migration diff --git a/tests/qemu-iotests/iotests.py b/tests/qemu-iotests/iotests.py index ce74177ab1..91172c39a5 100644 --- a/tests/qemu-iotests/iotests.py +++ b/tests/qemu-iotests/iotests.py @@ -583,6 +583,22 @@ class VM(qtest.QEMUQtestMachine): elif status == 'null': return error + def enable_migration_events(self, name): + log('Enabling migration QMP events on %s...' % name) + log(self.qmp('migrate-set-capabilities', capabilities=[ + { + 'capability': 'events', + 'state': True + } + ])) + + def wait_migration(self): + while True: + event = self.event_wait('MIGRATION') + log(event, filters=[filter_qmp_event]) + if event['data']['status'] == 'completed': + break + def node_info(self, node_name): nodes = self.qmp('query-named-block-nodes') for x in nodes['return']: diff --git a/tests/test-bdrv-drain.c b/tests/test-bdrv-drain.c index 481b7508df..374bef6bb2 100644 --- a/tests/test-bdrv-drain.c +++ b/tests/test-bdrv-drain.c @@ -101,6 +101,13 @@ static void bdrv_test_child_perm(BlockDriverState *bs, BdrvChild *c, nperm, nshared); } +static int bdrv_test_change_backing_file(BlockDriverState *bs, + const char *backing_file, + const char *backing_fmt) +{ + return 0; +} + static BlockDriver bdrv_test = { .format_name = "test", .instance_size = sizeof(BDRVTestState), @@ -112,6 +119,8 @@ static BlockDriver bdrv_test = { .bdrv_co_drain_end = bdrv_test_co_drain_end, .bdrv_child_perm = bdrv_test_child_perm, + + .bdrv_change_backing_file = bdrv_test_change_backing_file, }; static void aio_ret_cb(void *opaque, int ret) @@ -678,6 +687,7 @@ static void test_iothread_common(enum drain_type drain_type, int drain_thread) &error_abort); s = bs->opaque; blk_insert_bs(blk, bs, &error_abort); + blk_set_disable_request_queuing(blk, true); blk_set_aio_context(blk, ctx_a, &error_abort); aio_context_acquire(ctx_a); @@ -1672,6 +1682,466 @@ static void test_blockjob_commit_by_drained_end(void) bdrv_unref(bs_child); } + +typedef struct TestSimpleBlockJob { + BlockJob common; + bool should_complete; + bool *did_complete; +} TestSimpleBlockJob; + +static int coroutine_fn test_simple_job_run(Job *job, Error **errp) +{ + TestSimpleBlockJob *s = container_of(job, TestSimpleBlockJob, common.job); + + while (!s->should_complete) { + job_sleep_ns(job, 0); + } + + return 0; +} + +static void test_simple_job_clean(Job *job) +{ + TestSimpleBlockJob *s = container_of(job, TestSimpleBlockJob, common.job); + *s->did_complete = true; +} + +static const BlockJobDriver test_simple_job_driver = { + .job_driver = { + .instance_size = 
sizeof(TestSimpleBlockJob), + .free = block_job_free, + .user_resume = block_job_user_resume, + .drain = block_job_drain, + .run = test_simple_job_run, + .clean = test_simple_job_clean, + }, +}; + +static int drop_intermediate_poll_update_filename(BdrvChild *child, + BlockDriverState *new_base, + const char *filename, + Error **errp) +{ + /* + * We are free to poll here, which may change the block graph, if + * it is not drained. + */ + + /* If the job is not drained: Complete it, schedule job_exit() */ + aio_poll(qemu_get_current_aio_context(), false); + /* If the job is not drained: Run job_exit(), finish the job */ + aio_poll(qemu_get_current_aio_context(), false); + + return 0; +} + +/** + * Test a poll in the midst of bdrv_drop_intermediate(). + * + * bdrv_drop_intermediate() calls BdrvChildRole.update_filename(), + * which can yield or poll. This may lead to graph changes, unless + * the whole subtree in question is drained. + * + * We test this on the following graph: + * + * Job + * + * | + * job-node + * | + * v + * + * job-node + * + * | + * backing + * | + * v + * + * node-2 --chain--> node-1 --chain--> node-0 + * + * We drop node-1 with bdrv_drop_intermediate(top=node-1, base=node-0). + * + * This first updates node-2's backing filename by invoking + * drop_intermediate_poll_update_filename(), which polls twice. This + * causes the job to finish, which in turns causes the job-node to be + * deleted. + * + * bdrv_drop_intermediate() uses a QLIST_FOREACH_SAFE() loop, so it + * already has a pointer to the BdrvChild edge between job-node and + * node-1. When it tries to handle that edge, we probably get a + * segmentation fault because the object no longer exists. + * + * + * The solution is for bdrv_drop_intermediate() to drain top's + * subtree. This prevents graph changes from happening just because + * BdrvChildRole.update_filename() yields or polls. Thus, the block + * job is paused during that drained section and must finish before or + * after. + * + * (In addition, bdrv_replace_child() must keep the job paused.) 
+ */ +static void test_drop_intermediate_poll(void) +{ + static BdrvChildRole chain_child_role; + BlockDriverState *chain[3]; + TestSimpleBlockJob *job; + BlockDriverState *job_node; + bool job_has_completed = false; + int i; + int ret; + + chain_child_role = child_backing; + chain_child_role.update_filename = drop_intermediate_poll_update_filename; + + for (i = 0; i < 3; i++) { + char name[32]; + snprintf(name, 32, "node-%i", i); + + chain[i] = bdrv_new_open_driver(&bdrv_test, name, 0, &error_abort); + } + + job_node = bdrv_new_open_driver(&bdrv_test, "job-node", BDRV_O_RDWR, + &error_abort); + bdrv_set_backing_hd(job_node, chain[1], &error_abort); + + /* + * Establish the chain last, so the chain links are the first + * elements in the BDS.parents lists + */ + for (i = 0; i < 3; i++) { + if (i) { + /* Takes the reference to chain[i - 1] */ + chain[i]->backing = bdrv_attach_child(chain[i], chain[i - 1], + "chain", &chain_child_role, + &error_abort); + } + } + + job = block_job_create("job", &test_simple_job_driver, NULL, job_node, + 0, BLK_PERM_ALL, 0, 0, NULL, NULL, &error_abort); + + /* The job has a reference now */ + bdrv_unref(job_node); + + job->did_complete = &job_has_completed; + + job_start(&job->common.job); + job->should_complete = true; + + g_assert(!job_has_completed); + ret = bdrv_drop_intermediate(chain[1], chain[0], NULL); + g_assert(ret == 0); + g_assert(job_has_completed); + + bdrv_unref(chain[2]); +} + + +typedef struct BDRVReplaceTestState { + bool was_drained; + bool was_undrained; + bool has_read; + + int drain_count; + + bool yield_before_read; + Coroutine *io_co; + Coroutine *drain_co; +} BDRVReplaceTestState; + +static void bdrv_replace_test_close(BlockDriverState *bs) +{ +} + +/** + * If @bs has a backing file: + * Yield if .yield_before_read is true (and wait for drain_begin to + * wake us up). + * Forward the read to bs->backing. Set .has_read to true. + * If drain_begin has woken us, wake it in turn. + * + * Otherwise: + * Set .has_read to true and return success. + */ +static int coroutine_fn bdrv_replace_test_co_preadv(BlockDriverState *bs, + uint64_t offset, + uint64_t bytes, + QEMUIOVector *qiov, + int flags) +{ + BDRVReplaceTestState *s = bs->opaque; + + if (bs->backing) { + int ret; + + g_assert(!s->drain_count); + + s->io_co = qemu_coroutine_self(); + if (s->yield_before_read) { + s->yield_before_read = false; + qemu_coroutine_yield(); + } + s->io_co = NULL; + + ret = bdrv_preadv(bs->backing, offset, qiov); + s->has_read = true; + + /* Wake up drain_co if it runs */ + if (s->drain_co) { + aio_co_wake(s->drain_co); + } + + return ret; + } + + s->has_read = true; + return 0; +} + +/** + * If .drain_count is 0, wake up .io_co if there is one; and set + * .was_drained. + * Increment .drain_count. + */ +static void coroutine_fn bdrv_replace_test_co_drain_begin(BlockDriverState *bs) +{ + BDRVReplaceTestState *s = bs->opaque; + + if (!s->drain_count) { + /* Keep waking io_co up until it is done */ + s->drain_co = qemu_coroutine_self(); + while (s->io_co) { + aio_co_wake(s->io_co); + s->io_co = NULL; + qemu_coroutine_yield(); + } + s->drain_co = NULL; + + s->was_drained = true; + } + s->drain_count++; +} + +/** + * Reduce .drain_count, set .was_undrained once it reaches 0. + * If .drain_count reaches 0 and the node has a backing file, issue a + * read request. 
+ */ +static void coroutine_fn bdrv_replace_test_co_drain_end(BlockDriverState *bs) +{ + BDRVReplaceTestState *s = bs->opaque; + + g_assert(s->drain_count > 0); + if (!--s->drain_count) { + int ret; + + s->was_undrained = true; + + if (bs->backing) { + char data; + QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, &data, 1); + + /* Queue a read request post-drain */ + ret = bdrv_replace_test_co_preadv(bs, 0, 1, &qiov, 0); + g_assert(ret >= 0); + } + } +} + +static BlockDriver bdrv_replace_test = { + .format_name = "replace_test", + .instance_size = sizeof(BDRVReplaceTestState), + + .bdrv_close = bdrv_replace_test_close, + .bdrv_co_preadv = bdrv_replace_test_co_preadv, + + .bdrv_co_drain_begin = bdrv_replace_test_co_drain_begin, + .bdrv_co_drain_end = bdrv_replace_test_co_drain_end, + + .bdrv_child_perm = bdrv_format_default_perms, +}; + +static void coroutine_fn test_replace_child_mid_drain_read_co(void *opaque) +{ + int ret; + char data; + + ret = blk_co_pread(opaque, 0, 1, &data, 0); + g_assert(ret >= 0); +} + +/** + * We test two things: + * (1) bdrv_replace_child_noperm() must not undrain the parent if both + * children are drained. + * (2) bdrv_replace_child_noperm() must never flush I/O requests to a + * drained child. If the old child is drained, it must flush I/O + * requests after the new one has been attached. If the new child + * is drained, it must flush I/O requests before the old one is + * detached. + * + * To do so, we create one parent node and two child nodes; then + * attach one of the children (old_child_bs) to the parent, then + * drain both old_child_bs and new_child_bs according to + * old_drain_count and new_drain_count, respectively, and finally + * we invoke bdrv_replace_node() to replace old_child_bs by + * new_child_bs. + * + * The test block driver we use here (bdrv_replace_test) has a read + * function that: + * - For the parent node, can optionally yield, and then forwards the + * read to bdrv_preadv(), + * - For the child node, just returns immediately. + * + * If the read yields, the drain_begin function will wake it up. + * + * The drain_end function issues a read on the parent once it is fully + * undrained (which simulates requests starting to come in again). 
+ */ +static void do_test_replace_child_mid_drain(int old_drain_count, + int new_drain_count) +{ + BlockBackend *parent_blk; + BlockDriverState *parent_bs; + BlockDriverState *old_child_bs, *new_child_bs; + BDRVReplaceTestState *parent_s; + BDRVReplaceTestState *old_child_s, *new_child_s; + Coroutine *io_co; + int i; + + parent_bs = bdrv_new_open_driver(&bdrv_replace_test, "parent", 0, + &error_abort); + parent_s = parent_bs->opaque; + + parent_blk = blk_new(qemu_get_aio_context(), + BLK_PERM_CONSISTENT_READ, BLK_PERM_ALL); + blk_insert_bs(parent_blk, parent_bs, &error_abort); + + old_child_bs = bdrv_new_open_driver(&bdrv_replace_test, "old-child", 0, + &error_abort); + new_child_bs = bdrv_new_open_driver(&bdrv_replace_test, "new-child", 0, + &error_abort); + old_child_s = old_child_bs->opaque; + new_child_s = new_child_bs->opaque; + + /* So that we can read something */ + parent_bs->total_sectors = 1; + old_child_bs->total_sectors = 1; + new_child_bs->total_sectors = 1; + + bdrv_ref(old_child_bs); + parent_bs->backing = bdrv_attach_child(parent_bs, old_child_bs, "child", + &child_backing, &error_abort); + + for (i = 0; i < old_drain_count; i++) { + bdrv_drained_begin(old_child_bs); + } + for (i = 0; i < new_drain_count; i++) { + bdrv_drained_begin(new_child_bs); + } + + if (!old_drain_count) { + /* + * Start a read operation that will yield, so it will not + * complete before the node is drained. + */ + parent_s->yield_before_read = true; + io_co = qemu_coroutine_create(test_replace_child_mid_drain_read_co, + parent_blk); + qemu_coroutine_enter(io_co); + } + + /* If we have started a read operation, it should have yielded */ + g_assert(!parent_s->has_read); + + /* Reset drained status so we can see what bdrv_replace_node() does */ + parent_s->was_drained = false; + parent_s->was_undrained = false; + + g_assert(parent_bs->quiesce_counter == old_drain_count); + bdrv_replace_node(old_child_bs, new_child_bs, &error_abort); + g_assert(parent_bs->quiesce_counter == new_drain_count); + + if (!old_drain_count && !new_drain_count) { + /* + * From undrained to undrained drains and undrains the parent, + * because bdrv_replace_node() contains a drained section for + * @old_child_bs. + */ + g_assert(parent_s->was_drained && parent_s->was_undrained); + } else if (!old_drain_count && new_drain_count) { + /* + * From undrained to drained should drain the parent and keep + * it that way. + */ + g_assert(parent_s->was_drained && !parent_s->was_undrained); + } else if (old_drain_count && !new_drain_count) { + /* + * From drained to undrained should undrain the parent and + * keep it that way. + */ + g_assert(!parent_s->was_drained && parent_s->was_undrained); + } else /* if (old_drain_count && new_drain_count) */ { + /* + * From drained to drained must not undrain the parent at any + * point + */ + g_assert(!parent_s->was_drained && !parent_s->was_undrained); + } + + if (!old_drain_count || !new_drain_count) { + /* + * If !old_drain_count, we have started a read request before + * bdrv_replace_node(). If !new_drain_count, the parent must + * have been undrained at some point, and + * bdrv_replace_test_co_drain_end() starts a read request + * then. + */ + g_assert(parent_s->has_read); + } else { + /* + * If the parent was never undrained, there is no way to start + * a read request. 
+ */ + g_assert(!parent_s->has_read); + } + + /* A drained child must have not received any request */ + g_assert(!(old_drain_count && old_child_s->has_read)); + g_assert(!(new_drain_count && new_child_s->has_read)); + + for (i = 0; i < new_drain_count; i++) { + bdrv_drained_end(new_child_bs); + } + for (i = 0; i < old_drain_count; i++) { + bdrv_drained_end(old_child_bs); + } + + /* + * By now, bdrv_replace_test_co_drain_end() must have been called + * at some point while the new child was attached to the parent. + */ + g_assert(parent_s->has_read); + g_assert(new_child_s->has_read); + + blk_unref(parent_blk); + bdrv_unref(parent_bs); + bdrv_unref(old_child_bs); + bdrv_unref(new_child_bs); +} + +static void test_replace_child_mid_drain(void) +{ + int old_drain_count, new_drain_count; + + for (old_drain_count = 0; old_drain_count < 2; old_drain_count++) { + for (new_drain_count = 0; new_drain_count < 2; new_drain_count++) { + do_test_replace_child_mid_drain(old_drain_count, new_drain_count); + } + } +} + int main(int argc, char **argv) { int ret; @@ -1758,6 +2228,12 @@ int main(int argc, char **argv) g_test_add_func("/bdrv-drain/blockjob/commit_by_drained_end", test_blockjob_commit_by_drained_end); + g_test_add_func("/bdrv-drain/bdrv_drop_intermediate/poll", + test_drop_intermediate_poll); + + g_test_add_func("/bdrv-drain/replace_child/mid-drain", + test_replace_child_mid_drain); + ret = g_test_run(); qemu_event_destroy(&done_event); return ret; |
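
The iotests.py hunk above turns the migration helpers of test 234 into VM methods (enable_migration_events() and wait_migration()), which tests 234 and 262 now call. A condensed usage sketch, assuming the qemu-iotests environment and omitting the blockdev/device setup the real tests perform:

    import os
    import iotests

    with iotests.FilePath('img') as img_path, \
         iotests.FilePath('mig_fifo') as fifo, \
         iotests.VM(path_suffix='a') as vm_a, \
         iotests.VM(path_suffix='b') as vm_b:

        iotests.qemu_img('create', '-f', iotests.imgfmt, img_path, '64M')
        os.mkfifo(fifo)

        vm_a.launch()
        vm_a.enable_migration_events('A')       # new VM helper

        vm_b.add_incoming("exec: cat '%s'" % fifo)
        vm_b.launch()
        vm_b.enable_migration_events('B')       # new VM helper

        iotests.log(vm_a.qmp('migrate', uri='exec:cat >%s' % fifo))
        with iotests.Timeout(3, 'Migration does not complete'):
            vm_a.wait_migration()               # source finishes first
            vm_b.wait_migration()               # then the destination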