summaryrefslogtreecommitdiffstats
path: root/migration
diff options
context:
space:
mode:
authorVladimir Sementsov-Ogievskiy2021-03-22 10:49:05 +0100
committerStefan Hajnoczi2021-03-24 14:41:19 +0100
commit4290b4834c4ffb9633a9851a9b932a147fcac928 (patch)
tree31e41b71daef3edddeb6ae88a03c377076f19de4 /migration
parentMerge remote-tracking branch 'remotes/kraxel/tags/ui-20210323-pull-request' i... (diff)
downloadqemu-4290b4834c4ffb9633a9851a9b932a147fcac928.tar.gz
qemu-4290b4834c4ffb9633a9851a9b932a147fcac928.tar.xz
qemu-4290b4834c4ffb9633a9851a9b932a147fcac928.zip
migration/block-dirty-bitmap: make incoming disabled bitmaps busy
Incoming enabled bitmaps are busy, because we do
bdrv_dirty_bitmap_create_successor() for them. But disabled bitmaps being
migrated are not marked busy, and the user can remove them during the
incoming migration. Then we may crash in cancel_incoming_locked() when we
try to remove a bitmap that was already removed by the user, like this:

 #0  qemu_mutex_lock_impl (mutex=0x5593d88c50d1, file=0x559680554b20 "../block/dirty-bitmap.c", line=64) at ../util/qemu-thread-posix.c:77
 #1  bdrv_dirty_bitmaps_lock (bs=0x5593d88c0ee9) at ../block/dirty-bitmap.c:64
 #2  bdrv_release_dirty_bitmap (bitmap=0x5596810e9570) at ../block/dirty-bitmap.c:362
 #3  cancel_incoming_locked (s=0x559680be8208 <dbm_state+40>) at ../migration/block-dirty-bitmap.c:918
 #4  dirty_bitmap_load (f=0x559681d02b10, opaque=0x559680be81e0 <dbm_state>, version_id=1) at ../migration/block-dirty-bitmap.c:1194
 #5  vmstate_load (f=0x559681d02b10, se=0x559680fb5810) at ../migration/savevm.c:908
 #6  qemu_loadvm_section_part_end (f=0x559681d02b10, mis=0x559680fb4a30) at ../migration/savevm.c:2473
 #7  qemu_loadvm_state_main (f=0x559681d02b10, mis=0x559680fb4a30) at ../migration/savevm.c:2626
 #8  postcopy_ram_listen_thread (opaque=0x0) at ../migration/savevm.c:1871
 #9  qemu_thread_start (args=0x5596817ccd10) at ../util/qemu-thread-posix.c:521
 #10 start_thread () at /lib64/libpthread.so.0
 #11 clone () at /lib64/libc.so.6

Note the bs pointer taken from the bitmap: it is definitely badly aligned.
That's because we have a use-after-free: the bitmap is already freed.

So, let's make disabled bitmaps (being migrated) busy during incoming
migration.

Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Message-Id: <20210322094906.5079-2-vsementsov@virtuozzo.com>
Diffstat (limited to 'migration')
-rw-r--r-- migration/block-dirty-bitmap.c | 6 ++++++
1 file changed, 6 insertions(+), 0 deletions(-)
diff --git a/migration/block-dirty-bitmap.c b/migration/block-dirty-bitmap.c
index 975093610a..35f5ef688d 100644
--- a/migration/block-dirty-bitmap.c
+++ b/migration/block-dirty-bitmap.c
@@ -839,6 +839,8 @@ static int dirty_bitmap_load_start(QEMUFile *f, DBMLoadState *s)
error_report_err(local_err);
return -EINVAL;
}
+ } else {
+ bdrv_dirty_bitmap_set_busy(s->bitmap, true);
}
b = g_new(LoadBitmapState, 1);
@@ -914,6 +916,8 @@ static void cancel_incoming_locked(DBMLoadState *s)
assert(!s->before_vm_start_handled || !b->migrated);
if (bdrv_dirty_bitmap_has_successor(b->bitmap)) {
bdrv_reclaim_dirty_bitmap(b->bitmap, &error_abort);
+ } else {
+ bdrv_dirty_bitmap_set_busy(b->bitmap, false);
}
bdrv_release_dirty_bitmap(b->bitmap);
}
@@ -951,6 +955,8 @@ static void dirty_bitmap_load_complete(QEMUFile *f, DBMLoadState *s)
if (bdrv_dirty_bitmap_has_successor(s->bitmap)) {
bdrv_reclaim_dirty_bitmap(s->bitmap, &error_abort);
+ } else {
+ bdrv_dirty_bitmap_set_busy(s->bitmap, false);
}
for (item = s->bitmaps; item; item = g_slist_next(item)) {