author | Denis V. Lunev | 2016-02-24 09:53:39 +0100
---|---|---
committer | Amit Shah | 2016-02-26 16:10:08 +0100
commit | ea6a55bcc0d144ac5086cebf7f84afa7071afe90 (patch) |
tree | 321162f47f00aa2900e104ce625f4307f593e001 /migration |
parent | migration (ordinary): move bdrv_invalidate_cache_all out of coroutine context (diff) |
migration (postcopy): move bdrv_invalidate_cache_all out of coroutine context
There is a possibility to hit an assert in qcow2_get_specific_info when
s->qcow_version is undefined. This happens when the VM is starting from a
suspended state, i.e. it is processing an incoming migration, and 'info block'
is called at the same time.
The problem is that qcow2_invalidate_cache() closes the image and
memset()s BDRVQcowState in the middle.
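For context, the race looks roughly like this (a simplified, illustrative C sketch, not the actual QEMU code; the struct and function names below are made up for the example):

```c
#include <assert.h>
#include <string.h>

/* Stand-in for the per-image qcow2 state. */
struct qcow_state {
    int qcow_version;   /* expected to be 2 or 3 once the image is open */
    /* ... other per-image fields ... */
};

/* Roughly what a query path such as 'info block' relies on. */
void query_specific_info(struct qcow_state *s)
{
    /* If the invalidate path has already memset() the state but has not
     * yet re-opened the image, qcow_version is 0 here and the assertion
     * (like the one in the real qcow2_get_specific_info) fires. */
    assert(s->qcow_version == 2 || s->qcow_version == 3);
}

/* Roughly what cache invalidation does during incoming migration. */
void invalidate_cache(struct qcow_state *s)
{
    /* close the image ... */
    memset(s, 0, sizeof(*s));   /* window where qcow_version == 0 */
    /* ... then re-open the image and repopulate *s */
}
```

While invalidate_cache() is between the memset() and the re-open, a concurrent query sees qcow_version == 0 and trips the assertion.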
To avoid this, the patch moves the processing of bdrv_invalidate_cache_all out
of coroutine context for postcopy migration. The function is currently called
from the following call stack:
process_incoming_migration_co
qemu_loadvm_state
qemu_loadvm_state_main
loadvm_process_command
loadvm_postcopy_handle_run
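Since process_incoming_migration_co runs in a coroutine, the fix defers the heavy work to a bottom half that the main loop runs outside coroutine context. Below is a minimal sketch of that pattern, using the qemu_bh_* calls exactly as they appear in the diff further down; the function names are shortened for illustration, and the fragment omits the QEMU-internal headers it would need, so it is not compilable on its own:

```c
/* Sketch only: defer work from a coroutine to a one-shot bottom half. */

static void handle_run_bh(void *opaque)             /* runs in the main loop */
{
    MigrationIncomingState *mis = opaque;
    Error *local_err = NULL;

    bdrv_invalidate_cache_all(&local_err);          /* now outside coroutine context */
    if (local_err) {
        error_report_err(local_err);
    }
    qemu_bh_delete(mis->bh);                        /* one-shot: clean up after running */
}

static int handle_run(MigrationIncomingState *mis)  /* called from the coroutine */
{
    mis->bh = qemu_bh_new(handle_run_bh, mis);      /* defer the heavy work */
    qemu_bh_schedule(mis->bh);                      /* the main loop will run it */
    return 0;
}
```

The coroutine only schedules the bottom half and returns immediately; the bottom half deletes itself after running, so it fires exactly once.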
Signed-off-by: Denis V. Lunev <den@openvz.org>
Tested-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
Reviewed-by: Fam Zheng <famz@redhat.com>
CC: Paolo Bonzini <pbonzini@redhat.com>
CC: Juan Quintela <quintela@redhat.com>
CC: Amit Shah <amit.shah@redhat.com>
Message-Id: <1456304019-10507-3-git-send-email-den@openvz.org>
Signed-off-by: Amit Shah <amit.shah@redhat.com>
Diffstat (limited to 'migration')
-rw-r--r-- | migration/savevm.c | 29
1 file changed, 19 insertions, 10 deletions
```diff
diff --git a/migration/savevm.c b/migration/savevm.c
index 02e8487441..b45915612f 100644
--- a/migration/savevm.c
+++ b/migration/savevm.c
@@ -1495,17 +1495,10 @@ static int loadvm_postcopy_handle_listen(MigrationIncomingState *mis)
     return 0;
 }
 
-/* After all discards we can start running and asking for pages */
-static int loadvm_postcopy_handle_run(MigrationIncomingState *mis)
+static void loadvm_postcopy_handle_run_bh(void *opaque)
 {
-    PostcopyState ps = postcopy_state_set(POSTCOPY_INCOMING_RUNNING);
     Error *local_err = NULL;
-
-    trace_loadvm_postcopy_handle_run();
-    if (ps != POSTCOPY_INCOMING_LISTENING) {
-        error_report("CMD_POSTCOPY_RUN in wrong postcopy state (%d)", ps);
-        return -1;
-    }
+    MigrationIncomingState *mis = opaque;
 
     /* TODO we should move all of this lot into postcopy_ram.c or a shared code
      * in migration.c
@@ -1518,7 +1511,6 @@ static int loadvm_postcopy_handle_run(MigrationIncomingState *mis)
     bdrv_invalidate_cache_all(&local_err);
     if (local_err) {
         error_report_err(local_err);
-        return -1;
     }
 
     trace_loadvm_postcopy_handle_run_cpu_sync();
@@ -1534,6 +1526,23 @@ static int loadvm_postcopy_handle_run(MigrationIncomingState *mis)
         runstate_set(RUN_STATE_PAUSED);
     }
 
+    qemu_bh_delete(mis->bh);
+}
+
+/* After all discards we can start running and asking for pages */
+static int loadvm_postcopy_handle_run(MigrationIncomingState *mis)
+{
+    PostcopyState ps = postcopy_state_set(POSTCOPY_INCOMING_RUNNING);
+
+    trace_loadvm_postcopy_handle_run();
+    if (ps != POSTCOPY_INCOMING_LISTENING) {
+        error_report("CMD_POSTCOPY_RUN in wrong postcopy state (%d)", ps);
+        return -1;
+    }
+
+    mis->bh = qemu_bh_new(loadvm_postcopy_handle_run_bh, NULL);
+    qemu_bh_schedule(mis->bh);
+
     /* We need to finish reading the stream from the package
      * and also stop reading anything more from the stream that loaded the
      * package (since it's now being read by the listener thread).
```