Diffstat (limited to 'migration')
 migration/channel.c           | 26
 migration/dirtyrate.c         | 78
 migration/dirtyrate.h         |  8
 migration/migration.c         |  3
 migration/migration.h         |  4
 migration/multifd.c           |  8
 migration/qemu-file-channel.c |  4
 migration/rdma.c              | 11
 migration/socket.c            | 24
 9 files changed, 138 insertions(+), 28 deletions(-)
diff --git a/migration/channel.c b/migration/channel.c
index c9ee902021..01275a9162 100644
--- a/migration/channel.c
+++ b/migration/channel.c
@@ -38,18 +38,19 @@ void migration_channel_process_incoming(QIOChannel *ioc)
trace_migration_set_incoming_channel(
ioc, object_get_typename(OBJECT(ioc)));
- if (object_dynamic_cast(OBJECT(ioc), TYPE_QIO_CHANNEL_SOCKET)) {
- yank_register_function(MIGRATION_YANK_INSTANCE,
- migration_yank_iochannel,
- QIO_CHANNEL(ioc));
- }
-
if (s->parameters.tls_creds &&
*s->parameters.tls_creds &&
!object_dynamic_cast(OBJECT(ioc),
TYPE_QIO_CHANNEL_TLS)) {
migration_tls_channel_process_incoming(s, ioc, &local_err);
} else {
+ if (object_dynamic_cast(OBJECT(ioc), TYPE_QIO_CHANNEL_SOCKET) ||
+ object_dynamic_cast(OBJECT(ioc), TYPE_QIO_CHANNEL_TLS)) {
+ yank_register_function(MIGRATION_YANK_INSTANCE,
+ migration_yank_iochannel,
+ QIO_CHANNEL(ioc));
+ }
+
migration_ioc_process_incoming(ioc, &local_err);
}
@@ -76,12 +77,6 @@ void migration_channel_connect(MigrationState *s,
ioc, object_get_typename(OBJECT(ioc)), hostname, error);
if (!error) {
- if (object_dynamic_cast(OBJECT(ioc), TYPE_QIO_CHANNEL_SOCKET)) {
- yank_register_function(MIGRATION_YANK_INSTANCE,
- migration_yank_iochannel,
- QIO_CHANNEL(ioc));
- }
-
if (s->parameters.tls_creds &&
*s->parameters.tls_creds &&
!object_dynamic_cast(OBJECT(ioc),
@@ -99,6 +94,13 @@ void migration_channel_connect(MigrationState *s,
} else {
QEMUFile *f = qemu_fopen_channel_output(ioc);
+ if (object_dynamic_cast(OBJECT(ioc), TYPE_QIO_CHANNEL_SOCKET) ||
+ object_dynamic_cast(OBJECT(ioc), TYPE_QIO_CHANNEL_TLS)) {
+ yank_register_function(MIGRATION_YANK_INSTANCE,
+ migration_yank_iochannel,
+ QIO_CHANNEL(ioc));
+ }
+
qemu_mutex_lock(&s->qemu_file_lock);
s->to_dst_file = f;
qemu_mutex_unlock(&s->qemu_file_lock);
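
The channel.c hunks above move the yank registration onto the branch that actually consumes the channel and widen the type check so that TLS-wrapped channels are covered as well. A minimal sketch of that check as a predicate, assuming the headers this file already uses (the helper name is hypothetical, not part of the patch):

    #include "qemu/osdep.h"
    #include "io/channel-socket.h"
    #include "io/channel-tls.h"

    /* Hypothetical helper: a channel gets a yank function registered if it
     * is (or wraps) a network connection that yank can forcibly shut down. */
    static bool migration_channel_is_yankable(QIOChannel *ioc)
    {
        return object_dynamic_cast(OBJECT(ioc), TYPE_QIO_CHANNEL_SOCKET) ||
               object_dynamic_cast(OBJECT(ioc), TYPE_QIO_CHANNEL_TLS);
    }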
diff --git a/migration/dirtyrate.c b/migration/dirtyrate.c
index ccb98147e8..320c56ba2c 100644
--- a/migration/dirtyrate.c
+++ b/migration/dirtyrate.c
@@ -20,6 +20,9 @@
#include "ram.h"
#include "trace.h"
#include "dirtyrate.h"
+#include "monitor/hmp.h"
+#include "monitor/monitor.h"
+#include "qapi/qmp/qdict.h"
static int CalculatingState = DIRTY_RATE_STATUS_UNSTARTED;
static struct DirtyRateStat DirtyStat;
@@ -48,6 +51,12 @@ static bool is_sample_period_valid(int64_t sec)
return true;
}
+static bool is_sample_pages_valid(int64_t pages)
+{
+ return pages >= MIN_SAMPLE_PAGE_COUNT &&
+ pages <= MAX_SAMPLE_PAGE_COUNT;
+}
+
static int dirtyrate_set_state(int *state, int old_state, int new_state)
{
assert(new_state < DIRTY_RATE_STATUS__MAX);
@@ -72,13 +81,15 @@ static struct DirtyRateInfo *query_dirty_rate_info(void)
info->status = CalculatingState;
info->start_time = DirtyStat.start_time;
info->calc_time = DirtyStat.calc_time;
+ info->sample_pages = DirtyStat.sample_pages;
trace_query_dirty_rate_info(DirtyRateStatus_str(CalculatingState));
return info;
}
-static void init_dirtyrate_stat(int64_t start_time, int64_t calc_time)
+static void init_dirtyrate_stat(int64_t start_time, int64_t calc_time,
+ uint64_t sample_pages)
{
DirtyStat.total_dirty_samples = 0;
DirtyStat.total_sample_count = 0;
@@ -86,6 +97,7 @@ static void init_dirtyrate_stat(int64_t start_time, int64_t calc_time)
DirtyStat.dirty_rate = -1;
DirtyStat.start_time = start_time;
DirtyStat.calc_time = calc_time;
+ DirtyStat.sample_pages = sample_pages;
}
static void update_dirtyrate_stat(struct RamblockDirtyInfo *info)
@@ -361,6 +373,7 @@ void *get_dirtyrate_thread(void *arg)
int ret;
int64_t start_time;
int64_t calc_time;
+ uint64_t sample_pages;
ret = dirtyrate_set_state(&CalculatingState, DIRTY_RATE_STATUS_UNSTARTED,
DIRTY_RATE_STATUS_MEASURING);
@@ -371,7 +384,8 @@ void *get_dirtyrate_thread(void *arg)
start_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME) / 1000;
calc_time = config.sample_period_seconds;
- init_dirtyrate_stat(start_time, calc_time);
+ sample_pages = config.sample_pages_per_gigabytes;
+ init_dirtyrate_stat(start_time, calc_time, sample_pages);
calculate_dirtyrate(config);
@@ -383,7 +397,8 @@ void *get_dirtyrate_thread(void *arg)
return NULL;
}
-void qmp_calc_dirty_rate(int64_t calc_time, Error **errp)
+void qmp_calc_dirty_rate(int64_t calc_time, bool has_sample_pages,
+ int64_t sample_pages, Error **errp)
{
static struct DirtyRateConfig config;
QemuThread thread;
@@ -404,6 +419,17 @@ void qmp_calc_dirty_rate(int64_t calc_time, Error **errp)
return;
}
+ if (has_sample_pages) {
+ if (!is_sample_pages_valid(sample_pages)) {
+ error_setg(errp, "sample-pages is out of range [%d, %d].",
+ MIN_SAMPLE_PAGE_COUNT,
+ MAX_SAMPLE_PAGE_COUNT);
+ return;
+ }
+ } else {
+ sample_pages = DIRTYRATE_DEFAULT_SAMPLE_PAGES;
+ }
+
/*
* Init calculation state as unstarted.
*/
@@ -415,7 +441,7 @@ void qmp_calc_dirty_rate(int64_t calc_time, Error **errp)
}
config.sample_period_seconds = calc_time;
- config.sample_pages_per_gigabytes = DIRTYRATE_DEFAULT_SAMPLE_PAGES;
+ config.sample_pages_per_gigabytes = sample_pages;
qemu_thread_create(&thread, "get_dirtyrate", get_dirtyrate_thread,
(void *)&config, QEMU_THREAD_DETACHED);
}
@@ -424,3 +450,47 @@ struct DirtyRateInfo *qmp_query_dirty_rate(Error **errp)
{
return query_dirty_rate_info();
}
+
+void hmp_info_dirty_rate(Monitor *mon, const QDict *qdict)
+{
+ DirtyRateInfo *info = query_dirty_rate_info();
+
+ monitor_printf(mon, "Status: %s\n",
+ DirtyRateStatus_str(info->status));
+ monitor_printf(mon, "Start Time: %"PRIi64" (ms)\n",
+ info->start_time);
+ monitor_printf(mon, "Sample Pages: %"PRIu64" (per GB)\n",
+ info->sample_pages);
+ monitor_printf(mon, "Period: %"PRIi64" (sec)\n",
+ info->calc_time);
+ monitor_printf(mon, "Dirty rate: ");
+ if (info->has_dirty_rate) {
+ monitor_printf(mon, "%"PRIi64" (MB/s)\n", info->dirty_rate);
+ } else {
+ monitor_printf(mon, "(not ready)\n");
+ }
+ g_free(info);
+}
+
+void hmp_calc_dirty_rate(Monitor *mon, const QDict *qdict)
+{
+ int64_t sec = qdict_get_try_int(qdict, "second", 0);
+ int64_t sample_pages = qdict_get_try_int(qdict, "sample_pages_per_GB", -1);
+ bool has_sample_pages = (sample_pages != -1);
+ Error *err = NULL;
+
+ if (!sec) {
+ monitor_printf(mon, "Incorrect period length specified!\n");
+ return;
+ }
+
+ qmp_calc_dirty_rate(sec, has_sample_pages, sample_pages, &err);
+ if (err) {
+ hmp_handle_error(mon, err);
+ return;
+ }
+
+ monitor_printf(mon, "Starting dirty rate measurement with period %"PRIi64
+ " seconds\n", sec);
+ monitor_printf(mon, "[Please use 'info dirty_rate' to check results]\n");
+}
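
The new sample-pages argument follows the usual QAPI optional-argument convention: a has_sample_pages flag plus the value, with DIRTYRATE_DEFAULT_SAMPLE_PAGES as the fallback. A sketch of a hypothetical C caller, assuming the generated qapi-commands-migration.h header declares qmp_calc_dirty_rate() with the signature shown above:

    #include "qemu/osdep.h"
    #include "qapi/error.h"
    #include "qapi/qapi-commands-migration.h"

    /* Hypothetical caller: measure for 1 second, sampling 512 pages per GB.
     * Passing has_sample_pages = false instead would make the command fall
     * back to DIRTYRATE_DEFAULT_SAMPLE_PAGES. */
    static void example_start_dirty_rate_measurement(Error **errp)
    {
        qmp_calc_dirty_rate(1    /* calc-time, in seconds */,
                            true /* has_sample_pages */,
                            512  /* sample-pages per GB */,
                            errp);
    }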
diff --git a/migration/dirtyrate.h b/migration/dirtyrate.h
index 6ec429534d..e1fd29089e 100644
--- a/migration/dirtyrate.h
+++ b/migration/dirtyrate.h
@@ -15,7 +15,6 @@
/*
* Sample 512 pages per GB as default.
- * TODO: Make it configurable.
*/
#define DIRTYRATE_DEFAULT_SAMPLE_PAGES 512
@@ -35,6 +34,12 @@
#define MIN_FETCH_DIRTYRATE_TIME_SEC 1
#define MAX_FETCH_DIRTYRATE_TIME_SEC 60
+/*
+ * Take 1/16 of the pages in 1G as the maximum sample page count
+ */
+#define MIN_SAMPLE_PAGE_COUNT 128
+#define MAX_SAMPLE_PAGE_COUNT 16384
+
struct DirtyRateConfig {
uint64_t sample_pages_per_gigabytes; /* sample pages per GB */
int64_t sample_period_seconds; /* time duration between two sampling */
@@ -63,6 +68,7 @@ struct DirtyRateStat {
int64_t dirty_rate; /* dirty rate in MB/s */
int64_t start_time; /* calculation start time in units of second */
int64_t calc_time; /* time duration of two sampling in units of second */
+ uint64_t sample_pages; /* sample pages per GB */
};
void *get_dirtyrate_thread(void *arg);
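
For reference, the new bounds work out as follows (a sketch of the arithmetic, assuming 4 KiB target pages; the patch itself does not spell this out):

    pages in 1 GiB      = 1 GiB / 4 KiB = 262144
    1/16 of those pages = 262144 / 16   = 16384   -> MAX_SAMPLE_PAGE_COUNT
    MIN_SAMPLE_PAGE_COUNT is simply fixed at 128 as the lower sanity bound.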
diff --git a/migration/migration.c b/migration/migration.c
index 1885860d7b..393299e150 100644
--- a/migration/migration.c
+++ b/migration/migration.c
@@ -280,6 +280,9 @@ void migration_incoming_state_destroy(void)
g_array_free(mis->postcopy_remote_fds, TRUE);
mis->postcopy_remote_fds = NULL;
}
+ if (mis->transport_cleanup) {
+ mis->transport_cleanup(mis->transport_data);
+ }
qemu_event_reset(&mis->main_thread_load_event);
diff --git a/migration/migration.h b/migration/migration.h
index b88bd8fe07..2ebb740dfa 100644
--- a/migration/migration.h
+++ b/migration/migration.h
@@ -49,6 +49,10 @@ struct PostcopyBlocktimeContext;
struct MigrationIncomingState {
QEMUFile *from_src_file;
+ /* A hook to allow cleanup at the end of incoming migration */
+ void *transport_data;
+ void (*transport_cleanup)(void *data);
+
/*
* Free at the start of the main state load, set as the main thread finishes
* loading state.
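
The two fields added above give incoming transports a generic teardown hook: a transport parks whatever must outlive the individual channels in transport_data, and migration_incoming_state_destroy() (see the migration.c hunk above) invokes transport_cleanup on it once. A sketch of the registration side, with a hypothetical caller name; the socket transport further down in this diff does exactly this with its QIONetListener:

    #include "qemu/osdep.h"
    #include "migration.h"    /* migration/migration.h, as built from migration/ */

    /* Hypothetical transport setup: hand long-lived state to the hook so
     * migration_incoming_state_destroy() releases it exactly once. */
    static void example_transport_setup(void *transport_state,
                                        void (*cleanup)(void *data))
    {
        MigrationIncomingState *mis = migration_incoming_get_current();

        mis->transport_data = transport_state;
        mis->transport_cleanup = cleanup;
    }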
diff --git a/migration/multifd.c b/migration/multifd.c
index 0a4803cfcc..ab41590e71 100644
--- a/migration/multifd.c
+++ b/migration/multifd.c
@@ -987,7 +987,8 @@ int multifd_load_cleanup(Error **errp)
for (i = 0; i < migrate_multifd_channels(); i++) {
MultiFDRecvParams *p = &multifd_recv_state->params[i];
- if (object_dynamic_cast(OBJECT(p->c), TYPE_QIO_CHANNEL_SOCKET)
+ if ((object_dynamic_cast(OBJECT(p->c), TYPE_QIO_CHANNEL_SOCKET) ||
+ object_dynamic_cast(OBJECT(p->c), TYPE_QIO_CHANNEL_TLS))
&& OBJECT(p->c)->ref == 1) {
yank_unregister_function(MIGRATION_YANK_INSTANCE,
migration_yank_iochannel,
@@ -1165,6 +1166,11 @@ bool multifd_recv_all_channels_created(void)
return true;
}
+ if (!multifd_recv_state) {
+ /* Called before any connections created */
+ return false;
+ }
+
return thread_count == qatomic_read(&multifd_recv_state->count);
}
diff --git a/migration/qemu-file-channel.c b/migration/qemu-file-channel.c
index 876d05a540..fad340ea7a 100644
--- a/migration/qemu-file-channel.c
+++ b/migration/qemu-file-channel.c
@@ -26,6 +26,7 @@
#include "qemu-file-channel.h"
#include "qemu-file.h"
#include "io/channel-socket.h"
+#include "io/channel-tls.h"
#include "qemu/iov.h"
#include "qemu/yank.h"
#include "yank_functions.h"
@@ -106,7 +107,8 @@ static int channel_close(void *opaque, Error **errp)
int ret;
QIOChannel *ioc = QIO_CHANNEL(opaque);
ret = qio_channel_close(ioc, errp);
- if (object_dynamic_cast(OBJECT(ioc), TYPE_QIO_CHANNEL_SOCKET)
+ if ((object_dynamic_cast(OBJECT(ioc), TYPE_QIO_CHANNEL_SOCKET) ||
+ object_dynamic_cast(OBJECT(ioc), TYPE_QIO_CHANNEL_TLS))
&& OBJECT(ioc)->ref == 1) {
yank_unregister_function(MIGRATION_YANK_INSTANCE,
migration_yank_iochannel,
diff --git a/migration/rdma.c b/migration/rdma.c
index 1cdb4561f3..d90b29a4b5 100644
--- a/migration/rdma.c
+++ b/migration/rdma.c
@@ -1539,16 +1539,20 @@ static int qemu_rdma_wait_comp_channel(RDMAContext *rdma)
if (pfds[1].revents) {
ret = rdma_get_cm_event(rdma->channel, &cm_event);
- if (!ret) {
- rdma_ack_cm_event(cm_event);
+ if (ret) {
+ error_report("failed to get cm event while waiting "
+ "for completion channel");
+ return -EPIPE;
}
error_report("receive cm event while wait comp channel,"
"cm event is %d", cm_event->event);
if (cm_event->event == RDMA_CM_EVENT_DISCONNECTED ||
cm_event->event == RDMA_CM_EVENT_DEVICE_REMOVAL) {
+ rdma_ack_cm_event(cm_event);
return -EPIPE;
}
+ rdma_ack_cm_event(cm_event);
}
break;
@@ -3285,7 +3289,6 @@ static void rdma_cm_poll_handler(void *opaque)
error_report("get_cm_event failed %d", errno);
return;
}
- rdma_ack_cm_event(cm_event);
if (cm_event->event == RDMA_CM_EVENT_DISCONNECTED ||
cm_event->event == RDMA_CM_EVENT_DEVICE_REMOVAL) {
@@ -3298,12 +3301,14 @@ static void rdma_cm_poll_handler(void *opaque)
rdma->return_path->error_state = -EPIPE;
}
}
+ rdma_ack_cm_event(cm_event);
if (mis->migration_incoming_co) {
qemu_coroutine_enter(mis->migration_incoming_co);
}
return;
}
+ rdma_ack_cm_event(cm_event);
}
static int qemu_rdma_accept(RDMAContext *rdma)
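
The rdma.c hunks reorder rdma_ack_cm_event() so that an event is only acknowledged after its fields have been read, because acknowledging returns the event to librdmacm and its contents must not be used afterwards. A minimal sketch of the intended get/inspect/ack pattern, using librdmacm directly (the function below is illustrative, not part of the patch):

    #include <rdma/rdma_cma.h>

    /* Fetch one CM event, inspect it, and only then acknowledge it. */
    static int example_poll_cm_event(struct rdma_event_channel *channel)
    {
        struct rdma_cm_event *cm_event;
        int disconnected;

        if (rdma_get_cm_event(channel, &cm_event) < 0) {
            return -1;                   /* nothing to acknowledge on failure */
        }
        disconnected = (cm_event->event == RDMA_CM_EVENT_DISCONNECTED ||
                        cm_event->event == RDMA_CM_EVENT_DEVICE_REMOVAL);
        rdma_ack_cm_event(cm_event);     /* ack only after reading ->event */
        return disconnected ? -1 : 0;
    }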
diff --git a/migration/socket.c b/migration/socket.c
index 6016642e04..05705a32d8 100644
--- a/migration/socket.c
+++ b/migration/socket.c
@@ -126,22 +126,31 @@ static void socket_accept_incoming_migration(QIONetListener *listener,
{
trace_migration_socket_incoming_accepted();
- qio_channel_set_name(QIO_CHANNEL(cioc), "migration-socket-incoming");
- migration_channel_process_incoming(QIO_CHANNEL(cioc));
-
if (migration_has_all_channels()) {
- /* Close listening socket as its no longer needed */
- qio_net_listener_disconnect(listener);
- object_unref(OBJECT(listener));
+ error_report("%s: Extra incoming migration connection; ignoring",
+ __func__);
+ return;
}
+
+ qio_channel_set_name(QIO_CHANNEL(cioc), "migration-socket-incoming");
+ migration_channel_process_incoming(QIO_CHANNEL(cioc));
}
+static void
+socket_incoming_migration_end(void *opaque)
+{
+ QIONetListener *listener = opaque;
+
+ qio_net_listener_disconnect(listener);
+ object_unref(OBJECT(listener));
+}
static void
socket_start_incoming_migration_internal(SocketAddress *saddr,
Error **errp)
{
QIONetListener *listener = qio_net_listener_new();
+ MigrationIncomingState *mis = migration_incoming_get_current();
size_t i;
int num = 1;
@@ -156,6 +165,9 @@ socket_start_incoming_migration_internal(SocketAddress *saddr,
return;
}
+ mis->transport_data = listener;
+ mis->transport_cleanup = socket_incoming_migration_end;
+
qio_net_listener_set_client_func_full(listener,
socket_accept_incoming_migration,
NULL, NULL,