author | Peter Maydell | 2019-10-14 17:09:52 +0200
committer | Peter Maydell | 2019-10-14 17:09:52 +0200
commit | c760cb77e511eb05094df67c1b30029a952efa35 (patch)
tree | d57ed37e184c39091bc94f67a760e417b0fc2d3b /migration/ram.c
parent | Merge remote-tracking branch 'remotes/awilliam/tags/vfio-update-20191010.0' i... (diff)
parent | migration: Support gtree migration (diff)
Merge remote-tracking branch 'remotes/dgilbert/tags/pull-migration-20191011a' into staging
Migration pull 2019-10-11
Mostly cleanups and minor fixes
[Note: I'm seeing a hang on the aarch64-hosted x86-64 TCG migration
test in xbzrle, but I'm seeing that on current head as well.]
# gpg: Signature made Fri 11 Oct 2019 20:14:31 BST
# gpg: using RSA key 45F5C71B4A0CB7FB977A9FA90516331EBC5BFDE7
# gpg: Good signature from "Dr. David Alan Gilbert (RH2) <dgilbert@redhat.com>" [full]
# Primary key fingerprint: 45F5 C71B 4A0C B7FB 977A 9FA9 0516 331E BC5B FDE7
* remotes/dgilbert/tags/pull-migration-20191011a: (21 commits)
migration: Support gtree migration
migration/multifd: pages->used would be cleared when attach to multifd_send_state
migration/multifd: initialize packet->magic/version once at setup stage
migration/multifd: use pages->allocated instead of the static max
migration/multifd: fix a typo in comment of multifd_recv_unfill_packet()
migration/postcopy: check PostcopyState before setting to POSTCOPY_INCOMING_RUNNING
migration/postcopy: rename postcopy_ram_enable_notify to postcopy_ram_incoming_setup
migration/postcopy: postpone setting PostcopyState to END
migration/postcopy: mis->have_listen_thread check will never be touched
migration: report SaveStateEntry id and name on failure
migration: pass in_postcopy instead of check state again
migration/postcopy: fix typo in mark_postcopy_blocktime_begin's comment
migration/postcopy: map large zero page in postcopy_ram_incoming_setup()
migration/postcopy: allocate tmp_page in setup stage
migration: Don't try and recover return path in non-postcopy
rcu: Use automatic rc_read unlock in core memory/exec code
migration: Use automatic rcu_read unlock in rdma.c
migration: Use automatic rcu_read unlock in ram.c
migration: Fix missing rcu_read_unlock
rcu: Add automatically released rcu_read_lock variants
...
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
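
The change that dominates this pull for migration/ram.c is the conversion of manual rcu_read_lock()/rcu_read_unlock() pairs to the automatically released guards introduced by "rcu: Add automatically released rcu_read_lock variants": RCU_READ_LOCK_GUARD() holds the RCU read lock until the end of the enclosing scope, while WITH_RCU_READ_LOCK_GUARD() { ... } limits it to a block. Below is a minimal sketch of the conversion; the helper functions are hypothetical, but the macros and the RAMBLOCK_FOREACH_NOT_IGNORED iterator are the ones used in the diff that follows, and the sketch assumes it sits inside migration/ram.c where both are visible.

```c
/* Before: every exit path must pair the lock with an unlock by hand. */
static int count_blocks_manual(void)
{
    RAMBlock *block;
    int n = 0;

    rcu_read_lock();
    RAMBLOCK_FOREACH_NOT_IGNORED(block) {
        if (!block->host) {
            rcu_read_unlock();   /* easy to forget on error paths */
            return -1;
        }
        n++;
    }
    rcu_read_unlock();
    return n;
}

/*
 * After: the guard releases the read lock whenever the scope is left,
 * so early returns cannot leak it; that is the class of bug addressed
 * by "migration: Fix missing rcu_read_unlock" in the list above.
 */
static int count_blocks_guarded(void)
{
    RAMBlock *block;
    int n = 0;

    RCU_READ_LOCK_GUARD();
    RAMBLOCK_FOREACH_NOT_IGNORED(block) {
        if (!block->host) {
            return -1;           /* unlock happens automatically */
        }
        n++;
    }
    return n;
}
```

Under the hood the guard is a variable with a cleanup function (glib's g_autoptr machinery, built on `__attribute__((cleanup))`) that calls rcu_read_unlock(), so the compiler rather than the author tracks the exit paths; WITH_RCU_READ_LOCK_GUARD() wraps the same idea in a for-statement so the critical section is a visible block.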
Diffstat (limited to 'migration/ram.c')
-rw-r--r-- | migration/ram.c | 298 |
1 file changed, 139 insertions, 159 deletions
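
Two of the multifd commits are visible in the first hunks of the diff below: "initialize packet->magic/version once at setup stage" moves the invariant magic and version fields out of multifd_send_fill_packet() and into multifd_save_setup(), where they are written once per channel, and "use pages->allocated instead of the static max" makes pages_alloc describe the actual allocation rather than a computed maximum. Here is a self-contained sketch of the resulting split; the struct is an abbreviated stand-in for MultiFDPacket_t, the values are illustrative, and the byte swapping (cpu_to_be32() in the real code) is omitted:

```c
#include <stdint.h>
#include <string.h>

/* Abbreviated stand-in for MultiFDPacket_t; the real header has more
 * fields and stores everything big-endian on the wire. */
typedef struct {
    uint32_t magic;        /* constant for the whole migration */
    uint32_t version;      /* constant for the whole migration */
    uint32_t flags;        /* changes per packet */
    uint32_t pages_alloc;  /* the real allocation, not a static max */
    uint32_t pages_used;
} PacketHeader;

/* Setup stage: fill the invariant fields exactly once per channel. */
static void header_setup(PacketHeader *hdr)
{
    memset(hdr, 0, sizeof(*hdr));
    hdr->magic = 0x11223344;   /* placeholder magic value */
    hdr->version = 1;
}

/* Per-packet fill: touch only what varies, mirroring the slimmed-down
 * multifd_send_fill_packet(). */
static void header_fill(PacketHeader *hdr, uint32_t flags,
                        uint32_t allocated, uint32_t used)
{
    hdr->flags = flags;
    hdr->pages_alloc = allocated;  /* pages->allocated in the diff */
    hdr->pages_used = used;
}

int main(void)
{
    PacketHeader hdr;

    header_setup(&hdr);            /* once, at channel setup */
    header_fill(&hdr, 0, 128, 57); /* repeated for every packet */
    return hdr.magic == 0x11223344 ? 0 : 1;
}
```

The same reasoning is behind the removal of `p->pages->used = 0` from the send thread noted in "pages->used would be cleared when attach to multifd_send_state": state that is already cleared when pages are attached does not need to be cleared again per packet.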
```diff
diff --git a/migration/ram.c b/migration/ram.c
index 22423f08cd..5078f94490 100644
--- a/migration/ram.c
+++ b/migration/ram.c
@@ -181,14 +181,14 @@ int foreach_not_ignored_block(RAMBlockIterFunc func, void *opaque)
     RAMBlock *block;
     int ret = 0;
 
-    rcu_read_lock();
+    RCU_READ_LOCK_GUARD();
+
     RAMBLOCK_FOREACH_NOT_IGNORED(block) {
         ret = func(block, opaque);
         if (ret) {
             break;
         }
     }
-    rcu_read_unlock();
     return ret;
 }
 
@@ -791,13 +791,10 @@ static void multifd_pages_clear(MultiFDPages_t *pages)
 static void multifd_send_fill_packet(MultiFDSendParams *p)
 {
     MultiFDPacket_t *packet = p->packet;
-    uint32_t page_max = MULTIFD_PACKET_SIZE / qemu_target_page_size();
     int i;
 
-    packet->magic = cpu_to_be32(MULTIFD_MAGIC);
-    packet->version = cpu_to_be32(MULTIFD_VERSION);
     packet->flags = cpu_to_be32(p->flags);
-    packet->pages_alloc = cpu_to_be32(page_max);
+    packet->pages_alloc = cpu_to_be32(p->pages->allocated);
     packet->pages_used = cpu_to_be32(p->pages->used);
     packet->next_packet_size = cpu_to_be32(p->next_packet_size);
     packet->packet_num = cpu_to_be64(p->packet_num);
@@ -838,7 +835,7 @@ static int multifd_recv_unfill_packet(MultiFDRecvParams *p, Error **errp)
     packet->pages_alloc = be32_to_cpu(packet->pages_alloc);
 
     /*
-     * If we recevied a packet that is 100 times bigger than expected
+     * If we received a packet that is 100 times bigger than expected
      * just stop migration. It is a magic number.
      */
     if (packet->pages_alloc > pages_max * 100) {
@@ -1132,7 +1129,6 @@ static void *multifd_send_thread(void *opaque)
             p->flags = 0;
             p->num_packets++;
             p->num_pages += used;
-            p->pages->used = 0;
             qemu_mutex_unlock(&p->mutex);
 
             trace_multifd_send(p->id, packet_num, used, flags,
@@ -1241,6 +1237,8 @@ int multifd_save_setup(void)
         p->packet_len = sizeof(MultiFDPacket_t)
                       + sizeof(ram_addr_t) * page_count;
         p->packet = g_malloc0(p->packet_len);
+        p->packet->magic = cpu_to_be32(MULTIFD_MAGIC);
+        p->packet->version = cpu_to_be32(MULTIFD_VERSION);
         p->name = g_strdup_printf("multifdsend_%d", i);
         socket_send_channel_create(multifd_new_send_channel_async, p);
     }
@@ -1848,12 +1846,12 @@ static void migration_bitmap_sync(RAMState *rs)
     memory_global_dirty_log_sync();
 
     qemu_mutex_lock(&rs->bitmap_mutex);
-    rcu_read_lock();
-    RAMBLOCK_FOREACH_NOT_IGNORED(block) {
-        ramblock_sync_dirty_bitmap(rs, block);
+    WITH_RCU_READ_LOCK_GUARD() {
+        RAMBLOCK_FOREACH_NOT_IGNORED(block) {
+            ramblock_sync_dirty_bitmap(rs, block);
+        }
+        ram_counters.remaining = ram_bytes_remaining();
     }
-    ram_counters.remaining = ram_bytes_remaining();
-    rcu_read_unlock();
     qemu_mutex_unlock(&rs->bitmap_mutex);
 
     memory_global_after_dirty_log_sync();
@@ -2397,13 +2395,12 @@ static void migration_page_queue_free(RAMState *rs)
     /* This queue generally should be empty - but in the case of a failed
     * migration might have some droppings in.
     */
-    rcu_read_lock();
+    RCU_READ_LOCK_GUARD();
+
    QSIMPLEQ_FOREACH_SAFE(mspr, &rs->src_page_requests, next_req, next_mspr) {
         memory_region_unref(mspr->rb->mr);
         QSIMPLEQ_REMOVE_HEAD(&rs->src_page_requests, next_req);
         g_free(mspr);
     }
-    rcu_read_unlock();
 }
 
 /**
@@ -2424,7 +2421,8 @@ int ram_save_queue_pages(const char *rbname, ram_addr_t start, ram_addr_t len)
     RAMState *rs = ram_state;
 
     ram_counters.postcopy_requests++;
-    rcu_read_lock();
+    RCU_READ_LOCK_GUARD();
+
     if (!rbname) {
         /* Reuse last RAMBlock */
         ramblock = rs->last_req_rb;
@@ -2466,12 +2464,10 @@ int ram_save_queue_pages(const char *rbname, ram_addr_t start, ram_addr_t len)
     QSIMPLEQ_INSERT_TAIL(&rs->src_page_requests, new_entry, next_req);
     migration_make_urgent_request();
     qemu_mutex_unlock(&rs->src_page_req_mutex);
-    rcu_read_unlock();
 
     return 0;
 
 err:
-    rcu_read_unlock();
     return -1;
 }
 
@@ -2700,7 +2696,8 @@ static uint64_t ram_bytes_total_common(bool count_ignored)
     RAMBlock *block;
     uint64_t total = 0;
 
-    rcu_read_lock();
+    RCU_READ_LOCK_GUARD();
+
     if (count_ignored) {
         RAMBLOCK_FOREACH_MIGRATABLE(block) {
             total += block->used_length;
@@ -2710,7 +2707,6 @@ static uint64_t ram_bytes_total_common(bool count_ignored)
             total += block->used_length;
         }
     }
-    rcu_read_unlock();
     return total;
 }
 
@@ -3034,7 +3030,7 @@ int ram_postcopy_send_discard_bitmap(MigrationState *ms)
     RAMBlock *block;
     int ret;
 
-    rcu_read_lock();
+    RCU_READ_LOCK_GUARD();
 
     /* This should be our last sync, the src is now paused */
     migration_bitmap_sync(rs);
@@ -3048,7 +3044,6 @@ int ram_postcopy_send_discard_bitmap(MigrationState *ms)
         /* Deal with TPS != HPS and huge pages */
         ret = postcopy_chunk_hostpages(ms, block);
         if (ret) {
-            rcu_read_unlock();
             return ret;
         }
 
@@ -3060,7 +3055,6 @@ int ram_postcopy_send_discard_bitmap(MigrationState *ms)
     trace_ram_postcopy_send_discard_bitmap();
 
     ret = postcopy_each_ram_send_discard(ms);
-    rcu_read_unlock();
 
     return ret;
 }
@@ -3081,7 +3075,7 @@ int ram_discard_range(const char *rbname, uint64_t start, size_t length)
 
     trace_ram_discard_range(rbname, start, length);
 
-    rcu_read_lock();
+    RCU_READ_LOCK_GUARD();
     RAMBlock *rb = qemu_ram_block_by_name(rbname);
 
     if (!rb) {
@@ -3101,8 +3095,6 @@ int ram_discard_range(const char *rbname, uint64_t start, size_t length)
     ret = ram_block_discard_range(rb, start, length);
 
 err:
-    rcu_read_unlock();
-
     return ret;
 }
 
@@ -3231,13 +3223,12 @@ static void ram_init_bitmaps(RAMState *rs)
     /* For memory_global_dirty_log_start below.  */
     qemu_mutex_lock_iothread();
     qemu_mutex_lock_ramlist();
-    rcu_read_lock();
-
-    ram_list_init_bitmaps();
-    memory_global_dirty_log_start();
-    migration_bitmap_sync_precopy(rs);
-    rcu_read_unlock();
 
+    WITH_RCU_READ_LOCK_GUARD() {
+        ram_list_init_bitmaps();
+        memory_global_dirty_log_start();
+        migration_bitmap_sync_precopy(rs);
+    }
     qemu_mutex_unlock_ramlist();
     qemu_mutex_unlock_iothread();
 }
@@ -3373,24 +3364,23 @@ static int ram_save_setup(QEMUFile *f, void *opaque)
     }
     (*rsp)->f = f;
 
-    rcu_read_lock();
-
-    qemu_put_be64(f, ram_bytes_total_common(true) | RAM_SAVE_FLAG_MEM_SIZE);
+    WITH_RCU_READ_LOCK_GUARD() {
+        qemu_put_be64(f, ram_bytes_total_common(true) | RAM_SAVE_FLAG_MEM_SIZE);
 
-    RAMBLOCK_FOREACH_MIGRATABLE(block) {
-        qemu_put_byte(f, strlen(block->idstr));
-        qemu_put_buffer(f, (uint8_t *)block->idstr, strlen(block->idstr));
-        qemu_put_be64(f, block->used_length);
-        if (migrate_postcopy_ram() && block->page_size != qemu_host_page_size) {
-            qemu_put_be64(f, block->page_size);
-        }
-        if (migrate_ignore_shared()) {
-            qemu_put_be64(f, block->mr->addr);
+        RAMBLOCK_FOREACH_MIGRATABLE(block) {
+            qemu_put_byte(f, strlen(block->idstr));
+            qemu_put_buffer(f, (uint8_t *)block->idstr, strlen(block->idstr));
+            qemu_put_be64(f, block->used_length);
+            if (migrate_postcopy_ram() && block->page_size !=
+                qemu_host_page_size) {
+                qemu_put_be64(f, block->page_size);
+            }
+            if (migrate_ignore_shared()) {
+                qemu_put_be64(f, block->mr->addr);
+            }
         }
     }
-    rcu_read_unlock();
-
     ram_control_before_iterate(f, RAM_CONTROL_SETUP);
     ram_control_after_iterate(f, RAM_CONTROL_SETUP);
 
@@ -3425,55 +3415,57 @@ static int ram_save_iterate(QEMUFile *f, void *opaque)
         goto out;
     }
 
-    rcu_read_lock();
-    if (ram_list.version != rs->last_version) {
-        ram_state_reset(rs);
-    }
-
-    /* Read version before ram_list.blocks */
-    smp_rmb();
+    WITH_RCU_READ_LOCK_GUARD() {
+        if (ram_list.version != rs->last_version) {
+            ram_state_reset(rs);
+        }
 
-    ram_control_before_iterate(f, RAM_CONTROL_ROUND);
+        /* Read version before ram_list.blocks */
+        smp_rmb();
 
-    t0 = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
-    i = 0;
-    while ((ret = qemu_file_rate_limit(f)) == 0 ||
-            !QSIMPLEQ_EMPTY(&rs->src_page_requests)) {
-        int pages;
+        ram_control_before_iterate(f, RAM_CONTROL_ROUND);
 
-        if (qemu_file_get_error(f)) {
-            break;
-        }
+        t0 = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
+        i = 0;
+        while ((ret = qemu_file_rate_limit(f)) == 0 ||
+               !QSIMPLEQ_EMPTY(&rs->src_page_requests)) {
+            int pages;
 
-        pages = ram_find_and_save_block(rs, false);
-        /* no more pages to sent */
-        if (pages == 0) {
-            done = 1;
-            break;
-        }
+            if (qemu_file_get_error(f)) {
+                break;
+            }
 
-        if (pages < 0) {
-            qemu_file_set_error(f, pages);
-            break;
-        }
+            pages = ram_find_and_save_block(rs, false);
+            /* no more pages to sent */
+            if (pages == 0) {
+                done = 1;
+                break;
+            }
 
-        rs->target_page_count += pages;
-
-        /* we want to check in the 1st loop, just in case it was the 1st time
-           and we had to sync the dirty bitmap.
-           qemu_clock_get_ns() is a bit expensive, so we only check each some
-           iterations
-        */
-        if ((i & 63) == 0) {
-            uint64_t t1 = (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - t0) / 1000000;
-            if (t1 > MAX_WAIT) {
-                trace_ram_save_iterate_big_wait(t1, i);
+            if (pages < 0) {
+                qemu_file_set_error(f, pages);
                 break;
             }
+
+            rs->target_page_count += pages;
+
+            /*
+             * we want to check in the 1st loop, just in case it was the 1st
+             * time and we had to sync the dirty bitmap.
+             * qemu_clock_get_ns() is a bit expensive, so we only check each
+             * some iterations
+             */
+            if ((i & 63) == 0) {
+                uint64_t t1 = (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - t0) /
+                              1000000;
+                if (t1 > MAX_WAIT) {
+                    trace_ram_save_iterate_big_wait(t1, i);
                    break;
+                }
+            }
+            i++;
         }
-        i++;
     }
-    rcu_read_unlock();
 
     /*
      * Must occur before EOS (or any QEMUFile operation)
@@ -3511,35 +3503,33 @@ static int ram_save_complete(QEMUFile *f, void *opaque)
     RAMState *rs = *temp;
     int ret = 0;
 
-    rcu_read_lock();
-
-    if (!migration_in_postcopy()) {
-        migration_bitmap_sync_precopy(rs);
-    }
+    WITH_RCU_READ_LOCK_GUARD() {
+        if (!migration_in_postcopy()) {
+            migration_bitmap_sync_precopy(rs);
+        }
 
-    ram_control_before_iterate(f, RAM_CONTROL_FINISH);
+        ram_control_before_iterate(f, RAM_CONTROL_FINISH);
 
-    /* try transferring iterative blocks of memory */
+        /* try transferring iterative blocks of memory */
 
-    /* flush all remaining blocks regardless of rate limiting */
-    while (true) {
-        int pages;
+        /* flush all remaining blocks regardless of rate limiting */
+        while (true) {
+            int pages;
 
-        pages = ram_find_and_save_block(rs, !migration_in_colo_state());
-        /* no more blocks to sent */
-        if (pages == 0) {
-            break;
-        }
-        if (pages < 0) {
-            ret = pages;
-            break;
+            pages = ram_find_and_save_block(rs, !migration_in_colo_state());
+            /* no more blocks to sent */
+            if (pages == 0) {
+                break;
+            }
+            if (pages < 0) {
+                ret = pages;
+                break;
+            }
         }
-    }
 
-    flush_compressed_data(rs);
-    ram_control_after_iterate(f, RAM_CONTROL_FINISH);
-
-    rcu_read_unlock();
+        flush_compressed_data(rs);
+        ram_control_after_iterate(f, RAM_CONTROL_FINISH);
+    }
 
     multifd_send_sync_main(rs);
     qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
@@ -3562,9 +3552,9 @@ static void ram_save_pending(QEMUFile *f, void *opaque, uint64_t max_size,
 
     if (!migration_in_postcopy() && remaining_size < max_size) {
         qemu_mutex_lock_iothread();
-        rcu_read_lock();
-        migration_bitmap_sync_precopy(rs);
-        rcu_read_unlock();
+        WITH_RCU_READ_LOCK_GUARD() {
+            migration_bitmap_sync_precopy(rs);
+        }
         qemu_mutex_unlock_iothread();
         remaining_size = rs->migration_dirty_pages * TARGET_PAGE_SIZE;
     }
@@ -3908,7 +3898,13 @@ int colo_init_ram_cache(void)
             error_report("%s: Can't alloc memory for COLO cache of block %s,"
                          "size 0x" RAM_ADDR_FMT, __func__, block->idstr,
                          block->used_length);
-            goto out_locked;
+            RAMBLOCK_FOREACH_NOT_IGNORED(block) {
+                if (block->colo_cache) {
+                    qemu_anon_ram_free(block->colo_cache, block->used_length);
+                    block->colo_cache = NULL;
+                }
+            }
+            return -errno;
         }
         memcpy(block->colo_cache, block->host, block->used_length);
     }
@@ -3934,18 +3930,6 @@ int colo_init_ram_cache(void)
     memory_global_dirty_log_start();
 
     return 0;
-
-out_locked:
-
-    RAMBLOCK_FOREACH_NOT_IGNORED(block) {
-        if (block->colo_cache) {
-            qemu_anon_ram_free(block->colo_cache, block->used_length);
-            block->colo_cache = NULL;
-        }
-    }
-
-    rcu_read_unlock();
-    return -errno;
 }
 
 /* It is need to hold the global lock to call this helper */
@@ -3959,16 +3943,14 @@ void colo_release_ram_cache(void)
         block->bmap = NULL;
     }
 
-    rcu_read_lock();
-
-    RAMBLOCK_FOREACH_NOT_IGNORED(block) {
-        if (block->colo_cache) {
-            qemu_anon_ram_free(block->colo_cache, block->used_length);
-            block->colo_cache = NULL;
+    WITH_RCU_READ_LOCK_GUARD() {
+        RAMBLOCK_FOREACH_NOT_IGNORED(block) {
+            if (block->colo_cache) {
+                qemu_anon_ram_free(block->colo_cache, block->used_length);
+                block->colo_cache = NULL;
+            }
         }
     }
-
-    rcu_read_unlock();
     qemu_mutex_destroy(&ram_state->bitmap_mutex);
     g_free(ram_state);
     ram_state = NULL;
@@ -4048,7 +4030,7 @@ static int ram_load_postcopy(QEMUFile *f)
     bool matches_target_page_size = false;
     MigrationIncomingState *mis = migration_incoming_get_current();
     /* Temporary page that is later 'placed' */
-    void *postcopy_host_page = postcopy_get_tmp_page(mis);
+    void *postcopy_host_page = mis->postcopy_tmp_page;
     void *last_host = NULL;
     bool all_zero = false;
 
@@ -4206,31 +4188,30 @@ static void colo_flush_ram_cache(void)
     unsigned long offset = 0;
 
     memory_global_dirty_log_sync();
-    rcu_read_lock();
-    RAMBLOCK_FOREACH_NOT_IGNORED(block) {
-        ramblock_sync_dirty_bitmap(ram_state, block);
+    WITH_RCU_READ_LOCK_GUARD() {
+        RAMBLOCK_FOREACH_NOT_IGNORED(block) {
+            ramblock_sync_dirty_bitmap(ram_state, block);
+        }
     }
-    rcu_read_unlock();
 
     trace_colo_flush_ram_cache_begin(ram_state->migration_dirty_pages);
-    rcu_read_lock();
-    block = QLIST_FIRST_RCU(&ram_list.blocks);
+    WITH_RCU_READ_LOCK_GUARD() {
+        block = QLIST_FIRST_RCU(&ram_list.blocks);
 
-    while (block) {
-        offset = migration_bitmap_find_dirty(ram_state, block, offset);
+        while (block) {
+            offset = migration_bitmap_find_dirty(ram_state, block, offset);
 
-        if (offset << TARGET_PAGE_BITS >= block->used_length) {
-            offset = 0;
-            block = QLIST_NEXT_RCU(block, next);
-        } else {
-            migration_bitmap_clear_dirty(ram_state, block, offset);
-            dst_host = block->host + (offset << TARGET_PAGE_BITS);
-            src_host = block->colo_cache + (offset << TARGET_PAGE_BITS);
-            memcpy(dst_host, src_host, TARGET_PAGE_SIZE);
+            if (offset << TARGET_PAGE_BITS >= block->used_length) {
+                offset = 0;
+                block = QLIST_NEXT_RCU(block, next);
+            } else {
+                migration_bitmap_clear_dirty(ram_state, block, offset);
+                dst_host = block->host + (offset << TARGET_PAGE_BITS);
+                src_host = block->colo_cache + (offset << TARGET_PAGE_BITS);
+                memcpy(dst_host, src_host, TARGET_PAGE_SIZE);
+            }
         }
     }
-
-    rcu_read_unlock();
     trace_colo_flush_ram_cache_end();
 }
 
@@ -4429,16 +4410,15 @@ static int ram_load(QEMUFile *f, void *opaque, int version_id)
      * it will be necessary to reduce the granularity of this
      * critical section.
      */
-    rcu_read_lock();
+    WITH_RCU_READ_LOCK_GUARD() {
+        if (postcopy_running) {
+            ret = ram_load_postcopy(f);
+        } else {
+            ret = ram_load_precopy(f);
+        }
 
-    if (postcopy_running) {
-        ret = ram_load_postcopy(f);
-    } else {
-        ret = ram_load_precopy(f);
+        ret |= wait_for_decompress_done();
     }
-
-    ret |= wait_for_decompress_done();
-    rcu_read_unlock();
     trace_ram_load_complete(ret, seq_iter);
 
     if (!ret && migration_incoming_in_colo_state()) {
```