path: root/migration/postcopy-ram.c
author     Richard Henderson    2021-11-02 15:07:27 +0100
committer  Richard Henderson    2021-11-02 15:07:27 +0100
commit  91e8394415f9bc9cd81c02bfafe02012855d4f98
tree    6d91ada2942583821944194f6e3f4ef778964d50 /migration/postcopy-ram.c
parent  Merge remote-tracking branch 'remotes/mcayland/tags/qemu-openbios-20211101' i...
parent  migration/dirtyrate: implement dirty-bitmap dirtyrate calculation
Merge remote-tracking branch 'remotes/juanquintela/tags/migration-20211031-pull-request' into staging
Migration Pull request

Hi

this includes pending bits of migration patches.

- virtio-mem support by David Hildenbrand
- dirtyrate improvements by Hyman Huang
- fix rdma wrid by Li Zhijian
- dump-guest-memory fixes by Peter Xu

Please apply.

Thanks, Juan.

# gpg: Signature made Mon 01 Nov 2021 06:03:44 PM EDT
# gpg:                using RSA key 1899FF8EDEBF58CCEE034B82F487EF185872D723
# gpg: Good signature from "Juan Quintela <quintela@redhat.com>" [full]
# gpg:                 aka "Juan Quintela <quintela@trasno.org>" [full]

* remotes/juanquintela/tags/migration-20211031-pull-request:
  migration/dirtyrate: implement dirty-bitmap dirtyrate calculation
  memory: introduce total_dirty_pages to stat dirty pages
  migration/ram: Handle RAMBlocks with a RamDiscardManager on background snapshots
  migration/ram: Factor out populating pages readable in ram_block_populate_pages()
  migration: Simplify alignment and alignment checks
  migration/postcopy: Handle RAMBlocks with a RamDiscardManager on the destination
  virtio-mem: Drop precopy notifier
  migration/ram: Handle RAMBlocks with a RamDiscardManager on the migration source
  virtio-mem: Implement replay_discarded RamDiscardManager callback
  memory: Introduce replay_discarded callback for RamDiscardManager
  dump-guest-memory: Block live migration
  migration: Add migrate_add_blocker_internal()
  migration: Make migration blocker work for snapshots too
  migration/dirtyrate: implement dirty-ring dirtyrate calculation
  migration/dirtyrate: move init step of calculation to main thread
  migration/dirtyrate: adjust order of registering thread
  migration/dirtyrate: introduce struct and adjust DirtyRateStat
  memory: make global_dirty_tracking a bitmask
  KVM: introduce dirty_pages and kvm_dirty_ring_enabled
  migration/rdma: Fix out of order wrid

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Diffstat (limited to 'migration/postcopy-ram.c')
-rw-r--r--  migration/postcopy-ram.c | 40 +++++++++++++++++++++++++++++++---------
1 file changed, 31 insertions(+), 9 deletions(-)
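
A recurring cleanup in this pull request replaces open-coded mask arithmetic such as addr & ~(pagesize - 1) with QEMU's alignment helpers (ROUND_DOWN, QEMU_IS_ALIGNED, QEMU_PTR_IS_ALIGNED), as seen throughout the hunks below. A minimal standalone sketch of the equivalence, with the two macros redefined locally for illustration (the real definitions live in QEMU's headers and are more careful about types); both forms assume the alignment is a power of two:

#include <assert.h>
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins for QEMU's helpers; 'align' must be a power of two. */
#define ROUND_DOWN(n, align)      ((n) & ~((align) - 1))
#define QEMU_IS_ALIGNED(n, align) (((n) & ((align) - 1)) == 0)

int main(void)
{
    uint64_t pagesize = 4096;   /* host page size, a power of two */
    uint64_t addr = 0x12345;    /* arbitrary faulting address */

    /* The open-coded mask and the helper compute the same value... */
    assert((addr & ~(pagesize - 1)) == ROUND_DOWN(addr, pagesize));

    /* ...and the rounded-down address is page aligned by construction. */
    assert(QEMU_IS_ALIGNED(ROUND_DOWN(addr, pagesize), pagesize));

    printf("0x%" PRIx64 " rounds down to 0x%" PRIx64 "\n",
           addr, ROUND_DOWN(addr, pagesize));
    return 0;
}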
diff --git a/migration/postcopy-ram.c b/migration/postcopy-ram.c
index 2e9697bdd2..e721f69d0f 100644
--- a/migration/postcopy-ram.c
+++ b/migration/postcopy-ram.c
@@ -402,7 +402,7 @@ bool postcopy_ram_supported_by_host(MigrationIncomingState *mis)
strerror(errno));
goto out;
}
- g_assert(((size_t)testarea & (pagesize - 1)) == 0);
+ g_assert(QEMU_PTR_IS_ALIGNED(testarea, pagesize));
reg_struct.range.start = (uintptr_t)testarea;
reg_struct.range.len = pagesize;
@@ -660,7 +660,7 @@ int postcopy_wake_shared(struct PostCopyFD *pcfd,
struct uffdio_range range;
int ret;
trace_postcopy_wake_shared(client_addr, qemu_ram_get_idstr(rb));
- range.start = client_addr & ~(pagesize - 1);
+ range.start = ROUND_DOWN(client_addr, pagesize);
range.len = pagesize;
ret = ioctl(pcfd->fd, UFFDIO_WAKE, &range);
if (ret) {
@@ -671,6 +671,29 @@ int postcopy_wake_shared(struct PostCopyFD *pcfd,
return ret;
}
+static int postcopy_request_page(MigrationIncomingState *mis, RAMBlock *rb,
+ ram_addr_t start, uint64_t haddr)
+{
+ void *aligned = (void *)(uintptr_t)ROUND_DOWN(haddr, qemu_ram_pagesize(rb));
+
+ /*
+ * Discarded pages (via RamDiscardManager) are never migrated. On unlikely
+ * access, place a zeropage, which will also set the relevant bits in the
+ * recv_bitmap accordingly, so we won't try placing a zeropage twice.
+ *
+ * Checking a single bit is sufficient to handle pagesize > TPS as either
+ * all relevant bits are set or not.
+ */
+ assert(QEMU_IS_ALIGNED(start, qemu_ram_pagesize(rb)));
+ if (ramblock_page_is_discarded(rb, start)) {
+ bool received = ramblock_recv_bitmap_test_byte_offset(rb, start);
+
+ return received ? 0 : postcopy_place_page_zero(mis, aligned, rb);
+ }
+
+ return migrate_send_rp_req_pages(mis, rb, start, haddr);
+}
+
/*
* Callback from shared fault handlers to ask for a page,
* the page must be specified by a RAMBlock and an offset in that rb
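
The new postcopy_request_page() above centralizes the decision of whether a faulting page must be requested from the migration source or can be satisfied locally with a zeropage: a page that a RamDiscardManager marked discarded was never migrated, so the source has nothing to send. A simplified, self-contained sketch of that control flow, with the bitmap and placement operations reduced to hypothetical stubs rather than the real QEMU calls:

#include <stdbool.h>
#include <stdint.h>

/* Hypothetical stand-ins for the QEMU operations used above. */
bool page_is_discarded(uint64_t offset);      /* RamDiscardManager query */
bool page_already_received(uint64_t offset);  /* recv_bitmap test */
int  place_zeropage(uint64_t offset);         /* fill locally, sets recv_bitmap */
int  request_from_source(uint64_t offset);    /* ask the migration source */

int request_page(uint64_t offset)
{
    if (page_is_discarded(offset)) {
        /*
         * Never migrated: satisfy the fault locally. Placing the zeropage
         * also marks the page as received, so a second fault on the same
         * page takes the early return instead of placing it twice.
         */
        return page_already_received(offset) ? 0 : place_zeropage(offset);
    }
    /* Normal case: the page exists on the source, so request it. */
    return request_from_source(offset);
}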
@@ -679,8 +702,7 @@ int postcopy_wake_shared(struct PostCopyFD *pcfd,
int postcopy_request_shared_page(struct PostCopyFD *pcfd, RAMBlock *rb,
uint64_t client_addr, uint64_t rb_offset)
{
- size_t pagesize = qemu_ram_pagesize(rb);
- uint64_t aligned_rbo = rb_offset & ~(pagesize - 1);
+ uint64_t aligned_rbo = ROUND_DOWN(rb_offset, qemu_ram_pagesize(rb));
MigrationIncomingState *mis = migration_incoming_get_current();
trace_postcopy_request_shared_page(pcfd->idstr, qemu_ram_get_idstr(rb),
@@ -690,7 +712,7 @@ int postcopy_request_shared_page(struct PostCopyFD *pcfd, RAMBlock *rb,
qemu_ram_get_idstr(rb), rb_offset);
return postcopy_wake_shared(pcfd, client_addr, rb);
}
- migrate_send_rp_req_pages(mis, rb, aligned_rbo, client_addr);
+ postcopy_request_page(mis, rb, aligned_rbo, client_addr);
return 0;
}
@@ -970,7 +992,7 @@ static void *postcopy_ram_fault_thread(void *opaque)
break;
}
- rb_offset &= ~(qemu_ram_pagesize(rb) - 1);
+ rb_offset = ROUND_DOWN(rb_offset, qemu_ram_pagesize(rb));
trace_postcopy_ram_fault_thread_request(msg.arg.pagefault.address,
qemu_ram_get_idstr(rb),
rb_offset,
@@ -984,8 +1006,8 @@ retry:
* Send the request to the source - we want to request one
* of our host page sizes (which is >= TPS)
*/
- ret = migrate_send_rp_req_pages(mis, rb, rb_offset,
- msg.arg.pagefault.address);
+ ret = postcopy_request_page(mis, rb, rb_offset,
+ msg.arg.pagefault.address);
if (ret) {
/* May be network failure, try to wait for recovery */
if (ret == -EIO && postcopy_pause_fault_thread(mis)) {
@@ -993,7 +1015,7 @@ retry:
goto retry;
} else {
/* This is a unavoidable fault */
- error_report("%s: migrate_send_rp_req_pages() get %d",
+ error_report("%s: postcopy_request_page() get %d",
__func__, ret);
break;
}
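
For context, the fault-thread hunks above wrap the request in a retry loop: -EIO from postcopy_request_page() is treated as a possibly transient network failure, and the thread waits for postcopy recovery before retrying, while any other error is fatal. A minimal sketch of that pattern, again with hypothetical stubs in place of the real calls:

#include <errno.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

int request_page(uint64_t offset);  /* hypothetical: 0 on success, -errno on failure */
bool wait_for_recovery(void);       /* hypothetical: block until the channel recovers */

int handle_fault(uint64_t offset)
{
    for (;;) {
        int ret = request_page(offset);
        if (ret != -EIO) {
            return ret;   /* success, or an unrecoverable error */
        }
        /* -EIO: likely a network failure; pause and wait for recovery. */
        if (!wait_for_recovery()) {
            fprintf(stderr, "handle_fault: request failed with %d\n", ret);
            return ret;   /* recovery failed: give up */
        }
        /* Channel recovered: retry the request. */
    }
}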