author     Juan Quintela                             2017-03-22 15:18:04 +0100
committer  Juan Quintela                             2017-05-04 10:00:38 +0200
commit     6b6712efccd383b48a909bee0b29e079a57601ec (patch)
tree       d9d71f7dce54e59bd0c9e1b5ef9fc9d876603880 /migration/ram.c
parent     Merge remote-tracking branch 'sthibault/tags/samuel-thibault' into staging (diff)
ram: Split dirty bitmap by RAMBlock
Both the ram bitmap and the unsent bitmap are split by RAMBlock.
Signed-off-by: Juan Quintela <quintela@redhat.com>
Reviewed-by: zhanghailiang <zhang.zhanghailiang@huawei.com>
Reviewed-by: Peter Xu <peterx@redhat.com>
--
Fix compilation when DEBUG_POSTCOPY is enabled (thanks Hailiang)
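The shape of the change in miniature: instead of one global RAMBitmap whose bits were indexed by (block->offset >> TARGET_PAGE_BITS) + page, each RAMBlock now carries its own bmap (and, for postcopy, unsentmap) indexed by the block-relative page number alone. The RAMBlock fields themselves are declared outside migration/ram.c, so they do not appear in the diffstat below. A minimal standalone sketch of the idea (not QEMU code; the names and the byte-per-page map are invented for illustration, where the real code uses bitmap_new()/test_bit()):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define TOY_PAGE_BITS 12   /* stand-in for TARGET_PAGE_BITS */

/* Toy stand-in for RAMBlock after the split: the dirty map lives in the
 * block and is indexed by the block-relative page number. */
typedef struct ToyBlock {
    unsigned long offset;       /* only needed by the OLD global indexing */
    unsigned long used_length;  /* bytes in use */
    unsigned char *bmap;        /* one byte per page here, 1 = dirty */
} ToyBlock;

/* New-style lookup: block + local page number, no offset arithmetic. */
static int toy_test_dirty(ToyBlock *b, unsigned long page)
{
    return b->bmap[page];
}

int main(void)
{
    ToyBlock b = { .offset = 0x40000000, .used_length = 16ul << TOY_PAGE_BITS };
    unsigned long pages = b.used_length >> TOY_PAGE_BITS;

    b.bmap = calloc(pages, 1);
    memset(b.bmap, 1, pages);   /* bulk stage: everything starts dirty */

    /* Old scheme: bit index = (b.offset >> TOY_PAGE_BITS) + page.
     * New scheme: bit index = page.                                */
    printf("page 3 dirty: %d\n", toy_test_dirty(&b, 3));

    free(b.bmap);
    return 0;
}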
Diffstat (limited to 'migration/ram.c')
-rw-r--r--   migration/ram.c   265
1 file changed, 102 insertions(+), 163 deletions(-)
diff --git a/migration/ram.c b/migration/ram.c
index f48664ec62..1f03469c13 100644
--- a/migration/ram.c
+++ b/migration/ram.c
@@ -138,19 +138,6 @@ out:
     return ret;
 }
 
-struct RAMBitmap {
-    struct rcu_head rcu;
-    /* Main migration bitmap */
-    unsigned long *bmap;
-    /* bitmap of pages that haven't been sent even once
-     * only maintained and used in postcopy at the moment
-     * where it's used to send the dirtymap at the start
-     * of the postcopy phase
-     */
-    unsigned long *unsentmap;
-};
-typedef struct RAMBitmap RAMBitmap;
-
 /*
  * An outstanding page request, on the source, having been received
  * and queued
@@ -220,8 +207,6 @@ struct RAMState {
     uint64_t postcopy_requests;
     /* protects modification of the bitmap */
    QemuMutex bitmap_mutex;
-    /* Ram Bitmap protected by RCU */
-    RAMBitmap *ram_bitmap;
     /* The RAMBlock used in the last src_page_requests */
     RAMBlock *last_req_rb;
     /* Queue of outstanding page requests from the destination */
@@ -614,22 +599,17 @@ static inline unsigned long migration_bitmap_find_dirty(RAMState *rs,
                                                         RAMBlock *rb,
                                                         unsigned long start)
 {
-    unsigned long base = rb->offset >> TARGET_PAGE_BITS;
-    unsigned long nr = base + start;
-    uint64_t rb_size = rb->used_length;
-    unsigned long size = base + (rb_size >> TARGET_PAGE_BITS);
-    unsigned long *bitmap;
-
+    unsigned long size = rb->used_length >> TARGET_PAGE_BITS;
+    unsigned long *bitmap = rb->bmap;
     unsigned long next;
 
-    bitmap = atomic_rcu_read(&rs->ram_bitmap)->bmap;
-    if (rs->ram_bulk_stage && nr > base) {
-        next = nr + 1;
+    if (rs->ram_bulk_stage && start > 0) {
+        next = start + 1;
     } else {
-        next = find_next_bit(bitmap, size, nr);
+        next = find_next_bit(bitmap, size, start);
     }
 
-    return next - base;
+    return next;
 }
 
 static inline bool migration_bitmap_clear_dirty(RAMState *rs,
@@ -637,10 +617,8 @@ static inline bool migration_bitmap_clear_dirty(RAMState *rs,
                                                 unsigned long page)
 {
     bool ret;
-    unsigned long *bitmap = atomic_rcu_read(&rs->ram_bitmap)->bmap;
-    unsigned long nr = (rb->offset >> TARGET_PAGE_BITS) + page;
 
-    ret = test_and_clear_bit(nr, bitmap);
+    ret = test_and_clear_bit(page, rb->bmap);
 
     if (ret) {
         rs->migration_dirty_pages--;
@@ -651,10 +629,8 @@ static inline bool migration_bitmap_clear_dirty(RAMState *rs,
 static void migration_bitmap_sync_range(RAMState *rs, RAMBlock *rb,
                                         ram_addr_t start, ram_addr_t length)
 {
-    unsigned long *bitmap;
-    bitmap = atomic_rcu_read(&rs->ram_bitmap)->bmap;
     rs->migration_dirty_pages +=
-        cpu_physical_memory_sync_dirty_bitmap(bitmap, rb, start, length,
+        cpu_physical_memory_sync_dirty_bitmap(rb, start, length,
                                               &rs->num_dirty_pages_period);
 }
 
@@ -1153,17 +1129,13 @@ static bool get_queued_page(RAMState *rs, PageSearchStatus *pss)
          * search already sent it.
         */
        if (block) {
-            unsigned long *bitmap;
            unsigned long page;
 
-            bitmap = atomic_rcu_read(&rs->ram_bitmap)->bmap;
-            page = (block->offset + offset) >> TARGET_PAGE_BITS;
-            dirty = test_bit(page, bitmap);
+            page = offset >> TARGET_PAGE_BITS;
+            dirty = test_bit(page, block->bmap);
            if (!dirty) {
                trace_get_queued_page_not_dirty(block->idstr, (uint64_t)offset,
-                                                page,
-                                                test_bit(page,
-                                                         atomic_rcu_read(&rs->ram_bitmap)->unsentmap));
+                                                page, test_bit(page, block->unsentmap));
            } else {
                trace_get_queued_page(block->idstr, (uint64_t)offset, page);
            }
@@ -1301,16 +1273,13 @@ static int ram_save_target_page(RAMState *rs, PageSearchStatus *pss,
 
     /* Check the pages is dirty and if it is send it */
     if (migration_bitmap_clear_dirty(rs, pss->block, pss->page)) {
-        unsigned long *unsentmap;
         /*
          * If xbzrle is on, stop using the data compression after first
         * round of migration even if compression is enabled. In theory,
         * xbzrle can do better than compression.
         */
-        unsigned long page =
-            (pss->block->offset >> TARGET_PAGE_BITS) + pss->page;
-        if (migrate_use_compression()
-            && (rs->ram_bulk_stage || !migrate_use_xbzrle())) {
+        if (migrate_use_compression() &&
+            (rs->ram_bulk_stage || !migrate_use_xbzrle())) {
             res = ram_save_compressed_page(rs, pss, last_stage);
         } else {
             res = ram_save_page(rs, pss, last_stage);
@@ -1319,9 +1288,8 @@ static int ram_save_target_page(RAMState *rs, PageSearchStatus *pss,
         if (res < 0) {
             return res;
         }
-        unsentmap = atomic_rcu_read(&rs->ram_bitmap)->unsentmap;
-        if (unsentmap) {
-            clear_bit(page, unsentmap);
+        if (pss->block->unsentmap) {
+            clear_bit(pss->page, pss->block->unsentmap);
         }
     }
 
@@ -1451,25 +1419,20 @@ void free_xbzrle_decoded_buf(void)
     xbzrle_decoded_buf = NULL;
 }
 
-static void migration_bitmap_free(RAMBitmap *bmap)
-{
-    g_free(bmap->bmap);
-    g_free(bmap->unsentmap);
-    g_free(bmap);
-}
-
 static void ram_migration_cleanup(void *opaque)
 {
-    RAMState *rs = opaque;
+    RAMBlock *block;
 
     /* caller have hold iothread lock or is in a bh, so there is
      * no writing race against this migration_bitmap
     */
-    RAMBitmap *bitmap = rs->ram_bitmap;
-    atomic_rcu_set(&rs->ram_bitmap, NULL);
-    if (bitmap) {
-        memory_global_dirty_log_stop();
-        call_rcu(bitmap, migration_bitmap_free, rcu);
+    memory_global_dirty_log_stop();
+
+    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
+        g_free(block->bmap);
+        block->bmap = NULL;
+        g_free(block->unsentmap);
+        block->unsentmap = NULL;
     }
 
     XBZRLE_cache_lock();
@@ -1501,27 +1464,22 @@ static void ram_state_reset(RAMState *rs)
  * of; it won't bother printing lines that are all this value.
  * If 'todump' is null the migration bitmap is dumped.
  */
-void ram_debug_dump_bitmap(unsigned long *todump, bool expected)
+void ram_debug_dump_bitmap(unsigned long *todump, bool expected,
+                           unsigned long pages)
 {
-    unsigned long ram_pages = last_ram_page();
-    RAMState *rs = &ram_state;
     int64_t cur;
     int64_t linelen = 128;
     char linebuf[129];
 
-    if (!todump) {
-        todump = atomic_rcu_read(&rs->ram_bitmap)->bmap;
-    }
-
-    for (cur = 0; cur < ram_pages; cur += linelen) {
+    for (cur = 0; cur < pages; cur += linelen) {
         int64_t curb;
         bool found = false;
         /*
          * Last line; catch the case where the line length
         * is longer than remaining ram
         */
-        if (cur + linelen > ram_pages) {
-            linelen = ram_pages - cur;
+        if (cur + linelen > pages) {
+            linelen = pages - cur;
         }
         for (curb = 0; curb < linelen; curb++) {
             bool thisbit = test_bit(cur + curb, todump);
@@ -1539,14 +1497,12 @@ void ram_debug_dump_bitmap(unsigned long *todump, bool expected)
 
 void ram_postcopy_migrated_memory_release(MigrationState *ms)
 {
-    RAMState *rs = &ram_state;
     struct RAMBlock *block;
-    unsigned long *bitmap = atomic_rcu_read(&rs->ram_bitmap)->bmap;
 
     QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
-        unsigned long first = block->offset >> TARGET_PAGE_BITS;
-        unsigned long range = first + (block->used_length >> TARGET_PAGE_BITS);
-        unsigned long run_start = find_next_zero_bit(bitmap, range, first);
+        unsigned long *bitmap = block->bmap;
+        unsigned long range = block->used_length >> TARGET_PAGE_BITS;
+        unsigned long run_start = find_next_zero_bit(bitmap, range, 0);
 
         while (run_start < range) {
             unsigned long run_end = find_next_bit(bitmap, range, run_start + 1);
@@ -1573,16 +1529,13 @@ void ram_postcopy_migrated_memory_release(MigrationState *ms)
  */
 static int postcopy_send_discard_bm_ram(MigrationState *ms,
                                         PostcopyDiscardState *pds,
-                                        unsigned long start,
-                                        unsigned long length)
+                                        RAMBlock *block)
 {
-    RAMState *rs = &ram_state;
-    unsigned long end = start + length; /* one after the end */
+    unsigned long end = block->used_length >> TARGET_PAGE_BITS;
     unsigned long current;
-    unsigned long *unsentmap;
+    unsigned long *unsentmap = block->unsentmap;
 
-    unsentmap = atomic_rcu_read(&rs->ram_bitmap)->unsentmap;
-    for (current = start; current < end; ) {
+    for (current = 0; current < end; ) {
         unsigned long one = find_next_bit(unsentmap, end, current);
 
         if (one <= end) {
@@ -1625,18 +1578,15 @@ static int postcopy_each_ram_send_discard(MigrationState *ms)
     int ret;
 
     QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
-        unsigned long first = block->offset >> TARGET_PAGE_BITS;
-        PostcopyDiscardState *pds = postcopy_discard_send_init(ms,
-                                                               first,
-                                                               block->idstr);
+        PostcopyDiscardState *pds =
+            postcopy_discard_send_init(ms, block->idstr);
 
         /*
          * Postcopy sends chunks of bitmap over the wire, but it
         * just needs indexes at this point, avoids it having
         * target page specific code.
         */
-        ret = postcopy_send_discard_bm_ram(ms, pds, first,
-                                           block->used_length >> TARGET_PAGE_BITS);
+        ret = postcopy_send_discard_bm_ram(ms, pds, block);
         postcopy_discard_send_finish(ms, pds);
         if (ret) {
             return ret;
@@ -1667,12 +1617,10 @@ static void postcopy_chunk_hostpages_pass(MigrationState *ms, bool unsent_pass,
                                           PostcopyDiscardState *pds)
 {
     RAMState *rs = &ram_state;
-    unsigned long *bitmap;
-    unsigned long *unsentmap;
+    unsigned long *bitmap = block->bmap;
+    unsigned long *unsentmap = block->unsentmap;
     unsigned int host_ratio = block->page_size / TARGET_PAGE_SIZE;
-    unsigned long first = block->offset >> TARGET_PAGE_BITS;
-    unsigned long len = block->used_length >> TARGET_PAGE_BITS;
-    unsigned long last = first + (len - 1);
+    unsigned long pages = block->used_length >> TARGET_PAGE_BITS;
     unsigned long run_start;
 
     if (block->page_size == TARGET_PAGE_SIZE) {
@@ -1680,18 +1628,15 @@ static void postcopy_chunk_hostpages_pass(MigrationState *ms, bool unsent_pass,
         return;
     }
 
-    bitmap = atomic_rcu_read(&rs->ram_bitmap)->bmap;
-    unsentmap = atomic_rcu_read(&rs->ram_bitmap)->unsentmap;
-
     if (unsent_pass) {
         /* Find a sent page */
-        run_start = find_next_zero_bit(unsentmap, last + 1, first);
+        run_start = find_next_zero_bit(unsentmap, pages, 0);
     } else {
         /* Find a dirty page */
-        run_start = find_next_bit(bitmap, last + 1, first);
+        run_start = find_next_bit(bitmap, pages, 0);
     }
 
-    while (run_start <= last) {
+    while (run_start < pages) {
         bool do_fixup = false;
         unsigned long fixup_start_addr;
         unsigned long host_offset;
@@ -1711,9 +1656,9 @@ static void postcopy_chunk_hostpages_pass(MigrationState *ms, bool unsent_pass,
             /* Find the end of this run */
             unsigned long run_end;
             if (unsent_pass) {
-                run_end = find_next_bit(unsentmap, last + 1, run_start + 1);
+                run_end = find_next_bit(unsentmap, pages, run_start + 1);
             } else {
-                run_end = find_next_zero_bit(bitmap, last + 1, run_start + 1);
+                run_end = find_next_zero_bit(bitmap, pages, run_start + 1);
             }
             /*
              * If the end isn't at the start of a host page, then the
@@ -1770,11 +1715,10 @@ static void postcopy_chunk_hostpages_pass(MigrationState *ms, bool unsent_pass,
 
         if (unsent_pass) {
             /* Find the next sent page for the next iteration */
-            run_start = find_next_zero_bit(unsentmap, last + 1,
-                                           run_start);
+            run_start = find_next_zero_bit(unsentmap, pages, run_start);
         } else {
             /* Find the next dirty page for the next iteration */
-            run_start = find_next_bit(bitmap, last + 1, run_start);
+            run_start = find_next_bit(bitmap, pages, run_start);
         }
     }
 }
@@ -1791,34 +1735,22 @@ static void postcopy_chunk_hostpages_pass(MigrationState *ms, bool unsent_pass,
  * Returns zero on success
  *
  * @ms: current migration state
+ * @block: block we want to work with
  */
-static int postcopy_chunk_hostpages(MigrationState *ms)
+static int postcopy_chunk_hostpages(MigrationState *ms, RAMBlock *block)
 {
-    RAMState *rs = &ram_state;
-    struct RAMBlock *block;
-
-    /* Easiest way to make sure we don't resume in the middle of a host-page */
-    rs->last_seen_block = NULL;
-    rs->last_sent_block = NULL;
-    rs->last_page = 0;
-
-    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
-        unsigned long first = block->offset >> TARGET_PAGE_BITS;
-
-        PostcopyDiscardState *pds =
-            postcopy_discard_send_init(ms, first, block->idstr);
-
-        /* First pass: Discard all partially sent host pages */
-        postcopy_chunk_hostpages_pass(ms, true, block, pds);
-        /*
-         * Second pass: Ensure that all partially dirty host pages are made
-         * fully dirty.
-         */
-        postcopy_chunk_hostpages_pass(ms, false, block, pds);
+    PostcopyDiscardState *pds =
+        postcopy_discard_send_init(ms, block->idstr);
 
-        postcopy_discard_send_finish(ms, pds);
-    } /* ram_list loop */
+    /* First pass: Discard all partially sent host pages */
+    postcopy_chunk_hostpages_pass(ms, true, block, pds);
+    /*
+     * Second pass: Ensure that all partially dirty host pages are made
+     * fully dirty.
+     */
+    postcopy_chunk_hostpages_pass(ms, false, block, pds);
 
+    postcopy_discard_send_finish(ms, pds);
     return 0;
 }
 
@@ -1840,43 +1772,49 @@ static int postcopy_chunk_hostpages(MigrationState *ms)
 int ram_postcopy_send_discard_bitmap(MigrationState *ms)
 {
     RAMState *rs = &ram_state;
+    RAMBlock *block;
     int ret;
-    unsigned long *bitmap, *unsentmap;
 
     rcu_read_lock();
 
     /* This should be our last sync, the src is now paused */
     migration_bitmap_sync(rs);
 
-    unsentmap = atomic_rcu_read(&rs->ram_bitmap)->unsentmap;
-    if (!unsentmap) {
-        /* We don't have a safe way to resize the sentmap, so
-         * if the bitmap was resized it will be NULL at this
-         * point.
-         */
-        error_report("migration ram resized during precopy phase");
-        rcu_read_unlock();
-        return -EINVAL;
-    }
-
-    /* Deal with TPS != HPS and huge pages */
-    ret = postcopy_chunk_hostpages(ms);
-    if (ret) {
-        rcu_read_unlock();
-        return ret;
-    }
-
-    /*
-     * Update the unsentmap to be unsentmap = unsentmap | dirty
-     */
-    bitmap = atomic_rcu_read(&rs->ram_bitmap)->bmap;
-    bitmap_or(unsentmap, unsentmap, bitmap, last_ram_page());
+    /* Easiest way to make sure we don't resume in the middle of a host-page */
+    rs->last_seen_block = NULL;
+    rs->last_sent_block = NULL;
+    rs->last_page = 0;
+    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
+        unsigned long pages = block->used_length >> TARGET_PAGE_BITS;
+        unsigned long *bitmap = block->bmap;
+        unsigned long *unsentmap = block->unsentmap;
+
+        if (!unsentmap) {
+            /* We don't have a safe way to resize the sentmap, so
+             * if the bitmap was resized it will be NULL at this
+             * point.
+             */
+            error_report("migration ram resized during precopy phase");
+            rcu_read_unlock();
+            return -EINVAL;
+        }
+        /* Deal with TPS != HPS and huge pages */
+        ret = postcopy_chunk_hostpages(ms, block);
+        if (ret) {
+            rcu_read_unlock();
+            return ret;
+        }
 
-    trace_ram_postcopy_send_discard_bitmap();
+        /*
+         * Update the unsentmap to be unsentmap = unsentmap | dirty
+         */
+        bitmap_or(unsentmap, unsentmap, bitmap, pages);
 #ifdef DEBUG_POSTCOPY
-    ram_debug_dump_bitmap(unsentmap, true);
+        ram_debug_dump_bitmap(unsentmap, true, pages);
 #endif
+    }
+    trace_ram_postcopy_send_discard_bitmap();
 
     ret = postcopy_each_ram_send_discard(ms);
 
     rcu_read_unlock();
@@ -1918,8 +1856,6 @@ err:
 
 static int ram_state_init(RAMState *rs)
 {
-    unsigned long ram_bitmap_pages;
-
     memset(rs, 0, sizeof(*rs));
     qemu_mutex_init(&rs->bitmap_mutex);
     qemu_mutex_init(&rs->src_page_req_mutex);
@@ -1961,16 +1897,19 @@ static int ram_state_init(RAMState *rs)
     rcu_read_lock();
     ram_state_reset(rs);
-    rs->ram_bitmap = g_new0(RAMBitmap, 1);
 
     /* Skip setting bitmap if there is no RAM */
     if (ram_bytes_total()) {
-        ram_bitmap_pages = last_ram_page();
-        rs->ram_bitmap->bmap = bitmap_new(ram_bitmap_pages);
-        bitmap_set(rs->ram_bitmap->bmap, 0, ram_bitmap_pages);
+        RAMBlock *block;
 
-        if (migrate_postcopy_ram()) {
-            rs->ram_bitmap->unsentmap = bitmap_new(ram_bitmap_pages);
-            bitmap_set(rs->ram_bitmap->unsentmap, 0, ram_bitmap_pages);
+        QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
+            unsigned long pages = block->max_length >> TARGET_PAGE_BITS;
+
+            block->bmap = bitmap_new(pages);
+            bitmap_set(block->bmap, 0, pages);
+            if (migrate_postcopy_ram()) {
+                block->unsentmap = bitmap_new(pages);
+                bitmap_set(block->unsentmap, 0, pages);
+            }
         }
     }
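For orientation, here is a toy sketch (again not QEMU code) of the run-finding pattern that postcopy_send_discard_bm_ram and ram_postcopy_migrated_memory_release now apply per block: scan the block's map with indices running from 0 to used_length >> TARGET_PAGE_BITS and emit one range per run of set bits. The helpers below are invented stand-ins for QEMU's find_next_bit()/find_next_zero_bit():

#include <stdio.h>

#define BITS_PER_LONG (8 * (int)sizeof(unsigned long))

/* Naive stand-in for find_next_bit()/find_next_zero_bit(). */
static unsigned long toy_next_bit(const unsigned long *map, unsigned long size,
                                  unsigned long start, int want_set)
{
    unsigned long i;
    for (i = start; i < size; i++) {
        int bit = (map[i / BITS_PER_LONG] >> (i % BITS_PER_LONG)) & 1;
        if (bit == want_set) {
            return i;
        }
    }
    return size;   /* like find_next_bit, "not found" == size */
}

/* Walk one block's unsentmap and print each run of never-sent pages,
 * mirroring the block-relative indexing used by the discard loop. */
static void toy_send_discards(const unsigned long *unsentmap, unsigned long pages)
{
    unsigned long cur = 0;

    while (cur < pages) {
        unsigned long one = toy_next_bit(unsentmap, pages, cur, 1);
        if (one >= pages) {
            break;
        }
        unsigned long zero = toy_next_bit(unsentmap, pages, one + 1, 0);
        printf("discard pages [%lu, %lu)\n", one, zero);
        cur = zero;
    }
}

int main(void)
{
    /* 64 pages in one word: pages 2-5 and 9 were never sent. */
    unsigned long unsentmap[1] = { (0xFul << 2) | (1ul << 9) };

    toy_send_discards(unsentmap, 64);
    return 0;
}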