path: root/include
author    Peter Xu    2022-10-04 20:24:29 +0200
committer Juan Quintela    2022-11-21 11:58:10 +0100
commit    cedb70eafb4fd51d9c714981509d97b9f4055be5 (patch)
tree      a230b94aab982dab7966e935680acd1ac408e229 /include
parent    migration: Disallow postcopy preempt to be used with compress (diff)
migration: Use non-atomic ops for clear log bitmap
Since we already have bitmap_mutex to protect either the dirty bitmap or the clear log bitmap, we don't need atomic operations to set/clear/test on the clear log bitmap. Switch all ops from atomic to non-atomic versions, and meanwhile touch up the comments to show which lock is in charge.

Introduce a non-atomic version of bitmap_test_and_clear_atomic(), mostly the same as the atomic version but simplified in a few places, e.g. it drops the "old_bits" variable and the explicit memory barriers.

Reviewed-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
Signed-off-by: Peter Xu <peterx@redhat.com>
Reviewed-by: Juan Quintela <quintela@redhat.com>
Signed-off-by: Juan Quintela <quintela@redhat.com>
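The util/bitmap.c side of the change is outside this include-only view. As a rough idea of the shape of the new helper, here is a minimal bit-by-bit sketch (simplified, not the committed implementation, which works a word at a time); it assumes qemu/bitops.h's BIT_WORD() and BITS_PER_LONG:

    /*
     * Sketch only: clear bits [start, start + nr) and return true if any
     * bit was set. Plain loads/stores are enough because the caller
     * serializes access (here via bitmap_mutex), so no qatomic_*() ops
     * or memory barriers are needed.
     */
    bool bitmap_test_and_clear(unsigned long *map, long start, long nr)
    {
        bool dirty = false;
        long i;

        for (i = start; i < start + nr; i++) {
            unsigned long *p = map + BIT_WORD(i);
            unsigned long mask = 1UL << (i % BITS_PER_LONG);

            if (*p & mask) {
                *p &= ~mask;
                dirty = true;
            }
        }
        return dirty;
    }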
Diffstat (limited to 'include')
-rw-r--r--   include/exec/ram_addr.h   11
-rw-r--r--   include/exec/ramblock.h    3
-rw-r--r--   include/qemu/bitmap.h      1
3 files changed, 10 insertions, 5 deletions
diff --git a/include/exec/ram_addr.h b/include/exec/ram_addr.h
index 1500680458..f4fb6a2111 100644
--- a/include/exec/ram_addr.h
+++ b/include/exec/ram_addr.h
@@ -42,7 +42,8 @@ static inline long clear_bmap_size(uint64_t pages, uint8_t shift)
}
/**
- * clear_bmap_set: set clear bitmap for the page range
+ * clear_bmap_set: set clear bitmap for the page range. Must be with
+ * bitmap_mutex held.
*
* @rb: the ramblock to operate on
* @start: the start page number
@@ -55,12 +56,12 @@ static inline void clear_bmap_set(RAMBlock *rb, uint64_t start,
{
uint8_t shift = rb->clear_bmap_shift;
- bitmap_set_atomic(rb->clear_bmap, start >> shift,
- clear_bmap_size(npages, shift));
+ bitmap_set(rb->clear_bmap, start >> shift, clear_bmap_size(npages, shift));
}
/**
- * clear_bmap_test_and_clear: test clear bitmap for the page, clear if set
+ * clear_bmap_test_and_clear: test clear bitmap for the page, clear if set.
+ * Must be with bitmap_mutex held.
*
* @rb: the ramblock to operate on
* @page: the page number to check
@@ -71,7 +72,7 @@ static inline bool clear_bmap_test_and_clear(RAMBlock *rb, uint64_t page)
{
uint8_t shift = rb->clear_bmap_shift;
- return bitmap_test_and_clear_atomic(rb->clear_bmap, page >> shift, 1);
+ return bitmap_test_and_clear(rb->clear_bmap, page >> shift, 1);
}
static inline bool offset_in_ramblock(RAMBlock *b, ram_addr_t offset)
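For context on the "Must be with bitmap_mutex held" requirement added above, the callers on the migration source side take bitmap_mutex around these helpers. The following is an illustrative sketch of that pattern only; the function name is hypothetical and the real logic lives in migration/ram.c's dirty-bitmap sync path, which differs in detail:

    /* Illustrative caller sketch, not the exact migration/ram.c code. */
    static void clear_remote_dirty_log(RAMState *rs, RAMBlock *rb, uint64_t page)
    {
        uint8_t shift = rb->clear_bmap_shift;

        qemu_mutex_lock(&rs->bitmap_mutex);
        /* Serialized by bitmap_mutex, so the non-atomic helper is enough. */
        if (clear_bmap_test_and_clear(rb, page)) {
            /* One clear_bmap bit covers 2^shift guest pages. */
            uint64_t size = 1ULL << (TARGET_PAGE_BITS + shift);
            ram_addr_t start = QEMU_ALIGN_DOWN((ram_addr_t)page << TARGET_PAGE_BITS,
                                               size);
            memory_region_clear_dirty_bitmap(rb->mr, start, size);
        }
        qemu_mutex_unlock(&rs->bitmap_mutex);
    }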
diff --git a/include/exec/ramblock.h b/include/exec/ramblock.h
index 6cbedf9e0c..adc03df59c 100644
--- a/include/exec/ramblock.h
+++ b/include/exec/ramblock.h
@@ -53,6 +53,9 @@ struct RAMBlock {
* and split clearing of dirty bitmap on the remote node (e.g.,
* KVM). The bitmap will be set only when doing global sync.
*
+ * It is only used during src side of ram migration, and it is
+ * protected by the global ram_state.bitmap_mutex.
+ *
* NOTE: this bitmap is different comparing to the other bitmaps
* in that one bit can represent multiple guest pages (which is
* decided by the `clear_bmap_shift' variable below). On
diff --git a/include/qemu/bitmap.h b/include/qemu/bitmap.h
index 82a1d2f41f..3ccb00865f 100644
--- a/include/qemu/bitmap.h
+++ b/include/qemu/bitmap.h
@@ -253,6 +253,7 @@ void bitmap_set(unsigned long *map, long i, long len);
void bitmap_set_atomic(unsigned long *map, long i, long len);
void bitmap_clear(unsigned long *map, long start, long nr);
bool bitmap_test_and_clear_atomic(unsigned long *map, long start, long nr);
+bool bitmap_test_and_clear(unsigned long *map, long start, long nr);
void bitmap_copy_and_clear_atomic(unsigned long *dst, unsigned long *src,
long nr);
unsigned long bitmap_find_next_zero_area(unsigned long *map,