path: root/include/exec/ram_addr.h
author    Dr. David Alan Gilbert <dgilbert@redhat.com>    2019-10-07 16:36:41 +0200
committer Dr. David Alan Gilbert <dgilbert@redhat.com>    2019-10-11 15:20:02 +0200
commit    694ea274d9236cada361282960e3a0aba660dee5 (patch)
tree      4a9bd737f1821e586e982fb914fb3c38042322fc /include/exec/ram_addr.h
parent    migration: Use automatic rcu_read unlock in rdma.c (diff)
download  qemu-694ea274d9236cada361282960e3a0aba660dee5.tar.gz
          qemu-694ea274d9236cada361282960e3a0aba660dee5.tar.xz
          qemu-694ea274d9236cada361282960e3a0aba660dee5.zip
rcu: Use automatic rcu_read unlock in core memory/exec code
Signed-off-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
Reviewed-by: Daniel P. Berrangé <berrange@redhat.com>
Message-Id: <20191007143642.301445-6-dgilbert@redhat.com>
Signed-off-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
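For context: RCU_READ_LOCK_GUARD() releases the RCU read lock automatically when the enclosing scope is left, while WITH_RCU_READ_LOCK_GUARD() { ... } covers only the attached block; both come from QEMU's include/qemu/rcu.h. What follows is a minimal standalone sketch of the GCC/clang cleanup-attribute pattern such guards are built on. It is illustrative only, not QEMU's actual implementation (which uses glib's g_autoptr machinery); the stubbed lock functions and the names RCUReadAuto, rcu_read_auto_lock and rcu_read_auto_unlock are assumptions for the example.

/*
 * Minimal sketch of a scope-based RCU read guard (illustrative only;
 * QEMU's real macros live in include/qemu/rcu.h).  The lock functions
 * are stubs so the example runs standalone under GCC or clang.
 */
#include <stdint.h>
#include <stdio.h>

static void rcu_read_lock(void)   { puts("  rcu_read_lock");   }
static void rcu_read_unlock(void) { puts("  rcu_read_unlock"); }

typedef void RCUReadAuto;                 /* opaque guard token */

static inline RCUReadAuto *rcu_read_auto_lock(void)
{
    rcu_read_lock();
    return (RCUReadAuto *)(uintptr_t)1;   /* non-NULL, never dereferenced */
}

/* The cleanup attribute hands us a pointer to the guard variable. */
static inline void rcu_read_auto_unlock(RCUReadAuto **guard)
{
    if (*guard) {                         /* NULL means already unlocked */
        rcu_read_unlock();
    }
}

/* Unlock fires when the enclosing scope is left, by any path. */
#define RCU_READ_LOCK_GUARD() \
    __attribute__((cleanup(rcu_read_auto_unlock), unused)) \
    RCUReadAuto *_rcu_read_auto = rcu_read_auto_lock()

/*
 * Run the attached block once: the loop increment unlocks on normal
 * completion and NULLs the token; the cleanup catches break, goto and
 * return out of the block.
 */
#define WITH_RCU_READ_LOCK_GUARD() \
    for (RCU_READ_LOCK_GUARD(); _rcu_read_auto; \
         rcu_read_unlock(), _rcu_read_auto = NULL)

int main(void)
{
    puts("block guard:");
    WITH_RCU_READ_LOCK_GUARD() {
        puts("  reader critical section");
    }                                     /* unlocked here */

    puts("scope guard:");
    {
        RCU_READ_LOCK_GUARD();
        puts("  reader critical section");
    }                                     /* unlocked here */
    return 0;
}

The practical win, visible throughout the diff below, is that early exits such as the break in cpu_physical_memory_get_dirty()'s scan loop can no longer skip the unlock.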
Diffstat (limited to 'include/exec/ram_addr.h')
-rw-r--r--  include/exec/ram_addr.h | 138
1 file changed, 65 insertions(+), 73 deletions(-)
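In the hunks that follow, cpu_physical_memory_all_dirty() and cpu_physical_memory_set_dirty_flag() switch to the plain RCU_READ_LOCK_GUARD(), since the read lock there extends to the end of the function, while cpu_physical_memory_get_dirty(), cpu_physical_memory_set_dirty_range() and cpu_physical_memory_set_dirty_lebitmap() wrap only their dirty-bitmap walks in WITH_RCU_READ_LOCK_GUARD() blocks, re-indenting the enclosed code by one level.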
diff --git a/include/exec/ram_addr.h b/include/exec/ram_addr.h
index e96e621de5..ad158bb247 100644
--- a/include/exec/ram_addr.h
+++ b/include/exec/ram_addr.h
@@ -193,30 +193,29 @@ static inline bool cpu_physical_memory_get_dirty(ram_addr_t start,
     end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
     page = start >> TARGET_PAGE_BITS;
 
-    rcu_read_lock();
-
-    blocks = atomic_rcu_read(&ram_list.dirty_memory[client]);
+    WITH_RCU_READ_LOCK_GUARD() {
+        blocks = atomic_rcu_read(&ram_list.dirty_memory[client]);
+
+        idx = page / DIRTY_MEMORY_BLOCK_SIZE;
+        offset = page % DIRTY_MEMORY_BLOCK_SIZE;
+        base = page - offset;
+        while (page < end) {
+            unsigned long next = MIN(end, base + DIRTY_MEMORY_BLOCK_SIZE);
+            unsigned long num = next - base;
+            unsigned long found = find_next_bit(blocks->blocks[idx],
+                                                num, offset);
+            if (found < num) {
+                dirty = true;
+                break;
+            }
 
-    idx = page / DIRTY_MEMORY_BLOCK_SIZE;
-    offset = page % DIRTY_MEMORY_BLOCK_SIZE;
-    base = page - offset;
-    while (page < end) {
-        unsigned long next = MIN(end, base + DIRTY_MEMORY_BLOCK_SIZE);
-        unsigned long num = next - base;
-        unsigned long found = find_next_bit(blocks->blocks[idx], num, offset);
-        if (found < num) {
-            dirty = true;
-            break;
+            page = next;
+            idx++;
+            offset = 0;
+            base += DIRTY_MEMORY_BLOCK_SIZE;
         }
-
-        page = next;
-        idx++;
-        offset = 0;
-        base += DIRTY_MEMORY_BLOCK_SIZE;
     }
 
-    rcu_read_unlock();
-
     return dirty;
 }
@@ -234,7 +233,7 @@ static inline bool cpu_physical_memory_all_dirty(ram_addr_t start,
     end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
     page = start >> TARGET_PAGE_BITS;
 
-    rcu_read_lock();
+    RCU_READ_LOCK_GUARD();
 
     blocks = atomic_rcu_read(&ram_list.dirty_memory[client]);
@@ -256,8 +255,6 @@ static inline bool cpu_physical_memory_all_dirty(ram_addr_t start,
         base += DIRTY_MEMORY_BLOCK_SIZE;
     }
 
-    rcu_read_unlock();
-
     return dirty;
 }
@@ -309,13 +306,11 @@ static inline void cpu_physical_memory_set_dirty_flag(ram_addr_t addr,
     idx = page / DIRTY_MEMORY_BLOCK_SIZE;
     offset = page % DIRTY_MEMORY_BLOCK_SIZE;
 
-    rcu_read_lock();
+    RCU_READ_LOCK_GUARD();
 
     blocks = atomic_rcu_read(&ram_list.dirty_memory[client]);
 
     set_bit_atomic(offset, blocks->blocks[idx]);
-
-    rcu_read_unlock();
 }
 
static inline void cpu_physical_memory_set_dirty_range(ram_addr_t start,
@@ -334,39 +329,37 @@ static inline void cpu_physical_memory_set_dirty_range(ram_addr_t start,
     end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
     page = start >> TARGET_PAGE_BITS;
 
-    rcu_read_lock();
+    WITH_RCU_READ_LOCK_GUARD() {
+        for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
+            blocks[i] = atomic_rcu_read(&ram_list.dirty_memory[i]);
+        }
 
-    for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
-        blocks[i] = atomic_rcu_read(&ram_list.dirty_memory[i]);
-    }
+        idx = page / DIRTY_MEMORY_BLOCK_SIZE;
+        offset = page % DIRTY_MEMORY_BLOCK_SIZE;
+        base = page - offset;
+        while (page < end) {
+            unsigned long next = MIN(end, base + DIRTY_MEMORY_BLOCK_SIZE);
 
-    idx = page / DIRTY_MEMORY_BLOCK_SIZE;
-    offset = page % DIRTY_MEMORY_BLOCK_SIZE;
-    base = page - offset;
-    while (page < end) {
-        unsigned long next = MIN(end, base + DIRTY_MEMORY_BLOCK_SIZE);
+            if (likely(mask & (1 << DIRTY_MEMORY_MIGRATION))) {
+                bitmap_set_atomic(blocks[DIRTY_MEMORY_MIGRATION]->blocks[idx],
+                                  offset, next - page);
+            }
+            if (unlikely(mask & (1 << DIRTY_MEMORY_VGA))) {
+                bitmap_set_atomic(blocks[DIRTY_MEMORY_VGA]->blocks[idx],
+                                  offset, next - page);
+            }
+            if (unlikely(mask & (1 << DIRTY_MEMORY_CODE))) {
+                bitmap_set_atomic(blocks[DIRTY_MEMORY_CODE]->blocks[idx],
+                                  offset, next - page);
+            }
 
-        if (likely(mask & (1 << DIRTY_MEMORY_MIGRATION))) {
-            bitmap_set_atomic(blocks[DIRTY_MEMORY_MIGRATION]->blocks[idx],
-                              offset, next - page);
+            page = next;
+            idx++;
+            offset = 0;
+            base += DIRTY_MEMORY_BLOCK_SIZE;
         }
-        if (unlikely(mask & (1 << DIRTY_MEMORY_VGA))) {
-            bitmap_set_atomic(blocks[DIRTY_MEMORY_VGA]->blocks[idx],
-                              offset, next - page);
-        }
-        if (unlikely(mask & (1 << DIRTY_MEMORY_CODE))) {
-            bitmap_set_atomic(blocks[DIRTY_MEMORY_CODE]->blocks[idx],
-                              offset, next - page);
-        }
-
-        page = next;
-        idx++;
-        offset = 0;
-        base += DIRTY_MEMORY_BLOCK_SIZE;
     }
 
-    rcu_read_unlock();
-
     xen_hvm_modified_memory(start, length);
 }
@@ -396,36 +389,35 @@ static inline void cpu_physical_memory_set_dirty_lebitmap(unsigned long *bitmap,
         offset = BIT_WORD((start >> TARGET_PAGE_BITS) %
                           DIRTY_MEMORY_BLOCK_SIZE);
 
-        rcu_read_lock();
+        WITH_RCU_READ_LOCK_GUARD() {
+            for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
+                blocks[i] = atomic_rcu_read(&ram_list.dirty_memory[i])->blocks;
+            }
 
-        for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
-            blocks[i] = atomic_rcu_read(&ram_list.dirty_memory[i])->blocks;
-        }
+            for (k = 0; k < nr; k++) {
+                if (bitmap[k]) {
+                    unsigned long temp = leul_to_cpu(bitmap[k]);
 
-        for (k = 0; k < nr; k++) {
-            if (bitmap[k]) {
-                unsigned long temp = leul_to_cpu(bitmap[k]);
+                    atomic_or(&blocks[DIRTY_MEMORY_VGA][idx][offset], temp);
 
-                atomic_or(&blocks[DIRTY_MEMORY_VGA][idx][offset], temp);
+                    if (global_dirty_log) {
+                        atomic_or(&blocks[DIRTY_MEMORY_MIGRATION][idx][offset],
+                                  temp);
+                    }
 
-                if (global_dirty_log) {
-                    atomic_or(&blocks[DIRTY_MEMORY_MIGRATION][idx][offset],
-                              temp);
+                    if (tcg_enabled()) {
+                        atomic_or(&blocks[DIRTY_MEMORY_CODE][idx][offset],
+                                  temp);
+                    }
                 }
 
-                if (tcg_enabled()) {
-                    atomic_or(&blocks[DIRTY_MEMORY_CODE][idx][offset], temp);
+                if (++offset >= BITS_TO_LONGS(DIRTY_MEMORY_BLOCK_SIZE)) {
+                    offset = 0;
+                    idx++;
                 }
             }
-
-            if (++offset >= BITS_TO_LONGS(DIRTY_MEMORY_BLOCK_SIZE)) {
-                offset = 0;
-                idx++;
-            }
         }
 
-        rcu_read_unlock();
-
         xen_hvm_modified_memory(start, pages << TARGET_PAGE_BITS);
     } else {
         uint8_t clients = tcg_enabled() ? DIRTY_CLIENTS_ALL : DIRTY_CLIENTS_NOCODE;