author     Peter Maydell  2022-07-20 23:33:35 +0200
committer  Peter Maydell  2022-07-20 23:33:35 +0200
commit     fe16c833fdb74642c296fa96a472e39229cd4351 (patch)
tree       ba1d92b35dc4eb227d72a0c4839c06d4d375cc76 /include
parent     Merge tag 'net-pull-request' of https://github.com/jasowang/qemu into staging (diff)
parent     Revert "gitlab: disable accelerated zlib for s390x" (diff)
Merge tag 'pull-migration-20220720c' of https://gitlab.com/dagrh/qemu into staging
Migration pull 2022-07-20

This replaces yesterday's pull and:
  a) Fixes some test build errors without TLS
  b) Reenabled the zlib acceleration on s390 now that we have Ilya's fix

  Hyman's dirty page rate limit set
  Ilya's fix for zlib vs migration
  Peter's postcopy-preempt
  Cleanup from Dan
  zero-copy tidy ups from Leo
  multifd doc fix from Juan
  Revert disable of zlib acceleration on s390x

Signed-off-by: Dr. David Alan Gilbert <dgilbert@redhat.com>

# gpg: Signature made Wed 20 Jul 2022 12:18:56 BST
# gpg: using RSA key 45F5C71B4A0CB7FB977A9FA90516331EBC5BFDE7
# gpg: Good signature from "Dr. David Alan Gilbert (RH2) <dgilbert@redhat.com>" [full]
# Primary key fingerprint: 45F5 C71B 4A0C B7FB 977A 9FA9 0516 331E BC5B FDE7

* tag 'pull-migration-20220720c' of https://gitlab.com/dagrh/qemu: (30 commits)
  Revert "gitlab: disable accelerated zlib for s390x"
  migration: Avoid false-positive on non-supported scenarios for zero-copy-send
  multifd: Document the locking of MultiFD{Send/Recv}Params
  migration/multifd: Report to user when zerocopy not working
  Add dirty-sync-missed-zero-copy migration stat
  QIOChannelSocket: Fix zero-copy flush returning code 1 when nothing sent
  migration: remove unreachable code after reading data
  tests: Add postcopy preempt tests
  tests: Add postcopy tls recovery migration test
  tests: Add postcopy tls migration test
  tests: Move MigrateCommon upper
  migration: Respect postcopy request order in preemption mode
  migration: Enable TLS for preempt channel
  migration: Export tls-[creds|hostname|authz] params to cmdline too
  migration: Add helpers to detect TLS capability
  migration: Add property x-postcopy-preempt-break-huge
  migration: Create the postcopy preempt channel asynchronously
  migration: Postcopy recover with preempt enabled
  migration: Postcopy preemption enablement
  migration: Postcopy preemption preparation on channel creation
  ...

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Diffstat (limited to 'include')
-rw-r--r--  include/exec/cpu-common.h     1
-rw-r--r--  include/exec/memory.h         5
-rw-r--r--  include/hw/core/cpu.h         6
-rw-r--r--  include/monitor/hmp.h         3
-rw-r--r--  include/sysemu/dirtylimit.h  37
-rw-r--r--  include/sysemu/dirtyrate.h   28
-rw-r--r--  include/sysemu/kvm.h          2
7 files changed, 81 insertions, 1 deletion
diff --git a/include/exec/cpu-common.h b/include/exec/cpu-common.h
index 5968551a05..2281be4e10 100644
--- a/include/exec/cpu-common.h
+++ b/include/exec/cpu-common.h
@@ -35,6 +35,7 @@ extern intptr_t qemu_host_page_mask;
void qemu_init_cpu_list(void);
void cpu_list_lock(void);
void cpu_list_unlock(void);
+unsigned int cpu_list_generation_id_get(void);
void tcg_flush_softmmu_tlb(CPUState *cs);
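The new cpu_list_generation_id_get() helper exposes a counter for the vCPU list, so a caller can notice when vCPUs were added or removed between two reads. A minimal, hypothetical caller sketch (not part of this diff) would sample the counter around a measurement pass and discard results if it changed:

    /* Hypothetical caller, for illustration only. */
    static bool cpu_list_changed_since(unsigned int before)
    {
        return cpu_list_generation_id_get() != before;
    }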
diff --git a/include/exec/memory.h b/include/exec/memory.h
index a6a0f4d8ad..bfb1de8eea 100644
--- a/include/exec/memory.h
+++ b/include/exec/memory.h
@@ -69,7 +69,10 @@ static inline void fuzz_dma_read_cb(size_t addr,
/* Dirty tracking enabled because measuring dirty rate */
#define GLOBAL_DIRTY_DIRTY_RATE (1U << 1)
-#define GLOBAL_DIRTY_MASK (0x3)
+/* Dirty tracking enabled because dirty limit */
+#define GLOBAL_DIRTY_LIMIT (1U << 2)
+
+#define GLOBAL_DIRTY_MASK (0x7)
extern unsigned int global_dirty_tracking;
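With GLOBAL_DIRTY_LIMIT added as a third consumer of global dirty tracking, GLOBAL_DIRTY_MASK grows from 0x3 to 0x7 so it still covers every defined bit. A minimal sketch of how a consumer might inspect the shared global_dirty_tracking bitmap (treating GLOBAL_DIRTY_MIGRATION as bit 0 is an assumption from the existing header; it is not shown in this hunk):

    /* Sketch only: is dirty tracking active solely for the dirty limit? */
    static bool dirty_tracking_limit_only(void)
    {
        return (global_dirty_tracking & GLOBAL_DIRTY_MASK) == GLOBAL_DIRTY_LIMIT;
    }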
diff --git a/include/hw/core/cpu.h b/include/hw/core/cpu.h
index 996f94059f..500503da13 100644
--- a/include/hw/core/cpu.h
+++ b/include/hw/core/cpu.h
@@ -418,6 +418,12 @@ struct CPUState {
*/
bool throttle_thread_scheduled;
+ /*
+ * Sleep throttle_us_per_full microseconds once dirty ring is full
+ * if dirty page rate limit is enabled.
+ */
+ int64_t throttle_us_per_full;
+
bool ignore_memory_transaction_failures;
/* Used for user-only emulation of prctl(PR_SET_UNALIGN). */
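The new throttle_us_per_full field records how long a vCPU should sleep each time its dirty ring fills while the dirty page rate limit is active. A hedged sketch of that throttling step; the helper name and call site below are assumptions, not taken from this series:

    /* Illustrative only: sleep after the vCPU's dirty ring has filled. */
    static void dirtylimit_sleep_on_ring_full(CPUState *cpu)
    {
        int64_t sleep_us = cpu->throttle_us_per_full;

        if (sleep_us > 0) {
            g_usleep(sleep_us);   /* glib microsecond sleep */
        }
    }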
diff --git a/include/monitor/hmp.h b/include/monitor/hmp.h
index 2e89a97bd6..a618eb1e4e 100644
--- a/include/monitor/hmp.h
+++ b/include/monitor/hmp.h
@@ -131,6 +131,9 @@ void hmp_replay_delete_break(Monitor *mon, const QDict *qdict);
void hmp_replay_seek(Monitor *mon, const QDict *qdict);
void hmp_info_dirty_rate(Monitor *mon, const QDict *qdict);
void hmp_calc_dirty_rate(Monitor *mon, const QDict *qdict);
+void hmp_set_vcpu_dirty_limit(Monitor *mon, const QDict *qdict);
+void hmp_cancel_vcpu_dirty_limit(Monitor *mon, const QDict *qdict);
+void hmp_info_vcpu_dirty_limit(Monitor *mon, const QDict *qdict);
void hmp_human_readable_text_helper(Monitor *mon,
HumanReadableText *(*qmp_handler)(Error **));
void hmp_info_stats(Monitor *mon, const QDict *qdict);
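The three new handlers suggest matching monitor commands for setting, querying, and cancelling the per-vCPU dirty page rate limit. The command names and argument shapes below are assumptions inferred from the handler names, not taken from this diff:

    (qemu) set_vcpu_dirty_limit ...       # set a dirty page rate quota
    (qemu) info vcpu_dirty_limit          # show the current per-vCPU limits
    (qemu) cancel_vcpu_dirty_limit ...    # drop the limit again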
diff --git a/include/sysemu/dirtylimit.h b/include/sysemu/dirtylimit.h
new file mode 100644
index 0000000000..8d2c1f3a6b
--- /dev/null
+++ b/include/sysemu/dirtylimit.h
@@ -0,0 +1,37 @@
+/*
+ * Dirty page rate limit common functions
+ *
+ * Copyright (c) 2022 CHINA TELECOM CO.,LTD.
+ *
+ * Authors:
+ * Hyman Huang(黄勇) <huangy81@chinatelecom.cn>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+#ifndef QEMU_DIRTYRLIMIT_H
+#define QEMU_DIRTYRLIMIT_H
+
+#define DIRTYLIMIT_CALC_TIME_MS 1000 /* 1000ms */
+
+int64_t vcpu_dirty_rate_get(int cpu_index);
+void vcpu_dirty_rate_stat_start(void);
+void vcpu_dirty_rate_stat_stop(void);
+void vcpu_dirty_rate_stat_initialize(void);
+void vcpu_dirty_rate_stat_finalize(void);
+
+void dirtylimit_state_lock(void);
+void dirtylimit_state_unlock(void);
+void dirtylimit_state_initialize(void);
+void dirtylimit_state_finalize(void);
+bool dirtylimit_in_service(void);
+bool dirtylimit_vcpu_index_valid(int cpu_index);
+void dirtylimit_process(void);
+void dirtylimit_change(bool start);
+void dirtylimit_set_vcpu(int cpu_index,
+ uint64_t quota,
+ bool enable);
+void dirtylimit_set_all(uint64_t quota,
+ bool enable);
+void dirtylimit_vcpu_execute(CPUState *cpu);
+#endif
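Taken together, the declarations describe a small life cycle: per-vCPU dirty rate sampling is started once, a dirtylimit state object is set up, and quotas are applied per vCPU or for all vCPUs under the state lock. A hypothetical call sequence based only on the prototypes above (the real callers live elsewhere in the series):

    /* Hypothetical enable path, for illustration only. */
    static void dirtylimit_example_enable(int cpu_index, uint64_t quota)
    {
        vcpu_dirty_rate_stat_initialize();
        vcpu_dirty_rate_stat_start();
        dirtylimit_state_initialize();

        dirtylimit_state_lock();
        if (cpu_index < 0) {
            dirtylimit_set_all(quota, true);              /* every vCPU */
        } else if (dirtylimit_vcpu_index_valid(cpu_index)) {
            dirtylimit_set_vcpu(cpu_index, quota, true);  /* one vCPU */
        }
        dirtylimit_state_unlock();
    }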
diff --git a/include/sysemu/dirtyrate.h b/include/sysemu/dirtyrate.h
new file mode 100644
index 0000000000..4d3b9a4902
--- /dev/null
+++ b/include/sysemu/dirtyrate.h
@@ -0,0 +1,28 @@
+/*
+ * dirty page rate helper functions
+ *
+ * Copyright (c) 2022 CHINA TELECOM CO.,LTD.
+ *
+ * Authors:
+ * Hyman Huang(黄勇) <huangy81@chinatelecom.cn>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#ifndef QEMU_DIRTYRATE_H
+#define QEMU_DIRTYRATE_H
+
+typedef struct VcpuStat {
+ int nvcpu; /* number of vcpu */
+ DirtyRateVcpu *rates; /* array of dirty rate for each vcpu */
+} VcpuStat;
+
+int64_t vcpu_calculate_dirtyrate(int64_t calc_time_ms,
+ VcpuStat *stat,
+ unsigned int flag,
+ bool one_shot);
+
+void global_dirty_log_change(unsigned int flag,
+ bool start);
+#endif
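vcpu_calculate_dirtyrate() fills a caller-provided VcpuStat with one rate per vCPU over the requested measurement window. A minimal usage sketch; the flag value reuses GLOBAL_DIRTY_DIRTY_RATE from memory.h, and the DirtyRateVcpu field names (id, dirty_rate) and the MB/s unit come from the QAPI-generated type rather than this diff:

    /* Sketch: one-shot per-vCPU dirty rate measurement over one second.
     * Needs <stdio.h> and <inttypes.h> for the printf below. */
    static void example_measure(void)
    {
        VcpuStat stat = { 0 };

        vcpu_calculate_dirtyrate(1000 /* ms */, &stat,
                                 GLOBAL_DIRTY_DIRTY_RATE, true /* one shot */);

        for (int i = 0; i < stat.nvcpu; i++) {
            printf("vcpu %" PRId64 ": %" PRId64 " MB/s\n",
                   stat.rates[i].id, stat.rates[i].dirty_rate);
        }
    }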
diff --git a/include/sysemu/kvm.h b/include/sysemu/kvm.h
index a783c78868..efd6dee818 100644
--- a/include/sysemu/kvm.h
+++ b/include/sysemu/kvm.h
@@ -582,4 +582,6 @@ bool kvm_cpu_check_are_resettable(void);
bool kvm_arch_cpu_check_are_resettable(void);
bool kvm_dirty_ring_enabled(void);
+
+uint32_t kvm_dirty_ring_size(void);
#endif
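kvm_dirty_ring_size() complements kvm_dirty_ring_enabled(), letting code outside the KVM accelerator query the configured ring size. A short, purely illustrative check (reading the value as entries per vCPU ring is an assumption here):

    /* Illustration only. */
    if (kvm_dirty_ring_enabled()) {
        printf("dirty ring size: %u\n", kvm_dirty_ring_size());
    }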