author | Peter Maydell | 2018-08-23 20:03:53 +0200 |
---|---|---|
committer | Peter Maydell | 2018-08-23 20:03:54 +0200 |
commit | 3c825bb7c1b4289ef05f51b5b77ac0967b6a27fa (patch) | |
tree | 44c5189aacbe795b125bfb4b5144a37fe122d2e4 /include/qemu/rcu_queue.h | |
parent | Merge remote-tracking branch 'remotes/cody/tags/block-pull-request' into staging (diff) | |
parent | KVM: cleanup unnecessary #ifdef KVM_CAP_... (diff) | |
Merge remote-tracking branch 'remotes/bonzini/tags/for-upstream' into staging
* x86 TCG fixes for 64-bit call gates (Andrew)
* qemu-guest-agent freeze-hook tweak (Christian)
* pm_smbus improvements (Corey)
* Move validation to pre_plug for pc-dimm (David)
* Fix memory leaks (Eduardo, Marc-André)
* synchronization profiler (Emilio)
* Convert the CPU list to RCU (Emilio)
* LSI support for PPR Extended Message (George)
* vhost-scsi support for protection information (Greg)
* Mark mptsas as a storage device in the help (Guenter)
* checkpatch tweak cherry-picked from Linux (me)
* Typos, cleanups and dead-code removal (Julia, Marc-André)
* qemu-pr-helper support for old libmultipath (Murilo)
* Annotate fallthroughs (me)
* MemoryRegionOps cleanup (me, Peter)
* Make s390 qtests independent from libqos, which doesn't actually support it (me)
* Make cpu_get_ticks independent from BQL (me)
* Introspection fixes (Thomas)
* Support QEMU_MODULE_DIR environment variable (ryang)
# gpg: Signature made Thu 23 Aug 2018 17:46:30 BST
# gpg: using RSA key BFFBD25F78C7AE83
# gpg: Good signature from "Paolo Bonzini <bonzini@gnu.org>"
# gpg: aka "Paolo Bonzini <pbonzini@redhat.com>"
# Primary key fingerprint: 46F5 9FBD 57D6 12E7 BFD4 E2F7 7E15 100C CD36 69B1
# Subkey fingerprint: F133 3857 4B66 2389 866C 7682 BFFB D25F 78C7 AE83
* remotes/bonzini/tags/for-upstream: (69 commits)
KVM: cleanup unnecessary #ifdef KVM_CAP_...
target/i386: update MPX flags when CPL changes
i2c: pm_smbus: Add the ability to force block transfer enable
i2c: pm_smbus: Don't delay host status register busy bit when interrupts are enabled
i2c: pm_smbus: Add interrupt handling
i2c: pm_smbus: Add block transfer capability
i2c: pm_smbus: Make the I2C block read command read-only
i2c: pm_smbus: Fix the semantics of block I2C transfers
i2c: pm_smbus: Clean up some style issues
pc-dimm: assign and verify the "addr" property during pre_plug
pc: drop memory region alignment check for 0
util/oslib-win32: indicate alignment for qemu_anon_ram_alloc()
pc-dimm: assign and verify the "slot" property during pre_plug
ipmi: Use proper struct reference for BT vmstate
vhost-scsi: expose 't10_pi' property for VIRTIO_SCSI_F_T10_PI
vhost-scsi: unify vhost-scsi get_features implementations
vhost-user-scsi: move host_features into VHostSCSICommon
cpus: allow cpu_get_ticks out of BQL
cpus: protect TimerState writes with a spinlock
seqlock: add QemuLockable support
...
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Diffstat (limited to 'include/qemu/rcu_queue.h')
-rw-r--r-- | include/qemu/rcu_queue.h | 135 |
1 file changed, 133 insertions(+), 2 deletions(-)
```diff
diff --git a/include/qemu/rcu_queue.h b/include/qemu/rcu_queue.h
index 01be77407b..904b3372dc 100644
--- a/include/qemu/rcu_queue.h
+++ b/include/qemu/rcu_queue.h
@@ -36,7 +36,7 @@ extern "C" {
 /*
  * List access methods.
  */
-#define QLIST_EMPTY_RCU(head) (atomic_rcu_read(&(head)->lh_first) == NULL)
+#define QLIST_EMPTY_RCU(head) (atomic_read(&(head)->lh_first) == NULL)
 #define QLIST_FIRST_RCU(head) (atomic_rcu_read(&(head)->lh_first))
 #define QLIST_NEXT_RCU(elm, field) (atomic_rcu_read(&(elm)->field.le_next))
 
@@ -112,7 +112,7 @@ extern "C" {
         (elm)->field.le_next->field.le_prev =  \
             (elm)->field.le_prev;              \
     }                                          \
-    *(elm)->field.le_prev = (elm)->field.le_next;            \
+    atomic_set((elm)->field.le_prev, (elm)->field.le_next);  \
 } while (/*CONSTCOND*/0)
 
 /* List traversal must occur within an RCU critical section. */
@@ -128,6 +128,137 @@ extern "C" {
         ((next_var) = atomic_rcu_read(&(var)->field.le_next), 1); \
         (var) = (next_var))
 
+/*
+ * RCU simple queue
+ */
+
+/* Simple queue access methods */
+#define QSIMPLEQ_EMPTY_RCU(head)      (atomic_read(&(head)->sqh_first) == NULL)
+#define QSIMPLEQ_FIRST_RCU(head)      atomic_rcu_read(&(head)->sqh_first)
+#define QSIMPLEQ_NEXT_RCU(elm, field) atomic_rcu_read(&(elm)->field.sqe_next)
+
+/* Simple queue functions */
+#define QSIMPLEQ_INSERT_HEAD_RCU(head, elm, field) do {    \
+    (elm)->field.sqe_next = (head)->sqh_first;             \
+    if ((elm)->field.sqe_next == NULL) {                   \
+        (head)->sqh_last = &(elm)->field.sqe_next;         \
+    }                                                      \
+    atomic_rcu_set(&(head)->sqh_first, (elm));             \
+} while (/*CONSTCOND*/0)
+
+#define QSIMPLEQ_INSERT_TAIL_RCU(head, elm, field) do {    \
+    (elm)->field.sqe_next = NULL;                          \
+    atomic_rcu_set((head)->sqh_last, (elm));               \
+    (head)->sqh_last = &(elm)->field.sqe_next;             \
+} while (/*CONSTCOND*/0)
+
+#define QSIMPLEQ_INSERT_AFTER_RCU(head, listelm, elm, field) do {    \
+    (elm)->field.sqe_next = (listelm)->field.sqe_next;               \
+    if ((elm)->field.sqe_next == NULL) {                             \
+        (head)->sqh_last = &(elm)->field.sqe_next;                   \
+    }                                                                \
+    atomic_rcu_set(&(listelm)->field.sqe_next, (elm));               \
+} while (/*CONSTCOND*/0)
+
+#define QSIMPLEQ_REMOVE_HEAD_RCU(head, field) do {                     \
+    atomic_set(&(head)->sqh_first, (head)->sqh_first->field.sqe_next); \
+    if ((head)->sqh_first == NULL) {                                   \
+        (head)->sqh_last = &(head)->sqh_first;                         \
+    }                                                                  \
+} while (/*CONSTCOND*/0)
+
+#define QSIMPLEQ_REMOVE_RCU(head, elm, type, field) do {     \
+    if ((head)->sqh_first == (elm)) {                        \
+        QSIMPLEQ_REMOVE_HEAD_RCU((head), field);             \
+    } else {                                                 \
+        struct type *curr = (head)->sqh_first;               \
+        while (curr->field.sqe_next != (elm)) {              \
+            curr = curr->field.sqe_next;                     \
+        }                                                    \
+        atomic_set(&curr->field.sqe_next,                    \
+                   curr->field.sqe_next->field.sqe_next);    \
+        if (curr->field.sqe_next == NULL) {                  \
+            (head)->sqh_last = &(curr)->field.sqe_next;      \
+        }                                                    \
+    }                                                        \
+} while (/*CONSTCOND*/0)
+
+#define QSIMPLEQ_FOREACH_RCU(var, head, field)                  \
+    for ((var) = atomic_rcu_read(&(head)->sqh_first);           \
+         (var);                                                 \
+         (var) = atomic_rcu_read(&(var)->field.sqe_next))
+
+#define QSIMPLEQ_FOREACH_SAFE_RCU(var, head, field, next)                 \
+    for ((var) = atomic_rcu_read(&(head)->sqh_first);                     \
+         (var) && ((next) = atomic_rcu_read(&(var)->field.sqe_next), 1);  \
+         (var) = (next))
+
+/*
+ * RCU tail queue
+ */
+
+/* Tail queue access methods */
+#define QTAILQ_EMPTY_RCU(head)      (atomic_read(&(head)->tqh_first) == NULL)
+#define QTAILQ_FIRST_RCU(head)      atomic_rcu_read(&(head)->tqh_first)
+#define QTAILQ_NEXT_RCU(elm, field) atomic_rcu_read(&(elm)->field.tqe_next)
+
+/* Tail queue functions */
+#define QTAILQ_INSERT_HEAD_RCU(head, elm, field) do {                  \
+    (elm)->field.tqe_next = (head)->tqh_first;                         \
+    if ((elm)->field.tqe_next != NULL) {                               \
+        (head)->tqh_first->field.tqe_prev = &(elm)->field.tqe_next;    \
+    } else {                                                           \
+        (head)->tqh_last = &(elm)->field.tqe_next;                     \
+    }                                                                  \
+    atomic_rcu_set(&(head)->tqh_first, (elm));                         \
+    (elm)->field.tqe_prev = &(head)->tqh_first;                        \
+} while (/*CONSTCOND*/0)
+
+#define QTAILQ_INSERT_TAIL_RCU(head, elm, field) do {    \
+    (elm)->field.tqe_next = NULL;                        \
+    (elm)->field.tqe_prev = (head)->tqh_last;            \
+    atomic_rcu_set((head)->tqh_last, (elm));             \
+    (head)->tqh_last = &(elm)->field.tqe_next;           \
+} while (/*CONSTCOND*/0)
+
+#define QTAILQ_INSERT_AFTER_RCU(head, listelm, elm, field) do {            \
+    (elm)->field.tqe_next = (listelm)->field.tqe_next;                     \
+    if ((elm)->field.tqe_next != NULL) {                                   \
+        (elm)->field.tqe_next->field.tqe_prev = &(elm)->field.tqe_next;    \
+    } else {                                                               \
+        (head)->tqh_last = &(elm)->field.tqe_next;                         \
+    }                                                                      \
+    atomic_rcu_set(&(listelm)->field.tqe_next, (elm));                     \
+    (elm)->field.tqe_prev = &(listelm)->field.tqe_next;                    \
+} while (/*CONSTCOND*/0)
+
+#define QTAILQ_INSERT_BEFORE_RCU(listelm, elm, field) do {    \
+    (elm)->field.tqe_prev = (listelm)->field.tqe_prev;        \
+    (elm)->field.tqe_next = (listelm);                        \
+    atomic_rcu_set((listelm)->field.tqe_prev, (elm));         \
+    (listelm)->field.tqe_prev = &(elm)->field.tqe_next;       \
+} while (/*CONSTCOND*/0)
+
+#define QTAILQ_REMOVE_RCU(head, elm, field) do {                           \
+    if (((elm)->field.tqe_next) != NULL) {                                 \
+        (elm)->field.tqe_next->field.tqe_prev = (elm)->field.tqe_prev;     \
+    } else {                                                               \
+        (head)->tqh_last = (elm)->field.tqe_prev;                          \
+    }                                                                      \
+    atomic_set((elm)->field.tqe_prev, (elm)->field.tqe_next);              \
+    (elm)->field.tqe_prev = NULL;                                          \
+} while (/*CONSTCOND*/0)
+
+#define QTAILQ_FOREACH_RCU(var, head, field)                  \
+    for ((var) = atomic_rcu_read(&(head)->tqh_first);         \
+         (var);                                               \
+         (var) = atomic_rcu_read(&(var)->field.tqe_next))
+
+#define QTAILQ_FOREACH_SAFE_RCU(var, head, field, next)                   \
+    for ((var) = atomic_rcu_read(&(head)->tqh_first);                     \
+         (var) && ((next) = atomic_rcu_read(&(var)->field.tqe_next), 1);  \
+         (var) = (next))
+
 #ifdef __cplusplus
 }
 #endif
```
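For context, a minimal sketch of how the new RCU tail-queue macros are meant to be used: readers traverse the list only inside an rcu_read_lock()/rcu_read_unlock() critical section, while writers publish updates with the *_RCU insert/remove macros and still serialize against other writers with their own lock. The Item type, list name, and helper functions below are invented for illustration; rcu_read_lock()/rcu_read_unlock() come from qemu/rcu.h and QTAILQ_HEAD/QTAILQ_ENTRY from qemu/queue.h.

```c
/* Illustrative sketch only; Item, items, and these helpers are hypothetical. */
#include "qemu/osdep.h"
#include "qemu/queue.h"
#include "qemu/rcu.h"
#include "qemu/rcu_queue.h"

typedef struct Item Item;

struct Item {
    int value;
    QTAILQ_ENTRY(Item) node;   /* list linkage for the hypothetical list */
};

/* Hypothetical RCU-protected list. */
static QTAILQ_HEAD(, Item) items = QTAILQ_HEAD_INITIALIZER(items);

/* Reader: the traversal must stay inside an RCU critical section. */
static int total_value(void)
{
    Item *it;
    int sum = 0;

    rcu_read_lock();
    QTAILQ_FOREACH_RCU(it, &items, node) {
        sum += it->value;
    }
    rcu_read_unlock();
    return sum;
}

/* Writer: RCU only protects readers; concurrent writers must still be
 * serialized by a lock owned by the caller. */
static void publish_item(Item *it)
{
    QTAILQ_INSERT_TAIL_RCU(&items, it, node);
}
```

The key property the macros provide is that a reader racing with publish_item() sees either the old or the new list, never a half-linked element, because the element is fully initialized before atomic_rcu_set() makes it reachable.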