author     Paolo Bonzini <pbonzini@redhat.com>     2020-04-07 16:07:45 +0200
committer  Stefan Hajnoczi <stefanha@redhat.com>   2020-04-09 17:16:28 +0200
commit     3c18a92dc4b55ca8cc37a755ed119f11c0f34099 (patch)
tree       1153ea1812c170bcc50e72820bae2d585827c0a0 /include
parent     aio-posix: signal-proof fdmon-io_uring (diff)
aio-wait: delegate polling of main AioContext if BQL not held
Any thread that is not an iothread gets the main AioContext back
from qemu_get_current_aio_context(). As a result,
in_aio_context_home_thread(qemu_get_aio_context()) also returns
true in such threads, causing AIO_WAIT_WHILE to invoke aio_poll()
directly. This is incorrect if the BQL is not held, because
aio_poll() does not expect to run concurrently from multiple
threads, and that can actually happen when savevm writes to the
vmstate file from the migration thread.
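
For context, AIO_WAIT_WHILE is the macro that makes this decision. The
following is a heavily condensed sketch of its dispatch logic as of this
era; assertions and the waiter bookkeeping that lets aio_wait_kick() wake
pollers are elided, so treat it as an illustration rather than the exact
macro:

/* Condensed sketch of AIO_WAIT_WHILE(); assertions and the waiter
 * bookkeeping that lets aio_wait_kick() wake pollers are elided.
 */
#define AIO_WAIT_WHILE(ctx, cond) ({                               \
    bool waited_ = false;                                          \
    AioContext *ctx_ = (ctx);                                      \
    if (ctx_ && in_aio_context_home_thread(ctx_)) {                \
        /* Home thread of ctx_: safe to poll it directly. */       \
        while ((cond)) {                                           \
            aio_poll(ctx_, true);                                  \
            waited_ = true;                                        \
        }                                                          \
    } else {                                                       \
        /* Delegating path: poll the main AioContext instead,      \
         * releasing ctx_'s lock around each iteration so ctx_'s   \
         * home thread can make progress.                          \
         */                                                        \
        while ((cond)) {                                           \
            if (ctx_) {                                            \
                aio_context_release(ctx_);                         \
            }                                                      \
            aio_poll(qemu_get_aio_context(), true);                \
            if (ctx_) {                                            \
                aio_context_acquire(ctx_);                         \
            }                                                      \
            waited_ = true;                                        \
        }                                                          \
    }                                                              \
    waited_; })

Before this patch, a BQL-less thread waiting on the main AioContext took
the first branch; the stricter predicate forces it onto the delegating
path instead.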
Therefore, restrict in_aio_context_home_thread to return true
for the main AioContext only if the BQL is held.
The function is moved to aio-wait.h because it is mostly used
there, and because its new implementation needs
qemu_mutex_iothread_locked() from qemu/main-loop.h; keeping it in
block/aio.h would create a circular reference between main-loop.h
and block/aio.h.
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Message-Id: <20200407140746.8041-5-pbonzini@redhat.com>
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Diffstat (limited to 'include')

 include/block/aio-wait.h | 22 ++++++++++++++++++++++
 include/block/aio.h      | 29 ++++++++++-------------------
 2 files changed, 32 insertions(+), 19 deletions(-)
diff --git a/include/block/aio-wait.h b/include/block/aio-wait.h
index afeeb18f95..716d2639df 100644
--- a/include/block/aio-wait.h
+++ b/include/block/aio-wait.h
@@ -26,6 +26,7 @@
 #define QEMU_AIO_WAIT_H

 #include "block/aio.h"
+#include "qemu/main-loop.h"

 /**
  * AioWait:
@@ -124,4 +125,25 @@ void aio_wait_kick(void);
  */
 void aio_wait_bh_oneshot(AioContext *ctx, QEMUBHFunc *cb, void *opaque);

+/**
+ * in_aio_context_home_thread:
+ * @ctx: the aio context
+ *
+ * Return whether we are running in the thread that normally runs @ctx. Note
+ * that acquiring/releasing ctx does not affect the outcome, each AioContext
+ * still only has one home thread that is responsible for running it.
+ */
+static inline bool in_aio_context_home_thread(AioContext *ctx)
+{
+    if (ctx == qemu_get_current_aio_context()) {
+        return true;
+    }
+
+    if (ctx == qemu_get_aio_context()) {
+        return qemu_mutex_iothread_locked();
+    } else {
+        return false;
+    }
+}
+
 #endif /* QEMU_AIO_WAIT_H */
diff --git a/include/block/aio.h b/include/block/aio.h
index cb1989105a..62ed954344 100644
--- a/include/block/aio.h
+++ b/include/block/aio.h
@@ -133,12 +133,16 @@ struct AioContext {
     AioHandlerList deleted_aio_handlers;

     /* Used to avoid unnecessary event_notifier_set calls in aio_notify;
-     * accessed with atomic primitives. If this field is 0, everything
-     * (file descriptors, bottom halves, timers) will be re-evaluated
-     * before the next blocking poll(), thus the event_notifier_set call
-     * can be skipped. If it is non-zero, you may need to wake up a
-     * concurrent aio_poll or the glib main event loop, making
-     * event_notifier_set necessary.
+     * only written from the AioContext home thread, or under the BQL in
+     * the case of the main AioContext. However, it is read from any
+     * thread so it is still accessed with atomic primitives.
+     *
+     * If this field is 0, everything (file descriptors, bottom halves,
+     * timers) will be re-evaluated before the next blocking poll() or
+     * io_uring wait; therefore, the event_notifier_set call can be
+     * skipped. If it is non-zero, you may need to wake up a concurrent
+     * aio_poll or the glib main event loop, making event_notifier_set
+     * necessary.
      *
      * Bit 0 is reserved for GSource usage of the AioContext, and is 1
      * between a call to aio_ctx_prepare and the next call to aio_ctx_check.
@@ -682,19 +686,6 @@ void aio_co_enter(AioContext *ctx, struct Coroutine *co);
 AioContext *qemu_get_current_aio_context(void);

 /**
- * in_aio_context_home_thread:
- * @ctx: the aio context
- *
- * Return whether we are running in the thread that normally runs @ctx. Note
- * that acquiring/releasing ctx does not affect the outcome, each AioContext
- * still only has one home thread that is responsible for running it.
- */
-static inline bool in_aio_context_home_thread(AioContext *ctx)
-{
-    return ctx == qemu_get_current_aio_context();
-}
-
-/**
  * aio_context_setup:
  * @ctx: the aio context
  *
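
As a usage note, callers typically consult the relocated predicate before
deciding whether work can run in place or must be handed to the context's
home thread. The sketch below is hypothetical (run_in_home_thread and
do_work are illustrative names, not QEMU API); only
in_aio_context_home_thread() and aio_wait_bh_oneshot() come from the
header above:

#include "block/aio-wait.h"

/* Hypothetical worker; aio_wait_bh_oneshot() runs it as a one-shot
 * bottom half in the target context.
 */
static void do_work(void *opaque)
{
    /* ... executes in the home thread of the chosen AioContext ... */
}

/* Hypothetical helper: run do_work() in @ctx's home thread and wait
 * for it to finish. Per aio_wait_bh_oneshot()'s contract this is
 * meant to be called from the main loop thread, i.e. with the BQL
 * held, which after this patch is exactly when the main AioContext
 * counts as "home".
 */
static void run_in_home_thread(AioContext *ctx, void *opaque)
{
    if (in_aio_context_home_thread(ctx)) {
        do_work(opaque);                        /* already in place */
    } else {
        aio_wait_bh_oneshot(ctx, do_work, opaque);
    }
}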