From 1f050a4690f62a1e7dabc4f44141e9f762c3769f Mon Sep 17 00:00:00 2001 From: Stefan Hajnoczi Date: Thu, 5 Mar 2020 17:08:02 +0000 Subject: aio-posix: extract ppoll(2) and epoll(7) fd monitoring The ppoll(2) and epoll(7) file descriptor monitoring implementations are mixed with the core util/aio-posix.c code. Before adding another implementation for Linux io_uring, extract out the existing ones so there is a clear interface and the core code is simpler. The new interface is AioContext->fdmon_ops, a pointer to a FDMonOps struct. See the patch for details. Semantic changes: 1. ppoll(2) now reflects events from pollfds[] back into AioHandlers while we're still on the clock for adaptive polling. This was already happening for epoll(7), so if it's really an issue then we'll need to fix both in the future. 2. epoll(7)'s fallback to ppoll(2) while external events are disabled was broken when the number of fds exceeded the epoll(7) upgrade threshold. I guess this code path simply wasn't tested and no one noticed the bug. I didn't go out of my way to fix it but the correct code is simpler than preserving the bug. I also took some liberties in removing the unnecessary AioContext->epoll_available (just check AioContext->epollfd != -1 instead) and AioContext->epoll_enabled (it's implicit if our AioContext->fdmon_ops callbacks are being invoked) fields. Signed-off-by: Stefan Hajnoczi Link: https://lore.kernel.org/r/20200305170806.1313245-4-stefanha@redhat.com Message-Id: <20200305170806.1313245-4-stefanha@redhat.com> --- include/block/aio.h | 36 ++++++++++++++++++++++++++++++++++-- 1 file changed, 34 insertions(+), 2 deletions(-) (limited to 'include/block') diff --git a/include/block/aio.h b/include/block/aio.h index 9dd61cee7e..90e07d7507 100644 --- a/include/block/aio.h +++ b/include/block/aio.h @@ -52,6 +52,38 @@ struct ThreadPool; struct LinuxAioState; struct LuringState; +/* Callbacks for file descriptor monitoring implementations */ +typedef struct { + /* + * update: + * @ctx: the AioContext + * @node: the handler + * @is_new: is the file descriptor already being monitored? + * + * Add/remove/modify a monitored file descriptor. There are three cases: + * 1. node->pfd.events == 0 means remove the file descriptor. + * 2. !is_new means modify an already monitored file descriptor. + * 3. is_new means add a new file descriptor. + * + * Called with ctx->list_lock acquired. + */ + void (*update)(AioContext *ctx, AioHandler *node, bool is_new); + + /* + * wait: + * @ctx: the AioContext + * @ready_list: list for handlers that become ready + * @timeout: maximum duration to wait, in nanoseconds + * + * Wait for file descriptors to become ready and place them on ready_list. + * + * Called with ctx->list_lock incremented but not locked. + * + * Returns: number of ready file descriptors. + */ + int (*wait)(AioContext *ctx, AioHandlerList *ready_list, int64_t timeout); +} FDMonOps; + /* * Each aio_bh_poll() call carves off a slice of the BH list, so that newly * scheduled BHs are not processed until the next aio_bh_poll() call. 
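As a reading aid, the following is a minimal sketch of how core event-loop code is expected to call through the new ctx->fdmon_ops table, following the locking rules stated in the doc comments above. It is not part of the patch; the example_* wrappers are invented for illustration and assume QEMU's internal AioContext, AioHandler and QemuLockCnt definitions.

/* Hedged sketch (not from the patch): driving ctx->fdmon_ops per the doc
 * comments above.  The example_* names are invented for illustration. */
static void example_fd_update(AioContext *ctx, AioHandler *node, bool is_new)
{
    qemu_lockcnt_lock(&ctx->list_lock);        /* update() runs with the lock held */
    ctx->fdmon_ops->update(ctx, node, is_new); /* add, modify, or (events == 0) remove */
    qemu_lockcnt_unlock(&ctx->list_lock);
}

static int example_fd_wait(AioContext *ctx, AioHandlerList *ready_list,
                           int64_t timeout_ns)
{
    int nready;

    qemu_lockcnt_inc(&ctx->list_lock);         /* wait() runs with the count incremented */
    nready = ctx->fdmon_ops->wait(ctx, ready_list, timeout_ns);
    qemu_lockcnt_dec(&ctx->list_lock);
    return nready;
}
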
All @@ -173,8 +205,8 @@ struct AioContext { /* epoll(7) state used when built with CONFIG_EPOLL */ int epollfd; - bool epoll_enabled; - bool epoll_available; + + const FDMonOps *fdmon_ops; }; /** -- cgit v1.2.3-55-g7522 From b321051cf48ccc2d3d832af111d688f2282f089b Mon Sep 17 00:00:00 2001 From: Stefan Hajnoczi Date: Thu, 5 Mar 2020 17:08:03 +0000 Subject: aio-posix: simplify FDMonOps->update() prototype The AioHandler *node, bool is_new arguments are more complicated to think about than simply being given AioHandler *old_node, AioHandler *new_node. Furthermore, the new Linux io_uring file descriptor monitoring mechanism added by the new patch requires access to both the old and the new nodes. Make this change now in preparation. Signed-off-by: Stefan Hajnoczi Link: https://lore.kernel.org/r/20200305170806.1313245-5-stefanha@redhat.com Message-Id: <20200305170806.1313245-5-stefanha@redhat.com> --- include/block/aio.h | 13 ++++++------- util/aio-posix.c | 7 +------ util/fdmon-epoll.c | 21 ++++++++++++--------- util/fdmon-poll.c | 4 +++- 4 files changed, 22 insertions(+), 23 deletions(-) (limited to 'include/block') diff --git a/include/block/aio.h b/include/block/aio.h index 90e07d7507..bd76b08f1a 100644 --- a/include/block/aio.h +++ b/include/block/aio.h @@ -57,17 +57,16 @@ typedef struct { /* * update: * @ctx: the AioContext - * @node: the handler - * @is_new: is the file descriptor already being monitored? + * @old_node: the existing handler or NULL if this file descriptor is being + * monitored for the first time + * @new_node: the new handler or NULL if this file descriptor is being + * removed * - * Add/remove/modify a monitored file descriptor. There are three cases: - * 1. node->pfd.events == 0 means remove the file descriptor. - * 2. !is_new means modify an already monitored file descriptor. - * 3. is_new means add a new file descriptor. + * Add/remove/modify a monitored file descriptor. * * Called with ctx->list_lock acquired. */ - void (*update)(AioContext *ctx, AioHandler *node, bool is_new); + void (*update)(AioContext *ctx, AioHandler *old_node, AioHandler *new_node); /* * wait: diff --git a/util/aio-posix.c b/util/aio-posix.c index bc0b86547c..028b2abded 100644 --- a/util/aio-posix.c +++ b/util/aio-posix.c @@ -139,12 +139,7 @@ void aio_set_fd_handler(AioContext *ctx, atomic_set(&ctx->poll_disable_cnt, atomic_read(&ctx->poll_disable_cnt) + poll_disable_change); - if (new_node) { - ctx->fdmon_ops->update(ctx, new_node, is_new); - } else if (node) { - /* Unregister deleted fd_handler */ - ctx->fdmon_ops->update(ctx, node, false); - } + ctx->fdmon_ops->update(ctx, node, new_node); qemu_lockcnt_unlock(&ctx->list_lock); aio_notify(ctx); diff --git a/util/fdmon-epoll.c b/util/fdmon-epoll.c index 29c1454469..d56b69468b 100644 --- a/util/fdmon-epoll.c +++ b/util/fdmon-epoll.c @@ -30,21 +30,24 @@ static inline int epoll_events_from_pfd(int pfd_events) (pfd_events & G_IO_ERR ? EPOLLERR : 0); } -static void fdmon_epoll_update(AioContext *ctx, AioHandler *node, bool is_new) +static void fdmon_epoll_update(AioContext *ctx, + AioHandler *old_node, + AioHandler *new_node) { - struct epoll_event event; + struct epoll_event event = { + .data.ptr = new_node, + .events = new_node ? 
epoll_events_from_pfd(new_node->pfd.events) : 0, + }; int r; - int ctl; - if (!node->pfd.events) { - ctl = EPOLL_CTL_DEL; + if (!new_node) { + r = epoll_ctl(ctx->epollfd, EPOLL_CTL_DEL, old_node->pfd.fd, &event); + } else if (!old_node) { + r = epoll_ctl(ctx->epollfd, EPOLL_CTL_ADD, new_node->pfd.fd, &event); } else { - event.data.ptr = node; - event.events = epoll_events_from_pfd(node->pfd.events); - ctl = is_new ? EPOLL_CTL_ADD : EPOLL_CTL_MOD; + r = epoll_ctl(ctx->epollfd, EPOLL_CTL_MOD, new_node->pfd.fd, &event); } - r = epoll_ctl(ctx->epollfd, ctl, node->pfd.fd, &event); if (r) { fdmon_epoll_disable(ctx); } diff --git a/util/fdmon-poll.c b/util/fdmon-poll.c index 67992116b8..28114a0f39 100644 --- a/util/fdmon-poll.c +++ b/util/fdmon-poll.c @@ -93,7 +93,9 @@ static int fdmon_poll_wait(AioContext *ctx, AioHandlerList *ready_list, return ret; } -static void fdmon_poll_update(AioContext *ctx, AioHandler *node, bool is_new) +static void fdmon_poll_update(AioContext *ctx, + AioHandler *old_node, + AioHandler *new_node) { /* Do nothing, AioHandler already contains the state we'll need */ } -- cgit v1.2.3-55-g7522 From 73fd282e7b6dd4e4ea1c3bbb3d302c8db51e4ccf Mon Sep 17 00:00:00 2001 From: Stefan Hajnoczi Date: Thu, 5 Mar 2020 17:08:04 +0000 Subject: aio-posix: add io_uring fd monitoring implementation The recent Linux io_uring API has several advantages over ppoll(2) and epoll(2). Details are given in the source code. Add an io_uring implementation and make it the default on Linux. Performance is the same as with epoll(7) but later patches add optimizations that take advantage of io_uring. It is necessary to change how aio_set_fd_handler() deals with deleting AioHandlers since removing monitored file descriptors is asynchronous in io_uring. fdmon_io_uring_remove() marks the AioHandler deleted and aio_set_fd_handler() will let it handle deletion in that case. Signed-off-by: Stefan Hajnoczi Link: https://lore.kernel.org/r/20200305170806.1313245-6-stefanha@redhat.com Message-Id: <20200305170806.1313245-6-stefanha@redhat.com> --- configure | 5 + include/block/aio.h | 9 ++ util/Makefile.objs | 1 + util/aio-posix.c | 20 +++- util/aio-posix.h | 20 +++- util/fdmon-io_uring.c | 326 ++++++++++++++++++++++++++++++++++++++++++++++++++ 6 files changed, 376 insertions(+), 5 deletions(-) create mode 100644 util/fdmon-io_uring.c (limited to 'include/block') diff --git a/configure b/configure index cbf864bff1..3c7470096f 100755 --- a/configure +++ b/configure @@ -4093,6 +4093,11 @@ if test "$linux_io_uring" != "no" ; then linux_io_uring_cflags=$($pkg_config --cflags liburing) linux_io_uring_libs=$($pkg_config --libs liburing) linux_io_uring=yes + + # io_uring is used in libqemuutil.a where per-file -libs variables are not + # seen by programs linking the archive. It's not ideal, but just add the + # library dependency globally. + LIBS="$linux_io_uring_libs $LIBS" else if test "$linux_io_uring" = "yes" ; then feature_not_found "linux io_uring" "Install liburing devel" diff --git a/include/block/aio.h b/include/block/aio.h index bd76b08f1a..83fc9b844d 100644 --- a/include/block/aio.h +++ b/include/block/aio.h @@ -14,6 +14,9 @@ #ifndef QEMU_AIO_H #define QEMU_AIO_H +#ifdef CONFIG_LINUX_IO_URING +#include +#endif #include "qemu/queue.h" #include "qemu/event_notifier.h" #include "qemu/thread.h" @@ -96,6 +99,8 @@ struct BHListSlice { QSIMPLEQ_ENTRY(BHListSlice) next; }; +typedef QSLIST_HEAD(, AioHandler) AioHandlerSList; + struct AioContext { GSource source; @@ -181,6 +186,10 @@ struct AioContext { * locking. 
*/ struct LuringState *linux_io_uring; + + /* State for file descriptor monitoring using Linux io_uring */ + struct io_uring fdmon_io_uring; + AioHandlerSList submit_list; #endif /* TimerLists for calling timers - one per clock type. Has its own diff --git a/util/Makefile.objs b/util/Makefile.objs index 6439077a68..6718a38b61 100644 --- a/util/Makefile.objs +++ b/util/Makefile.objs @@ -7,6 +7,7 @@ util-obj-$(call lnot,$(CONFIG_ATOMIC64)) += atomic64.o util-obj-$(CONFIG_POSIX) += aio-posix.o util-obj-$(CONFIG_POSIX) += fdmon-poll.o util-obj-$(CONFIG_EPOLL_CREATE1) += fdmon-epoll.o +util-obj-$(CONFIG_LINUX_IO_URING) += fdmon-io_uring.o util-obj-$(CONFIG_POSIX) += compatfd.o util-obj-$(CONFIG_POSIX) += event_notifier-posix.o util-obj-$(CONFIG_POSIX) += mmap-alloc.o diff --git a/util/aio-posix.c b/util/aio-posix.c index 028b2abded..ffd9cc381b 100644 --- a/util/aio-posix.c +++ b/util/aio-posix.c @@ -57,10 +57,16 @@ static bool aio_remove_fd_handler(AioContext *ctx, AioHandler *node) g_source_remove_poll(&ctx->source, &node->pfd); } + node->pfd.revents = 0; + + /* If the fd monitor has already marked it deleted, leave it alone */ + if (QLIST_IS_INSERTED(node, node_deleted)) { + return false; + } + /* If a read is in progress, just mark the node as deleted */ if (qemu_lockcnt_count(&ctx->list_lock)) { QLIST_INSERT_HEAD_RCU(&ctx->deleted_aio_handlers, node, node_deleted); - node->pfd.revents = 0; return false; } /* Otherwise, delete it for real. We can't just mark it as @@ -126,9 +132,6 @@ void aio_set_fd_handler(AioContext *ctx, QLIST_INSERT_HEAD_RCU(&ctx->aio_handlers, new_node, node); } - if (node) { - deleted = aio_remove_fd_handler(ctx, node); - } /* No need to order poll_disable_cnt writes against other updates; * the counter is only used to avoid wasting time and latency on @@ -140,6 +143,9 @@ void aio_set_fd_handler(AioContext *ctx, atomic_read(&ctx->poll_disable_cnt) + poll_disable_change); ctx->fdmon_ops->update(ctx, node, new_node); + if (node) { + deleted = aio_remove_fd_handler(ctx, node); + } qemu_lockcnt_unlock(&ctx->list_lock); aio_notify(ctx); @@ -565,11 +571,17 @@ void aio_context_setup(AioContext *ctx) ctx->fdmon_ops = &fdmon_poll_ops; ctx->epollfd = -1; + /* Use the fastest fd monitoring implementation if available */ + if (fdmon_io_uring_setup(ctx)) { + return; + } + fdmon_epoll_setup(ctx); } void aio_context_destroy(AioContext *ctx) { + fdmon_io_uring_destroy(ctx); fdmon_epoll_disable(ctx); } diff --git a/util/aio-posix.h b/util/aio-posix.h index 97899d0fbc..55fc771327 100644 --- a/util/aio-posix.h +++ b/util/aio-posix.h @@ -27,10 +27,14 @@ struct AioHandler { IOHandler *io_poll_begin; IOHandler *io_poll_end; void *opaque; - bool is_external; QLIST_ENTRY(AioHandler) node; QLIST_ENTRY(AioHandler) node_ready; /* only used during aio_poll() */ QLIST_ENTRY(AioHandler) node_deleted; +#ifdef CONFIG_LINUX_IO_URING + QSLIST_ENTRY(AioHandler) node_submitted; + unsigned flags; /* see fdmon-io_uring.c */ +#endif + bool is_external; }; /* Add a handler to a ready list */ @@ -58,4 +62,18 @@ static inline void fdmon_epoll_disable(AioContext *ctx) } #endif /* !CONFIG_EPOLL_CREATE1 */ +#ifdef CONFIG_LINUX_IO_URING +bool fdmon_io_uring_setup(AioContext *ctx); +void fdmon_io_uring_destroy(AioContext *ctx); +#else +static inline bool fdmon_io_uring_setup(AioContext *ctx) +{ + return false; +} + +static inline void fdmon_io_uring_destroy(AioContext *ctx) +{ +} +#endif /* !CONFIG_LINUX_IO_URING */ + #endif /* AIO_POSIX_H */ diff --git a/util/fdmon-io_uring.c b/util/fdmon-io_uring.c new file mode 
100644 index 0000000000..fb99b4b61e --- /dev/null +++ b/util/fdmon-io_uring.c @@ -0,0 +1,326 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Linux io_uring file descriptor monitoring + * + * The Linux io_uring API supports file descriptor monitoring with a few + * advantages over existing APIs like poll(2) and epoll(7): + * + * 1. Userspace polling of events is possible because the completion queue (cq + * ring) is shared between the kernel and userspace. This allows + * applications that rely on userspace polling to also monitor file + * descriptors in the same userspace polling loop. + * + * 2. Submission and completion is batched and done together in a single system + * call. This minimizes the number of system calls. + * + * 3. File descriptor monitoring is O(1) like epoll(7) so it scales better than + * poll(2). + * + * 4. Nanosecond timeouts are supported so it requires fewer syscalls than + * epoll(7). + * + * This code only monitors file descriptors and does not do asynchronous disk + * I/O. Implementing disk I/O efficiently has other requirements and should + * use a separate io_uring so it does not make sense to unify the code. + * + * File descriptor monitoring is implemented using the following operations: + * + * 1. IORING_OP_POLL_ADD - adds a file descriptor to be monitored. + * 2. IORING_OP_POLL_REMOVE - removes a file descriptor being monitored. When + * the poll mask changes for a file descriptor it is first removed and then + * re-added with the new poll mask, so this operation is also used as part + * of modifying an existing monitored file descriptor. + * 3. IORING_OP_TIMEOUT - added every time a blocking syscall is made to wait + * for events. This operation self-cancels if another event completes + * before the timeout. + * + * io_uring calls the submission queue the "sq ring" and the completion queue + * the "cq ring". Ring entries are called "sqe" and "cqe", respectively. + * + * The code is structured so that sq/cq rings are only modified within + * fdmon_io_uring_wait(). Changes to AioHandlers are made by enqueuing them on + * ctx->submit_list so that fdmon_io_uring_wait() can submit IORING_OP_POLL_ADD + * and/or IORING_OP_POLL_REMOVE sqes for them. + */ + +#include "qemu/osdep.h" +#include +#include "qemu/rcu_queue.h" +#include "aio-posix.h" + +enum { + FDMON_IO_URING_ENTRIES = 128, /* sq/cq ring size */ + + /* AioHandler::flags */ + FDMON_IO_URING_PENDING = (1 << 0), + FDMON_IO_URING_ADD = (1 << 1), + FDMON_IO_URING_REMOVE = (1 << 2), +}; + +static inline int poll_events_from_pfd(int pfd_events) +{ + return (pfd_events & G_IO_IN ? POLLIN : 0) | + (pfd_events & G_IO_OUT ? POLLOUT : 0) | + (pfd_events & G_IO_HUP ? POLLHUP : 0) | + (pfd_events & G_IO_ERR ? POLLERR : 0); +} + +static inline int pfd_events_from_poll(int poll_events) +{ + return (poll_events & POLLIN ? G_IO_IN : 0) | + (poll_events & POLLOUT ? G_IO_OUT : 0) | + (poll_events & POLLHUP ? G_IO_HUP : 0) | + (poll_events & POLLERR ? G_IO_ERR : 0); +} + +/* + * Returns an sqe for submitting a request. Only be called within + * fdmon_io_uring_wait(). 
+ */ +static struct io_uring_sqe *get_sqe(AioContext *ctx) +{ + struct io_uring *ring = &ctx->fdmon_io_uring; + struct io_uring_sqe *sqe = io_uring_get_sqe(ring); + int ret; + + if (likely(sqe)) { + return sqe; + } + + /* No free sqes left, submit pending sqes first */ + ret = io_uring_submit(ring); + assert(ret > 1); + sqe = io_uring_get_sqe(ring); + assert(sqe); + return sqe; +} + +/* Atomically enqueue an AioHandler for sq ring submission */ +static void enqueue(AioHandlerSList *head, AioHandler *node, unsigned flags) +{ + unsigned old_flags; + + old_flags = atomic_fetch_or(&node->flags, FDMON_IO_URING_PENDING | flags); + if (!(old_flags & FDMON_IO_URING_PENDING)) { + QSLIST_INSERT_HEAD_ATOMIC(head, node, node_submitted); + } +} + +/* Dequeue an AioHandler for sq ring submission. Called by fill_sq_ring(). */ +static AioHandler *dequeue(AioHandlerSList *head, unsigned *flags) +{ + AioHandler *node = QSLIST_FIRST(head); + + if (!node) { + return NULL; + } + + /* Doesn't need to be atomic since fill_sq_ring() moves the list */ + QSLIST_REMOVE_HEAD(head, node_submitted); + + /* + * Don't clear FDMON_IO_URING_REMOVE. It's sticky so it can serve two + * purposes: telling fill_sq_ring() to submit IORING_OP_POLL_REMOVE and + * telling process_cqe() to delete the AioHandler when its + * IORING_OP_POLL_ADD completes. + */ + *flags = atomic_fetch_and(&node->flags, ~(FDMON_IO_URING_PENDING | + FDMON_IO_URING_ADD)); + return node; +} + +static void fdmon_io_uring_update(AioContext *ctx, + AioHandler *old_node, + AioHandler *new_node) +{ + if (new_node) { + enqueue(&ctx->submit_list, new_node, FDMON_IO_URING_ADD); + } + + if (old_node) { + /* + * Deletion is tricky because IORING_OP_POLL_ADD and + * IORING_OP_POLL_REMOVE are async. We need to wait for the original + * IORING_OP_POLL_ADD to complete before this handler can be freed + * safely. + * + * It's possible that the file descriptor becomes ready and the + * IORING_OP_POLL_ADD cqe is enqueued before IORING_OP_POLL_REMOVE is + * submitted, too. + * + * Mark this handler deleted right now but don't place it on + * ctx->deleted_aio_handlers yet. Instead, manually fudge the list + * entry to make QLIST_IS_INSERTED() think this handler has been + * inserted and other code recognizes this AioHandler as deleted. + * + * Once the original IORING_OP_POLL_ADD completes we enqueue the + * handler on the real ctx->deleted_aio_handlers list to be freed. 
+ */ + assert(!QLIST_IS_INSERTED(old_node, node_deleted)); + old_node->node_deleted.le_prev = &old_node->node_deleted.le_next; + + enqueue(&ctx->submit_list, old_node, FDMON_IO_URING_REMOVE); + } +} + +static void add_poll_add_sqe(AioContext *ctx, AioHandler *node) +{ + struct io_uring_sqe *sqe = get_sqe(ctx); + int events = poll_events_from_pfd(node->pfd.events); + + io_uring_prep_poll_add(sqe, node->pfd.fd, events); + io_uring_sqe_set_data(sqe, node); +} + +static void add_poll_remove_sqe(AioContext *ctx, AioHandler *node) +{ + struct io_uring_sqe *sqe = get_sqe(ctx); + + io_uring_prep_poll_remove(sqe, node); +} + +/* Add a timeout that self-cancels when another cqe becomes ready */ +static void add_timeout_sqe(AioContext *ctx, int64_t ns) +{ + struct io_uring_sqe *sqe; + struct __kernel_timespec ts = { + .tv_sec = ns / NANOSECONDS_PER_SECOND, + .tv_nsec = ns % NANOSECONDS_PER_SECOND, + }; + + sqe = get_sqe(ctx); + io_uring_prep_timeout(sqe, &ts, 1, 0); +} + +/* Add sqes from ctx->submit_list for submission */ +static void fill_sq_ring(AioContext *ctx) +{ + AioHandlerSList submit_list; + AioHandler *node; + unsigned flags; + + QSLIST_MOVE_ATOMIC(&submit_list, &ctx->submit_list); + + while ((node = dequeue(&submit_list, &flags))) { + /* Order matters, just in case both flags were set */ + if (flags & FDMON_IO_URING_ADD) { + add_poll_add_sqe(ctx, node); + } + if (flags & FDMON_IO_URING_REMOVE) { + add_poll_remove_sqe(ctx, node); + } + } +} + +/* Returns true if a handler became ready */ +static bool process_cqe(AioContext *ctx, + AioHandlerList *ready_list, + struct io_uring_cqe *cqe) +{ + AioHandler *node = io_uring_cqe_get_data(cqe); + unsigned flags; + + /* poll_timeout and poll_remove have a zero user_data field */ + if (!node) { + return false; + } + + /* + * Deletion can only happen when IORING_OP_POLL_ADD completes. If we race + * with enqueue() here then we can safely clear the FDMON_IO_URING_REMOVE + * bit before IORING_OP_POLL_REMOVE is submitted. 
+ */ + flags = atomic_fetch_and(&node->flags, ~FDMON_IO_URING_REMOVE); + if (flags & FDMON_IO_URING_REMOVE) { + QLIST_INSERT_HEAD_RCU(&ctx->deleted_aio_handlers, node, node_deleted); + return false; + } + + aio_add_ready_handler(ready_list, node, pfd_events_from_poll(cqe->res)); + + /* IORING_OP_POLL_ADD is one-shot so we must re-arm it */ + add_poll_add_sqe(ctx, node); + return true; +} + +static int process_cq_ring(AioContext *ctx, AioHandlerList *ready_list) +{ + struct io_uring *ring = &ctx->fdmon_io_uring; + struct io_uring_cqe *cqe; + unsigned num_cqes = 0; + unsigned num_ready = 0; + unsigned head; + + io_uring_for_each_cqe(ring, head, cqe) { + if (process_cqe(ctx, ready_list, cqe)) { + num_ready++; + } + + num_cqes++; + } + + io_uring_cq_advance(ring, num_cqes); + return num_ready; +} + +static int fdmon_io_uring_wait(AioContext *ctx, AioHandlerList *ready_list, + int64_t timeout) +{ + unsigned wait_nr = 1; /* block until at least one cqe is ready */ + int ret; + + /* Fall back while external clients are disabled */ + if (atomic_read(&ctx->external_disable_cnt)) { + return fdmon_poll_ops.wait(ctx, ready_list, timeout); + } + + if (timeout == 0) { + wait_nr = 0; /* non-blocking */ + } else if (timeout > 0) { + add_timeout_sqe(ctx, timeout); + } + + fill_sq_ring(ctx); + + ret = io_uring_submit_and_wait(&ctx->fdmon_io_uring, wait_nr); + assert(ret >= 0); + + return process_cq_ring(ctx, ready_list); +} + +static const FDMonOps fdmon_io_uring_ops = { + .update = fdmon_io_uring_update, + .wait = fdmon_io_uring_wait, +}; + +bool fdmon_io_uring_setup(AioContext *ctx) +{ + int ret; + + ret = io_uring_queue_init(FDMON_IO_URING_ENTRIES, &ctx->fdmon_io_uring, 0); + if (ret != 0) { + return false; + } + + QSLIST_INIT(&ctx->submit_list); + ctx->fdmon_ops = &fdmon_io_uring_ops; + return true; +} + +void fdmon_io_uring_destroy(AioContext *ctx) +{ + if (ctx->fdmon_ops == &fdmon_io_uring_ops) { + AioHandler *node; + + io_uring_queue_exit(&ctx->fdmon_io_uring); + + /* No need to submit these anymore, just free them. */ + while ((node = QSLIST_FIRST_RCU(&ctx->submit_list))) { + QSLIST_REMOVE_HEAD_RCU(&ctx->submit_list, node_submitted); + QLIST_REMOVE(node, node); + g_free(node); + } + + ctx->fdmon_ops = &fdmon_poll_ops; + } +} -- cgit v1.2.3-55-g7522 From aa38e19f05c3a5ae64dff84f44e1aa31281a5b14 Mon Sep 17 00:00:00 2001 From: Stefan Hajnoczi Date: Thu, 5 Mar 2020 17:08:05 +0000 Subject: aio-posix: support userspace polling of fd monitoring Unlike ppoll(2) and epoll(7), Linux io_uring completions can be polled from userspace. Previously userspace polling was only allowed when all AioHandler's had an ->io_poll() callback. This prevented starvation of fds by userspace pollable handlers. Add the FDMonOps->need_wait() callback that enables userspace polling even when some AioHandlers lack ->io_poll(). For example, it's now possible to do userspace polling when a TCP/IP socket is monitored thanks to Linux io_uring. 
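The property described above, that completions are visible to userspace through the shared cq ring, can be demonstrated outside QEMU with a small standalone liburing program. This is only an illustration of the mechanism, not code from the series; it assumes liburing is installed and builds with "cc demo.c -luring".

#include <liburing.h>
#include <sys/eventfd.h>
#include <poll.h>
#include <stdio.h>

int main(void)
{
    struct io_uring ring;
    struct io_uring_sqe *sqe;
    struct io_uring_cqe *cqe;
    int efd = eventfd(0, 0);
    int ret = io_uring_queue_init(8, &ring, 0);

    if (efd < 0 || ret < 0) {
        return 1;
    }

    /* One-shot poll request, like fdmon-io_uring's IORING_OP_POLL_ADD */
    sqe = io_uring_get_sqe(&ring);
    io_uring_prep_poll_add(sqe, efd, POLLIN);
    io_uring_sqe_set_data(sqe, &efd);
    io_uring_submit(&ring);

    /* Reading the shared cq ring needs no syscall; this is what makes a
     * need_wait() based on io_uring_cq_ready() usable from a polling loop */
    printf("cqes ready before the event: %u\n", io_uring_cq_ready(&ring));

    eventfd_write(efd, 1);               /* make efd readable */
    io_uring_submit_and_wait(&ring, 1);  /* block until at least one cqe */

    if (io_uring_peek_cqe(&ring, &cqe) == 0) {
        printf("revents 0x%x for handler %p\n",
               (unsigned)cqe->res, io_uring_cqe_get_data(cqe));
        io_uring_cqe_seen(&ring, cqe);
    }

    io_uring_queue_exit(&ring);
    return 0;
}
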
Signed-off-by: Stefan Hajnoczi Link: https://lore.kernel.org/r/20200305170806.1313245-7-stefanha@redhat.com Message-Id: <20200305170806.1313245-7-stefanha@redhat.com> --- include/block/aio.h | 19 +++++++++++++++++++ util/aio-posix.c | 11 ++++++++--- util/fdmon-epoll.c | 1 + util/fdmon-io_uring.c | 6 ++++++ util/fdmon-poll.c | 1 + 5 files changed, 35 insertions(+), 3 deletions(-) (limited to 'include/block') diff --git a/include/block/aio.h b/include/block/aio.h index 83fc9b844d..f07ebb76b8 100644 --- a/include/block/aio.h +++ b/include/block/aio.h @@ -55,6 +55,9 @@ struct ThreadPool; struct LinuxAioState; struct LuringState; +/* Is polling disabled? */ +bool aio_poll_disabled(AioContext *ctx); + /* Callbacks for file descriptor monitoring implementations */ typedef struct { /* @@ -84,6 +87,22 @@ typedef struct { * Returns: number of ready file descriptors. */ int (*wait)(AioContext *ctx, AioHandlerList *ready_list, int64_t timeout); + + /* + * need_wait: + * @ctx: the AioContext + * + * Tell aio_poll() when to stop userspace polling early because ->wait() + * has fds ready. + * + * File descriptor monitoring implementations that cannot poll fd readiness + * from userspace should use aio_poll_disabled() here. This ensures that + * file descriptors are not starved by handlers that frequently make + * progress via userspace polling. + * + * Returns: true if ->wait() should be called, false otherwise. + */ + bool (*need_wait)(AioContext *ctx); } FDMonOps; /* diff --git a/util/aio-posix.c b/util/aio-posix.c index ffd9cc381b..759989b45b 100644 --- a/util/aio-posix.c +++ b/util/aio-posix.c @@ -22,6 +22,11 @@ #include "trace.h" #include "aio-posix.h" +bool aio_poll_disabled(AioContext *ctx) +{ + return atomic_read(&ctx->poll_disable_cnt); +} + void aio_add_ready_handler(AioHandlerList *ready_list, AioHandler *node, int revents) @@ -423,7 +428,7 @@ static bool run_poll_handlers(AioContext *ctx, int64_t max_ns, int64_t *timeout) elapsed_time = qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - start_time; max_ns = qemu_soonest_timeout(*timeout, max_ns); assert(!(max_ns && progress)); - } while (elapsed_time < max_ns && !atomic_read(&ctx->poll_disable_cnt)); + } while (elapsed_time < max_ns && !ctx->fdmon_ops->need_wait(ctx)); /* If time has passed with no successful polling, adjust *timeout to * keep the same ending time. @@ -451,7 +456,7 @@ static bool try_poll_mode(AioContext *ctx, int64_t *timeout) { int64_t max_ns = qemu_soonest_timeout(*timeout, ctx->poll_ns); - if (max_ns && !atomic_read(&ctx->poll_disable_cnt)) { + if (max_ns && !ctx->fdmon_ops->need_wait(ctx)) { poll_set_started(ctx, true); if (run_poll_handlers(ctx, max_ns, timeout)) { @@ -501,7 +506,7 @@ bool aio_poll(AioContext *ctx, bool blocking) /* If polling is allowed, non-blocking aio_poll does not need the * system call---a single round of run_poll_handlers_once suffices. 
*/ - if (timeout || atomic_read(&ctx->poll_disable_cnt)) { + if (timeout || ctx->fdmon_ops->need_wait(ctx)) { ret = ctx->fdmon_ops->wait(ctx, &ready_list, timeout); } diff --git a/util/fdmon-epoll.c b/util/fdmon-epoll.c index d56b69468b..fcd989d47d 100644 --- a/util/fdmon-epoll.c +++ b/util/fdmon-epoll.c @@ -100,6 +100,7 @@ out: static const FDMonOps fdmon_epoll_ops = { .update = fdmon_epoll_update, .wait = fdmon_epoll_wait, + .need_wait = aio_poll_disabled, }; static bool fdmon_epoll_try_enable(AioContext *ctx) diff --git a/util/fdmon-io_uring.c b/util/fdmon-io_uring.c index fb99b4b61e..893b79b622 100644 --- a/util/fdmon-io_uring.c +++ b/util/fdmon-io_uring.c @@ -288,9 +288,15 @@ static int fdmon_io_uring_wait(AioContext *ctx, AioHandlerList *ready_list, return process_cq_ring(ctx, ready_list); } +static bool fdmon_io_uring_need_wait(AioContext *ctx) +{ + return io_uring_cq_ready(&ctx->fdmon_io_uring); +} + static const FDMonOps fdmon_io_uring_ops = { .update = fdmon_io_uring_update, .wait = fdmon_io_uring_wait, + .need_wait = fdmon_io_uring_need_wait, }; bool fdmon_io_uring_setup(AioContext *ctx) diff --git a/util/fdmon-poll.c b/util/fdmon-poll.c index 28114a0f39..488067b679 100644 --- a/util/fdmon-poll.c +++ b/util/fdmon-poll.c @@ -103,4 +103,5 @@ static void fdmon_poll_update(AioContext *ctx, const FDMonOps fdmon_poll_ops = { .update = fdmon_poll_update, .wait = fdmon_poll_wait, + .need_wait = aio_poll_disabled, }; -- cgit v1.2.3-55-g7522 From d37d0e365afb6825a90d8356fc6adcc1f58f40f3 Mon Sep 17 00:00:00 2001 From: Stefan Hajnoczi Date: Thu, 5 Mar 2020 17:08:06 +0000 Subject: aio-posix: remove idle poll handlers to improve scalability When there are many poll handlers it's likely that some of them are idle most of the time. Remove handlers that haven't had activity recently so that the polling loop scales better for guests with a large number of devices. This feature only takes effect for the Linux io_uring fd monitoring implementation because it is capable of combining fd monitoring with userspace polling. The other implementations can't do that and risk starving fds in favor of poll handlers, so don't try this optimization when they are in use. IOPS improves from 10k to 105k when the guest has 100 virtio-blk-pci,num-queues=32 devices and 1 virtio-blk-pci,num-queues=1 device for rw=randread,iodepth=1,bs=4k,ioengine=libaio on NVMe. [Clarified aio_poll_handlers locking discipline explanation in comment after discussion with Paolo Bonzini . --Stefan] Signed-off-by: Stefan Hajnoczi Link: https://lore.kernel.org/r/20200305170806.1313245-8-stefanha@redhat.com Message-Id: <20200305170806.1313245-8-stefanha@redhat.com> --- include/block/aio.h | 8 +++++ util/aio-posix.c | 93 +++++++++++++++++++++++++++++++++++++++++++++++++---- util/aio-posix.h | 2 ++ util/trace-events | 2 ++ 4 files changed, 98 insertions(+), 7 deletions(-) (limited to 'include/block') diff --git a/include/block/aio.h b/include/block/aio.h index f07ebb76b8..cb1989105a 100644 --- a/include/block/aio.h +++ b/include/block/aio.h @@ -227,6 +227,14 @@ struct AioContext { int64_t poll_grow; /* polling time growth factor */ int64_t poll_shrink; /* polling time shrink factor */ + /* + * List of handlers participating in userspace polling. Protected by + * ctx->list_lock. Iterated and modified mostly by the event loop thread + * from aio_poll() with ctx->list_lock incremented. aio_set_fd_handler() + * only touches the list to delete nodes if ctx->list_lock's count is zero. 
+ */ + AioHandlerList poll_aio_handlers; + /* Are we in polling mode or monitoring file descriptors? */ bool poll_started; diff --git a/util/aio-posix.c b/util/aio-posix.c index 759989b45b..cd6cf0a4a9 100644 --- a/util/aio-posix.c +++ b/util/aio-posix.c @@ -22,6 +22,9 @@ #include "trace.h" #include "aio-posix.h" +/* Stop userspace polling on a handler if it isn't active for some time */ +#define POLL_IDLE_INTERVAL_NS (7 * NANOSECONDS_PER_SECOND) + bool aio_poll_disabled(AioContext *ctx) { return atomic_read(&ctx->poll_disable_cnt); @@ -78,6 +81,7 @@ static bool aio_remove_fd_handler(AioContext *ctx, AioHandler *node) * deleted because deleted nodes are only cleaned up while * no one is walking the handlers list. */ + QLIST_SAFE_REMOVE(node, node_poll); QLIST_REMOVE(node, node); return true; } @@ -205,7 +209,7 @@ static bool poll_set_started(AioContext *ctx, bool started) ctx->poll_started = started; qemu_lockcnt_inc(&ctx->list_lock); - QLIST_FOREACH_RCU(node, &ctx->aio_handlers, node) { + QLIST_FOREACH(node, &ctx->poll_aio_handlers, node_poll) { IOHandler *fn; if (QLIST_IS_INSERTED(node, node_deleted)) { @@ -286,6 +290,7 @@ static void aio_free_deleted_handlers(AioContext *ctx) while ((node = QLIST_FIRST_RCU(&ctx->deleted_aio_handlers))) { QLIST_REMOVE(node, node); QLIST_REMOVE(node, node_deleted); + QLIST_SAFE_REMOVE(node, node_poll); g_free(node); } @@ -300,6 +305,22 @@ static bool aio_dispatch_handler(AioContext *ctx, AioHandler *node) revents = node->pfd.revents & node->pfd.events; node->pfd.revents = 0; + /* + * Start polling AioHandlers when they become ready because activity is + * likely to continue. Note that starvation is theoretically possible when + * fdmon_supports_polling(), but only until the fd fires for the first + * time. + */ + if (!QLIST_IS_INSERTED(node, node_deleted) && + !QLIST_IS_INSERTED(node, node_poll) && + node->io_poll) { + trace_poll_add(ctx, node, node->pfd.fd, revents); + if (ctx->poll_started && node->io_poll_begin) { + node->io_poll_begin(node->opaque); + } + QLIST_INSERT_HEAD(&ctx->poll_aio_handlers, node, node_poll); + } + if (!QLIST_IS_INSERTED(node, node_deleted) && (revents & (G_IO_IN | G_IO_HUP | G_IO_ERR)) && aio_node_check(ctx, node->is_external) && @@ -364,15 +385,19 @@ void aio_dispatch(AioContext *ctx) timerlistgroup_run_timers(&ctx->tlg); } -static bool run_poll_handlers_once(AioContext *ctx, int64_t *timeout) +static bool run_poll_handlers_once(AioContext *ctx, + int64_t now, + int64_t *timeout) { bool progress = false; AioHandler *node; + AioHandler *tmp; - QLIST_FOREACH_RCU(node, &ctx->aio_handlers, node) { - if (!QLIST_IS_INSERTED(node, node_deleted) && node->io_poll && - aio_node_check(ctx, node->is_external) && + QLIST_FOREACH_SAFE(node, &ctx->poll_aio_handlers, node_poll, tmp) { + if (aio_node_check(ctx, node->is_external) && node->io_poll(node->opaque)) { + node->poll_idle_timeout = now + POLL_IDLE_INTERVAL_NS; + /* * Polling was successful, exit try_poll_mode immediately * to adjust the next polling time. 
@@ -389,6 +414,50 @@ static bool run_poll_handlers_once(AioContext *ctx, int64_t *timeout) return progress; } +static bool fdmon_supports_polling(AioContext *ctx) +{ + return ctx->fdmon_ops->need_wait != aio_poll_disabled; +} + +static bool remove_idle_poll_handlers(AioContext *ctx, int64_t now) +{ + AioHandler *node; + AioHandler *tmp; + bool progress = false; + + /* + * File descriptor monitoring implementations without userspace polling + * support suffer from starvation when a subset of handlers is polled + * because fds will not be processed in a timely fashion. Don't remove + * idle poll handlers. + */ + if (!fdmon_supports_polling(ctx)) { + return false; + } + + QLIST_FOREACH_SAFE(node, &ctx->poll_aio_handlers, node_poll, tmp) { + if (node->poll_idle_timeout == 0LL) { + node->poll_idle_timeout = now + POLL_IDLE_INTERVAL_NS; + } else if (now >= node->poll_idle_timeout) { + trace_poll_remove(ctx, node, node->pfd.fd); + node->poll_idle_timeout = 0LL; + QLIST_SAFE_REMOVE(node, node_poll); + if (ctx->poll_started && node->io_poll_end) { + node->io_poll_end(node->opaque); + + /* + * Final poll in case ->io_poll_end() races with an event. + * Nevermind about re-adding the handler in the rare case where + * this causes progress. + */ + progress = node->io_poll(node->opaque) || progress; + } + } + } + + return progress; +} + /* run_poll_handlers: * @ctx: the AioContext * @max_ns: maximum time to poll for, in nanoseconds @@ -424,12 +493,17 @@ static bool run_poll_handlers(AioContext *ctx, int64_t max_ns, int64_t *timeout) start_time = qemu_clock_get_ns(QEMU_CLOCK_REALTIME); do { - progress = run_poll_handlers_once(ctx, timeout); + progress = run_poll_handlers_once(ctx, start_time, timeout); elapsed_time = qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - start_time; max_ns = qemu_soonest_timeout(*timeout, max_ns); assert(!(max_ns && progress)); } while (elapsed_time < max_ns && !ctx->fdmon_ops->need_wait(ctx)); + if (remove_idle_poll_handlers(ctx, start_time + elapsed_time)) { + *timeout = 0; + progress = true; + } + /* If time has passed with no successful polling, adjust *timeout to * keep the same ending time. 
*/ @@ -454,8 +528,13 @@ static bool run_poll_handlers(AioContext *ctx, int64_t max_ns, int64_t *timeout) */ static bool try_poll_mode(AioContext *ctx, int64_t *timeout) { - int64_t max_ns = qemu_soonest_timeout(*timeout, ctx->poll_ns); + int64_t max_ns; + + if (QLIST_EMPTY_RCU(&ctx->poll_aio_handlers)) { + return false; + } + max_ns = qemu_soonest_timeout(*timeout, ctx->poll_ns); if (max_ns && !ctx->fdmon_ops->need_wait(ctx)) { poll_set_started(ctx, true); diff --git a/util/aio-posix.h b/util/aio-posix.h index 55fc771327..c80c04506a 100644 --- a/util/aio-posix.h +++ b/util/aio-posix.h @@ -30,10 +30,12 @@ struct AioHandler { QLIST_ENTRY(AioHandler) node; QLIST_ENTRY(AioHandler) node_ready; /* only used during aio_poll() */ QLIST_ENTRY(AioHandler) node_deleted; + QLIST_ENTRY(AioHandler) node_poll; #ifdef CONFIG_LINUX_IO_URING QSLIST_ENTRY(AioHandler) node_submitted; unsigned flags; /* see fdmon-io_uring.c */ #endif + int64_t poll_idle_timeout; /* when to stop userspace polling */ bool is_external; }; diff --git a/util/trace-events b/util/trace-events index 83b6639018..0ce42822eb 100644 --- a/util/trace-events +++ b/util/trace-events @@ -5,6 +5,8 @@ run_poll_handlers_begin(void *ctx, int64_t max_ns, int64_t timeout) "ctx %p max_ run_poll_handlers_end(void *ctx, bool progress, int64_t timeout) "ctx %p progress %d new timeout %"PRId64 poll_shrink(void *ctx, int64_t old, int64_t new) "ctx %p old %"PRId64" new %"PRId64 poll_grow(void *ctx, int64_t old, int64_t new) "ctx %p old %"PRId64" new %"PRId64 +poll_add(void *ctx, void *node, int fd, unsigned revents) "ctx %p node %p fd %d revents 0x%x" +poll_remove(void *ctx, void *node, int fd) "ctx %p node %p fd %d" # async.c aio_co_schedule(void *ctx, void *co) "ctx %p co %p" -- cgit v1.2.3-55-g7522
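
To tie the series together, here is a hedged sketch of the caller-facing side: a backend registers a file descriptor and an optional io_poll() callback with aio_set_fd_handler(), the registration reaches ctx->fdmon_ops->update(), and with the io_uring implementation the handler also participates in userspace polling until it has been idle for POLL_IDLE_INTERVAL_NS. The my_* callbacks are invented for illustration, and the aio_set_fd_handler() signature shown is the one from this era of QEMU (it has since changed).

/* Hedged sketch, not from the series: registering a pollable fd handler. */
static void my_fd_read(void *opaque)
{
    /* called from the event loop when the fd is readable */
}

static bool my_fd_poll(void *opaque)
{
    /* userspace polling hook: return true if work was found without blocking */
    return false;
}

static void watch_fd(AioContext *ctx, int fd, void *opaque)
{
    /* forwarded to ctx->fdmon_ops->update(ctx, NULL, new_node) internally */
    aio_set_fd_handler(ctx, fd, true /* is_external */,
                       my_fd_read, NULL /* io_write */, my_fd_poll, opaque);
}

static void unwatch_fd(AioContext *ctx, int fd)
{
    /* all-NULL handlers remove the fd: fdmon_ops->update(ctx, old_node, NULL) */
    aio_set_fd_handler(ctx, fd, true, NULL, NULL, NULL, NULL);
}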