author:    Stefan Hajnoczi, 2020-02-21 10:39:51 +0100
committer: Stefan Hajnoczi, 2020-02-22 09:26:47 +0100
commit:    8c6b0356b53977bcfdea5299db07884915425b0c (patch)
tree:      d309eb193c9a185cd2c9a58bcc769ffb73b108c7 /include/block
parent:    rcu_queue: add QSLIST functions (diff)
util/async: make aio_bh_poll() O(1)
The ctx->first_bh list contains all created BHs, including those that
are not scheduled. The event loop iterates over this list, so the cost
is O(n) with respect to the number of created BHs.
Rewrite the BH implementation so that only scheduled or deleted BHs are
enqueued, and only BHs that actually require action are iterated.
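The paragraph above implies a flag-based enqueue on the scheduling side. A
minimal sketch of that idea follows; this page only shows the
include/block/aio.h hunk, so the bh->flags field, the flag names, and the
aio_bh_enqueue() helper are assumptions about the util/async.c side of the
patch:

```c
/*
 * Illustrative sketch only: flag names and the aio_bh_enqueue() helper
 * are assumptions; the hunk shown on this page covers the header only.
 */
enum {
    BH_PENDING   = (1 << 0), /* on ctx->bh_list, awaiting aio_bh_poll() */
    BH_SCHEDULED = (1 << 1), /* qemu_bh_schedule() was called */
    BH_DELETED   = (1 << 2), /* qemu_bh_delete() was called */
};

static void aio_bh_enqueue(QEMUBH *bh, unsigned new_flags)
{
    AioContext *ctx = bh->ctx;
    unsigned old_flags;

    /*
     * Set the new flags and mark the BH pending in one atomic step.
     * Enqueue only on the 0 -> BH_PENDING transition, so each BH sits
     * on ctx->bh_list at most once no matter how often it is scheduled.
     */
    old_flags = atomic_fetch_or(&bh->flags, BH_PENDING | new_flags);
    if (!(old_flags & BH_PENDING)) {
        QSLIST_INSERT_HEAD_ATOMIC(&ctx->bh_list, bh, next);
    }

    /* Wake the event loop; this is the aio_notify() discussed below. */
    aio_notify(ctx);
}
```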
One semantic change is required: qemu_bh_delete() enqueues the BH and
therefore invokes aio_notify(). The
tests/test-aio.c:test_source_bh_delete_from_cb() test case assumed that
g_main_context_iteration(NULL, false) returns false after
qemu_bh_delete() but it now returns true for one iteration. Fix up the
test case.
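A hedged sketch of what the test fix amounts to; the exact assertions in
tests/test-aio.c may differ:

```c
/*
 * Sketch of the test_source_bh_delete_from_cb() adjustment: since
 * qemu_bh_delete() now enqueues the BH and calls aio_notify(), the
 * GLib main loop reports one more ready iteration before going idle.
 */
qemu_bh_delete(bh);

/* Old assumption, no longer valid:
 * g_assert(!g_main_context_iteration(NULL, false));
 */

/* Drain the extra iteration triggered by aio_notify()... */
while (g_main_context_iteration(NULL, false)) {
    /* nothing to do */
}

/* ...after which the loop really is idle. */
g_assert(!g_main_context_iteration(NULL, false));
```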
This patch makes aio_compute_timeout() and aio_bh_poll() drop out of the
CPU profile reported by perf-top(1). Previously they accounted for a
combined 9% CPU utilization when AioContext polling was commented out and
the guest had 2 virtio-blk,num-queues=1 devices and 99
virtio-blk,num-queues=32 devices.
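Both functions vanish from the profile because they now walk a list that
holds only pending BHs (typically a handful) rather than every BH ever
created. A sketch of the timeout side, reusing the assumed flag names from
above plus a hypothetical BH_IDLE flag (QSLIST_FOREACH_RCU comes from the
parent "rcu_queue: add QSLIST functions" commit):

```c
/*
 * Sketch: compute how long the event loop may block. The scan now
 * visits only pending BHs. Flag names (including BH_IDLE) are assumed.
 */
static int aio_compute_bh_timeout(AioContext *ctx)
{
    QEMUBH *bh;
    int timeout = -1; /* nothing pending: block indefinitely */

    QSLIST_FOREACH_RCU(bh, &ctx->bh_list, next) {
        if ((bh->flags & (BH_SCHEDULED | BH_DELETED)) == BH_SCHEDULED) {
            if (bh->flags & BH_IDLE) {
                /* idle BHs only need a wakeup every ~10 ms */
                timeout = 10000000;
            } else {
                return 0; /* a ready BH: do not block at all */
            }
        }
    }
    return timeout;
}
```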
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>
Message-id: 20200221093951.1414693-1-stefanha@redhat.com
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Diffstat (limited to 'include/block')
-rw-r--r-- | include/block/aio.h | 20
1 file changed, 18 insertions, 2 deletions
```diff
diff --git a/include/block/aio.h b/include/block/aio.h
index 7ba9bd7874..1a2ce9ca26 100644
--- a/include/block/aio.h
+++ b/include/block/aio.h
@@ -51,6 +51,19 @@ struct ThreadPool;
 struct LinuxAioState;
 struct LuringState;
 
+/*
+ * Each aio_bh_poll() call carves off a slice of the BH list, so that newly
+ * scheduled BHs are not processed until the next aio_bh_poll() call. All
+ * active aio_bh_poll() calls chain their slices together in a list, so that
+ * nested aio_bh_poll() calls process all scheduled bottom halves.
+ */
+typedef QSLIST_HEAD(, QEMUBH) BHList;
+typedef struct BHListSlice BHListSlice;
+struct BHListSlice {
+    BHList bh_list;
+    QSIMPLEQ_ENTRY(BHListSlice) next;
+};
+
 struct AioContext {
     GSource source;
 
@@ -91,8 +104,11 @@ struct AioContext {
      */
     QemuLockCnt list_lock;
 
-    /* Anchor of the list of Bottom Halves belonging to the context */
-    struct QEMUBH *first_bh;
+    /* Bottom Halves pending aio_bh_poll() processing */
+    BHList bh_list;
+
+    /* Chained BH list slices for each nested aio_bh_poll() call */
+    QSIMPLEQ_HEAD(, BHListSlice) bh_slice_list;
 
     /* Used by aio_notify.
      *
```
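The header comment explains the slice mechanism in prose; below is a sketch
of how aio_bh_poll() could implement it. QSLIST_MOVE_ATOMIC comes from the
parent commit ("rcu_queue: add QSLIST functions"); the aio_bh_dequeue()
helper and the freeing logic are assumptions, since util/async.c is not
shown on this page:

```c
int aio_bh_poll(AioContext *ctx)
{
    BHListSlice slice;
    BHListSlice *s;
    int ret = 0;

    /*
     * Atomically take ownership of everything enqueued so far. BHs
     * scheduled from now on land on the (now empty) ctx->bh_list and
     * are handled by the next aio_bh_poll() call.
     */
    QSLIST_MOVE_ATOMIC(&slice.bh_list, &ctx->bh_list);
    QSIMPLEQ_INSERT_TAIL(&ctx->bh_slice_list, &slice, next);

    /* Always drain from the oldest slice, so a nested aio_bh_poll()
     * call processes all scheduled bottom halves, not just its own.
     */
    while ((s = QSIMPLEQ_FIRST(&ctx->bh_slice_list))) {
        QEMUBH *bh;
        unsigned flags;

        /* Assumed helper: pop one BH, returning and clearing its flags. */
        bh = aio_bh_dequeue(&s->bh_list, &flags);
        if (!bh) {
            /* Slice drained: unlink it (slices live on callers' stacks). */
            QSIMPLEQ_REMOVE_HEAD(&ctx->bh_slice_list, next);
            continue;
        }

        if ((flags & (BH_SCHEDULED | BH_DELETED)) == BH_SCHEDULED) {
            ret = 1;
            bh->cb(bh->opaque); /* run the bottom half */
        }
        if (flags & BH_DELETED) {
            g_free(bh); /* deleted BHs are freed here, not in a scan */
        }
    }

    return ret;
}
```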