author     Kevin Wolf    2018-03-22 10:57:14 +0100
committer  Kevin Wolf    2018-06-18 15:03:25 +0200
commit     1cc8e54ada97f7ac479554e15ca9e426c895b158 (patch)
tree       baea764fbca6465c70de628a2c9c5080f0f42ef9 /include
parent     tests/test-bdrv-drain: bdrv_drain_all() works in coroutines now (diff)
block: Avoid unnecessary aio_poll() in AIO_WAIT_WHILE()
Commit 91af091f923 added an additional aio_poll() to BDRV_POLL_WHILE() in order to make sure that all pending BHs are executed on drain. This was the wrong place to make the fix, as it is useless overhead for all other users of the macro and unnecessarily complicates the mechanism.

This patch effectively reverts said commit (the context has changed a bit and the code has moved to AIO_WAIT_WHILE()) and instead polls in the loop condition for drain.

The effect is probably hard to measure in any real-world use case because actual I/O will dominate, but if I run only the initialisation part of 'qemu-img convert', where it calls bdrv_block_status() for the whole image to find out how much data there is to copy, this phase actually needs only roughly half the time after this patch.

Signed-off-by: Kevin Wolf <kwolf@redhat.com>
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
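The drain-side counterpart of this change lives outside this diffstat (which is limited to include/), but the idea of "polling in the loop condition" can be sketched roughly as follows. This is an illustrative sketch only; the helper name drain_still_busy() is hypothetical and this is not the actual block/io.c change:

    #include "qemu/osdep.h"
    #include "block/block_int.h"

    /* Hypothetical helper, for illustration only: flush pending BHs as part
     * of evaluating the wait condition, so that AIO_WAIT_WHILE() itself no
     * longer needs an extra aio_poll(). */
    static bool drain_still_busy(BlockDriverState *bs)
    {
        /* Run all ready BHs and handlers without blocking first ... */
        while (aio_poll(bdrv_get_aio_context(bs), false)) {
            /* ... and keep going as long as progress is being made. */
        }
        /* Then check whether any requests are still in flight. */
        return atomic_read(&bs->in_flight) > 0;
    }

    /* Inside the drain function, the extra polling now sits in the
     * condition instead of in the generic macro: */
    BDRV_POLL_WHILE(bs, drain_still_busy(bs));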
Diffstat (limited to 'include')
-rw-r--r--   include/block/aio-wait.h   22
1 file changed, 8 insertions, 14 deletions
diff --git a/include/block/aio-wait.h b/include/block/aio-wait.h
index 8c90a2e66e..783d3678dd 100644
--- a/include/block/aio-wait.h
+++ b/include/block/aio-wait.h
@@ -73,29 +73,23 @@ typedef struct {
*/
#define AIO_WAIT_WHILE(wait, ctx, cond) ({ \
bool waited_ = false; \
- bool busy_ = true; \
AioWait *wait_ = (wait); \
AioContext *ctx_ = (ctx); \
if (in_aio_context_home_thread(ctx_)) { \
- while ((cond) || busy_) { \
- busy_ = aio_poll(ctx_, (cond)); \
- waited_ |= !!(cond) | busy_; \
+ while ((cond)) { \
+ aio_poll(ctx_, true); \
+ waited_ = true; \
} \
} else { \
assert(qemu_get_current_aio_context() == \
qemu_get_aio_context()); \
/* Increment wait_->num_waiters before evaluating cond. */ \
atomic_inc(&wait_->num_waiters); \
- while (busy_) { \
- if ((cond)) { \
- waited_ = busy_ = true; \
- aio_context_release(ctx_); \
- aio_poll(qemu_get_aio_context(), true); \
- aio_context_acquire(ctx_); \
- } else { \
- busy_ = aio_poll(ctx_, false); \
- waited_ |= busy_; \
- } \
+ while ((cond)) { \
+ aio_context_release(ctx_); \
+ aio_poll(qemu_get_aio_context(), true); \
+ aio_context_acquire(ctx_); \
+ waited_ = true; \
} \
atomic_dec(&wait_->num_waiters); \
} \