author     Paolo Bonzini      2014-07-09 11:53:01 +0200
committer  Stefan Hajnoczi    2014-08-29 11:46:58 +0200
commit     845ca10dd089b4e48f0a79bad005fb30eb77584e (patch)
tree       18d8041935c716ea293399136f769dafdbd8614d
parent     blockdev: fix drive-mirror 'granularity' error message (diff)
AioContext: take bottom halves into account when computing aio_poll timeout
Right now, QEMU invokes aio_bh_poll before the "poll" phase of aio_poll. It is simpler to do it afterwards and skip the "poll" phase altogether when the OS-dependent parts of AioContext are invoked from GSource. This way, AioContext behaves more similarly when used as a GSource vs. when used as stand-alone.

As a start, take bottom halves into account when computing the poll timeout. If a bottom half is ready, do a non-blocking poll. As a side effect, this makes idle bottom halves work with aio_poll; an improvement, but not really an important one since they are deprecated.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
-rw-r--r--  aio-posix.c           2
-rw-r--r--  aio-win32.c           4
-rw-r--r--  async.c              32
-rw-r--r--  include/block/aio.h   8
4 files changed, 29 insertions, 17 deletions
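The policy the new aio_compute_timeout() implements can be summarized as: a scheduled non-idle bottom half forces a non-blocking poll (timeout 0); a scheduled idle bottom half caps the timeout at 10 ms; otherwise the timer deadline decides, with -1 meaning "block forever". Below is a minimal, self-contained sketch of that precedence; FakeBH, compute_timeout and soonest_timeout are hypothetical stand-ins for illustration, not the real AioContext structures changed in the diff that follows.

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

typedef struct FakeBH {
    bool scheduled;
    bool deleted;
    bool idle;
    struct FakeBH *next;
} FakeBH;

/* Smaller of two nanosecond timeouts, where -1 means "no timeout". */
static int64_t soonest_timeout(int64_t a, int64_t b)
{
    if (a < 0) {
        return b;
    }
    if (b < 0) {
        return a;
    }
    return a < b ? a : b;
}

static int64_t compute_timeout(FakeBH *first_bh, int64_t timer_deadline_ns)
{
    int64_t timeout = -1;
    FakeBH *bh;

    for (bh = first_bh; bh; bh = bh->next) {
        if (!bh->deleted && bh->scheduled) {
            if (bh->idle) {
                /* idle bottom halves are polled at least every 10 ms */
                timeout = 10000000;
            } else {
                /* a ready bottom half means: poll without blocking */
                return 0;
            }
        }
    }

    if (timer_deadline_ns == 0) {
        /* a timer has already expired */
        return 0;
    }
    return soonest_timeout(timeout, timer_deadline_ns);
}

int main(void)
{
    FakeBH idle_bh = { .scheduled = true, .deleted = false, .idle = true, .next = NULL };
    FakeBH ready_bh = { .scheduled = true, .deleted = false, .idle = false, .next = &idle_bh };

    printf("no BH, no timer  -> %lld\n", (long long)compute_timeout(NULL, -1));
    printf("idle BH only     -> %lld\n", (long long)compute_timeout(&idle_bh, -1));
    printf("ready BH present -> %lld\n", (long long)compute_timeout(&ready_bh, 500000));
    printf("timer at 2 ms    -> %lld\n", (long long)compute_timeout(NULL, 2000000));
    return 0;
}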
diff --git a/aio-posix.c b/aio-posix.c
index 2eada2e049..55706f8205 100644
--- a/aio-posix.c
+++ b/aio-posix.c
@@ -249,7 +249,7 @@ bool aio_poll(AioContext *ctx, bool blocking)
/* wait until next event */
ret = qemu_poll_ns((GPollFD *)ctx->pollfds->data,
ctx->pollfds->len,
- blocking ? timerlistgroup_deadline_ns(&ctx->tlg) : 0);
+ blocking ? aio_compute_timeout(ctx) : 0);
/* if we have any readable fds, dispatch event */
if (ret > 0) {
diff --git a/aio-win32.c b/aio-win32.c
index c12f61e97d..fe7ee5bb22 100644
--- a/aio-win32.c
+++ b/aio-win32.c
@@ -165,8 +165,8 @@ bool aio_poll(AioContext *ctx, bool blocking)
while (count > 0) {
int ret;
- timeout = blocking ?
- qemu_timeout_ns_to_ms(timerlistgroup_deadline_ns(&ctx->tlg)) : 0;
+ timeout = blocking
+ ? qemu_timeout_ns_to_ms(aio_compute_timeout(ctx)) : 0;
ret = WaitForMultipleObjects(count, events, FALSE, timeout);
/* if we have any signaled events, dispatch event */
diff --git a/async.c b/async.c
index 34af0b25ca..09e09c6526 100644
--- a/async.c
+++ b/async.c
@@ -152,39 +152,43 @@ void qemu_bh_delete(QEMUBH *bh)
bh->deleted = 1;
}
-static gboolean
-aio_ctx_prepare(GSource *source, gint *timeout)
+int64_t
+aio_compute_timeout(AioContext *ctx)
{
- AioContext *ctx = (AioContext *) source;
+ int64_t deadline;
+ int timeout = -1;
QEMUBH *bh;
- int deadline;
- /* We assume there is no timeout already supplied */
- *timeout = -1;
for (bh = ctx->first_bh; bh; bh = bh->next) {
if (!bh->deleted && bh->scheduled) {
if (bh->idle) {
/* idle bottom halves will be polled at least
* every 10ms */
- *timeout = 10;
+ timeout = 10000000;
} else {
/* non-idle bottom halves will be executed
* immediately */
- *timeout = 0;
- return true;
+ return 0;
}
}
}
- deadline = qemu_timeout_ns_to_ms(timerlistgroup_deadline_ns(&ctx->tlg));
+ deadline = timerlistgroup_deadline_ns(&ctx->tlg);
if (deadline == 0) {
- *timeout = 0;
- return true;
+ return 0;
} else {
- *timeout = qemu_soonest_timeout(*timeout, deadline);
+ return qemu_soonest_timeout(timeout, deadline);
}
+}
- return false;
+static gboolean
+aio_ctx_prepare(GSource *source, gint *timeout)
+{
+ AioContext *ctx = (AioContext *) source;
+
+ /* We assume there is no timeout already supplied */
+ *timeout = qemu_timeout_ns_to_ms(aio_compute_timeout(ctx));
+ return *timeout == 0;
}
static gboolean
diff --git a/include/block/aio.h b/include/block/aio.h
index c23de3cd1f..05b531ca25 100644
--- a/include/block/aio.h
+++ b/include/block/aio.h
@@ -303,4 +303,12 @@ static inline void aio_timer_init(AioContext *ctx,
timer_init(ts, ctx->tlg.tl[type], scale, cb, opaque);
}
+/**
+ * aio_compute_timeout:
+ * @ctx: the aio context
+ *
+ * Compute the timeout that a blocking aio_poll should use.
+ */
+int64_t aio_compute_timeout(AioContext *ctx);
+
#endif
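One cross-cutting detail: the POSIX backend hands the nanosecond value from aio_compute_timeout() straight to qemu_poll_ns(), while the Win32 backend and aio_ctx_prepare() first run it through qemu_timeout_ns_to_ms(), since WaitForMultipleObjects() and GLib's prepare callback expect millisecond timeouts. The stand-alone sketch below shows what such a conversion has to preserve: -1 stays -1, rounding goes up so a short deadline does not truncate to 0 and busy-wait, and the result is clamped to an int. ns_to_ms_sketch is a hypothetical approximation of qemu_timeout_ns_to_ms() in include/qemu/timer.h; the real helper may differ in detail.

#include <stdio.h>
#include <stdint.h>

static int ns_to_ms_sketch(int64_t ns)
{
    int64_t ms;

    if (ns < 0) {
        return -1;          /* -1 means "block forever" in both units */
    }
    if (ns == 0) {
        return 0;           /* a non-blocking poll stays non-blocking */
    }
    /* Round up: waiting a little too long beats truncating a short
     * deadline to zero and effectively busy-waiting. */
    ms = (ns + 999999) / 1000000;
    if (ms > INT32_MAX) {
        ms = INT32_MAX;     /* keep the result in range for an int timeout */
    }
    return (int)ms;
}

int main(void)
{
    printf("%d\n", ns_to_ms_sketch(-1));        /* -1 */
    printf("%d\n", ns_to_ms_sketch(0));         /* 0  */
    printf("%d\n", ns_to_ms_sketch(1));         /* 1  */
    printf("%d\n", ns_to_ms_sketch(10000000));  /* 10 */
    return 0;
}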