author     Paolo Bonzini   2012-09-13 12:28:51 +0200
committer  Paolo Bonzini   2012-10-30 09:30:53 +0100
commit     a915f4bc977c4f3aab08a78023c1303664d1c606 (patch)
tree       058173beb1ee33e79f5f83d5000532d1e4b9d0c3 /aio.c
parent     aio: introduce AioContext, move bottom halves there (diff)
aio: add I/O handlers to the AioContext interface
With this patch, I/O handlers (including event notifier handlers) can be attached to a single AioContext.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
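For orientation, here is a minimal caller-side sketch (not part of this patch) of the interface the commit introduces. The header name, callback bodies, and the attach_example() wrapper are assumptions for illustration only; the function signatures match the declarations added in the diff below.

/* Hypothetical caller-side sketch of the per-context interface.  The header
 * name and all callbacks are placeholders, not part of this commit. */
#include "qemu-aio.h"   /* assumed to declare AioContext, aio_set_fd_handler(), aio_set_event_notifier() */

static void my_read_cb(void *opaque)
{
    /* consume data from the readable fd passed via opaque */
}

static int my_flush_cb(void *opaque)
{
    return 0;           /* no requests pending for this handler */
}

static void my_notifier_cb(EventNotifier *e)
{
    /* react to the event notifier being signalled */
}

static int my_notifier_flush_cb(EventNotifier *e)
{
    return 0;           /* no requests pending for this notifier */
}

static void attach_example(AioContext *ctx, int fd, EventNotifier *e)
{
    /* Register a read handler on fd within this context only. */
    aio_set_fd_handler(ctx, fd, my_read_cb, NULL, my_flush_cb, NULL);

    /* Event notifiers go through the same per-context path. */
    aio_set_event_notifier(ctx, e, my_notifier_cb, my_notifier_flush_cb);

    /* Passing NULL read and write callbacks removes the fd handler again,
     * as the deletion branch of aio_set_fd_handler() in the diff shows. */
    aio_set_fd_handler(ctx, fd, NULL, NULL, NULL, NULL);
}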
Diffstat (limited to 'aio.c')
-rw-r--r--   aio.c   68
1 file changed, 27 insertions(+), 41 deletions(-)
diff --git a/aio.c b/aio.c
index 7e3fe708d2..c89f1e95c1 100644
--- a/aio.c
+++ b/aio.c
@@ -18,15 +18,6 @@
#include "qemu-queue.h"
#include "qemu_socket.h"
-/* The list of registered AIO handlers */
-static QLIST_HEAD(, AioHandler) aio_handlers;
-
-/* This is a simple lock used to protect the aio_handlers list. Specifically,
- * it's used to ensure that no callbacks are removed while we're walking and
- * dispatching callbacks.
- */
-static int walking_handlers;
-
struct AioHandler
{
int fd;
@@ -38,11 +29,11 @@ struct AioHandler
QLIST_ENTRY(AioHandler) node;
};
-static AioHandler *find_aio_handler(int fd)
+static AioHandler *find_aio_handler(AioContext *ctx, int fd)
{
AioHandler *node;
- QLIST_FOREACH(node, &aio_handlers, node) {
+ QLIST_FOREACH(node, &ctx->aio_handlers, node) {
if (node->fd == fd)
if (!node->deleted)
return node;
@@ -51,21 +42,22 @@ static AioHandler *find_aio_handler(int fd)
return NULL;
}
-void qemu_aio_set_fd_handler(int fd,
- IOHandler *io_read,
- IOHandler *io_write,
- AioFlushHandler *io_flush,
- void *opaque)
+void aio_set_fd_handler(AioContext *ctx,
+ int fd,
+ IOHandler *io_read,
+ IOHandler *io_write,
+ AioFlushHandler *io_flush,
+ void *opaque)
{
AioHandler *node;
- node = find_aio_handler(fd);
+ node = find_aio_handler(ctx, fd);
/* Are we deleting the fd handler? */
if (!io_read && !io_write) {
if (node) {
/* If the lock is held, just mark the node as deleted */
- if (walking_handlers)
+ if (ctx->walking_handlers)
node->deleted = 1;
else {
/* Otherwise, delete it for real. We can't just mark it as
@@ -81,7 +73,7 @@ void qemu_aio_set_fd_handler(int fd,
/* Alloc and insert if it's not already there */
node = g_malloc0(sizeof(AioHandler));
node->fd = fd;
- QLIST_INSERT_HEAD(&aio_handlers, node, node);
+ QLIST_INSERT_HEAD(&ctx->aio_handlers, node, node);
}
/* Update handler with latest information */
node->io_read = io_read;
@@ -89,25 +81,19 @@ void qemu_aio_set_fd_handler(int fd,
node->io_flush = io_flush;
node->opaque = opaque;
}
-
- qemu_set_fd_handler2(fd, NULL, io_read, io_write, opaque);
-}
-
-void qemu_aio_set_event_notifier(EventNotifier *notifier,
- EventNotifierHandler *io_read,
- AioFlushEventNotifierHandler *io_flush)
-{
- qemu_aio_set_fd_handler(event_notifier_get_fd(notifier),
- (IOHandler *)io_read, NULL,
- (AioFlushHandler *)io_flush, notifier);
}
-void qemu_aio_flush(void)
+void aio_set_event_notifier(AioContext *ctx,
+ EventNotifier *notifier,
+ EventNotifierHandler *io_read,
+ AioFlushEventNotifierHandler *io_flush)
{
- while (qemu_aio_wait());
+ aio_set_fd_handler(ctx, event_notifier_get_fd(notifier),
+ (IOHandler *)io_read, NULL,
+ (AioFlushHandler *)io_flush, notifier);
}
-bool qemu_aio_wait(void)
+bool aio_wait(AioContext *ctx)
{
AioHandler *node;
fd_set rdfds, wrfds;
@@ -120,18 +106,18 @@ bool qemu_aio_wait(void)
* Do not call select in this case, because it is possible that the caller
* does not need a complete flush (as is the case for qemu_aio_wait loops).
*/
- if (qemu_bh_poll()) {
+ if (aio_bh_poll(ctx)) {
return true;
}
- walking_handlers++;
+ ctx->walking_handlers++;
FD_ZERO(&rdfds);
FD_ZERO(&wrfds);
/* fill fd sets */
busy = false;
- QLIST_FOREACH(node, &aio_handlers, node) {
+ QLIST_FOREACH(node, &ctx->aio_handlers, node) {
/* If there aren't pending AIO operations, don't invoke callbacks.
* Otherwise, if there are no AIO requests, qemu_aio_wait() would
* wait indefinitely.
@@ -152,7 +138,7 @@ bool qemu_aio_wait(void)
}
}
- walking_handlers--;
+ ctx->walking_handlers--;
/* No AIO operations? Get us out of here */
if (!busy) {
@@ -166,11 +152,11 @@ bool qemu_aio_wait(void)
if (ret > 0) {
/* we have to walk very carefully in case
* qemu_aio_set_fd_handler is called while we're walking */
- node = QLIST_FIRST(&aio_handlers);
+ node = QLIST_FIRST(&ctx->aio_handlers);
while (node) {
AioHandler *tmp;
- walking_handlers++;
+ ctx->walking_handlers++;
if (!node->deleted &&
FD_ISSET(node->fd, &rdfds) &&
@@ -186,9 +172,9 @@ bool qemu_aio_wait(void)
tmp = node;
node = QLIST_NEXT(node, node);
- walking_handlers--;
+ ctx->walking_handlers--;
- if (!walking_handlers && tmp->deleted) {
+ if (!ctx->walking_handlers && tmp->deleted) {
QLIST_REMOVE(tmp, node);
g_free(tmp);
}
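The global qemu_aio_flush() loop removed above (while (qemu_aio_wait());) maps onto the renamed primitive in a straightforward way. A minimal sketch, assuming the caller holds the AioContext it wants to drain; drain_context_example() is a hypothetical name:

/* Sketch only: drain one AioContext by looping on aio_wait(), mirroring the
 * while (qemu_aio_wait()); loop that the old global qemu_aio_flush() used. */
static void drain_context_example(AioContext *ctx)
{
    /* aio_wait() keeps returning true while bottom halves ran or handlers
     * still report pending requests via their io_flush callbacks; once it
     * returns false, nothing registered on this context is in flight. */
    while (aio_wait(ctx)) {
        /* work happens inside the handlers registered on ctx */
    }
}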