From patchwork Thu Jan 31 10:54:03 2013 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Stefan Hajnoczi X-Patchwork-Id: 217162 Return-Path: X-Original-To: incoming@patchwork.ozlabs.org Delivered-To: patchwork-incoming@bilbo.ozlabs.org Received: from lists.gnu.org (lists.gnu.org [208.118.235.17]) (using TLSv1 with cipher AES256-SHA (256/256 bits)) (Client did not present a certificate) by ozlabs.org (Postfix) with ESMTPS id 471C42C0040 for ; Thu, 31 Jan 2013 22:27:54 +1100 (EST) Received: from localhost ([::1]:52982 helo=lists.gnu.org) by lists.gnu.org with esmtp (Exim 4.71) (envelope-from ) id 1U0rnY-0001bt-Pi for incoming@patchwork.ozlabs.org; Thu, 31 Jan 2013 05:55:16 -0500 Received: from eggs.gnu.org ([208.118.235.92]:38223) by lists.gnu.org with esmtp (Exim 4.71) (envelope-from ) id 1U0rmw-0000ln-Mb for qemu-devel@nongnu.org; Thu, 31 Jan 2013 05:54:42 -0500 Received: from Debian-exim by eggs.gnu.org with spam-scanned (Exim 4.71) (envelope-from ) id 1U0rmr-00048s-RO for qemu-devel@nongnu.org; Thu, 31 Jan 2013 05:54:38 -0500 Received: from mx1.redhat.com ([209.132.183.28]:34090) by eggs.gnu.org with esmtp (Exim 4.71) (envelope-from ) id 1U0rmr-00048f-IA for qemu-devel@nongnu.org; Thu, 31 Jan 2013 05:54:33 -0500 Received: from int-mx02.intmail.prod.int.phx2.redhat.com (int-mx02.intmail.prod.int.phx2.redhat.com [10.5.11.12]) by mx1.redhat.com (8.14.4/8.14.4) with ESMTP id r0VAsVo3029672 (version=TLSv1/SSLv3 cipher=DHE-RSA-AES256-SHA bits=256 verify=OK); Thu, 31 Jan 2013 05:54:31 -0500 Received: from localhost (ovpn-112-28.ams2.redhat.com [10.36.112.28]) by int-mx02.intmail.prod.int.phx2.redhat.com (8.13.8/8.13.8) with ESMTP id r0VAsUgE018931; Thu, 31 Jan 2013 05:54:31 -0500 From: Stefan Hajnoczi To: Date: Thu, 31 Jan 2013 11:54:03 +0100 Message-Id: <1359629644-21920-11-git-send-email-stefanha@redhat.com> In-Reply-To: <1359629644-21920-1-git-send-email-stefanha@redhat.com> References: 
<1359629644-21920-1-git-send-email-stefanha@redhat.com> X-Scanned-By: MIMEDefang 2.67 on 10.5.11.12 X-detected-operating-system: by eggs.gnu.org: GNU/Linux 3.x X-Received-From: 209.132.183.28 Cc: Anthony Liguori , Jan Kiszka , Fabien Chouteau , Stefan Hajnoczi , Paolo Bonzini , Amos Kong Subject: [Qemu-devel] [PATCH 10/11] aio: convert aio_poll() to g_poll(3) X-BeenThere: qemu-devel@nongnu.org X-Mailman-Version: 2.1.14 Precedence: list List-Id: List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: qemu-devel-bounces+incoming=patchwork.ozlabs.org@nongnu.org Sender: qemu-devel-bounces+incoming=patchwork.ozlabs.org@nongnu.org AioHandler already has a GPollFD so we can directly use its events/revents. Add the int poller_idx field to AioHandler so we can map g_poll(3) results back to AioHandlers. Reuse aio_dispatch() to invoke handlers after g_poll(3). Signed-off-by: Stefan Hajnoczi --- aio-posix.c | 62 ++++++++++++++++------------------------------------- async.c | 2 ++ include/block/aio.h | 4 ++++ 3 files changed, 25 insertions(+), 43 deletions(-) diff --git a/aio-posix.c b/aio-posix.c index 35131a3..303e19f 100644 --- a/aio-posix.c +++ b/aio-posix.c @@ -25,6 +25,7 @@ struct AioHandler IOHandler *io_write; AioFlushHandler *io_flush; int deleted; + int poller_idx; void *opaque; QLIST_ENTRY(AioHandler) node; }; @@ -85,6 +86,7 @@ void aio_set_fd_handler(AioContext *ctx, node->io_write = io_write; node->io_flush = io_flush; node->opaque = opaque; + node->poller_idx = -1; node->pfd.events = (io_read ? G_IO_IN | G_IO_HUP : 0); node->pfd.events |= (io_write ? 
G_IO_OUT : 0); @@ -177,10 +179,7 @@ static bool aio_dispatch(AioContext *ctx) bool aio_poll(AioContext *ctx, bool blocking) { - static struct timeval tv0; AioHandler *node; - fd_set rdfds, wrfds; - int max_fd = -1; int ret; bool busy, progress; @@ -206,12 +205,13 @@ bool aio_poll(AioContext *ctx, bool blocking) ctx->walking_handlers++; - FD_ZERO(&rdfds); - FD_ZERO(&wrfds); + poller_reset(&ctx->poller); - /* fill fd sets */ + /* fill poller */ busy = false; QLIST_FOREACH(node, &ctx->aio_handlers, node) { + node->poller_idx = -1; + /* If there aren't pending AIO operations, don't invoke callbacks. * Otherwise, if there are no AIO requests, qemu_aio_wait() would * wait indefinitely. @@ -222,13 +222,9 @@ bool aio_poll(AioContext *ctx, bool blocking) } busy = true; } - if (!node->deleted && node->io_read) { - FD_SET(node->pfd.fd, &rdfds); - max_fd = MAX(max_fd, node->pfd.fd + 1); - } - if (!node->deleted && node->io_write) { - FD_SET(node->pfd.fd, &wrfds); - max_fd = MAX(max_fd, node->pfd.fd + 1); + if (!node->deleted && node->pfd.events) { + node->poller_idx = poller_add_fd(&ctx->poller, node->pfd.fd, + node->pfd.events); } } @@ -240,41 +236,21 @@ bool aio_poll(AioContext *ctx, bool blocking) } /* wait until next event */ - ret = select(max_fd, &rdfds, &wrfds, NULL, blocking ? NULL : &tv0); + ret = g_poll(ctx->poller.poll_fds, + ctx->poller.nfds, + blocking ? 
-1 : 0); /* if we have any readable fds, dispatch event */ if (ret > 0) { - /* we have to walk very carefully in case - * qemu_aio_set_fd_handler is called while we're walking */ - node = QLIST_FIRST(&ctx->aio_handlers); - while (node) { - AioHandler *tmp; - - ctx->walking_handlers++; - - if (!node->deleted && - FD_ISSET(node->pfd.fd, &rdfds) && - node->io_read) { - node->io_read(node->opaque); - progress = true; - } - if (!node->deleted && - FD_ISSET(node->pfd.fd, &wrfds) && - node->io_write) { - node->io_write(node->opaque); - progress = true; - } - - tmp = node; - node = QLIST_NEXT(node, node); - - ctx->walking_handlers--; - - if (!ctx->walking_handlers && tmp->deleted) { - QLIST_REMOVE(tmp, node); - g_free(tmp); + QLIST_FOREACH(node, &ctx->aio_handlers, node) { + if (node->poller_idx != -1) { + node->pfd.revents |= + poller_get_revents(&ctx->poller, node->poller_idx); } } + if (aio_dispatch(ctx)) { + progress = true; + } } assert(progress || busy); diff --git a/async.c b/async.c index 72d268a..1cd1b89 100644 --- a/async.c +++ b/async.c @@ -174,6 +174,7 @@ aio_ctx_finalize(GSource *source) aio_set_event_notifier(ctx, &ctx->notifier, NULL, NULL); event_notifier_cleanup(&ctx->notifier); + poller_cleanup(&ctx->poller); } static GSourceFuncs aio_source_funcs = { @@ -198,6 +199,7 @@ AioContext *aio_context_new(void) { AioContext *ctx; ctx = (AioContext *) g_source_new(&aio_source_funcs, sizeof(AioContext)); + poller_init(&ctx->poller); event_notifier_init(&ctx->notifier, false); aio_set_event_notifier(ctx, &ctx->notifier, (EventNotifierHandler *) diff --git a/include/block/aio.h b/include/block/aio.h index 8eda924..45ff047 100644 --- a/include/block/aio.h +++ b/include/block/aio.h @@ -17,6 +17,7 @@ #include "qemu-common.h" #include "qemu/queue.h" #include "qemu/event_notifier.h" +#include "qemu/poller.h" typedef struct BlockDriverAIOCB BlockDriverAIOCB; typedef void BlockDriverCompletionFunc(void *opaque, int ret); @@ -63,6 +64,9 @@ typedef struct AioContext { /* 
Used for aio_notify. */ EventNotifier notifier; + + /* GPollFDs for aio_poll() */ + Poller poller; } AioContext; /* Returns 1 if there are still outstanding AIO requests; 0 otherwise */