
[v3,09/10] aio: convert aio_poll() to g_poll(3)

Message ID 1359979973-31338-10-git-send-email-stefanha@redhat.com
State New

Commit Message

Stefan Hajnoczi Feb. 4, 2013, 12:12 p.m. UTC
AioHandler already has a GPollFD so we can directly use its
events/revents.

Add the int pollfds_idx field to AioHandler so we can map g_poll(3)
results back to AioHandlers.

Reuse aio_dispatch() to invoke handlers after g_poll(3).

Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
 aio-posix.c         | 67 +++++++++++++++++++----------------------------------
 async.c             |  2 ++
 include/block/aio.h |  3 +++
 3 files changed, 29 insertions(+), 43 deletions(-)

Comments

Laszlo Ersek Feb. 6, 2013, 9:05 p.m. UTC | #1
comments in-line

On 02/04/13 13:12, Stefan Hajnoczi wrote:
> AioHandler already has a GPollFD so we can directly use its
> events/revents.
>
> Add the int pollfds_idx field to AioHandler so we can map g_poll(3)
> results back to AioHandlers.
>
> Reuse aio_dispatch() to invoke handlers after g_poll(3).
>
> Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
> ---
>  aio-posix.c         | 67 +++++++++++++++++++----------------------------------
>  async.c             |  2 ++
>  include/block/aio.h |  3 +++
>  3 files changed, 29 insertions(+), 43 deletions(-)

Question 0: I'm thoroughly confused by aio_poll(), even the pre-patch
version. It seems to walk two lists in the AioContext: first the bottom
halves, then aio_handlers. The interesting thing is that the "query" is
different between the two lists (aio_bh_poll() vs. manual iteration in
aio_poll()), but the "action" is the same (after the patch anyway):
aio_dispatch().

(q0 cont) I don't understand how aio_bh_poll() communicates with the
subsequent aio_dispatch(). Do the BH callbacks set some
"ctx->aio_handlers{->next}->pfd.revents"?

More down-to-earth questions (unfortunately, quite interleaved -- it's
likely best to iterate over the rest of this email four times, once for
each question separately):

(q1) you're adding AioContext.pollfds:

> diff --git a/include/block/aio.h b/include/block/aio.h
> index 8eda924..5b54d38 100644
> --- a/include/block/aio.h
> +++ b/include/block/aio.h
> @@ -63,6 +63,9 @@ typedef struct AioContext {
>
>      /* Used for aio_notify.  */
>      EventNotifier notifier;
> +
> +    /* GPollFDs for aio_poll() */
> +    GArray *pollfds;
>  } AioContext;
>
>  /* Returns 1 if there are still outstanding AIO requests; 0 otherwise */

(q1) pollfds destroy/create:

> diff --git a/async.c b/async.c
> index 72d268a..f2d47ba 100644
> --- a/async.c
> +++ b/async.c
> @@ -174,6 +174,7 @@ aio_ctx_finalize(GSource     *source)
>
>      aio_set_event_notifier(ctx, &ctx->notifier, NULL, NULL);
>      event_notifier_cleanup(&ctx->notifier);
> +    g_array_free(ctx->pollfds, TRUE);
>  }
>
>  static GSourceFuncs aio_source_funcs = {
> @@ -198,6 +199,7 @@ AioContext *aio_context_new(void)
>  {
>      AioContext *ctx;
>      ctx = (AioContext *) g_source_new(&aio_source_funcs, sizeof(AioContext));
> +    ctx->pollfds = g_array_new(FALSE, FALSE, sizeof(GPollFD));
>      event_notifier_init(&ctx->notifier, false);
>      aio_set_event_notifier(ctx, &ctx->notifier,
>                             (EventNotifierHandler *)

(q1) Then

> @@ -177,10 +179,7 @@ static bool aio_dispatch(AioContext *ctx)
>
>  bool aio_poll(AioContext *ctx, bool blocking)
>  {
> -    static struct timeval tv0;
>      AioHandler *node;
> -    fd_set rdfds, wrfds;
> -    int max_fd = -1;
>      int ret;
>      bool busy, progress;
>
> @@ -206,12 +205,13 @@ bool aio_poll(AioContext *ctx, bool blocking)
>
>      ctx->walking_handlers++;
>
> -    FD_ZERO(&rdfds);
> -    FD_ZERO(&wrfds);
> +    g_array_set_size(ctx->pollfds, 0);

(q1) the pollfds array is truncated here,

>
> -    /* fill fd sets */
> +    /* fill pollfds */
>      busy = false;
>      QLIST_FOREACH(node, &ctx->aio_handlers, node) {
> +        node->pollfds_idx = -1;
> +

(q2) the new pollfds_idx is set to -1 here,


>          /* If there aren't pending AIO operations, don't invoke callbacks.
>           * Otherwise, if there are no AIO requests, qemu_aio_wait() would
>           * wait indefinitely.
> @@ -222,13 +222,13 @@ bool aio_poll(AioContext *ctx, bool blocking)
>              }
>              busy = true;
>          }
> -        if (!node->deleted && node->io_read) {
> -            FD_SET(node->pfd.fd, &rdfds);
> -            max_fd = MAX(max_fd, node->pfd.fd + 1);
> -        }
> -        if (!node->deleted && node->io_write) {
> -            FD_SET(node->pfd.fd, &wrfds);
> -            max_fd = MAX(max_fd, node->pfd.fd + 1);
> +        if (!node->deleted && node->pfd.events) {
> +            GPollFD pfd = {
> +                .fd = node->pfd.fd,
> +                .events = node->pfd.events,
> +            };
> +            node->pollfds_idx = ctx->pollfds->len;
> +            g_array_append_val(ctx->pollfds, pfd);

(q1) pollfds is extended here,

(q3) the controlling expressions' conversion seems fine;
"node->pfd.events" should be nonzero iff io_read and/or io_write are
present, according to aio_set_fd_handler().

(q3 cont) FWIW I wonder if you could have done away with the "pfd"
local variable here, and just added (deep-copied) "node->pfd" into
"ctx->pollfds". The difference is that the array entry's "revents" field
would come from "node->pfd" as well (instead of being constant-zero).

(q3 cont) Question 3: can node->pfd.revents be nonzero here? (And even
so, would it matter for g_poll()?) I think not; aio_dispatch() seems to
clear it.

>          }
>      }
>
> @@ -240,41 +240,22 @@ bool aio_poll(AioContext *ctx, bool blocking)
>      }
>
>      /* wait until next event */
> -    ret = select(max_fd, &rdfds, &wrfds, NULL, blocking ? NULL : &tv0);
> +    ret = g_poll((GPollFD *)ctx->pollfds->data,
> +                 ctx->pollfds->len,
> +                 blocking ? -1 : 0);

(q1) pollfds being polled,

>
>      /* if we have any readable fds, dispatch event */
>      if (ret > 0) {
> -        /* we have to walk very carefully in case
> -         * qemu_aio_set_fd_handler is called while we're walking */
> -        node = QLIST_FIRST(&ctx->aio_handlers);
> -        while (node) {
> -            AioHandler *tmp;
> -
> -            ctx->walking_handlers++;
> -
> -            if (!node->deleted &&
> -                FD_ISSET(node->pfd.fd, &rdfds) &&
> -                node->io_read) {
> -                node->io_read(node->opaque);
> -                progress = true;
> -            }
> -            if (!node->deleted &&
> -                FD_ISSET(node->pfd.fd, &wrfds) &&
> -                node->io_write) {
> -                node->io_write(node->opaque);
> -                progress = true;
> -            }
> -
> -            tmp = node;
> -            node = QLIST_NEXT(node, node);
> -
> -            ctx->walking_handlers--;
> -
> -            if (!ctx->walking_handlers && tmp->deleted) {
> -                QLIST_REMOVE(tmp, node);
> -                g_free(tmp);
> +        QLIST_FOREACH(node, &ctx->aio_handlers, node) {
> +            if (node->pollfds_idx != -1) {

(q2) pollfds_idx is queried here. I think new aio_handlers cannot be
installed between the previous (q2) spot and here, via:

@@ -85,6 +86,7 @@ void aio_set_fd_handler(AioContext *ctx,
         node->io_write = io_write;
         node->io_flush = io_flush;
         node->opaque = opaque;
+        node->pollfds_idx = -1;

         node->pfd.events = (io_read ? G_IO_IN | G_IO_HUP : 0);
         node->pfd.events |= (io_write ? G_IO_OUT : 0);

(q2 cont) So Question 2: setting pollfds_idx to -1 in set_fd_handler()
is more like general hygiene, right?

Back to aio_poll(),


> +                GPollFD *pfd = &g_array_index(ctx->pollfds, GPollFD,
> +                                              node->pollfds_idx);

(q1) and pollfds is processed here. Question 1: I can't see any way the
contents of "ctx->pollfds" are needed after aio_poll() returns. It looks
like a variable local to aio_poll() would suffice.

(q1 cont) One point where the in-patch solution is more convenient is
the "No AIO operations?  Get us out of here" return statement: you don't
have to free the array. But I think, if this array is indeed temporary
for a single invocation of aio_poll(), then it would be cleaner not to
leak it into AioContext.

> +                node->pfd.revents |= pfd->revents;

(q4) Question 4: any specific reason to use |= here (not counting
general versatility)? Tying in with (q3), "node->pfd.revents" seems to
be zero here.

>              }
>          }
> +        if (aio_dispatch(ctx)) {
> +            progress = true;
> +        }
>      }
>
>      assert(progress || busy);

Anyway, my confusion should not get in the way of this patch. In general,
the questions are rather meant to help the submitter spot any possible
problems.

Reviewed-by: Laszlo Ersek <lersek@redhat.com>

Stefan Hajnoczi Feb. 7, 2013, 8:49 a.m. UTC | #2
On Wed, Feb 06, 2013 at 10:05:15PM +0100, Laszlo Ersek wrote:
> comments in-line
> 
> On 02/04/13 13:12, Stefan Hajnoczi wrote:
> > AioHandler already has a GPollFD so we can directly use its
> > events/revents.
> >
> > Add the int pollfds_idx field to AioHandler so we can map g_poll(3)
> > results back to AioHandlers.
> >
> > Reuse aio_dispatch() to invoke handlers after g_poll(3).
> >
> > Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
> > ---
> >  aio-posix.c         | 67 +++++++++++++++++++----------------------------------
> >  async.c             |  2 ++
> >  include/block/aio.h |  3 +++
> >  3 files changed, 29 insertions(+), 43 deletions(-)
> 
> Question 0: I'm thoroughly confused by aio_poll(), even the pre-patch
> version. It seems to walk two lists in the AioContext: first the bottom
> halves, then aio_handlers. The interesting thing is that the "query" is
> different between the two lists (aio_bh_poll() vs. manual iteration in
> aio_poll()), but the "action" is the same (after the patch anyway):
> aio_dispatch().
> 
> (q0 cont) I don't understand how aio_bh_poll() communicates with the
> subsequent aio_dispatch(). Do the BH callbacks set some
> "ctx->aio_handlers{->next}->pfd.revents"?

The first aio_dispatch() invocation is not related to BHs.  Instead, it
dispatches any glib GSource events that were noted by the main loop.  We
get those out of the way first before we do our own g_poll(3).
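
Roughly, the post-patch aio_poll() does the following (a simplified
sketch; the walking_handlers accounting, the early returns and the
busy/progress bookkeeping are omitted):

    /* aio_poll(ctx, blocking), roughly: */
    aio_bh_poll(ctx);                      /* 1. run pending bottom halves */
    aio_dispatch(ctx);                     /* 2. dispatch revents already noted,
                                            *    e.g. by the glib main loop
                                            *    polling this GSource */
    /* 3. rebuild ctx->pollfds from ctx->aio_handlers, recording each
     *    handler's slot in node->pollfds_idx */
    ret = g_poll((GPollFD *)ctx->pollfds->data, ctx->pollfds->len,
                 blocking ? -1 : 0);       /* 4. wait for events */
    /* 5. copy each pfd->revents back via pollfds_idx, then reuse
     *    aio_dispatch() to invoke the handler callbacks */
    aio_dispatch(ctx);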

I don't like nested event loops :).  Maybe we'll find a way to at least
unify main-loop.c and aio some day, even if we still invoke event loops
in a nested fashion.

> 
> More down-to-earth questions (unfortunately, quite interleaved -- it's
> likely best to iterate over the rest of this email four times, once for
> each question separately):

Wow, I need 3D glasses to view this email.  There are multiple
dimensions of questions squashed down into a single dimension! :)

> 
> (q1) you're adding AioContext.pollfds:
> 
> > diff --git a/include/block/aio.h b/include/block/aio.h
> > index 8eda924..5b54d38 100644
> > --- a/include/block/aio.h
> > +++ b/include/block/aio.h
> > @@ -63,6 +63,9 @@ typedef struct AioContext {
> >
> >      /* Used for aio_notify.  */
> >      EventNotifier notifier;
> > +
> > +    /* GPollFDs for aio_poll() */
> > +    GArray *pollfds;
> >  } AioContext;
> >
> >  /* Returns 1 if there are still outstanding AIO requests; 0 otherwise */
> 
> (q1) pollfds destroy/create:
> 
> > diff --git a/async.c b/async.c
> > index 72d268a..f2d47ba 100644
> > --- a/async.c
> > +++ b/async.c
> > @@ -174,6 +174,7 @@ aio_ctx_finalize(GSource     *source)
> >
> >      aio_set_event_notifier(ctx, &ctx->notifier, NULL, NULL);
> >      event_notifier_cleanup(&ctx->notifier);
> > +    g_array_free(ctx->pollfds, TRUE);
> >  }
> >
> >  static GSourceFuncs aio_source_funcs = {
> > @@ -198,6 +199,7 @@ AioContext *aio_context_new(void)
> >  {
> >      AioContext *ctx;
> >      ctx = (AioContext *) g_source_new(&aio_source_funcs, sizeof(AioContext));
> > +    ctx->pollfds = g_array_new(FALSE, FALSE, sizeof(GPollFD));
> >      event_notifier_init(&ctx->notifier, false);
> >      aio_set_event_notifier(ctx, &ctx->notifier,
> >                             (EventNotifierHandler *)
> 
> (q1) Then
> 
> > @@ -177,10 +179,7 @@ static bool aio_dispatch(AioContext *ctx)
> >
> >  bool aio_poll(AioContext *ctx, bool blocking)
> >  {
> > -    static struct timeval tv0;
> >      AioHandler *node;
> > -    fd_set rdfds, wrfds;
> > -    int max_fd = -1;
> >      int ret;
> >      bool busy, progress;
> >
> > @@ -206,12 +205,13 @@ bool aio_poll(AioContext *ctx, bool blocking)
> >
> >      ctx->walking_handlers++;
> >
> > -    FD_ZERO(&rdfds);
> > -    FD_ZERO(&wrfds);
> > +    g_array_set_size(ctx->pollfds, 0);
> 
> (q1) the pollfds array is truncated here,
> 
> >
> > -    /* fill fd sets */
> > +    /* fill pollfds */
> >      busy = false;
> >      QLIST_FOREACH(node, &ctx->aio_handlers, node) {
> > +        node->pollfds_idx = -1;
> > +
> 
> (q2) the new pollfds_idx is set to -1 here,
> 
> 
> >          /* If there aren't pending AIO operations, don't invoke callbacks.
> >           * Otherwise, if there are no AIO requests, qemu_aio_wait() would
> >           * wait indefinitely.
> > @@ -222,13 +222,13 @@ bool aio_poll(AioContext *ctx, bool blocking)
> >              }
> >              busy = true;
> >          }
> > -        if (!node->deleted && node->io_read) {
> > -            FD_SET(node->pfd.fd, &rdfds);
> > -            max_fd = MAX(max_fd, node->pfd.fd + 1);
> > -        }
> > -        if (!node->deleted && node->io_write) {
> > -            FD_SET(node->pfd.fd, &wrfds);
> > -            max_fd = MAX(max_fd, node->pfd.fd + 1);
> > +        if (!node->deleted && node->pfd.events) {
> > +            GPollFD pfd = {
> > +                .fd = node->pfd.fd,
> > +                .events = node->pfd.events,
> > +            };
> > +            node->pollfds_idx = ctx->pollfds->len;
> > +            g_array_append_val(ctx->pollfds, pfd);
> 
> (q1) pollfds is extended here,
> 
> (q3) the controlling expressions' conversion seems fine;
> "node->pfd.events" should be nonzero iff io_read and/or io_write are
> present, according to aio_set_fd_handler().
> 
> (q3 cont) FWIW I wonder if you could have done away with the "pfd"
> local variable here, and just added (deep-copied) "node->pfd" into
> "ctx->pollfds". The difference is that the array entry's "revents" field
> would come from "node->pfd" as well (instead of being constant-zero).
> 
> (q3 cont) Question 3: can node->pfd.revents be nonzero here? (And even
> so, would it matter for g_poll()?) I think not; aio_dispatch() seems to
> clear it.

A deep copy would work too.
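
For illustration, that variant might look like this (untested sketch; it
relies on node->pfd.revents having been cleared by the previous
aio_dispatch() pass, as discussed above):

    if (!node->deleted && node->pfd.events) {
        node->pollfds_idx = ctx->pollfds->len;
        /* deep-copy the whole GPollFD, including the (zeroed) revents */
        g_array_append_val(ctx->pollfds, node->pfd);
    }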

> >          }
> >      }
> >
> > @@ -240,41 +240,22 @@ bool aio_poll(AioContext *ctx, bool blocking)
> >      }
> >
> >      /* wait until next event */
> > -    ret = select(max_fd, &rdfds, &wrfds, NULL, blocking ? NULL : &tv0);
> > +    ret = g_poll((GPollFD *)ctx->pollfds->data,
> > +                 ctx->pollfds->len,
> > +                 blocking ? -1 : 0);
> 
> (q1) pollfds being polled,
> 
> >
> >      /* if we have any readable fds, dispatch event */
> >      if (ret > 0) {
> > -        /* we have to walk very carefully in case
> > -         * qemu_aio_set_fd_handler is called while we're walking */
> > -        node = QLIST_FIRST(&ctx->aio_handlers);
> > -        while (node) {
> > -            AioHandler *tmp;
> > -
> > -            ctx->walking_handlers++;
> > -
> > -            if (!node->deleted &&
> > -                FD_ISSET(node->pfd.fd, &rdfds) &&
> > -                node->io_read) {
> > -                node->io_read(node->opaque);
> > -                progress = true;
> > -            }
> > -            if (!node->deleted &&
> > -                FD_ISSET(node->pfd.fd, &wrfds) &&
> > -                node->io_write) {
> > -                node->io_write(node->opaque);
> > -                progress = true;
> > -            }
> > -
> > -            tmp = node;
> > -            node = QLIST_NEXT(node, node);
> > -
> > -            ctx->walking_handlers--;
> > -
> > -            if (!ctx->walking_handlers && tmp->deleted) {
> > -                QLIST_REMOVE(tmp, node);
> > -                g_free(tmp);
> > +        QLIST_FOREACH(node, &ctx->aio_handlers, node) {
> > +            if (node->pollfds_idx != -1) {
> 
> (q2) pollfds_idx is queried here. I think new aio_handlers cannot be
> installed between the previous (q2) spot and here, via:
> 
> @@ -85,6 +86,7 @@ void aio_set_fd_handler(AioContext *ctx,
>          node->io_write = io_write;
>          node->io_flush = io_flush;
>          node->opaque = opaque;
> +        node->pollfds_idx = -1;
> 
>          node->pfd.events = (io_read ? G_IO_IN | G_IO_HUP : 0);
>          node->pfd.events |= (io_write ? G_IO_OUT : 0);
> 
> (q2 cont) So Question 2: setting pollfds_idx to -1 in set_fd_handler()
> is more like general hygiene, right?

You are right.  Initializing pollfds_idx to -1 is a good invariant
because it means we'll be safe if the code is changed in the future.

> Back to aio_poll(),
> 
> 
> > +                GPollFD *pfd = &g_array_index(ctx->pollfds, GPollFD,
> > +                                              node->pollfds_idx);
> 
> (q1) and pollfds is processed here. Question 1: I can't see any way the
> contents of "ctx->pollfds" are needed after aio_poll() returns. It looks
> like a variable local to aio_poll() would suffice.

Yes, it could be local but we'd have to allocate a new GArray, grow it
incrementally, and then free it again for each aio_poll().  BTW
g_array_set_size() doesn't free anything, it just zeroes the
pollfds->len field.
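
A minimal standalone example of that reuse pattern (plain GLib, nothing
QEMU-specific; hypothetical code just to show that the buffer survives
the g_array_set_size() call):

    #include <glib.h>

    int main(void)
    {
        GArray *pollfds = g_array_new(FALSE, FALSE, sizeof(GPollFD));
        GPollFD pfd = { .fd = 0, .events = G_IO_IN };

        for (int i = 0; i < 3; i++) {
            g_array_set_size(pollfds, 0);       /* len = 0, allocation kept */
            g_array_append_val(pollfds, pfd);   /* no new allocation after
                                                 * the first pass */
            g_poll((GPollFD *)pollfds->data, pollfds->len, 0 /* no wait */);
        }

        g_array_free(pollfds, TRUE);
        return 0;
    }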

> > +                node->pfd.revents |= pfd->revents;
> 
> (q4) Question 4: any specific reason to use |= here (not counting
> general versatility)? Tying in with (q3), "node->pfd.revents" seems to
> be zero here.

You are right, I was being overcautious.

Patch

diff --git a/aio-posix.c b/aio-posix.c
index 35131a3..7769927 100644
--- a/aio-posix.c
+++ b/aio-posix.c
@@ -25,6 +25,7 @@  struct AioHandler
     IOHandler *io_write;
     AioFlushHandler *io_flush;
     int deleted;
+    int pollfds_idx;
     void *opaque;
     QLIST_ENTRY(AioHandler) node;
 };
@@ -85,6 +86,7 @@  void aio_set_fd_handler(AioContext *ctx,
         node->io_write = io_write;
         node->io_flush = io_flush;
         node->opaque = opaque;
+        node->pollfds_idx = -1;
 
         node->pfd.events = (io_read ? G_IO_IN | G_IO_HUP : 0);
         node->pfd.events |= (io_write ? G_IO_OUT : 0);
@@ -177,10 +179,7 @@  static bool aio_dispatch(AioContext *ctx)
 
 bool aio_poll(AioContext *ctx, bool blocking)
 {
-    static struct timeval tv0;
     AioHandler *node;
-    fd_set rdfds, wrfds;
-    int max_fd = -1;
     int ret;
     bool busy, progress;
 
@@ -206,12 +205,13 @@  bool aio_poll(AioContext *ctx, bool blocking)
 
     ctx->walking_handlers++;
 
-    FD_ZERO(&rdfds);
-    FD_ZERO(&wrfds);
+    g_array_set_size(ctx->pollfds, 0);
 
-    /* fill fd sets */
+    /* fill pollfds */
     busy = false;
     QLIST_FOREACH(node, &ctx->aio_handlers, node) {
+        node->pollfds_idx = -1;
+
         /* If there aren't pending AIO operations, don't invoke callbacks.
          * Otherwise, if there are no AIO requests, qemu_aio_wait() would
          * wait indefinitely.
@@ -222,13 +222,13 @@  bool aio_poll(AioContext *ctx, bool blocking)
             }
             busy = true;
         }
-        if (!node->deleted && node->io_read) {
-            FD_SET(node->pfd.fd, &rdfds);
-            max_fd = MAX(max_fd, node->pfd.fd + 1);
-        }
-        if (!node->deleted && node->io_write) {
-            FD_SET(node->pfd.fd, &wrfds);
-            max_fd = MAX(max_fd, node->pfd.fd + 1);
+        if (!node->deleted && node->pfd.events) {
+            GPollFD pfd = {
+                .fd = node->pfd.fd,
+                .events = node->pfd.events,
+            };
+            node->pollfds_idx = ctx->pollfds->len;
+            g_array_append_val(ctx->pollfds, pfd);
         }
     }
 
@@ -240,41 +240,22 @@  bool aio_poll(AioContext *ctx, bool blocking)
     }
 
     /* wait until next event */
-    ret = select(max_fd, &rdfds, &wrfds, NULL, blocking ? NULL : &tv0);
+    ret = g_poll((GPollFD *)ctx->pollfds->data,
+                 ctx->pollfds->len,
+                 blocking ? -1 : 0);
 
     /* if we have any readable fds, dispatch event */
     if (ret > 0) {
-        /* we have to walk very carefully in case
-         * qemu_aio_set_fd_handler is called while we're walking */
-        node = QLIST_FIRST(&ctx->aio_handlers);
-        while (node) {
-            AioHandler *tmp;
-
-            ctx->walking_handlers++;
-
-            if (!node->deleted &&
-                FD_ISSET(node->pfd.fd, &rdfds) &&
-                node->io_read) {
-                node->io_read(node->opaque);
-                progress = true;
-            }
-            if (!node->deleted &&
-                FD_ISSET(node->pfd.fd, &wrfds) &&
-                node->io_write) {
-                node->io_write(node->opaque);
-                progress = true;
-            }
-
-            tmp = node;
-            node = QLIST_NEXT(node, node);
-
-            ctx->walking_handlers--;
-
-            if (!ctx->walking_handlers && tmp->deleted) {
-                QLIST_REMOVE(tmp, node);
-                g_free(tmp);
+        QLIST_FOREACH(node, &ctx->aio_handlers, node) {
+            if (node->pollfds_idx != -1) {
+                GPollFD *pfd = &g_array_index(ctx->pollfds, GPollFD,
+                                              node->pollfds_idx);
+                node->pfd.revents |= pfd->revents;
             }
         }
+        if (aio_dispatch(ctx)) {
+            progress = true;
+        }
     }
 
     assert(progress || busy);
diff --git a/async.c b/async.c
index 72d268a..f2d47ba 100644
--- a/async.c
+++ b/async.c
@@ -174,6 +174,7 @@  aio_ctx_finalize(GSource     *source)
 
     aio_set_event_notifier(ctx, &ctx->notifier, NULL, NULL);
     event_notifier_cleanup(&ctx->notifier);
+    g_array_free(ctx->pollfds, TRUE);
 }
 
 static GSourceFuncs aio_source_funcs = {
@@ -198,6 +199,7 @@  AioContext *aio_context_new(void)
 {
     AioContext *ctx;
     ctx = (AioContext *) g_source_new(&aio_source_funcs, sizeof(AioContext));
+    ctx->pollfds = g_array_new(FALSE, FALSE, sizeof(GPollFD));
     event_notifier_init(&ctx->notifier, false);
     aio_set_event_notifier(ctx, &ctx->notifier, 
                            (EventNotifierHandler *)
diff --git a/include/block/aio.h b/include/block/aio.h
index 8eda924..5b54d38 100644
--- a/include/block/aio.h
+++ b/include/block/aio.h
@@ -63,6 +63,9 @@  typedef struct AioContext {
 
     /* Used for aio_notify.  */
     EventNotifier notifier;
+
+    /* GPollFDs for aio_poll() */
+    GArray *pollfds;
 } AioContext;
 
 /* Returns 1 if there are still outstanding AIO requests; 0 otherwise */