diff mbox

[v6,06/18] aio: stop using .io_flush()

Message ID 1374765505-14356-7-git-send-email-stefanha@redhat.com
State New
Headers show

Commit Message

Stefan Hajnoczi July 25, 2013, 3:18 p.m. UTC
Now that aio_poll() users check their termination condition themselves,
it is no longer necessary to call .io_flush() handlers.

The behavior of aio_poll() changes as follows:

1. .io_flush() is no longer invoked and file descriptors are *always*
monitored.  Previously returning 0 from .io_flush() would skip this file
descriptor.

Due to this change it is essential to check that requests are pending
before calling qemu_aio_wait().  Failure to do so means we block, for
example, waiting for an idle iSCSI socket to become readable when there
are no requests.  Currently all qemu_aio_wait()/aio_poll() callers check
before calling.

2. aio_poll() now returns true if progress was made (BH or fd handlers
executed) and false otherwise.  Previously it would return true whenever
'busy', which means that .io_flush() returned true.  The 'busy' concept
no longer exists so just progress is returned.

Due to this change we need to update tests/test-aio.c which asserts
aio_poll() return values.  Note that QEMU doesn't actually rely on these
return values so only tests/test-aio.c cares.

Note that ctx->notifier, the EventNotifier fd used for aio_notify(), is
now handled as a special case.  This is a little ugly but maintains
aio_poll() semantics, i.e. aio_notify() does not count as 'progress' and
aio_poll() avoids blocking when the user has not set any fd handlers yet.

Patches after this remove .io_flush() handler code until we can finally
drop the io_flush arguments to aio_set_fd_handler() and friends.

Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
 aio-posix.c      | 29 +++++++++--------------------
 aio-win32.c      | 34 ++++++++++++++--------------------
 tests/test-aio.c | 10 +++++-----
 3 files changed, 28 insertions(+), 45 deletions(-)

Comments

Jeff Cody July 26, 2013, 4:10 p.m. UTC | #1
On Thu, Jul 25, 2013 at 05:18:13PM +0200, Stefan Hajnoczi wrote:
> Now that aio_poll() users check their termination condition themselves,
> it is no longer necessary to call .io_flush() handlers.
> 
> The behavior of aio_poll() changes as follows:
> 
> 1. .io_flush() is no longer invoked and file descriptors are *always*
> monitored.  Previously returning 0 from .io_flush() would skip this file
> descriptor.
> 
> Due to this change it is essential to check that requests are pending
> before calling qemu_aio_wait().  Failure to do so means we block, for
> example, waiting for an idle iSCSI socket to become readable when there
> are no requests.  Currently all qemu_aio_wait()/aio_poll() callers check
> before calling.
> 
> 2. aio_poll() now returns true if progress was made (BH or fd handlers
> executed) and false otherwise.  Previously it would return true whenever
> 'busy', which means that .io_flush() returned true.  The 'busy' concept
> no longer exists so just progress is returned.
> 
> Due to this change we need to update tests/test-aio.c which asserts
> aio_poll() return values.  Note that QEMU doesn't actually rely on these
> return values so only tests/test-aio.c cares.
> 
> Note that ctx->notifier, the EventNotifier fd used for aio_notify(), is
> now handled as a special case.  This is a little ugly but maintains
> aio_poll() semantics, i.e. aio_notify() does not count as 'progress' and
> aio_poll() avoids blocking when the user has not set any fd handlers yet.
> 
> Patches after this remove .io_flush() handler code until we can finally
> drop the io_flush arguments to aio_set_fd_handler() and friends.
> 
> Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>
> Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
> ---
>  aio-posix.c      | 29 +++++++++--------------------
>  aio-win32.c      | 34 ++++++++++++++--------------------
>  tests/test-aio.c | 10 +++++-----
>  3 files changed, 28 insertions(+), 45 deletions(-)
> 
> diff --git a/aio-posix.c b/aio-posix.c
> index b68eccd..7d66048 100644
> --- a/aio-posix.c
> +++ b/aio-posix.c
> @@ -23,7 +23,6 @@ struct AioHandler
>      GPollFD pfd;
>      IOHandler *io_read;
>      IOHandler *io_write;
> -    AioFlushHandler *io_flush;
>      int deleted;
>      int pollfds_idx;
>      void *opaque;
> @@ -84,7 +83,6 @@ void aio_set_fd_handler(AioContext *ctx,
>          /* Update handler with latest information */
>          node->io_read = io_read;
>          node->io_write = io_write;
> -        node->io_flush = io_flush;
>          node->opaque = opaque;
>          node->pollfds_idx = -1;
>  
> @@ -147,7 +145,11 @@ static bool aio_dispatch(AioContext *ctx)
>              (revents & (G_IO_IN | G_IO_HUP | G_IO_ERR)) &&
>              node->io_read) {
>              node->io_read(node->opaque);
> -            progress = true;
> +
> +            /* aio_notify() does not count as progress */
> +            if (node->opaque != &ctx->notifier) {
> +                progress = true;
> +            }
>          }
>          if (!node->deleted &&
>              (revents & (G_IO_OUT | G_IO_ERR)) &&
> @@ -173,7 +175,7 @@ bool aio_poll(AioContext *ctx, bool blocking)
>  {
>      AioHandler *node;
>      int ret;
> -    bool busy, progress;
> +    bool progress;
>  
>      progress = false;
>  
> @@ -200,20 +202,8 @@ bool aio_poll(AioContext *ctx, bool blocking)
>      g_array_set_size(ctx->pollfds, 0);
>  
>      /* fill pollfds */
> -    busy = false;
>      QLIST_FOREACH(node, &ctx->aio_handlers, node) {
>          node->pollfds_idx = -1;
> -
> -        /* If there aren't pending AIO operations, don't invoke callbacks.
> -         * Otherwise, if there are no AIO requests, qemu_aio_wait() would
> -         * wait indefinitely.
> -         */
> -        if (!node->deleted && node->io_flush) {
> -            if (node->io_flush(node->opaque) == 0) {
> -                continue;
> -            }
> -            busy = true;
> -        }
>          if (!node->deleted && node->pfd.events) {
>              GPollFD pfd = {
>                  .fd = node->pfd.fd,
> @@ -226,8 +216,8 @@ bool aio_poll(AioContext *ctx, bool blocking)
>  
>      ctx->walking_handlers--;
>  
> -    /* No AIO operations?  Get us out of here */
> -    if (!busy) {
> +    /* early return if we only have the aio_notify() fd */
> +    if (ctx->pollfds->len == 1) {
>          return progress;
>      }
>  
> @@ -250,6 +240,5 @@ bool aio_poll(AioContext *ctx, bool blocking)
>          }
>      }
>  
> -    assert(progress || busy);
> -    return true;
> +    return progress;
>  }
> diff --git a/aio-win32.c b/aio-win32.c
> index 38723bf..4309c16 100644
> --- a/aio-win32.c
> +++ b/aio-win32.c
> @@ -23,7 +23,6 @@
>  struct AioHandler {
>      EventNotifier *e;
>      EventNotifierHandler *io_notify;
> -    AioFlushEventNotifierHandler *io_flush;
>      GPollFD pfd;
>      int deleted;
>      QLIST_ENTRY(AioHandler) node;
> @@ -73,7 +72,6 @@ void aio_set_event_notifier(AioContext *ctx,
>          }
>          /* Update handler with latest information */
>          node->io_notify = io_notify;
> -        node->io_flush = io_flush;
>      }
>  
>      aio_notify(ctx);
> @@ -96,7 +94,7 @@ bool aio_poll(AioContext *ctx, bool blocking)
>  {
>      AioHandler *node;
>      HANDLE events[MAXIMUM_WAIT_OBJECTS + 1];
> -    bool busy, progress;
> +    bool progress;
>      int count;
>  
>      progress = false;
> @@ -126,7 +124,11 @@ bool aio_poll(AioContext *ctx, bool blocking)
>          if (node->pfd.revents && node->io_notify) {
>              node->pfd.revents = 0;
>              node->io_notify(node->e);
> -            progress = true;
> +
> +            /* aio_notify() does not count as progress */
> +            if (node->opaque != &ctx->notifier) {
> +                progress = true;
> +            }
>          }
>  
>          tmp = node;
> @@ -147,19 +149,8 @@ bool aio_poll(AioContext *ctx, bool blocking)
>      ctx->walking_handlers++;
>  
>      /* fill fd sets */
> -    busy = false;
>      count = 0;
>      QLIST_FOREACH(node, &ctx->aio_handlers, node) {
> -        /* If there aren't pending AIO operations, don't invoke callbacks.
> -         * Otherwise, if there are no AIO requests, qemu_aio_wait() would
> -         * wait indefinitely.
> -         */
> -        if (!node->deleted && node->io_flush) {
> -            if (node->io_flush(node->e) == 0) {
> -                continue;
> -            }
> -            busy = true;
> -        }
>          if (!node->deleted && node->io_notify) {
>              events[count++] = event_notifier_get_handle(node->e);
>          }
> @@ -167,8 +158,8 @@ bool aio_poll(AioContext *ctx, bool blocking)
>  
>      ctx->walking_handlers--;
>  
> -    /* No AIO operations?  Get us out of here */
> -    if (!busy) {
> +    /* early return if we only have the aio_notify() fd */
> +    if (count == 1) {
>          return progress;
>      }

Minor point, but could we simplify it a bit if the above 3 lines were
removed, and the following while() case (not shown in diff context)
was just changed to while (count > 1) instead of while(count > 0).
I.e.:

-    /* No AIO operations?  Get us out of here */
-    if (!busy) {
-        return progress;
-    }

-   /* wait until next event */
-   while (count > 0) {
+   /* wait until next event that is not aio_notify() */
+   while (count > 1) {

This would assume of course that aio_notify() is always first in the
list.


>  
> @@ -196,7 +187,11 @@ bool aio_poll(AioContext *ctx, bool blocking)
>                  event_notifier_get_handle(node->e) == events[ret - WAIT_OBJECT_0] &&
>                  node->io_notify) {
>                  node->io_notify(node->e);
> -                progress = true;
> +
> +                /* aio_notify() does not count as progress */
> +                if (node->opaque != &ctx->notifier) {
> +                    progress = true;
> +                }

^ We could then also drop this part of the patch, too, right?


>              }
>  
>              tmp = node;
> @@ -214,6 +209,5 @@ bool aio_poll(AioContext *ctx, bool blocking)
>          events[ret - WAIT_OBJECT_0] = events[--count];
>      }
>  
> -    assert(progress || busy);
> -    return true;
> +    return progress;
>  }
> diff --git a/tests/test-aio.c b/tests/test-aio.c
> index 20bf5e6..1251952 100644
> --- a/tests/test-aio.c
> +++ b/tests/test-aio.c
> @@ -254,7 +254,7 @@ static void test_wait_event_notifier(void)
>      EventNotifierTestData data = { .n = 0, .active = 1 };
>      event_notifier_init(&data.e, false);
>      aio_set_event_notifier(ctx, &data.e, event_ready_cb, event_active_cb);
> -    g_assert(aio_poll(ctx, false));
> +    g_assert(!aio_poll(ctx, false));
>      g_assert_cmpint(data.n, ==, 0);
>      g_assert_cmpint(data.active, ==, 1);
>  
> @@ -279,7 +279,7 @@ static void test_flush_event_notifier(void)
>      EventNotifierTestData data = { .n = 0, .active = 10, .auto_set = true };
>      event_notifier_init(&data.e, false);
>      aio_set_event_notifier(ctx, &data.e, event_ready_cb, event_active_cb);
> -    g_assert(aio_poll(ctx, false));
> +    g_assert(!aio_poll(ctx, false));
>      g_assert_cmpint(data.n, ==, 0);
>      g_assert_cmpint(data.active, ==, 10);
>  
> @@ -313,7 +313,7 @@ static void test_wait_event_notifier_noflush(void)
>      /* Until there is an active descriptor, aio_poll may or may not call
>       * event_ready_cb.  Still, it must not block.  */
>      event_notifier_set(&data.e);
> -    g_assert(!aio_poll(ctx, true));
> +    g_assert(aio_poll(ctx, true));
>      data.n = 0;
>  
>      /* An active event notifier forces aio_poll to look at EventNotifiers.  */
> @@ -323,13 +323,13 @@ static void test_wait_event_notifier_noflush(void)
>      event_notifier_set(&data.e);
>      g_assert(aio_poll(ctx, false));
>      g_assert_cmpint(data.n, ==, 1);
> -    g_assert(aio_poll(ctx, false));
> +    g_assert(!aio_poll(ctx, false));
>      g_assert_cmpint(data.n, ==, 1);
>  
>      event_notifier_set(&data.e);
>      g_assert(aio_poll(ctx, false));
>      g_assert_cmpint(data.n, ==, 2);
> -    g_assert(aio_poll(ctx, false));
> +    g_assert(!aio_poll(ctx, false));
>      g_assert_cmpint(data.n, ==, 2);
>  
>      event_notifier_set(&dummy.e);
> -- 
> 1.8.1.4
> 
>
Paolo Bonzini July 26, 2013, 4:25 p.m. UTC | #2
Il 26/07/2013 18:10, Jeff Cody ha scritto:
> +   /* wait until next event that is not aio_notify() */
> +   while (count > 1) {
> 
> This would assume of course that aio_notify() is always first in the
> list.

No, you cannot assume that.  Any bdrv can be opened early (with -drive)
and then associated to an AioContext that is created later (when the
dataplane thread starts).

Paolo
Jeff Cody July 26, 2013, 4:36 p.m. UTC | #3
On Fri, Jul 26, 2013 at 06:25:18PM +0200, Paolo Bonzini wrote:
> Il 26/07/2013 18:10, Jeff Cody ha scritto:
> > +   /* wait until next event that is not aio_notify() */
> > +   while (count > 1) {
> > 
> > This would assume of course that aio_notify() is always first in the
> > list.
> 
> No, you cannot assume that.  Any bdrv can be opened early (with -drive)
> and then associated to an AioContext that is created later (when the
> dataplane thread starts).
> 
> Paolo
>

OK, good point, thanks.

Jeff
Jeff Cody July 26, 2013, 4:43 p.m. UTC | #4
On Thu, Jul 25, 2013 at 05:18:13PM +0200, Stefan Hajnoczi wrote:
> Now that aio_poll() users check their termination condition themselves,
> it is no longer necessary to call .io_flush() handlers.
> 
> The behavior of aio_poll() changes as follows:
> 
> 1. .io_flush() is no longer invoked and file descriptors are *always*
> monitored.  Previously returning 0 from .io_flush() would skip this file
> descriptor.
> 
> Due to this change it is essential to check that requests are pending
> before calling qemu_aio_wait().  Failure to do so means we block, for
> example, waiting for an idle iSCSI socket to become readable when there
> are no requests.  Currently all qemu_aio_wait()/aio_poll() callers check
> before calling.
> 
> 2. aio_poll() now returns true if progress was made (BH or fd handlers
> executed) and false otherwise.  Previously it would return true whenever
> 'busy', which means that .io_flush() returned true.  The 'busy' concept
> no longer exists so just progress is returned.
> 
> Due to this change we need to update tests/test-aio.c which asserts
> aio_poll() return values.  Note that QEMU doesn't actually rely on these
> return values so only tests/test-aio.c cares.
> 
> Note that ctx->notifier, the EventNotifier fd used for aio_notify(), is
> now handled as a special case.  This is a little ugly but maintains
> aio_poll() semantics, i.e. aio_notify() does not count as 'progress' and
> aio_poll() avoids blocking when the user has not set any fd handlers yet.
> 
> Patches after this remove .io_flush() handler code until we can finally
> drop the io_flush arguments to aio_set_fd_handler() and friends.
> 
> Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>
> Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
> ---
>  aio-posix.c      | 29 +++++++++--------------------
>  aio-win32.c      | 34 ++++++++++++++--------------------
>  tests/test-aio.c | 10 +++++-----
>  3 files changed, 28 insertions(+), 45 deletions(-)
> 

Reviewed-by: Jeff Cody <jcody@redhat.com>
Wayne Xia July 29, 2013, 8:08 a.m. UTC | #5
> Now that aio_poll() users check their termination condition themselves,
> it is no longer necessary to call .io_flush() handlers.
> 
> The behavior of aio_poll() changes as follows:
> 
> 1. .io_flush() is no longer invoked and file descriptors are *always*
> monitored.  Previously returning 0 from .io_flush() would skip this file
> descriptor.
> 
> Due to this change it is essential to check that requests are pending
> before calling qemu_aio_wait().  Failure to do so means we block, for
> example, waiting for an idle iSCSI socket to become readable when there
> are no requests.  Currently all qemu_aio_wait()/aio_poll() callers check
> before calling.
> 
> 2. aio_poll() now returns true if progress was made (BH or fd handlers
> executed) and false otherwise.  Previously it would return true whenever
> 'busy', which means that .io_flush() returned true.  The 'busy' concept
> no longer exists so just progress is returned.
> 
> Due to this change we need to update tests/test-aio.c which asserts
> aio_poll() return values.  Note that QEMU doesn't actually rely on these
> return values so only tests/test-aio.c cares.
> 
> Note that ctx->notifier, the EventNotifier fd used for aio_notify(), is
> now handled as a special case.  This is a little ugly but maintains
> aio_poll() semantics, i.e. aio_notify() does not count as 'progress' and
> aio_poll() avoids blocking when the user has not set any fd handlers yet.
> 
  I guess the goal is to distinguish qemu's internally used fds from the
real, meaningful external fds such as sockets? How about distinguishing them
with different GSources?


> Patches after this remove .io_flush() handler code until we can finally
> drop the io_flush arguments to aio_set_fd_handler() and friends.
> 
> Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>
> Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
> ---
>   aio-posix.c      | 29 +++++++++--------------------
>   aio-win32.c      | 34 ++++++++++++++--------------------
>   tests/test-aio.c | 10 +++++-----
>   3 files changed, 28 insertions(+), 45 deletions(-)
>
Stefan Hajnoczi July 29, 2013, 2:32 p.m. UTC | #6
On Mon, Jul 29, 2013 at 04:08:34PM +0800, Wenchao Xia wrote:
> > Now that aio_poll() users check their termination condition themselves,
> > it is no longer necessary to call .io_flush() handlers.
> > 
> > The behavior of aio_poll() changes as follows:
> > 
> > 1. .io_flush() is no longer invoked and file descriptors are *always*
> > monitored.  Previously returning 0 from .io_flush() would skip this file
> > descriptor.
> > 
> > Due to this change it is essential to check that requests are pending
> > before calling qemu_aio_wait().  Failure to do so means we block, for
> > example, waiting for an idle iSCSI socket to become readable when there
> > are no requests.  Currently all qemu_aio_wait()/aio_poll() callers check
> > before calling.
> > 
> > 2. aio_poll() now returns true if progress was made (BH or fd handlers
> > executed) and false otherwise.  Previously it would return true whenever
> > 'busy', which means that .io_flush() returned true.  The 'busy' concept
> > no longer exists so just progress is returned.
> > 
> > Due to this change we need to update tests/test-aio.c which asserts
> > aio_poll() return values.  Note that QEMU doesn't actually rely on these
> > return values so only tests/test-aio.c cares.
> > 
> > Note that ctx->notifier, the EventNotifier fd used for aio_notify(), is
> > now handled as a special case.  This is a little ugly but maintains
> > aio_poll() semantics, i.e. aio_notify() does not count as 'progress' and
> > aio_poll() avoids blocking when the user has not set any fd handlers yet.
> > 
>   I guess the goal is to distinguish qemu's internally used fds from the
> real, meaningful external fds such as sockets?

Exactly, the AioContext's own internal EventNotifier should not count.
For example, aio_poll() must not block if the user has not added any
AioHandlers yet.

> How about distinguishing them with different GSources?

AioContext itself is designed as a GSource.  Internally it does not use
GSource or the glib event loop.

Introducing the glib event loop here would be a big change.  I hope in
the future we can unify main-loop.c and AioContext.  At that point we
might operate on GSources.

Stefan
diff mbox

Patch

diff --git a/aio-posix.c b/aio-posix.c
index b68eccd..7d66048 100644
--- a/aio-posix.c
+++ b/aio-posix.c
@@ -23,7 +23,6 @@  struct AioHandler
     GPollFD pfd;
     IOHandler *io_read;
     IOHandler *io_write;
-    AioFlushHandler *io_flush;
     int deleted;
     int pollfds_idx;
     void *opaque;
@@ -84,7 +83,6 @@  void aio_set_fd_handler(AioContext *ctx,
         /* Update handler with latest information */
         node->io_read = io_read;
         node->io_write = io_write;
-        node->io_flush = io_flush;
         node->opaque = opaque;
         node->pollfds_idx = -1;
 
@@ -147,7 +145,11 @@  static bool aio_dispatch(AioContext *ctx)
             (revents & (G_IO_IN | G_IO_HUP | G_IO_ERR)) &&
             node->io_read) {
             node->io_read(node->opaque);
-            progress = true;
+
+            /* aio_notify() does not count as progress */
+            if (node->opaque != &ctx->notifier) {
+                progress = true;
+            }
         }
         if (!node->deleted &&
             (revents & (G_IO_OUT | G_IO_ERR)) &&
@@ -173,7 +175,7 @@  bool aio_poll(AioContext *ctx, bool blocking)
 {
     AioHandler *node;
     int ret;
-    bool busy, progress;
+    bool progress;
 
     progress = false;
 
@@ -200,20 +202,8 @@  bool aio_poll(AioContext *ctx, bool blocking)
     g_array_set_size(ctx->pollfds, 0);
 
     /* fill pollfds */
-    busy = false;
     QLIST_FOREACH(node, &ctx->aio_handlers, node) {
         node->pollfds_idx = -1;
-
-        /* If there aren't pending AIO operations, don't invoke callbacks.
-         * Otherwise, if there are no AIO requests, qemu_aio_wait() would
-         * wait indefinitely.
-         */
-        if (!node->deleted && node->io_flush) {
-            if (node->io_flush(node->opaque) == 0) {
-                continue;
-            }
-            busy = true;
-        }
         if (!node->deleted && node->pfd.events) {
             GPollFD pfd = {
                 .fd = node->pfd.fd,
@@ -226,8 +216,8 @@  bool aio_poll(AioContext *ctx, bool blocking)
 
     ctx->walking_handlers--;
 
-    /* No AIO operations?  Get us out of here */
-    if (!busy) {
+    /* early return if we only have the aio_notify() fd */
+    if (ctx->pollfds->len == 1) {
         return progress;
     }
 
@@ -250,6 +240,5 @@  bool aio_poll(AioContext *ctx, bool blocking)
         }
     }
 
-    assert(progress || busy);
-    return true;
+    return progress;
 }
diff --git a/aio-win32.c b/aio-win32.c
index 38723bf..4309c16 100644
--- a/aio-win32.c
+++ b/aio-win32.c
@@ -23,7 +23,6 @@ 
 struct AioHandler {
     EventNotifier *e;
     EventNotifierHandler *io_notify;
-    AioFlushEventNotifierHandler *io_flush;
     GPollFD pfd;
     int deleted;
     QLIST_ENTRY(AioHandler) node;
@@ -73,7 +72,6 @@  void aio_set_event_notifier(AioContext *ctx,
         }
         /* Update handler with latest information */
         node->io_notify = io_notify;
-        node->io_flush = io_flush;
     }
 
     aio_notify(ctx);
@@ -96,7 +94,7 @@  bool aio_poll(AioContext *ctx, bool blocking)
 {
     AioHandler *node;
     HANDLE events[MAXIMUM_WAIT_OBJECTS + 1];
-    bool busy, progress;
+    bool progress;
     int count;
 
     progress = false;
@@ -126,7 +124,11 @@  bool aio_poll(AioContext *ctx, bool blocking)
         if (node->pfd.revents && node->io_notify) {
             node->pfd.revents = 0;
             node->io_notify(node->e);
-            progress = true;
+
+            /* aio_notify() does not count as progress */
+            if (node->opaque != &ctx->notifier) {
+                progress = true;
+            }
         }
 
         tmp = node;
@@ -147,19 +149,8 @@  bool aio_poll(AioContext *ctx, bool blocking)
     ctx->walking_handlers++;
 
     /* fill fd sets */
-    busy = false;
     count = 0;
     QLIST_FOREACH(node, &ctx->aio_handlers, node) {
-        /* If there aren't pending AIO operations, don't invoke callbacks.
-         * Otherwise, if there are no AIO requests, qemu_aio_wait() would
-         * wait indefinitely.
-         */
-        if (!node->deleted && node->io_flush) {
-            if (node->io_flush(node->e) == 0) {
-                continue;
-            }
-            busy = true;
-        }
         if (!node->deleted && node->io_notify) {
             events[count++] = event_notifier_get_handle(node->e);
         }
@@ -167,8 +158,8 @@  bool aio_poll(AioContext *ctx, bool blocking)
 
     ctx->walking_handlers--;
 
-    /* No AIO operations?  Get us out of here */
-    if (!busy) {
+    /* early return if we only have the aio_notify() fd */
+    if (count == 1) {
         return progress;
     }
 
@@ -196,7 +187,11 @@  bool aio_poll(AioContext *ctx, bool blocking)
                 event_notifier_get_handle(node->e) == events[ret - WAIT_OBJECT_0] &&
                 node->io_notify) {
                 node->io_notify(node->e);
-                progress = true;
+
+                /* aio_notify() does not count as progress */
+                if (node->opaque != &ctx->notifier) {
+                    progress = true;
+                }
             }
 
             tmp = node;
@@ -214,6 +209,5 @@  bool aio_poll(AioContext *ctx, bool blocking)
         events[ret - WAIT_OBJECT_0] = events[--count];
     }
 
-    assert(progress || busy);
-    return true;
+    return progress;
 }
diff --git a/tests/test-aio.c b/tests/test-aio.c
index 20bf5e6..1251952 100644
--- a/tests/test-aio.c
+++ b/tests/test-aio.c
@@ -254,7 +254,7 @@  static void test_wait_event_notifier(void)
     EventNotifierTestData data = { .n = 0, .active = 1 };
     event_notifier_init(&data.e, false);
     aio_set_event_notifier(ctx, &data.e, event_ready_cb, event_active_cb);
-    g_assert(aio_poll(ctx, false));
+    g_assert(!aio_poll(ctx, false));
     g_assert_cmpint(data.n, ==, 0);
     g_assert_cmpint(data.active, ==, 1);
 
@@ -279,7 +279,7 @@  static void test_flush_event_notifier(void)
     EventNotifierTestData data = { .n = 0, .active = 10, .auto_set = true };
     event_notifier_init(&data.e, false);
     aio_set_event_notifier(ctx, &data.e, event_ready_cb, event_active_cb);
-    g_assert(aio_poll(ctx, false));
+    g_assert(!aio_poll(ctx, false));
     g_assert_cmpint(data.n, ==, 0);
     g_assert_cmpint(data.active, ==, 10);
 
@@ -313,7 +313,7 @@  static void test_wait_event_notifier_noflush(void)
     /* Until there is an active descriptor, aio_poll may or may not call
      * event_ready_cb.  Still, it must not block.  */
     event_notifier_set(&data.e);
-    g_assert(!aio_poll(ctx, true));
+    g_assert(aio_poll(ctx, true));
     data.n = 0;
 
     /* An active event notifier forces aio_poll to look at EventNotifiers.  */
@@ -323,13 +323,13 @@  static void test_wait_event_notifier_noflush(void)
     event_notifier_set(&data.e);
     g_assert(aio_poll(ctx, false));
     g_assert_cmpint(data.n, ==, 1);
-    g_assert(aio_poll(ctx, false));
+    g_assert(!aio_poll(ctx, false));
     g_assert_cmpint(data.n, ==, 1);
 
     event_notifier_set(&data.e);
     g_assert(aio_poll(ctx, false));
     g_assert_cmpint(data.n, ==, 2);
-    g_assert(aio_poll(ctx, false));
+    g_assert(!aio_poll(ctx, false));
     g_assert_cmpint(data.n, ==, 2);
 
     event_notifier_set(&dummy.e);