[01/10] AioContext: take bottom halves into account when computing aio_poll timeout

Message ID 1404899590-24973-2-git-send-email-pbonzini@redhat.com
State New

Commit Message

Paolo Bonzini July 9, 2014, 9:53 a.m. UTC
Right now, QEMU invokes aio_bh_poll before the "poll" phase
of aio_poll.  It is simpler to do it afterwards and skip the
"poll" phase altogether when the OS-dependent parts of AioContext
are invoked from GSource.  This way, AioContext behaves more
consistently whether it is used as a GSource or stand-alone.

As a start, take bottom halves into account when computing the
poll timeout.  If a bottom half is ready, do a non-blocking
poll.  As a side effect, this makes idle bottom halves work
with aio_poll; an improvement, but not really an important
one since they are deprecated.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
---
 aio-posix.c         |  2 +-
 aio-win32.c         |  4 ++--
 async.c             | 32 ++++++++++++++++++--------------
 include/block/aio.h |  8 ++++++++
 4 files changed, 29 insertions(+), 17 deletions(-)
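
A minimal usage sketch of the resulting behavior, built on the public
bottom-half API (the example() and bh_cb() names are hypothetical,
for illustration only, and not part of the patch):

#include "block/aio.h"

static void bh_cb(void *opaque)
{
    *(bool *)opaque = true;
}

static void example(AioContext *ctx)
{
    bool fired = false;
    QEMUBH *bh = aio_bh_new(ctx, bh_cb, &fired);

    qemu_bh_schedule(bh);

    /* With this patch, aio_compute_timeout() returns 0 while a
     * non-idle bottom half is scheduled, so the poll below does
     * not block even if no file descriptor is ready. */
    aio_poll(ctx, true);

    qemu_bh_delete(bh);
}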

Comments

Stefan Hajnoczi Aug. 1, 2014, 2:34 p.m. UTC | #1
On Wed, Jul 09, 2014 at 11:53:01AM +0200, Paolo Bonzini wrote:
> diff --git a/async.c b/async.c
> index 34af0b2..ac40eab 100644
> --- a/async.c
> +++ b/async.c
> @@ -152,39 +152,43 @@ void qemu_bh_delete(QEMUBH *bh)
>      bh->deleted = 1;
>  }
>  
> -static gboolean
> -aio_ctx_prepare(GSource *source, gint    *timeout)
> +int
> +aio_compute_timeout(AioContext *ctx)

The return value is now in nanoseconds, so a 32-bit int doesn't offer
much range (only about 2.1 seconds for a signed int).

Any reason to use int instead of int64_t as used by the timer API?
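
For reference, a quick standalone check of that range (nothing from the
patch, just INT_MAX expressed in seconds):

#include <limits.h>
#include <stdio.h>

int main(void)
{
    /* INT_MAX nanoseconds is only about 2.147 seconds. */
    printf("INT_MAX = %d ns = %.3f s\n", INT_MAX, INT_MAX / 1e9);
    return 0;
}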

Patch

diff --git a/aio-posix.c b/aio-posix.c
index 2eada2e..55706f8 100644
--- a/aio-posix.c
+++ b/aio-posix.c
@@ -249,7 +249,7 @@ bool aio_poll(AioContext *ctx, bool blocking)
     /* wait until next event */
     ret = qemu_poll_ns((GPollFD *)ctx->pollfds->data,
                          ctx->pollfds->len,
-                         blocking ? timerlistgroup_deadline_ns(&ctx->tlg) : 0);
+                         blocking ? aio_compute_timeout(ctx) : 0);
 
     /* if we have any readable fds, dispatch event */
     if (ret > 0) {
diff --git a/aio-win32.c b/aio-win32.c
index c12f61e..fe7ee5b 100644
--- a/aio-win32.c
+++ b/aio-win32.c
@@ -165,8 +165,8 @@ bool aio_poll(AioContext *ctx, bool blocking)
     while (count > 0) {
         int ret;
 
-        timeout = blocking ?
-            qemu_timeout_ns_to_ms(timerlistgroup_deadline_ns(&ctx->tlg)) : 0;
+        timeout = blocking
+            ? qemu_timeout_ns_to_ms(aio_compute_timeout(ctx)) : 0;
         ret = WaitForMultipleObjects(count, events, FALSE, timeout);
 
         /* if we have any signaled events, dispatch event */
diff --git a/async.c b/async.c
index 34af0b2..ac40eab 100644
--- a/async.c
+++ b/async.c
@@ -152,39 +152,43 @@ void qemu_bh_delete(QEMUBH *bh)
     bh->deleted = 1;
 }
 
-static gboolean
-aio_ctx_prepare(GSource *source, gint    *timeout)
+int
+aio_compute_timeout(AioContext *ctx)
 {
-    AioContext *ctx = (AioContext *) source;
+    int64_t deadline;
+    int timeout = -1;
     QEMUBH *bh;
-    int deadline;
 
-    /* We assume there is no timeout already supplied */
-    *timeout = -1;
     for (bh = ctx->first_bh; bh; bh = bh->next) {
         if (!bh->deleted && bh->scheduled) {
             if (bh->idle) {
                 /* idle bottom halves will be polled at least
                  * every 10ms */
-                *timeout = 10;
+                timeout = 10000000;
             } else {
                 /* non-idle bottom halves will be executed
                  * immediately */
-                *timeout = 0;
-                return true;
+                return 0;
             }
         }
     }
 
-    deadline = qemu_timeout_ns_to_ms(timerlistgroup_deadline_ns(&ctx->tlg));
+    deadline = timerlistgroup_deadline_ns(&ctx->tlg);
     if (deadline == 0) {
-        *timeout = 0;
-        return true;
+        return 0;
     } else {
-        *timeout = qemu_soonest_timeout(*timeout, deadline);
+        return qemu_soonest_timeout(timeout, deadline);
     }
+}
 
-    return false;
+static gboolean
+aio_ctx_prepare(GSource *source, gint    *timeout)
+{
+    AioContext *ctx = (AioContext *) source;
+
+    /* We assume there is no timeout already supplied */
+    *timeout = qemu_timeout_ns_to_ms(aio_compute_timeout(ctx));
+    return *timeout == 0;
 }
 
 static gboolean
diff --git a/include/block/aio.h b/include/block/aio.h
index c23de3c..7eeb961 100644
--- a/include/block/aio.h
+++ b/include/block/aio.h
@@ -303,4 +303,12 @@ static inline void aio_timer_init(AioContext *ctx,
     timer_init(ts, ctx->tlg.tl[type], scale, cb, opaque);
 }
 
+/**
+ * aio_compute_timeout:
+ * @ctx: the aio context
+ *
+ * Compute the timeout that a blocking aio_poll should use.
+ */
+int aio_compute_timeout(AioContext *ctx);
+
 #endif