[v4,01/17] block: stop relying on io_flush() in bdrv_drain_all()

Message ID 1371210243-6099-2-git-send-email-stefanha@redhat.com
State New

Commit Message

Stefan Hajnoczi June 14, 2013, 11:43 a.m. UTC
If a block driver has no file descriptors to monitor but there are still
active requests, it can return 1 from .io_flush().  This is used to spin
during synchronous I/O.
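
For illustration, a minimal sketch of the old callback shape; the
ExampleDriverState type and its in_flight counter are hypothetical,
standing in for a driver that registered .io_flush() through the old
qemu_aio_set_fd_handler() interface:

    /* Hypothetical driver state; real drivers track in-flight
     * requests in their own structures. */
    typedef struct ExampleDriverState {
        int in_flight;
    } ExampleDriverState;

    /* Old-style .io_flush() callback.  Returning nonzero tells
     * qemu_aio_wait() to keep spinning because requests are still
     * active, even when no file descriptor is ready to be serviced. */
    static int example_io_flush(void *opaque)
    {
        ExampleDriverState *s = opaque;
        return s->in_flight > 0;
    }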

Stop relying on .io_flush() and instead check
QLIST_EMPTY(&bs->tracked_requests) to decide whether there are active
requests.
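
Condensed from the diff below, the new drain loop has this shape.  The
key point is aio_poll(): with blocking=true it waits until at least one
handler or bottom half runs, and it returns true whenever it made
progress, so even the final non-blocking poll can force another
iteration:

    bool busy = true;  /* run at least once so pending BHs execute */

    while (busy) {
        /* ... restart throttled requests (see the diff) ... */
        busy = bdrv_requests_pending_all();
        /* Block in aio_poll() only while requests are known to be
         * pending; otherwise poll once without blocking and stop if
         * no progress was made. */
        busy |= aio_poll(qemu_get_aio_context(), busy);
    }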

This is the first step in removing .io_flush() so that event loops no
longer need to have the concept of synchronous I/O.  Eventually we may
be able to kill synchronous I/O completely by running everything in a
coroutine, but that is future work.

Note this patch moves bs->throttled_reqs initialization to bdrv_new() so
that bdrv_requests_pending(bs) can safely access it.  In practice bs is
g_malloc0() so the memory is already zeroed but it's safer to initialize
the queue properly.

In bdrv_delete() make sure to call bdrv_make_anon() *after* bdrv_close()
so that the device is still seen by bdrv_drain_all() when iterating
bdrv_states.

Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
 block.c | 50 +++++++++++++++++++++++++++++++++++++-------------
 1 file changed, 37 insertions(+), 13 deletions(-)

Comments

Paolo Bonzini June 27, 2013, 1:13 p.m. UTC | #1
On 14/06/2013 13:43, Stefan Hajnoczi wrote:
> If a block driver has no file descriptors to monitor but there are still
> active requests, it can return 1 from .io_flush().  This is used to spin
> during synchronous I/O.
> 
> Stop relying on .io_flush() and instead check
> QLIST_EMPTY(&bs->tracked_requests) to decide whether there are active
> requests.
> 
> This is the first step in removing .io_flush() so that event loops no
> longer need to have the concept of synchronous I/O.  Eventually we may
> be able to kill synchronous I/O completely by running everything in a
> coroutine, but that is future work.
> 
> Note this patch moves bs->throttled_reqs initialization to bdrv_new() so
> that bdrv_requests_pending(bs) can safely access it.  In practice bs is
> g_malloc0() so the memory is already zeroed but it's safer to initialize
> the queue properly.
> 
> In bdrv_delete() make sure to call bdrv_make_anon() *after* bdrv_close()
> so that the device is still seen by bdrv_drain_all() when iterating
> bdrv_states.

I wonder if this last change should be separated out and CCed to
qemu-stable.  It seems like a bug if you close a device that has pending
throttled operations.

Paolo

> Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
> ---
>  block.c | 50 +++++++++++++++++++++++++++++++++++++-------------
>  1 file changed, 37 insertions(+), 13 deletions(-)
> 
> diff --git a/block.c b/block.c
> index 79ad33d..04821d8 100644
> --- a/block.c
> +++ b/block.c
> @@ -148,7 +148,6 @@ static void bdrv_block_timer(void *opaque)
>  
>  void bdrv_io_limits_enable(BlockDriverState *bs)
>  {
> -    qemu_co_queue_init(&bs->throttled_reqs);
>      bs->block_timer = qemu_new_timer_ns(vm_clock, bdrv_block_timer, bs);
>      bs->io_limits_enabled = true;
>  }
> @@ -305,6 +304,7 @@ BlockDriverState *bdrv_new(const char *device_name)
>      }
>      bdrv_iostatus_disable(bs);
>      notifier_list_init(&bs->close_notifiers);
> +    qemu_co_queue_init(&bs->throttled_reqs);
>  
>      return bs;
>  }
> @@ -1412,6 +1412,35 @@ void bdrv_close_all(void)
>      }
>  }
>  
> +/* Check if any requests are in-flight (including throttled requests) */
> +static bool bdrv_requests_pending(BlockDriverState *bs)
> +{
> +    if (!QLIST_EMPTY(&bs->tracked_requests)) {
> +        return true;
> +    }
> +    if (!qemu_co_queue_empty(&bs->throttled_reqs)) {
> +        return true;
> +    }
> +    if (bs->file && bdrv_requests_pending(bs->file)) {
> +        return true;
> +    }
> +    if (bs->backing_hd && bdrv_requests_pending(bs->backing_hd)) {
> +        return true;
> +    }
> +    return false;
> +}
> +
> +static bool bdrv_requests_pending_all(void)
> +{
> +    BlockDriverState *bs;
> +    QTAILQ_FOREACH(bs, &bdrv_states, list) {
> +        if (bdrv_requests_pending(bs)) {
> +            return true;
> +        }
> +    }
> +    return false;
> +}
> +
>  /*
>   * Wait for pending requests to complete across all BlockDriverStates
>   *
> @@ -1426,27 +1455,22 @@ void bdrv_close_all(void)
>   */
>  void bdrv_drain_all(void)
>  {
> +    /* Always run first iteration so any pending completion BHs run */
> +    bool busy = true;
>      BlockDriverState *bs;
> -    bool busy;
> -
> -    do {
> -        busy = qemu_aio_wait();
>  
> +    while (busy) {
>          /* FIXME: We do not have timer support here, so this is effectively
>           * a busy wait.
>           */
>          QTAILQ_FOREACH(bs, &bdrv_states, list) {
>              if (!qemu_co_queue_empty(&bs->throttled_reqs)) {
>                  qemu_co_queue_restart_all(&bs->throttled_reqs);
> -                busy = true;
>              }
>          }
> -    } while (busy);
>  
> -    /* If requests are still pending there is a bug somewhere */
> -    QTAILQ_FOREACH(bs, &bdrv_states, list) {
> -        assert(QLIST_EMPTY(&bs->tracked_requests));
> -        assert(qemu_co_queue_empty(&bs->throttled_reqs));
> +        busy = bdrv_requests_pending_all();
> +        busy |= aio_poll(qemu_get_aio_context(), busy);
>      }
>  }
>  
> @@ -1591,11 +1615,11 @@ void bdrv_delete(BlockDriverState *bs)
>      assert(!bs->job);
>      assert(!bs->in_use);
>  
> +    bdrv_close(bs);
> +
>      /* remove from list, if necessary */
>      bdrv_make_anon(bs);
>  
> -    bdrv_close(bs);
> -
>      g_free(bs);
>  }
>  
>
Stefan Hajnoczi June 27, 2013, 1:29 p.m. UTC | #2
On Thu, Jun 27, 2013 at 03:13:19PM +0200, Paolo Bonzini wrote:
> On 14/06/2013 13:43, Stefan Hajnoczi wrote:
> > If a block driver has no file descriptors to monitor but there are still
> > active requests, it can return 1 from .io_flush().  This is used to spin
> > during synchronous I/O.
> > 
> > Stop relying on .io_flush() and instead check
> > QLIST_EMPTY(&bs->tracked_requests) to decide whether there are active
> > requests.
> > 
> > This is the first step in removing .io_flush() so that event loops no
> > longer need to have the concept of synchronous I/O.  Eventually we may
> > be able to kill synchronous I/O completely by running everything in a
> > coroutine, but that is future work.
> > 
> > Note this patch moves bs->throttled_reqs initialization to bdrv_new() so
> > that bdrv_requests_pending(bs) can safely access it.  In practice bs is
> > g_malloc0() so the memory is already zeroed but it's safer to initialize
> > the queue properly.
> > 
> > In bdrv_delete() make sure to call bdrv_make_anon() *after* bdrv_close()
> > so that the device is still seen by bdrv_drain_all() when iterating
> > bdrv_states.
> 
> I wonder if this last change should be separated out and CCed to
> qemu-stable.  It seems like a bug if you close a device that has pending
> throttled operations.

Seems like a good idea.  In the next version I'll split that change into
a separate patch Cc'd to qemu-stable@nongnu.org.

Stefan

Patch

diff --git a/block.c b/block.c
index 79ad33d..04821d8 100644
--- a/block.c
+++ b/block.c
@@ -148,7 +148,6 @@ static void bdrv_block_timer(void *opaque)
 
 void bdrv_io_limits_enable(BlockDriverState *bs)
 {
-    qemu_co_queue_init(&bs->throttled_reqs);
     bs->block_timer = qemu_new_timer_ns(vm_clock, bdrv_block_timer, bs);
     bs->io_limits_enabled = true;
 }
@@ -305,6 +304,7 @@ BlockDriverState *bdrv_new(const char *device_name)
     }
     bdrv_iostatus_disable(bs);
     notifier_list_init(&bs->close_notifiers);
+    qemu_co_queue_init(&bs->throttled_reqs);
 
     return bs;
 }
@@ -1412,6 +1412,35 @@ void bdrv_close_all(void)
     }
 }
 
+/* Check if any requests are in-flight (including throttled requests) */
+static bool bdrv_requests_pending(BlockDriverState *bs)
+{
+    if (!QLIST_EMPTY(&bs->tracked_requests)) {
+        return true;
+    }
+    if (!qemu_co_queue_empty(&bs->throttled_reqs)) {
+        return true;
+    }
+    if (bs->file && bdrv_requests_pending(bs->file)) {
+        return true;
+    }
+    if (bs->backing_hd && bdrv_requests_pending(bs->backing_hd)) {
+        return true;
+    }
+    return false;
+}
+
+static bool bdrv_requests_pending_all(void)
+{
+    BlockDriverState *bs;
+    QTAILQ_FOREACH(bs, &bdrv_states, list) {
+        if (bdrv_requests_pending(bs)) {
+            return true;
+        }
+    }
+    return false;
+}
+
 /*
  * Wait for pending requests to complete across all BlockDriverStates
  *
@@ -1426,27 +1455,22 @@ void bdrv_close_all(void)
  */
 void bdrv_drain_all(void)
 {
+    /* Always run first iteration so any pending completion BHs run */
+    bool busy = true;
     BlockDriverState *bs;
-    bool busy;
-
-    do {
-        busy = qemu_aio_wait();
 
+    while (busy) {
         /* FIXME: We do not have timer support here, so this is effectively
          * a busy wait.
          */
         QTAILQ_FOREACH(bs, &bdrv_states, list) {
             if (!qemu_co_queue_empty(&bs->throttled_reqs)) {
                 qemu_co_queue_restart_all(&bs->throttled_reqs);
-                busy = true;
             }
         }
-    } while (busy);
 
-    /* If requests are still pending there is a bug somewhere */
-    QTAILQ_FOREACH(bs, &bdrv_states, list) {
-        assert(QLIST_EMPTY(&bs->tracked_requests));
-        assert(qemu_co_queue_empty(&bs->throttled_reqs));
+        busy = bdrv_requests_pending_all();
+        busy |= aio_poll(qemu_get_aio_context(), busy);
     }
 }
 
@@ -1591,11 +1615,11 @@ void bdrv_delete(BlockDriverState *bs)
     assert(!bs->job);
     assert(!bs->in_use);
 
+    bdrv_close(bs);
+
     /* remove from list, if necessary */
     bdrv_make_anon(bs);
 
-    bdrv_close(bs);
-
     g_free(bs);
 }