diff mbox series

[03/13] block: Revert .bdrv_drained_begin/end to non-coroutine_fn

Message ID 20221108123738.530873-4-kwolf@redhat.com
State New
Headers show
Series block: Simplify drain | expand

Commit Message

Kevin Wolf Nov. 8, 2022, 12:37 p.m. UTC
Polling during bdrv_drained_end() can be problematic (and in the future,
we may get cases for bdrv_drained_begin() where polling is forbidden,
and we don't care about already in-flight requests, but just want to
prevent new requests from arriving).

The .bdrv_drained_begin/end callbacks running in a coroutine is the only
reason why we have to do this polling, so make them non-coroutine
callbacks again. None of the callers actually yield any more.

This means that bdrv_drained_end() effectively doesn't poll any more,
even if AIO_WAIT_WHILE() loops are still there (their condition is false
from the beginning). This is generally not a problem, but in
test-bdrv-drain, some additional explicit aio_poll() calls need to be
added because the test case wants to verify the final state after BHs
have executed.

Signed-off-by: Kevin Wolf <kwolf@redhat.com>
---
 include/block/block_int-common.h | 10 ++++---
 block.c                          |  4 +--
 block/io.c                       | 49 +++++---------------------------
 block/qed.c                      |  4 +--
 block/throttle.c                 |  6 ++--
 tests/unit/test-bdrv-drain.c     | 18 ++++++------
 6 files changed, 30 insertions(+), 61 deletions(-)

Comments

Vladimir Sementsov-Ogievskiy Nov. 9, 2022, 2:29 p.m. UTC | #1
On 11/8/22 15:37, Kevin Wolf wrote:
> Polling during bdrv_drained_end() can be problematic (and in the future,
> we may get cases for bdrv_drained_begin() where polling is forbidden,
> and we don't care about already in-flight requests, but just want to
> prevent new requests from arriving).
> 
> The .bdrv_drained_begin/end callbacks running in a coroutine is the only
> reason why we have to do this polling, so make them non-coroutine
> callbacks again. None of the callers actually yield any more.
> 
> This means that bdrv_drained_end() effectively doesn't poll any more,
> even if AIO_WAIT_WHILE() loops are still there (their condition is false
> from the beginning). This is generally not a problem, but in
> test-bdrv-drain, some additional explicit aio_poll() calls need to be
> added because the test case wants to verify the final state after BHs
> have executed.

So, drained_end_counter is always zero since this commit (and is removed in the next one).

> 
> Signed-off-by: Kevin Wolf <kwolf@redhat.com>
> ---
>   include/block/block_int-common.h | 10 ++++---
>   block.c                          |  4 +--
>   block/io.c                       | 49 +++++---------------------------
>   block/qed.c                      |  4 +--
>   block/throttle.c                 |  6 ++--
>   tests/unit/test-bdrv-drain.c     | 18 ++++++------
>   6 files changed, 30 insertions(+), 61 deletions(-)
> 

[..]

> --- a/block/qed.c
> +++ b/block/qed.c
> @@ -365,7 +365,7 @@ static void bdrv_qed_attach_aio_context(BlockDriverState *bs,
>       }
>   }
>   
> -static void coroutine_fn bdrv_qed_co_drain_begin(BlockDriverState *bs)
> +static void bdrv_qed_co_drain_begin(BlockDriverState *bs)
>   {
>       BDRVQEDState *s = bs->opaque;
>   
> @@ -1661,7 +1661,7 @@ static BlockDriver bdrv_qed = {
>       .bdrv_co_check            = bdrv_qed_co_check,
>       .bdrv_detach_aio_context  = bdrv_qed_detach_aio_context,
>       .bdrv_attach_aio_context  = bdrv_qed_attach_aio_context,
> -    .bdrv_co_drain_begin      = bdrv_qed_co_drain_begin,
> +    .bdrv_drain_begin         = bdrv_qed_co_drain_begin,

Rename to bdrv_qed_drain_begin, without _co_, as done for the tests?


>   };
>   
>   static void bdrv_qed_init(void)
> diff --git a/block/throttle.c b/block/throttle.c
> index 131eba3ab4..6e3ae1b355 100644
> --- a/block/throttle.c
> +++ b/block/throttle.c
> @@ -214,7 +214,7 @@ static void throttle_reopen_abort(BDRVReopenState *reopen_state)
>       reopen_state->opaque = NULL;
>   }
>   
> -static void coroutine_fn throttle_co_drain_begin(BlockDriverState *bs)
> +static void throttle_co_drain_begin(BlockDriverState *bs)

and here.

And you didn't drop the coroutine_fn marker from throttle_co_drain_end.

with that fixed:

Reviewed-by: Vladimir Sementsov-Ogievskiy <vsementsov@yandex-team.ru>
Stefan Hajnoczi Nov. 9, 2022, 10:13 p.m. UTC | #2
On Tue, Nov 08, 2022 at 01:37:28PM +0100, Kevin Wolf wrote:
> Polling during bdrv_drained_end() can be problematic (and in the future,
> we may get cases for bdrv_drained_begin() where polling is forbidden,
> and we don't care about already in-flight requests, but just want to
> prevent new requests from arriving).
> 
> The .bdrv_drained_begin/end callbacks running in a coroutine is the only
> reason why we have to do this polling, so make them non-coroutine
> callbacks again. None of the callers actually yield any more.
> 
> This means that bdrv_drained_end() effectively doesn't poll any more,
> even if AIO_WAIT_WHILE() loops are still there (their condition is false
> from the beginning). This is generally not a problem, but in
> test-bdrv-drain, some additional explicit aio_poll() calls need to be
> added because the test case wants to verify the final state after BHs
> have executed.
> 
> Signed-off-by: Kevin Wolf <kwolf@redhat.com>
> ---
>  include/block/block_int-common.h | 10 ++++---
>  block.c                          |  4 +--
>  block/io.c                       | 49 +++++---------------------------
>  block/qed.c                      |  4 +--
>  block/throttle.c                 |  6 ++--
>  tests/unit/test-bdrv-drain.c     | 18 ++++++------
>  6 files changed, 30 insertions(+), 61 deletions(-)

Wow, surprisingly little has to change to make these non-coroutine_fn.

> 
> diff --git a/include/block/block_int-common.h b/include/block/block_int-common.h
> index 5a2cc077a0..0956acbb60 100644
> --- a/include/block/block_int-common.h
> +++ b/include/block/block_int-common.h
> @@ -735,17 +735,19 @@ struct BlockDriver {
>      void (*bdrv_io_unplug)(BlockDriverState *bs);
>  
>      /**
> -     * bdrv_co_drain_begin is called if implemented in the beginning of a
> +     * bdrv_drain_begin is called if implemented in the beginning of a
>       * drain operation to drain and stop any internal sources of requests in
>       * the driver.
> -     * bdrv_co_drain_end is called if implemented at the end of the drain.
> +     * bdrv_drain_end is called if implemented at the end of the drain.
>       *
>       * They should be used by the driver to e.g. manage scheduled I/O
>       * requests, or toggle an internal state. After the end of the drain new
>       * requests will continue normally.
> +     *
> +     * Implementations of both functions must not call aio_poll().
>       */
> -    void coroutine_fn (*bdrv_co_drain_begin)(BlockDriverState *bs);
> -    void coroutine_fn (*bdrv_co_drain_end)(BlockDriverState *bs);
> +    void (*bdrv_drain_begin)(BlockDriverState *bs);
> +    void (*bdrv_drain_end)(BlockDriverState *bs);
>  
>      bool (*bdrv_supports_persistent_dirty_bitmap)(BlockDriverState *bs);
>      bool coroutine_fn (*bdrv_co_can_store_new_dirty_bitmap)(
> diff --git a/block.c b/block.c
> index 3bd594eb2a..fed8077993 100644
> --- a/block.c
> +++ b/block.c
> @@ -1705,8 +1705,8 @@ static int bdrv_open_driver(BlockDriverState *bs, BlockDriver *drv,
>      assert(is_power_of_2(bs->bl.request_alignment));
>  
>      for (i = 0; i < bs->quiesce_counter; i++) {
> -        if (drv->bdrv_co_drain_begin) {
> -            drv->bdrv_co_drain_begin(bs);
> +        if (drv->bdrv_drain_begin) {
> +            drv->bdrv_drain_begin(bs);
>          }
>      }
>  
> diff --git a/block/io.c b/block/io.c
> index 34b30e304e..183b407f5b 100644
> --- a/block/io.c
> +++ b/block/io.c
> @@ -250,55 +250,20 @@ typedef struct {
>      int *drained_end_counter;
>  } BdrvCoDrainData;
>  
> -static void coroutine_fn bdrv_drain_invoke_entry(void *opaque)
> -{
> -    BdrvCoDrainData *data = opaque;
> -    BlockDriverState *bs = data->bs;
> -
> -    if (data->begin) {
> -        bs->drv->bdrv_co_drain_begin(bs);
> -    } else {
> -        bs->drv->bdrv_co_drain_end(bs);
> -    }
> -
> -    /* Set data->done and decrement drained_end_counter before bdrv_wakeup() */
> -    qatomic_mb_set(&data->done, true);
> -    if (!data->begin) {
> -        qatomic_dec(data->drained_end_counter);
> -    }
> -    bdrv_dec_in_flight(bs);
> -
> -    g_free(data);
> -}
> -
> -/* Recursively call BlockDriver.bdrv_co_drain_begin/end callbacks */
> +/* Recursively call BlockDriver.bdrv_drain_begin/end callbacks */
>  static void bdrv_drain_invoke(BlockDriverState *bs, bool begin,
>                                int *drained_end_counter)
>  {
> -    BdrvCoDrainData *data;
> -
> -    if (!bs->drv || (begin && !bs->drv->bdrv_co_drain_begin) ||
> -            (!begin && !bs->drv->bdrv_co_drain_end)) {
> +    if (!bs->drv || (begin && !bs->drv->bdrv_drain_begin) ||
> +            (!begin && !bs->drv->bdrv_drain_end)) {
>          return;
>      }
>  
> -    data = g_new(BdrvCoDrainData, 1);
> -    *data = (BdrvCoDrainData) {
> -        .bs = bs,
> -        .done = false,
> -        .begin = begin,
> -        .drained_end_counter = drained_end_counter,
> -    };
> -
> -    if (!begin) {
> -        qatomic_inc(drained_end_counter);
> +    if (begin) {
> +        bs->drv->bdrv_drain_begin(bs);
> +    } else {
> +        bs->drv->bdrv_drain_end(bs);
>      }
> -
> -    /* Make sure the driver callback completes during the polling phase for
> -     * drain_begin. */
> -    bdrv_inc_in_flight(bs);
> -    data->co = qemu_coroutine_create(bdrv_drain_invoke_entry, data);
> -    aio_co_schedule(bdrv_get_aio_context(bs), data->co);
>  }
>  
>  /* Returns true if BDRV_POLL_WHILE() should go into a blocking aio_poll() */
> diff --git a/block/qed.c b/block/qed.c
> index 013f826c44..301ff8fd86 100644
> --- a/block/qed.c
> +++ b/block/qed.c
> @@ -365,7 +365,7 @@ static void bdrv_qed_attach_aio_context(BlockDriverState *bs,
>      }
>  }
>  
> -static void coroutine_fn bdrv_qed_co_drain_begin(BlockDriverState *bs)
> +static void bdrv_qed_co_drain_begin(BlockDriverState *bs)

This function needs to be renamed s/_co_//.

>  {
>      BDRVQEDState *s = bs->opaque;
>  
> @@ -1661,7 +1661,7 @@ static BlockDriver bdrv_qed = {
>      .bdrv_co_check            = bdrv_qed_co_check,
>      .bdrv_detach_aio_context  = bdrv_qed_detach_aio_context,
>      .bdrv_attach_aio_context  = bdrv_qed_attach_aio_context,
> -    .bdrv_co_drain_begin      = bdrv_qed_co_drain_begin,
> +    .bdrv_drain_begin         = bdrv_qed_co_drain_begin,
>  };
>  
>  static void bdrv_qed_init(void)
> diff --git a/block/throttle.c b/block/throttle.c
> index 131eba3ab4..6e3ae1b355 100644
> --- a/block/throttle.c
> +++ b/block/throttle.c
> @@ -214,7 +214,7 @@ static void throttle_reopen_abort(BDRVReopenState *reopen_state)
>      reopen_state->opaque = NULL;
>  }
>  
> -static void coroutine_fn throttle_co_drain_begin(BlockDriverState *bs)
> +static void throttle_co_drain_begin(BlockDriverState *bs)

Same here.

>  {
>      ThrottleGroupMember *tgm = bs->opaque;
>      if (qatomic_fetch_inc(&tgm->io_limits_disabled) == 0) {
> @@ -261,8 +261,8 @@ static BlockDriver bdrv_throttle = {
>      .bdrv_reopen_commit                 =   throttle_reopen_commit,
>      .bdrv_reopen_abort                  =   throttle_reopen_abort,
>  
> -    .bdrv_co_drain_begin                =   throttle_co_drain_begin,
> -    .bdrv_co_drain_end                  =   throttle_co_drain_end,

Is throttle_co_drain_end() still marked coroutine_fn? It also needs to be
renamed to throttle_drain_end().

> +    .bdrv_drain_begin                   =   throttle_co_drain_begin,
> +    .bdrv_drain_end                     =   throttle_co_drain_end,
>  
>      .is_filter                          =   true,
>      .strong_runtime_opts                =   throttle_strong_runtime_opts,
> diff --git a/tests/unit/test-bdrv-drain.c b/tests/unit/test-bdrv-drain.c
> index 24f34e24ad..695519ee02 100644
> --- a/tests/unit/test-bdrv-drain.c
> +++ b/tests/unit/test-bdrv-drain.c
> @@ -46,7 +46,7 @@ static void coroutine_fn sleep_in_drain_begin(void *opaque)
>      bdrv_dec_in_flight(bs);
>  }
>  
> -static void coroutine_fn bdrv_test_co_drain_begin(BlockDriverState *bs)
> +static void bdrv_test_drain_begin(BlockDriverState *bs)
>  {
>      BDRVTestState *s = bs->opaque;
>      s->drain_count++;
> @@ -57,7 +57,7 @@ static void coroutine_fn bdrv_test_co_drain_begin(BlockDriverState *bs)
>      }
>  }
>  
> -static void coroutine_fn bdrv_test_co_drain_end(BlockDriverState *bs)
> +static void bdrv_test_drain_end(BlockDriverState *bs)
>  {
>      BDRVTestState *s = bs->opaque;
>      s->drain_count--;
> @@ -111,8 +111,8 @@ static BlockDriver bdrv_test = {
>      .bdrv_close             = bdrv_test_close,
>      .bdrv_co_preadv         = bdrv_test_co_preadv,
>  
> -    .bdrv_co_drain_begin    = bdrv_test_co_drain_begin,
> -    .bdrv_co_drain_end      = bdrv_test_co_drain_end,
> +    .bdrv_drain_begin       = bdrv_test_drain_begin,
> +    .bdrv_drain_end         = bdrv_test_drain_end,
>  
>      .bdrv_child_perm        = bdrv_default_perms,
>  
> @@ -1703,6 +1703,7 @@ static void test_blockjob_commit_by_drained_end(void)
>      bdrv_drained_begin(bs_child);
>      g_assert(!job_has_completed);
>      bdrv_drained_end(bs_child);
> +    aio_poll(qemu_get_aio_context(), false);
>      g_assert(job_has_completed);
>  
>      bdrv_unref(bs_parents[0]);
> @@ -1858,6 +1859,7 @@ static void test_drop_intermediate_poll(void)
>  
>      g_assert(!job_has_completed);
>      ret = bdrv_drop_intermediate(chain[1], chain[0], NULL);
> +    aio_poll(qemu_get_aio_context(), false);
>      g_assert(ret == 0);
>      g_assert(job_has_completed);
>  
> @@ -1946,7 +1948,7 @@ static void coroutine_fn bdrv_replace_test_drain_co(void *opaque)
>   * .was_drained.
>   * Increment .drain_count.
>   */
> -static void coroutine_fn bdrv_replace_test_co_drain_begin(BlockDriverState *bs)
> +static void bdrv_replace_test_drain_begin(BlockDriverState *bs)
>  {
>      BDRVReplaceTestState *s = bs->opaque;
>  
> @@ -1977,7 +1979,7 @@ static void coroutine_fn bdrv_replace_test_read_entry(void *opaque)
>   * If .drain_count reaches 0 and the node has a backing file, issue a
>   * read request.
>   */
> -static void coroutine_fn bdrv_replace_test_co_drain_end(BlockDriverState *bs)
> +static void bdrv_replace_test_drain_end(BlockDriverState *bs)
>  {
>      BDRVReplaceTestState *s = bs->opaque;
>  
> @@ -2002,8 +2004,8 @@ static BlockDriver bdrv_replace_test = {
>      .bdrv_close             = bdrv_replace_test_close,
>      .bdrv_co_preadv         = bdrv_replace_test_co_preadv,
>  
> -    .bdrv_co_drain_begin    = bdrv_replace_test_co_drain_begin,
> -    .bdrv_co_drain_end      = bdrv_replace_test_co_drain_end,
> +    .bdrv_drain_begin       = bdrv_replace_test_drain_begin,
> +    .bdrv_drain_end         = bdrv_replace_test_drain_end,
>  
>      .bdrv_child_perm        = bdrv_default_perms,
>  };
> -- 
> 2.38.1
>
Emanuele Giuseppe Esposito Nov. 11, 2022, 11:14 a.m. UTC | #3
Am 08/11/2022 um 13:37 schrieb Kevin Wolf:
> Polling during bdrv_drained_end() can be problematic (and in the future,
> we may get cases for bdrv_drained_begin() where polling is forbidden,
> and we don't care about already in-flight requests, but just want to
> prevent new requests from arriving).
>
> The .bdrv_drained_begin/end callbacks running in a coroutine is the only
> reason why we have to do this polling, so make them non-coroutine
> callbacks again. None of the callers actually yield any more.
>
> This means that bdrv_drained_end() effectively doesn't poll any more,
> even if AIO_WAIT_WHILE() loops are still there (their condition is false
> from the beginning). This is generally not a problem, but in
> test-bdrv-drain, some additional explicit aio_poll() calls need to be
> added because the test case wants to verify the final state after BHs
> have executed.
>
> Signed-off-by: Kevin Wolf <kwolf@redhat.com>
>


Reviewed-by: Emanuele Giuseppe Esposito <eesposit@redhat.com>
Hanna Czenczek Nov. 14, 2022, 6:17 p.m. UTC | #4
On 08.11.22 13:37, Kevin Wolf wrote:
> Polling during bdrv_drained_end() can be problematic (and in the future,
> we may get cases for bdrv_drained_begin() where polling is forbidden,
> and we don't care about already in-flight requests, but just want to
> prevent new requests from arriving).
>
> The .bdrv_drained_begin/end callbacks running in a coroutine is the only
> reason why we have to do this polling, so make them non-coroutine
> callbacks again. None of the callers actually yield any more.
>
> This means that bdrv_drained_end() effectively doesn't poll any more,
> even if AIO_WAIT_WHILE() loops are still there (their condition is false
> from the beginning). This is generally not a problem, but in
> test-bdrv-drain, some additional explicit aio_poll() calls need to be
> added because the test case wants to verify the final state after BHs
> have executed.
>
> Signed-off-by: Kevin Wolf <kwolf@redhat.com>
> ---
>   include/block/block_int-common.h | 10 ++++---
>   block.c                          |  4 +--
>   block/io.c                       | 49 +++++---------------------------
>   block/qed.c                      |  4 +--
>   block/throttle.c                 |  6 ++--
>   tests/unit/test-bdrv-drain.c     | 18 ++++++------
>   6 files changed, 30 insertions(+), 61 deletions(-)

As the others have already suggested, I too would drop the _co_ in qed and 
throttle, and the coroutine_fn in throttle.  With that done:

Reviewed-by: Hanna Reitz <hreitz@redhat.com>
diff mbox series

Patch

diff --git a/include/block/block_int-common.h b/include/block/block_int-common.h
index 5a2cc077a0..0956acbb60 100644
--- a/include/block/block_int-common.h
+++ b/include/block/block_int-common.h
@@ -735,17 +735,19 @@  struct BlockDriver {
     void (*bdrv_io_unplug)(BlockDriverState *bs);
 
     /**
-     * bdrv_co_drain_begin is called if implemented in the beginning of a
+     * bdrv_drain_begin is called if implemented in the beginning of a
      * drain operation to drain and stop any internal sources of requests in
      * the driver.
-     * bdrv_co_drain_end is called if implemented at the end of the drain.
+     * bdrv_drain_end is called if implemented at the end of the drain.
      *
      * They should be used by the driver to e.g. manage scheduled I/O
      * requests, or toggle an internal state. After the end of the drain new
      * requests will continue normally.
+     *
+     * Implementations of both functions must not call aio_poll().
      */
-    void coroutine_fn (*bdrv_co_drain_begin)(BlockDriverState *bs);
-    void coroutine_fn (*bdrv_co_drain_end)(BlockDriverState *bs);
+    void (*bdrv_drain_begin)(BlockDriverState *bs);
+    void (*bdrv_drain_end)(BlockDriverState *bs);
 
     bool (*bdrv_supports_persistent_dirty_bitmap)(BlockDriverState *bs);
     bool coroutine_fn (*bdrv_co_can_store_new_dirty_bitmap)(
diff --git a/block.c b/block.c
index 3bd594eb2a..fed8077993 100644
--- a/block.c
+++ b/block.c
@@ -1705,8 +1705,8 @@  static int bdrv_open_driver(BlockDriverState *bs, BlockDriver *drv,
     assert(is_power_of_2(bs->bl.request_alignment));
 
     for (i = 0; i < bs->quiesce_counter; i++) {
-        if (drv->bdrv_co_drain_begin) {
-            drv->bdrv_co_drain_begin(bs);
+        if (drv->bdrv_drain_begin) {
+            drv->bdrv_drain_begin(bs);
         }
     }
 
diff --git a/block/io.c b/block/io.c
index 34b30e304e..183b407f5b 100644
--- a/block/io.c
+++ b/block/io.c
@@ -250,55 +250,20 @@  typedef struct {
     int *drained_end_counter;
 } BdrvCoDrainData;
 
-static void coroutine_fn bdrv_drain_invoke_entry(void *opaque)
-{
-    BdrvCoDrainData *data = opaque;
-    BlockDriverState *bs = data->bs;
-
-    if (data->begin) {
-        bs->drv->bdrv_co_drain_begin(bs);
-    } else {
-        bs->drv->bdrv_co_drain_end(bs);
-    }
-
-    /* Set data->done and decrement drained_end_counter before bdrv_wakeup() */
-    qatomic_mb_set(&data->done, true);
-    if (!data->begin) {
-        qatomic_dec(data->drained_end_counter);
-    }
-    bdrv_dec_in_flight(bs);
-
-    g_free(data);
-}
-
-/* Recursively call BlockDriver.bdrv_co_drain_begin/end callbacks */
+/* Recursively call BlockDriver.bdrv_drain_begin/end callbacks */
 static void bdrv_drain_invoke(BlockDriverState *bs, bool begin,
                               int *drained_end_counter)
 {
-    BdrvCoDrainData *data;
-
-    if (!bs->drv || (begin && !bs->drv->bdrv_co_drain_begin) ||
-            (!begin && !bs->drv->bdrv_co_drain_end)) {
+    if (!bs->drv || (begin && !bs->drv->bdrv_drain_begin) ||
+            (!begin && !bs->drv->bdrv_drain_end)) {
         return;
     }
 
-    data = g_new(BdrvCoDrainData, 1);
-    *data = (BdrvCoDrainData) {
-        .bs = bs,
-        .done = false,
-        .begin = begin,
-        .drained_end_counter = drained_end_counter,
-    };
-
-    if (!begin) {
-        qatomic_inc(drained_end_counter);
+    if (begin) {
+        bs->drv->bdrv_drain_begin(bs);
+    } else {
+        bs->drv->bdrv_drain_end(bs);
     }
-
-    /* Make sure the driver callback completes during the polling phase for
-     * drain_begin. */
-    bdrv_inc_in_flight(bs);
-    data->co = qemu_coroutine_create(bdrv_drain_invoke_entry, data);
-    aio_co_schedule(bdrv_get_aio_context(bs), data->co);
 }
 
 /* Returns true if BDRV_POLL_WHILE() should go into a blocking aio_poll() */
diff --git a/block/qed.c b/block/qed.c
index 013f826c44..301ff8fd86 100644
--- a/block/qed.c
+++ b/block/qed.c
@@ -365,7 +365,7 @@  static void bdrv_qed_attach_aio_context(BlockDriverState *bs,
     }
 }
 
-static void coroutine_fn bdrv_qed_co_drain_begin(BlockDriverState *bs)
+static void bdrv_qed_co_drain_begin(BlockDriverState *bs)
 {
     BDRVQEDState *s = bs->opaque;
 
@@ -1661,7 +1661,7 @@  static BlockDriver bdrv_qed = {
     .bdrv_co_check            = bdrv_qed_co_check,
     .bdrv_detach_aio_context  = bdrv_qed_detach_aio_context,
     .bdrv_attach_aio_context  = bdrv_qed_attach_aio_context,
-    .bdrv_co_drain_begin      = bdrv_qed_co_drain_begin,
+    .bdrv_drain_begin         = bdrv_qed_co_drain_begin,
 };
 
 static void bdrv_qed_init(void)
diff --git a/block/throttle.c b/block/throttle.c
index 131eba3ab4..6e3ae1b355 100644
--- a/block/throttle.c
+++ b/block/throttle.c
@@ -214,7 +214,7 @@  static void throttle_reopen_abort(BDRVReopenState *reopen_state)
     reopen_state->opaque = NULL;
 }
 
-static void coroutine_fn throttle_co_drain_begin(BlockDriverState *bs)
+static void throttle_co_drain_begin(BlockDriverState *bs)
 {
     ThrottleGroupMember *tgm = bs->opaque;
     if (qatomic_fetch_inc(&tgm->io_limits_disabled) == 0) {
@@ -261,8 +261,8 @@  static BlockDriver bdrv_throttle = {
     .bdrv_reopen_commit                 =   throttle_reopen_commit,
     .bdrv_reopen_abort                  =   throttle_reopen_abort,
 
-    .bdrv_co_drain_begin                =   throttle_co_drain_begin,
-    .bdrv_co_drain_end                  =   throttle_co_drain_end,
+    .bdrv_drain_begin                   =   throttle_co_drain_begin,
+    .bdrv_drain_end                     =   throttle_co_drain_end,
 
     .is_filter                          =   true,
     .strong_runtime_opts                =   throttle_strong_runtime_opts,
diff --git a/tests/unit/test-bdrv-drain.c b/tests/unit/test-bdrv-drain.c
index 24f34e24ad..695519ee02 100644
--- a/tests/unit/test-bdrv-drain.c
+++ b/tests/unit/test-bdrv-drain.c
@@ -46,7 +46,7 @@  static void coroutine_fn sleep_in_drain_begin(void *opaque)
     bdrv_dec_in_flight(bs);
 }
 
-static void coroutine_fn bdrv_test_co_drain_begin(BlockDriverState *bs)
+static void bdrv_test_drain_begin(BlockDriverState *bs)
 {
     BDRVTestState *s = bs->opaque;
     s->drain_count++;
@@ -57,7 +57,7 @@  static void coroutine_fn bdrv_test_co_drain_begin(BlockDriverState *bs)
     }
 }
 
-static void coroutine_fn bdrv_test_co_drain_end(BlockDriverState *bs)
+static void bdrv_test_drain_end(BlockDriverState *bs)
 {
     BDRVTestState *s = bs->opaque;
     s->drain_count--;
@@ -111,8 +111,8 @@  static BlockDriver bdrv_test = {
     .bdrv_close             = bdrv_test_close,
     .bdrv_co_preadv         = bdrv_test_co_preadv,
 
-    .bdrv_co_drain_begin    = bdrv_test_co_drain_begin,
-    .bdrv_co_drain_end      = bdrv_test_co_drain_end,
+    .bdrv_drain_begin       = bdrv_test_drain_begin,
+    .bdrv_drain_end         = bdrv_test_drain_end,
 
     .bdrv_child_perm        = bdrv_default_perms,
 
@@ -1703,6 +1703,7 @@  static void test_blockjob_commit_by_drained_end(void)
     bdrv_drained_begin(bs_child);
     g_assert(!job_has_completed);
     bdrv_drained_end(bs_child);
+    aio_poll(qemu_get_aio_context(), false);
     g_assert(job_has_completed);
 
     bdrv_unref(bs_parents[0]);
@@ -1858,6 +1859,7 @@  static void test_drop_intermediate_poll(void)
 
     g_assert(!job_has_completed);
     ret = bdrv_drop_intermediate(chain[1], chain[0], NULL);
+    aio_poll(qemu_get_aio_context(), false);
     g_assert(ret == 0);
     g_assert(job_has_completed);
 
@@ -1946,7 +1948,7 @@  static void coroutine_fn bdrv_replace_test_drain_co(void *opaque)
  * .was_drained.
  * Increment .drain_count.
  */
-static void coroutine_fn bdrv_replace_test_co_drain_begin(BlockDriverState *bs)
+static void bdrv_replace_test_drain_begin(BlockDriverState *bs)
 {
     BDRVReplaceTestState *s = bs->opaque;
 
@@ -1977,7 +1979,7 @@  static void coroutine_fn bdrv_replace_test_read_entry(void *opaque)
  * If .drain_count reaches 0 and the node has a backing file, issue a
  * read request.
  */
-static void coroutine_fn bdrv_replace_test_co_drain_end(BlockDriverState *bs)
+static void bdrv_replace_test_drain_end(BlockDriverState *bs)
 {
     BDRVReplaceTestState *s = bs->opaque;
 
@@ -2002,8 +2004,8 @@  static BlockDriver bdrv_replace_test = {
     .bdrv_close             = bdrv_replace_test_close,
     .bdrv_co_preadv         = bdrv_replace_test_co_preadv,
 
-    .bdrv_co_drain_begin    = bdrv_replace_test_co_drain_begin,
-    .bdrv_co_drain_end      = bdrv_replace_test_co_drain_end,
+    .bdrv_drain_begin       = bdrv_replace_test_drain_begin,
+    .bdrv_drain_end         = bdrv_replace_test_drain_end,
 
     .bdrv_child_perm        = bdrv_default_perms,
 };