@@ -1288,8 +1288,6 @@ blk_co_do_preadv_part(BlockBackend *blk, int64_t offset, int64_t bytes,
BlockDriverState *bs;
IO_CODE();
- blk_wait_while_drained(blk);
-
/* Call blk_bs() only after waiting, the graph may have changed */
bs = blk_bs(blk);
trace_blk_co_preadv(blk, bs, offset, bytes, flags);
@@ -1332,6 +1330,7 @@ int coroutine_fn blk_co_preadv(BlockBackend *blk, int64_t offset,
IO_OR_GS_CODE();
blk_inc_in_flight(blk);
+ blk_wait_while_drained(blk);
ret = blk_co_do_preadv_part(blk, offset, bytes, qiov, 0, flags);
blk_dec_in_flight(blk);
@@ -1346,6 +1345,7 @@ int coroutine_fn blk_co_preadv_part(BlockBackend *blk, int64_t offset,
IO_OR_GS_CODE();
blk_inc_in_flight(blk);
+ blk_wait_while_drained(blk);
ret = blk_co_do_preadv_part(blk, offset, bytes, qiov, qiov_offset, flags);
blk_dec_in_flight(blk);
@@ -1362,8 +1362,6 @@ blk_co_do_pwritev_part(BlockBackend *blk, int64_t offset, int64_t bytes,
BlockDriverState *bs;
IO_CODE();
- blk_wait_while_drained(blk);
-
/* Call blk_bs() only after waiting, the graph may have changed */
bs = blk_bs(blk);
trace_blk_co_pwritev(blk, bs, offset, bytes, flags);
@@ -1399,6 +1397,7 @@ int coroutine_fn blk_co_pwritev_part(BlockBackend *blk, int64_t offset,
IO_OR_GS_CODE();
blk_inc_in_flight(blk);
+ blk_wait_while_drained(blk);
ret = blk_co_do_pwritev_part(blk, offset, bytes, qiov, qiov_offset, flags);
blk_dec_in_flight(blk);
@@ -1543,6 +1542,7 @@ static BlockAIOCB *blk_aio_prwv(BlockBackend *blk, int64_t offset,
Coroutine *co;
blk_inc_in_flight(blk);
+ blk_wait_while_drained(blk);
acb = blk_aio_get(&blk_aio_em_aiocb_info, blk, cb, opaque);
acb->rwco = (BlkRwCo) {
.blk = blk,
@@ -1667,8 +1667,6 @@ blk_co_do_ioctl(BlockBackend *blk, unsigned long int req, void *buf)
{
IO_CODE();
- blk_wait_while_drained(blk);
-
if (!blk_is_available(blk)) {
return -ENOMEDIUM;
}
@@ -1683,6 +1681,7 @@ int coroutine_fn blk_co_ioctl(BlockBackend *blk, unsigned long int req,
IO_OR_GS_CODE();
blk_inc_in_flight(blk);
+ blk_wait_while_drained(blk);
ret = blk_co_do_ioctl(blk, req, buf);
blk_dec_in_flight(blk);
@@ -1713,8 +1712,6 @@ blk_co_do_pdiscard(BlockBackend *blk, int64_t offset, int64_t bytes)
int ret;
IO_CODE();
- blk_wait_while_drained(blk);
-
ret = blk_check_byte_request(blk, offset, bytes);
if (ret < 0) {
return ret;
@@ -1748,6 +1745,7 @@ int coroutine_fn blk_co_pdiscard(BlockBackend *blk, int64_t offset,
IO_OR_GS_CODE();
blk_inc_in_flight(blk);
+ blk_wait_while_drained(blk);
ret = blk_co_do_pdiscard(blk, offset, bytes);
blk_dec_in_flight(blk);
@@ -1757,7 +1755,6 @@ int coroutine_fn blk_co_pdiscard(BlockBackend *blk, int64_t offset,
/* To be called between exactly one pair of blk_inc/dec_in_flight() */
static int coroutine_fn blk_co_do_flush(BlockBackend *blk)
{
- blk_wait_while_drained(blk);
IO_CODE();
if (!blk_is_available(blk)) {
@@ -1789,6 +1786,7 @@ int coroutine_fn blk_co_flush(BlockBackend *blk)
IO_OR_GS_CODE();
blk_inc_in_flight(blk);
+ blk_wait_while_drained(blk);
ret = blk_co_do_flush(blk);
blk_dec_in_flight(blk);
When called from within (another) coroutine, aio_co_enter will not enter a coroutine immediately; instead the new coroutine is scheduled to run after qemu_coroutine_yield(). This, however, might cause the currently-running coroutine to yield without having raised blk->in_flight. If it was a ->drained_begin() callback that scheduled the coroutine, bdrv_drained_begin() might exit without waiting for the I/O operation to finish. Right now, this is masked by unnecessary polling done by bdrv_drained_begin() after the callbacks return, but it is wrong and a latent bug. So, ensure that blk_inc_in_flight() and blk_wait_while_drained() are called before aio_co_enter(). To do so, pull the call to blk_wait_while_drained() out of the blk_co_do_* functions, which are called from the AIO coroutines, and place them separately in the public blk_co_* functions and in blk_aio_prwv. Signed-off-by: Paolo Bonzini <pbonzini@redhat.com> --- block/block-backend.c | 16 +++++++--------- 1 file changed, 7 insertions(+), 9 deletions(-)