| Field | Value |
|---|---|
| Message ID | 20221212125920.248567-11-pbonzini@redhat.com |
| State | New |
| Headers | show |
| Series | More cleanups and fixes for drain (expand) |
Am 12.12.2022 um 13:59 hat Paolo Bonzini geschrieben: > All I/O operations call blk_wait_while_drained() immediately after > blk_inc_in_flight(), except for blk_abort_aio_request() where it > does not hurt to add such a call. Merge the two functions into one, > and add a note about a disturbing lack of thread-safety that will > be fixed shortly. > > While at it, make the quiesce_counter check a loop. While the check > does not have to be "perfect", i.e. it only matters that an endless > stream of I/Os is stopped sooner or later, it is more logical to check > the counter repeatedly until it is zero. > > Signed-off-by: Paolo Bonzini <pbonzini@redhat.com> > --- > block/block-backend.c | 27 ++++++++------------------- > 1 file changed, 8 insertions(+), 19 deletions(-) > > diff --git a/block/block-backend.c b/block/block-backend.c > index fe42d53d655d..627d491d4155 100644 > --- a/block/block-backend.c > +++ b/block/block-backend.c > @@ -1270,18 +1270,6 @@ static int blk_check_byte_request(BlockBackend *blk, int64_t offset, > return 0; > } > > -/* To be called between exactly one pair of blk_inc/dec_in_flight() */ > -static void coroutine_fn blk_wait_while_drained(BlockBackend *blk) > -{ > - assert(blk->in_flight > 0); > - > - if (blk->quiesce_counter && !blk->disable_request_queuing) { > - blk_dec_in_flight(blk); > - qemu_co_queue_wait(&blk->queued_requests, NULL); > - blk_inc_in_flight(blk); > - } > -} > - > /* To be called between exactly one pair of blk_inc/dec_in_flight() */ > static int coroutine_fn > blk_co_do_preadv_part(BlockBackend *blk, int64_t offset, int64_t bytes, > @@ -1334,7 +1322,6 @@ int coroutine_fn blk_co_preadv(BlockBackend *blk, int64_t offset, > IO_OR_GS_CODE(); > > blk_inc_in_flight(blk); > - blk_wait_while_drained(blk); > ret = blk_co_do_preadv_part(blk, offset, bytes, qiov, 0, flags); > blk_dec_in_flight(blk); > > @@ -1349,7 +1336,6 @@ int coroutine_fn blk_co_preadv_part(BlockBackend *blk, int64_t offset, > IO_OR_GS_CODE(); > > 
blk_inc_in_flight(blk); > - blk_wait_while_drained(blk); > ret = blk_co_do_preadv_part(blk, offset, bytes, qiov, qiov_offset, flags); > blk_dec_in_flight(blk); > > @@ -1401,7 +1387,6 @@ int coroutine_fn blk_co_pwritev_part(BlockBackend *blk, int64_t offset, > IO_OR_GS_CODE(); > > blk_inc_in_flight(blk); > - blk_wait_while_drained(blk); > ret = blk_co_do_pwritev_part(blk, offset, bytes, qiov, qiov_offset, flags); > blk_dec_in_flight(blk); > > @@ -1466,6 +1451,14 @@ void blk_inc_in_flight(BlockBackend *blk) > { > IO_CODE(); > qatomic_inc(&blk->in_flight); > + if (!blk->disable_request_queuing) { > + /* TODO: this is not thread-safe! */ > + while (blk->quiesce_counter) { > + qatomic_dec(&blk->in_flight); > + qemu_co_queue_wait(&blk->queued_requests, NULL); blk_inc_in_flight() must be a coroutine_fn now. blk_abort_aio_request() and blk_aio_prwv() aren't, but still call it. > + qatomic_inc(&blk->in_flight); > + } > + } > } Kevin
diff --git a/block/block-backend.c b/block/block-backend.c index fe42d53d655d..627d491d4155 100644 --- a/block/block-backend.c +++ b/block/block-backend.c @@ -1270,18 +1270,6 @@ static int blk_check_byte_request(BlockBackend *blk, int64_t offset, return 0; } -/* To be called between exactly one pair of blk_inc/dec_in_flight() */ -static void coroutine_fn blk_wait_while_drained(BlockBackend *blk) -{ - assert(blk->in_flight > 0); - - if (blk->quiesce_counter && !blk->disable_request_queuing) { - blk_dec_in_flight(blk); - qemu_co_queue_wait(&blk->queued_requests, NULL); - blk_inc_in_flight(blk); - } -} - /* To be called between exactly one pair of blk_inc/dec_in_flight() */ static int coroutine_fn blk_co_do_preadv_part(BlockBackend *blk, int64_t offset, int64_t bytes, @@ -1334,7 +1322,6 @@ int coroutine_fn blk_co_preadv(BlockBackend *blk, int64_t offset, IO_OR_GS_CODE(); blk_inc_in_flight(blk); - blk_wait_while_drained(blk); ret = blk_co_do_preadv_part(blk, offset, bytes, qiov, 0, flags); blk_dec_in_flight(blk); @@ -1349,7 +1336,6 @@ int coroutine_fn blk_co_preadv_part(BlockBackend *blk, int64_t offset, IO_OR_GS_CODE(); blk_inc_in_flight(blk); - blk_wait_while_drained(blk); ret = blk_co_do_preadv_part(blk, offset, bytes, qiov, qiov_offset, flags); blk_dec_in_flight(blk); @@ -1401,7 +1387,6 @@ int coroutine_fn blk_co_pwritev_part(BlockBackend *blk, int64_t offset, IO_OR_GS_CODE(); blk_inc_in_flight(blk); - blk_wait_while_drained(blk); ret = blk_co_do_pwritev_part(blk, offset, bytes, qiov, qiov_offset, flags); blk_dec_in_flight(blk); @@ -1466,6 +1451,14 @@ void blk_inc_in_flight(BlockBackend *blk) { IO_CODE(); qatomic_inc(&blk->in_flight); + if (!blk->disable_request_queuing) { + /* TODO: this is not thread-safe! 
*/ + while (blk->quiesce_counter) { + qatomic_dec(&blk->in_flight); + qemu_co_queue_wait(&blk->queued_requests, NULL); + qatomic_inc(&blk->in_flight); + } + } } void blk_dec_in_flight(BlockBackend *blk) @@ -1546,7 +1539,6 @@ static BlockAIOCB *blk_aio_prwv(BlockBackend *blk, int64_t offset, Coroutine *co; blk_inc_in_flight(blk); - blk_wait_while_drained(blk); acb = blk_aio_get(&blk_aio_em_aiocb_info, blk, cb, opaque); acb->rwco = (BlkRwCo) { .blk = blk, @@ -1685,7 +1677,6 @@ int coroutine_fn blk_co_ioctl(BlockBackend *blk, unsigned long int req, IO_OR_GS_CODE(); blk_inc_in_flight(blk); - blk_wait_while_drained(blk); ret = blk_co_do_ioctl(blk, req, buf); blk_dec_in_flight(blk); @@ -1749,7 +1740,6 @@ int coroutine_fn blk_co_pdiscard(BlockBackend *blk, int64_t offset, IO_OR_GS_CODE(); blk_inc_in_flight(blk); - blk_wait_while_drained(blk); ret = blk_co_do_pdiscard(blk, offset, bytes); blk_dec_in_flight(blk); @@ -1790,7 +1780,6 @@ int coroutine_fn blk_co_flush(BlockBackend *blk) IO_OR_GS_CODE(); blk_inc_in_flight(blk); - blk_wait_while_drained(blk); ret = blk_co_do_flush(blk); blk_dec_in_flight(blk);
All I/O operations call blk_wait_while_drained() immediately after blk_inc_in_flight(), except for blk_abort_aio_request() where it does not hurt to add such a call. Merge the two functions into one, and add a note about a disturbing lack of thread-safety that will be fixed shortly. While at it, make the quiesce_counter check a loop. While the check does not have to be "perfect", i.e. it only matters that an endless stream of I/Os is stopped sooner or later, it is more logical to check the counter repeatedly until it is zero. Signed-off-by: Paolo Bonzini <pbonzini@redhat.com> --- block/block-backend.c | 27 ++++++++------------------- 1 file changed, 8 insertions(+), 19 deletions(-)