Message ID | 1431538099-3286-12-git-send-email-famz@redhat.com |
---|---|
State | New |
Headers | show |
On 13/05/2015 19:28, Fam Zheng wrote: > We don't want new requests from guest, so block the operation around the > nested poll. > > Signed-off-by: Fam Zheng <famz@redhat.com> > --- > block/io.c | 12 ++++++++++++ > 1 file changed, 12 insertions(+) > > diff --git a/block/io.c b/block/io.c > index 1ce62c4..d369de3 100644 > --- a/block/io.c > +++ b/block/io.c > @@ -289,9 +289,15 @@ static bool bdrv_drain_one(BlockDriverState *bs) > */ > void bdrv_drain(BlockDriverState *bs) > { > + Error *blocker = NULL; > + > + error_setg(&blocker, "bdrv_drain in progress"); > + bdrv_op_block(bs, BLOCK_OP_TYPE_DEVICE_IO, blocker); > while (bdrv_drain_one(bs)) { > /* Keep iterating */ > } > + bdrv_op_unblock(bs, BLOCK_OP_TYPE_DEVICE_IO, blocker); > + error_free(blocker); > } > > /* > @@ -311,6 +317,9 @@ void bdrv_drain_all(void) > /* Always run first iteration so any pending completion BHs run */ > bool busy = true; > BlockDriverState *bs = NULL; > + Error *blocker = NULL; > + > + error_setg(&blocker, "bdrv_drain_all in progress"); > > while ((bs = bdrv_next(bs))) { > AioContext *aio_context = bdrv_get_aio_context(bs); > @@ -319,6 +328,7 @@ void bdrv_drain_all(void) > if (bs->job) { > block_job_pause(bs->job); > } > + bdrv_op_block(bs, BLOCK_OP_TYPE_DEVICE_IO, blocker); > aio_context_release(aio_context); > } > > @@ -343,8 +353,10 @@ void bdrv_drain_all(void) > if (bs->job) { > block_job_resume(bs->job); > } > + bdrv_op_unblock(bs, BLOCK_OP_TYPE_DEVICE_IO, blocker); > aio_context_release(aio_context); > } > + error_free(blocker); > } > > /** > I think this isn't enough. It's the callers of bdrv_drain and bdrv_drain_all that need to block before drain and unblock before aio_context_release. Paolo
On Wed, 05/13 12:26, Paolo Bonzini wrote: > > > On 13/05/2015 19:28, Fam Zheng wrote: > > We don't want new requests from guest, so block the operation around the > > nested poll. > > > > Signed-off-by: Fam Zheng <famz@redhat.com> > > --- > > block/io.c | 12 ++++++++++++ > > 1 file changed, 12 insertions(+) > > > > diff --git a/block/io.c b/block/io.c > > index 1ce62c4..d369de3 100644 > > --- a/block/io.c > > +++ b/block/io.c > > @@ -289,9 +289,15 @@ static bool bdrv_drain_one(BlockDriverState *bs) > > */ > > void bdrv_drain(BlockDriverState *bs) > > { > > + Error *blocker = NULL; > > + > > + error_setg(&blocker, "bdrv_drain in progress"); > > + bdrv_op_block(bs, BLOCK_OP_TYPE_DEVICE_IO, blocker); > > while (bdrv_drain_one(bs)) { > > /* Keep iterating */ > > } > > + bdrv_op_unblock(bs, BLOCK_OP_TYPE_DEVICE_IO, blocker); > > + error_free(blocker); > > } > > > > /* > > @@ -311,6 +317,9 @@ void bdrv_drain_all(void) > > /* Always run first iteration so any pending completion BHs run */ > > bool busy = true; > > BlockDriverState *bs = NULL; > > + Error *blocker = NULL; > > + > > + error_setg(&blocker, "bdrv_drain_all in progress"); > > > > while ((bs = bdrv_next(bs))) { > > AioContext *aio_context = bdrv_get_aio_context(bs); > > @@ -319,6 +328,7 @@ void bdrv_drain_all(void) > > if (bs->job) { > > block_job_pause(bs->job); > > } > > + bdrv_op_block(bs, BLOCK_OP_TYPE_DEVICE_IO, blocker); > > aio_context_release(aio_context); > > } > > > > @@ -343,8 +353,10 @@ void bdrv_drain_all(void) > > if (bs->job) { > > block_job_resume(bs->job); > > } > > + bdrv_op_unblock(bs, BLOCK_OP_TYPE_DEVICE_IO, blocker); > > aio_context_release(aio_context); > > } > > + error_free(blocker); > > } > > > > /** > > > > I think this isn't enough. It's the callers of bdrv_drain and > bdrv_drain_all that need to block before drain and unblock before > aio_context_release. Which callers do you mean? qmp_transaction is covered in this series. Fam
diff --git a/block/io.c b/block/io.c
index 1ce62c4..d369de3 100644
--- a/block/io.c
+++ b/block/io.c
@@ -289,9 +289,15 @@ static bool bdrv_drain_one(BlockDriverState *bs)
  */
 void bdrv_drain(BlockDriverState *bs)
 {
+    Error *blocker = NULL;
+
+    error_setg(&blocker, "bdrv_drain in progress");
+    bdrv_op_block(bs, BLOCK_OP_TYPE_DEVICE_IO, blocker);
     while (bdrv_drain_one(bs)) {
         /* Keep iterating */
     }
+    bdrv_op_unblock(bs, BLOCK_OP_TYPE_DEVICE_IO, blocker);
+    error_free(blocker);
 }
 
 /*
@@ -311,6 +317,9 @@ void bdrv_drain_all(void)
     /* Always run first iteration so any pending completion BHs run */
     bool busy = true;
     BlockDriverState *bs = NULL;
+    Error *blocker = NULL;
+
+    error_setg(&blocker, "bdrv_drain_all in progress");
 
     while ((bs = bdrv_next(bs))) {
         AioContext *aio_context = bdrv_get_aio_context(bs);
@@ -319,6 +328,7 @@ void bdrv_drain_all(void)
         if (bs->job) {
             block_job_pause(bs->job);
         }
+        bdrv_op_block(bs, BLOCK_OP_TYPE_DEVICE_IO, blocker);
         aio_context_release(aio_context);
     }
 
@@ -343,8 +353,10 @@ void bdrv_drain_all(void)
         if (bs->job) {
             block_job_resume(bs->job);
         }
+        bdrv_op_unblock(bs, BLOCK_OP_TYPE_DEVICE_IO, blocker);
         aio_context_release(aio_context);
     }
+    error_free(blocker);
 }
 
 /**
We don't want new requests from guest, so block the operation around the
nested poll.

Signed-off-by: Fam Zheng <famz@redhat.com>
---
 block/io.c | 12 ++++++++++++
 1 file changed, 12 insertions(+)