@@ -4217,7 +4217,15 @@ int block_job_set_speed(BlockJob *job, int64_t value)
void block_job_cancel(BlockJob *job)
{
+ /* Complete all guest I/O before cancelling the job, so that if the
+ * job chooses to complete itself it will do so with a consistent
+ * view of the disk.
+ */
+ bdrv_drain_all();
job->cancelled = true;
+ if (job->co && !job->busy) {
+ qemu_coroutine_enter(job->co, NULL);
+ }
}
bool block_job_is_cancelled(BlockJob *job)
@@ -4225,13 +4233,42 @@ bool block_job_is_cancelled(BlockJob *job)
return job->cancelled;
}
-void block_job_cancel_sync(BlockJob *job)
+struct BlockCancelData {
+ BlockJob *job;
+ BlockDriverCompletionFunc *cb;
+ void *opaque;
+ bool cancelled;
+ int ret;
+};
+
+static void block_job_cancel_cb(void *opaque, int ret)
+{
+ struct BlockCancelData *data = opaque;
+
+ data->cancelled = block_job_is_cancelled(data->job);
+ data->ret = ret;
+ data->cb(data->opaque, ret);
+}
+
+int block_job_cancel_sync(BlockJob *job)
{
+ struct BlockCancelData data;
BlockDriverState *bs = job->bs;
assert(bs->job == job);
+
+ /* Set up our own callback to store the result and chain to
+ * the original callback.
+ */
+ data.job = job;
+ data.cb = job->cb;
+ data.opaque = job->opaque;
+ data.ret = -EINPROGRESS;
+ job->cb = block_job_cancel_cb;
+ job->opaque = &data;
block_job_cancel(job);
- while (bs->job != NULL && bs->job->busy) {
+ while (data.ret == -EINPROGRESS) {
qemu_aio_wait();
}
+ return (data.cancelled && data.ret == 0) ? -ECANCELED : data.ret;
}
@@ -191,7 +191,6 @@ int stream_start(BlockDriverState *bs, BlockDriverState *base,
void *opaque)
{
StreamBlockJob *s;
- Coroutine *co;
s = block_job_create(&stream_job_type, bs, cb, opaque);
if (!s) {
@@ -203,8 +202,8 @@ int stream_start(BlockDriverState *bs, BlockDriverState *base,
pstrcpy(s->backing_file_id, sizeof(s->backing_file_id), base_id);
}
- co = qemu_coroutine_create(stream_run);
- trace_stream_start(bs, base, s, co, opaque);
- qemu_coroutine_enter(co, s);
+ s->common.co = qemu_coroutine_create(stream_run);
+ trace_stream_start(bs, base, s, s->common.co, opaque);
+ qemu_coroutine_enter(s->common.co, s);
return 0;
}
@@ -94,6 +94,12 @@ struct BlockJob {
BlockDriverState *bs;
/**
+ * The coroutine that executes the job. If not NULL, it is
+ * reentered when busy is false and the job is cancelled.
+ */
+ Coroutine *co;
+
+ /**
* Set to true if the job should cancel itself. The flag must
* always be tested just before toggling the busy flag from false
* to true. After a job has detected that the cancelled flag is
@@ -104,10 +110,8 @@ struct BlockJob {
/**
* Set to false by the job while it is in a quiescent state, where
- * no I/O is pending and cancellation can be processed without
- * issuing new I/O. The busy flag must be set to false when the
- * job goes to sleep on any condition that is not detected by
- * #qemu_aio_wait, such as a timer.
+ * no I/O is pending and the job goes to sleep on any condition
+ * that is not detected by #qemu_aio_wait, such as a timer.
*/
bool busy;
@@ -405,8 +409,11 @@ bool block_job_is_cancelled(BlockJob *job);
* immediately after #block_job_cancel_sync. Users of block jobs
* will usually protect the BlockDriverState objects with a reference
* count, should this be a concern.
+ *
+ * Returns the return value from the job if the job actually completed
+ * during the call, or -ECANCELED if it was canceled.
*/
-void block_job_cancel_sync(BlockJob *job);
+int block_job_cancel_sync(BlockJob *job);
/**
* stream_start:

The limitation on not having I/O after cancellation cannot really be
held. Even streaming has a very small race window where you could
cancel a job and have it report completion. If this window is hit,
bdrv_change_backing_file() will yield and possibly cause accesses to
dangling pointers etc.

So, let's just assume that we cannot know exactly what will happen
after the coroutine has set busy to false. We can set a very lax
condition:

- if we cancel the job, the coroutine won't set busy to false again
  (and hence will not call co_sleep_ns again);

- block_job_cancel_sync will wait for the coroutine to exit, which
  pretty much ensures no race.

Instead, we put a very strict condition on what to do while busy is
false. We track the coroutine that executes the job, and
block_job_cancel reenters it (thus cancelling a pending wait, for
example) before returning. Thus you cannot really do anything but
co_sleep_ns at that time.

This patch also drains all I/O *before* cancelling the job, so that
block_job_cancel is quite sure to find the coroutine in a quiescent
state (busy is false). For mirroring, this means that the job will
complete itself with a consistent view of the disk.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
---
 block.c        | 41 +++++++++++++++++++++++++++++++++++++++--
 block/stream.c |  7 +++----
 block_int.h    | 17 ++++++++++++-----
 3 files changed, 54 insertions(+), 11 deletions(-)
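
For illustration, here is roughly how a job coroutine is expected to use
the busy flag under these rules. This is only a sketch, not code from the
patch: more_work_to_do() and do_one_chunk_of_io() are made-up helpers
standing in for the job's real work, and the delay is arbitrary, while
co_sleep_ns, rt_clock and block_job_is_cancelled are the interfaces the
patch relies on.

    /* Rough sketch, not part of the patch: a job coroutine under the
     * new rules.  more_work_to_do() and do_one_chunk_of_io() are
     * hypothetical helpers for the job's real work.
     */
    static void coroutine_fn example_job_run(void *opaque)
    {
        BlockJob *job = opaque;
        int64_t delay_ns = 10000000;    /* arbitrary 10 ms throttle */

        while (more_work_to_do(job)) {
            if (block_job_is_cancelled(job)) {
                break;
            }

            do_one_chunk_of_io(job);

            /* Quiescent point: no I/O is in flight.  busy may only be
             * false across the sleep itself; block_job_cancel() sets
             * job->cancelled and reenters job->co right here, cutting
             * the sleep short so that the cancellation check at the
             * top of the loop runs promptly.
             */
            job->busy = false;
            co_sleep_ns(rt_clock, delay_ns);
            job->busy = true;
        }

        /* Completion (or cancellation) is then reported through the
         * job's completion callback, as the real stream job does.
         */
    }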
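
And a rough sketch of how a caller might use the new return value of
block_job_cancel_sync(); cancel_active_job() is a made-up wrapper, not
part of this patch.

    /* Rough sketch, not part of the patch: a hypothetical caller
     * checking the new return value of block_job_cancel_sync().
     */
    static int cancel_active_job(BlockDriverState *bs)
    {
        int ret;

        if (!bs->job) {
            return 0;
        }

        ret = block_job_cancel_sync(bs->job);
        if (ret == -ECANCELED) {
            /* The job was cancelled before it could complete. */
            return 0;
        }

        /* The job won the race and completed by itself; ret is its
         * own completion status (0 or a negative errno value).
         */
        return ret;
    }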
The limitation on not having I/O after cancellation cannot really be held. Even streaming has a very small race window where you could cancel a job and have it report completion. If this window is hit, bdrv_change_backing_file() will yield and possibly cause accesses to dangling pointers etc. So, let's just assume that we cannot know exactly what will happen after the coroutine has set busy to false. We can set a very lax condition: - if we cancel the job, the coroutine won't set it to false again (and hence will not call co_sleep_ns again). - block_job_cancel_sync will wait for the coroutine to exit, which pretty much ensures no race. Instead, we put a very strict condition on what to do while busy = false. We track the coroutine that executes the job and reenter it (thus cancelling a wait for example) before block_job_cancel restarts. Thus you cannot really do anything but co_sleep_ns at that time. This patch also drains the I/O *before* canceling the job, so that block_job_cancel is quite sure to find the coroutine in quiescent state (busy = false). For mirroring, this means that the job will complete itself with a consistent view of the disk. Signed-off-by: Paolo Bonzini <pbonzini@redhat.com> --- block.c | 41 +++++++++++++++++++++++++++++++++++++++-- block/stream.c | 7 +++---- block_int.h | 17 ++++++++++++----- 3 files changed, 54 insertions(+), 11 deletions(-)