[v3,4/5] block-copy: add a CoMutex

Message ID 20210608073344.53637-5-eesposit@redhat.com
State New
Series: block-copy: protect block-copy internal structures

Commit Message

Emanuele Giuseppe Esposito June 8, 2021, 7:33 a.m. UTC
Add a CoMutex to protect concurrent access to block-copy
data structures.

This mutex also protects .copy_bitmap, because the bitmap's
thread-safe API does not prevent block-copy from assigning two
tasks to the same bitmap region.

.finished, .cancelled and reads of .ret and .error_is_read will be
protected in the following patch, because they are also used
outside coroutines.

Also mark block_copy_task_create as coroutine_fn because:
1) it is static and only invoked by coroutine functions
2) this patch introduces and uses a CoMutex lock there

Signed-off-by: Emanuele Giuseppe Esposito <eesposit@redhat.com>
---
 block/block-copy.c | 82 ++++++++++++++++++++++++++++++----------------
 1 file changed, 54 insertions(+), 28 deletions(-)
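
The patch applies QEMU's standard coroutine-locking idioms throughout. For
readers new to them, here is a minimal sketch of the two primitives involved
(hypothetical names and fields; not the actual block-copy code):

    /*
     * Sketch only: a CoMutex protecting shared state, plus a CoQueue on
     * which coroutines wait while the lock is atomically released.
     */
    #include "qemu/osdep.h"
    #include "qemu/coroutine.h"
    #include "qemu/lockable.h"

    typedef struct State {
        CoMutex lock;       /* protects the fields below */
        int64_t in_flight;
        CoQueue waiters;    /* coroutines blocked on this state */
    } State;

    static void state_init(State *s)
    {
        qemu_co_mutex_init(&s->lock);
        qemu_co_queue_init(&s->waiters);
    }

    static void coroutine_fn wait_for_change(State *s)
    {
        QEMU_LOCK_GUARD(&s->lock);  /* unlocked automatically at scope exit */
        /* Atomically drop s->lock, sleep, and re-take the lock on wakeup */
        qemu_co_queue_wait(&s->waiters, &s->lock);
    }

    static void coroutine_fn publish_change(State *s)
    {
        QEMU_LOCK_GUARD(&s->lock);
        s->in_flight++;
        qemu_co_queue_restart_all(&s->waiters);  /* wake all waiters */
    }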

Comments

Vladimir Sementsov-Ogievskiy June 9, 2021, 12:25 p.m. UTC | #1
08.06.2021 10:33, Emanuele Giuseppe Esposito wrote:
> Add a CoMutex to protect concurrent access to block-copy
> data structures.
> 
> This mutex also protects .copy_bitmap, because the bitmap's
> thread-safe API does not prevent block-copy from assigning two
> tasks to the same bitmap region.
> 
> .finished, .cancelled and reads of .ret and .error_is_read will be
> protected in the following patch, because they are also used
> outside coroutines.
> 
> Also mark block_copy_task_create as coroutine_fn because:
> 1) it is static and only invoked by coroutine functions
> 2) this patch introduces and uses a CoMutex lock there
> 
> Signed-off-by: Emanuele Giuseppe Esposito <eesposit@redhat.com>

I missed it: did you add (and where?) a comment like "all APIs are thread-safe", or a note on what exactly is thread-safe?

> ---
>   block/block-copy.c | 82 ++++++++++++++++++++++++++++++----------------
>   1 file changed, 54 insertions(+), 28 deletions(-)
> 
> diff --git a/block/block-copy.c b/block/block-copy.c
> index e2adb5b2ea..56f62913e4 100644
> --- a/block/block-copy.c
> +++ b/block/block-copy.c
> @@ -61,6 +61,7 @@ typedef struct BlockCopyCallState {
>   
>       /* OUT parameters */
>       bool cancelled;
> +    /* Fields protected by lock in BlockCopyState */
>       bool error_is_read;
>       int ret;
>   } BlockCopyCallState;
> @@ -78,7 +79,7 @@ typedef struct BlockCopyTask {
>       int64_t bytes; /* only re-set in task_shrink, before running the task */
>       BlockCopyMethod method; /* initialized in block_copy_dirty_clusters() */
>   
> -    /* State */
> +    /* State. Protected by lock in BlockCopyState */
>       CoQueue wait_queue; /* coroutines blocked on this task */
>   
>       /* To reference all call states from BlockCopyState */
> @@ -99,7 +100,8 @@ typedef struct BlockCopyState {
>       BdrvChild *source;
>       BdrvChild *target;
>   
> -    /* State */
> +    /* State. Protected by lock */
> +    CoMutex lock;
>       int64_t in_flight_bytes;
>       BlockCopyMethod method;
>       QLIST_HEAD(, BlockCopyTask) tasks; /* All tasks from all block-copy calls */
> @@ -139,8 +141,10 @@ typedef struct BlockCopyState {
>       bool skip_unallocated;
>   } BlockCopyState;
>   

Maybe nitpicking, but if we want block_copy_set_progress_meter to be thread-safe, it should set s->progress under the mutex. Or we should document that it's not thread-safe and is only called once.
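
Something like this minimal sketch would cover the thread-safe option
(assuming the setter is currently a bare one-line assignment):

    void block_copy_set_progress_meter(BlockCopyState *s, ProgressMeter *pm)
    {
        QEMU_LOCK_GUARD(&s->lock);  /* take the new CoMutex for the store */
        s->progress = pm;
    }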


> -static BlockCopyTask *find_conflicting_task(BlockCopyState *s,
> -                                            int64_t offset, int64_t bytes)
> +/* Called with lock held */
> +static BlockCopyTask *find_conflicting_task_locked(BlockCopyState *s,
> +                                                   int64_t offset,
> +                                                   int64_t bytes)
>   {
>       BlockCopyTask *t;
>   
> @@ -160,18 +164,22 @@ static BlockCopyTask *find_conflicting_task(BlockCopyState *s,
>   static bool coroutine_fn block_copy_wait_one(BlockCopyState *s, int64_t offset,
>                                                int64_t bytes)
>   {
> -    BlockCopyTask *task = find_conflicting_task(s, offset, bytes);
> +    BlockCopyTask *task;
> +
> +    QEMU_LOCK_GUARD(&s->lock);
> +    task = find_conflicting_task_locked(s, offset, bytes);
>   
>       if (!task) {
>           return false;
>       }
>   
> -    qemu_co_queue_wait(&task->wait_queue, NULL);
> +    qemu_co_queue_wait(&task->wait_queue, &s->lock);
>   
>       return true;
>   }
>   
> -static int64_t block_copy_chunk_size(BlockCopyState *s)
> +/* Called with lock held */
> +static int64_t block_copy_chunk_size_locked(BlockCopyState *s)
>   {
>       switch (s->method) {
>       case COPY_READ_WRITE_CLUSTER:
> @@ -193,14 +201,16 @@ static int64_t block_copy_chunk_size(BlockCopyState *s)
>    * Search for the first dirty area in offset/bytes range and create task at
>    * the beginning of it.
>    */
> -static BlockCopyTask *block_copy_task_create(BlockCopyState *s,
> -                                             BlockCopyCallState *call_state,
> -                                             int64_t offset, int64_t bytes)
> +static coroutine_fn BlockCopyTask *block_copy_task_create(BlockCopyState *s,
> +                                                BlockCopyCallState *call_state,
> +                                                int64_t offset, int64_t bytes)
>   {
>       BlockCopyTask *task;
> -    int64_t max_chunk = block_copy_chunk_size(s);
> +    int64_t max_chunk;
>   
> -    max_chunk = MIN_NON_ZERO(max_chunk, call_state->max_chunk);
> +    QEMU_LOCK_GUARD(&s->lock);
> +    max_chunk = MIN_NON_ZERO(block_copy_chunk_size_locked(s),
> +                             call_state->max_chunk);
>       if (!bdrv_dirty_bitmap_next_dirty_area(s->copy_bitmap,
>                                              offset, offset + bytes,
>                                              max_chunk, &offset, &bytes))
> @@ -212,7 +222,7 @@ static BlockCopyTask *block_copy_task_create(BlockCopyState *s,
>       bytes = QEMU_ALIGN_UP(bytes, s->cluster_size);
>   
>       /* region is dirty, so no existent tasks possible in it */
> -    assert(!find_conflicting_task(s, offset, bytes));
> +    assert(!find_conflicting_task_locked(s, offset, bytes));
>   
>       bdrv_reset_dirty_bitmap(s->copy_bitmap, offset, bytes);
>       s->in_flight_bytes += bytes;
> @@ -248,16 +258,19 @@ static void coroutine_fn block_copy_task_shrink(BlockCopyTask *task,
>   

The function reads task->bytes outside the mutex. It's safe, as only this function modifies the field, and it's called once. Still, let's make the critical section a little wider, just for simplicity: a simple QEMU_LOCK_GUARD() at the start of the function.
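
That is, roughly this shape (a sketch; the unchanged rest of the body is
elided):

    static void coroutine_fn block_copy_task_shrink(BlockCopyTask *task,
                                                    int64_t new_bytes)
    {
        QEMU_LOCK_GUARD(&task->s->lock);  /* now covers the task->bytes reads */

        if (new_bytes == task->bytes) {
            return;
        }

        assert(new_bytes > 0 && new_bytes < task->bytes);

        /* ... in_flight_bytes, bitmap and wait_queue updates as in the patch ... */
    }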

>       assert(new_bytes > 0 && new_bytes < task->bytes);
>   
> -    task->s->in_flight_bytes -= task->bytes - new_bytes;
> -    bdrv_set_dirty_bitmap(task->s->copy_bitmap,
> -                          task->offset + new_bytes, task->bytes - new_bytes);
> -
> -    task->bytes = new_bytes;
> -    qemu_co_queue_restart_all(&task->wait_queue);
> +    WITH_QEMU_LOCK_GUARD(&task->s->lock) {
> +        task->s->in_flight_bytes -= task->bytes - new_bytes;
> +        bdrv_set_dirty_bitmap(task->s->copy_bitmap,
> +                              task->offset + new_bytes,
> +                              task->bytes - new_bytes);
> +        task->bytes = new_bytes;
> +        qemu_co_queue_restart_all(&task->wait_queue);
> +    }
>   }
>   
>   static void coroutine_fn block_copy_task_end(BlockCopyTask *task, int ret)
>   {
> +    QEMU_LOCK_GUARD(&task->s->lock);
>       task->s->in_flight_bytes -= task->bytes;
>       if (ret < 0) {
>           bdrv_set_dirty_bitmap(task->s->copy_bitmap, task->offset, task->bytes);
> @@ -335,6 +348,7 @@ BlockCopyState *block_copy_state_new(BdrvChild *source, BdrvChild *target,
>       }
>   
>       ratelimit_init(&s->rate_limit);
> +    qemu_co_mutex_init(&s->lock);
>       QLIST_INIT(&s->tasks);
>       QLIST_INIT(&s->calls);
>   
> @@ -390,6 +404,8 @@ static coroutine_fn int block_copy_task_run(AioTaskPool *pool,

Oops, it seems block_copy_task_run misses a block_copy_task_end() call before freeing the task. A preexisting bug..

>    * a full-size buffer or disabled if the copy_range attempt fails.  The output
>    * value of @method should be used for subsequent tasks.
>    * Returns 0 on success.
> + *
> + * Called with lock held.
>    */
>   static int coroutine_fn block_copy_do_copy(BlockCopyState *s,
>                                              int64_t offset, int64_t bytes,
> @@ -476,16 +492,20 @@ static coroutine_fn int block_copy_task_entry(AioTask *task)
>       int ret;
>   
>       ret = block_copy_do_copy(s, t->offset, t->bytes, &method, &error_is_read);
> -    if (s->method == t->method) {
> -        s->method = method;
> -    }
> -    if (ret < 0) {
> -        if (!t->call_state->ret) {
> -            t->call_state->ret = ret;
> -            t->call_state->error_is_read = error_is_read;
> +
> +    WITH_QEMU_LOCK_GUARD(&t->s->lock) {
> +        if (s->method == t->method) {
> +            s->method = method;
> +        }
> +
> +        if (ret < 0) {
> +            if (!t->call_state->ret) {
> +                t->call_state->ret = ret;
> +                t->call_state->error_is_read = error_is_read;
> +            }
> +        } else {
> +            progress_work_done(t->s->progress, t->bytes);
>           }
> -    } else {
> -        progress_work_done(t->s->progress, t->bytes);
>       }
>       co_put_to_shres(t->s->mem, t->bytes);
>       block_copy_task_end(t, ret);
> @@ -587,10 +607,12 @@ int64_t block_copy_reset_unallocated(BlockCopyState *s,
>       bytes = clusters * s->cluster_size;
>   
>       if (!ret) {
> +        qemu_co_mutex_lock(&s->lock);
>           bdrv_reset_dirty_bitmap(s->copy_bitmap, offset, bytes);
>           progress_set_remaining(s->progress,
>                                  bdrv_get_dirty_count(s->copy_bitmap) +
>                                  s->in_flight_bytes);
> +        qemu_co_mutex_unlock(&s->lock);
>       }
>   
>       *count = bytes;
> @@ -729,7 +751,9 @@ static int coroutine_fn block_copy_common(BlockCopyCallState *call_state)
>   {
>       int ret;
>   
> +    qemu_co_mutex_lock(&call_state->s->lock);
>       QLIST_INSERT_HEAD(&call_state->s->calls, call_state, list);
> +    qemu_co_mutex_unlock(&call_state->s->lock);
>   
>       do {
>           ret = block_copy_dirty_clusters(call_state);
> @@ -756,7 +780,9 @@ static int coroutine_fn block_copy_common(BlockCopyCallState *call_state)
>           call_state->cb(call_state->cb_opaque);
>       }
>   
> +    qemu_co_mutex_lock(&call_state->s->lock);
>       QLIST_REMOVE(call_state, list);
> +    qemu_co_mutex_unlock(&call_state->s->lock);
>   
>       return ret;
>   }
> 

I looked through the whole file on top of the series, and overall it seems OK to me. I still don't really like the additional atomics, but they should probably be refactored together with all the status getters into one block_copy_call_status().. So that's work for some future day; I will not do it in parallel :)
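
One hypothetical shape for that future refactoring (purely a sketch; no such
function exists yet):

    typedef struct BlockCopyCallStatus {
        bool finished;
        bool cancelled;
        bool error_is_read;
        int ret;
    } BlockCopyCallStatus;

    /* Read all OUT fields of a call state in one consistent snapshot. */
    void block_copy_call_status(BlockCopyCallState *call_state,
                                BlockCopyCallStatus *status);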

I don't insist, but for me patches 2, 4 and 5 only make sense as a whole, so I'd merge them into one patch called "make block-copy APIs thread-safe". Otherwise, thread-safety comes only in the last patch, and patches 2 and 4 are a kind of preparation that is hard to review separately.

Anyway, reviewing such a change means walking through the whole file, trying to understand how thread-safe it is now.

Emanuele Giuseppe Esposito June 10, 2021, 2:49 p.m. UTC | #2
On 09/06/2021 14:25, Vladimir Sementsov-Ogievskiy wrote:
> 08.06.2021 10:33, Emanuele Giuseppe Esposito wrote:
>> Add a CoMutex to protect concurrent access to block-copy
>> data structures.
>>
>> This mutex also protects .copy_bitmap, because the bitmap's
>> thread-safe API does not prevent block-copy from assigning two
>> tasks to the same bitmap region.
>>
>> .finished, .cancelled and reads of .ret and .error_is_read will be
>> protected in the following patch, because they are also used
>> outside coroutines.
>>
>> Also mark block_copy_task_create as coroutine_fn because:
>> 1) it is static and only invoked by coroutine functions
>> 2) this patch introduces and uses a CoMutex lock there
>>
>> Signed-off-by: Emanuele Giuseppe Esposito <eesposit@redhat.com>
> 
> I missed it: did you add (and where?) a comment like "all APIs are 
> thread-safe", or a note on what exactly is thread-safe?

You're right, I can't find that comment either. I will add it again.
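
E.g. at the top of block-copy.h, something like (a sketch of possible
wording, not the final comment):

    /*
     * The block-copy API is thread-safe: functions here may be called from
     * multiple coroutines concurrently, unless documented otherwise.
     */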
> 
>> ---
>>   block/block-copy.c | 82 ++++++++++++++++++++++++++++++----------------
>>   1 file changed, 54 insertions(+), 28 deletions(-)
>>
>> diff --git a/block/block-copy.c b/block/block-copy.c
>> index e2adb5b2ea..56f62913e4 100644
>> --- a/block/block-copy.c
>> +++ b/block/block-copy.c
>> @@ -61,6 +61,7 @@ typedef struct BlockCopyCallState {
>>       /* OUT parameters */
>>       bool cancelled;
>> +    /* Fields protected by lock in BlockCopyState */
>>       bool error_is_read;
>>       int ret;
>>   } BlockCopyCallState;
>> @@ -78,7 +79,7 @@ typedef struct BlockCopyTask {
>>       int64_t bytes; /* only re-set in task_shrink, before running the 
>> task */
>>       BlockCopyMethod method; /* initialized in 
>> block_copy_dirty_clusters() */
>> -    /* State */
>> +    /* State. Protected by lock in BlockCopyState */
>>       CoQueue wait_queue; /* coroutines blocked on this task */
>>       /* To reference all call states from BlockCopyState */
>> @@ -99,7 +100,8 @@ typedef struct BlockCopyState {
>>       BdrvChild *source;
>>       BdrvChild *target;
>> -    /* State */
>> +    /* State. Protected by lock */
>> +    CoMutex lock;
>>       int64_t in_flight_bytes;
>>       BlockCopyMethod method;
>>       QLIST_HEAD(, BlockCopyTask) tasks; /* All tasks from all 
>> block-copy calls */
>> @@ -139,8 +141,10 @@ typedef struct BlockCopyState {
>>       bool skip_unallocated;
>>   } BlockCopyState;
> 
> Maybe nitpicking, but if we want block_copy_set_progress_meter to be 
> thread-safe, it should set s->progress under the mutex. Or we should 
> document that it's not thread-safe and is only called once.

Document it, definitely. It is only called by the job before starting 
block-copy, so it is safe as it is.
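
A one-line comment on the prototype should be enough, e.g. (sketch):

    /* Not thread-safe: call it once, before any block-copy operation runs. */
    void block_copy_set_progress_meter(BlockCopyState *s, ProgressMeter *pm);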

> 
> 
>> -static BlockCopyTask *find_conflicting_task(BlockCopyState *s,
>> -                                            int64_t offset, int64_t 
>> bytes)
>> +/* Called with lock held */
>> +static BlockCopyTask *find_conflicting_task_locked(BlockCopyState *s,
>> +                                                   int64_t offset,
>> +                                                   int64_t bytes)
>>   {
>>       BlockCopyTask *t;
>> @@ -160,18 +164,22 @@ static BlockCopyTask 
>> *find_conflicting_task(BlockCopyState *s,
>>   static bool coroutine_fn block_copy_wait_one(BlockCopyState *s, 
>> int64_t offset,
>>                                                int64_t bytes)
>>   {
>> -    BlockCopyTask *task = find_conflicting_task(s, offset, bytes);
>> +    BlockCopyTask *task;
>> +
>> +    QEMU_LOCK_GUARD(&s->lock);
>> +    task = find_conflicting_task_locked(s, offset, bytes);
>>       if (!task) {
>>           return false;
>>       }
>> -    qemu_co_queue_wait(&task->wait_queue, NULL);
>> +    qemu_co_queue_wait(&task->wait_queue, &s->lock);
>>       return true;
>>   }
>> -static int64_t block_copy_chunk_size(BlockCopyState *s)
>> +/* Called with lock held */
>> +static int64_t block_copy_chunk_size_locked(BlockCopyState *s)
>>   {
>>       switch (s->method) {
>>       case COPY_READ_WRITE_CLUSTER:
>> @@ -193,14 +201,16 @@ static int64_t 
>> block_copy_chunk_size(BlockCopyState *s)
>>    * Search for the first dirty area in offset/bytes range and create 
>> task at
>>    * the beginning of it.
>>    */
>> -static BlockCopyTask *block_copy_task_create(BlockCopyState *s,
>> -                                             BlockCopyCallState 
>> *call_state,
>> -                                             int64_t offset, int64_t 
>> bytes)
>> +static coroutine_fn BlockCopyTask 
>> *block_copy_task_create(BlockCopyState *s,
>> +                                                BlockCopyCallState 
>> *call_state,
>> +                                                int64_t offset, 
>> int64_t bytes)
>>   {
>>       BlockCopyTask *task;
>> -    int64_t max_chunk = block_copy_chunk_size(s);
>> +    int64_t max_chunk;
>> -    max_chunk = MIN_NON_ZERO(max_chunk, call_state->max_chunk);
>> +    QEMU_LOCK_GUARD(&s->lock);
>> +    max_chunk = MIN_NON_ZERO(block_copy_chunk_size_locked(s),
>> +                             call_state->max_chunk);
>>       if (!bdrv_dirty_bitmap_next_dirty_area(s->copy_bitmap,
>>                                              offset, offset + bytes,
>>                                              max_chunk, &offset, &bytes))
>> @@ -212,7 +222,7 @@ static BlockCopyTask 
>> *block_copy_task_create(BlockCopyState *s,
>>       bytes = QEMU_ALIGN_UP(bytes, s->cluster_size);
>>       /* region is dirty, so no existent tasks possible in it */
>> -    assert(!find_conflicting_task(s, offset, bytes));
>> +    assert(!find_conflicting_task_locked(s, offset, bytes));
>>       bdrv_reset_dirty_bitmap(s->copy_bitmap, offset, bytes);
>>       s->in_flight_bytes += bytes;
>> @@ -248,16 +258,19 @@ static void coroutine_fn 
>> block_copy_task_shrink(BlockCopyTask *task,
> 
> The function reads task->bytes outside the mutex. It's safe, as only this 
> function modifies the field, and it's called once. Still, let's make the 
> critical section a little wider, just for simplicity: a simple 
> QEMU_LOCK_GUARD() at the start of the function.

Done.
> 
>>       assert(new_bytes > 0 && new_bytes < task->bytes);
>> -    task->s->in_flight_bytes -= task->bytes - new_bytes;
>> -    bdrv_set_dirty_bitmap(task->s->copy_bitmap,
>> -                          task->offset + new_bytes, task->bytes - 
>> new_bytes);
>> -
>> -    task->bytes = new_bytes;
>> -    qemu_co_queue_restart_all(&task->wait_queue);
>> +    WITH_QEMU_LOCK_GUARD(&task->s->lock) {
>> +        task->s->in_flight_bytes -= task->bytes - new_bytes;
>> +        bdrv_set_dirty_bitmap(task->s->copy_bitmap,
>> +                              task->offset + new_bytes,
>> +                              task->bytes - new_bytes);
>> +        task->bytes = new_bytes;
>> +        qemu_co_queue_restart_all(&task->wait_queue);
>> +    }
>>   }
>>   static void coroutine_fn block_copy_task_end(BlockCopyTask *task, 
>> int ret)
>>   {
>> +    QEMU_LOCK_GUARD(&task->s->lock);
>>       task->s->in_flight_bytes -= task->bytes;
>>       if (ret < 0) {
>>           bdrv_set_dirty_bitmap(task->s->copy_bitmap, task->offset, 
>> task->bytes);
>> @@ -335,6 +348,7 @@ BlockCopyState *block_copy_state_new(BdrvChild 
>> *source, BdrvChild *target,
>>       }
>>       ratelimit_init(&s->rate_limit);
>> +    qemu_co_mutex_init(&s->lock);
>>       QLIST_INIT(&s->tasks);
>>       QLIST_INIT(&s->calls);
>> @@ -390,6 +404,8 @@ static coroutine_fn int 
>> block_copy_task_run(AioTaskPool *pool,
> 
> Oops, it seems block_copy_task_run misses a block_copy_task_end() call 
> before freeing the task. A preexisting bug..

Nope, it is not a bug: .func() internally calls block_copy_task_end().
All good here.
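
For reference, the chain is (simplified):

    block_copy_task_run()
      -> aio_task_pool_start_task(pool, &task->task)
        -> task.func == block_copy_task_entry()
          -> block_copy_task_end(t, ret)   /* runs on both success and error */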

> 
>>    * a full-size buffer or disabled if the copy_range attempt fails.  
>> The output
>>    * value of @method should be used for subsequent tasks.
>>    * Returns 0 on success.
>> + *
>> + * Called with lock held.
>>    */
>>   static int coroutine_fn block_copy_do_copy(BlockCopyState *s,
>>                                              int64_t offset, int64_t 
>> bytes,
>> @@ -476,16 +492,20 @@ static coroutine_fn int 
>> block_copy_task_entry(AioTask *task)
>>       int ret;
>>       ret = block_copy_do_copy(s, t->offset, t->bytes, &method, 
>> &error_is_read);
>> -    if (s->method == t->method) {
>> -        s->method = method;
>> -    }
>> -    if (ret < 0) {
>> -        if (!t->call_state->ret) {
>> -            t->call_state->ret = ret;
>> -            t->call_state->error_is_read = error_is_read;
>> +
>> +    WITH_QEMU_LOCK_GUARD(&t->s->lock) {
>> +        if (s->method == t->method) {
>> +            s->method = method;
>> +        }
>> +
>> +        if (ret < 0) {
>> +            if (!t->call_state->ret) {
>> +                t->call_state->ret = ret;
>> +                t->call_state->error_is_read = error_is_read;
>> +            }
>> +        } else {
>> +            progress_work_done(t->s->progress, t->bytes);
>>           }
>> -    } else {
>> -        progress_work_done(t->s->progress, t->bytes);
>>       }
>>       co_put_to_shres(t->s->mem, t->bytes);
>>       block_copy_task_end(t, ret);
>> @@ -587,10 +607,12 @@ int64_t 
>> block_copy_reset_unallocated(BlockCopyState *s,
>>       bytes = clusters * s->cluster_size;
>>       if (!ret) {
>> +        qemu_co_mutex_lock(&s->lock);
>>           bdrv_reset_dirty_bitmap(s->copy_bitmap, offset, bytes);
>>           progress_set_remaining(s->progress,
>>                                  bdrv_get_dirty_count(s->copy_bitmap) +
>>                                  s->in_flight_bytes);
>> +        qemu_co_mutex_unlock(&s->lock);
>>       }
>>       *count = bytes;
>> @@ -729,7 +751,9 @@ static int coroutine_fn 
>> block_copy_common(BlockCopyCallState *call_state)
>>   {
>>       int ret;
>> +    qemu_co_mutex_lock(&call_state->s->lock);
>>       QLIST_INSERT_HEAD(&call_state->s->calls, call_state, list);
>> +    qemu_co_mutex_unlock(&call_state->s->lock);
>>       do {
>>           ret = block_copy_dirty_clusters(call_state);
>> @@ -756,7 +780,9 @@ static int coroutine_fn 
>> block_copy_common(BlockCopyCallState *call_state)
>>           call_state->cb(call_state->cb_opaque);
>>       }
>> +    qemu_co_mutex_lock(&call_state->s->lock);
>>       QLIST_REMOVE(call_state, list);
>> +    qemu_co_mutex_unlock(&call_state->s->lock);
>>       return ret;
>>   }
>>
> 
> I looked through the whole file on top of the series, and overall it 
> seems OK to me. I still don't really like the additional atomics, but 
> they should probably be refactored together with all the status getters 
> into one block_copy_call_status().. So that's work for some future day; 
> I will not do it in parallel :)
> 
> I don't insist, but for me patches 2, 4 and 5 only make sense as a whole, 
> so I'd merge them into one patch called "make block-copy APIs 
> thread-safe". Otherwise, thread-safety comes only in the last patch, and 
> patches 2 and 4 are a kind of preparation that is hard to review separately.

I will try and see how they look. I think that kept separate they do not 
look too bad; putting everything together might be very difficult to 
understand. And this stuff is already complicated on its own...

Thank you,
Emanuele
> 
> Anyway, reviewing such a change means walking through the whole file, 
> trying to understand how thread-safe it is now.
>

Patch

diff --git a/block/block-copy.c b/block/block-copy.c
index e2adb5b2ea..56f62913e4 100644
--- a/block/block-copy.c
+++ b/block/block-copy.c
@@ -61,6 +61,7 @@  typedef struct BlockCopyCallState {
 
     /* OUT parameters */
     bool cancelled;
+    /* Fields protected by lock in BlockCopyState */
     bool error_is_read;
     int ret;
 } BlockCopyCallState;
@@ -78,7 +79,7 @@  typedef struct BlockCopyTask {
     int64_t bytes; /* only re-set in task_shrink, before running the task */
     BlockCopyMethod method; /* initialized in block_copy_dirty_clusters() */
 
-    /* State */
+    /* State. Protected by lock in BlockCopyState */
     CoQueue wait_queue; /* coroutines blocked on this task */
 
     /* To reference all call states from BlockCopyState */
@@ -99,7 +100,8 @@  typedef struct BlockCopyState {
     BdrvChild *source;
     BdrvChild *target;
 
-    /* State */
+    /* State. Protected by lock */
+    CoMutex lock;
     int64_t in_flight_bytes;
     BlockCopyMethod method;
     QLIST_HEAD(, BlockCopyTask) tasks; /* All tasks from all block-copy calls */
@@ -139,8 +141,10 @@  typedef struct BlockCopyState {
     bool skip_unallocated;
 } BlockCopyState;
 
-static BlockCopyTask *find_conflicting_task(BlockCopyState *s,
-                                            int64_t offset, int64_t bytes)
+/* Called with lock held */
+static BlockCopyTask *find_conflicting_task_locked(BlockCopyState *s,
+                                                   int64_t offset,
+                                                   int64_t bytes)
 {
     BlockCopyTask *t;
 
@@ -160,18 +164,22 @@  static BlockCopyTask *find_conflicting_task(BlockCopyState *s,
 static bool coroutine_fn block_copy_wait_one(BlockCopyState *s, int64_t offset,
                                              int64_t bytes)
 {
-    BlockCopyTask *task = find_conflicting_task(s, offset, bytes);
+    BlockCopyTask *task;
+
+    QEMU_LOCK_GUARD(&s->lock);
+    task = find_conflicting_task_locked(s, offset, bytes);
 
     if (!task) {
         return false;
     }
 
-    qemu_co_queue_wait(&task->wait_queue, NULL);
+    qemu_co_queue_wait(&task->wait_queue, &s->lock);
 
     return true;
 }
 
-static int64_t block_copy_chunk_size(BlockCopyState *s)
+/* Called with lock held */
+static int64_t block_copy_chunk_size_locked(BlockCopyState *s)
 {
     switch (s->method) {
     case COPY_READ_WRITE_CLUSTER:
@@ -193,14 +201,16 @@  static int64_t block_copy_chunk_size(BlockCopyState *s)
  * Search for the first dirty area in offset/bytes range and create task at
  * the beginning of it.
  */
-static BlockCopyTask *block_copy_task_create(BlockCopyState *s,
-                                             BlockCopyCallState *call_state,
-                                             int64_t offset, int64_t bytes)
+static coroutine_fn BlockCopyTask *block_copy_task_create(BlockCopyState *s,
+                                                BlockCopyCallState *call_state,
+                                                int64_t offset, int64_t bytes)
 {
     BlockCopyTask *task;
-    int64_t max_chunk = block_copy_chunk_size(s);
+    int64_t max_chunk;
 
-    max_chunk = MIN_NON_ZERO(max_chunk, call_state->max_chunk);
+    QEMU_LOCK_GUARD(&s->lock);
+    max_chunk = MIN_NON_ZERO(block_copy_chunk_size_locked(s),
+                             call_state->max_chunk);
     if (!bdrv_dirty_bitmap_next_dirty_area(s->copy_bitmap,
                                            offset, offset + bytes,
                                            max_chunk, &offset, &bytes))
@@ -212,7 +222,7 @@  static BlockCopyTask *block_copy_task_create(BlockCopyState *s,
     bytes = QEMU_ALIGN_UP(bytes, s->cluster_size);
 
     /* region is dirty, so no existent tasks possible in it */
-    assert(!find_conflicting_task(s, offset, bytes));
+    assert(!find_conflicting_task_locked(s, offset, bytes));
 
     bdrv_reset_dirty_bitmap(s->copy_bitmap, offset, bytes);
     s->in_flight_bytes += bytes;
@@ -248,16 +258,19 @@  static void coroutine_fn block_copy_task_shrink(BlockCopyTask *task,
 
     assert(new_bytes > 0 && new_bytes < task->bytes);
 
-    task->s->in_flight_bytes -= task->bytes - new_bytes;
-    bdrv_set_dirty_bitmap(task->s->copy_bitmap,
-                          task->offset + new_bytes, task->bytes - new_bytes);
-
-    task->bytes = new_bytes;
-    qemu_co_queue_restart_all(&task->wait_queue);
+    WITH_QEMU_LOCK_GUARD(&task->s->lock) {
+        task->s->in_flight_bytes -= task->bytes - new_bytes;
+        bdrv_set_dirty_bitmap(task->s->copy_bitmap,
+                              task->offset + new_bytes,
+                              task->bytes - new_bytes);
+        task->bytes = new_bytes;
+        qemu_co_queue_restart_all(&task->wait_queue);
+    }
 }
 
 static void coroutine_fn block_copy_task_end(BlockCopyTask *task, int ret)
 {
+    QEMU_LOCK_GUARD(&task->s->lock);
     task->s->in_flight_bytes -= task->bytes;
     if (ret < 0) {
         bdrv_set_dirty_bitmap(task->s->copy_bitmap, task->offset, task->bytes);
@@ -335,6 +348,7 @@  BlockCopyState *block_copy_state_new(BdrvChild *source, BdrvChild *target,
     }
 
     ratelimit_init(&s->rate_limit);
+    qemu_co_mutex_init(&s->lock);
     QLIST_INIT(&s->tasks);
     QLIST_INIT(&s->calls);
 
@@ -390,6 +404,8 @@  static coroutine_fn int block_copy_task_run(AioTaskPool *pool,
  * a full-size buffer or disabled if the copy_range attempt fails.  The output
  * value of @method should be used for subsequent tasks.
  * Returns 0 on success.
+ *
+ * Called with lock held.
  */
 static int coroutine_fn block_copy_do_copy(BlockCopyState *s,
                                            int64_t offset, int64_t bytes,
@@ -476,16 +492,20 @@  static coroutine_fn int block_copy_task_entry(AioTask *task)
     int ret;
 
     ret = block_copy_do_copy(s, t->offset, t->bytes, &method, &error_is_read);
-    if (s->method == t->method) {
-        s->method = method;
-    }
-    if (ret < 0) {
-        if (!t->call_state->ret) {
-            t->call_state->ret = ret;
-            t->call_state->error_is_read = error_is_read;
+
+    WITH_QEMU_LOCK_GUARD(&t->s->lock) {
+        if (s->method == t->method) {
+            s->method = method;
+        }
+
+        if (ret < 0) {
+            if (!t->call_state->ret) {
+                t->call_state->ret = ret;
+                t->call_state->error_is_read = error_is_read;
+            }
+        } else {
+            progress_work_done(t->s->progress, t->bytes);
         }
-    } else {
-        progress_work_done(t->s->progress, t->bytes);
     }
     co_put_to_shres(t->s->mem, t->bytes);
     block_copy_task_end(t, ret);
@@ -587,10 +607,12 @@  int64_t block_copy_reset_unallocated(BlockCopyState *s,
     bytes = clusters * s->cluster_size;
 
     if (!ret) {
+        qemu_co_mutex_lock(&s->lock);
         bdrv_reset_dirty_bitmap(s->copy_bitmap, offset, bytes);
         progress_set_remaining(s->progress,
                                bdrv_get_dirty_count(s->copy_bitmap) +
                                s->in_flight_bytes);
+        qemu_co_mutex_unlock(&s->lock);
     }
 
     *count = bytes;
@@ -729,7 +751,9 @@  static int coroutine_fn block_copy_common(BlockCopyCallState *call_state)
 {
     int ret;
 
+    qemu_co_mutex_lock(&call_state->s->lock);
     QLIST_INSERT_HEAD(&call_state->s->calls, call_state, list);
+    qemu_co_mutex_unlock(&call_state->s->lock);
 
     do {
         ret = block_copy_dirty_clusters(call_state);
@@ -756,7 +780,9 @@  static int coroutine_fn block_copy_common(BlockCopyCallState *call_state)
         call_state->cb(call_state->cb_opaque);
     }
 
+    qemu_co_mutex_lock(&call_state->s->lock);
     QLIST_REMOVE(call_state, list);
+    qemu_co_mutex_unlock(&call_state->s->lock);
 
     return ret;
 }