
[PULL,1/4] curl: do not use aio_context_acquire/release

Message ID: 20170227163447.20428-2-stefanha@redhat.com
State: New

Commit Message

Stefan Hajnoczi Feb. 27, 2017, 4:34 p.m. UTC
From: Paolo Bonzini <pbonzini@redhat.com>

Now that all bottom halves and callbacks take care of taking the
AioContext lock, we can migrate some users away from it and to a
specific QemuMutex or CoMutex.

Protect BDRVCURLState access with a QemuMutex.

Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Message-id: 20170222180725.28611-2-pbonzini@redhat.com
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
 block/curl.c | 24 +++++++++++++++---------
 1 file changed, 15 insertions(+), 9 deletions(-)
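
The locking convention the patch introduces is that s->mutex protects all
BDRVCURLState fields and must be dropped around completion callbacks, which
may re-enter the driver (see the unlock/relock around acb->common.cb() in
curl_multi_check_completion in the patch below).  A minimal sketch of that
convention -- all names here (DriverState, CompletionFunc, complete_one) are
illustrative, not actual block/curl.c code:

    #include "qemu/osdep.h"
    #include "qemu/thread.h"

    typedef void CompletionFunc(void *opaque, int ret);

    typedef struct DriverState {
        QemuMutex mutex;        /* protects every field below */
        int inflight;
    } DriverState;

    /* Called with s->mutex held, like curl_multi_check_completion(). */
    static void complete_one(DriverState *s, CompletionFunc *cb, void *opaque)
    {
        s->inflight--;
        /* Drop the lock around the user callback: QemuMutex is not
         * recursive, and the callback may re-enter the driver and try
         * to take s->mutex again. */
        qemu_mutex_unlock(&s->mutex);
        cb(opaque, 0);
        qemu_mutex_lock(&s->mutex);
    }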

Comments

Richard W.M. Jones May 3, 2017, 2:54 p.m. UTC | #1
On Mon, Feb 27, 2017 at 04:34:44PM +0000, Stefan Hajnoczi wrote:
> From: Paolo Bonzini <pbonzini@redhat.com>
> 
> Now that all bottom halves and callbacks take care of taking the
> AioContext lock, we can migrate some users away from it and to a
> specific QemuMutex or CoMutex.
> 
> Protect BDRVCURLState access with a QemuMutex.
>
> Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
> Message-id: 20170222180725.28611-2-pbonzini@redhat.com
> Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>

https://bugzilla.redhat.com/show_bug.cgi?id=1447590

I've been tracking down a bug in the curl driver which affects
virt-v2v, and this commit is implicated.

It manifests itself as a hang while downloading a certain file within
a remotely located disk image accessed over https.

Unfortunately the environment for the bug is extremely difficult to
reproduce (not the bug itself -- that is very easy to reproduce once
the environment is set up), so I don't have a simple reproducer that
anyone could try.  I'll work on that next.

However, I bisected it, and it is caused by this commit.  The hang
affects qemu from master; reverting this commit on top of master
fixes the hang.

Is there anything obviously wrong with the commit?

Rich.

Paolo Bonzini May 3, 2017, 2:59 p.m. UTC | #2
On 03/05/2017 16:54, Richard W.M. Jones wrote:
> On Mon, Feb 27, 2017 at 04:34:44PM +0000, Stefan Hajnoczi wrote:
>> From: Paolo Bonzini <pbonzini@redhat.com>
>>
>> Now that all bottom halves and callbacks take care of taking the
>> AioContext lock, we can migrate some users away from it and to a
>> specific QemuMutex or CoMutex.
>>
>> Protect BDRVCURLState access with a QemuMutex.
>>
>> Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
>> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
>> Message-id: 20170222180725.28611-2-pbonzini@redhat.com
>> Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
> 
> https://bugzilla.redhat.com/show_bug.cgi?id=1447590
> 
> I've been tracking down a bug in the curl driver which affects
> virt-v2v, and this commit is implicated.
> 
> It manifests itself as a hang while downloading a certain file within
> a remotely located disk image accessed over https.
> 
> Unfortunately the environment for the bug is extremely difficult to
> reproduce (not the bug itself -- that is very easy to reproduce once
> the environment is set up), so I don't have a simple reproducer that
> anyone could try.  I'll work on that next.
> 
> However, I bisected it, and it is caused by this commit.  The hang
> affects qemu from master; reverting this commit on top of master
> fixes the hang.
> 
> Is there anything obviously wrong with the commit?

Maybe there is, can you grab an all-threads backtrace via gdb?

Paolo

Richard W.M. Jones May 3, 2017, 3:31 p.m. UTC | #3
On Wed, May 03, 2017 at 04:59:22PM +0200, Paolo Bonzini wrote:
> Maybe there is, can you grab an all-threads backtrace via gdb?

Program received signal SIGINT, Interrupt.
0x00007f1d57f861bd in __lll_lock_wait () from /lib64/libpthread.so.0
(gdb) t a a bt

Thread 9 (Thread 0x7f1d49967700 (LWP 29949)):
#0  0x00007f1d53a39bf9 in syscall () at /lib64/libc.so.6
#1  0x000055ea5c358856 in qemu_event_wait (val=<optimized out>, f=<optimized out>) at /home/rjones/d/qemu/include/qemu/futex.h:26
#2  0x000055ea5c358856 in qemu_event_wait (ev=ev@entry=0x55ea5cde4ac4 <rcu_call_ready_event>) at util/qemu-thread-posix.c:399
#3  0x000055ea5c3680fe in call_rcu_thread (opaque=<optimized out>)
    at util/rcu.c:249
#4  0x00007f1d57f7fdc5 in start_thread () at /lib64/libpthread.so.0
#5  0x00007f1d53a3f73d in clone () at /lib64/libc.so.6

Thread 7 (Thread 0x7f1d46987700 (LWP 29955)):
#0  0x00007f1d57f861bd in __lll_lock_wait () at /lib64/libpthread.so.0
#1  0x00007f1d57f81d02 in _L_lock_791 () at /lib64/libpthread.so.0
#2  0x00007f1d57f81c08 in pthread_mutex_lock () at /lib64/libpthread.so.0
#3  0x000055ea5c3583e9 in qemu_mutex_lock (mutex=mutex@entry=0x55ea5c9a2400 <qemu_global_mutex>) at util/qemu-thread-posix.c:60
#4  0x000055ea5bff512c in qemu_mutex_lock_iothread ()
    at /home/rjones/d/qemu/cpus.c:1565
#5  0x000055ea5c0081c7 in kvm_cpu_exec (cpu=cpu@entry=0x55ea5e7df020)
    at /home/rjones/d/qemu/kvm-all.c:2096
#6  0x000055ea5bff5332 in qemu_kvm_cpu_thread_fn (arg=0x55ea5e7df020)
    at /home/rjones/d/qemu/cpus.c:1118
#7  0x00007f1d57f7fdc5 in start_thread () at /lib64/libpthread.so.0
#8  0x00007f1d53a3f73d in clone () at /lib64/libc.so.6

Thread 1 (Thread 0x7f1d5a4e2c00 (LWP 29947)):
#0  0x00007f1d57f861bd in __lll_lock_wait () at /lib64/libpthread.so.0
#1  0x00007f1d57f81d02 in _L_lock_791 () at /lib64/libpthread.so.0
#2  0x00007f1d57f81c08 in pthread_mutex_lock () at /lib64/libpthread.so.0
#3  0x000055ea5c3583e9 in qemu_mutex_lock (mutex=mutex@entry=0x55ea5e5ac178)
    at util/qemu-thread-posix.c:60
#4  0x000055ea5c2fb2d1 in curl_readv_bh_cb (p=0x55ea6013c620)
    at block/curl.c:824
#5  0x000055ea5c352fe1 in aio_bh_poll (bh=0x55ea60eb99d0) at util/async.c:90
#6  0x000055ea5c352fe1 in aio_bh_poll (ctx=ctx@entry=0x55ea5e555970)
    at util/async.c:118
#7  0x000055ea5c3563c4 in aio_poll (ctx=0x55ea5e555970, blocking=blocking@entry=true) at util/aio-posix.c:682
#8  0x000055ea5c2fb09e in curl_init_state (bs=0x55ea5e5a56b0, s=s@entry=0x55ea5e5ab100) at block/curl.c:470
#9  0x000055ea5c2fb39b in curl_readv_bh_cb (p=0x55ea5f650c30)
    at block/curl.c:839
#10 0x000055ea5c352fe1 in aio_bh_poll (bh=0x55ea60eb1910) at util/async.c:90
#11 0x000055ea5c352fe1 in aio_bh_poll (ctx=ctx@entry=0x55ea5e555970)
    at util/async.c:118
#12 0x000055ea5c355f80 in aio_dispatch (ctx=0x55ea5e555970)
    at util/aio-posix.c:429
#13 0x000055ea5c352ebe in aio_ctx_dispatch (source=<optimized out>, callback=<optimized out>, user_data=<optimized out>) at util/async.c:261
#14 0x00007f1d55489d7a in g_main_context_dispatch () at /lib64/libglib-2.0.so.0
#15 0x000055ea5c35523c in main_loop_wait () at util/main-loop.c:213
#16 0x000055ea5c35523c in main_loop_wait (timeout=<optimized out>)
    at util/main-loop.c:261
#17 0x000055ea5c35523c in main_loop_wait (nonblocking=nonblocking@entry=0)
    at util/main-loop.c:517
#18 0x000055ea5bfb44a7 in main () at vl.c:1899
#19 0x000055ea5bfb44a7 in main (argc=<optimized out>, argv=<optimized out>, envp=<optimized out>) at vl.c:4717


I'll get you one with more symbols in a minute, just installing
debuginfo ...

BTW this bug is reproducible on both Fedora 26 and RHEL 7.4
(in both cases with either qemu 2.9 or qemu from git).

Rich.
Richard W.M. Jones May 3, 2017, 3:34 p.m. UTC | #4
Same backtrace, but with some more symbols:

__lll_lock_wait () at ../nptl/sysdeps/unix/sysv/linux/x86_64/lowlevellock.S:135
135		2: movl	%edx, %eax

(gdb) t a a bt

Thread 3 (Thread 0x7f1d49967700 (LWP 29949)):
#0  0x00007f1d53a39bf9 in syscall ()
    at ../sysdeps/unix/sysv/linux/x86_64/syscall.S:38
#1  0x000055ea5c358856 in qemu_event_wait (val=<optimized out>, f=<optimized out>) at /home/rjones/d/qemu/include/qemu/futex.h:26
#2  0x000055ea5c358856 in qemu_event_wait (ev=ev@entry=0x55ea5cde4ac4 <rcu_call_ready_event>) at util/qemu-thread-posix.c:399
#3  0x000055ea5c3680fe in call_rcu_thread (opaque=<optimized out>)
    at util/rcu.c:249
#4  0x00007f1d57f7fdc5 in start_thread (arg=0x7f1d49967700)
    at pthread_create.c:308
#5  0x00007f1d53a3f73d in clone ()
    at ../sysdeps/unix/sysv/linux/x86_64/clone.S:113

Thread 2 (Thread 0x7f1d46987700 (LWP 29955)):
#0  0x00007f1d57f861bd in __lll_lock_wait ()
    at ../nptl/sysdeps/unix/sysv/linux/x86_64/lowlevellock.S:135
#1  0x00007f1d57f81d02 in _L_lock_791 () at /lib64/libpthread.so.0
#2  0x00007f1d57f81c08 in __GI___pthread_mutex_lock (mutex=mutex@entry=0x55ea5c9a2400 <qemu_global_mutex>) at pthread_mutex_lock.c:64
#3  0x000055ea5c3583e9 in qemu_mutex_lock (mutex=mutex@entry=0x55ea5c9a2400 <qemu_global_mutex>) at util/qemu-thread-posix.c:60
#4  0x000055ea5bff512c in qemu_mutex_lock_iothread ()
    at /home/rjones/d/qemu/cpus.c:1565
#5  0x000055ea5c0081c7 in kvm_cpu_exec (cpu=cpu@entry=0x55ea5e7df020)
    at /home/rjones/d/qemu/kvm-all.c:2096
#6  0x000055ea5bff5332 in qemu_kvm_cpu_thread_fn (arg=0x55ea5e7df020)
    at /home/rjones/d/qemu/cpus.c:1118
#7  0x00007f1d57f7fdc5 in start_thread (arg=0x7f1d46987700)
    at pthread_create.c:308
#8  0x00007f1d53a3f73d in clone ()
    at ../sysdeps/unix/sysv/linux/x86_64/clone.S:113

Thread 1 (Thread 0x7f1d5a4e2c00 (LWP 29947)):
#0  0x00007f1d57f861bd in __lll_lock_wait ()
    at ../nptl/sysdeps/unix/sysv/linux/x86_64/lowlevellock.S:135
#1  0x00007f1d57f81d02 in _L_lock_791 () at /lib64/libpthread.so.0
#2  0x00007f1d57f81c08 in __GI___pthread_mutex_lock (mutex=mutex@entry=0x55ea5e5ac178) at pthread_mutex_lock.c:64
#3  0x000055ea5c3583e9 in qemu_mutex_lock (mutex=mutex@entry=0x55ea5e5ac178)
    at util/qemu-thread-posix.c:60
#4  0x000055ea5c2fb2d1 in curl_readv_bh_cb (p=0x55ea6013c620)
    at block/curl.c:824
#5  0x000055ea5c352fe1 in aio_bh_poll (bh=0x55ea60eb99d0) at util/async.c:90
#6  0x000055ea5c352fe1 in aio_bh_poll (ctx=ctx@entry=0x55ea5e555970)
    at util/async.c:118
#7  0x000055ea5c3563c4 in aio_poll (ctx=0x55ea5e555970, blocking=blocking@entry=true) at util/aio-posix.c:682
#8  0x000055ea5c2fb09e in curl_init_state (bs=0x55ea5e5a56b0, s=s@entry=0x55ea5e5ab100) at block/curl.c:470
#9  0x000055ea5c2fb39b in curl_readv_bh_cb (p=0x55ea5f650c30)
    at block/curl.c:839
#10 0x000055ea5c352fe1 in aio_bh_poll (bh=0x55ea60eb1910) at util/async.c:90
#11 0x000055ea5c352fe1 in aio_bh_poll (ctx=ctx@entry=0x55ea5e555970)
    at util/async.c:118
#12 0x000055ea5c355f80 in aio_dispatch (ctx=0x55ea5e555970)
    at util/aio-posix.c:429
#13 0x000055ea5c352ebe in aio_ctx_dispatch (source=<optimized out>, callback=<optimized out>, user_data=<optimized out>) at util/async.c:261
#14 0x00007f1d55489d7a in g_main_context_dispatch (context=0x55ea5e555d40)
    at gmain.c:3152
#15 0x00007f1d55489d7a in g_main_context_dispatch (context=context@entry=0x55ea5e555d40) at gmain.c:3767
#16 0x000055ea5c35523c in main_loop_wait () at util/main-loop.c:213
#17 0x000055ea5c35523c in main_loop_wait (timeout=<optimized out>)
    at util/main-loop.c:261
#18 0x000055ea5c35523c in main_loop_wait (nonblocking=nonblocking@entry=0)
    at util/main-loop.c:517
#19 0x000055ea5bfb44a7 in main () at vl.c:1899
#20 0x000055ea5bfb44a7 in main (argc=<optimized out>, argv=<optimized out>, envp=<optimized out>) at vl.c:4717
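
Reading thread 1 from the bottom up: the outer curl_readv_bh_cb (frame #9,
block/curl.c:839) already holds s->mutex when curl_init_state (frame #8)
waits in aio_poll() at block/curl.c:470; that nested aio_poll() dispatches a
second curl_readv_bh_cb bottom half (frame #4), which blocks at
block/curl.c:824 trying to take the same mutex.  aio_context_acquire() is
recursive, so this nesting was previously harmless; QemuMutex is a plain
non-recursive mutex, so the main-loop thread deadlocks against itself.  A
minimal sketch of the hazard -- readv_bh and resource_free are hypothetical
stand-ins, not block/curl.c code:

    #include "qemu/osdep.h"
    #include "qemu/thread.h"
    #include "block/aio.h"

    static QemuMutex mutex;     /* stands in for s->mutex */
    static AioContext *ctx;     /* stands in for the BDS's AioContext */

    static bool resource_free(void);    /* e.g. "is a CURLState free?" */

    static void readv_bh(void *opaque)
    {
        qemu_mutex_lock(&mutex);        /* frame #4 blocks here */
        while (!resource_free()) {      /* frame #8's wait loop */
            /* aio_poll() may dispatch another readv_bh() on this same
             * thread; it then blocks on the lock taken above. */
            aio_poll(ctx, true);
        }
        qemu_mutex_unlock(&mutex);
    }

Presumably the fix is either to wait without holding s->mutex or to avoid
running a nested event loop under the lock.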

Patch

diff --git a/block/curl.c b/block/curl.c
index 2939cc7..e83dcd8 100644
--- a/block/curl.c
+++ b/block/curl.c
@@ -135,6 +135,7 @@  typedef struct BDRVCURLState {
     char *cookie;
     bool accept_range;
     AioContext *aio_context;
+    QemuMutex mutex;
     char *username;
     char *password;
     char *proxyusername;
@@ -333,6 +334,7 @@  static int curl_find_buf(BDRVCURLState *s, size_t start, size_t len,
     return FIND_RET_NONE;
 }
 
+/* Called with s->mutex held.  */
 static void curl_multi_check_completion(BDRVCURLState *s)
 {
     int msgs_in_queue;
@@ -374,7 +376,9 @@  static void curl_multi_check_completion(BDRVCURLState *s)
                         continue;
                     }
 
+                    qemu_mutex_unlock(&s->mutex);
                     acb->common.cb(acb->common.opaque, -EPROTO);
+                    qemu_mutex_lock(&s->mutex);
                     qemu_aio_unref(acb);
                     state->acb[i] = NULL;
                 }
@@ -386,6 +390,7 @@  static void curl_multi_check_completion(BDRVCURLState *s)
     }
 }
 
+/* Called with s->mutex held.  */
 static void curl_multi_do_locked(CURLState *s)
 {
     CURLSocket *socket, *next_socket;
@@ -409,19 +414,19 @@  static void curl_multi_do(void *arg)
 {
     CURLState *s = (CURLState *)arg;
 
-    aio_context_acquire(s->s->aio_context);
+    qemu_mutex_lock(&s->s->mutex);
     curl_multi_do_locked(s);
-    aio_context_release(s->s->aio_context);
+    qemu_mutex_unlock(&s->s->mutex);
 }
 
 static void curl_multi_read(void *arg)
 {
     CURLState *s = (CURLState *)arg;
 
-    aio_context_acquire(s->s->aio_context);
+    qemu_mutex_lock(&s->s->mutex);
     curl_multi_do_locked(s);
     curl_multi_check_completion(s->s);
-    aio_context_release(s->s->aio_context);
+    qemu_mutex_unlock(&s->s->mutex);
 }
 
 static void curl_multi_timeout_do(void *arg)
@@ -434,11 +439,11 @@  static void curl_multi_timeout_do(void *arg)
         return;
     }
 
-    aio_context_acquire(s->aio_context);
+    qemu_mutex_lock(&s->mutex);
     curl_multi_socket_action(s->multi, CURL_SOCKET_TIMEOUT, 0, &running);
 
     curl_multi_check_completion(s);
-    aio_context_release(s->aio_context);
+    qemu_mutex_unlock(&s->mutex);
 #else
     abort();
 #endif
@@ -771,6 +776,7 @@  static int curl_open(BlockDriverState *bs, QDict *options, int flags,
     curl_easy_cleanup(state->curl);
     state->curl = NULL;
 
+    qemu_mutex_init(&s->mutex);
     curl_attach_aio_context(bs, bdrv_get_aio_context(bs));
 
     qemu_opts_del(opts);
@@ -801,12 +807,11 @@  static void curl_readv_bh_cb(void *p)
     CURLAIOCB *acb = p;
     BlockDriverState *bs = acb->common.bs;
     BDRVCURLState *s = bs->opaque;
-    AioContext *ctx = bdrv_get_aio_context(bs);
 
     size_t start = acb->sector_num * BDRV_SECTOR_SIZE;
     size_t end;
 
-    aio_context_acquire(ctx);
+    qemu_mutex_lock(&s->mutex);
 
     // In case we have the requested data already (e.g. read-ahead),
     // we can just call the callback and be done.
@@ -854,7 +859,7 @@  static void curl_readv_bh_cb(void *p)
     curl_multi_socket_action(s->multi, CURL_SOCKET_TIMEOUT, 0, &running);
 
 out:
-    aio_context_release(ctx);
+    qemu_mutex_unlock(&s->mutex);
     if (ret != -EINPROGRESS) {
         acb->common.cb(acb->common.opaque, ret);
         qemu_aio_unref(acb);
@@ -883,6 +888,7 @@  static void curl_close(BlockDriverState *bs)
 
     DPRINTF("CURL: Close\n");
     curl_detach_aio_context(bs);
+    qemu_mutex_destroy(&s->mutex);
 
     g_free(s->cookie);
     g_free(s->url);