
[2/9] block: Replaced qemu_mutex_lock calls with QEMU_LOCK_GUARD

Message ID: 20210311031538.5325-3-ma.mandourr@gmail.com
State: New
Series: Changing qemu_mutex_locks to lock guard macros

Commit Message

Mahmoud Abumandour March 11, 2021, 3:15 a.m. UTC
Replaced various qemu_mutex_lock/qemu_mutex_unlock calls with
lock guard macros (QEMU_LOCK_GUARD() and WITH_QEMU_LOCK_GUARD).
This slightly simplifies the code by eliminating calls to
qemu_mutex_unlock and eliminates goto paths.

Signed-off-by: Mahmoud Mandour <ma.mandourr@gmail.com>
---
 block/curl.c |  13 ++--
 block/nbd.c  | 188 ++++++++++++++++++++++++---------------------------
 2 files changed, 95 insertions(+), 106 deletions(-)
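
As a quick illustration of the pattern this series applies (a minimal sketch, not code from the patch; "State", "prepare" and "process" are made-up names): QEMU_LOCK_GUARD() acquires the mutex and releases it automatically when the enclosing scope is left, so early returns can take the place of goto-based unlock paths.

/* Before: manual unlock through a cleanup label */
static void do_work(State *s)
{
    qemu_mutex_lock(&s->mutex);
    if (!prepare(s)) {
        goto out;
    }
    process(s);
out:
    qemu_mutex_unlock(&s->mutex);
}

/* After: the guard releases the mutex on every return path */
static void do_work(State *s)
{
    QEMU_LOCK_GUARD(&s->mutex);
    if (!prepare(s)) {
        return;    /* mutex released here automatically */
    }
    process(s);
}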

Comments

Vladimir Sementsov-Ogievskiy March 12, 2021, 10:23 a.m. UTC | #1
11.03.2021 06:15, Mahmoud Mandour wrote:
> Replaced various qemu_mutex_lock/qemu_mutex_unlock calls with
> lock guard macros (QEMU_LOCK_GUARD() and WITH_QEMU_LOCK_GUARD).
> This slightly simplifies the code by eliminating calls to
> qemu_mutex_unlock and eliminates goto paths.
> 
> Signed-off-by: Mahmoud Mandour <ma.mandourr@gmail.com>
> ---
>   block/curl.c |  13 ++--
>   block/nbd.c  | 188 ++++++++++++++++++++++++---------------------------

Better would be make two separate patches I think.

>   2 files changed, 95 insertions(+), 106 deletions(-)
> 
> diff --git a/block/curl.c b/block/curl.c
> index 4ff895df8f..56a217951a 100644
> --- a/block/curl.c
> +++ b/block/curl.c
> @@ -832,12 +832,12 @@ static void curl_setup_preadv(BlockDriverState *bs, CURLAIOCB *acb)
>       uint64_t start = acb->offset;
>       uint64_t end;
>   
> -    qemu_mutex_lock(&s->mutex);
> +    QEMU_LOCK_GUARD(&s->mutex);
>   
>       // In case we have the requested data already (e.g. read-ahead),
>       // we can just call the callback and be done.
>       if (curl_find_buf(s, start, acb->bytes, acb)) {
> -        goto out;
> +        return;
>       }
>   
>       // No cache found, so let's start a new request
> @@ -852,7 +852,7 @@ static void curl_setup_preadv(BlockDriverState *bs, CURLAIOCB *acb)
>       if (curl_init_state(s, state) < 0) {
>           curl_clean_state(state);
>           acb->ret = -EIO;
> -        goto out;
> +        return;
>       }
>   
>       acb->start = 0;
> @@ -867,7 +867,7 @@ static void curl_setup_preadv(BlockDriverState *bs, CURLAIOCB *acb)
>       if (state->buf_len && state->orig_buf == NULL) {
>           curl_clean_state(state);
>           acb->ret = -ENOMEM;
> -        goto out;
> +        return;
>       }
>       state->acb[0] = acb;
>   
> @@ -880,14 +880,11 @@ static void curl_setup_preadv(BlockDriverState *bs, CURLAIOCB *acb)
>           acb->ret = -EIO;
>   
>           curl_clean_state(state);
> -        goto out;
> +        return;
>       }
>   
>       /* Tell curl it needs to kick things off */
>       curl_multi_socket_action(s->multi, CURL_SOCKET_TIMEOUT, 0, &running);
> -
> -out:
> -    qemu_mutex_unlock(&s->mutex);
>   }

This change is obvious and good.

>   
>   static int coroutine_fn curl_co_preadv(BlockDriverState *bs,
> diff --git a/block/nbd.c b/block/nbd.c
> index c26dc5a54f..28ba7aad61 100644
> --- a/block/nbd.c
> +++ b/block/nbd.c
> @@ -407,27 +407,25 @@ static void *connect_thread_func(void *opaque)
>           thr->sioc = NULL;
>       }
>   
> -    qemu_mutex_lock(&thr->mutex);
> -
> -    switch (thr->state) {
> -    case CONNECT_THREAD_RUNNING:
> -        thr->state = ret < 0 ? CONNECT_THREAD_FAIL : CONNECT_THREAD_SUCCESS;
> -        if (thr->bh_ctx) {
> -            aio_bh_schedule_oneshot(thr->bh_ctx, thr->bh_func, thr->bh_opaque);
> -
> -            /* play safe, don't reuse bh_ctx on further connection attempts */
> -            thr->bh_ctx = NULL;
> +    WITH_QEMU_LOCK_GUARD(&thr->mutex) {
> +        switch (thr->state) {
> +        case CONNECT_THREAD_RUNNING:
> +            thr->state = ret < 0 ? CONNECT_THREAD_FAIL : CONNECT_THREAD_SUCCESS;
> +            if (thr->bh_ctx) {
> +                aio_bh_schedule_oneshot(thr->bh_ctx, thr->bh_func, thr->bh_opaque);

over-80 line

> +
> +                /* play safe, don't reuse bh_ctx on further connection attempts */

and here

> +                thr->bh_ctx = NULL;
> +            }
> +            break;
> +        case CONNECT_THREAD_RUNNING_DETACHED:
> +            do_free = true;
> +            break;
> +        default:
> +            abort();
>           }
> -        break;
> -    case CONNECT_THREAD_RUNNING_DETACHED:
> -        do_free = true;
> -        break;
> -    default:
> -        abort();
>       }
>   
> -    qemu_mutex_unlock(&thr->mutex);
> -
>       if (do_free) {
>           nbd_free_connect_thread(thr);
>       }
> @@ -443,36 +441,33 @@ nbd_co_establish_connection(BlockDriverState *bs, Error **errp)
>       BDRVNBDState *s = bs->opaque;
>       NBDConnectThread *thr = s->connect_thread;
>   
> -    qemu_mutex_lock(&thr->mutex);
> -
> -    switch (thr->state) {
> -    case CONNECT_THREAD_FAIL:
> -    case CONNECT_THREAD_NONE:
> -        error_free(thr->err);
> -        thr->err = NULL;
> -        thr->state = CONNECT_THREAD_RUNNING;
> -        qemu_thread_create(&thread, "nbd-connect",
> -                           connect_thread_func, thr, QEMU_THREAD_DETACHED);
> -        break;
> -    case CONNECT_THREAD_SUCCESS:
> -        /* Previous attempt finally succeeded in background */
> -        thr->state = CONNECT_THREAD_NONE;
> -        s->sioc = thr->sioc;
> -        thr->sioc = NULL;
> -        yank_register_function(BLOCKDEV_YANK_INSTANCE(bs->node_name),
> -                               nbd_yank, bs);
> -        qemu_mutex_unlock(&thr->mutex);
> -        return 0;
> -    case CONNECT_THREAD_RUNNING:
> -        /* Already running, will wait */
> -        break;
> -    default:
> -        abort();
> -    }
> -
> -    thr->bh_ctx = qemu_get_current_aio_context();
> +    WITH_QEMU_LOCK_GUARD(&thr->mutex) {
> +        switch (thr->state) {
> +        case CONNECT_THREAD_FAIL:
> +        case CONNECT_THREAD_NONE:
> +            error_free(thr->err);
> +            thr->err = NULL;
> +            thr->state = CONNECT_THREAD_RUNNING;
> +            qemu_thread_create(&thread, "nbd-connect",
> +                               connect_thread_func, thr, QEMU_THREAD_DETACHED);
> +            break;
> +        case CONNECT_THREAD_SUCCESS:
> +            /* Previous attempt finally succeeded in background */
> +            thr->state = CONNECT_THREAD_NONE;
> +            s->sioc = thr->sioc;
> +            thr->sioc = NULL;
> +            yank_register_function(BLOCKDEV_YANK_INSTANCE(bs->node_name),
> +                                   nbd_yank, bs);
> +            return 0;
> +        case CONNECT_THREAD_RUNNING:
> +            /* Already running, will wait */
> +            break;
> +        default:
> +            abort();
> +        }
>   
> -    qemu_mutex_unlock(&thr->mutex);
> +        thr->bh_ctx = qemu_get_current_aio_context();
> +    }
>   
>   
>       /*
> @@ -486,46 +481,45 @@ nbd_co_establish_connection(BlockDriverState *bs, Error **errp)
>       s->wait_connect = true;
>       qemu_coroutine_yield();
>   
> -    qemu_mutex_lock(&thr->mutex);
>   
> -    switch (thr->state) {
> -    case CONNECT_THREAD_SUCCESS:
> -    case CONNECT_THREAD_FAIL:
> -        thr->state = CONNECT_THREAD_NONE;
> -        error_propagate(errp, thr->err);
> -        thr->err = NULL;
> -        s->sioc = thr->sioc;
> -        thr->sioc = NULL;
> -        if (s->sioc) {
> -            yank_register_function(BLOCKDEV_YANK_INSTANCE(bs->node_name),
> -                                   nbd_yank, bs);
> -        }
> -        ret = (s->sioc ? 0 : -1);
> -        break;
> -    case CONNECT_THREAD_RUNNING:
> -    case CONNECT_THREAD_RUNNING_DETACHED:
> -        /*
> -         * Obviously, drained section wants to start. Report the attempt as
> -         * failed. Still connect thread is executing in background, and its
> -         * result may be used for next connection attempt.
> -         */
> -        ret = -1;
> -        error_setg(errp, "Connection attempt cancelled by other operation");
> -        break;
> +    WITH_QEMU_LOCK_GUARD(&thr->mutex) {
> +        switch (thr->state) {
> +        case CONNECT_THREAD_SUCCESS:
> +        case CONNECT_THREAD_FAIL:
> +            thr->state = CONNECT_THREAD_NONE;
> +            error_propagate(errp, thr->err);
> +            thr->err = NULL;
> +            s->sioc = thr->sioc;
> +            thr->sioc = NULL;
> +            if (s->sioc) {
> +                yank_register_function(BLOCKDEV_YANK_INSTANCE(bs->node_name),
> +                                       nbd_yank, bs);
> +            }
> +            ret = (s->sioc ? 0 : -1);
> +            break;
> +        case CONNECT_THREAD_RUNNING:
> +        case CONNECT_THREAD_RUNNING_DETACHED:
> +            /*
> +             * Obviously, drained section wants to start. Report the attempt as
> +             * failed. Still connect thread is executing in background, and its
> +             * result may be used for next connection attempt.
> +             */
> +            ret = -1;
> +            error_setg(errp, "Connection attempt cancelled by other operation");
> +            break;
>   
> -    case CONNECT_THREAD_NONE:
> -        /*
> -         * Impossible. We've seen this thread running. So it should be
> -         * running or at least give some results.
> -         */
> -        abort();
> +        case CONNECT_THREAD_NONE:
> +            /*
> +             * Impossible. We've seen this thread running. So it should be
> +             * running or at least give some results.
> +             */
> +            abort();
>   
> -    default:
> -        abort();
> +        default:
> +            abort();
> +        }
>       }
>   
> -    qemu_mutex_unlock(&thr->mutex);
> -
>       return ret;
>   }
>   
> @@ -546,25 +540,23 @@ static void nbd_co_establish_connection_cancel(BlockDriverState *bs,
>       bool wake = false;
>       bool do_free = false;
>   
> -    qemu_mutex_lock(&thr->mutex);
> -
> -    if (thr->state == CONNECT_THREAD_RUNNING) {
> -        /* We can cancel only in running state, when bh is not yet scheduled */
> -        thr->bh_ctx = NULL;
> -        if (s->wait_connect) {
> -            s->wait_connect = false;
> -            wake = true;
> -        }
> -        if (detach) {
> -            thr->state = CONNECT_THREAD_RUNNING_DETACHED;
> -            s->connect_thread = NULL;
> +    WITH_QEMU_LOCK_GUARD(&thr->mutex) {
> +        if (thr->state == CONNECT_THREAD_RUNNING) {
> +            /* We can cancel only in running state, when bh is not yet scheduled */

over-80 line

> +            thr->bh_ctx = NULL;
> +            if (s->wait_connect) {
> +                s->wait_connect = false;
> +                wake = true;
> +            }
> +            if (detach) {
> +                thr->state = CONNECT_THREAD_RUNNING_DETACHED;
> +                s->connect_thread = NULL;
> +            }
> +        } else if (detach) {
> +            do_free = true;
>           }
> -    } else if (detach) {
> -        do_free = true;
>       }
>   
> -    qemu_mutex_unlock(&thr->mutex);
> -
>       if (do_free) {
>           nbd_free_connect_thread(thr);
>           s->connect_thread = NULL;
> 


For nbd.c we mostly change simple critical sections

qemu_mutex_lock()
...
qemu_mutex_unlock()

into

WITH_QEMU_LOCK_GUARD() {
...
}

And we don't eliminate any gotos... Hmm. At first sight, the increased nesting doesn't make the code more beautiful.
But I understand that the context-manager concept is safer than calling unlock() by hand, and with a nested block the critical section becomes more obvious. So, with the over-80 lines fixed:

Reviewed-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
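
As a rough sketch of the tradeoff described above (illustrative only; update_state() and wake_waiters() are made-up names, not functions from the patch): QEMU_LOCK_GUARD() keeps the mutex held until the enclosing function exits, which is why the curl.c gotos become plain returns, while WITH_QEMU_LOCK_GUARD() bounds the critical section to the attached block at the cost of one extra indentation level.

static void example(BDRVNBDState *s, NBDConnectThread *thr)
{
    /* Block-scope guard: the mutex is held only inside the braces */
    WITH_QEMU_LOCK_GUARD(&thr->mutex) {
        update_state(thr);      /* critical section */
    }                           /* mutex released at the closing brace */

    wake_waiters(s);            /* runs without the lock held */
}
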
Mahmoud Abumandour March 13, 2021, 5:51 a.m. UTC | #2
Thank you for the fast review and I'm sorry for the silly and obvious style
errors. Unfortunately I did not notice the section on using the checkpatch
script in the Contributing page on the wiki before committing. But I assure
you that such errors will not occur again.

Vladimir Sementsov-Ogievskiy March 15, 2021, 2:08 p.m. UTC | #3
13.03.2021 08:51, Mahmoud Mandour wrote:
> Thank you for the fast review and I'm sorry for the silly and obvious style
> errors. Unfortunately I did not notice the section on using the checkpatch
> script in the Contributing page on the wiki before committing. But I assure
> you that such errors will not occur again.

Don't worry :)

Eric Blake March 16, 2021, 1:29 p.m. UTC | #4
On 3/12/21 4:23 AM, Vladimir Sementsov-Ogievskiy wrote:
> 11.03.2021 06:15, Mahmoud Mandour wrote:
>> Replaced various qemu_mutex_lock/qemu_mutex_unlock calls with
>> lock guard macros (QEMU_LOCK_GUARD() and WITH_QEMU_LOCK_GUARD).
>> This slightly simplifies the code by eliminating calls to
>> qemu_mutex_unlock and eliminates goto paths.
>>
>> Signed-off-by: Mahmoud Mandour <ma.mandourr@gmail.com>
>> ---
>>   block/curl.c |  13 ++--
>>   block/nbd.c  | 188 ++++++++++++++++++++++++---------------------------
> 
> Better would be make two separate patches I think.

Given the imminent approach of soft freeze, and the fact that this does
not add a feature, I'm debating whether this one patch warrants an
immediate pull request through my NBD tree, or if it should be split
first, or if we just defer it to 6.1 (it shouldn't affect correctness,
just a maintenance cleanup).

In case some other maintainer wants to push this series through for 6.0
(rather than waiting for each maintainer to pick up one patch at a
time), feel free to add:

Acked-by: Eric Blake <eblake@redhat.com>



Patch

diff --git a/block/curl.c b/block/curl.c
index 4ff895df8f..56a217951a 100644
--- a/block/curl.c
+++ b/block/curl.c
@@ -832,12 +832,12 @@  static void curl_setup_preadv(BlockDriverState *bs, CURLAIOCB *acb)
     uint64_t start = acb->offset;
     uint64_t end;
 
-    qemu_mutex_lock(&s->mutex);
+    QEMU_LOCK_GUARD(&s->mutex);
 
     // In case we have the requested data already (e.g. read-ahead),
     // we can just call the callback and be done.
     if (curl_find_buf(s, start, acb->bytes, acb)) {
-        goto out;
+        return;
     }
 
     // No cache found, so let's start a new request
@@ -852,7 +852,7 @@  static void curl_setup_preadv(BlockDriverState *bs, CURLAIOCB *acb)
     if (curl_init_state(s, state) < 0) {
         curl_clean_state(state);
         acb->ret = -EIO;
-        goto out;
+        return;
     }
 
     acb->start = 0;
@@ -867,7 +867,7 @@  static void curl_setup_preadv(BlockDriverState *bs, CURLAIOCB *acb)
     if (state->buf_len && state->orig_buf == NULL) {
         curl_clean_state(state);
         acb->ret = -ENOMEM;
-        goto out;
+        return;
     }
     state->acb[0] = acb;
 
@@ -880,14 +880,11 @@  static void curl_setup_preadv(BlockDriverState *bs, CURLAIOCB *acb)
         acb->ret = -EIO;
 
         curl_clean_state(state);
-        goto out;
+        return;
     }
 
     /* Tell curl it needs to kick things off */
     curl_multi_socket_action(s->multi, CURL_SOCKET_TIMEOUT, 0, &running);
-
-out:
-    qemu_mutex_unlock(&s->mutex);
 }
 
 static int coroutine_fn curl_co_preadv(BlockDriverState *bs,
diff --git a/block/nbd.c b/block/nbd.c
index c26dc5a54f..28ba7aad61 100644
--- a/block/nbd.c
+++ b/block/nbd.c
@@ -407,27 +407,25 @@  static void *connect_thread_func(void *opaque)
         thr->sioc = NULL;
     }
 
-    qemu_mutex_lock(&thr->mutex);
-
-    switch (thr->state) {
-    case CONNECT_THREAD_RUNNING:
-        thr->state = ret < 0 ? CONNECT_THREAD_FAIL : CONNECT_THREAD_SUCCESS;
-        if (thr->bh_ctx) {
-            aio_bh_schedule_oneshot(thr->bh_ctx, thr->bh_func, thr->bh_opaque);
-
-            /* play safe, don't reuse bh_ctx on further connection attempts */
-            thr->bh_ctx = NULL;
+    WITH_QEMU_LOCK_GUARD(&thr->mutex) {
+        switch (thr->state) {
+        case CONNECT_THREAD_RUNNING:
+            thr->state = ret < 0 ? CONNECT_THREAD_FAIL : CONNECT_THREAD_SUCCESS;
+            if (thr->bh_ctx) {
+                aio_bh_schedule_oneshot(thr->bh_ctx, thr->bh_func, thr->bh_opaque);
+
+                /* play safe, don't reuse bh_ctx on further connection attempts */
+                thr->bh_ctx = NULL;
+            }
+            break;
+        case CONNECT_THREAD_RUNNING_DETACHED:
+            do_free = true;
+            break;
+        default:
+            abort();
         }
-        break;
-    case CONNECT_THREAD_RUNNING_DETACHED:
-        do_free = true;
-        break;
-    default:
-        abort();
     }
 
-    qemu_mutex_unlock(&thr->mutex);
-
     if (do_free) {
         nbd_free_connect_thread(thr);
     }
@@ -443,36 +441,33 @@  nbd_co_establish_connection(BlockDriverState *bs, Error **errp)
     BDRVNBDState *s = bs->opaque;
     NBDConnectThread *thr = s->connect_thread;
 
-    qemu_mutex_lock(&thr->mutex);
-
-    switch (thr->state) {
-    case CONNECT_THREAD_FAIL:
-    case CONNECT_THREAD_NONE:
-        error_free(thr->err);
-        thr->err = NULL;
-        thr->state = CONNECT_THREAD_RUNNING;
-        qemu_thread_create(&thread, "nbd-connect",
-                           connect_thread_func, thr, QEMU_THREAD_DETACHED);
-        break;
-    case CONNECT_THREAD_SUCCESS:
-        /* Previous attempt finally succeeded in background */
-        thr->state = CONNECT_THREAD_NONE;
-        s->sioc = thr->sioc;
-        thr->sioc = NULL;
-        yank_register_function(BLOCKDEV_YANK_INSTANCE(bs->node_name),
-                               nbd_yank, bs);
-        qemu_mutex_unlock(&thr->mutex);
-        return 0;
-    case CONNECT_THREAD_RUNNING:
-        /* Already running, will wait */
-        break;
-    default:
-        abort();
-    }
-
-    thr->bh_ctx = qemu_get_current_aio_context();
+    WITH_QEMU_LOCK_GUARD(&thr->mutex) {
+        switch (thr->state) {
+        case CONNECT_THREAD_FAIL:
+        case CONNECT_THREAD_NONE:
+            error_free(thr->err);
+            thr->err = NULL;
+            thr->state = CONNECT_THREAD_RUNNING;
+            qemu_thread_create(&thread, "nbd-connect",
+                               connect_thread_func, thr, QEMU_THREAD_DETACHED);
+            break;
+        case CONNECT_THREAD_SUCCESS:
+            /* Previous attempt finally succeeded in background */
+            thr->state = CONNECT_THREAD_NONE;
+            s->sioc = thr->sioc;
+            thr->sioc = NULL;
+            yank_register_function(BLOCKDEV_YANK_INSTANCE(bs->node_name),
+                                   nbd_yank, bs);
+            return 0;
+        case CONNECT_THREAD_RUNNING:
+            /* Already running, will wait */
+            break;
+        default:
+            abort();
+        }
 
-    qemu_mutex_unlock(&thr->mutex);
+        thr->bh_ctx = qemu_get_current_aio_context();
+    }
 
 
     /*
@@ -486,46 +481,45 @@  nbd_co_establish_connection(BlockDriverState *bs, Error **errp)
     s->wait_connect = true;
     qemu_coroutine_yield();
 
-    qemu_mutex_lock(&thr->mutex);
 
-    switch (thr->state) {
-    case CONNECT_THREAD_SUCCESS:
-    case CONNECT_THREAD_FAIL:
-        thr->state = CONNECT_THREAD_NONE;
-        error_propagate(errp, thr->err);
-        thr->err = NULL;
-        s->sioc = thr->sioc;
-        thr->sioc = NULL;
-        if (s->sioc) {
-            yank_register_function(BLOCKDEV_YANK_INSTANCE(bs->node_name),
-                                   nbd_yank, bs);
-        }
-        ret = (s->sioc ? 0 : -1);
-        break;
-    case CONNECT_THREAD_RUNNING:
-    case CONNECT_THREAD_RUNNING_DETACHED:
-        /*
-         * Obviously, drained section wants to start. Report the attempt as
-         * failed. Still connect thread is executing in background, and its
-         * result may be used for next connection attempt.
-         */
-        ret = -1;
-        error_setg(errp, "Connection attempt cancelled by other operation");
-        break;
+    WITH_QEMU_LOCK_GUARD(&thr->mutex) {
+        switch (thr->state) {
+        case CONNECT_THREAD_SUCCESS:
+        case CONNECT_THREAD_FAIL:
+            thr->state = CONNECT_THREAD_NONE;
+            error_propagate(errp, thr->err);
+            thr->err = NULL;
+            s->sioc = thr->sioc;
+            thr->sioc = NULL;
+            if (s->sioc) {
+                yank_register_function(BLOCKDEV_YANK_INSTANCE(bs->node_name),
+                                       nbd_yank, bs);
+            }
+            ret = (s->sioc ? 0 : -1);
+            break;
+        case CONNECT_THREAD_RUNNING:
+        case CONNECT_THREAD_RUNNING_DETACHED:
+            /*
+             * Obviously, drained section wants to start. Report the attempt as
+             * failed. Still connect thread is executing in background, and its
+             * result may be used for next connection attempt.
+             */
+            ret = -1;
+            error_setg(errp, "Connection attempt cancelled by other operation");
+            break;
 
-    case CONNECT_THREAD_NONE:
-        /*
-         * Impossible. We've seen this thread running. So it should be
-         * running or at least give some results.
-         */
-        abort();
+        case CONNECT_THREAD_NONE:
+            /*
+             * Impossible. We've seen this thread running. So it should be
+             * running or at least give some results.
+             */
+            abort();
 
-    default:
-        abort();
+        default:
+            abort();
+        }
     }
 
-    qemu_mutex_unlock(&thr->mutex);
-
     return ret;
 }
 
@@ -546,25 +540,23 @@  static void nbd_co_establish_connection_cancel(BlockDriverState *bs,
     bool wake = false;
     bool do_free = false;
 
-    qemu_mutex_lock(&thr->mutex);
-
-    if (thr->state == CONNECT_THREAD_RUNNING) {
-        /* We can cancel only in running state, when bh is not yet scheduled */
-        thr->bh_ctx = NULL;
-        if (s->wait_connect) {
-            s->wait_connect = false;
-            wake = true;
-        }
-        if (detach) {
-            thr->state = CONNECT_THREAD_RUNNING_DETACHED;
-            s->connect_thread = NULL;
+    WITH_QEMU_LOCK_GUARD(&thr->mutex) {
+        if (thr->state == CONNECT_THREAD_RUNNING) {
+            /* We can cancel only in running state, when bh is not yet scheduled */
+            thr->bh_ctx = NULL;
+            if (s->wait_connect) {
+                s->wait_connect = false;
+                wake = true;
+            }
+            if (detach) {
+                thr->state = CONNECT_THREAD_RUNNING_DETACHED;
+                s->connect_thread = NULL;
+            }
+        } else if (detach) {
+            do_free = true;
         }
-    } else if (detach) {
-        do_free = true;
     }
 
-    qemu_mutex_unlock(&thr->mutex);
-
     if (do_free) {
         nbd_free_connect_thread(thr);
         s->connect_thread = NULL;