From patchwork Thu Aug 6 13:36:11 2015
X-Patchwork-Submitter: Paolo Bonzini <pbonzini@redhat.com>
X-Patchwork-Id: 504730
From: Paolo Bonzini <pbonzini@redhat.com>
To: qemu-devel@nongnu.org
Cc: kwolf@redhat.com, famz@redhat.com, stefanha@redhat.com, qemu-block@nongnu.org
Date: Thu, 6 Aug 2015 15:36:11 +0200
Message-Id: <1438868176-20364-14-git-send-email-pbonzini@redhat.com>
In-Reply-To: <1438868176-20364-1-git-send-email-pbonzini@redhat.com>
References: <1438868176-20364-1-git-send-email-pbonzini@redhat.com>
Subject: [Qemu-devel] [PATCH 13/18] block: explicitly acquire aiocontext in bottom halves that need it

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
---
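The change applies one pattern throughout: aio_bh_poll() stops wrapping
every bottom half in aio_context_acquire()/aio_context_release(), and each
bottom half that invokes a completion callback or re-enters a coroutine
now takes the AioContext explicitly around that call instead. As a rough
sketch of the shape repeated below (FooAIOCB, foo_bh_cb and the ret field
are illustrative names, not code from this patch):

    /* Illustrative only: look up the BDS's AioContext, then hold it
     * just around the completion callback that previously ran under
     * aio_bh_poll()'s acquire/release pair.
     */
    static void foo_bh_cb(void *opaque)
    {
        FooAIOCB *acb = opaque;
        AioContext *ctx = bdrv_get_aio_context(acb->common.bs);

        qemu_bh_delete(acb->bh);
        aio_context_acquire(ctx);
        acb->common.cb(acb->common.opaque, acb->ret);
        aio_context_release(ctx);
        qemu_aio_unref(acb);   /* may free acb, so ctx was saved up front */
    }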
 async.c               |  2 --
 block/archipelago.c   |  3 +++
 block/blkdebug.c      |  4 ++++
 block/blkverify.c     |  3 +++
 block/block-backend.c |  4 ++++
 block/curl.c          | 25 +++++++++++++++++--------
 block/gluster.c       |  2 ++
 block/io.c            |  6 ++++++
 block/iscsi.c         |  4 ++++
 block/linux-aio.c     |  7 +++++++
 block/nfs.c           |  4 ++++
 block/null.c          |  4 ++++
 block/qed.c           |  3 +++
 block/rbd.c           |  4 ++++
 dma-helpers.c         |  7 +++++--
 hw/block/virtio-blk.c |  2 ++
 hw/scsi/scsi-bus.c    |  2 ++
 thread-pool.c         |  2 ++
 18 files changed, 76 insertions(+), 12 deletions(-)

diff --git a/async.c b/async.c
index 1fce3e4..6186901 100644
--- a/async.c
+++ b/async.c
@@ -87,9 +87,7 @@ int aio_bh_poll(AioContext *ctx)
                 ret = 1;
             }
             bh->idle = 0;
-            aio_context_acquire(ctx);
            bh->cb(bh->opaque);
-            aio_context_release(ctx);
        }
    }
 
diff --git a/block/archipelago.c b/block/archipelago.c
index 855655c..7f69a3f 100644
--- a/block/archipelago.c
+++ b/block/archipelago.c
@@ -312,9 +312,12 @@ static void
 qemu_archipelago_complete_aio(void *opaque)
 {
     AIORequestData *reqdata = (AIORequestData *) opaque;
     ArchipelagoAIOCB *aio_cb = (ArchipelagoAIOCB *) reqdata->aio_cb;
+    AioContext *ctx = bdrv_get_aio_context(aio_cb->common.bs);
 
     qemu_bh_delete(aio_cb->bh);
+    aio_context_acquire(ctx);
     aio_cb->common.cb(aio_cb->common.opaque, aio_cb->ret);
+    aio_context_release(ctx);
     aio_cb->status = 0;
     qemu_aio_unref(aio_cb);

diff --git a/block/blkdebug.c b/block/blkdebug.c
index bc247f4..2130811 100644
--- a/block/blkdebug.c
+++ b/block/blkdebug.c
@@ -458,8 +458,12 @@ out:
 
 static void error_callback_bh(void *opaque)
 {
     struct BlkdebugAIOCB *acb = opaque;
+    AioContext *ctx = bdrv_get_aio_context(acb->common.bs);
+
     qemu_bh_delete(acb->bh);
+    aio_context_acquire(ctx);
     acb->common.cb(acb->common.opaque, acb->ret);
+    aio_context_release(ctx);
     qemu_aio_unref(acb);
 }

diff --git a/block/blkverify.c b/block/blkverify.c
index d277e63..510c198 100644
--- a/block/blkverify.c
+++ b/block/blkverify.c
@@ -185,13 +185,16 @@ static BlkverifyAIOCB *blkverify_aio_get(BlockDriverState *bs, bool is_write,
 static void blkverify_aio_bh(void *opaque)
 {
     BlkverifyAIOCB *acb = opaque;
+    AioContext *ctx = bdrv_get_aio_context(acb->common.bs);
 
     qemu_bh_delete(acb->bh);
     if (acb->buf) {
         qemu_iovec_destroy(&acb->raw_qiov);
         qemu_vfree(acb->buf);
     }
+    aio_context_acquire(ctx);
     acb->common.cb(acb->common.opaque, acb->ret);
+    aio_context_release(ctx);
     qemu_aio_unref(acb);
 }

diff --git a/block/block-backend.c b/block/block-backend.c
index aee8a12..185ba32 100644
--- a/block/block-backend.c
+++ b/block/block-backend.c
@@ -529,8 +529,12 @@ int blk_write_zeroes(BlockBackend *blk, int64_t sector_num,
 static void error_callback_bh(void *opaque)
 {
     struct BlockBackendAIOCB *acb = opaque;
+    AioContext *ctx = bdrv_get_aio_context(acb->common.bs);
+
     qemu_bh_delete(acb->bh);
+    aio_context_acquire(ctx);
     acb->common.cb(acb->common.opaque, acb->ret);
+    aio_context_release(ctx);
     qemu_aio_unref(acb);
 }

diff --git a/block/curl.c b/block/curl.c
index b572828..446a6d9 100644
--- a/block/curl.c
+++ b/block/curl.c
@@ -651,10 +651,14 @@ static void curl_readv_bh_cb(void *p)
 {
     CURLState *state;
     int running;
+    int ret = -EINPROGRESS;
 
     CURLAIOCB *acb = p;
-    BDRVCURLState *s = acb->common.bs->opaque;
+    BlockDriverState *bs = acb->common.bs;
+    BDRVCURLState *s = bs->opaque;
+    AioContext *ctx = bdrv_get_aio_context(bs);
 
+    aio_context_acquire(ctx);
     qemu_bh_delete(acb->bh);
     acb->bh = NULL;
 
@@ -668,7 +672,7 @@ static void curl_readv_bh_cb(void *p)
         qemu_aio_unref(acb);
         // fall through
     case FIND_RET_WAIT:
-        return;
+        goto out;
     default:
         break;
     }

@@ -676,9 +680,8 @@ static void curl_readv_bh_cb(void *p)
     // No cache found, so let's start a new request
     state = curl_init_state(acb->common.bs, s);
     if (!state) {
-        acb->common.cb(acb->common.opaque, -EIO);
-        qemu_aio_unref(acb);
-        return;
+        ret = -EIO;
+        goto out;
     }
 
     acb->start = 0;

@@ -692,9 +695,8 @@ static void curl_readv_bh_cb(void *p)
     state->orig_buf = g_try_malloc(state->buf_len);
     if (state->buf_len && state->orig_buf == NULL) {
         curl_clean_state(state);
-        acb->common.cb(acb->common.opaque, -ENOMEM);
-        qemu_aio_unref(acb);
-        return;
+        ret = -ENOMEM;
+        goto out;
     }
 
     state->acb[0] = acb;

@@ -707,6 +709,13 @@ static void curl_readv_bh_cb(void *p)
 
     /* Tell curl it needs to kick things off */
     curl_multi_socket_action(s->multi, CURL_SOCKET_TIMEOUT, 0, &running);
+
+out:
+    if (ret != -EINPROGRESS) {
+        acb->common.cb(acb->common.opaque, ret);
+        qemu_aio_unref(acb);
+    }
+    aio_context_release(ctx);
 }
 
 static BlockAIOCB *curl_aio_readv(BlockDriverState *bs,

diff --git a/block/gluster.c b/block/gluster.c
index 1eb3a8c..35d4230 100644
--- a/block/gluster.c
+++ b/block/gluster.c
@@ -232,7 +232,9 @@ static void qemu_gluster_complete_aio(void *opaque)
 
     qemu_bh_delete(acb->bh);
     acb->bh = NULL;
+    aio_context_acquire(acb->aio_context);
     qemu_coroutine_enter(acb->coroutine, NULL);
+    aio_context_release(acb->aio_context);
 }
 
 /*

diff --git a/block/io.c b/block/io.c
index d4bc83b..74f5705 100644
--- a/block/io.c
+++ b/block/io.c
@@ -2002,12 +2002,15 @@ static const AIOCBInfo bdrv_em_aiocb_info = {
 static void bdrv_aio_bh_cb(void *opaque)
 {
     BlockAIOCBSync *acb = opaque;
+    AioContext *ctx = bdrv_get_aio_context(acb->common.bs);
 
     if (!acb->is_write && acb->ret >= 0) {
         qemu_iovec_from_buf(acb->qiov, 0, acb->bounce, acb->qiov->size);
     }
     qemu_vfree(acb->bounce);
+    aio_context_acquire(ctx);
     acb->common.cb(acb->common.opaque, acb->ret);
+    aio_context_release(ctx);
     qemu_bh_delete(acb->bh);
     acb->bh = NULL;
     qemu_aio_unref(acb);

@@ -2083,10 +2086,13 @@ static void bdrv_co_complete(BlockAIOCBCoroutine *acb)
 static void bdrv_co_em_bh(void *opaque)
 {
     BlockAIOCBCoroutine *acb = opaque;
+    AioContext *ctx = bdrv_get_aio_context(acb->common.bs);
 
     assert(!acb->need_bh);
     qemu_bh_delete(acb->bh);
+    aio_context_acquire(ctx);
     bdrv_co_complete(acb);
+    aio_context_release(ctx);
 }
 
 static void bdrv_co_maybe_schedule_bh(BlockAIOCBCoroutine *acb)

diff --git a/block/iscsi.c b/block/iscsi.c
index 1c3f99b..9948e70 100644
--- a/block/iscsi.c
+++ b/block/iscsi.c
@@ -150,9 +150,13 @@ iscsi_schedule_bh(IscsiAIOCB *acb)
 static void iscsi_co_generic_bh_cb(void *opaque)
 {
     struct IscsiTask *iTask = opaque;
+    AioContext *ctx = iTask->iscsilun->aio_context;
+
     iTask->complete = 1;
     qemu_bh_delete(iTask->bh);
+    aio_context_acquire(ctx);
     qemu_coroutine_enter(iTask->co, NULL);
+    aio_context_release(ctx);
 }
 
 static void iscsi_retry_timer_expired(void *opaque)

diff --git a/block/linux-aio.c b/block/linux-aio.c
index c991443..bc83e5c 100644
--- a/block/linux-aio.c
+++ b/block/linux-aio.c
@@ -46,6 +46,8 @@ typedef struct {
 } LaioQueue;
 
 struct qemu_laio_state {
+    AioContext *aio_context;
+
     io_context_t ctx;
     EventNotifier e;

@@ -109,6 +111,7 @@ static void qemu_laio_completion_bh(void *opaque)
     struct qemu_laio_state *s = opaque;
 
     /* Fetch more completion events when empty */
+    aio_context_acquire(s->aio_context);
     if (s->event_idx == s->event_max) {
         do {
             struct timespec ts = { 0 };

@@ -141,6 +144,8 @@ static void qemu_laio_completion_bh(void *opaque)
     if (!s->io_q.plugged && !QSIMPLEQ_EMPTY(&s->io_q.pending)) {
         ioq_submit(s);
     }
+
+    aio_context_release(s->aio_context);
 }
 
 static void qemu_laio_completion_cb(EventNotifier *e)

@@ -289,12 +294,14 @@ void laio_detach_aio_context(void *s_, AioContext *old_context)
 
     aio_set_event_notifier(old_context, &s->e, NULL);
     qemu_bh_delete(s->completion_bh);
+    s->aio_context = NULL;
 }
 
 void laio_attach_aio_context(void *s_, AioContext *new_context)
 {
     struct qemu_laio_state *s = s_;
 
+    s->aio_context = new_context;
     s->completion_bh = aio_bh_new(new_context, qemu_laio_completion_bh, s);
     aio_set_event_notifier(new_context, &s->e, qemu_laio_completion_cb);
 }

diff --git a/block/nfs.c b/block/nfs.c
index 05b02f5..ffab087 100644
--- a/block/nfs.c
+++ b/block/nfs.c
@@ -103,9 +103,13 @@ static void nfs_co_init_task(NFSClient *client, NFSRPC *task)
 static void nfs_co_generic_bh_cb(void *opaque)
 {
     NFSRPC *task = opaque;
+    AioContext *ctx = task->client->aio_context;
+
     task->complete = 1;
     qemu_bh_delete(task->bh);
+    aio_context_acquire(ctx);
     qemu_coroutine_enter(task->co, NULL);
+    aio_context_release(ctx);
 }
 
 static void

diff --git a/block/null.c b/block/null.c
index 7d08323..dd1b170 100644
--- a/block/null.c
+++ b/block/null.c
@@ -117,7 +117,11 @@ static const AIOCBInfo null_aiocb_info = {
 static void null_bh_cb(void *opaque)
 {
     NullAIOCB *acb = opaque;
+    AioContext *ctx = bdrv_get_aio_context(acb->common.bs);
+
+    aio_context_acquire(ctx);
     acb->common.cb(acb->common.opaque, 0);
+    aio_context_release(ctx);
     qemu_bh_delete(acb->bh);
     qemu_aio_unref(acb);
 }

diff --git a/block/qed.c b/block/qed.c
index 954ed00..d47d7e1 100644
--- a/block/qed.c
+++ b/block/qed.c
@@ -910,12 +910,15 @@ static void qed_aio_complete_bh(void *opaque)
     BlockCompletionFunc *cb = acb->common.cb;
     void *user_opaque = acb->common.opaque;
     int ret = acb->bh_ret;
+    AioContext *ctx = bdrv_get_aio_context(acb->common.bs);
 
     qemu_bh_delete(acb->bh);
     qemu_aio_unref(acb);
 
     /* Invoke callback */
+    aio_context_acquire(ctx);
     cb(user_opaque, ret);
+    aio_context_release(ctx);
 }
 
 static void qed_aio_complete(QEDAIOCB *acb, int ret)

diff --git a/block/rbd.c b/block/rbd.c
index a60a19d..6206dc3 100644
--- a/block/rbd.c
+++ b/block/rbd.c
@@ -376,6 +376,7 @@ static int qemu_rbd_create(const char *filename, QemuOpts *opts, Error **errp)
 static void qemu_rbd_complete_aio(RADOSCB *rcb)
 {
     RBDAIOCB *acb = rcb->acb;
+    AioContext *ctx = bdrv_get_aio_context(acb->common.bs);
     int64_t r;
 
     r = rcb->ret;

@@ -408,7 +409,10 @@ static void qemu_rbd_complete_aio(RADOSCB *rcb)
         qemu_iovec_from_buf(acb->qiov, 0, acb->bounce, acb->qiov->size);
     }
     qemu_vfree(acb->bounce);
+
+    aio_context_acquire(ctx);
     acb->common.cb(acb->common.opaque, (acb->ret > 0 ? 0 : acb->ret));
+    aio_context_release(ctx);
     qemu_aio_unref(acb);
 }

diff --git a/dma-helpers.c b/dma-helpers.c
index 4faec5d..68f6f07 100644
--- a/dma-helpers.c
+++ b/dma-helpers.c
@@ -69,6 +69,7 @@ void qemu_sglist_destroy(QEMUSGList *qsg)
 
 typedef struct {
     BlockAIOCB common;
+    AioContext *ctx;
     BlockBackend *blk;
     BlockAIOCB *acb;
     QEMUSGList *sg;

@@ -153,8 +154,7 @@ static void dma_blk_cb(void *opaque, int ret)
 
     if (dbs->iov.size == 0) {
         trace_dma_map_wait(dbs);
-        dbs->bh = aio_bh_new(blk_get_aio_context(dbs->blk),
-                             reschedule_dma, dbs);
+        dbs->bh = aio_bh_new(dbs->ctx, reschedule_dma, dbs);
         cpu_register_map_client(dbs->bh);
         return;
     }

@@ -163,8 +163,10 @@ static void dma_blk_cb(void *opaque, int ret)
         qemu_iovec_discard_back(&dbs->iov, dbs->iov.size & ~BDRV_SECTOR_MASK);
     }
 
+    aio_context_acquire(dbs->ctx);
     dbs->acb = dbs->io_func(dbs->blk, dbs->sector_num, &dbs->iov,
                             dbs->iov.size / 512, dma_blk_cb, dbs);
+    aio_context_release(dbs->ctx);
     assert(dbs->acb);
 }

@@ -201,6 +203,7 @@ BlockAIOCB *dma_blk_io(
 
     dbs->acb = NULL;
     dbs->blk = blk;
+    dbs->ctx = blk_get_aio_context(blk);
     dbs->sg = sg;
     dbs->sector_num = sector_num;
     dbs->sg_cur_index = 0;

diff --git a/hw/block/virtio-blk.c b/hw/block/virtio-blk.c
index 1556c9c..4462ad2 100644
--- a/hw/block/virtio-blk.c
+++ b/hw/block/virtio-blk.c
@@ -620,6 +620,7 @@ static void virtio_blk_dma_restart_bh(void *opaque)
 
     s->rq = NULL;
 
+    aio_context_acquire(blk_get_aio_context(s->conf.conf.blk));
     while (req) {
         VirtIOBlockReq *next = req->next;
         virtio_blk_handle_request(req, &mrb);

@@ -629,6 +630,7 @@ static void virtio_blk_dma_restart_bh(void *opaque)
     if (mrb.num_reqs) {
         virtio_blk_submit_multireq(s->blk, &mrb);
     }
+    aio_context_release(blk_get_aio_context(s->conf.conf.blk));
 }
 
 static void virtio_blk_dma_restart_cb(void *opaque, int running,

diff --git a/hw/scsi/scsi-bus.c b/hw/scsi/scsi-bus.c
index ffac8f4..c42e854 100644
--- a/hw/scsi/scsi-bus.c
+++ b/hw/scsi/scsi-bus.c
@@ -102,6 +102,7 @@ static void scsi_dma_restart_bh(void *opaque)
     qemu_bh_delete(s->bh);
     s->bh = NULL;
 
+    aio_context_acquire(blk_get_aio_context(s->conf.blk));
     QTAILQ_FOREACH_SAFE(req, &s->requests, next, next) {
         scsi_req_ref(req);
         if (req->retry) {

@@ -119,6 +120,7 @@ static void scsi_dma_restart_bh(void *opaque)
         }
         scsi_req_unref(req);
     }
+    aio_context_release(blk_get_aio_context(s->conf.blk));
 }
 
 void scsi_req_retry(SCSIRequest *req)

diff --git a/thread-pool.c b/thread-pool.c
index ac909f4..1039188 100644
--- a/thread-pool.c
+++ b/thread-pool.c
@@ -165,6 +165,7 @@ static void thread_pool_completion_bh(void *opaque)
     ThreadPool *pool = opaque;
     ThreadPoolElement *elem, *next;
 
+    aio_context_acquire(pool->ctx);
 restart:
     QLIST_FOREACH_SAFE(elem, &pool->head, all, next) {
         if (elem->state != THREAD_DONE) {

@@ -191,6 +192,7 @@ restart:
             qemu_aio_unref(elem);
         }
     }
+    aio_context_release(pool->ctx);
 }
 
 static void thread_pool_cancel(BlockAIOCB *acb)
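
A note on the other half of this convention: once aio_bh_poll() no longer
wraps bottom halves in acquire/release, code that submits I/O from outside
the AioContext's home thread has to hold the context itself, just as
dma_blk_cb() and virtio_blk_dma_restart_bh() do above. A minimal
caller-side sketch (blk, qiov, sector_num, nb_sectors, my_cb and my_opaque
are hypothetical names, not from this patch):

    /* Hypothetical submitter: take the backend's context across the aio
     * call so the request is queued under the same lock that the
     * completion bottom halves above acquire.
     */
    AioContext *ctx = blk_get_aio_context(blk);

    aio_context_acquire(ctx);
    blk_aio_readv(blk, sector_num, &qiov, nb_sectors, my_cb, my_opaque);
    aio_context_release(ctx);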