From patchwork Tue May 5 10:19:00 2015 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Pavel Dovgalyuk X-Patchwork-Id: 468050 Return-Path: X-Original-To: incoming@patchwork.ozlabs.org Delivered-To: patchwork-incoming@bilbo.ozlabs.org Received: from lists.gnu.org (lists.gnu.org [IPv6:2001:4830:134:3::11]) (using TLSv1 with cipher AES256-SHA (256/256 bits)) (No client certificate requested) by ozlabs.org (Postfix) with ESMTPS id 517951409B7 for ; Tue, 5 May 2015 20:23:12 +1000 (AEST) Received: from localhost ([::1]:38114 helo=lists.gnu.org) by lists.gnu.org with esmtp (Exim 4.71) (envelope-from ) id 1Ypa0L-0006O5-TI for incoming@patchwork.ozlabs.org; Tue, 05 May 2015 06:23:09 -0400 Received: from eggs.gnu.org ([2001:4830:134:3::10]:60588) by lists.gnu.org with esmtp (Exim 4.71) (envelope-from ) id 1YpZwL-0007WT-TT for qemu-devel@nongnu.org; Tue, 05 May 2015 06:19:06 -0400 Received: from Debian-exim by eggs.gnu.org with spam-scanned (Exim 4.71) (envelope-from ) id 1YpZwK-0007lF-Et for qemu-devel@nongnu.org; Tue, 05 May 2015 06:19:01 -0400 Received: from mail.ispras.ru ([83.149.199.45]:38905) by eggs.gnu.org with esmtp (Exim 4.71) (envelope-from ) id 1YpZwK-0007kK-2Y for qemu-devel@nongnu.org; Tue, 05 May 2015 06:19:00 -0400 Received: from [10.10.150.75] (unknown [85.142.117.224]) by mail.ispras.ru (Postfix) with ESMTPSA id 518B654006F; Tue, 5 May 2015 13:18:59 +0300 (MSK) To: qemu-devel@nongnu.org From: Pavel Dovgalyuk Date: Tue, 05 May 2015 13:19:00 +0300 Message-ID: <20150505101900.16764.4906.stgit@PASHA-ISP> In-Reply-To: <20150505101732.16764.93601.stgit@PASHA-ISP> References: <20150505101732.16764.93601.stgit@PASHA-ISP> User-Agent: StGit/0.16 MIME-Version: 1.0 X-detected-operating-system: by eggs.gnu.org: GNU/Linux 3.x X-Received-From: 83.149.199.45 Cc: peter.maydell@linaro.org, peter.crosthwaite@xilinx.com, alex.bennee@linaro.org, mark.burton@greensocs.com, real@ispras.ru, batuzovk@ispras.ru, 
maria.klimushenkova@ispras.ru, pavel.dovgaluk@ispras.ru, pbonzini@redhat.com, fred.konrad@greensocs.com Subject: [Qemu-devel] [RFC PATCH v12 15/21] aio: replace stack of bottom halves with queue X-BeenThere: qemu-devel@nongnu.org X-Mailman-Version: 2.1.14 Precedence: list List-Id: List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: qemu-devel-bounces+incoming=patchwork.ozlabs.org@nongnu.org Sender: qemu-devel-bounces+incoming=patchwork.ozlabs.org@nongnu.org Bottom halves in AIO context are stored and removed in LIFO order. It makes their execution non-deterministic. This patch replaces the stack with a queue to preserve the order of bottom halves processing. Signed-off-by: Pavel Dovgalyuk --- async.c | 26 +++++++++++--------------- include/block/aio.h | 4 ++-- include/qemu/queue.h | 18 ++++++++++++++++++ 3 files changed, 31 insertions(+), 17 deletions(-) diff --git a/async.c b/async.c index 2b51e87..bd975c9 100644 --- a/async.c +++ b/async.c @@ -35,7 +35,7 @@ struct QEMUBH { AioContext *ctx; QEMUBHFunc *cb; void *opaque; - QEMUBH *next; + QSIMPLEQ_ENTRY(QEMUBH) next; bool scheduled; bool idle; bool deleted; @@ -51,10 +51,7 @@ QEMUBH *aio_bh_new(AioContext *ctx, QEMUBHFunc *cb, void *opaque) .opaque = opaque, }; qemu_mutex_lock(&ctx->bh_lock); - bh->next = ctx->first_bh; - /* Make sure that the members are ready before putting bh into list */ - smp_wmb(); - ctx->first_bh = bh; + QSIMPLEQ_INSERT_TAIL_RCU(&ctx->bh_queue, bh, next); qemu_mutex_unlock(&ctx->bh_lock); return bh; } @@ -62,16 +59,15 @@ QEMUBH *aio_bh_new(AioContext *ctx, QEMUBHFunc *cb, void *opaque) /* Multiple occurrences of aio_bh_poll cannot be called concurrently */ int aio_bh_poll(AioContext *ctx) { - QEMUBH *bh, **bhp, *next; + QEMUBH *bh, *next, *prev; int ret; ctx->walking_bh++; ret = 0; - for (bh = ctx->first_bh; bh; bh = next) { + QSIMPLEQ_FOREACH(bh, &ctx->bh_queue, next) { /* Make sure that fetching bh happens before accessing its members */ 
smp_read_barrier_depends(); - next = bh->next; /* The atomic_xchg is paired with the one in qemu_bh_schedule. The * implicit memory barrier ensures that the callback sees all writes * done by the scheduling thread. It also ensures that the scheduling @@ -91,14 +87,13 @@ int aio_bh_poll(AioContext *ctx) /* remove deleted bhs */ if (!ctx->walking_bh) { qemu_mutex_lock(&ctx->bh_lock); - bhp = &ctx->first_bh; - while (*bhp) { - bh = *bhp; + prev = NULL; + QSIMPLEQ_FOREACH_SAFE(bh, &ctx->bh_queue, next, next) { if (bh->deleted) { - *bhp = bh->next; + QSIMPLEQ_REMOVE_AFTER(&ctx->bh_queue, prev, QEMUBH, next); g_free(bh); } else { - bhp = &bh->next; + prev = bh; } } qemu_mutex_unlock(&ctx->bh_lock); @@ -157,7 +152,7 @@ aio_compute_timeout(AioContext *ctx) int timeout = -1; QEMUBH *bh; - for (bh = ctx->first_bh; bh; bh = bh->next) { + QSIMPLEQ_FOREACH(bh, &ctx->bh_queue, next) { if (!bh->deleted && bh->scheduled) { if (bh->idle) { /* idle bottom halves will be polled at least @@ -200,7 +195,7 @@ aio_ctx_check(GSource *source) AioContext *ctx = (AioContext *) source; QEMUBH *bh; - for (bh = ctx->first_bh; bh; bh = bh->next) { + QSIMPLEQ_FOREACH(bh, &ctx->bh_queue, next) { if (!bh->deleted && bh->scheduled) { return true; } @@ -307,6 +302,7 @@ AioContext *aio_context_new(Error **errp) qemu_mutex_init(&ctx->bh_lock); rfifolock_init(&ctx->lock, aio_rfifolock_cb, ctx); timerlistgroup_init(&ctx->tlg, aio_timerlist_notify, ctx); + QSIMPLEQ_INIT(&ctx->bh_queue); return ctx; } diff --git a/include/block/aio.h b/include/block/aio.h index 7d1e26b..82cdf78 100644 --- a/include/block/aio.h +++ b/include/block/aio.h @@ -71,8 +71,8 @@ struct AioContext { /* lock to protect between bh's adders and deleter */ QemuMutex bh_lock; - /* Anchor of the list of Bottom Halves belonging to the context */ - struct QEMUBH *first_bh; + /* List of Bottom Halves belonging to the context */ + QSIMPLEQ_HEAD(, QEMUBH) bh_queue; /* A simple lock used to protect the first_bh list, and ensure that * no 
callbacks are removed while we're walking and dispatching callbacks. diff --git a/include/qemu/queue.h b/include/qemu/queue.h index f781aa2..99564bc 100644 --- a/include/qemu/queue.h +++ b/include/qemu/queue.h @@ -271,6 +271,13 @@ struct { \ (head)->sqh_last = &(elm)->field.sqe_next; \ } while (/*CONSTCOND*/0) +#define QSIMPLEQ_INSERT_TAIL_RCU(head, elm, field) do { \ + (elm)->field.sqe_next = NULL; \ + smp_wmb(); \ + atomic_rcu_set((head)->sqh_last, (elm)); \ + (head)->sqh_last = &(elm)->field.sqe_next; \ +} while (/*CONSTCOND*/0) + #define QSIMPLEQ_INSERT_AFTER(head, listelm, elm, field) do { \ if (((elm)->field.sqe_next = (listelm)->field.sqe_next) == NULL) \ (head)->sqh_last = &(elm)->field.sqe_next; \ @@ -306,6 +313,17 @@ struct { \ } \ } while (/*CONSTCOND*/0) +#define QSIMPLEQ_REMOVE_AFTER(head, curelm, type, field) do { \ + if ((curelm) == NULL) { \ + QSIMPLEQ_REMOVE_HEAD((head), field); \ + } else { \ + if (((curelm)->field.sqe_next = \ + (curelm)->field.sqe_next->field.sqe_next) == NULL) { \ + (head)->sqh_last = &(curelm)->field.sqe_next; \ + } \ + } \ +} while (/*CONSTCOND*/0) + #define QSIMPLEQ_FOREACH(var, head, field) \ for ((var) = ((head)->sqh_first); \ (var); \