From patchwork Mon Jul 29 03:16:07 2013
X-Patchwork-Id: 262655
Subject: [Qemu-devel] [RFC v2 4/5] timer: associate three timerlists with AioContext
From: Liu Ping Fan
To: qemu-devel@nongnu.org
Cc: Kevin Wolf, Stefan Hajnoczi, Jan Kiszka, Alex Bligh, Anthony Liguori,
 Paolo Bonzini
Date: Mon, 29 Jul 2013 11:16:07 +0800
Message-Id: <1375067768-11342-5-git-send-email-pingfank@linux.vnet.ibm.com>
In-Reply-To: <1375067768-11342-1-git-send-email-pingfank@linux.vnet.ibm.com>
References: <1375067768-11342-1-git-send-email-pingfank@linux.vnet.ibm.com>

Currently, timers run on the iothread inside the BQL, which limits their use
in some cases, e.g. virtio-blk-dataplane. In order to run timers on a private
thread, driven by the different clock sources, we arm each AioContext with
three timer lists corresponding to the three clock sources (QEMUClock).
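As an illustration of the intended usage (this snippet is not part of the
patch; the callback, the helper name arm_private_timer() and the 100 us
deadline are made up for the example), a timer created through the new
aioctx_new_timer_ns() helper lands on the given context's per-clock list
rather than on a global one:

#include "block/aio.h"
#include "qemu/timer.h"

/* Hypothetical callback: with this series it runs in whatever thread
 * polls "ctx", not in the iothread. */
static void my_deadline_cb(void *opaque)
{
}

static void arm_private_timer(AioContext *ctx)
{
    /* vm_clock has type QEMU_CLOCK_VIRTUAL, so the timer is queued on
     * ctx->timer_list[QEMU_CLOCK_VIRTUAL] instead of a global list. */
    QEMUTimer *t = aioctx_new_timer_ns(vm_clock, my_deadline_cb, NULL, ctx);

    qemu_mod_timer_ns(t, qemu_get_clock_ns(vm_clock) + 100000);
}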
A little later, we will run timers in aio_poll.

Signed-off-by: Liu Ping Fan

------ issue to fix ---
Note: before this patch, there should be another one to fix the race
between qemu_mod_timer() and _run_timers().
---
 async.c              |  9 ++++++++
 include/block/aio.h  | 13 +++++++++++
 include/qemu/timer.h | 20 ++++++++++++++++
 qemu-timer.c         | 65 +++++++++++++++++++++++++++++++---------------------
 4 files changed, 81 insertions(+), 26 deletions(-)

diff --git a/async.c b/async.c
index ba4072c..7e2340e 100644
--- a/async.c
+++ b/async.c
@@ -201,11 +201,15 @@
 static void aio_ctx_finalize(GSource *source)
 {
     AioContext *ctx = (AioContext *) source;
+    int i;
 
     thread_pool_free(ctx->thread_pool);
     aio_set_event_notifier(ctx, &ctx->notifier, NULL, NULL);
     event_notifier_cleanup(&ctx->notifier);
     g_array_free(ctx->pollfds, TRUE);
+    for (i = 0; i < QEMU_CLOCK_MAXCNT; i++) {
+        timer_list_finalize(&ctx->timer_list[i]);
+    }
 }
 
 static GSourceFuncs aio_source_funcs = {
@@ -237,6 +241,8 @@ void aio_notify(AioContext *ctx)
 AioContext *aio_context_new(void)
 {
     AioContext *ctx;
+    int i;
+
     ctx = (AioContext *) g_source_new(&aio_source_funcs, sizeof(AioContext));
     ctx->pollfds = g_array_new(FALSE, FALSE, sizeof(GPollFD));
     ctx->thread_pool = NULL;
@@ -245,6 +251,9 @@ AioContext *aio_context_new(void)
     aio_set_event_notifier(ctx, &ctx->notifier,
                            (EventNotifierHandler *)
                            event_notifier_test_and_clear, NULL);
+    for (i = 0; i < QEMU_CLOCK_MAXCNT; i++) {
+        timer_list_init(&ctx->timer_list[i]);
+    }
 
     return ctx;
 }

diff --git a/include/block/aio.h b/include/block/aio.h
index 04598b2..cf41b42 100644
--- a/include/block/aio.h
+++ b/include/block/aio.h
@@ -43,6 +43,18 @@ typedef struct AioHandler AioHandler;
 typedef void QEMUBHFunc(void *opaque);
 typedef void IOHandler(void *opaque);
 
+/* Related timer with AioContext */
+typedef struct QEMUTimer QEMUTimer;
+#define QEMU_CLOCK_MAXCNT 3
+
+typedef struct TimerList {
+    QEMUTimer *active_timers;
+    QemuMutex active_timers_lock;
+} TimerList;
+
+void timer_list_init(TimerList *tlist);
+void timer_list_finalize(TimerList *tlist);
+
 typedef struct AioContext {
     GSource source;
 
@@ -73,6 +85,7 @@ typedef struct AioContext {
 
     /* Thread pool for performing work and receiving completion callbacks */
     struct ThreadPool *thread_pool;
+    TimerList timer_list[QEMU_CLOCK_MAXCNT];
 } AioContext;
 
 /* Returns 1 if there are still outstanding AIO requests; 0 otherwise */

diff --git a/include/qemu/timer.h b/include/qemu/timer.h
index 9dd206c..3e5016b 100644
--- a/include/qemu/timer.h
+++ b/include/qemu/timer.h
@@ -33,9 +33,14 @@ extern QEMUClock *vm_clock;
 extern QEMUClock *host_clock;
 
 int64_t qemu_get_clock_ns(QEMUClock *clock);
+/* qemu_clock_has_timers, qemu_clock_expired, qemu_clock_deadline
+ * run In tcg icount mode. There is only one AioContext i.e. qemu_aio_context.
+ * So we only count the timers on qemu_aio_context.
+ */
 int64_t qemu_clock_has_timers(QEMUClock *clock);
 int64_t qemu_clock_expired(QEMUClock *clock);
 int64_t qemu_clock_deadline(QEMUClock *clock);
+
 void qemu_clock_enable(QEMUClock *clock, bool enabled);
 void qemu_clock_warp(QEMUClock *clock);
 
@@ -45,6 +50,9 @@ void qemu_unregister_clock_reset_notifier(QEMUClock *clock,
 
 QEMUTimer *qemu_new_timer(QEMUClock *clock, int scale,
                           QEMUTimerCB *cb, void *opaque);
+QEMUTimer *aioctx_new_timer(QEMUClock *clock, int scale,
+                            QEMUTimerCB *cb, void *opaque, AioContext *ctx);
+
 void qemu_free_timer(QEMUTimer *ts);
 void qemu_del_timer(QEMUTimer *ts);
 void qemu_mod_timer_ns(QEMUTimer *ts, int64_t expire_time);
@@ -75,6 +83,18 @@ static inline QEMUTimer *qemu_new_timer_ms(QEMUClock *clock, QEMUTimerCB *cb,
     return qemu_new_timer(clock, SCALE_MS, cb, opaque);
 }
 
+static inline QEMUTimer *aioctx_new_timer_ns(QEMUClock *clock, QEMUTimerCB *cb,
+                                             void *opaque, AioContext *ctx)
+{
+    return aioctx_new_timer(clock, SCALE_NS, cb, opaque, ctx);
+}
+
+static inline QEMUTimer *aioctx_new_timer_ms(QEMUClock *clock, QEMUTimerCB *cb,
+                                             void *opaque, AioContext *ctx)
+{
+    return aioctx_new_timer(clock, SCALE_MS, cb, opaque, ctx);
+}
+
 static inline int64_t qemu_get_clock_ms(QEMUClock *clock)
 {
     return qemu_get_clock_ns(clock) / SCALE_MS;

diff --git a/qemu-timer.c b/qemu-timer.c
index d941a83..f15c3e6 100644
--- a/qemu-timer.c
+++ b/qemu-timer.c
@@ -45,14 +45,6 @@
 #define QEMU_CLOCK_REALTIME 0
 #define QEMU_CLOCK_VIRTUAL 1
 #define QEMU_CLOCK_HOST 2
-#define QEMU_CLOCK_MAXCNT 3
-
-typedef struct TimerList {
-    QEMUTimer *active_timers;
-    QemuMutex active_timers_lock;
-} TimerList;
-
-static TimerList timer_list[QEMU_CLOCK_MAXCNT];
 
 struct QEMUClock {
     NotifierList reset_notifiers;
@@ -72,7 +64,9 @@ struct QEMUClock {
 
 struct QEMUTimer {
     int64_t expire_time;        /* in nanoseconds */
+    /* quick link to AioContext timer list */
     TimerList *list;
+    AioContext *ctx;
     QEMUTimerCB *cb;
     void *opaque;
     QEMUTimer *next;
@@ -100,11 +94,12 @@ struct qemu_alarm_timer {
 
 static struct qemu_alarm_timer *alarm_timer;
 
-static TimerList *clock_to_timerlist(QEMUClock *clock)
+static TimerList *clock_to_timerlist(QEMUClock *clock, AioContext *ctx)
 {
     int type = clock->type;
 
-    return &timer_list[type];
+    assert(ctx);
+    return &ctx->timer_list[type];
 }
 
 static bool qemu_timer_expired_ns(QEMUTimer *timer_head, int64_t current_time)
@@ -112,7 +107,8 @@ static bool qemu_timer_expired_ns(QEMUTimer *timer_head, int64_t current_time)
     return timer_head && (timer_head->expire_time <= current_time);
 }
 
-static int64_t qemu_next_clock_deadline(QEMUClock *clock, int64_t delta)
+static int64_t qemu_next_clock_deadline(QEMUClock *clock, int64_t delta,
+                                        AioContext *ctx)
 {
     int64_t expire_time, next;
     bool has_timer = false;
@@ -122,7 +118,7 @@ static int64_t qemu_next_clock_deadline(QEMUClock *clock, int64_t delta)
         return delta;
     }
 
-    tlist = clock_to_timerlist(clock);
+    tlist = clock_to_timerlist(clock, ctx);
     qemu_mutex_lock(&tlist->active_timers_lock);
     if (tlist->active_timers) {
         has_timer = true;
@@ -140,12 +136,13 @@
 static int64_t qemu_next_alarm_deadline(void)
 {
     int64_t delta = INT64_MAX;
+    AioContext *ctx = *tls_get_thread_aio_context();
 
     if (!use_icount) {
-        delta = qemu_next_clock_deadline(vm_clock, delta);
+        delta = qemu_next_clock_deadline(vm_clock, delta, ctx);
     }
-    delta = qemu_next_clock_deadline(host_clock, delta);
-    return qemu_next_clock_deadline(rt_clock, delta);
+    delta = qemu_next_clock_deadline(host_clock, delta, ctx);
+    return qemu_next_clock_deadline(rt_clock, delta, ctx);
 }
 
 static void qemu_rearm_alarm_timer(struct qemu_alarm_timer *t)
@@ -267,16 +264,21 @@ QEMUClock *rt_clock;
 QEMUClock *vm_clock;
 QEMUClock *host_clock;
 
-static void timer_list_init(TimerList *tlist)
+void timer_list_init(TimerList *tlist)
 {
     qemu_mutex_init(&tlist->active_timers_lock);
     tlist->active_timers = NULL;
 }
 
+void timer_list_finalize(TimerList *tlist)
+{
+    qemu_mutex_destroy(&tlist->active_timers_lock);
+    assert(!tlist->active_timers);
+}
+
 static QEMUClock *qemu_new_clock(int type)
 {
     QEMUClock *clock;
-    TimerList *tlist;
 
     clock = g_malloc0(sizeof(QEMUClock));
     clock->type = type;
@@ -286,8 +288,6 @@ static QEMUClock *qemu_new_clock(int type)
     qemu_cond_init(&clock->wait_using);
     qemu_mutex_init(&clock->lock);
     notifier_list_init(&clock->reset_notifiers);
-    tlist = clock_to_timerlist(clock);
-    timer_list_init(tlist);
 
     return clock;
 }
@@ -308,10 +308,14 @@ void qemu_clock_enable(QEMUClock *clock, bool enabled)
     }
 }
 
+/* qemu_clock_has_timers, qemu_clock_expired, qemu_clock_deadline
+ * run In tcg icount mode. There is only one AioContext i.e. qemu_aio_context.
+ * So we only count the timers on qemu_aio_context.
+*/
 int64_t qemu_clock_has_timers(QEMUClock *clock)
 {
     bool has_timers;
-    TimerList *tlist = clock_to_timerlist(clock);
+    TimerList *tlist = clock_to_timerlist(clock, qemu_get_aio_context());
 
     qemu_mutex_lock(&tlist->active_timers_lock);
     has_timers = !!tlist->active_timers;
@@ -323,7 +327,7 @@ int64_t qemu_clock_expired(QEMUClock *clock)
 {
     bool has_timers;
     int64_t expire_time;
-    TimerList *tlist = clock_to_timerlist(clock);
+    TimerList *tlist = clock_to_timerlist(clock, qemu_get_aio_context());
 
     qemu_mutex_lock(&tlist->active_timers_lock);
     has_timers = tlist->active_timers;
@@ -339,7 +343,7 @@ int64_t qemu_clock_deadline(QEMUClock *clock)
     int64_t delta = INT32_MAX;
     bool has_timers;
     int64_t expire_time;
-    TimerList *tlist = clock_to_timerlist(clock);
+    TimerList *tlist = clock_to_timerlist(clock, qemu_get_aio_context());
 
     qemu_mutex_lock(&tlist->active_timers_lock);
     has_timers = tlist->active_timers;
@@ -355,19 +359,26 @@ int64_t qemu_clock_deadline(QEMUClock *clock)
     return delta;
 }
 
-QEMUTimer *qemu_new_timer(QEMUClock *clock, int scale,
-                          QEMUTimerCB *cb, void *opaque)
+QEMUTimer *aioctx_new_timer(QEMUClock *clock, int scale,
+                            QEMUTimerCB *cb, void *opaque, AioContext *ctx)
 {
     QEMUTimer *ts;
 
     ts = g_malloc0(sizeof(QEMUTimer));
-    ts->list = clock_to_timerlist(clock);
+    ts->list = clock_to_timerlist(clock, ctx);
     ts->cb = cb;
     ts->opaque = opaque;
     ts->scale = scale;
+    ts->ctx = ctx;
     return ts;
 }
 
+QEMUTimer *qemu_new_timer(QEMUClock *clock, int scale,
+                          QEMUTimerCB *cb, void *opaque)
+{
+    return aioctx_new_timer(clock, scale, cb, opaque, qemu_get_aio_context());
+}
+
 void qemu_free_timer(QEMUTimer *ts)
 {
     g_free(ts);
@@ -457,6 +468,7 @@ void qemu_run_timers(QEMUClock *clock)
     QEMUTimer *ts;
     int64_t current_time;
     TimerList *tlist;
+    AioContext *ctx;
 
     atomic_inc(&clock->using);
     if (unlikely(!clock->enabled)) {
@@ -465,7 +477,8 @@ void qemu_run_timers(QEMUClock *clock)
     current_time = qemu_get_clock_ns(clock);
 
-    tlist = clock_to_timerlist(clock);
+    ctx = *tls_get_thread_aio_context();
+    tlist = clock_to_timerlist(clock, ctx);
 
     for(;;) {
         qemu_mutex_lock(&tlist->active_timers_lock);
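
For the "run timers in aio_poll" step mentioned in the commit message (the
follow-up patch, which is not included here), a rough sketch of what a
caller-side loop could look like is given below purely as an assumption;
poll_and_run_timers() is a hypothetical wrapper, not code from this series:

#include "block/aio.h"
#include "qemu/timer.h"

/* Hypothetical wrapper: poll the context's file descriptors, then let
 * qemu_run_timers() dispatch the expired timers of the calling thread's
 * AioContext (it resolves the per-thread context internally via
 * tls_get_thread_aio_context() in this series). */
static bool poll_and_run_timers(AioContext *ctx, bool blocking)
{
    bool progress = aio_poll(ctx, blocking);

    qemu_run_timers(vm_clock);
    qemu_run_timers(host_clock);
    qemu_run_timers(rt_clock);

    return progress;
}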