From patchwork Thu Jun 3 08:56:28 2010 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Gautham R Shenoy X-Patchwork-Id: 54465 Return-Path: X-Original-To: incoming@patchwork.ozlabs.org Delivered-To: patchwork-incoming@bilbo.ozlabs.org Received: from lists.gnu.org (lists.gnu.org [199.232.76.165]) (using TLSv1 with cipher DHE-RSA-AES256-SHA (256/256 bits)) (Client did not present a certificate) by ozlabs.org (Postfix) with ESMTPS id 2FC11B7D48 for ; Thu, 3 Jun 2010 19:06:18 +1000 (EST) Received: from localhost ([127.0.0.1]:59982 helo=lists.gnu.org) by lists.gnu.org with esmtp (Exim 4.43) id 1OK6NS-0003W9-Rv for incoming@patchwork.ozlabs.org; Thu, 03 Jun 2010 05:06:14 -0400 Received: from [140.186.70.92] (port=33682 helo=eggs.gnu.org) by lists.gnu.org with esmtp (Exim 4.43) id 1OK6E7-0006SR-7C for qemu-devel@nongnu.org; Thu, 03 Jun 2010 04:56:36 -0400 Received: from Debian-exim by eggs.gnu.org with spam-scanned (Exim 4.69) (envelope-from ) id 1OK6E4-0002FM-CS for qemu-devel@nongnu.org; Thu, 03 Jun 2010 04:56:35 -0400 Received: from e23smtp07.au.ibm.com ([202.81.31.140]:50345) by eggs.gnu.org with esmtp (Exim 4.69) (envelope-from ) id 1OK6E3-0002Er-KB for qemu-devel@nongnu.org; Thu, 03 Jun 2010 04:56:32 -0400 Received: from d23relay03.au.ibm.com (d23relay03.au.ibm.com [202.81.31.245]) by e23smtp07.au.ibm.com (8.14.4/8.13.1) with ESMTP id o538uWXk016513 for ; Thu, 3 Jun 2010 18:56:32 +1000 Received: from d23av02.au.ibm.com (d23av02.au.ibm.com [9.190.235.138]) by d23relay03.au.ibm.com (8.13.8/8.13.8/NCO v10.0) with ESMTP id o538uTNw1827036 for ; Thu, 3 Jun 2010 18:56:29 +1000 Received: from d23av02.au.ibm.com (loopback [127.0.0.1]) by d23av02.au.ibm.com (8.14.4/8.13.1/NCO v10.0 AVout) with ESMTP id o538uTCU025985 for ; Thu, 3 Jun 2010 18:56:29 +1000 Received: from sofia.in.ibm.com ([9.124.35.109]) by d23av02.au.ibm.com (8.14.4/8.13.1/NCO v10.0 AVin) with ESMTP id o538uSWY025977; Thu, 3 Jun 2010 18:56:29 
+1000 Received: from localhost.localdomain (localhost [IPv6:::1]) by sofia.in.ibm.com (Postfix) with ESMTP id 53134E4AC5; Thu, 3 Jun 2010 14:26:28 +0530 (IST) To: Qemu-development List From: Gautham R Shenoy Date: Thu, 03 Jun 2010 14:26:28 +0530 Message-ID: <20100603085628.25546.23212.stgit@localhost.localdomain> In-Reply-To: <20100603085223.25546.88499.stgit@localhost.localdomain> References: <20100603085223.25546.88499.stgit@localhost.localdomain> User-Agent: StGit/0.15-51-gc750 MIME-Version: 1.0 X-detected-operating-system: by eggs.gnu.org: GNU/Linux 2.6, seldom 2.4 (older, 4) Cc: Anthony Liguori , Avi Kivity , Corentin Chary Subject: [Qemu-devel] [PATCH V3 3/3] qemu: Convert AIO code to use the generic threading infrastructure. X-BeenThere: qemu-devel@nongnu.org X-Mailman-Version: 2.1.5 Precedence: list List-Id: qemu-devel.nongnu.org List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Sender: qemu-devel-bounces+incoming=patchwork.ozlabs.org@nongnu.org Errors-To: qemu-devel-bounces+incoming=patchwork.ozlabs.org@nongnu.org This patch makes the paio subsystem use the generic work offloading infrastructure, thereby decoupling the asynchronous threading framework portion out of posix-aio-compat.c. The patch has been tested with fstress. 
Signed-off-by: Gautham R Shenoy --- posix-aio-compat.c | 155 ++++++++++------------------------------------------ 1 files changed, 29 insertions(+), 126 deletions(-) diff --git a/posix-aio-compat.c b/posix-aio-compat.c index b43c531..f2e7c6a 100644 --- a/posix-aio-compat.c +++ b/posix-aio-compat.c @@ -28,6 +28,7 @@ #include "block_int.h" #include "block/raw-posix-aio.h" +#include "async-work.h" struct qemu_paiocb { @@ -50,6 +51,7 @@ struct qemu_paiocb { struct qemu_paiocb *next; int async_context_id; + struct work_item *work; }; typedef struct PosixAioState { @@ -57,15 +59,8 @@ typedef struct PosixAioState { struct qemu_paiocb *first_aio; } PosixAioState; - -static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER; -static pthread_cond_t cond = PTHREAD_COND_INITIALIZER; -static pthread_t thread_id; -static pthread_attr_t attr; static int max_threads = 64; -static int cur_threads = 0; -static int idle_threads = 0; -static QTAILQ_HEAD(, qemu_paiocb) request_list; +static struct async_queue aio_request_list; #ifdef CONFIG_PREADV static int preadv_present = 1; @@ -84,39 +79,6 @@ static void die(const char *what) die2(errno, what); } -static void mutex_lock(pthread_mutex_t *mutex) -{ - int ret = pthread_mutex_lock(mutex); - if (ret) die2(ret, "pthread_mutex_lock"); -} - -static void mutex_unlock(pthread_mutex_t *mutex) -{ - int ret = pthread_mutex_unlock(mutex); - if (ret) die2(ret, "pthread_mutex_unlock"); -} - -static int cond_timedwait(pthread_cond_t *cond, pthread_mutex_t *mutex, - struct timespec *ts) -{ - int ret = pthread_cond_timedwait(cond, mutex, ts); - if (ret && ret != ETIMEDOUT) die2(ret, "pthread_cond_timedwait"); - return ret; -} - -static void cond_signal(pthread_cond_t *cond) -{ - int ret = pthread_cond_signal(cond); - if (ret) die2(ret, "pthread_cond_signal"); -} - -static void thread_create(pthread_t *thread, pthread_attr_t *attr, - void *(*start_routine)(void*), void *arg) -{ - int ret = pthread_create(thread, attr, start_routine, arg); - if (ret) 
die2(ret, "pthread_create"); -} - static ssize_t handle_aiocb_ioctl(struct qemu_paiocb *aiocb) { int ret; @@ -300,47 +262,27 @@ static ssize_t handle_aiocb_rw(struct qemu_paiocb *aiocb) return nbytes; } -static void *aio_thread(void *unused) +static void aio_thread(struct work_item *work) { - pid_t pid; - pid = getpid(); - - while (1) { - struct qemu_paiocb *aiocb; - ssize_t ret = 0; - qemu_timeval tv; - struct timespec ts; - - qemu_gettimeofday(&tv); - ts.tv_sec = tv.tv_sec + 10; - ts.tv_nsec = 0; - - mutex_lock(&lock); + pid_t pid; - while (QTAILQ_EMPTY(&request_list) && - !(ret == ETIMEDOUT)) { - ret = cond_timedwait(&cond, &lock, &ts); - } + struct qemu_paiocb *aiocb = (struct qemu_paiocb *) work->private; + ssize_t ret = 0; - if (QTAILQ_EMPTY(&request_list)) - break; + pid = getpid(); - aiocb = QTAILQ_FIRST(&request_list); - QTAILQ_REMOVE(&request_list, aiocb, node); - aiocb->active = 1; - idle_threads--; - mutex_unlock(&lock); + aiocb->active = 1; - switch (aiocb->aio_type & QEMU_AIO_TYPE_MASK) { - case QEMU_AIO_READ: - case QEMU_AIO_WRITE: + switch (aiocb->aio_type & QEMU_AIO_TYPE_MASK) { + case QEMU_AIO_READ: + case QEMU_AIO_WRITE: ret = handle_aiocb_rw(aiocb); break; - case QEMU_AIO_FLUSH: - ret = handle_aiocb_flush(aiocb); - break; - case QEMU_AIO_IOCTL: + case QEMU_AIO_FLUSH: + ret = handle_aiocb_flush(aiocb); + break; + case QEMU_AIO_IOCTL: ret = handle_aiocb_ioctl(aiocb); break; default: @@ -349,57 +291,28 @@ static void *aio_thread(void *unused) break; } - mutex_lock(&lock); - aiocb->ret = ret; - idle_threads++; - mutex_unlock(&lock); - - if (kill(pid, aiocb->ev_signo)) die("kill failed"); - } - - idle_threads--; - cur_threads--; - mutex_unlock(&lock); + aiocb->ret = ret; - return NULL; -} - -static void spawn_thread(void) -{ - sigset_t set, oldset; - - cur_threads++; - idle_threads++; - - /* block all signals */ - if (sigfillset(&set)) die("sigfillset"); - if (sigprocmask(SIG_SETMASK, &set, &oldset)) die("sigprocmask"); - - thread_create(&thread_id, 
&attr, aio_thread, NULL); - - if (sigprocmask(SIG_SETMASK, &oldset, NULL)) die("sigprocmask restore"); + if (kill(pid, aiocb->ev_signo)) die("kill failed"); } static void qemu_paio_submit(struct qemu_paiocb *aiocb) { + struct work_item *work; + aiocb->ret = -EINPROGRESS; aiocb->active = 0; - mutex_lock(&lock); - if (idle_threads == 0 && cur_threads < max_threads) - spawn_thread(); - QTAILQ_INSERT_TAIL(&request_list, aiocb, node); - mutex_unlock(&lock); - cond_signal(&cond); + + work = async_work_init(&aio_request_list, aio_thread, aiocb); + aiocb->work = work; + qemu_async_submit(&aio_request_list, work); } static ssize_t qemu_paio_return(struct qemu_paiocb *aiocb) { ssize_t ret; - mutex_lock(&lock); ret = aiocb->ret; - mutex_unlock(&lock); - return ret; } @@ -535,14 +448,14 @@ static void paio_cancel(BlockDriverAIOCB *blockacb) struct qemu_paiocb *acb = (struct qemu_paiocb *)blockacb; int active = 0; - mutex_lock(&lock); if (!acb->active) { - QTAILQ_REMOVE(&request_list, acb, node); - acb->ret = -ECANCELED; + if (!qemu_async_cancel_work(&aio_request_list, acb->work)) + acb->ret = -ECANCELED; + else + active = 1; } else if (acb->ret == -EINPROGRESS) { active = 1; } - mutex_unlock(&lock); if (active) { /* fail safe: if the aio could not be canceled, we wait for @@ -615,7 +528,6 @@ int paio_init(void) struct sigaction act; PosixAioState *s; int fds[2]; - int ret; if (posix_aio_state) return 0; @@ -642,16 +554,7 @@ int paio_init(void) qemu_aio_set_fd_handler(s->rfd, posix_aio_read, NULL, posix_aio_flush, posix_aio_process_queue, s); - ret = pthread_attr_init(&attr); - if (ret) - die2(ret, "pthread_attr_init"); - - ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED); - if (ret) - die2(ret, "pthread_attr_setdetachstate"); - - QTAILQ_INIT(&request_list); - posix_aio_state = s; + async_queue_init(&aio_request_list, max_threads, max_threads); return 0; }