From patchwork Thu Oct 13 12:08:22 2011
X-Patchwork-Submitter: Stefan Hajnoczi
X-Patchwork-Id: 119457
From: Stefan Hajnoczi <stefanha@linux.vnet.ibm.com>
To: qemu-devel@nongnu.org
Date: Thu, 13 Oct 2011 13:08:22 +0100
Message-Id: <1318507705-13840-3-git-send-email-stefanha@linux.vnet.ibm.com>
In-Reply-To: <1318507705-13840-1-git-send-email-stefanha@linux.vnet.ibm.com>
References: <1318507705-13840-1-git-send-email-stefanha@linux.vnet.ibm.com>
Cc: Kevin Wolf, Stefan Hajnoczi
Subject: [Qemu-devel] [PATCH v2 2/5] block: switch bdrv_read()/bdrv_write() to coroutines

The bdrv_read()/bdrv_write() functions call .bdrv_read()/.bdrv_write().
They should go through bdrv_co_do_readv() and bdrv_co_do_writev() instead
in order to unify request processing code across sync, aio, and coroutine
interfaces.  This is also an important step towards removing
BlockDriverState .bdrv_read()/.bdrv_write() in the future.
Signed-off-by: Stefan Hajnoczi <stefanha@linux.vnet.ibm.com>
---
 block.c |  112 +++++++++++++++++++++++++++++++++++----------------------------
 1 files changed, 62 insertions(+), 50 deletions(-)

diff --git a/block.c b/block.c
index f4731ec..ae8fc80 100644
--- a/block.c
+++ b/block.c
@@ -44,6 +44,8 @@
 #include <windows.h>
 #endif
 
+#define NOT_DONE 0x7fffffff /* used while emulated sync operation in progress */
+
 static void bdrv_dev_change_media_cb(BlockDriverState *bs, bool load);
 static BlockDriverAIOCB *bdrv_aio_readv_em(BlockDriverState *bs,
         int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
@@ -74,6 +76,8 @@ static int coroutine_fn bdrv_co_writev_em(BlockDriverState *bs,
 static int coroutine_fn bdrv_co_flush_em(BlockDriverState *bs);
 static int coroutine_fn bdrv_co_do_readv(BlockDriverState *bs,
     int64_t sector_num, int nb_sectors, QEMUIOVector *qiov);
+static int coroutine_fn bdrv_co_do_writev(BlockDriverState *bs,
+    int64_t sector_num, int nb_sectors, QEMUIOVector *qiov);
 
 static QTAILQ_HEAD(, BlockDriverState) bdrv_states =
     QTAILQ_HEAD_INITIALIZER(bdrv_states);
@@ -1042,30 +1046,69 @@ static inline bool bdrv_has_async_flush(BlockDriver *drv)
     return drv->bdrv_aio_flush != bdrv_aio_flush_em;
 }
 
-/* return < 0 if error. See bdrv_write() for the return codes */
-int bdrv_read(BlockDriverState *bs, int64_t sector_num,
-              uint8_t *buf, int nb_sectors)
+typedef struct RwCo {
+    BlockDriverState *bs;
+    int64_t sector_num;
+    int nb_sectors;
+    QEMUIOVector *qiov;
+    bool is_write;
+    int ret;
+} RwCo;
+
+static void coroutine_fn bdrv_rw_co_entry(void *opaque)
 {
-    BlockDriver *drv = bs->drv;
+    RwCo *rwco = opaque;
 
-    if (!drv)
-        return -ENOMEDIUM;
+    if (!rwco->is_write) {
+        rwco->ret = bdrv_co_do_readv(rwco->bs, rwco->sector_num,
+                                     rwco->nb_sectors, rwco->qiov);
+    } else {
+        rwco->ret = bdrv_co_do_writev(rwco->bs, rwco->sector_num,
+                                      rwco->nb_sectors, rwco->qiov);
+    }
+}
 
-    if (bdrv_has_async_rw(drv) && qemu_in_coroutine()) {
-        QEMUIOVector qiov;
-        struct iovec iov = {
-            .iov_base = (void *)buf,
-            .iov_len = nb_sectors * BDRV_SECTOR_SIZE,
-        };
+/*
+ * Process a synchronous request using coroutines
+ */
+static int bdrv_rw_co(BlockDriverState *bs, int64_t sector_num, uint8_t *buf,
+                      int nb_sectors, bool is_write)
+{
+    QEMUIOVector qiov;
+    struct iovec iov = {
+        .iov_base = (void *)buf,
+        .iov_len = nb_sectors * BDRV_SECTOR_SIZE,
+    };
+    Coroutine *co;
+    RwCo rwco = {
+        .bs = bs,
+        .sector_num = sector_num,
+        .nb_sectors = nb_sectors,
+        .qiov = &qiov,
+        .is_write = is_write,
+        .ret = NOT_DONE,
+    };
 
-        qemu_iovec_init_external(&qiov, &iov, 1);
-        return bdrv_co_readv(bs, sector_num, nb_sectors, &qiov);
-    }
+    qemu_iovec_init_external(&qiov, &iov, 1);
 
-    if (bdrv_check_request(bs, sector_num, nb_sectors))
-        return -EIO;
+    if (qemu_in_coroutine()) {
+        /* Fast-path if already in coroutine context */
+        bdrv_rw_co_entry(&rwco);
+    } else {
+        co = qemu_coroutine_create(bdrv_rw_co_entry);
+        qemu_coroutine_enter(co, &rwco);
+        while (rwco.ret == NOT_DONE) {
+            qemu_aio_wait();
+        }
+    }
+    return rwco.ret;
+}
 
-    return drv->bdrv_read(bs, sector_num, buf, nb_sectors);
+/* return < 0 if error. See bdrv_write() for the return codes */
+int bdrv_read(BlockDriverState *bs, int64_t sector_num,
+              uint8_t *buf, int nb_sectors)
+{
+    return bdrv_rw_co(bs, sector_num, buf, nb_sectors, false);
 }
 
 static void set_dirty_bitmap(BlockDriverState *bs, int64_t sector_num,
@@ -1105,36 +1148,7 @@ static void set_dirty_bitmap(BlockDriverState *bs, int64_t sector_num,
 int bdrv_write(BlockDriverState *bs, int64_t sector_num,
                const uint8_t *buf, int nb_sectors)
 {
-    BlockDriver *drv = bs->drv;
-
-    if (!bs->drv)
-        return -ENOMEDIUM;
-
-    if (bdrv_has_async_rw(drv) && qemu_in_coroutine()) {
-        QEMUIOVector qiov;
-        struct iovec iov = {
-            .iov_base = (void *)buf,
-            .iov_len = nb_sectors * BDRV_SECTOR_SIZE,
-        };
-
-        qemu_iovec_init_external(&qiov, &iov, 1);
-        return bdrv_co_writev(bs, sector_num, nb_sectors, &qiov);
-    }
-
-    if (bs->read_only)
-        return -EACCES;
-    if (bdrv_check_request(bs, sector_num, nb_sectors))
-        return -EIO;
-
-    if (bs->dirty_bitmap) {
-        set_dirty_bitmap(bs, sector_num, nb_sectors, 1);
-    }
-
-    if (bs->wr_highest_sector < sector_num + nb_sectors - 1) {
-        bs->wr_highest_sector = sector_num + nb_sectors - 1;
-    }
-
-    return drv->bdrv_write(bs, sector_num, buf, nb_sectors);
+    return bdrv_rw_co(bs, sector_num, (uint8_t *)buf, nb_sectors, true);
 }
 
 int bdrv_pread(BlockDriverState *bs, int64_t offset,
@@ -2912,8 +2926,6 @@ static void bdrv_rw_em_cb(void *opaque, int ret)
     *(int *)opaque = ret;
 }
 
-#define NOT_DONE 0x7fffffff
-
 static int bdrv_read_em(BlockDriverState *bs, int64_t sector_num,
                         uint8_t *buf, int nb_sectors)
 {
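
The core idea of bdrv_rw_co() above is a synchronous wrapper built on an
asynchronous path: pack the request into a state struct, kick the work off,
then spin on a NOT_DONE sentinel while letting the event loop run until the
real return value has been stored.  The standalone sketch below models only
that control flow; RwState, fake_submit() and fake_aio_wait() are invented
stand-ins for illustration, not QEMU APIs (the patch itself uses
qemu_coroutine_create(), qemu_coroutine_enter() and qemu_aio_wait()).

    /*
     * Toy model of the bdrv_rw_co() pattern.  All names here are
     * illustrative only; none of them exist in QEMU.
     */
    #include <stdio.h>

    #define NOT_DONE 0x7fffffff     /* sentinel: request still in flight */

    typedef struct RwState {
        const char *op;             /* "read" or "write" */
        int nb_sectors;
        int ret;                    /* NOT_DONE until the request completes */
    } RwState;

    /* One pending completion, standing in for the AIO event loop. */
    static RwState *pending;

    /* Stand-in for qemu_aio_wait(): deliver one completion if available. */
    static void fake_aio_wait(void)
    {
        if (pending) {
            pending->ret = 0;       /* report success for the toy request */
            pending = NULL;
        }
    }

    /* Stand-in for entering the coroutine: submit the request asynchronously. */
    static void fake_submit(RwState *rw)
    {
        printf("submitting %s of %d sectors\n", rw->op, rw->nb_sectors);
        pending = rw;               /* completion delivered by fake_aio_wait() */
    }

    /*
     * Synchronous wrapper in the style of bdrv_rw_co(): start the request,
     * then poll the NOT_DONE sentinel, driving the event loop until the
     * completion overwrites it with the real return value.
     */
    static int sync_rw(const char *op, int nb_sectors)
    {
        RwState rw = { .op = op, .nb_sectors = nb_sectors, .ret = NOT_DONE };

        fake_submit(&rw);
        while (rw.ret == NOT_DONE) {
            fake_aio_wait();
        }
        return rw.ret;
    }

    int main(void)
    {
        printf("read returned %d\n", sync_rw("read", 4));
        printf("write returned %d\n", sync_rw("write", 8));
        return 0;
    }

Polling on the 0x7fffffff sentinel is safe because completed block requests
are expected to return either 0 or a negative errno value, so a real result
can never be mistaken for "still in flight".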