From patchwork Tue Jan 26 08:31:47 2010 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Liran Schour X-Patchwork-Id: 43690 Return-Path: X-Original-To: incoming@patchwork.ozlabs.org Delivered-To: patchwork-incoming@bilbo.ozlabs.org Received: from lists.gnu.org (lists.gnu.org [199.232.76.165]) (using TLSv1 with cipher DHE-RSA-AES256-SHA (256/256 bits)) (Client did not present a certificate) by ozlabs.org (Postfix) with ESMTPS id 49212B7CC2 for ; Tue, 26 Jan 2010 23:09:39 +1100 (EST) Received: from localhost ([127.0.0.1]:46661 helo=lists.gnu.org) by lists.gnu.org with esmtp (Exim 4.43) id 1NZkAT-0007m9-51 for incoming@patchwork.ozlabs.org; Tue, 26 Jan 2010 07:05:13 -0500 Received: from mailman by lists.gnu.org with tmda-scanned (Exim 4.43) id 1NZk9A-0007m3-Mz for qemu-devel@nongnu.org; Tue, 26 Jan 2010 07:03:52 -0500 Received: from exim by lists.gnu.org with spam-scanned (Exim 4.43) id 1NZk95-0007jz-It for qemu-devel@nongnu.org; Tue, 26 Jan 2010 07:03:51 -0500 Received: from [199.232.76.173] (port=37652 helo=monty-python.gnu.org) by lists.gnu.org with esmtp (Exim 4.43) id 1NZk95-0007js-B8 for qemu-devel@nongnu.org; Tue, 26 Jan 2010 07:03:47 -0500 Received: from mtagate2.uk.ibm.com ([194.196.100.162]:39311) by monty-python.gnu.org with esmtps (TLS-1.0:DHE_RSA_AES_256_CBC_SHA1:32) (Exim 4.60) (envelope-from ) id 1NZk94-000785-K7 for qemu-devel@nongnu.org; Tue, 26 Jan 2010 07:03:46 -0500 Received: from d06nrmr1806.portsmouth.uk.ibm.com (d06nrmr1806.portsmouth.uk.ibm.com [9.149.39.193]) by mtagate2.uk.ibm.com (8.13.1/8.13.1) with ESMTP id o0Q8X1H5030758 for ; Tue, 26 Jan 2010 08:33:01 GMT Received: from d06av01.portsmouth.uk.ibm.com (d06av01.portsmouth.uk.ibm.com [9.149.37.212]) by d06nrmr1806.portsmouth.uk.ibm.com (8.13.8/8.13.8/NCO v10.0) with ESMTP id o0Q8X01G704688 for ; Tue, 26 Jan 2010 08:33:01 GMT Received: from d06av01.portsmouth.uk.ibm.com (loopback [127.0.0.1]) by d06av01.portsmouth.uk.ibm.com 
(8.12.11.20060308/8.13.3) with ESMTP id o0Q8WxhE012327 for ; Tue, 26 Jan 2010 08:32:59 GMT Received: from localhost.localdomain (vandev1.haifa.ibm.com [9.148.8.11]) by d06av01.portsmouth.uk.ibm.com (8.12.11.20060308/8.12.11) with ESMTP id o0Q8WvQ6012257; Tue, 26 Jan 2010 08:32:59 GMT From: Liran Schour To: qemu-devel@nongnu.org Date: Tue, 26 Jan 2010 10:31:47 +0200 Message-Id: <1264494709-31848-4-git-send-email-lirans@il.ibm.com> X-Mailer: git-send-email 1.6.0.4 In-Reply-To: <1264494709-31848-3-git-send-email-lirans@il.ibm.com> References: <1264494709-31848-1-git-send-email-lirans@il.ibm.com> <1264494709-31848-2-git-send-email-lirans@il.ibm.com> <1264494709-31848-3-git-send-email-lirans@il.ibm.com> X-detected-operating-system: by monty-python.gnu.org: GNU/Linux 2.6, seldom 2.4 (older, 4) Cc: Liran Schour Subject: [Qemu-devel] [PATCH v3 3/5] Transfer dirty blocks during iterative phase X-BeenThere: qemu-devel@nongnu.org X-Mailman-Version: 2.1.5 Precedence: list List-Id: qemu-devel.nongnu.org List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Sender: qemu-devel-bounces+incoming=patchwork.ozlabs.org@nongnu.org Errors-To: qemu-devel-bounces+incoming=patchwork.ozlabs.org@nongnu.org Start transferring dirty blocks during the iterative stage. 
That will reduce the time that the guest will be suspended Signed-off-by: Liran Schour --- block-migration.c | 135 +++++++++++++++++++++++++++++++++++++++-------------- 1 files changed, 99 insertions(+), 36 deletions(-) diff --git a/block-migration.c b/block-migration.c index 93d86d9..d8755d1 100644 --- a/block-migration.c +++ b/block-migration.c @@ -42,6 +42,7 @@ typedef struct BlkMigDevState { int bulk_completed; int shared_base; int64_t cur_sector; + int64_t cur_dirty; int64_t completed_sectors; int64_t total_sectors; int64_t dirty; @@ -70,6 +71,7 @@ typedef struct BlkMigState { int64_t total_sector_sum; int prev_progress; int bulk_completed; + int dirty_iterations; } BlkMigState; static BlkMigState block_mig_state; @@ -183,6 +185,7 @@ static int mig_save_device_bulk(Monitor *mon, QEMUFile *f, goto error; } block_mig_state.submitted++; + bdrv_reset_dirty(bs, cur_sector, nr_sectors); bmds->cur_sector = cur_sector + nr_sectors; @@ -281,39 +284,88 @@ static int blk_mig_save_bulked_block(Monitor *mon, QEMUFile *f) return ret; } -#define MAX_NUM_BLOCKS 4 - -static void blk_mig_save_dirty_blocks(Monitor *mon, QEMUFile *f) +static void blk_mig_reset_dirty_cursor(void) { BlkMigDevState *bmds; - BlkMigBlock blk; + + QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) { + bmds->cur_dirty = 0; + } +} + +static int mig_save_device_dirty(Monitor *mon, QEMUFile *f, + BlkMigDevState *bmds, int is_async) +{ + BlkMigBlock *blk; + int64_t total_sectors = bmds->total_sectors; int64_t sector; + int nr_sectors; - blk.buf = qemu_malloc(BLOCK_SIZE); + for (sector = bmds->cur_dirty; sector < bmds->total_sectors;) { + if (bdrv_get_dirty(bmds->bs, sector)) { - QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) { - for (sector = 0; sector < bmds->cur_sector;) { - if (bdrv_get_dirty(bmds->bs, sector)) { - if (bdrv_read(bmds->bs, sector, blk.buf, - BDRV_SECTORS_PER_DIRTY_CHUNK) < 0) { - monitor_printf(mon, "Error reading sector %" PRId64 "\n", - sector); - 
qemu_file_set_error(f); - qemu_free(blk.buf); - return; + if (total_sectors - sector < BDRV_SECTORS_PER_DIRTY_CHUNK) { + nr_sectors = total_sectors - sector; + } else { + nr_sectors = BDRV_SECTORS_PER_DIRTY_CHUNK; + } + blk = qemu_malloc(sizeof(BlkMigBlock)); + blk->buf = qemu_malloc(BLOCK_SIZE); + blk->bmds = bmds; + blk->sector = sector; + + if(is_async) { + blk->iov.iov_base = blk->buf; + blk->iov.iov_len = nr_sectors * BDRV_SECTOR_SIZE; + qemu_iovec_init_external(&blk->qiov, &blk->iov, 1); + + blk->aiocb = bdrv_aio_readv(bmds->bs, sector, &blk->qiov, + nr_sectors, blk_mig_read_cb, blk); + if (!blk->aiocb) { + goto error; + } + block_mig_state.submitted++; + } else { + if (bdrv_read(bmds->bs, sector, blk->buf, + nr_sectors) < 0) { + goto error; } - blk.bmds = bmds; - blk.sector = sector; - blk_send(f, &blk); + blk_send(f, blk); - bdrv_reset_dirty(bmds->bs, sector, - BDRV_SECTORS_PER_DIRTY_CHUNK); + qemu_free(blk->buf); + qemu_free(blk); } - sector += BDRV_SECTORS_PER_DIRTY_CHUNK; + + bdrv_reset_dirty(bmds->bs, sector, nr_sectors); + break; } + sector += BDRV_SECTORS_PER_DIRTY_CHUNK; + bmds->cur_dirty = sector; } - qemu_free(blk.buf); + return (bmds->cur_dirty >= bmds->total_sectors); + + error: + monitor_printf(mon, "Error reading sector %" PRId64 "\n", sector); + qemu_file_set_error(f); + qemu_free(blk->buf); + qemu_free(blk); + return 0; +} + +static int blk_mig_save_dirty_block(Monitor *mon, QEMUFile *f, int is_async) +{ + BlkMigDevState *bmds; + int ret = 0; + + QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) { + if(mig_save_device_dirty(mon, f, bmds, is_async) == 0) { + ret = 1; + break; + } + } + + return ret; } static void flush_blks(QEMUFile* f) @@ -404,28 +456,39 @@ static int block_save_live(Monitor *mon, QEMUFile *f, int stage, void *opaque) return 0; } - /* control the rate of transfer */ - while ((block_mig_state.submitted + - block_mig_state.read_done) * BLOCK_SIZE < - qemu_file_get_rate_limit(f)) { - if (blk_mig_save_bulked_block(mon, f) 
== 0) { - /* finished saving bulk on all devices */ - block_mig_state.bulk_completed = 1; - break; + blk_mig_reset_dirty_cursor(); + + if(stage == 2) { + /* control the rate of transfer */ + while ((block_mig_state.submitted + + block_mig_state.read_done) * BLOCK_SIZE < + qemu_file_get_rate_limit(f)) { + if (block_mig_state.bulk_completed == 0) { + /* first finish the bulk phase */ + if (blk_mig_save_bulked_block(mon, f) == 0) { + /* finish saving bulk on all devices */ + block_mig_state.bulk_completed = 1; + } + } else { + if (blk_mig_save_dirty_block(mon, f, 1) == 0) { + /* no more dirty blocks */ + break; + } + } } - } - flush_blks(f); + flush_blks(f); - if (qemu_file_has_error(f)) { - blk_mig_cleanup(mon); - return 0; + if (qemu_file_has_error(f)) { + blk_mig_cleanup(mon); + return 0; + } } if (stage == 3) { /* we know for sure that save bulk is completed */ - blk_mig_save_dirty_blocks(mon, f); + while(blk_mig_save_dirty_block(mon, f, 0) != 0); blk_mig_cleanup(mon); /* report completion */