From patchwork Thu Feb 5 18:58:23 2015
X-Patchwork-Submitter: Max Reitz
X-Patchwork-Id: 436935
From: Max Reitz <mreitz@redhat.com>
To: qemu-devel@nongnu.org
Cc: Kevin Wolf, Stefan Hajnoczi, Max Reitz
Date: Thu, 5 Feb 2015 13:58:23 -0500
Message-Id: <1423162705-32065-15-git-send-email-mreitz@redhat.com>
In-Reply-To: <1423162705-32065-1-git-send-email-mreitz@redhat.com>
References: <1423162705-32065-1-git-send-email-mreitz@redhat.com>
Subject: [Qemu-devel] [PATCH v4 14/16] block: Clamp BlockBackend requests

BlockBackend is used as the interface between the block layer and guest
devices. It should therefore ensure that all requests are clamped to
the image size.
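The core of the new validation is the overflow-safe range comparison in
blk_check_byte_request() in the hunks below. As a minimal standalone
sketch of that comparison (hypothetical helper name and signature, not
part of the patch):

    #include <errno.h>
    #include <limits.h>
    #include <stddef.h>
    #include <stdint.h>

    /* Sketch of the range check blk_check_byte_request() performs.
     * The bound is written as "len - offset < size" rather than
     * "offset + size > len" so the left-hand side cannot overflow
     * once offset has been validated. */
    static int check_byte_request(int64_t len, int64_t offset, size_t size)
    {
        if (size > INT_MAX) {
            return -EIO;                    /* request too large */
        }
        if (offset < 0 || offset > len) {
            return -EIO;                    /* start outside the image */
        }
        if ((uint64_t)(len - offset) < size) {
            return -EIO;                    /* end would cross EOF */
        }
        return 0;
    }

The sector-based blk_check_request() wrapper guards the conversion to
bytes in the same spirit, rejecting sector_num and nb_sectors values
whose multiplication by BDRV_SECTOR_SIZE would overflow.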
Signed-off-by: Max Reitz
Reviewed-by: Eric Blake
Reviewed-by: Kevin Wolf
Reviewed-by: Stefan Hajnoczi
---
 block/block-backend.c | 152 ++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 152 insertions(+)

diff --git a/block/block-backend.c b/block/block-backend.c
index d083b85..aabe569 100644
--- a/block/block-backend.c
+++ b/block/block-backend.c
@@ -31,6 +31,16 @@ struct BlockBackend {
     void *dev_opaque;
 };
 
+typedef struct BlockBackendAIOCB {
+    BlockAIOCB common;
+    QEMUBH *bh;
+    int ret;
+} BlockBackendAIOCB;
+
+static const AIOCBInfo block_backend_aiocb_info = {
+    .aiocb_size = sizeof(BlockBackendAIOCB),
+};
+
 static void drive_info_del(DriveInfo *dinfo);
 
 /* All the BlockBackends (except for hidden ones) */
@@ -428,39 +438,137 @@ void blk_iostatus_enable(BlockBackend *blk)
     bdrv_iostatus_enable(blk->bs);
 }
 
+static int blk_check_byte_request(BlockBackend *blk, int64_t offset,
+                                  size_t size)
+{
+    int64_t len;
+
+    if (size > INT_MAX) {
+        return -EIO;
+    }
+
+    if (!blk_is_inserted(blk)) {
+        return -ENOMEDIUM;
+    }
+
+    len = blk_getlength(blk);
+    if (len < 0) {
+        return len;
+    }
+
+    if (offset < 0) {
+        return -EIO;
+    }
+
+    if (offset > len || len - offset < size) {
+        return -EIO;
+    }
+
+    return 0;
+}
+
+static int blk_check_request(BlockBackend *blk, int64_t sector_num,
+                             int nb_sectors)
+{
+    if (sector_num < 0 || sector_num > INT64_MAX / BDRV_SECTOR_SIZE) {
+        return -EIO;
+    }
+
+    if (nb_sectors < 0 || nb_sectors > INT_MAX / BDRV_SECTOR_SIZE) {
+        return -EIO;
+    }
+
+    return blk_check_byte_request(blk, sector_num * BDRV_SECTOR_SIZE,
+                                  nb_sectors * BDRV_SECTOR_SIZE);
+}
+
 int blk_read(BlockBackend *blk, int64_t sector_num, uint8_t *buf,
              int nb_sectors)
 {
+    int ret = blk_check_request(blk, sector_num, nb_sectors);
+    if (ret < 0) {
+        return ret;
+    }
+
     return bdrv_read(blk->bs, sector_num, buf, nb_sectors);
 }
 
 int blk_read_unthrottled(BlockBackend *blk, int64_t sector_num, uint8_t *buf,
                          int nb_sectors)
 {
+    int ret = blk_check_request(blk, sector_num, nb_sectors);
+    if (ret < 0) {
+        return ret;
+    }
+
     return bdrv_read_unthrottled(blk->bs, sector_num, buf, nb_sectors);
 }
 
 int blk_write(BlockBackend *blk, int64_t sector_num, const uint8_t *buf,
               int nb_sectors)
 {
+    int ret = blk_check_request(blk, sector_num, nb_sectors);
+    if (ret < 0) {
+        return ret;
+    }
+
     return bdrv_write(blk->bs, sector_num, buf, nb_sectors);
 }
 
+static void error_callback_bh(void *opaque)
+{
+    struct BlockBackendAIOCB *acb = opaque;
+    qemu_bh_delete(acb->bh);
+    acb->common.cb(acb->common.opaque, acb->ret);
+    qemu_aio_unref(acb);
+}
+
+static BlockAIOCB *abort_aio_request(BlockBackend *blk, BlockCompletionFunc *cb,
+                                     void *opaque, int ret)
+{
+    struct BlockBackendAIOCB *acb;
+    QEMUBH *bh;
+
+    acb = blk_aio_get(&block_backend_aiocb_info, blk, cb, opaque);
+    acb->ret = ret;
+
+    bh = aio_bh_new(blk_get_aio_context(blk), error_callback_bh, acb);
+    acb->bh = bh;
+    qemu_bh_schedule(bh);
+
+    return &acb->common;
+}
+
 BlockAIOCB *blk_aio_write_zeroes(BlockBackend *blk, int64_t sector_num,
                                  int nb_sectors, BdrvRequestFlags flags,
                                  BlockCompletionFunc *cb, void *opaque)
 {
+    int ret = blk_check_request(blk, sector_num, nb_sectors);
+    if (ret < 0) {
+        return abort_aio_request(blk, cb, opaque, ret);
+    }
+
     return bdrv_aio_write_zeroes(blk->bs, sector_num, nb_sectors, flags,
                                  cb, opaque);
 }
 
 int blk_pread(BlockBackend *blk, int64_t offset, void *buf, int count)
 {
+    int ret = blk_check_byte_request(blk, offset, count);
+    if (ret < 0) {
+        return ret;
+    }
+
     return bdrv_pread(blk->bs, offset, buf, count);
 }
 
 int blk_pwrite(BlockBackend *blk, int64_t offset, const void *buf, int count)
 {
+    int ret = blk_check_byte_request(blk, offset, count);
+    if (ret < 0) {
+        return ret;
+    }
+
     return bdrv_pwrite(blk->bs, offset, buf, count);
 }
 
@@ -483,6 +591,11 @@ BlockAIOCB *blk_aio_readv(BlockBackend *blk, int64_t sector_num,
                           QEMUIOVector *iov, int nb_sectors,
                           BlockCompletionFunc *cb, void *opaque)
 {
+    int ret = blk_check_request(blk, sector_num, nb_sectors);
+    if (ret < 0) {
+        return abort_aio_request(blk, cb, opaque, ret);
+    }
+
     return bdrv_aio_readv(blk->bs, sector_num, iov, nb_sectors, cb, opaque);
 }
 
@@ -490,6 +603,11 @@ BlockAIOCB *blk_aio_writev(BlockBackend *blk, int64_t sector_num,
                            QEMUIOVector *iov, int nb_sectors,
                            BlockCompletionFunc *cb, void *opaque)
 {
+    int ret = blk_check_request(blk, sector_num, nb_sectors);
+    if (ret < 0) {
+        return abort_aio_request(blk, cb, opaque, ret);
+    }
+
     return bdrv_aio_writev(blk->bs, sector_num, iov, nb_sectors, cb, opaque);
 }
 
@@ -503,6 +621,11 @@ BlockAIOCB *blk_aio_discard(BlockBackend *blk,
                             int64_t sector_num, int nb_sectors,
                             BlockCompletionFunc *cb, void *opaque)
 {
+    int ret = blk_check_request(blk, sector_num, nb_sectors);
+    if (ret < 0) {
+        return abort_aio_request(blk, cb, opaque, ret);
+    }
+
     return bdrv_aio_discard(blk->bs, sector_num, nb_sectors, cb, opaque);
 }
 
@@ -518,6 +641,15 @@ void blk_aio_cancel_async(BlockAIOCB *acb)
 
 int blk_aio_multiwrite(BlockBackend *blk, BlockRequest *reqs, int num_reqs)
 {
+    int i, ret;
+
+    for (i = 0; i < num_reqs; i++) {
+        ret = blk_check_request(blk, reqs[i].sector, reqs[i].nb_sectors);
+        if (ret < 0) {
+            return ret;
+        }
+    }
+
     return bdrv_aio_multiwrite(blk->bs, reqs, num_reqs);
 }
 
@@ -534,6 +666,11 @@ BlockAIOCB *blk_aio_ioctl(BlockBackend *blk, unsigned long int req, void *buf,
 
 int blk_co_discard(BlockBackend *blk, int64_t sector_num, int nb_sectors)
 {
+    int ret = blk_check_request(blk, sector_num, nb_sectors);
+    if (ret < 0) {
+        return ret;
+    }
+
     return bdrv_co_discard(blk->bs, sector_num, nb_sectors);
 }
 
@@ -711,12 +848,22 @@ void *blk_aio_get(const AIOCBInfo *aiocb_info, BlockBackend *blk,
 int coroutine_fn blk_co_write_zeroes(BlockBackend *blk, int64_t sector_num,
                                      int nb_sectors, BdrvRequestFlags flags)
 {
+    int ret = blk_check_request(blk, sector_num, nb_sectors);
+    if (ret < 0) {
+        return ret;
+    }
+
     return bdrv_co_write_zeroes(blk->bs, sector_num, nb_sectors, flags);
 }
 
 int blk_write_compressed(BlockBackend *blk, int64_t sector_num,
                          const uint8_t *buf, int nb_sectors)
 {
+    int ret = blk_check_request(blk, sector_num, nb_sectors);
+    if (ret < 0) {
+        return ret;
+    }
+
     return bdrv_write_compressed(blk->bs, sector_num, buf, nb_sectors);
 }
 
@@ -727,6 +874,11 @@ int blk_truncate(BlockBackend *blk, int64_t offset)
 
 int blk_discard(BlockBackend *blk, int64_t sector_num, int nb_sectors)
 {
+    int ret = blk_check_request(blk, sector_num, nb_sectors);
+    if (ret < 0) {
+        return ret;
+    }
+
     return bdrv_discard(blk->bs, sector_num, nb_sectors);
 }
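
As a usage illustration (assumed caller, not part of this series): with
the checks above in place, a byte request crossing the end of the image
fails with -EIO at the BlockBackend layer instead of being passed down
to the BlockDriverState:

    #include <assert.h>
    #include <stdint.h>
    #include "sysemu/block-backend.h"

    /* Hypothetical caller: blk is assumed to be an open BlockBackend
     * with media inserted and an image of at least 256 bytes. The read
     * starts 256 bytes before EOF but asks for 512 bytes, so
     * blk_check_byte_request() rejects it before bdrv_pread() runs. */
    static void demo_clamped_read(BlockBackend *blk)
    {
        uint8_t buf[512];
        int64_t len = blk_getlength(blk);

        int ret = blk_pread(blk, len - 256, buf, 512);
        assert(ret == -EIO);
    }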