From patchwork Mon Mar 28 21:14:27 2011
X-Patchwork-Submitter: "Michael S. Tsirkin"
X-Patchwork-Id: 88691
Date: Mon, 28 Mar 2011 23:14:27 +0200
From: "Michael S. Tsirkin"
To: qemu-devel@nongnu.org, Anthony Liguori, gleb@redhat.com, Jason Wang,
 Alex Williamson, Jes Sorensen, Amit Shah, Christoph Hellwig,
 armbru@redhat.com, kwolf@redhat.com
Subject: [Qemu-devel] [PATCH 3/3] vhost: roll our own cpu map variant
List-Id: qemu-devel.nongnu.org

vhost used cpu_physical_memory_map to get the virtual address for the
ring; however, that function exits on an illegal RAM address.  Since the
addresses are guest-controlled, we shouldn't do that.  Switch to our own
variant that uses the vhost tables and returns an error instead of
exiting.

Signed-off-by: Michael S. Tsirkin
---
 hw/vhost.c |   66 +++++++++++++++++++++++++++++++++++++++++++++++------------
 1 files changed, 52 insertions(+), 14 deletions(-)
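
For reference, the translation the new helper performs can be modelled in
isolation.  The sketch below is not part of the patch: mem_region, mem_table,
translate() and the values in main() are made-up stand-ins for the vhost
memory table (the patch itself walks dev->mem->regions with the
range_covers_byte()/range_get_last() helpers it calls), but the overflow clamp
and the truncation at a region boundary follow the same logic:

/*
 * Illustrative sketch only, not part of the patch: a standalone model of
 * the lookup done by vhost_memory_map() below.  mem_region/mem_table and
 * the demo values in main() are invented for this example.
 */
#include <stdint.h>
#include <stdio.h>

struct mem_region {            /* stand-in for struct vhost_memory_region */
    uint64_t guest_phys_addr;  /* guest-physical start of the region */
    uint64_t memory_size;      /* length of the region in bytes */
    uint64_t userspace_addr;   /* QEMU virtual address backing the region */
};

struct mem_table {             /* stand-in for dev->mem */
    int nregions;
    struct mem_region *regions;
};

/* Translate a guest-physical range to a host virtual address.  On success,
 * *size may be shrunk to what fits in one region; if no region covers addr,
 * return 0 to report failure instead of aborting. */
static uint64_t translate(struct mem_table *mem, uint64_t addr, uint64_t *size)
{
    int i;

    /* Clamp *size so that addr + *size no longer wraps past the top of the
     * 64-bit space. */
    if (addr + *size < addr) {
        *size = -addr;
    }
    for (i = 0; i < mem->nregions; ++i) {
        struct mem_region *reg = &mem->regions[i];
        uint64_t reg_last = reg->guest_phys_addr + reg->memory_size - 1;

        if (addr < reg->guest_phys_addr || addr > reg_last) {
            continue;                      /* addr not covered by this region */
        }
        if (addr + *size - 1 > reg_last) {
            *size = reg_last - addr + 1;   /* truncate at the region end */
        }
        return reg->userspace_addr + (addr - reg->guest_phys_addr);
    }
    return 0;                              /* no region covers addr */
}

int main(void)
{
    struct mem_region regions[] = {
        { 0x0,        0x100000, 0x7f0000000000ull },   /* made-up layout */
        { 0x40000000, 0x100000, 0x7f0000200000ull },
    };
    struct mem_table mem = { 2, regions };
    uint64_t size = 0x2000;
    uint64_t va = translate(&mem, 0x400ff000, &size);

    /* Expect a hit in the second region with size truncated to 0x1000. */
    printf("va=0x%llx size=0x%llx\n",
           (unsigned long long)va, (unsigned long long)size);
    return 0;
}

The point is the return value: a lookup miss is reported to the caller rather
than killing the guest, which is what the commit message asks for when the
addresses are guest-controlled.
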
diff --git a/hw/vhost.c b/hw/vhost.c
index c17a831..5fd09b5 100644
--- a/hw/vhost.c
+++ b/hw/vhost.c
@@ -271,6 +271,44 @@ static inline void vhost_dev_log_resize(struct vhost_dev* dev, uint64_t size)
     dev->log_size = size;
 }
 
+/* Same as cpu_physical_memory_map but doesn't allocate,
+ * doesn't use a bounce buffer, checks input for errors such
+ * as wrap-around, and does not exit on failure. */
+static void *vhost_memory_map(struct vhost_dev *dev,
+                              uint64_t addr,
+                              uint64_t *size,
+                              int is_write)
+{
+    int i;
+    if (addr + *size < addr) {
+        *size = -addr;
+    }
+    for (i = 0; i < dev->mem->nregions; ++i) {
+        struct vhost_memory_region *reg = dev->mem->regions + i;
+        uint64_t rlast, mlast, userspace_addr;
+        if (!range_covers_byte(reg->guest_phys_addr, reg->memory_size, addr)) {
+            continue;
+        }
+        rlast = range_get_last(reg->guest_phys_addr, reg->memory_size);
+        mlast = range_get_last(addr, *size);
+        if (rlast < mlast) {
+            *size -= (mlast - rlast);
+        }
+        userspace_addr = reg->userspace_addr + addr - reg->guest_phys_addr;
+        if ((unsigned long)userspace_addr != userspace_addr) {
+            return NULL;
+        }
+        return (void *)((unsigned long)userspace_addr);
+    }
+    return NULL;
+}
+
+/* Placeholder to keep the API consistent with cpu_physical_memory_unmap. */
+static void vhost_memory_unmap(void *buffer, uint64_t len,
+                               int is_write, uint64_t access_len)
+{
+}
+
 static int vhost_verify_ring_mappings(struct vhost_dev *dev,
                                       uint64_t start_addr,
                                       uint64_t size)
@@ -285,7 +323,7 @@ static int vhost_verify_ring_mappings(struct vhost_dev *dev,
             continue;
         }
         l = vq->ring_size;
-        p = cpu_physical_memory_map(vq->ring_phys, &l, 1);
+        p = vhost_memory_map(dev, vq->ring_phys, &l, 1);
         if (!p || l != vq->ring_size) {
             virtio_error(dev->vdev, "Unable to map ring buffer for ring %d\n", i);
             return -ENOMEM;
@@ -294,7 +332,7 @@ static int vhost_verify_ring_mappings(struct vhost_dev *dev,
             virtio_error(dev->vdev, "Ring buffer relocated for ring %d\n", i);
             return -EBUSY;
         }
-        cpu_physical_memory_unmap(p, l, 0, 0);
+        vhost_memory_unmap(p, l, 0, 0);
     }
     return 0;
 }
@@ -480,21 +518,21 @@ static int vhost_virtqueue_init(struct vhost_dev *dev,
 
     s = l = virtio_queue_get_desc_size(vdev, idx);
     a = virtio_queue_get_desc_addr(vdev, idx);
-    vq->desc = cpu_physical_memory_map(a, &l, 0);
+    vq->desc = vhost_memory_map(dev, a, &l, 0);
     if (!vq->desc || l != s) {
         r = -ENOMEM;
         goto fail_alloc_desc;
     }
     s = l = virtio_queue_get_avail_size(vdev, idx);
     a = virtio_queue_get_avail_addr(vdev, idx);
-    vq->avail = cpu_physical_memory_map(a, &l, 0);
+    vq->avail = vhost_memory_map(dev, a, &l, 0);
     if (!vq->avail || l != s) {
         r = -ENOMEM;
         goto fail_alloc_avail;
     }
     vq->used_size = s = l = virtio_queue_get_used_size(vdev, idx);
     vq->used_phys = a = virtio_queue_get_used_addr(vdev, idx);
-    vq->used = cpu_physical_memory_map(a, &l, 1);
+    vq->used = vhost_memory_map(dev, a, &l, 1);
     if (!vq->used || l != s) {
         r = -ENOMEM;
         goto fail_alloc_used;
@@ -502,7 +540,7 @@ static int vhost_virtqueue_init(struct vhost_dev *dev,
 
     vq->ring_size = s = l = virtio_queue_get_ring_size(vdev, idx);
     vq->ring_phys = a = virtio_queue_get_ring_addr(vdev, idx);
-    vq->ring = cpu_physical_memory_map(a, &l, 1);
+    vq->ring = vhost_memory_map(dev, a, &l, 1);
     if (!vq->ring || l != s) {
         r = -ENOMEM;
         goto fail_alloc_ring;
@@ -540,16 +578,16 @@ fail_kick:
     vdev->binding->set_host_notifier(vdev->binding_opaque, idx, false);
 fail_host_notifier:
 fail_alloc:
-    cpu_physical_memory_unmap(vq->ring, virtio_queue_get_ring_size(vdev, idx),
+    vhost_memory_unmap(vq->ring, virtio_queue_get_ring_size(vdev, idx),
                               0, 0);
 fail_alloc_ring:
-    cpu_physical_memory_unmap(vq->used, virtio_queue_get_used_size(vdev, idx),
+    vhost_memory_unmap(vq->used, virtio_queue_get_used_size(vdev, idx),
                               0, 0);
 fail_alloc_used:
-    cpu_physical_memory_unmap(vq->avail, virtio_queue_get_avail_size(vdev, idx),
+    vhost_memory_unmap(vq->avail, virtio_queue_get_avail_size(vdev, idx),
                               0, 0);
 fail_alloc_avail:
-    cpu_physical_memory_unmap(vq->desc, virtio_queue_get_desc_size(vdev, idx),
+    vhost_memory_unmap(vq->desc, virtio_queue_get_desc_size(vdev, idx),
                               0, 0);
 fail_alloc_desc:
     return r;
@@ -577,13 +615,13 @@ static void vhost_virtqueue_cleanup(struct vhost_dev *dev,
     }
     virtio_queue_set_last_avail_idx(vdev, idx, state.num);
     assert (r >= 0);
-    cpu_physical_memory_unmap(vq->ring, virtio_queue_get_ring_size(vdev, idx),
+    vhost_memory_unmap(vq->ring, virtio_queue_get_ring_size(vdev, idx),
                               0, virtio_queue_get_ring_size(vdev, idx));
-    cpu_physical_memory_unmap(vq->used, virtio_queue_get_used_size(vdev, idx),
+    vhost_memory_unmap(vq->used, virtio_queue_get_used_size(vdev, idx),
                               1, virtio_queue_get_used_size(vdev, idx));
-    cpu_physical_memory_unmap(vq->avail, virtio_queue_get_avail_size(vdev, idx),
+    vhost_memory_unmap(vq->avail, virtio_queue_get_avail_size(vdev, idx),
                               0, virtio_queue_get_avail_size(vdev, idx));
-    cpu_physical_memory_unmap(vq->desc, virtio_queue_get_desc_size(vdev, idx),
+    vhost_memory_unmap(vq->desc, virtio_queue_get_desc_size(vdev, idx),
                               0, virtio_queue_get_desc_size(vdev, idx));
 }
 
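
To see how callers are expected to react, here is a small usage sketch, again
not taken from the patch: vhost_memory_map() is stubbed out to model an address
that no region covers, and try_map_ring() is an invented wrapper showing the
check that vhost_virtqueue_init() and vhost_verify_ring_mappings() now perform,
returning -ENOMEM when the mapping fails or comes back shorter than requested:

/*
 * Usage sketch, not from the patch: the error-handling pattern the diff
 * switches the callers to.  The stub vhost_memory_map() always misses so
 * the error path is visible; the real helper walks dev->mem->regions.
 */
#include <errno.h>
#include <stdint.h>
#include <stdio.h>

struct vhost_dev { int dummy; };   /* stand-in for the real structure */

/* Stub with the same signature as the helper the patch adds; it models a
 * guest address that no vhost memory region covers. */
static void *vhost_memory_map(struct vhost_dev *dev, uint64_t addr,
                              uint64_t *size, int is_write)
{
    (void)dev; (void)addr; (void)size; (void)is_write;
    return NULL;
}

/* Map a ring and insist on getting the full length back, as the patched
 * vhost_virtqueue_init() does: a failed or short mapping becomes -ENOMEM,
 * never an exit(). */
static int try_map_ring(struct vhost_dev *dev, uint64_t ring_phys,
                        uint64_t ring_size, void **ring)
{
    uint64_t l = ring_size;
    void *p = vhost_memory_map(dev, ring_phys, &l, 1 /* is_write */);

    if (!p || l != ring_size) {
        return -ENOMEM;
    }
    *ring = p;
    return 0;
}

int main(void)
{
    struct vhost_dev dev = { 0 };
    void *ring = NULL;
    int r = try_map_ring(&dev, 0xdeadbeef000ull, 0x1000, &ring);

    printf("try_map_ring: %d (expect %d)\n", r, -ENOMEM);
    return 0;
}

Because the helper hands back a pointer straight into memory that is already
mapped into QEMU, with no bounce buffer to copy back or free, the matching
vhost_memory_unmap() can stay an empty placeholder, as the patch's comment
notes.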