From patchwork Mon Dec 19 14:13:41 2011
X-Patchwork-Submitter: Avi Kivity
X-Patchwork-Id: 132250
From: Avi Kivity
To: Stefano Stabellini, qemu-devel@nongnu.org, "Michael S. Tsirkin"
Cc: xen-devel@lists.xensource.com, kvm@vger.kernel.org
Date: Mon, 19 Dec 2011 16:13:41 +0200
Message-Id: <1324304024-11220-21-git-send-email-avi@redhat.com>
In-Reply-To: <1324304024-11220-1-git-send-email-avi@redhat.com>
References: <1324304024-11220-1-git-send-email-avi@redhat.com>
Subject: [Qemu-devel] [PATCH 20/23] vhost: avoid cpu_get_physical_page_desc()

This reaches into the innards of the memory core, which are being
changed.  Switch to a memory API version.
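To make the change concrete, here is a minimal, self-contained sketch of the
new dirty-sync path.  The struct, its field names, and the VHOST_LOG_PAGE
value below are simplified stand-ins (not QEMU code, not part of the patch),
but the region-relative offset computation mirrors the one in the diff that
follows:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define VHOST_LOG_PAGE 0x1000          /* one log bit covers one page */

/* Simplified stand-in for QEMU's MemoryRegionSection. */
struct section {
    const char *region_name;           /* stands in for section->mr */
    uint64_t offset_within_region;
};

/* Walk one vhost dirty-log word and turn each set bit into a
 * region-relative offset, the way the patched code does, instead of
 * asking the memory core to translate a guest physical address. */
static void sync_log_word(const struct section *s, uint64_t log)
{
    int bit;

    while ((bit = __builtin_ffsll(log)) != 0) {   /* GCC/Clang builtin, mirrors ffsll() */
        bit -= 1;
        uint64_t ram_addr = s->offset_within_region
                            + (uint64_t)bit * VHOST_LOG_PAGE;
        printf("dirty: %s + 0x%" PRIx64 "\n", s->region_name, ram_addr);
        log &= ~(1ull << bit);
    }
}

int main(void)
{
    struct section s = { "guest-ram", 0x200000 };

    sync_log_word(&s, 0x9);            /* bits 0 and 3 -> pages 0 and 3 */
    return 0;
}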
Signed-off-by: Avi Kivity
---
 hw/vhost.c |   47 +++++++++++++++++++++++++++++++++++++++--------
 hw/vhost.h |    2 ++
 2 files changed, 41 insertions(+), 8 deletions(-)

diff --git a/hw/vhost.c b/hw/vhost.c
index a1c5e4c..cd56e75 100644
--- a/hw/vhost.c
+++ b/hw/vhost.c
@@ -17,6 +17,7 @@
 #include <linux/vhost.h>
 
 static void vhost_dev_sync_region(struct vhost_dev *dev,
+                                  MemoryRegionSection *section,
                                   uint64_t mfirst, uint64_t mlast,
                                   uint64_t rfirst, uint64_t rlast)
 {
@@ -49,8 +50,8 @@ static void vhost_dev_sync_region(struct vhost_dev *dev,
                 ffsll(log) : ffs(log))) {
             ram_addr_t ram_addr;
             bit -= 1;
-            ram_addr = cpu_get_physical_page_desc(addr + bit * VHOST_LOG_PAGE);
-            cpu_physical_memory_set_dirty(ram_addr);
+            ram_addr = section->offset_within_region + bit * VHOST_LOG_PAGE;
+            memory_region_set_dirty(section->mr, ram_addr);
             log &= ~(0x1ull << bit);
         }
         addr += VHOST_LOG_CHUNK;
@@ -58,6 +59,7 @@ static void vhost_dev_sync_region(struct vhost_dev *dev,
 }
 
 static int vhost_sync_dirty_bitmap(struct vhost_dev *dev,
+                                   MemoryRegionSection *section,
                                    target_phys_addr_t start_addr,
                                    target_phys_addr_t end_addr)
 {
@@ -68,14 +70,14 @@ static int vhost_sync_dirty_bitmap(struct vhost_dev *dev,
     }
     for (i = 0; i < dev->mem->nregions; ++i) {
         struct vhost_memory_region *reg = dev->mem->regions + i;
-        vhost_dev_sync_region(dev, start_addr, end_addr,
+        vhost_dev_sync_region(dev, section, start_addr, end_addr,
                               reg->guest_phys_addr,
                               range_get_last(reg->guest_phys_addr,
                                              reg->memory_size));
     }
     for (i = 0; i < dev->nvqs; ++i) {
         struct vhost_virtqueue *vq = dev->vqs + i;
-        vhost_dev_sync_region(dev, start_addr, end_addr, vq->used_phys,
+        vhost_dev_sync_region(dev, section, start_addr, end_addr, vq->used_phys,
                               range_get_last(vq->used_phys, vq->used_size));
     }
     return 0;
@@ -89,7 +91,7 @@ static void vhost_log_sync(MemoryListener *listener,
     target_phys_addr_t start_addr = section->offset_within_address_space;
     target_phys_addr_t end_addr = start_addr + section->size;
 
-    vhost_sync_dirty_bitmap(dev, start_addr, end_addr);
+    vhost_sync_dirty_bitmap(dev, section, start_addr, end_addr);
 }
 
 /* Assign/unassign. Keep an unsorted array of non-overlapping
@@ -261,7 +263,7 @@ static inline void vhost_dev_log_resize(struct vhost_dev* dev, uint64_t size)
 {
     vhost_log_chunk_t *log;
     uint64_t log_base;
-    int r;
+    int r, i;
     if (size) {
         log = g_malloc0(size * sizeof *log);
     } else {
@@ -270,7 +272,10 @@ static inline void vhost_dev_log_resize(struct vhost_dev* dev, uint64_t size)
     log_base = (uint64_t)(unsigned long)log;
     r = ioctl(dev->control, VHOST_SET_LOG_BASE, &log_base);
     assert(r >= 0);
-    vhost_sync_dirty_bitmap(dev, 0, (target_phys_addr_t)~0x0ull);
+    for (i = 0; i < dev->n_mem_sections; ++i) {
+        vhost_sync_dirty_bitmap(dev, &dev->mem_sections[i],
+                                0, (target_phys_addr_t)~0x0ull);
+    }
     if (dev->log) {
         g_free(dev->log);
     }
@@ -428,13 +433,33 @@ static void vhost_set_memory(MemoryListener *listener,
 static void vhost_region_add(MemoryListener *listener,
                              MemoryRegionSection *section)
 {
+    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
+                                         memory_listener);
+
+    ++dev->n_mem_sections;
+    dev->mem_sections = g_renew(MemoryRegionSection, dev->mem_sections,
+                                dev->n_mem_sections);
+    dev->mem_sections[dev->n_mem_sections - 1] = *section;
     vhost_set_memory(listener, section, true);
 }
 
 static void vhost_region_del(MemoryListener *listener,
                              MemoryRegionSection *section)
 {
+    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
+                                         memory_listener);
+    int i;
+
     vhost_set_memory(listener, section, false);
+    for (i = 0; i < dev->n_mem_sections; ++i) {
+        if (dev->mem_sections[i].offset_within_address_space
+            == section->offset_within_address_space) {
+            --dev->n_mem_sections;
+            memmove(&dev->mem_sections[i], &dev->mem_sections[i+1],
+                    (dev->n_mem_sections - i) * sizeof(*dev->mem_sections));
+            break;
+        }
+    }
 }
 
 static int vhost_virtqueue_set_addr(struct vhost_dev *dev,
@@ -714,6 +739,8 @@ int vhost_dev_init(struct vhost_dev *hdev, int devfd, bool force)
         .log_global_stop = vhost_log_global_stop,
     };
     hdev->mem = g_malloc0(offsetof(struct vhost_memory, regions));
+    hdev->n_mem_sections = 0;
+    hdev->mem_sections = NULL;
     hdev->log = NULL;
     hdev->log_size = 0;
     hdev->log_enabled = false;
@@ -731,6 +758,7 @@ void vhost_dev_cleanup(struct vhost_dev *hdev)
 {
     memory_listener_unregister(&hdev->memory_listener);
     g_free(hdev->mem);
+    g_free(hdev->mem_sections);
     close(hdev->control);
 }
 
@@ -871,7 +899,10 @@ void vhost_dev_stop(struct vhost_dev *hdev, VirtIODevice *vdev)
                                 hdev->vqs + i,
                                 i);
     }
-    vhost_sync_dirty_bitmap(hdev, 0, (target_phys_addr_t)~0x0ull);
+    for (i = 0; i < hdev->n_mem_sections; ++i) {
+        vhost_sync_dirty_bitmap(hdev, &hdev->mem_sections[i],
+                                0, (target_phys_addr_t)~0x0ull);
+    }
     r = vdev->binding->set_guest_notifiers(vdev->binding_opaque, false);
     if (r < 0) {
         fprintf(stderr, "vhost guest notifier cleanup failed: %d\n", r);
diff --git a/hw/vhost.h b/hw/vhost.h
index d1824ec..80e64df 100644
--- a/hw/vhost.h
+++ b/hw/vhost.h
@@ -30,6 +30,8 @@ struct vhost_dev {
     MemoryListener memory_listener;
     int control;
     struct vhost_memory *mem;
+    int n_mem_sections;
+    MemoryRegionSection *mem_sections;
    struct vhost_virtqueue *vqs;
    int nvqs;
    unsigned long long features;
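As a side note on the bookkeeping the new region_add/region_del callbacks
introduce, here is a minimal, self-contained sketch in plain C.  The tracker
type, the function names, and the use of realloc/memmove in place of QEMU's
g_renew and MemoryRegionSection are illustrative stand-ins, but the
append-on-add / compact-on-delete pattern and the match on
offset_within_address_space follow the patch:

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

/* Hypothetical, simplified record standing in for MemoryRegionSection. */
struct mem_section {
    uint64_t offset_within_address_space;
    uint64_t size;
};

struct section_tracker {
    struct mem_section *sections;   /* unsorted, non-overlapping entries */
    int n_sections;
};

/* Append a copy of *s, growing the array by one element
 * (the role g_renew() plays in vhost_region_add); error handling omitted. */
static void tracker_add(struct section_tracker *t, const struct mem_section *s)
{
    t->sections = realloc(t->sections,
                          (t->n_sections + 1) * sizeof(*t->sections));
    t->sections[t->n_sections++] = *s;
}

/* Drop the entry whose address-space offset matches, shifting the tail
 * down one slot; memmove's length argument is in bytes, so the element
 * count is multiplied by the element size. */
static void tracker_del(struct section_tracker *t, uint64_t offset)
{
    int i;

    for (i = 0; i < t->n_sections; ++i) {
        if (t->sections[i].offset_within_address_space == offset) {
            --t->n_sections;
            memmove(&t->sections[i], &t->sections[i + 1],
                    (t->n_sections - i) * sizeof(*t->sections));
            break;
        }
    }
}

int main(void)
{
    struct section_tracker t = { NULL, 0 };
    struct mem_section a = { 0x0, 0x10000 }, b = { 0x100000, 0x10000 };

    tracker_add(&t, &a);
    tracker_add(&t, &b);
    tracker_del(&t, 0x0);           /* leaves only the section at 0x100000 */
    free(t.sections);
    return 0;
}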