From patchwork Tue May 14 09:13:53 2013
X-Patchwork-Submitter: David Gibson
X-Patchwork-Id: 243636
From: David Gibson
To: alex.williamson@redhat.com, pbonzini@redhat.com
Cc: aik@ozlabs.ru, David Gibson, qemu-ppc@nongnu.org,
    qemu-devel@nongnu.org, mst@redhat.com
Date: Tue, 14 May 2013 19:13:53 +1000
Message-Id: <1368522837-20747-8-git-send-email-david@gibson.dropbear.id.au>
In-Reply-To: <1368522837-20747-1-git-send-email-david@gibson.dropbear.id.au>
References: <1368522837-20747-1-git-send-email-david@gibson.dropbear.id.au>
X-Mailer: git-send-email 1.7.10.4
Subject: [Qemu-devel] [PATCH 07/11] vfio: Introduce VFIO address spaces

The only model so far supported for VFIO passthrough devices is the
model usually used on x86, where all of the guest's RAM is mapped into
the (host) IOMMU and there is no IOMMU visible in the guest.

This patch begins to relax this model, introducing the notion of a
VFIOAddressSpace.  This represents a logical DMA address space which
will be visible to one or more VFIO devices by appropriate mapping in
the (host) IOMMU.  Thus the currently global list of containers
becomes local to a VFIOAddressSpace, and we verify that we don't
attempt to add a VFIO group to multiple address spaces.

For now, only one VFIOAddressSpace is created and used, corresponding
to main system memory; that will change in future patches.
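To make that invariant concrete, here is a minimal standalone model
that can be compiled and run outside QEMU.  The Model* names are
invented for this sketch; only the shape of the check matches the
patch (no QEMU headers, no real containers, no ioctls):

    /*
     * Standalone model of the invariant this patch enforces: once a
     * VFIO group is bound to a container, it is pinned to that
     * container's address space; using it from a second address
     * space fails with EBUSY.  Illustration only, not QEMU code.
     */
    #include <errno.h>
    #include <stdio.h>

    typedef struct ModelAddressSpace {
        const char *name;
    } ModelAddressSpace;

    typedef struct ModelContainer {
        ModelAddressSpace *space;      /* owning address space */
    } ModelContainer;

    typedef struct ModelGroup {
        int groupid;
        ModelContainer *container;     /* NULL until connected */
    } ModelGroup;

    static int model_connect_container(ModelGroup *group,
                                       ModelAddressSpace *space,
                                       ModelContainer *container)
    {
        if (group->container) {
            if (group->container->space == space) {
                return 0;              /* already in the right space */
            }
            fprintf(stderr, "group %d used in multiple address spaces\n",
                    group->groupid);
            return -EBUSY;
        }
        container->space = space;
        group->container = container;
        return 0;
    }

    int main(void)
    {
        ModelAddressSpace memory = { "memory" }, other = { "other" };
        ModelContainer container = { 0 };
        ModelGroup group = { .groupid = 7, .container = NULL };

        /* First attach and a repeat in the same space succeed (0);
         * attaching to a second space fails with -EBUSY. */
        printf("first attach: %d\n",
               model_connect_container(&group, &memory, &container));
        printf("same space:   %d\n",
               model_connect_container(&group, &memory, &container));
        printf("second space: %d\n",
               model_connect_container(&group, &other, &container));
        return 0;
    }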
Signed-off-by: David Gibson
---
 hw/misc/vfio.c | 63 ++++++++++++++++++++++++++++++++++++++++++--------------
 1 file changed, 48 insertions(+), 15 deletions(-)

diff --git a/hw/misc/vfio.c b/hw/misc/vfio.c
index c4a8853..e557f69 100644
--- a/hw/misc/vfio.c
+++ b/hw/misc/vfio.c
@@ -113,9 +113,17 @@ enum {
     VFIO_INT_MSIX = 3,
 };
 
+typedef struct VFIOAddressSpace {
+    AddressSpace *as;
+    QLIST_HEAD(, VFIOContainer) containers;
+} VFIOAddressSpace;
+
+static VFIOAddressSpace vfio_address_space_memory;
+
 struct VFIOGroup;
 
 typedef struct VFIOContainer {
+    VFIOAddressSpace *space;
     int fd; /* /dev/vfio/vfio, empowered by the attached groups */
     struct {
         /* enable abstraction to support various iommu backends */
@@ -178,9 +186,6 @@ typedef struct VFIOGroup {
 
 #define MSIX_CAP_LENGTH 12
 
-static QLIST_HEAD(, VFIOContainer)
-    container_list = QLIST_HEAD_INITIALIZER(container_list);
-
 static QLIST_HEAD(, VFIOGroup)
     group_list = QLIST_HEAD_INITIALIZER(group_list);
 
@@ -2624,16 +2629,28 @@ static int vfio_load_rom(VFIODevice *vdev)
     return 0;
 }
 
-static int vfio_connect_container(VFIOGroup *group)
+static void vfio_address_space_init(VFIOAddressSpace *space, AddressSpace *as)
+{
+    space->as = as;
+    QLIST_INIT(&space->containers);
+}
+
+static int vfio_connect_container(VFIOGroup *group, VFIOAddressSpace *space)
 {
     VFIOContainer *container;
     int ret, fd;
 
     if (group->container) {
-        return 0;
+        if (group->container->space == space) {
+            return 0;
+        } else {
+            error_report("vfio: group %d used in multiple address spaces",
+                         group->groupid);
+            return -EBUSY;
+        }
     }
 
-    QLIST_FOREACH(container, &container_list, next) {
+    QLIST_FOREACH(container, &space->containers, next) {
         if (!ioctl(group->fd, VFIO_GROUP_SET_CONTAINER, &container->fd)) {
             group->container = container;
             QLIST_INSERT_HEAD(&container->group_list, group, container_next);
@@ -2656,6 +2673,7 @@ static int vfio_connect_container(VFIOGroup *group)
     }
 
     container = g_malloc0(sizeof(*container));
+    container->space = space;
     container->fd = fd;
 
     if (ioctl(fd, VFIO_CHECK_EXTENSION, VFIO_TYPE1_IOMMU)) {
@@ -2678,7 +2696,8 @@ static int vfio_connect_container(VFIOGroup *group)
 
         container->iommu_data.listener = vfio_memory_listener;
         container->iommu_data.release = vfio_listener_release;
-        memory_listener_register(&container->iommu_data.listener, &address_space_memory);
+        memory_listener_register(&container->iommu_data.listener,
+                                 container->space->as);
     } else {
         error_report("vfio: No available IOMMU models");
         g_free(container);
@@ -2687,7 +2706,7 @@ static int vfio_connect_container(VFIOGroup *group)
     }
 
     QLIST_INIT(&container->group_list);
-    QLIST_INSERT_HEAD(&container_list, container, next);
+    QLIST_INSERT_HEAD(&space->containers, container, next);
 
     group->container = container;
     QLIST_INSERT_HEAD(&container->group_list, group, container_next);
@@ -2700,7 +2719,7 @@ static void vfio_disconnect_container(VFIOGroup *group)
     VFIOContainer *container = group->container;
 
     if (ioctl(group->fd, VFIO_GROUP_UNSET_CONTAINER, &container->fd)) {
-        error_report("vfio: error disconnecting group %d from container",
+        error_report("vfio: error disconnecting group %d from context",
                      group->groupid);
     }
 
@@ -2712,13 +2731,13 @@ static void vfio_disconnect_container(VFIOGroup *group)
             container->iommu_data.release(container);
         }
         QLIST_REMOVE(container, next);
-        DPRINTF("vfio_disconnect_container: close container->fd\n");
+        DPRINTF("vfio_disconnect: close container->fd\n");
         close(container->fd);
         g_free(container);
     }
 }
 
-static VFIOGroup *vfio_get_group(int groupid)
+static VFIOGroup *vfio_get_group(int groupid, VFIOAddressSpace *space)
 {
     VFIOGroup *group;
     char path[32];
@@ -2726,7 +2745,15 @@ static VFIOGroup *vfio_get_group(int groupid)
 
     QLIST_FOREACH(group, &group_list, next) {
         if (group->groupid == groupid) {
-            return group;
+            /* Found it.  Now is it already in the right context? */
+            assert(group->container);
+            if (group->container->space == space) {
+                return group;
+            } else {
+                error_report("vfio: group %d used in multiple address spaces",
+                             group->groupid);
+                return NULL;
+            }
         }
     }
 
@@ -2759,8 +2786,8 @@ static VFIOGroup *vfio_get_group(int groupid)
     group->groupid = groupid;
    QLIST_INIT(&group->device_list);
 
-    if (vfio_connect_container(group)) {
-        error_report("vfio: failed to setup container for group %d", groupid);
+    if (vfio_connect_container(group, space)) {
+        error_report("vfio: failed to setup context for group %d", groupid);
         close(group->fd);
         g_free(group);
         return NULL;
@@ -2992,7 +3019,12 @@ static int vfio_initfn(PCIDevice *pdev)
     DPRINTF("%s(%04x:%02x:%02x.%x) group %d\n", __func__, vdev->host.domain,
             vdev->host.bus, vdev->host.slot, vdev->host.function, groupid);
 
-    group = vfio_get_group(groupid);
+    if (pci_iommu_as(pdev) != &address_space_memory) {
+        error_report("vfio: DMA address space must be system memory");
+        return -ENXIO;
+    }
+
+    group = vfio_get_group(groupid, &vfio_address_space_memory);
     if (!group) {
         error_report("vfio: failed to get group %d", groupid);
         return -ENOENT;
@@ -3212,6 +3244,7 @@ static const TypeInfo vfio_pci_dev_info = {
 
 static void register_vfio_pci_dev_type(void)
 {
+    vfio_address_space_init(&vfio_address_space_memory, &address_space_memory);
    type_register_static(&vfio_pci_dev_info);
 }
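
For reference, a hedged sketch of where this could go once the PCI
layer reports per-device DMA address spaces: rather than rejecting
anything other than system memory in vfio_initfn(), a later patch
might look up or create the VFIOAddressSpace matching whatever
pci_iommu_as() returns.  The helper below is hypothetical as of this
patch -- it assumes VFIOAddressSpace gains a
QLIST_ENTRY(VFIOAddressSpace) list member and that a global list of
address spaces is declared:

    /* Hypothetical follow-up, not part of this patch: find or create
     * the VFIOAddressSpace wrapping a given AddressSpace. */
    static QLIST_HEAD(, VFIOAddressSpace) vfio_address_spaces =
        QLIST_HEAD_INITIALIZER(vfio_address_spaces);

    static VFIOAddressSpace *vfio_get_address_space(AddressSpace *as)
    {
        VFIOAddressSpace *space;

        QLIST_FOREACH(space, &vfio_address_spaces, list) {
            if (space->as == as) {
                return space;
            }
        }

        /* No existing VFIOAddressSpace for this AddressSpace yet */
        space = g_malloc0(sizeof(*space));
        space->as = as;
        QLIST_INIT(&space->containers);
        QLIST_INSERT_HEAD(&vfio_address_spaces, space, list);

        return space;
    }

With something like that in place, vfio_initfn() could call
vfio_get_group(groupid, vfio_get_address_space(pci_iommu_as(pdev)))
instead of hard-coding vfio_address_space_memory, and the
multiple-address-space check in vfio_get_group() would do the rest.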