diff mbox

[8/8] vfio: Create VFIOAddressSpace objects as needed

Message ID 1368442465-14363-9-git-send-email-david@gibson.dropbear.id.au
State New
Headers show

Commit Message

David Gibson May 13, 2013, 10:54 a.m. UTC
So far, VFIO has a notion of different logical DMA address spaces, but
only ever uses one (system memory).  This patch extends this, creating
new VFIOAddressSpace objects as necessary, according to the AddressSpace
reported by the PCI subsystem for this device's DMAs.

This isn't enough yet to support guest side IOMMUs with VFIO, but it does
mean we could now support VFIO devices on, for example, a guest side PCI
host bridge which maps system memory at somewhere other than 0 in PCI
space.

Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
---
 hw/misc/vfio.c |   36 +++++++++++++++++++++++++++++-------
 1 file changed, 29 insertions(+), 7 deletions(-)

Comments

Alex Williamson May 13, 2013, 9:33 p.m. UTC | #1
On Mon, 2013-05-13 at 20:54 +1000, David Gibson wrote:
> So far, VFIO has a notion of different logical DMA address spaces, but
> only ever uses one (system memory).  This patch extends this, creating
> new VFIOAddressSpace objects as necessary, according to the AddressSpace
> reported by the PCI subsystem for this device's DMAs.
> 
> This isn't enough yet to support guest side IOMMUs with VFIO, but it does
> mean we could now support VFIO devices on, for example, a guest side PCI
> host bridge which maps system memory at somewhere other than 0 in PCI
> space.
> 
> Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
> ---
>  hw/misc/vfio.c |   36 +++++++++++++++++++++++++++++-------
>  1 file changed, 29 insertions(+), 7 deletions(-)
> 
> diff --git a/hw/misc/vfio.c b/hw/misc/vfio.c
> index b1e9220..3850d39 100644
> --- a/hw/misc/vfio.c
> +++ b/hw/misc/vfio.c
> @@ -116,9 +116,10 @@ enum {
>  typedef struct VFIOAddressSpace {
>      AddressSpace *as;
>      QLIST_HEAD(, VFIOContainer) containers;
> +    QLIST_ENTRY(VFIOAddressSpace) list;
>  } VFIOAddressSpace;
>  
> -static VFIOAddressSpace vfio_address_space_memory;
> +QLIST_HEAD(, VFIOAddressSpace) vfio_address_spaces;
>  
>  struct VFIOGroup;
>  
> @@ -2635,6 +2636,23 @@ static void vfio_address_space_init(VFIOAddressSpace *vas, AddressSpace *as)
>      QLIST_INIT(&vas->containers);
>  }
>  
> +static VFIOAddressSpace *vfio_address_space_get(AddressSpace *as)

vfio_get_address_space is a better match for the rest of the code.

> +{
> +    VFIOAddressSpace *vas;
> +
> +    QLIST_FOREACH(vas, &vfio_address_spaces, list) {
> +        if (vas->as == as)
> +            return vas;
> +    }
> +
> +    /* No suitable VFIOAddressSpace, create a new one */
> +    vas = g_malloc0(sizeof(*vas));
> +    vfio_address_space_init(vas, as);
> +    QLIST_INSERT_HEAD(&vfio_address_spaces, vas, list);

Do we still need vfio_address_space_init?  Seems like it should be
rolled in here.

> +
> +    return vas;
> +}
> +
>  static int vfio_connect(VFIOGroup *group, VFIOAddressSpace *vas)
>  {
>      VFIOContainer *container;
> @@ -2727,6 +2745,8 @@ static void vfio_disconnect(VFIOGroup *group)
>      group->container = NULL;
>  
>      if (QLIST_EMPTY(&container->group_list)) {
> +        VFIOAddressSpace *vas = container->vas;
> +
>          if (container->iommu_data.release) {
>              container->iommu_data.release(container);
>          }
> @@ -2734,6 +2754,11 @@ static void vfio_disconnect(VFIOGroup *group)
>          DPRINTF("vfio_disconnect: close container->fd\n");
>          close(container->fd);
>          g_free(container);
> +
> +        if (QLIST_EMPTY(&vas->containers)) {
> +            QLIST_REMOVE(vas, list);
> +            g_free(vas);
> +        }

vfio_put_address_space?  Where there's a get...

>      }
>  }
>  
> @@ -2984,6 +3009,7 @@ static int vfio_initfn(PCIDevice *pdev)
>  {
>      VFIODevice *pvdev, *vdev = DO_UPCAST(VFIODevice, pdev, pdev);
>      VFIOGroup *group;
> +    VFIOAddressSpace *vas;
>      char path[PATH_MAX], iommu_group_path[PATH_MAX], *group_name;
>      ssize_t len;
>      struct stat st;
> @@ -3019,12 +3045,9 @@ static int vfio_initfn(PCIDevice *pdev)
>      DPRINTF("%s(%04x:%02x:%02x.%x) group %d\n", __func__, vdev->host.domain,
>              vdev->host.bus, vdev->host.slot, vdev->host.function, groupid);
>  
> -    if (pci_iommu_as(pdev) != &address_space_memory) {
> -        error_report("vfio: DMA address space must be system memory");
> -        return -ENXIO;
> -    }
> +    vas = vfio_address_space_get(pci_iommu_as(pdev));

I don't think the structure malloc'd here will get cleaned up in all
cases on error.  Thanks,

Alex

>  
> -    group = vfio_get_group(groupid, &vfio_address_space_memory);
> +    group = vfio_get_group(groupid, vas);
>      if (!group) {
>          error_report("vfio: failed to get group %d", groupid);
>          return -ENOENT;
> @@ -3244,7 +3267,6 @@ static const TypeInfo vfio_pci_dev_info = {
>  
>  static void register_vfio_pci_dev_type(void)
>  {
> -    vfio_address_space_init(&vfio_address_space_memory, &address_space_memory);
>      type_register_static(&vfio_pci_dev_info);
>  }
>
David Gibson May 14, 2013, 1:58 a.m. UTC | #2
On Mon, May 13, 2013 at 03:33:06PM -0600, Alex Williamson wrote:
> On Mon, 2013-05-13 at 20:54 +1000, David Gibson wrote:
> > So far, VFIO has a notion of different logical DMA address spaces, but
> > only ever uses one (system memory).  This patch extends this, creating
> > new VFIOAddressSpace objects as necessary, according to the AddressSpace
> > reported by the PCI subsystem for this device's DMAs.
> > 
> > This isn't enough yet to support guest side IOMMUs with VFIO, but it does
> > mean we could now support VFIO devices on, for example, a guest side PCI
> > host bridge which maps system memory at somewhere other than 0 in PCI
> > space.
> > 
> > Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
> > ---
> >  hw/misc/vfio.c |   36 +++++++++++++++++++++++++++++-------
> >  1 file changed, 29 insertions(+), 7 deletions(-)
> > 
> > diff --git a/hw/misc/vfio.c b/hw/misc/vfio.c
> > index b1e9220..3850d39 100644
> > --- a/hw/misc/vfio.c
> > +++ b/hw/misc/vfio.c
> > @@ -116,9 +116,10 @@ enum {
> >  typedef struct VFIOAddressSpace {
> >      AddressSpace *as;
> >      QLIST_HEAD(, VFIOContainer) containers;
> > +    QLIST_ENTRY(VFIOAddressSpace) list;
> >  } VFIOAddressSpace;
> >  
> > -static VFIOAddressSpace vfio_address_space_memory;
> > +QLIST_HEAD(, VFIOAddressSpace) vfio_address_spaces;
> >  
> >  struct VFIOGroup;
> >  
> > @@ -2635,6 +2636,23 @@ static void vfio_address_space_init(VFIOAddressSpace *vas, AddressSpace *as)
> >      QLIST_INIT(&vas->containers);
> >  }
> >  
> > +static VFIOAddressSpace *vfio_address_space_get(AddressSpace *as)
> 
> vfio_get_address_space is a better match for the rest of the code.

Ok.

> > +{
> > +    VFIOAddressSpace *vas;
> > +
> > +    QLIST_FOREACH(vas, &vfio_address_spaces, list) {
> > +        if (vas->as == as)
> > +            return vas;
> > +    }
> > +
> > +    /* No suitable VFIOAddressSpace, create a new one */
> > +    vas = g_malloc0(sizeof(*vas));
> > +    vfio_address_space_init(vas, as);
> > +    QLIST_INSERT_HEAD(&vfio_address_spaces, vas, list);
> 
> Do we still need vfio_address_space_init?  Seems like it should be
> rolled in here.

Ah, true.  I had some notions of allowing host bridges to statically
allocate a vfio address space as part of their own structure, but this
current code assumes they are malloc()ed by get_address_space, so yes,
I'll fold that in.

> > +
> > +    return vas;
> > +}
> > +
> >  static int vfio_connect(VFIOGroup *group, VFIOAddressSpace *vas)
> >  {
> >      VFIOContainer *container;
> > @@ -2727,6 +2745,8 @@ static void vfio_disconnect(VFIOGroup *group)
> >      group->container = NULL;
> >  
> >      if (QLIST_EMPTY(&container->group_list)) {
> > +        VFIOAddressSpace *vas = container->vas;
> > +
> >          if (container->iommu_data.release) {
> >              container->iommu_data.release(container);
> >          }
> > @@ -2734,6 +2754,11 @@ static void vfio_disconnect(VFIOGroup *group)
> >          DPRINTF("vfio_disconnect: close container->fd\n");
> >          close(container->fd);
> >          g_free(container);
> > +
> > +        if (QLIST_EMPTY(&vas->containers)) {
> > +            QLIST_REMOVE(vas, list);
> > +            g_free(vas);
> > +        }
> 
> vfio_put_address_space?  Where there's a get...

Fair enough, will revise.


> 
> >      }
> >  }
> >  
> > @@ -2984,6 +3009,7 @@ static int vfio_initfn(PCIDevice *pdev)
> >  {
> >      VFIODevice *pvdev, *vdev = DO_UPCAST(VFIODevice, pdev, pdev);
> >      VFIOGroup *group;
> > +    VFIOAddressSpace *vas;
> >      char path[PATH_MAX], iommu_group_path[PATH_MAX], *group_name;
> >      ssize_t len;
> >      struct stat st;
> > @@ -3019,12 +3045,9 @@ static int vfio_initfn(PCIDevice *pdev)
> >      DPRINTF("%s(%04x:%02x:%02x.%x) group %d\n", __func__, vdev->host.domain,
> >              vdev->host.bus, vdev->host.slot, vdev->host.function, groupid);
> >  
> > -    if (pci_iommu_as(pdev) != &address_space_memory) {
> > -        error_report("vfio: DMA address space must be system memory");
> > -        return -ENXIO;
> > -    }
> > +    vas = vfio_address_space_get(pci_iommu_as(pdev));
> 
> I don't think the structure malloc'd here will get cleaned up in all
> cases on error.  Thanks,

Good point, auditing now...
diff mbox

Patch

diff --git a/hw/misc/vfio.c b/hw/misc/vfio.c
index b1e9220..3850d39 100644
--- a/hw/misc/vfio.c
+++ b/hw/misc/vfio.c
@@ -116,9 +116,10 @@  enum {
 typedef struct VFIOAddressSpace {
     AddressSpace *as;
     QLIST_HEAD(, VFIOContainer) containers;
+    QLIST_ENTRY(VFIOAddressSpace) list;
 } VFIOAddressSpace;
 
-static VFIOAddressSpace vfio_address_space_memory;
+QLIST_HEAD(, VFIOAddressSpace) vfio_address_spaces;
 
 struct VFIOGroup;
 
@@ -2635,6 +2636,23 @@  static void vfio_address_space_init(VFIOAddressSpace *vas, AddressSpace *as)
     QLIST_INIT(&vas->containers);
 }
 
+static VFIOAddressSpace *vfio_address_space_get(AddressSpace *as)
+{
+    VFIOAddressSpace *vas;
+
+    QLIST_FOREACH(vas, &vfio_address_spaces, list) {
+        if (vas->as == as)
+            return vas;
+    }
+
+    /* No suitable VFIOAddressSpace, create a new one */
+    vas = g_malloc0(sizeof(*vas));
+    vfio_address_space_init(vas, as);
+    QLIST_INSERT_HEAD(&vfio_address_spaces, vas, list);
+
+    return vas;
+}
+
 static int vfio_connect(VFIOGroup *group, VFIOAddressSpace *vas)
 {
     VFIOContainer *container;
@@ -2727,6 +2745,8 @@  static void vfio_disconnect(VFIOGroup *group)
     group->container = NULL;
 
     if (QLIST_EMPTY(&container->group_list)) {
+        VFIOAddressSpace *vas = container->vas;
+
         if (container->iommu_data.release) {
             container->iommu_data.release(container);
         }
@@ -2734,6 +2754,11 @@  static void vfio_disconnect(VFIOGroup *group)
         DPRINTF("vfio_disconnect: close container->fd\n");
         close(container->fd);
         g_free(container);
+
+        if (QLIST_EMPTY(&vas->containers)) {
+            QLIST_REMOVE(vas, list);
+            g_free(vas);
+        }
     }
 }
 
@@ -2984,6 +3009,7 @@  static int vfio_initfn(PCIDevice *pdev)
 {
     VFIODevice *pvdev, *vdev = DO_UPCAST(VFIODevice, pdev, pdev);
     VFIOGroup *group;
+    VFIOAddressSpace *vas;
     char path[PATH_MAX], iommu_group_path[PATH_MAX], *group_name;
     ssize_t len;
     struct stat st;
@@ -3019,12 +3045,9 @@  static int vfio_initfn(PCIDevice *pdev)
     DPRINTF("%s(%04x:%02x:%02x.%x) group %d\n", __func__, vdev->host.domain,
             vdev->host.bus, vdev->host.slot, vdev->host.function, groupid);
 
-    if (pci_iommu_as(pdev) != &address_space_memory) {
-        error_report("vfio: DMA address space must be system memory");
-        return -ENXIO;
-    }
+    vas = vfio_address_space_get(pci_iommu_as(pdev));
 
-    group = vfio_get_group(groupid, &vfio_address_space_memory);
+    group = vfio_get_group(groupid, vas);
     if (!group) {
         error_report("vfio: failed to get group %d", groupid);
         return -ENOENT;
@@ -3244,7 +3267,6 @@  static const TypeInfo vfio_pci_dev_info = {
 
 static void register_vfio_pci_dev_type(void)
 {
-    vfio_address_space_init(&vfio_address_space_memory, &address_space_memory);
     type_register_static(&vfio_pci_dev_info);
 }