
[fixup,2/2] vhost: genearlize iommu memory region

Message ID 1489980999-5965-3-git-send-email-jasowang@redhat.com
State New

Commit Message

Jason Wang March 20, 2017, 3:36 a.m. UTC
We assumed the iommu_ops were attached to the root region of the
address space. This may not be true for all IOMMU implementations. Fix
this by not assuming that as->root has iommu_ops; instead:

- register a memory listener on dma_as
- during region_add, if the section is an IOMMU region, register a
  dedicated IOMMU notifier, and store all notifiers in a list
- during region_del, look up the matching IOMMU notifier and remove it

This is a must for making the vhost device IOTLB work with IOMMUs other
than Intel's.
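
In short, the new flow is roughly the following (a condensed sketch;
the complete implementation is in the patch below and all names are
taken from it):

    /* the listener is registered against vdev->dma_as in vhost_dev_start() */
    hdev->iommu_listener = (MemoryListener) {
        .region_add = vhost_iommu_region_add, /* IOMMU MR -> register a notifier */
        .region_del = vhost_iommu_region_del, /* IOMMU MR -> unregister/free it */
    };
    memory_listener_register(&hdev->iommu_listener, vdev->dma_as);

    /* each IOMMU region gets its own struct vhost_iommu on hdev->iommu_list,
     * holding the IOMMUNotifier that forwards unmaps to the device IOTLB
     * via vhost_invalidate_device_iotlb() */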

Signed-off-by: Jason Wang <jasowang@redhat.com>
---
 hw/virtio/vhost.c         | 93 +++++++++++++++++++++++++++++++++++++----------
 include/hw/virtio/vhost.h | 10 +++++
 2 files changed, 83 insertions(+), 20 deletions(-)

Comments

Peter Xu March 20, 2017, 9:07 a.m. UTC | #1
On Mon, Mar 20, 2017 at 11:36:39AM +0800, Jason Wang wrote:
> We assumed the iommu_ops were attached to the root region of the
> address space. This may not be true for all IOMMU implementations. Fix
> this by not assuming that as->root has iommu_ops; instead:
> 
> - register a memory listener on dma_as
> - during region_add, if the section is an IOMMU region, register a
>   dedicated IOMMU notifier, and store all notifiers in a list
> - during region_del, look up the matching IOMMU notifier and remove it
> 
> This is a must for making the vhost device IOTLB work with IOMMUs other
> than Intel's.
> 
> Signed-off-by: Jason Wang <jasowang@redhat.com>

[...]

> @@ -1454,9 +1509,8 @@ int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev)
>          goto fail_features;
>      }
>  
> -    if (vhost_dev_has_iommu(hdev)) {
> -        memory_region_register_iommu_notifier(vdev->dma_as->root,
> -                                              &hdev->n);
> +    if (true) {

Here the if clause can be removed. And...

> +        memory_listener_register(&hdev->iommu_listener, vdev->dma_as);
>      }
>  
>      r = hdev->vhost_ops->vhost_set_mem_table(hdev, hdev->mem);
> @@ -1536,10 +1590,9 @@ void vhost_dev_stop(struct vhost_dev *hdev, VirtIODevice *vdev)
>                               hdev->vq_index + i);
>      }
>  
> -    if (vhost_dev_has_iommu(hdev)) {
> +    if (true) {

...here. Besides that:

Reviewed-by: Peter Xu <peterx@redhat.com>
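
For reference, with the redundant "if (true)" wrappers dropped as
suggested above, the two call sites would simply read:

    /* vhost_dev_start() */
    memory_listener_register(&hdev->iommu_listener, vdev->dma_as);

    /* vhost_dev_stop() */
    hdev->vhost_ops->vhost_set_iotlb_callback(hdev, false);
    memory_listener_unregister(&hdev->iommu_listener);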

Since this patchset depends on vtd vfio series and fixes its breakage
to vhost, I'll pick them up for consistency for next post of vtd vfio
series as well.

Thanks,

-- peterx
Michael S. Tsirkin March 21, 2017, 1:12 a.m. UTC | #2
On Mon, Mar 20, 2017 at 05:07:34PM +0800, Peter Xu wrote:
> On Mon, Mar 20, 2017 at 11:36:39AM +0800, Jason Wang wrote:
> > We assumed the iommu_ops were attached to the root region of the
> > address space. This may not be true for all IOMMU implementations. Fix
> > this by not assuming that as->root has iommu_ops; instead:
> > 
> > - register a memory listener on dma_as
> > - during region_add, if the section is an IOMMU region, register a
> >   dedicated IOMMU notifier, and store all notifiers in a list
> > - during region_del, look up the matching IOMMU notifier and remove it
> > 
> > This is a must for making the vhost device IOTLB work with IOMMUs other
> > than Intel's.
> > 
> > Signed-off-by: Jason Wang <jasowang@redhat.com>
> 
> [...]
> 
> > @@ -1454,9 +1509,8 @@ int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev)
> >          goto fail_features;
> >      }
> >  
> > -    if (vhost_dev_has_iommu(hdev)) {
> > -        memory_region_register_iommu_notifier(vdev->dma_as->root,
> > -                                              &hdev->n);
> > +    if (true) {
> 
> Here the if clause can be removed. And...
> 
> > +        memory_listener_register(&hdev->iommu_listener, vdev->dma_as);
> >      }
> >  
> >      r = hdev->vhost_ops->vhost_set_mem_table(hdev, hdev->mem);
> > @@ -1536,10 +1590,9 @@ void vhost_dev_stop(struct vhost_dev *hdev, VirtIODevice *vdev)
> >                               hdev->vq_index + i);
> >      }
> >  
> > -    if (vhost_dev_has_iommu(hdev)) {
> > +    if (true) {
> 
> ...here. Besides that:
> 
> Reviewed-by: Peter Xu <peterx@redhat.com>
> 
> Since this patchset depends on vtd vfio series and fixes its breakage
> to vhost, I'll pick them up for consistency for next post of vtd vfio
> series as well.
> 
> Thanks,
> 
> -- peterx

Sounds good. It's best to order patches in a way that avoids
breakages even for people that bisect though.
Might require some patch squashing.
Eric Blake March 21, 2017, 1:21 a.m. UTC | #3
On 03/20/2017 08:12 PM, Michael S. Tsirkin wrote:

>>
>> Since this patchset depends on vtd vfio series and fixes its breakage
>> to vhost, I'll pick them up for consistency for next post of vtd vfio
>> series as well.
>>
>> Thanks,
>>
>> -- peterx
> 
> Sounds good. It's best to order patches in a way that avoids
> breakages even for people that bisect though.
> Might require some patch squashing.

Indeed - a patch submitted with 'fixup' in the title is usually best
incorporated by squashing into a prior patch that has not actually
landed in master, rather than as a standalone patch.

But if you do post this to master as a standalone patch, please fix the
subject line: s/genearlize/generalize/
Peter Xu March 21, 2017, 1:39 a.m. UTC | #4
On Mon, Mar 20, 2017 at 08:21:44PM -0500, Eric Blake wrote:
> On 03/20/2017 08:12 PM, Michael S. Tsirkin wrote:
> 
> >>
> >> Since this patchset depends on vtd vfio series and fixes its breakage
> >> to vhost, I'll pick them up for consistency for next post of vtd vfio
> >> series as well.
> >>
> >> Thanks,
> >>
> >> -- peterx
> > 
> > Sounds good. It's best to order patches in a way that avoids
> > breakages even for people that bisect though.
> > Might require some patch squashing.
> 
> Indeed - a patch submitted with 'fixup' in the title is usually best
> incorporated by squashing into a prior patch that has not actually
> landed in master, rather than as a standalone patch.
> 
> But if you do post this to master as a standalone patch, please fix the
> subject line: s/genearlize/generalize/

Will do. Thanks!

-- peterx
Jason Wang March 29, 2017, 2:57 a.m. UTC | #5
On 03/21/2017 09:39 AM, Peter Xu wrote:
> On Mon, Mar 20, 2017 at 08:21:44PM -0500, Eric Blake wrote:
>> On 03/20/2017 08:12 PM, Michael S. Tsirkin wrote:
>>
>>>> Since this patchset depends on vtd vfio series and fixes its breakage
>>>> to vhost, I'll pick them up for consistency for next post of vtd vfio
>>>> series as well.
>>>>
>>>> Thanks,
>>>>
>>>> -- peterx
>>> Sounds good. It's best to order patches in a way that avoids
>>> breakages even for people that bisect though.
>>> Might require some patch squashing.
>> Indeed - a patch submitted with 'fixup' in the title is usually best
>> incorporated by squashing into a prior patch that has not actually
>> landed in master, rather than as a standalone patch.
>>
>> But if you do post this to master as a standalone patch, please fix the
>> subject line: s/genearlize/generalize/
> Will do. Thanks!
>
> -- peterx

Looks like the assumption was broken by the introduction of the bus
master container, so this patch is needed for 2.9. Will post a formal
patch for this.
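
Concretely, the check this patch drops from vhost_dev_has_iommu() was

    return memory_region_is_iommu(dma_as->root) &&
           virtio_host_has_feature(vdev, VIRTIO_F_IOMMU_PLATFORM);

and with the bus master container in place dma_as->root appears to be a
plain container region rather than the IOMMU region itself, so the
memory_region_is_iommu() test on the root fails and vhost never wires up
its IOTLB notifier. Walking dma_as with a memory listener, as this patch
does, avoids relying on the shape of as->root.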

Thanks

Patch

diff --git a/hw/virtio/vhost.c b/hw/virtio/vhost.c
index ccf8b2e..45f75da 100644
--- a/hw/virtio/vhost.c
+++ b/hw/virtio/vhost.c
@@ -425,10 +425,8 @@  static inline void vhost_dev_log_resize(struct vhost_dev *dev, uint64_t size)
 static int vhost_dev_has_iommu(struct vhost_dev *dev)
 {
     VirtIODevice *vdev = dev->vdev;
-    AddressSpace *dma_as = vdev->dma_as;
 
-    return memory_region_is_iommu(dma_as->root) &&
-           virtio_host_has_feature(vdev, VIRTIO_F_IOMMU_PLATFORM);
+    return virtio_host_has_feature(vdev, VIRTIO_F_IOMMU_PLATFORM);
 }
 
 static void *vhost_memory_map(struct vhost_dev *dev, hwaddr addr,
@@ -720,6 +718,69 @@  static void vhost_region_del(MemoryListener *listener,
     }
 }
 
+static void vhost_iommu_unmap_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb)
+{
+    struct vhost_iommu *iommu = container_of(n, struct vhost_iommu, n);
+    struct vhost_dev *hdev = iommu->hdev;
+
+    if (hdev->vhost_ops->vhost_invalidate_device_iotlb(hdev,
+                                                       iotlb->iova,
+                                                       iotlb->addr_mask + 1)) {
+        error_report("Fail to invalidate device iotlb");
+    }
+}
+
+static void vhost_iommu_region_add(MemoryListener *listener,
+                                   MemoryRegionSection *section)
+{
+    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
+                                         iommu_listener);
+    struct vhost_iommu *iommu;
+    Int128 llend;
+
+    if (!memory_region_is_iommu(section->mr)) {
+        return;
+    }
+
+    llend = int128_make64(section->offset_within_address_space);
+    llend = int128_add(llend, section->size);
+    llend = int128_sub(llend, int128_one());
+
+    iommu = g_malloc0(sizeof(*iommu));
+    iommu->mr = section->mr;
+    iommu->hdev = dev;
+    iommu_notifier_init(&iommu->n, vhost_iommu_unmap_notify,
+                        IOMMU_NOTIFIER_UNMAP,
+                        section->offset_within_region,
+                        int128_get64(llend));
+    memory_region_register_iommu_notifier(section->mr, &iommu->n);
+    /* TODO: can replay help performance here? */
+    QLIST_INSERT_HEAD(&dev->iommu_list, iommu, iommu_next);
+}
+
+static void vhost_iommu_region_del(MemoryListener *listener,
+                                   MemoryRegionSection *section)
+{
+    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
+                                         iommu_listener);
+    struct vhost_iommu *iommu;
+
+    if (!memory_region_is_iommu(section->mr)) {
+        return;
+    }
+
+    QLIST_FOREACH(iommu, &dev->iommu_list, iommu_next) {
+        if (iommu->mr == section->mr &&
+            iommu->n.start == section->offset_within_region) {
+            memory_region_unregister_iommu_notifier(iommu->mr,
+                                                    &iommu->n);
+            QLIST_REMOVE(iommu, iommu_next);
+            g_free(iommu);
+            break;
+        }
+    }
+}
+
 static void vhost_region_nop(MemoryListener *listener,
                              MemoryRegionSection *section)
 {
@@ -1161,17 +1222,6 @@  static void vhost_virtqueue_cleanup(struct vhost_virtqueue *vq)
     event_notifier_cleanup(&vq->masked_notifier);
 }
 
-static void vhost_iommu_unmap_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb)
-{
-    struct vhost_dev *hdev = container_of(n, struct vhost_dev, n);
-
-    if (hdev->vhost_ops->vhost_invalidate_device_iotlb(hdev,
-                                                       iotlb->iova,
-                                                       iotlb->addr_mask + 1)) {
-        error_report("Fail to invalidate device iotlb");
-    }
-}
-
 int vhost_dev_init(struct vhost_dev *hdev, void *opaque,
                    VhostBackendType backend_type, uint32_t busyloop_timeout)
 {
@@ -1244,6 +1294,11 @@  int vhost_dev_init(struct vhost_dev *hdev, void *opaque,
         .priority = 10
     };
 
+    hdev->iommu_listener = (MemoryListener) {
+        .region_add = vhost_iommu_region_add,
+        .region_del = vhost_iommu_region_del,
+    };
+
     iommu_notifier_init(&hdev->n, vhost_iommu_unmap_notify,
                         IOMMU_NOTIFIER_UNMAP, 0, ~0ULL);
 
@@ -1454,9 +1509,8 @@  int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev)
         goto fail_features;
     }
 
-    if (vhost_dev_has_iommu(hdev)) {
-        memory_region_register_iommu_notifier(vdev->dma_as->root,
-                                              &hdev->n);
+    if (true) {
+        memory_listener_register(&hdev->iommu_listener, vdev->dma_as);
     }
 
     r = hdev->vhost_ops->vhost_set_mem_table(hdev, hdev->mem);
@@ -1536,10 +1590,9 @@  void vhost_dev_stop(struct vhost_dev *hdev, VirtIODevice *vdev)
                              hdev->vq_index + i);
     }
 
-    if (vhost_dev_has_iommu(hdev)) {
+    if (true) {
         hdev->vhost_ops->vhost_set_iotlb_callback(hdev, false);
-        memory_region_unregister_iommu_notifier(vdev->dma_as->root,
-                                                &hdev->n);
+        memory_listener_unregister(&hdev->iommu_listener);
     }
     vhost_log_put(hdev, true);
     hdev->started = false;
diff --git a/include/hw/virtio/vhost.h b/include/hw/virtio/vhost.h
index 52f633e..4d89dcd 100644
--- a/include/hw/virtio/vhost.h
+++ b/include/hw/virtio/vhost.h
@@ -37,10 +37,19 @@  struct vhost_log {
     vhost_log_chunk_t *log;
 };
 
+struct vhost_dev;
+struct vhost_iommu {
+    struct vhost_dev *hdev;
+    MemoryRegion *mr;
+    IOMMUNotifier n;
+    QLIST_ENTRY(vhost_iommu) iommu_next;
+};
+
 struct vhost_memory;
 struct vhost_dev {
     VirtIODevice *vdev;
     MemoryListener memory_listener;
+    MemoryListener iommu_listener;
     struct vhost_memory *mem;
     int n_mem_sections;
     MemoryRegionSection *mem_sections;
@@ -64,6 +73,7 @@  struct vhost_dev {
     void *opaque;
     struct vhost_log *log;
     QLIST_ENTRY(vhost_dev) entry;
+    QLIST_HEAD(,vhost_iommu) iommu_list;
     IOMMUNotifier n;
 };