diff mbox series

[v2,5/7] hw/arm/smmuv3: Enforce invalidation on a power of two range

Message ID 20210225091435.644762-6-eric.auger@redhat.com
State New
Headers show
Series Some vIOMMU fixes | expand

Commit Message

Eric Auger Feb. 25, 2021, 9:14 a.m. UTC
As of today, the driver can invalidate a number of pages that is
not a power of 2. However IOTLB unmap notifications and internal
IOTLB invalidations work with masks leading to erroneous
invalidations.

In case the range is not a power of 2, split invalidations into
power of 2 invalidations.

When looking for a single page entry in the vSMMU internal IOTLB,
let's make sure that if the entry is not found using a
g_hash_table_remove() we iterate over all the entries to find a
potential range that overlaps it.

Signed-off-by: Eric Auger <eric.auger@redhat.com>
---
 hw/arm/smmu-common.c | 30 ++++++++++++++++++------------
 hw/arm/smmuv3.c      | 24 ++++++++++++++++++++----
 2 files changed, 38 insertions(+), 16 deletions(-)

Comments

Peter Maydell March 8, 2021, 4:44 p.m. UTC | #1
On Thu, 25 Feb 2021 at 09:15, Eric Auger <eric.auger@redhat.com> wrote:
>
> As of today, the driver can invalide a number of pages that is

"invalidate"

> not a power of 2. However IOTLB unmap notifications and internal
> IOTLB invalidations work with masks leading to erroneous
> invalidations.
>
> In case the range is not a power of 2, split invalidations into
> power of 2 invalidations.
>
> When looking for a single page entry in the vSMMU internal IOTLB,
> let's make sure that if the entry is not found using a
> g_hash_table_remove() we iterate over all the entries to find a
> potential range that overlaps it.
>
> Signed-off-by: Eric Auger <eric.auger@redhat.com>
> ---
>  hw/arm/smmu-common.c | 30 ++++++++++++++++++------------
>  hw/arm/smmuv3.c      | 24 ++++++++++++++++++++----
>  2 files changed, 38 insertions(+), 16 deletions(-)
>
> diff --git a/hw/arm/smmu-common.c b/hw/arm/smmu-common.c
> index e9ca3aebb2..84d2c62c26 100644
> --- a/hw/arm/smmu-common.c
> +++ b/hw/arm/smmu-common.c
> @@ -151,22 +151,28 @@ inline void
>  smmu_iotlb_inv_iova(SMMUState *s, int asid, dma_addr_t iova,
>                      uint8_t tg, uint64_t num_pages, uint8_t ttl)
>  {
> +    /* if tg is not set we use 4KB range invalidation */
> +    uint8_t granule = tg ? tg * 2 + 10 : 12;

I see this in-passing side-steps the question about how
we should be handling the lookup-by-key when the tg isn't set.

> +
>      if (ttl && (num_pages == 1) && (asid >= 0)) {
>          SMMUIOTLBKey key = smmu_get_iotlb_key(asid, iova, tg, ttl);
>
> -        g_hash_table_remove(s->iotlb, &key);
> -    } else {
> -        /* if tg is not set we use 4KB range invalidation */
> -        uint8_t granule = tg ? tg * 2 + 10 : 12;
> -
> -        SMMUIOTLBPageInvInfo info = {
> -            .asid = asid, .iova = iova,
> -            .mask = (num_pages * 1 << granule) - 1};
> -
> -        g_hash_table_foreach_remove(s->iotlb,
> -                                    smmu_hash_remove_by_asid_iova,
> -                                    &info);
> +        if (g_hash_table_remove(s->iotlb, &key)) {
> +            return;
> +        }
> +        /*
> +         * if the entry is not found, let's see if it does not
> +         * belong to a larger IOTLB entry
> +         */
>      }
> +
> +    SMMUIOTLBPageInvInfo info = {
> +        .asid = asid, .iova = iova,
> +        .mask = (num_pages * 1 << granule) - 1};
> +
> +    g_hash_table_foreach_remove(s->iotlb,
> +                                smmu_hash_remove_by_asid_iova,
> +                                &info);
>  }
>
>  inline void smmu_iotlb_inv_asid(SMMUState *s, uint16_t asid)
> diff --git a/hw/arm/smmuv3.c b/hw/arm/smmuv3.c
> index bd1f97000d..fdd6332ce5 100644
> --- a/hw/arm/smmuv3.c
> +++ b/hw/arm/smmuv3.c
> @@ -861,7 +861,8 @@ static void smmuv3_s1_range_inval(SMMUState *s, Cmd *cmd)
>      uint16_t vmid = CMD_VMID(cmd);
>      bool leaf = CMD_LEAF(cmd);
>      uint8_t tg = CMD_TG(cmd);
> -    hwaddr num_pages = 1;
> +    uint64_t first_page = 0, last_page;
> +    uint64_t num_pages = 1;
>      int asid = -1;
>
>      if (tg) {
> @@ -874,9 +875,24 @@ static void smmuv3_s1_range_inval(SMMUState *s, Cmd *cmd)
>      if (type == SMMU_CMD_TLBI_NH_VA) {
>          asid = CMD_ASID(cmd);
>      }
> -    trace_smmuv3_s1_range_inval(vmid, asid, addr, tg, num_pages, ttl, leaf);
> -    smmuv3_inv_notifiers_iova(s, asid, addr, tg, num_pages);
> -    smmu_iotlb_inv_iova(s, asid, addr, tg, num_pages, ttl);
> +
> +    /* Split invalidations into ^2 range invalidations */
> +    last_page = num_pages - 1;
> +    while (num_pages) {
> +        uint8_t granule = tg * 2 + 10;
> +        uint64_t mask, count;
> +
> +        mask = dma_aligned_pow2_mask(first_page, last_page, 64 - granule);
> +        count = mask + 1;
> +
> +        trace_smmuv3_s1_range_inval(vmid, asid, addr, tg, count, ttl, leaf);
> +        smmuv3_inv_notifiers_iova(s, asid, addr, tg, count);
> +        smmu_iotlb_inv_iova(s, asid, addr, tg, count, ttl);
> +
> +        num_pages -= count;
> +        first_page += count;
> +        addr += count * BIT_ULL(granule);
> +    }
>  }

This is probably right but I'll wait to review it until I read
the doc comment for dma_aligned_pow2_mask().

-- PMM
diff mbox series

Patch

diff --git a/hw/arm/smmu-common.c b/hw/arm/smmu-common.c
index e9ca3aebb2..84d2c62c26 100644
--- a/hw/arm/smmu-common.c
+++ b/hw/arm/smmu-common.c
@@ -151,22 +151,28 @@  inline void
 smmu_iotlb_inv_iova(SMMUState *s, int asid, dma_addr_t iova,
                     uint8_t tg, uint64_t num_pages, uint8_t ttl)
 {
+    /* if tg is not set we use 4KB range invalidation */
+    uint8_t granule = tg ? tg * 2 + 10 : 12;
+
     if (ttl && (num_pages == 1) && (asid >= 0)) {
         SMMUIOTLBKey key = smmu_get_iotlb_key(asid, iova, tg, ttl);
 
-        g_hash_table_remove(s->iotlb, &key);
-    } else {
-        /* if tg is not set we use 4KB range invalidation */
-        uint8_t granule = tg ? tg * 2 + 10 : 12;
-
-        SMMUIOTLBPageInvInfo info = {
-            .asid = asid, .iova = iova,
-            .mask = (num_pages * 1 << granule) - 1};
-
-        g_hash_table_foreach_remove(s->iotlb,
-                                    smmu_hash_remove_by_asid_iova,
-                                    &info);
+        if (g_hash_table_remove(s->iotlb, &key)) {
+            return;
+        }
+        /*
+         * if the entry is not found, let's see if it does not
+         * belong to a larger IOTLB entry
+         */
     }
+
+    SMMUIOTLBPageInvInfo info = {
+        .asid = asid, .iova = iova,
+        .mask = (num_pages * 1 << granule) - 1};
+
+    g_hash_table_foreach_remove(s->iotlb,
+                                smmu_hash_remove_by_asid_iova,
+                                &info);
 }
 
 inline void smmu_iotlb_inv_asid(SMMUState *s, uint16_t asid)
diff --git a/hw/arm/smmuv3.c b/hw/arm/smmuv3.c
index bd1f97000d..fdd6332ce5 100644
--- a/hw/arm/smmuv3.c
+++ b/hw/arm/smmuv3.c
@@ -861,7 +861,8 @@  static void smmuv3_s1_range_inval(SMMUState *s, Cmd *cmd)
     uint16_t vmid = CMD_VMID(cmd);
     bool leaf = CMD_LEAF(cmd);
     uint8_t tg = CMD_TG(cmd);
-    hwaddr num_pages = 1;
+    uint64_t first_page = 0, last_page;
+    uint64_t num_pages = 1;
     int asid = -1;
 
     if (tg) {
@@ -874,9 +875,24 @@  static void smmuv3_s1_range_inval(SMMUState *s, Cmd *cmd)
     if (type == SMMU_CMD_TLBI_NH_VA) {
         asid = CMD_ASID(cmd);
     }
-    trace_smmuv3_s1_range_inval(vmid, asid, addr, tg, num_pages, ttl, leaf);
-    smmuv3_inv_notifiers_iova(s, asid, addr, tg, num_pages);
-    smmu_iotlb_inv_iova(s, asid, addr, tg, num_pages, ttl);
+
+    /* Split invalidations into ^2 range invalidations */
+    last_page = num_pages - 1;
+    while (num_pages) {
+        uint8_t granule = tg * 2 + 10;
+        uint64_t mask, count;
+
+        mask = dma_aligned_pow2_mask(first_page, last_page, 64 - granule);
+        count = mask + 1;
+
+        trace_smmuv3_s1_range_inval(vmid, asid, addr, tg, count, ttl, leaf);
+        smmuv3_inv_notifiers_iova(s, asid, addr, tg, count);
+        smmu_iotlb_inv_iova(s, asid, addr, tg, count, ttl);
+
+        num_pages -= count;
+        first_page += count;
+        addr += count * BIT_ULL(granule);
+    }
 }
 
 static int smmuv3_cmdq_consume(SMMUv3State *s)