[1/3] iommu/amd/io-pgtable: Implement map_pages io_pgtable_ops callback

Message ID: 20230614162535.8637-2-khalid.elmously@canonical.com
State: New
Series: backport "iommu/amd: Add map/unmap_pages() iommu_domain_ops callback support"

Commit Message

Khalid Elmously June 14, 2023, 4:25 p.m. UTC
From: Vasant Hegde <vasant.hegde@amd.com>

BugLink: https://bugs.launchpad.net/bugs/2023313

Implement the io_pgtable_ops->map_pages() callback for the AMD driver.
Also deprecate the io_pgtable->map callback.

Suggested-by: Robin Murphy <robin.murphy@arm.com>
Signed-off-by: Vasant Hegde <vasant.hegde@amd.com>
Link: https://lore.kernel.org/r/20220825063939.8360-2-vasant.hegde@amd.com
Signed-off-by: Joerg Roedel <jroedel@suse.de>
(cherry picked from commit 8cc233dec3137a2d243e6996e425c32032672c88)
Signed-off-by: Khalid Elmously <khalid.elmously@canonical.com>
---
 drivers/iommu/amd/io_pgtable.c | 59 ++++++++++++++++++++--------------
 1 file changed, 34 insertions(+), 25 deletions(-)
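
For readers less familiar with the map_pages contract being adopted here, the
following stand-alone sketch (plain user-space C with illustrative names such
as demo_map_pages and demo_map_one, not kernel code) shows the shape of the
loop the patch introduces: map pgcount pages of pgsize bytes starting at
(iova, paddr), advancing both addresses each iteration and accumulating
*mapped so that, on failure, the caller knows how much was already
established and can unmap it.

/* Stand-alone sketch of the map_pages contract; illustrative names only. */
#include <stdio.h>
#include <stddef.h>

/* Stand-in for alloc_pte() + PTE installation; always succeeds here. */
static int demo_map_one(unsigned long iova, unsigned long long paddr,
			size_t pgsize)
{
	printf("map iova 0x%lx -> paddr 0x%llx (%zu bytes)\n",
	       iova, paddr, pgsize);
	return 0;
}

/* Map pgcount pages of pgsize bytes, reporting progress via *mapped. */
static int demo_map_pages(unsigned long iova, unsigned long long paddr,
			  size_t pgsize, size_t pgcount, size_t *mapped)
{
	int ret;

	while (pgcount > 0) {
		ret = demo_map_one(iova, paddr, pgsize);
		if (ret)
			return ret;	/* *mapped already covers prior pages */

		iova   += pgsize;
		paddr  += pgsize;
		pgcount--;
		if (mapped)
			*mapped += pgsize;
	}
	return 0;
}

int main(void)
{
	size_t mapped = 0;

	/* One call maps four 4 KiB pages instead of four ->map calls. */
	demo_map_pages(0x10000, 0x40000, 4096, 4, &mapped);
	printf("mapped %zu bytes total\n", mapped);
	return 0;
}

The last hunk of the patch then points ops.map_pages at the new helper, so
callers can hand over a whole run of equal-sized pages in one indirect call,
which is what allows the single-page ops.map path to be deprecated.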

Patch

diff --git a/drivers/iommu/amd/io_pgtable.c b/drivers/iommu/amd/io_pgtable.c
index 6608d1717574..20d4b1091ce1 100644
--- a/drivers/iommu/amd/io_pgtable.c
+++ b/drivers/iommu/amd/io_pgtable.c
@@ -362,8 +362,9 @@  static void free_clear_pte(u64 *pte, u64 pteval, struct list_head *freelist)
  * supporting all features of AMD IOMMU page tables like level skipping
  * and full 64 bit address spaces.
  */
-static int iommu_v1_map_page(struct io_pgtable_ops *ops, unsigned long iova,
-			  phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
+static int iommu_v1_map_pages(struct io_pgtable_ops *ops, unsigned long iova,
+			      phys_addr_t paddr, size_t pgsize, size_t pgcount,
+			      int prot, gfp_t gfp, size_t *mapped)
 {
 	struct protection_domain *dom = io_pgtable_ops_to_domain(ops);
 	LIST_HEAD(freelist);
@@ -371,39 +372,47 @@  static int iommu_v1_map_page(struct io_pgtable_ops *ops, unsigned long iova,
 	u64 __pte, *pte;
 	int ret, i, count;
 
-	BUG_ON(!IS_ALIGNED(iova, size));
-	BUG_ON(!IS_ALIGNED(paddr, size));
+	BUG_ON(!IS_ALIGNED(iova, pgsize));
+	BUG_ON(!IS_ALIGNED(paddr, pgsize));
 
 	ret = -EINVAL;
 	if (!(prot & IOMMU_PROT_MASK))
 		goto out;
 
-	count = PAGE_SIZE_PTE_COUNT(size);
-	pte   = alloc_pte(dom, iova, size, NULL, gfp, &updated);
+	while (pgcount > 0) {
+		count = PAGE_SIZE_PTE_COUNT(pgsize);
+		pte   = alloc_pte(dom, iova, pgsize, NULL, gfp, &updated);
 
-	ret = -ENOMEM;
-	if (!pte)
-		goto out;
+		ret = -ENOMEM;
+		if (!pte)
+			goto out;
 
-	for (i = 0; i < count; ++i)
-		free_clear_pte(&pte[i], pte[i], &freelist);
+		for (i = 0; i < count; ++i)
+			free_clear_pte(&pte[i], pte[i], &freelist);
 
-	if (!list_empty(&freelist))
-		updated = true;
+		if (!list_empty(&freelist))
+			updated = true;
 
-	if (count > 1) {
-		__pte = PAGE_SIZE_PTE(__sme_set(paddr), size);
-		__pte |= PM_LEVEL_ENC(7) | IOMMU_PTE_PR | IOMMU_PTE_FC;
-	} else
-		__pte = __sme_set(paddr) | IOMMU_PTE_PR | IOMMU_PTE_FC;
+		if (count > 1) {
+			__pte = PAGE_SIZE_PTE(__sme_set(paddr), pgsize);
+			__pte |= PM_LEVEL_ENC(7) | IOMMU_PTE_PR | IOMMU_PTE_FC;
+		} else
+			__pte = __sme_set(paddr) | IOMMU_PTE_PR | IOMMU_PTE_FC;
 
-	if (prot & IOMMU_PROT_IR)
-		__pte |= IOMMU_PTE_IR;
-	if (prot & IOMMU_PROT_IW)
-		__pte |= IOMMU_PTE_IW;
+		if (prot & IOMMU_PROT_IR)
+			__pte |= IOMMU_PTE_IR;
+		if (prot & IOMMU_PROT_IW)
+			__pte |= IOMMU_PTE_IW;
 
-	for (i = 0; i < count; ++i)
-		pte[i] = __pte;
+		for (i = 0; i < count; ++i)
+			pte[i] = __pte;
+
+		iova  += pgsize;
+		paddr += pgsize;
+		pgcount--;
+		if (mapped)
+			*mapped += pgsize;
+	}
 
 	ret = 0;
 
@@ -516,7 +525,7 @@  static struct io_pgtable *v1_alloc_pgtable(struct io_pgtable_cfg *cfg, void *coo
 	cfg->oas            = IOMMU_OUT_ADDR_BIT_SIZE,
 	cfg->tlb            = &v1_flush_ops;
 
-	pgtable->iop.ops.map          = iommu_v1_map_page;
+	pgtable->iop.ops.map_pages    = iommu_v1_map_pages;
 	pgtable->iop.ops.unmap        = iommu_v1_unmap_page;
 	pgtable->iop.ops.iova_to_phys = iommu_v1_iova_to_phys;