[kernel,2/4] powerpc/mm/iommu/vfio_spapr_tce: Change mm_iommu_get to reference a region

Message ID 20181015092416.47380-3-aik@ozlabs.ru
State New
Headers show
Series
  • vfio/spapr_tce: Reworks for NVIDIA V100 + P9 passthrough (part 1)
Related show

Commit Message

Alexey Kardashevskiy Oct. 15, 2018, 9:24 a.m.
We are going to add another helper to preregister device memory so
instead of having mm_iommu_new() which pre-registers the normal memory
and references the region, we need separate helpers for pre-registering
and referencing.

To make the mm_iommu_get name reflect what it is supposed to do, this
changes mm_iommu_get() to reference the region so from now on for every
mm_iommu_get() we need a matching mm_iommu_put().

Signed-off-by: Alexey Kardashevskiy <aik@ozlabs.ru>
---
 arch/powerpc/mm/mmu_context_iommu.c |  5 +++++
 drivers/vfio/vfio_iommu_spapr_tce.c | 33 ++++++++++++++++++++++-----------
 2 files changed, 27 insertions(+), 11 deletions(-)

Comments

David Gibson Oct. 17, 2018, 12:46 a.m. | #1
On Mon, Oct 15, 2018 at 08:24:14PM +1100, Alexey Kardashevskiy wrote:
> We are going to add another helper to preregister device memory so
> instead of having mm_iommu_new() which pre-registers the normal memory
> and references the region, we need separate helpers for pre-registering
> and referencing.
> 
> To make the mm_iommu_get name reflect what it is supposed to do, this
> changes mm_iommu_get() to reference the region so from now on for every
> mm_iommu_get() we need a matching mm_iommu_put().
> 
> Signed-off-by: Alexey Kardashevskiy <aik@ozlabs.ru>

.. ah, I see.

I think this should be folded with the first patch, so we don't have
an interim step where mm_iommu_get() has a misleading name.

> ---
>  arch/powerpc/mm/mmu_context_iommu.c |  5 +++++
>  drivers/vfio/vfio_iommu_spapr_tce.c | 33 ++++++++++++++++++++++-----------
>  2 files changed, 27 insertions(+), 11 deletions(-)
> 
> diff --git a/arch/powerpc/mm/mmu_context_iommu.c b/arch/powerpc/mm/mmu_context_iommu.c
> index 8eeb99d..a8c4a3c 100644
> --- a/arch/powerpc/mm/mmu_context_iommu.c
> +++ b/arch/powerpc/mm/mmu_context_iommu.c
> @@ -373,13 +373,18 @@ struct mm_iommu_table_group_mem_t *mm_iommu_get(struct mm_struct *mm,
>  {
>  	struct mm_iommu_table_group_mem_t *mem, *ret = NULL;
>  
> +	mutex_lock(&mem_list_mutex);
> +
>  	list_for_each_entry_rcu(mem, &mm->context.iommu_group_mem_list, next) {
>  		if ((mem->ua == ua) && (mem->entries == entries)) {
>  			ret = mem;
> +			++mem->used;
>  			break;
>  		}
>  	}
>  
> +	mutex_unlock(&mem_list_mutex);
> +
>  	return ret;
>  }
>  EXPORT_SYMBOL_GPL(mm_iommu_get);
> diff --git a/drivers/vfio/vfio_iommu_spapr_tce.c b/drivers/vfio/vfio_iommu_spapr_tce.c
> index 1701798..56db071 100644
> --- a/drivers/vfio/vfio_iommu_spapr_tce.c
> +++ b/drivers/vfio/vfio_iommu_spapr_tce.c
> @@ -151,7 +151,8 @@ static long tce_iommu_unregister_pages(struct tce_container *container,
>  {
>  	struct mm_iommu_table_group_mem_t *mem;
>  	struct tce_iommu_prereg *tcemem;
> -	bool found = false;
> +	bool found;
> +	long ret;
>  
>  	if ((vaddr & ~PAGE_MASK) || (size & ~PAGE_MASK))
>  		return -EINVAL;
> @@ -168,9 +169,13 @@ static long tce_iommu_unregister_pages(struct tce_container *container,
>  	}
>  
>  	if (!found)
> -		return -ENOENT;
> +		ret = -ENOENT;
> +	else
> +		ret = tce_iommu_prereg_free(container, tcemem);
>  
> -	return tce_iommu_prereg_free(container, tcemem);
> +	mm_iommu_put(container->mm, mem);
> +
> +	return ret;
>  }
>  
>  static long tce_iommu_register_pages(struct tce_container *container,
> @@ -188,19 +193,21 @@ static long tce_iommu_register_pages(struct tce_container *container,
>  	mem = mm_iommu_get(container->mm, vaddr, entries);
>  	if (mem) {
>  		list_for_each_entry(tcemem, &container->prereg_list, next) {
> -			if (tcemem->mem == mem)
> -				return -EBUSY;
> +			if (tcemem->mem == mem) {
> +				ret = -EBUSY;
> +				goto put_exit;
> +			}
>  		}
> +	} else {
> +		ret = mm_iommu_new(container->mm, vaddr, entries, &mem);
> +		if (ret)
> +			return ret;
>  	}
>  
> -	ret = mm_iommu_new(container->mm, vaddr, entries, &mem);
> -	if (ret)
> -		return ret;
> -
>  	tcemem = kzalloc(sizeof(*tcemem), GFP_KERNEL);
>  	if (!tcemem) {
> -		mm_iommu_put(container->mm, mem);
> -		return -ENOMEM;
> +		ret = -ENOMEM;
> +		goto put_exit;
>  	}
>  
>  	tcemem->mem = mem;
> @@ -209,6 +216,10 @@ static long tce_iommu_register_pages(struct tce_container *container,
>  	container->enabled = true;
>  
>  	return 0;
> +
> +put_exit:
> +	mm_iommu_put(container->mm, mem);
> +	return ret;
>  }
>  
>  static bool tce_page_is_contained(struct page *page, unsigned page_shift)

Patch

diff --git a/arch/powerpc/mm/mmu_context_iommu.c b/arch/powerpc/mm/mmu_context_iommu.c
index 8eeb99d..a8c4a3c 100644
--- a/arch/powerpc/mm/mmu_context_iommu.c
+++ b/arch/powerpc/mm/mmu_context_iommu.c
@@ -373,13 +373,18 @@  struct mm_iommu_table_group_mem_t *mm_iommu_get(struct mm_struct *mm,
 {
 	struct mm_iommu_table_group_mem_t *mem, *ret = NULL;
 
+	mutex_lock(&mem_list_mutex);
+
 	list_for_each_entry_rcu(mem, &mm->context.iommu_group_mem_list, next) {
 		if ((mem->ua == ua) && (mem->entries == entries)) {
 			ret = mem;
+			++mem->used;
 			break;
 		}
 	}
 
+	mutex_unlock(&mem_list_mutex);
+
 	return ret;
 }
 EXPORT_SYMBOL_GPL(mm_iommu_get);
diff --git a/drivers/vfio/vfio_iommu_spapr_tce.c b/drivers/vfio/vfio_iommu_spapr_tce.c
index 1701798..56db071 100644
--- a/drivers/vfio/vfio_iommu_spapr_tce.c
+++ b/drivers/vfio/vfio_iommu_spapr_tce.c
@@ -151,7 +151,8 @@  static long tce_iommu_unregister_pages(struct tce_container *container,
 {
 	struct mm_iommu_table_group_mem_t *mem;
 	struct tce_iommu_prereg *tcemem;
-	bool found = false;
+	bool found;
+	long ret;
 
 	if ((vaddr & ~PAGE_MASK) || (size & ~PAGE_MASK))
 		return -EINVAL;
@@ -168,9 +169,13 @@  static long tce_iommu_unregister_pages(struct tce_container *container,
 	}
 
 	if (!found)
-		return -ENOENT;
+		ret = -ENOENT;
+	else
+		ret = tce_iommu_prereg_free(container, tcemem);
 
-	return tce_iommu_prereg_free(container, tcemem);
+	mm_iommu_put(container->mm, mem);
+
+	return ret;
 }
 
 static long tce_iommu_register_pages(struct tce_container *container,
@@ -188,19 +193,21 @@  static long tce_iommu_register_pages(struct tce_container *container,
 	mem = mm_iommu_get(container->mm, vaddr, entries);
 	if (mem) {
 		list_for_each_entry(tcemem, &container->prereg_list, next) {
-			if (tcemem->mem == mem)
-				return -EBUSY;
+			if (tcemem->mem == mem) {
+				ret = -EBUSY;
+				goto put_exit;
+			}
 		}
+	} else {
+		ret = mm_iommu_new(container->mm, vaddr, entries, &mem);
+		if (ret)
+			return ret;
 	}
 
-	ret = mm_iommu_new(container->mm, vaddr, entries, &mem);
-	if (ret)
-		return ret;
-
 	tcemem = kzalloc(sizeof(*tcemem), GFP_KERNEL);
 	if (!tcemem) {
-		mm_iommu_put(container->mm, mem);
-		return -ENOMEM;
+		ret = -ENOMEM;
+		goto put_exit;
 	}
 
 	tcemem->mem = mem;
@@ -209,6 +216,10 @@  static long tce_iommu_register_pages(struct tce_container *container,
 	container->enabled = true;
 
 	return 0;
+
+put_exit:
+	mm_iommu_put(container->mm, mem);
+	return ret;
 }
 
 static bool tce_page_is_contained(struct page *page, unsigned page_shift)