
[v4,06/26] iommu/sva: Register page fault handler

Message ID 20200224182401.353359-7-jean-philippe@linaro.org
State New
Series iommu: Shared Virtual Addressing and SMMUv3 support

Commit Message

Jean-Philippe Brucker Feb. 24, 2020, 6:23 p.m. UTC
From: Jean-Philippe Brucker <jean-philippe.brucker@arm.com>

When enabling SVA, register the fault handler. The device driver will
register an I/O page fault queue before or after calling
iommu_sva_enable(). The fault queue must be flushed before any io_mm is
freed, to make sure that its PASID isn't referenced by any pending fault
and can be reallocated. Add iopf_queue_flush_dev() calls in a few
strategic locations.

Signed-off-by: Jean-Philippe Brucker <jean-philippe@linaro.org>
---
 drivers/iommu/Kconfig     |  1 +
 drivers/iommu/iommu-sva.c | 16 ++++++++++++++++
 2 files changed, 17 insertions(+)
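
For illustration, the enable-time flow described above might look
roughly like the sketch below. my_iommu_enable_sva() is hypothetical,
and the iopf_queue_*() helpers come from the fault queue patch earlier
in this series; their exact signatures changed between versions, so
treat these calls as placeholders rather than the final API.

	static int my_iommu_enable_sva(struct device *dev,
				       struct iommu_sva_param *sva_param)
	{
		struct iopf_queue *queue;
		int ret;

		/* The queue may be set up before or after iommu_sva_enable() */
		queue = iopf_queue_alloc(dev_name(dev));
		if (!queue)
			return -ENOMEM;

		ret = iopf_queue_add_device(queue, dev);
		if (ret) {
			iopf_queue_free(queue);
			return ret;
		}

		/* Registers iommu_queue_iopf as the device's fault handler */
		return iommu_sva_enable(dev, sva_param);
	}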

Comments

Jacob Pan Feb. 26, 2020, 7:39 p.m. UTC | #1
On Mon, 24 Feb 2020 19:23:41 +0100
Jean-Philippe Brucker <jean-philippe@linaro.org> wrote:

> From: Jean-Philippe Brucker <jean-philippe.brucker@arm.com>
> 
> When enabling SVA, register the fault handler. The device driver will
> register an I/O page fault queue before or after calling
> iommu_sva_enable(). The fault queue must be flushed before any io_mm is
> freed, to make sure that its PASID isn't referenced by any pending fault
> and can be reallocated. Add iopf_queue_flush_dev() calls in a few
> strategic locations.
> 
> Signed-off-by: Jean-Philippe Brucker <jean-philippe@linaro.org>
> ---
>  drivers/iommu/Kconfig     |  1 +
>  drivers/iommu/iommu-sva.c | 16 ++++++++++++++++
>  2 files changed, 17 insertions(+)
> 
> diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
> index e4a42e1708b4..211684e785ea 100644
> --- a/drivers/iommu/Kconfig
> +++ b/drivers/iommu/Kconfig
> @@ -106,6 +106,7 @@ config IOMMU_DMA
>  config IOMMU_SVA
>  	bool
>  	select IOASID
> +	select IOMMU_PAGE_FAULT
>  	select IOMMU_API
>  	select MMU_NOTIFIER
>  
> diff --git a/drivers/iommu/iommu-sva.c b/drivers/iommu/iommu-sva.c
> index bfd0c477f290..494ca0824e4b 100644
> --- a/drivers/iommu/iommu-sva.c
> +++ b/drivers/iommu/iommu-sva.c
> @@ -366,6 +366,8 @@ static void io_mm_release(struct mmu_notifier *mn, struct mm_struct *mm)
>  			dev_WARN(dev, "possible leak of PASID %u",
>  				 io_mm->pasid);
>  
> +		iopf_queue_flush_dev(dev, io_mm->pasid);
> +
>  		/* unbind() frees the bond, we just detach it */
>  		io_mm_detach_locked(bond);
>  	}
> @@ -442,11 +444,20 @@ static void iommu_sva_unbind_locked(struct iommu_bond *bond)
> 
>  void iommu_sva_unbind_generic(struct iommu_sva *handle)
>  {
> +	int pasid;
>  	struct iommu_param *param = handle->dev->iommu_param;
>  
>  	if (WARN_ON(!param))
>  		return;
>  
> +	/*
> +	 * Caller stopped the device from issuing PASIDs, now make sure they are
> +	 * out of the fault queue.
> +	 */
> +	pasid = iommu_sva_get_pasid_generic(handle);
> +	if (pasid != IOMMU_PASID_INVALID)
> +		iopf_queue_flush_dev(handle->dev, pasid);
> +
I have an ordering concern.
The caller can only stop the device from issuing page requests, but
there may still be in-flight requests inside the IOMMU. If we flush here
before clearing the PASID context, new requests might come in before the
detach.
How about detaching first, then flushing? Anything that comes in after
the detach would simply fault, and the flush would be clean.
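
In code terms, the suggested reordering would look something like this
sketch (ordering only; it glosses over the locking and over whether the
PASID can still be flushed once the bond is gone):

	pasid = iommu_sva_get_pasid_generic(handle);

	mutex_lock(&param->sva_lock);
	mutex_lock(&iommu_sva_lock);
	/* Detach first, so requests arriving after this point just fault */
	iommu_sva_unbind_locked(to_iommu_bond(handle));
	mutex_unlock(&iommu_sva_lock);
	mutex_unlock(&param->sva_lock);

	/* Then drain the requests already queued for this PASID */
	if (pasid != IOMMU_PASID_INVALID)
		iopf_queue_flush_dev(handle->dev, pasid);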

>  	mutex_lock(&param->sva_lock);
>  	mutex_lock(&iommu_sva_lock);
>  	iommu_sva_unbind_locked(to_iommu_bond(handle));
> @@ -484,6 +495,10 @@ int iommu_sva_enable(struct device *dev, struct iommu_sva_param *sva_param)
>  		goto err_unlock;
>  	}
>  
> +	ret = iommu_register_device_fault_handler(dev, iommu_queue_iopf, dev);
> +	if (ret)
> +		goto err_unlock;
> +
>  	dev->iommu_param->sva_param = new_param;
>  	mutex_unlock(&param->sva_lock);
>  	return 0;
> @@ -521,6 +536,7 @@ int iommu_sva_disable(struct device *dev)
>  		goto out_unlock;
>  	}
>  
> +	iommu_unregister_device_fault_handler(dev);
>  	kfree(param->sva_param);
>  	param->sva_param = NULL;
>  out_unlock:

[Jacob Pan]
Jean-Philippe Brucker Feb. 28, 2020, 2:44 p.m. UTC | #2
On Wed, Feb 26, 2020 at 11:39:59AM -0800, Jacob Pan wrote:
> > @@ -442,11 +444,20 @@ static void iommu_sva_unbind_locked(struct iommu_bond *bond)
> > 
> >  void iommu_sva_unbind_generic(struct iommu_sva *handle)
> >  {
> > +	int pasid;
> >  	struct iommu_param *param = handle->dev->iommu_param;
> >  
> >  	if (WARN_ON(!param))
> >  		return;
> >  
> > +	/*
> > +	 * Caller stopped the device from issuing PASIDs, now make sure they are
> > +	 * out of the fault queue.
> > +	 */
> > +	pasid = iommu_sva_get_pasid_generic(handle);
> > +	if (pasid != IOMMU_PASID_INVALID)
> > +		iopf_queue_flush_dev(handle->dev, pasid);
> > +
> I have an ordering concern.
> The caller can only stop the device from issuing page requests, but
> there may still be in-flight requests inside the IOMMU. If we flush here
> before clearing the PASID context, new requests might come in before the
> detach.

The goal of this flush is also to clear the IOMMU PRI queue. It calls
the IOMMU driver's flush() callback before flushing the workqueue, so
when it returns there shouldn't be any pending faults left.
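
Concretely, the two-phase flush is meant to work something like the
sketch below. dev_to_iopf_queue() and the flush/wq member names are
assumptions for illustration, not necessarily the exact fields used in
this series:

	int iopf_queue_flush_dev(struct device *dev, int pasid)
	{
		/* dev_to_iopf_queue() is a hypothetical lookup helper */
		struct iopf_queue *queue = dev_to_iopf_queue(dev);

		if (!queue)
			return -ENODEV;

		/*
		 * First let the IOMMU driver drain its hardware PRI queue,
		 * pushing any fault the device already sent into the
		 * workqueue...
		 */
		queue->flush(queue->flush_arg, dev, pasid);

		/* ...then wait for the workqueue to handle those faults. */
		flush_workqueue(queue->wq);
		return 0;
	}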

Thanks,
Jean

> How about detaching first, then flushing? Anything that comes in after
> the detach would simply fault, and the flush would be clean.
> 
> >  	mutex_lock(&param->sva_lock);
> >  	mutex_lock(&iommu_sva_lock);
> >  	iommu_sva_unbind_locked(to_iommu_bond(handle));
> > @@ -484,6 +495,10 @@ int iommu_sva_enable(struct device *dev, struct iommu_sva_param *sva_param)
> >  		goto err_unlock;
> >  	}
> >  
> > +	ret = iommu_register_device_fault_handler(dev, iommu_queue_iopf, dev);
> > +	if (ret)
> > +		goto err_unlock;
> > +
> >  	dev->iommu_param->sva_param = new_param;
> >  	mutex_unlock(&param->sva_lock);
> >  	return 0;
> > @@ -521,6 +536,7 @@ int iommu_sva_disable(struct device *dev)
> >  		goto out_unlock;
> >  	}
> >  
> > +	iommu_unregister_device_fault_handler(dev);
> >  	kfree(param->sva_param);
> >  	param->sva_param = NULL;
> >  out_unlock:
> 
> [Jacob Pan]

Patch

diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index e4a42e1708b4..211684e785ea 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -106,6 +106,7 @@  config IOMMU_DMA
 config IOMMU_SVA
 	bool
 	select IOASID
+	select IOMMU_PAGE_FAULT
 	select IOMMU_API
 	select MMU_NOTIFIER
 
diff --git a/drivers/iommu/iommu-sva.c b/drivers/iommu/iommu-sva.c
index bfd0c477f290..494ca0824e4b 100644
--- a/drivers/iommu/iommu-sva.c
+++ b/drivers/iommu/iommu-sva.c
@@ -366,6 +366,8 @@  static void io_mm_release(struct mmu_notifier *mn, struct mm_struct *mm)
 			dev_WARN(dev, "possible leak of PASID %u",
 				 io_mm->pasid);
 
+		iopf_queue_flush_dev(dev, io_mm->pasid);
+
 		/* unbind() frees the bond, we just detach it */
 		io_mm_detach_locked(bond);
 	}
@@ -442,11 +444,20 @@  static void iommu_sva_unbind_locked(struct iommu_bond *bond)
 
 void iommu_sva_unbind_generic(struct iommu_sva *handle)
 {
+	int pasid;
 	struct iommu_param *param = handle->dev->iommu_param;
 
 	if (WARN_ON(!param))
 		return;
 
+	/*
+	 * Caller stopped the device from issuing PASIDs, now make sure they are
+	 * out of the fault queue.
+	 */
+	pasid = iommu_sva_get_pasid_generic(handle);
+	if (pasid != IOMMU_PASID_INVALID)
+		iopf_queue_flush_dev(handle->dev, pasid);
+
 	mutex_lock(&param->sva_lock);
 	mutex_lock(&iommu_sva_lock);
 	iommu_sva_unbind_locked(to_iommu_bond(handle));
@@ -484,6 +495,10 @@  int iommu_sva_enable(struct device *dev, struct iommu_sva_param *sva_param)
 		goto err_unlock;
 	}
 
+	ret = iommu_register_device_fault_handler(dev, iommu_queue_iopf, dev);
+	if (ret)
+		goto err_unlock;
+
 	dev->iommu_param->sva_param = new_param;
 	mutex_unlock(&param->sva_lock);
 	return 0;
@@ -521,6 +536,7 @@  int iommu_sva_disable(struct device *dev)
 		goto out_unlock;
 	}
 
+	iommu_unregister_device_fault_handler(dev);
 	kfree(param->sva_param);
 	param->sva_param = NULL;
 out_unlock: