
powerpc: Correct VIO bus accounting problem in CMO env.

Message ID 20090122194000.GA14767@austin.ibm.com (mailing list archive)
State Accepted, archived
Delegated to: Benjamin Herrenschmidt

Commit Message

Robert Jennings Jan. 22, 2009, 7:40 p.m. UTC
In the VIO bus code, the wrappers for the dma alloc_coherent and
free_coherent calls round the size to IOMMU_PAGE_SIZE.  In the underlying
calls, however, the actual mapping is promoted to PAGE_SIZE.  Rounding to
PAGE_SIZE in these two functions instead fixes the under-reporting of the
entitlement used by the system.  Without this change, the system could run
out of entitlement before it believes it has, and incur mapping failures
at the firmware level.
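
To make the under-reporting concrete, here is a minimal userspace sketch of
the arithmetic.  The 4 KB IOMMU page size and 64 KB kernel page size are
typical pSeries values assumed for illustration; they are not taken from the
patch.

/* Illustrative only: compares the entitlement charged when rounding to
 * IOMMU_PAGE_SIZE against the amount actually consumed once the mapping
 * is promoted to PAGE_SIZE.
 */
#include <stdio.h>

#define IOMMU_PAGE_SIZE	(4UL * 1024)	/* assumed 4 KB I/O page */
#define PAGE_SIZE	(64UL * 1024)	/* assumed 64 KB kernel page */

#define roundup(x, y)	((((x) + ((y) - 1)) / (y)) * (y))

int main(void)
{
	unsigned long size = 6 * 1024;	/* a 6 KB coherent allocation */

	printf("entitlement charged (IOMMU_PAGE_SIZE): %lu bytes\n",
	       roundup(size, IOMMU_PAGE_SIZE));	/* 8192  */
	printf("entitlement consumed (PAGE_SIZE):      %lu bytes\n",
	       roundup(size, PAGE_SIZE));	/* 65536 */
	return 0;
}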

Also in the VIO bus code, the wrapper for dma map_sg does not exit on an
error path where it should.  Rather than falling through to the
success-case code, this patch adds the return that is needed in the error
path.
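
For context, a self-contained sketch of that error path follows.  The
function and helper names mirror the patch, but the control flow is
paraphrased from the diff context, and the double release shown for the
unfixed case is an inference rather than a quote from vio.c.

/* Userspace stub demonstrating why the early return matters: without it,
 * a failed map_sg falls through to the success-case cleanup and the same
 * reservation can be handed back twice.
 */
#include <stdio.h>
#include <stddef.h>

static size_t entitlement_returned;

static void vio_cmo_dealloc(size_t size)	/* stub */
{
	entitlement_returned += size;
	printf("vio_cmo_dealloc(%zu) -> total returned %zu\n",
	       size, entitlement_returned);
}

/* 'fixed' selects whether the error path returns early, as in the patch. */
static int map_sg_wrapper(int mapped_entries, size_t alloc_size, int fixed)
{
	int ret = mapped_entries;	/* result of dma_iommu_ops.map_sg() */

	if (!ret) {
		vio_cmo_dealloc(alloc_size);	/* error path */
		if (fixed)
			return ret;	/* the return added by the patch */
	}

	/* Success-case cleanup: with ret == 0 nothing was mapped, so the
	 * whole reservation would be handed back a second time here. */
	if (alloc_size)
		vio_cmo_dealloc(alloc_size);

	return ret;
}

int main(void)
{
	entitlement_returned = 0;
	map_sg_wrapper(0, 8192, 0);	/* unfixed: 8192 released twice */

	entitlement_returned = 0;
	map_sg_wrapper(0, 8192, 1);	/* fixed: 8192 released once */
	return 0;
}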

Signed-off-by: Robert Jennings <rcj@linux.vnet.ibm.com>

---
 arch/powerpc/kernel/vio.c |    7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)

Comments

Brian King Jan. 28, 2009, 2:47 p.m. UTC | #1
Acked-by: Brian King <brking@linux.vnet.ibm.com>


Patch

Index: b/arch/powerpc/kernel/vio.c
===================================================================
--- a/arch/powerpc/kernel/vio.c
+++ b/arch/powerpc/kernel/vio.c
@@ -492,14 +492,14 @@  static void *vio_dma_iommu_alloc_coheren
 	struct vio_dev *viodev = to_vio_dev(dev);
 	void *ret;
 
-	if (vio_cmo_alloc(viodev, roundup(size, IOMMU_PAGE_SIZE))) {
+	if (vio_cmo_alloc(viodev, roundup(size, PAGE_SIZE))) {
 		atomic_inc(&viodev->cmo.allocs_failed);
 		return NULL;
 	}
 
 	ret = dma_iommu_ops.alloc_coherent(dev, size, dma_handle, flag);
 	if (unlikely(ret == NULL)) {
-		vio_cmo_dealloc(viodev, roundup(size, IOMMU_PAGE_SIZE));
+		vio_cmo_dealloc(viodev, roundup(size, PAGE_SIZE));
 		atomic_inc(&viodev->cmo.allocs_failed);
 	}
 
@@ -513,7 +513,7 @@  static void vio_dma_iommu_free_coherent(
 
 	dma_iommu_ops.free_coherent(dev, size, vaddr, dma_handle);
 
-	vio_cmo_dealloc(viodev, roundup(size, IOMMU_PAGE_SIZE));
+	vio_cmo_dealloc(viodev, roundup(size, PAGE_SIZE));
 }
 
 static dma_addr_t vio_dma_iommu_map_page(struct device *dev, struct page *page,
@@ -572,6 +572,7 @@  static int vio_dma_iommu_map_sg(struct d
 	if (unlikely(!ret)) {
 		vio_cmo_dealloc(viodev, alloc_size);
 		atomic_inc(&viodev->cmo.allocs_failed);
+		return ret;
 	}
 
 	for (sgl = sglist, count = 0; count < ret; count++, sgl++)