
dpaa_eth: fix DMA mapping leak

Message ID 1576764528-10742-1-git-send-email-madalin.bucur@oss.nxp.com
State Changes Requested
Delegated to: David Miller
Series dpaa_eth: fix DMA mapping leak

Commit Message

Madalin Bucur (OSS) Dec. 19, 2019, 2:08 p.m. UTC
From: Madalin Bucur <madalin.bucur@nxp.com>

On the error path some fragments remain DMA mapped. Adding a fix
that unmaps all the fragments.

Fixes: 8151ee88bad56 (dpaa_eth: use page backed rx buffers)
Signed-off-by: Madalin Bucur <madalin.bucur@nxp.com>
---
 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c | 10 ++++++++--
 1 file changed, 8 insertions(+), 2 deletions(-)

Comments

David Miller Dec. 21, 2019, 5:35 a.m. UTC | #1
From: Madalin Bucur <madalin.bucur@oss.nxp.com>
Date: Thu, 19 Dec 2019 16:08:48 +0200

> @@ -1744,6 +1744,9 @@ static struct sk_buff *sg_fd_to_skb(const struct dpaa_priv *priv,
>  		count_ptr = this_cpu_ptr(dpaa_bp->percpu_count);
>  		dma_unmap_page(priv->rx_dma_dev, sg_addr,
>  			       DPAA_BP_RAW_SIZE, DMA_FROM_DEVICE);
> +
> +		j++; /* fragments up to j were DMA unmapped */
> +

You can move this code:

		/* We may use multiple Rx pools */
		dpaa_bp = dpaa_bpid2pool(sgt[i].bpid);
		if (!dpaa_bp)
			goto free_buffers;

		count_ptr = this_cpu_ptr(dpaa_bp->percpu_count);

after the dma_unmap_page() call; that is a much simpler
way to fix this bug.
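
For reference, a minimal sketch of how the start of the loop body would look with that reordering applied (an illustration of the suggestion only, not a posted revision; the rest of the loop body stays as it is):

		sg_addr = qm_sg_addr(&sgt[i]);

		/* unmap first, so a failed pool lookup below can no longer
		 * leave this fragment DMA mapped on the error path
		 */
		dma_unmap_page(priv->rx_dma_dev, sg_addr,
			       DPAA_BP_RAW_SIZE, DMA_FROM_DEVICE);

		/* We may use multiple Rx pools */
		dpaa_bp = dpaa_bpid2pool(sgt[i].bpid);
		if (!dpaa_bp)
			goto free_buffers;

		count_ptr = this_cpu_ptr(dpaa_bp->percpu_count);

With the unmap performed before the pool lookup, every entry the loop has
visited is already unmapped by the time it jumps to free_buffers; only the
entries it has not reached yet still hold a mapping.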
Madalin Bucur (OSS) Dec. 23, 2019, 6:50 a.m. UTC | #2
> -----Original Message-----
> From: David Miller <davem@davemloft.net>
> Sent: Saturday, December 21, 2019 7:36 AM
> To: Madalin Bucur <madalin.bucur@nxp.com>; Madalin Bucur (OSS)
> <madalin.bucur@oss.nxp.com>
> Cc: netdev@vger.kernel.org
> Subject: Re: [PATCH] dpaa_eth: fix DMA mapping leak
> 
> From: Madalin Bucur <madalin.bucur@oss.nxp.com>
> Date: Thu, 19 Dec 2019 16:08:48 +0200
> 
> > @@ -1744,6 +1744,9 @@ static struct sk_buff *sg_fd_to_skb(const struct
> dpaa_priv *priv,
> >  		count_ptr = this_cpu_ptr(dpaa_bp->percpu_count);
> >  		dma_unmap_page(priv->rx_dma_dev, sg_addr,
> >  			       DPAA_BP_RAW_SIZE, DMA_FROM_DEVICE);
> > +
> > +		j++; /* fragments up to j were DMA unmapped */
> > +
> 
> You can move this code:
> 
> 		/* We may use multiple Rx pools */
> 		dpaa_bp = dpaa_bpid2pool(sgt[i].bpid);
> 		if (!dpaa_bp)
> 			goto free_buffers;
> 
> 		count_ptr = this_cpu_ptr(dpaa_bp->percpu_count);
> 
> after the dma_unmap_page() call; that is a much simpler
> way to fix this bug.

Thank you, that will yield a simpler cleanup path.
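
For illustration, one possible shape of that cleanup path once the reordering
is in place (a sketch only, re-indexing the cleanup loop with j so that i
still marks how far the main loop got; not a posted follow-up):

 free_buffers:
	for (j = 0; j < DPAA_SGT_MAX_ENTRIES; j++) {
		sg_addr = qm_sg_addr(&sgt[j]);
		sg_vaddr = phys_to_virt(sg_addr);
		/* entries up to and including i were already unmapped in
		 * the main loop; unmap the remaining ones before freeing
		 */
		if (j > i)
			dma_unmap_page(priv->rx_dma_dev, sg_addr,
				       DPAA_BP_RAW_SIZE, DMA_FROM_DEVICE);
		free_pages((unsigned long)sg_vaddr, 0);
		/* remainder of the loop body unchanged */
	}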
Leon Romanovsky Dec. 23, 2019, 7:19 a.m. UTC | #3
On Thu, Dec 19, 2019 at 04:08:48PM +0200, Madalin Bucur wrote:
> From: Madalin Bucur <madalin.bucur@nxp.com>
>
> On the error path some fragments remain DMA mapped. Adding a fix
> that unmaps all the fragments.
>
> Fixes: 8151ee88bad56 (dpaa_eth: use page backed rx buffers)

This line should be
Fixes: 8151ee88bad5 ("dpaa_eth: use page backed rx buffers")

Thanks

Patch

diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
index 6a9d12dad5d9..ff9e9a2637e7 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
@@ -1719,7 +1719,7 @@  static struct sk_buff *sg_fd_to_skb(const struct dpaa_priv *priv,
 	int page_offset;
 	unsigned int sz;
 	int *count_ptr;
-	int i;
+	int i, j;
 
 	vaddr = phys_to_virt(addr);
 	WARN_ON(!IS_ALIGNED((unsigned long)vaddr, SMP_CACHE_BYTES));
@@ -1727,7 +1727,7 @@  static struct sk_buff *sg_fd_to_skb(const struct dpaa_priv *priv,
 	/* Iterate through the SGT entries and add data buffers to the skb */
 	sgt = vaddr + fd_off;
 	skb = NULL;
-	for (i = 0; i < DPAA_SGT_MAX_ENTRIES; i++) {
+	for (i = 0, j = 0; i < DPAA_SGT_MAX_ENTRIES; i++) {
 		/* Extension bit is not supported */
 		WARN_ON(qm_sg_entry_is_ext(&sgt[i]));
 
@@ -1744,6 +1744,9 @@  static struct sk_buff *sg_fd_to_skb(const struct dpaa_priv *priv,
 		count_ptr = this_cpu_ptr(dpaa_bp->percpu_count);
 		dma_unmap_page(priv->rx_dma_dev, sg_addr,
 			       DPAA_BP_RAW_SIZE, DMA_FROM_DEVICE);
+
+		j++; /* fragments up to j were DMA unmapped */
+
 		if (!skb) {
 			sz = dpaa_bp->size +
 				SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
@@ -1812,6 +1815,9 @@  static struct sk_buff *sg_fd_to_skb(const struct dpaa_priv *priv,
 	for (i = 0; i < DPAA_SGT_MAX_ENTRIES ; i++) {
 		sg_addr = qm_sg_addr(&sgt[i]);
 		sg_vaddr = phys_to_virt(sg_addr);
+		if (i >= j)
+			dma_unmap_page(priv->rx_dma_dev, qm_sg_addr(&sgt[i]),
+				       DPAA_BP_RAW_SIZE, DMA_FROM_DEVICE);
 		free_pages((unsigned long)sg_vaddr, 0);
 		dpaa_bp = dpaa_bpid2pool(sgt[i].bpid);
 		if (dpaa_bp) {