[RFC,v2,05/33] net: netsec: Add support for XDP frame size

Message ID 158634665970.707275.15490233569929847990.stgit@firesoul
State RFC
Delegated to: BPF Maintainers
Series [RFC,v2,01/33] xdp: add frame size to xdp_buff

Commit Message

Jesper Dangaard Brouer April 8, 2020, 11:50 a.m. UTC
From: Ilias Apalodimas <ilias.apalodimas@linaro.org>

This driver takes advantage of the page_pool PP_FLAG_DMA_SYNC_DEV feature,
which can help reduce the number of cache-lines that need to be flushed
when doing a DMA sync for_device. Because xdp_adjust_tail can grow the
area accessible to the CPU (which it may have written into), the max
sync length needs to be determined *after* bpf_prog_run_xdp().

Signed-off-by: Ilias Apalodimas <ilias.apalodimas@linaro.org>
Signed-off-by: Jesper Dangaard Brouer <brouer@redhat.com>
---
 drivers/net/ethernet/socionext/netsec.c |   30 ++++++++++++++++++------------
 1 file changed, 18 insertions(+), 12 deletions(-)
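
For reference, a minimal sketch of the sync-length logic described above, using
the names from the hunks below (an illustration of the idea, not the applied diff):

	unsigned int sync, len = xdp->data_end - xdp->data;  /* frame length before the prog runs */
	u32 act;

	act = bpf_prog_run_xdp(prog, xdp);  /* the program may move data_end via bpf_xdp_adjust_tail() */

	/* The DMA sync for_device length must cover the maximum area the CPU
	 * may have touched: from the start of packet data (just past the
	 * reserved headroom) up to the possibly moved data_end, and never
	 * less than the original frame length.
	 */
	sync = xdp->data_end - xdp->data_hard_start - NETSEC_RXBUF_HEADROOM;
	sync = max(sync, len);

	/* On drop/recycle, page_pool then only needs to sync 'sync' bytes for the device */
	page_pool_put_page(dring->page_pool, virt_to_head_page(xdp->data), sync, true);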

Comments

Lorenzo Bianconi April 8, 2020, 1:09 p.m. UTC | #1
> From: Ilias Apalodimas <ilias.apalodimas@linaro.org>

Acked-by: Lorenzo Bianconi <lorenzo@kernel.org>

Jesper Dangaard Brouer April 14, 2020, 8:07 a.m. UTC | #2
On Wed, 8 Apr 2020 15:09:23 +0200
Lorenzo Bianconi <lorenzo.bianconi@redhat.com> wrote:

> > From: Ilias Apalodimas <ilias.apalodimas@linaro.org>  
> 
> Acked-by: Lorenzo Bianconi <lorenzo@kernel.org>

Thanks, I've collected the ACK for the next submission.

Patch

diff --git a/drivers/net/ethernet/socionext/netsec.c b/drivers/net/ethernet/socionext/netsec.c
index a5a0fb60193a..e1f4be4b3d69 100644
--- a/drivers/net/ethernet/socionext/netsec.c
+++ b/drivers/net/ethernet/socionext/netsec.c
@@ -884,23 +884,28 @@  static u32 netsec_run_xdp(struct netsec_priv *priv, struct bpf_prog *prog,
 			  struct xdp_buff *xdp)
 {
 	struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_RX];
-	unsigned int len = xdp->data_end - xdp->data;
+	unsigned int sync, len = xdp->data_end - xdp->data;
 	u32 ret = NETSEC_XDP_PASS;
+	struct page *page;
 	int err;
 	u32 act;
 
 	act = bpf_prog_run_xdp(prog, xdp);
 
+	/* Due xdp_adjust_tail: DMA sync for_device cover max len CPU touch */
+	sync = xdp->data_end - xdp->data_hard_start - NETSEC_RXBUF_HEADROOM;
+	sync = max(sync, len);
+
 	switch (act) {
 	case XDP_PASS:
 		ret = NETSEC_XDP_PASS;
 		break;
 	case XDP_TX:
 		ret = netsec_xdp_xmit_back(priv, xdp);
-		if (ret != NETSEC_XDP_TX)
-			page_pool_put_page(dring->page_pool,
-					   virt_to_head_page(xdp->data), len,
-					   true);
+		if (ret != NETSEC_XDP_TX) {
+			page = virt_to_head_page(xdp->data);
+			page_pool_put_page(dring->page_pool, page, sync, true);
+		}
 		break;
 	case XDP_REDIRECT:
 		err = xdp_do_redirect(priv->ndev, xdp, prog);
@@ -908,9 +913,8 @@  static u32 netsec_run_xdp(struct netsec_priv *priv, struct bpf_prog *prog,
 			ret = NETSEC_XDP_REDIR;
 		} else {
 			ret = NETSEC_XDP_CONSUMED;
-			page_pool_put_page(dring->page_pool,
-					   virt_to_head_page(xdp->data), len,
-					   true);
+			page = virt_to_head_page(xdp->data);
+			page_pool_put_page(dring->page_pool, page, sync, true);
 		}
 		break;
 	default:
@@ -921,8 +925,8 @@  static u32 netsec_run_xdp(struct netsec_priv *priv, struct bpf_prog *prog,
 		/* fall through -- handle aborts by dropping packet */
 	case XDP_DROP:
 		ret = NETSEC_XDP_CONSUMED;
-		page_pool_put_page(dring->page_pool,
-				   virt_to_head_page(xdp->data), len, true);
+		page = virt_to_head_page(xdp->data);
+		page_pool_put_page(dring->page_pool, page, sync, true);
 		break;
 	}
 
@@ -936,10 +940,14 @@  static int netsec_process_rx(struct netsec_priv *priv, int budget)
 	struct netsec_rx_pkt_info rx_info;
 	enum dma_data_direction dma_dir;
 	struct bpf_prog *xdp_prog;
+	struct xdp_buff xdp;
 	u16 xdp_xmit = 0;
 	u32 xdp_act = 0;
 	int done = 0;
 
+	xdp.rxq = &dring->xdp_rxq;
+	xdp.frame_sz = PAGE_SIZE;
+
 	rcu_read_lock();
 	xdp_prog = READ_ONCE(priv->xdp_prog);
 	dma_dir = page_pool_get_dma_dir(dring->page_pool);
@@ -953,7 +961,6 @@  static int netsec_process_rx(struct netsec_priv *priv, int budget)
 		struct sk_buff *skb = NULL;
 		u16 pkt_len, desc_len;
 		dma_addr_t dma_handle;
-		struct xdp_buff xdp;
 		void *buf_addr;
 
 		if (de->attr & (1U << NETSEC_RX_PKT_OWN_FIELD)) {
@@ -1002,7 +1009,6 @@  static int netsec_process_rx(struct netsec_priv *priv, int budget)
 		xdp.data = desc->addr + NETSEC_RXBUF_HEADROOM;
 		xdp_set_data_meta_invalid(&xdp);
 		xdp.data_end = xdp.data + pkt_len;
-		xdp.rxq = &dring->xdp_rxq;
 
 		if (xdp_prog) {
 			xdp_result = netsec_run_xdp(priv, xdp_prog, &xdp);