[v2,net-next,3/9] net: mvneta: update mb bit before passing the xdp buffer to eBPF layer

Message ID 25198d8424778abe9ee3fe25bba542143201b030.1599165031.git.lorenzo@kernel.org
State Changes Requested
Delegated to: BPF Maintainers
Series mvneta: introduce XDP multi-buffer support

Commit Message

Lorenzo Bianconi Sept. 3, 2020, 8:58 p.m. UTC
Update the multi-buffer bit (mb) in xdp_buff to notify the XDP/eBPF layer
and XDP remote drivers if this is a "non-linear" XDP buffer. Access
skb_shared_info only if the xdp_buff mb bit is set.

Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
---
 drivers/net/ethernet/marvell/mvneta.c | 37 +++++++++++++++------------
 1 file changed, 21 insertions(+), 16 deletions(-)
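
For reference, a minimal sketch (not part of the patch) of the consumer-side
pattern the mb bit enables. xdp->mb is introduced earlier in this series; for
single-buffer frames the skb_shared_info area at the tail of the buffer is now
left uninitialized, so it must only be dereferenced when the bit is set:

	/* Sketch only: gate every skb_shared_info access on xdp->mb.
	 * When mb is 0 the buffer is linear and the shared_info area
	 * may contain stale data.
	 */
	struct skb_shared_info *sinfo;
	int nr_frags = 0;

	if (xdp->mb) {
		sinfo = xdp_get_shared_info_from_buff(xdp);
		nr_frags = sinfo->nr_frags;
	}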

Comments

Shay Agroskin Sept. 6, 2020, 7:33 a.m. UTC | #1
Lorenzo Bianconi <lorenzo@kernel.org> writes:

> Update the multi-buffer bit (mb) in xdp_buff to notify the XDP/eBPF layer
> and XDP remote drivers if this is a "non-linear" XDP buffer. Access
> skb_shared_info only if the xdp_buff mb bit is set.
>
> Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
> ---
>  drivers/net/ethernet/marvell/mvneta.c | 37 +++++++++++++++------------
>  1 file changed, 21 insertions(+), 16 deletions(-)
>
> diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
> index 832bbb8b05c8..4f745a2b702a 100644
> --- a/drivers/net/ethernet/marvell/mvneta.c
> +++ b/drivers/net/ethernet/marvell/mvneta.c
> @@ -2027,11 +2027,11 @@ mvneta_xdp_put_buff(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
>  		    struct xdp_buff *xdp, int sync_len, bool napi)
>  {
>  	struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
> -	int i;
> +	int i, num_frames = xdp->mb ? sinfo->nr_frags : 0;
>
>  	page_pool_put_page(rxq->page_pool, virt_to_head_page(xdp->data),
>  			   sync_len, napi);
> -	for (i = 0; i < sinfo->nr_frags; i++)
> +	for (i = 0; i < num_frames; i++)
>  		page_pool_put_full_page(rxq->page_pool,
>  					skb_frag_page(&sinfo->frags[i]), napi);
>  }
> ...
> @@ -2175,6 +2175,7 @@ mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
>
>  	len = xdp->data_end - xdp->data_hard_start - pp->rx_offset_correction;
>  	data_len = xdp->data_end - xdp->data;
> ...
> @@ -2256,9 +2256,7 @@ mvneta_swbm_rx_frame(struct mvneta_port *pp,
>  	xdp_set_data_meta_invalid(xdp);
> -
> -	sinfo = xdp_get_shared_info_from_buff(xdp);
> -	sinfo->nr_frags = 0;
> +	xdp->mb = 0;
>
>  	*size = rx_desc->data_size - len;
>  	rx_desc->buf_phys_addr = 0;
> @@ -2269,7 +2267,7 @@ mvneta_swbm_add_rx_fragment(struct mvneta_port *pp,
>  			    struct mvneta_rx_desc *rx_desc,
>  			    struct mvneta_rx_queue *rxq,
>  			    struct xdp_buff *xdp, int *size,
> -			    struct page *page)
> +			    int *nfrags, struct page *page)
>  {
>  	struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
>  	struct net_device *dev = pp->dev;
> @@ -2288,13 +2286,18 @@ mvneta_swbm_add_rx_fragment(struct mvneta_port *pp,
>  				rx_desc->buf_phys_addr,
>  				len, dma_dir);
>  
> -	if (data_len > 0 && sinfo->nr_frags < MAX_SKB_FRAGS) {
> -		skb_frag_t *frag = &sinfo->frags[sinfo->nr_frags];
> +	if (data_len > 0 && *nfrags < MAX_SKB_FRAGS) {
> +		skb_frag_t *frag = &sinfo->frags[*nfrags];
>  
>  		skb_frag_off_set(frag, pp->rx_offset_correction);
>  		skb_frag_size_set(frag, data_len);
>  		__skb_frag_set_page(frag, page);
> -		sinfo->nr_frags++;
> +		*nfrags = *nfrags + 1;

nit, why do you use the nfrags variable instead of sinfo->nr_frags (as it
was before this patch)? You don't seem to use the nfrags variable in the
caller function, and you update nr_frags as well. If it's used in a
different patch (haven't reviewed it all yet), maybe move it to that patch?
(sorry in advance if I missed the logic here)

> +
> +		if (rx_desc->status & MVNETA_RXD_LAST_DESC) {
> +			sinfo->nr_frags = *nfrags;
> +			xdp->mb = true;
> +		}
>  
>  		rx_desc->buf_phys_addr = 0;
> ...
Lorenzo Bianconi Sept. 6, 2020, 9:05 a.m. UTC | #2
[...]

> >  				rx_desc->buf_phys_addr,
> >  				len, dma_dir);
> > -	if (data_len > 0 && sinfo->nr_frags < MAX_SKB_FRAGS) {
> > -		skb_frag_t *frag = &sinfo->frags[sinfo->nr_frags];
> > +	if (data_len > 0 && *nfrags < MAX_SKB_FRAGS) {
> > +		skb_frag_t *frag = &sinfo->frags[*nfrags];
> >  		skb_frag_off_set(frag, pp->rx_offset_correction);
> >  		skb_frag_size_set(frag, data_len);
> >  		__skb_frag_set_page(frag, page);
> > -		sinfo->nr_frags++;
> > +		*nfrags = *nfrags + 1;
> 
> nit, why do you use the nfrags variable instead of sinfo->nr_frags (as it
> was before this patch)? You don't seem to use the nfrags variable in the
> caller function, and you update nr_frags as well. If it's used in a
> different patch (haven't reviewed it all yet), maybe move it to that patch?
> (sorry in advance if I missed the logic here)

The nfrags pointer is used to avoid initializing sinfo->nr_frags in
mvneta_swbm_rx_frame(), since skb_shared_info is usually not in the cache.
AFAIK the hw does not provide the info "this is the second descriptor", but
just "this is the first/last descriptor".

Regards,
Lorenzo

> 
> > +
> > +		if (rx_desc->status & MVNETA_RXD_LAST_DESC) {
> > +			sinfo->nr_frags = *nfrags;
> > +			xdp->mb = true;
> > +		}
> >  		rx_desc->buf_phys_addr = 0;
> > ...
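
To illustrate the cache argument above, here is the write pattern this patch
introduces in mvneta_swbm_add_rx_fragment() (a sketch of the patch's own code,
with comments added for this discussion): the on-stack nfrags counter absorbs
the per-fragment updates, and the usually-cold skb_shared_info line is written
exactly once.

	/* Per-fragment bookkeeping stays on the stack (hot line). */
	*nfrags = *nfrags + 1;

	/* The hw only flags first/last descriptors, so the last one is
	 * the single point where the cold skb_shared_info line must be
	 * touched: publish the frag count and mark the buffer non-linear.
	 */
	if (rx_desc->status & MVNETA_RXD_LAST_DESC) {
		sinfo->nr_frags = *nfrags;
		xdp->mb = true;
	}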

Patch

diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 832bbb8b05c8..4f745a2b702a 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -2027,11 +2027,11 @@  mvneta_xdp_put_buff(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
 		    struct xdp_buff *xdp, int sync_len, bool napi)
 {
 	struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
-	int i;
+	int i, num_frames = xdp->mb ? sinfo->nr_frags : 0;
 
 	page_pool_put_page(rxq->page_pool, virt_to_head_page(xdp->data),
 			   sync_len, napi);
-	for (i = 0; i < sinfo->nr_frags; i++)
+	for (i = 0; i < num_frames; i++)
 		page_pool_put_full_page(rxq->page_pool,
 					skb_frag_page(&sinfo->frags[i]), napi);
 }
@@ -2175,6 +2175,7 @@  mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
 
 	len = xdp->data_end - xdp->data_hard_start - pp->rx_offset_correction;
 	data_len = xdp->data_end - xdp->data;
+
 	act = bpf_prog_run_xdp(prog, xdp);
 
 	/* Due xdp_adjust_tail: DMA sync for_device cover max len CPU touch */
@@ -2234,7 +2235,6 @@  mvneta_swbm_rx_frame(struct mvneta_port *pp,
 	int data_len = -MVNETA_MH_SIZE, len;
 	struct net_device *dev = pp->dev;
 	enum dma_data_direction dma_dir;
-	struct skb_shared_info *sinfo;
 
 	if (MVNETA_SKB_SIZE(rx_desc->data_size) > PAGE_SIZE) {
 		len = MVNETA_MAX_RX_BUF_SIZE;
@@ -2256,9 +2256,7 @@  mvneta_swbm_rx_frame(struct mvneta_port *pp,
 	xdp->data = data + pp->rx_offset_correction + MVNETA_MH_SIZE;
 	xdp->data_end = xdp->data + data_len;
 	xdp_set_data_meta_invalid(xdp);
-
-	sinfo = xdp_get_shared_info_from_buff(xdp);
-	sinfo->nr_frags = 0;
+	xdp->mb = 0;
 
 	*size = rx_desc->data_size - len;
 	rx_desc->buf_phys_addr = 0;
@@ -2269,7 +2267,7 @@  mvneta_swbm_add_rx_fragment(struct mvneta_port *pp,
 			    struct mvneta_rx_desc *rx_desc,
 			    struct mvneta_rx_queue *rxq,
 			    struct xdp_buff *xdp, int *size,
-			    struct page *page)
+			    int *nfrags, struct page *page)
 {
 	struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
 	struct net_device *dev = pp->dev;
@@ -2288,13 +2286,18 @@  mvneta_swbm_add_rx_fragment(struct mvneta_port *pp,
 				rx_desc->buf_phys_addr,
 				len, dma_dir);
 
-	if (data_len > 0 && sinfo->nr_frags < MAX_SKB_FRAGS) {
-		skb_frag_t *frag = &sinfo->frags[sinfo->nr_frags];
+	if (data_len > 0 && *nfrags < MAX_SKB_FRAGS) {
+		skb_frag_t *frag = &sinfo->frags[*nfrags];
 
 		skb_frag_off_set(frag, pp->rx_offset_correction);
 		skb_frag_size_set(frag, data_len);
 		__skb_frag_set_page(frag, page);
-		sinfo->nr_frags++;
+		*nfrags = *nfrags + 1;
+
+		if (rx_desc->status & MVNETA_RXD_LAST_DESC) {
+			sinfo->nr_frags = *nfrags;
+			xdp->mb = true;
+		}
 
 		rx_desc->buf_phys_addr = 0;
 	}
@@ -2306,7 +2309,7 @@  mvneta_swbm_build_skb(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
 		      struct xdp_buff *xdp, u32 desc_status)
 {
 	struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
-	int i, num_frags = sinfo->nr_frags;
+	int i, num_frags = xdp->mb ? sinfo->nr_frags : 0;
 	skb_frag_t frags[MAX_SKB_FRAGS];
 	struct sk_buff *skb;
 
@@ -2341,13 +2344,14 @@  static int mvneta_rx_swbm(struct napi_struct *napi,
 {
 	int rx_proc = 0, rx_todo, refill, size = 0;
 	struct net_device *dev = pp->dev;
-	struct xdp_buff xdp_buf = {
-		.frame_sz = PAGE_SIZE,
-		.rxq = &rxq->xdp_rxq,
-	};
 	struct mvneta_stats ps = {};
 	struct bpf_prog *xdp_prog;
 	u32 desc_status, frame_sz;
+	struct xdp_buff xdp_buf;
+	int nfrags;
+
+	xdp_buf.frame_sz = PAGE_SIZE;
+	xdp_buf.rxq = &rxq->xdp_rxq;
 
 	/* Get number of received packets */
 	rx_todo = mvneta_rxq_busy_desc_num_get(pp, rxq);
@@ -2379,6 +2383,7 @@  static int mvneta_rx_swbm(struct napi_struct *napi,
 			size = rx_desc->data_size;
 			frame_sz = size - ETH_FCS_LEN;
 			desc_status = rx_desc->status;
+			nfrags = 0;
 
 			mvneta_swbm_rx_frame(pp, rx_desc, rxq, &xdp_buf,
 					     &size, page, &ps);
@@ -2387,7 +2392,7 @@  static int mvneta_rx_swbm(struct napi_struct *napi,
 				continue;
 
 			mvneta_swbm_add_rx_fragment(pp, rx_desc, rxq, &xdp_buf,
-						    &size, page);
+						    &size, &nfrags, page);
 		} /* Middle or Last descriptor */
 
 		if (!(rx_status & MVNETA_RXD_LAST_DESC))