diff mbox series

[net-next,V8,01/16] mlx5: basic XDP_REDIRECT forward support

Message ID 152249795210.28297.14005981548436322464.stgit@firesoul
State Changes Requested, archived
Delegated to: David Miller
Headers show
Series XDP redirect memory return API | expand

Commit Message

Jesper Dangaard Brouer March 31, 2018, 12:05 p.m. UTC
This implements basic XDP redirect support in mlx5 driver.

Notice that the ndo_xdp_xmit() is NOT implemented, because that API
need some changes that this patchset is working towards.

The main purpose of this patch is to have different drivers doing
XDP_REDIRECT to show how different memory models behave in a cross
driver world.

Update(pre-RFCv2 Tariq): Need to DMA unmap page before xdp_do_redirect,
as the return API does not exist yet to keep this mapped.

Update(pre-RFCv3 Saeed): Don't mix XDP_TX and XDP_REDIRECT flushing,
introduce xdpsq.db.redirect_flush boolean.

Signed-off-by: Jesper Dangaard Brouer <brouer@redhat.com>
Reviewed-by: Tariq Toukan <tariqt@mellanox.com>
Acked-by: Saeed Mahameed <saeedm@mellanox.com>
---
 drivers/net/ethernet/mellanox/mlx5/core/en.h    |    1 +
 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c |   27 ++++++++++++++++++++---
 2 files changed, 25 insertions(+), 3 deletions(-)

Comments

David Miller April 1, 2018, 3:31 a.m. UTC | #1
From: Jesper Dangaard Brouer <brouer@redhat.com>
Date: Sat, 31 Mar 2018 14:05:52 +0200

> +static inline void mlx5e_page_dma_unmap(struct mlx5e_rq *rq,
> +					struct mlx5e_dma_info *dma_info)

Please do not use the inline keyword in foo.c files, let the compiler
decide.

I know that this is done in other areas of this file, that just means
that I didn't catch it when those changes went in, rather than meaning
that the rule can be broken because it has been already.

Thank you.
Tariq Toukan April 1, 2018, 8:46 a.m. UTC | #2
On 31/03/2018 3:05 PM, Jesper Dangaard Brouer wrote:

..

> @@ -844,6 +851,15 @@ static inline int mlx5e_xdp_handle(struct mlx5e_rq *rq,
>   		if (unlikely(!mlx5e_xmit_xdp_frame(rq, di, &xdp)))
>   			trace_xdp_exception(rq->netdev, prog, act);
>   		return true;
> +	case XDP_REDIRECT:
> +		/* When XDP enabled then page-refcnt==1 here */
> +		err = xdp_do_redirect(rq->netdev, &xdp, prog);
> +		if (!err) {
> +			rq->wqe.xdp_xmit = true; /* XDP xmit owns page */

boolean xdp_xmit does not exist anymore, it was removed recently by:
121e89275471 net/mlx5e: Refactor RQ XDP_TX indication

please replace with:
			__set_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags);

> +			rq->xdpsq.db.redirect_flush = true;
> +			mlx5e_page_dma_unmap(rq, di);
> +		}
> +		return true;
>   	default:
>   		bpf_warn_invalid_xdp_action(act);
>   	case XDP_ABORTED:
diff mbox series

Patch

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index 353ac6daa3dc..8ff8f1b727f5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -399,6 +399,7 @@  struct mlx5e_xdpsq {
 	struct {
 		struct mlx5e_dma_info     *di;
 		bool                       doorbell;
+		bool                       redirect_flush;
 	} db;
 
 	/* read only */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index 781b8f21d6d1..5551bb23e05f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -236,14 +236,20 @@  static inline int mlx5e_page_alloc_mapped(struct mlx5e_rq *rq,
 	return 0;
 }
 
+static inline void mlx5e_page_dma_unmap(struct mlx5e_rq *rq,
+					struct mlx5e_dma_info *dma_info)
+{
+	dma_unmap_page(rq->pdev, dma_info->addr, RQ_PAGE_SIZE(rq),
+		       rq->buff.map_dir);
+}
+
 void mlx5e_page_release(struct mlx5e_rq *rq, struct mlx5e_dma_info *dma_info,
 			bool recycle)
 {
 	if (likely(recycle) && mlx5e_rx_cache_put(rq, dma_info))
 		return;
 
-	dma_unmap_page(rq->pdev, dma_info->addr, RQ_PAGE_SIZE(rq),
-		       rq->buff.map_dir);
+	mlx5e_page_dma_unmap(rq, dma_info);
 	put_page(dma_info->page);
 }
 
@@ -821,9 +827,10 @@  static inline int mlx5e_xdp_handle(struct mlx5e_rq *rq,
 				   struct mlx5e_dma_info *di,
 				   void *va, u16 *rx_headroom, u32 *len)
 {
-	const struct bpf_prog *prog = READ_ONCE(rq->xdp_prog);
+	struct bpf_prog *prog = READ_ONCE(rq->xdp_prog);
 	struct xdp_buff xdp;
 	u32 act;
+	int err;
 
 	if (!prog)
 		return false;
@@ -844,6 +851,15 @@  static inline int mlx5e_xdp_handle(struct mlx5e_rq *rq,
 		if (unlikely(!mlx5e_xmit_xdp_frame(rq, di, &xdp)))
 			trace_xdp_exception(rq->netdev, prog, act);
 		return true;
+	case XDP_REDIRECT:
+		/* When XDP enabled then page-refcnt==1 here */
+		err = xdp_do_redirect(rq->netdev, &xdp, prog);
+		if (!err) {
+			rq->wqe.xdp_xmit = true; /* XDP xmit owns page */
+			rq->xdpsq.db.redirect_flush = true;
+			mlx5e_page_dma_unmap(rq, di);
+		}
+		return true;
 	default:
 		bpf_warn_invalid_xdp_action(act);
 	case XDP_ABORTED:
@@ -1104,6 +1120,11 @@  int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget)
 		xdpsq->db.doorbell = false;
 	}
 
+	if (xdpsq->db.redirect_flush) {
+		xdp_do_flush_map();
+		xdpsq->db.redirect_flush = false;
+	}
+
 	mlx5_cqwq_update_db_record(&cq->wq);
 
 	/* ensure cq space is freed before enabling more cqes */