From patchwork Wed Sep 30 15:41:52 2020 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Lorenzo Bianconi X-Patchwork-Id: 1374481 X-Patchwork-Delegate: bpf@iogearbox.net Return-Path: X-Original-To: patchwork-incoming-netdev@ozlabs.org Delivered-To: patchwork-incoming-netdev@ozlabs.org Authentication-Results: ozlabs.org; spf=pass (sender SPF authorized) smtp.mailfrom=vger.kernel.org (client-ip=23.128.96.18; helo=vger.kernel.org; envelope-from=netdev-owner@vger.kernel.org; receiver=) Authentication-Results: ozlabs.org; dmarc=pass (p=none dis=none) header.from=kernel.org Authentication-Results: ozlabs.org; dkim=pass (1024-bit key; unprotected) header.d=kernel.org header.i=@kernel.org header.a=rsa-sha256 header.s=default header.b=k/0GM3zo; dkim-atps=neutral Received: from vger.kernel.org (vger.kernel.org [23.128.96.18]) by ozlabs.org (Postfix) with ESMTP id 4C1gVv4qWXz9sT6 for ; Thu, 1 Oct 2020 01:42:31 +1000 (AEST) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S1730946AbgI3Pma (ORCPT ); Wed, 30 Sep 2020 11:42:30 -0400 Received: from mail.kernel.org ([198.145.29.99]:52930 "EHLO mail.kernel.org" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S1727438AbgI3Pm2 (ORCPT ); Wed, 30 Sep 2020 11:42:28 -0400 Received: from lore-desk.redhat.com (unknown [176.207.245.61]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by mail.kernel.org (Postfix) with ESMTPSA id 65EFC207C3; Wed, 30 Sep 2020 15:42:25 +0000 (UTC) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=kernel.org; s=default; t=1601480547; bh=jGFg9HepHezzAGnfdT1zopmNIIsBMcFzzfvdUYqTGvs=; h=From:To:Cc:Subject:Date:In-Reply-To:References:From; b=k/0GM3zo9PlHVQDZlc0PpiR6f6frrY2UW4kgd0GTd3R1Zh54Qx5Net8fZd/cQABxF 3XVuLq0Dc10g5n5tQfQS5lU4/1p/RXzIZUImkOYODNyRM01yOvVAv+xsWVeMaecstD 27AE9/PRv/xN89aN/BgcqZ1PuWtJw0Ju9P3OalQA= From: Lorenzo Bianconi To: netdev@vger.kernel.org Cc: bpf@vger.kernel.org, davem@davemloft.net, sameehj@amazon.com, kuba@kernel.org, john.fastabend@gmail.com, daniel@iogearbox.net, ast@kernel.org, shayagr@amazon.com, brouer@redhat.com, echaudro@redhat.com, lorenzo.bianconi@redhat.com, dsahern@kernel.org Subject: [PATCH v3 net-next 01/12] xdp: introduce mb in xdp_buff/xdp_frame Date: Wed, 30 Sep 2020 17:41:52 +0200 Message-Id: <611784e2d737d9e723ee29cbc0e2e2ecdcdfb171.1601478613.git.lorenzo@kernel.org> X-Mailer: git-send-email 2.26.2 In-Reply-To: References: MIME-Version: 1.0 Precedence: bulk List-ID: X-Mailing-List: netdev@vger.kernel.org Introduce multi-buffer bit (mb) in xdp_frame/xdp_buffer to specify if shared_info area has been properly initialized for non-linear xdp buffers Signed-off-by: Lorenzo Bianconi --- include/net/xdp.h | 8 ++++++-- net/core/xdp.c | 1 + 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/include/net/xdp.h b/include/net/xdp.h index 3814fb631d52..42f439f9fcda 100644 --- a/include/net/xdp.h +++ b/include/net/xdp.h @@ -72,7 +72,8 @@ struct xdp_buff { void *data_hard_start; struct xdp_rxq_info *rxq; struct xdp_txq_info *txq; - u32 frame_sz; /* frame size to deduce data_hard_end/reserved tailroom*/ + u32 frame_sz:31; /* frame size to deduce data_hard_end/reserved tailroom*/ + u32 mb:1; /* xdp non-linear buffer */ }; /* Reserve memory area at end-of data area. 
@@ -96,7 +97,8 @@ struct xdp_frame { u16 len; u16 headroom; u32 metasize:8; - u32 frame_sz:24; + u32 frame_sz:23; + u32 mb:1; /* xdp non-linear frame */ /* Lifetime of xdp_rxq_info is limited to NAPI/enqueue time, * while mem info is valid on remote CPU. */ @@ -141,6 +143,7 @@ void xdp_convert_frame_to_buff(struct xdp_frame *frame, struct xdp_buff *xdp) xdp->data_end = frame->data + frame->len; xdp->data_meta = frame->data - frame->metasize; xdp->frame_sz = frame->frame_sz; + xdp->mb = frame->mb; } static inline @@ -167,6 +170,7 @@ int xdp_update_frame_from_buff(struct xdp_buff *xdp, xdp_frame->headroom = headroom - sizeof(*xdp_frame); xdp_frame->metasize = metasize; xdp_frame->frame_sz = xdp->frame_sz; + xdp_frame->mb = xdp->mb; return 0; } diff --git a/net/core/xdp.c b/net/core/xdp.c index 48aba933a5a8..884f140fc3be 100644 --- a/net/core/xdp.c +++ b/net/core/xdp.c @@ -454,6 +454,7 @@ struct xdp_frame *xdp_convert_zc_to_xdp_frame(struct xdp_buff *xdp) xdpf->headroom = 0; xdpf->metasize = metasize; xdpf->frame_sz = PAGE_SIZE; + xdpf->mb = xdp->mb; xdpf->mem.type = MEM_TYPE_PAGE_ORDER0; xsk_buff_free(xdp); From patchwork Wed Sep 30 15:41:53 2020 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Lorenzo Bianconi X-Patchwork-Id: 1374483 X-Patchwork-Delegate: bpf@iogearbox.net Return-Path: X-Original-To: patchwork-incoming-netdev@ozlabs.org Delivered-To: patchwork-incoming-netdev@ozlabs.org Authentication-Results: ozlabs.org; spf=pass (sender SPF authorized) smtp.mailfrom=vger.kernel.org (client-ip=23.128.96.18; helo=vger.kernel.org; envelope-from=netdev-owner@vger.kernel.org; receiver=) Authentication-Results: ozlabs.org; dmarc=pass (p=none dis=none) header.from=kernel.org Authentication-Results: ozlabs.org; dkim=pass (1024-bit key; unprotected) header.d=kernel.org header.i=@kernel.org header.a=rsa-sha256 header.s=default header.b=LYDp6ERL; dkim-atps=neutral Received: from vger.kernel.org (vger.kernel.org [23.128.96.18]) by ozlabs.org (Postfix) with ESMTP id 4C1gVy31HYz9sT6 for ; Thu, 1 Oct 2020 01:42:34 +1000 (AEST) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S1731009AbgI3Pmd (ORCPT ); Wed, 30 Sep 2020 11:42:33 -0400 Received: from mail.kernel.org ([198.145.29.99]:52990 "EHLO mail.kernel.org" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S1727438AbgI3Pmd (ORCPT ); Wed, 30 Sep 2020 11:42:33 -0400 Received: from lore-desk.redhat.com (unknown [176.207.245.61]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by mail.kernel.org (Postfix) with ESMTPSA id 6723820789; Wed, 30 Sep 2020 15:42:29 +0000 (UTC) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=kernel.org; s=default; t=1601480551; bh=LxNPYsGGK2VbzekPFKAjqYOFrt5K/1kDpfiN9h5n05I=; h=From:To:Cc:Subject:Date:In-Reply-To:References:From; b=LYDp6ERLscSGl3J+yFiNGj4p227ta5cJlRVKND6kumeHA35lfP7NHrwgCdowk/c5/ pT8h+Obxi6grE1fx8LVgJbS7NWZaR7rTQiI8YCTspolT2d3MTbKZRiol8UAW06tKHZ fJ3BgXsbTIHvrVgc3EppTgtLbSfe3fX/TVJEy7VQ= From: Lorenzo Bianconi To: netdev@vger.kernel.org Cc: bpf@vger.kernel.org, davem@davemloft.net, sameehj@amazon.com, kuba@kernel.org, john.fastabend@gmail.com, daniel@iogearbox.net, ast@kernel.org, shayagr@amazon.com, brouer@redhat.com, echaudro@redhat.com, lorenzo.bianconi@redhat.com, dsahern@kernel.org Subject: [PATCH v3 net-next 02/12] xdp: initialize xdp_buff mb bit to 0 in all XDP drivers Date: Wed, 30 Sep 2020 17:41:53 +0200 Message-Id: 
<673ca12e5092e978f620b057c2e257bc5f6d4ece.1601478613.git.lorenzo@kernel.org> X-Mailer: git-send-email 2.26.2 In-Reply-To: References: MIME-Version: 1.0 Precedence: bulk List-ID: X-Mailing-List: netdev@vger.kernel.org Initialize multi-buffer bit (mb) to 0 in all XDP-capable drivers. This is a preliminary patch to enable xdp multi-buffer support. Signed-off-by: Lorenzo Bianconi --- drivers/net/ethernet/amazon/ena/ena_netdev.c | 1 + drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c | 1 + drivers/net/ethernet/cavium/thunder/nicvf_main.c | 1 + drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c | 1 + drivers/net/ethernet/intel/i40e/i40e_txrx.c | 1 + drivers/net/ethernet/intel/ice/ice_txrx.c | 1 + drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 1 + drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c | 1 + drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c | 1 + drivers/net/ethernet/mellanox/mlx4/en_rx.c | 1 + drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 1 + drivers/net/ethernet/netronome/nfp/nfp_net_common.c | 1 + drivers/net/ethernet/qlogic/qede/qede_fp.c | 1 + drivers/net/ethernet/sfc/rx.c | 1 + drivers/net/ethernet/socionext/netsec.c | 1 + drivers/net/ethernet/ti/cpsw.c | 1 + drivers/net/ethernet/ti/cpsw_new.c | 1 + drivers/net/hyperv/netvsc_bpf.c | 1 + drivers/net/tun.c | 2 ++ drivers/net/veth.c | 1 + drivers/net/virtio_net.c | 2 ++ drivers/net/xen-netfront.c | 1 + net/core/dev.c | 1 + 23 files changed, 25 insertions(+) diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c index e8131dadc22c..339319b97853 100644 --- a/drivers/net/ethernet/amazon/ena/ena_netdev.c +++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c @@ -1595,6 +1595,7 @@ static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi, res_budget = budget; xdp.rxq = &rx_ring->xdp_rxq; xdp.frame_sz = ENA_PAGE_SIZE; + xdp.mb = 0; do { xdp_verdict = XDP_PASS; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c index fcc262064766..344644b6dd4d 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c @@ -139,6 +139,7 @@ bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons, xdp.data_end = *data_ptr + *len; xdp.rxq = &rxr->xdp_rxq; xdp.frame_sz = PAGE_SIZE; /* BNXT_RX_PAGE_MODE(bp) when XDP enabled */ + xdp.mb = 0; orig_data = xdp.data; rcu_read_lock(); diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c index 0a94c396173b..7fdabaabab1b 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c +++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c @@ -553,6 +553,7 @@ static inline bool nicvf_xdp_rx(struct nicvf *nic, struct bpf_prog *prog, xdp.data_end = xdp.data + len; xdp.rxq = &rq->xdp_rxq; xdp.frame_sz = RCV_FRAG_LEN + XDP_PACKET_HEADROOM; + xdp.mb = 0; orig_data = xdp.data; rcu_read_lock(); diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c index fe4caf7aad7c..8410e713162e 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c @@ -366,6 +366,7 @@ static u32 dpaa2_eth_run_xdp(struct dpaa2_eth_priv *priv, xdp.frame_sz = DPAA2_ETH_RX_BUF_RAW_SIZE - (dpaa2_fd_get_offset(fd) - XDP_PACKET_HEADROOM); + xdp.mb = 0; xdp_act = bpf_prog_run_xdp(xdp_prog, &xdp); diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index 
d43ce13a93c9..5df07bc98283 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c @@ -2332,6 +2332,7 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget) xdp.frame_sz = i40e_rx_frame_truesize(rx_ring, 0); #endif xdp.rxq = &rx_ring->xdp_rxq; + xdp.mb = 0; while (likely(total_rx_packets < (unsigned int)budget)) { struct i40e_rx_buffer *rx_buffer; diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c index eae75260fe20..d641f513b8d9 100644 --- a/drivers/net/ethernet/intel/ice/ice_txrx.c +++ b/drivers/net/ethernet/intel/ice/ice_txrx.c @@ -1089,6 +1089,7 @@ int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget) #if (PAGE_SIZE < 8192) xdp.frame_sz = ice_rx_frame_truesize(rx_ring, 0); #endif + xdp.mb = 0; /* start the loop to process Rx packets bounded by 'budget' */ while (likely(total_rx_pkts < (unsigned int)budget)) { diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index a190d5c616fc..39f9d2032b9d 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -2298,6 +2298,7 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, #if (PAGE_SIZE < 8192) xdp.frame_sz = ixgbe_rx_frame_truesize(rx_ring, 0); #endif + xdp.mb = 0; while (likely(total_rx_packets < budget)) { union ixgbe_adv_rx_desc *rx_desc; diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c index 82fce27f682b..1fbc740c266e 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c @@ -1129,6 +1129,7 @@ static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector, struct xdp_buff xdp; xdp.rxq = &rx_ring->xdp_rxq; + xdp.mb = 0; /* Frame size depend on rx_ring setup when PAGE_SIZE=4K */ #if (PAGE_SIZE < 8192) diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c index f6616c8933ca..01661ade9009 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c @@ -3558,6 +3558,7 @@ static int mvpp2_rx(struct mvpp2_port *port, struct napi_struct *napi, xdp.data = data + MVPP2_MH_SIZE + MVPP2_SKB_HEADROOM; xdp.data_end = xdp.data + rx_bytes; xdp.frame_sz = PAGE_SIZE; + xdp.mb = 0; if (bm_pool->pkt_size == MVPP2_BM_SHORT_PKT_SIZE) xdp.rxq = &rxq->xdp_rxq_short; diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c index 99d7737e8ad6..de1ae36b068e 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c @@ -684,6 +684,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud xdp_prog = rcu_dereference(ring->xdp_prog); xdp.rxq = &ring->xdp_rxq; xdp.frame_sz = priv->frag_info[0].frag_stride; + xdp.mb = 0; doorbell_pending = 0; /* We assume a 1:1 mapping between CQEs and Rx descriptors, so Rx diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index 599f5b5ebc97..82c3e755dadd 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c @@ -1133,6 +1133,7 @@ static void mlx5e_fill_xdp_buff(struct mlx5e_rq *rq, void *va, u16 headroom, xdp->data_end = xdp->data + len; xdp->rxq = &rq->xdp_rxq; xdp->frame_sz = rq->buff.frame0_sz; + xdp->mb = 0; } static 
struct sk_buff * diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c index b150da43adb2..69fab1010752 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c @@ -1824,6 +1824,7 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget) true_bufsz = xdp_prog ? PAGE_SIZE : dp->fl_bufsz; xdp.frame_sz = PAGE_SIZE - NFP_NET_RX_BUF_HEADROOM; xdp.rxq = &rx_ring->xdp_rxq; + xdp.mb = 0; tx_ring = r_vec->xdp_ring; while (pkts_polled < budget) { diff --git a/drivers/net/ethernet/qlogic/qede/qede_fp.c b/drivers/net/ethernet/qlogic/qede/qede_fp.c index a2494bf85007..14a54094ca08 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_fp.c +++ b/drivers/net/ethernet/qlogic/qede/qede_fp.c @@ -1096,6 +1096,7 @@ static bool qede_rx_xdp(struct qede_dev *edev, xdp.data_end = xdp.data + *len; xdp.rxq = &rxq->xdp_rxq; xdp.frame_sz = rxq->rx_buf_seg_size; /* PAGE_SIZE when XDP enabled */ + xdp.mb = 0; /* Queues always have a full reset currently, so for the time * being until there's atomic program replace just mark read diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c index aaa112877561..286feb510c21 100644 --- a/drivers/net/ethernet/sfc/rx.c +++ b/drivers/net/ethernet/sfc/rx.c @@ -301,6 +301,7 @@ static bool efx_do_xdp(struct efx_nic *efx, struct efx_channel *channel, xdp.data_end = xdp.data + rx_buf->len; xdp.rxq = &rx_queue->xdp_rxq_info; xdp.frame_sz = efx->rx_page_buf_step; + xdp.mb = 0; xdp_act = bpf_prog_run_xdp(xdp_prog, &xdp); rcu_read_unlock(); diff --git a/drivers/net/ethernet/socionext/netsec.c b/drivers/net/ethernet/socionext/netsec.c index 806eb651cea3..0f0567083a6c 100644 --- a/drivers/net/ethernet/socionext/netsec.c +++ b/drivers/net/ethernet/socionext/netsec.c @@ -947,6 +947,7 @@ static int netsec_process_rx(struct netsec_priv *priv, int budget) xdp.rxq = &dring->xdp_rxq; xdp.frame_sz = PAGE_SIZE; + xdp.mb = 0; rcu_read_lock(); xdp_prog = READ_ONCE(priv->xdp_prog); diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index 9fd1f77190ad..558e0abb03c1 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c @@ -407,6 +407,7 @@ static void cpsw_rx_handler(void *token, int len, int status) xdp.data_hard_start = pa; xdp.rxq = &priv->xdp_rxq[ch]; xdp.frame_sz = PAGE_SIZE; + xdp.mb = 0; port = priv->emac_port + cpsw->data.dual_emac; ret = cpsw_run_xdp(priv, ch, &xdp, page, port); diff --git a/drivers/net/ethernet/ti/cpsw_new.c b/drivers/net/ethernet/ti/cpsw_new.c index f779d2e1b5c5..7baab97e302a 100644 --- a/drivers/net/ethernet/ti/cpsw_new.c +++ b/drivers/net/ethernet/ti/cpsw_new.c @@ -350,6 +350,7 @@ static void cpsw_rx_handler(void *token, int len, int status) xdp.data_hard_start = pa; xdp.rxq = &priv->xdp_rxq[ch]; xdp.frame_sz = PAGE_SIZE; + xdp.mb = 0; ret = cpsw_run_xdp(priv, ch, &xdp, page, priv->emac_port); if (ret != CPSW_XDP_PASS) diff --git a/drivers/net/hyperv/netvsc_bpf.c b/drivers/net/hyperv/netvsc_bpf.c index 440486d9c999..a4bafc64997f 100644 --- a/drivers/net/hyperv/netvsc_bpf.c +++ b/drivers/net/hyperv/netvsc_bpf.c @@ -50,6 +50,7 @@ u32 netvsc_run_xdp(struct net_device *ndev, struct netvsc_channel *nvchan, xdp->data_end = xdp->data + len; xdp->rxq = &nvchan->xdp_rxq; xdp->frame_sz = PAGE_SIZE; + xdp->mb = 0; memcpy(xdp->data, data, len); diff --git a/drivers/net/tun.c b/drivers/net/tun.c index be69d272052f..d8380feb7626 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c 
@@ -1641,6 +1641,7 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun, xdp.data_end = xdp.data + len; xdp.rxq = &tfile->xdp_rxq; xdp.frame_sz = buflen; + xdp.mb = 0; act = bpf_prog_run_xdp(xdp_prog, &xdp); if (act == XDP_REDIRECT || act == XDP_TX) { @@ -2388,6 +2389,7 @@ static int tun_xdp_one(struct tun_struct *tun, xdp_set_data_meta_invalid(xdp); xdp->rxq = &tfile->xdp_rxq; xdp->frame_sz = buflen; + xdp->mb = 0; act = bpf_prog_run_xdp(xdp_prog, xdp); err = tun_xdp_act(tun, xdp_prog, xdp, act); diff --git a/drivers/net/veth.c b/drivers/net/veth.c index 091e5b4ba042..e25af95a532d 100644 --- a/drivers/net/veth.c +++ b/drivers/net/veth.c @@ -711,6 +711,7 @@ static struct sk_buff *veth_xdp_rcv_skb(struct veth_rq *rq, /* SKB "head" area always have tailroom for skb_shared_info */ xdp.frame_sz = (void *)skb_end_pointer(skb) - xdp.data_hard_start; xdp.frame_sz += SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); + xdp.mb = 0; orig_data = xdp.data; orig_data_end = xdp.data_end; diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 7145c83c6c8c..3d39d7622840 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -690,6 +690,7 @@ static struct sk_buff *receive_small(struct net_device *dev, xdp.data_meta = xdp.data; xdp.rxq = &rq->xdp_rxq; xdp.frame_sz = buflen; + xdp.mb = 0; orig_data = xdp.data; act = bpf_prog_run_xdp(xdp_prog, &xdp); stats->xdp_packets++; @@ -860,6 +861,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev, xdp.data_meta = xdp.data; xdp.rxq = &rq->xdp_rxq; xdp.frame_sz = frame_sz - vi->hdr_len; + xdp.mb = 0; act = bpf_prog_run_xdp(xdp_prog, &xdp); stats->xdp_packets++; diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c index 3e9895bec15f..00440ad34ca8 100644 --- a/drivers/net/xen-netfront.c +++ b/drivers/net/xen-netfront.c @@ -870,6 +870,7 @@ static u32 xennet_run_xdp(struct netfront_queue *queue, struct page *pdata, xdp->data_end = xdp->data + len; xdp->rxq = &queue->xdp_rxq; xdp->frame_sz = XEN_PAGE_SIZE - XDP_PACKET_HEADROOM; + xdp->mb = 0; act = bpf_prog_run_xdp(prog, xdp); switch (act) { diff --git a/net/core/dev.c b/net/core/dev.c index 9d55bf5d1a65..1e78b028518d 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -4640,6 +4640,7 @@ static u32 netif_receive_generic_xdp(struct sk_buff *skb, /* SKB "head" area always have tailroom for skb_shared_info */ xdp->frame_sz = (void *)skb_end_pointer(skb) - xdp->data_hard_start; xdp->frame_sz += SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); + xdp->mb = 0; orig_data_end = xdp->data_end; orig_data = xdp->data; From patchwork Wed Sep 30 15:41:54 2020 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Lorenzo Bianconi X-Patchwork-Id: 1374484 X-Patchwork-Delegate: bpf@iogearbox.net Return-Path: X-Original-To: patchwork-incoming-netdev@ozlabs.org Delivered-To: patchwork-incoming-netdev@ozlabs.org Authentication-Results: ozlabs.org; spf=pass (sender SPF authorized) smtp.mailfrom=vger.kernel.org (client-ip=23.128.96.18; helo=vger.kernel.org; envelope-from=netdev-owner@vger.kernel.org; receiver=) Authentication-Results: ozlabs.org; dmarc=pass (p=none dis=none) header.from=kernel.org Authentication-Results: ozlabs.org; dkim=pass (1024-bit key; unprotected) header.d=kernel.org header.i=@kernel.org header.a=rsa-sha256 header.s=default header.b=amWYgM4R; dkim-atps=neutral Received: from vger.kernel.org (vger.kernel.org [23.128.96.18]) by ozlabs.org (Postfix) with ESMTP id 4C1gW22H9Mz9sT6 for ; Thu, 1 Oct 
2020 01:42:38 +1000 (AEST) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S1731047AbgI3Pmh (ORCPT ); Wed, 30 Sep 2020 11:42:37 -0400 Received: from mail.kernel.org ([198.145.29.99]:53024 "EHLO mail.kernel.org" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S1727438AbgI3Pmg (ORCPT ); Wed, 30 Sep 2020 11:42:36 -0400 Received: from lore-desk.redhat.com (unknown [176.207.245.61]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by mail.kernel.org (Postfix) with ESMTPSA id D35B520738; Wed, 30 Sep 2020 15:42:32 +0000 (UTC) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=kernel.org; s=default; t=1601480555; bh=eKPJVzu6E5/S5wENfNGlIxA6X+/VOWn+imBWPRph7Mk=; h=From:To:Cc:Subject:Date:In-Reply-To:References:From; b=amWYgM4RMH0spzqabcGmncwJmv6YRYnhBBsN+xvxOrT2yjThaD+CfqACQFOSGZ6To dGDABoZr3vdAbPcqcgd0WrdCxYFfFegG2v+44XRQ3T9ArbWHYE7ZPm3AmTEO6XqiEM 2lxtSTMy8cDyIXfMtdBLc11CDA+wLS6rjJBloDzk= From: Lorenzo Bianconi To: netdev@vger.kernel.org Cc: bpf@vger.kernel.org, davem@davemloft.net, sameehj@amazon.com, kuba@kernel.org, john.fastabend@gmail.com, daniel@iogearbox.net, ast@kernel.org, shayagr@amazon.com, brouer@redhat.com, echaudro@redhat.com, lorenzo.bianconi@redhat.com, dsahern@kernel.org Subject: [PATCH v3 net-next 03/12] net: mvneta: update mb bit before passing the xdp buffer to eBPF layer Date: Wed, 30 Sep 2020 17:41:54 +0200 Message-Id: <381e1fa2fb5c091780677a9b4f2518a2b3c591a0.1601478613.git.lorenzo@kernel.org> X-Mailer: git-send-email 2.26.2 In-Reply-To: References: MIME-Version: 1.0 Precedence: bulk List-ID: X-Mailing-List: netdev@vger.kernel.org Update multi-buffer bit (mb) in xdp_buff to notify XDP/eBPF layer and XDP remote drivers if this is a "non-linear" XDP buffer. Access skb_shared_info only if xdp_buff mb is set Signed-off-by: Lorenzo Bianconi --- drivers/net/ethernet/marvell/mvneta.c | 42 +++++++++++++++++---------- 1 file changed, 26 insertions(+), 16 deletions(-) diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index d095718355d3..a431e8478297 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c @@ -2027,12 +2027,17 @@ static void mvneta_xdp_put_buff(struct mvneta_port *pp, struct mvneta_rx_queue *rxq, struct xdp_buff *xdp, int sync_len, bool napi) { - struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp); + struct skb_shared_info *sinfo; int i; + if (likely(!xdp->mb)) + goto out; + + sinfo = xdp_get_shared_info_from_buff(xdp); for (i = 0; i < sinfo->nr_frags; i++) page_pool_put_full_page(rxq->page_pool, skb_frag_page(&sinfo->frags[i]), napi); +out: page_pool_put_page(rxq->page_pool, virt_to_head_page(xdp->data), sync_len, napi); } @@ -2234,7 +2239,6 @@ mvneta_swbm_rx_frame(struct mvneta_port *pp, int data_len = -MVNETA_MH_SIZE, len; struct net_device *dev = pp->dev; enum dma_data_direction dma_dir; - struct skb_shared_info *sinfo; if (*size > MVNETA_MAX_RX_BUF_SIZE) { len = MVNETA_MAX_RX_BUF_SIZE; @@ -2259,9 +2263,6 @@ mvneta_swbm_rx_frame(struct mvneta_port *pp, xdp->data = data + pp->rx_offset_correction + MVNETA_MH_SIZE; xdp->data_end = xdp->data + data_len; xdp_set_data_meta_invalid(xdp); - - sinfo = xdp_get_shared_info_from_buff(xdp); - sinfo->nr_frags = 0; } static void @@ -2272,9 +2273,9 @@ mvneta_swbm_add_rx_fragment(struct mvneta_port *pp, struct page *page) { struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp); + int data_len, len, nfrags = xdp->mb ? 
sinfo->nr_frags : 0; struct net_device *dev = pp->dev; enum dma_data_direction dma_dir; - int data_len, len; if (*size > MVNETA_MAX_RX_BUF_SIZE) { len = MVNETA_MAX_RX_BUF_SIZE; @@ -2288,17 +2289,21 @@ mvneta_swbm_add_rx_fragment(struct mvneta_port *pp, rx_desc->buf_phys_addr, len, dma_dir); - if (data_len > 0 && sinfo->nr_frags < MAX_SKB_FRAGS) { - skb_frag_t *frag = &sinfo->frags[sinfo->nr_frags]; + if (data_len > 0 && nfrags < MAX_SKB_FRAGS) { + skb_frag_t *frag = &sinfo->frags[nfrags]; skb_frag_off_set(frag, pp->rx_offset_correction); skb_frag_size_set(frag, data_len); __skb_frag_set_page(frag, page); - sinfo->nr_frags++; - - rx_desc->buf_phys_addr = 0; + nfrags++; + } else { + page_pool_put_full_page(rxq->page_pool, page, true); } + + rx_desc->buf_phys_addr = 0; + sinfo->nr_frags = nfrags; *size -= len; + xdp->mb = 1; } static struct sk_buff * @@ -2306,7 +2311,7 @@ mvneta_swbm_build_skb(struct mvneta_port *pp, struct mvneta_rx_queue *rxq, struct xdp_buff *xdp, u32 desc_status) { struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp); - int i, num_frags = sinfo->nr_frags; + int i, num_frags = xdp->mb ? sinfo->nr_frags : 0; struct sk_buff *skb; skb = build_skb(xdp->data_hard_start, PAGE_SIZE); @@ -2319,6 +2324,9 @@ mvneta_swbm_build_skb(struct mvneta_port *pp, struct mvneta_rx_queue *rxq, skb_put(skb, xdp->data_end - xdp->data); mvneta_rx_csum(pp, desc_status, skb); + if (likely(!xdp->mb)) + return skb; + for (i = 0; i < num_frags; i++) { skb_frag_t *frag = &sinfo->frags[i]; @@ -2338,13 +2346,14 @@ static int mvneta_rx_swbm(struct napi_struct *napi, { int rx_proc = 0, rx_todo, refill, size = 0; struct net_device *dev = pp->dev; - struct xdp_buff xdp_buf = { - .frame_sz = PAGE_SIZE, - .rxq = &rxq->xdp_rxq, - }; struct mvneta_stats ps = {}; struct bpf_prog *xdp_prog; u32 desc_status, frame_sz; + struct xdp_buff xdp_buf; + + xdp_buf.data_hard_start = NULL; + xdp_buf.frame_sz = PAGE_SIZE; + xdp_buf.rxq = &rxq->xdp_rxq; /* Get number of received packets */ rx_todo = mvneta_rxq_busy_desc_num_get(pp, rxq); @@ -2377,6 +2386,7 @@ static int mvneta_rx_swbm(struct napi_struct *napi, frame_sz = size - ETH_FCS_LEN; desc_status = rx_status; + xdp_buf.mb = 0; mvneta_swbm_rx_frame(pp, rx_desc, rxq, &xdp_buf, &size, page); } else { From patchwork Wed Sep 30 15:41:55 2020 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Lorenzo Bianconi X-Patchwork-Id: 1374485 X-Patchwork-Delegate: bpf@iogearbox.net Return-Path: X-Original-To: patchwork-incoming-netdev@ozlabs.org Delivered-To: patchwork-incoming-netdev@ozlabs.org Authentication-Results: ozlabs.org; spf=pass (sender SPF authorized) smtp.mailfrom=vger.kernel.org (client-ip=23.128.96.18; helo=vger.kernel.org; envelope-from=netdev-owner@vger.kernel.org; receiver=) Authentication-Results: ozlabs.org; dmarc=pass (p=none dis=none) header.from=kernel.org Authentication-Results: ozlabs.org; dkim=pass (1024-bit key; unprotected) header.d=kernel.org header.i=@kernel.org header.a=rsa-sha256 header.s=default header.b=cV12iS6G; dkim-atps=neutral Received: from vger.kernel.org (vger.kernel.org [23.128.96.18]) by ozlabs.org (Postfix) with ESMTP id 4C1gW60TMtz9sSG for ; Thu, 1 Oct 2020 01:42:42 +1000 (AEST) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S1731069AbgI3Pml (ORCPT ); Wed, 30 Sep 2020 11:42:41 -0400 Received: from mail.kernel.org ([198.145.29.99]:53058 "EHLO mail.kernel.org" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S1731051AbgI3Pmj (ORCPT 
); Wed, 30 Sep 2020 11:42:39 -0400 Received: from lore-desk.redhat.com (unknown [176.207.245.61]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by mail.kernel.org (Postfix) with ESMTPSA id 38D2E20759; Wed, 30 Sep 2020 15:42:36 +0000 (UTC) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=kernel.org; s=default; t=1601480558; bh=S/93yEwe9N0oMZRZSBWUZ2mWvYgyKCZBmf9CGEZiUhk=; h=From:To:Cc:Subject:Date:In-Reply-To:References:From; b=cV12iS6GPkJJtnx8CHHFWekQfCHjk5f1qJU16z/jeea5+lpBsebDcmEykyuLLUnJ4 G46OdOUzxEX0FVEiaONAeOJ7OzyHcd9/k78h01G/4AuZhaaDwmxzaLzhHDXCjF1utw lymZi5H5IqhGSJgcJ6bRgmhDGzuT68O3zmqLYAm4= From: Lorenzo Bianconi To: netdev@vger.kernel.org Cc: bpf@vger.kernel.org, davem@davemloft.net, sameehj@amazon.com, kuba@kernel.org, john.fastabend@gmail.com, daniel@iogearbox.net, ast@kernel.org, shayagr@amazon.com, brouer@redhat.com, echaudro@redhat.com, lorenzo.bianconi@redhat.com, dsahern@kernel.org Subject: [PATCH v3 net-next 04/12] xdp: add multi-buff support to xdp_return_{buff/frame} Date: Wed, 30 Sep 2020 17:41:55 +0200 Message-Id: <45c1f003bb828e31ba4ab15e44a21c99de24b223.1601478613.git.lorenzo@kernel.org> X-Mailer: git-send-email 2.26.2 In-Reply-To: References: MIME-Version: 1.0 Precedence: bulk List-ID: X-Mailing-List: netdev@vger.kernel.org Take into account if the received xdp_buff/xdp_frame is non-linear recycling/returning the frame memory to the allocator Signed-off-by: Lorenzo Bianconi --- include/net/xdp.h | 18 ++++++++++++++++-- net/core/xdp.c | 39 +++++++++++++++++++++++++++++++++++++++ 2 files changed, 55 insertions(+), 2 deletions(-) diff --git a/include/net/xdp.h b/include/net/xdp.h index 42f439f9fcda..4d47076546ff 100644 --- a/include/net/xdp.h +++ b/include/net/xdp.h @@ -208,10 +208,24 @@ void __xdp_release_frame(void *data, struct xdp_mem_info *mem); static inline void xdp_release_frame(struct xdp_frame *xdpf) { struct xdp_mem_info *mem = &xdpf->mem; + struct skb_shared_info *sinfo; + int i; /* Curr only page_pool needs this */ - if (mem->type == MEM_TYPE_PAGE_POOL) - __xdp_release_frame(xdpf->data, mem); + if (mem->type != MEM_TYPE_PAGE_POOL) + return; + + if (likely(!xdpf->mb)) + goto out; + + sinfo = xdp_get_shared_info_from_frame(xdpf); + for (i = 0; i < sinfo->nr_frags; i++) { + struct page *page = skb_frag_page(&sinfo->frags[i]); + + __xdp_release_frame(page_address(page), mem); + } +out: + __xdp_release_frame(xdpf->data, mem); } int xdp_rxq_info_reg(struct xdp_rxq_info *xdp_rxq, diff --git a/net/core/xdp.c b/net/core/xdp.c index 884f140fc3be..6d4fd4dddb00 100644 --- a/net/core/xdp.c +++ b/net/core/xdp.c @@ -370,18 +370,57 @@ static void __xdp_return(void *data, struct xdp_mem_info *mem, bool napi_direct) void xdp_return_frame(struct xdp_frame *xdpf) { + struct skb_shared_info *sinfo; + int i; + + if (likely(!xdpf->mb)) + goto out; + + sinfo = xdp_get_shared_info_from_frame(xdpf); + for (i = 0; i < sinfo->nr_frags; i++) { + struct page *page = skb_frag_page(&sinfo->frags[i]); + + __xdp_return(page_address(page), &xdpf->mem, false); + } +out: __xdp_return(xdpf->data, &xdpf->mem, false); } EXPORT_SYMBOL_GPL(xdp_return_frame); void xdp_return_frame_rx_napi(struct xdp_frame *xdpf) { + struct skb_shared_info *sinfo; + int i; + + if (likely(!xdpf->mb)) + goto out; + + sinfo = xdp_get_shared_info_from_frame(xdpf); + for (i = 0; i < sinfo->nr_frags; i++) { + struct page *page = skb_frag_page(&sinfo->frags[i]); + + __xdp_return(page_address(page), &xdpf->mem, true); + } +out: __xdp_return(xdpf->data, 
&xdpf->mem, true); } EXPORT_SYMBOL_GPL(xdp_return_frame_rx_napi); void xdp_return_buff(struct xdp_buff *xdp) { + struct skb_shared_info *sinfo; + int i; + + if (likely(!xdp->mb)) + goto out; + + sinfo = xdp_get_shared_info_from_buff(xdp); + for (i = 0; i < sinfo->nr_frags; i++) { + struct page *page = skb_frag_page(&sinfo->frags[i]); + + __xdp_return(page_address(page), &xdp->rxq->mem, true); + } +out: __xdp_return(xdp->data, &xdp->rxq->mem, true); } From patchwork Wed Sep 30 15:41:56 2020 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Lorenzo Bianconi X-Patchwork-Id: 1374486 X-Patchwork-Delegate: bpf@iogearbox.net Return-Path: X-Original-To: patchwork-incoming-netdev@ozlabs.org Delivered-To: patchwork-incoming-netdev@ozlabs.org Authentication-Results: ozlabs.org; spf=pass (sender SPF authorized) smtp.mailfrom=vger.kernel.org (client-ip=23.128.96.18; helo=vger.kernel.org; envelope-from=netdev-owner@vger.kernel.org; receiver=) Authentication-Results: ozlabs.org; dmarc=pass (p=none dis=none) header.from=kernel.org Authentication-Results: ozlabs.org; dkim=pass (1024-bit key; unprotected) header.d=kernel.org header.i=@kernel.org header.a=rsa-sha256 header.s=default header.b=Ig3UIpfs; dkim-atps=neutral Received: from vger.kernel.org (vger.kernel.org [23.128.96.18]) by ozlabs.org (Postfix) with ESMTP id 4C1gW96QlDz9sSG for ; Thu, 1 Oct 2020 01:42:45 +1000 (AEST) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S1731072AbgI3Pmp (ORCPT ); Wed, 30 Sep 2020 11:42:45 -0400 Received: from mail.kernel.org ([198.145.29.99]:53106 "EHLO mail.kernel.org" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S1730654AbgI3Pmm (ORCPT ); Wed, 30 Sep 2020 11:42:42 -0400 Received: from lore-desk.redhat.com (unknown [176.207.245.61]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by mail.kernel.org (Postfix) with ESMTPSA id B2E54207FB; Wed, 30 Sep 2020 15:42:39 +0000 (UTC) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=kernel.org; s=default; t=1601480562; bh=DHwBCoNNncxM6JrG4uH+mEpyA4XxNAtuDwPzRels8lE=; h=From:To:Cc:Subject:Date:In-Reply-To:References:From; b=Ig3UIpfsR+ZpaeWLZBk2ok30cLrOivsmWSUOfIXtqj3UQi7DiLowZACXr0NNPgLvT rlNhUl8HIQFIR3gCQ3YYKZW4fj6bJeAzIHGciKcAxJtK9iVvPvHXLOxDgylBhB8jO1 X2OJc4oJH5XQSetycO6wCOdme6fhYxjMmaUtCb4I= From: Lorenzo Bianconi To: netdev@vger.kernel.org Cc: bpf@vger.kernel.org, davem@davemloft.net, sameehj@amazon.com, kuba@kernel.org, john.fastabend@gmail.com, daniel@iogearbox.net, ast@kernel.org, shayagr@amazon.com, brouer@redhat.com, echaudro@redhat.com, lorenzo.bianconi@redhat.com, dsahern@kernel.org Subject: [PATCH v3 net-next 05/12] net: mvneta: add multi buffer support to XDP_TX Date: Wed, 30 Sep 2020 17:41:56 +0200 Message-Id: X-Mailer: git-send-email 2.26.2 In-Reply-To: References: MIME-Version: 1.0 Precedence: bulk List-ID: X-Mailing-List: netdev@vger.kernel.org Introduce the capability to map non-linear xdp buffer running mvneta_xdp_submit_frame() for XDP_TX and XDP_REDIRECT Signed-off-by: Lorenzo Bianconi --- drivers/net/ethernet/marvell/mvneta.c | 79 +++++++++++++++++---------- 1 file changed, 49 insertions(+), 30 deletions(-) diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index a431e8478297..f709650974ea 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c @@ -1852,8 +1852,8 @@ static void mvneta_txq_bufs_free(struct mvneta_port 
*pp, bytes_compl += buf->skb->len; pkts_compl++; dev_kfree_skb_any(buf->skb); - } else if (buf->type == MVNETA_TYPE_XDP_TX || - buf->type == MVNETA_TYPE_XDP_NDO) { + } else if ((buf->type == MVNETA_TYPE_XDP_TX || + buf->type == MVNETA_TYPE_XDP_NDO) && buf->xdpf) { if (napi && buf->type == MVNETA_TYPE_XDP_TX) xdp_return_frame_rx_napi(buf->xdpf); else @@ -2046,43 +2046,62 @@ static int mvneta_xdp_submit_frame(struct mvneta_port *pp, struct mvneta_tx_queue *txq, struct xdp_frame *xdpf, bool dma_map) { - struct mvneta_tx_desc *tx_desc; - struct mvneta_tx_buf *buf; - dma_addr_t dma_addr; + struct skb_shared_info *sinfo = xdp_get_shared_info_from_frame(xdpf); + int i, num_frames = xdpf->mb ? sinfo->nr_frags + 1 : 1; + struct mvneta_tx_desc *tx_desc = NULL; + struct page *page; - if (txq->count >= txq->tx_stop_threshold) + if (txq->count + num_frames >= txq->tx_stop_threshold) return MVNETA_XDP_DROPPED; - tx_desc = mvneta_txq_next_desc_get(txq); + for (i = 0; i < num_frames; i++) { + struct mvneta_tx_buf *buf = &txq->buf[txq->txq_put_index]; + skb_frag_t *frag = i ? &sinfo->frags[i - 1] : NULL; + int len = frag ? skb_frag_size(frag) : xdpf->len; + dma_addr_t dma_addr; - buf = &txq->buf[txq->txq_put_index]; - if (dma_map) { - /* ndo_xdp_xmit */ - dma_addr = dma_map_single(pp->dev->dev.parent, xdpf->data, - xdpf->len, DMA_TO_DEVICE); - if (dma_mapping_error(pp->dev->dev.parent, dma_addr)) { - mvneta_txq_desc_put(txq); - return MVNETA_XDP_DROPPED; + tx_desc = mvneta_txq_next_desc_get(txq); + if (dma_map) { + /* ndo_xdp_xmit */ + void *data; + + data = frag ? skb_frag_address(frag) : xdpf->data; + dma_addr = dma_map_single(pp->dev->dev.parent, data, + len, DMA_TO_DEVICE); + if (dma_mapping_error(pp->dev->dev.parent, dma_addr)) { + for (; i >= 0; i--) + mvneta_txq_desc_put(txq); + return MVNETA_XDP_DROPPED; + } + buf->type = MVNETA_TYPE_XDP_NDO; + } else { + page = frag ? skb_frag_page(frag) + : virt_to_page(xdpf->data); + dma_addr = page_pool_get_dma_addr(page); + if (frag) + dma_addr += skb_frag_off(frag); + else + dma_addr += sizeof(*xdpf) + xdpf->headroom; + dma_sync_single_for_device(pp->dev->dev.parent, + dma_addr, len, + DMA_BIDIRECTIONAL); + buf->type = MVNETA_TYPE_XDP_TX; } - buf->type = MVNETA_TYPE_XDP_NDO; - } else { - struct page *page = virt_to_page(xdpf->data); + buf->xdpf = i ? 
NULL : xdpf; - dma_addr = page_pool_get_dma_addr(page) + - sizeof(*xdpf) + xdpf->headroom; - dma_sync_single_for_device(pp->dev->dev.parent, dma_addr, - xdpf->len, DMA_BIDIRECTIONAL); - buf->type = MVNETA_TYPE_XDP_TX; + if (!i) + tx_desc->command = MVNETA_TXD_F_DESC; + tx_desc->buf_phys_addr = dma_addr; + tx_desc->data_size = len; + + mvneta_txq_inc_put(txq); } - buf->xdpf = xdpf; - tx_desc->command = MVNETA_TXD_FLZ_DESC; - tx_desc->buf_phys_addr = dma_addr; - tx_desc->data_size = xdpf->len; + /*last descriptor */ + tx_desc->command |= MVNETA_TXD_L_DESC | MVNETA_TXD_Z_PAD; - mvneta_txq_inc_put(txq); - txq->pending++; - txq->count++; + txq->pending += num_frames; + txq->count += num_frames; return MVNETA_XDP_TX; } From patchwork Wed Sep 30 15:41:57 2020 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Lorenzo Bianconi X-Patchwork-Id: 1374487 X-Patchwork-Delegate: bpf@iogearbox.net Return-Path: X-Original-To: patchwork-incoming-netdev@ozlabs.org Delivered-To: patchwork-incoming-netdev@ozlabs.org Authentication-Results: ozlabs.org; spf=pass (sender SPF authorized) smtp.mailfrom=vger.kernel.org (client-ip=23.128.96.18; helo=vger.kernel.org; envelope-from=netdev-owner@vger.kernel.org; receiver=) Authentication-Results: ozlabs.org; dmarc=pass (p=none dis=none) header.from=kernel.org Authentication-Results: ozlabs.org; dkim=pass (1024-bit key; unprotected) header.d=kernel.org header.i=@kernel.org header.a=rsa-sha256 header.s=default header.b=inOaVfTQ; dkim-atps=neutral Received: from vger.kernel.org (vger.kernel.org [23.128.96.18]) by ozlabs.org (Postfix) with ESMTP id 4C1gWF45Dvz9sSn for ; Thu, 1 Oct 2020 01:42:49 +1000 (AEST) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S1731081AbgI3Pmt (ORCPT ); Wed, 30 Sep 2020 11:42:49 -0400 Received: from mail.kernel.org ([198.145.29.99]:53174 "EHLO mail.kernel.org" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S1728006AbgI3Pms (ORCPT ); Wed, 30 Sep 2020 11:42:48 -0400 Received: from lore-desk.redhat.com (unknown [176.207.245.61]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by mail.kernel.org (Postfix) with ESMTPSA id E0E1820789; Wed, 30 Sep 2020 15:42:44 +0000 (UTC) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=kernel.org; s=default; t=1601480567; bh=XBrbju6dXd0874L2ovtgvBsMIReLnuBKM3bucBGyPs8=; h=From:To:Cc:Subject:Date:In-Reply-To:References:From; b=inOaVfTQjodlY5xn5CjkU2hFgUOnZSV3UATaFAlnogaTFN08f+aSUVTXQOzXTnYrJ E8VYAarRPgHUaoni4ODch/pgJ/NUmB4tk3R580COWHh1juErJKYkf4ftppdy6Q8BIJ axy6XWPZbIEw7NFAVJAV6XICO8JO6pty153hhOmg= From: Lorenzo Bianconi To: netdev@vger.kernel.org Cc: bpf@vger.kernel.org, davem@davemloft.net, sameehj@amazon.com, kuba@kernel.org, john.fastabend@gmail.com, daniel@iogearbox.net, ast@kernel.org, shayagr@amazon.com, brouer@redhat.com, echaudro@redhat.com, lorenzo.bianconi@redhat.com, dsahern@kernel.org Subject: [PATCH v3 net-next 06/12] bpf: helpers: add multibuffer support Date: Wed, 30 Sep 2020 17:41:57 +0200 Message-Id: <5e248485713d2470d97f36ad67c9b3ceedfc2b3f.1601478613.git.lorenzo@kernel.org> X-Mailer: git-send-email 2.26.2 In-Reply-To: References: MIME-Version: 1.0 Precedence: bulk List-ID: X-Mailing-List: netdev@vger.kernel.org From: Sameeh Jubran The implementation is based on this [0] draft by Jesper D. Brouer. 
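As a quick, non-authoritative illustration of the intended BPF-program-side usage of the two helpers listed just below (bpf_xdp_get_frag_count() and bpf_xdp_get_frags_total_size()): this sketch assumes a libbpf build whose generated helper definitions already include the new helper IDs, and the section name is arbitrary; the complete sample program is added in patch 07 of this series.

// SPDX-License-Identifier: GPL-2.0
/* Sketch only: count linear + paged bytes of each received frame.
 * Assumes bpf_xdp_get_frag_count()/bpf_xdp_get_frags_total_size()
 * are exposed through regenerated libbpf helper definitions.
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("xdp_mb_len")
int xdp_mb_len_prog(struct xdp_md *ctx)
{
	void *data = (void *)(long)ctx->data;
	void *data_end = (void *)(long)ctx->data_end;
	int len = data_end - data;		/* linear part */

	/* paged part; returns 0 when the mb bit is not set */
	len += bpf_xdp_get_frags_total_size(ctx);

	if (bpf_xdp_get_frag_count(ctx))
		bpf_printk("multi-buffer frame, %d bytes total", len);

	return XDP_PASS;
}

char _license[] SEC("license") = "GPL";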
Provided two new helpers: * bpf_xdp_get_frag_count() * bpf_xdp_get_frags_total_size() [0] xdp mb design - https://github.com/xdp-project/xdp-project/blob/master/areas/core/xdp-multi-buffer01-design.org Signed-off-by: Sameeh Jubran Co-developed-by: Lorenzo Bianconi Signed-off-by: Lorenzo Bianconi --- include/uapi/linux/bpf.h | 14 ++++++++++++ net/core/filter.c | 42 ++++++++++++++++++++++++++++++++++ tools/include/uapi/linux/bpf.h | 14 ++++++++++++ 3 files changed, 70 insertions(+) diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index a22812561064..6f97dce8cccf 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -3586,6 +3586,18 @@ union bpf_attr { * the data in *dst*. This is a wrapper of **copy_from_user**\ (). * Return * 0 on success, or a negative error in case of failure. + * + * int bpf_xdp_get_frag_count(struct xdp_buff *xdp_md) + * Description + * Get the number of fragments for a given xdp multi-buffer. + * Return + * The number of fragments + * + * int bpf_xdp_get_frags_total_size(struct xdp_buff *xdp_md) + * Description + * Get the total size of fragments for a given xdp multi-buffer. + * Return + * The total size of fragments for a given xdp multi-buffer. */ #define __BPF_FUNC_MAPPER(FN) \ FN(unspec), \ @@ -3737,6 +3749,8 @@ union bpf_attr { FN(inode_storage_delete), \ FN(d_path), \ FN(copy_from_user), \ + FN(xdp_get_frag_count), \ + FN(xdp_get_frags_total_size), \ /* */ /* integer value in 'imm' field of BPF_CALL instruction selects which helper diff --git a/net/core/filter.c b/net/core/filter.c index 706f8db0ccf8..7f33cfae219c 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -3475,6 +3475,44 @@ static const struct bpf_func_proto bpf_xdp_adjust_head_proto = { .arg2_type = ARG_ANYTHING, }; +BPF_CALL_1(bpf_xdp_get_frag_count, struct xdp_buff*, xdp) +{ + struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp); + + return xdp->mb ? sinfo->nr_frags : 0; +} + +const struct bpf_func_proto bpf_xdp_get_frag_count_proto = { + .func = bpf_xdp_get_frag_count, + .gpl_only = false, + .ret_type = RET_INTEGER, + .arg1_type = ARG_PTR_TO_CTX, +}; + +BPF_CALL_1(bpf_xdp_get_frags_total_size, struct xdp_buff*, xdp) +{ + struct skb_shared_info *sinfo; + int nfrags, i, size = 0; + + if (likely(!xdp->mb)) + return 0; + + sinfo = xdp_get_shared_info_from_buff(xdp); + nfrags = min_t(u8, sinfo->nr_frags, MAX_SKB_FRAGS); + + for (i = 0; i < nfrags; i++) + size += skb_frag_size(&sinfo->frags[i]); + + return size; +} + +const struct bpf_func_proto bpf_xdp_get_frags_total_size_proto = { + .func = bpf_xdp_get_frags_total_size, + .gpl_only = false, + .ret_type = RET_INTEGER, + .arg1_type = ARG_PTR_TO_CTX, +}; + BPF_CALL_2(bpf_xdp_adjust_tail, struct xdp_buff *, xdp, int, offset) { void *data_hard_end = xdp_data_hard_end(xdp); /* use xdp->frame_sz */ @@ -6824,6 +6862,10 @@ xdp_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) return &bpf_xdp_redirect_map_proto; case BPF_FUNC_xdp_adjust_tail: return &bpf_xdp_adjust_tail_proto; + case BPF_FUNC_xdp_get_frag_count: + return &bpf_xdp_get_frag_count_proto; + case BPF_FUNC_xdp_get_frags_total_size: + return &bpf_xdp_get_frags_total_size_proto; case BPF_FUNC_fib_lookup: return &bpf_xdp_fib_lookup_proto; #ifdef CONFIG_INET diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h index a22812561064..6f97dce8cccf 100644 --- a/tools/include/uapi/linux/bpf.h +++ b/tools/include/uapi/linux/bpf.h @@ -3586,6 +3586,18 @@ union bpf_attr { * the data in *dst*. 
This is a wrapper of **copy_from_user**\ (). * Return * 0 on success, or a negative error in case of failure. + * + * int bpf_xdp_get_frag_count(struct xdp_buff *xdp_md) + * Description + * Get the number of fragments for a given xdp multi-buffer. + * Return + * The number of fragments + * + * int bpf_xdp_get_frags_total_size(struct xdp_buff *xdp_md) + * Description + * Get the total size of fragments for a given xdp multi-buffer. + * Return + * The total size of fragments for a given xdp multi-buffer. */ #define __BPF_FUNC_MAPPER(FN) \ FN(unspec), \ @@ -3737,6 +3749,8 @@ union bpf_attr { FN(inode_storage_delete), \ FN(d_path), \ FN(copy_from_user), \ + FN(xdp_get_frag_count), \ + FN(xdp_get_frags_total_size), \ /* */ /* integer value in 'imm' field of BPF_CALL instruction selects which helper From patchwork Wed Sep 30 15:41:58 2020 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Lorenzo Bianconi X-Patchwork-Id: 1374488 X-Patchwork-Delegate: bpf@iogearbox.net Return-Path: X-Original-To: patchwork-incoming-netdev@ozlabs.org Delivered-To: patchwork-incoming-netdev@ozlabs.org Authentication-Results: ozlabs.org; spf=pass (sender SPF authorized) smtp.mailfrom=vger.kernel.org (client-ip=23.128.96.18; helo=vger.kernel.org; envelope-from=netdev-owner@vger.kernel.org; receiver=) Authentication-Results: ozlabs.org; dmarc=pass (p=none dis=none) header.from=kernel.org Authentication-Results: ozlabs.org; dkim=pass (1024-bit key; unprotected) header.d=kernel.org header.i=@kernel.org header.a=rsa-sha256 header.s=default header.b=zbG9LX5j; dkim-atps=neutral Received: from vger.kernel.org (vger.kernel.org [23.128.96.18]) by ozlabs.org (Postfix) with ESMTP id 4C1gWR6wLRz9sSs for ; Thu, 1 Oct 2020 01:42:59 +1000 (AEST) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S1731113AbgI3Pm6 (ORCPT ); Wed, 30 Sep 2020 11:42:58 -0400 Received: from mail.kernel.org ([198.145.29.99]:53298 "EHLO mail.kernel.org" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S1731080AbgI3Pmz (ORCPT ); Wed, 30 Sep 2020 11:42:55 -0400 Received: from lore-desk.redhat.com (unknown [176.207.245.61]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by mail.kernel.org (Postfix) with ESMTPSA id 5A002207C3; Wed, 30 Sep 2020 15:42:51 +0000 (UTC) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=kernel.org; s=default; t=1601480573; bh=dd+vbQdkgsMj5H2kte9z0Z8PuqVmklgBZzBYRk0e9/c=; h=From:To:Cc:Subject:Date:In-Reply-To:References:From; b=zbG9LX5jFdp/sI+dVAJQ2SQ4HKmuTl5Ep5QjY16f4gv55o0CJ1iQlyovepx3I9O1u b3xrWp1OxdaUYPOqsemCIV4LuzhJWAHmWV9bQHnpNP0hK9r0PfOfbiyGCZ+n6sEAyw 9pJO3Mn0LWY7y6KXa7FO8u1pge4TrYzmpHlRu3js= From: Lorenzo Bianconi To: netdev@vger.kernel.org Cc: bpf@vger.kernel.org, davem@davemloft.net, sameehj@amazon.com, kuba@kernel.org, john.fastabend@gmail.com, daniel@iogearbox.net, ast@kernel.org, shayagr@amazon.com, brouer@redhat.com, echaudro@redhat.com, lorenzo.bianconi@redhat.com, dsahern@kernel.org Subject: [PATCH v3 net-next 07/12] samples/bpf: add bpf program that uses xdp mb helpers Date: Wed, 30 Sep 2020 17:41:58 +0200 Message-Id: <83bf3e20a63759f1cc8a69f671dd3a4fa01e5200.1601478613.git.lorenzo@kernel.org> X-Mailer: git-send-email 2.26.2 In-Reply-To: References: MIME-Version: 1.0 Precedence: bulk List-ID: X-Mailing-List: netdev@vger.kernel.org From: Sameeh Jubran The bpf program returns XDP_PASS for every packet and calculates the total number of bytes in its linear and 
paged parts. The program is executed with: ./xdp_mb [if name] and has the following output format: [if index]: [rx packet count] pkt/sec, [number of bytes] bytes/sec Signed-off-by: Shay Agroskin Signed-off-by: Sameeh Jubran Signed-off-by: Lorenzo Bianconi --- samples/bpf/Makefile | 3 + samples/bpf/xdp_mb_kern.c | 68 ++++++++++++++ samples/bpf/xdp_mb_user.c | 182 ++++++++++++++++++++++++++++++++++++++ 3 files changed, 253 insertions(+) create mode 100644 samples/bpf/xdp_mb_kern.c create mode 100644 samples/bpf/xdp_mb_user.c diff --git a/samples/bpf/Makefile b/samples/bpf/Makefile index 4f1ed0e3cf9f..12e32516f02a 100644 --- a/samples/bpf/Makefile +++ b/samples/bpf/Makefile @@ -54,6 +54,7 @@ tprogs-y += task_fd_query tprogs-y += xdp_sample_pkts tprogs-y += ibumad tprogs-y += hbm +tprogs-y += xdp_mb # Libbpf dependencies LIBBPF = $(TOOLS_PATH)/lib/bpf/libbpf.a @@ -111,6 +112,7 @@ task_fd_query-objs := bpf_load.o task_fd_query_user.o $(TRACE_HELPERS) xdp_sample_pkts-objs := xdp_sample_pkts_user.o $(TRACE_HELPERS) ibumad-objs := bpf_load.o ibumad_user.o $(TRACE_HELPERS) hbm-objs := bpf_load.o hbm.o $(CGROUP_HELPERS) +xdp_mb-objs := xdp_mb_user.o # Tell kbuild to always build the programs always-y := $(tprogs-y) @@ -172,6 +174,7 @@ always-y += ibumad_kern.o always-y += hbm_out_kern.o always-y += hbm_edt_kern.o always-y += xdpsock_kern.o +always-y += xdp_mb_kern.o ifeq ($(ARCH), arm) # Strip all except -D__LINUX_ARM_ARCH__ option needed to handle linux diff --git a/samples/bpf/xdp_mb_kern.c b/samples/bpf/xdp_mb_kern.c new file mode 100644 index 000000000000..554c3b9a3243 --- /dev/null +++ b/samples/bpf/xdp_mb_kern.c @@ -0,0 +1,68 @@ +// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB +/* + * Copyright 2020 Amazon.com, Inc. or its affiliates. All rights reserved. + */ +#define KBUILD_MODNAME "foo" +#include +#include +#include +#include +#include +#include +#include +#include + +/* count RX packets */ +struct { + __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY); + __type(key, u32); + __type(value, long); + __uint(max_entries, 1); +} rx_cnt SEC(".maps"); + +/* count RX fragments */ +struct { + __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY); + __type(key, u32); + __type(value, long); + __uint(max_entries, 1); +} rx_frags SEC(".maps"); + +/* count total number of bytes */ +struct { + __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY); + __type(key, u32); + __type(value, long); + __uint(max_entries, 1); +} tot_len SEC(".maps"); + +SEC("xdp_mb") +int xdp_mb_prog(struct xdp_md *ctx) +{ + void *data_end = (void *)(long)ctx->data_end; + void *data = (void *)(long)ctx->data; + u32 frag_offset = 0, frag_size = 0; + u32 key = 0, nfrags; + long *value; + int i, len; + + value = bpf_map_lookup_elem(&rx_cnt, &key); + if (value) + *value += 1; + + len = data_end - data; + nfrags = bpf_xdp_get_frag_count(ctx); + len += bpf_xdp_get_frags_total_size(ctx); + + value = bpf_map_lookup_elem(&tot_len, &key); + if (value) + *value += len; + + value = bpf_map_lookup_elem(&rx_frags, &key); + if (value) + *value += nfrags; + + return XDP_PASS; +} + +char _license[] SEC("license") = "GPL"; diff --git a/samples/bpf/xdp_mb_user.c b/samples/bpf/xdp_mb_user.c new file mode 100644 index 000000000000..6f555e94b748 --- /dev/null +++ b/samples/bpf/xdp_mb_user.c @@ -0,0 +1,182 @@ +// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB +/* + * Copyright 2020 Amazon.com, Inc. or its affiliates. All rights reserved. 
+ */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "bpf_util.h" +#include +#include + +static __u32 xdp_flags = XDP_FLAGS_UPDATE_IF_NOEXIST | XDP_FLAGS_DRV_MODE; +static __u32 prog_id; +static int rx_cnt_fd, tot_len_fd, rx_frags_fd; +static int ifindex; + +static void int_exit(int sig) +{ + __u32 curr_prog_id = 0; + + if (bpf_get_link_xdp_id(ifindex, &curr_prog_id, xdp_flags)) { + printf("bpf_get_link_xdp_id failed\n"); + exit(1); + } + if (prog_id == curr_prog_id) + bpf_set_link_xdp_fd(ifindex, -1, xdp_flags); + else if (!curr_prog_id) + printf("couldn't find a prog id on a given interface\n"); + else + printf("program on interface changed, not removing\n"); + exit(0); +} + +/* count total packets and bytes per second */ +static void poll_stats(int interval) +{ + unsigned int nr_cpus = bpf_num_possible_cpus(); + __u64 rx_frags_cnt[nr_cpus], rx_frags_cnt_prev[nr_cpus]; + __u64 tot_len[nr_cpus], tot_len_prev[nr_cpus]; + __u64 rx_cnt[nr_cpus], rx_cnt_prev[nr_cpus]; + int i; + + memset(rx_frags_cnt_prev, 0, sizeof(rx_frags_cnt_prev)); + memset(tot_len_prev, 0, sizeof(tot_len_prev)); + memset(rx_cnt_prev, 0, sizeof(rx_cnt_prev)); + + while (1) { + __u64 n_rx_pkts = 0, rx_frags = 0, rx_len = 0; + __u32 key = 0; + + sleep(interval); + + /* fetch rx cnt */ + assert(bpf_map_lookup_elem(rx_cnt_fd, &key, rx_cnt) == 0); + for (i = 0; i < nr_cpus; i++) + n_rx_pkts += (rx_cnt[i] - rx_cnt_prev[i]); + memcpy(rx_cnt_prev, rx_cnt, sizeof(rx_cnt)); + + /* fetch rx frags */ + assert(bpf_map_lookup_elem(rx_frags_fd, &key, rx_frags_cnt) == 0); + for (i = 0; i < nr_cpus; i++) + rx_frags += (rx_frags_cnt[i] - rx_frags_cnt_prev[i]); + memcpy(rx_frags_cnt_prev, rx_frags_cnt, sizeof(rx_frags_cnt)); + + /* count total bytes of packets */ + assert(bpf_map_lookup_elem(tot_len_fd, &key, tot_len) == 0); + for (i = 0; i < nr_cpus; i++) + rx_len += (tot_len[i] - tot_len_prev[i]); + memcpy(tot_len_prev, tot_len, sizeof(tot_len)); + + if (n_rx_pkts) + printf("ifindex %i: %10llu pkt/s, %10llu frags/s, %10llu bytes/s\n", + ifindex, n_rx_pkts / interval, rx_frags / interval, + rx_len / interval); + } +} + +static void usage(const char *prog) +{ + fprintf(stderr, + "%s: %s [OPTS] IFACE\n\n" + "OPTS:\n" + " -F force loading prog\n", + __func__, prog); +} + +int main(int argc, char **argv) +{ + struct rlimit r = {RLIM_INFINITY, RLIM_INFINITY}; + struct bpf_prog_load_attr prog_load_attr = { + .prog_type = BPF_PROG_TYPE_XDP, + }; + int prog_fd, opt; + struct bpf_prog_info info = {}; + __u32 info_len = sizeof(info); + const char *optstr = "F"; + struct bpf_program *prog; + struct bpf_object *obj; + char filename[256]; + int err; + + while ((opt = getopt(argc, argv, optstr)) != -1) { + switch (opt) { + case 'F': + xdp_flags &= ~XDP_FLAGS_UPDATE_IF_NOEXIST; + break; + default: + usage(basename(argv[0])); + return 1; + } + } + + if (optind == argc) { + usage(basename(argv[0])); + return 1; + } + + if (setrlimit(RLIMIT_MEMLOCK, &r)) { + perror("setrlimit(RLIMIT_MEMLOCK)"); + return 1; + } + + ifindex = if_nametoindex(argv[optind]); + if (!ifindex) { + perror("if_nametoindex"); + return 1; + } + + snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]); + prog_load_attr.file = filename; + + if (bpf_prog_load_xattr(&prog_load_attr, &obj, &prog_fd)) + return 1; + + prog = bpf_program__next(NULL, obj); + if (!prog) { + printf("finding a prog in obj file failed\n"); + return 1; + } + + if (!prog_fd) { + printf("bpf_prog_load_xattr: %s\n", 
strerror(errno)); + return 1; + } + + rx_cnt_fd = bpf_object__find_map_fd_by_name(obj, "rx_cnt"); + rx_frags_fd = bpf_object__find_map_fd_by_name(obj, "rx_frags"); + tot_len_fd = bpf_object__find_map_fd_by_name(obj, "tot_len"); + if (rx_cnt_fd < 0 || rx_frags_fd < 0 || tot_len_fd < 0) { + printf("bpf_object__find_map_fd_by_name failed\n"); + return 1; + } + + if (bpf_set_link_xdp_fd(ifindex, prog_fd, xdp_flags) < 0) { + printf("ERROR: link set xdp fd failed on %d\n", ifindex); + return 1; + } + + err = bpf_obj_get_info_by_fd(prog_fd, &info, &info_len); + if (err) { + printf("can't get prog info - %s\n", strerror(errno)); + return err; + } + prog_id = info.id; + + signal(SIGINT, int_exit); + signal(SIGTERM, int_exit); + + poll_stats(1); + + return 0; +} From patchwork Wed Sep 30 15:41:59 2020 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Lorenzo Bianconi X-Patchwork-Id: 1374489 X-Patchwork-Delegate: bpf@iogearbox.net Return-Path: X-Original-To: patchwork-incoming-netdev@ozlabs.org Delivered-To: patchwork-incoming-netdev@ozlabs.org Authentication-Results: ozlabs.org; spf=pass (sender SPF authorized) smtp.mailfrom=vger.kernel.org (client-ip=23.128.96.18; helo=vger.kernel.org; envelope-from=netdev-owner@vger.kernel.org; receiver=) Authentication-Results: ozlabs.org; dmarc=pass (p=none dis=none) header.from=kernel.org Authentication-Results: ozlabs.org; dkim=pass (1024-bit key; unprotected) header.d=kernel.org header.i=@kernel.org header.a=rsa-sha256 header.s=default header.b=c1XWIEfj; dkim-atps=neutral Received: from vger.kernel.org (vger.kernel.org [23.128.96.18]) by ozlabs.org (Postfix) with ESMTP id 4C1gWS4JTSz9sSn for ; Thu, 1 Oct 2020 01:43:00 +1000 (AEST) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S1731130AbgI3Pm6 (ORCPT ); Wed, 30 Sep 2020 11:42:58 -0400 Received: from mail.kernel.org ([198.145.29.99]:53368 "EHLO mail.kernel.org" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S1730567AbgI3Pm5 (ORCPT ); Wed, 30 Sep 2020 11:42:57 -0400 Received: from lore-desk.redhat.com (unknown [176.207.245.61]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by mail.kernel.org (Postfix) with ESMTPSA id EB5EA20888; Wed, 30 Sep 2020 15:42:54 +0000 (UTC) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=kernel.org; s=default; t=1601480577; bh=0MEVxFzNy4FeBuDJYmqZYu8vdEbCpyx2XH53dG7mD4I=; h=From:To:Cc:Subject:Date:In-Reply-To:References:From; b=c1XWIEfjqPB8NJgwQMF/wmxT2T8a+DSchF6dGvm41YyeR/fWRn7pFM0bAi+SThRzm +8pzzfNgpvpDTiFH7KvbFnMGVdzvabIyYKiuHwF1ga6+E1sJpuB2Lx8H/fGt0pILOz DAPlJkP6fJVktzL7nCLi49YQMZh/yy1e4/fQwIcc= From: Lorenzo Bianconi To: netdev@vger.kernel.org Cc: bpf@vger.kernel.org, davem@davemloft.net, sameehj@amazon.com, kuba@kernel.org, john.fastabend@gmail.com, daniel@iogearbox.net, ast@kernel.org, shayagr@amazon.com, brouer@redhat.com, echaudro@redhat.com, lorenzo.bianconi@redhat.com, dsahern@kernel.org Subject: [PATCH v3 net-next 08/12] bpf: move user_size out of bpf_test_init Date: Wed, 30 Sep 2020 17:41:59 +0200 Message-Id: X-Mailer: git-send-email 2.26.2 In-Reply-To: References: MIME-Version: 1.0 Precedence: bulk List-ID: X-Mailing-List: netdev@vger.kernel.org Rely on data_size_in in bpf_test_init routine signature. 
This is a preliminary patch to introduce xdp multi-buff selftest Signed-off-by: Lorenzo Bianconi --- net/bpf/test_run.c | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/net/bpf/test_run.c b/net/bpf/test_run.c index a66f211726e7..5608d5a902ff 100644 --- a/net/bpf/test_run.c +++ b/net/bpf/test_run.c @@ -170,11 +170,10 @@ __diag_pop(); ALLOW_ERROR_INJECTION(bpf_modify_return_test, ERRNO); -static void *bpf_test_init(const union bpf_attr *kattr, u32 size, - u32 headroom, u32 tailroom) +static void *bpf_test_init(const union bpf_attr *kattr, u32 user_size, + u32 size, u32 headroom, u32 tailroom) { void __user *data_in = u64_to_user_ptr(kattr->test.data_in); - u32 user_size = kattr->test.data_size_in; void *data; if (size < ETH_HLEN || size > PAGE_SIZE - headroom - tailroom) @@ -410,7 +409,8 @@ int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr, void *data; int ret; - data = bpf_test_init(kattr, size, NET_SKB_PAD + NET_IP_ALIGN, + data = bpf_test_init(kattr, kattr->test.data_size_in, + size, NET_SKB_PAD + NET_IP_ALIGN, SKB_DATA_ALIGN(sizeof(struct skb_shared_info))); if (IS_ERR(data)) return PTR_ERR(data); @@ -547,7 +547,8 @@ int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr, /* XDP have extra tailroom as (most) drivers use full page */ max_data_sz = 4096 - headroom - tailroom; - data = bpf_test_init(kattr, max_data_sz, headroom, tailroom); + data = bpf_test_init(kattr, kattr->test.data_size_in, + max_data_sz, headroom, tailroom); if (IS_ERR(data)) return PTR_ERR(data); @@ -610,7 +611,7 @@ int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog, if (size < ETH_HLEN) return -EINVAL; - data = bpf_test_init(kattr, size, 0, 0); + data = bpf_test_init(kattr, kattr->test.data_size_in, size, 0, 0); if (IS_ERR(data)) return PTR_ERR(data); From patchwork Wed Sep 30 15:42:00 2020 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Lorenzo Bianconi X-Patchwork-Id: 1374490 X-Patchwork-Delegate: bpf@iogearbox.net Return-Path: X-Original-To: patchwork-incoming-netdev@ozlabs.org Delivered-To: patchwork-incoming-netdev@ozlabs.org Authentication-Results: ozlabs.org; spf=pass (sender SPF authorized) smtp.mailfrom=vger.kernel.org (client-ip=23.128.96.18; helo=vger.kernel.org; envelope-from=netdev-owner@vger.kernel.org; receiver=) Authentication-Results: ozlabs.org; dmarc=pass (p=none dis=none) header.from=kernel.org Authentication-Results: ozlabs.org; dkim=pass (1024-bit key; unprotected) header.d=kernel.org header.i=@kernel.org header.a=rsa-sha256 header.s=default header.b=Pyl8UXdF; dkim-atps=neutral Received: from vger.kernel.org (vger.kernel.org [23.128.96.18]) by ozlabs.org (Postfix) with ESMTP id 4C1gWW68Cvz9sSn for ; Thu, 1 Oct 2020 01:43:03 +1000 (AEST) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S1731132AbgI3PnD (ORCPT ); Wed, 30 Sep 2020 11:43:03 -0400 Received: from mail.kernel.org ([198.145.29.99]:53414 "EHLO mail.kernel.org" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S1727749AbgI3PnB (ORCPT ); Wed, 30 Sep 2020 11:43:01 -0400 Received: from lore-desk.redhat.com (unknown [176.207.245.61]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by mail.kernel.org (Postfix) with ESMTPSA id 586F620789; Wed, 30 Sep 2020 15:42:58 +0000 (UTC) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=kernel.org; s=default; t=1601480580; 
bh=vYgd691/f2R93zk0OZ9nK+DU0fxoqA1IhJBq2Qqet6Q=; h=From:To:Cc:Subject:Date:In-Reply-To:References:From; b=Pyl8UXdFD9vGlpLK1t0375IE2pg5VNugOVqovM+ZwaiSasMgNL19iEYCV+K169w0w nsS3vQ5uaREkkMAkygvuy1hJS0aVduJqHDVr2fv1VoC7YIs9/40y4qJVJY30cKEakH c0mfPoq/3dV5/Pw8QdapzQGnY3jg8X4K+WcTUaRg= From: Lorenzo Bianconi To: netdev@vger.kernel.org Cc: bpf@vger.kernel.org, davem@davemloft.net, sameehj@amazon.com, kuba@kernel.org, john.fastabend@gmail.com, daniel@iogearbox.net, ast@kernel.org, shayagr@amazon.com, brouer@redhat.com, echaudro@redhat.com, lorenzo.bianconi@redhat.com, dsahern@kernel.org Subject: [PATCH v3 net-next 09/12] bpf: introduce multibuff support to bpf_prog_test_run_xdp() Date: Wed, 30 Sep 2020 17:42:00 +0200 Message-Id: <0f8afde24d12b1ee556cb74badd5aced0a185343.1601478613.git.lorenzo@kernel.org> X-Mailer: git-send-email 2.26.2 In-Reply-To: References: MIME-Version: 1.0 Precedence: bulk List-ID: X-Mailing-List: netdev@vger.kernel.org Introduce the capability to allocate a xdp multi-buff in bpf_prog_test_run_xdp routine. This is a preliminary patch to introduce the selftests for new xdp multi-buff ebpf helpers Signed-off-by: Lorenzo Bianconi --- net/bpf/test_run.c | 36 ++++++++++++++++++++++++++++++------ 1 file changed, 30 insertions(+), 6 deletions(-) diff --git a/net/bpf/test_run.c b/net/bpf/test_run.c index 5608d5a902ff..7268542b0f3c 100644 --- a/net/bpf/test_run.c +++ b/net/bpf/test_run.c @@ -532,23 +532,22 @@ int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr, { u32 tailroom = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); u32 headroom = XDP_PACKET_HEADROOM; - u32 size = kattr->test.data_size_in; u32 repeat = kattr->test.repeat; struct netdev_rx_queue *rxqueue; + struct skb_shared_info *sinfo; struct xdp_buff xdp = {}; + u32 max_data_sz, size; u32 retval, duration; - u32 max_data_sz; void *data; - int ret; + int i, ret; if (kattr->test.ctx_in || kattr->test.ctx_out) return -EINVAL; - /* XDP have extra tailroom as (most) drivers use full page */ max_data_sz = 4096 - headroom - tailroom; + size = min_t(u32, kattr->test.data_size_in, max_data_sz); - data = bpf_test_init(kattr, kattr->test.data_size_in, - max_data_sz, headroom, tailroom); + data = bpf_test_init(kattr, size, max_data_sz, headroom, tailroom); if (IS_ERR(data)) return PTR_ERR(data); @@ -558,6 +557,28 @@ int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr, xdp.data_end = xdp.data + size; xdp.frame_sz = headroom + max_data_sz + tailroom; + sinfo = xdp_get_shared_info_from_buff(&xdp); + if (unlikely(kattr->test.data_size_in > size)) { + for (; size < kattr->test.data_size_in; size += PAGE_SIZE) { + skb_frag_t *frag = &sinfo->frags[sinfo->nr_frags]; + struct page *page; + int data_len; + + page = alloc_page(GFP_KERNEL); + if (!page) { + ret = -ENOMEM; + goto out; + } + + __skb_frag_set_page(frag, page); + data_len = min_t(int, kattr->test.data_size_in - size, + PAGE_SIZE); + skb_frag_size_set(frag, data_len); + sinfo->nr_frags++; + } + xdp.mb = 1; + } + rxqueue = __netif_get_rx_queue(current->nsproxy->net_ns->loopback_dev, 0); xdp.rxq = &rxqueue->xdp_rxq; bpf_prog_change_xdp(NULL, prog); @@ -569,7 +590,10 @@ int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr, ret = bpf_test_finish(kattr, uattr, xdp.data, size, retval, duration); out: bpf_prog_change_xdp(prog, NULL); + for (i = 0; i < sinfo->nr_frags; i++) + __free_page(skb_frag_page(&sinfo->frags[i])); kfree(data); + return ret; } From patchwork Wed Sep 30 15:42:01 2020 Content-Type: text/plain; 
charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Lorenzo Bianconi X-Patchwork-Id: 1374491 X-Patchwork-Delegate: bpf@iogearbox.net Return-Path: X-Original-To: patchwork-incoming-netdev@ozlabs.org Delivered-To: patchwork-incoming-netdev@ozlabs.org Authentication-Results: ozlabs.org; spf=pass (sender SPF authorized) smtp.mailfrom=vger.kernel.org (client-ip=23.128.96.18; helo=vger.kernel.org; envelope-from=netdev-owner@vger.kernel.org; receiver=) Authentication-Results: ozlabs.org; dmarc=pass (p=none dis=none) header.from=kernel.org Authentication-Results: ozlabs.org; dkim=pass (1024-bit key; unprotected) header.d=kernel.org header.i=@kernel.org header.a=rsa-sha256 header.s=default header.b=jnXIR5cJ; dkim-atps=neutral Received: from vger.kernel.org (vger.kernel.org [23.128.96.18]) by ozlabs.org (Postfix) with ESMTP id 4C1gWc2x6pz9sSs for ; Thu, 1 Oct 2020 01:43:08 +1000 (AEST) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S1731139AbgI3PnH (ORCPT ); Wed, 30 Sep 2020 11:43:07 -0400 Received: from mail.kernel.org ([198.145.29.99]:53468 "EHLO mail.kernel.org" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S1727749AbgI3PnF (ORCPT ); Wed, 30 Sep 2020 11:43:05 -0400 Received: from lore-desk.redhat.com (unknown [176.207.245.61]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by mail.kernel.org (Postfix) with ESMTPSA id A4080207FB; Wed, 30 Sep 2020 15:43:01 +0000 (UTC) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=kernel.org; s=default; t=1601480584; bh=ljwjq3pVbbDfsgo6sBm6A5Ha7ul9zA0QoZhTXPsFGqw=; h=From:To:Cc:Subject:Date:In-Reply-To:References:From; b=jnXIR5cJoPRruGKfWWz1LNnuJXDwuicZat/ibrfdul6BADiUVFA35FD7Uj2BRvo6x ZolWY0WnjF6ZoA66kJwWLo9NrplWLM9H1Av7hcxQsCdWYHmMfJmNowalYPGIvorznD qR09CanK6+ntKke0ZVFXDvnV9F2ouaZMuP/VEli4= From: Lorenzo Bianconi To: netdev@vger.kernel.org Cc: bpf@vger.kernel.org, davem@davemloft.net, sameehj@amazon.com, kuba@kernel.org, john.fastabend@gmail.com, daniel@iogearbox.net, ast@kernel.org, shayagr@amazon.com, brouer@redhat.com, echaudro@redhat.com, lorenzo.bianconi@redhat.com, dsahern@kernel.org Subject: [PATCH v3 net-next 10/12] bpf: add xdp multi-buffer selftest Date: Wed, 30 Sep 2020 17:42:01 +0200 Message-Id: X-Mailer: git-send-email 2.26.2 In-Reply-To: References: MIME-Version: 1.0 Precedence: bulk List-ID: X-Mailing-List: netdev@vger.kernel.org Introduce xdp multi-buffer selftest for the following ebpf helpers: - bpf_xdp_get_frags_total_size - bpf_xdp_get_frag_count Co-developed-by: Eelco Chaudron Signed-off-by: Eelco Chaudron Signed-off-by: Lorenzo Bianconi --- .../testing/selftests/bpf/prog_tests/xdp_mb.c | 77 +++++++++++++++++++ .../selftests/bpf/progs/test_xdp_multi_buff.c | 24 ++++++ 2 files changed, 101 insertions(+) create mode 100644 tools/testing/selftests/bpf/prog_tests/xdp_mb.c create mode 100644 tools/testing/selftests/bpf/progs/test_xdp_multi_buff.c diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_mb.c b/tools/testing/selftests/bpf/prog_tests/xdp_mb.c new file mode 100644 index 000000000000..8cfe7253bf2a --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/xdp_mb.c @@ -0,0 +1,77 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include +#include +#include +#include + +#include "test_xdp_multi_buff.skel.h" + +static void test_xdp_mb_check_len(void) +{ + int test_sizes[] = { 128, 4096, 9000 }; + struct test_xdp_multi_buff *pkt_skel; + char *pkt_in = NULL, *pkt_out = NULL; + __u32 duration = 0, retval, 
size; + int err, pkt_fd, i; + + /* Load XDP program */ + pkt_skel = test_xdp_multi_buff__open_and_load(); + if (CHECK(!pkt_skel, "pkt_skel_load", "test_xdp_mb skeleton failed\n")) + goto out; + + /* Allocate resources */ + pkt_out = malloc(test_sizes[ARRAY_SIZE(test_sizes) - 1]); + pkt_in = malloc(test_sizes[ARRAY_SIZE(test_sizes) - 1]); + if (CHECK(!pkt_in || !pkt_out, "malloc", + "Failed malloc, in = %p, out %p\n", pkt_in, pkt_out)) + goto out; + + pkt_fd = bpf_program__fd(pkt_skel->progs._xdp_check_mb_len); + if (pkt_fd < 0) + goto out; + + /* Run test for specific set of packets */ + for (i = 0; i < ARRAY_SIZE(test_sizes); i++) { + int frag_count; + + /* Run test program */ + err = bpf_prog_test_run(pkt_fd, 1, &pkt_in, test_sizes[i], + pkt_out, &size, &retval, &duration); + + if (CHECK(err || retval != XDP_PASS, // || size != test_sizes[i], + "test_run", "err %d errno %d retval %d size %d[%d]\n", + err, errno, retval, size, test_sizes[i])) + goto out; + + /* Verify test results */ + frag_count = DIV_ROUND_UP( + test_sizes[i] - pkt_skel->data->test_result_xdp_len, + getpagesize()); + + if (CHECK(pkt_skel->data->test_result_frag_count != frag_count, + "result", "frag_count = %llu != %u\n", + pkt_skel->data->test_result_frag_count, frag_count)) + goto out; + + if (CHECK(pkt_skel->data->test_result_frag_len != test_sizes[i] - + pkt_skel->data->test_result_xdp_len, + "result", "frag_len = %llu != %llu\n", + pkt_skel->data->test_result_frag_len, + test_sizes[i] - pkt_skel->data->test_result_xdp_len)) + goto out; + } +out: + if (pkt_out) + free(pkt_out); + if (pkt_in) + free(pkt_in); + + test_xdp_multi_buff__destroy(pkt_skel); +} + +void test_xdp_mb(void) +{ + if (test__start_subtest("xdp_mb_check_len_frags")) + test_xdp_mb_check_len(); +} diff --git a/tools/testing/selftests/bpf/progs/test_xdp_multi_buff.c b/tools/testing/selftests/bpf/progs/test_xdp_multi_buff.c new file mode 100644 index 000000000000..1a46e0925282 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/test_xdp_multi_buff.c @@ -0,0 +1,24 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include +#include +#include +#include + +__u64 test_result_frag_len = UINT64_MAX; +__u64 test_result_frag_count = UINT64_MAX; +__u64 test_result_xdp_len = UINT64_MAX; + +SEC("xdp_check_mb_len") +int _xdp_check_mb_len(struct xdp_md *xdp) +{ + void *data_end = (void *)(long)xdp->data_end; + void *data = (void *)(long)xdp->data; + + test_result_xdp_len = (__u64)(data_end - data); + test_result_frag_len = bpf_xdp_get_frags_total_size(xdp); + test_result_frag_count = bpf_xdp_get_frag_count(xdp); + return XDP_PASS; +} + +char _license[] SEC("license") = "GPL"; From patchwork Wed Sep 30 15:42:02 2020 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Lorenzo Bianconi X-Patchwork-Id: 1374492 X-Patchwork-Delegate: bpf@iogearbox.net Return-Path: X-Original-To: patchwork-incoming-netdev@ozlabs.org Delivered-To: patchwork-incoming-netdev@ozlabs.org Authentication-Results: ozlabs.org; spf=pass (sender SPF authorized) smtp.mailfrom=vger.kernel.org (client-ip=23.128.96.18; helo=vger.kernel.org; envelope-from=netdev-owner@vger.kernel.org; receiver=) Authentication-Results: ozlabs.org; dmarc=pass (p=none dis=none) header.from=kernel.org Authentication-Results: ozlabs.org; dkim=pass (1024-bit key; unprotected) header.d=kernel.org header.i=@kernel.org header.a=rsa-sha256 header.s=default header.b=wi0teLrI; dkim-atps=neutral Received: from vger.kernel.org (vger.kernel.org [23.128.96.18]) by ozlabs.org 
(Postfix) with ESMTP id 4C1gWf2jhRz9sSs for ; Thu, 1 Oct 2020 01:43:10 +1000 (AEST) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S1731148AbgI3PnJ (ORCPT ); Wed, 30 Sep 2020 11:43:09 -0400 Received: from mail.kernel.org ([198.145.29.99]:53486 "EHLO mail.kernel.org" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S1731136AbgI3PnH (ORCPT ); Wed, 30 Sep 2020 11:43:07 -0400 Received: from lore-desk.redhat.com (unknown [176.207.245.61]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by mail.kernel.org (Postfix) with ESMTPSA id 212CD207C3; Wed, 30 Sep 2020 15:43:04 +0000 (UTC) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=kernel.org; s=default; t=1601480587; bh=xk9bFAyppCfcxz8mjaSlx2KdcKBpE7HdmJ6obgHpAUA=; h=From:To:Cc:Subject:Date:In-Reply-To:References:From; b=wi0teLrI93hygU8BlKJxaKim2JeMkAkrYq6kgAIZiNFjzcEPvphlprfHPM1xVlrvQ XLu436DMyGagU2AYjupKuwjYel/1jExdhndKiKf73SBEnwydjCe4Cfj61SgpyP15kD jojAp2lsDwQDzgIrr0vOXzvn/xciOZRvQwzybSuw= From: Lorenzo Bianconi To: netdev@vger.kernel.org Cc: bpf@vger.kernel.org, davem@davemloft.net, sameehj@amazon.com, kuba@kernel.org, john.fastabend@gmail.com, daniel@iogearbox.net, ast@kernel.org, shayagr@amazon.com, brouer@redhat.com, echaudro@redhat.com, lorenzo.bianconi@redhat.com, dsahern@kernel.org Subject: [PATCH v3 net-next 11/12] net: mvneta: enable jumbo frames for XDP Date: Wed, 30 Sep 2020 17:42:02 +0200 Message-Id: <4b9ba38a87b570569cb05bd94f487168fdd83fa8.1601478613.git.lorenzo@kernel.org> X-Mailer: git-send-email 2.26.2 In-Reply-To: References: MIME-Version: 1.0 Precedence: bulk List-ID: X-Mailing-List: netdev@vger.kernel.org Enable the capability to receive jumbo frames even if the interface is running in XDP mode Signed-off-by: Lorenzo Bianconi --- drivers/net/ethernet/marvell/mvneta.c | 10 ---------- 1 file changed, 10 deletions(-) diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index f709650974ea..e3352ed13ea8 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c @@ -3743,11 +3743,6 @@ static int mvneta_change_mtu(struct net_device *dev, int mtu) mtu = ALIGN(MVNETA_RX_PKT_SIZE(mtu), 8); } - if (pp->xdp_prog && mtu > MVNETA_MAX_RX_BUF_SIZE) { - netdev_info(dev, "Illegal MTU value %d for XDP mode\n", mtu); - return -EINVAL; - } - dev->mtu = mtu; if (!netif_running(dev)) { @@ -4445,11 +4440,6 @@ static int mvneta_xdp_setup(struct net_device *dev, struct bpf_prog *prog, struct mvneta_port *pp = netdev_priv(dev); struct bpf_prog *old_prog; - if (prog && dev->mtu > MVNETA_MAX_RX_BUF_SIZE) { - NL_SET_ERR_MSG_MOD(extack, "Jumbo frames not supported on XDP"); - return -EOPNOTSUPP; - } - if (pp->bm_priv) { NL_SET_ERR_MSG_MOD(extack, "Hardware Buffer Management not supported on XDP"); From patchwork Wed Sep 30 15:42:03 2020 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Lorenzo Bianconi X-Patchwork-Id: 1374493 X-Patchwork-Delegate: bpf@iogearbox.net Return-Path: X-Original-To: patchwork-incoming-netdev@ozlabs.org Delivered-To: patchwork-incoming-netdev@ozlabs.org Authentication-Results: ozlabs.org; spf=pass (sender SPF authorized) smtp.mailfrom=vger.kernel.org (client-ip=23.128.96.18; helo=vger.kernel.org; envelope-from=netdev-owner@vger.kernel.org; receiver=) Authentication-Results: ozlabs.org; dmarc=pass (p=none dis=none) header.from=kernel.org Authentication-Results: ozlabs.org; dkim=pass (1024-bit key; 
unprotected) header.d=kernel.org header.i=@kernel.org header.a=rsa-sha256 header.s=default header.b=XtO5f5Zk; dkim-atps=neutral Received: from vger.kernel.org (vger.kernel.org [23.128.96.18]) by ozlabs.org (Postfix) with ESMTP id 4C1gWk4lMrz9sSs for ; Thu, 1 Oct 2020 01:43:14 +1000 (AEST) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S1731154AbgI3PnN (ORCPT ); Wed, 30 Sep 2020 11:43:13 -0400 Received: from mail.kernel.org ([198.145.29.99]:53542 "EHLO mail.kernel.org" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S1725800AbgI3PnL (ORCPT ); Wed, 30 Sep 2020 11:43:11 -0400 Received: from lore-desk.redhat.com (unknown [176.207.245.61]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by mail.kernel.org (Postfix) with ESMTPSA id 7E06920759; Wed, 30 Sep 2020 15:43:08 +0000 (UTC) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=kernel.org; s=default; t=1601480591; bh=LXsn3+iKHn7bwpHBftxmVhBLsQ39Ef3e9iy3wjSNlVI=; h=From:To:Cc:Subject:Date:In-Reply-To:References:From; b=XtO5f5ZkwJTZFFdtJVu3vKKczjwxahIRJc446u2Dblu17Z/k/Kne39erEk2MAXE85 w6M6c+o5zzjtuP67zQhWmCLzm/YJx1mnSwZzuQ1j11iK0bzH4w6beygKAKPYNTLxDj d4TSkIl4qwma92Z+4f7wu3WsHPrylRxf/eEuxBUw= From: Lorenzo Bianconi To: netdev@vger.kernel.org Cc: bpf@vger.kernel.org, davem@davemloft.net, sameehj@amazon.com, kuba@kernel.org, john.fastabend@gmail.com, daniel@iogearbox.net, ast@kernel.org, shayagr@amazon.com, brouer@redhat.com, echaudro@redhat.com, lorenzo.bianconi@redhat.com, dsahern@kernel.org Subject: [PATCH v3 net-next 12/12] bpf: cpumap: introduce xdp multi-buff support Date: Wed, 30 Sep 2020 17:42:03 +0200 Message-Id: <5473fa05a3c3fac3c2bbe132326193073d78dd5e.1601478613.git.lorenzo@kernel.org> X-Mailer: git-send-email 2.26.2 In-Reply-To: References: MIME-Version: 1.0 Precedence: bulk List-ID: X-Mailing-List: netdev@vger.kernel.org Introduce __xdp_build_skb_from_frame and xdp_build_skb_from_frame utility routine to build the skb from xdp_frame. 
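As an illustration (not part of this patch), a hypothetical caller other than cpumap could use the non-underscored helper, which allocates the skb head itself, to convert a possibly multi-buffer frame; error handling stays with the caller and mirrors the cpumap pattern below: /* sketch: xdpf is an xdp_frame owned by the caller, dev is the * hypothetical target net_device */ struct sk_buff *skb = xdp_build_skb_from_frame(xdpf, dev); if (!skb) { xdp_return_frame(xdpf); return -ENOMEM; } netif_receive_skb(skb);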
Add xdp multi-buff support to cpumap Signed-off-by: Lorenzo Bianconi --- include/net/xdp.h | 5 ++++ kernel/bpf/cpumap.c | 45 +------------------------------ net/core/xdp.c | 64 +++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 70 insertions(+), 44 deletions(-) diff --git a/include/net/xdp.h b/include/net/xdp.h index 4d47076546ff..8d9224ef75ee 100644 --- a/include/net/xdp.h +++ b/include/net/xdp.h @@ -134,6 +134,11 @@ void xdp_warn(const char *msg, const char *func, const int line); #define XDP_WARN(msg) xdp_warn(msg, __func__, __LINE__) struct xdp_frame *xdp_convert_zc_to_xdp_frame(struct xdp_buff *xdp); +struct sk_buff *__xdp_build_skb_from_frame(struct xdp_frame *xdpf, + struct sk_buff *skb, + struct net_device *dev); +struct sk_buff *xdp_build_skb_from_frame(struct xdp_frame *xdpf, + struct net_device *dev); static inline void xdp_convert_frame_to_buff(struct xdp_frame *frame, struct xdp_buff *xdp) diff --git a/kernel/bpf/cpumap.c b/kernel/bpf/cpumap.c index c61a23b564aa..fa07b4226836 100644 --- a/kernel/bpf/cpumap.c +++ b/kernel/bpf/cpumap.c @@ -155,49 +155,6 @@ static void cpu_map_kthread_stop(struct work_struct *work) kthread_stop(rcpu->kthread); } -static struct sk_buff *cpu_map_build_skb(struct xdp_frame *xdpf, - struct sk_buff *skb) -{ - unsigned int hard_start_headroom; - unsigned int frame_size; - void *pkt_data_start; - - /* Part of headroom was reserved to xdpf */ - hard_start_headroom = sizeof(struct xdp_frame) + xdpf->headroom; - - /* Memory size backing xdp_frame data already have reserved - * room for build_skb to place skb_shared_info in tailroom. - */ - frame_size = xdpf->frame_sz; - - pkt_data_start = xdpf->data - hard_start_headroom; - skb = build_skb_around(skb, pkt_data_start, frame_size); - if (unlikely(!skb)) - return NULL; - - skb_reserve(skb, hard_start_headroom); - __skb_put(skb, xdpf->len); - if (xdpf->metasize) - skb_metadata_set(skb, xdpf->metasize); - - /* Essential SKB info: protocol and skb->dev */ - skb->protocol = eth_type_trans(skb, xdpf->dev_rx); - - /* Optional SKB info, currently missing: - * - HW checksum info (skb->ip_summed) - * - HW RX hash (skb_set_hash) - * - RX ring dev queue index (skb_record_rx_queue) - */ - - /* Until page_pool get SKB return path, release DMA here */ - xdp_release_frame(xdpf); - - /* Allow SKB to reuse area used by xdp_frame */ - xdp_scrub_frame(xdpf); - - return skb; -} - static void __cpu_map_ring_cleanup(struct ptr_ring *ring) { /* The tear-down procedure should have made sure that queue is @@ -364,7 +321,7 @@ static int cpu_map_kthread_run(void *data) struct sk_buff *skb = skbs[i]; int ret; - skb = cpu_map_build_skb(xdpf, skb); + skb = __xdp_build_skb_from_frame(xdpf, skb, xdpf->dev_rx); if (!skb) { xdp_return_frame(xdpf); continue; diff --git a/net/core/xdp.c b/net/core/xdp.c index 6d4fd4dddb00..a6bdefed92e6 100644 --- a/net/core/xdp.c +++ b/net/core/xdp.c @@ -507,3 +507,67 @@ void xdp_warn(const char *msg, const char *func, const int line) WARN(1, "XDP_WARN: %s(line:%d): %s\n", func, line, msg); }; EXPORT_SYMBOL_GPL(xdp_warn); + +struct sk_buff *__xdp_build_skb_from_frame(struct xdp_frame *xdpf, + struct sk_buff *skb, + struct net_device *dev) +{ + struct skb_shared_info *sinfo = xdp_get_shared_info_from_frame(xdpf); + unsigned int headroom = sizeof(*xdpf) + xdpf->headroom; + int i, num_frags = xdpf->mb ? 
sinfo->nr_frags : 0; + void *hard_start = xdpf->data - headroom; + + skb = build_skb_around(skb, hard_start, xdpf->frame_sz); + if (unlikely(!skb)) + return NULL; + + skb_reserve(skb, headroom); + __skb_put(skb, xdpf->len); + if (xdpf->metasize) + skb_metadata_set(skb, xdpf->metasize); + + if (likely(!num_frags)) + goto out; + + for (i = 0; i < num_frags; i++) { + skb_frag_t *frag = &sinfo->frags[i]; + + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, + skb_frag_page(frag), skb_frag_off(frag), + skb_frag_size(frag), xdpf->frame_sz); + } + +out: + /* Essential SKB info: protocol and skb->dev */ + skb->protocol = eth_type_trans(skb, dev); + + /* Optional SKB info, currently missing: + * - HW checksum info (skb->ip_summed) + * - HW RX hash (skb_set_hash) + * - RX ring dev queue index (skb_record_rx_queue) + */ + + /* Until page_pool get SKB return path, release DMA here */ + xdp_release_frame(xdpf); + + /* Allow SKB to reuse area used by xdp_frame */ + xdp_scrub_frame(xdpf); + + return skb; +} +EXPORT_SYMBOL_GPL(__xdp_build_skb_from_frame); + +struct sk_buff *xdp_build_skb_from_frame(struct xdp_frame *xdpf, + struct net_device *dev) +{ + struct sk_buff *skb; + + skb = kmem_cache_alloc(skbuff_head_cache, GFP_ATOMIC); + if (unlikely(!skb)) + return NULL; + + memset(skb, 0, offsetof(struct sk_buff, tail)); + + return __xdp_build_skb_from_frame(xdpf, skb, dev); +} +EXPORT_SYMBOL_GPL(xdp_build_skb_from_frame);
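For completeness, a hedged BPF-side sketch (not part of the series' selftests) of how a program could consume the helpers exercised by the selftest above; it assumes a build where bpf_helpers.h already exposes the declarations added earlier in the series: // SPDX-License-Identifier: GPL-2.0 /* report the full length of a possibly multi-buffer packet */ #include <linux/bpf.h> #include <bpf/bpf_helpers.h> SEC("xdp") int _xdp_total_len(struct xdp_md *xdp) { void *data_end = (void *)(long)xdp->data_end; void *data = (void *)(long)xdp->data; __u64 len = (__u64)(data_end - data); __u64 frags = bpf_xdp_get_frag_count(xdp); len += bpf_xdp_get_frags_total_size(xdp); bpf_printk("pkt len %llu, frags %llu", len, frags); return XDP_PASS; } char _license[] SEC("license") = "GPL";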