From patchwork Sat Oct 19 08:13:21 2019
X-Patchwork-Submitter: Lorenzo Bianconi
X-Patchwork-Id: 1179785
X-Patchwork-Delegate: davem@davemloft.net
From: Lorenzo Bianconi
To: netdev@vger.kernel.org
Cc: lorenzo.bianconi@redhat.com, davem@davemloft.net, thomas.petazzoni@bootlin.com,
 brouer@redhat.com, ilias.apalodimas@linaro.org, matteo.croce@redhat.com,
 mw@semihalf.com, jakub.kicinski@netronome.com
Subject: [PATCH v5 net-next 1/7] net: mvneta: introduce mvneta_update_stats routine
Date: Sat, 19 Oct 2019 10:13:21 +0200
Message-Id: <537faf80acc8ab153511ce44d03362248ecf789e.1571472169.git.lorenzo@kernel.org>

Introduce mvneta_update_stats routine to collect {rx/tx} statistics
(packets and bytes).
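For reference, the per-CPU statistics pattern that the new helper centralizes
uses the u64_stats_sync API, so 32-bit readers still see consistent 64-bit
counters. A minimal, self-contained sketch of the writer and reader side is
below; the demo_* names are illustrative only and not part of the driver.

#include <linux/percpu.h>
#include <linux/u64_stats_sync.h>

struct demo_pcpu_stats {
	struct u64_stats_sync syncp;
	u64 rx_packets;
	u64 rx_bytes;
	u64 tx_packets;
	u64 tx_bytes;
};

struct demo_port {
	struct demo_pcpu_stats __percpu *stats;
};

/* Writer: update the current CPU's counters inside a u64_stats section. */
static void demo_update_stats(struct demo_port *pp, u32 pkts, u32 len, bool tx)
{
	struct demo_pcpu_stats *stats = this_cpu_ptr(pp->stats);

	u64_stats_update_begin(&stats->syncp);
	if (tx) {
		stats->tx_packets += pkts;
		stats->tx_bytes += len;
	} else {
		stats->rx_packets += pkts;
		stats->rx_bytes += len;
	}
	u64_stats_update_end(&stats->syncp);
}

/* Reader (e.g. ndo_get_stats64): retry if a writer was mid-update. */
static u64 demo_read_rx_packets(struct demo_pcpu_stats *stats)
{
	unsigned int start;
	u64 pkts;

	do {
		start = u64_stats_fetch_begin(&stats->syncp);
		pkts = stats->rx_packets;
	} while (u64_stats_fetch_retry(&stats->syncp, start));

	return pkts;
}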
This is a preliminary patch to add XDP support to mvneta driver Signed-off-by: Lorenzo Bianconi --- drivers/net/ethernet/marvell/mvneta.c | 43 ++++++++++++++------------- 1 file changed, 22 insertions(+), 21 deletions(-) diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index e49820675c8c..3a8110d0a5b6 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c @@ -1900,6 +1900,23 @@ static void mvneta_rxq_drop_pkts(struct mvneta_port *pp, } } +static void +mvneta_update_stats(struct mvneta_port *pp, u32 pkts, + u32 len, bool tx) +{ + struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats); + + u64_stats_update_begin(&stats->syncp); + if (tx) { + stats->tx_packets += pkts; + stats->tx_bytes += len; + } else { + stats->rx_packets += pkts; + stats->rx_bytes += len; + } + u64_stats_update_end(&stats->syncp); +} + static inline int mvneta_rx_refill_queue(struct mvneta_port *pp, struct mvneta_rx_queue *rxq) { @@ -2075,14 +2092,8 @@ static int mvneta_rx_swbm(struct napi_struct *napi, rxq->left_size = 0; } - if (rcvd_pkts) { - struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats); - - u64_stats_update_begin(&stats->syncp); - stats->rx_packets += rcvd_pkts; - stats->rx_bytes += rcvd_bytes; - u64_stats_update_end(&stats->syncp); - } + if (rcvd_pkts) + mvneta_update_stats(pp, rcvd_pkts, rcvd_bytes, false); /* return some buffers to hardware queue, one at a time is too slow */ refill = mvneta_rx_refill_queue(pp, rxq); @@ -2206,14 +2217,8 @@ static int mvneta_rx_hwbm(struct napi_struct *napi, napi_gro_receive(napi, skb); } - if (rcvd_pkts) { - struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats); - - u64_stats_update_begin(&stats->syncp); - stats->rx_packets += rcvd_pkts; - stats->rx_bytes += rcvd_bytes; - u64_stats_update_end(&stats->syncp); - } + if (rcvd_pkts) + mvneta_update_stats(pp, rcvd_pkts, rcvd_bytes, false); /* Update rxq management counters */ mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_done); @@ -2459,7 +2464,6 @@ static netdev_tx_t mvneta_tx(struct sk_buff *skb, struct net_device *dev) out: if (frags > 0) { - struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats); struct netdev_queue *nq = netdev_get_tx_queue(dev, txq_id); netdev_tx_sent_queue(nq, len); @@ -2474,10 +2478,7 @@ static netdev_tx_t mvneta_tx(struct sk_buff *skb, struct net_device *dev) else txq->pending += frags; - u64_stats_update_begin(&stats->syncp); - stats->tx_packets++; - stats->tx_bytes += len; - u64_stats_update_end(&stats->syncp); + mvneta_update_stats(pp, 1, len, true); } else { dev->stats.tx_dropped++; dev_kfree_skb_any(skb); From patchwork Sat Oct 19 08:13:22 2019 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Lorenzo Bianconi X-Patchwork-Id: 1179786 X-Patchwork-Delegate: davem@davemloft.net Return-Path: X-Original-To: patchwork-incoming-netdev@ozlabs.org Delivered-To: patchwork-incoming-netdev@ozlabs.org Authentication-Results: ozlabs.org; spf=none (no SPF record) smtp.mailfrom=vger.kernel.org (client-ip=209.132.180.67; helo=vger.kernel.org; envelope-from=netdev-owner@vger.kernel.org; receiver=) Authentication-Results: ozlabs.org; dmarc=pass (p=none dis=none) header.from=kernel.org Authentication-Results: ozlabs.org; dkim=pass (1024-bit key; unprotected) header.d=kernel.org header.i=@kernel.org header.b="DlMooGA/"; dkim-atps=neutral Received: from vger.kernel.org (vger.kernel.org [209.132.180.67]) by ozlabs.org (Postfix) with ESMTP id 46wFzQ6K9hz9sP3 
From: Lorenzo Bianconi
To: netdev@vger.kernel.org
Cc: lorenzo.bianconi@redhat.com, davem@davemloft.net, thomas.petazzoni@bootlin.com,
 brouer@redhat.com, ilias.apalodimas@linaro.org, matteo.croce@redhat.com,
 mw@semihalf.com, jakub.kicinski@netronome.com
Subject: [PATCH v5 net-next 2/7] net: mvneta: introduce page pool API for sw buffer manager
Date: Sat, 19 Oct 2019 10:13:22 +0200
Message-Id: <4070a7ee9ae788d8d83403d0f326c7edb7cc4183.1571472169.git.lorenzo@kernel.org>

Use the page_pool api for allocations and DMA handling instead of
__dev_alloc_page()/dma_map_page() and free_page()/dma_unmap_page().
Pages are unmapped using page_pool_release_page before packets go
into the network stack.

The page_pool API offers buffer recycling capabilities for XDP but
allocates one page per packet, unless the driver splits and manages
the allocated page.
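As background, the allocation and refill flow with page_pool that this patch
adopts can be condensed as follows. This is only a sketch built from the calls
used in the patch (error handling trimmed, demo_* names illustrative); the
actual driver changes are in the diff below.

#include <linux/dma-mapping.h>
#include <net/page_pool.h>

static struct page_pool *demo_create_pool(struct device *dev, int size)
{
	struct page_pool_params params = {
		.order = 0,			/* one page per buffer */
		.flags = PP_FLAG_DMA_MAP,	/* pool maps pages for DMA */
		.pool_size = size,
		.nid = cpu_to_node(0),
		.dev = dev,
		.dma_dir = DMA_FROM_DEVICE,
	};

	return page_pool_create(&params);	/* ERR_PTR() on failure */
}

static int demo_refill(struct device *dev, struct page_pool *pool,
		       dma_addr_t *dma)
{
	enum dma_data_direction dir = page_pool_get_dma_dir(pool);
	struct page *page;

	page = page_pool_alloc_pages(pool, GFP_ATOMIC | __GFP_NOWARN);
	if (!page)
		return -ENOMEM;

	/* The pool already mapped the page: fetch the DMA address and
	 * make the buffer visible to the device before posting it.
	 */
	*dma = page_pool_get_dma_addr(page);
	dma_sync_single_for_device(dev, *dma, PAGE_SIZE, dir);

	/* ... write *dma into the RX descriptor ... */
	return 0;
}

Buffers that reach the network stack as skbs are first detached from the pool
with page_pool_release_page(), while dropped buffers are returned for
recycling with page_pool_put_page() (or page_pool_recycle_direct() from NAPI
context, as later patches in this series do for XDP_DROP).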
This is a preliminary patch to add XDP support to mvneta driver Signed-off-by: Ilias Apalodimas Signed-off-by: Jesper Dangaard Brouer Signed-off-by: Lorenzo Bianconi --- drivers/net/ethernet/marvell/Kconfig | 1 + drivers/net/ethernet/marvell/mvneta.c | 83 +++++++++++++++++++++------ 2 files changed, 65 insertions(+), 19 deletions(-) diff --git a/drivers/net/ethernet/marvell/Kconfig b/drivers/net/ethernet/marvell/Kconfig index fb942167ee54..3d5caea096fb 100644 --- a/drivers/net/ethernet/marvell/Kconfig +++ b/drivers/net/ethernet/marvell/Kconfig @@ -61,6 +61,7 @@ config MVNETA depends on ARCH_MVEBU || COMPILE_TEST select MVMDIO select PHYLINK + select PAGE_POOL ---help--- This driver supports the network interface units in the Marvell ARMADA XP, ARMADA 370, ARMADA 38x and diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index 3a8110d0a5b6..cd1a59c7e9d2 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c @@ -37,6 +37,7 @@ #include #include #include +#include /* Registers */ #define MVNETA_RXQ_CONFIG_REG(q) (0x1400 + ((q) << 2)) @@ -603,6 +604,10 @@ struct mvneta_rx_queue { u32 pkts_coal; u32 time_coal; + /* page_pool */ + struct page_pool *page_pool; + struct xdp_rxq_info xdp_rxq; + /* Virtual address of the RX buffer */ void **buf_virt_addr; @@ -1812,23 +1817,21 @@ static int mvneta_rx_refill(struct mvneta_port *pp, struct mvneta_rx_queue *rxq, gfp_t gfp_mask) { + enum dma_data_direction dma_dir; dma_addr_t phys_addr; struct page *page; - page = __dev_alloc_page(gfp_mask); + page = page_pool_alloc_pages(rxq->page_pool, + gfp_mask | __GFP_NOWARN); if (!page) return -ENOMEM; - /* map page for use */ - phys_addr = dma_map_page(pp->dev->dev.parent, page, 0, PAGE_SIZE, - DMA_FROM_DEVICE); - if (unlikely(dma_mapping_error(pp->dev->dev.parent, phys_addr))) { - __free_page(page); - return -ENOMEM; - } - - phys_addr += pp->rx_offset_correction; + phys_addr = page_pool_get_dma_addr(page) + pp->rx_offset_correction; + dma_dir = page_pool_get_dma_dir(rxq->page_pool); + dma_sync_single_for_device(pp->dev->dev.parent, phys_addr, + PAGE_SIZE, dma_dir); mvneta_rx_desc_fill(rx_desc, phys_addr, page, rxq); + return 0; } @@ -1894,10 +1897,12 @@ static void mvneta_rxq_drop_pkts(struct mvneta_port *pp, if (!data || !(rx_desc->buf_phys_addr)) continue; - dma_unmap_page(pp->dev->dev.parent, rx_desc->buf_phys_addr, - PAGE_SIZE, DMA_FROM_DEVICE); - __free_page(data); + page_pool_put_page(rxq->page_pool, data, false); } + if (xdp_rxq_info_is_reg(&rxq->xdp_rxq)) + xdp_rxq_info_unreg(&rxq->xdp_rxq); + page_pool_destroy(rxq->page_pool); + rxq->page_pool = NULL; } static void @@ -2029,8 +2034,7 @@ static int mvneta_rx_swbm(struct napi_struct *napi, skb_add_rx_frag(rxq->skb, frag_num, page, frag_offset, frag_size, PAGE_SIZE); - dma_unmap_page(dev->dev.parent, phys_addr, - PAGE_SIZE, DMA_FROM_DEVICE); + page_pool_release_page(rxq->page_pool, page); rxq->left_size -= frag_size; } } else { @@ -2060,9 +2064,7 @@ static int mvneta_rx_swbm(struct napi_struct *napi, frag_offset, frag_size, PAGE_SIZE); - dma_unmap_page(dev->dev.parent, phys_addr, - PAGE_SIZE, DMA_FROM_DEVICE); - + page_pool_release_page(rxq->page_pool, page); rxq->left_size -= frag_size; } } /* Middle or Last descriptor */ @@ -2831,11 +2833,54 @@ static int mvneta_poll(struct napi_struct *napi, int budget) return rx_done; } +static int mvneta_create_page_pool(struct mvneta_port *pp, + struct mvneta_rx_queue *rxq, int size) +{ + struct page_pool_params pp_params = { + .order = 0, 
+ .flags = PP_FLAG_DMA_MAP, + .pool_size = size, + .nid = cpu_to_node(0), + .dev = pp->dev->dev.parent, + .dma_dir = DMA_FROM_DEVICE, + }; + int err; + + rxq->page_pool = page_pool_create(&pp_params); + if (IS_ERR(rxq->page_pool)) { + err = PTR_ERR(rxq->page_pool); + rxq->page_pool = NULL; + return err; + } + + err = xdp_rxq_info_reg(&rxq->xdp_rxq, pp->dev, rxq->id); + if (err < 0) + goto err_free_pp; + + err = xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq, MEM_TYPE_PAGE_POOL, + rxq->page_pool); + if (err) + goto err_unregister_rxq; + + return 0; + +err_unregister_rxq: + xdp_rxq_info_unreg(&rxq->xdp_rxq); +err_free_pp: + page_pool_destroy(rxq->page_pool); + rxq->page_pool = NULL; + return err; +} + /* Handle rxq fill: allocates rxq skbs; called when initializing a port */ static int mvneta_rxq_fill(struct mvneta_port *pp, struct mvneta_rx_queue *rxq, int num) { - int i; + int i, err; + + err = mvneta_create_page_pool(pp, rxq, num); + if (err < 0) + return err; for (i = 0; i < num; i++) { memset(rxq->descs + i, 0, sizeof(struct mvneta_rx_desc)); From patchwork Sat Oct 19 08:13:23 2019 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Lorenzo Bianconi X-Patchwork-Id: 1179787 X-Patchwork-Delegate: davem@davemloft.net Return-Path: X-Original-To: patchwork-incoming-netdev@ozlabs.org Delivered-To: patchwork-incoming-netdev@ozlabs.org Authentication-Results: ozlabs.org; spf=none (no SPF record) smtp.mailfrom=vger.kernel.org (client-ip=209.132.180.67; helo=vger.kernel.org; envelope-from=netdev-owner@vger.kernel.org; receiver=) Authentication-Results: ozlabs.org; dmarc=pass (p=none dis=none) header.from=kernel.org Authentication-Results: ozlabs.org; dkim=pass (1024-bit key; unprotected) header.d=kernel.org header.i=@kernel.org header.b="h+hUwxUk"; dkim-atps=neutral Received: from vger.kernel.org (vger.kernel.org [209.132.180.67]) by ozlabs.org (Postfix) with ESMTP id 46wFzT4Gkqz9sP3 for ; Sat, 19 Oct 2019 19:13:57 +1100 (AEDT) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S1728182AbfJSINz (ORCPT ); Sat, 19 Oct 2019 04:13:55 -0400 Received: from mail.kernel.org ([198.145.29.99]:42266 "EHLO mail.kernel.org" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S1725818AbfJSINz (ORCPT ); Sat, 19 Oct 2019 04:13:55 -0400 Received: from localhost.localdomain (unknown [151.66.3.111]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by mail.kernel.org (Postfix) with ESMTPSA id 2E7CE222C2; Sat, 19 Oct 2019 08:13:52 +0000 (UTC) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=kernel.org; s=default; t=1571472834; bh=innoI2FNan+lBjxCmn65uNftlwJlUa7h4aa0rn2V0JI=; h=From:To:Cc:Subject:Date:In-Reply-To:References:From; b=h+hUwxUk9mZSPDfNlIVqDVxqLAyC+mUl+FLL8JA/jV2dU+LiMdgchUdNcjEiWWQoy 2E3/xRseLy7pgj94Rv/mgPoxNsKGVBfJVxfhETTM4Fe6jIMetHKwn0tt4A81M8mbWl RWAFXlilaSLFpNviA3ASPNxh/EAbr0aW3oePEais= From: Lorenzo Bianconi To: netdev@vger.kernel.org Cc: lorenzo.bianconi@redhat.com, davem@davemloft.net, thomas.petazzoni@bootlin.com, brouer@redhat.com, ilias.apalodimas@linaro.org, matteo.croce@redhat.com, mw@semihalf.com, jakub.kicinski@netronome.com Subject: [PATCH v5 net-next 3/7] net: mvneta: rely on build_skb in mvneta_rx_swbm poll routine Date: Sat, 19 Oct 2019 10:13:23 +0200 Message-Id: <2977cb2cf3d037f99962cb7ffc3632866b713414.1571472169.git.lorenzo@kernel.org> X-Mailer: git-send-email 2.21.0 In-Reply-To: References: MIME-Version: 1.0 Sender: 
netdev-owner@vger.kernel.org Precedence: bulk List-ID: X-Mailing-List: netdev@vger.kernel.org Refactor mvneta_rx_swbm code introducing mvneta_swbm_rx_frame and mvneta_swbm_add_rx_fragment routines. Rely on build_skb in oreder to allocate skb since the previous patch introduced buffer recycling using the page_pool API. This patch fixes even an issue in the original driver where dma buffers are accessed before dma sync. mvneta driver can run on not cache coherent devices so it is necessary to sync DMA buffers before sending them to the device in order to avoid memory corruptions. Running perf analysis we can see a performance cost associated with this DMA-sync (anyway it is already there in the original driver code). In follow up patches we will add more logic to reduce DMA-sync as much as possible. Signed-off-by: Ilias Apalodimas Signed-off-by: Jesper Dangaard Brouer Signed-off-by: Lorenzo Bianconi --- drivers/net/ethernet/marvell/mvneta.c | 179 ++++++++++++++------------ 1 file changed, 95 insertions(+), 84 deletions(-) diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index cd1a59c7e9d2..624e30b3fb4d 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c @@ -323,6 +323,11 @@ ETH_HLEN + ETH_FCS_LEN, \ cache_line_size()) +#define MVNETA_SKB_PAD (SKB_DATA_ALIGN(sizeof(struct skb_shared_info) + \ + NET_SKB_PAD)) +#define MVNETA_SKB_SIZE(len) (SKB_DATA_ALIGN(len) + MVNETA_SKB_PAD) +#define MVNETA_MAX_RX_BUF_SIZE (PAGE_SIZE - MVNETA_SKB_PAD) + #define IS_TSO_HEADER(txq, addr) \ ((addr >= txq->tso_hdrs_phys) && \ (addr < txq->tso_hdrs_phys + txq->size * TSO_HEADER_SIZE)) @@ -646,7 +651,6 @@ static int txq_number = 8; static int rxq_def; static int rx_copybreak __read_mostly = 256; -static int rx_header_size __read_mostly = 128; /* HW BM need that each port be identify by a unique ID */ static int global_port_id; @@ -1829,7 +1833,7 @@ static int mvneta_rx_refill(struct mvneta_port *pp, phys_addr = page_pool_get_dma_addr(page) + pp->rx_offset_correction; dma_dir = page_pool_get_dma_dir(rxq->page_pool); dma_sync_single_for_device(pp->dev->dev.parent, phys_addr, - PAGE_SIZE, dma_dir); + MVNETA_MAX_RX_BUF_SIZE, dma_dir); mvneta_rx_desc_fill(rx_desc, phys_addr, page, rxq); return 0; @@ -1947,30 +1951,102 @@ int mvneta_rx_refill_queue(struct mvneta_port *pp, struct mvneta_rx_queue *rxq) return i; } +static int +mvneta_swbm_rx_frame(struct mvneta_port *pp, + struct mvneta_rx_desc *rx_desc, + struct mvneta_rx_queue *rxq, + struct page *page) +{ + unsigned char *data = page_address(page); + int data_len = -MVNETA_MH_SIZE, len; + struct net_device *dev = pp->dev; + enum dma_data_direction dma_dir; + + if (MVNETA_SKB_SIZE(rx_desc->data_size) > PAGE_SIZE) { + len = MVNETA_MAX_RX_BUF_SIZE; + data_len += len; + } else { + len = rx_desc->data_size; + data_len += len - ETH_FCS_LEN; + } + + dma_dir = page_pool_get_dma_dir(rxq->page_pool); + dma_sync_single_for_cpu(dev->dev.parent, + rx_desc->buf_phys_addr, + len, dma_dir); + + rxq->skb = build_skb(data, PAGE_SIZE); + if (unlikely(!rxq->skb)) { + netdev_err(dev, + "Can't allocate skb on queue %d\n", + rxq->id); + dev->stats.rx_dropped++; + rxq->skb_alloc_err++; + return -ENOMEM; + } + page_pool_release_page(rxq->page_pool, page); + + skb_reserve(rxq->skb, MVNETA_MH_SIZE + NET_SKB_PAD); + skb_put(rxq->skb, data_len); + mvneta_rx_csum(pp, rx_desc->status, rxq->skb); + + rxq->left_size = rx_desc->data_size - len; + rx_desc->buf_phys_addr = 0; + + return 0; +} + +static void 
+mvneta_swbm_add_rx_fragment(struct mvneta_port *pp, + struct mvneta_rx_desc *rx_desc, + struct mvneta_rx_queue *rxq, + struct page *page) +{ + struct net_device *dev = pp->dev; + enum dma_data_direction dma_dir; + int data_len, len; + + if (rxq->left_size > MVNETA_MAX_RX_BUF_SIZE) { + len = MVNETA_MAX_RX_BUF_SIZE; + data_len = len; + } else { + len = rxq->left_size; + data_len = len - ETH_FCS_LEN; + } + dma_dir = page_pool_get_dma_dir(rxq->page_pool); + dma_sync_single_for_cpu(dev->dev.parent, + rx_desc->buf_phys_addr, + len, dma_dir); + if (data_len > 0) { + /* refill descriptor with new buffer later */ + skb_add_rx_frag(rxq->skb, + skb_shinfo(rxq->skb)->nr_frags, + page, NET_SKB_PAD, data_len, + PAGE_SIZE); + } + page_pool_release_page(rxq->page_pool, page); + rx_desc->buf_phys_addr = 0; + rxq->left_size -= len; +} + /* Main rx processing when using software buffer management */ static int mvneta_rx_swbm(struct napi_struct *napi, struct mvneta_port *pp, int budget, struct mvneta_rx_queue *rxq) { + int rcvd_pkts = 0, rcvd_bytes = 0, rx_proc = 0; struct net_device *dev = pp->dev; - int rx_todo, rx_proc; - int refill = 0; - u32 rcvd_pkts = 0; - u32 rcvd_bytes = 0; + int rx_todo, refill; /* Get number of received packets */ rx_todo = mvneta_rxq_busy_desc_num_get(pp, rxq); - rx_proc = 0; /* Fairness NAPI loop */ - while ((rcvd_pkts < budget) && (rx_proc < rx_todo)) { + while (rx_proc < budget && rx_proc < rx_todo) { struct mvneta_rx_desc *rx_desc = mvneta_rxq_next_desc_get(rxq); + u32 rx_status, index; unsigned char *data; struct page *page; - dma_addr_t phys_addr; - u32 rx_status, index; - int rx_bytes, skb_size, copy_size; - int frag_num, frag_size, frag_offset; index = rx_desc - rxq->descs; page = (struct page *)rxq->buf_virt_addr[index]; @@ -1978,12 +2054,13 @@ static int mvneta_rx_swbm(struct napi_struct *napi, /* Prefetch header */ prefetch(data); - phys_addr = rx_desc->buf_phys_addr; rx_status = rx_desc->status; rx_proc++; rxq->refill_num++; if (rx_status & MVNETA_RXD_FIRST_DESC) { + int err; + /* Check errors only for FIRST descriptor */ if (rx_status & MVNETA_RXD_ERR_SUMMARY) { mvneta_rx_error(pp, rx_desc); @@ -1991,82 +2068,17 @@ static int mvneta_rx_swbm(struct napi_struct *napi, /* leave the descriptor untouched */ continue; } - rx_bytes = rx_desc->data_size - - (ETH_FCS_LEN + MVNETA_MH_SIZE); - /* Allocate small skb for each new packet */ - skb_size = max(rx_copybreak, rx_header_size); - rxq->skb = netdev_alloc_skb_ip_align(dev, skb_size); - if (unlikely(!rxq->skb)) { - netdev_err(dev, - "Can't allocate skb on queue %d\n", - rxq->id); - dev->stats.rx_dropped++; - rxq->skb_alloc_err++; + err = mvneta_swbm_rx_frame(pp, rx_desc, rxq, page); + if (err) continue; - } - copy_size = min(skb_size, rx_bytes); - - /* Copy data from buffer to SKB, skip Marvell header */ - memcpy(rxq->skb->data, data + MVNETA_MH_SIZE, - copy_size); - skb_put(rxq->skb, copy_size); - rxq->left_size = rx_bytes - copy_size; - - mvneta_rx_csum(pp, rx_status, rxq->skb); - if (rxq->left_size == 0) { - int size = copy_size + MVNETA_MH_SIZE; - - dma_sync_single_range_for_cpu(dev->dev.parent, - phys_addr, 0, - size, - DMA_FROM_DEVICE); - - /* leave the descriptor and buffer untouched */ - } else { - /* refill descriptor with new buffer later */ - rx_desc->buf_phys_addr = 0; - - frag_num = 0; - frag_offset = copy_size + MVNETA_MH_SIZE; - frag_size = min(rxq->left_size, - (int)(PAGE_SIZE - frag_offset)); - skb_add_rx_frag(rxq->skb, frag_num, page, - frag_offset, frag_size, - PAGE_SIZE); - 
page_pool_release_page(rxq->page_pool, page); - rxq->left_size -= frag_size; - } } else { - /* Middle or Last descriptor */ if (unlikely(!rxq->skb)) { pr_debug("no skb for rx_status 0x%x\n", rx_status); continue; } - if (!rxq->left_size) { - /* last descriptor has only FCS */ - /* and can be discarded */ - dma_sync_single_range_for_cpu(dev->dev.parent, - phys_addr, 0, - ETH_FCS_LEN, - DMA_FROM_DEVICE); - /* leave the descriptor and buffer untouched */ - } else { - /* refill descriptor with new buffer later */ - rx_desc->buf_phys_addr = 0; - - frag_num = skb_shinfo(rxq->skb)->nr_frags; - frag_offset = 0; - frag_size = min(rxq->left_size, - (int)(PAGE_SIZE - frag_offset)); - skb_add_rx_frag(rxq->skb, frag_num, page, - frag_offset, frag_size, - PAGE_SIZE); - - page_pool_release_page(rxq->page_pool, page); - rxq->left_size -= frag_size; - } + mvneta_swbm_add_rx_fragment(pp, rx_desc, rxq, page); } /* Middle or Last descriptor */ if (!(rx_status & MVNETA_RXD_LAST_DESC)) @@ -2091,7 +2103,6 @@ static int mvneta_rx_swbm(struct napi_struct *napi, /* clean uncomplete skb pointer in queue */ rxq->skb = NULL; - rxq->left_size = 0; } if (rcvd_pkts) @@ -2954,7 +2965,7 @@ static void mvneta_rxq_hw_init(struct mvneta_port *pp, /* Set Offset */ mvneta_rxq_offset_set(pp, rxq, 0); mvneta_rxq_buf_size_set(pp, rxq, PAGE_SIZE < SZ_64K ? - PAGE_SIZE : + MVNETA_MAX_RX_BUF_SIZE : MVNETA_RX_BUF_SIZE(pp->pkt_size)); mvneta_rxq_bm_disable(pp, rxq); mvneta_rxq_fill(pp, rxq, rxq->size); @@ -4664,7 +4675,7 @@ static int mvneta_probe(struct platform_device *pdev) SET_NETDEV_DEV(dev, &pdev->dev); pp->id = global_port_id++; - pp->rx_offset_correction = 0; /* not relevant for SW BM */ + pp->rx_offset_correction = NET_SKB_PAD; /* Obtain access to BM resources if enabled and already initialized */ bm_node = of_parse_phandle(dn, "buffer-manager", 0); From patchwork Sat Oct 19 08:13:24 2019 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Lorenzo Bianconi X-Patchwork-Id: 1179788 X-Patchwork-Delegate: davem@davemloft.net Return-Path: X-Original-To: patchwork-incoming-netdev@ozlabs.org Delivered-To: patchwork-incoming-netdev@ozlabs.org Authentication-Results: ozlabs.org; spf=none (no SPF record) smtp.mailfrom=vger.kernel.org (client-ip=209.132.180.67; helo=vger.kernel.org; envelope-from=netdev-owner@vger.kernel.org; receiver=) Authentication-Results: ozlabs.org; dmarc=pass (p=none dis=none) header.from=kernel.org Authentication-Results: ozlabs.org; dkim=pass (1024-bit key; unprotected) header.d=kernel.org header.i=@kernel.org header.b="SplzrXAd"; dkim-atps=neutral Received: from vger.kernel.org (vger.kernel.org [209.132.180.67]) by ozlabs.org (Postfix) with ESMTP id 46wFzX04MNz9sP3 for ; Sat, 19 Oct 2019 19:14:00 +1100 (AEDT) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S1728222AbfJSIN6 (ORCPT ); Sat, 19 Oct 2019 04:13:58 -0400 Received: from mail.kernel.org ([198.145.29.99]:42302 "EHLO mail.kernel.org" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S1725818AbfJSIN6 (ORCPT ); Sat, 19 Oct 2019 04:13:58 -0400 Received: from localhost.localdomain (unknown [151.66.3.111]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by mail.kernel.org (Postfix) with ESMTPSA id C496421D80; Sat, 19 Oct 2019 08:13:54 +0000 (UTC) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=kernel.org; s=default; t=1571472836; bh=p0ardPHRKrtE/VYh1UMbeamajkzylBzvlJhKyrnYsLU=; 
h=From:To:Cc:Subject:Date:In-Reply-To:References:From; b=SplzrXAd+6KpZQR5yv4iHI7gLwK1SKaFFx/bih9glTr5pZm6WxY+/MnsVjI3NOQeX UbEBwJvcRuiGIarZXfYUQ4mDAoenwjFT703Bzo1Rwb5hYkwwIuAMRsNeSupGzbgcCO 7bGQejmRVIKSWDK7sMdcyAuQGzYCbb88AkkHPHSU= From: Lorenzo Bianconi To: netdev@vger.kernel.org Cc: lorenzo.bianconi@redhat.com, davem@davemloft.net, thomas.petazzoni@bootlin.com, brouer@redhat.com, ilias.apalodimas@linaro.org, matteo.croce@redhat.com, mw@semihalf.com, jakub.kicinski@netronome.com Subject: [PATCH v5 net-next 4/7] net: mvneta: add basic XDP support Date: Sat, 19 Oct 2019 10:13:24 +0200 Message-Id: <0eeda67545c2bd3c7067f16d4e94065c545c3007.1571472169.git.lorenzo@kernel.org> X-Mailer: git-send-email 2.21.0 In-Reply-To: References: MIME-Version: 1.0 Sender: netdev-owner@vger.kernel.org Precedence: bulk List-ID: X-Mailing-List: netdev@vger.kernel.org Add basic XDP support to mvneta driver for devices that rely on software buffer management. Currently supported verdicts are: - XDP_DROP - XDP_PASS - XDP_REDIRECT - XDP_ABORTED - iptables drop: $iptables -t raw -I PREROUTING -p udp --dport 9 -j DROP $nstat -n && sleep 1 && nstat IpInReceives 151169 0.0 IpExtInOctets 6953544 0.0 IpExtInNoECTPkts 151165 0.0 - XDP_DROP via xdp1 $./samples/bpf/xdp1 3 proto 0: 421419 pkt/s proto 0: 421444 pkt/s proto 0: 421393 pkt/s proto 0: 421440 pkt/s proto 0: 421184 pkt/s Tested-by: Matteo Croce Signed-off-by: Lorenzo Bianconi --- drivers/net/ethernet/marvell/mvneta.c | 148 ++++++++++++++++++++++++-- 1 file changed, 139 insertions(+), 9 deletions(-) diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index 624e30b3fb4d..7ca3cd16d781 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c @@ -38,6 +38,7 @@ #include #include #include +#include /* Registers */ #define MVNETA_RXQ_CONFIG_REG(q) (0x1400 + ((q) << 2)) @@ -323,8 +324,10 @@ ETH_HLEN + ETH_FCS_LEN, \ cache_line_size()) +#define MVNETA_SKB_HEADROOM (max(XDP_PACKET_HEADROOM, NET_SKB_PAD) + \ + NET_IP_ALIGN) #define MVNETA_SKB_PAD (SKB_DATA_ALIGN(sizeof(struct skb_shared_info) + \ - NET_SKB_PAD)) + MVNETA_SKB_HEADROOM)) #define MVNETA_SKB_SIZE(len) (SKB_DATA_ALIGN(len) + MVNETA_SKB_PAD) #define MVNETA_MAX_RX_BUF_SIZE (PAGE_SIZE - MVNETA_SKB_PAD) @@ -352,6 +355,11 @@ struct mvneta_statistic { #define T_REG_64 64 #define T_SW 1 +#define MVNETA_XDP_PASS BIT(0) +#define MVNETA_XDP_DROPPED BIT(1) +#define MVNETA_XDP_TX BIT(2) +#define MVNETA_XDP_REDIR BIT(3) + static const struct mvneta_statistic mvneta_statistics[] = { { 0x3000, T_REG_64, "good_octets_received", }, { 0x3010, T_REG_32, "good_frames_received", }, @@ -431,6 +439,8 @@ struct mvneta_port { u32 cause_rx_tx; struct napi_struct napi; + struct bpf_prog *xdp_prog; + /* Core clock */ struct clk *clk; /* AXI clock */ @@ -1951,11 +1961,51 @@ int mvneta_rx_refill_queue(struct mvneta_port *pp, struct mvneta_rx_queue *rxq) return i; } +static int +mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq, + struct bpf_prog *prog, struct xdp_buff *xdp) +{ + u32 ret, act = bpf_prog_run_xdp(prog, xdp); + + switch (act) { + case XDP_PASS: + ret = MVNETA_XDP_PASS; + break; + case XDP_REDIRECT: { + int err; + + err = xdp_do_redirect(pp->dev, xdp, prog); + if (err) { + ret = MVNETA_XDP_DROPPED; + xdp_return_buff(xdp); + } else { + ret = MVNETA_XDP_REDIR; + } + break; + } + default: + bpf_warn_invalid_xdp_action(act); + /* fall through */ + case XDP_ABORTED: + trace_xdp_exception(pp->dev, prog, act); + /* fall through */ + case XDP_DROP: 
+ page_pool_recycle_direct(rxq->page_pool, + virt_to_head_page(xdp->data)); + ret = MVNETA_XDP_DROPPED; + break; + } + + return ret; +} + static int mvneta_swbm_rx_frame(struct mvneta_port *pp, struct mvneta_rx_desc *rx_desc, struct mvneta_rx_queue *rxq, - struct page *page) + struct xdp_buff *xdp, + struct bpf_prog *xdp_prog, + struct page *page, u32 *xdp_ret) { unsigned char *data = page_address(page); int data_len = -MVNETA_MH_SIZE, len; @@ -1975,7 +2025,26 @@ mvneta_swbm_rx_frame(struct mvneta_port *pp, rx_desc->buf_phys_addr, len, dma_dir); - rxq->skb = build_skb(data, PAGE_SIZE); + xdp->data_hard_start = data; + xdp->data = data + MVNETA_SKB_HEADROOM + MVNETA_MH_SIZE; + xdp->data_end = xdp->data + data_len; + xdp_set_data_meta_invalid(xdp); + + if (xdp_prog) { + u32 ret; + + ret = mvneta_run_xdp(pp, rxq, xdp_prog, xdp); + if (ret != MVNETA_XDP_PASS) { + mvneta_update_stats(pp, 1, + xdp->data_end - xdp->data, + false); + rx_desc->buf_phys_addr = 0; + *xdp_ret |= ret; + return ret; + } + } + + rxq->skb = build_skb(xdp->data_hard_start, PAGE_SIZE); if (unlikely(!rxq->skb)) { netdev_err(dev, "Can't allocate skb on queue %d\n", @@ -1986,8 +2055,9 @@ mvneta_swbm_rx_frame(struct mvneta_port *pp, } page_pool_release_page(rxq->page_pool, page); - skb_reserve(rxq->skb, MVNETA_MH_SIZE + NET_SKB_PAD); - skb_put(rxq->skb, data_len); + skb_reserve(rxq->skb, + xdp->data - xdp->data_hard_start); + skb_put(rxq->skb, xdp->data_end - xdp->data); mvneta_rx_csum(pp, rx_desc->status, rxq->skb); rxq->left_size = rx_desc->data_size - len; @@ -2021,7 +2091,7 @@ mvneta_swbm_add_rx_fragment(struct mvneta_port *pp, /* refill descriptor with new buffer later */ skb_add_rx_frag(rxq->skb, skb_shinfo(rxq->skb)->nr_frags, - page, NET_SKB_PAD, data_len, + page, MVNETA_SKB_HEADROOM, data_len, PAGE_SIZE); } page_pool_release_page(rxq->page_pool, page); @@ -2036,11 +2106,18 @@ static int mvneta_rx_swbm(struct napi_struct *napi, { int rcvd_pkts = 0, rcvd_bytes = 0, rx_proc = 0; struct net_device *dev = pp->dev; + struct bpf_prog *xdp_prog; + struct xdp_buff xdp_buf; int rx_todo, refill; + u32 xdp_ret = 0; /* Get number of received packets */ rx_todo = mvneta_rxq_busy_desc_num_get(pp, rxq); + rcu_read_lock(); + xdp_prog = READ_ONCE(pp->xdp_prog); + xdp_buf.rxq = &rxq->xdp_rxq; + /* Fairness NAPI loop */ while (rx_proc < budget && rx_proc < rx_todo) { struct mvneta_rx_desc *rx_desc = mvneta_rxq_next_desc_get(rxq); @@ -2069,7 +2146,8 @@ static int mvneta_rx_swbm(struct napi_struct *napi, continue; } - err = mvneta_swbm_rx_frame(pp, rx_desc, rxq, page); + err = mvneta_swbm_rx_frame(pp, rx_desc, rxq, &xdp_buf, + xdp_prog, page, &xdp_ret); if (err) continue; } else { @@ -2104,6 +2182,10 @@ static int mvneta_rx_swbm(struct napi_struct *napi, /* clean uncomplete skb pointer in queue */ rxq->skb = NULL; } + rcu_read_unlock(); + + if (xdp_ret & MVNETA_XDP_REDIR) + xdp_do_flush_map(); if (rcvd_pkts) mvneta_update_stats(pp, rcvd_pkts, rcvd_bytes, false); @@ -2847,13 +2929,14 @@ static int mvneta_poll(struct napi_struct *napi, int budget) static int mvneta_create_page_pool(struct mvneta_port *pp, struct mvneta_rx_queue *rxq, int size) { + struct bpf_prog *xdp_prog = READ_ONCE(pp->xdp_prog); struct page_pool_params pp_params = { .order = 0, .flags = PP_FLAG_DMA_MAP, .pool_size = size, .nid = cpu_to_node(0), .dev = pp->dev->dev.parent, - .dma_dir = DMA_FROM_DEVICE, + .dma_dir = xdp_prog ? 
DMA_BIDIRECTIONAL : DMA_FROM_DEVICE, }; int err; @@ -3320,6 +3403,11 @@ static int mvneta_change_mtu(struct net_device *dev, int mtu) mtu = ALIGN(MVNETA_RX_PKT_SIZE(mtu), 8); } + if (pp->xdp_prog && mtu > MVNETA_MAX_RX_BUF_SIZE) { + netdev_info(dev, "Illegal MTU value %d for XDP mode\n", mtu); + return -EINVAL; + } + dev->mtu = mtu; if (!netif_running(dev)) { @@ -3989,6 +4077,47 @@ static int mvneta_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) return phylink_mii_ioctl(pp->phylink, ifr, cmd); } +static int mvneta_xdp_setup(struct net_device *dev, struct bpf_prog *prog, + struct netlink_ext_ack *extack) +{ + bool need_update, running = netif_running(dev); + struct mvneta_port *pp = netdev_priv(dev); + struct bpf_prog *old_prog; + + if (prog && dev->mtu > MVNETA_MAX_RX_BUF_SIZE) { + NL_SET_ERR_MSG_MOD(extack, "Jumbo frames not supported on XDP"); + return -EOPNOTSUPP; + } + + need_update = !!pp->xdp_prog != !!prog; + if (running && need_update) + mvneta_stop(dev); + + old_prog = xchg(&pp->xdp_prog, prog); + if (old_prog) + bpf_prog_put(old_prog); + + if (running && need_update) + return mvneta_open(dev); + + return 0; +} + +static int mvneta_xdp(struct net_device *dev, struct netdev_bpf *xdp) +{ + struct mvneta_port *pp = netdev_priv(dev); + + switch (xdp->command) { + case XDP_SETUP_PROG: + return mvneta_xdp_setup(dev, xdp->prog, xdp->extack); + case XDP_QUERY_PROG: + xdp->prog_id = pp->xdp_prog ? pp->xdp_prog->aux->id : 0; + return 0; + default: + return -EINVAL; + } +} + /* Ethtool methods */ /* Set link ksettings (phy address, speed) for ethtools */ @@ -4385,6 +4514,7 @@ static const struct net_device_ops mvneta_netdev_ops = { .ndo_fix_features = mvneta_fix_features, .ndo_get_stats64 = mvneta_get_stats64, .ndo_do_ioctl = mvneta_ioctl, + .ndo_bpf = mvneta_xdp, }; static const struct ethtool_ops mvneta_eth_tool_ops = { @@ -4675,7 +4805,7 @@ static int mvneta_probe(struct platform_device *pdev) SET_NETDEV_DEV(dev, &pdev->dev); pp->id = global_port_id++; - pp->rx_offset_correction = NET_SKB_PAD; + pp->rx_offset_correction = MVNETA_SKB_HEADROOM; /* Obtain access to BM resources if enabled and already initialized */ bm_node = of_parse_phandle(dn, "buffer-manager", 0); From patchwork Sat Oct 19 08:13:25 2019 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Lorenzo Bianconi X-Patchwork-Id: 1179789 X-Patchwork-Delegate: davem@davemloft.net Return-Path: X-Original-To: patchwork-incoming-netdev@ozlabs.org Delivered-To: patchwork-incoming-netdev@ozlabs.org Authentication-Results: ozlabs.org; spf=none (no SPF record) smtp.mailfrom=vger.kernel.org (client-ip=209.132.180.67; helo=vger.kernel.org; envelope-from=netdev-owner@vger.kernel.org; receiver=) Authentication-Results: ozlabs.org; dmarc=pass (p=none dis=none) header.from=kernel.org Authentication-Results: ozlabs.org; dkim=pass (1024-bit key; unprotected) header.d=kernel.org header.i=@kernel.org header.b="dmdUYnoO"; dkim-atps=neutral Received: from vger.kernel.org (vger.kernel.org [209.132.180.67]) by ozlabs.org (Postfix) with ESMTP id 46wFzZ16JSz9sP3 for ; Sat, 19 Oct 2019 19:14:02 +1100 (AEDT) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S1728230AbfJSIOB (ORCPT ); Sat, 19 Oct 2019 04:14:01 -0400 Received: from mail.kernel.org ([198.145.29.99]:42336 "EHLO mail.kernel.org" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S1725818AbfJSIOA (ORCPT ); Sat, 19 Oct 2019 04:14:00 -0400 Received: from localhost.localdomain (unknown 
[151.66.3.111]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by mail.kernel.org (Postfix) with ESMTPSA id ADB49222C2; Sat, 19 Oct 2019 08:13:57 +0000 (UTC) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=kernel.org; s=default; t=1571472840; bh=0TaHYjFLzo+VLcXm+jMoqFeEBjjFzS+Bn5onpi8CzxI=; h=From:To:Cc:Subject:Date:In-Reply-To:References:From; b=dmdUYnoOtGYyccyJ3SnWFLUH/6Xm2Jj+r1TOc+p1u/zeJLNBJUNtjqnBtVY1SvS6C hm0Jr0Q764IPVWs4qlWLfiFwVf+VNKjJXRltj49XnOzd4W3sviqhPeRhh9JcPXfKER p+ZcSO85I9OxLZed/CJV8GfExY3JR8AmAKvhvzbY= From: Lorenzo Bianconi To: netdev@vger.kernel.org Cc: lorenzo.bianconi@redhat.com, davem@davemloft.net, thomas.petazzoni@bootlin.com, brouer@redhat.com, ilias.apalodimas@linaro.org, matteo.croce@redhat.com, mw@semihalf.com, jakub.kicinski@netronome.com Subject: [PATCH v5 net-next 5/7] net: mvneta: move header prefetch in mvneta_swbm_rx_frame Date: Sat, 19 Oct 2019 10:13:25 +0200 Message-Id: <31256b3f92b49cb1f652cc858261d8ba4de4801f.1571472169.git.lorenzo@kernel.org> X-Mailer: git-send-email 2.21.0 In-Reply-To: References: MIME-Version: 1.0 Sender: netdev-owner@vger.kernel.org Precedence: bulk List-ID: X-Mailing-List: netdev@vger.kernel.org Move data buffer prefetch in mvneta_swbm_rx_frame after dma_sync_single_range_for_cpu Signed-off-by: Ilias Apalodimas Signed-off-by: Jesper Dangaard Brouer Signed-off-by: Lorenzo Bianconi --- drivers/net/ethernet/marvell/mvneta.c | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index 7ca3cd16d781..64f59baa2a3c 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c @@ -2025,6 +2025,9 @@ mvneta_swbm_rx_frame(struct mvneta_port *pp, rx_desc->buf_phys_addr, len, dma_dir); + /* Prefetch header */ + prefetch(data); + xdp->data_hard_start = data; xdp->data = data + MVNETA_SKB_HEADROOM + MVNETA_MH_SIZE; xdp->data_end = xdp->data + data_len; @@ -2122,14 +2125,10 @@ static int mvneta_rx_swbm(struct napi_struct *napi, while (rx_proc < budget && rx_proc < rx_todo) { struct mvneta_rx_desc *rx_desc = mvneta_rxq_next_desc_get(rxq); u32 rx_status, index; - unsigned char *data; struct page *page; index = rx_desc - rxq->descs; page = (struct page *)rxq->buf_virt_addr[index]; - data = page_address(page); - /* Prefetch header */ - prefetch(data); rx_status = rx_desc->status; rx_proc++; From patchwork Sat Oct 19 08:13:26 2019 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Lorenzo Bianconi X-Patchwork-Id: 1179790 X-Patchwork-Delegate: davem@davemloft.net Return-Path: X-Original-To: patchwork-incoming-netdev@ozlabs.org Delivered-To: patchwork-incoming-netdev@ozlabs.org Authentication-Results: ozlabs.org; spf=none (no SPF record) smtp.mailfrom=vger.kernel.org (client-ip=209.132.180.67; helo=vger.kernel.org; envelope-from=netdev-owner@vger.kernel.org; receiver=) Authentication-Results: ozlabs.org; dmarc=pass (p=none dis=none) header.from=kernel.org Authentication-Results: ozlabs.org; dkim=pass (1024-bit key; unprotected) header.d=kernel.org header.i=@kernel.org header.b="JkRcSoqo"; dkim-atps=neutral Received: from vger.kernel.org (vger.kernel.org [209.132.180.67]) by ozlabs.org (Postfix) with ESMTP id 46wFzd3wlVz9sP3 for ; Sat, 19 Oct 2019 19:14:05 +1100 (AEDT) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S1728241AbfJSIOE (ORCPT ); Sat, 19 Oct 2019 04:14:04 -0400 
Received: from mail.kernel.org ([198.145.29.99]:42372 "EHLO mail.kernel.org" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S1725818AbfJSIOE (ORCPT ); Sat, 19 Oct 2019 04:14:04 -0400 Received: from localhost.localdomain (unknown [151.66.3.111]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by mail.kernel.org (Postfix) with ESMTPSA id 7040B21D80; Sat, 19 Oct 2019 08:14:00 +0000 (UTC) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=kernel.org; s=default; t=1571472842; bh=c2bIxoSLrWGUQfG7A42YtweDCuZuALMMWi+6pM/Jg7U=; h=From:To:Cc:Subject:Date:In-Reply-To:References:From; b=JkRcSoqoLiYxcFCDi9xpH5rE5KbVafkAX6DZ2Usu8qohliKMTe82J95uPzraThnx6 s32mr619KPb2Q7+Q/GzwZyPyvMqfefpJCD0QP+3n1nh7W59jsvozpYYwATmZevxYWZ 05TKaSzurwQ73xGP7TEPx/y9yr+jX694DfdAZwzs= From: Lorenzo Bianconi To: netdev@vger.kernel.org Cc: lorenzo.bianconi@redhat.com, davem@davemloft.net, thomas.petazzoni@bootlin.com, brouer@redhat.com, ilias.apalodimas@linaro.org, matteo.croce@redhat.com, mw@semihalf.com, jakub.kicinski@netronome.com Subject: [PATCH v5 net-next 6/7] net: mvneta: make tx buffer array agnostic Date: Sat, 19 Oct 2019 10:13:26 +0200 Message-Id: <935641e63cf776f836bb4b1d67bd39e9a12e9afb.1571472169.git.lorenzo@kernel.org> X-Mailer: git-send-email 2.21.0 In-Reply-To: References: MIME-Version: 1.0 Sender: netdev-owner@vger.kernel.org Precedence: bulk List-ID: X-Mailing-List: netdev@vger.kernel.org Allow tx buffer array to contain both skb and xdp buffers in order to enable xdp frame recycling adding XDP_TX verdict support Signed-off-by: Lorenzo Bianconi --- drivers/net/ethernet/marvell/mvneta.c | 66 +++++++++++++++++---------- 1 file changed, 43 insertions(+), 23 deletions(-) diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index 64f59baa2a3c..e83d5e069b4f 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c @@ -561,6 +561,20 @@ struct mvneta_rx_desc { }; #endif +enum mvneta_tx_buf_type { + MVNETA_TYPE_SKB, + MVNETA_TYPE_XDP_TX, + MVNETA_TYPE_XDP_NDO, +}; + +struct mvneta_tx_buf { + enum mvneta_tx_buf_type type; + union { + struct xdp_frame *xdpf; + struct sk_buff *skb; + }; +}; + struct mvneta_tx_queue { /* Number of this TX queue, in the range 0-7 */ u8 id; @@ -576,8 +590,8 @@ struct mvneta_tx_queue { int tx_stop_threshold; int tx_wake_threshold; - /* Array of transmitted skb */ - struct sk_buff **tx_skb; + /* Array of transmitted buffers */ + struct mvneta_tx_buf *buf; /* Index of last TX DMA descriptor that was inserted */ int txq_put_index; @@ -1780,14 +1794,9 @@ static void mvneta_txq_bufs_free(struct mvneta_port *pp, int i; for (i = 0; i < num; i++) { + struct mvneta_tx_buf *buf = &txq->buf[txq->txq_get_index]; struct mvneta_tx_desc *tx_desc = txq->descs + txq->txq_get_index; - struct sk_buff *skb = txq->tx_skb[txq->txq_get_index]; - - if (skb) { - bytes_compl += skb->len; - pkts_compl++; - } mvneta_txq_inc_get(txq); @@ -1795,9 +1804,12 @@ static void mvneta_txq_bufs_free(struct mvneta_port *pp, dma_unmap_single(pp->dev->dev.parent, tx_desc->buf_phys_addr, tx_desc->data_size, DMA_TO_DEVICE); - if (!skb) + if (!buf->skb) continue; - dev_kfree_skb_any(skb); + + bytes_compl += buf->skb->len; + pkts_compl++; + dev_kfree_skb_any(buf->skb); } netdev_tx_completed_queue(nq, pkts_compl, bytes_compl); @@ -2324,16 +2336,19 @@ static inline void mvneta_tso_put_hdr(struct sk_buff *skb, struct mvneta_port *pp, struct mvneta_tx_queue *txq) { - struct mvneta_tx_desc *tx_desc; 
int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); + struct mvneta_tx_buf *buf = &txq->buf[txq->txq_put_index]; + struct mvneta_tx_desc *tx_desc; - txq->tx_skb[txq->txq_put_index] = NULL; tx_desc = mvneta_txq_next_desc_get(txq); tx_desc->data_size = hdr_len; tx_desc->command = mvneta_skb_tx_csum(pp, skb); tx_desc->command |= MVNETA_TXD_F_DESC; tx_desc->buf_phys_addr = txq->tso_hdrs_phys + txq->txq_put_index * TSO_HEADER_SIZE; + buf->type = MVNETA_TYPE_SKB; + buf->skb = NULL; + mvneta_txq_inc_put(txq); } @@ -2342,6 +2357,7 @@ mvneta_tso_put_data(struct net_device *dev, struct mvneta_tx_queue *txq, struct sk_buff *skb, char *data, int size, bool last_tcp, bool is_last) { + struct mvneta_tx_buf *buf = &txq->buf[txq->txq_put_index]; struct mvneta_tx_desc *tx_desc; tx_desc = mvneta_txq_next_desc_get(txq); @@ -2355,7 +2371,8 @@ mvneta_tso_put_data(struct net_device *dev, struct mvneta_tx_queue *txq, } tx_desc->command = 0; - txq->tx_skb[txq->txq_put_index] = NULL; + buf->type = MVNETA_TYPE_SKB; + buf->skb = NULL; if (last_tcp) { /* last descriptor in the TCP packet */ @@ -2363,7 +2380,7 @@ mvneta_tso_put_data(struct net_device *dev, struct mvneta_tx_queue *txq, /* last descriptor in SKB */ if (is_last) - txq->tx_skb[txq->txq_put_index] = skb; + buf->skb = skb; } mvneta_txq_inc_put(txq); return 0; @@ -2448,6 +2465,7 @@ static int mvneta_tx_frag_process(struct mvneta_port *pp, struct sk_buff *skb, int i, nr_frags = skb_shinfo(skb)->nr_frags; for (i = 0; i < nr_frags; i++) { + struct mvneta_tx_buf *buf = &txq->buf[txq->txq_put_index]; skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; void *addr = skb_frag_address(frag); @@ -2467,12 +2485,13 @@ static int mvneta_tx_frag_process(struct mvneta_port *pp, struct sk_buff *skb, if (i == nr_frags - 1) { /* Last descriptor */ tx_desc->command = MVNETA_TXD_L_DESC | MVNETA_TXD_Z_PAD; - txq->tx_skb[txq->txq_put_index] = skb; + buf->skb = skb; } else { /* Descriptor in the middle: Not First, Not Last */ tx_desc->command = 0; - txq->tx_skb[txq->txq_put_index] = NULL; + buf->skb = NULL; } + buf->type = MVNETA_TYPE_SKB; mvneta_txq_inc_put(txq); } @@ -2500,6 +2519,7 @@ static netdev_tx_t mvneta_tx(struct sk_buff *skb, struct net_device *dev) struct mvneta_port *pp = netdev_priv(dev); u16 txq_id = skb_get_queue_mapping(skb); struct mvneta_tx_queue *txq = &pp->txqs[txq_id]; + struct mvneta_tx_buf *buf = &txq->buf[txq->txq_put_index]; struct mvneta_tx_desc *tx_desc; int len = skb->len; int frags = 0; @@ -2532,16 +2552,17 @@ static netdev_tx_t mvneta_tx(struct sk_buff *skb, struct net_device *dev) goto out; } + buf->type = MVNETA_TYPE_SKB; if (frags == 1) { /* First and Last descriptor */ tx_cmd |= MVNETA_TXD_FLZ_DESC; tx_desc->command = tx_cmd; - txq->tx_skb[txq->txq_put_index] = skb; + buf->skb = skb; mvneta_txq_inc_put(txq); } else { /* First but not Last */ tx_cmd |= MVNETA_TXD_F_DESC; - txq->tx_skb[txq->txq_put_index] = NULL; + buf->skb = NULL; mvneta_txq_inc_put(txq); tx_desc->command = tx_cmd; /* Continue with other skb fragments */ @@ -3128,9 +3149,8 @@ static int mvneta_txq_sw_init(struct mvneta_port *pp, txq->last_desc = txq->size - 1; - txq->tx_skb = kmalloc_array(txq->size, sizeof(*txq->tx_skb), - GFP_KERNEL); - if (!txq->tx_skb) { + txq->buf = kmalloc_array(txq->size, sizeof(*txq->buf), GFP_KERNEL); + if (!txq->buf) { dma_free_coherent(pp->dev->dev.parent, txq->size * MVNETA_DESC_ALIGNED_SIZE, txq->descs, txq->descs_phys); @@ -3142,7 +3162,7 @@ static int mvneta_txq_sw_init(struct mvneta_port *pp, txq->size * TSO_HEADER_SIZE, &txq->tso_hdrs_phys, 
GFP_KERNEL); if (!txq->tso_hdrs) { - kfree(txq->tx_skb); + kfree(txq->buf); dma_free_coherent(pp->dev->dev.parent, txq->size * MVNETA_DESC_ALIGNED_SIZE, txq->descs, txq->descs_phys); @@ -3195,7 +3215,7 @@ static void mvneta_txq_sw_deinit(struct mvneta_port *pp, { struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id); - kfree(txq->tx_skb); + kfree(txq->buf); if (txq->tso_hdrs) dma_free_coherent(pp->dev->dev.parent, From patchwork Sat Oct 19 08:13:27 2019 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Lorenzo Bianconi X-Patchwork-Id: 1179791 X-Patchwork-Delegate: davem@davemloft.net Return-Path: X-Original-To: patchwork-incoming-netdev@ozlabs.org Delivered-To: patchwork-incoming-netdev@ozlabs.org Authentication-Results: ozlabs.org; spf=none (no SPF record) smtp.mailfrom=vger.kernel.org (client-ip=209.132.180.67; helo=vger.kernel.org; envelope-from=netdev-owner@vger.kernel.org; receiver=) Authentication-Results: ozlabs.org; dmarc=pass (p=none dis=none) header.from=kernel.org Authentication-Results: ozlabs.org; dkim=pass (1024-bit key; unprotected) header.d=kernel.org header.i=@kernel.org header.b="ZzNwDujK"; dkim-atps=neutral Received: from vger.kernel.org (vger.kernel.org [209.132.180.67]) by ozlabs.org (Postfix) with ESMTP id 46wFzh6K10z9sPW for ; Sat, 19 Oct 2019 19:14:08 +1100 (AEDT) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S1728250AbfJSIOI (ORCPT ); Sat, 19 Oct 2019 04:14:08 -0400 Received: from mail.kernel.org ([198.145.29.99]:42396 "EHLO mail.kernel.org" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S1725818AbfJSIOH (ORCPT ); Sat, 19 Oct 2019 04:14:07 -0400 Received: from localhost.localdomain (unknown [151.66.3.111]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by mail.kernel.org (Postfix) with ESMTPSA id 3512F222C2; Sat, 19 Oct 2019 08:14:03 +0000 (UTC) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=kernel.org; s=default; t=1571472846; bh=Bp1lVCvsFcSYTkvK9q/5Zl3ey2GMvRLeOdUktHBEaNE=; h=From:To:Cc:Subject:Date:In-Reply-To:References:From; b=ZzNwDujKCGDOOY9GRuPk1TDduAgzqrJmNBuqaNTHSx85sfE2Jp3AcTdfjBN1igcqZ PGJYpqwb4CqO0eyKQbXquse+AD49ECn0zda7G3MhG91LVY3kDKU3lIr5LP6i7RMPS3 6aTAfgXzE/bvlzqVypKcQPPWLvCTluwqzPB1GqeE= From: Lorenzo Bianconi To: netdev@vger.kernel.org Cc: lorenzo.bianconi@redhat.com, davem@davemloft.net, thomas.petazzoni@bootlin.com, brouer@redhat.com, ilias.apalodimas@linaro.org, matteo.croce@redhat.com, mw@semihalf.com, jakub.kicinski@netronome.com Subject: [PATCH v5 net-next 7/7] net: mvneta: add XDP_TX support Date: Sat, 19 Oct 2019 10:13:27 +0200 Message-Id: <79b03187c50a55fa1f1c0ef8aaa6226012f4b433.1571472169.git.lorenzo@kernel.org> X-Mailer: git-send-email 2.21.0 In-Reply-To: References: MIME-Version: 1.0 Sender: netdev-owner@vger.kernel.org Precedence: bulk List-ID: X-Mailing-List: netdev@vger.kernel.org Implement XDP_TX verdict and ndo_xdp_xmit net_device_ops function pointer Signed-off-by: Lorenzo Bianconi --- drivers/net/ethernet/marvell/mvneta.c | 128 ++++++++++++++++++++++++-- 1 file changed, 121 insertions(+), 7 deletions(-) diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index e83d5e069b4f..8f9df6efda61 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c @@ -1800,16 +1800,19 @@ static void mvneta_txq_bufs_free(struct mvneta_port *pp, mvneta_txq_inc_get(txq); - if (!IS_TSO_HEADER(txq, 
tx_desc->buf_phys_addr)) + if (!IS_TSO_HEADER(txq, tx_desc->buf_phys_addr) && + buf->type != MVNETA_TYPE_XDP_TX) dma_unmap_single(pp->dev->dev.parent, tx_desc->buf_phys_addr, tx_desc->data_size, DMA_TO_DEVICE); - if (!buf->skb) - continue; - - bytes_compl += buf->skb->len; - pkts_compl++; - dev_kfree_skb_any(buf->skb); + if (buf->type == MVNETA_TYPE_SKB && buf->skb) { + bytes_compl += buf->skb->len; + pkts_compl++; + dev_kfree_skb_any(buf->skb); + } else if (buf->type == MVNETA_TYPE_XDP_TX || + buf->type == MVNETA_TYPE_XDP_NDO) { + xdp_return_frame(buf->xdpf); + } } netdev_tx_completed_queue(nq, pkts_compl, bytes_compl); @@ -1973,6 +1976,111 @@ int mvneta_rx_refill_queue(struct mvneta_port *pp, struct mvneta_rx_queue *rxq) return i; } +static int +mvneta_xdp_submit_frame(struct mvneta_port *pp, struct mvneta_tx_queue *txq, + struct xdp_frame *xdpf, bool dma_map) +{ + struct mvneta_tx_desc *tx_desc; + struct mvneta_tx_buf *buf; + dma_addr_t dma_addr; + + if (txq->count >= txq->tx_stop_threshold) + return MVNETA_XDP_DROPPED; + + tx_desc = mvneta_txq_next_desc_get(txq); + + buf = &txq->buf[txq->txq_put_index]; + if (dma_map) { + /* ndo_xdp_xmit */ + dma_addr = dma_map_single(pp->dev->dev.parent, xdpf->data, + xdpf->len, DMA_TO_DEVICE); + if (dma_mapping_error(pp->dev->dev.parent, dma_addr)) { + mvneta_txq_desc_put(txq); + return MVNETA_XDP_DROPPED; + } + buf->type = MVNETA_TYPE_XDP_NDO; + } else { + struct page *page = virt_to_page(xdpf->data); + + dma_addr = page_pool_get_dma_addr(page) + + sizeof(*xdpf) + xdpf->headroom; + dma_sync_single_for_device(pp->dev->dev.parent, dma_addr, + xdpf->len, DMA_BIDIRECTIONAL); + buf->type = MVNETA_TYPE_XDP_TX; + } + buf->xdpf = xdpf; + + tx_desc->command = MVNETA_TXD_FLZ_DESC; + tx_desc->buf_phys_addr = dma_addr; + tx_desc->data_size = xdpf->len; + + mvneta_update_stats(pp, 1, xdpf->len, true); + mvneta_txq_inc_put(txq); + txq->pending++; + txq->count++; + + return MVNETA_XDP_TX; +} + +static int +mvneta_xdp_xmit_back(struct mvneta_port *pp, struct xdp_buff *xdp) +{ + struct mvneta_tx_queue *txq; + struct netdev_queue *nq; + struct xdp_frame *xdpf; + int cpu; + u32 ret; + + xdpf = convert_to_xdp_frame(xdp); + if (unlikely(!xdpf)) + return MVNETA_XDP_DROPPED; + + cpu = smp_processor_id(); + txq = &pp->txqs[cpu % txq_number]; + nq = netdev_get_tx_queue(pp->dev, txq->id); + + __netif_tx_lock(nq, cpu); + ret = mvneta_xdp_submit_frame(pp, txq, xdpf, false); + if (ret == MVNETA_XDP_TX) + mvneta_txq_pend_desc_add(pp, txq, 0); + __netif_tx_unlock(nq); + + return ret; +} + +static int +mvneta_xdp_xmit(struct net_device *dev, int num_frame, + struct xdp_frame **frames, u32 flags) +{ + struct mvneta_port *pp = netdev_priv(dev); + int cpu = smp_processor_id(); + struct mvneta_tx_queue *txq; + struct netdev_queue *nq; + int i, drops = 0; + u32 ret; + + if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) + return -EINVAL; + + txq = &pp->txqs[cpu % txq_number]; + nq = netdev_get_tx_queue(pp->dev, txq->id); + + __netif_tx_lock(nq, cpu); + for (i = 0; i < num_frame; i++) { + ret = mvneta_xdp_submit_frame(pp, txq, frames[i], true); + if (ret != MVNETA_XDP_TX) { + xdp_return_frame_rx_napi(frames[i]); + drops++; + } + } + + if (unlikely(flags & XDP_XMIT_FLUSH)) + mvneta_txq_pend_desc_add(pp, txq, 0); + __netif_tx_unlock(nq); + + return num_frame - drops; +} + static int mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq, struct bpf_prog *prog, struct xdp_buff *xdp) @@ -1995,6 +2103,11 @@ mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq, } break; } + 
case XDP_TX: + ret = mvneta_xdp_xmit_back(pp, xdp); + if (ret != MVNETA_XDP_TX) + xdp_return_buff(xdp); + break; default: bpf_warn_invalid_xdp_action(act); /* fall through */ @@ -4534,6 +4647,7 @@ static const struct net_device_ops mvneta_netdev_ops = { .ndo_get_stats64 = mvneta_get_stats64, .ndo_do_ioctl = mvneta_ioctl, .ndo_bpf = mvneta_xdp, + .ndo_xdp_xmit = mvneta_xdp_xmit, }; static const struct ethtool_ops mvneta_eth_tool_ops = {
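To close, the main difference between the two transmit paths added here is in
DMA handling: an XDP_TX buffer still belongs to the RX page_pool (whose pages
are mapped DMA_BIDIRECTIONAL once a program is attached) and only needs a
sync, while a frame arriving through ndo_xdp_xmit was redirected from another
device and must be mapped. A condensed sketch of that distinction, with
illustrative demo_* names rather than the driver's actual helpers:

#include <linux/dma-mapping.h>
#include <net/page_pool.h>
#include <net/xdp.h>

static int demo_map_xdp_frame(struct device *dma_dev, struct xdp_frame *xdpf,
			      bool dma_map, dma_addr_t *dma_addr)
{
	if (dma_map) {
		/* ndo_xdp_xmit: frame redirected from another device */
		*dma_addr = dma_map_single(dma_dev, xdpf->data, xdpf->len,
					   DMA_TO_DEVICE);
		if (dma_mapping_error(dma_dev, *dma_addr))
			return -ENOMEM;
	} else {
		/* XDP_TX: the page is owned by the RX page_pool and is
		 * already mapped; the offset skips the xdp_frame struct
		 * stored at the start of the page plus the headroom.
		 */
		struct page *page = virt_to_page(xdpf->data);

		*dma_addr = page_pool_get_dma_addr(page) +
			    sizeof(*xdpf) + xdpf->headroom;
		dma_sync_single_for_device(dma_dev, *dma_addr, xdpf->len,
					   DMA_BIDIRECTIONAL);
	}

	/* ... fill the TX descriptor with *dma_addr and xdpf->len ... */
	return 0;
}

On completion, frames queued this way are released with xdp_return_frame()
rather than dev_kfree_skb_any(), which is why the tx buffer array was made
type-aware in the previous patch.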