Patch Detail
get: Show a patch.
patch: Update a patch.
put: Update a patch.
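Both operations can be scripted against this endpoint. The snippet below is a minimal sketch using Python's requests library; it assumes the usual Patchwork REST API conventions (anonymous read access, write access via an API token sent in an "Authorization: Token" header). The token value and the example state are placeholders, not values taken from this page. The raw GET request and its JSON response follow the sketch.

import requests

BASE = "http://patchwork.ozlabs.org/api"
PATCH_ID = 1187706
TOKEN = "<your-api-token>"  # placeholder; created from your Patchwork user profile

# GET: show a patch (no credentials needed for a public project)
resp = requests.get(f"{BASE}/patches/{PATCH_ID}/")
resp.raise_for_status()
patch = resp.json()
print(patch["name"], "-", patch["state"])

# PATCH: update a patch, e.g. change its state (partial update).
# This normally requires a token and maintainer rights on the project.
resp = requests.patch(
    f"{BASE}/patches/{PATCH_ID}/",
    headers={"Authorization": f"Token {TOKEN}"},
    json={"state": "under-review"},
)
resp.raise_for_status()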
GET /api/patches/1187706/?format=api
{ "id": 1187706, "url": "http://patchwork.ozlabs.org/api/patches/1187706/?format=api", "web_url": "http://patchwork.ozlabs.org/project/intel-wired-lan/patch/20191031143005.5002-2-anthony.l.nguyen@intel.com/", "project": { "id": 46, "url": "http://patchwork.ozlabs.org/api/projects/46/?format=api", "name": "Intel Wired Ethernet development", "link_name": "intel-wired-lan", "list_id": "intel-wired-lan.osuosl.org", "list_email": "intel-wired-lan@osuosl.org", "web_url": "", "scm_url": "", "webscm_url": "", "list_archive_url": "", "list_archive_url_format": "", "commit_url_format": "" }, "msgid": "<20191031143005.5002-2-anthony.l.nguyen@intel.com>", "list_archive_url": null, "date": "2019-10-31T14:30:04", "name": "[S30,v4,4/9] ice: Move common functions to ice_txrx_lib.c", "commit_ref": null, "pull_url": null, "state": "accepted", "archived": false, "hash": "acbda285cd7b02097e89a6b8d12db2155f319807", "submitter": { "id": 68875, "url": "http://patchwork.ozlabs.org/api/people/68875/?format=api", "name": "Tony Nguyen", "email": "anthony.l.nguyen@intel.com" }, "delegate": { "id": 68, "url": "http://patchwork.ozlabs.org/api/users/68/?format=api", "username": "jtkirshe", "first_name": "Jeff", "last_name": "Kirsher", "email": "jeffrey.t.kirsher@intel.com" }, "mbox": "http://patchwork.ozlabs.org/project/intel-wired-lan/patch/20191031143005.5002-2-anthony.l.nguyen@intel.com/mbox/", "series": [ { "id": 139992, "url": "http://patchwork.ozlabs.org/api/series/139992/?format=api", "web_url": "http://patchwork.ozlabs.org/project/intel-wired-lan/list/?series=139992", "date": "2019-10-31T14:30:04", "name": null, "version": 4, "mbox": "http://patchwork.ozlabs.org/series/139992/mbox/" } ], "comments": "http://patchwork.ozlabs.org/api/patches/1187706/comments/", "check": "pending", "checks": "http://patchwork.ozlabs.org/api/patches/1187706/checks/", "tags": {}, "related": [], "headers": { "Return-Path": "<intel-wired-lan-bounces@osuosl.org>", "X-Original-To": [ "incoming@patchwork.ozlabs.org", "intel-wired-lan@lists.osuosl.org" ], "Delivered-To": [ "patchwork-incoming@bilbo.ozlabs.org", "intel-wired-lan@lists.osuosl.org" ], "Authentication-Results": [ "ozlabs.org; spf=pass (sender SPF authorized)\n\tsmtp.mailfrom=osuosl.org (client-ip=140.211.166.133;\n\thelo=hemlock.osuosl.org;\n\tenvelope-from=intel-wired-lan-bounces@osuosl.org;\n\treceiver=<UNKNOWN>)", "ozlabs.org;\n\tdmarc=fail (p=none dis=none) header.from=intel.com" ], "Received": [ "from hemlock.osuosl.org (smtp2.osuosl.org [140.211.166.133])\n\t(using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256\n\tbits)) (No client certificate requested)\n\tby ozlabs.org (Postfix) with ESMTPS id 4741552SHDz9sPj\n\tfor <incoming@patchwork.ozlabs.org>;\n\tFri, 1 Nov 2019 10:00:41 +1100 (AEDT)", "from localhost (localhost [127.0.0.1])\n\tby hemlock.osuosl.org (Postfix) with ESMTP id 7C68A8836E;\n\tThu, 31 Oct 2019 23:00:39 +0000 (UTC)", "from hemlock.osuosl.org ([127.0.0.1])\n\tby localhost (.osuosl.org [127.0.0.1]) (amavisd-new, port 10024)\n\twith ESMTP id A8WaUN+MlJ49; Thu, 31 Oct 2019 23:00:37 +0000 (UTC)", "from ash.osuosl.org (ash.osuosl.org [140.211.166.34])\n\tby hemlock.osuosl.org (Postfix) with ESMTP id 4E34E8835E;\n\tThu, 31 Oct 2019 23:00:37 +0000 (UTC)", "from hemlock.osuosl.org (smtp2.osuosl.org [140.211.166.133])\n\tby ash.osuosl.org (Postfix) with ESMTP id DD8431BF27A\n\tfor <intel-wired-lan@lists.osuosl.org>;\n\tThu, 31 Oct 2019 23:00:34 +0000 (UTC)", "from localhost (localhost [127.0.0.1])\n\tby hemlock.osuosl.org (Postfix) with ESMTP id 
C895788362\n\tfor <intel-wired-lan@lists.osuosl.org>;\n\tThu, 31 Oct 2019 23:00:34 +0000 (UTC)", "from hemlock.osuosl.org ([127.0.0.1])\n\tby localhost (.osuosl.org [127.0.0.1]) (amavisd-new, port 10024)\n\twith ESMTP id fnO+SThFXoXO for <intel-wired-lan@lists.osuosl.org>;\n\tThu, 31 Oct 2019 23:00:33 +0000 (UTC)", "from mga07.intel.com (mga07.intel.com [134.134.136.100])\n\tby hemlock.osuosl.org (Postfix) with ESMTPS id E888B8835E\n\tfor <intel-wired-lan@lists.osuosl.org>;\n\tThu, 31 Oct 2019 23:00:32 +0000 (UTC)", "from fmsmga003.fm.intel.com ([10.253.24.29])\n\tby orsmga105.jf.intel.com with ESMTP/TLS/DHE-RSA-AES256-GCM-SHA384;\n\t31 Oct 2019 16:00:27 -0700", "from unknown (HELO localhost.jf.intel.com) ([10.166.244.174])\n\tby FMSMGA003.fm.intel.com with ESMTP; 31 Oct 2019 16:00:26 -0700" ], "X-Virus-Scanned": [ "amavisd-new at osuosl.org", "amavisd-new at osuosl.org" ], "X-Greylist": "domain auto-whitelisted by SQLgrey-1.7.6", "X-Amp-Result": "SKIPPED(no attachment in message)", "X-Amp-File-Uploaded": "False", "X-ExtLoop1": "1", "X-IronPort-AV": "E=Sophos;i=\"5.68,253,1569308400\"; d=\"scan'208\";a=\"206251859\"", "From": "Tony Nguyen <anthony.l.nguyen@intel.com>", "To": "intel-wired-lan@lists.osuosl.org", "Date": "Thu, 31 Oct 2019 07:30:04 -0700", "Message-Id": "<20191031143005.5002-2-anthony.l.nguyen@intel.com>", "X-Mailer": "git-send-email 2.20.1", "In-Reply-To": "<20191031143005.5002-1-anthony.l.nguyen@intel.com>", "References": "<20191031143005.5002-1-anthony.l.nguyen@intel.com>", "MIME-Version": "1.0", "Subject": "[Intel-wired-lan] [PATCH S30 v4 4/9] ice: Move common functions to\n\tice_txrx_lib.c", "X-BeenThere": "intel-wired-lan@osuosl.org", "X-Mailman-Version": "2.1.29", "Precedence": "list", "List-Id": "Intel Wired Ethernet Linux Kernel Driver Development\n\t<intel-wired-lan.osuosl.org>", "List-Unsubscribe": "<https://lists.osuosl.org/mailman/options/intel-wired-lan>, \n\t<mailto:intel-wired-lan-request@osuosl.org?subject=unsubscribe>", "List-Archive": "<http://lists.osuosl.org/pipermail/intel-wired-lan/>", "List-Post": "<mailto:intel-wired-lan@osuosl.org>", "List-Help": "<mailto:intel-wired-lan-request@osuosl.org?subject=help>", "List-Subscribe": "<https://lists.osuosl.org/mailman/listinfo/intel-wired-lan>, \n\t<mailto:intel-wired-lan-request@osuosl.org?subject=subscribe>", "Content-Type": "text/plain; charset=\"us-ascii\"", "Content-Transfer-Encoding": "7bit", "Errors-To": "intel-wired-lan-bounces@osuosl.org", "Sender": "\"Intel-wired-lan\" <intel-wired-lan-bounces@osuosl.org>" }, "content": "From: Krzysztof Kazimierczak <krzysztof.kazimierczak@intel.com>\n\nIn preparation of AF XDP, move functions that will be used both by skb and\nzero-copy paths to a new file called ice_txrx_lib.c. 
This allows us to\navoid using ifdefs to control the staticness of said functions.\n\nMove other functions (ice_rx_csum, ice_rx_hash and ice_ptype_to_htype)\ncalled only by the moved ones to the new file as well.\n\nSigned-off-by: Krzysztof Kazimierczak <krzysztof.kazimierczak@intel.com>\nSigned-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>\n---\nv4:\n- Change ice_build_ctob() call to build_ctob()\nv2:\n- Move ice_build_ctob() to ice_txrx_lib.h\n---\n drivers/net/ethernet/intel/ice/Makefile | 1 +\n drivers/net/ethernet/intel/ice/ice_txrx.c | 303 +-----------------\n drivers/net/ethernet/intel/ice/ice_txrx.h | 10 -\n drivers/net/ethernet/intel/ice/ice_txrx_lib.c | 273 ++++++++++++++++\n drivers/net/ethernet/intel/ice/ice_txrx_lib.h | 59 ++++\n 5 files changed, 334 insertions(+), 312 deletions(-)\n create mode 100644 drivers/net/ethernet/intel/ice/ice_txrx_lib.c\n create mode 100644 drivers/net/ethernet/intel/ice/ice_txrx_lib.h", "diff": "diff --git a/drivers/net/ethernet/intel/ice/Makefile b/drivers/net/ethernet/intel/ice/Makefile\nindex ff737f880c12..94fe430fdfdd 100644\n--- a/drivers/net/ethernet/intel/ice/Makefile\n+++ b/drivers/net/ethernet/intel/ice/Makefile\n@@ -15,6 +15,7 @@ ice-y := ice_main.o\t\\\n \t ice_sched.o\t\\\n \t ice_base.o\t\\\n \t ice_lib.o\t\\\n+\t ice_txrx_lib.o\t\\\n \t ice_txrx.o\t\\\n \t ice_flex_pipe.o\t\\\n \t ice_idc.o\t\\\ndiff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c\nindex f79a9376159b..279e5ec7d15f 100644\n--- a/drivers/net/ethernet/intel/ice/ice_txrx.c\n+++ b/drivers/net/ethernet/intel/ice/ice_txrx.c\n@@ -7,6 +7,7 @@\n #include <linux/mm.h>\n #include <linux/bpf_trace.h>\n #include <net/xdp.h>\n+#include \"ice_txrx_lib.h\"\n #include \"ice_lib.h\"\n #include \"ice.h\"\n #include \"ice_dcb_lib.h\"\n@@ -396,37 +397,6 @@ int ice_setup_rx_ring(struct ice_ring *rx_ring)\n \treturn -ENOMEM;\n }\n \n-/**\n- * ice_release_rx_desc - Store the new tail and head values\n- * @rx_ring: ring to bump\n- * @val: new head index\n- */\n-static void ice_release_rx_desc(struct ice_ring *rx_ring, u32 val)\n-{\n-\tu16 prev_ntu = rx_ring->next_to_use;\n-\n-\trx_ring->next_to_use = val;\n-\n-\t/* update next to alloc since we have filled the ring */\n-\trx_ring->next_to_alloc = val;\n-\n-\t/* QRX_TAIL will be updated with any tail value, but hardware ignores\n-\t * the lower 3 bits. This makes it so we only bump tail on meaningful\n-\t * boundaries. Also, this allows us to bump tail on intervals of 8 up to\n-\t * the budget depending on the current traffic load.\n-\t */\n-\tval &= ~0x7;\n-\tif (prev_ntu != val) {\n-\t\t/* Force memory writes to complete before letting h/w\n-\t\t * know there are new descriptors to fetch. (Only\n-\t\t * applicable for weak-ordered memory model archs,\n-\t\t * such as IA-64).\n-\t\t */\n-\t\twmb();\n-\t\twritel(val, rx_ring->tail);\n-\t}\n-}\n-\n /**\n * ice_rx_offset - Return expected offset into page to access data\n * @rx_ring: Ring we are requesting offset of\n@@ -438,89 +408,6 @@ static unsigned int ice_rx_offset(struct ice_ring *rx_ring)\n \treturn ice_is_xdp_ena_vsi(rx_ring->vsi) ? 
XDP_PACKET_HEADROOM : 0;\n }\n \n-/**\n- * ice_xdp_ring_update_tail - Updates the XDP Tx ring tail register\n- * @xdp_ring: XDP Tx ring\n- *\n- * This function updates the XDP Tx ring tail register.\n- */\n-static void ice_xdp_ring_update_tail(struct ice_ring *xdp_ring)\n-{\n-\t/* Force memory writes to complete before letting h/w\n-\t * know there are new descriptors to fetch.\n-\t */\n-\twmb();\n-\twritel_relaxed(xdp_ring->next_to_use, xdp_ring->tail);\n-}\n-\n-/**\n- * ice_xmit_xdp_ring - submit single packet to XDP ring for transmission\n- * @data: packet data pointer\n- * @size: packet data size\n- * @xdp_ring: XDP ring for transmission\n- */\n-static int ice_xmit_xdp_ring(void *data, u16 size, struct ice_ring *xdp_ring)\n-{\n-\tu16 i = xdp_ring->next_to_use;\n-\tstruct ice_tx_desc *tx_desc;\n-\tstruct ice_tx_buf *tx_buf;\n-\tdma_addr_t dma;\n-\n-\tif (!unlikely(ICE_DESC_UNUSED(xdp_ring))) {\n-\t\txdp_ring->tx_stats.tx_busy++;\n-\t\treturn ICE_XDP_CONSUMED;\n-\t}\n-\n-\tdma = dma_map_single(xdp_ring->dev, data, size, DMA_TO_DEVICE);\n-\tif (dma_mapping_error(xdp_ring->dev, dma))\n-\t\treturn ICE_XDP_CONSUMED;\n-\n-\ttx_buf = &xdp_ring->tx_buf[i];\n-\ttx_buf->bytecount = size;\n-\ttx_buf->gso_segs = 1;\n-\ttx_buf->raw_buf = data;\n-\n-\t/* record length, and DMA address */\n-\tdma_unmap_len_set(tx_buf, len, size);\n-\tdma_unmap_addr_set(tx_buf, dma, dma);\n-\n-\ttx_desc = ICE_TX_DESC(xdp_ring, i);\n-\ttx_desc->buf_addr = cpu_to_le64(dma);\n-\ttx_desc->cmd_type_offset_bsz = build_ctob(ICE_TXD_LAST_DESC_CMD, 0,\n-\t\t\t\t\t\t size, 0);\n-\n-\t/* Make certain all of the status bits have been updated\n-\t * before next_to_watch is written.\n-\t */\n-\tsmp_wmb();\n-\n-\ti++;\n-\tif (i == xdp_ring->count)\n-\t\ti = 0;\n-\n-\ttx_buf->next_to_watch = tx_desc;\n-\txdp_ring->next_to_use = i;\n-\n-\treturn ICE_XDP_TX;\n-}\n-\n-/**\n- * ice_xmit_xdp_buff - convert an XDP buffer to an XDP frame and send it\n- * @xdp: XDP buffer\n- * @xdp_ring: XDP Tx ring\n- *\n- * Returns negative on failure, 0 on success.\n- */\n-static int ice_xmit_xdp_buff(struct xdp_buff *xdp, struct ice_ring *xdp_ring)\n-{\n-\tstruct xdp_frame *xdpf = convert_to_xdp_frame(xdp);\n-\n-\tif (unlikely(!xdpf))\n-\t\treturn ICE_XDP_CONSUMED;\n-\n-\treturn ice_xmit_xdp_ring(xdpf->data, xdpf->len, xdp_ring);\n-}\n-\n /**\n * ice_run_xdp - Executes an XDP program on initialized xdp_buff\n * @rx_ring: Rx ring\n@@ -612,29 +499,6 @@ ice_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,\n \treturn n - drops;\n }\n \n-/**\n- * ice_finalize_xdp_rx - Bump XDP Tx tail and/or flush redirect map\n- * @rx_ring: Rx ring\n- * @xdp_res: Result of the receive batch\n- *\n- * This function bumps XDP Tx tail and/or flush redirect map, and\n- * should be called when a batch of packets has been processed in the\n- * napi loop.\n- */\n-static void\n-ice_finalize_xdp_rx(struct ice_ring *rx_ring, unsigned int xdp_res)\n-{\n-\tif (xdp_res & ICE_XDP_REDIR)\n-\t\txdp_do_flush_map();\n-\n-\tif (xdp_res & ICE_XDP_TX) {\n-\t\tstruct ice_ring *xdp_ring =\n-\t\t\trx_ring->vsi->xdp_rings[rx_ring->q_index];\n-\n-\t\tice_xdp_ring_update_tail(xdp_ring);\n-\t}\n-}\n-\n /**\n * ice_alloc_mapped_page - recycle or make a new page\n * @rx_ring: ring to use\n@@ -1031,23 +895,6 @@ static bool ice_cleanup_headers(struct sk_buff *skb)\n \treturn false;\n }\n \n-/**\n- * ice_test_staterr - tests bits in Rx descriptor status and error fields\n- * @rx_desc: pointer to receive descriptor (in le64 format)\n- * @stat_err_bits: value to mask\n- *\n- * This function 
does some fast chicanery in order to return the\n- * value of the mask which is really only used for boolean tests.\n- * The status_error_len doesn't need to be shifted because it begins\n- * at offset zero.\n- */\n-static bool\n-ice_test_staterr(union ice_32b_rx_flex_desc *rx_desc, const u16 stat_err_bits)\n-{\n-\treturn !!(rx_desc->wb.status_error0 &\n-\t\t cpu_to_le16(stat_err_bits));\n-}\n-\n /**\n * ice_is_non_eop - process handling of non-EOP buffers\n * @rx_ring: Rx ring being processed\n@@ -1073,154 +920,6 @@ ice_is_non_eop(struct ice_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc,\n \treturn true;\n }\n \n-/**\n- * ice_ptype_to_htype - get a hash type\n- * @ptype: the ptype value from the descriptor\n- *\n- * Returns a hash type to be used by skb_set_hash\n- */\n-static enum pkt_hash_types ice_ptype_to_htype(u8 __always_unused ptype)\n-{\n-\treturn PKT_HASH_TYPE_NONE;\n-}\n-\n-/**\n- * ice_rx_hash - set the hash value in the skb\n- * @rx_ring: descriptor ring\n- * @rx_desc: specific descriptor\n- * @skb: pointer to current skb\n- * @rx_ptype: the ptype value from the descriptor\n- */\n-static void\n-ice_rx_hash(struct ice_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc,\n-\t struct sk_buff *skb, u8 rx_ptype)\n-{\n-\tstruct ice_32b_rx_flex_desc_nic *nic_mdid;\n-\tu32 hash;\n-\n-\tif (!(rx_ring->netdev->features & NETIF_F_RXHASH))\n-\t\treturn;\n-\n-\tif (rx_desc->wb.rxdid != ICE_RXDID_FLEX_NIC)\n-\t\treturn;\n-\n-\tnic_mdid = (struct ice_32b_rx_flex_desc_nic *)rx_desc;\n-\thash = le32_to_cpu(nic_mdid->rss_hash);\n-\tskb_set_hash(skb, hash, ice_ptype_to_htype(rx_ptype));\n-}\n-\n-/**\n- * ice_rx_csum - Indicate in skb if checksum is good\n- * @ring: the ring we care about\n- * @skb: skb currently being received and modified\n- * @rx_desc: the receive descriptor\n- * @ptype: the packet type decoded by hardware\n- *\n- * skb->protocol must be set before this function is called\n- */\n-static void\n-ice_rx_csum(struct ice_ring *ring, struct sk_buff *skb,\n-\t union ice_32b_rx_flex_desc *rx_desc, u8 ptype)\n-{\n-\tstruct ice_rx_ptype_decoded decoded;\n-\tu32 rx_error, rx_status;\n-\tbool ipv4, ipv6;\n-\n-\trx_status = le16_to_cpu(rx_desc->wb.status_error0);\n-\trx_error = rx_status;\n-\n-\tdecoded = ice_decode_rx_desc_ptype(ptype);\n-\n-\t/* Start with CHECKSUM_NONE and by default csum_level = 0 */\n-\tskb->ip_summed = CHECKSUM_NONE;\n-\tskb_checksum_none_assert(skb);\n-\n-\t/* check if Rx checksum is enabled */\n-\tif (!(ring->netdev->features & NETIF_F_RXCSUM))\n-\t\treturn;\n-\n-\t/* check if HW has decoded the packet and checksum */\n-\tif (!(rx_status & BIT(ICE_RX_FLEX_DESC_STATUS0_L3L4P_S)))\n-\t\treturn;\n-\n-\tif (!(decoded.known && decoded.outer_ip))\n-\t\treturn;\n-\n-\tipv4 = (decoded.outer_ip == ICE_RX_PTYPE_OUTER_IP) &&\n-\t (decoded.outer_ip_ver == ICE_RX_PTYPE_OUTER_IPV4);\n-\tipv6 = (decoded.outer_ip == ICE_RX_PTYPE_OUTER_IP) &&\n-\t (decoded.outer_ip_ver == ICE_RX_PTYPE_OUTER_IPV6);\n-\n-\tif (ipv4 && (rx_error & (BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_IPE_S) |\n-\t\t\t\t BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S))))\n-\t\tgoto checksum_fail;\n-\telse if (ipv6 && (rx_status &\n-\t\t (BIT(ICE_RX_FLEX_DESC_STATUS0_IPV6EXADD_S))))\n-\t\tgoto checksum_fail;\n-\n-\t/* check for L4 errors and handle packets that were not able to be\n-\t * checksummed due to arrival speed\n-\t */\n-\tif (rx_error & BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_L4E_S))\n-\t\tgoto checksum_fail;\n-\n-\t/* Only report checksum unnecessary for TCP, UDP, or SCTP */\n-\tswitch (decoded.inner_prot) {\n-\tcase 
ICE_RX_PTYPE_INNER_PROT_TCP:\n-\tcase ICE_RX_PTYPE_INNER_PROT_UDP:\n-\tcase ICE_RX_PTYPE_INNER_PROT_SCTP:\n-\t\tskb->ip_summed = CHECKSUM_UNNECESSARY;\n-\tdefault:\n-\t\tbreak;\n-\t}\n-\treturn;\n-\n-checksum_fail:\n-\tring->vsi->back->hw_csum_rx_error++;\n-}\n-\n-/**\n- * ice_process_skb_fields - Populate skb header fields from Rx descriptor\n- * @rx_ring: Rx descriptor ring packet is being transacted on\n- * @rx_desc: pointer to the EOP Rx descriptor\n- * @skb: pointer to current skb being populated\n- * @ptype: the packet type decoded by hardware\n- *\n- * This function checks the ring, descriptor, and packet information in\n- * order to populate the hash, checksum, VLAN, protocol, and\n- * other fields within the skb.\n- */\n-static void\n-ice_process_skb_fields(struct ice_ring *rx_ring,\n-\t\t union ice_32b_rx_flex_desc *rx_desc,\n-\t\t struct sk_buff *skb, u8 ptype)\n-{\n-\tice_rx_hash(rx_ring, rx_desc, skb, ptype);\n-\n-\t/* modifies the skb - consumes the enet header */\n-\tskb->protocol = eth_type_trans(skb, rx_ring->netdev);\n-\n-\tice_rx_csum(rx_ring, skb, rx_desc, ptype);\n-}\n-\n-/**\n- * ice_receive_skb - Send a completed packet up the stack\n- * @rx_ring: Rx ring in play\n- * @skb: packet to send up\n- * @vlan_tag: VLAN tag for packet\n- *\n- * This function sends the completed packet (via. skb) up the stack using\n- * gro receive functions (with/without VLAN tag)\n- */\n-static void\n-ice_receive_skb(struct ice_ring *rx_ring, struct sk_buff *skb, u16 vlan_tag)\n-{\n-\tif ((rx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&\n-\t (vlan_tag & VLAN_VID_MASK))\n-\t\t__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);\n-\tnapi_gro_receive(&rx_ring->q_vector->napi, skb);\n-}\n-\n /**\n * ice_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf\n * @rx_ring: Rx descriptor ring to transact packets on\ndiff --git a/drivers/net/ethernet/intel/ice/ice_txrx.h b/drivers/net/ethernet/intel/ice/ice_txrx.h\nindex e40b4cb54ce3..a07101b13226 100644\n--- a/drivers/net/ethernet/intel/ice/ice_txrx.h\n+++ b/drivers/net/ethernet/intel/ice/ice_txrx.h\n@@ -22,16 +22,6 @@\n #define ICE_RX_BUF_WRITE\t16\t/* Must be power of 2 */\n #define ICE_MAX_TXQ_PER_TXQG\t128\n \n-static inline __le64\n-build_ctob(u64 td_cmd, u64 td_offset, unsigned int size, u64 td_tag)\n-{\n-\treturn cpu_to_le64(ICE_TX_DESC_DTYPE_DATA |\n-\t\t\t (td_cmd << ICE_TXD_QW1_CMD_S) |\n-\t\t\t (td_offset << ICE_TXD_QW1_OFFSET_S) |\n-\t\t\t ((u64)size << ICE_TXD_QW1_TX_BUF_SZ_S) |\n-\t\t\t (td_tag << ICE_TXD_QW1_L2TAG1_S));\n-}\n-\n /* We are assuming that the cache line is always 64 Bytes here for ice.\n * In order to make sure that is a correct assumption there is a check in probe\n * to print a warning if the read from GLPCI_CNF2 tells us that the cache line\ndiff --git a/drivers/net/ethernet/intel/ice/ice_txrx_lib.c b/drivers/net/ethernet/intel/ice/ice_txrx_lib.c\nnew file mode 100644\nindex 000000000000..35bbc4ff603c\n--- /dev/null\n+++ b/drivers/net/ethernet/intel/ice/ice_txrx_lib.c\n@@ -0,0 +1,273 @@\n+// SPDX-License-Identifier: GPL-2.0\n+/* Copyright (c) 2019, Intel Corporation. 
*/\n+\n+#include \"ice_txrx_lib.h\"\n+\n+/**\n+ * ice_release_rx_desc - Store the new tail and head values\n+ * @rx_ring: ring to bump\n+ * @val: new head index\n+ */\n+void ice_release_rx_desc(struct ice_ring *rx_ring, u32 val)\n+{\n+\tu16 prev_ntu = rx_ring->next_to_use;\n+\n+\trx_ring->next_to_use = val;\n+\n+\t/* update next to alloc since we have filled the ring */\n+\trx_ring->next_to_alloc = val;\n+\n+\t/* QRX_TAIL will be updated with any tail value, but hardware ignores\n+\t * the lower 3 bits. This makes it so we only bump tail on meaningful\n+\t * boundaries. Also, this allows us to bump tail on intervals of 8 up to\n+\t * the budget depending on the current traffic load.\n+\t */\n+\tval &= ~0x7;\n+\tif (prev_ntu != val) {\n+\t\t/* Force memory writes to complete before letting h/w\n+\t\t * know there are new descriptors to fetch. (Only\n+\t\t * applicable for weak-ordered memory model archs,\n+\t\t * such as IA-64).\n+\t\t */\n+\t\twmb();\n+\t\twritel(val, rx_ring->tail);\n+\t}\n+}\n+\n+/**\n+ * ice_ptype_to_htype - get a hash type\n+ * @ptype: the ptype value from the descriptor\n+ *\n+ * Returns a hash type to be used by skb_set_hash\n+ */\n+static enum pkt_hash_types ice_ptype_to_htype(u8 __always_unused ptype)\n+{\n+\treturn PKT_HASH_TYPE_NONE;\n+}\n+\n+/**\n+ * ice_rx_hash - set the hash value in the skb\n+ * @rx_ring: descriptor ring\n+ * @rx_desc: specific descriptor\n+ * @skb: pointer to current skb\n+ * @rx_ptype: the ptype value from the descriptor\n+ */\n+static void\n+ice_rx_hash(struct ice_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc,\n+\t struct sk_buff *skb, u8 rx_ptype)\n+{\n+\tstruct ice_32b_rx_flex_desc_nic *nic_mdid;\n+\tu32 hash;\n+\n+\tif (!(rx_ring->netdev->features & NETIF_F_RXHASH))\n+\t\treturn;\n+\n+\tif (rx_desc->wb.rxdid != ICE_RXDID_FLEX_NIC)\n+\t\treturn;\n+\n+\tnic_mdid = (struct ice_32b_rx_flex_desc_nic *)rx_desc;\n+\thash = le32_to_cpu(nic_mdid->rss_hash);\n+\tskb_set_hash(skb, hash, ice_ptype_to_htype(rx_ptype));\n+}\n+\n+/**\n+ * ice_rx_csum - Indicate in skb if checksum is good\n+ * @ring: the ring we care about\n+ * @skb: skb currently being received and modified\n+ * @rx_desc: the receive descriptor\n+ * @ptype: the packet type decoded by hardware\n+ *\n+ * skb->protocol must be set before this function is called\n+ */\n+static void\n+ice_rx_csum(struct ice_ring *ring, struct sk_buff *skb,\n+\t union ice_32b_rx_flex_desc *rx_desc, u8 ptype)\n+{\n+\tstruct ice_rx_ptype_decoded decoded;\n+\tu32 rx_error, rx_status;\n+\tbool ipv4, ipv6;\n+\n+\trx_status = le16_to_cpu(rx_desc->wb.status_error0);\n+\trx_error = rx_status;\n+\n+\tdecoded = ice_decode_rx_desc_ptype(ptype);\n+\n+\t/* Start with CHECKSUM_NONE and by default csum_level = 0 */\n+\tskb->ip_summed = CHECKSUM_NONE;\n+\tskb_checksum_none_assert(skb);\n+\n+\t/* check if Rx checksum is enabled */\n+\tif (!(ring->netdev->features & NETIF_F_RXCSUM))\n+\t\treturn;\n+\n+\t/* check if HW has decoded the packet and checksum */\n+\tif (!(rx_status & BIT(ICE_RX_FLEX_DESC_STATUS0_L3L4P_S)))\n+\t\treturn;\n+\n+\tif (!(decoded.known && decoded.outer_ip))\n+\t\treturn;\n+\n+\tipv4 = (decoded.outer_ip == ICE_RX_PTYPE_OUTER_IP) &&\n+\t (decoded.outer_ip_ver == ICE_RX_PTYPE_OUTER_IPV4);\n+\tipv6 = (decoded.outer_ip == ICE_RX_PTYPE_OUTER_IP) &&\n+\t (decoded.outer_ip_ver == ICE_RX_PTYPE_OUTER_IPV6);\n+\n+\tif (ipv4 && (rx_error & (BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_IPE_S) |\n+\t\t\t\t BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S))))\n+\t\tgoto checksum_fail;\n+\telse if (ipv6 && (rx_status &\n+\t\t 
(BIT(ICE_RX_FLEX_DESC_STATUS0_IPV6EXADD_S))))\n+\t\tgoto checksum_fail;\n+\n+\t/* check for L4 errors and handle packets that were not able to be\n+\t * checksummed due to arrival speed\n+\t */\n+\tif (rx_error & BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_L4E_S))\n+\t\tgoto checksum_fail;\n+\n+\t/* Only report checksum unnecessary for TCP, UDP, or SCTP */\n+\tswitch (decoded.inner_prot) {\n+\tcase ICE_RX_PTYPE_INNER_PROT_TCP:\n+\tcase ICE_RX_PTYPE_INNER_PROT_UDP:\n+\tcase ICE_RX_PTYPE_INNER_PROT_SCTP:\n+\t\tskb->ip_summed = CHECKSUM_UNNECESSARY;\n+\tdefault:\n+\t\tbreak;\n+\t}\n+\treturn;\n+\n+checksum_fail:\n+\tring->vsi->back->hw_csum_rx_error++;\n+}\n+\n+/**\n+ * ice_process_skb_fields - Populate skb header fields from Rx descriptor\n+ * @rx_ring: Rx descriptor ring packet is being transacted on\n+ * @rx_desc: pointer to the EOP Rx descriptor\n+ * @skb: pointer to current skb being populated\n+ * @ptype: the packet type decoded by hardware\n+ *\n+ * This function checks the ring, descriptor, and packet information in\n+ * order to populate the hash, checksum, VLAN, protocol, and\n+ * other fields within the skb.\n+ */\n+void\n+ice_process_skb_fields(struct ice_ring *rx_ring,\n+\t\t union ice_32b_rx_flex_desc *rx_desc,\n+\t\t struct sk_buff *skb, u8 ptype)\n+{\n+\tice_rx_hash(rx_ring, rx_desc, skb, ptype);\n+\n+\t/* modifies the skb - consumes the enet header */\n+\tskb->protocol = eth_type_trans(skb, rx_ring->netdev);\n+\n+\tice_rx_csum(rx_ring, skb, rx_desc, ptype);\n+}\n+\n+/**\n+ * ice_receive_skb - Send a completed packet up the stack\n+ * @rx_ring: Rx ring in play\n+ * @skb: packet to send up\n+ * @vlan_tag: VLAN tag for packet\n+ *\n+ * This function sends the completed packet (via. skb) up the stack using\n+ * gro receive functions (with/without VLAN tag)\n+ */\n+void\n+ice_receive_skb(struct ice_ring *rx_ring, struct sk_buff *skb, u16 vlan_tag)\n+{\n+\tif ((rx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&\n+\t (vlan_tag & VLAN_VID_MASK))\n+\t\t__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);\n+\tnapi_gro_receive(&rx_ring->q_vector->napi, skb);\n+}\n+\n+/**\n+ * ice_xmit_xdp_ring - submit single packet to XDP ring for transmission\n+ * @data: packet data pointer\n+ * @size: packet data size\n+ * @xdp_ring: XDP ring for transmission\n+ */\n+int ice_xmit_xdp_ring(void *data, u16 size, struct ice_ring *xdp_ring)\n+{\n+\tu16 i = xdp_ring->next_to_use;\n+\tstruct ice_tx_desc *tx_desc;\n+\tstruct ice_tx_buf *tx_buf;\n+\tdma_addr_t dma;\n+\n+\tif (!unlikely(ICE_DESC_UNUSED(xdp_ring))) {\n+\t\txdp_ring->tx_stats.tx_busy++;\n+\t\treturn ICE_XDP_CONSUMED;\n+\t}\n+\n+\tdma = dma_map_single(xdp_ring->dev, data, size, DMA_TO_DEVICE);\n+\tif (dma_mapping_error(xdp_ring->dev, dma))\n+\t\treturn ICE_XDP_CONSUMED;\n+\n+\ttx_buf = &xdp_ring->tx_buf[i];\n+\ttx_buf->bytecount = size;\n+\ttx_buf->gso_segs = 1;\n+\ttx_buf->raw_buf = data;\n+\n+\t/* record length, and DMA address */\n+\tdma_unmap_len_set(tx_buf, len, size);\n+\tdma_unmap_addr_set(tx_buf, dma, dma);\n+\n+\ttx_desc = ICE_TX_DESC(xdp_ring, i);\n+\ttx_desc->buf_addr = cpu_to_le64(dma);\n+\ttx_desc->cmd_type_offset_bsz = build_ctob(ICE_TXD_LAST_DESC_CMD, 0,\n+\t\t\t\t\t\t size, 0);\n+\n+\t/* Make certain all of the status bits have been updated\n+\t * before next_to_watch is written.\n+\t */\n+\tsmp_wmb();\n+\n+\ti++;\n+\tif (i == xdp_ring->count)\n+\t\ti = 0;\n+\n+\ttx_buf->next_to_watch = tx_desc;\n+\txdp_ring->next_to_use = i;\n+\n+\treturn ICE_XDP_TX;\n+}\n+\n+/**\n+ * ice_xmit_xdp_buff - convert an XDP buffer to an XDP frame 
and send it\n+ * @xdp: XDP buffer\n+ * @xdp_ring: XDP Tx ring\n+ *\n+ * Returns negative on failure, 0 on success.\n+ */\n+int ice_xmit_xdp_buff(struct xdp_buff *xdp, struct ice_ring *xdp_ring)\n+{\n+\tstruct xdp_frame *xdpf = convert_to_xdp_frame(xdp);\n+\n+\tif (unlikely(!xdpf))\n+\t\treturn ICE_XDP_CONSUMED;\n+\n+\treturn ice_xmit_xdp_ring(xdpf->data, xdpf->len, xdp_ring);\n+}\n+\n+/**\n+ * ice_finalize_xdp_rx - Bump XDP Tx tail and/or flush redirect map\n+ * @rx_ring: Rx ring\n+ * @xdp_res: Result of the receive batch\n+ *\n+ * This function bumps XDP Tx tail and/or flush redirect map, and\n+ * should be called when a batch of packets has been processed in the\n+ * napi loop.\n+ */\n+void ice_finalize_xdp_rx(struct ice_ring *rx_ring, unsigned int xdp_res)\n+{\n+\tif (xdp_res & ICE_XDP_REDIR)\n+\t\txdp_do_flush_map();\n+\n+\tif (xdp_res & ICE_XDP_TX) {\n+\t\tstruct ice_ring *xdp_ring =\n+\t\t\trx_ring->vsi->xdp_rings[rx_ring->q_index];\n+\n+\t\tice_xdp_ring_update_tail(xdp_ring);\n+\t}\n+}\ndiff --git a/drivers/net/ethernet/intel/ice/ice_txrx_lib.h b/drivers/net/ethernet/intel/ice/ice_txrx_lib.h\nnew file mode 100644\nindex 000000000000..ba9164dad9ae\n--- /dev/null\n+++ b/drivers/net/ethernet/intel/ice/ice_txrx_lib.h\n@@ -0,0 +1,59 @@\n+/* SPDX-License-Identifier: GPL-2.0 */\n+/* Copyright (c) 2019, Intel Corporation. */\n+\n+#ifndef _ICE_TXRX_LIB_H_\n+#define _ICE_TXRX_LIB_H_\n+#include \"ice.h\"\n+\n+/**\n+ * ice_test_staterr - tests bits in Rx descriptor status and error fields\n+ * @rx_desc: pointer to receive descriptor (in le64 format)\n+ * @stat_err_bits: value to mask\n+ *\n+ * This function does some fast chicanery in order to return the\n+ * value of the mask which is really only used for boolean tests.\n+ * The status_error_len doesn't need to be shifted because it begins\n+ * at offset zero.\n+ */\n+static inline bool\n+ice_test_staterr(union ice_32b_rx_flex_desc *rx_desc, const u16 stat_err_bits)\n+{\n+\treturn !!(rx_desc->wb.status_error0 & cpu_to_le16(stat_err_bits));\n+}\n+\n+static inline __le64\n+build_ctob(u64 td_cmd, u64 td_offset, unsigned int size, u64 td_tag)\n+{\n+\treturn cpu_to_le64(ICE_TX_DESC_DTYPE_DATA |\n+\t\t\t (td_cmd << ICE_TXD_QW1_CMD_S) |\n+\t\t\t (td_offset << ICE_TXD_QW1_OFFSET_S) |\n+\t\t\t ((u64)size << ICE_TXD_QW1_TX_BUF_SZ_S) |\n+\t\t\t (td_tag << ICE_TXD_QW1_L2TAG1_S));\n+}\n+\n+/**\n+ * ice_xdp_ring_update_tail - Updates the XDP Tx ring tail register\n+ * @xdp_ring: XDP Tx ring\n+ *\n+ * This function updates the XDP Tx ring tail register.\n+ */\n+static inline void ice_xdp_ring_update_tail(struct ice_ring *xdp_ring)\n+{\n+\t/* Force memory writes to complete before letting h/w\n+\t * know there are new descriptors to fetch.\n+\t */\n+\twmb();\n+\twritel_relaxed(xdp_ring->next_to_use, xdp_ring->tail);\n+}\n+\n+void ice_finalize_xdp_rx(struct ice_ring *rx_ring, unsigned int xdp_res);\n+int ice_xmit_xdp_buff(struct xdp_buff *xdp, struct ice_ring *xdp_ring);\n+int ice_xmit_xdp_ring(void *data, u16 size, struct ice_ring *xdp_ring);\n+void ice_release_rx_desc(struct ice_ring *rx_ring, u32 val);\n+void\n+ice_process_skb_fields(struct ice_ring *rx_ring,\n+\t\t union ice_32b_rx_flex_desc *rx_desc,\n+\t\t struct sk_buff *skb, u8 ptype);\n+void\n+ice_receive_skb(struct ice_ring *rx_ring, struct sk_buff *skb, u16 vlan_tag);\n+#endif /* !_ICE_TXRX_LIB_H_ */\n", "prefixes": [ "S30", "v4", "4/9" ] }
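The response above carries everything needed to work with the change locally: the "mbox" field points at a git-am-able message, "diff" holds the raw patch text, and "series", "comments", and "checks" link to related resources. The sketch below is a hedged example of fetching the mbox and applying it; the output filename is arbitrary and it assumes the current directory is a suitable git tree.

import subprocess
import requests

patch = requests.get("http://patchwork.ozlabs.org/api/patches/1187706/").json()

# Download the mbox referenced by the response so it can be applied with git am.
mbox = requests.get(patch["mbox"])
mbox.raise_for_status()
with open("ice-txrx-lib.mbox", "wb") as f:
    f.write(mbox.content)

# Assumes the current directory is the target kernel tree (e.g. a net-next clone).
subprocess.run(["git", "am", "ice-txrx-lib.mbox"], check=True)

# The raw diff is also available inline if you only want to inspect it.
print(patch["diff"].splitlines()[0])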