From patchwork Sat Apr 18 06:47:00 2020
X-Patchwork-Submitter: tanhuazhong
X-Patchwork-Id: 1272589
From: Huazhong Tan
Subject: [PATCH net-next 01/10] net: hns3: split out hclge_fd_check_ether_tuple()
Date: Sat, 18 Apr 2020 14:47:00 +0800
Message-ID: <1587192429-11463-2-git-send-email-tanhuazhong@huawei.com>
In-Reply-To: <1587192429-11463-1-git-send-email-tanhuazhong@huawei.com>
References: <1587192429-11463-1-git-send-email-tanhuazhong@huawei.com>
X-Mailing-List: netdev@vger.kernel.org

From: Jian Shen

For readability and maintainability, this patch separates the handling part of each flow type in hclge_fd_check_ether_tuple() into standalone functions.
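
For illustration only (this sketch is not part of the patch): the refactor follows the common pattern of turning one large switch into a thin dispatcher plus one small checker per flow type. The standalone C sketch below shows that pattern; every type, name, and bit value in it is an invented stand-in, not the real hclge structures shown in the diff that follows.

#include <stdio.h>

enum ex_flow_type { EX_TCP_V4, EX_UDP_V4, EX_ETHER };

struct ex_tcpip4_spec { unsigned int ip4src; unsigned int ip4dst; };
struct ex_ether_spec  { unsigned short h_proto; };

/* One small checker per flow type: validate the spec and mark which
 * tuple fields the caller may ignore, instead of one big switch doing
 * everything inline. */
static int ex_check_tcpip4(const struct ex_tcpip4_spec *spec, unsigned int *unused)
{
	if (!spec || !unused)
		return -1;
	if (!spec->ip4src)
		*unused |= 1u << 0;	/* source IP not used in this rule */
	if (!spec->ip4dst)
		*unused |= 1u << 1;	/* destination IP not used */
	return 0;
}

static int ex_check_ether(const struct ex_ether_spec *spec, unsigned int *unused)
{
	if (!spec || !unused)
		return -1;
	if (!spec->h_proto)
		*unused |= 1u << 2;	/* ether type not used */
	return 0;
}

/* The former monolithic checker becomes a thin dispatcher. */
static int ex_check_spec(enum ex_flow_type type, const void *spec, unsigned int *unused)
{
	switch (type) {
	case EX_TCP_V4:
	case EX_UDP_V4:
		return ex_check_tcpip4(spec, unused);
	case EX_ETHER:
		return ex_check_ether(spec, unused);
	default:
		return -1;		/* unsupported flow type */
	}
}

int main(void)
{
	struct ex_tcpip4_spec spec = { .ip4src = 0x0a000001u, .ip4dst = 0 };
	unsigned int unused = 0;

	if (!ex_check_spec(EX_TCP_V4, &spec, &unused))
		printf("unused tuple mask: 0x%x\n", unused);
	return 0;
}

Each per-type helper can then be read and reviewed on its own, which is the readability gain the commit message refers to.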
Signed-off-by: Jian Shen Signed-off-by: Huazhong Tan --- .../ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 285 +++++++++++++-------- 1 file changed, 173 insertions(+), 112 deletions(-) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index a758f9a..6381c0f 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -5244,157 +5244,158 @@ static int hclge_config_action(struct hclge_dev *hdev, u8 stage, return hclge_fd_ad_config(hdev, stage, ad_data.ad_id, &ad_data); } -static int hclge_fd_check_spec(struct hclge_dev *hdev, - struct ethtool_rx_flow_spec *fs, u32 *unused) +static int hclge_fd_check_tcpip4_tuple(struct ethtool_tcpip4_spec *spec, + u32 *unused_tuple) { - struct ethtool_tcpip4_spec *tcp_ip4_spec; - struct ethtool_usrip4_spec *usr_ip4_spec; - struct ethtool_tcpip6_spec *tcp_ip6_spec; - struct ethtool_usrip6_spec *usr_ip6_spec; - struct ethhdr *ether_spec; - - if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) + if (!spec || !unused_tuple) return -EINVAL; - if (!(fs->flow_type & hdev->fd_cfg.proto_support)) - return -EOPNOTSUPP; - - if ((fs->flow_type & FLOW_EXT) && - (fs->h_ext.data[0] != 0 || fs->h_ext.data[1] != 0)) { - dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n"); - return -EOPNOTSUPP; - } + *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC); - switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) { - case SCTP_V4_FLOW: - case TCP_V4_FLOW: - case UDP_V4_FLOW: - tcp_ip4_spec = &fs->h_u.tcp_ip4_spec; - *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC); + if (!spec->ip4src) + *unused_tuple |= BIT(INNER_SRC_IP); - if (!tcp_ip4_spec->ip4src) - *unused |= BIT(INNER_SRC_IP); + if (!spec->ip4dst) + *unused_tuple |= BIT(INNER_DST_IP); - if (!tcp_ip4_spec->ip4dst) - *unused |= BIT(INNER_DST_IP); + if (!spec->psrc) + *unused_tuple |= BIT(INNER_SRC_PORT); - if (!tcp_ip4_spec->psrc) - *unused |= BIT(INNER_SRC_PORT); + if (!spec->pdst) + *unused_tuple |= BIT(INNER_DST_PORT); - if (!tcp_ip4_spec->pdst) - *unused |= BIT(INNER_DST_PORT); + if (!spec->tos) + *unused_tuple |= BIT(INNER_IP_TOS); - if (!tcp_ip4_spec->tos) - *unused |= BIT(INNER_IP_TOS); + return 0; +} - break; - case IP_USER_FLOW: - usr_ip4_spec = &fs->h_u.usr_ip4_spec; - *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) | - BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT); +static int hclge_fd_check_ip4_tuple(struct ethtool_usrip4_spec *spec, + u32 *unused_tuple) +{ + if (!spec || !unused_tuple) + return -EINVAL; - if (!usr_ip4_spec->ip4src) - *unused |= BIT(INNER_SRC_IP); + *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) | + BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT); - if (!usr_ip4_spec->ip4dst) - *unused |= BIT(INNER_DST_IP); + if (!spec->ip4src) + *unused_tuple |= BIT(INNER_SRC_IP); - if (!usr_ip4_spec->tos) - *unused |= BIT(INNER_IP_TOS); + if (!spec->ip4dst) + *unused_tuple |= BIT(INNER_DST_IP); - if (!usr_ip4_spec->proto) - *unused |= BIT(INNER_IP_PROTO); + if (!spec->tos) + *unused_tuple |= BIT(INNER_IP_TOS); - if (usr_ip4_spec->l4_4_bytes) - return -EOPNOTSUPP; + if (!spec->proto) + *unused_tuple |= BIT(INNER_IP_PROTO); - if (usr_ip4_spec->ip_ver != ETH_RX_NFC_IP4) - return -EOPNOTSUPP; + if (spec->l4_4_bytes) + return -EOPNOTSUPP; - break; - case SCTP_V6_FLOW: - case TCP_V6_FLOW: - case UDP_V6_FLOW: - tcp_ip6_spec = &fs->h_u.tcp_ip6_spec; - *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) | - BIT(INNER_IP_TOS); + if (spec->ip_ver != 
ETH_RX_NFC_IP4) + return -EOPNOTSUPP; - /* check whether src/dst ip address used */ - if (!tcp_ip6_spec->ip6src[0] && !tcp_ip6_spec->ip6src[1] && - !tcp_ip6_spec->ip6src[2] && !tcp_ip6_spec->ip6src[3]) - *unused |= BIT(INNER_SRC_IP); + return 0; +} - if (!tcp_ip6_spec->ip6dst[0] && !tcp_ip6_spec->ip6dst[1] && - !tcp_ip6_spec->ip6dst[2] && !tcp_ip6_spec->ip6dst[3]) - *unused |= BIT(INNER_DST_IP); +static int hclge_fd_check_tcpip6_tuple(struct ethtool_tcpip6_spec *spec, + u32 *unused_tuple) +{ + if (!spec || !unused_tuple) + return -EINVAL; - if (!tcp_ip6_spec->psrc) - *unused |= BIT(INNER_SRC_PORT); + *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) | + BIT(INNER_IP_TOS); - if (!tcp_ip6_spec->pdst) - *unused |= BIT(INNER_DST_PORT); + /* check whether src/dst ip address used */ + if (!spec->ip6src[0] && !spec->ip6src[1] && + !spec->ip6src[2] && !spec->ip6src[3]) + *unused_tuple |= BIT(INNER_SRC_IP); - if (tcp_ip6_spec->tclass) - return -EOPNOTSUPP; + if (!spec->ip6dst[0] && !spec->ip6dst[1] && + !spec->ip6dst[2] && !spec->ip6dst[3]) + *unused_tuple |= BIT(INNER_DST_IP); - break; - case IPV6_USER_FLOW: - usr_ip6_spec = &fs->h_u.usr_ip6_spec; - *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) | - BIT(INNER_IP_TOS) | BIT(INNER_SRC_PORT) | - BIT(INNER_DST_PORT); + if (!spec->psrc) + *unused_tuple |= BIT(INNER_SRC_PORT); - /* check whether src/dst ip address used */ - if (!usr_ip6_spec->ip6src[0] && !usr_ip6_spec->ip6src[1] && - !usr_ip6_spec->ip6src[2] && !usr_ip6_spec->ip6src[3]) - *unused |= BIT(INNER_SRC_IP); + if (!spec->pdst) + *unused_tuple |= BIT(INNER_DST_PORT); - if (!usr_ip6_spec->ip6dst[0] && !usr_ip6_spec->ip6dst[1] && - !usr_ip6_spec->ip6dst[2] && !usr_ip6_spec->ip6dst[3]) - *unused |= BIT(INNER_DST_IP); + if (spec->tclass) + return -EOPNOTSUPP; - if (!usr_ip6_spec->l4_proto) - *unused |= BIT(INNER_IP_PROTO); + return 0; +} - if (usr_ip6_spec->tclass) - return -EOPNOTSUPP; +static int hclge_fd_check_ip6_tuple(struct ethtool_usrip6_spec *spec, + u32 *unused_tuple) +{ + if (!spec || !unused_tuple) + return -EINVAL; - if (usr_ip6_spec->l4_4_bytes) - return -EOPNOTSUPP; + *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) | + BIT(INNER_IP_TOS) | BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT); - break; - case ETHER_FLOW: - ether_spec = &fs->h_u.ether_spec; - *unused |= BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) | - BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT) | - BIT(INNER_IP_TOS) | BIT(INNER_IP_PROTO); + /* check whether src/dst ip address used */ + if (!spec->ip6src[0] && !spec->ip6src[1] && + !spec->ip6src[2] && !spec->ip6src[3]) + *unused_tuple |= BIT(INNER_SRC_IP); - if (is_zero_ether_addr(ether_spec->h_source)) - *unused |= BIT(INNER_SRC_MAC); + if (!spec->ip6dst[0] && !spec->ip6dst[1] && + !spec->ip6dst[2] && !spec->ip6dst[3]) + *unused_tuple |= BIT(INNER_DST_IP); - if (is_zero_ether_addr(ether_spec->h_dest)) - *unused |= BIT(INNER_DST_MAC); + if (!spec->l4_proto) + *unused_tuple |= BIT(INNER_IP_PROTO); - if (!ether_spec->h_proto) - *unused |= BIT(INNER_ETH_TYPE); + if (spec->tclass) + return -EOPNOTSUPP; - break; - default: + if (spec->l4_4_bytes) return -EOPNOTSUPP; - } + return 0; +} + +static int hclge_fd_check_ether_tuple(struct ethhdr *spec, u32 *unused_tuple) +{ + if (!spec || !unused_tuple) + return -EINVAL; + + *unused_tuple |= BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) | + BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT) | + BIT(INNER_IP_TOS) | BIT(INNER_IP_PROTO); + + if (is_zero_ether_addr(spec->h_source)) + *unused_tuple |= BIT(INNER_SRC_MAC); + + if 
(is_zero_ether_addr(spec->h_dest)) + *unused_tuple |= BIT(INNER_DST_MAC); + + if (!spec->h_proto) + *unused_tuple |= BIT(INNER_ETH_TYPE); + + return 0; +} + +static int hclge_fd_check_ext_tuple(struct hclge_dev *hdev, + struct ethtool_rx_flow_spec *fs, + u32 *unused_tuple) +{ if ((fs->flow_type & FLOW_EXT)) { if (fs->h_ext.vlan_etype) return -EOPNOTSUPP; if (!fs->h_ext.vlan_tci) - *unused |= BIT(INNER_VLAN_TAG_FST); + *unused_tuple |= BIT(INNER_VLAN_TAG_FST); - if (fs->m_ext.vlan_tci) { - if (be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID) - return -EINVAL; - } + if (fs->m_ext.vlan_tci && + be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID) + return -EINVAL; } else { - *unused |= BIT(INNER_VLAN_TAG_FST); + *unused_tuple |= BIT(INNER_VLAN_TAG_FST); } if (fs->flow_type & FLOW_MAC_EXT) { @@ -5402,14 +5403,74 @@ static int hclge_fd_check_spec(struct hclge_dev *hdev, return -EOPNOTSUPP; if (is_zero_ether_addr(fs->h_ext.h_dest)) - *unused |= BIT(INNER_DST_MAC); + *unused_tuple |= BIT(INNER_DST_MAC); else - *unused &= ~(BIT(INNER_DST_MAC)); + *unused_tuple &= ~(BIT(INNER_DST_MAC)); } return 0; } +static int hclge_fd_check_spec(struct hclge_dev *hdev, + struct ethtool_rx_flow_spec *fs, + u32 *unused_tuple) +{ + int ret = 0; + + if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) + return -EINVAL; + + if (!(fs->flow_type & hdev->fd_cfg.proto_support)) + return -EOPNOTSUPP; + + if ((fs->flow_type & FLOW_EXT) && + (fs->h_ext.data[0] != 0 || fs->h_ext.data[1] != 0)) { + dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n"); + return -EOPNOTSUPP; + } + + switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) { + case SCTP_V4_FLOW: + case TCP_V4_FLOW: + case UDP_V4_FLOW: + ret = hclge_fd_check_tcpip4_tuple(&fs->h_u.tcp_ip4_spec, + unused_tuple); + break; + case IP_USER_FLOW: + ret = hclge_fd_check_ip4_tuple(&fs->h_u.usr_ip4_spec, + unused_tuple); + break; + case SCTP_V6_FLOW: + case TCP_V6_FLOW: + case UDP_V6_FLOW: + ret = hclge_fd_check_tcpip6_tuple(&fs->h_u.tcp_ip6_spec, + unused_tuple); + break; + case IPV6_USER_FLOW: + ret = hclge_fd_check_ip6_tuple(&fs->h_u.usr_ip6_spec, + unused_tuple); + break; + case ETHER_FLOW: + if (hdev->fd_cfg.fd_mode != + HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) { + dev_err(&hdev->pdev->dev, + "ETHER_FLOW is not supported in current fd mode!\n"); + return -EOPNOTSUPP; + } + + ret = hclge_fd_check_ether_tuple(&fs->h_u.ether_spec, + unused_tuple); + break; + default: + return -EOPNOTSUPP; + } + + if (ret) + return ret; + + return hclge_fd_check_ext_tuple(hdev, fs, unused_tuple); +} + static bool hclge_fd_rule_exist(struct hclge_dev *hdev, u16 location) { struct hclge_fd_rule *rule = NULL; From patchwork Sat Apr 18 06:47:01 2020 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: tanhuazhong X-Patchwork-Id: 1272584 X-Patchwork-Delegate: davem@davemloft.net Return-Path: X-Original-To: patchwork-incoming-netdev@ozlabs.org Delivered-To: patchwork-incoming-netdev@ozlabs.org Authentication-Results: ozlabs.org; spf=pass (sender SPF authorized) smtp.mailfrom=vger.kernel.org (client-ip=23.128.96.18; helo=vger.kernel.org; envelope-from=netdev-owner@vger.kernel.org; receiver=) Authentication-Results: ozlabs.org; dmarc=none (p=none dis=none) header.from=huawei.com Received: from vger.kernel.org (vger.kernel.org [23.128.96.18]) by ozlabs.org (Postfix) with ESMTP id 4943T52pHnz9sSm for ; Sat, 18 Apr 2020 16:48:41 +1000 (AEST) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id 
S1726284AbgDRGsk (ORCPT ); Sat, 18 Apr 2020 02:48:40 -0400 Received: from szxga07-in.huawei.com ([45.249.212.35]:54280 "EHLO huawei.com" rhost-flags-OK-OK-OK-FAIL) by vger.kernel.org with ESMTP id S1726025AbgDRGs0 (ORCPT ); Sat, 18 Apr 2020 02:48:26 -0400 Received: from DGGEMS402-HUB.china.huawei.com (unknown [172.30.72.60]) by Forcepoint Email with ESMTP id 1C1733D0F3A361891DAD; Sat, 18 Apr 2020 14:48:20 +0800 (CST) Received: from localhost.localdomain (10.69.192.56) by DGGEMS402-HUB.china.huawei.com (10.3.19.202) with Microsoft SMTP Server id 14.3.487.0; Sat, 18 Apr 2020 14:48:09 +0800 From: Huazhong Tan To: CC: , , , , , , Jian Shen , Huazhong Tan Subject: [PATCH net-next 02/10] net: hns3: split out hclge_get_fd_rule_info() Date: Sat, 18 Apr 2020 14:47:01 +0800 Message-ID: <1587192429-11463-3-git-send-email-tanhuazhong@huawei.com> X-Mailer: git-send-email 2.7.4 In-Reply-To: <1587192429-11463-1-git-send-email-tanhuazhong@huawei.com> References: <1587192429-11463-1-git-send-email-tanhuazhong@huawei.com> MIME-Version: 1.0 X-Originating-IP: [10.69.192.56] X-CFilter-Loop: Reflected Sender: netdev-owner@vger.kernel.org Precedence: bulk List-ID: X-Mailing-List: netdev@vger.kernel.org From: Jian Shen hclge_get_fd_rule_info() is bloated, this patch separates it into several standalone functions for readability and maintainability. Signed-off-by: Jian Shen Signed-off-by: Huazhong Tan --- .../ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 303 +++++++++++---------- 1 file changed, 159 insertions(+), 144 deletions(-) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index 6381c0f..0aa8db1 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -5938,6 +5938,149 @@ static int hclge_get_fd_rule_cnt(struct hnae3_handle *handle, return 0; } +static void hclge_fd_get_tcpip4_info(struct hclge_fd_rule *rule, + struct ethtool_tcpip4_spec *spec, + struct ethtool_tcpip4_spec *spec_mask) +{ + spec->ip4src = cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]); + spec_mask->ip4src = rule->unused_tuple & BIT(INNER_SRC_IP) ? + 0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]); + + spec->ip4dst = cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]); + spec_mask->ip4dst = rule->unused_tuple & BIT(INNER_DST_IP) ? + 0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]); + + spec->psrc = cpu_to_be16(rule->tuples.src_port); + spec_mask->psrc = rule->unused_tuple & BIT(INNER_SRC_PORT) ? + 0 : cpu_to_be16(rule->tuples_mask.src_port); + + spec->pdst = cpu_to_be16(rule->tuples.dst_port); + spec_mask->pdst = rule->unused_tuple & BIT(INNER_DST_PORT) ? + 0 : cpu_to_be16(rule->tuples_mask.dst_port); + + spec->tos = rule->tuples.ip_tos; + spec_mask->tos = rule->unused_tuple & BIT(INNER_IP_TOS) ? + 0 : rule->tuples_mask.ip_tos; +} + +static void hclge_fd_get_ip4_info(struct hclge_fd_rule *rule, + struct ethtool_usrip4_spec *spec, + struct ethtool_usrip4_spec *spec_mask) +{ + spec->ip4src = cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]); + spec_mask->ip4src = rule->unused_tuple & BIT(INNER_SRC_IP) ? + 0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]); + + spec->ip4dst = cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]); + spec_mask->ip4dst = rule->unused_tuple & BIT(INNER_DST_IP) ? + 0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]); + + spec->tos = rule->tuples.ip_tos; + spec_mask->tos = rule->unused_tuple & BIT(INNER_IP_TOS) ? 
+ 0 : rule->tuples_mask.ip_tos; + + spec->proto = rule->tuples.ip_proto; + spec_mask->proto = rule->unused_tuple & BIT(INNER_IP_PROTO) ? + 0 : rule->tuples_mask.ip_proto; + + spec->ip_ver = ETH_RX_NFC_IP4; +} + +static void hclge_fd_get_tcpip6_info(struct hclge_fd_rule *rule, + struct ethtool_tcpip6_spec *spec, + struct ethtool_tcpip6_spec *spec_mask) +{ + cpu_to_be32_array(spec->ip6src, + rule->tuples.src_ip, IPV6_SIZE); + cpu_to_be32_array(spec->ip6dst, + rule->tuples.dst_ip, IPV6_SIZE); + if (rule->unused_tuple & BIT(INNER_SRC_IP)) + memset(spec_mask->ip6src, 0, sizeof(spec_mask->ip6src)); + else + cpu_to_be32_array(spec_mask->ip6src, rule->tuples_mask.src_ip, + IPV6_SIZE); + + if (rule->unused_tuple & BIT(INNER_DST_IP)) + memset(spec_mask->ip6dst, 0, sizeof(spec_mask->ip6dst)); + else + cpu_to_be32_array(spec_mask->ip6dst, rule->tuples_mask.dst_ip, + IPV6_SIZE); + + spec->psrc = cpu_to_be16(rule->tuples.src_port); + spec_mask->psrc = rule->unused_tuple & BIT(INNER_SRC_PORT) ? + 0 : cpu_to_be16(rule->tuples_mask.src_port); + + spec->pdst = cpu_to_be16(rule->tuples.dst_port); + spec_mask->pdst = rule->unused_tuple & BIT(INNER_DST_PORT) ? + 0 : cpu_to_be16(rule->tuples_mask.dst_port); +} + +static void hclge_fd_get_ip6_info(struct hclge_fd_rule *rule, + struct ethtool_usrip6_spec *spec, + struct ethtool_usrip6_spec *spec_mask) +{ + cpu_to_be32_array(spec->ip6src, rule->tuples.src_ip, IPV6_SIZE); + cpu_to_be32_array(spec->ip6dst, rule->tuples.dst_ip, IPV6_SIZE); + if (rule->unused_tuple & BIT(INNER_SRC_IP)) + memset(spec_mask->ip6src, 0, sizeof(spec_mask->ip6src)); + else + cpu_to_be32_array(spec_mask->ip6src, + rule->tuples_mask.src_ip, IPV6_SIZE); + + if (rule->unused_tuple & BIT(INNER_DST_IP)) + memset(spec_mask->ip6dst, 0, sizeof(spec_mask->ip6dst)); + else + cpu_to_be32_array(spec_mask->ip6dst, + rule->tuples_mask.dst_ip, IPV6_SIZE); + + spec->l4_proto = rule->tuples.ip_proto; + spec_mask->l4_proto = rule->unused_tuple & BIT(INNER_IP_PROTO) ? + 0 : rule->tuples_mask.ip_proto; +} + +static void hclge_fd_get_ether_info(struct hclge_fd_rule *rule, + struct ethhdr *spec, + struct ethhdr *spec_mask) +{ + ether_addr_copy(spec->h_source, rule->tuples.src_mac); + ether_addr_copy(spec->h_dest, rule->tuples.dst_mac); + + if (rule->unused_tuple & BIT(INNER_SRC_MAC)) + eth_zero_addr(spec_mask->h_source); + else + ether_addr_copy(spec_mask->h_source, rule->tuples_mask.src_mac); + + if (rule->unused_tuple & BIT(INNER_DST_MAC)) + eth_zero_addr(spec_mask->h_dest); + else + ether_addr_copy(spec_mask->h_dest, rule->tuples_mask.dst_mac); + + spec->h_proto = cpu_to_be16(rule->tuples.ether_proto); + spec_mask->h_proto = rule->unused_tuple & BIT(INNER_ETH_TYPE) ? + 0 : cpu_to_be16(rule->tuples_mask.ether_proto); +} + +static void hclge_fd_get_ext_info(struct ethtool_rx_flow_spec *fs, + struct hclge_fd_rule *rule) +{ + if (fs->flow_type & FLOW_EXT) { + fs->h_ext.vlan_tci = cpu_to_be16(rule->tuples.vlan_tag1); + fs->m_ext.vlan_tci = + rule->unused_tuple & BIT(INNER_VLAN_TAG_FST) ? 
+ cpu_to_be16(VLAN_VID_MASK) : + cpu_to_be16(rule->tuples_mask.vlan_tag1); + } + + if (fs->flow_type & FLOW_MAC_EXT) { + ether_addr_copy(fs->h_ext.h_dest, rule->tuples.dst_mac); + if (rule->unused_tuple & BIT(INNER_DST_MAC)) + eth_zero_addr(fs->m_u.ether_spec.h_dest); + else + ether_addr_copy(fs->m_u.ether_spec.h_dest, + rule->tuples_mask.dst_mac); + } +} + static int hclge_get_fd_rule_info(struct hnae3_handle *handle, struct ethtool_rxnfc *cmd) { @@ -5970,162 +6113,34 @@ static int hclge_get_fd_rule_info(struct hnae3_handle *handle, case SCTP_V4_FLOW: case TCP_V4_FLOW: case UDP_V4_FLOW: - fs->h_u.tcp_ip4_spec.ip4src = - cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]); - fs->m_u.tcp_ip4_spec.ip4src = - rule->unused_tuple & BIT(INNER_SRC_IP) ? - 0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]); - - fs->h_u.tcp_ip4_spec.ip4dst = - cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]); - fs->m_u.tcp_ip4_spec.ip4dst = - rule->unused_tuple & BIT(INNER_DST_IP) ? - 0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]); - - fs->h_u.tcp_ip4_spec.psrc = cpu_to_be16(rule->tuples.src_port); - fs->m_u.tcp_ip4_spec.psrc = - rule->unused_tuple & BIT(INNER_SRC_PORT) ? - 0 : cpu_to_be16(rule->tuples_mask.src_port); - - fs->h_u.tcp_ip4_spec.pdst = cpu_to_be16(rule->tuples.dst_port); - fs->m_u.tcp_ip4_spec.pdst = - rule->unused_tuple & BIT(INNER_DST_PORT) ? - 0 : cpu_to_be16(rule->tuples_mask.dst_port); - - fs->h_u.tcp_ip4_spec.tos = rule->tuples.ip_tos; - fs->m_u.tcp_ip4_spec.tos = - rule->unused_tuple & BIT(INNER_IP_TOS) ? - 0 : rule->tuples_mask.ip_tos; - + hclge_fd_get_tcpip4_info(rule, &fs->h_u.tcp_ip4_spec, + &fs->m_u.tcp_ip4_spec); break; case IP_USER_FLOW: - fs->h_u.usr_ip4_spec.ip4src = - cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]); - fs->m_u.tcp_ip4_spec.ip4src = - rule->unused_tuple & BIT(INNER_SRC_IP) ? - 0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]); - - fs->h_u.usr_ip4_spec.ip4dst = - cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]); - fs->m_u.usr_ip4_spec.ip4dst = - rule->unused_tuple & BIT(INNER_DST_IP) ? - 0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]); - - fs->h_u.usr_ip4_spec.tos = rule->tuples.ip_tos; - fs->m_u.usr_ip4_spec.tos = - rule->unused_tuple & BIT(INNER_IP_TOS) ? - 0 : rule->tuples_mask.ip_tos; - - fs->h_u.usr_ip4_spec.proto = rule->tuples.ip_proto; - fs->m_u.usr_ip4_spec.proto = - rule->unused_tuple & BIT(INNER_IP_PROTO) ? - 0 : rule->tuples_mask.ip_proto; - - fs->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4; - + hclge_fd_get_ip4_info(rule, &fs->h_u.usr_ip4_spec, + &fs->m_u.usr_ip4_spec); break; case SCTP_V6_FLOW: case TCP_V6_FLOW: case UDP_V6_FLOW: - cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6src, - rule->tuples.src_ip, IPV6_SIZE); - if (rule->unused_tuple & BIT(INNER_SRC_IP)) - memset(fs->m_u.tcp_ip6_spec.ip6src, 0, - sizeof(int) * IPV6_SIZE); - else - cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6src, - rule->tuples_mask.src_ip, IPV6_SIZE); - - cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6dst, - rule->tuples.dst_ip, IPV6_SIZE); - if (rule->unused_tuple & BIT(INNER_DST_IP)) - memset(fs->m_u.tcp_ip6_spec.ip6dst, 0, - sizeof(int) * IPV6_SIZE); - else - cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6dst, - rule->tuples_mask.dst_ip, IPV6_SIZE); - - fs->h_u.tcp_ip6_spec.psrc = cpu_to_be16(rule->tuples.src_port); - fs->m_u.tcp_ip6_spec.psrc = - rule->unused_tuple & BIT(INNER_SRC_PORT) ? - 0 : cpu_to_be16(rule->tuples_mask.src_port); - - fs->h_u.tcp_ip6_spec.pdst = cpu_to_be16(rule->tuples.dst_port); - fs->m_u.tcp_ip6_spec.pdst = - rule->unused_tuple & BIT(INNER_DST_PORT) ? 
- 0 : cpu_to_be16(rule->tuples_mask.dst_port); - + hclge_fd_get_tcpip6_info(rule, &fs->h_u.tcp_ip6_spec, + &fs->m_u.tcp_ip6_spec); break; case IPV6_USER_FLOW: - cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6src, - rule->tuples.src_ip, IPV6_SIZE); - if (rule->unused_tuple & BIT(INNER_SRC_IP)) - memset(fs->m_u.usr_ip6_spec.ip6src, 0, - sizeof(int) * IPV6_SIZE); - else - cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6src, - rule->tuples_mask.src_ip, IPV6_SIZE); - - cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6dst, - rule->tuples.dst_ip, IPV6_SIZE); - if (rule->unused_tuple & BIT(INNER_DST_IP)) - memset(fs->m_u.usr_ip6_spec.ip6dst, 0, - sizeof(int) * IPV6_SIZE); - else - cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6dst, - rule->tuples_mask.dst_ip, IPV6_SIZE); - - fs->h_u.usr_ip6_spec.l4_proto = rule->tuples.ip_proto; - fs->m_u.usr_ip6_spec.l4_proto = - rule->unused_tuple & BIT(INNER_IP_PROTO) ? - 0 : rule->tuples_mask.ip_proto; - - break; - case ETHER_FLOW: - ether_addr_copy(fs->h_u.ether_spec.h_source, - rule->tuples.src_mac); - if (rule->unused_tuple & BIT(INNER_SRC_MAC)) - eth_zero_addr(fs->m_u.ether_spec.h_source); - else - ether_addr_copy(fs->m_u.ether_spec.h_source, - rule->tuples_mask.src_mac); - - ether_addr_copy(fs->h_u.ether_spec.h_dest, - rule->tuples.dst_mac); - if (rule->unused_tuple & BIT(INNER_DST_MAC)) - eth_zero_addr(fs->m_u.ether_spec.h_dest); - else - ether_addr_copy(fs->m_u.ether_spec.h_dest, - rule->tuples_mask.dst_mac); - - fs->h_u.ether_spec.h_proto = - cpu_to_be16(rule->tuples.ether_proto); - fs->m_u.ether_spec.h_proto = - rule->unused_tuple & BIT(INNER_ETH_TYPE) ? - 0 : cpu_to_be16(rule->tuples_mask.ether_proto); - + hclge_fd_get_ip6_info(rule, &fs->h_u.usr_ip6_spec, + &fs->m_u.usr_ip6_spec); break; + /* The flow type of fd rule has been checked before adding in to rule + * list. As other flow types have been handled, it must be ETHER_FLOW + * for the default case + */ default: - spin_unlock_bh(&hdev->fd_rule_lock); - return -EOPNOTSUPP; - } - - if (fs->flow_type & FLOW_EXT) { - fs->h_ext.vlan_tci = cpu_to_be16(rule->tuples.vlan_tag1); - fs->m_ext.vlan_tci = - rule->unused_tuple & BIT(INNER_VLAN_TAG_FST) ? 
- cpu_to_be16(VLAN_VID_MASK) : - cpu_to_be16(rule->tuples_mask.vlan_tag1); + hclge_fd_get_ether_info(rule, &fs->h_u.ether_spec, + &fs->m_u.ether_spec); + break; } - if (fs->flow_type & FLOW_MAC_EXT) { - ether_addr_copy(fs->h_ext.h_dest, rule->tuples.dst_mac); - if (rule->unused_tuple & BIT(INNER_DST_MAC)) - eth_zero_addr(fs->m_u.ether_spec.h_dest); - else - ether_addr_copy(fs->m_u.ether_spec.h_dest, - rule->tuples_mask.dst_mac); - } + hclge_fd_get_ext_info(fs, rule); if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) { fs->ring_cookie = RX_CLS_FLOW_DISC; From patchwork Sat Apr 18 06:47:02 2020 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: tanhuazhong X-Patchwork-Id: 1272580 X-Patchwork-Delegate: davem@davemloft.net Return-Path: X-Original-To: patchwork-incoming-netdev@ozlabs.org Delivered-To: patchwork-incoming-netdev@ozlabs.org Authentication-Results: ozlabs.org; spf=pass (sender SPF authorized) smtp.mailfrom=vger.kernel.org (client-ip=23.128.96.18; helo=vger.kernel.org; envelope-from=netdev-owner@vger.kernel.org; receiver=) Authentication-Results: ozlabs.org; dmarc=none (p=none dis=none) header.from=huawei.com Received: from vger.kernel.org (vger.kernel.org [23.128.96.18]) by ozlabs.org (Postfix) with ESMTP id 4943Sm1L5bz9sSm for ; Sat, 18 Apr 2020 16:48:24 +1000 (AEST) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S1726036AbgDRGsW (ORCPT ); Sat, 18 Apr 2020 02:48:22 -0400 Received: from szxga07-in.huawei.com ([45.249.212.35]:54148 "EHLO huawei.com" rhost-flags-OK-OK-OK-FAIL) by vger.kernel.org with ESMTP id S1725969AbgDRGsV (ORCPT ); Sat, 18 Apr 2020 02:48:21 -0400 Received: from DGGEMS402-HUB.china.huawei.com (unknown [172.30.72.60]) by Forcepoint Email with ESMTP id 05E794ED5D5154712248; Sat, 18 Apr 2020 14:48:20 +0800 (CST) Received: from localhost.localdomain (10.69.192.56) by DGGEMS402-HUB.china.huawei.com (10.3.19.202) with Microsoft SMTP Server id 14.3.487.0; Sat, 18 Apr 2020 14:48:09 +0800 From: Huazhong Tan To: CC: , , , , , , Huazhong Tan Subject: [PATCH net-next 03/10] net: hns3: remove an unnecessary case 0 in hclge_fd_convert_tuple() Date: Sat, 18 Apr 2020 14:47:02 +0800 Message-ID: <1587192429-11463-4-git-send-email-tanhuazhong@huawei.com> X-Mailer: git-send-email 2.7.4 In-Reply-To: <1587192429-11463-1-git-send-email-tanhuazhong@huawei.com> References: <1587192429-11463-1-git-send-email-tanhuazhong@huawei.com> MIME-Version: 1.0 X-Originating-IP: [10.69.192.56] X-CFilter-Loop: Reflected Sender: netdev-owner@vger.kernel.org Precedence: bulk List-ID: X-Mailing-List: netdev@vger.kernel.org Since case default has included case 0, so removes this redundant case 0. 
Signed-off-by: Huazhong Tan --- drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index 0aa8db1..5f1bea3 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -5006,8 +5006,6 @@ static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y, return true; switch (tuple_bit) { - case 0: - return false; case BIT(INNER_DST_MAC): for (i = 0; i < ETH_ALEN; i++) { calc_x(key_x[ETH_ALEN - 1 - i], rule->tuples.dst_mac[i], From patchwork Sat Apr 18 06:47:03 2020 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: tanhuazhong X-Patchwork-Id: 1272582 X-Patchwork-Delegate: davem@davemloft.net Return-Path: X-Original-To: patchwork-incoming-netdev@ozlabs.org Delivered-To: patchwork-incoming-netdev@ozlabs.org Authentication-Results: ozlabs.org; spf=pass (sender SPF authorized) smtp.mailfrom=vger.kernel.org (client-ip=23.128.96.18; helo=vger.kernel.org; envelope-from=netdev-owner@vger.kernel.org; receiver=) Authentication-Results: ozlabs.org; dmarc=none (p=none dis=none) header.from=huawei.com Received: from vger.kernel.org (vger.kernel.org [23.128.96.18]) by ozlabs.org (Postfix) with ESMTP id 4943Ss1vrcz9sSm for ; Sat, 18 Apr 2020 16:48:29 +1000 (AEST) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S1726168AbgDRGs2 (ORCPT ); Sat, 18 Apr 2020 02:48:28 -0400 Received: from szxga07-in.huawei.com ([45.249.212.35]:54288 "EHLO huawei.com" rhost-flags-OK-OK-OK-FAIL) by vger.kernel.org with ESMTP id S1726049AbgDRGsY (ORCPT ); Sat, 18 Apr 2020 02:48:24 -0400 Received: from DGGEMS402-HUB.china.huawei.com (unknown [172.30.72.60]) by Forcepoint Email with ESMTP id 26E4CABB9F7D3D80417B; Sat, 18 Apr 2020 14:48:20 +0800 (CST) Received: from localhost.localdomain (10.69.192.56) by DGGEMS402-HUB.china.huawei.com (10.3.19.202) with Microsoft SMTP Server id 14.3.487.0; Sat, 18 Apr 2020 14:48:10 +0800 From: Huazhong Tan To: CC: , , , , , , Guojia Liao , Huazhong Tan Subject: [PATCH net-next 04/10] net: hns3: remove useless proto_support field in struct hclge_fd_cfg Date: Sat, 18 Apr 2020 14:47:03 +0800 Message-ID: <1587192429-11463-5-git-send-email-tanhuazhong@huawei.com> X-Mailer: git-send-email 2.7.4 In-Reply-To: <1587192429-11463-1-git-send-email-tanhuazhong@huawei.com> References: <1587192429-11463-1-git-send-email-tanhuazhong@huawei.com> MIME-Version: 1.0 X-Originating-IP: [10.69.192.56] X-CFilter-Loop: Reflected Sender: netdev-owner@vger.kernel.org Precedence: bulk List-ID: X-Mailing-List: netdev@vger.kernel.org From: Guojia Liao proto_support field in struct hclge_fd_cfg shows what protocols in flow direct table are supported now. It is unnecessary since checking which one is unsupported will be more efficient, so this patch removes it. 
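
As a hedged before/after sketch of the idea (plain standalone C with invented names, not the driver code): rather than keeping a separate bitmask of supported protocols that must stay in sync with the dispatch switch, the switch's default branch becomes the single place where unsupported types are rejected.

#include <stdio.h>
#include <errno.h>

enum ex_flow { EX_FLOW_TCP4 = 1, EX_FLOW_UDP4, EX_FLOW_SCTP4, EX_FLOW_UNKNOWN };

/* Before (sketch): a separate "supported" bitmask had to be kept in
 * sync with the switch below:
 *
 *	if (!((1u << type) & cfg_proto_support))
 *		return -EOPNOTSUPP;
 *
 * After: the default branch of the existing switch is the single
 * source of truth for what is not supported. */
static int ex_check_flow(enum ex_flow type)
{
	switch (type) {
	case EX_FLOW_TCP4:
	case EX_FLOW_UDP4:
	case EX_FLOW_SCTP4:
		return 0;		/* handled flow types */
	default:
		return -EOPNOTSUPP;	/* everything else rejected here */
	}
}

int main(void)
{
	printf("tcp4 -> %d, unknown -> %d\n",
	       ex_check_flow(EX_FLOW_TCP4), ex_check_flow(EX_FLOW_UNKNOWN));
	return 0;
}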
Signed-off-by: Guojia Liao Signed-off-by: Huazhong Tan --- drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 17 ++++++----------- drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h | 1 - 2 files changed, 6 insertions(+), 12 deletions(-) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index 5f1bea3..90d2c77 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -4876,9 +4876,6 @@ static int hclge_init_fd_config(struct hclge_dev *hdev) return -EOPNOTSUPP; } - hdev->fd_cfg.proto_support = - TCP_V4_FLOW | UDP_V4_FLOW | SCTP_V4_FLOW | TCP_V6_FLOW | - UDP_V6_FLOW | SCTP_V6_FLOW | IPV4_USER_FLOW | IPV6_USER_FLOW; key_cfg = &hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1]; key_cfg->key_sel = HCLGE_FD_KEY_BASE_ON_TUPLE, key_cfg->inner_sipv6_word_en = LOW_2_WORDS; @@ -4892,11 +4889,9 @@ static int hclge_init_fd_config(struct hclge_dev *hdev) BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT); /* If use max 400bit key, we can support tuples for ether type */ - if (hdev->fd_cfg.max_key_length == MAX_KEY_LENGTH) { - hdev->fd_cfg.proto_support |= ETHER_FLOW; + if (hdev->fd_cfg.fd_mode == HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) key_cfg->tuple_active |= BIT(INNER_DST_MAC) | BIT(INNER_SRC_MAC); - } /* roce_type is used to filter roce frames * dst_vport is used to specify the rule @@ -5397,7 +5392,8 @@ static int hclge_fd_check_ext_tuple(struct hclge_dev *hdev, } if (fs->flow_type & FLOW_MAC_EXT) { - if (!(hdev->fd_cfg.proto_support & ETHER_FLOW)) + if (hdev->fd_cfg.fd_mode != + HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) return -EOPNOTSUPP; if (is_zero_ether_addr(fs->h_ext.h_dest)) @@ -5413,21 +5409,20 @@ static int hclge_fd_check_spec(struct hclge_dev *hdev, struct ethtool_rx_flow_spec *fs, u32 *unused_tuple) { + u32 flow_type; int ret = 0; if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) return -EINVAL; - if (!(fs->flow_type & hdev->fd_cfg.proto_support)) - return -EOPNOTSUPP; - if ((fs->flow_type & FLOW_EXT) && (fs->h_ext.data[0] != 0 || fs->h_ext.data[1] != 0)) { dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n"); return -EOPNOTSUPP; } - switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) { + flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT); + switch (flow_type) { case SCTP_V4_FLOW: case TCP_V4_FLOW: case UDP_V4_FLOW: diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h index 71df23d..a58c262 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h @@ -580,7 +580,6 @@ struct hclge_fd_key_cfg { struct hclge_fd_cfg { u8 fd_mode; u16 max_key_length; /* use bit as unit */ - u32 proto_support; u32 rule_num[MAX_STAGE_NUM]; /* rule entry number */ u16 cnt_num[MAX_STAGE_NUM]; /* rule hit counter number */ struct hclge_fd_key_cfg key_cfg[MAX_STAGE_NUM]; From patchwork Sat Apr 18 06:47:04 2020 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: tanhuazhong X-Patchwork-Id: 1272587 X-Patchwork-Delegate: davem@davemloft.net Return-Path: X-Original-To: patchwork-incoming-netdev@ozlabs.org Delivered-To: patchwork-incoming-netdev@ozlabs.org Authentication-Results: ozlabs.org; spf=pass (sender SPF authorized) smtp.mailfrom=vger.kernel.org (client-ip=23.128.96.18; helo=vger.kernel.org; envelope-from=netdev-owner@vger.kernel.org; 
receiver=) Authentication-Results: ozlabs.org; dmarc=none (p=none dis=none) header.from=huawei.com Received: from vger.kernel.org (vger.kernel.org [23.128.96.18]) by ozlabs.org (Postfix) with ESMTP id 4943TS27n2z9sP7 for ; Sat, 18 Apr 2020 16:49:00 +1000 (AEST) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S1726410AbgDRGs7 (ORCPT ); Sat, 18 Apr 2020 02:48:59 -0400 Received: from szxga07-in.huawei.com ([45.249.212.35]:54198 "EHLO huawei.com" rhost-flags-OK-OK-OK-FAIL) by vger.kernel.org with ESMTP id S1725987AbgDRGsW (ORCPT ); Sat, 18 Apr 2020 02:48:22 -0400 Received: from DGGEMS402-HUB.china.huawei.com (unknown [172.30.72.60]) by Forcepoint Email with ESMTP id 1734FD424A09ACE620F8; Sat, 18 Apr 2020 14:48:20 +0800 (CST) Received: from localhost.localdomain (10.69.192.56) by DGGEMS402-HUB.china.huawei.com (10.3.19.202) with Microsoft SMTP Server id 14.3.487.0; Sat, 18 Apr 2020 14:48:10 +0800 From: Huazhong Tan To: CC: , , , , , , Guojia Liao , Huazhong Tan Subject: [PATCH net-next 05/10] net: hns3: remove two unused structures in hclge_cmd.h Date: Sat, 18 Apr 2020 14:47:04 +0800 Message-ID: <1587192429-11463-6-git-send-email-tanhuazhong@huawei.com> X-Mailer: git-send-email 2.7.4 In-Reply-To: <1587192429-11463-1-git-send-email-tanhuazhong@huawei.com> References: <1587192429-11463-1-git-send-email-tanhuazhong@huawei.com> MIME-Version: 1.0 X-Originating-IP: [10.69.192.56] X-CFilter-Loop: Reflected Sender: netdev-owner@vger.kernel.org Precedence: bulk List-ID: X-Mailing-List: netdev@vger.kernel.org From: Guojia Liao struct hclge_mac_vlan_remove_cmd and hclge_mac_vlan_add_cmd are unused. So removes them from hclge_cmd.h. Signed-off-by: Guojia Liao Signed-off-by: Huazhong Tan --- .../net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h | 25 ---------------------- 1 file changed, 25 deletions(-) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h index 96498d9..90e422ef 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h @@ -733,31 +733,6 @@ struct hclge_mac_mgr_tbl_entry_cmd { u8 rsv3[2]; }; -struct hclge_mac_vlan_add_cmd { - __le16 flags; - __le16 mac_addr_hi16; - __le32 mac_addr_lo32; - __le32 mac_addr_msk_hi32; - __le16 mac_addr_msk_lo16; - __le16 vlan_tag; - __le16 ingress_port; - __le16 egress_port; - u8 rsv[4]; -}; - -#define HNS3_MAC_VLAN_CFG_FLAG_BIT 0 -struct hclge_mac_vlan_remove_cmd { - __le16 flags; - __le16 mac_addr_hi16; - __le32 mac_addr_lo32; - __le32 mac_addr_msk_hi32; - __le16 mac_addr_msk_lo16; - __le16 vlan_tag; - __le16 ingress_port; - __le16 egress_port; - u8 rsv[4]; -}; - struct hclge_vlan_filter_ctrl_cmd { u8 vlan_type; u8 vlan_fe; From patchwork Sat Apr 18 06:47:05 2020 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: tanhuazhong X-Patchwork-Id: 1272585 X-Patchwork-Delegate: davem@davemloft.net Return-Path: X-Original-To: patchwork-incoming-netdev@ozlabs.org Delivered-To: patchwork-incoming-netdev@ozlabs.org Authentication-Results: ozlabs.org; spf=pass (sender SPF authorized) smtp.mailfrom=vger.kernel.org (client-ip=23.128.96.18; helo=vger.kernel.org; envelope-from=netdev-owner@vger.kernel.org; receiver=) Authentication-Results: ozlabs.org; dmarc=none (p=none dis=none) header.from=huawei.com Received: from vger.kernel.org (vger.kernel.org [23.128.96.18]) by ozlabs.org (Postfix) with ESMTP id 4943TH2cbhz9sSv for ; Sat, 18 Apr 2020 16:48:51 
+1000 (AEST) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S1726378AbgDRGsr (ORCPT ); Sat, 18 Apr 2020 02:48:47 -0400 Received: from szxga07-in.huawei.com ([45.249.212.35]:54290 "EHLO huawei.com" rhost-flags-OK-OK-OK-FAIL) by vger.kernel.org with ESMTP id S1726055AbgDRGs0 (ORCPT ); Sat, 18 Apr 2020 02:48:26 -0400 Received: from DGGEMS402-HUB.china.huawei.com (unknown [172.30.72.60]) by Forcepoint Email with ESMTP id 304A21235A4284CB4A5E; Sat, 18 Apr 2020 14:48:20 +0800 (CST) Received: from localhost.localdomain (10.69.192.56) by DGGEMS402-HUB.china.huawei.com (10.3.19.202) with Microsoft SMTP Server id 14.3.487.0; Sat, 18 Apr 2020 14:48:10 +0800 From: Huazhong Tan To: CC: , , , , , , Guojia Liao , Huazhong Tan Subject: [PATCH net-next 06/10] net: hns3: modify some unsuitable type declaration Date: Sat, 18 Apr 2020 14:47:05 +0800 Message-ID: <1587192429-11463-7-git-send-email-tanhuazhong@huawei.com> X-Mailer: git-send-email 2.7.4 In-Reply-To: <1587192429-11463-1-git-send-email-tanhuazhong@huawei.com> References: <1587192429-11463-1-git-send-email-tanhuazhong@huawei.com> MIME-Version: 1.0 X-Originating-IP: [10.69.192.56] X-CFilter-Loop: Reflected Sender: netdev-owner@vger.kernel.org Precedence: bulk List-ID: X-Mailing-List: netdev@vger.kernel.org From: Guojia Liao In hclge_set_fd_key_config(), parameter 'stage' should be as enum HCLGE_FD_STAGE, and in hclge_config_key(), 'tuple_size' should be type u8, also simplify unsigned int with u32 for 'i'. Signed-off-by: Guojia Liao Signed-off-by: Huazhong Tan --- drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index 90d2c77..3a08287 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -4822,7 +4822,8 @@ static int hclge_get_fd_allocation(struct hclge_dev *hdev, return ret; } -static int hclge_set_fd_key_config(struct hclge_dev *hdev, int stage_num) +static int hclge_set_fd_key_config(struct hclge_dev *hdev, + enum HCLGE_FD_STAGE stage_num) { struct hclge_set_fd_key_config_cmd *req; struct hclge_fd_key_cfg *stage; @@ -5158,9 +5159,10 @@ static int hclge_config_key(struct hclge_dev *hdev, u8 stage, struct hclge_fd_key_cfg *key_cfg = &hdev->fd_cfg.key_cfg[stage]; u8 key_x[MAX_KEY_BYTES], key_y[MAX_KEY_BYTES]; u8 *cur_key_x, *cur_key_y; - unsigned int i; - int ret, tuple_size; u8 meta_data_region; + u8 tuple_size; + int ret; + u32 i; memset(key_x, 0, sizeof(key_x)); memset(key_y, 0, sizeof(key_y)); From patchwork Sat Apr 18 06:47:06 2020 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: tanhuazhong X-Patchwork-Id: 1272583 X-Patchwork-Delegate: davem@davemloft.net Return-Path: X-Original-To: patchwork-incoming-netdev@ozlabs.org Delivered-To: patchwork-incoming-netdev@ozlabs.org Authentication-Results: ozlabs.org; spf=pass (sender SPF authorized) smtp.mailfrom=vger.kernel.org (client-ip=23.128.96.18; helo=vger.kernel.org; envelope-from=netdev-owner@vger.kernel.org; receiver=) Authentication-Results: ozlabs.org; dmarc=none (p=none dis=none) header.from=huawei.com Received: from vger.kernel.org (vger.kernel.org [23.128.96.18]) by ozlabs.org (Postfix) with ESMTP id 4943T42hj1z9sSm for ; Sat, 18 Apr 2020 16:48:40 +1000 (AEST) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand 
id S1726086AbgDRGs0 (ORCPT ); Sat, 18 Apr 2020 02:48:26 -0400 Received: from szxga07-in.huawei.com ([45.249.212.35]:54282 "EHLO huawei.com" rhost-flags-OK-OK-OK-FAIL) by vger.kernel.org with ESMTP id S1726024AbgDRGsY (ORCPT ); Sat, 18 Apr 2020 02:48:24 -0400 Received: from DGGEMS402-HUB.china.huawei.com (unknown [172.30.72.60]) by Forcepoint Email with ESMTP id 226CC5B96F97D4DE5B2D; Sat, 18 Apr 2020 14:48:20 +0800 (CST) Received: from localhost.localdomain (10.69.192.56) by DGGEMS402-HUB.china.huawei.com (10.3.19.202) with Microsoft SMTP Server id 14.3.487.0; Sat, 18 Apr 2020 14:48:11 +0800 From: Huazhong Tan To: CC: , , , , , , Huazhong Tan Subject: [PATCH net-next 07/10] net: hns3: clean up some coding style issue Date: Sat, 18 Apr 2020 14:47:06 +0800 Message-ID: <1587192429-11463-8-git-send-email-tanhuazhong@huawei.com> X-Mailer: git-send-email 2.7.4 In-Reply-To: <1587192429-11463-1-git-send-email-tanhuazhong@huawei.com> References: <1587192429-11463-1-git-send-email-tanhuazhong@huawei.com> MIME-Version: 1.0 X-Originating-IP: [10.69.192.56] X-CFilter-Loop: Reflected Sender: netdev-owner@vger.kernel.org Precedence: bulk List-ID: X-Mailing-List: netdev@vger.kernel.org This patch removes some unnecessary blank lines, redundant parentheses, and changes one tab to blank in hclge_dbg_dump_reg_common(). Signed-off-by: Huazhong Tan --- drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c | 2 +- drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 10 +++------- 2 files changed, 4 insertions(+), 8 deletions(-) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c index 1722828..cfc9300 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c @@ -143,7 +143,7 @@ static void hclge_dbg_dump_reg_common(struct hclge_dev *hdev, return; } - buf_len = sizeof(struct hclge_desc) * bd_num; + buf_len = sizeof(struct hclge_desc) * bd_num; desc_src = kzalloc(buf_len, GFP_KERNEL); if (!desc_src) return; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index 3a08287..74efd95 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -5380,7 +5380,7 @@ static int hclge_fd_check_ext_tuple(struct hclge_dev *hdev, struct ethtool_rx_flow_spec *fs, u32 *unused_tuple) { - if ((fs->flow_type & FLOW_EXT)) { + if (fs->flow_type & FLOW_EXT) { if (fs->h_ext.vlan_etype) return -EOPNOTSUPP; if (!fs->h_ext.vlan_tci) @@ -5401,7 +5401,7 @@ static int hclge_fd_check_ext_tuple(struct hclge_dev *hdev, if (is_zero_ether_addr(fs->h_ext.h_dest)) *unused_tuple |= BIT(INNER_DST_MAC); else - *unused_tuple &= ~(BIT(INNER_DST_MAC)); + *unused_tuple &= ~BIT(INNER_DST_MAC); } return 0; @@ -5674,7 +5674,7 @@ static int hclge_fd_get_tuple(struct hclge_dev *hdev, break; } - if ((fs->flow_type & FLOW_EXT)) { + if (fs->flow_type & FLOW_EXT) { rule->tuples.vlan_tag1 = be16_to_cpu(fs->h_ext.vlan_tci); rule->tuples_mask.vlan_tag1 = be16_to_cpu(fs->m_ext.vlan_tci); } @@ -5785,7 +5785,6 @@ static int hclge_add_fd_entry(struct hnae3_handle *handle, } rule->flow_type = fs->flow_type; - rule->location = fs->location; rule->unused_tuple = unused; rule->vf_id = dst_vport_id; @@ -6273,7 +6272,6 @@ static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id, */ if (hdev->fd_active_type == HCLGE_FD_EP_ACTIVE) { 
spin_unlock_bh(&hdev->fd_rule_lock); - return -EOPNOTSUPP; } @@ -6287,14 +6285,12 @@ static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id, bit_id = find_first_zero_bit(hdev->fd_bmap, MAX_FD_FILTER_NUM); if (bit_id >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) { spin_unlock_bh(&hdev->fd_rule_lock); - return -ENOSPC; } rule = kzalloc(sizeof(*rule), GFP_ATOMIC); if (!rule) { spin_unlock_bh(&hdev->fd_rule_lock); - return -ENOMEM; } From patchwork Sat Apr 18 06:47:07 2020 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: tanhuazhong X-Patchwork-Id: 1272586 X-Patchwork-Delegate: davem@davemloft.net Return-Path: X-Original-To: patchwork-incoming-netdev@ozlabs.org Delivered-To: patchwork-incoming-netdev@ozlabs.org Authentication-Results: ozlabs.org; spf=pass (sender SPF authorized) smtp.mailfrom=vger.kernel.org (client-ip=23.128.96.18; helo=vger.kernel.org; envelope-from=netdev-owner@vger.kernel.org; receiver=) Authentication-Results: ozlabs.org; dmarc=none (p=none dis=none) header.from=huawei.com Received: from vger.kernel.org (vger.kernel.org [23.128.96.18]) by ozlabs.org (Postfix) with ESMTP id 4943TM0HR7z9sSw for ; Sat, 18 Apr 2020 16:48:55 +1000 (AEST) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S1726327AbgDRGsq (ORCPT ); Sat, 18 Apr 2020 02:48:46 -0400 Received: from szxga07-in.huawei.com ([45.249.212.35]:54286 "EHLO huawei.com" rhost-flags-OK-OK-OK-FAIL) by vger.kernel.org with ESMTP id S1726048AbgDRGs0 (ORCPT ); Sat, 18 Apr 2020 02:48:26 -0400 Received: from DGGEMS402-HUB.china.huawei.com (unknown [172.30.72.60]) by Forcepoint Email with ESMTP id 2BD041BD36E313F0A85C; Sat, 18 Apr 2020 14:48:20 +0800 (CST) Received: from localhost.localdomain (10.69.192.56) by DGGEMS402-HUB.china.huawei.com (10.3.19.202) with Microsoft SMTP Server id 14.3.487.0; Sat, 18 Apr 2020 14:48:11 +0800 From: Huazhong Tan To: CC: , , , , , , Guojia Liao , Huazhong Tan Subject: [PATCH net-next 08/10] net: hns3: add debug information for flow table when failed Date: Sat, 18 Apr 2020 14:47:07 +0800 Message-ID: <1587192429-11463-9-git-send-email-tanhuazhong@huawei.com> X-Mailer: git-send-email 2.7.4 In-Reply-To: <1587192429-11463-1-git-send-email-tanhuazhong@huawei.com> References: <1587192429-11463-1-git-send-email-tanhuazhong@huawei.com> MIME-Version: 1.0 X-Originating-IP: [10.69.192.56] X-CFilter-Loop: Reflected Sender: netdev-owner@vger.kernel.org Precedence: bulk List-ID: X-Mailing-List: netdev@vger.kernel.org From: Guojia Liao Add some debug information for processing flow table if failed. 
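
For illustration (a standalone sketch with a made-up limit, not the driver's real messages): the style this patch moves toward is to have each failing check report the offending value and the allowed range, so the problem can be diagnosed from the log alone instead of from a generic failure message.

#include <stdio.h>
#include <errno.h>

#define EX_FD_RULE_NUM 128	/* invented table size for the example */

/* Error path reports both the value that was passed in and the limit,
 * rather than failing silently or with a generic message. */
static int ex_check_location(unsigned int location)
{
	if (location >= EX_FD_RULE_NUM) {
		fprintf(stderr,
			"failed to config fd rule, invalid location: %u, max is %u\n",
			location, EX_FD_RULE_NUM - 1);
		return -EINVAL;
	}
	return 0;
}

int main(void)
{
	return ex_check_location(200) ? 1 : 0;
}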
Signed-off-by: Guojia Liao Signed-off-by: Huazhong Tan --- .../ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 40 ++++++++++++++++------ 1 file changed, 29 insertions(+), 11 deletions(-) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index 74efd95..20216e1 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -5381,22 +5381,31 @@ static int hclge_fd_check_ext_tuple(struct hclge_dev *hdev, u32 *unused_tuple) { if (fs->flow_type & FLOW_EXT) { - if (fs->h_ext.vlan_etype) + if (fs->h_ext.vlan_etype) { + dev_err(&hdev->pdev->dev, "vlan-etype is not supported!\n"); return -EOPNOTSUPP; + } + if (!fs->h_ext.vlan_tci) *unused_tuple |= BIT(INNER_VLAN_TAG_FST); if (fs->m_ext.vlan_tci && - be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID) + be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID) { + dev_err(&hdev->pdev->dev, "failed to config vlan_tci, invalid vlan_tci: %u, max is %u.\n", + ntohs(fs->h_ext.vlan_tci), VLAN_N_VID - 1); return -EINVAL; + } } else { *unused_tuple |= BIT(INNER_VLAN_TAG_FST); } if (fs->flow_type & FLOW_MAC_EXT) { if (hdev->fd_cfg.fd_mode != - HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) + HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) { + dev_err(&hdev->pdev->dev, + "FLOW_MAC_EXT is not supported in current fd mode!\n"); return -EOPNOTSUPP; + } if (is_zero_ether_addr(fs->h_ext.h_dest)) *unused_tuple |= BIT(INNER_DST_MAC); @@ -5414,8 +5423,12 @@ static int hclge_fd_check_spec(struct hclge_dev *hdev, u32 flow_type; int ret = 0; - if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) + if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) { + dev_err(&hdev->pdev->dev, "failed to config fd rules, invalid rule location: %u, max is %u\n.", + fs->location, + hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1] - 1); return -EINVAL; + } if ((fs->flow_type & FLOW_EXT) && (fs->h_ext.data[0] != 0 || fs->h_ext.data[1] != 0)) { @@ -5457,11 +5470,16 @@ static int hclge_fd_check_spec(struct hclge_dev *hdev, unused_tuple); break; default: + dev_err(&hdev->pdev->dev, "unsupported protocol type, protocol type = %#x\n", + flow_type); return -EOPNOTSUPP; } - if (ret) + if (ret) { + dev_err(&hdev->pdev->dev, "failed to check flow union tuple, ret = %d\n", + ret); return ret; + } return hclge_fd_check_ext_tuple(hdev, fs, unused_tuple); } @@ -5729,22 +5747,22 @@ static int hclge_add_fd_entry(struct hnae3_handle *handle, u8 action; int ret; - if (!hnae3_dev_fd_supported(hdev)) + if (!hnae3_dev_fd_supported(hdev)) { + dev_err(&hdev->pdev->dev, "flow table director is not supported\n"); return -EOPNOTSUPP; + } if (!hdev->fd_en) { - dev_warn(&hdev->pdev->dev, - "Please enable flow director first\n"); + dev_err(&hdev->pdev->dev, + "please enable flow director first\n"); return -EOPNOTSUPP; } fs = (struct ethtool_rx_flow_spec *)&cmd->fs; ret = hclge_fd_check_spec(hdev, fs, &unused); - if (ret) { - dev_err(&hdev->pdev->dev, "Check fd spec failed\n"); + if (ret) return ret; - } if (fs->ring_cookie == RX_CLS_FLOW_DISC) { action = HCLGE_FD_ACTION_DROP_PACKET; From patchwork Sat Apr 18 06:47:08 2020 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: tanhuazhong X-Patchwork-Id: 1272588 X-Patchwork-Delegate: davem@davemloft.net Return-Path: X-Original-To: patchwork-incoming-netdev@ozlabs.org Delivered-To: patchwork-incoming-netdev@ozlabs.org Authentication-Results: ozlabs.org; spf=pass (sender SPF 
authorized) smtp.mailfrom=vger.kernel.org (client-ip=23.128.96.18; helo=vger.kernel.org; envelope-from=netdev-owner@vger.kernel.org; receiver=) Authentication-Results: ozlabs.org; dmarc=none (p=none dis=none) header.from=huawei.com Received: from vger.kernel.org (vger.kernel.org [23.128.96.18]) by ozlabs.org (Postfix) with ESMTP id 4943Td1Frsz9sSd for ; Sat, 18 Apr 2020 16:49:09 +1000 (AEST) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S1726447AbgDRGtF (ORCPT ); Sat, 18 Apr 2020 02:49:05 -0400 Received: from szxga07-in.huawei.com ([45.249.212.35]:54156 "EHLO huawei.com" rhost-flags-OK-OK-OK-FAIL) by vger.kernel.org with ESMTP id S1725970AbgDRGsV (ORCPT ); Sat, 18 Apr 2020 02:48:21 -0400 Received: from DGGEMS402-HUB.china.huawei.com (unknown [172.30.72.60]) by Forcepoint Email with ESMTP id 0ABA9546C26ADB52C2C9; Sat, 18 Apr 2020 14:48:20 +0800 (CST) Received: from localhost.localdomain (10.69.192.56) by DGGEMS402-HUB.china.huawei.com (10.3.19.202) with Microsoft SMTP Server id 14.3.487.0; Sat, 18 Apr 2020 14:48:11 +0800 From: Huazhong Tan To: CC: , , , , , , Yufeng Mo , Huazhong Tan Subject: [PATCH net-next 09/10] net: hns3: add support of dumping MAC reg in debugfs Date: Sat, 18 Apr 2020 14:47:08 +0800 Message-ID: <1587192429-11463-10-git-send-email-tanhuazhong@huawei.com> X-Mailer: git-send-email 2.7.4 In-Reply-To: <1587192429-11463-1-git-send-email-tanhuazhong@huawei.com> References: <1587192429-11463-1-git-send-email-tanhuazhong@huawei.com> MIME-Version: 1.0 X-Originating-IP: [10.69.192.56] X-CFilter-Loop: Reflected Sender: netdev-owner@vger.kernel.org Precedence: bulk List-ID: X-Mailing-List: netdev@vger.kernel.org From: Yufeng Mo This patch adds support of dumping MAC reg in debugfs, which will be helpful for debugging. 
Signed-off-by: Yufeng Mo Signed-off-by: Huazhong Tan --- drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c | 2 +- .../ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c | 113 +++++++++++++++++++++ 2 files changed, 114 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c index e1d8809..c934f32 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c @@ -270,7 +270,7 @@ static void hns3_dbg_help(struct hnae3_handle *h) " [igu egu ] [rpu ]", HNS3_DBG_BUF_LEN - strlen(printf_buf) - 1); strncat(printf_buf + strlen(printf_buf), - " [rtc] [ppp] [rcb] [tqp ]]\n", + " [rtc] [ppp] [rcb] [tqp ] [mac]]\n", HNS3_DBG_BUF_LEN - strlen(printf_buf) - 1); dev_info(&h->pdev->dev, "%s", printf_buf); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c index cfc9300..66c1ad3 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c @@ -173,6 +173,114 @@ static void hclge_dbg_dump_reg_common(struct hclge_dev *hdev, kfree(desc_src); } +static void hclge_dbg_dump_mac_enable_status(struct hclge_dev *hdev) +{ + struct hclge_config_mac_mode_cmd *req; + struct hclge_desc desc; + u32 loop_en; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true); + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to dump mac enable status, ret = %d\n", ret); + return; + } + + req = (struct hclge_config_mac_mode_cmd *)desc.data; + loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en); + + dev_info(&hdev->pdev->dev, "config_mac_trans_en: %#x\n", + hnae3_get_bit(loop_en, HCLGE_MAC_TX_EN_B)); + dev_info(&hdev->pdev->dev, "config_mac_rcv_en: %#x\n", + hnae3_get_bit(loop_en, HCLGE_MAC_RX_EN_B)); + dev_info(&hdev->pdev->dev, "config_pad_trans_en: %#x\n", + hnae3_get_bit(loop_en, HCLGE_MAC_PAD_TX_B)); + dev_info(&hdev->pdev->dev, "config_pad_rcv_en: %#x\n", + hnae3_get_bit(loop_en, HCLGE_MAC_PAD_RX_B)); + dev_info(&hdev->pdev->dev, "config_1588_trans_en: %#x\n", + hnae3_get_bit(loop_en, HCLGE_MAC_1588_TX_B)); + dev_info(&hdev->pdev->dev, "config_1588_rcv_en: %#x\n", + hnae3_get_bit(loop_en, HCLGE_MAC_1588_RX_B)); + dev_info(&hdev->pdev->dev, "config_mac_app_loop_en: %#x\n", + hnae3_get_bit(loop_en, HCLGE_MAC_APP_LP_B)); + dev_info(&hdev->pdev->dev, "config_mac_line_loop_en: %#x\n", + hnae3_get_bit(loop_en, HCLGE_MAC_LINE_LP_B)); + dev_info(&hdev->pdev->dev, "config_mac_fcs_tx_en: %#x\n", + hnae3_get_bit(loop_en, HCLGE_MAC_FCS_TX_B)); + dev_info(&hdev->pdev->dev, "config_mac_rx_oversize_truncate_en: %#x\n", + hnae3_get_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B)); + dev_info(&hdev->pdev->dev, "config_mac_rx_fcs_strip_en: %#x\n", + hnae3_get_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B)); + dev_info(&hdev->pdev->dev, "config_mac_rx_fcs_en: %#x\n", + hnae3_get_bit(loop_en, HCLGE_MAC_RX_FCS_B)); + dev_info(&hdev->pdev->dev, "config_mac_tx_under_min_err_en: %#x\n", + hnae3_get_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B)); + dev_info(&hdev->pdev->dev, "config_mac_tx_oversize_truncate_en: %#x\n", + hnae3_get_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B)); +} + +static void hclge_dbg_dump_mac_frame_size(struct hclge_dev *hdev) +{ + struct hclge_config_max_frm_size_cmd *req; + struct hclge_desc desc; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, 
true); + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to dump mac frame size, ret = %d\n", ret); + return; + } + + req = (struct hclge_config_max_frm_size_cmd *)desc.data; + + dev_info(&hdev->pdev->dev, "max_frame_size: %u\n", + le16_to_cpu(req->max_frm_size)); + dev_info(&hdev->pdev->dev, "min_frame_size: %u\n", req->min_frm_size); +} + +static void hclge_dbg_dump_mac_speed_duplex(struct hclge_dev *hdev) +{ +#define HCLGE_MAC_SPEED_SHIFT 0 +#define HCLGE_MAC_SPEED_MASK GENMASK(5, 0) +#define HCLGE_MAC_DUPLEX_SHIFT 7 + + struct hclge_config_mac_speed_dup_cmd *req; + struct hclge_desc desc; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, true); + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to dump mac speed duplex, ret = %d\n", ret); + return; + } + + req = (struct hclge_config_mac_speed_dup_cmd *)desc.data; + + dev_info(&hdev->pdev->dev, "speed: %#lx\n", + hnae3_get_field(req->speed_dup, HCLGE_MAC_SPEED_MASK, + HCLGE_MAC_SPEED_SHIFT)); + dev_info(&hdev->pdev->dev, "duplex: %#x\n", + hnae3_get_bit(req->speed_dup, HCLGE_MAC_DUPLEX_SHIFT)); +} + +static void hclge_dbg_dump_mac(struct hclge_dev *hdev) +{ + hclge_dbg_dump_mac_enable_status(hdev); + + hclge_dbg_dump_mac_frame_size(hdev); + + hclge_dbg_dump_mac_speed_duplex(hdev); +} + static void hclge_dbg_dump_dcb(struct hclge_dev *hdev, const char *cmd_buf) { struct device *dev = &hdev->pdev->dev; @@ -304,6 +412,11 @@ static void hclge_dbg_dump_reg_cmd(struct hclge_dev *hdev, const char *cmd_buf) } } + if (strncmp(cmd_buf, "mac", strlen("mac")) == 0) { + hclge_dbg_dump_mac(hdev); + has_dump = true; + } + if (strncmp(cmd_buf, "dcb", 3) == 0) { hclge_dbg_dump_dcb(hdev, &cmd_buf[sizeof("dcb")]); has_dump = true; From patchwork Sat Apr 18 06:47:09 2020 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: tanhuazhong X-Patchwork-Id: 1272581 X-Patchwork-Delegate: davem@davemloft.net Return-Path: X-Original-To: patchwork-incoming-netdev@ozlabs.org Delivered-To: patchwork-incoming-netdev@ozlabs.org Authentication-Results: ozlabs.org; spf=pass (sender SPF authorized) smtp.mailfrom=vger.kernel.org (client-ip=23.128.96.18; helo=vger.kernel.org; envelope-from=netdev-owner@vger.kernel.org; receiver=) Authentication-Results: ozlabs.org; dmarc=none (p=none dis=none) header.from=huawei.com Received: from vger.kernel.org (vger.kernel.org [23.128.96.18]) by ozlabs.org (Postfix) with ESMTP id 4943Sr3pmfz9sSd for ; Sat, 18 Apr 2020 16:48:28 +1000 (AEST) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S1726024AbgDRGs1 (ORCPT ); Sat, 18 Apr 2020 02:48:27 -0400 Received: from szxga07-in.huawei.com ([45.249.212.35]:54178 "EHLO huawei.com" rhost-flags-OK-OK-OK-FAIL) by vger.kernel.org with ESMTP id S1725983AbgDRGsX (ORCPT ); Sat, 18 Apr 2020 02:48:23 -0400 Received: from DGGEMS402-HUB.china.huawei.com (unknown [172.30.72.60]) by Forcepoint Email with ESMTP id 10F22EB9EB761EFE19EE; Sat, 18 Apr 2020 14:48:20 +0800 (CST) Received: from localhost.localdomain (10.69.192.56) by DGGEMS402-HUB.china.huawei.com (10.3.19.202) with Microsoft SMTP Server id 14.3.487.0; Sat, 18 Apr 2020 14:48:12 +0800 From: Huazhong Tan To: CC: , , , , , , Yufeng Mo , Huazhong Tan Subject: [PATCH net-next 10/10] net: hns3: add trace event support for PF/VF mailbox Date: Sat, 18 Apr 2020 14:47:09 +0800 Message-ID: <1587192429-11463-11-git-send-email-tanhuazhong@huawei.com> 
X-Mailer: git-send-email 2.7.4 In-Reply-To: <1587192429-11463-1-git-send-email-tanhuazhong@huawei.com> References: <1587192429-11463-1-git-send-email-tanhuazhong@huawei.com> MIME-Version: 1.0 X-Originating-IP: [10.69.192.56] X-CFilter-Loop: Reflected Sender: netdev-owner@vger.kernel.org Precedence: bulk List-ID: X-Mailing-List: netdev@vger.kernel.org From: Yufeng Mo This patch adds trace event support for PF/VF mailbox. Signed-off-by: Yufeng Mo Signed-off-by: Huazhong Tan --- .../net/ethernet/hisilicon/hns3/hns3pf/Makefile | 1 + .../net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c | 7 ++ .../ethernet/hisilicon/hns3/hns3pf/hclge_trace.h | 87 ++++++++++++++++++++++ .../net/ethernet/hisilicon/hns3/hns3vf/Makefile | 1 + .../ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c | 7 ++ .../ethernet/hisilicon/hns3/hns3vf/hclgevf_trace.h | 87 ++++++++++++++++++++++ 6 files changed, 190 insertions(+) create mode 100644 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_trace.h create mode 100644 drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_trace.h diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile b/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile index 0fb61d4..6c28c8f 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile @@ -4,6 +4,7 @@ # ccflags-y := -I $(srctree)/drivers/net/ethernet/hisilicon/hns3 +ccflags-y += -I $(srctree)/$(src) obj-$(CONFIG_HNS3_HCLGE) += hclge.o hclge-objs = hclge_main.o hclge_cmd.o hclge_mdio.o hclge_tm.o hclge_mbx.o hclge_err.o hclge_debugfs.o diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c index 7f24fcb..103c2ec 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c @@ -5,6 +5,9 @@ #include "hclge_mbx.h" #include "hnae3.h" +#define CREATE_TRACE_POINTS +#include "hclge_trace.h" + static u16 hclge_errno_to_resp(int errno) { return abs(errno); @@ -90,6 +93,8 @@ static int hclge_send_mbx_msg(struct hclge_vport *vport, u8 *msg, u16 msg_len, memcpy(&resp_pf_to_vf->msg.vf_mbx_msg_code, msg, msg_len); + trace_hclge_pf_mbx_send(hdev, resp_pf_to_vf); + status = hclge_cmd_send(&hdev->hw, &desc, 1); if (status) dev_err(&hdev->pdev->dev, @@ -674,6 +679,8 @@ void hclge_mbx_handler(struct hclge_dev *hdev) vport = &hdev->vport[req->mbx_src_vfid]; + trace_hclge_pf_mbx_get(hdev, req); + switch (req->msg.code) { case HCLGE_MBX_MAP_RING_TO_VECTOR: ret = hclge_map_unmap_ring_to_vf_vector(vport, true, diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_trace.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_trace.h new file mode 100644 index 0000000..5b0b71b --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_trace.h @@ -0,0 +1,87 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* Copyright (c) 2018-2020 Hisilicon Limited. 
*/ + +/* This must be outside ifdef _HCLGE_TRACE_H */ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM hns3 + +#if !defined(_HCLGE_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ) +#define _HCLGE_TRACE_H_ + +#include + +#define PF_GET_MBX_LEN (sizeof(struct hclge_mbx_vf_to_pf_cmd) / sizeof(u32)) +#define PF_SEND_MBX_LEN (sizeof(struct hclge_mbx_pf_to_vf_cmd) / sizeof(u32)) + +TRACE_EVENT(hclge_pf_mbx_get, + TP_PROTO( + struct hclge_dev *hdev, + struct hclge_mbx_vf_to_pf_cmd *req), + TP_ARGS(hdev, req), + + TP_STRUCT__entry( + __field(u8, vfid) + __field(u8, code) + __field(u8, subcode) + __string(pciname, pci_name(hdev->pdev)) + __string(devname, &hdev->vport[0].nic.kinfo.netdev->name) + __array(u32, mbx_data, PF_GET_MBX_LEN) + ), + + TP_fast_assign( + __entry->vfid = req->mbx_src_vfid; + __entry->code = req->msg.code; + __entry->subcode = req->msg.subcode; + __assign_str(pciname, pci_name(hdev->pdev)); + __assign_str(devname, &hdev->vport[0].nic.kinfo.netdev->name); + memcpy(__entry->mbx_data, req, + sizeof(struct hclge_mbx_vf_to_pf_cmd)); + ), + + TP_printk( + "%s %s vfid:%u code:%u subcode:%u data:%s", + __get_str(pciname), __get_str(devname), __entry->vfid, + __entry->code, __entry->subcode, + __print_array(__entry->mbx_data, PF_GET_MBX_LEN, sizeof(u32)) + ) +); + +TRACE_EVENT(hclge_pf_mbx_send, + TP_PROTO( + struct hclge_dev *hdev, + struct hclge_mbx_pf_to_vf_cmd *req), + TP_ARGS(hdev, req), + + TP_STRUCT__entry( + __field(u8, vfid) + __field(u16, code) + __string(pciname, pci_name(hdev->pdev)) + __string(devname, &hdev->vport[0].nic.kinfo.netdev->name) + __array(u32, mbx_data, PF_SEND_MBX_LEN) + ), + + TP_fast_assign( + __entry->vfid = req->dest_vfid; + __entry->code = req->msg.code; + __assign_str(pciname, pci_name(hdev->pdev)); + __assign_str(devname, &hdev->vport[0].nic.kinfo.netdev->name); + memcpy(__entry->mbx_data, req, + sizeof(struct hclge_mbx_pf_to_vf_cmd)); + ), + + TP_printk( + "%s %s vfid:%u code:%u data:%s", + __get_str(pciname), __get_str(devname), __entry->vfid, + __entry->code, + __print_array(__entry->mbx_data, PF_SEND_MBX_LEN, sizeof(u32)) + ) +); + +#endif /* _HCLGE_TRACE_H_ */ + +/* This must be outside ifdef _HCLGE_TRACE_H */ +#undef TRACE_INCLUDE_PATH +#define TRACE_INCLUDE_PATH . +#undef TRACE_INCLUDE_FILE +#define TRACE_INCLUDE_FILE hclge_trace +#include diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/Makefile b/drivers/net/ethernet/hisilicon/hns3/hns3vf/Makefile index 53804d9..2c26ea6 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/Makefile +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/Makefile @@ -4,6 +4,7 @@ # ccflags-y := -I $(srctree)/drivers/net/ethernet/hisilicon/hns3 +ccflags-y += -I $(srctree)/$(src) obj-$(CONFIG_HNS3_HCLGEVF) += hclgevf.o hclgevf-objs = hclgevf_main.o hclgevf_cmd.o hclgevf_mbx.o diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c index 9b81549..5b2dcd9 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c @@ -5,6 +5,9 @@ #include "hclgevf_main.h" #include "hnae3.h" +#define CREATE_TRACE_POINTS +#include "hclgevf_trace.h" + static int hclgevf_resp_to_errno(u16 resp_code) { return resp_code ? 
-resp_code : 0; @@ -106,6 +109,8 @@ int hclgevf_send_mbx_msg(struct hclgevf_dev *hdev, memcpy(&req->msg, send_msg, sizeof(struct hclge_vf_to_pf_msg)); + trace_hclge_vf_mbx_send(hdev, req); + /* synchronous send */ if (need_resp) { mutex_lock(&hdev->mbx_resp.mbx_mutex); @@ -179,6 +184,8 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev) continue; } + trace_hclge_vf_mbx_get(hdev, req); + /* synchronous messages are time critical and need preferential * treatment. Therefore, we need to acknowledge all the sync * responses as quickly as possible so that waiting tasks do not diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_trace.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_trace.h new file mode 100644 index 0000000..e4bfb61 --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_trace.h @@ -0,0 +1,87 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* Copyright (c) 2018-2019 Hisilicon Limited. */ + +/* This must be outside ifdef _HCLGEVF_TRACE_H */ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM hns3 + +#if !defined(_HCLGEVF_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ) +#define _HCLGEVF_TRACE_H_ + +#include + +#define VF_GET_MBX_LEN (sizeof(struct hclge_mbx_pf_to_vf_cmd) / sizeof(u32)) +#define VF_SEND_MBX_LEN (sizeof(struct hclge_mbx_vf_to_pf_cmd) / sizeof(u32)) + +TRACE_EVENT(hclge_vf_mbx_get, + TP_PROTO( + struct hclgevf_dev *hdev, + struct hclge_mbx_pf_to_vf_cmd *req), + TP_ARGS(hdev, req), + + TP_STRUCT__entry( + __field(u8, vfid) + __field(u16, code) + __string(pciname, pci_name(hdev->pdev)) + __string(devname, &hdev->nic.kinfo.netdev->name) + __array(u32, mbx_data, VF_GET_MBX_LEN) + ), + + TP_fast_assign( + __entry->vfid = req->dest_vfid; + __entry->code = req->msg.code; + __assign_str(pciname, pci_name(hdev->pdev)); + __assign_str(devname, &hdev->nic.kinfo.netdev->name); + memcpy(__entry->mbx_data, req, + sizeof(struct hclge_mbx_pf_to_vf_cmd)); + ), + + TP_printk( + "%s %s vfid:%u code:%u data:%s", + __get_str(pciname), __get_str(devname), __entry->vfid, + __entry->code, + __print_array(__entry->mbx_data, VF_GET_MBX_LEN, sizeof(u32)) + ) +); + +TRACE_EVENT(hclge_vf_mbx_send, + TP_PROTO( + struct hclgevf_dev *hdev, + struct hclge_mbx_vf_to_pf_cmd *req), + TP_ARGS(hdev, req), + + TP_STRUCT__entry( + __field(u8, vfid) + __field(u8, code) + __field(u8, subcode) + __string(pciname, pci_name(hdev->pdev)) + __string(devname, &hdev->nic.kinfo.netdev->name) + __array(u32, mbx_data, VF_SEND_MBX_LEN) + ), + + TP_fast_assign( + __entry->vfid = req->mbx_src_vfid; + __entry->code = req->msg.code; + __entry->subcode = req->msg.subcode; + __assign_str(pciname, pci_name(hdev->pdev)); + __assign_str(devname, &hdev->nic.kinfo.netdev->name); + memcpy(__entry->mbx_data, req, + sizeof(struct hclge_mbx_vf_to_pf_cmd)); + ), + + TP_printk( + "%s %s vfid:%u code:%u subcode:%u data:%s", + __get_str(pciname), __get_str(devname), __entry->vfid, + __entry->code, __entry->subcode, + __print_array(__entry->mbx_data, VF_SEND_MBX_LEN, sizeof(u32)) + ) +); + +#endif /* _HCLGEVF_TRACE_H_ */ + +/* This must be outside ifdef _HCLGEVF_TRACE_H */ +#undef TRACE_INCLUDE_PATH +#define TRACE_INCLUDE_PATH . +#undef TRACE_INCLUDE_FILE +#define TRACE_INCLUDE_FILE hclgevf_trace +#include
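The "mac" sub-command added in the debugfs patch above is driven through the existing hns3 debugfs "cmd" interface, and the dumped values are emitted with dev_info(), so they appear in the kernel log rather than in the debugfs file itself. Below is a minimal user-space sketch of how the new command might be exercised once the series is applied; the per-device directory layout under /sys/kernel/debug/hns3/ (named after the PCI address) and the default PCI id are assumptions for illustration, not taken from the patch.

/*
 * Hypothetical helper, not part of the patch: write "dump reg mac" to the
 * hns3 debugfs "cmd" file so the driver prints the MAC enable status,
 * frame sizes and speed/duplex via dev_info() (visible in dmesg).
 * Assumes debugfs is mounted at /sys/kernel/debug and root privileges.
 */
#include <stdio.h>
#include <string.h>
#include <errno.h>

int main(int argc, char **argv)
{
	/* PCI id of the hns3 PF, taken from the command line (example value). */
	const char *pci_id = (argc > 1) ? argv[1] : "0000:7d:00.0";
	char path[256];
	FILE *fp;

	snprintf(path, sizeof(path), "/sys/kernel/debug/hns3/%s/cmd", pci_id);

	fp = fopen(path, "w");
	if (!fp) {
		fprintf(stderr, "open %s: %s\n", path, strerror(errno));
		return 1;
	}

	/* Same string matched by hclge_dbg_dump_reg_cmd() in the patch. */
	fputs("dump reg mac\n", fp);
	fclose(fp);

	printf("command written, check 'dmesg' for the MAC dump\n");
	return 0;
}

Built with a plain C compiler and run as root, this writes the command string; the MAC enable, frame-size and speed/duplex values are then read from the kernel log.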
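The mailbox trace events in hclge_trace.h and hclgevf_trace.h are registered under TRACE_SYSTEM "hns3", and the Makefile change adds -I $(srctree)/$(src) because TRACE_INCLUDE_PATH is set to ".", so the trace infrastructure can locate the headers next to the driver sources. Once the patched driver is loaded, the events can be consumed through tracefs like any other tracepoint. The sketch below, which is not part of the patch, enables the two PF-side events and streams the formatted records; the tracefs mount point used here (/sys/kernel/debug/tracing) is an assumption and may be /sys/kernel/tracing on some systems.

/*
 * Hypothetical helper, not part of the patch: enable the PF mailbox trace
 * events added by this series and tail the trace buffer.  The event names
 * (hns3/hclge_pf_mbx_get, hns3/hclge_pf_mbx_send) come from hclge_trace.h;
 * the tracefs path is an assumption.  Requires root.
 */
#include <stdio.h>
#include <string.h>
#include <errno.h>

#define TRACEFS "/sys/kernel/debug/tracing"

static int write_str(const char *path, const char *val)
{
	FILE *fp = fopen(path, "w");

	if (!fp) {
		fprintf(stderr, "open %s: %s\n", path, strerror(errno));
		return -1;
	}
	fputs(val, fp);
	fclose(fp);
	return 0;
}

int main(void)
{
	char line[1024];
	FILE *pipe;

	/* enable both PF-side mailbox events */
	if (write_str(TRACEFS "/events/hns3/hclge_pf_mbx_get/enable", "1") ||
	    write_str(TRACEFS "/events/hns3/hclge_pf_mbx_send/enable", "1"))
		return 1;

	/* stream the formatted events; blocks until mailbox traffic arrives */
	pipe = fopen(TRACEFS "/trace_pipe", "r");
	if (!pipe) {
		fprintf(stderr, "open trace_pipe: %s\n", strerror(errno));
		return 1;
	}

	while (fgets(line, sizeof(line), pipe))
		fputs(line, stdout);

	fclose(pipe);
	return 0;
}

The VF-side events (hclge_vf_mbx_get and hclge_vf_mbx_send) can be enabled the same way by adjusting the event paths.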