From patchwork Fri Apr 24 02:23:06 2020
X-Patchwork-Submitter: tanhuazhong
X-Patchwork-Id: 1276103
X-Patchwork-Delegate: davem@davemloft.net
From: Huazhong Tan
Subject: [PATCH net-next 1/8] net: hns3: refine for unicast MAC VLAN space management
Date: Fri, 24 Apr 2020 10:23:06 +0800
Message-ID: <1587694993-25183-2-git-send-email-tanhuazhong@huawei.com>
In-Reply-To: <1587694993-25183-1-git-send-email-tanhuazhong@huawei.com>
References: <1587694993-25183-1-git-send-email-tanhuazhong@huawei.com>

From: Jian Shen

Currently, the firmware helps manage the unicast MAC VLAN (UMV) table space for each PF. The PF just needs to tell the firmware how much space it wants when initializing, and does not need to free it when uninitializing. So this patch removes the UMV space free handling, and removes the forward declaration of hclge_set_umv_space() by defining hclge_init_umv_space() after it.
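For reference, the way the allocated UMV space ends up being split between the vports can be illustrated with the small standalone sketch below. It mirrors the arithmetic used in hclge_init_umv_space() in the diff that follows, but the helper, struct and example numbers are illustrative only and are not part of the driver.

/* Illustrative sketch (not driver code): how the UMV space allocated by
 * the firmware is split into a per-vport private quota plus a pool shared
 * by the PF and its VFs, mirroring the arithmetic in hclge_init_umv_space().
 */
#include <stdio.h>

struct umv_split {
	unsigned int priv_size;		/* private entries per vport */
	unsigned int share_size;	/* entries shared by PF and its VFs */
};

/* Dividing by (num_alloc_vport + 1) reserves one extra quota-sized chunk;
 * that chunk plus the division remainder forms the shared pool, so
 * num_alloc_vport * priv_size + share_size == allocated.
 */
static struct umv_split umv_split_space(unsigned int allocated,
					unsigned int num_alloc_vport)
{
	struct umv_split s;

	s.priv_size = allocated / (num_alloc_vport + 1);
	s.share_size = s.priv_size + allocated % (num_alloc_vport + 1);
	return s;
}

int main(void)
{
	/* example numbers only: 256 allocated entries, 1 PF + 6 VFs */
	struct umv_split s = umv_split_space(256, 7);

	printf("priv=%u share=%u\n", s.priv_size, s.share_size);
	return 0;
}

In the driver, these two values correspond to hdev->priv_umv_size and hdev->share_umv_size.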
Signed-off-by: Jian Shen Signed-off-by: Huazhong Tan --- .../ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 72 ++++++++-------------- 1 file changed, 24 insertions(+), 48 deletions(-) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index 0618f22..ccf269a 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -62,8 +62,6 @@ static int hclge_init_vlan_config(struct hclge_dev *hdev); static void hclge_sync_vlan_filter(struct hclge_dev *hdev); static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev); static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle); -static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size, - u16 *allocated_size, bool is_alloc); static void hclge_rfs_filter_expire(struct hclge_dev *hdev); static void hclge_clear_arfs_rules(struct hnae3_handle *handle); static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev, @@ -7196,50 +7194,6 @@ static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport, return cfg_status; } -static int hclge_init_umv_space(struct hclge_dev *hdev) -{ - u16 allocated_size = 0; - int ret; - - ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size, &allocated_size, - true); - if (ret) - return ret; - - if (allocated_size < hdev->wanted_umv_size) - dev_warn(&hdev->pdev->dev, - "Alloc umv space failed, want %u, get %u\n", - hdev->wanted_umv_size, allocated_size); - - mutex_init(&hdev->umv_mutex); - hdev->max_umv_size = allocated_size; - /* divide max_umv_size by (hdev->num_req_vfs + 2), in order to - * preserve some unicast mac vlan table entries shared by pf - * and its vfs. - */ - hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_req_vfs + 2); - hdev->share_umv_size = hdev->priv_umv_size + - hdev->max_umv_size % (hdev->num_req_vfs + 2); - - return 0; -} - -static int hclge_uninit_umv_space(struct hclge_dev *hdev) -{ - int ret; - - if (hdev->max_umv_size > 0) { - ret = hclge_set_umv_space(hdev, hdev->max_umv_size, NULL, - false); - if (ret) - return ret; - hdev->max_umv_size = 0; - } - mutex_destroy(&hdev->umv_mutex); - - return 0; -} - static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size, u16 *allocated_size, bool is_alloc) { @@ -7268,6 +7222,30 @@ static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size, return 0; } +static int hclge_init_umv_space(struct hclge_dev *hdev) +{ + u16 allocated_size = 0; + int ret; + + ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size, &allocated_size, + true); + if (ret) + return ret; + + if (allocated_size < hdev->wanted_umv_size) + dev_warn(&hdev->pdev->dev, + "failed to alloc umv space, want %u, get %u\n", + hdev->wanted_umv_size, allocated_size); + + mutex_init(&hdev->umv_mutex); + hdev->max_umv_size = allocated_size; + hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_alloc_vport + 1); + hdev->share_umv_size = hdev->priv_umv_size + + hdev->max_umv_size % (hdev->num_alloc_vport + 1); + + return 0; +} + static void hclge_reset_umv_space(struct hclge_dev *hdev) { struct hclge_vport *vport; @@ -10041,8 +10019,6 @@ static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev) if (mac->phydev) mdiobus_unregister(mac->mdio_bus); - hclge_uninit_umv_space(hdev); - /* Disable MISC vector(vector0) */ hclge_enable_vector(&hdev->misc_vector, false); synchronize_irq(hdev->misc_vector.vector_irq); From patchwork Fri Apr 24 02:23:07 2020 Content-Type: text/plain; charset="utf-8" MIME-Version: 
1.0
X-Patchwork-Submitter: tanhuazhong
X-Patchwork-Id: 1276104
X-Patchwork-Delegate: davem@davemloft.net
From: Huazhong Tan
Subject: [PATCH net-next 2/8] net: hns3: remove unnecessary parameter 'is_alloc' in hclge_set_umv_space()
Date: Fri, 24 Apr 2020 10:23:07 +0800
Message-ID: <1587694993-25183-3-git-send-email-tanhuazhong@huawei.com>
In-Reply-To: <1587694993-25183-1-git-send-email-tanhuazhong@huawei.com>
References: <1587694993-25183-1-git-send-email-tanhuazhong@huawei.com>

From: Jian Shen

Since hclge_set_umv_space() is only called by hclge_init_umv_space(), parameter 'is_alloc' is redundant, so remove it.

Signed-off-by: Jian Shen
Signed-off-by: Huazhong Tan
---
 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 14 +++++---------
 1 file changed, 5 insertions(+), 9 deletions(-)

diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index ccf269a..fe6e60a 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -7195,7 +7195,7 @@ static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport,
 }
 
 static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
-			       u16 *allocated_size, bool is_alloc)
+			       u16 *allocated_size)
 {
 	struct hclge_umv_spc_alc_cmd *req;
 	struct hclge_desc desc;
@@ -7203,20 +7203,17 @@ static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
 
 	req = (struct hclge_umv_spc_alc_cmd *)desc.data;
 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_ALLOCATE, false);
-	if (!is_alloc)
-		hnae3_set_bit(req->allocate, HCLGE_UMV_SPC_ALC_B, 1);
 
 	req->space_size = cpu_to_le32(space_size);
 
 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
 	if (ret) {
-		dev_err(&hdev->pdev->dev,
-			"%s umv space failed for cmd_send, ret =%d\n",
-			is_alloc ? "allocate" : "free", ret);
+		dev_err(&hdev->pdev->dev, "failed to set umv space, ret = %d\n",
+			ret);
 		return ret;
 	}
 
-	if (is_alloc && allocated_size)
+	if (allocated_size)
 		*allocated_size = le32_to_cpu(desc.data[1]);
 
 	return 0;
@@ -7227,8 +7224,7 @@ static int hclge_init_umv_space(struct hclge_dev *hdev)
 	u16 allocated_size = 0;
 	int ret;
 
-	ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size, &allocated_size,
-				  true);
+	ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size, &allocated_size);
 	if (ret)
 		return ret;

From patchwork Fri Apr 24 02:23:08 2020
X-Patchwork-Submitter: tanhuazhong
X-Patchwork-Id: 1276111
X-Patchwork-Delegate: davem@davemloft.net
From: Huazhong Tan
Subject: [PATCH net-next 3/8] net: hns3: replace num_req_vfs with num_alloc_vport in hclge_reset_umv_space()
Date: Fri, 24 Apr 2020 10:23:08 +0800
Message-ID: <1587694993-25183-4-git-send-email-tanhuazhong@huawei.com>
In-Reply-To: <1587694993-25183-1-git-send-email-tanhuazhong@huawei.com>
References: <1587694993-25183-1-git-send-email-tanhuazhong@huawei.com>

From: Jian Shen

To match the calculation elsewhere, replace num_req_vfs with num_alloc_vport in hclge_reset_umv_space().
Signed-off-by: Jian Shen
Signed-off-by: Huazhong Tan
---
 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index fe6e60a..a268004 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -7254,7 +7254,7 @@ static void hclge_reset_umv_space(struct hclge_dev *hdev)
 
 	mutex_lock(&hdev->umv_mutex);
 	hdev->share_umv_size = hdev->priv_umv_size +
-			hdev->max_umv_size % (hdev->num_req_vfs + 2);
+			hdev->max_umv_size % (hdev->num_alloc_vport + 1);
 	mutex_unlock(&hdev->umv_mutex);
 }

From patchwork Fri Apr 24 02:23:09 2020
X-Patchwork-Submitter: tanhuazhong
X-Patchwork-Id: 1276107
X-Patchwork-Delegate: davem@davemloft.net
From: Huazhong Tan
Subject: [PATCH net-next 4/8] net: hns3: refactor the MAC address configure
Date: Fri, 24 Apr 2020 10:23:09 +0800
Message-ID: <1587694993-25183-5-git-send-email-tanhuazhong@huawei.com>
In-Reply-To: <1587694993-25183-1-git-send-email-tanhuazhong@huawei.com>
References: <1587694993-25183-1-git-send-email-tanhuazhong@huawei.com>

From: Jian Shen

Currently, the HNS3 driver syncs and unsyncs MAC addresses in hns3_set_rx_mode(). For the PF, it adds and deletes MAC addresses directly in the path of dev_set_rx_mode(); if this fails, it will not retry until the next call of hns3_set_rx_mode(). Also, if the same address is requested to be added and removed many times within a short interval, each request has to be handled one by one and cannot be merged. For the VF, it sends mailbox messages to the PF to request adding or deleting a MAC address in the path of hns3_set_rx_mode(), regardless of whether the address was configured successfully. This patch refines this by recording the MAC addresses in hns3_set_rx_mode(), and updating the hardware MAC table in the service task.
If failed, it will retry by the next calling of periodical service task. It also uses some state to mark the state of each MAC address in the MAC list, which can help merge configure request for a same address. With these changes, when global reset or IMP reset occurs, we can restore the MAC table with the MAC list. Signed-off-by: Jian Shen Signed-off-by: Huazhong Tan --- drivers/net/ethernet/hisilicon/hns3/hns3_enet.c | 79 +-- .../ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 592 +++++++++++++++++---- .../ethernet/hisilicon/hns3/hns3pf/hclge_main.h | 27 +- .../net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c | 42 +- .../ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c | 313 ++++++++++- .../ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h | 25 + 6 files changed, 860 insertions(+), 218 deletions(-) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c index ac3a48a..341e8b5 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c @@ -40,7 +40,6 @@ } while (0) static void hns3_clear_all_ring(struct hnae3_handle *h, bool force); -static void hns3_remove_hw_addr(struct net_device *netdev); static const char hns3_driver_name[] = "hns3"; static const char hns3_driver_string[] = @@ -548,6 +547,13 @@ static int hns3_nic_uc_unsync(struct net_device *netdev, { struct hnae3_handle *h = hns3_get_handle(netdev); + /* need ignore the request of removing device address, because + * we store the device address and other addresses of uc list + * in the function's mac filter list. + */ + if (ether_addr_equal(addr, netdev->dev_addr)) + return 0; + if (h->ae_algo->ops->rm_uc_addr) return h->ae_algo->ops->rm_uc_addr(h, addr); @@ -3907,9 +3913,11 @@ static int hns3_init_mac_addr(struct net_device *netdev) eth_hw_addr_random(netdev); dev_warn(priv->dev, "using random MAC address %pM\n", netdev->dev_addr); - } else { + } else if (!ether_addr_equal(netdev->dev_addr, mac_addr_temp)) { ether_addr_copy(netdev->dev_addr, mac_addr_temp); ether_addr_copy(netdev->perm_addr, mac_addr_temp); + } else { + return 0; } if (h->ae_algo->ops->set_mac_addr) @@ -4119,8 +4127,6 @@ static void hns3_client_uninit(struct hnae3_handle *handle, bool reset) struct hns3_nic_priv *priv = netdev_priv(netdev); int ret; - hns3_remove_hw_addr(netdev); - if (netdev->reg_state != NETREG_UNINITIALIZED) unregister_netdev(netdev); @@ -4191,56 +4197,6 @@ static int hns3_client_setup_tc(struct hnae3_handle *handle, u8 tc) return hns3_nic_set_real_num_queue(ndev); } -static int hns3_recover_hw_addr(struct net_device *ndev) -{ - struct netdev_hw_addr_list *list; - struct netdev_hw_addr *ha, *tmp; - int ret = 0; - - netif_addr_lock_bh(ndev); - /* go through and sync uc_addr entries to the device */ - list = &ndev->uc; - list_for_each_entry_safe(ha, tmp, &list->list, list) { - ret = hns3_nic_uc_sync(ndev, ha->addr); - if (ret) - goto out; - } - - /* go through and sync mc_addr entries to the device */ - list = &ndev->mc; - list_for_each_entry_safe(ha, tmp, &list->list, list) { - ret = hns3_nic_mc_sync(ndev, ha->addr); - if (ret) - goto out; - } - -out: - netif_addr_unlock_bh(ndev); - return ret; -} - -static void hns3_remove_hw_addr(struct net_device *netdev) -{ - struct netdev_hw_addr_list *list; - struct netdev_hw_addr *ha, *tmp; - - hns3_nic_uc_unsync(netdev, netdev->dev_addr); - - netif_addr_lock_bh(netdev); - /* go through and unsync uc_addr entries to the device */ - list = &netdev->uc; - list_for_each_entry_safe(ha, tmp, &list->list, list) - 
hns3_nic_uc_unsync(netdev, ha->addr); - - /* go through and unsync mc_addr entries to the device */ - list = &netdev->mc; - list_for_each_entry_safe(ha, tmp, &list->list, list) - if (ha->refcount > 1) - hns3_nic_mc_unsync(netdev, ha->addr); - - netif_addr_unlock_bh(netdev); -} - static void hns3_clear_tx_ring(struct hns3_enet_ring *ring) { while (ring->next_to_clean != ring->next_to_use) { @@ -4411,10 +4367,8 @@ static int hns3_reset_notify_down_enet(struct hnae3_handle *handle) * from table space. Hence, for function reset software intervention is * required to delete the entries */ - if (hns3_dev_ongoing_func_reset(ae_dev)) { - hns3_remove_hw_addr(ndev); + if (hns3_dev_ongoing_func_reset(ae_dev)) hns3_del_all_fd_rules(ndev, false); - } if (!netif_running(ndev)) return 0; @@ -4482,6 +4436,9 @@ static int hns3_reset_notify_init_enet(struct hnae3_handle *handle) goto err_init_irq_fail; } + if (!hns3_is_phys_func(handle->pdev)) + hns3_init_mac_addr(netdev); + ret = hns3_client_start(handle); if (ret) { dev_err(priv->dev, "hns3_client_start fail! ret=%d\n", ret); @@ -4513,14 +4470,6 @@ static int hns3_reset_notify_restore_enet(struct hnae3_handle *handle) bool vlan_filter_enable; int ret; - ret = hns3_init_mac_addr(netdev); - if (ret) - return ret; - - ret = hns3_recover_hw_addr(netdev); - if (ret) - return ret; - ret = hns3_update_promisc_mode(netdev, handle->netdev_flags); if (ret) return ret; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index a268004..c3205ae 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -68,6 +68,8 @@ static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev, unsigned long *addr); static int hclge_set_default_loopback(struct hclge_dev *hdev); +static void hclge_sync_mac_table(struct hclge_dev *hdev); + static struct hnae3_ae_algo ae_algo; static struct workqueue_struct *hclge_wq; @@ -1685,6 +1687,7 @@ static int hclge_alloc_vport(struct hclge_dev *hdev) INIT_LIST_HEAD(&vport->vlan_list); INIT_LIST_HEAD(&vport->uc_mac_list); INIT_LIST_HEAD(&vport->mc_mac_list); + spin_lock_init(&vport->mac_list_lock); if (i == 0) ret = hclge_vport_setup(vport, tqp_main_vport); @@ -3971,6 +3974,7 @@ static void hclge_periodic_service_task(struct hclge_dev *hdev) * updated when it is triggered by mbx. 
*/ hclge_update_link_status(hdev); + hclge_sync_mac_table(hdev); if (time_is_after_jiffies(hdev->last_serv_processed + HZ)) { delta = jiffies - hdev->last_serv_processed; @@ -6922,8 +6926,16 @@ static void hclge_ae_stop(struct hnae3_handle *handle) int hclge_vport_start(struct hclge_vport *vport) { + struct hclge_dev *hdev = vport->back; + set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state); vport->last_active_jiffies = jiffies; + + if (test_bit(vport->vport_id, hdev->vport_config_block)) + hclge_restore_mac_table_common(vport); + + clear_bit(vport->vport_id, hdev->vport_config_block); + return 0; } @@ -7291,12 +7303,106 @@ static void hclge_update_umv_space(struct hclge_vport *vport, bool is_free) mutex_unlock(&hdev->umv_mutex); } +static struct hclge_mac_node *hclge_find_mac_node(struct list_head *list, + const u8 *mac_addr) +{ + struct hclge_mac_node *mac_node, *tmp; + + list_for_each_entry_safe(mac_node, tmp, list, node) + if (ether_addr_equal(mac_addr, mac_node->mac_addr)) + return mac_node; + + return NULL; +} + +static void hclge_update_mac_node(struct hclge_mac_node *mac_node, + enum HCLGE_MAC_NODE_STATE state) +{ + switch (state) { + /* from set_rx_mode or tmp_add_list */ + case HCLGE_MAC_TO_ADD: + if (mac_node->state == HCLGE_MAC_TO_DEL) + mac_node->state = HCLGE_MAC_ACTIVE; + break; + /* only from set_rx_mode */ + case HCLGE_MAC_TO_DEL: + if (mac_node->state == HCLGE_MAC_TO_ADD) { + list_del(&mac_node->node); + kfree(mac_node); + } else { + mac_node->state = HCLGE_MAC_TO_DEL; + } + break; + /* only from tmp_add_list, the mac_node->state won't be + * ACTIVE. + */ + case HCLGE_MAC_ACTIVE: + if (mac_node->state == HCLGE_MAC_TO_ADD) + mac_node->state = HCLGE_MAC_ACTIVE; + + break; + } +} + +int hclge_update_mac_list(struct hclge_vport *vport, + enum HCLGE_MAC_NODE_STATE state, + enum HCLGE_MAC_ADDR_TYPE mac_type, + const unsigned char *addr) +{ + struct hclge_dev *hdev = vport->back; + struct hclge_mac_node *mac_node; + struct list_head *list; + + list = (mac_type == HCLGE_MAC_ADDR_UC) ? + &vport->uc_mac_list : &vport->mc_mac_list; + + spin_lock_bh(&vport->mac_list_lock); + + /* if the mac addr is already in the mac list, no need to add a new + * one into it, just check the mac addr state, convert it to a new + * new state, or just remove it, or do nothing. 
+ */ + mac_node = hclge_find_mac_node(list, addr); + if (mac_node) { + hclge_update_mac_node(mac_node, state); + spin_unlock_bh(&vport->mac_list_lock); + set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state); + return 0; + } + + /* if this address is never added, unnecessary to delete */ + if (state == HCLGE_MAC_TO_DEL) { + spin_unlock_bh(&vport->mac_list_lock); + dev_err(&hdev->pdev->dev, + "failed to delete address %pM from mac list\n", + addr); + return -ENOENT; + } + + mac_node = kzalloc(sizeof(*mac_node), GFP_ATOMIC); + if (!mac_node) { + spin_unlock_bh(&vport->mac_list_lock); + return -ENOMEM; + } + + set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state); + + mac_node->state = state; + ether_addr_copy(mac_node->mac_addr, addr); + list_add_tail(&mac_node->node, list); + + spin_unlock_bh(&vport->mac_list_lock); + + return 0; +} + static int hclge_add_uc_addr(struct hnae3_handle *handle, const unsigned char *addr) { struct hclge_vport *vport = hclge_get_vport(handle); - return hclge_add_uc_addr_common(vport, addr); + return hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD, HCLGE_MAC_ADDR_UC, + addr); } int hclge_add_uc_addr_common(struct hclge_vport *vport, @@ -7367,7 +7473,8 @@ static int hclge_rm_uc_addr(struct hnae3_handle *handle, { struct hclge_vport *vport = hclge_get_vport(handle); - return hclge_rm_uc_addr_common(vport, addr); + return hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL, HCLGE_MAC_ADDR_UC, + addr); } int hclge_rm_uc_addr_common(struct hclge_vport *vport, @@ -7392,6 +7499,8 @@ int hclge_rm_uc_addr_common(struct hclge_vport *vport, ret = hclge_remove_mac_vlan_tbl(vport, &req); if (!ret) hclge_update_umv_space(vport, true); + else if (ret == -ENOENT) + ret = 0; return ret; } @@ -7401,7 +7510,8 @@ static int hclge_add_mc_addr(struct hnae3_handle *handle, { struct hclge_vport *vport = hclge_get_vport(handle); - return hclge_add_mc_addr_common(vport, addr); + return hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD, HCLGE_MAC_ADDR_MC, + addr); } int hclge_add_mc_addr_common(struct hclge_vport *vport, @@ -7444,7 +7554,8 @@ static int hclge_rm_mc_addr(struct hnae3_handle *handle, { struct hclge_vport *vport = hclge_get_vport(handle); - return hclge_rm_mc_addr_common(vport, addr); + return hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL, HCLGE_MAC_ADDR_MC, + addr); } int hclge_rm_mc_addr_common(struct hclge_vport *vport, @@ -7479,111 +7590,328 @@ int hclge_rm_mc_addr_common(struct hclge_vport *vport, /* Not all the vfid is zero, update the vfid */ status = hclge_add_mac_vlan_tbl(vport, &req, desc); - } else { - /* Maybe this mac address is in mta table, but it cannot be - * deleted here because an entry of mta represents an address - * range rather than a specific address. the delete action to - * all entries will take effect in update_mta_status called by - * hns3_nic_set_rx_mode. 
- */ + } else if (status == -ENOENT) { status = 0; } return status; } -void hclge_add_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr, - enum HCLGE_MAC_ADDR_TYPE mac_type) +static void hclge_sync_vport_mac_list(struct hclge_vport *vport, + struct list_head *list, + int (*sync)(struct hclge_vport *, + const unsigned char *)) { - struct hclge_vport_mac_addr_cfg *mac_cfg; - struct list_head *list; + struct hclge_mac_node *mac_node, *tmp; + int ret; - if (!vport->vport_id) - return; + list_for_each_entry_safe(mac_node, tmp, list, node) { + ret = sync(vport, mac_node->mac_addr); + if (!ret) { + mac_node->state = HCLGE_MAC_ACTIVE; + } else { + set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, + &vport->state); + break; + } + } +} - mac_cfg = kzalloc(sizeof(*mac_cfg), GFP_KERNEL); - if (!mac_cfg) - return; +static void hclge_unsync_vport_mac_list(struct hclge_vport *vport, + struct list_head *list, + int (*unsync)(struct hclge_vport *, + const unsigned char *)) +{ + struct hclge_mac_node *mac_node, *tmp; + int ret; - mac_cfg->hd_tbl_status = true; - memcpy(mac_cfg->mac_addr, mac_addr, ETH_ALEN); + list_for_each_entry_safe(mac_node, tmp, list, node) { + ret = unsync(vport, mac_node->mac_addr); + if (!ret || ret == -ENOENT) { + list_del(&mac_node->node); + kfree(mac_node); + } else { + set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, + &vport->state); + break; + } + } +} - list = (mac_type == HCLGE_MAC_ADDR_UC) ? - &vport->uc_mac_list : &vport->mc_mac_list; +static void hclge_sync_from_add_list(struct list_head *add_list, + struct list_head *mac_list) +{ + struct hclge_mac_node *mac_node, *tmp, *new_node; - list_add_tail(&mac_cfg->node, list); + list_for_each_entry_safe(mac_node, tmp, add_list, node) { + /* if the mac address from tmp_add_list is not in the + * uc/mc_mac_list, it means have received a TO_DEL request + * during the time window of adding the mac address into mac + * table. if mac_node state is ACTIVE, then change it to TO_DEL, + * then it will be removed at next time. else it must be TO_ADD, + * this address hasn't been added into mac table, + * so just remove the mac node. + */ + new_node = hclge_find_mac_node(mac_list, mac_node->mac_addr); + if (new_node) { + hclge_update_mac_node(new_node, mac_node->state); + list_del(&mac_node->node); + kfree(mac_node); + } else if (mac_node->state == HCLGE_MAC_ACTIVE) { + mac_node->state = HCLGE_MAC_TO_DEL; + list_del(&mac_node->node); + list_add_tail(&mac_node->node, mac_list); + } else { + list_del(&mac_node->node); + kfree(mac_node); + } + } } -void hclge_rm_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr, - bool is_write_tbl, - enum HCLGE_MAC_ADDR_TYPE mac_type) +static void hclge_sync_from_del_list(struct list_head *del_list, + struct list_head *mac_list) { - struct hclge_vport_mac_addr_cfg *mac_cfg, *tmp; - struct list_head *list; - bool uc_flag, mc_flag; + struct hclge_mac_node *mac_node, *tmp, *new_node; - list = (mac_type == HCLGE_MAC_ADDR_UC) ? - &vport->uc_mac_list : &vport->mc_mac_list; + list_for_each_entry_safe(mac_node, tmp, del_list, node) { + new_node = hclge_find_mac_node(mac_list, mac_node->mac_addr); + if (new_node) { + /* If the mac addr exists in the mac list, it means + * received a new TO_ADD request during the time window + * of configuring the mac address. For the mac node + * state is TO_ADD, and the address is already in the + * in the hardware(due to delete fail), so we just need + * to change the mac node state to ACTIVE. 
+ */ + new_node->state = HCLGE_MAC_ACTIVE; + list_del(&mac_node->node); + kfree(mac_node); + } else { + list_del(&mac_node->node); + list_add_tail(&mac_node->node, mac_list); + } + } +} - uc_flag = is_write_tbl && mac_type == HCLGE_MAC_ADDR_UC; - mc_flag = is_write_tbl && mac_type == HCLGE_MAC_ADDR_MC; +static void hclge_sync_vport_mac_table(struct hclge_vport *vport, + enum HCLGE_MAC_ADDR_TYPE mac_type) +{ + struct hclge_mac_node *mac_node, *tmp, *new_node; + struct list_head tmp_add_list, tmp_del_list; + struct list_head *list; - list_for_each_entry_safe(mac_cfg, tmp, list, node) { - if (ether_addr_equal(mac_cfg->mac_addr, mac_addr)) { - if (uc_flag && mac_cfg->hd_tbl_status) - hclge_rm_uc_addr_common(vport, mac_addr); + INIT_LIST_HEAD(&tmp_add_list); + INIT_LIST_HEAD(&tmp_del_list); - if (mc_flag && mac_cfg->hd_tbl_status) - hclge_rm_mc_addr_common(vport, mac_addr); + /* move the mac addr to the tmp_add_list and tmp_del_list, then + * we can add/delete these mac addr outside the spin lock + */ + list = (mac_type == HCLGE_MAC_ADDR_UC) ? + &vport->uc_mac_list : &vport->mc_mac_list; - list_del(&mac_cfg->node); - kfree(mac_cfg); + spin_lock_bh(&vport->mac_list_lock); + + list_for_each_entry_safe(mac_node, tmp, list, node) { + switch (mac_node->state) { + case HCLGE_MAC_TO_DEL: + list_del(&mac_node->node); + list_add_tail(&mac_node->node, &tmp_del_list); + break; + case HCLGE_MAC_TO_ADD: + new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC); + if (!new_node) + goto stop_traverse; + ether_addr_copy(new_node->mac_addr, mac_node->mac_addr); + new_node->state = mac_node->state; + list_add_tail(&new_node->node, &tmp_add_list); + break; + default: break; } } + +stop_traverse: + spin_unlock_bh(&vport->mac_list_lock); + + /* delete first, in order to get max mac table space for adding */ + if (mac_type == HCLGE_MAC_ADDR_UC) { + hclge_unsync_vport_mac_list(vport, &tmp_del_list, + hclge_rm_uc_addr_common); + hclge_sync_vport_mac_list(vport, &tmp_add_list, + hclge_add_uc_addr_common); + } else { + hclge_unsync_vport_mac_list(vport, &tmp_del_list, + hclge_rm_mc_addr_common); + hclge_sync_vport_mac_list(vport, &tmp_add_list, + hclge_add_mc_addr_common); + } + + /* if some mac addresses were added/deleted fail, move back to the + * mac_list, and retry at next time. 
+ */ + spin_lock_bh(&vport->mac_list_lock); + + hclge_sync_from_del_list(&tmp_del_list, list); + hclge_sync_from_add_list(&tmp_add_list, list); + + spin_unlock_bh(&vport->mac_list_lock); +} + +static bool hclge_need_sync_mac_table(struct hclge_vport *vport) +{ + struct hclge_dev *hdev = vport->back; + + if (test_bit(vport->vport_id, hdev->vport_config_block)) + return false; + + if (test_and_clear_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state)) + return true; + + return false; +} + +static void hclge_sync_mac_table(struct hclge_dev *hdev) +{ + int i; + + for (i = 0; i < hdev->num_alloc_vport; i++) { + struct hclge_vport *vport = &hdev->vport[i]; + + if (!hclge_need_sync_mac_table(vport)) + continue; + + hclge_sync_vport_mac_table(vport, HCLGE_MAC_ADDR_UC); + hclge_sync_vport_mac_table(vport, HCLGE_MAC_ADDR_MC); + } } void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list, enum HCLGE_MAC_ADDR_TYPE mac_type) { - struct hclge_vport_mac_addr_cfg *mac_cfg, *tmp; - struct list_head *list; + int (*unsync)(struct hclge_vport *vport, const unsigned char *addr); + struct hclge_mac_node *mac_cfg, *tmp; + struct hclge_dev *hdev = vport->back; + struct list_head tmp_del_list, *list; + int ret; - list = (mac_type == HCLGE_MAC_ADDR_UC) ? - &vport->uc_mac_list : &vport->mc_mac_list; + if (mac_type == HCLGE_MAC_ADDR_UC) { + list = &vport->uc_mac_list; + unsync = hclge_rm_uc_addr_common; + } else { + list = &vport->mc_mac_list; + unsync = hclge_rm_mc_addr_common; + } - list_for_each_entry_safe(mac_cfg, tmp, list, node) { - if (mac_type == HCLGE_MAC_ADDR_UC && mac_cfg->hd_tbl_status) - hclge_rm_uc_addr_common(vport, mac_cfg->mac_addr); + INIT_LIST_HEAD(&tmp_del_list); - if (mac_type == HCLGE_MAC_ADDR_MC && mac_cfg->hd_tbl_status) - hclge_rm_mc_addr_common(vport, mac_cfg->mac_addr); + if (!is_del_list) + set_bit(vport->vport_id, hdev->vport_config_block); - mac_cfg->hd_tbl_status = false; - if (is_del_list) { + spin_lock_bh(&vport->mac_list_lock); + + list_for_each_entry_safe(mac_cfg, tmp, list, node) { + switch (mac_cfg->state) { + case HCLGE_MAC_TO_DEL: + case HCLGE_MAC_ACTIVE: list_del(&mac_cfg->node); - kfree(mac_cfg); + list_add_tail(&mac_cfg->node, &tmp_del_list); + break; + case HCLGE_MAC_TO_ADD: + if (is_del_list) { + list_del(&mac_cfg->node); + kfree(mac_cfg); + } + break; } } + + spin_unlock_bh(&vport->mac_list_lock); + + list_for_each_entry_safe(mac_cfg, tmp, &tmp_del_list, node) { + ret = unsync(vport, mac_cfg->mac_addr); + if (!ret || ret == -ENOENT) { + /* clear all mac addr from hardware, but remain these + * mac addr in the mac list, and restore them after + * vf reset finished. + */ + if (!is_del_list && + mac_cfg->state == HCLGE_MAC_ACTIVE) { + mac_cfg->state = HCLGE_MAC_TO_ADD; + } else { + list_del(&mac_cfg->node); + kfree(mac_cfg); + } + } else if (is_del_list) { + mac_cfg->state = HCLGE_MAC_TO_DEL; + } + } + + spin_lock_bh(&vport->mac_list_lock); + + hclge_sync_from_del_list(&tmp_del_list, list); + + spin_unlock_bh(&vport->mac_list_lock); +} + +/* remove all mac address when uninitailize */ +static void hclge_uninit_vport_mac_list(struct hclge_vport *vport, + enum HCLGE_MAC_ADDR_TYPE mac_type) +{ + struct hclge_mac_node *mac_node, *tmp; + struct hclge_dev *hdev = vport->back; + struct list_head tmp_del_list, *list; + + INIT_LIST_HEAD(&tmp_del_list); + + list = (mac_type == HCLGE_MAC_ADDR_UC) ? 
+ &vport->uc_mac_list : &vport->mc_mac_list; + + spin_lock_bh(&vport->mac_list_lock); + + list_for_each_entry_safe(mac_node, tmp, list, node) { + switch (mac_node->state) { + case HCLGE_MAC_TO_DEL: + case HCLGE_MAC_ACTIVE: + list_del(&mac_node->node); + list_add_tail(&mac_node->node, &tmp_del_list); + break; + case HCLGE_MAC_TO_ADD: + list_del(&mac_node->node); + kfree(mac_node); + break; + } + } + + spin_unlock_bh(&vport->mac_list_lock); + + if (mac_type == HCLGE_MAC_ADDR_UC) + hclge_unsync_vport_mac_list(vport, &tmp_del_list, + hclge_rm_uc_addr_common); + else + hclge_unsync_vport_mac_list(vport, &tmp_del_list, + hclge_rm_mc_addr_common); + + if (!list_empty(&tmp_del_list)) + dev_warn(&hdev->pdev->dev, + "uninit %s mac list for vport %u not completely.\n", + mac_type == HCLGE_MAC_ADDR_UC ? "uc" : "mc", + vport->vport_id); + + list_for_each_entry_safe(mac_node, tmp, &tmp_del_list, node) { + list_del(&mac_node->node); + kfree(mac_node); + } } -void hclge_uninit_vport_mac_table(struct hclge_dev *hdev) +static void hclge_uninit_mac_table(struct hclge_dev *hdev) { - struct hclge_vport_mac_addr_cfg *mac, *tmp; struct hclge_vport *vport; int i; for (i = 0; i < hdev->num_alloc_vport; i++) { vport = &hdev->vport[i]; - list_for_each_entry_safe(mac, tmp, &vport->uc_mac_list, node) { - list_del(&mac->node); - kfree(mac); - } - - list_for_each_entry_safe(mac, tmp, &vport->mc_mac_list, node) { - list_del(&mac->node); - kfree(mac); - } + hclge_uninit_vport_mac_list(vport, HCLGE_MAC_ADDR_UC); + hclge_uninit_vport_mac_list(vport, HCLGE_MAC_ADDR_MC); } } @@ -7747,12 +8075,57 @@ static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p) ether_addr_copy(p, hdev->hw.mac.mac_addr); } +int hclge_update_mac_node_for_dev_addr(struct hclge_vport *vport, + const u8 *old_addr, const u8 *new_addr) +{ + struct list_head *list = &vport->uc_mac_list; + struct hclge_mac_node *old_node, *new_node; + + new_node = hclge_find_mac_node(list, new_addr); + if (!new_node) { + new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC); + if (!new_node) + return -ENOMEM; + + new_node->state = HCLGE_MAC_TO_ADD; + ether_addr_copy(new_node->mac_addr, new_addr); + list_add(&new_node->node, list); + } else { + if (new_node->state == HCLGE_MAC_TO_DEL) + new_node->state = HCLGE_MAC_ACTIVE; + + /* make sure the new addr is in the list head, avoid dev + * addr may be not re-added into mac table for the umv space + * limitation after global/imp reset which will clear mac + * table by hardware. + */ + list_move(&new_node->node, list); + } + + if (old_addr && !ether_addr_equal(old_addr, new_addr)) { + old_node = hclge_find_mac_node(list, old_addr); + if (old_node) { + if (old_node->state == HCLGE_MAC_TO_ADD) { + list_del(&old_node->node); + kfree(old_node); + } else { + old_node->state = HCLGE_MAC_TO_DEL; + } + } + } + + set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state); + + return 0; +} + static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p, bool is_first) { const unsigned char *new_addr = (const unsigned char *)p; struct hclge_vport *vport = hclge_get_vport(handle); struct hclge_dev *hdev = vport->back; + unsigned char *old_addr = NULL; int ret; /* mac addr check */ @@ -7760,39 +8133,42 @@ static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p, is_broadcast_ether_addr(new_addr) || is_multicast_ether_addr(new_addr)) { dev_err(&hdev->pdev->dev, - "Change uc mac err! invalid mac:%pM.\n", + "change uc mac err! 
invalid mac: %pM.\n", new_addr); return -EINVAL; } - if ((!is_first || is_kdump_kernel()) && - hclge_rm_uc_addr(handle, hdev->hw.mac.mac_addr)) - dev_warn(&hdev->pdev->dev, - "remove old uc mac address fail.\n"); - - ret = hclge_add_uc_addr(handle, new_addr); + ret = hclge_pause_addr_cfg(hdev, new_addr); if (ret) { dev_err(&hdev->pdev->dev, - "add uc mac address fail, ret =%d.\n", + "failed to configure mac pause address, ret = %d\n", ret); - - if (!is_first && - hclge_add_uc_addr(handle, hdev->hw.mac.mac_addr)) - dev_err(&hdev->pdev->dev, - "restore uc mac address fail.\n"); - - return -EIO; + return ret; } - ret = hclge_pause_addr_cfg(hdev, new_addr); + if (!is_first) + old_addr = hdev->hw.mac.mac_addr; + + spin_lock_bh(&vport->mac_list_lock); + ret = hclge_update_mac_node_for_dev_addr(vport, old_addr, new_addr); if (ret) { dev_err(&hdev->pdev->dev, - "configure mac pause address fail, ret =%d.\n", - ret); - return -EIO; - } + "failed to change the mac addr:%pM, ret = %d\n", + new_addr, ret); + spin_unlock_bh(&vport->mac_list_lock); + + if (!is_first) + hclge_pause_addr_cfg(hdev, old_addr); + return ret; + } + /* we must update dev addr with spin lock protect, preventing dev addr + * being removed by set_rx_mode path. + */ ether_addr_copy(hdev->hw.mac.mac_addr, new_addr); + spin_unlock_bh(&vport->mac_list_lock); + + hclge_task_schedule(hdev, 0); return 0; } @@ -8408,6 +8784,37 @@ static void hclge_restore_vlan_table(struct hnae3_handle *handle) } } +/* For global reset and imp reset, hardware will clear the mac table, + * so we change the mac address state from ACTIVE to TO_ADD, then they + * can be restored in the service task after reset complete. Furtherly, + * the mac addresses with state TO_DEL or DEL_FAIL are unnecessary to + * be restored after reset, so just remove these mac nodes from mac_list. + */ +static void hclge_mac_node_convert_for_reset(struct list_head *list) +{ + struct hclge_mac_node *mac_node, *tmp; + + list_for_each_entry_safe(mac_node, tmp, list, node) { + if (mac_node->state == HCLGE_MAC_ACTIVE) { + mac_node->state = HCLGE_MAC_TO_ADD; + } else if (mac_node->state == HCLGE_MAC_TO_DEL) { + list_del(&mac_node->node); + kfree(mac_node); + } + } +} + +void hclge_restore_mac_table_common(struct hclge_vport *vport) +{ + spin_lock_bh(&vport->mac_list_lock); + + hclge_mac_node_convert_for_reset(&vport->uc_mac_list); + hclge_mac_node_convert_for_reset(&vport->mc_mac_list); + set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state); + + spin_unlock_bh(&vport->mac_list_lock); +} + int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable) { struct hclge_vport *vport = hclge_get_vport(handle); @@ -9899,6 +10306,15 @@ static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev) set_bit(HCLGE_STATE_DOWN, &hdev->state); hclge_stats_clear(hdev); + /* NOTE: pf reset needn't to clear or restore pf and vf table entry. + * so here should not clean table in memory. 
+ */ + if (hdev->reset_type == HNAE3_IMP_RESET || + hdev->reset_type == HNAE3_GLOBAL_RESET) { + bitmap_set(hdev->vport_config_block, 0, hdev->num_alloc_vport); + hclge_reset_umv_space(hdev); + } + memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table)); memset(hdev->vf_vlan_full, 0, sizeof(hdev->vf_vlan_full)); @@ -9914,8 +10330,6 @@ static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev) return ret; } - hclge_reset_umv_space(hdev); - ret = hclge_mac_init(hdev); if (ret) { dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret); @@ -10011,6 +10425,7 @@ static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev) hclge_clear_vf_vlan(hdev); hclge_misc_affinity_teardown(hdev); hclge_state_uninit(hdev); + hclge_uninit_mac_table(hdev); if (mac->phydev) mdiobus_unregister(mac->mdio_bus); @@ -10028,7 +10443,6 @@ static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev) hclge_misc_irq_uninit(hdev); hclge_pci_uninit(hdev); mutex_destroy(&hdev->vport_lock); - hclge_uninit_vport_mac_table(hdev); hclge_uninit_vport_vlan_table(hdev); ae_dev->priv = NULL; } diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h index a58c262..5fcbc3d 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h @@ -630,9 +630,15 @@ struct hclge_fd_ad_data { u16 rule_id; }; -struct hclge_vport_mac_addr_cfg { +enum HCLGE_MAC_NODE_STATE { + HCLGE_MAC_TO_ADD, + HCLGE_MAC_TO_DEL, + HCLGE_MAC_ACTIVE +}; + +struct hclge_mac_node { struct list_head node; - int hd_tbl_status; + enum HCLGE_MAC_NODE_STATE state; u8 mac_addr[ETH_ALEN]; }; @@ -805,6 +811,8 @@ struct hclge_dev { unsigned long vlan_table[VLAN_N_VID][BITS_TO_LONGS(HCLGE_VPORT_NUM)]; unsigned long vf_vlan_full[BITS_TO_LONGS(HCLGE_VPORT_NUM)]; + unsigned long vport_config_block[BITS_TO_LONGS(HCLGE_VPORT_NUM)]; + struct hclge_fd_cfg fd_cfg; struct hlist_head fd_rule_list; spinlock_t fd_rule_lock; /* protect fd_rule_list and fd_bmap */ @@ -866,6 +874,7 @@ struct hclge_rss_tuple_cfg { enum HCLGE_VPORT_STATE { HCLGE_VPORT_STATE_ALIVE, + HCLGE_VPORT_STATE_MAC_TBL_CHANGE, HCLGE_VPORT_STATE_MAX }; @@ -922,6 +931,7 @@ struct hclge_vport { u32 mps; /* Max packet size */ struct hclge_vf_info vf_info; + spinlock_t mac_list_lock; /* protect mac address need to add/detele */ struct list_head uc_mac_list; /* Store VF unicast table */ struct list_head mc_mac_list; /* Store VF multicast table */ struct list_head vlan_list; /* Store VF vlan table */ @@ -977,16 +987,17 @@ int hclge_dbg_run_cmd(struct hnae3_handle *handle, const char *cmd_buf); u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id); int hclge_notify_client(struct hclge_dev *hdev, enum hnae3_reset_notify_type type); -void hclge_add_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr, - enum HCLGE_MAC_ADDR_TYPE mac_type); -void hclge_rm_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr, - bool is_write_tbl, - enum HCLGE_MAC_ADDR_TYPE mac_type); +int hclge_update_mac_list(struct hclge_vport *vport, + enum HCLGE_MAC_NODE_STATE state, + enum HCLGE_MAC_ADDR_TYPE mac_type, + const unsigned char *addr); +int hclge_update_mac_node_for_dev_addr(struct hclge_vport *vport, + const u8 *old_addr, const u8 *new_addr); void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list, enum HCLGE_MAC_ADDR_TYPE mac_type); -void hclge_uninit_vport_mac_table(struct hclge_dev *hdev); void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, 
bool is_del_list); void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev); +void hclge_restore_mac_table_common(struct hclge_vport *vport); int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state, struct hclge_vlan_info *vlan_info); int hclge_push_vf_port_base_vlan_info(struct hclge_vport *vport, u8 vfid, diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c index 103c2ec..0efc045 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c @@ -275,26 +275,17 @@ static int hclge_set_vf_uc_mac_addr(struct hclge_vport *vport, if (!is_valid_ether_addr(mac_addr)) return -EINVAL; - hclge_rm_uc_addr_common(vport, old_addr); - status = hclge_add_uc_addr_common(vport, mac_addr); - if (status) { - hclge_add_uc_addr_common(vport, old_addr); - } else { - hclge_rm_vport_mac_table(vport, mac_addr, - false, HCLGE_MAC_ADDR_UC); - hclge_add_vport_mac_table(vport, mac_addr, - HCLGE_MAC_ADDR_UC); - } + spin_lock_bh(&vport->mac_list_lock); + status = hclge_update_mac_node_for_dev_addr(vport, old_addr, + mac_addr); + spin_unlock_bh(&vport->mac_list_lock); + hclge_task_schedule(hdev, 0); } else if (mbx_req->msg.subcode == HCLGE_MBX_MAC_VLAN_UC_ADD) { - status = hclge_add_uc_addr_common(vport, mac_addr); - if (!status) - hclge_add_vport_mac_table(vport, mac_addr, - HCLGE_MAC_ADDR_UC); + status = hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD, + HCLGE_MAC_ADDR_UC, mac_addr); } else if (mbx_req->msg.subcode == HCLGE_MBX_MAC_VLAN_UC_REMOVE) { - status = hclge_rm_uc_addr_common(vport, mac_addr); - if (!status) - hclge_rm_vport_mac_table(vport, mac_addr, - false, HCLGE_MAC_ADDR_UC); + status = hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL, + HCLGE_MAC_ADDR_UC, mac_addr); } else { dev_err(&hdev->pdev->dev, "failed to set unicast mac addr, unknown subcode %u\n", @@ -310,18 +301,13 @@ static int hclge_set_vf_mc_mac_addr(struct hclge_vport *vport, { const u8 *mac_addr = (const u8 *)(mbx_req->msg.data); struct hclge_dev *hdev = vport->back; - int status; if (mbx_req->msg.subcode == HCLGE_MBX_MAC_VLAN_MC_ADD) { - status = hclge_add_mc_addr_common(vport, mac_addr); - if (!status) - hclge_add_vport_mac_table(vport, mac_addr, - HCLGE_MAC_ADDR_MC); + hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD, + HCLGE_MAC_ADDR_MC, mac_addr); } else if (mbx_req->msg.subcode == HCLGE_MBX_MAC_VLAN_MC_REMOVE) { - status = hclge_rm_mc_addr_common(vport, mac_addr); - if (!status) - hclge_rm_vport_mac_table(vport, mac_addr, - false, HCLGE_MAC_ADDR_MC); + hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL, + HCLGE_MAC_ADDR_MC, mac_addr); } else { dev_err(&hdev->pdev->dev, "failed to set mcast mac addr, unknown subcode %u\n", @@ -329,7 +315,7 @@ static int hclge_set_vf_mc_mac_addr(struct hclge_vport *vport, return -EIO; } - return status; + return 0; } int hclge_push_vf_port_base_vlan_info(struct hclge_vport *vport, u8 vfid, diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c index e02d427..05d485a 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c @@ -1245,10 +1245,12 @@ static int hclgevf_set_mac_addr(struct hnae3_handle *handle, void *p, int status; hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_UNICAST, 0); - send_msg.subcode = is_first ? 
HCLGE_MBX_MAC_VLAN_UC_ADD : - HCLGE_MBX_MAC_VLAN_UC_MODIFY; + send_msg.subcode = HCLGE_MBX_MAC_VLAN_UC_MODIFY; ether_addr_copy(send_msg.data, new_mac_addr); - ether_addr_copy(&send_msg.data[ETH_ALEN], old_mac_addr); + if (is_first && !hdev->has_pf_mac) + eth_zero_addr(&send_msg.data[ETH_ALEN]); + else + ether_addr_copy(&send_msg.data[ETH_ALEN], old_mac_addr); status = hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0); if (!status) ether_addr_copy(hdev->hw.mac.mac_addr, new_mac_addr); @@ -1256,54 +1258,302 @@ static int hclgevf_set_mac_addr(struct hnae3_handle *handle, void *p, return status; } -static int hclgevf_add_uc_addr(struct hnae3_handle *handle, - const unsigned char *addr) +static struct hclgevf_mac_addr_node * +hclgevf_find_mac_node(struct list_head *list, const u8 *mac_addr) +{ + struct hclgevf_mac_addr_node *mac_node, *tmp; + + list_for_each_entry_safe(mac_node, tmp, list, node) + if (ether_addr_equal(mac_addr, mac_node->mac_addr)) + return mac_node; + + return NULL; +} + +static void hclgevf_update_mac_node(struct hclgevf_mac_addr_node *mac_node, + enum HCLGEVF_MAC_NODE_STATE state) +{ + switch (state) { + /* from set_rx_mode or tmp_add_list */ + case HCLGEVF_MAC_TO_ADD: + if (mac_node->state == HCLGEVF_MAC_TO_DEL) + mac_node->state = HCLGEVF_MAC_ACTIVE; + break; + /* only from set_rx_mode */ + case HCLGEVF_MAC_TO_DEL: + if (mac_node->state == HCLGEVF_MAC_TO_ADD) { + list_del(&mac_node->node); + kfree(mac_node); + } else { + mac_node->state = HCLGEVF_MAC_TO_DEL; + } + break; + /* only from tmp_add_list, the mac_node->state won't be + * HCLGEVF_MAC_ACTIVE + */ + case HCLGEVF_MAC_ACTIVE: + if (mac_node->state == HCLGEVF_MAC_TO_ADD) + mac_node->state = HCLGEVF_MAC_ACTIVE; + break; + } +} + +static int hclgevf_update_mac_list(struct hnae3_handle *handle, + enum HCLGEVF_MAC_NODE_STATE state, + enum HCLGEVF_MAC_ADDR_TYPE mac_type, + const unsigned char *addr) { struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); - struct hclge_vf_to_pf_msg send_msg; + struct hclgevf_mac_addr_node *mac_node; + struct list_head *list; - hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_UNICAST, - HCLGE_MBX_MAC_VLAN_UC_ADD); - ether_addr_copy(send_msg.data, addr); - return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0); + list = (mac_type == HCLGEVF_MAC_ADDR_UC) ? + &hdev->mac_table.uc_mac_list : &hdev->mac_table.mc_mac_list; + + spin_lock_bh(&hdev->mac_table.mac_list_lock); + + /* if the mac addr is already in the mac list, no need to add a new + * one into it, just check the mac addr state, convert it to a new + * new state, or just remove it, or do nothing. 
+ */ + mac_node = hclgevf_find_mac_node(list, addr); + if (mac_node) { + hclgevf_update_mac_node(mac_node, state); + spin_unlock_bh(&hdev->mac_table.mac_list_lock); + return 0; + } + /* if this address is never added, unnecessary to delete */ + if (state == HCLGEVF_MAC_TO_DEL) { + spin_unlock_bh(&hdev->mac_table.mac_list_lock); + return -ENOENT; + } + + mac_node = kzalloc(sizeof(*mac_node), GFP_ATOMIC); + if (!mac_node) { + spin_unlock_bh(&hdev->mac_table.mac_list_lock); + return -ENOMEM; + } + + mac_node->state = state; + ether_addr_copy(mac_node->mac_addr, addr); + list_add_tail(&mac_node->node, list); + + spin_unlock_bh(&hdev->mac_table.mac_list_lock); + return 0; +} + +static int hclgevf_add_uc_addr(struct hnae3_handle *handle, + const unsigned char *addr) +{ + return hclgevf_update_mac_list(handle, HCLGEVF_MAC_TO_ADD, + HCLGEVF_MAC_ADDR_UC, addr); } static int hclgevf_rm_uc_addr(struct hnae3_handle *handle, const unsigned char *addr) { - struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); - struct hclge_vf_to_pf_msg send_msg; - - hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_UNICAST, - HCLGE_MBX_MAC_VLAN_UC_REMOVE); - ether_addr_copy(send_msg.data, addr); - return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0); + return hclgevf_update_mac_list(handle, HCLGEVF_MAC_TO_DEL, + HCLGEVF_MAC_ADDR_UC, addr); } static int hclgevf_add_mc_addr(struct hnae3_handle *handle, const unsigned char *addr) { - struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); - struct hclge_vf_to_pf_msg send_msg; - - hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_MULTICAST, - HCLGE_MBX_MAC_VLAN_MC_ADD); - ether_addr_copy(send_msg.data, addr); - return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0); + return hclgevf_update_mac_list(handle, HCLGEVF_MAC_TO_ADD, + HCLGEVF_MAC_ADDR_MC, addr); } static int hclgevf_rm_mc_addr(struct hnae3_handle *handle, const unsigned char *addr) { - struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); + return hclgevf_update_mac_list(handle, HCLGEVF_MAC_TO_DEL, + HCLGEVF_MAC_ADDR_MC, addr); +} + +static int hclgevf_add_del_mac_addr(struct hclgevf_dev *hdev, + struct hclgevf_mac_addr_node *mac_node, + enum HCLGEVF_MAC_ADDR_TYPE mac_type) +{ struct hclge_vf_to_pf_msg send_msg; + u8 code, subcode; - hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_MULTICAST, - HCLGE_MBX_MAC_VLAN_MC_REMOVE); - ether_addr_copy(send_msg.data, addr); + if (mac_type == HCLGEVF_MAC_ADDR_UC) { + code = HCLGE_MBX_SET_UNICAST; + if (mac_node->state == HCLGEVF_MAC_TO_ADD) + subcode = HCLGE_MBX_MAC_VLAN_UC_ADD; + else + subcode = HCLGE_MBX_MAC_VLAN_UC_REMOVE; + } else { + code = HCLGE_MBX_SET_MULTICAST; + if (mac_node->state == HCLGEVF_MAC_TO_ADD) + subcode = HCLGE_MBX_MAC_VLAN_MC_ADD; + else + subcode = HCLGE_MBX_MAC_VLAN_MC_REMOVE; + } + + hclgevf_build_send_msg(&send_msg, code, subcode); + ether_addr_copy(send_msg.data, mac_node->mac_addr); return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0); } +static void hclgevf_config_mac_list(struct hclgevf_dev *hdev, + struct list_head *list, + enum HCLGEVF_MAC_ADDR_TYPE mac_type) +{ + struct hclgevf_mac_addr_node *mac_node, *tmp; + int ret; + + list_for_each_entry_safe(mac_node, tmp, list, node) { + ret = hclgevf_add_del_mac_addr(hdev, mac_node, mac_type); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to configure mac %pM, state = %d, ret = %d\n", + mac_node->mac_addr, mac_node->state, ret); + return; + } + if (mac_node->state == HCLGEVF_MAC_TO_ADD) { + mac_node->state = HCLGEVF_MAC_ACTIVE; + } else { + list_del(&mac_node->node); + 
kfree(mac_node); + } + } +} + +static void hclgevf_sync_from_add_list(struct list_head *add_list, + struct list_head *mac_list) +{ + struct hclgevf_mac_addr_node *mac_node, *tmp, *new_node; + + list_for_each_entry_safe(mac_node, tmp, add_list, node) { + /* if the mac address from tmp_add_list is not in the + * uc/mc_mac_list, it means have received a TO_DEL request + * during the time window of sending mac config request to PF + * If mac_node state is ACTIVE, then change its state to TO_DEL, + * then it will be removed at next time. If is TO_ADD, it means + * send TO_ADD request failed, so just remove the mac node. + */ + new_node = hclgevf_find_mac_node(mac_list, mac_node->mac_addr); + if (new_node) { + hclgevf_update_mac_node(new_node, mac_node->state); + list_del(&mac_node->node); + kfree(mac_node); + } else if (mac_node->state == HCLGEVF_MAC_ACTIVE) { + mac_node->state = HCLGEVF_MAC_TO_DEL; + list_del(&mac_node->node); + list_add_tail(&mac_node->node, mac_list); + } else { + list_del(&mac_node->node); + kfree(mac_node); + } + } +} + +static void hclgevf_sync_from_del_list(struct list_head *del_list, + struct list_head *mac_list) +{ + struct hclgevf_mac_addr_node *mac_node, *tmp, *new_node; + + list_for_each_entry_safe(mac_node, tmp, del_list, node) { + new_node = hclgevf_find_mac_node(mac_list, mac_node->mac_addr); + if (new_node) { + /* If the mac addr is exist in the mac list, it means + * received a new request TO_ADD during the time window + * of sending mac addr configurrequest to PF, so just + * change the mac state to ACTIVE. + */ + new_node->state = HCLGEVF_MAC_ACTIVE; + list_del(&mac_node->node); + kfree(mac_node); + } else { + list_del(&mac_node->node); + list_add_tail(&mac_node->node, mac_list); + } + } +} + +static void hclgevf_clear_list(struct list_head *list) +{ + struct hclgevf_mac_addr_node *mac_node, *tmp; + + list_for_each_entry_safe(mac_node, tmp, list, node) { + list_del(&mac_node->node); + kfree(mac_node); + } +} + +static void hclgevf_sync_mac_list(struct hclgevf_dev *hdev, + enum HCLGEVF_MAC_ADDR_TYPE mac_type) +{ + struct hclgevf_mac_addr_node *mac_node, *tmp, *new_node; + struct list_head tmp_add_list, tmp_del_list; + struct list_head *list; + + INIT_LIST_HEAD(&tmp_add_list); + INIT_LIST_HEAD(&tmp_del_list); + + /* move the mac addr to the tmp_add_list and tmp_del_list, then + * we can add/delete these mac addr outside the spin lock + */ + list = (mac_type == HCLGEVF_MAC_ADDR_UC) ? + &hdev->mac_table.uc_mac_list : &hdev->mac_table.mc_mac_list; + + spin_lock_bh(&hdev->mac_table.mac_list_lock); + + list_for_each_entry_safe(mac_node, tmp, list, node) { + switch (mac_node->state) { + case HCLGEVF_MAC_TO_DEL: + list_del(&mac_node->node); + list_add_tail(&mac_node->node, &tmp_del_list); + break; + case HCLGEVF_MAC_TO_ADD: + new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC); + if (!new_node) + goto stop_traverse; + + ether_addr_copy(new_node->mac_addr, mac_node->mac_addr); + new_node->state = mac_node->state; + list_add_tail(&new_node->node, &tmp_add_list); + break; + default: + break; + } + } + +stop_traverse: + spin_unlock_bh(&hdev->mac_table.mac_list_lock); + + /* delete first, in order to get max mac table space for adding */ + hclgevf_config_mac_list(hdev, &tmp_del_list, mac_type); + hclgevf_config_mac_list(hdev, &tmp_add_list, mac_type); + + /* if some mac addresses were added/deleted fail, move back to the + * mac_list, and retry at next time. 
+ */ + spin_lock_bh(&hdev->mac_table.mac_list_lock); + + hclgevf_sync_from_del_list(&tmp_del_list, list); + hclgevf_sync_from_add_list(&tmp_add_list, list); + + spin_unlock_bh(&hdev->mac_table.mac_list_lock); +} + +static void hclgevf_sync_mac_table(struct hclgevf_dev *hdev) +{ + hclgevf_sync_mac_list(hdev, HCLGEVF_MAC_ADDR_UC); + hclgevf_sync_mac_list(hdev, HCLGEVF_MAC_ADDR_MC); +} + +static void hclgevf_uninit_mac_list(struct hclgevf_dev *hdev) +{ + spin_lock_bh(&hdev->mac_table.mac_list_lock); + + hclgevf_clear_list(&hdev->mac_table.uc_mac_list); + hclgevf_clear_list(&hdev->mac_table.mc_mac_list); + + spin_unlock_bh(&hdev->mac_table.mac_list_lock); +} + static int hclgevf_set_vlan_filter(struct hnae3_handle *handle, __be16 proto, u16 vlan_id, bool is_kill) @@ -1951,6 +2201,8 @@ static void hclgevf_periodic_service_task(struct hclgevf_dev *hdev) hclgevf_sync_vlan_filter(hdev); + hclgevf_sync_mac_table(hdev); + hdev->last_serv_processed = jiffies; out: @@ -2313,6 +2565,10 @@ static void hclgevf_state_init(struct hclgevf_dev *hdev) mutex_init(&hdev->mbx_resp.mbx_mutex); sema_init(&hdev->reset_sem, 1); + spin_lock_init(&hdev->mac_table.mac_list_lock); + INIT_LIST_HEAD(&hdev->mac_table.uc_mac_list); + INIT_LIST_HEAD(&hdev->mac_table.mc_mac_list); + /* bring the device down */ set_bit(HCLGEVF_STATE_DOWN, &hdev->state); } @@ -2846,6 +3102,7 @@ static void hclgevf_uninit_hdev(struct hclgevf_dev *hdev) hclgevf_pci_uninit(hdev); hclgevf_cmd_uninit(hdev); + hclgevf_uninit_mac_list(hdev); } static int hclgevf_init_ae_dev(struct hnae3_ae_dev *ae_dev) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h index 3b88d86..0222d9b 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h @@ -234,6 +234,29 @@ struct hclgevf_rst_stats { u32 rst_fail_cnt; /* the number of VF reset fail */ }; +enum HCLGEVF_MAC_ADDR_TYPE { + HCLGEVF_MAC_ADDR_UC, + HCLGEVF_MAC_ADDR_MC +}; + +enum HCLGEVF_MAC_NODE_STATE { + HCLGEVF_MAC_TO_ADD, + HCLGEVF_MAC_TO_DEL, + HCLGEVF_MAC_ACTIVE +}; + +struct hclgevf_mac_addr_node { + struct list_head node; + enum HCLGEVF_MAC_NODE_STATE state; + u8 mac_addr[ETH_ALEN]; +}; + +struct hclgevf_mac_table_cfg { + spinlock_t mac_list_lock; /* protect mac address need to add/detele */ + struct list_head uc_mac_list; + struct list_head mc_mac_list; +}; + struct hclgevf_dev { struct pci_dev *pdev; struct hnae3_ae_dev *ae_dev; @@ -282,6 +305,8 @@ struct hclgevf_dev { unsigned long vlan_del_fail_bmap[BITS_TO_LONGS(VLAN_N_VID)]; + struct hclgevf_mac_table_cfg mac_table; + bool mbx_event_pending; struct hclgevf_mbx_resp_status mbx_resp; /* mailbox response */ struct hclgevf_mbx_arq_ring arq; /* mailbox async rx queue */ From patchwork Fri Apr 24 02:23:10 2020 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: tanhuazhong X-Patchwork-Id: 1276106 X-Patchwork-Delegate: davem@davemloft.net Return-Path: X-Original-To: patchwork-incoming-netdev@ozlabs.org Delivered-To: patchwork-incoming-netdev@ozlabs.org Authentication-Results: ozlabs.org; spf=pass (sender SPF authorized) smtp.mailfrom=vger.kernel.org (client-ip=23.128.96.18; helo=vger.kernel.org; envelope-from=netdev-owner@vger.kernel.org; receiver=) Authentication-Results: ozlabs.org; dmarc=none (p=none dis=none) header.from=huawei.com Received: from vger.kernel.org (vger.kernel.org [23.128.96.18]) by ozlabs.org (Postfix) with ESMTP id 
497dKt6m78z9sSv for ; Fri, 24 Apr 2020 12:24:50 +1000 (AEST) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S1726501AbgDXCYo (ORCPT ); Thu, 23 Apr 2020 22:24:44 -0400 Received: from szxga04-in.huawei.com ([45.249.212.190]:2890 "EHLO huawei.com" rhost-flags-OK-OK-OK-FAIL) by vger.kernel.org with ESMTP id S1726240AbgDXCYa (ORCPT ); Thu, 23 Apr 2020 22:24:30 -0400 Received: from DGGEMS409-HUB.china.huawei.com (unknown [172.30.72.60]) by Forcepoint Email with ESMTP id BD23EB95B492380BB2FC; Fri, 24 Apr 2020 10:24:24 +0800 (CST) Received: from localhost.localdomain (10.69.192.56) by DGGEMS409-HUB.china.huawei.com (10.3.19.209) with Microsoft SMTP Server id 14.3.487.0; Fri, 24 Apr 2020 10:24:17 +0800 From: Huazhong Tan To: CC: , , , , , , Jian Shen , Huazhong Tan Subject: [PATCH net-next 5/8] net: hns3: add support for dumping UC and MC MAC list Date: Fri, 24 Apr 2020 10:23:10 +0800 Message-ID: <1587694993-25183-6-git-send-email-tanhuazhong@huawei.com> X-Mailer: git-send-email 2.7.4 In-Reply-To: <1587694993-25183-1-git-send-email-tanhuazhong@huawei.com> References: <1587694993-25183-1-git-send-email-tanhuazhong@huawei.com> MIME-Version: 1.0 X-Originating-IP: [10.69.192.56] X-CFilter-Loop: Reflected Sender: netdev-owner@vger.kernel.org Precedence: bulk List-ID: X-Mailing-List: netdev@vger.kernel.org From: Jian Shen This patch adds support for dumping entries of UC and MC MAC list, which help checking whether a MAC address being added into hardware or not. Signed-off-by: Jian Shen Signed-off-by: Huazhong Tan --- drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c | 2 + .../ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c | 51 ++++++++++++++++++++++ 2 files changed, 53 insertions(+) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c index c934f32..fe7fb56 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c @@ -262,6 +262,8 @@ static void hns3_dbg_help(struct hnae3_handle *h) dev_info(&h->pdev->dev, "dump mac tnl status\n"); dev_info(&h->pdev->dev, "dump loopback\n"); dev_info(&h->pdev->dev, "dump qs shaper [qs id]\n"); + dev_info(&h->pdev->dev, "dump uc mac list \n"); + dev_info(&h->pdev->dev, "dump mc mac list \n"); memset(printf_buf, 0, HNS3_DBG_BUF_LEN); strncat(printf_buf, "dump reg [[bios common] [ssu ]", diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c index 66c1ad3..6cfa825 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c @@ -1441,6 +1441,49 @@ static void hclge_dbg_dump_qs_shaper(struct hclge_dev *hdev, hclge_dbg_dump_qs_shaper_single(hdev, qsid); } +static int hclge_dbg_dump_mac_list(struct hclge_dev *hdev, const char *cmd_buf, + bool is_unicast) +{ + struct hclge_mac_node *mac_node, *tmp; + struct hclge_vport *vport; + struct list_head *list; + u32 func_id; + int ret; + + ret = kstrtouint(cmd_buf, 0, &func_id); + if (ret < 0) { + dev_err(&hdev->pdev->dev, + "dump mac list: bad command string, ret = %d\n", ret); + return -EINVAL; + } + + if (func_id >= hdev->num_alloc_vport) { + dev_err(&hdev->pdev->dev, + "function id(%u) is out of range(0-%u)\n", func_id, + hdev->num_alloc_vport - 1); + return -EINVAL; + } + + vport = &hdev->vport[func_id]; + + list = is_unicast ? 
&vport->uc_mac_list : &vport->mc_mac_list; + + dev_info(&hdev->pdev->dev, "vport %u %s mac list:\n", + func_id, is_unicast ? "uc" : "mc"); + dev_info(&hdev->pdev->dev, "mac address state\n"); + + spin_lock_bh(&vport->mac_list_lock); + + list_for_each_entry_safe(mac_node, tmp, list, node) { + dev_info(&hdev->pdev->dev, "%pM %d\n", + mac_node->mac_addr, mac_node->state); + } + + spin_unlock_bh(&vport->mac_list_lock); + + return 0; +} + int hclge_dbg_run_cmd(struct hnae3_handle *handle, const char *cmd_buf) { #define DUMP_REG "dump reg" @@ -1485,6 +1528,14 @@ int hclge_dbg_run_cmd(struct hnae3_handle *handle, const char *cmd_buf) } else if (strncmp(cmd_buf, "dump qs shaper", 14) == 0) { hclge_dbg_dump_qs_shaper(hdev, &cmd_buf[sizeof("dump qs shaper")]); + } else if (strncmp(cmd_buf, "dump uc mac list", 16) == 0) { + hclge_dbg_dump_mac_list(hdev, + &cmd_buf[sizeof("dump uc mac list")], + true); + } else if (strncmp(cmd_buf, "dump mc mac list", 16) == 0) { + hclge_dbg_dump_mac_list(hdev, + &cmd_buf[sizeof("dump mc mac list")], + false); } else { dev_info(&hdev->pdev->dev, "unknown command\n"); return -EINVAL; From patchwork Fri Apr 24 02:23:11 2020 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: tanhuazhong X-Patchwork-Id: 1276108 X-Patchwork-Delegate: davem@davemloft.net Return-Path: X-Original-To: patchwork-incoming-netdev@ozlabs.org Delivered-To: patchwork-incoming-netdev@ozlabs.org Authentication-Results: ozlabs.org; spf=pass (sender SPF authorized) smtp.mailfrom=vger.kernel.org (client-ip=23.128.96.18; helo=vger.kernel.org; envelope-from=netdev-owner@vger.kernel.org; receiver=) Authentication-Results: ozlabs.org; dmarc=none (p=none dis=none) header.from=huawei.com Received: from vger.kernel.org (vger.kernel.org [23.128.96.18]) by ozlabs.org (Postfix) with ESMTP id 497dKz68FHz9sSv for ; Fri, 24 Apr 2020 12:24:55 +1000 (AEST) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S1726399AbgDXCY3 (ORCPT ); Thu, 23 Apr 2020 22:24:29 -0400 Received: from szxga04-in.huawei.com ([45.249.212.190]:2885 "EHLO huawei.com" rhost-flags-OK-OK-OK-FAIL) by vger.kernel.org with ESMTP id S1726027AbgDXCY2 (ORCPT ); Thu, 23 Apr 2020 22:24:28 -0400 Received: from DGGEMS409-HUB.china.huawei.com (unknown [172.30.72.60]) by Forcepoint Email with ESMTP id 9FB14EB905CBE5D341F9; Fri, 24 Apr 2020 10:24:24 +0800 (CST) Received: from localhost.localdomain (10.69.192.56) by DGGEMS409-HUB.china.huawei.com (10.3.19.209) with Microsoft SMTP Server id 14.3.487.0; Fri, 24 Apr 2020 10:24:17 +0800 From: Huazhong Tan To: CC: , , , , , , Jian Shen , Huazhong Tan Subject: [PATCH net-next 6/8] net: hns3: refactor the promisc mode setting Date: Fri, 24 Apr 2020 10:23:11 +0800 Message-ID: <1587694993-25183-7-git-send-email-tanhuazhong@huawei.com> X-Mailer: git-send-email 2.7.4 In-Reply-To: <1587694993-25183-1-git-send-email-tanhuazhong@huawei.com> References: <1587694993-25183-1-git-send-email-tanhuazhong@huawei.com> MIME-Version: 1.0 X-Originating-IP: [10.69.192.56] X-CFilter-Loop: Reflected Sender: netdev-owner@vger.kernel.org Precedence: bulk List-ID: X-Mailing-List: netdev@vger.kernel.org From: Jian Shen As the HNS3 driver doesn't update the MAC address directly in function hns3_set_rx_mode() now, it can't know whether the MAC table is full from __dev_uc_sync() and __dev_mc_sync(), so it's senseless to handle the overflow promisc here. 
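The replacement scheme defers the real work to the periodic service task: the rx-mode path only marks that the promisc configuration changed, and the service task later merges the user-requested flags with the overflow state and writes them to hardware, leaving the flag set on failure so the write is retried. A rough, standalone illustration of that request-then-sync pattern (plain C with simplified stand-in names; not the driver's code):

#include <stdbool.h>
#include <stdio.h>

#define UPE 0x1	/* unicast promisc requested (stand-in for HNAE3_UPE) */
#define MPE 0x2	/* multicast promisc requested (stand-in for HNAE3_MPE) */

struct dev_state {
	bool promisc_changed;		/* stand-in for the PROMISC_CHANGED state bit */
	unsigned char netdev_flags;	/* flags requested from the stack */
	unsigned char overflow_flags;	/* flags forced by MAC table overflow */
};

/* fast path (set_rx_mode): only record that something changed */
static void request_update_promisc_mode(struct dev_state *s)
{
	s->promisc_changed = true;
}

/* pretend hardware write; always succeeds in this model */
static int hw_set_promisc(bool uc, bool mc)
{
	printf("hw promisc: uc=%d mc=%d\n", uc, mc);
	return 0;
}

/* periodic service task: merge the request with the overflow state and apply */
static void sync_promisc_mode(struct dev_state *s)
{
	unsigned char flags;

	if (!s->promisc_changed)
		return;

	flags = s->netdev_flags | s->overflow_flags;
	if (!hw_set_promisc(flags & UPE, flags & MPE))
		s->promisc_changed = false;	/* stays set on failure, so the next pass retries */
}

int main(void)
{
	struct dev_state s = { .netdev_flags = 0, .overflow_flags = UPE };

	request_update_promisc_mode(&s);	/* e.g. from set_rx_mode */
	sync_promisc_mode(&s);			/* from the periodic task */
	return 0;
}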
This patch removes the handle of overflow promisc from function hns3_set_rx_mode(), and updates the promisc mode in the service task. Signed-off-by: Jian Shen Signed-off-by: Huazhong Tan --- drivers/net/ethernet/hisilicon/hns3/hnae3.h | 3 + drivers/net/ethernet/hisilicon/hns3/hns3_enet.c | 42 +++------- drivers/net/ethernet/hisilicon/hns3/hns3_enet.h | 1 + drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c | 2 +- .../ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 89 ++++++++++++++++++---- .../ethernet/hisilicon/hns3/hns3pf/hclge_main.h | 4 + .../ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c | 26 +++++++ .../ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h | 1 + 8 files changed, 122 insertions(+), 46 deletions(-) diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h index 5587605..a56f8d6 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h +++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h @@ -270,6 +270,8 @@ struct hnae3_ae_dev { * Set loopback * set_promisc_mode * Set promisc mode + * request_update_promisc_mode + * request to hclge(vf) to update promisc mode * set_mtu() * set mtu * get_pauseparam() @@ -408,6 +410,7 @@ struct hnae3_ae_ops { int (*set_promisc_mode)(struct hnae3_handle *handle, bool en_uc_pmc, bool en_mc_pmc); + void (*request_update_promisc_mode)(struct hnae3_handle *handle); int (*set_mtu)(struct hnae3_handle *handle, int new_mtu); void (*get_pauseparam)(struct hnae3_handle *handle, diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c index 341e8b5..6b9535c 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c @@ -601,34 +601,25 @@ static void hns3_nic_set_rx_mode(struct net_device *netdev) { struct hnae3_handle *h = hns3_get_handle(netdev); u8 new_flags; - int ret; new_flags = hns3_get_netdev_flags(netdev); - ret = __dev_uc_sync(netdev, hns3_nic_uc_sync, hns3_nic_uc_unsync); - if (ret) { - netdev_err(netdev, "sync uc address fail\n"); - if (ret == -ENOSPC) - new_flags |= HNAE3_OVERFLOW_UPE; - } - - if (netdev->flags & IFF_MULTICAST) { - ret = __dev_mc_sync(netdev, hns3_nic_mc_sync, - hns3_nic_mc_unsync); - if (ret) { - netdev_err(netdev, "sync mc address fail\n"); - if (ret == -ENOSPC) - new_flags |= HNAE3_OVERFLOW_MPE; - } - } + __dev_uc_sync(netdev, hns3_nic_uc_sync, hns3_nic_uc_unsync); + __dev_mc_sync(netdev, hns3_nic_mc_sync, hns3_nic_mc_unsync); /* User mode Promisc mode enable and vlan filtering is disabled to - * let all packets in. MAC-VLAN Table overflow Promisc enabled and - * vlan fitering is enabled + * let all packets in. */ - hns3_enable_vlan_filter(netdev, new_flags & HNAE3_VLAN_FLTR); h->netdev_flags = new_flags; - hns3_update_promisc_mode(netdev, new_flags); + hns3_request_update_promisc_mode(h); +} + +void hns3_request_update_promisc_mode(struct hnae3_handle *handle) +{ + const struct hnae3_ae_ops *ops = handle->ae_algo->ops; + + if (ops->request_update_promisc_mode) + ops->request_update_promisc_mode(handle); } int hns3_update_promisc_mode(struct net_device *netdev, u8 promisc_flags) @@ -4467,15 +4458,6 @@ static int hns3_reset_notify_init_enet(struct hnae3_handle *handle) static int hns3_reset_notify_restore_enet(struct hnae3_handle *handle) { struct net_device *netdev = handle->kinfo.netdev; - bool vlan_filter_enable; - int ret; - - ret = hns3_update_promisc_mode(netdev, handle->netdev_flags); - if (ret) - return ret; - - vlan_filter_enable = netdev->flags & IFF_PROMISC ? 
false : true; - hns3_enable_vlan_filter(netdev, vlan_filter_enable); if (handle->ae_algo->ops->restore_vlan_table) handle->ae_algo->ops->restore_vlan_table(handle); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h index 4b3f0ab..53bc0ed 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h @@ -658,6 +658,7 @@ void hns3_set_vector_coalesce_rl(struct hns3_enet_tqp_vector *tqp_vector, void hns3_enable_vlan_filter(struct net_device *netdev, bool enable); int hns3_update_promisc_mode(struct net_device *netdev, u8 promisc_flags); +void hns3_request_update_promisc_mode(struct hnae3_handle *handle); #ifdef CONFIG_HNS3_DCB void hns3_dcbnl_setup(struct hnae3_handle *handle); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c index 6a0734b..4d9c85f 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c @@ -99,7 +99,7 @@ static int hns3_lp_setup(struct net_device *ndev, enum hnae3_loop loop, bool en) h->ae_algo->ops->set_promisc_mode(h, true, true); } else { /* recover promisc mode before loopback test */ - hns3_update_promisc_mode(ndev, h->netdev_flags); + hns3_request_update_promisc_mode(h); vlan_filter_enable = ndev->flags & IFF_PROMISC ? false : true; hns3_enable_vlan_filter(ndev, vlan_filter_enable); } diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index c3205ae..71ff0fa 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -69,6 +69,7 @@ static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev, static int hclge_set_default_loopback(struct hclge_dev *hdev); static void hclge_sync_mac_table(struct hclge_dev *hdev); +static void hclge_sync_promisc_mode(struct hclge_dev *hdev); static struct hnae3_ae_algo ae_algo; @@ -3975,6 +3976,7 @@ static void hclge_periodic_service_task(struct hclge_dev *hdev) */ hclge_update_link_status(hdev); hclge_sync_mac_table(hdev); + hclge_sync_promisc_mode(hdev); if (time_is_after_jiffies(hdev->last_serv_processed + HZ)) { delta = jiffies - hdev->last_serv_processed; @@ -4724,7 +4726,8 @@ static int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev, ret = hclge_cmd_send(&hdev->hw, &desc, 1); if (ret) dev_err(&hdev->pdev->dev, - "Set promisc mode fail, status is %d.\n", ret); + "failed to set vport %d promisc mode, ret = %d.\n", + param->vf_id, ret); return ret; } @@ -4774,6 +4777,14 @@ static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc, en_bc_pmc); } +static void hclge_request_update_promisc_mode(struct hnae3_handle *handle) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + + set_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state); +} + static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode) { struct hclge_get_fd_mode_cmd *req; @@ -6972,17 +6983,11 @@ static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport, } if (op == HCLGE_MAC_VLAN_ADD) { - if ((!resp_code) || (resp_code == 1)) { + if (!resp_code || resp_code == 1) return 0; - } else if (resp_code == HCLGE_ADD_UC_OVERFLOW) { - dev_err(&hdev->pdev->dev, - "add mac addr failed for uc_overflow.\n"); - return -ENOSPC; - } else if (resp_code == HCLGE_ADD_MC_OVERFLOW) { - dev_err(&hdev->pdev->dev, - "add 
mac addr failed for mc_overflow.\n"); + else if (resp_code == HCLGE_ADD_UC_OVERFLOW || + resp_code == HCLGE_ADD_MC_OVERFLOW) return -ENOSPC; - } dev_err(&hdev->pdev->dev, "add mac addr failed for undefined, code=%u.\n", @@ -7448,8 +7453,9 @@ int hclge_add_uc_addr_common(struct hclge_vport *vport, return ret; } - dev_err(&hdev->pdev->dev, "UC MAC table full(%u)\n", - hdev->priv_umv_size); + if (!(vport->overflow_promisc_flags & HNAE3_OVERFLOW_UPE)) + dev_err(&hdev->pdev->dev, "UC MAC table full(%u)\n", + hdev->priv_umv_size); return -ENOSPC; } @@ -7543,7 +7549,9 @@ int hclge_add_mc_addr_common(struct hclge_vport *vport, return status; status = hclge_add_mac_vlan_tbl(vport, &req, desc); - if (status == -ENOSPC) + /* if already overflow, not to print each time */ + if (status == -ENOSPC && + !(vport->overflow_promisc_flags & HNAE3_OVERFLOW_MPE)) dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n"); return status; @@ -7638,12 +7646,16 @@ static void hclge_unsync_vport_mac_list(struct hclge_vport *vport, } } -static void hclge_sync_from_add_list(struct list_head *add_list, +static bool hclge_sync_from_add_list(struct list_head *add_list, struct list_head *mac_list) { struct hclge_mac_node *mac_node, *tmp, *new_node; + bool all_added = true; list_for_each_entry_safe(mac_node, tmp, add_list, node) { + if (mac_node->state == HCLGE_MAC_TO_ADD) + all_added = false; + /* if the mac address from tmp_add_list is not in the * uc/mc_mac_list, it means have received a TO_DEL request * during the time window of adding the mac address into mac @@ -7666,6 +7678,8 @@ static void hclge_sync_from_add_list(struct list_head *add_list, kfree(mac_node); } } + + return all_added; } static void hclge_sync_from_del_list(struct list_head *del_list, @@ -7693,12 +7707,30 @@ static void hclge_sync_from_del_list(struct list_head *del_list, } } +static void hclge_update_overflow_flags(struct hclge_vport *vport, + enum HCLGE_MAC_ADDR_TYPE mac_type, + bool is_all_added) +{ + if (mac_type == HCLGE_MAC_ADDR_UC) { + if (is_all_added) + vport->overflow_promisc_flags &= ~HNAE3_OVERFLOW_UPE; + else + vport->overflow_promisc_flags |= HNAE3_OVERFLOW_UPE; + } else { + if (is_all_added) + vport->overflow_promisc_flags &= ~HNAE3_OVERFLOW_MPE; + else + vport->overflow_promisc_flags |= HNAE3_OVERFLOW_MPE; + } +} + static void hclge_sync_vport_mac_table(struct hclge_vport *vport, enum HCLGE_MAC_ADDR_TYPE mac_type) { struct hclge_mac_node *mac_node, *tmp, *new_node; struct list_head tmp_add_list, tmp_del_list; struct list_head *list; + bool all_added; INIT_LIST_HEAD(&tmp_add_list); INIT_LIST_HEAD(&tmp_del_list); @@ -7752,9 +7784,11 @@ static void hclge_sync_vport_mac_table(struct hclge_vport *vport, spin_lock_bh(&vport->mac_list_lock); hclge_sync_from_del_list(&tmp_del_list, list); - hclge_sync_from_add_list(&tmp_add_list, list); + all_added = hclge_sync_from_add_list(&tmp_add_list, list); spin_unlock_bh(&vport->mac_list_lock); + + hclge_update_overflow_flags(vport, mac_type, all_added); } static bool hclge_need_sync_mac_table(struct hclge_vport *vport) @@ -11052,6 +11086,30 @@ static int hclge_gro_en(struct hnae3_handle *handle, bool enable) return hclge_config_gro(hdev, enable); } +static void hclge_sync_promisc_mode(struct hclge_dev *hdev) +{ + struct hclge_vport *vport = &hdev->vport[0]; + struct hnae3_handle *handle = &vport->nic; + u8 tmp_flags = 0; + int ret; + + if (vport->last_promisc_flags != vport->overflow_promisc_flags) { + set_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state); + vport->last_promisc_flags = 
vport->overflow_promisc_flags; + } + + if (test_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state)) { + tmp_flags = handle->netdev_flags | vport->last_promisc_flags; + ret = hclge_set_promisc_mode(handle, tmp_flags & HNAE3_UPE, + tmp_flags & HNAE3_MPE); + if (!ret) { + clear_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state); + hclge_enable_vlan_filter(handle, + tmp_flags & HNAE3_VLAN_FLTR); + } + } +} + static const struct hnae3_ae_ops hclge_ops = { .init_ae_dev = hclge_init_ae_dev, .uninit_ae_dev = hclge_uninit_ae_dev, @@ -11064,6 +11122,7 @@ static const struct hnae3_ae_ops hclge_ops = { .get_vector = hclge_get_vector, .put_vector = hclge_put_vector, .set_promisc_mode = hclge_set_promisc_mode, + .request_update_promisc_mode = hclge_request_update_promisc_mode, .set_loopback = hclge_set_loopback, .start = hclge_ae_start, .stop = hclge_ae_stop, diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h index 5fcbc3d..85180f4 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h @@ -217,6 +217,7 @@ enum HCLGE_DEV_STATE { HCLGE_STATE_STATISTICS_UPDATING, HCLGE_STATE_CMD_DISABLE, HCLGE_STATE_LINK_UPDATING, + HCLGE_STATE_PROMISC_CHANGED, HCLGE_STATE_RST_FAIL, HCLGE_STATE_MAX }; @@ -931,6 +932,9 @@ struct hclge_vport { u32 mps; /* Max packet size */ struct hclge_vf_info vf_info; + u8 overflow_promisc_flags; + u8 last_promisc_flags; + spinlock_t mac_list_lock; /* protect mac address need to add/detele */ struct list_head uc_mac_list; /* Store VF unicast table */ struct list_head mc_mac_list; /* Store VF multicast table */ diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c index 05d485a..fea197f 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c @@ -1164,6 +1164,27 @@ static int hclgevf_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc, en_bc_pmc); } +static void hclgevf_request_update_promisc_mode(struct hnae3_handle *handle) +{ + struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); + + set_bit(HCLGEVF_STATE_PROMISC_CHANGED, &hdev->state); +} + +static void hclgevf_sync_promisc_mode(struct hclgevf_dev *hdev) +{ + struct hnae3_handle *handle = &hdev->nic; + bool en_uc_pmc = handle->netdev_flags & HNAE3_UPE; + bool en_mc_pmc = handle->netdev_flags & HNAE3_MPE; + int ret; + + if (test_bit(HCLGEVF_STATE_PROMISC_CHANGED, &hdev->state)) { + ret = hclgevf_set_promisc_mode(handle, en_uc_pmc, en_mc_pmc); + if (!ret) + clear_bit(HCLGEVF_STATE_PROMISC_CHANGED, &hdev->state); + } +} + static int hclgevf_tqp_enable(struct hclgevf_dev *hdev, unsigned int tqp_id, int stream_id, bool enable) { @@ -2203,6 +2224,8 @@ static void hclgevf_periodic_service_task(struct hclgevf_dev *hdev) hclgevf_sync_mac_table(hdev); + hclgevf_sync_promisc_mode(hdev); + hdev->last_serv_processed = jiffies; out: @@ -2986,6 +3009,8 @@ static int hclgevf_reset_hdev(struct hclgevf_dev *hdev) return ret; } + set_bit(HCLGEVF_STATE_PROMISC_CHANGED, &hdev->state); + dev_info(&hdev->pdev->dev, "Reset done\n"); return 0; @@ -3470,6 +3495,7 @@ static const struct hnae3_ae_ops hclgevf_ops = { .set_timer_task = hclgevf_set_timer_task, .get_link_mode = hclgevf_get_link_mode, .set_promisc_mode = hclgevf_set_promisc_mode, + .request_update_promisc_mode = hclgevf_request_update_promisc_mode, }; static struct hnae3_ae_algo ae_algovf = { diff 
--git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h index 0222d9b..f19583c 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h @@ -148,6 +148,7 @@ enum hclgevf_states { HCLGEVF_STATE_MBX_HANDLING, HCLGEVF_STATE_CMD_DISABLE, HCLGEVF_STATE_LINK_UPDATING, + HCLGEVF_STATE_PROMISC_CHANGED, HCLGEVF_STATE_RST_FAIL, }; From patchwork Fri Apr 24 02:23:12 2020 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: tanhuazhong X-Patchwork-Id: 1276110 X-Patchwork-Delegate: davem@davemloft.net Return-Path: X-Original-To: patchwork-incoming-netdev@ozlabs.org Delivered-To: patchwork-incoming-netdev@ozlabs.org Authentication-Results: ozlabs.org; spf=pass (sender SPF authorized) smtp.mailfrom=vger.kernel.org (client-ip=23.128.96.18; helo=vger.kernel.org; envelope-from=netdev-owner@vger.kernel.org; receiver=) Authentication-Results: ozlabs.org; dmarc=none (p=none dis=none) header.from=huawei.com Received: from vger.kernel.org (vger.kernel.org [23.128.96.18]) by ozlabs.org (Postfix) with ESMTP id 497dL80XXkz9sSv for ; Fri, 24 Apr 2020 12:25:04 +1000 (AEST) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S1726316AbgDXCY2 (ORCPT ); Thu, 23 Apr 2020 22:24:28 -0400 Received: from szxga04-in.huawei.com ([45.249.212.190]:2888 "EHLO huawei.com" rhost-flags-OK-OK-OK-FAIL) by vger.kernel.org with ESMTP id S1726008AbgDXCY2 (ORCPT ); Thu, 23 Apr 2020 22:24:28 -0400 Received: from DGGEMS409-HUB.china.huawei.com (unknown [172.30.72.60]) by Forcepoint Email with ESMTP id B29F8FC5FC49E73D391A; Fri, 24 Apr 2020 10:24:24 +0800 (CST) Received: from localhost.localdomain (10.69.192.56) by DGGEMS409-HUB.china.huawei.com (10.3.19.209) with Microsoft SMTP Server id 14.3.487.0; Fri, 24 Apr 2020 10:24:18 +0800 From: Huazhong Tan To: CC: , , , , , , Jian Shen , Huazhong Tan Subject: [PATCH net-next 7/8] net: hns3: use mutex vport_lock instead of mutex umv_lock Date: Fri, 24 Apr 2020 10:23:12 +0800 Message-ID: <1587694993-25183-8-git-send-email-tanhuazhong@huawei.com> X-Mailer: git-send-email 2.7.4 In-Reply-To: <1587694993-25183-1-git-send-email-tanhuazhong@huawei.com> References: <1587694993-25183-1-git-send-email-tanhuazhong@huawei.com> MIME-Version: 1.0 X-Originating-IP: [10.69.192.56] X-CFilter-Loop: Reflected Sender: netdev-owner@vger.kernel.org Precedence: bulk List-ID: X-Mailing-List: netdev@vger.kernel.org From: Jian Shen Currently, the driver uses mutex umv_lock to protect the variable vport->share_umv_size. And there is already a mutex vport_lock defined in the driver, which is designed to protect the resources of the vport. So we can use vport_lock instead of umv_lock. Furthermore, share_umv_size is left unprotected in the time window between checking the UMV space and doing the MAC configuration in the function hclge_add_uc_addr_common(), so the locked region should be extended. This patch uses mutex vport_lock instead of mutex umv_lock to protect share_umv_size, and adjusts the mutex's range.
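To make the race concrete: with the check and the update under one lock, two callers cannot both pass the space check when only a single entry is left. A minimal userspace model of that check-then-update section (plain C with pthreads; the counters and sizes below are made-up stand-ins for used_umv_num, priv_umv_size and share_umv_size, not the driver's code):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t vport_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned int used_umv_num;	/* entries used by this vport */
static unsigned int priv_umv_size = 2;	/* private quota, value made up */
static unsigned int share_umv_size = 1;	/* shared pool, value made up */

static bool umv_space_full(void)
{
	return used_umv_num >= priv_umv_size && share_umv_size == 0;
}

/* consume one entry: private quota first, then the shared pool */
static void take_umv_entry(void)
{
	if (used_umv_num >= priv_umv_size)
		share_umv_size--;
	used_umv_num++;
}

/* check and update under the same lock, so concurrent adders cannot
 * both pass the check for the last remaining entry
 */
static int add_uc_entry(void)
{
	int ret = -1;

	pthread_mutex_lock(&vport_lock);
	if (!umv_space_full()) {
		take_umv_entry();	/* the driver also writes the MAC VLAN table here */
		ret = 0;
	}
	pthread_mutex_unlock(&vport_lock);
	return ret;
}

int main(void)
{
	for (int i = 0; i < 5; i++)
		printf("add %d -> %s\n", i, add_uc_entry() ? "full" : "ok");
	return 0;
}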
Signed-off-by: Jian Shen Signed-off-by: Huazhong Tan --- .../ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 31 +++++++++++++--------- .../ethernet/hisilicon/hns3/hns3pf/hclge_main.h | 1 - 2 files changed, 19 insertions(+), 13 deletions(-) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index 71ff0fa..177ef5e 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -7250,7 +7250,6 @@ static int hclge_init_umv_space(struct hclge_dev *hdev) "failed to alloc umv space, want %u, get %u\n", hdev->wanted_umv_size, allocated_size); - mutex_init(&hdev->umv_mutex); hdev->max_umv_size = allocated_size; hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_alloc_vport + 1); hdev->share_umv_size = hdev->priv_umv_size + @@ -7269,21 +7268,25 @@ static void hclge_reset_umv_space(struct hclge_dev *hdev) vport->used_umv_num = 0; } - mutex_lock(&hdev->umv_mutex); + mutex_lock(&hdev->vport_lock); hdev->share_umv_size = hdev->priv_umv_size + hdev->max_umv_size % (hdev->num_alloc_vport + 1); - mutex_unlock(&hdev->umv_mutex); + mutex_unlock(&hdev->vport_lock); } -static bool hclge_is_umv_space_full(struct hclge_vport *vport) +static bool hclge_is_umv_space_full(struct hclge_vport *vport, bool need_lock) { struct hclge_dev *hdev = vport->back; bool is_full; - mutex_lock(&hdev->umv_mutex); + if (need_lock) + mutex_lock(&hdev->vport_lock); + is_full = (vport->used_umv_num >= hdev->priv_umv_size && hdev->share_umv_size == 0); - mutex_unlock(&hdev->umv_mutex); + + if (need_lock) + mutex_unlock(&hdev->vport_lock); return is_full; } @@ -7292,7 +7295,6 @@ static void hclge_update_umv_space(struct hclge_vport *vport, bool is_free) { struct hclge_dev *hdev = vport->back; - mutex_lock(&hdev->umv_mutex); if (is_free) { if (vport->used_umv_num > hdev->priv_umv_size) hdev->share_umv_size++; @@ -7305,7 +7307,6 @@ static void hclge_update_umv_space(struct hclge_vport *vport, bool is_free) hdev->share_umv_size--; vport->used_umv_num++; } - mutex_unlock(&hdev->umv_mutex); } static struct hclge_mac_node *hclge_find_mac_node(struct list_head *list, @@ -7446,12 +7447,15 @@ int hclge_add_uc_addr_common(struct hclge_vport *vport, */ ret = hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false); if (ret == -ENOENT) { - if (!hclge_is_umv_space_full(vport)) { + mutex_lock(&hdev->vport_lock); + if (!hclge_is_umv_space_full(vport, false)) { ret = hclge_add_mac_vlan_tbl(vport, &req, NULL); if (!ret) hclge_update_umv_space(vport, false); + mutex_unlock(&hdev->vport_lock); return ret; } + mutex_unlock(&hdev->vport_lock); if (!(vport->overflow_promisc_flags & HNAE3_OVERFLOW_UPE)) dev_err(&hdev->pdev->dev, "UC MAC table full(%u)\n", @@ -7503,10 +7507,13 @@ int hclge_rm_uc_addr_common(struct hclge_vport *vport, hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0); hclge_prepare_mac_addr(&req, addr, false); ret = hclge_remove_mac_vlan_tbl(vport, &req); - if (!ret) + if (!ret) { + mutex_lock(&hdev->vport_lock); hclge_update_umv_space(vport, true); - else if (ret == -ENOENT) + mutex_unlock(&hdev->vport_lock); + } else if (ret == -ENOENT) { ret = 0; + } return ret; } @@ -10163,7 +10170,7 @@ static int hclge_set_vf_spoofchk(struct hnae3_handle *handle, int vf, dev_warn(&hdev->pdev->dev, "vf %d vlan table is full, enable spoof check may cause its packet send fail\n", vf); - else if (enable && hclge_is_umv_space_full(vport)) + else if (enable && hclge_is_umv_space_full(vport, true)) 
dev_warn(&hdev->pdev->dev, "vf %d mac table is full, enable spoof check may cause its packet send fail\n", vf); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h index 85180f4..8e69651 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h @@ -831,7 +831,6 @@ struct hclge_dev { u16 priv_umv_size; /* unicast mac vlan space shared by PF and its VFs */ u16 share_umv_size; - struct mutex umv_mutex; /* protect share_umv_size */ DECLARE_KFIFO(mac_tnl_log, struct hclge_mac_tnl_stats, HCLGE_MAC_TNL_LOG_SIZE); From patchwork Fri Apr 24 02:23:13 2020 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: tanhuazhong X-Patchwork-Id: 1276105 X-Patchwork-Delegate: davem@davemloft.net Return-Path: X-Original-To: patchwork-incoming-netdev@ozlabs.org Delivered-To: patchwork-incoming-netdev@ozlabs.org Authentication-Results: ozlabs.org; spf=pass (sender SPF authorized) smtp.mailfrom=vger.kernel.org (client-ip=23.128.96.18; helo=vger.kernel.org; envelope-from=netdev-owner@vger.kernel.org; receiver=) Authentication-Results: ozlabs.org; dmarc=none (p=none dis=none) header.from=huawei.com Received: from vger.kernel.org (vger.kernel.org [23.128.96.18]) by ozlabs.org (Postfix) with ESMTP id 497dKt1b4Sz9sSt for ; Fri, 24 Apr 2020 12:24:50 +1000 (AEST) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S1726479AbgDXCYg (ORCPT ); Thu, 23 Apr 2020 22:24:36 -0400 Received: from szxga04-in.huawei.com ([45.249.212.190]:2887 "EHLO huawei.com" rhost-flags-OK-OK-OK-FAIL) by vger.kernel.org with ESMTP id S1726166AbgDXCYc (ORCPT ); Thu, 23 Apr 2020 22:24:32 -0400 Received: from DGGEMS409-HUB.china.huawei.com (unknown [172.30.72.60]) by Forcepoint Email with ESMTP id AB83C554A8AC514D47CD; Fri, 24 Apr 2020 10:24:24 +0800 (CST) Received: from localhost.localdomain (10.69.192.56) by DGGEMS409-HUB.china.huawei.com (10.3.19.209) with Microsoft SMTP Server id 14.3.487.0; Fri, 24 Apr 2020 10:24:18 +0800 From: Huazhong Tan To: CC: , , , , , , Jian Shen , Huazhong Tan Subject: [PATCH net-next 8/8] net: hns3: optimize the filter table entries handling when resetting Date: Fri, 24 Apr 2020 10:23:13 +0800 Message-ID: <1587694993-25183-9-git-send-email-tanhuazhong@huawei.com> X-Mailer: git-send-email 2.7.4 In-Reply-To: <1587694993-25183-1-git-send-email-tanhuazhong@huawei.com> References: <1587694993-25183-1-git-send-email-tanhuazhong@huawei.com> MIME-Version: 1.0 X-Originating-IP: [10.69.192.56] X-CFilter-Loop: Reflected Sender: netdev-owner@vger.kernel.org Precedence: bulk List-ID: X-Mailing-List: netdev@vger.kernel.org From: Jian Shen Currently, the PF driver removes all (including its VFs') MAC/VLAN flow director table entries when resetting, and restores them after reset completed. In fact, the hardware will clear all table entries only in IMP reset and global reset. So driver only needs to restore the table entries in these cases, and needs do nothing when PF reset, FLR or other function level reset. This patch optimizes it by removing unnecessary table entries clear and restoring handling in the reset flow, and doing the restoring after reset completed. 
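A rough, standalone sketch of the resulting decision — restore the filter tables only for the reset levels that actually wipe them in hardware (plain C with made-up names; not the driver's code):

#include <stdbool.h>
#include <stdio.h>

enum reset_type {	/* simplified stand-ins for the HNAE3_*_RESET levels */
	RESET_IMP,
	RESET_GLOBAL,
	RESET_PF_FUNC,
	RESET_FLR,
};

/* only IMP and global reset clear the MAC/VLAN/flow-director tables in
 * hardware, so only they require a software restore afterwards
 */
static bool hw_tables_cleared(enum reset_type type)
{
	return type == RESET_IMP || type == RESET_GLOBAL;
}

static void reset_rebuild(enum reset_type type)
{
	if (hw_tables_cleared(type)) {
		/* the driver marks every vport in vport_config_block and
		 * resets the UMV space here, then restores the tables when
		 * the vport starts again
		 */
		printf("reset %d: restore MAC/VLAN/FD tables\n", type);
	} else {
		printf("reset %d: tables survived, nothing to restore\n", type);
	}
}

int main(void)
{
	reset_rebuild(RESET_GLOBAL);
	reset_rebuild(RESET_PF_FUNC);
	return 0;
}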
Signed-off-by: Jian Shen Signed-off-by: Huazhong Tan --- drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h | 5 ++ drivers/net/ethernet/hisilicon/hns3/hnae3.h | 5 -- drivers/net/ethernet/hisilicon/hns3/hns3_enet.c | 33 -------- drivers/net/ethernet/hisilicon/hns3/hns3_enet.h | 9 --- .../ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 89 +++++++++++----------- .../ethernet/hisilicon/hns3/hns3pf/hclge_main.h | 1 + .../net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c | 28 ++++++- .../ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c | 29 ++++--- 8 files changed, 94 insertions(+), 105 deletions(-) diff --git a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h index 948e67e..21a7361 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h +++ b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h @@ -45,6 +45,7 @@ enum HCLGE_MBX_OPCODE { HCLGE_MBX_GET_MEDIA_TYPE, /* (VF -> PF) get media type */ HCLGE_MBX_PUSH_PROMISC_INFO, /* (PF -> VF) push vf promisc info */ HCLGE_MBX_VF_UNINIT, /* (VF -> PF) vf is unintializing */ + HCLGE_MBX_HANDLE_VF_TBL, /* (VF -> PF) store/clear hw table */ HCLGE_MBX_GET_VF_FLR_STATUS = 200, /* (M7 -> PF) get vf flr status */ HCLGE_MBX_PUSH_LINK_STATUS, /* (M7 -> PF) get port link status */ @@ -70,6 +71,10 @@ enum hclge_mbx_vlan_cfg_subcode { HCLGE_MBX_GET_PORT_BASE_VLAN_STATE, /* get port based vlan state */ }; +enum hclge_mbx_tbl_cfg_subcode { + HCLGE_MBX_VPORT_LIST_CLEAR, +}; + #define HCLGE_MBX_MAX_MSG_SIZE 14 #define HCLGE_MBX_MAX_RESP_DATA_SIZE 8U #define HCLGE_MBX_MAX_RING_CHAIN_PARAM_NUM 4 diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h index a56f8d6..6291aa9 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h +++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h @@ -233,7 +233,6 @@ struct hnae3_ae_dev { struct list_head node; u32 flag; unsigned long hw_err_reset_req; - enum hnae3_reset_type reset_type; void *priv; }; @@ -356,8 +355,6 @@ struct hnae3_ae_dev { * Set vlan filter config of Ports * set_vf_vlan_filter() * Set vlan filter config of vf - * restore_vlan_table() - * Restore vlan filter entries after reset * enable_hw_strip_rxvtag() * Enable/disable hardware strip vlan tag of packets received * set_gro_en @@ -528,7 +525,6 @@ struct hnae3_ae_ops { struct ethtool_rxnfc *cmd); int (*get_fd_all_rules)(struct hnae3_handle *handle, struct ethtool_rxnfc *cmd, u32 *rule_locs); - int (*restore_fd_rules)(struct hnae3_handle *handle); void (*enable_fd)(struct hnae3_handle *handle, bool enable); int (*add_arfs_entry)(struct hnae3_handle *handle, u16 queue_id, u16 flow_id, struct flow_keys *fkeys); @@ -542,7 +538,6 @@ struct hnae3_ae_ops { void (*set_timer_task)(struct hnae3_handle *handle, bool enable); int (*mac_connect_phy)(struct hnae3_handle *handle); void (*mac_disconnect_phy)(struct hnae3_handle *handle); - void (*restore_vlan_table)(struct hnae3_handle *handle); int (*get_vf_config)(struct hnae3_handle *handle, int vf, struct ifla_vf_info *ivf); int (*set_vf_link_state)(struct hnae3_handle *handle, int vf, diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c index 6b9535c..c79d6a3 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c @@ -2102,7 +2102,6 @@ static int hns3_probe(struct pci_dev *pdev, const struct pci_device_id *ent) ae_dev->pdev = pdev; ae_dev->flag = ent->driver_data; - ae_dev->reset_type = HNAE3_NONE_RESET; hns3_get_dev_capability(pdev, 
ae_dev); pci_set_drvdata(pdev, ae_dev); @@ -3936,17 +3935,6 @@ static void hns3_uninit_phy(struct net_device *netdev) h->ae_algo->ops->mac_disconnect_phy(h); } -static int hns3_restore_fd_rules(struct net_device *netdev) -{ - struct hnae3_handle *h = hns3_get_handle(netdev); - int ret = 0; - - if (h->ae_algo->ops->restore_fd_rules) - ret = h->ae_algo->ops->restore_fd_rules(h); - - return ret; -} - static void hns3_del_all_fd_rules(struct net_device *netdev, bool clear_list) { struct hnae3_handle *h = hns3_get_handle(netdev); @@ -4346,7 +4334,6 @@ static void hns3_restore_coal(struct hns3_nic_priv *priv) static int hns3_reset_notify_down_enet(struct hnae3_handle *handle) { - struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev); struct hnae3_knic_private_info *kinfo = &handle->kinfo; struct net_device *ndev = kinfo->netdev; struct hns3_nic_priv *priv = netdev_priv(ndev); @@ -4354,13 +4341,6 @@ static int hns3_reset_notify_down_enet(struct hnae3_handle *handle) if (test_and_set_bit(HNS3_NIC_STATE_RESETTING, &priv->state)) return 0; - /* it is cumbersome for hardware to pick-and-choose entries for deletion - * from table space. Hence, for function reset software intervention is - * required to delete the entries - */ - if (hns3_dev_ongoing_func_reset(ae_dev)) - hns3_del_all_fd_rules(ndev, false); - if (!netif_running(ndev)) return 0; @@ -4455,16 +4435,6 @@ static int hns3_reset_notify_init_enet(struct hnae3_handle *handle) return ret; } -static int hns3_reset_notify_restore_enet(struct hnae3_handle *handle) -{ - struct net_device *netdev = handle->kinfo.netdev; - - if (handle->ae_algo->ops->restore_vlan_table) - handle->ae_algo->ops->restore_vlan_table(handle); - - return hns3_restore_fd_rules(netdev); -} - static int hns3_reset_notify_uninit_enet(struct hnae3_handle *handle) { struct net_device *netdev = handle->kinfo.netdev; @@ -4514,9 +4484,6 @@ static int hns3_reset_notify(struct hnae3_handle *handle, case HNAE3_UNINIT_CLIENT: ret = hns3_reset_notify_uninit_enet(handle); break; - case HNAE3_RESTORE_CLIENT: - ret = hns3_reset_notify_restore_enet(handle); - break; default: break; } diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h index 53bc0ed..240ba06 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h @@ -576,15 +576,6 @@ static inline void hns3_write_reg(void __iomem *base, u32 reg, u32 value) writel(value, reg_addr + reg); } -static inline bool hns3_dev_ongoing_func_reset(struct hnae3_ae_dev *ae_dev) -{ - return (ae_dev && (ae_dev->reset_type == HNAE3_FUNC_RESET || - ae_dev->reset_type == HNAE3_FLR_RESET || - ae_dev->reset_type == HNAE3_VF_FUNC_RESET || - ae_dev->reset_type == HNAE3_VF_FULL_RESET || - ae_dev->reset_type == HNAE3_VF_PF_FUNC_RESET)); -} - #define hns3_read_dev(a, reg) \ hns3_read_reg((a)->io_base, (reg)) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index 177ef5e..c74990a 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -69,6 +69,7 @@ static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev, static int hclge_set_default_loopback(struct hclge_dev *hdev); static void hclge_sync_mac_table(struct hclge_dev *hdev); +static void hclge_restore_hw_table(struct hclge_dev *hdev); static void hclge_sync_promisc_mode(struct hclge_dev *hdev); static struct hnae3_ae_algo 
ae_algo; @@ -3731,22 +3732,13 @@ static int hclge_reset_stack(struct hclge_dev *hdev) if (ret) return ret; - ret = hclge_notify_client(hdev, HNAE3_INIT_CLIENT); - if (ret) - return ret; - - return hclge_notify_client(hdev, HNAE3_RESTORE_CLIENT); + return hclge_notify_client(hdev, HNAE3_INIT_CLIENT); } static int hclge_reset_prepare(struct hclge_dev *hdev) { - struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); int ret; - /* Initialize ae_dev reset status as well, in case enet layer wants to - * know if device is undergoing reset - */ - ae_dev->reset_type = hdev->reset_type; hdev->rst_stats.reset_cnt++; /* perform reset of the stack & ae device for a client */ ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT); @@ -3808,7 +3800,6 @@ static int hclge_reset_rebuild(struct hclge_dev *hdev) hdev->last_reset_time = jiffies; hdev->rst_stats.reset_fail_cnt = 0; hdev->rst_stats.reset_done_cnt++; - ae_dev->reset_type = HNAE3_NONE_RESET; clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state); /* if default_reset_request has a higher level reset request, @@ -6942,8 +6933,14 @@ int hclge_vport_start(struct hclge_vport *vport) set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state); vport->last_active_jiffies = jiffies; - if (test_bit(vport->vport_id, hdev->vport_config_block)) - hclge_restore_mac_table_common(vport); + if (test_bit(vport->vport_id, hdev->vport_config_block)) { + if (vport->vport_id) { + hclge_restore_mac_table_common(vport); + hclge_restore_vport_vlan_table(vport); + } else { + hclge_restore_hw_table(hdev); + } + } clear_bit(vport->vport_id, hdev->vport_config_block); @@ -8789,39 +8786,34 @@ void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev) } } -static void hclge_restore_vlan_table(struct hnae3_handle *handle) +void hclge_restore_vport_vlan_table(struct hclge_vport *vport) { - struct hclge_vport *vport = hclge_get_vport(handle); struct hclge_vport_vlan_cfg *vlan, *tmp; struct hclge_dev *hdev = vport->back; u16 vlan_proto; - u16 state, vlan_id; - int i; + u16 vlan_id; + u16 state; + int ret; - for (i = 0; i < hdev->num_alloc_vport; i++) { - vport = &hdev->vport[i]; - vlan_proto = vport->port_base_vlan_cfg.vlan_info.vlan_proto; - vlan_id = vport->port_base_vlan_cfg.vlan_info.vlan_tag; - state = vport->port_base_vlan_cfg.state; + vlan_proto = vport->port_base_vlan_cfg.vlan_info.vlan_proto; + vlan_id = vport->port_base_vlan_cfg.vlan_info.vlan_tag; + state = vport->port_base_vlan_cfg.state; - if (state != HNAE3_PORT_BASE_VLAN_DISABLE) { - hclge_set_vlan_filter_hw(hdev, htons(vlan_proto), - vport->vport_id, vlan_id, - false); - continue; - } - - list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) { - int ret; + if (state != HNAE3_PORT_BASE_VLAN_DISABLE) { + clear_bit(vport->vport_id, hdev->vlan_table[vlan_id]); + hclge_set_vlan_filter_hw(hdev, htons(vlan_proto), + vport->vport_id, vlan_id, + false); + return; + } - if (!vlan->hd_tbl_status) - continue; - ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q), - vport->vport_id, - vlan->vlan_id, false); - if (ret) - break; - } + list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) { + ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q), + vport->vport_id, + vlan->vlan_id, false); + if (ret) + break; + vlan->hd_tbl_status = true; } } @@ -8856,6 +8848,18 @@ void hclge_restore_mac_table_common(struct hclge_vport *vport) spin_unlock_bh(&vport->mac_list_lock); } +static void hclge_restore_hw_table(struct hclge_dev *hdev) +{ + struct hclge_vport *vport = &hdev->vport[0]; + struct hnae3_handle *handle = &vport->nic; + + 
hclge_restore_mac_table_common(vport); + hclge_restore_vport_vlan_table(vport); + set_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state); + + hclge_restore_fd_entries(handle); +} + int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable) { struct hclge_vport *vport = hclge_get_vport(handle); @@ -10352,13 +10356,12 @@ static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev) */ if (hdev->reset_type == HNAE3_IMP_RESET || hdev->reset_type == HNAE3_GLOBAL_RESET) { + memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table)); + memset(hdev->vf_vlan_full, 0, sizeof(hdev->vf_vlan_full)); bitmap_set(hdev->vport_config_block, 0, hdev->num_alloc_vport); hclge_reset_umv_space(hdev); } - memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table)); - memset(hdev->vf_vlan_full, 0, sizeof(hdev->vf_vlan_full)); - ret = hclge_cmd_init(hdev); if (ret) { dev_err(&pdev->dev, "Cmd queue init failed\n"); @@ -11191,7 +11194,6 @@ static const struct hnae3_ae_ops hclge_ops = { .get_fd_rule_cnt = hclge_get_fd_rule_cnt, .get_fd_rule_info = hclge_get_fd_rule_info, .get_fd_all_rules = hclge_get_all_rules, - .restore_fd_rules = hclge_restore_fd_entries, .enable_fd = hclge_enable_fd, .add_arfs_entry = hclge_add_fd_entry_by_arfs, .dbg_run_cmd = hclge_dbg_run_cmd, @@ -11204,7 +11206,6 @@ static const struct hnae3_ae_ops hclge_ops = { .set_timer_task = hclge_set_timer_task, .mac_connect_phy = hclge_mac_connect_phy, .mac_disconnect_phy = hclge_mac_disconnect_phy, - .restore_vlan_table = hclge_restore_vlan_table, .get_vf_config = hclge_get_vf_config, .set_vf_link_state = hclge_set_vf_link_state, .set_vf_spoofchk = hclge_set_vf_spoofchk, diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h index 8e69651..913c4f6 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h @@ -1001,6 +1001,7 @@ void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list, void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list); void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev); void hclge_restore_mac_table_common(struct hclge_vport *vport); +void hclge_restore_vport_vlan_table(struct hclge_vport *vport); int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state, struct hclge_vlan_info *vlan_info); int hclge_push_vf_port_base_vlan_info(struct hclge_vport *vport, u8 vfid, diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c index 0efc045..ac70faf 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c @@ -629,6 +629,23 @@ static void hclge_handle_ncsi_error(struct hclge_dev *hdev) ae_dev->ops->reset_event(hdev->pdev, NULL); } +static void hclge_handle_vf_tbl(struct hclge_vport *vport, + struct hclge_mbx_vf_to_pf_cmd *mbx_req) +{ + struct hclge_dev *hdev = vport->back; + struct hclge_vf_vlan_cfg *msg_cmd; + + msg_cmd = (struct hclge_vf_vlan_cfg *)&mbx_req->msg; + if (msg_cmd->subcode == HCLGE_MBX_VPORT_LIST_CLEAR) { + hclge_rm_vport_all_mac_table(vport, true, HCLGE_MAC_ADDR_UC); + hclge_rm_vport_all_mac_table(vport, true, HCLGE_MAC_ADDR_MC); + hclge_rm_vport_all_vlan_table(vport, true); + } else { + dev_warn(&hdev->pdev->dev, "Invalid cmd(%u)\n", + msg_cmd->subcode); + } +} + void hclge_mbx_handler(struct hclge_dev *hdev) { struct hclge_cmq_ring *crq = &hdev->hw.cmq.crq; @@ -636,6 +653,7 @@ void 
hclge_mbx_handler(struct hclge_dev *hdev) struct hclge_mbx_vf_to_pf_cmd *req; struct hclge_vport *vport; struct hclge_desc *desc; + bool is_del = false; unsigned int flag; int ret = 0; @@ -753,11 +771,12 @@ void hclge_mbx_handler(struct hclge_dev *hdev) break; case HCLGE_MBX_GET_VF_FLR_STATUS: case HCLGE_MBX_VF_UNINIT: - hclge_rm_vport_all_mac_table(vport, true, + is_del = req->msg.code == HCLGE_MBX_VF_UNINIT; + hclge_rm_vport_all_mac_table(vport, is_del, HCLGE_MAC_ADDR_UC); - hclge_rm_vport_all_mac_table(vport, true, + hclge_rm_vport_all_mac_table(vport, is_del, HCLGE_MAC_ADDR_MC); - hclge_rm_vport_all_vlan_table(vport, true); + hclge_rm_vport_all_vlan_table(vport, is_del); break; case HCLGE_MBX_GET_MEDIA_TYPE: hclge_get_vf_media_type(vport, &resp_msg); @@ -771,6 +790,9 @@ void hclge_mbx_handler(struct hclge_dev *hdev) case HCLGE_MBX_NCSI_ERROR: hclge_handle_ncsi_error(hdev); break; + case HCLGE_MBX_HANDLE_VF_TBL: + hclge_handle_vf_tbl(vport, req); + break; default: dev_err(&hdev->pdev->dev, "un-supported mailbox message, code = %u\n", diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c index fea197f..32341dc 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c @@ -1777,10 +1777,6 @@ static int hclgevf_reset_stack(struct hclgevf_dev *hdev) if (ret) return ret; - ret = hclgevf_notify_client(hdev, HNAE3_RESTORE_CLIENT); - if (ret) - return ret; - /* clear handshake status with IMP */ hclgevf_reset_handshake(hdev, false); @@ -1860,13 +1856,8 @@ static void hclgevf_reset_err_handle(struct hclgevf_dev *hdev) static int hclgevf_reset_prepare(struct hclgevf_dev *hdev) { - struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); int ret; - /* Initialize ae_dev reset status as well, in case enet layer wants to - * know if device is undergoing reset - */ - ae_dev->reset_type = hdev->reset_type; hdev->rst_stats.rst_cnt++; rtnl_lock(); @@ -1881,7 +1872,6 @@ static int hclgevf_reset_prepare(struct hclgevf_dev *hdev) static int hclgevf_reset_rebuild(struct hclgevf_dev *hdev) { - struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); int ret; hdev->rst_stats.hw_rst_done_cnt++; @@ -1896,7 +1886,6 @@ static int hclgevf_reset_rebuild(struct hclgevf_dev *hdev) } hdev->last_reset_time = jiffies; - ae_dev->reset_type = HNAE3_NONE_RESET; hdev->rst_stats.rst_done_cnt++; hdev->rst_stats.rst_fail_cnt = 0; clear_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state); @@ -2974,6 +2963,15 @@ static int hclgevf_pci_reset(struct hclgevf_dev *hdev) return ret; } +static int hclgevf_clear_vport_list(struct hclgevf_dev *hdev) +{ + struct hclge_vf_to_pf_msg send_msg; + + hclgevf_build_send_msg(&send_msg, HCLGE_MBX_HANDLE_VF_TBL, + HCLGE_MBX_VPORT_LIST_CLEAR); + return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0); +} + static int hclgevf_reset_hdev(struct hclgevf_dev *hdev) { struct pci_dev *pdev = hdev->pdev; @@ -3083,6 +3081,15 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev) goto err_config; } + /* ensure vf tbl list as empty before init*/ + ret = hclgevf_clear_vport_list(hdev); + if (ret) { + dev_err(&pdev->dev, + "failed to clear tbl list configuration, ret = %d.\n", + ret); + goto err_config; + } + ret = hclgevf_init_vlan_config(hdev); if (ret) { dev_err(&hdev->pdev->dev,