Patch Detail
get: Show a patch.
patch: Update a patch.
put: Update a patch.
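These map onto the usual REST verbs: GET retrieves the patch shown below, while PATCH and PUT update it (PATCH for a partial update, PUT for a full replacement). The following is a minimal sketch of driving this endpoint from Python with the requests library; it assumes the standard Patchwork conventions of token authentication and state slugs like the "changes-requested" value visible in the response below, and the token value is only a placeholder.

    # Minimal sketch; base URL and patch ID are taken from the request below,
    # the API token is a placeholder you would obtain from your Patchwork profile.
    import requests

    BASE = "http://patchwork.ozlabs.org/api"
    PATCH_ID = 1182429
    TOKEN = "your-api-token-here"  # placeholder

    # GET: show the patch (read-only, no authentication needed)
    patch = requests.get(f"{BASE}/patches/{PATCH_ID}/").json()
    print(patch["name"], patch["state"])

    # PATCH: update selected fields (requires a token with maintainer rights)
    resp = requests.patch(
        f"{BASE}/patches/{PATCH_ID}/",
        headers={"Authorization": f"Token {TOKEN}"},
        json={"state": "changes-requested"},
    )
    resp.raise_for_status()

An unauthenticated PATCH or PUT is rejected, so the read-only GET below is the representative case for anonymous access.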
GET /api/patches/1182429/?format=api
{ "id": 1182429, "url": "http://patchwork.ozlabs.org/api/patches/1182429/?format=api", "web_url": "http://patchwork.ozlabs.org/project/intel-wired-lan/patch/20191023182253.1115-16-shiraz.saleem@intel.com/", "project": { "id": 46, "url": "http://patchwork.ozlabs.org/api/projects/46/?format=api", "name": "Intel Wired Ethernet development", "link_name": "intel-wired-lan", "list_id": "intel-wired-lan.osuosl.org", "list_email": "intel-wired-lan@osuosl.org", "web_url": "", "scm_url": "", "webscm_url": "", "list_archive_url": "", "list_archive_url_format": "", "commit_url_format": "" }, "msgid": "<20191023182253.1115-16-shiraz.saleem@intel.com>", "list_archive_url": null, "date": "2019-10-23T18:22:51", "name": "[rdma-nxt,15/16] RDMA: Add irdma Kconfig/Makefile and remove i40iw", "commit_ref": null, "pull_url": null, "state": "changes-requested", "archived": false, "hash": "30d72d3e66a3ce43da0ddda7618016bcec920cb4", "submitter": { "id": 69500, "url": "http://patchwork.ozlabs.org/api/people/69500/?format=api", "name": "Saleem, Shiraz", "email": "shiraz.saleem@intel.com" }, "delegate": { "id": 68, "url": "http://patchwork.ozlabs.org/api/users/68/?format=api", "username": "jtkirshe", "first_name": "Jeff", "last_name": "Kirsher", "email": "jeffrey.t.kirsher@intel.com" }, "mbox": "http://patchwork.ozlabs.org/project/intel-wired-lan/patch/20191023182253.1115-16-shiraz.saleem@intel.com/mbox/", "series": [ { "id": 138160, "url": "http://patchwork.ozlabs.org/api/series/138160/?format=api", "web_url": "http://patchwork.ozlabs.org/project/intel-wired-lan/list/?series=138160", "date": "2019-10-23T18:22:36", "name": "Add unified Intel Ethernet RDMA driver (irdma)", "version": 1, "mbox": "http://patchwork.ozlabs.org/series/138160/mbox/" } ], "comments": "http://patchwork.ozlabs.org/api/patches/1182429/comments/", "check": "pending", "checks": "http://patchwork.ozlabs.org/api/patches/1182429/checks/", "tags": {}, "related": [], "headers": { "Return-Path": "<intel-wired-lan-bounces@osuosl.org>", "X-Original-To": [ "incoming@patchwork.ozlabs.org", "intel-wired-lan@lists.osuosl.org" ], "Delivered-To": [ "patchwork-incoming@bilbo.ozlabs.org", "intel-wired-lan@lists.osuosl.org" ], "Authentication-Results": [ "ozlabs.org; spf=pass (sender SPF authorized)\n\tsmtp.mailfrom=osuosl.org (client-ip=140.211.166.137;\n\thelo=fraxinus.osuosl.org;\n\tenvelope-from=intel-wired-lan-bounces@osuosl.org;\n\treceiver=<UNKNOWN>)", "ozlabs.org;\n\tdmarc=fail (p=none dis=none) header.from=intel.com" ], "Received": [ "from fraxinus.osuosl.org (smtp4.osuosl.org [140.211.166.137])\n\t(using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256\n\tbits)) (No client certificate requested)\n\tby ozlabs.org (Postfix) with ESMTPS id 46z0411LkVz9sNw\n\tfor <incoming@patchwork.ozlabs.org>;\n\tThu, 24 Oct 2019 05:57:21 +1100 (AEDT)", "from localhost (localhost [127.0.0.1])\n\tby fraxinus.osuosl.org (Postfix) with ESMTP id A54CB864F2;\n\tWed, 23 Oct 2019 18:57:19 +0000 (UTC)", "from fraxinus.osuosl.org ([127.0.0.1])\n\tby localhost (.osuosl.org [127.0.0.1]) (amavisd-new, port 10024)\n\twith ESMTP id KKNyWp_0Gfln; Wed, 23 Oct 2019 18:56:28 +0000 (UTC)", "from ash.osuosl.org (ash.osuosl.org [140.211.166.34])\n\tby fraxinus.osuosl.org (Postfix) with ESMTP id EC0A086644;\n\tWed, 23 Oct 2019 18:56:21 +0000 (UTC)", "from hemlock.osuosl.org (smtp2.osuosl.org [140.211.166.133])\n\tby ash.osuosl.org (Postfix) with ESMTP id 1DBB11BF48D\n\tfor <intel-wired-lan@lists.osuosl.org>;\n\tWed, 23 Oct 2019 18:39:26 +0000 (UTC)", "from localhost (localhost 
[127.0.0.1])\n\tby hemlock.osuosl.org (Postfix) with ESMTP id 05B6A88193\n\tfor <intel-wired-lan@lists.osuosl.org>;\n\tWed, 23 Oct 2019 18:39:26 +0000 (UTC)", "from hemlock.osuosl.org ([127.0.0.1])\n\tby localhost (.osuosl.org [127.0.0.1]) (amavisd-new, port 10024)\n\twith ESMTP id 1WArqeKieRAL for <intel-wired-lan@lists.osuosl.org>;\n\tWed, 23 Oct 2019 18:39:05 +0000 (UTC)", "from mga14.intel.com (mga14.intel.com [192.55.52.115])\n\tby hemlock.osuosl.org (Postfix) with ESMTPS id A3E8B874E4\n\tfor <intel-wired-lan@lists.osuosl.org>;\n\tWed, 23 Oct 2019 18:39:05 +0000 (UTC)", "from fmsmga002.fm.intel.com ([10.253.24.26])\n\tby fmsmga103.fm.intel.com with ESMTP/TLS/DHE-RSA-AES256-GCM-SHA384;\n\t23 Oct 2019 11:38:02 -0700", "from ssaleem-mobl.amr.corp.intel.com ([10.122.128.45])\n\tby fmsmga002.fm.intel.com with ESMTP; 23 Oct 2019 11:37:59 -0700" ], "X-Virus-Scanned": [ "amavisd-new at osuosl.org", "amavisd-new at osuosl.org" ], "X-Greylist": "domain auto-whitelisted by SQLgrey-1.7.6", "X-Amp-Result": "SKIPPED(no attachment in message)", "X-Amp-File-Uploaded": "False", "X-ExtLoop1": "1", "X-IronPort-AV": "E=Sophos;i=\"5.68,221,1569308400\"; d=\"scan'208\";a=\"228225102\"", "From": "Shiraz Saleem <shiraz.saleem@intel.com>", "To": "intel-wired-lan@lists.osuosl.org", "Date": "Wed, 23 Oct 2019 13:22:51 -0500", "Message-Id": "<20191023182253.1115-16-shiraz.saleem@intel.com>", "X-Mailer": "git-send-email 2.21.0", "In-Reply-To": "<20191023182253.1115-1-shiraz.saleem@intel.com>", "References": "<20191023182253.1115-1-shiraz.saleem@intel.com>", "MIME-Version": "1.0", "X-Mailman-Approved-At": "Wed, 23 Oct 2019 18:56:19 +0000", "Subject": "[Intel-wired-lan] [PATCH rdma-nxt 15/16] RDMA: Add irdma\n\tKconfig/Makefile and remove i40iw", "X-BeenThere": "intel-wired-lan@osuosl.org", "X-Mailman-Version": "2.1.29", "Precedence": "list", "List-Id": "Intel Wired Ethernet Linux Kernel Driver Development\n\t<intel-wired-lan.osuosl.org>", "List-Unsubscribe": "<https://lists.osuosl.org/mailman/options/intel-wired-lan>, \n\t<mailto:intel-wired-lan-request@osuosl.org?subject=unsubscribe>", "List-Archive": "<http://lists.osuosl.org/pipermail/intel-wired-lan/>", "List-Post": "<mailto:intel-wired-lan@osuosl.org>", "List-Help": "<mailto:intel-wired-lan-request@osuosl.org?subject=help>", "List-Subscribe": "<https://lists.osuosl.org/mailman/listinfo/intel-wired-lan>, \n\t<mailto:intel-wired-lan-request@osuosl.org?subject=subscribe>", "Cc": "Shiraz Saleem <shiraz.saleem@intel.com>", "Content-Type": "text/plain; charset=\"us-ascii\"", "Content-Transfer-Encoding": "7bit", "Errors-To": "intel-wired-lan-bounces@osuosl.org", "Sender": "\"Intel-wired-lan\" <intel-wired-lan-bounces@osuosl.org>" }, "content": "Add Kconfig and Makefile to build irdma driver.\n\nRemove i40iw driver. 
irdma is the replacement driver\nthat supports X722.\n\nSigned-off-by: Shiraz Saleem <shiraz.saleem@intel.com>\n---\n Documentation/ABI/stable/sysfs-class-infiniband | 18 -\n MAINTAINERS | 8 -\n drivers/infiniband/Kconfig | 2 +-\n drivers/infiniband/hw/Makefile | 2 +-\n drivers/infiniband/hw/i40iw/Kconfig | 9 -\n drivers/infiniband/hw/i40iw/Makefile | 9 -\n drivers/infiniband/hw/i40iw/i40iw.h | 602 ---\n drivers/infiniband/hw/i40iw/i40iw_cm.c | 4422 -------------------\n drivers/infiniband/hw/i40iw/i40iw_cm.h | 462 --\n drivers/infiniband/hw/i40iw/i40iw_ctrl.c | 5198 -----------------------\n drivers/infiniband/hw/i40iw/i40iw_d.h | 1737 --------\n drivers/infiniband/hw/i40iw/i40iw_hmc.c | 821 ----\n drivers/infiniband/hw/i40iw/i40iw_hmc.h | 241 --\n drivers/infiniband/hw/i40iw/i40iw_hw.c | 852 ----\n drivers/infiniband/hw/i40iw/i40iw_main.c | 2068 ---------\n drivers/infiniband/hw/i40iw/i40iw_osdep.h | 217 -\n drivers/infiniband/hw/i40iw/i40iw_p.h | 128 -\n drivers/infiniband/hw/i40iw/i40iw_pble.c | 612 ---\n drivers/infiniband/hw/i40iw/i40iw_pble.h | 131 -\n drivers/infiniband/hw/i40iw/i40iw_puda.c | 1493 -------\n drivers/infiniband/hw/i40iw/i40iw_puda.h | 188 -\n drivers/infiniband/hw/i40iw/i40iw_register.h | 1030 -----\n drivers/infiniband/hw/i40iw/i40iw_status.h | 101 -\n drivers/infiniband/hw/i40iw/i40iw_type.h | 1363 ------\n drivers/infiniband/hw/i40iw/i40iw_uk.c | 1232 ------\n drivers/infiniband/hw/i40iw/i40iw_user.h | 430 --\n drivers/infiniband/hw/i40iw/i40iw_utils.c | 1557 -------\n drivers/infiniband/hw/i40iw/i40iw_verbs.c | 2784 ------------\n drivers/infiniband/hw/i40iw/i40iw_verbs.h | 179 -\n drivers/infiniband/hw/i40iw/i40iw_vf.c | 85 -\n drivers/infiniband/hw/i40iw/i40iw_vf.h | 62 -\n drivers/infiniband/hw/i40iw/i40iw_virtchnl.c | 756 ----\n drivers/infiniband/hw/i40iw/i40iw_virtchnl.h | 124 -\n drivers/infiniband/hw/irdma/Kconfig | 11 +\n drivers/infiniband/hw/irdma/Makefile | 28 +\n include/uapi/rdma/i40iw-abi.h | 107 -\n 36 files changed, 41 insertions(+), 29028 deletions(-)\n delete mode 100644 drivers/infiniband/hw/i40iw/Kconfig\n delete mode 100644 drivers/infiniband/hw/i40iw/Makefile\n delete mode 100644 drivers/infiniband/hw/i40iw/i40iw.h\n delete mode 100644 drivers/infiniband/hw/i40iw/i40iw_cm.c\n delete mode 100644 drivers/infiniband/hw/i40iw/i40iw_cm.h\n delete mode 100644 drivers/infiniband/hw/i40iw/i40iw_ctrl.c\n delete mode 100644 drivers/infiniband/hw/i40iw/i40iw_d.h\n delete mode 100644 drivers/infiniband/hw/i40iw/i40iw_hmc.c\n delete mode 100644 drivers/infiniband/hw/i40iw/i40iw_hmc.h\n delete mode 100644 drivers/infiniband/hw/i40iw/i40iw_hw.c\n delete mode 100644 drivers/infiniband/hw/i40iw/i40iw_main.c\n delete mode 100644 drivers/infiniband/hw/i40iw/i40iw_osdep.h\n delete mode 100644 drivers/infiniband/hw/i40iw/i40iw_p.h\n delete mode 100644 drivers/infiniband/hw/i40iw/i40iw_pble.c\n delete mode 100644 drivers/infiniband/hw/i40iw/i40iw_pble.h\n delete mode 100644 drivers/infiniband/hw/i40iw/i40iw_puda.c\n delete mode 100644 drivers/infiniband/hw/i40iw/i40iw_puda.h\n delete mode 100644 drivers/infiniband/hw/i40iw/i40iw_register.h\n delete mode 100644 drivers/infiniband/hw/i40iw/i40iw_status.h\n delete mode 100644 drivers/infiniband/hw/i40iw/i40iw_type.h\n delete mode 100644 drivers/infiniband/hw/i40iw/i40iw_uk.c\n delete mode 100644 drivers/infiniband/hw/i40iw/i40iw_user.h\n delete mode 100644 drivers/infiniband/hw/i40iw/i40iw_utils.c\n delete mode 100644 drivers/infiniband/hw/i40iw/i40iw_verbs.c\n delete mode 100644 
drivers/infiniband/hw/i40iw/i40iw_verbs.h\n delete mode 100644 drivers/infiniband/hw/i40iw/i40iw_vf.c\n delete mode 100644 drivers/infiniband/hw/i40iw/i40iw_vf.h\n delete mode 100644 drivers/infiniband/hw/i40iw/i40iw_virtchnl.c\n delete mode 100644 drivers/infiniband/hw/i40iw/i40iw_virtchnl.h\n create mode 100644 drivers/infiniband/hw/irdma/Kconfig\n create mode 100644 drivers/infiniband/hw/irdma/Makefile\n delete mode 100644 include/uapi/rdma/i40iw-abi.h", "diff": "diff --git a/Documentation/ABI/stable/sysfs-class-infiniband b/Documentation/ABI/stable/sysfs-class-infiniband\nindex 96dfe19..7dcb45a 100644\n--- a/Documentation/ABI/stable/sysfs-class-infiniband\n+++ b/Documentation/ABI/stable/sysfs-class-infiniband\n@@ -717,24 +717,6 @@ Description:\n \t\tis the irq number of \"sdma3\", and M is irq number of \"sdma4\" in\n \t\tthe /proc/interrupts file.\n \n-\n-sysfs interface for Intel(R) X722 iWARP i40iw driver\n-----------------------------------------------------\n-\n-What:\t\t/sys/class/infiniband/i40iwX/hw_rev\n-What:\t\t/sys/class/infiniband/i40iwX/hca_type\n-What:\t\t/sys/class/infiniband/i40iwX/board_id\n-Date:\t\tJan, 2016\n-KernelVersion:\tv4.10\n-Contact:\tlinux-rdma@vger.kernel.org\n-Description:\n-\t\thw_rev:\t\t(RO) Hardware revision number\n-\n-\t\thca_type:\t(RO) Show HCA type (I40IW)\n-\n-\t\tboard_id:\t(RO) I40IW board ID\n-\n-\n sysfs interface for QLogic qedr NIC Driver\n ------------------------------------------\n \ndiff --git a/MAINTAINERS b/MAINTAINERS\nindex fa60a2c..21961b1 100644\n--- a/MAINTAINERS\n+++ b/MAINTAINERS\n@@ -8416,14 +8416,6 @@ L:\tlinux-pm@vger.kernel.org\n S:\tSupported\n F:\tdrivers/cpufreq/intel_pstate.c\n \n-INTEL RDMA RNIC DRIVER\n-M:\tFaisal Latif <faisal.latif@intel.com>\n-M:\tShiraz Saleem <shiraz.saleem@intel.com>\n-L:\tlinux-rdma@vger.kernel.org\n-S:\tSupported\n-F:\tdrivers/infiniband/hw/i40iw/\n-F:\tinclude/uapi/rdma/i40iw-abi.h\n-\n INTEL SPEED SELECT TECHNOLOGY\n M:\tSrinivas Pandruvada <srinivas.pandruvada@linux.intel.com>\n L:\tplatform-driver-x86@vger.kernel.org\ndiff --git a/drivers/infiniband/Kconfig b/drivers/infiniband/Kconfig\nindex ade8638..f4cdb86 100644\n--- a/drivers/infiniband/Kconfig\n+++ b/drivers/infiniband/Kconfig\n@@ -85,7 +85,7 @@ source \"drivers/infiniband/hw/mthca/Kconfig\"\n source \"drivers/infiniband/hw/qib/Kconfig\"\n source \"drivers/infiniband/hw/cxgb4/Kconfig\"\n source \"drivers/infiniband/hw/efa/Kconfig\"\n-source \"drivers/infiniband/hw/i40iw/Kconfig\"\n+source \"drivers/infiniband/hw/irdma/Kconfig\"\n source \"drivers/infiniband/hw/mlx4/Kconfig\"\n source \"drivers/infiniband/hw/mlx5/Kconfig\"\n source \"drivers/infiniband/hw/ocrdma/Kconfig\"\ndiff --git a/drivers/infiniband/hw/Makefile b/drivers/infiniband/hw/Makefile\nindex 0aeccd9..fba0b3b 100644\n--- a/drivers/infiniband/hw/Makefile\n+++ b/drivers/infiniband/hw/Makefile\n@@ -3,7 +3,7 @@ obj-$(CONFIG_INFINIBAND_MTHCA)\t\t+= mthca/\n obj-$(CONFIG_INFINIBAND_QIB)\t\t+= qib/\n obj-$(CONFIG_INFINIBAND_CXGB4)\t\t+= cxgb4/\n obj-$(CONFIG_INFINIBAND_EFA)\t\t+= efa/\n-obj-$(CONFIG_INFINIBAND_I40IW)\t\t+= i40iw/\n+obj-$(CONFIG_INFINIBAND_IRDMA)\t\t+= irdma/\n obj-$(CONFIG_MLX4_INFINIBAND)\t\t+= mlx4/\n obj-$(CONFIG_MLX5_INFINIBAND)\t\t+= mlx5/\n obj-$(CONFIG_INFINIBAND_OCRDMA)\t\t+= ocrdma/\ndiff --git a/drivers/infiniband/hw/i40iw/Kconfig b/drivers/infiniband/hw/i40iw/Kconfig\ndeleted file mode 100644\nindex e4b45f4..0000000\n--- a/drivers/infiniband/hw/i40iw/Kconfig\n+++ /dev/null\n@@ -1,9 +0,0 @@\n-# SPDX-License-Identifier: GPL-2.0-only\n-config 
INFINIBAND_I40IW\n-\ttristate \"Intel(R) Ethernet X722 iWARP Driver\"\n-\tdepends on INET && I40E\n-\tdepends on IPV6 || !IPV6\n-\tdepends on PCI\n-\tselect GENERIC_ALLOCATOR\n-\t---help---\n-\tIntel(R) Ethernet X722 iWARP Driver\ndiff --git a/drivers/infiniband/hw/i40iw/Makefile b/drivers/infiniband/hw/i40iw/Makefile\ndeleted file mode 100644\nindex 34da9eb..0000000\n--- a/drivers/infiniband/hw/i40iw/Makefile\n+++ /dev/null\n@@ -1,9 +0,0 @@\n-# SPDX-License-Identifier: GPL-2.0\n-\n-obj-$(CONFIG_INFINIBAND_I40IW) += i40iw.o\n-\n-i40iw-objs :=\\\n- i40iw_cm.o i40iw_ctrl.o \\\n- i40iw_hmc.o i40iw_hw.o i40iw_main.o \\\n- i40iw_pble.o i40iw_puda.o i40iw_uk.o i40iw_utils.o \\\n- i40iw_verbs.o i40iw_virtchnl.o i40iw_vf.o\ndiff --git a/drivers/infiniband/hw/i40iw/i40iw.h b/drivers/infiniband/hw/i40iw/i40iw.h\ndeleted file mode 100644\nindex 3197e35..0000000\n--- a/drivers/infiniband/hw/i40iw/i40iw.h\n+++ /dev/null\n@@ -1,602 +0,0 @@\n-/*******************************************************************************\n-*\n-* Copyright (c) 2015-2016 Intel Corporation. All rights reserved.\n-*\n-* This software is available to you under a choice of one of two\n-* licenses. You may choose to be licensed under the terms of the GNU\n-* General Public License (GPL) Version 2, available from the file\n-* COPYING in the main directory of this source tree, or the\n-* OpenFabrics.org BSD license below:\n-*\n-* Redistribution and use in source and binary forms, with or\n-* without modification, are permitted provided that the following\n-* conditions are met:\n-*\n-* - Redistributions of source code must retain the above\n-*\tcopyright notice, this list of conditions and the following\n-*\tdisclaimer.\n-*\n-* - Redistributions in binary form must reproduce the above\n-*\tcopyright notice, this list of conditions and the following\n-*\tdisclaimer in the documentation and/or other materials\n-*\tprovided with the distribution.\n-*\n-* THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,\n-* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\n-* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\n-* NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS\n-* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN\n-* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\n-* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n-* SOFTWARE.\n-*\n-*******************************************************************************/\n-\n-#ifndef I40IW_IW_H\n-#define I40IW_IW_H\n-#include <linux/netdevice.h>\n-#include <linux/inetdevice.h>\n-#include <linux/spinlock.h>\n-#include <linux/kernel.h>\n-#include <linux/delay.h>\n-#include <linux/pci.h>\n-#include <linux/dma-mapping.h>\n-#include <linux/workqueue.h>\n-#include <linux/slab.h>\n-#include <linux/io.h>\n-#include <linux/crc32c.h>\n-#include <rdma/ib_smi.h>\n-#include <rdma/ib_verbs.h>\n-#include <rdma/ib_pack.h>\n-#include <rdma/rdma_cm.h>\n-#include <rdma/iw_cm.h>\n-#include <crypto/hash.h>\n-\n-#include \"i40iw_status.h\"\n-#include \"i40iw_osdep.h\"\n-#include \"i40iw_d.h\"\n-#include \"i40iw_hmc.h\"\n-\n-#include <linux/net/intel/i40e_client.h>\n-#include \"i40iw_type.h\"\n-#include \"i40iw_p.h\"\n-#include <rdma/i40iw-abi.h>\n-#include \"i40iw_pble.h\"\n-#include \"i40iw_verbs.h\"\n-#include \"i40iw_cm.h\"\n-#include \"i40iw_user.h\"\n-#include \"i40iw_puda.h\"\n-\n-#define I40IW_FW_VERSION 2\n-#define I40IW_HW_VERSION 2\n-\n-#define I40IW_ARP_ADD 1\n-#define I40IW_ARP_DELETE 2\n-#define I40IW_ARP_RESOLVE 3\n-\n-#define I40IW_MACIP_ADD 1\n-#define I40IW_MACIP_DELETE 2\n-\n-#define IW_CCQ_SIZE (I40IW_CQP_SW_SQSIZE_2048 + 1)\n-#define IW_CEQ_SIZE 2048\n-#define IW_AEQ_SIZE 2048\n-\n-#define RX_BUF_SIZE (1536 + 8)\n-#define IW_REG0_SIZE (4 * 1024)\n-#define IW_TX_TIMEOUT (6 * HZ)\n-#define IW_FIRST_QPN 1\n-#define IW_SW_CONTEXT_ALIGN 1024\n-\n-#define MAX_DPC_ITERATIONS\t\t128\n-\n-#define I40IW_EVENT_TIMEOUT\t\t100000\n-#define I40IW_VCHNL_EVENT_TIMEOUT\t100000\n-\n-#define\tI40IW_NO_VLAN\t\t\t0xffff\n-#define\tI40IW_NO_QSET\t\t\t0xffff\n-\n-/* access to mcast filter list */\n-#define IW_ADD_MCAST false\n-#define IW_DEL_MCAST true\n-\n-#define I40IW_DRV_OPT_ENABLE_MPA_VER_0 0x00000001\n-#define I40IW_DRV_OPT_DISABLE_MPA_CRC 0x00000002\n-#define I40IW_DRV_OPT_DISABLE_FIRST_WRITE 0x00000004\n-#define I40IW_DRV_OPT_DISABLE_INTF 0x00000008\n-#define I40IW_DRV_OPT_ENABLE_MSI 0x00000010\n-#define I40IW_DRV_OPT_DUAL_LOGICAL_PORT 0x00000020\n-#define I40IW_DRV_OPT_NO_INLINE_DATA 0x00000080\n-#define I40IW_DRV_OPT_DISABLE_INT_MOD 0x00000100\n-#define I40IW_DRV_OPT_DISABLE_VIRT_WQ 0x00000200\n-#define I40IW_DRV_OPT_ENABLE_PAU 0x00000400\n-#define I40IW_DRV_OPT_MCAST_LOGPORT_MAP 0x00000800\n-\n-#define IW_HMC_OBJ_TYPE_NUM ARRAY_SIZE(iw_hmc_obj_types)\n-#define IW_CFG_FPM_QP_COUNT 32768\n-#define I40IW_MAX_PAGES_PER_FMR 512\n-#define I40IW_MIN_PAGES_PER_FMR 1\n-#define I40IW_CQP_COMPL_RQ_WQE_FLUSHED 2\n-#define I40IW_CQP_COMPL_SQ_WQE_FLUSHED 3\n-#define I40IW_CQP_COMPL_RQ_SQ_WQE_FLUSHED 4\n-\n-struct i40iw_cqp_compl_info {\n-\tu32 op_ret_val;\n-\tu16 maj_err_code;\n-\tu16 min_err_code;\n-\tbool error;\n-\tu8 op_code;\n-};\n-\n-#define i40iw_pr_err(fmt, args ...) pr_err(\"%s: \"fmt, __func__, ## args)\n-\n-#define i40iw_pr_info(fmt, args ...) pr_info(\"%s: \" fmt, __func__, ## args)\n-\n-#define i40iw_pr_warn(fmt, args ...) 
pr_warn(\"%s: \" fmt, __func__, ## args)\n-\n-struct i40iw_cqp_request {\n-\tstruct cqp_commands_info info;\n-\twait_queue_head_t waitq;\n-\tstruct list_head list;\n-\tatomic_t refcount;\n-\tvoid (*callback_fcn)(struct i40iw_cqp_request*, u32);\n-\tvoid *param;\n-\tstruct i40iw_cqp_compl_info compl_info;\n-\tbool waiting;\n-\tbool request_done;\n-\tbool dynamic;\n-};\n-\n-struct i40iw_cqp {\n-\tstruct i40iw_sc_cqp sc_cqp;\n-\tspinlock_t req_lock; /*cqp request list */\n-\twait_queue_head_t waitq;\n-\tstruct i40iw_dma_mem sq;\n-\tstruct i40iw_dma_mem host_ctx;\n-\tu64 *scratch_array;\n-\tstruct i40iw_cqp_request *cqp_requests;\n-\tstruct list_head cqp_avail_reqs;\n-\tstruct list_head cqp_pending_reqs;\n-};\n-\n-struct i40iw_device;\n-\n-struct i40iw_ccq {\n-\tstruct i40iw_sc_cq sc_cq;\n-\tspinlock_t lock; /* ccq control */\n-\twait_queue_head_t waitq;\n-\tstruct i40iw_dma_mem mem_cq;\n-\tstruct i40iw_dma_mem shadow_area;\n-};\n-\n-struct i40iw_ceq {\n-\tstruct i40iw_sc_ceq sc_ceq;\n-\tstruct i40iw_dma_mem mem;\n-\tu32 irq;\n-\tu32 msix_idx;\n-\tstruct i40iw_device *iwdev;\n-\tstruct tasklet_struct dpc_tasklet;\n-};\n-\n-struct i40iw_aeq {\n-\tstruct i40iw_sc_aeq sc_aeq;\n-\tstruct i40iw_dma_mem mem;\n-};\n-\n-struct i40iw_arp_entry {\n-\tu32 ip_addr[4];\n-\tu8 mac_addr[ETH_ALEN];\n-};\n-\n-enum init_completion_state {\n-\tINVALID_STATE = 0,\n-\tINITIAL_STATE,\n-\tCQP_CREATED,\n-\tHMC_OBJS_CREATED,\n-\tPBLE_CHUNK_MEM,\n-\tCCQ_CREATED,\n-\tAEQ_CREATED,\n-\tCEQ_CREATED,\n-\tILQ_CREATED,\n-\tIEQ_CREATED,\n-\tIP_ADDR_REGISTERED,\n-\tRDMA_DEV_REGISTERED\n-};\n-\n-struct i40iw_msix_vector {\n-\tu32 idx;\n-\tu32 irq;\n-\tu32 cpu_affinity;\n-\tu32 ceq_id;\n-\tcpumask_t mask;\n-};\n-\n-struct l2params_work {\n-\tstruct work_struct work;\n-\tstruct i40iw_device *iwdev;\n-\tstruct i40iw_l2params l2params;\n-};\n-\n-#define I40IW_MSIX_TABLE_SIZE 65\n-\n-struct virtchnl_work {\n-\tstruct work_struct work;\n-\tunion {\n-\t\tstruct i40iw_cqp_request *cqp_request;\n-\t\tstruct i40iw_virtchnl_work_info work_info;\n-\t};\n-};\n-\n-struct i40e_qvlist_info;\n-\n-struct i40iw_device {\n-\tstruct i40iw_ib_device *iwibdev;\n-\tstruct net_device *netdev;\n-\twait_queue_head_t vchnl_waitq;\n-\tstruct i40iw_sc_dev sc_dev;\n-\tstruct i40iw_sc_vsi vsi;\n-\tstruct i40iw_handler *hdl;\n-\tstruct i40e_info *ldev;\n-\tstruct i40e_client *client;\n-\tstruct i40iw_hw hw;\n-\tstruct i40iw_cm_core cm_core;\n-\tu8 *mem_resources;\n-\tunsigned long *allocated_qps;\n-\tunsigned long *allocated_cqs;\n-\tunsigned long *allocated_mrs;\n-\tunsigned long *allocated_pds;\n-\tunsigned long *allocated_arps;\n-\tstruct i40iw_qp **qp_table;\n-\tbool msix_shared;\n-\tu32 msix_count;\n-\tstruct i40iw_msix_vector *iw_msixtbl;\n-\tstruct i40e_qvlist_info *iw_qvlist;\n-\n-\tstruct i40iw_hmc_pble_rsrc *pble_rsrc;\n-\tstruct i40iw_arp_entry *arp_table;\n-\tstruct i40iw_cqp cqp;\n-\tstruct i40iw_ccq ccq;\n-\tu32 ceqs_count;\n-\tstruct i40iw_ceq *ceqlist;\n-\tstruct i40iw_aeq aeq;\n-\tu32 arp_table_size;\n-\tu32 next_arp_index;\n-\tspinlock_t resource_lock; /* hw resource access */\n-\tspinlock_t qptable_lock;\n-\tu32 vendor_id;\n-\tu32 vendor_part_id;\n-\tu32 of_device_registered;\n-\n-\tu32 device_cap_flags;\n-\tunsigned long db_start;\n-\tu8 resource_profile;\n-\tu8 max_rdma_vfs;\n-\tu8 max_enabled_vfs;\n-\tu8 max_sge;\n-\tu8 iw_status;\n-\tu8 send_term_ok;\n-\tbool push_mode;\t\t/* Initialized from parameter passed to driver */\n-\n-\t/* x710 specific */\n-\tstruct mutex pbl_mutex;\n-\tstruct tasklet_struct dpc_tasklet;\n-\tstruct 
workqueue_struct *virtchnl_wq;\n-\tstruct virtchnl_work virtchnl_w[I40IW_MAX_PE_ENABLED_VF_COUNT];\n-\tstruct i40iw_dma_mem obj_mem;\n-\tstruct i40iw_dma_mem obj_next;\n-\tu8 *hmc_info_mem;\n-\tu32 sd_type;\n-\tstruct workqueue_struct *param_wq;\n-\tatomic_t params_busy;\n-\tenum init_completion_state init_state;\n-\tu16 mac_ip_table_idx;\n-\tatomic_t vchnl_msgs;\n-\tu32 max_mr;\n-\tu32 max_qp;\n-\tu32 max_cq;\n-\tu32 max_pd;\n-\tu32 next_qp;\n-\tu32 next_cq;\n-\tu32 next_pd;\n-\tu32 max_mr_size;\n-\tu32 max_qp_wr;\n-\tu32 max_cqe;\n-\tu32 mr_stagmask;\n-\tu32 mpa_version;\n-\tbool dcb;\n-\tbool closing;\n-\tbool reset;\n-\tu32 used_pds;\n-\tu32 used_cqs;\n-\tu32 used_mrs;\n-\tu32 used_qps;\n-\twait_queue_head_t close_wq;\n-\tatomic64_t use_count;\n-};\n-\n-struct i40iw_ib_device {\n-\tstruct ib_device ibdev;\n-\tstruct i40iw_device *iwdev;\n-};\n-\n-struct i40iw_handler {\n-\tstruct list_head list;\n-\tstruct i40e_client *client;\n-\tstruct i40iw_device device;\n-\tstruct i40e_info ldev;\n-};\n-\n-/**\n- * to_iwdev - get device\n- * @ibdev: ib device\n- **/\n-static inline struct i40iw_device *to_iwdev(struct ib_device *ibdev)\n-{\n-\treturn container_of(ibdev, struct i40iw_ib_device, ibdev)->iwdev;\n-}\n-\n-/**\n- * to_ucontext - get user context\n- * @ibucontext: ib user context\n- **/\n-static inline struct i40iw_ucontext *to_ucontext(struct ib_ucontext *ibucontext)\n-{\n-\treturn container_of(ibucontext, struct i40iw_ucontext, ibucontext);\n-}\n-\n-/**\n- * to_iwpd - get protection domain\n- * @ibpd: ib pd\n- **/\n-static inline struct i40iw_pd *to_iwpd(struct ib_pd *ibpd)\n-{\n-\treturn container_of(ibpd, struct i40iw_pd, ibpd);\n-}\n-\n-/**\n- * to_iwmr - get device memory region\n- * @ibdev: ib memory region\n- **/\n-static inline struct i40iw_mr *to_iwmr(struct ib_mr *ibmr)\n-{\n-\treturn container_of(ibmr, struct i40iw_mr, ibmr);\n-}\n-\n-/**\n- * to_iwmr_from_ibfmr - get device memory region\n- * @ibfmr: ib fmr\n- **/\n-static inline struct i40iw_mr *to_iwmr_from_ibfmr(struct ib_fmr *ibfmr)\n-{\n-\treturn container_of(ibfmr, struct i40iw_mr, ibfmr);\n-}\n-\n-/**\n- * to_iwmw - get device memory window\n- * @ibmw: ib memory window\n- **/\n-static inline struct i40iw_mr *to_iwmw(struct ib_mw *ibmw)\n-{\n-\treturn container_of(ibmw, struct i40iw_mr, ibmw);\n-}\n-\n-/**\n- * to_iwcq - get completion queue\n- * @ibcq: ib cqdevice\n- **/\n-static inline struct i40iw_cq *to_iwcq(struct ib_cq *ibcq)\n-{\n-\treturn container_of(ibcq, struct i40iw_cq, ibcq);\n-}\n-\n-/**\n- * to_iwqp - get device qp\n- * @ibqp: ib qp\n- **/\n-static inline struct i40iw_qp *to_iwqp(struct ib_qp *ibqp)\n-{\n-\treturn container_of(ibqp, struct i40iw_qp, ibqp);\n-}\n-\n-/* i40iw.c */\n-void i40iw_add_ref(struct ib_qp *);\n-void i40iw_rem_ref(struct ib_qp *);\n-struct ib_qp *i40iw_get_qp(struct ib_device *, int);\n-\n-void i40iw_flush_wqes(struct i40iw_device *iwdev,\n-\t\t struct i40iw_qp *qp);\n-\n-void i40iw_manage_arp_cache(struct i40iw_device *iwdev,\n-\t\t\t unsigned char *mac_addr,\n-\t\t\t u32 *ip_addr,\n-\t\t\t bool ipv4,\n-\t\t\t u32 action);\n-\n-int i40iw_manage_apbvt(struct i40iw_device *iwdev,\n-\t\t u16 accel_local_port,\n-\t\t bool add_port);\n-\n-struct i40iw_cqp_request *i40iw_get_cqp_request(struct i40iw_cqp *cqp, bool wait);\n-void i40iw_free_cqp_request(struct i40iw_cqp *cqp, struct i40iw_cqp_request *cqp_request);\n-void i40iw_put_cqp_request(struct i40iw_cqp *cqp, struct i40iw_cqp_request *cqp_request);\n-\n-/**\n- * i40iw_alloc_resource - allocate a resource\n- * @iwdev: device 
pointer\n- * @resource_array: resource bit array:\n- * @max_resources: maximum resource number\n- * @req_resources_num: Allocated resource number\n- * @next: next free id\n- **/\n-static inline int i40iw_alloc_resource(struct i40iw_device *iwdev,\n-\t\t\t\t unsigned long *resource_array,\n-\t\t\t\t u32 max_resources,\n-\t\t\t\t u32 *req_resource_num,\n-\t\t\t\t u32 *next)\n-{\n-\tu32 resource_num;\n-\tunsigned long flags;\n-\n-\tspin_lock_irqsave(&iwdev->resource_lock, flags);\n-\tresource_num = find_next_zero_bit(resource_array, max_resources, *next);\n-\tif (resource_num >= max_resources) {\n-\t\tresource_num = find_first_zero_bit(resource_array, max_resources);\n-\t\tif (resource_num >= max_resources) {\n-\t\t\tspin_unlock_irqrestore(&iwdev->resource_lock, flags);\n-\t\t\treturn -EOVERFLOW;\n-\t\t}\n-\t}\n-\tset_bit(resource_num, resource_array);\n-\t*next = resource_num + 1;\n-\tif (*next == max_resources)\n-\t\t*next = 0;\n-\t*req_resource_num = resource_num;\n-\tspin_unlock_irqrestore(&iwdev->resource_lock, flags);\n-\n-\treturn 0;\n-}\n-\n-/**\n- * i40iw_is_resource_allocated - detrmine if resource is\n- * allocated\n- * @iwdev: device pointer\n- * @resource_array: resource array for the resource_num\n- * @resource_num: resource number to check\n- **/\n-static inline bool i40iw_is_resource_allocated(struct i40iw_device *iwdev,\n-\t\t\t\t\t unsigned long *resource_array,\n-\t\t\t\t\t u32 resource_num)\n-{\n-\tbool bit_is_set;\n-\tunsigned long flags;\n-\n-\tspin_lock_irqsave(&iwdev->resource_lock, flags);\n-\n-\tbit_is_set = test_bit(resource_num, resource_array);\n-\tspin_unlock_irqrestore(&iwdev->resource_lock, flags);\n-\n-\treturn bit_is_set;\n-}\n-\n-/**\n- * i40iw_free_resource - free a resource\n- * @iwdev: device pointer\n- * @resource_array: resource array for the resource_num\n- * @resource_num: resource number to free\n- **/\n-static inline void i40iw_free_resource(struct i40iw_device *iwdev,\n-\t\t\t\t unsigned long *resource_array,\n-\t\t\t\t u32 resource_num)\n-{\n-\tunsigned long flags;\n-\n-\tspin_lock_irqsave(&iwdev->resource_lock, flags);\n-\tclear_bit(resource_num, resource_array);\n-\tspin_unlock_irqrestore(&iwdev->resource_lock, flags);\n-}\n-\n-/**\n- * to_iwhdl - Get the handler from the device pointer\n- * @iwdev: device pointer\n- **/\n-static inline struct i40iw_handler *to_iwhdl(struct i40iw_device *iw_dev)\n-{\n-\treturn container_of(iw_dev, struct i40iw_handler, device);\n-}\n-\n-struct i40iw_handler *i40iw_find_netdev(struct net_device *netdev);\n-\n-/**\n- * iw_init_resources -\n- */\n-u32 i40iw_initialize_hw_resources(struct i40iw_device *iwdev);\n-\n-int i40iw_register_rdma_device(struct i40iw_device *iwdev);\n-void i40iw_port_ibevent(struct i40iw_device *iwdev);\n-void i40iw_cm_disconn(struct i40iw_qp *iwqp);\n-void i40iw_cm_disconn_worker(void *);\n-int mini_cm_recv_pkt(struct i40iw_cm_core *, struct i40iw_device *,\n-\t\t struct sk_buff *);\n-\n-enum i40iw_status_code i40iw_handle_cqp_op(struct i40iw_device *iwdev,\n-\t\t\t\t\t struct i40iw_cqp_request *cqp_request);\n-enum i40iw_status_code i40iw_add_mac_addr(struct i40iw_device *iwdev,\n-\t\t\t\t\t u8 *mac_addr, u8 *mac_index);\n-int i40iw_modify_qp(struct ib_qp *, struct ib_qp_attr *, int, struct ib_udata *);\n-void i40iw_cq_wq_destroy(struct i40iw_device *iwdev, struct i40iw_sc_cq *cq);\n-\n-void i40iw_cleanup_pending_cqp_op(struct i40iw_device *iwdev);\n-void i40iw_rem_pdusecount(struct i40iw_pd *iwpd, struct i40iw_device *iwdev);\n-void i40iw_add_pdusecount(struct i40iw_pd *iwpd);\n-void 
i40iw_rem_devusecount(struct i40iw_device *iwdev);\n-void i40iw_add_devusecount(struct i40iw_device *iwdev);\n-void i40iw_hw_modify_qp(struct i40iw_device *iwdev, struct i40iw_qp *iwqp,\n-\t\t\tstruct i40iw_modify_qp_info *info, bool wait);\n-\n-void i40iw_qp_suspend_resume(struct i40iw_sc_dev *dev,\n-\t\t\t struct i40iw_sc_qp *qp,\n-\t\t\t bool suspend);\n-enum i40iw_status_code i40iw_manage_qhash(struct i40iw_device *iwdev,\n-\t\t\t\t\t struct i40iw_cm_info *cminfo,\n-\t\t\t\t\t enum i40iw_quad_entry_type etype,\n-\t\t\t\t\t enum i40iw_quad_hash_manage_type mtype,\n-\t\t\t\t\t void *cmnode,\n-\t\t\t\t\t bool wait);\n-void i40iw_receive_ilq(struct i40iw_sc_vsi *vsi, struct i40iw_puda_buf *rbuf);\n-void i40iw_free_sqbuf(struct i40iw_sc_vsi *vsi, void *bufp);\n-void i40iw_free_qp_resources(struct i40iw_device *iwdev,\n-\t\t\t struct i40iw_qp *iwqp,\n-\t\t\t u32 qp_num);\n-enum i40iw_status_code i40iw_obj_aligned_mem(struct i40iw_device *iwdev,\n-\t\t\t\t\t struct i40iw_dma_mem *memptr,\n-\t\t\t\t\t u32 size, u32 mask);\n-\n-void i40iw_request_reset(struct i40iw_device *iwdev);\n-void i40iw_destroy_rdma_device(struct i40iw_ib_device *iwibdev);\n-int i40iw_setup_cm_core(struct i40iw_device *iwdev);\n-void i40iw_cleanup_cm_core(struct i40iw_cm_core *cm_core);\n-void i40iw_process_ceq(struct i40iw_device *, struct i40iw_ceq *iwceq);\n-void i40iw_process_aeq(struct i40iw_device *);\n-void i40iw_next_iw_state(struct i40iw_qp *iwqp,\n-\t\t\t u8 state, u8 del_hash,\n-\t\t\t u8 term, u8 term_len);\n-int i40iw_send_syn(struct i40iw_cm_node *cm_node, u32 sendack);\n-int i40iw_send_reset(struct i40iw_cm_node *cm_node);\n-struct i40iw_cm_node *i40iw_find_node(struct i40iw_cm_core *cm_core,\n-\t\t\t\t u16 rem_port,\n-\t\t\t\t u32 *rem_addr,\n-\t\t\t\t u16 loc_port,\n-\t\t\t\t u32 *loc_addr,\n-\t\t\t\t bool add_refcnt,\n-\t\t\t\t bool accelerated_list);\n-\n-enum i40iw_status_code i40iw_hw_flush_wqes(struct i40iw_device *iwdev,\n-\t\t\t\t\t struct i40iw_sc_qp *qp,\n-\t\t\t\t\t struct i40iw_qp_flush_info *info,\n-\t\t\t\t\t bool wait);\n-\n-void i40iw_gen_ae(struct i40iw_device *iwdev,\n-\t\t struct i40iw_sc_qp *qp,\n-\t\t struct i40iw_gen_ae_info *info,\n-\t\t bool wait);\n-\n-void i40iw_copy_ip_ntohl(u32 *dst, __be32 *src);\n-struct ib_mr *i40iw_reg_phys_mr(struct ib_pd *ib_pd,\n-\t\t\t\tu64 addr,\n-\t\t\t\tu64 size,\n-\t\t\t\tint acc,\n-\t\t\t\tu64 *iova_start);\n-\n-int i40iw_inetaddr_event(struct notifier_block *notifier,\n-\t\t\t unsigned long event,\n-\t\t\t void *ptr);\n-int i40iw_inet6addr_event(struct notifier_block *notifier,\n-\t\t\t unsigned long event,\n-\t\t\t void *ptr);\n-int i40iw_net_event(struct notifier_block *notifier,\n-\t\t unsigned long event,\n-\t\t void *ptr);\n-int i40iw_netdevice_event(struct notifier_block *notifier,\n-\t\t\t unsigned long event,\n-\t\t\t void *ptr);\n-\n-#endif\ndiff --git a/drivers/infiniband/hw/i40iw/i40iw_cm.c b/drivers/infiniband/hw/i40iw/i40iw_cm.c\ndeleted file mode 100644\nindex 2d6a378..0000000\n--- a/drivers/infiniband/hw/i40iw/i40iw_cm.c\n+++ /dev/null\n@@ -1,4422 +0,0 @@\n-/*******************************************************************************\n-*\n-* Copyright (c) 2015-2016 Intel Corporation. All rights reserved.\n-*\n-* This software is available to you under a choice of one of two\n-* licenses. 
You may choose to be licensed under the terms of the GNU\n-* General Public License (GPL) Version 2, available from the file\n-* COPYING in the main directory of this source tree, or the\n-* OpenFabrics.org BSD license below:\n-*\n-* Redistribution and use in source and binary forms, with or\n-* without modification, are permitted provided that the following\n-* conditions are met:\n-*\n-* - Redistributions of source code must retain the above\n-*\tcopyright notice, this list of conditions and the following\n-*\tdisclaimer.\n-*\n-* - Redistributions in binary form must reproduce the above\n-*\tcopyright notice, this list of conditions and the following\n-*\tdisclaimer in the documentation and/or other materials\n-*\tprovided with the distribution.\n-*\n-* THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,\n-* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\n-* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\n-* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS\n-* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN\n-* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\n-* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n-* SOFTWARE.\n-*\n-*******************************************************************************/\n-\n-#include <linux/atomic.h>\n-#include <linux/ip.h>\n-#include <linux/tcp.h>\n-#include <linux/init.h>\n-#include <linux/if_arp.h>\n-#include <linux/if_vlan.h>\n-#include <linux/notifier.h>\n-#include <linux/net.h>\n-#include <linux/types.h>\n-#include <linux/timer.h>\n-#include <linux/time.h>\n-#include <linux/delay.h>\n-#include <linux/etherdevice.h>\n-#include <linux/netdevice.h>\n-#include <linux/random.h>\n-#include <linux/list.h>\n-#include <linux/threads.h>\n-#include <linux/highmem.h>\n-#include <net/arp.h>\n-#include <net/ndisc.h>\n-#include <net/neighbour.h>\n-#include <net/route.h>\n-#include <net/addrconf.h>\n-#include <net/ip6_route.h>\n-#include <net/ip_fib.h>\n-#include <net/secure_seq.h>\n-#include <net/tcp.h>\n-#include <asm/checksum.h>\n-\n-#include \"i40iw.h\"\n-\n-static void i40iw_rem_ref_cm_node(struct i40iw_cm_node *);\n-static void i40iw_cm_post_event(struct i40iw_cm_event *event);\n-static void i40iw_disconnect_worker(struct work_struct *work);\n-\n-/**\n- * i40iw_free_sqbuf - put back puda buffer if refcount = 0\n- * @vsi: pointer to vsi structure\n- * @buf: puda buffer to free\n- */\n-void i40iw_free_sqbuf(struct i40iw_sc_vsi *vsi, void *bufp)\n-{\n-\tstruct i40iw_puda_buf *buf = (struct i40iw_puda_buf *)bufp;\n-\tstruct i40iw_puda_rsrc *ilq = vsi->ilq;\n-\n-\tif (!atomic_dec_return(&buf->refcount))\n-\t\ti40iw_puda_ret_bufpool(ilq, buf);\n-}\n-\n-/**\n- * i40iw_derive_hw_ird_setting - Calculate IRD\n- *\n- * @cm_ird: IRD of connection's node\n- *\n- * The ird from the connection is rounded to a supported HW\n- * setting (2,8,32,64) and then encoded for ird_size field of\n- * qp_ctx\n- */\n-static u8 i40iw_derive_hw_ird_setting(u16 cm_ird)\n-{\n-\tu8 encoded_ird_size;\n-\n-\t/* ird_size field is encoded in qp_ctx */\n-\tswitch (cm_ird ? 
roundup_pow_of_two(cm_ird) : 0) {\n-\tcase I40IW_HW_IRD_SETTING_64:\n-\t\tencoded_ird_size = 3;\n-\t\tbreak;\n-\tcase I40IW_HW_IRD_SETTING_32:\n-\tcase I40IW_HW_IRD_SETTING_16:\n-\t\tencoded_ird_size = 2;\n-\t\tbreak;\n-\tcase I40IW_HW_IRD_SETTING_8:\n-\tcase I40IW_HW_IRD_SETTING_4:\n-\t\tencoded_ird_size = 1;\n-\t\tbreak;\n-\tcase I40IW_HW_IRD_SETTING_2:\n-\tdefault:\n-\t\tencoded_ird_size = 0;\n-\t\tbreak;\n-\t}\n-\treturn encoded_ird_size;\n-}\n-\n-/**\n- * i40iw_record_ird_ord - Record IRD/ORD passed in\n- * @cm_node: connection's node\n- * @conn_ird: connection IRD\n- * @conn_ord: connection ORD\n- */\n-static void i40iw_record_ird_ord(struct i40iw_cm_node *cm_node, u32 conn_ird,\n-\t\t\t\t u32 conn_ord)\n-{\n-\tif (conn_ird > I40IW_MAX_IRD_SIZE)\n-\t\tconn_ird = I40IW_MAX_IRD_SIZE;\n-\n-\tif (conn_ord > I40IW_MAX_ORD_SIZE)\n-\t\tconn_ord = I40IW_MAX_ORD_SIZE;\n-\telse if (!conn_ord && cm_node->send_rdma0_op == SEND_RDMA_READ_ZERO)\n-\t\tconn_ord = 1;\n-\n-\tcm_node->ird_size = conn_ird;\n-\tcm_node->ord_size = conn_ord;\n-}\n-\n-/**\n- * i40iw_copy_ip_ntohl - change network to host ip\n- * @dst: host ip\n- * @src: big endian\n- */\n-void i40iw_copy_ip_ntohl(u32 *dst, __be32 *src)\n-{\n-\t*dst++ = ntohl(*src++);\n-\t*dst++ = ntohl(*src++);\n-\t*dst++ = ntohl(*src++);\n-\t*dst = ntohl(*src);\n-}\n-\n-/**\n- * i40iw_copy_ip_htonl - change host addr to network ip\n- * @dst: host ip\n- * @src: little endian\n- */\n-static inline void i40iw_copy_ip_htonl(__be32 *dst, u32 *src)\n-{\n-\t*dst++ = htonl(*src++);\n-\t*dst++ = htonl(*src++);\n-\t*dst++ = htonl(*src++);\n-\t*dst = htonl(*src);\n-}\n-\n-/**\n- * i40iw_fill_sockaddr4 - get addr info for passive connection\n- * @cm_node: connection's node\n- * @event: upper layer's cm event\n- */\n-static inline void i40iw_fill_sockaddr4(struct i40iw_cm_node *cm_node,\n-\t\t\t\t\tstruct iw_cm_event *event)\n-{\n-\tstruct sockaddr_in *laddr = (struct sockaddr_in *)&event->local_addr;\n-\tstruct sockaddr_in *raddr = (struct sockaddr_in *)&event->remote_addr;\n-\n-\tladdr->sin_family = AF_INET;\n-\traddr->sin_family = AF_INET;\n-\n-\tladdr->sin_port = htons(cm_node->loc_port);\n-\traddr->sin_port = htons(cm_node->rem_port);\n-\n-\tladdr->sin_addr.s_addr = htonl(cm_node->loc_addr[0]);\n-\traddr->sin_addr.s_addr = htonl(cm_node->rem_addr[0]);\n-}\n-\n-/**\n- * i40iw_fill_sockaddr6 - get ipv6 addr info for passive side\n- * @cm_node: connection's node\n- * @event: upper layer's cm event\n- */\n-static inline void i40iw_fill_sockaddr6(struct i40iw_cm_node *cm_node,\n-\t\t\t\t\tstruct iw_cm_event *event)\n-{\n-\tstruct sockaddr_in6 *laddr6 = (struct sockaddr_in6 *)&event->local_addr;\n-\tstruct sockaddr_in6 *raddr6 = (struct sockaddr_in6 *)&event->remote_addr;\n-\n-\tladdr6->sin6_family = AF_INET6;\n-\traddr6->sin6_family = AF_INET6;\n-\n-\tladdr6->sin6_port = htons(cm_node->loc_port);\n-\traddr6->sin6_port = htons(cm_node->rem_port);\n-\n-\ti40iw_copy_ip_htonl(laddr6->sin6_addr.in6_u.u6_addr32,\n-\t\t\t cm_node->loc_addr);\n-\ti40iw_copy_ip_htonl(raddr6->sin6_addr.in6_u.u6_addr32,\n-\t\t\t cm_node->rem_addr);\n-}\n-\n-/**\n- * i40iw_get_addr_info\n- * @cm_node: contains ip/tcp info\n- * @cm_info: to get a copy of the cm_node ip/tcp info\n-*/\n-static void i40iw_get_addr_info(struct i40iw_cm_node *cm_node,\n-\t\t\t\tstruct i40iw_cm_info *cm_info)\n-{\n-\tcm_info->ipv4 = cm_node->ipv4;\n-\tcm_info->vlan_id = cm_node->vlan_id;\n-\tmemcpy(cm_info->loc_addr, cm_node->loc_addr, sizeof(cm_info->loc_addr));\n-\tmemcpy(cm_info->rem_addr, cm_node->rem_addr, 
sizeof(cm_info->rem_addr));\n-\tcm_info->loc_port = cm_node->loc_port;\n-\tcm_info->rem_port = cm_node->rem_port;\n-\tcm_info->user_pri = cm_node->user_pri;\n-}\n-\n-/**\n- * i40iw_get_cmevent_info - for cm event upcall\n- * @cm_node: connection's node\n- * @cm_id: upper layers cm struct for the event\n- * @event: upper layer's cm event\n- */\n-static inline void i40iw_get_cmevent_info(struct i40iw_cm_node *cm_node,\n-\t\t\t\t\t struct iw_cm_id *cm_id,\n-\t\t\t\t\t struct iw_cm_event *event)\n-{\n-\tmemcpy(&event->local_addr, &cm_id->m_local_addr,\n-\t sizeof(event->local_addr));\n-\tmemcpy(&event->remote_addr, &cm_id->m_remote_addr,\n-\t sizeof(event->remote_addr));\n-\tif (cm_node) {\n-\t\tevent->private_data = (void *)cm_node->pdata_buf;\n-\t\tevent->private_data_len = (u8)cm_node->pdata.size;\n-\t\tevent->ird = cm_node->ird_size;\n-\t\tevent->ord = cm_node->ord_size;\n-\t}\n-}\n-\n-/**\n- * i40iw_send_cm_event - upcall cm's event handler\n- * @cm_node: connection's node\n- * @cm_id: upper layer's cm info struct\n- * @type: Event type to indicate\n- * @status: status for the event type\n- */\n-static int i40iw_send_cm_event(struct i40iw_cm_node *cm_node,\n-\t\t\t struct iw_cm_id *cm_id,\n-\t\t\t enum iw_cm_event_type type,\n-\t\t\t int status)\n-{\n-\tstruct iw_cm_event event;\n-\n-\tmemset(&event, 0, sizeof(event));\n-\tevent.event = type;\n-\tevent.status = status;\n-\tswitch (type) {\n-\tcase IW_CM_EVENT_CONNECT_REQUEST:\n-\t\tif (cm_node->ipv4)\n-\t\t\ti40iw_fill_sockaddr4(cm_node, &event);\n-\t\telse\n-\t\t\ti40iw_fill_sockaddr6(cm_node, &event);\n-\t\tevent.provider_data = (void *)cm_node;\n-\t\tevent.private_data = (void *)cm_node->pdata_buf;\n-\t\tevent.private_data_len = (u8)cm_node->pdata.size;\n-\t\tevent.ird = cm_node->ird_size;\n-\t\tbreak;\n-\tcase IW_CM_EVENT_CONNECT_REPLY:\n-\t\ti40iw_get_cmevent_info(cm_node, cm_id, &event);\n-\t\tbreak;\n-\tcase IW_CM_EVENT_ESTABLISHED:\n-\t\tevent.ird = cm_node->ird_size;\n-\t\tevent.ord = cm_node->ord_size;\n-\t\tbreak;\n-\tcase IW_CM_EVENT_DISCONNECT:\n-\t\tbreak;\n-\tcase IW_CM_EVENT_CLOSE:\n-\t\tbreak;\n-\tdefault:\n-\t\ti40iw_pr_err(\"event type received type = %d\\n\", type);\n-\t\treturn -1;\n-\t}\n-\treturn cm_id->event_handler(cm_id, &event);\n-}\n-\n-/**\n- * i40iw_create_event - create cm event\n- * @cm_node: connection's node\n- * @type: Event type to generate\n- */\n-static struct i40iw_cm_event *i40iw_create_event(struct i40iw_cm_node *cm_node,\n-\t\t\t\t\t\t enum i40iw_cm_event_type type)\n-{\n-\tstruct i40iw_cm_event *event;\n-\n-\tif (!cm_node->cm_id)\n-\t\treturn NULL;\n-\n-\tevent = kzalloc(sizeof(*event), GFP_ATOMIC);\n-\n-\tif (!event)\n-\t\treturn NULL;\n-\n-\tevent->type = type;\n-\tevent->cm_node = cm_node;\n-\tmemcpy(event->cm_info.rem_addr, cm_node->rem_addr, sizeof(event->cm_info.rem_addr));\n-\tmemcpy(event->cm_info.loc_addr, cm_node->loc_addr, sizeof(event->cm_info.loc_addr));\n-\tevent->cm_info.rem_port = cm_node->rem_port;\n-\tevent->cm_info.loc_port = cm_node->loc_port;\n-\tevent->cm_info.cm_id = cm_node->cm_id;\n-\n-\ti40iw_debug(cm_node->dev,\n-\t\t I40IW_DEBUG_CM,\n-\t\t \"node=%p event=%p type=%u dst=%pI4 src=%pI4\\n\",\n-\t\t cm_node,\n-\t\t event,\n-\t\t type,\n-\t\t event->cm_info.loc_addr,\n-\t\t event->cm_info.rem_addr);\n-\n-\ti40iw_cm_post_event(event);\n-\treturn event;\n-}\n-\n-/**\n- * i40iw_free_retrans_entry - free send entry\n- * @cm_node: connection's node\n- */\n-static void i40iw_free_retrans_entry(struct i40iw_cm_node *cm_node)\n-{\n-\tstruct i40iw_device *iwdev = 
cm_node->iwdev;\n-\tstruct i40iw_timer_entry *send_entry;\n-\n-\tsend_entry = cm_node->send_entry;\n-\tif (send_entry) {\n-\t\tcm_node->send_entry = NULL;\n-\t\ti40iw_free_sqbuf(&iwdev->vsi, (void *)send_entry->sqbuf);\n-\t\tkfree(send_entry);\n-\t\tatomic_dec(&cm_node->ref_count);\n-\t}\n-}\n-\n-/**\n- * i40iw_cleanup_retrans_entry - free send entry with lock\n- * @cm_node: connection's node\n- */\n-static void i40iw_cleanup_retrans_entry(struct i40iw_cm_node *cm_node)\n-{\n-\tunsigned long flags;\n-\n-\tspin_lock_irqsave(&cm_node->retrans_list_lock, flags);\n-\ti40iw_free_retrans_entry(cm_node);\n-\tspin_unlock_irqrestore(&cm_node->retrans_list_lock, flags);\n-}\n-\n-/**\n- * i40iw_form_cm_frame - get a free packet and build frame\n- * @cm_node: connection's node ionfo to use in frame\n- * @options: pointer to options info\n- * @hdr: pointer mpa header\n- * @pdata: pointer to private data\n- * @flags: indicates FIN or ACK\n- */\n-static struct i40iw_puda_buf *i40iw_form_cm_frame(struct i40iw_cm_node *cm_node,\n-\t\t\t\t\t\t struct i40iw_kmem_info *options,\n-\t\t\t\t\t\t struct i40iw_kmem_info *hdr,\n-\t\t\t\t\t\t struct i40iw_kmem_info *pdata,\n-\t\t\t\t\t\t u8 flags)\n-{\n-\tstruct i40iw_puda_buf *sqbuf;\n-\tstruct i40iw_sc_vsi *vsi = &cm_node->iwdev->vsi;\n-\tu8 *buf;\n-\n-\tstruct tcphdr *tcph;\n-\tstruct iphdr *iph;\n-\tstruct ipv6hdr *ip6h;\n-\tstruct ethhdr *ethh;\n-\tu16 packetsize;\n-\tu16 eth_hlen = ETH_HLEN;\n-\tu32 opts_len = 0;\n-\tu32 pd_len = 0;\n-\tu32 hdr_len = 0;\n-\tu16 vtag;\n-\n-\tsqbuf = i40iw_puda_get_bufpool(vsi->ilq);\n-\tif (!sqbuf)\n-\t\treturn NULL;\n-\tbuf = sqbuf->mem.va;\n-\n-\tif (options)\n-\t\topts_len = (u32)options->size;\n-\n-\tif (hdr)\n-\t\thdr_len = hdr->size;\n-\n-\tif (pdata)\n-\t\tpd_len = pdata->size;\n-\n-\tif (cm_node->vlan_id <= VLAN_VID_MASK)\n-\t\teth_hlen += 4;\n-\n-\tif (cm_node->ipv4)\n-\t\tpacketsize = sizeof(*iph) + sizeof(*tcph);\n-\telse\n-\t\tpacketsize = sizeof(*ip6h) + sizeof(*tcph);\n-\tpacketsize += opts_len + hdr_len + pd_len;\n-\n-\tmemset(buf, 0x00, eth_hlen + packetsize);\n-\n-\tsqbuf->totallen = packetsize + eth_hlen;\n-\tsqbuf->maclen = eth_hlen;\n-\tsqbuf->tcphlen = sizeof(*tcph) + opts_len;\n-\tsqbuf->scratch = (void *)cm_node;\n-\n-\tethh = (struct ethhdr *)buf;\n-\tbuf += eth_hlen;\n-\n-\tif (cm_node->ipv4) {\n-\t\tsqbuf->ipv4 = true;\n-\n-\t\tiph = (struct iphdr *)buf;\n-\t\tbuf += sizeof(*iph);\n-\t\ttcph = (struct tcphdr *)buf;\n-\t\tbuf += sizeof(*tcph);\n-\n-\t\tether_addr_copy(ethh->h_dest, cm_node->rem_mac);\n-\t\tether_addr_copy(ethh->h_source, cm_node->loc_mac);\n-\t\tif (cm_node->vlan_id <= VLAN_VID_MASK) {\n-\t\t\t((struct vlan_ethhdr *)ethh)->h_vlan_proto = htons(ETH_P_8021Q);\n-\t\t\tvtag = (cm_node->user_pri << VLAN_PRIO_SHIFT) | cm_node->vlan_id;\n-\t\t\t((struct vlan_ethhdr *)ethh)->h_vlan_TCI = htons(vtag);\n-\n-\t\t\t((struct vlan_ethhdr *)ethh)->h_vlan_encapsulated_proto = htons(ETH_P_IP);\n-\t\t} else {\n-\t\t\tethh->h_proto = htons(ETH_P_IP);\n-\t\t}\n-\n-\t\tiph->version = IPVERSION;\n-\t\tiph->ihl = 5;\t/* 5 * 4Byte words, IP headr len */\n-\t\tiph->tos = cm_node->tos;\n-\t\tiph->tot_len = htons(packetsize);\n-\t\tiph->id = htons(++cm_node->tcp_cntxt.loc_id);\n-\n-\t\tiph->frag_off = htons(0x4000);\n-\t\tiph->ttl = 0x40;\n-\t\tiph->protocol = IPPROTO_TCP;\n-\t\tiph->saddr = htonl(cm_node->loc_addr[0]);\n-\t\tiph->daddr = htonl(cm_node->rem_addr[0]);\n-\t} else {\n-\t\tsqbuf->ipv4 = false;\n-\t\tip6h = (struct ipv6hdr *)buf;\n-\t\tbuf += sizeof(*ip6h);\n-\t\ttcph = (struct tcphdr *)buf;\n-\t\tbuf 
+= sizeof(*tcph);\n-\n-\t\tether_addr_copy(ethh->h_dest, cm_node->rem_mac);\n-\t\tether_addr_copy(ethh->h_source, cm_node->loc_mac);\n-\t\tif (cm_node->vlan_id <= VLAN_VID_MASK) {\n-\t\t\t((struct vlan_ethhdr *)ethh)->h_vlan_proto = htons(ETH_P_8021Q);\n-\t\t\tvtag = (cm_node->user_pri << VLAN_PRIO_SHIFT) | cm_node->vlan_id;\n-\t\t\t((struct vlan_ethhdr *)ethh)->h_vlan_TCI = htons(vtag);\n-\t\t\t((struct vlan_ethhdr *)ethh)->h_vlan_encapsulated_proto = htons(ETH_P_IPV6);\n-\t\t} else {\n-\t\t\tethh->h_proto = htons(ETH_P_IPV6);\n-\t\t}\n-\t\tip6h->version = 6;\n-\t\tip6h->priority = cm_node->tos >> 4;\n-\t\tip6h->flow_lbl[0] = cm_node->tos << 4;\n-\t\tip6h->flow_lbl[1] = 0;\n-\t\tip6h->flow_lbl[2] = 0;\n-\t\tip6h->payload_len = htons(packetsize - sizeof(*ip6h));\n-\t\tip6h->nexthdr = 6;\n-\t\tip6h->hop_limit = 128;\n-\t\ti40iw_copy_ip_htonl(ip6h->saddr.in6_u.u6_addr32,\n-\t\t\t\t cm_node->loc_addr);\n-\t\ti40iw_copy_ip_htonl(ip6h->daddr.in6_u.u6_addr32,\n-\t\t\t\t cm_node->rem_addr);\n-\t}\n-\n-\ttcph->source = htons(cm_node->loc_port);\n-\ttcph->dest = htons(cm_node->rem_port);\n-\n-\ttcph->seq = htonl(cm_node->tcp_cntxt.loc_seq_num);\n-\n-\tif (flags & SET_ACK) {\n-\t\tcm_node->tcp_cntxt.loc_ack_num = cm_node->tcp_cntxt.rcv_nxt;\n-\t\ttcph->ack_seq = htonl(cm_node->tcp_cntxt.loc_ack_num);\n-\t\ttcph->ack = 1;\n-\t} else {\n-\t\ttcph->ack_seq = 0;\n-\t}\n-\n-\tif (flags & SET_SYN) {\n-\t\tcm_node->tcp_cntxt.loc_seq_num++;\n-\t\ttcph->syn = 1;\n-\t} else {\n-\t\tcm_node->tcp_cntxt.loc_seq_num += hdr_len + pd_len;\n-\t}\n-\n-\tif (flags & SET_FIN) {\n-\t\tcm_node->tcp_cntxt.loc_seq_num++;\n-\t\ttcph->fin = 1;\n-\t}\n-\n-\tif (flags & SET_RST)\n-\t\ttcph->rst = 1;\n-\n-\ttcph->doff = (u16)((sizeof(*tcph) + opts_len + 3) >> 2);\n-\tsqbuf->tcphlen = tcph->doff << 2;\n-\ttcph->window = htons(cm_node->tcp_cntxt.rcv_wnd);\n-\ttcph->urg_ptr = 0;\n-\n-\tif (opts_len) {\n-\t\tmemcpy(buf, options->addr, opts_len);\n-\t\tbuf += opts_len;\n-\t}\n-\n-\tif (hdr_len) {\n-\t\tmemcpy(buf, hdr->addr, hdr_len);\n-\t\tbuf += hdr_len;\n-\t}\n-\n-\tif (pdata && pdata->addr)\n-\t\tmemcpy(buf, pdata->addr, pdata->size);\n-\n-\tatomic_set(&sqbuf->refcount, 1);\n-\n-\treturn sqbuf;\n-}\n-\n-/**\n- * i40iw_send_reset - Send RST packet\n- * @cm_node: connection's node\n- */\n-int i40iw_send_reset(struct i40iw_cm_node *cm_node)\n-{\n-\tstruct i40iw_puda_buf *sqbuf;\n-\tint flags = SET_RST | SET_ACK;\n-\n-\tsqbuf = i40iw_form_cm_frame(cm_node, NULL, NULL, NULL, flags);\n-\tif (!sqbuf) {\n-\t\ti40iw_pr_err(\"no sqbuf\\n\");\n-\t\treturn -1;\n-\t}\n-\n-\treturn i40iw_schedule_cm_timer(cm_node, sqbuf, I40IW_TIMER_TYPE_SEND, 0, 1);\n-}\n-\n-/**\n- * i40iw_active_open_err - send event for active side cm error\n- * @cm_node: connection's node\n- * @reset: Flag to send reset or not\n- */\n-static void i40iw_active_open_err(struct i40iw_cm_node *cm_node, bool reset)\n-{\n-\ti40iw_cleanup_retrans_entry(cm_node);\n-\tcm_node->cm_core->stats_connect_errs++;\n-\tif (reset) {\n-\t\ti40iw_debug(cm_node->dev,\n-\t\t\t I40IW_DEBUG_CM,\n-\t\t\t \"%s cm_node=%p state=%d\\n\",\n-\t\t\t __func__,\n-\t\t\t cm_node,\n-\t\t\t cm_node->state);\n-\t\tatomic_inc(&cm_node->ref_count);\n-\t\ti40iw_send_reset(cm_node);\n-\t}\n-\n-\tcm_node->state = I40IW_CM_STATE_CLOSED;\n-\ti40iw_create_event(cm_node, I40IW_CM_EVENT_ABORTED);\n-}\n-\n-/**\n- * i40iw_passive_open_err - handle passive side cm error\n- * @cm_node: connection's node\n- * @reset: send reset or just free cm_node\n- */\n-static void i40iw_passive_open_err(struct i40iw_cm_node *cm_node, 
bool reset)\n-{\n-\ti40iw_cleanup_retrans_entry(cm_node);\n-\tcm_node->cm_core->stats_passive_errs++;\n-\tcm_node->state = I40IW_CM_STATE_CLOSED;\n-\ti40iw_debug(cm_node->dev,\n-\t\t I40IW_DEBUG_CM,\n-\t\t \"%s cm_node=%p state =%d\\n\",\n-\t\t __func__,\n-\t\t cm_node,\n-\t\t cm_node->state);\n-\tif (reset)\n-\t\ti40iw_send_reset(cm_node);\n-\telse\n-\t\ti40iw_rem_ref_cm_node(cm_node);\n-}\n-\n-/**\n- * i40iw_event_connect_error - to create connect error event\n- * @event: cm information for connect event\n- */\n-static void i40iw_event_connect_error(struct i40iw_cm_event *event)\n-{\n-\tstruct i40iw_qp *iwqp;\n-\tstruct iw_cm_id *cm_id;\n-\n-\tcm_id = event->cm_node->cm_id;\n-\tif (!cm_id)\n-\t\treturn;\n-\n-\tiwqp = cm_id->provider_data;\n-\n-\tif (!iwqp || !iwqp->iwdev)\n-\t\treturn;\n-\n-\tiwqp->cm_id = NULL;\n-\tcm_id->provider_data = NULL;\n-\ti40iw_send_cm_event(event->cm_node, cm_id,\n-\t\t\t IW_CM_EVENT_CONNECT_REPLY,\n-\t\t\t -ECONNRESET);\n-\tcm_id->rem_ref(cm_id);\n-\ti40iw_rem_ref_cm_node(event->cm_node);\n-}\n-\n-/**\n- * i40iw_process_options\n- * @cm_node: connection's node\n- * @optionsloc: point to start of options\n- * @optionsize: size of all options\n- * @syn_packet: flag if syn packet\n- */\n-static int i40iw_process_options(struct i40iw_cm_node *cm_node,\n-\t\t\t\t u8 *optionsloc,\n-\t\t\t\t u32 optionsize,\n-\t\t\t\t u32 syn_packet)\n-{\n-\tu32 tmp;\n-\tu32 offset = 0;\n-\tunion all_known_options *all_options;\n-\tchar got_mss_option = 0;\n-\n-\twhile (offset < optionsize) {\n-\t\tall_options = (union all_known_options *)(optionsloc + offset);\n-\t\tswitch (all_options->as_base.optionnum) {\n-\t\tcase OPTION_NUMBER_END:\n-\t\t\toffset = optionsize;\n-\t\t\tbreak;\n-\t\tcase OPTION_NUMBER_NONE:\n-\t\t\toffset += 1;\n-\t\t\tcontinue;\n-\t\tcase OPTION_NUMBER_MSS:\n-\t\t\ti40iw_debug(cm_node->dev,\n-\t\t\t\t I40IW_DEBUG_CM,\n-\t\t\t\t \"%s: MSS Length: %d Offset: %d Size: %d\\n\",\n-\t\t\t\t __func__,\n-\t\t\t\t all_options->as_mss.length,\n-\t\t\t\t offset,\n-\t\t\t\t optionsize);\n-\t\t\tgot_mss_option = 1;\n-\t\t\tif (all_options->as_mss.length != 4)\n-\t\t\t\treturn -1;\n-\t\t\ttmp = ntohs(all_options->as_mss.mss);\n-\t\t\tif (tmp > 0 && tmp < cm_node->tcp_cntxt.mss)\n-\t\t\t\tcm_node->tcp_cntxt.mss = tmp;\n-\t\t\tbreak;\n-\t\tcase OPTION_NUMBER_WINDOW_SCALE:\n-\t\t\tcm_node->tcp_cntxt.snd_wscale =\n-\t\t\t all_options->as_windowscale.shiftcount;\n-\t\t\tbreak;\n-\t\tdefault:\n-\t\t\ti40iw_debug(cm_node->dev,\n-\t\t\t\t I40IW_DEBUG_CM,\n-\t\t\t\t \"TCP Option not understood: %x\\n\",\n-\t\t\t\t all_options->as_base.optionnum);\n-\t\t\tbreak;\n-\t\t}\n-\t\toffset += all_options->as_base.length;\n-\t}\n-\tif (!got_mss_option && syn_packet)\n-\t\tcm_node->tcp_cntxt.mss = I40IW_CM_DEFAULT_MSS;\n-\treturn 0;\n-}\n-\n-/**\n- * i40iw_handle_tcp_options -\n- * @cm_node: connection's node\n- * @tcph: pointer tcp header\n- * @optionsize: size of options rcvd\n- * @passive: active or passive flag\n- */\n-static int i40iw_handle_tcp_options(struct i40iw_cm_node *cm_node,\n-\t\t\t\t struct tcphdr *tcph,\n-\t\t\t\t int optionsize,\n-\t\t\t\t int passive)\n-{\n-\tu8 *optionsloc = (u8 *)&tcph[1];\n-\n-\tif (optionsize) {\n-\t\tif (i40iw_process_options(cm_node,\n-\t\t\t\t\t optionsloc,\n-\t\t\t\t\t optionsize,\n-\t\t\t\t\t (u32)tcph->syn)) {\n-\t\t\ti40iw_debug(cm_node->dev,\n-\t\t\t\t I40IW_DEBUG_CM,\n-\t\t\t\t \"%s: Node %p, Sending RESET\\n\",\n-\t\t\t\t __func__,\n-\t\t\t\t cm_node);\n-\t\t\tif (passive)\n-\t\t\t\ti40iw_passive_open_err(cm_node, 
true);\n-\t\t\telse\n-\t\t\t\ti40iw_active_open_err(cm_node, true);\n-\t\t\treturn -1;\n-\t\t}\n-\t}\n-\n-\tcm_node->tcp_cntxt.snd_wnd = ntohs(tcph->window) <<\n-\t cm_node->tcp_cntxt.snd_wscale;\n-\n-\tif (cm_node->tcp_cntxt.snd_wnd > cm_node->tcp_cntxt.max_snd_wnd)\n-\t\tcm_node->tcp_cntxt.max_snd_wnd = cm_node->tcp_cntxt.snd_wnd;\n-\treturn 0;\n-}\n-\n-/**\n- * i40iw_build_mpa_v1 - build a MPA V1 frame\n- * @cm_node: connection's node\n- * @mpa_key: to do read0 or write0\n- */\n-static void i40iw_build_mpa_v1(struct i40iw_cm_node *cm_node,\n-\t\t\t void *start_addr,\n-\t\t\t u8 mpa_key)\n-{\n-\tstruct ietf_mpa_v1 *mpa_frame = (struct ietf_mpa_v1 *)start_addr;\n-\n-\tswitch (mpa_key) {\n-\tcase MPA_KEY_REQUEST:\n-\t\tmemcpy(mpa_frame->key, IEFT_MPA_KEY_REQ, IETF_MPA_KEY_SIZE);\n-\t\tbreak;\n-\tcase MPA_KEY_REPLY:\n-\t\tmemcpy(mpa_frame->key, IEFT_MPA_KEY_REP, IETF_MPA_KEY_SIZE);\n-\t\tbreak;\n-\tdefault:\n-\t\tbreak;\n-\t}\n-\tmpa_frame->flags = IETF_MPA_FLAGS_CRC;\n-\tmpa_frame->rev = cm_node->mpa_frame_rev;\n-\tmpa_frame->priv_data_len = htons(cm_node->pdata.size);\n-}\n-\n-/**\n- * i40iw_build_mpa_v2 - build a MPA V2 frame\n- * @cm_node: connection's node\n- * @start_addr: buffer start address\n- * @mpa_key: to do read0 or write0\n- */\n-static void i40iw_build_mpa_v2(struct i40iw_cm_node *cm_node,\n-\t\t\t void *start_addr,\n-\t\t\t u8 mpa_key)\n-{\n-\tstruct ietf_mpa_v2 *mpa_frame = (struct ietf_mpa_v2 *)start_addr;\n-\tstruct ietf_rtr_msg *rtr_msg = &mpa_frame->rtr_msg;\n-\tu16 ctrl_ird, ctrl_ord;\n-\n-\t/* initialize the upper 5 bytes of the frame */\n-\ti40iw_build_mpa_v1(cm_node, start_addr, mpa_key);\n-\tmpa_frame->flags |= IETF_MPA_V2_FLAG;\n-\tmpa_frame->priv_data_len += htons(IETF_RTR_MSG_SIZE);\n-\n-\t/* initialize RTR msg */\n-\tif (cm_node->mpav2_ird_ord == IETF_NO_IRD_ORD) {\n-\t\tctrl_ird = IETF_NO_IRD_ORD;\n-\t\tctrl_ord = IETF_NO_IRD_ORD;\n-\t} else {\n-\t\tctrl_ird = (cm_node->ird_size > IETF_NO_IRD_ORD) ?\n-\t\t\tIETF_NO_IRD_ORD : cm_node->ird_size;\n-\t\tctrl_ord = (cm_node->ord_size > IETF_NO_IRD_ORD) ?\n-\t\t\tIETF_NO_IRD_ORD : cm_node->ord_size;\n-\t}\n-\n-\tctrl_ird |= IETF_PEER_TO_PEER;\n-\n-\tswitch (mpa_key) {\n-\tcase MPA_KEY_REQUEST:\n-\t\tctrl_ord |= IETF_RDMA0_WRITE;\n-\t\tctrl_ord |= IETF_RDMA0_READ;\n-\t\tbreak;\n-\tcase MPA_KEY_REPLY:\n-\t\tswitch (cm_node->send_rdma0_op) {\n-\t\tcase SEND_RDMA_WRITE_ZERO:\n-\t\t\tctrl_ord |= IETF_RDMA0_WRITE;\n-\t\t\tbreak;\n-\t\tcase SEND_RDMA_READ_ZERO:\n-\t\t\tctrl_ord |= IETF_RDMA0_READ;\n-\t\t\tbreak;\n-\t\t}\n-\t\tbreak;\n-\tdefault:\n-\t\tbreak;\n-\t}\n-\trtr_msg->ctrl_ird = htons(ctrl_ird);\n-\trtr_msg->ctrl_ord = htons(ctrl_ord);\n-}\n-\n-/**\n- * i40iw_cm_build_mpa_frame - build mpa frame for mpa version 1 or version 2\n- * @cm_node: connection's node\n- * @mpa: mpa: data buffer\n- * @mpa_key: to do read0 or write0\n- */\n-static int i40iw_cm_build_mpa_frame(struct i40iw_cm_node *cm_node,\n-\t\t\t\t struct i40iw_kmem_info *mpa,\n-\t\t\t\t u8 mpa_key)\n-{\n-\tint hdr_len = 0;\n-\n-\tswitch (cm_node->mpa_frame_rev) {\n-\tcase IETF_MPA_V1:\n-\t\thdr_len = sizeof(struct ietf_mpa_v1);\n-\t\ti40iw_build_mpa_v1(cm_node, mpa->addr, mpa_key);\n-\t\tbreak;\n-\tcase IETF_MPA_V2:\n-\t\thdr_len = sizeof(struct ietf_mpa_v2);\n-\t\ti40iw_build_mpa_v2(cm_node, mpa->addr, mpa_key);\n-\t\tbreak;\n-\tdefault:\n-\t\tbreak;\n-\t}\n-\n-\treturn hdr_len;\n-}\n-\n-/**\n- * i40iw_send_mpa_request - active node send mpa request to passive node\n- * @cm_node: connection's node\n- */\n-static int i40iw_send_mpa_request(struct 
i40iw_cm_node *cm_node)\n-{\n-\tstruct i40iw_puda_buf *sqbuf;\n-\n-\tif (!cm_node) {\n-\t\ti40iw_pr_err(\"cm_node == NULL\\n\");\n-\t\treturn -1;\n-\t}\n-\n-\tcm_node->mpa_hdr.addr = &cm_node->mpa_frame;\n-\tcm_node->mpa_hdr.size = i40iw_cm_build_mpa_frame(cm_node,\n-\t\t\t\t\t\t\t &cm_node->mpa_hdr,\n-\t\t\t\t\t\t\t MPA_KEY_REQUEST);\n-\tif (!cm_node->mpa_hdr.size) {\n-\t\ti40iw_pr_err(\"mpa size = %d\\n\", cm_node->mpa_hdr.size);\n-\t\treturn -1;\n-\t}\n-\n-\tsqbuf = i40iw_form_cm_frame(cm_node,\n-\t\t\t\t NULL,\n-\t\t\t\t &cm_node->mpa_hdr,\n-\t\t\t\t &cm_node->pdata,\n-\t\t\t\t SET_ACK);\n-\tif (!sqbuf) {\n-\t\ti40iw_pr_err(\"sq_buf == NULL\\n\");\n-\t\treturn -1;\n-\t}\n-\treturn i40iw_schedule_cm_timer(cm_node, sqbuf, I40IW_TIMER_TYPE_SEND, 1, 0);\n-}\n-\n-/**\n- * i40iw_send_mpa_reject -\n- * @cm_node: connection's node\n- * @pdata: reject data for connection\n- * @plen: length of reject data\n- */\n-static int i40iw_send_mpa_reject(struct i40iw_cm_node *cm_node,\n-\t\t\t\t const void *pdata,\n-\t\t\t\t u8 plen)\n-{\n-\tstruct i40iw_puda_buf *sqbuf;\n-\tstruct i40iw_kmem_info priv_info;\n-\n-\tcm_node->mpa_hdr.addr = &cm_node->mpa_frame;\n-\tcm_node->mpa_hdr.size = i40iw_cm_build_mpa_frame(cm_node,\n-\t\t\t\t\t\t\t &cm_node->mpa_hdr,\n-\t\t\t\t\t\t\t MPA_KEY_REPLY);\n-\n-\tcm_node->mpa_frame.flags |= IETF_MPA_FLAGS_REJECT;\n-\tpriv_info.addr = (void *)pdata;\n-\tpriv_info.size = plen;\n-\n-\tsqbuf = i40iw_form_cm_frame(cm_node,\n-\t\t\t\t NULL,\n-\t\t\t\t &cm_node->mpa_hdr,\n-\t\t\t\t &priv_info,\n-\t\t\t\t SET_ACK | SET_FIN);\n-\tif (!sqbuf) {\n-\t\ti40iw_pr_err(\"no sqbuf\\n\");\n-\t\treturn -ENOMEM;\n-\t}\n-\tcm_node->state = I40IW_CM_STATE_FIN_WAIT1;\n-\treturn i40iw_schedule_cm_timer(cm_node, sqbuf, I40IW_TIMER_TYPE_SEND, 1, 0);\n-}\n-\n-/**\n- * recv_mpa - process an IETF MPA frame\n- * @cm_node: connection's node\n- * @buffer: Data pointer\n- * @type: to return accept or reject\n- * @len: Len of mpa buffer\n- */\n-static int i40iw_parse_mpa(struct i40iw_cm_node *cm_node, u8 *buffer, u32 *type, u32 len)\n-{\n-\tstruct ietf_mpa_v1 *mpa_frame;\n-\tstruct ietf_mpa_v2 *mpa_v2_frame;\n-\tstruct ietf_rtr_msg *rtr_msg;\n-\tint mpa_hdr_len;\n-\tint priv_data_len;\n-\n-\t*type = I40IW_MPA_REQUEST_ACCEPT;\n-\n-\tif (len < sizeof(struct ietf_mpa_v1)) {\n-\t\ti40iw_pr_err(\"ietf buffer small (%x)\\n\", len);\n-\t\treturn -1;\n-\t}\n-\n-\tmpa_frame = (struct ietf_mpa_v1 *)buffer;\n-\tmpa_hdr_len = sizeof(struct ietf_mpa_v1);\n-\tpriv_data_len = ntohs(mpa_frame->priv_data_len);\n-\n-\tif (priv_data_len > IETF_MAX_PRIV_DATA_LEN) {\n-\t\ti40iw_pr_err(\"large pri_data %d\\n\", priv_data_len);\n-\t\treturn -1;\n-\t}\n-\tif (mpa_frame->rev != IETF_MPA_V1 && mpa_frame->rev != IETF_MPA_V2) {\n-\t\ti40iw_pr_err(\"unsupported mpa rev = %d\\n\", mpa_frame->rev);\n-\t\treturn -1;\n-\t}\n-\tif (mpa_frame->rev > cm_node->mpa_frame_rev) {\n-\t\ti40iw_pr_err(\"rev %d\\n\", mpa_frame->rev);\n-\t\treturn -1;\n-\t}\n-\tcm_node->mpa_frame_rev = mpa_frame->rev;\n-\n-\tif (cm_node->state != I40IW_CM_STATE_MPAREQ_SENT) {\n-\t\tif (memcmp(mpa_frame->key, IEFT_MPA_KEY_REQ, IETF_MPA_KEY_SIZE)) {\n-\t\t\ti40iw_pr_err(\"Unexpected MPA Key received\\n\");\n-\t\t\treturn -1;\n-\t\t}\n-\t} else {\n-\t\tif (memcmp(mpa_frame->key, IEFT_MPA_KEY_REP, IETF_MPA_KEY_SIZE)) {\n-\t\t\ti40iw_pr_err(\"Unexpected MPA Key received\\n\");\n-\t\t\treturn -1;\n-\t\t}\n-\t}\n-\n-\tif (priv_data_len + mpa_hdr_len > len) {\n-\t\ti40iw_pr_err(\"ietf buffer len(%x + %x != %x)\\n\",\n-\t\t\t priv_data_len, mpa_hdr_len, len);\n-\t\treturn 
-1;\n-\t}\n-\tif (len > MAX_CM_BUFFER) {\n-\t\ti40iw_pr_err(\"ietf buffer large len = %d\\n\", len);\n-\t\treturn -1;\n-\t}\n-\n-\tswitch (mpa_frame->rev) {\n-\tcase IETF_MPA_V2:{\n-\t\t\tu16 ird_size;\n-\t\t\tu16 ord_size;\n-\t\t\tu16 ctrl_ord;\n-\t\t\tu16 ctrl_ird;\n-\n-\t\t\tmpa_v2_frame = (struct ietf_mpa_v2 *)buffer;\n-\t\t\tmpa_hdr_len += IETF_RTR_MSG_SIZE;\n-\t\t\trtr_msg = &mpa_v2_frame->rtr_msg;\n-\n-\t\t\t/* parse rtr message */\n-\t\t\tctrl_ord = ntohs(rtr_msg->ctrl_ord);\n-\t\t\tctrl_ird = ntohs(rtr_msg->ctrl_ird);\n-\t\t\tird_size = ctrl_ird & IETF_NO_IRD_ORD;\n-\t\t\tord_size = ctrl_ord & IETF_NO_IRD_ORD;\n-\n-\t\t\tif (!(ctrl_ird & IETF_PEER_TO_PEER))\n-\t\t\t\treturn -1;\n-\n-\t\t\tif (ird_size == IETF_NO_IRD_ORD || ord_size == IETF_NO_IRD_ORD) {\n-\t\t\t\tcm_node->mpav2_ird_ord = IETF_NO_IRD_ORD;\n-\t\t\t\tgoto negotiate_done;\n-\t\t\t}\n-\n-\t\t\tif (cm_node->state != I40IW_CM_STATE_MPAREQ_SENT) {\n-\t\t\t\t/* responder */\n-\t\t\t\tif (!ord_size && (ctrl_ord & IETF_RDMA0_READ))\n-\t\t\t\t\tcm_node->ird_size = 1;\n-\t\t\t\tif (cm_node->ord_size > ird_size)\n-\t\t\t\t\tcm_node->ord_size = ird_size;\n-\t\t\t} else {\n-\t\t\t\t/* initiator */\n-\t\t\t\tif (!ird_size && (ctrl_ord & IETF_RDMA0_READ))\n-\t\t\t\t\treturn -1;\n-\t\t\t\tif (cm_node->ord_size > ird_size)\n-\t\t\t\t\tcm_node->ord_size = ird_size;\n-\n-\t\t\t\tif (cm_node->ird_size < ord_size)\n-\t\t\t\t\t/* no resources available */\n-\t\t\t\t\treturn -1;\n-\t\t\t}\n-\n-negotiate_done:\n-\t\t\tif (ctrl_ord & IETF_RDMA0_READ)\n-\t\t\t\tcm_node->send_rdma0_op = SEND_RDMA_READ_ZERO;\n-\t\t\telse if (ctrl_ord & IETF_RDMA0_WRITE)\n-\t\t\t\tcm_node->send_rdma0_op = SEND_RDMA_WRITE_ZERO;\n-\t\t\telse\t/* Not supported RDMA0 operation */\n-\t\t\t\treturn -1;\n-\t\t\ti40iw_debug(cm_node->dev, I40IW_DEBUG_CM,\n-\t\t\t\t \"MPAV2: Negotiated ORD: %d, IRD: %d\\n\",\n-\t\t\t\t cm_node->ord_size, cm_node->ird_size);\n-\t\t\tbreak;\n-\t\t}\n-\t\tbreak;\n-\tcase IETF_MPA_V1:\n-\tdefault:\n-\t\tbreak;\n-\t}\n-\n-\tmemcpy(cm_node->pdata_buf, buffer + mpa_hdr_len, priv_data_len);\n-\tcm_node->pdata.size = priv_data_len;\n-\n-\tif (mpa_frame->flags & IETF_MPA_FLAGS_REJECT)\n-\t\t*type = I40IW_MPA_REQUEST_REJECT;\n-\n-\tif (mpa_frame->flags & IETF_MPA_FLAGS_MARKERS)\n-\t\tcm_node->snd_mark_en = true;\n-\n-\treturn 0;\n-}\n-\n-/**\n- * i40iw_schedule_cm_timer\n- * @@cm_node: connection's node\n- * @sqbuf: buffer to send\n- * @type: if it is send or close\n- * @send_retrans: if rexmits to be done\n- * @close_when_complete: is cm_node to be removed\n- *\n- * note - cm_node needs to be protected before calling this. 
Encase in:\n- *\t\ti40iw_rem_ref_cm_node(cm_core, cm_node);\n- *\t\ti40iw_schedule_cm_timer(...)\n- *\t\tatomic_inc(&cm_node->ref_count);\n- */\n-int i40iw_schedule_cm_timer(struct i40iw_cm_node *cm_node,\n-\t\t\t struct i40iw_puda_buf *sqbuf,\n-\t\t\t enum i40iw_timer_type type,\n-\t\t\t int send_retrans,\n-\t\t\t int close_when_complete)\n-{\n-\tstruct i40iw_sc_vsi *vsi = &cm_node->iwdev->vsi;\n-\tstruct i40iw_cm_core *cm_core = cm_node->cm_core;\n-\tstruct i40iw_timer_entry *new_send;\n-\tint ret = 0;\n-\tu32 was_timer_set;\n-\tunsigned long flags;\n-\n-\tnew_send = kzalloc(sizeof(*new_send), GFP_ATOMIC);\n-\tif (!new_send) {\n-\t\tif (type != I40IW_TIMER_TYPE_CLOSE)\n-\t\t\ti40iw_free_sqbuf(vsi, (void *)sqbuf);\n-\t\treturn -ENOMEM;\n-\t}\n-\tnew_send->retrycount = I40IW_DEFAULT_RETRYS;\n-\tnew_send->retranscount = I40IW_DEFAULT_RETRANS;\n-\tnew_send->sqbuf = sqbuf;\n-\tnew_send->timetosend = jiffies;\n-\tnew_send->type = type;\n-\tnew_send->send_retrans = send_retrans;\n-\tnew_send->close_when_complete = close_when_complete;\n-\n-\tif (type == I40IW_TIMER_TYPE_CLOSE) {\n-\t\tnew_send->timetosend += (HZ / 10);\n-\t\tif (cm_node->close_entry) {\n-\t\t\tkfree(new_send);\n-\t\t\ti40iw_pr_err(\"already close entry\\n\");\n-\t\t\treturn -EINVAL;\n-\t\t}\n-\t\tcm_node->close_entry = new_send;\n-\t}\n-\n-\tif (type == I40IW_TIMER_TYPE_SEND) {\n-\t\tspin_lock_irqsave(&cm_node->retrans_list_lock, flags);\n-\t\tcm_node->send_entry = new_send;\n-\t\tatomic_inc(&cm_node->ref_count);\n-\t\tspin_unlock_irqrestore(&cm_node->retrans_list_lock, flags);\n-\t\tnew_send->timetosend = jiffies + I40IW_RETRY_TIMEOUT;\n-\n-\t\tatomic_inc(&sqbuf->refcount);\n-\t\ti40iw_puda_send_buf(vsi->ilq, sqbuf);\n-\t\tif (!send_retrans) {\n-\t\t\ti40iw_cleanup_retrans_entry(cm_node);\n-\t\t\tif (close_when_complete)\n-\t\t\t\ti40iw_rem_ref_cm_node(cm_node);\n-\t\t\treturn ret;\n-\t\t}\n-\t}\n-\n-\tspin_lock_irqsave(&cm_core->ht_lock, flags);\n-\twas_timer_set = timer_pending(&cm_core->tcp_timer);\n-\n-\tif (!was_timer_set) {\n-\t\tcm_core->tcp_timer.expires = new_send->timetosend;\n-\t\tadd_timer(&cm_core->tcp_timer);\n-\t}\n-\tspin_unlock_irqrestore(&cm_core->ht_lock, flags);\n-\n-\treturn ret;\n-}\n-\n-/**\n- * i40iw_retrans_expired - Could not rexmit the packet\n- * @cm_node: connection's node\n- */\n-static void i40iw_retrans_expired(struct i40iw_cm_node *cm_node)\n-{\n-\tstruct iw_cm_id *cm_id = cm_node->cm_id;\n-\tenum i40iw_cm_node_state state = cm_node->state;\n-\n-\tcm_node->state = I40IW_CM_STATE_CLOSED;\n-\tswitch (state) {\n-\tcase I40IW_CM_STATE_SYN_RCVD:\n-\tcase I40IW_CM_STATE_CLOSING:\n-\t\ti40iw_rem_ref_cm_node(cm_node);\n-\t\tbreak;\n-\tcase I40IW_CM_STATE_FIN_WAIT1:\n-\tcase I40IW_CM_STATE_LAST_ACK:\n-\t\tif (cm_node->cm_id)\n-\t\t\tcm_id->rem_ref(cm_id);\n-\t\ti40iw_send_reset(cm_node);\n-\t\tbreak;\n-\tdefault:\n-\t\tatomic_inc(&cm_node->ref_count);\n-\t\ti40iw_send_reset(cm_node);\n-\t\ti40iw_create_event(cm_node, I40IW_CM_EVENT_ABORTED);\n-\t\tbreak;\n-\t}\n-}\n-\n-/**\n- * i40iw_handle_close_entry - for handling retry/timeouts\n- * @cm_node: connection's node\n- * @rem_node: flag for remove cm_node\n- */\n-static void i40iw_handle_close_entry(struct i40iw_cm_node *cm_node, u32 rem_node)\n-{\n-\tstruct i40iw_timer_entry *close_entry = cm_node->close_entry;\n-\tstruct iw_cm_id *cm_id = cm_node->cm_id;\n-\tstruct i40iw_qp *iwqp;\n-\tunsigned long flags;\n-\n-\tif (!close_entry)\n-\t\treturn;\n-\tiwqp = (struct i40iw_qp *)close_entry->sqbuf;\n-\tif (iwqp) {\n-\t\tspin_lock_irqsave(&iwqp->lock, 
flags);\n-\t\tif (iwqp->cm_id) {\n-\t\t\tiwqp->hw_tcp_state = I40IW_TCP_STATE_CLOSED;\n-\t\t\tiwqp->hw_iwarp_state = I40IW_QP_STATE_ERROR;\n-\t\t\tiwqp->last_aeq = I40IW_AE_RESET_SENT;\n-\t\t\tiwqp->ibqp_state = IB_QPS_ERR;\n-\t\t\tspin_unlock_irqrestore(&iwqp->lock, flags);\n-\t\t\ti40iw_cm_disconn(iwqp);\n-\t\t} else {\n-\t\t\tspin_unlock_irqrestore(&iwqp->lock, flags);\n-\t\t}\n-\t} else if (rem_node) {\n-\t\t/* TIME_WAIT state */\n-\t\ti40iw_rem_ref_cm_node(cm_node);\n-\t}\n-\tif (cm_id)\n-\t\tcm_id->rem_ref(cm_id);\n-\tkfree(close_entry);\n-\tcm_node->close_entry = NULL;\n-}\n-\n-/**\n- * i40iw_build_timer_list - Add cm_nodes to timer list\n- * @timer_list: ptr to timer list\n- * @hte: ptr to accelerated or non-accelerated list\n- */\n-static void i40iw_build_timer_list(struct list_head *timer_list,\n-\t\t\t\t struct list_head *hte)\n-{\n-\tstruct i40iw_cm_node *cm_node;\n-\tstruct list_head *list_core_temp, *list_node;\n-\n-\tlist_for_each_safe(list_node, list_core_temp, hte) {\n-\t\tcm_node = container_of(list_node, struct i40iw_cm_node, list);\n-\t\tif (cm_node->close_entry || cm_node->send_entry) {\n-\t\t\tatomic_inc(&cm_node->ref_count);\n-\t\t\tlist_add(&cm_node->timer_entry, timer_list);\n-\t\t}\n-\t}\n-}\n-\n-/**\n- * i40iw_cm_timer_tick - system's timer expired callback\n- * @pass: Pointing to cm_core\n- */\n-static void i40iw_cm_timer_tick(struct timer_list *t)\n-{\n-\tunsigned long nexttimeout = jiffies + I40IW_LONG_TIME;\n-\tstruct i40iw_cm_node *cm_node;\n-\tstruct i40iw_timer_entry *send_entry, *close_entry;\n-\tstruct list_head *list_core_temp;\n-\tstruct i40iw_sc_vsi *vsi;\n-\tstruct list_head *list_node;\n-\tstruct i40iw_cm_core *cm_core = from_timer(cm_core, t, tcp_timer);\n-\tu32 settimer = 0;\n-\tunsigned long timetosend;\n-\tunsigned long flags;\n-\n-\tstruct list_head timer_list;\n-\n-\tINIT_LIST_HEAD(&timer_list);\n-\n-\tspin_lock_irqsave(&cm_core->ht_lock, flags);\n-\ti40iw_build_timer_list(&timer_list, &cm_core->non_accelerated_list);\n-\ti40iw_build_timer_list(&timer_list, &cm_core->accelerated_list);\n-\tspin_unlock_irqrestore(&cm_core->ht_lock, flags);\n-\n-\tlist_for_each_safe(list_node, list_core_temp, &timer_list) {\n-\t\tcm_node = container_of(list_node,\n-\t\t\t\t struct i40iw_cm_node,\n-\t\t\t\t timer_entry);\n-\t\tclose_entry = cm_node->close_entry;\n-\n-\t\tif (close_entry) {\n-\t\t\tif (time_after(close_entry->timetosend, jiffies)) {\n-\t\t\t\tif (nexttimeout > close_entry->timetosend ||\n-\t\t\t\t !settimer) {\n-\t\t\t\t\tnexttimeout = close_entry->timetosend;\n-\t\t\t\t\tsettimer = 1;\n-\t\t\t\t}\n-\t\t\t} else {\n-\t\t\t\ti40iw_handle_close_entry(cm_node, 1);\n-\t\t\t}\n-\t\t}\n-\n-\t\tspin_lock_irqsave(&cm_node->retrans_list_lock, flags);\n-\n-\t\tsend_entry = cm_node->send_entry;\n-\t\tif (!send_entry)\n-\t\t\tgoto done;\n-\t\tif (time_after(send_entry->timetosend, jiffies)) {\n-\t\t\tif (cm_node->state != I40IW_CM_STATE_OFFLOADED) {\n-\t\t\t\tif ((nexttimeout > send_entry->timetosend) ||\n-\t\t\t\t !settimer) {\n-\t\t\t\t\tnexttimeout = send_entry->timetosend;\n-\t\t\t\t\tsettimer = 1;\n-\t\t\t\t}\n-\t\t\t} else {\n-\t\t\t\ti40iw_free_retrans_entry(cm_node);\n-\t\t\t}\n-\t\t\tgoto done;\n-\t\t}\n-\n-\t\tif ((cm_node->state == I40IW_CM_STATE_OFFLOADED) ||\n-\t\t (cm_node->state == I40IW_CM_STATE_CLOSED)) {\n-\t\t\ti40iw_free_retrans_entry(cm_node);\n-\t\t\tgoto done;\n-\t\t}\n-\n-\t\tif (!send_entry->retranscount || !send_entry->retrycount) 
{\n-\t\t\ti40iw_free_retrans_entry(cm_node);\n-\n-\t\t\tspin_unlock_irqrestore(&cm_node->retrans_list_lock, flags);\n-\t\t\ti40iw_retrans_expired(cm_node);\n-\t\t\tcm_node->state = I40IW_CM_STATE_CLOSED;\n-\t\t\tspin_lock_irqsave(&cm_node->retrans_list_lock, flags);\n-\t\t\tgoto done;\n-\t\t}\n-\t\tspin_unlock_irqrestore(&cm_node->retrans_list_lock, flags);\n-\n-\t\tvsi = &cm_node->iwdev->vsi;\n-\n-\t\tif (!cm_node->ack_rcvd) {\n-\t\t\tatomic_inc(&send_entry->sqbuf->refcount);\n-\t\t\ti40iw_puda_send_buf(vsi->ilq, send_entry->sqbuf);\n-\t\t\tcm_node->cm_core->stats_pkt_retrans++;\n-\t\t}\n-\t\tspin_lock_irqsave(&cm_node->retrans_list_lock, flags);\n-\t\tif (send_entry->send_retrans) {\n-\t\t\tsend_entry->retranscount--;\n-\t\t\ttimetosend = (I40IW_RETRY_TIMEOUT <<\n-\t\t\t\t (I40IW_DEFAULT_RETRANS -\n-\t\t\t\t send_entry->retranscount));\n-\n-\t\t\tsend_entry->timetosend = jiffies +\n-\t\t\t min(timetosend, I40IW_MAX_TIMEOUT);\n-\t\t\tif (nexttimeout > send_entry->timetosend || !settimer) {\n-\t\t\t\tnexttimeout = send_entry->timetosend;\n-\t\t\t\tsettimer = 1;\n-\t\t\t}\n-\t\t} else {\n-\t\t\tint close_when_complete;\n-\n-\t\t\tclose_when_complete = send_entry->close_when_complete;\n-\t\t\ti40iw_debug(cm_node->dev,\n-\t\t\t\t I40IW_DEBUG_CM,\n-\t\t\t\t \"cm_node=%p state=%d\\n\",\n-\t\t\t\t cm_node,\n-\t\t\t\t cm_node->state);\n-\t\t\ti40iw_free_retrans_entry(cm_node);\n-\t\t\tif (close_when_complete)\n-\t\t\t\ti40iw_rem_ref_cm_node(cm_node);\n-\t\t}\n-done:\n-\t\tspin_unlock_irqrestore(&cm_node->retrans_list_lock, flags);\n-\t\ti40iw_rem_ref_cm_node(cm_node);\n-\t}\n-\n-\tif (settimer) {\n-\t\tspin_lock_irqsave(&cm_core->ht_lock, flags);\n-\t\tif (!timer_pending(&cm_core->tcp_timer)) {\n-\t\t\tcm_core->tcp_timer.expires = nexttimeout;\n-\t\t\tadd_timer(&cm_core->tcp_timer);\n-\t\t}\n-\t\tspin_unlock_irqrestore(&cm_core->ht_lock, flags);\n-\t}\n-}\n-\n-/**\n- * i40iw_send_syn - send SYN packet\n- * @cm_node: connection's node\n- * @sendack: flag to set ACK bit or not\n- */\n-int i40iw_send_syn(struct i40iw_cm_node *cm_node, u32 sendack)\n-{\n-\tstruct i40iw_puda_buf *sqbuf;\n-\tint flags = SET_SYN;\n-\tchar optionsbuffer[sizeof(struct option_mss) +\n-\t\t\t sizeof(struct option_windowscale) +\n-\t\t\t sizeof(struct option_base) + TCP_OPTIONS_PADDING];\n-\tstruct i40iw_kmem_info opts;\n-\n-\tint optionssize = 0;\n-\t/* Sending MSS option */\n-\tunion all_known_options *options;\n-\n-\topts.addr = optionsbuffer;\n-\tif (!cm_node) {\n-\t\ti40iw_pr_err(\"no cm_node\\n\");\n-\t\treturn -EINVAL;\n-\t}\n-\n-\toptions = (union all_known_options *)&optionsbuffer[optionssize];\n-\toptions->as_mss.optionnum = OPTION_NUMBER_MSS;\n-\toptions->as_mss.length = sizeof(struct option_mss);\n-\toptions->as_mss.mss = htons(cm_node->tcp_cntxt.mss);\n-\toptionssize += sizeof(struct option_mss);\n-\n-\toptions = (union all_known_options *)&optionsbuffer[optionssize];\n-\toptions->as_windowscale.optionnum = OPTION_NUMBER_WINDOW_SCALE;\n-\toptions->as_windowscale.length = sizeof(struct option_windowscale);\n-\toptions->as_windowscale.shiftcount = cm_node->tcp_cntxt.rcv_wscale;\n-\toptionssize += sizeof(struct option_windowscale);\n-\toptions = (union all_known_options *)&optionsbuffer[optionssize];\n-\toptions->as_end = OPTION_NUMBER_END;\n-\toptionssize += 1;\n-\n-\tif (sendack)\n-\t\tflags |= SET_ACK;\n-\n-\topts.size = optionssize;\n-\n-\tsqbuf = i40iw_form_cm_frame(cm_node, &opts, NULL, NULL, flags);\n-\tif (!sqbuf) {\n-\t\ti40iw_pr_err(\"no sqbuf\\n\");\n-\t\treturn -1;\n-\t}\n-\treturn 
i40iw_schedule_cm_timer(cm_node, sqbuf, I40IW_TIMER_TYPE_SEND, 1, 0);\n-}\n-\n-/**\n- * i40iw_send_ack - Send ACK packet\n- * @cm_node: connection's node\n- */\n-static void i40iw_send_ack(struct i40iw_cm_node *cm_node)\n-{\n-\tstruct i40iw_puda_buf *sqbuf;\n-\tstruct i40iw_sc_vsi *vsi = &cm_node->iwdev->vsi;\n-\n-\tsqbuf = i40iw_form_cm_frame(cm_node, NULL, NULL, NULL, SET_ACK);\n-\tif (sqbuf)\n-\t\ti40iw_puda_send_buf(vsi->ilq, sqbuf);\n-\telse\n-\t\ti40iw_pr_err(\"no sqbuf\\n\");\n-}\n-\n-/**\n- * i40iw_send_fin - Send FIN pkt\n- * @cm_node: connection's node\n- */\n-static int i40iw_send_fin(struct i40iw_cm_node *cm_node)\n-{\n-\tstruct i40iw_puda_buf *sqbuf;\n-\n-\tsqbuf = i40iw_form_cm_frame(cm_node, NULL, NULL, NULL, SET_ACK | SET_FIN);\n-\tif (!sqbuf) {\n-\t\ti40iw_pr_err(\"no sqbuf\\n\");\n-\t\treturn -1;\n-\t}\n-\treturn i40iw_schedule_cm_timer(cm_node, sqbuf, I40IW_TIMER_TYPE_SEND, 1, 0);\n-}\n-\n-/**\n- * i40iw_find_node - find a cm node that matches the reference cm node\n- * @cm_core: cm's core\n- * @rem_port: remote tcp port num\n- * @rem_addr: remote ip addr\n- * @loc_port: local tcp port num\n- * @loc_addr: loc ip addr\n- * @add_refcnt: flag to increment refcount of cm_node\n- * @accelerated_list: flag for accelerated vs non-accelerated list to search\n- */\n-struct i40iw_cm_node *i40iw_find_node(struct i40iw_cm_core *cm_core,\n-\t\t\t\t u16 rem_port,\n-\t\t\t\t u32 *rem_addr,\n-\t\t\t\t u16 loc_port,\n-\t\t\t\t u32 *loc_addr,\n-\t\t\t\t bool add_refcnt,\n-\t\t\t\t bool accelerated_list)\n-{\n-\tstruct list_head *hte;\n-\tstruct i40iw_cm_node *cm_node;\n-\tunsigned long flags;\n-\n-\thte = accelerated_list ?\n-\t &cm_core->accelerated_list : &cm_core->non_accelerated_list;\n-\n-\t/* walk list and find cm_node associated with this session ID */\n-\tspin_lock_irqsave(&cm_core->ht_lock, flags);\n-\tlist_for_each_entry(cm_node, hte, list) {\n-\t\tif (!memcmp(cm_node->loc_addr, loc_addr, sizeof(cm_node->loc_addr)) &&\n-\t\t (cm_node->loc_port == loc_port) &&\n-\t\t !memcmp(cm_node->rem_addr, rem_addr, sizeof(cm_node->rem_addr)) &&\n-\t\t (cm_node->rem_port == rem_port)) {\n-\t\t\tif (add_refcnt)\n-\t\t\t\tatomic_inc(&cm_node->ref_count);\n-\t\t\tspin_unlock_irqrestore(&cm_core->ht_lock, flags);\n-\t\t\treturn cm_node;\n-\t\t}\n-\t}\n-\tspin_unlock_irqrestore(&cm_core->ht_lock, flags);\n-\n-\t/* no owner node */\n-\treturn NULL;\n-}\n-\n-/**\n- * i40iw_find_listener - find a cm node listening on this addr-port pair\n- * @cm_core: cm's core\n- * @dst_port: listener tcp port num\n- * @dst_addr: listener ip addr\n- * @listener_state: state to match with listen node's\n- */\n-static struct i40iw_cm_listener *i40iw_find_listener(\n-\t\t\t\t\t\t struct i40iw_cm_core *cm_core,\n-\t\t\t\t\t\t u32 *dst_addr,\n-\t\t\t\t\t\t u16 dst_port,\n-\t\t\t\t\t\t u16 vlan_id,\n-\t\t\t\t\t\t enum i40iw_cm_listener_state\n-\t\t\t\t\t\t listener_state)\n-{\n-\tstruct i40iw_cm_listener *listen_node;\n-\tstatic const u32 ip_zero[4] = { 0, 0, 0, 0 };\n-\tu32 listen_addr[4];\n-\tu16 listen_port;\n-\tunsigned long flags;\n-\n-\t/* walk list and find cm_node associated with this session ID */\n-\tspin_lock_irqsave(&cm_core->listen_list_lock, flags);\n-\tlist_for_each_entry(listen_node, &cm_core->listen_nodes, list) {\n-\t\tmemcpy(listen_addr, listen_node->loc_addr, sizeof(listen_addr));\n-\t\tlisten_port = listen_node->loc_port;\n-\t\t/* compare node pair, return node handle if a match */\n-\t\tif ((!memcmp(listen_addr, dst_addr, sizeof(listen_addr)) ||\n-\t\t !memcmp(listen_addr, ip_zero, 
sizeof(listen_addr))) &&\n-\t\t (listen_port == dst_port) &&\n-\t\t (listener_state & listen_node->listener_state)) {\n-\t\t\tatomic_inc(&listen_node->ref_count);\n-\t\t\tspin_unlock_irqrestore(&cm_core->listen_list_lock, flags);\n-\t\t\treturn listen_node;\n-\t\t}\n-\t}\n-\tspin_unlock_irqrestore(&cm_core->listen_list_lock, flags);\n-\treturn NULL;\n-}\n-\n-/**\n- * i40iw_add_hte_node - add a cm node to the hash table\n- * @cm_core: cm's core\n- * @cm_node: connection's node\n- */\n-static void i40iw_add_hte_node(struct i40iw_cm_core *cm_core,\n-\t\t\t struct i40iw_cm_node *cm_node)\n-{\n-\tunsigned long flags;\n-\n-\tif (!cm_node || !cm_core) {\n-\t\ti40iw_pr_err(\"cm_node or cm_core == NULL\\n\");\n-\t\treturn;\n-\t}\n-\n-\tspin_lock_irqsave(&cm_core->ht_lock, flags);\n-\tlist_add_tail(&cm_node->list, &cm_core->non_accelerated_list);\n-\tspin_unlock_irqrestore(&cm_core->ht_lock, flags);\n-}\n-\n-/**\n- * i40iw_find_port - find port that matches reference port\n- * @hte: ptr to accelerated or non-accelerated list\n- * @accelerated_list: flag for accelerated vs non-accelerated list\n- */\n-static bool i40iw_find_port(struct list_head *hte, u16 port)\n-{\n-\tstruct i40iw_cm_node *cm_node;\n-\n-\tlist_for_each_entry(cm_node, hte, list) {\n-\t\tif (cm_node->loc_port == port)\n-\t\t\treturn true;\n-\t}\n-\treturn false;\n-}\n-\n-/**\n- * i40iw_port_in_use - determine if port is in use\n- * @cm_core: cm's core\n- * @port: port number\n- */\n-bool i40iw_port_in_use(struct i40iw_cm_core *cm_core, u16 port)\n-{\n-\tstruct i40iw_cm_listener *listen_node;\n-\tunsigned long flags;\n-\n-\tspin_lock_irqsave(&cm_core->ht_lock, flags);\n-\tif (i40iw_find_port(&cm_core->accelerated_list, port) ||\n-\t i40iw_find_port(&cm_core->non_accelerated_list, port)) {\n-\t\tspin_unlock_irqrestore(&cm_core->ht_lock, flags);\n-\t\treturn true;\n-\t}\n-\tspin_unlock_irqrestore(&cm_core->ht_lock, flags);\n-\n-\tspin_lock_irqsave(&cm_core->listen_list_lock, flags);\n-\tlist_for_each_entry(listen_node, &cm_core->listen_nodes, list) {\n-\t\tif (listen_node->loc_port == port) {\n-\t\t\tspin_unlock_irqrestore(&cm_core->listen_list_lock, flags);\n-\t\t\treturn true;\n-\t\t}\n-\t}\n-\tspin_unlock_irqrestore(&cm_core->listen_list_lock, flags);\n-\n-\treturn false;\n-}\n-\n-/**\n- * i40iw_del_multiple_qhash - Remove qhash and child listens\n- * @iwdev: iWarp device\n- * @cm_info: CM info for parent listen node\n- * @cm_parent_listen_node: The parent listen node\n- */\n-static enum i40iw_status_code i40iw_del_multiple_qhash(\n-\t\t\t\t\t\t struct i40iw_device *iwdev,\n-\t\t\t\t\t\t struct i40iw_cm_info *cm_info,\n-\t\t\t\t\t\t struct i40iw_cm_listener *cm_parent_listen_node)\n-{\n-\tstruct i40iw_cm_listener *child_listen_node;\n-\tenum i40iw_status_code ret = I40IW_ERR_CONFIG;\n-\tstruct list_head *pos, *tpos;\n-\tunsigned long flags;\n-\n-\tspin_lock_irqsave(&iwdev->cm_core.listen_list_lock, flags);\n-\tlist_for_each_safe(pos, tpos, &cm_parent_listen_node->child_listen_list) {\n-\t\tchild_listen_node = list_entry(pos, struct i40iw_cm_listener, child_listen_list);\n-\t\tif (child_listen_node->ipv4)\n-\t\t\ti40iw_debug(&iwdev->sc_dev,\n-\t\t\t\t I40IW_DEBUG_CM,\n-\t\t\t\t \"removing child listen for IP=%pI4, port=%d, vlan=%d\\n\",\n-\t\t\t\t child_listen_node->loc_addr,\n-\t\t\t\t child_listen_node->loc_port,\n-\t\t\t\t child_listen_node->vlan_id);\n-\t\telse\n-\t\t\ti40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_CM,\n-\t\t\t\t \"removing child listen for IP=%pI6, port=%d, vlan=%d\\n\",\n-\t\t\t\t child_listen_node->loc_addr,\n-\t\t\t\t 
child_listen_node->loc_port,\n-\t\t\t\t child_listen_node->vlan_id);\n-\t\tlist_del(pos);\n-\t\tmemcpy(cm_info->loc_addr, child_listen_node->loc_addr,\n-\t\t sizeof(cm_info->loc_addr));\n-\t\tcm_info->vlan_id = child_listen_node->vlan_id;\n-\t\tif (child_listen_node->qhash_set) {\n-\t\t\tret = i40iw_manage_qhash(iwdev, cm_info,\n-\t\t\t\t\t\t I40IW_QHASH_TYPE_TCP_SYN,\n-\t\t\t\t\t\t I40IW_QHASH_MANAGE_TYPE_DELETE,\n-\t\t\t\t\t\t NULL, false);\n-\t\t\tchild_listen_node->qhash_set = false;\n-\t\t} else {\n-\t\t\tret = I40IW_SUCCESS;\n-\t\t}\n-\t\ti40iw_debug(&iwdev->sc_dev,\n-\t\t\t I40IW_DEBUG_CM,\n-\t\t\t \"freed pointer = %p\\n\",\n-\t\t\t child_listen_node);\n-\t\tkfree(child_listen_node);\n-\t\tcm_parent_listen_node->cm_core->stats_listen_nodes_destroyed++;\n-\t}\n-\tspin_unlock_irqrestore(&iwdev->cm_core.listen_list_lock, flags);\n-\n-\treturn ret;\n-}\n-\n-/**\n- * i40iw_netdev_vlan_ipv6 - Gets the netdev and vlan\n- * @addr: local IPv6 address\n- * @vlan_id: vlan id for the given IPv6 address\n- *\n- * Returns the net_device of the IPv6 address and also sets the\n- * vlan id for that address.\n- */\n-static struct net_device *i40iw_netdev_vlan_ipv6(u32 *addr, u16 *vlan_id)\n-{\n-\tstruct net_device *ip_dev = NULL;\n-\tstruct in6_addr laddr6;\n-\n-\tif (!IS_ENABLED(CONFIG_IPV6))\n-\t\treturn NULL;\n-\ti40iw_copy_ip_htonl(laddr6.in6_u.u6_addr32, addr);\n-\tif (vlan_id)\n-\t\t*vlan_id = I40IW_NO_VLAN;\n-\trcu_read_lock();\n-\tfor_each_netdev_rcu(&init_net, ip_dev) {\n-\t\tif (ipv6_chk_addr(&init_net, &laddr6, ip_dev, 1)) {\n-\t\t\tif (vlan_id)\n-\t\t\t\t*vlan_id = rdma_vlan_dev_vlan_id(ip_dev);\n-\t\t\tbreak;\n-\t\t}\n-\t}\n-\trcu_read_unlock();\n-\treturn ip_dev;\n-}\n-\n-/**\n- * i40iw_get_vlan_ipv4 - Returns the vlan_id for IPv4 address\n- * @addr: local IPv4 address\n- */\n-static u16 i40iw_get_vlan_ipv4(u32 *addr)\n-{\n-\tstruct net_device *netdev;\n-\tu16 vlan_id = I40IW_NO_VLAN;\n-\n-\tnetdev = ip_dev_find(&init_net, htonl(addr[0]));\n-\tif (netdev) {\n-\t\tvlan_id = rdma_vlan_dev_vlan_id(netdev);\n-\t\tdev_put(netdev);\n-\t}\n-\treturn vlan_id;\n-}\n-\n-/**\n- * i40iw_add_mqh_6 - Adds multiple qhashes for IPv6\n- * @iwdev: iWarp device\n- * @cm_info: CM info for parent listen node\n- * @cm_parent_listen_node: The parent listen node\n- *\n- * Adds a qhash and a child listen node for every IPv6 address\n- * on the adapter and adds the associated qhash filter\n- */\n-static enum i40iw_status_code i40iw_add_mqh_6(struct i40iw_device *iwdev,\n-\t\t\t\t\t struct i40iw_cm_info *cm_info,\n-\t\t\t\t\t struct i40iw_cm_listener *cm_parent_listen_node)\n-{\n-\tstruct net_device *ip_dev;\n-\tstruct inet6_dev *idev;\n-\tstruct inet6_ifaddr *ifp, *tmp;\n-\tenum i40iw_status_code ret = 0;\n-\tstruct i40iw_cm_listener *child_listen_node;\n-\tunsigned long flags;\n-\n-\trtnl_lock();\n-\tfor_each_netdev(&init_net, ip_dev) {\n-\t\tif ((((rdma_vlan_dev_vlan_id(ip_dev) < I40IW_NO_VLAN) &&\n-\t\t (rdma_vlan_dev_real_dev(ip_dev) == iwdev->netdev)) ||\n-\t\t (ip_dev == iwdev->netdev)) && (ip_dev->flags & IFF_UP)) {\n-\t\t\tidev = __in6_dev_get(ip_dev);\n-\t\t\tif (!idev) {\n-\t\t\t\ti40iw_pr_err(\"idev == NULL\\n\");\n-\t\t\t\tbreak;\n-\t\t\t}\n-\t\t\tlist_for_each_entry_safe(ifp, tmp, &idev->addr_list, if_list) {\n-\t\t\t\ti40iw_debug(&iwdev->sc_dev,\n-\t\t\t\t\t I40IW_DEBUG_CM,\n-\t\t\t\t\t \"IP=%pI6, vlan_id=%d, MAC=%pM\\n\",\n-\t\t\t\t\t &ifp->addr,\n-\t\t\t\t\t rdma_vlan_dev_vlan_id(ip_dev),\n-\t\t\t\t\t ip_dev->dev_addr);\n-\t\t\t\tchild_listen_node 
=\n-\t\t\t\t\tkzalloc(sizeof(*child_listen_node), GFP_ATOMIC);\n-\t\t\t\ti40iw_debug(&iwdev->sc_dev,\n-\t\t\t\t\t I40IW_DEBUG_CM,\n-\t\t\t\t\t \"Allocating child listener %p\\n\",\n-\t\t\t\t\t child_listen_node);\n-\t\t\t\tif (!child_listen_node) {\n-\t\t\t\t\tret = I40IW_ERR_NO_MEMORY;\n-\t\t\t\t\tgoto exit;\n-\t\t\t\t}\n-\t\t\t\tcm_info->vlan_id = rdma_vlan_dev_vlan_id(ip_dev);\n-\t\t\t\tcm_parent_listen_node->vlan_id = cm_info->vlan_id;\n-\n-\t\t\t\tmemcpy(child_listen_node, cm_parent_listen_node,\n-\t\t\t\t sizeof(*child_listen_node));\n-\n-\t\t\t\ti40iw_copy_ip_ntohl(child_listen_node->loc_addr,\n-\t\t\t\t\t\t ifp->addr.in6_u.u6_addr32);\n-\t\t\t\tmemcpy(cm_info->loc_addr, child_listen_node->loc_addr,\n-\t\t\t\t sizeof(cm_info->loc_addr));\n-\n-\t\t\t\tret = i40iw_manage_qhash(iwdev, cm_info,\n-\t\t\t\t\t\t\t I40IW_QHASH_TYPE_TCP_SYN,\n-\t\t\t\t\t\t\t I40IW_QHASH_MANAGE_TYPE_ADD,\n-\t\t\t\t\t\t\t NULL, true);\n-\t\t\t\tif (!ret) {\n-\t\t\t\t\tchild_listen_node->qhash_set = true;\n-\t\t\t\t\tspin_lock_irqsave(&iwdev->cm_core.listen_list_lock, flags);\n-\t\t\t\t\tlist_add(&child_listen_node->child_listen_list,\n-\t\t\t\t\t\t &cm_parent_listen_node->child_listen_list);\n-\t\t\t\t\tspin_unlock_irqrestore(&iwdev->cm_core.listen_list_lock, flags);\n-\t\t\t\t\tcm_parent_listen_node->cm_core->stats_listen_nodes_created++;\n-\t\t\t\t} else {\n-\t\t\t\t\tkfree(child_listen_node);\n-\t\t\t\t}\n-\t\t\t}\n-\t\t}\n-\t}\n-exit:\n-\trtnl_unlock();\n-\treturn ret;\n-}\n-\n-/**\n- * i40iw_add_mqh_4 - Adds multiple qhashes for IPv4\n- * @iwdev: iWarp device\n- * @cm_info: CM info for parent listen node\n- * @cm_parent_listen_node: The parent listen node\n- *\n- * Adds a qhash and a child listen node for every IPv4 address\n- * on the adapter and adds the associated qhash filter\n- */\n-static enum i40iw_status_code i40iw_add_mqh_4(\n-\t\t\t\tstruct i40iw_device *iwdev,\n-\t\t\t\tstruct i40iw_cm_info *cm_info,\n-\t\t\t\tstruct i40iw_cm_listener *cm_parent_listen_node)\n-{\n-\tstruct net_device *dev;\n-\tstruct in_device *idev;\n-\tstruct i40iw_cm_listener *child_listen_node;\n-\tenum i40iw_status_code ret = 0;\n-\tunsigned long flags;\n-\n-\trtnl_lock();\n-\tfor_each_netdev(&init_net, dev) {\n-\t\tif ((((rdma_vlan_dev_vlan_id(dev) < I40IW_NO_VLAN) &&\n-\t\t (rdma_vlan_dev_real_dev(dev) == iwdev->netdev)) ||\n-\t\t (dev == iwdev->netdev)) && (dev->flags & IFF_UP)) {\n-\t\t\tconst struct in_ifaddr *ifa;\n-\n-\t\t\tidev = in_dev_get(dev);\n-\n-\t\t\tin_dev_for_each_ifa_rtnl(ifa, idev) {\n-\t\t\t\ti40iw_debug(&iwdev->sc_dev,\n-\t\t\t\t\t I40IW_DEBUG_CM,\n-\t\t\t\t\t \"Allocating child CM Listener forIP=%pI4, vlan_id=%d, MAC=%pM\\n\",\n-\t\t\t\t\t &ifa->ifa_address,\n-\t\t\t\t\t rdma_vlan_dev_vlan_id(dev),\n-\t\t\t\t\t dev->dev_addr);\n-\t\t\t\tchild_listen_node = kzalloc(sizeof(*child_listen_node), GFP_KERNEL);\n-\t\t\t\tcm_parent_listen_node->cm_core->stats_listen_nodes_created++;\n-\t\t\t\ti40iw_debug(&iwdev->sc_dev,\n-\t\t\t\t\t I40IW_DEBUG_CM,\n-\t\t\t\t\t \"Allocating child listener %p\\n\",\n-\t\t\t\t\t child_listen_node);\n-\t\t\t\tif (!child_listen_node) {\n-\t\t\t\t\tin_dev_put(idev);\n-\t\t\t\t\tret = I40IW_ERR_NO_MEMORY;\n-\t\t\t\t\tgoto exit;\n-\t\t\t\t}\n-\t\t\t\tcm_info->vlan_id = rdma_vlan_dev_vlan_id(dev);\n-\t\t\t\tcm_parent_listen_node->vlan_id = cm_info->vlan_id;\n-\t\t\t\tmemcpy(child_listen_node,\n-\t\t\t\t cm_parent_listen_node,\n-\t\t\t\t sizeof(*child_listen_node));\n-\n-\t\t\t\tchild_listen_node->loc_addr[0] = ntohl(ifa->ifa_address);\n-\t\t\t\tmemcpy(cm_info->loc_addr, 
child_listen_node->loc_addr,\n-\t\t\t\t sizeof(cm_info->loc_addr));\n-\n-\t\t\t\tret = i40iw_manage_qhash(iwdev,\n-\t\t\t\t\t\t\t cm_info,\n-\t\t\t\t\t\t\t I40IW_QHASH_TYPE_TCP_SYN,\n-\t\t\t\t\t\t\t I40IW_QHASH_MANAGE_TYPE_ADD,\n-\t\t\t\t\t\t\t NULL,\n-\t\t\t\t\t\t\t true);\n-\t\t\t\tif (!ret) {\n-\t\t\t\t\tchild_listen_node->qhash_set = true;\n-\t\t\t\t\tspin_lock_irqsave(&iwdev->cm_core.listen_list_lock, flags);\n-\t\t\t\t\tlist_add(&child_listen_node->child_listen_list,\n-\t\t\t\t\t\t &cm_parent_listen_node->child_listen_list);\n-\t\t\t\t\tspin_unlock_irqrestore(&iwdev->cm_core.listen_list_lock, flags);\n-\t\t\t\t} else {\n-\t\t\t\t\tkfree(child_listen_node);\n-\t\t\t\t\tcm_parent_listen_node->cm_core->stats_listen_nodes_created--;\n-\t\t\t\t}\n-\t\t\t}\n-\n-\t\t\tin_dev_put(idev);\n-\t\t}\n-\t}\n-exit:\n-\trtnl_unlock();\n-\treturn ret;\n-}\n-\n-/**\n- * i40iw_dec_refcnt_listen - delete listener and associated cm nodes\n- * @cm_core: cm's core\n- * @free_hanging_nodes: to free associated cm_nodes\n- * @apbvt_del: flag to delete the apbvt\n- */\n-static int i40iw_dec_refcnt_listen(struct i40iw_cm_core *cm_core,\n-\t\t\t\t struct i40iw_cm_listener *listener,\n-\t\t\t\t int free_hanging_nodes, bool apbvt_del)\n-{\n-\tint ret = -EINVAL;\n-\tint err = 0;\n-\tstruct list_head *list_pos;\n-\tstruct list_head *list_temp;\n-\tstruct i40iw_cm_node *cm_node;\n-\tstruct list_head reset_list;\n-\tstruct i40iw_cm_info nfo;\n-\tstruct i40iw_cm_node *loopback;\n-\tenum i40iw_cm_node_state old_state;\n-\tunsigned long flags;\n-\n-\t/* free non-accelerated child nodes for this listener */\n-\tINIT_LIST_HEAD(&reset_list);\n-\tif (free_hanging_nodes) {\n-\t\tspin_lock_irqsave(&cm_core->ht_lock, flags);\n-\t\tlist_for_each_safe(list_pos,\n-\t\t\t\t list_temp, &cm_core->non_accelerated_list) {\n-\t\t\tcm_node = container_of(list_pos, struct i40iw_cm_node, list);\n-\t\t\tif ((cm_node->listener == listener) &&\n-\t\t\t !cm_node->accelerated) {\n-\t\t\t\tatomic_inc(&cm_node->ref_count);\n-\t\t\t\tlist_add(&cm_node->reset_entry, &reset_list);\n-\t\t\t}\n-\t\t}\n-\t\tspin_unlock_irqrestore(&cm_core->ht_lock, flags);\n-\t}\n-\n-\tlist_for_each_safe(list_pos, list_temp, &reset_list) {\n-\t\tcm_node = container_of(list_pos, struct i40iw_cm_node, reset_entry);\n-\t\tloopback = cm_node->loopbackpartner;\n-\t\tif (cm_node->state >= I40IW_CM_STATE_FIN_WAIT1) {\n-\t\t\ti40iw_rem_ref_cm_node(cm_node);\n-\t\t} else {\n-\t\t\tif (!loopback) {\n-\t\t\t\ti40iw_cleanup_retrans_entry(cm_node);\n-\t\t\t\terr = i40iw_send_reset(cm_node);\n-\t\t\t\tif (err) {\n-\t\t\t\t\tcm_node->state = I40IW_CM_STATE_CLOSED;\n-\t\t\t\t\ti40iw_pr_err(\"send reset\\n\");\n-\t\t\t\t} else {\n-\t\t\t\t\told_state = cm_node->state;\n-\t\t\t\t\tcm_node->state = I40IW_CM_STATE_LISTENER_DESTROYED;\n-\t\t\t\t\tif (old_state != I40IW_CM_STATE_MPAREQ_RCVD)\n-\t\t\t\t\t\ti40iw_rem_ref_cm_node(cm_node);\n-\t\t\t\t}\n-\t\t\t} else {\n-\t\t\t\tstruct i40iw_cm_event event;\n-\n-\t\t\t\tevent.cm_node = loopback;\n-\t\t\t\tmemcpy(event.cm_info.rem_addr,\n-\t\t\t\t loopback->rem_addr, sizeof(event.cm_info.rem_addr));\n-\t\t\t\tmemcpy(event.cm_info.loc_addr,\n-\t\t\t\t loopback->loc_addr, sizeof(event.cm_info.loc_addr));\n-\t\t\t\tevent.cm_info.rem_port = loopback->rem_port;\n-\t\t\t\tevent.cm_info.loc_port = loopback->loc_port;\n-\t\t\t\tevent.cm_info.cm_id = loopback->cm_id;\n-\t\t\t\tevent.cm_info.ipv4 = loopback->ipv4;\n-\t\t\t\tatomic_inc(&loopback->ref_count);\n-\t\t\t\tloopback->state = 
I40IW_CM_STATE_CLOSED;\n-\t\t\t\ti40iw_event_connect_error(&event);\n-\t\t\t\tcm_node->state = I40IW_CM_STATE_LISTENER_DESTROYED;\n-\t\t\t\ti40iw_rem_ref_cm_node(cm_node);\n-\t\t\t}\n-\t\t}\n-\t}\n-\n-\tif (!atomic_dec_return(&listener->ref_count)) {\n-\t\tspin_lock_irqsave(&cm_core->listen_list_lock, flags);\n-\t\tlist_del(&listener->list);\n-\t\tspin_unlock_irqrestore(&cm_core->listen_list_lock, flags);\n-\n-\t\tif (listener->iwdev) {\n-\t\t\tif (apbvt_del)\n-\t\t\t\ti40iw_manage_apbvt(listener->iwdev,\n-\t\t\t\t\t\t listener->loc_port,\n-\t\t\t\t\t\t I40IW_MANAGE_APBVT_DEL);\n-\n-\t\t\tmemcpy(nfo.loc_addr, listener->loc_addr, sizeof(nfo.loc_addr));\n-\t\t\tnfo.loc_port = listener->loc_port;\n-\t\t\tnfo.ipv4 = listener->ipv4;\n-\t\t\tnfo.vlan_id = listener->vlan_id;\n-\t\t\tnfo.user_pri = listener->user_pri;\n-\n-\t\t\tif (!list_empty(&listener->child_listen_list)) {\n-\t\t\t\ti40iw_del_multiple_qhash(listener->iwdev, &nfo, listener);\n-\t\t\t} else {\n-\t\t\t\tif (listener->qhash_set)\n-\t\t\t\t\ti40iw_manage_qhash(listener->iwdev,\n-\t\t\t\t\t\t\t &nfo,\n-\t\t\t\t\t\t\t I40IW_QHASH_TYPE_TCP_SYN,\n-\t\t\t\t\t\t\t I40IW_QHASH_MANAGE_TYPE_DELETE,\n-\t\t\t\t\t\t\t NULL,\n-\t\t\t\t\t\t\t false);\n-\t\t\t}\n-\t\t}\n-\n-\t\tcm_core->stats_listen_destroyed++;\n-\t\tkfree(listener);\n-\t\tcm_core->stats_listen_nodes_destroyed++;\n-\t\tlistener = NULL;\n-\t\tret = 0;\n-\t}\n-\n-\tif (listener) {\n-\t\tif (atomic_read(&listener->pend_accepts_cnt) > 0)\n-\t\t\ti40iw_debug(cm_core->dev,\n-\t\t\t\t I40IW_DEBUG_CM,\n-\t\t\t\t \"%s: listener (%p) pending accepts=%u\\n\",\n-\t\t\t\t __func__,\n-\t\t\t\t listener,\n-\t\t\t\t atomic_read(&listener->pend_accepts_cnt));\n-\t}\n-\n-\treturn ret;\n-}\n-\n-/**\n- * i40iw_cm_del_listen - delete a linstener\n- * @cm_core: cm's core\n- * @listener: passive connection's listener\n- * @apbvt_del: flag to delete apbvt\n- */\n-static int i40iw_cm_del_listen(struct i40iw_cm_core *cm_core,\n-\t\t\t struct i40iw_cm_listener *listener,\n-\t\t\t bool apbvt_del)\n-{\n-\tlistener->listener_state = I40IW_CM_LISTENER_PASSIVE_STATE;\n-\tlistener->cm_id = NULL;\t/* going to be destroyed pretty soon */\n-\treturn i40iw_dec_refcnt_listen(cm_core, listener, 1, apbvt_del);\n-}\n-\n-/**\n- * i40iw_addr_resolve_neigh - resolve neighbor address\n- * @iwdev: iwarp device structure\n- * @src_ip: local ip address\n- * @dst_ip: remote ip address\n- * @arpindex: if there is an arp entry\n- */\n-static int i40iw_addr_resolve_neigh(struct i40iw_device *iwdev,\n-\t\t\t\t u32 src_ip,\n-\t\t\t\t u32 dst_ip,\n-\t\t\t\t int arpindex)\n-{\n-\tstruct rtable *rt;\n-\tstruct neighbour *neigh;\n-\tint rc = arpindex;\n-\tstruct net_device *netdev = iwdev->netdev;\n-\t__be32 dst_ipaddr = htonl(dst_ip);\n-\t__be32 src_ipaddr = htonl(src_ip);\n-\n-\trt = ip_route_output(&init_net, dst_ipaddr, src_ipaddr, 0, 0);\n-\tif (IS_ERR(rt)) {\n-\t\ti40iw_pr_err(\"ip_route_output\\n\");\n-\t\treturn rc;\n-\t}\n-\n-\tif (netif_is_bond_slave(netdev))\n-\t\tnetdev = netdev_master_upper_dev_get(netdev);\n-\n-\tneigh = dst_neigh_lookup(&rt->dst, &dst_ipaddr);\n-\n-\trcu_read_lock();\n-\tif (neigh) {\n-\t\tif (neigh->nud_state & NUD_VALID) {\n-\t\t\tif (arpindex >= 0) {\n-\t\t\t\tif (ether_addr_equal(iwdev->arp_table[arpindex].mac_addr,\n-\t\t\t\t\t\t neigh->ha))\n-\t\t\t\t\t/* Mac address same as arp table */\n-\t\t\t\t\tgoto resolve_neigh_exit;\n-\t\t\t\ti40iw_manage_arp_cache(iwdev,\n-\t\t\t\t\t\t iwdev->arp_table[arpindex].mac_addr,\n-\t\t\t\t\t\t &dst_ip,\n-\t\t\t\t\t\t true,\n-\t\t\t\t\t\t 
I40IW_ARP_DELETE);\n-\t\t\t}\n-\n-\t\t\ti40iw_manage_arp_cache(iwdev, neigh->ha, &dst_ip, true, I40IW_ARP_ADD);\n-\t\t\trc = i40iw_arp_table(iwdev, &dst_ip, true, NULL, I40IW_ARP_RESOLVE);\n-\t\t} else {\n-\t\t\tneigh_event_send(neigh, NULL);\n-\t\t}\n-\t}\n- resolve_neigh_exit:\n-\n-\trcu_read_unlock();\n-\tif (neigh)\n-\t\tneigh_release(neigh);\n-\n-\tip_rt_put(rt);\n-\treturn rc;\n-}\n-\n-/**\n- * i40iw_get_dst_ipv6\n- */\n-static struct dst_entry *i40iw_get_dst_ipv6(struct sockaddr_in6 *src_addr,\n-\t\t\t\t\t struct sockaddr_in6 *dst_addr)\n-{\n-\tstruct dst_entry *dst;\n-\tstruct flowi6 fl6;\n-\n-\tmemset(&fl6, 0, sizeof(fl6));\n-\tfl6.daddr = dst_addr->sin6_addr;\n-\tfl6.saddr = src_addr->sin6_addr;\n-\tif (ipv6_addr_type(&fl6.daddr) & IPV6_ADDR_LINKLOCAL)\n-\t\tfl6.flowi6_oif = dst_addr->sin6_scope_id;\n-\n-\tdst = ip6_route_output(&init_net, NULL, &fl6);\n-\treturn dst;\n-}\n-\n-/**\n- * i40iw_addr_resolve_neigh_ipv6 - resolve neighbor ipv6 address\n- * @iwdev: iwarp device structure\n- * @dst_ip: remote ip address\n- * @arpindex: if there is an arp entry\n- */\n-static int i40iw_addr_resolve_neigh_ipv6(struct i40iw_device *iwdev,\n-\t\t\t\t\t u32 *src,\n-\t\t\t\t\t u32 *dest,\n-\t\t\t\t\t int arpindex)\n-{\n-\tstruct neighbour *neigh;\n-\tint rc = arpindex;\n-\tstruct net_device *netdev = iwdev->netdev;\n-\tstruct dst_entry *dst;\n-\tstruct sockaddr_in6 dst_addr;\n-\tstruct sockaddr_in6 src_addr;\n-\n-\tmemset(&dst_addr, 0, sizeof(dst_addr));\n-\tdst_addr.sin6_family = AF_INET6;\n-\ti40iw_copy_ip_htonl(dst_addr.sin6_addr.in6_u.u6_addr32, dest);\n-\tmemset(&src_addr, 0, sizeof(src_addr));\n-\tsrc_addr.sin6_family = AF_INET6;\n-\ti40iw_copy_ip_htonl(src_addr.sin6_addr.in6_u.u6_addr32, src);\n-\tdst = i40iw_get_dst_ipv6(&src_addr, &dst_addr);\n-\tif (!dst || dst->error) {\n-\t\tif (dst) {\n-\t\t\tdst_release(dst);\n-\t\t\ti40iw_pr_err(\"ip6_route_output returned dst->error = %d\\n\",\n-\t\t\t\t dst->error);\n-\t\t}\n-\t\treturn rc;\n-\t}\n-\n-\tif (netif_is_bond_slave(netdev))\n-\t\tnetdev = netdev_master_upper_dev_get(netdev);\n-\n-\tneigh = dst_neigh_lookup(dst, dst_addr.sin6_addr.in6_u.u6_addr32);\n-\n-\trcu_read_lock();\n-\tif (neigh) {\n-\t\ti40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_CM, \"dst_neigh_lookup MAC=%pM\\n\", neigh->ha);\n-\t\tif (neigh->nud_state & NUD_VALID) {\n-\t\t\tif (arpindex >= 0) {\n-\t\t\t\tif (ether_addr_equal\n-\t\t\t\t (iwdev->arp_table[arpindex].mac_addr,\n-\t\t\t\t neigh->ha)) {\n-\t\t\t\t\t/* Mac address same as in arp table */\n-\t\t\t\t\tgoto resolve_neigh_exit6;\n-\t\t\t\t}\n-\t\t\t\ti40iw_manage_arp_cache(iwdev,\n-\t\t\t\t\t\t iwdev->arp_table[arpindex].mac_addr,\n-\t\t\t\t\t\t dest,\n-\t\t\t\t\t\t false,\n-\t\t\t\t\t\t I40IW_ARP_DELETE);\n-\t\t\t}\n-\t\t\ti40iw_manage_arp_cache(iwdev,\n-\t\t\t\t\t neigh->ha,\n-\t\t\t\t\t dest,\n-\t\t\t\t\t false,\n-\t\t\t\t\t I40IW_ARP_ADD);\n-\t\t\trc = i40iw_arp_table(iwdev,\n-\t\t\t\t\t dest,\n-\t\t\t\t\t false,\n-\t\t\t\t\t NULL,\n-\t\t\t\t\t I40IW_ARP_RESOLVE);\n-\t\t} else {\n-\t\t\tneigh_event_send(neigh, NULL);\n-\t\t}\n-\t}\n-\n- resolve_neigh_exit6:\n-\trcu_read_unlock();\n-\tif (neigh)\n-\t\tneigh_release(neigh);\n-\tdst_release(dst);\n-\treturn rc;\n-}\n-\n-/**\n- * i40iw_ipv4_is_loopback - check if loopback\n- * @loc_addr: local addr to compare\n- * @rem_addr: remote address\n- */\n-static bool i40iw_ipv4_is_loopback(u32 loc_addr, u32 rem_addr)\n-{\n-\treturn ipv4_is_loopback(htonl(rem_addr)) || (loc_addr == rem_addr);\n-}\n-\n-/**\n- * i40iw_ipv6_is_loopback - check if loopback\n- * @loc_addr: local 
addr to compare\n- * @rem_addr: remote address\n- */\n-static bool i40iw_ipv6_is_loopback(u32 *loc_addr, u32 *rem_addr)\n-{\n-\tstruct in6_addr raddr6;\n-\n-\ti40iw_copy_ip_htonl(raddr6.in6_u.u6_addr32, rem_addr);\n-\treturn !memcmp(loc_addr, rem_addr, 16) || ipv6_addr_loopback(&raddr6);\n-}\n-\n-/**\n- * i40iw_make_cm_node - create a new instance of a cm node\n- * @cm_core: cm's core\n- * @iwdev: iwarp device structure\n- * @cm_info: quad info for connection\n- * @listener: passive connection's listener\n- */\n-static struct i40iw_cm_node *i40iw_make_cm_node(\n-\t\t\t\t struct i40iw_cm_core *cm_core,\n-\t\t\t\t struct i40iw_device *iwdev,\n-\t\t\t\t struct i40iw_cm_info *cm_info,\n-\t\t\t\t struct i40iw_cm_listener *listener)\n-{\n-\tstruct i40iw_cm_node *cm_node;\n-\tint oldarpindex;\n-\tint arpindex;\n-\tstruct net_device *netdev = iwdev->netdev;\n-\n-\t/* create an hte and cm_node for this instance */\n-\tcm_node = kzalloc(sizeof(*cm_node), GFP_ATOMIC);\n-\tif (!cm_node)\n-\t\treturn NULL;\n-\n-\t/* set our node specific transport info */\n-\tcm_node->ipv4 = cm_info->ipv4;\n-\tcm_node->vlan_id = cm_info->vlan_id;\n-\tif ((cm_node->vlan_id == I40IW_NO_VLAN) && iwdev->dcb)\n-\t\tcm_node->vlan_id = 0;\n-\tcm_node->tos = cm_info->tos;\n-\tcm_node->user_pri = cm_info->user_pri;\n-\tif (listener) {\n-\t\tif (listener->tos != cm_info->tos)\n-\t\t\ti40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_DCB,\n-\t\t\t\t \"application TOS[%d] and remote client TOS[%d] mismatch\\n\",\n-\t\t\t\t listener->tos, cm_info->tos);\n-\t\tcm_node->tos = max(listener->tos, cm_info->tos);\n-\t\tcm_node->user_pri = rt_tos2priority(cm_node->tos);\n-\t\ti40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_DCB, \"listener: TOS:[%d] UP:[%d]\\n\",\n-\t\t\t cm_node->tos, cm_node->user_pri);\n-\t}\n-\tmemcpy(cm_node->loc_addr, cm_info->loc_addr, sizeof(cm_node->loc_addr));\n-\tmemcpy(cm_node->rem_addr, cm_info->rem_addr, sizeof(cm_node->rem_addr));\n-\tcm_node->loc_port = cm_info->loc_port;\n-\tcm_node->rem_port = cm_info->rem_port;\n-\n-\tcm_node->mpa_frame_rev = iwdev->mpa_version;\n-\tcm_node->send_rdma0_op = SEND_RDMA_READ_ZERO;\n-\tcm_node->ird_size = I40IW_MAX_IRD_SIZE;\n-\tcm_node->ord_size = I40IW_MAX_ORD_SIZE;\n-\n-\tcm_node->listener = listener;\n-\tcm_node->cm_id = cm_info->cm_id;\n-\tether_addr_copy(cm_node->loc_mac, netdev->dev_addr);\n-\tspin_lock_init(&cm_node->retrans_list_lock);\n-\tcm_node->ack_rcvd = false;\n-\n-\tatomic_set(&cm_node->ref_count, 1);\n-\t/* associate our parent CM core */\n-\tcm_node->cm_core = cm_core;\n-\tcm_node->tcp_cntxt.loc_id = I40IW_CM_DEF_LOCAL_ID;\n-\tcm_node->tcp_cntxt.rcv_wscale = I40IW_CM_DEFAULT_RCV_WND_SCALE;\n-\tcm_node->tcp_cntxt.rcv_wnd =\n-\t\t\tI40IW_CM_DEFAULT_RCV_WND_SCALED >> I40IW_CM_DEFAULT_RCV_WND_SCALE;\n-\tif (cm_node->ipv4) {\n-\t\tcm_node->tcp_cntxt.loc_seq_num = secure_tcp_seq(htonl(cm_node->loc_addr[0]),\n-\t\t\t\t\t\t\thtonl(cm_node->rem_addr[0]),\n-\t\t\t\t\t\t\thtons(cm_node->loc_port),\n-\t\t\t\t\t\t\thtons(cm_node->rem_port));\n-\t\tcm_node->tcp_cntxt.mss = iwdev->vsi.mtu - I40IW_MTU_TO_MSS_IPV4;\n-\t} else if (IS_ENABLED(CONFIG_IPV6)) {\n-\t\t__be32 loc[4] = {\n-\t\t\thtonl(cm_node->loc_addr[0]), htonl(cm_node->loc_addr[1]),\n-\t\t\thtonl(cm_node->loc_addr[2]), htonl(cm_node->loc_addr[3])\n-\t\t};\n-\t\t__be32 rem[4] = {\n-\t\t\thtonl(cm_node->rem_addr[0]), htonl(cm_node->rem_addr[1]),\n-\t\t\thtonl(cm_node->rem_addr[2]), htonl(cm_node->rem_addr[3])\n-\t\t};\n-\t\tcm_node->tcp_cntxt.loc_seq_num = secure_tcpv6_seq(loc, 
rem,\n-\t\t\t\t\t\t\thtons(cm_node->loc_port),\n-\t\t\t\t\t\t\thtons(cm_node->rem_port));\n-\t\tcm_node->tcp_cntxt.mss = iwdev->vsi.mtu - I40IW_MTU_TO_MSS_IPV6;\n-\t}\n-\n-\tcm_node->iwdev = iwdev;\n-\tcm_node->dev = &iwdev->sc_dev;\n-\n-\tif ((cm_node->ipv4 &&\n-\t i40iw_ipv4_is_loopback(cm_node->loc_addr[0], cm_node->rem_addr[0])) ||\n-\t (!cm_node->ipv4 && i40iw_ipv6_is_loopback(cm_node->loc_addr,\n-\t\t\t\t\t\t cm_node->rem_addr))) {\n-\t\tarpindex = i40iw_arp_table(iwdev,\n-\t\t\t\t\t cm_node->rem_addr,\n-\t\t\t\t\t false,\n-\t\t\t\t\t NULL,\n-\t\t\t\t\t I40IW_ARP_RESOLVE);\n-\t} else {\n-\t\toldarpindex = i40iw_arp_table(iwdev,\n-\t\t\t\t\t cm_node->rem_addr,\n-\t\t\t\t\t false,\n-\t\t\t\t\t NULL,\n-\t\t\t\t\t I40IW_ARP_RESOLVE);\n-\t\tif (cm_node->ipv4)\n-\t\t\tarpindex = i40iw_addr_resolve_neigh(iwdev,\n-\t\t\t\t\t\t\t cm_info->loc_addr[0],\n-\t\t\t\t\t\t\t cm_info->rem_addr[0],\n-\t\t\t\t\t\t\t oldarpindex);\n-\t\telse if (IS_ENABLED(CONFIG_IPV6))\n-\t\t\tarpindex = i40iw_addr_resolve_neigh_ipv6(iwdev,\n-\t\t\t\t\t\t\t\t cm_info->loc_addr,\n-\t\t\t\t\t\t\t\t cm_info->rem_addr,\n-\t\t\t\t\t\t\t\t oldarpindex);\n-\t\telse\n-\t\t\tarpindex = -EINVAL;\n-\t}\n-\tif (arpindex < 0) {\n-\t\ti40iw_pr_err(\"cm_node arpindex\\n\");\n-\t\tkfree(cm_node);\n-\t\treturn NULL;\n-\t}\n-\tether_addr_copy(cm_node->rem_mac, iwdev->arp_table[arpindex].mac_addr);\n-\ti40iw_add_hte_node(cm_core, cm_node);\n-\tcm_core->stats_nodes_created++;\n-\treturn cm_node;\n-}\n-\n-/**\n- * i40iw_rem_ref_cm_node - destroy an instance of a cm node\n- * @cm_node: connection's node\n- */\n-static void i40iw_rem_ref_cm_node(struct i40iw_cm_node *cm_node)\n-{\n-\tstruct i40iw_cm_core *cm_core = cm_node->cm_core;\n-\tstruct i40iw_qp *iwqp;\n-\tstruct i40iw_cm_info nfo;\n-\tunsigned long flags;\n-\n-\tspin_lock_irqsave(&cm_node->cm_core->ht_lock, flags);\n-\tif (atomic_dec_return(&cm_node->ref_count)) {\n-\t\tspin_unlock_irqrestore(&cm_node->cm_core->ht_lock, flags);\n-\t\treturn;\n-\t}\n-\tlist_del(&cm_node->list);\n-\tspin_unlock_irqrestore(&cm_node->cm_core->ht_lock, flags);\n-\n-\t/* if the node is destroyed before connection was accelerated */\n-\tif (!cm_node->accelerated && cm_node->accept_pend) {\n-\t\tpr_err(\"node destroyed before established\\n\");\n-\t\tatomic_dec(&cm_node->listener->pend_accepts_cnt);\n-\t}\n-\tif (cm_node->close_entry)\n-\t\ti40iw_handle_close_entry(cm_node, 0);\n-\tif (cm_node->listener) {\n-\t\ti40iw_dec_refcnt_listen(cm_core, cm_node->listener, 0, true);\n-\t} else {\n-\t\tif (cm_node->apbvt_set) {\n-\t\t\ti40iw_manage_apbvt(cm_node->iwdev,\n-\t\t\t\t\t cm_node->loc_port,\n-\t\t\t\t\t I40IW_MANAGE_APBVT_DEL);\n-\t\t\tcm_node->apbvt_set = 0;\n-\t\t}\n-\t\ti40iw_get_addr_info(cm_node, &nfo);\n-\t\tif (cm_node->qhash_set) {\n-\t\t\ti40iw_manage_qhash(cm_node->iwdev,\n-\t\t\t\t\t &nfo,\n-\t\t\t\t\t I40IW_QHASH_TYPE_TCP_ESTABLISHED,\n-\t\t\t\t\t I40IW_QHASH_MANAGE_TYPE_DELETE,\n-\t\t\t\t\t NULL,\n-\t\t\t\t\t false);\n-\t\t\tcm_node->qhash_set = 0;\n-\t\t}\n-\t}\n-\n-\tiwqp = cm_node->iwqp;\n-\tif (iwqp) {\n-\t\tiwqp->cm_node = NULL;\n-\t\ti40iw_rem_ref(&iwqp->ibqp);\n-\t\tcm_node->iwqp = NULL;\n-\t} else if (cm_node->qhash_set) {\n-\t\ti40iw_get_addr_info(cm_node, &nfo);\n-\t\ti40iw_manage_qhash(cm_node->iwdev,\n-\t\t\t\t &nfo,\n-\t\t\t\t I40IW_QHASH_TYPE_TCP_ESTABLISHED,\n-\t\t\t\t I40IW_QHASH_MANAGE_TYPE_DELETE,\n-\t\t\t\t NULL,\n-\t\t\t\t false);\n-\t\tcm_node->qhash_set = 0;\n-\t}\n-\n-\tcm_node->cm_core->stats_nodes_destroyed++;\n-\tkfree(cm_node);\n-}\n-\n-/**\n- * i40iw_handle_fin_pkt - 
FIN packet received\n- * @cm_node: connection's node\n- */\n-static void i40iw_handle_fin_pkt(struct i40iw_cm_node *cm_node)\n-{\n-\tu32 ret;\n-\n-\tswitch (cm_node->state) {\n-\tcase I40IW_CM_STATE_SYN_RCVD:\n-\tcase I40IW_CM_STATE_SYN_SENT:\n-\tcase I40IW_CM_STATE_ESTABLISHED:\n-\tcase I40IW_CM_STATE_MPAREJ_RCVD:\n-\t\tcm_node->tcp_cntxt.rcv_nxt++;\n-\t\ti40iw_cleanup_retrans_entry(cm_node);\n-\t\tcm_node->state = I40IW_CM_STATE_LAST_ACK;\n-\t\ti40iw_send_fin(cm_node);\n-\t\tbreak;\n-\tcase I40IW_CM_STATE_MPAREQ_SENT:\n-\t\ti40iw_create_event(cm_node, I40IW_CM_EVENT_ABORTED);\n-\t\tcm_node->tcp_cntxt.rcv_nxt++;\n-\t\ti40iw_cleanup_retrans_entry(cm_node);\n-\t\tcm_node->state = I40IW_CM_STATE_CLOSED;\n-\t\tatomic_inc(&cm_node->ref_count);\n-\t\ti40iw_send_reset(cm_node);\n-\t\tbreak;\n-\tcase I40IW_CM_STATE_FIN_WAIT1:\n-\t\tcm_node->tcp_cntxt.rcv_nxt++;\n-\t\ti40iw_cleanup_retrans_entry(cm_node);\n-\t\tcm_node->state = I40IW_CM_STATE_CLOSING;\n-\t\ti40iw_send_ack(cm_node);\n-\t\t/*\n-\t\t * Wait for ACK as this is simultaneous close.\n-\t\t * After we receive ACK, do not send anything.\n-\t\t * Just rm the node.\n-\t\t */\n-\t\tbreak;\n-\tcase I40IW_CM_STATE_FIN_WAIT2:\n-\t\tcm_node->tcp_cntxt.rcv_nxt++;\n-\t\ti40iw_cleanup_retrans_entry(cm_node);\n-\t\tcm_node->state = I40IW_CM_STATE_TIME_WAIT;\n-\t\ti40iw_send_ack(cm_node);\n-\t\tret =\n-\t\t i40iw_schedule_cm_timer(cm_node, NULL, I40IW_TIMER_TYPE_CLOSE, 1, 0);\n-\t\tif (ret)\n-\t\t\ti40iw_pr_err(\"node %p state = %d\\n\", cm_node, cm_node->state);\n-\t\tbreak;\n-\tcase I40IW_CM_STATE_TIME_WAIT:\n-\t\tcm_node->tcp_cntxt.rcv_nxt++;\n-\t\ti40iw_cleanup_retrans_entry(cm_node);\n-\t\tcm_node->state = I40IW_CM_STATE_CLOSED;\n-\t\ti40iw_rem_ref_cm_node(cm_node);\n-\t\tbreak;\n-\tcase I40IW_CM_STATE_OFFLOADED:\n-\tdefault:\n-\t\ti40iw_pr_err(\"bad state node %p state = %d\\n\", cm_node, cm_node->state);\n-\t\tbreak;\n-\t}\n-}\n-\n-/**\n- * i40iw_handle_rst_pkt - process received RST packet\n- * @cm_node: connection's node\n- * @rbuf: receive buffer\n- */\n-static void i40iw_handle_rst_pkt(struct i40iw_cm_node *cm_node,\n-\t\t\t\t struct i40iw_puda_buf *rbuf)\n-{\n-\ti40iw_cleanup_retrans_entry(cm_node);\n-\tswitch (cm_node->state) {\n-\tcase I40IW_CM_STATE_SYN_SENT:\n-\tcase I40IW_CM_STATE_MPAREQ_SENT:\n-\t\tswitch (cm_node->mpa_frame_rev) {\n-\t\tcase IETF_MPA_V2:\n-\t\t\tcm_node->mpa_frame_rev = IETF_MPA_V1;\n-\t\t\t/* send a syn and goto syn sent state */\n-\t\t\tcm_node->state = I40IW_CM_STATE_SYN_SENT;\n-\t\t\tif (i40iw_send_syn(cm_node, 0))\n-\t\t\t\ti40iw_active_open_err(cm_node, false);\n-\t\t\tbreak;\n-\t\tcase IETF_MPA_V1:\n-\t\tdefault:\n-\t\t\ti40iw_active_open_err(cm_node, false);\n-\t\t\tbreak;\n-\t\t}\n-\t\tbreak;\n-\tcase I40IW_CM_STATE_MPAREQ_RCVD:\n-\t\tatomic_add_return(1, &cm_node->passive_state);\n-\t\tbreak;\n-\tcase I40IW_CM_STATE_ESTABLISHED:\n-\tcase I40IW_CM_STATE_SYN_RCVD:\n-\tcase I40IW_CM_STATE_LISTENING:\n-\t\ti40iw_pr_err(\"Bad state state = %d\\n\", cm_node->state);\n-\t\ti40iw_passive_open_err(cm_node, false);\n-\t\tbreak;\n-\tcase I40IW_CM_STATE_OFFLOADED:\n-\t\ti40iw_active_open_err(cm_node, false);\n-\t\tbreak;\n-\tcase I40IW_CM_STATE_CLOSED:\n-\t\tbreak;\n-\tcase I40IW_CM_STATE_FIN_WAIT2:\n-\tcase I40IW_CM_STATE_FIN_WAIT1:\n-\tcase I40IW_CM_STATE_LAST_ACK:\n-\t\tcm_node->cm_id->rem_ref(cm_node->cm_id);\n-\t\t/* fall through */\n-\tcase I40IW_CM_STATE_TIME_WAIT:\n-\t\tcm_node->state = I40IW_CM_STATE_CLOSED;\n-\t\ti40iw_rem_ref_cm_node(cm_node);\n-\t\tbreak;\n-\tdefault:\n-\t\tbreak;\n-\t}\n-}\n-\n-/**\n- * 
i40iw_handle_rcv_mpa - Process a recv'd mpa buffer\n- * @cm_node: connection's node\n- * @rbuf: receive buffer\n- */\n-static void i40iw_handle_rcv_mpa(struct i40iw_cm_node *cm_node,\n-\t\t\t\t struct i40iw_puda_buf *rbuf)\n-{\n-\tint ret;\n-\tint datasize = rbuf->datalen;\n-\tu8 *dataloc = rbuf->data;\n-\n-\tenum i40iw_cm_event_type type = I40IW_CM_EVENT_UNKNOWN;\n-\tu32 res_type;\n-\n-\tret = i40iw_parse_mpa(cm_node, dataloc, &res_type, datasize);\n-\tif (ret) {\n-\t\tif (cm_node->state == I40IW_CM_STATE_MPAREQ_SENT)\n-\t\t\ti40iw_active_open_err(cm_node, true);\n-\t\telse\n-\t\t\ti40iw_passive_open_err(cm_node, true);\n-\t\treturn;\n-\t}\n-\n-\tswitch (cm_node->state) {\n-\tcase I40IW_CM_STATE_ESTABLISHED:\n-\t\tif (res_type == I40IW_MPA_REQUEST_REJECT)\n-\t\t\ti40iw_pr_err(\"state for reject\\n\");\n-\t\tcm_node->state = I40IW_CM_STATE_MPAREQ_RCVD;\n-\t\ttype = I40IW_CM_EVENT_MPA_REQ;\n-\t\ti40iw_send_ack(cm_node);\t/* ACK received MPA request */\n-\t\tatomic_set(&cm_node->passive_state,\n-\t\t\t I40IW_PASSIVE_STATE_INDICATED);\n-\t\tbreak;\n-\tcase I40IW_CM_STATE_MPAREQ_SENT:\n-\t\ti40iw_cleanup_retrans_entry(cm_node);\n-\t\tif (res_type == I40IW_MPA_REQUEST_REJECT) {\n-\t\t\ttype = I40IW_CM_EVENT_MPA_REJECT;\n-\t\t\tcm_node->state = I40IW_CM_STATE_MPAREJ_RCVD;\n-\t\t} else {\n-\t\t\ttype = I40IW_CM_EVENT_CONNECTED;\n-\t\t\tcm_node->state = I40IW_CM_STATE_OFFLOADED;\n-\t\t}\n-\t\ti40iw_send_ack(cm_node);\n-\t\tbreak;\n-\tdefault:\n-\t\tpr_err(\"%s wrong cm_node state =%d\\n\", __func__, cm_node->state);\n-\t\tbreak;\n-\t}\n-\ti40iw_create_event(cm_node, type);\n-}\n-\n-/**\n- * i40iw_indicate_pkt_err - Send up err event to cm\n- * @cm_node: connection's node\n- */\n-static void i40iw_indicate_pkt_err(struct i40iw_cm_node *cm_node)\n-{\n-\tswitch (cm_node->state) {\n-\tcase I40IW_CM_STATE_SYN_SENT:\n-\tcase I40IW_CM_STATE_MPAREQ_SENT:\n-\t\ti40iw_active_open_err(cm_node, true);\n-\t\tbreak;\n-\tcase I40IW_CM_STATE_ESTABLISHED:\n-\tcase I40IW_CM_STATE_SYN_RCVD:\n-\t\ti40iw_passive_open_err(cm_node, true);\n-\t\tbreak;\n-\tcase I40IW_CM_STATE_OFFLOADED:\n-\tdefault:\n-\t\tbreak;\n-\t}\n-}\n-\n-/**\n- * i40iw_check_syn - Check for error on received syn ack\n- * @cm_node: connection's node\n- * @tcph: pointer tcp header\n- */\n-static int i40iw_check_syn(struct i40iw_cm_node *cm_node, struct tcphdr *tcph)\n-{\n-\tint err = 0;\n-\n-\tif (ntohl(tcph->ack_seq) != cm_node->tcp_cntxt.loc_seq_num) {\n-\t\terr = 1;\n-\t\ti40iw_active_open_err(cm_node, true);\n-\t}\n-\treturn err;\n-}\n-\n-/**\n- * i40iw_check_seq - check seq numbers if OK\n- * @cm_node: connection's node\n- * @tcph: pointer tcp header\n- */\n-static int i40iw_check_seq(struct i40iw_cm_node *cm_node, struct tcphdr *tcph)\n-{\n-\tint err = 0;\n-\tu32 seq;\n-\tu32 ack_seq;\n-\tu32 loc_seq_num = cm_node->tcp_cntxt.loc_seq_num;\n-\tu32 rcv_nxt = cm_node->tcp_cntxt.rcv_nxt;\n-\tu32 rcv_wnd;\n-\n-\tseq = ntohl(tcph->seq);\n-\tack_seq = ntohl(tcph->ack_seq);\n-\trcv_wnd = cm_node->tcp_cntxt.rcv_wnd;\n-\tif (ack_seq != loc_seq_num)\n-\t\terr = -1;\n-\telse if (!between(seq, rcv_nxt, (rcv_nxt + rcv_wnd)))\n-\t\terr = -1;\n-\tif (err) {\n-\t\ti40iw_pr_err(\"seq number\\n\");\n-\t\ti40iw_indicate_pkt_err(cm_node);\n-\t}\n-\treturn err;\n-}\n-\n-/**\n- * i40iw_handle_syn_pkt - is for Passive node\n- * @cm_node: connection's node\n- * @rbuf: receive buffer\n- */\n-static void i40iw_handle_syn_pkt(struct i40iw_cm_node *cm_node,\n-\t\t\t\t struct i40iw_puda_buf *rbuf)\n-{\n-\tstruct tcphdr *tcph = (struct tcphdr *)rbuf->tcph;\n-\tint 
ret;\n-\tu32 inc_sequence;\n-\tint optionsize;\n-\tstruct i40iw_cm_info nfo;\n-\n-\toptionsize = (tcph->doff << 2) - sizeof(struct tcphdr);\n-\tinc_sequence = ntohl(tcph->seq);\n-\n-\tswitch (cm_node->state) {\n-\tcase I40IW_CM_STATE_SYN_SENT:\n-\tcase I40IW_CM_STATE_MPAREQ_SENT:\n-\t\t/* Rcvd syn on active open connection */\n-\t\ti40iw_active_open_err(cm_node, 1);\n-\t\tbreak;\n-\tcase I40IW_CM_STATE_LISTENING:\n-\t\t/* Passive OPEN */\n-\t\tif (atomic_read(&cm_node->listener->pend_accepts_cnt) >\n-\t\t cm_node->listener->backlog) {\n-\t\t\tcm_node->cm_core->stats_backlog_drops++;\n-\t\t\ti40iw_passive_open_err(cm_node, false);\n-\t\t\tbreak;\n-\t\t}\n-\t\tret = i40iw_handle_tcp_options(cm_node, tcph, optionsize, 1);\n-\t\tif (ret) {\n-\t\t\ti40iw_passive_open_err(cm_node, false);\n-\t\t\t/* drop pkt */\n-\t\t\tbreak;\n-\t\t}\n-\t\tcm_node->tcp_cntxt.rcv_nxt = inc_sequence + 1;\n-\t\tcm_node->accept_pend = 1;\n-\t\tatomic_inc(&cm_node->listener->pend_accepts_cnt);\n-\n-\t\tcm_node->state = I40IW_CM_STATE_SYN_RCVD;\n-\t\ti40iw_get_addr_info(cm_node, &nfo);\n-\t\tret = i40iw_manage_qhash(cm_node->iwdev,\n-\t\t\t\t\t &nfo,\n-\t\t\t\t\t I40IW_QHASH_TYPE_TCP_ESTABLISHED,\n-\t\t\t\t\t I40IW_QHASH_MANAGE_TYPE_ADD,\n-\t\t\t\t\t (void *)cm_node,\n-\t\t\t\t\t false);\n-\t\tcm_node->qhash_set = true;\n-\t\tbreak;\n-\tcase I40IW_CM_STATE_CLOSED:\n-\t\ti40iw_cleanup_retrans_entry(cm_node);\n-\t\tatomic_inc(&cm_node->ref_count);\n-\t\ti40iw_send_reset(cm_node);\n-\t\tbreak;\n-\tcase I40IW_CM_STATE_OFFLOADED:\n-\tcase I40IW_CM_STATE_ESTABLISHED:\n-\tcase I40IW_CM_STATE_FIN_WAIT1:\n-\tcase I40IW_CM_STATE_FIN_WAIT2:\n-\tcase I40IW_CM_STATE_MPAREQ_RCVD:\n-\tcase I40IW_CM_STATE_LAST_ACK:\n-\tcase I40IW_CM_STATE_CLOSING:\n-\tcase I40IW_CM_STATE_UNKNOWN:\n-\tdefault:\n-\t\tbreak;\n-\t}\n-}\n-\n-/**\n- * i40iw_handle_synack_pkt - Process SYN+ACK packet (active side)\n- * @cm_node: connection's node\n- * @rbuf: receive buffer\n- */\n-static void i40iw_handle_synack_pkt(struct i40iw_cm_node *cm_node,\n-\t\t\t\t struct i40iw_puda_buf *rbuf)\n-{\n-\tstruct tcphdr *tcph = (struct tcphdr *)rbuf->tcph;\n-\tint ret;\n-\tu32 inc_sequence;\n-\tint optionsize;\n-\n-\toptionsize = (tcph->doff << 2) - sizeof(struct tcphdr);\n-\tinc_sequence = ntohl(tcph->seq);\n-\tswitch (cm_node->state) {\n-\tcase I40IW_CM_STATE_SYN_SENT:\n-\t\ti40iw_cleanup_retrans_entry(cm_node);\n-\t\t/* active open */\n-\t\tif (i40iw_check_syn(cm_node, tcph)) {\n-\t\t\ti40iw_pr_err(\"check syn fail\\n\");\n-\t\t\treturn;\n-\t\t}\n-\t\tcm_node->tcp_cntxt.rem_ack_num = ntohl(tcph->ack_seq);\n-\t\t/* setup options */\n-\t\tret = i40iw_handle_tcp_options(cm_node, tcph, optionsize, 0);\n-\t\tif (ret) {\n-\t\t\ti40iw_debug(cm_node->dev,\n-\t\t\t\t I40IW_DEBUG_CM,\n-\t\t\t\t \"cm_node=%p tcp_options failed\\n\",\n-\t\t\t\t cm_node);\n-\t\t\tbreak;\n-\t\t}\n-\t\ti40iw_cleanup_retrans_entry(cm_node);\n-\t\tcm_node->tcp_cntxt.rcv_nxt = inc_sequence + 1;\n-\t\ti40iw_send_ack(cm_node);\t/* ACK for the syn_ack */\n-\t\tret = i40iw_send_mpa_request(cm_node);\n-\t\tif (ret) {\n-\t\t\ti40iw_debug(cm_node->dev,\n-\t\t\t\t I40IW_DEBUG_CM,\n-\t\t\t\t \"cm_node=%p i40iw_send_mpa_request failed\\n\",\n-\t\t\t\t cm_node);\n-\t\t\tbreak;\n-\t\t}\n-\t\tcm_node->state = I40IW_CM_STATE_MPAREQ_SENT;\n-\t\tbreak;\n-\tcase I40IW_CM_STATE_MPAREQ_RCVD:\n-\t\ti40iw_passive_open_err(cm_node, true);\n-\t\tbreak;\n-\tcase I40IW_CM_STATE_LISTENING:\n-\t\tcm_node->tcp_cntxt.loc_seq_num = ntohl(tcph->ack_seq);\n-\t\ti40iw_cleanup_retrans_entry(cm_node);\n-\t\tcm_node->state = 
I40IW_CM_STATE_CLOSED;\n-\t\ti40iw_send_reset(cm_node);\n-\t\tbreak;\n-\tcase I40IW_CM_STATE_CLOSED:\n-\t\tcm_node->tcp_cntxt.loc_seq_num = ntohl(tcph->ack_seq);\n-\t\ti40iw_cleanup_retrans_entry(cm_node);\n-\t\tatomic_inc(&cm_node->ref_count);\n-\t\ti40iw_send_reset(cm_node);\n-\t\tbreak;\n-\tcase I40IW_CM_STATE_ESTABLISHED:\n-\tcase I40IW_CM_STATE_FIN_WAIT1:\n-\tcase I40IW_CM_STATE_FIN_WAIT2:\n-\tcase I40IW_CM_STATE_LAST_ACK:\n-\tcase I40IW_CM_STATE_OFFLOADED:\n-\tcase I40IW_CM_STATE_CLOSING:\n-\tcase I40IW_CM_STATE_UNKNOWN:\n-\tcase I40IW_CM_STATE_MPAREQ_SENT:\n-\tdefault:\n-\t\tbreak;\n-\t}\n-}\n-\n-/**\n- * i40iw_handle_ack_pkt - process packet with ACK\n- * @cm_node: connection's node\n- * @rbuf: receive buffer\n- */\n-static int i40iw_handle_ack_pkt(struct i40iw_cm_node *cm_node,\n-\t\t\t\tstruct i40iw_puda_buf *rbuf)\n-{\n-\tstruct tcphdr *tcph = (struct tcphdr *)rbuf->tcph;\n-\tu32 inc_sequence;\n-\tint ret = 0;\n-\tint optionsize;\n-\tu32 datasize = rbuf->datalen;\n-\n-\toptionsize = (tcph->doff << 2) - sizeof(struct tcphdr);\n-\n-\tif (i40iw_check_seq(cm_node, tcph))\n-\t\treturn -EINVAL;\n-\n-\tinc_sequence = ntohl(tcph->seq);\n-\tswitch (cm_node->state) {\n-\tcase I40IW_CM_STATE_SYN_RCVD:\n-\t\ti40iw_cleanup_retrans_entry(cm_node);\n-\t\tret = i40iw_handle_tcp_options(cm_node, tcph, optionsize, 1);\n-\t\tif (ret)\n-\t\t\tbreak;\n-\t\tcm_node->tcp_cntxt.rem_ack_num = ntohl(tcph->ack_seq);\n-\t\tcm_node->state = I40IW_CM_STATE_ESTABLISHED;\n-\t\tif (datasize) {\n-\t\t\tcm_node->tcp_cntxt.rcv_nxt = inc_sequence + datasize;\n-\t\t\ti40iw_handle_rcv_mpa(cm_node, rbuf);\n-\t\t}\n-\t\tbreak;\n-\tcase I40IW_CM_STATE_ESTABLISHED:\n-\t\ti40iw_cleanup_retrans_entry(cm_node);\n-\t\tif (datasize) {\n-\t\t\tcm_node->tcp_cntxt.rcv_nxt = inc_sequence + datasize;\n-\t\t\ti40iw_handle_rcv_mpa(cm_node, rbuf);\n-\t\t}\n-\t\tbreak;\n-\tcase I40IW_CM_STATE_MPAREQ_SENT:\n-\t\tcm_node->tcp_cntxt.rem_ack_num = ntohl(tcph->ack_seq);\n-\t\tif (datasize) {\n-\t\t\tcm_node->tcp_cntxt.rcv_nxt = inc_sequence + datasize;\n-\t\t\tcm_node->ack_rcvd = false;\n-\t\t\ti40iw_handle_rcv_mpa(cm_node, rbuf);\n-\t\t} else {\n-\t\t\tcm_node->ack_rcvd = true;\n-\t\t}\n-\t\tbreak;\n-\tcase I40IW_CM_STATE_LISTENING:\n-\t\ti40iw_cleanup_retrans_entry(cm_node);\n-\t\tcm_node->state = I40IW_CM_STATE_CLOSED;\n-\t\ti40iw_send_reset(cm_node);\n-\t\tbreak;\n-\tcase I40IW_CM_STATE_CLOSED:\n-\t\ti40iw_cleanup_retrans_entry(cm_node);\n-\t\tatomic_inc(&cm_node->ref_count);\n-\t\ti40iw_send_reset(cm_node);\n-\t\tbreak;\n-\tcase I40IW_CM_STATE_LAST_ACK:\n-\tcase I40IW_CM_STATE_CLOSING:\n-\t\ti40iw_cleanup_retrans_entry(cm_node);\n-\t\tcm_node->state = I40IW_CM_STATE_CLOSED;\n-\t\tif (!cm_node->accept_pend)\n-\t\t\tcm_node->cm_id->rem_ref(cm_node->cm_id);\n-\t\ti40iw_rem_ref_cm_node(cm_node);\n-\t\tbreak;\n-\tcase I40IW_CM_STATE_FIN_WAIT1:\n-\t\ti40iw_cleanup_retrans_entry(cm_node);\n-\t\tcm_node->state = I40IW_CM_STATE_FIN_WAIT2;\n-\t\tbreak;\n-\tcase I40IW_CM_STATE_SYN_SENT:\n-\tcase I40IW_CM_STATE_FIN_WAIT2:\n-\tcase I40IW_CM_STATE_OFFLOADED:\n-\tcase I40IW_CM_STATE_MPAREQ_RCVD:\n-\tcase I40IW_CM_STATE_UNKNOWN:\n-\tdefault:\n-\t\ti40iw_cleanup_retrans_entry(cm_node);\n-\t\tbreak;\n-\t}\n-\treturn ret;\n-}\n-\n-/**\n- * i40iw_process_packet - process cm packet\n- * @cm_node: connection's node\n- * @rbuf: receive buffer\n- */\n-static void i40iw_process_packet(struct i40iw_cm_node *cm_node,\n-\t\t\t\t struct i40iw_puda_buf *rbuf)\n-{\n-\tenum i40iw_tcpip_pkt_type pkt_type = I40IW_PKT_TYPE_UNKNOWN;\n-\tstruct tcphdr *tcph = (struct 
tcphdr *)rbuf->tcph;\n-\tu32 fin_set = 0;\n-\tint ret;\n-\n-\tif (tcph->rst) {\n-\t\tpkt_type = I40IW_PKT_TYPE_RST;\n-\t} else if (tcph->syn) {\n-\t\tpkt_type = I40IW_PKT_TYPE_SYN;\n-\t\tif (tcph->ack)\n-\t\t\tpkt_type = I40IW_PKT_TYPE_SYNACK;\n-\t} else if (tcph->ack) {\n-\t\tpkt_type = I40IW_PKT_TYPE_ACK;\n-\t}\n-\tif (tcph->fin)\n-\t\tfin_set = 1;\n-\n-\tswitch (pkt_type) {\n-\tcase I40IW_PKT_TYPE_SYN:\n-\t\ti40iw_handle_syn_pkt(cm_node, rbuf);\n-\t\tbreak;\n-\tcase I40IW_PKT_TYPE_SYNACK:\n-\t\ti40iw_handle_synack_pkt(cm_node, rbuf);\n-\t\tbreak;\n-\tcase I40IW_PKT_TYPE_ACK:\n-\t\tret = i40iw_handle_ack_pkt(cm_node, rbuf);\n-\t\tif (fin_set && !ret)\n-\t\t\ti40iw_handle_fin_pkt(cm_node);\n-\t\tbreak;\n-\tcase I40IW_PKT_TYPE_RST:\n-\t\ti40iw_handle_rst_pkt(cm_node, rbuf);\n-\t\tbreak;\n-\tdefault:\n-\t\tif (fin_set &&\n-\t\t (!i40iw_check_seq(cm_node, (struct tcphdr *)rbuf->tcph)))\n-\t\t\ti40iw_handle_fin_pkt(cm_node);\n-\t\tbreak;\n-\t}\n-}\n-\n-/**\n- * i40iw_make_listen_node - create a listen node with params\n- * @cm_core: cm's core\n- * @iwdev: iwarp device structure\n- * @cm_info: quad info for connection\n- */\n-static struct i40iw_cm_listener *i40iw_make_listen_node(\n-\t\t\t\t\tstruct i40iw_cm_core *cm_core,\n-\t\t\t\t\tstruct i40iw_device *iwdev,\n-\t\t\t\t\tstruct i40iw_cm_info *cm_info)\n-{\n-\tstruct i40iw_cm_listener *listener;\n-\tunsigned long flags;\n-\n-\t/* cannot have multiple matching listeners */\n-\tlistener = i40iw_find_listener(cm_core, cm_info->loc_addr,\n-\t\t\t\t cm_info->loc_port,\n-\t\t\t\t cm_info->vlan_id,\n-\t\t\t\t I40IW_CM_LISTENER_EITHER_STATE);\n-\tif (listener &&\n-\t (listener->listener_state == I40IW_CM_LISTENER_ACTIVE_STATE)) {\n-\t\tatomic_dec(&listener->ref_count);\n-\t\ti40iw_debug(cm_core->dev,\n-\t\t\t I40IW_DEBUG_CM,\n-\t\t\t \"Not creating listener since it already exists\\n\");\n-\t\treturn NULL;\n-\t}\n-\n-\tif (!listener) {\n-\t\t/* create a CM listen node (1/2 node to compare incoming traffic to) */\n-\t\tlistener = kzalloc(sizeof(*listener), GFP_KERNEL);\n-\t\tif (!listener)\n-\t\t\treturn NULL;\n-\t\tcm_core->stats_listen_nodes_created++;\n-\t\tmemcpy(listener->loc_addr, cm_info->loc_addr, sizeof(listener->loc_addr));\n-\t\tlistener->loc_port = cm_info->loc_port;\n-\n-\t\tINIT_LIST_HEAD(&listener->child_listen_list);\n-\n-\t\tatomic_set(&listener->ref_count, 1);\n-\t} else {\n-\t\tlistener->reused_node = 1;\n-\t}\n-\n-\tlistener->cm_id = cm_info->cm_id;\n-\tlistener->ipv4 = cm_info->ipv4;\n-\tlistener->vlan_id = cm_info->vlan_id;\n-\tatomic_set(&listener->pend_accepts_cnt, 0);\n-\tlistener->cm_core = cm_core;\n-\tlistener->iwdev = iwdev;\n-\n-\tlistener->backlog = cm_info->backlog;\n-\tlistener->listener_state = I40IW_CM_LISTENER_ACTIVE_STATE;\n-\n-\tif (!listener->reused_node) {\n-\t\tspin_lock_irqsave(&cm_core->listen_list_lock, flags);\n-\t\tlist_add(&listener->list, &cm_core->listen_nodes);\n-\t\tspin_unlock_irqrestore(&cm_core->listen_list_lock, flags);\n-\t}\n-\n-\treturn listener;\n-}\n-\n-/**\n- * i40iw_create_cm_node - make a connection node with params\n- * @cm_core: cm's core\n- * @iwdev: iwarp device structure\n- * @conn_param: upper layer connection parameters\n- * @cm_info: quad info for connection\n- */\n-static struct i40iw_cm_node *i40iw_create_cm_node(\n-\t\t\t\t\tstruct i40iw_cm_core *cm_core,\n-\t\t\t\t\tstruct i40iw_device *iwdev,\n-\t\t\t\t\tstruct iw_cm_conn_param *conn_param,\n-\t\t\t\t\tstruct i40iw_cm_info *cm_info)\n-{\n-\tstruct i40iw_cm_node *cm_node;\n-\tstruct i40iw_cm_listener 
*loopback_remotelistener;\n-\tstruct i40iw_cm_node *loopback_remotenode;\n-\tstruct i40iw_cm_info loopback_cm_info;\n-\n-\tu16 private_data_len = conn_param->private_data_len;\n-\tconst void *private_data = conn_param->private_data;\n-\n-\t/* create a CM connection node */\n-\tcm_node = i40iw_make_cm_node(cm_core, iwdev, cm_info, NULL);\n-\tif (!cm_node)\n-\t\treturn ERR_PTR(-ENOMEM);\n-\t/* set our node side to client (active) side */\n-\tcm_node->tcp_cntxt.client = 1;\n-\tcm_node->tcp_cntxt.rcv_wscale = I40IW_CM_DEFAULT_RCV_WND_SCALE;\n-\n-\ti40iw_record_ird_ord(cm_node, conn_param->ird, conn_param->ord);\n-\n-\tif (!memcmp(cm_info->loc_addr, cm_info->rem_addr, sizeof(cm_info->loc_addr))) {\n-\t\tloopback_remotelistener = i40iw_find_listener(\n-\t\t\t\t\t\tcm_core,\n-\t\t\t\t\t\tcm_info->rem_addr,\n-\t\t\t\t\t\tcm_node->rem_port,\n-\t\t\t\t\t\tcm_node->vlan_id,\n-\t\t\t\t\t\tI40IW_CM_LISTENER_ACTIVE_STATE);\n-\t\tif (!loopback_remotelistener) {\n-\t\t\ti40iw_rem_ref_cm_node(cm_node);\n-\t\t\treturn ERR_PTR(-ECONNREFUSED);\n-\t\t} else {\n-\t\t\tloopback_cm_info = *cm_info;\n-\t\t\tloopback_cm_info.loc_port = cm_info->rem_port;\n-\t\t\tloopback_cm_info.rem_port = cm_info->loc_port;\n-\t\t\tloopback_cm_info.cm_id = loopback_remotelistener->cm_id;\n-\t\t\tloopback_cm_info.ipv4 = cm_info->ipv4;\n-\t\t\tloopback_remotenode = i40iw_make_cm_node(cm_core,\n-\t\t\t\t\t\t\t\t iwdev,\n-\t\t\t\t\t\t\t\t &loopback_cm_info,\n-\t\t\t\t\t\t\t\t loopback_remotelistener);\n-\t\t\tif (!loopback_remotenode) {\n-\t\t\t\ti40iw_rem_ref_cm_node(cm_node);\n-\t\t\t\treturn ERR_PTR(-ENOMEM);\n-\t\t\t}\n-\t\t\tcm_core->stats_loopbacks++;\n-\t\t\tloopback_remotenode->loopbackpartner = cm_node;\n-\t\t\tloopback_remotenode->tcp_cntxt.rcv_wscale =\n-\t\t\t\tI40IW_CM_DEFAULT_RCV_WND_SCALE;\n-\t\t\tcm_node->loopbackpartner = loopback_remotenode;\n-\t\t\tmemcpy(loopback_remotenode->pdata_buf, private_data,\n-\t\t\t private_data_len);\n-\t\t\tloopback_remotenode->pdata.size = private_data_len;\n-\n-\t\t\tif (loopback_remotenode->ord_size > cm_node->ird_size)\n-\t\t\t\tloopback_remotenode->ord_size =\n-\t\t\t\t\tcm_node->ird_size;\n-\n-\t\t\tcm_node->state = I40IW_CM_STATE_OFFLOADED;\n-\t\t\tcm_node->tcp_cntxt.rcv_nxt =\n-\t\t\t\tloopback_remotenode->tcp_cntxt.loc_seq_num;\n-\t\t\tloopback_remotenode->tcp_cntxt.rcv_nxt =\n-\t\t\t\tcm_node->tcp_cntxt.loc_seq_num;\n-\t\t\tcm_node->tcp_cntxt.max_snd_wnd =\n-\t\t\t\tloopback_remotenode->tcp_cntxt.rcv_wnd;\n-\t\t\tloopback_remotenode->tcp_cntxt.max_snd_wnd = cm_node->tcp_cntxt.rcv_wnd;\n-\t\t\tcm_node->tcp_cntxt.snd_wnd = loopback_remotenode->tcp_cntxt.rcv_wnd;\n-\t\t\tloopback_remotenode->tcp_cntxt.snd_wnd = cm_node->tcp_cntxt.rcv_wnd;\n-\t\t\tcm_node->tcp_cntxt.snd_wscale = loopback_remotenode->tcp_cntxt.rcv_wscale;\n-\t\t\tloopback_remotenode->tcp_cntxt.snd_wscale = cm_node->tcp_cntxt.rcv_wscale;\n-\t\t}\n-\t\treturn cm_node;\n-\t}\n-\n-\tcm_node->pdata.size = private_data_len;\n-\tcm_node->pdata.addr = cm_node->pdata_buf;\n-\n-\tmemcpy(cm_node->pdata_buf, private_data, private_data_len);\n-\n-\tcm_node->state = I40IW_CM_STATE_SYN_SENT;\n-\treturn cm_node;\n-}\n-\n-/**\n- * i40iw_cm_reject - reject and teardown a connection\n- * @cm_node: connection's node\n- * @pdate: ptr to private data for reject\n- * @plen: size of private data\n- */\n-static int i40iw_cm_reject(struct i40iw_cm_node *cm_node, const void *pdata, u8 plen)\n-{\n-\tint ret = 0;\n-\tint err;\n-\tint passive_state;\n-\tstruct iw_cm_id *cm_id = cm_node->cm_id;\n-\tstruct i40iw_cm_node *loopback = 
cm_node->loopbackpartner;\n-\n-\tif (cm_node->tcp_cntxt.client)\n-\t\treturn ret;\n-\ti40iw_cleanup_retrans_entry(cm_node);\n-\n-\tif (!loopback) {\n-\t\tpassive_state = atomic_add_return(1, &cm_node->passive_state);\n-\t\tif (passive_state == I40IW_SEND_RESET_EVENT) {\n-\t\t\tcm_node->state = I40IW_CM_STATE_CLOSED;\n-\t\t\ti40iw_rem_ref_cm_node(cm_node);\n-\t\t} else {\n-\t\t\tif (cm_node->state == I40IW_CM_STATE_LISTENER_DESTROYED) {\n-\t\t\t\ti40iw_rem_ref_cm_node(cm_node);\n-\t\t\t} else {\n-\t\t\t\tret = i40iw_send_mpa_reject(cm_node, pdata, plen);\n-\t\t\t\tif (ret) {\n-\t\t\t\t\tcm_node->state = I40IW_CM_STATE_CLOSED;\n-\t\t\t\t\terr = i40iw_send_reset(cm_node);\n-\t\t\t\t\tif (err)\n-\t\t\t\t\t\ti40iw_pr_err(\"send reset failed\\n\");\n-\t\t\t\t} else {\n-\t\t\t\t\tcm_id->add_ref(cm_id);\n-\t\t\t\t}\n-\t\t\t}\n-\t\t}\n-\t} else {\n-\t\tcm_node->cm_id = NULL;\n-\t\tif (cm_node->state == I40IW_CM_STATE_LISTENER_DESTROYED) {\n-\t\t\ti40iw_rem_ref_cm_node(cm_node);\n-\t\t\ti40iw_rem_ref_cm_node(loopback);\n-\t\t} else {\n-\t\t\tret = i40iw_send_cm_event(loopback,\n-\t\t\t\t\t\t loopback->cm_id,\n-\t\t\t\t\t\t IW_CM_EVENT_CONNECT_REPLY,\n-\t\t\t\t\t\t -ECONNREFUSED);\n-\t\t\ti40iw_rem_ref_cm_node(cm_node);\n-\t\t\tloopback->state = I40IW_CM_STATE_CLOSING;\n-\n-\t\t\tcm_id = loopback->cm_id;\n-\t\t\ti40iw_rem_ref_cm_node(loopback);\n-\t\t\tcm_id->rem_ref(cm_id);\n-\t\t}\n-\t}\n-\n-\treturn ret;\n-}\n-\n-/**\n- * i40iw_cm_close - close of cm connection\n- * @cm_node: connection's node\n- */\n-static int i40iw_cm_close(struct i40iw_cm_node *cm_node)\n-{\n-\tint ret = 0;\n-\n-\tif (!cm_node)\n-\t\treturn -EINVAL;\n-\n-\tswitch (cm_node->state) {\n-\tcase I40IW_CM_STATE_SYN_RCVD:\n-\tcase I40IW_CM_STATE_SYN_SENT:\n-\tcase I40IW_CM_STATE_ONE_SIDE_ESTABLISHED:\n-\tcase I40IW_CM_STATE_ESTABLISHED:\n-\tcase I40IW_CM_STATE_ACCEPTING:\n-\tcase I40IW_CM_STATE_MPAREQ_SENT:\n-\tcase I40IW_CM_STATE_MPAREQ_RCVD:\n-\t\ti40iw_cleanup_retrans_entry(cm_node);\n-\t\ti40iw_send_reset(cm_node);\n-\t\tbreak;\n-\tcase I40IW_CM_STATE_CLOSE_WAIT:\n-\t\tcm_node->state = I40IW_CM_STATE_LAST_ACK;\n-\t\ti40iw_send_fin(cm_node);\n-\t\tbreak;\n-\tcase I40IW_CM_STATE_FIN_WAIT1:\n-\tcase I40IW_CM_STATE_FIN_WAIT2:\n-\tcase I40IW_CM_STATE_LAST_ACK:\n-\tcase I40IW_CM_STATE_TIME_WAIT:\n-\tcase I40IW_CM_STATE_CLOSING:\n-\t\tret = -1;\n-\t\tbreak;\n-\tcase I40IW_CM_STATE_LISTENING:\n-\t\ti40iw_cleanup_retrans_entry(cm_node);\n-\t\ti40iw_send_reset(cm_node);\n-\t\tbreak;\n-\tcase I40IW_CM_STATE_MPAREJ_RCVD:\n-\tcase I40IW_CM_STATE_UNKNOWN:\n-\tcase I40IW_CM_STATE_INITED:\n-\tcase I40IW_CM_STATE_CLOSED:\n-\tcase I40IW_CM_STATE_LISTENER_DESTROYED:\n-\t\ti40iw_rem_ref_cm_node(cm_node);\n-\t\tbreak;\n-\tcase I40IW_CM_STATE_OFFLOADED:\n-\t\tif (cm_node->send_entry)\n-\t\t\ti40iw_pr_err(\"send_entry\\n\");\n-\t\ti40iw_rem_ref_cm_node(cm_node);\n-\t\tbreak;\n-\t}\n-\treturn ret;\n-}\n-\n-/**\n- * i40iw_receive_ilq - recv an ETHERNET packet, and process it\n- * through CM\n- * @vsi: pointer to the vsi structure\n- * @rbuf: receive buffer\n- */\n-void i40iw_receive_ilq(struct i40iw_sc_vsi *vsi, struct i40iw_puda_buf *rbuf)\n-{\n-\tstruct i40iw_cm_node *cm_node;\n-\tstruct i40iw_cm_listener *listener;\n-\tstruct iphdr *iph;\n-\tstruct ipv6hdr *ip6h;\n-\tstruct tcphdr *tcph;\n-\tstruct i40iw_cm_info cm_info;\n-\tstruct i40iw_sc_dev *dev = vsi->dev;\n-\tstruct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev;\n-\tstruct i40iw_cm_core *cm_core = &iwdev->cm_core;\n-\tstruct vlan_ethhdr *ethh;\n-\tu16 vtag;\n-\n-\t/* if vlan, then 
maclen = 18 else 14 */\n-\tiph = (struct iphdr *)rbuf->iph;\n-\tmemset(&cm_info, 0, sizeof(cm_info));\n-\n-\ti40iw_debug_buf(dev,\n-\t\t\tI40IW_DEBUG_ILQ,\n-\t\t\t\"RECEIVE ILQ BUFFER\",\n-\t\t\trbuf->mem.va,\n-\t\t\trbuf->totallen);\n-\tethh = (struct vlan_ethhdr *)rbuf->mem.va;\n-\n-\tif (ethh->h_vlan_proto == htons(ETH_P_8021Q)) {\n-\t\tvtag = ntohs(ethh->h_vlan_TCI);\n-\t\tcm_info.user_pri = (vtag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;\n-\t\tcm_info.vlan_id = vtag & VLAN_VID_MASK;\n-\t\ti40iw_debug(cm_core->dev,\n-\t\t\t I40IW_DEBUG_CM,\n-\t\t\t \"%s vlan_id=%d\\n\",\n-\t\t\t __func__,\n-\t\t\t cm_info.vlan_id);\n-\t} else {\n-\t\tcm_info.vlan_id = I40IW_NO_VLAN;\n-\t}\n-\ttcph = (struct tcphdr *)rbuf->tcph;\n-\n-\tif (rbuf->ipv4) {\n-\t\tcm_info.loc_addr[0] = ntohl(iph->daddr);\n-\t\tcm_info.rem_addr[0] = ntohl(iph->saddr);\n-\t\tcm_info.ipv4 = true;\n-\t\tcm_info.tos = iph->tos;\n-\t} else {\n-\t\tip6h = (struct ipv6hdr *)rbuf->iph;\n-\t\ti40iw_copy_ip_ntohl(cm_info.loc_addr,\n-\t\t\t\t ip6h->daddr.in6_u.u6_addr32);\n-\t\ti40iw_copy_ip_ntohl(cm_info.rem_addr,\n-\t\t\t\t ip6h->saddr.in6_u.u6_addr32);\n-\t\tcm_info.ipv4 = false;\n-\t\tcm_info.tos = (ip6h->priority << 4) | (ip6h->flow_lbl[0] >> 4);\n-\t}\n-\tcm_info.loc_port = ntohs(tcph->dest);\n-\tcm_info.rem_port = ntohs(tcph->source);\n-\tcm_node = i40iw_find_node(cm_core,\n-\t\t\t\t cm_info.rem_port,\n-\t\t\t\t cm_info.rem_addr,\n-\t\t\t\t cm_info.loc_port,\n-\t\t\t\t cm_info.loc_addr,\n-\t\t\t\t true,\n-\t\t\t\t false);\n-\n-\tif (!cm_node) {\n-\t\t/* Only type of packet accepted are for */\n-\t\t/* the PASSIVE open (syn only) */\n-\t\tif (!tcph->syn || tcph->ack)\n-\t\t\treturn;\n-\t\tlistener =\n-\t\t i40iw_find_listener(cm_core,\n-\t\t\t\t\tcm_info.loc_addr,\n-\t\t\t\t\tcm_info.loc_port,\n-\t\t\t\t\tcm_info.vlan_id,\n-\t\t\t\t\tI40IW_CM_LISTENER_ACTIVE_STATE);\n-\t\tif (!listener) {\n-\t\t\tcm_info.cm_id = NULL;\n-\t\t\ti40iw_debug(cm_core->dev,\n-\t\t\t\t I40IW_DEBUG_CM,\n-\t\t\t\t \"%s no listener found\\n\",\n-\t\t\t\t __func__);\n-\t\t\treturn;\n-\t\t}\n-\t\tcm_info.cm_id = listener->cm_id;\n-\t\tcm_node = i40iw_make_cm_node(cm_core, iwdev, &cm_info, listener);\n-\t\tif (!cm_node) {\n-\t\t\ti40iw_debug(cm_core->dev,\n-\t\t\t\t I40IW_DEBUG_CM,\n-\t\t\t\t \"%s allocate node failed\\n\",\n-\t\t\t\t __func__);\n-\t\t\tatomic_dec(&listener->ref_count);\n-\t\t\treturn;\n-\t\t}\n-\t\tif (!tcph->rst && !tcph->fin) {\n-\t\t\tcm_node->state = I40IW_CM_STATE_LISTENING;\n-\t\t} else {\n-\t\t\ti40iw_rem_ref_cm_node(cm_node);\n-\t\t\treturn;\n-\t\t}\n-\t\tatomic_inc(&cm_node->ref_count);\n-\t} else if (cm_node->state == I40IW_CM_STATE_OFFLOADED) {\n-\t\ti40iw_rem_ref_cm_node(cm_node);\n-\t\treturn;\n-\t}\n-\ti40iw_process_packet(cm_node, rbuf);\n-\ti40iw_rem_ref_cm_node(cm_node);\n-}\n-\n-/**\n- * i40iw_setup_cm_core - allocate a top level instance of a cm\n- * core\n- * @iwdev: iwarp device structure\n- */\n-int i40iw_setup_cm_core(struct i40iw_device *iwdev)\n-{\n-\tstruct i40iw_cm_core *cm_core = &iwdev->cm_core;\n-\n-\tcm_core->iwdev = iwdev;\n-\tcm_core->dev = &iwdev->sc_dev;\n-\n-\tINIT_LIST_HEAD(&cm_core->accelerated_list);\n-\tINIT_LIST_HEAD(&cm_core->non_accelerated_list);\n-\tINIT_LIST_HEAD(&cm_core->listen_nodes);\n-\n-\ttimer_setup(&cm_core->tcp_timer, i40iw_cm_timer_tick, 0);\n-\n-\tspin_lock_init(&cm_core->ht_lock);\n-\tspin_lock_init(&cm_core->listen_list_lock);\n-\tspin_lock_init(&cm_core->apbvt_lock);\n-\n-\tcm_core->event_wq = alloc_ordered_workqueue(\"iwewq\",\n-\t\t\t\t\t\t WQ_MEM_RECLAIM);\n-\tif 
(!cm_core->event_wq)\n-\t\tgoto error;\n-\n-\tcm_core->disconn_wq = alloc_ordered_workqueue(\"iwdwq\",\n-\t\t\t\t\t\t WQ_MEM_RECLAIM);\n-\tif (!cm_core->disconn_wq)\n-\t\tgoto error;\n-\n-\treturn 0;\n-error:\n-\ti40iw_cleanup_cm_core(&iwdev->cm_core);\n-\n-\treturn -ENOMEM;\n-}\n-\n-/**\n- * i40iw_cleanup_cm_core - deallocate a top level instance of a\n- * cm core\n- * @cm_core: cm's core\n- */\n-void i40iw_cleanup_cm_core(struct i40iw_cm_core *cm_core)\n-{\n-\tunsigned long flags;\n-\n-\tif (!cm_core)\n-\t\treturn;\n-\n-\tspin_lock_irqsave(&cm_core->ht_lock, flags);\n-\tif (timer_pending(&cm_core->tcp_timer))\n-\t\tdel_timer_sync(&cm_core->tcp_timer);\n-\tspin_unlock_irqrestore(&cm_core->ht_lock, flags);\n-\n-\tif (cm_core->event_wq)\n-\t\tdestroy_workqueue(cm_core->event_wq);\n-\tif (cm_core->disconn_wq)\n-\t\tdestroy_workqueue(cm_core->disconn_wq);\n-}\n-\n-/**\n- * i40iw_init_tcp_ctx - setup qp context\n- * @cm_node: connection's node\n- * @tcp_info: offload info for tcp\n- * @iwqp: associate qp for the connection\n- */\n-static void i40iw_init_tcp_ctx(struct i40iw_cm_node *cm_node,\n-\t\t\t struct i40iw_tcp_offload_info *tcp_info,\n-\t\t\t struct i40iw_qp *iwqp)\n-{\n-\ttcp_info->ipv4 = cm_node->ipv4;\n-\ttcp_info->drop_ooo_seg = true;\n-\ttcp_info->wscale = true;\n-\ttcp_info->ignore_tcp_opt = true;\n-\ttcp_info->ignore_tcp_uns_opt = true;\n-\ttcp_info->no_nagle = false;\n-\n-\ttcp_info->ttl = I40IW_DEFAULT_TTL;\n-\ttcp_info->rtt_var = cpu_to_le32(I40IW_DEFAULT_RTT_VAR);\n-\ttcp_info->ss_thresh = cpu_to_le32(I40IW_DEFAULT_SS_THRESH);\n-\ttcp_info->rexmit_thresh = I40IW_DEFAULT_REXMIT_THRESH;\n-\n-\ttcp_info->tcp_state = I40IW_TCP_STATE_ESTABLISHED;\n-\ttcp_info->snd_wscale = cm_node->tcp_cntxt.snd_wscale;\n-\ttcp_info->rcv_wscale = cm_node->tcp_cntxt.rcv_wscale;\n-\n-\ttcp_info->snd_nxt = cpu_to_le32(cm_node->tcp_cntxt.loc_seq_num);\n-\ttcp_info->snd_wnd = cpu_to_le32(cm_node->tcp_cntxt.snd_wnd);\n-\ttcp_info->rcv_nxt = cpu_to_le32(cm_node->tcp_cntxt.rcv_nxt);\n-\ttcp_info->snd_max = cpu_to_le32(cm_node->tcp_cntxt.loc_seq_num);\n-\n-\ttcp_info->snd_una = cpu_to_le32(cm_node->tcp_cntxt.loc_seq_num);\n-\ttcp_info->cwnd = cpu_to_le32(2 * cm_node->tcp_cntxt.mss);\n-\ttcp_info->snd_wl1 = cpu_to_le32(cm_node->tcp_cntxt.rcv_nxt);\n-\ttcp_info->snd_wl2 = cpu_to_le32(cm_node->tcp_cntxt.loc_seq_num);\n-\ttcp_info->max_snd_window = cpu_to_le32(cm_node->tcp_cntxt.max_snd_wnd);\n-\ttcp_info->rcv_wnd = cpu_to_le32(cm_node->tcp_cntxt.rcv_wnd <<\n-\t\t\t\t\tcm_node->tcp_cntxt.rcv_wscale);\n-\n-\ttcp_info->flow_label = 0;\n-\ttcp_info->snd_mss = cpu_to_le32(((u32)cm_node->tcp_cntxt.mss));\n-\tif (cm_node->vlan_id <= VLAN_VID_MASK) {\n-\t\ttcp_info->insert_vlan_tag = true;\n-\t\ttcp_info->vlan_tag = cpu_to_le16(((u16)cm_node->user_pri << I40IW_VLAN_PRIO_SHIFT) |\n-\t\t\t\t\t\t cm_node->vlan_id);\n-\t}\n-\tif (cm_node->ipv4) {\n-\t\ttcp_info->src_port = cpu_to_le16(cm_node->loc_port);\n-\t\ttcp_info->dst_port = cpu_to_le16(cm_node->rem_port);\n-\n-\t\ttcp_info->dest_ip_addr3 = cpu_to_le32(cm_node->rem_addr[0]);\n-\t\ttcp_info->local_ipaddr3 = cpu_to_le32(cm_node->loc_addr[0]);\n-\t\ttcp_info->arp_idx =\n-\t\t\tcpu_to_le16((u16)i40iw_arp_table(\n-\t\t\t\t\t\t\t iwqp->iwdev,\n-\t\t\t\t\t\t\t &tcp_info->dest_ip_addr3,\n-\t\t\t\t\t\t\t true,\n-\t\t\t\t\t\t\t NULL,\n-\t\t\t\t\t\t\t I40IW_ARP_RESOLVE));\n-\t} else {\n-\t\ttcp_info->src_port = cpu_to_le16(cm_node->loc_port);\n-\t\ttcp_info->dst_port = cpu_to_le16(cm_node->rem_port);\n-\t\ttcp_info->dest_ip_addr0 = 
cpu_to_le32(cm_node->rem_addr[0]);\n-\t\ttcp_info->dest_ip_addr1 = cpu_to_le32(cm_node->rem_addr[1]);\n-\t\ttcp_info->dest_ip_addr2 = cpu_to_le32(cm_node->rem_addr[2]);\n-\t\ttcp_info->dest_ip_addr3 = cpu_to_le32(cm_node->rem_addr[3]);\n-\t\ttcp_info->local_ipaddr0 = cpu_to_le32(cm_node->loc_addr[0]);\n-\t\ttcp_info->local_ipaddr1 = cpu_to_le32(cm_node->loc_addr[1]);\n-\t\ttcp_info->local_ipaddr2 = cpu_to_le32(cm_node->loc_addr[2]);\n-\t\ttcp_info->local_ipaddr3 = cpu_to_le32(cm_node->loc_addr[3]);\n-\t\ttcp_info->arp_idx =\n-\t\t\tcpu_to_le16((u16)i40iw_arp_table(\n-\t\t\t\t\t\t\t iwqp->iwdev,\n-\t\t\t\t\t\t\t &tcp_info->dest_ip_addr0,\n-\t\t\t\t\t\t\t false,\n-\t\t\t\t\t\t\t NULL,\n-\t\t\t\t\t\t\t I40IW_ARP_RESOLVE));\n-\t}\n-}\n-\n-/**\n- * i40iw_cm_init_tsa_conn - setup qp for RTS\n- * @iwqp: associate qp for the connection\n- * @cm_node: connection's node\n- */\n-static void i40iw_cm_init_tsa_conn(struct i40iw_qp *iwqp,\n-\t\t\t\t struct i40iw_cm_node *cm_node)\n-{\n-\tstruct i40iw_tcp_offload_info tcp_info;\n-\tstruct i40iwarp_offload_info *iwarp_info;\n-\tstruct i40iw_qp_host_ctx_info *ctx_info;\n-\tstruct i40iw_device *iwdev = iwqp->iwdev;\n-\tstruct i40iw_sc_dev *dev = &iwqp->iwdev->sc_dev;\n-\n-\tmemset(&tcp_info, 0x00, sizeof(struct i40iw_tcp_offload_info));\n-\tiwarp_info = &iwqp->iwarp_info;\n-\tctx_info = &iwqp->ctx_info;\n-\n-\tctx_info->tcp_info = &tcp_info;\n-\tctx_info->send_cq_num = iwqp->iwscq->sc_cq.cq_uk.cq_id;\n-\tctx_info->rcv_cq_num = iwqp->iwrcq->sc_cq.cq_uk.cq_id;\n-\n-\tiwarp_info->ord_size = cm_node->ord_size;\n-\tiwarp_info->ird_size = i40iw_derive_hw_ird_setting(cm_node->ird_size);\n-\n-\tif (iwarp_info->ord_size == 1)\n-\t\tiwarp_info->ord_size = 2;\n-\n-\tiwarp_info->rd_enable = true;\n-\tiwarp_info->rdmap_ver = 1;\n-\tiwarp_info->ddp_ver = 1;\n-\n-\tiwarp_info->pd_id = iwqp->iwpd->sc_pd.pd_id;\n-\n-\tctx_info->tcp_info_valid = true;\n-\tctx_info->iwarp_info_valid = true;\n-\tctx_info->add_to_qoslist = true;\n-\tctx_info->user_pri = cm_node->user_pri;\n-\n-\ti40iw_init_tcp_ctx(cm_node, &tcp_info, iwqp);\n-\tif (cm_node->snd_mark_en) {\n-\t\tiwarp_info->snd_mark_en = true;\n-\t\tiwarp_info->snd_mark_offset = (tcp_info.snd_nxt &\n-\t\t\t\tSNDMARKER_SEQNMASK) + cm_node->lsmm_size;\n-\t}\n-\n-\tcm_node->state = I40IW_CM_STATE_OFFLOADED;\n-\ttcp_info.tcp_state = I40IW_TCP_STATE_ESTABLISHED;\n-\ttcp_info.src_mac_addr_idx = iwdev->mac_ip_table_idx;\n-\ttcp_info.tos = cm_node->tos;\n-\n-\tdev->iw_priv_qp_ops->qp_setctx(&iwqp->sc_qp, (u64 *)(iwqp->host_ctx.va), ctx_info);\n-\n-\t/* once tcp_info is set, no need to do it again */\n-\tctx_info->tcp_info_valid = false;\n-\tctx_info->iwarp_info_valid = false;\n-\tctx_info->add_to_qoslist = false;\n-}\n-\n-/**\n- * i40iw_cm_disconn - when a connection is being closed\n- * @iwqp: associate qp for the connection\n- */\n-void i40iw_cm_disconn(struct i40iw_qp *iwqp)\n-{\n-\tstruct disconn_work *work;\n-\tstruct i40iw_device *iwdev = iwqp->iwdev;\n-\tstruct i40iw_cm_core *cm_core = &iwdev->cm_core;\n-\tunsigned long flags;\n-\n-\twork = kzalloc(sizeof(*work), GFP_ATOMIC);\n-\tif (!work)\n-\t\treturn;\t/* Timer will clean up */\n-\n-\tspin_lock_irqsave(&iwdev->qptable_lock, flags);\n-\tif (!iwdev->qp_table[iwqp->ibqp.qp_num]) {\n-\t\tspin_unlock_irqrestore(&iwdev->qptable_lock, flags);\n-\t\ti40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_CM,\n-\t\t\t \"%s qp_id %d is already freed\\n\",\n-\t\t\t __func__, 
iwqp->ibqp.qp_num);\n-\t\tkfree(work);\n-\t\treturn;\n-\t}\n-\ti40iw_add_ref(&iwqp->ibqp);\n-\tspin_unlock_irqrestore(&iwdev->qptable_lock, flags);\n-\n-\twork->iwqp = iwqp;\n-\tINIT_WORK(&work->work, i40iw_disconnect_worker);\n-\tqueue_work(cm_core->disconn_wq, &work->work);\n-\treturn;\n-}\n-\n-/**\n- * i40iw_qp_disconnect - free qp and close cm\n- * @iwqp: associate qp for the connection\n- */\n-static void i40iw_qp_disconnect(struct i40iw_qp *iwqp)\n-{\n-\tstruct i40iw_device *iwdev;\n-\tstruct i40iw_ib_device *iwibdev;\n-\n-\tiwdev = to_iwdev(iwqp->ibqp.device);\n-\tif (!iwdev) {\n-\t\ti40iw_pr_err(\"iwdev == NULL\\n\");\n-\t\treturn;\n-\t}\n-\n-\tiwibdev = iwdev->iwibdev;\n-\n-\tif (iwqp->active_conn) {\n-\t\t/* indicate this connection is NOT active */\n-\t\tiwqp->active_conn = 0;\n-\t} else {\n-\t\t/* Need to free the Last Streaming Mode Message */\n-\t\tif (iwqp->ietf_mem.va) {\n-\t\t\tif (iwqp->lsmm_mr)\n-\t\t\t\tiwibdev->ibdev.ops.dereg_mr(iwqp->lsmm_mr,\n-\t\t\t\t\t\t\t NULL);\n-\t\t\ti40iw_free_dma_mem(iwdev->sc_dev.hw, &iwqp->ietf_mem);\n-\t\t}\n-\t}\n-\n-\t/* close the CM node down if it is still active */\n-\tif (iwqp->cm_node) {\n-\t\ti40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_CM, \"%s Call close API\\n\", __func__);\n-\t\ti40iw_cm_close(iwqp->cm_node);\n-\t}\n-}\n-\n-/**\n- * i40iw_cm_disconn_true - called by worker thread to disconnect qp\n- * @iwqp: associate qp for the connection\n- */\n-static void i40iw_cm_disconn_true(struct i40iw_qp *iwqp)\n-{\n-\tstruct iw_cm_id *cm_id;\n-\tstruct i40iw_device *iwdev;\n-\tstruct i40iw_sc_qp *qp = &iwqp->sc_qp;\n-\tu16 last_ae;\n-\tu8 original_hw_tcp_state;\n-\tu8 original_ibqp_state;\n-\tint disconn_status = 0;\n-\tint issue_disconn = 0;\n-\tint issue_close = 0;\n-\tint issue_flush = 0;\n-\tstruct ib_event ibevent;\n-\tunsigned long flags;\n-\tint ret;\n-\n-\tif (!iwqp) {\n-\t\ti40iw_pr_err(\"iwqp == NULL\\n\");\n-\t\treturn;\n-\t}\n-\n-\tspin_lock_irqsave(&iwqp->lock, flags);\n-\tcm_id = iwqp->cm_id;\n-\t/* make sure we havent already closed this connection */\n-\tif (!cm_id) {\n-\t\tspin_unlock_irqrestore(&iwqp->lock, flags);\n-\t\treturn;\n-\t}\n-\n-\tiwdev = to_iwdev(iwqp->ibqp.device);\n-\n-\toriginal_hw_tcp_state = iwqp->hw_tcp_state;\n-\toriginal_ibqp_state = iwqp->ibqp_state;\n-\tlast_ae = iwqp->last_aeq;\n-\n-\tif (qp->term_flags) {\n-\t\tissue_disconn = 1;\n-\t\tissue_close = 1;\n-\t\tiwqp->cm_id = NULL;\n-\t\t/*When term timer expires after cm_timer, don't want\n-\t\t *terminate-handler to issue cm_disconn which can re-free\n-\t\t *a QP even after its refcnt=0.\n-\t\t */\n-\t\ti40iw_terminate_del_timer(qp);\n-\t\tif (!iwqp->flush_issued) {\n-\t\t\tiwqp->flush_issued = 1;\n-\t\t\tissue_flush = 1;\n-\t\t}\n-\t} else if ((original_hw_tcp_state == I40IW_TCP_STATE_CLOSE_WAIT) ||\n-\t\t ((original_ibqp_state == IB_QPS_RTS) &&\n-\t\t (last_ae == I40IW_AE_LLP_CONNECTION_RESET))) {\n-\t\tissue_disconn = 1;\n-\t\tif (last_ae == I40IW_AE_LLP_CONNECTION_RESET)\n-\t\t\tdisconn_status = -ECONNRESET;\n-\t}\n-\n-\tif (((original_hw_tcp_state == I40IW_TCP_STATE_CLOSED) ||\n-\t (original_hw_tcp_state == I40IW_TCP_STATE_TIME_WAIT) ||\n-\t (last_ae == I40IW_AE_RDMAP_ROE_BAD_LLP_CLOSE) ||\n-\t (last_ae == I40IW_AE_LLP_CONNECTION_RESET) ||\n-\t iwdev->reset)) {\n-\t\tissue_close = 1;\n-\t\tiwqp->cm_id = NULL;\n-\t\tif (!iwqp->flush_issued) {\n-\t\t\tiwqp->flush_issued = 1;\n-\t\t\tissue_flush = 1;\n-\t\t}\n-\t}\n-\n-\tspin_unlock_irqrestore(&iwqp->lock, flags);\n-\tif (issue_flush && !iwqp->destroyed) {\n-\t\t/* Flush the queues 
*/\n-\t\ti40iw_flush_wqes(iwdev, iwqp);\n-\n-\t\tif (qp->term_flags && iwqp->ibqp.event_handler) {\n-\t\t\tibevent.device = iwqp->ibqp.device;\n-\t\t\tibevent.event = (qp->eventtype == TERM_EVENT_QP_FATAL) ?\n-\t\t\t\t\tIB_EVENT_QP_FATAL : IB_EVENT_QP_ACCESS_ERR;\n-\t\t\tibevent.element.qp = &iwqp->ibqp;\n-\t\t\tiwqp->ibqp.event_handler(&ibevent, iwqp->ibqp.qp_context);\n-\t\t}\n-\t}\n-\n-\tif (cm_id && cm_id->event_handler) {\n-\t\tif (issue_disconn) {\n-\t\t\tret = i40iw_send_cm_event(NULL,\n-\t\t\t\t\t\t cm_id,\n-\t\t\t\t\t\t IW_CM_EVENT_DISCONNECT,\n-\t\t\t\t\t\t disconn_status);\n-\n-\t\t\tif (ret)\n-\t\t\t\ti40iw_debug(&iwdev->sc_dev,\n-\t\t\t\t\t I40IW_DEBUG_CM,\n-\t\t\t\t\t \"disconnect event failed %s: - cm_id = %p\\n\",\n-\t\t\t\t\t __func__, cm_id);\n-\t\t}\n-\t\tif (issue_close) {\n-\t\t\ti40iw_qp_disconnect(iwqp);\n-\t\t\tcm_id->provider_data = iwqp;\n-\t\t\tret = i40iw_send_cm_event(NULL, cm_id, IW_CM_EVENT_CLOSE, 0);\n-\t\t\tif (ret)\n-\t\t\t\ti40iw_debug(&iwdev->sc_dev,\n-\t\t\t\t\t I40IW_DEBUG_CM,\n-\t\t\t\t\t \"close event failed %s: - cm_id = %p\\n\",\n-\t\t\t\t\t __func__, cm_id);\n-\t\t\tcm_id->rem_ref(cm_id);\n-\t\t}\n-\t}\n-}\n-\n-/**\n- * i40iw_disconnect_worker - worker for connection close\n- * @work: points or disconn structure\n- */\n-static void i40iw_disconnect_worker(struct work_struct *work)\n-{\n-\tstruct disconn_work *dwork = container_of(work, struct disconn_work, work);\n-\tstruct i40iw_qp *iwqp = dwork->iwqp;\n-\n-\tkfree(dwork);\n-\ti40iw_cm_disconn_true(iwqp);\n-\ti40iw_rem_ref(&iwqp->ibqp);\n-}\n-\n-/**\n- * i40iw_accept - registered call for connection to be accepted\n- * @cm_id: cm information for passive connection\n- * @conn_param: accpet parameters\n- */\n-int i40iw_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)\n-{\n-\tstruct ib_qp *ibqp;\n-\tstruct i40iw_qp *iwqp;\n-\tstruct i40iw_device *iwdev;\n-\tstruct i40iw_sc_dev *dev;\n-\tstruct i40iw_cm_core *cm_core;\n-\tstruct i40iw_cm_node *cm_node;\n-\tstruct ib_qp_attr attr;\n-\tint passive_state;\n-\tstruct ib_mr *ibmr;\n-\tstruct i40iw_pd *iwpd;\n-\tu16 buf_len = 0;\n-\tstruct i40iw_kmem_info accept;\n-\tenum i40iw_status_code status;\n-\tu64 tagged_offset;\n-\tunsigned long flags;\n-\n-\tmemset(&attr, 0, sizeof(attr));\n-\tibqp = i40iw_get_qp(cm_id->device, conn_param->qpn);\n-\tif (!ibqp)\n-\t\treturn -EINVAL;\n-\n-\tiwqp = to_iwqp(ibqp);\n-\tiwdev = iwqp->iwdev;\n-\tdev = &iwdev->sc_dev;\n-\tcm_core = &iwdev->cm_core;\n-\tcm_node = (struct i40iw_cm_node *)cm_id->provider_data;\n-\n-\tif (((struct sockaddr_in *)&cm_id->local_addr)->sin_family == AF_INET) {\n-\t\tcm_node->ipv4 = true;\n-\t\tcm_node->vlan_id = i40iw_get_vlan_ipv4(cm_node->loc_addr);\n-\t} else {\n-\t\tcm_node->ipv4 = false;\n-\t\ti40iw_netdev_vlan_ipv6(cm_node->loc_addr, &cm_node->vlan_id);\n-\t}\n-\ti40iw_debug(cm_node->dev,\n-\t\t I40IW_DEBUG_CM,\n-\t\t \"Accept vlan_id=%d\\n\",\n-\t\t cm_node->vlan_id);\n-\tif (cm_node->state == I40IW_CM_STATE_LISTENER_DESTROYED) {\n-\t\tif (cm_node->loopbackpartner)\n-\t\t\ti40iw_rem_ref_cm_node(cm_node->loopbackpartner);\n-\t\ti40iw_rem_ref_cm_node(cm_node);\n-\t\treturn -EINVAL;\n-\t}\n-\n-\tpassive_state = atomic_add_return(1, &cm_node->passive_state);\n-\tif (passive_state == I40IW_SEND_RESET_EVENT) {\n-\t\ti40iw_rem_ref_cm_node(cm_node);\n-\t\treturn -ECONNRESET;\n-\t}\n-\n-\tcm_node->cm_core->stats_accepts++;\n-\tiwqp->cm_node = (void *)cm_node;\n-\tcm_node->iwqp = iwqp;\n-\n-\tbuf_len = conn_param->private_data_len + I40IW_MAX_IETF_SIZE;\n-\n-\tstatus = 
i40iw_allocate_dma_mem(dev->hw, &iwqp->ietf_mem, buf_len, 1);\n-\n-\tif (status)\n-\t\treturn -ENOMEM;\n-\tcm_node->pdata.size = conn_param->private_data_len;\n-\taccept.addr = iwqp->ietf_mem.va;\n-\taccept.size = i40iw_cm_build_mpa_frame(cm_node, &accept, MPA_KEY_REPLY);\n-\tmemcpy(accept.addr + accept.size, conn_param->private_data,\n-\t conn_param->private_data_len);\n-\n-\t/* setup our first outgoing iWarp send WQE (the IETF frame response) */\n-\tif ((cm_node->ipv4 &&\n-\t !i40iw_ipv4_is_loopback(cm_node->loc_addr[0], cm_node->rem_addr[0])) ||\n-\t (!cm_node->ipv4 &&\n-\t !i40iw_ipv6_is_loopback(cm_node->loc_addr, cm_node->rem_addr))) {\n-\t\tiwpd = iwqp->iwpd;\n-\t\ttagged_offset = (uintptr_t)iwqp->ietf_mem.va;\n-\t\tibmr = i40iw_reg_phys_mr(&iwpd->ibpd,\n-\t\t\t\t\t iwqp->ietf_mem.pa,\n-\t\t\t\t\t buf_len,\n-\t\t\t\t\t IB_ACCESS_LOCAL_WRITE,\n-\t\t\t\t\t &tagged_offset);\n-\t\tif (IS_ERR(ibmr)) {\n-\t\t\ti40iw_free_dma_mem(dev->hw, &iwqp->ietf_mem);\n-\t\t\treturn -ENOMEM;\n-\t\t}\n-\n-\t\tibmr->pd = &iwpd->ibpd;\n-\t\tibmr->device = iwpd->ibpd.device;\n-\t\tiwqp->lsmm_mr = ibmr;\n-\t\tif (iwqp->page)\n-\t\t\tiwqp->sc_qp.qp_uk.sq_base = kmap(iwqp->page);\n-\t\tdev->iw_priv_qp_ops->qp_send_lsmm(&iwqp->sc_qp,\n-\t\t\t\t\t\t\tiwqp->ietf_mem.va,\n-\t\t\t\t\t\t\t(accept.size + conn_param->private_data_len),\n-\t\t\t\t\t\t\tibmr->lkey);\n-\n-\t} else {\n-\t\tif (iwqp->page)\n-\t\t\tiwqp->sc_qp.qp_uk.sq_base = kmap(iwqp->page);\n-\t\tdev->iw_priv_qp_ops->qp_send_lsmm(&iwqp->sc_qp, NULL, 0, 0);\n-\t}\n-\n-\tif (iwqp->page)\n-\t\tkunmap(iwqp->page);\n-\n-\tiwqp->cm_id = cm_id;\n-\tcm_node->cm_id = cm_id;\n-\n-\tcm_id->provider_data = (void *)iwqp;\n-\tiwqp->active_conn = 0;\n-\n-\tcm_node->lsmm_size = accept.size + conn_param->private_data_len;\n-\ti40iw_cm_init_tsa_conn(iwqp, cm_node);\n-\tcm_id->add_ref(cm_id);\n-\ti40iw_add_ref(&iwqp->ibqp);\n-\n-\tattr.qp_state = IB_QPS_RTS;\n-\tcm_node->qhash_set = false;\n-\ti40iw_modify_qp(&iwqp->ibqp, &attr, IB_QP_STATE, NULL);\n-\n-\tcm_node->accelerated = true;\n-\tspin_lock_irqsave(&cm_core->ht_lock, flags);\n-\tlist_move_tail(&cm_node->list, &cm_core->accelerated_list);\n-\tspin_unlock_irqrestore(&cm_core->ht_lock, flags);\n-\n-\tstatus =\n-\t\ti40iw_send_cm_event(cm_node, cm_id, IW_CM_EVENT_ESTABLISHED, 0);\n-\tif (status)\n-\t\ti40iw_debug(dev, I40IW_DEBUG_CM, \"error sending cm event - ESTABLISHED\\n\");\n-\n-\tif (cm_node->loopbackpartner) {\n-\t\tcm_node->loopbackpartner->pdata.size = conn_param->private_data_len;\n-\n-\t\t/* copy entire MPA frame to our cm_node's frame */\n-\t\tmemcpy(cm_node->loopbackpartner->pdata_buf,\n-\t\t conn_param->private_data,\n-\t\t conn_param->private_data_len);\n-\t\ti40iw_create_event(cm_node->loopbackpartner, I40IW_CM_EVENT_CONNECTED);\n-\t}\n-\n-\tif (cm_node->accept_pend) {\n-\t\tatomic_dec(&cm_node->listener->pend_accepts_cnt);\n-\t\tcm_node->accept_pend = 0;\n-\t}\n-\treturn 0;\n-}\n-\n-/**\n- * i40iw_reject - registered call for connection to be rejected\n- * @cm_id: cm information for passive connection\n- * @pdata: private data to be sent\n- * @pdata_len: private data length\n- */\n-int i40iw_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)\n-{\n-\tstruct i40iw_device *iwdev;\n-\tstruct i40iw_cm_node *cm_node;\n-\tstruct i40iw_cm_node *loopback;\n-\n-\tcm_node = (struct i40iw_cm_node *)cm_id->provider_data;\n-\tloopback = cm_node->loopbackpartner;\n-\tcm_node->cm_id = cm_id;\n-\tcm_node->pdata.size = pdata_len;\n-\n-\tiwdev = to_iwdev(cm_id->device);\n-\tif (!iwdev)\n-\t\treturn 
-EINVAL;\n-\tcm_node->cm_core->stats_rejects++;\n-\n-\tif (pdata_len + sizeof(struct ietf_mpa_v2) > MAX_CM_BUFFER)\n-\t\treturn -EINVAL;\n-\n-\tif (loopback) {\n-\t\tmemcpy(&loopback->pdata_buf, pdata, pdata_len);\n-\t\tloopback->pdata.size = pdata_len;\n-\t}\n-\n-\treturn i40iw_cm_reject(cm_node, pdata, pdata_len);\n-}\n-\n-/**\n- * i40iw_connect - registered call for connection to be established\n- * @cm_id: cm information for passive connection\n- * @conn_param: Information about the connection\n- */\n-int i40iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)\n-{\n-\tstruct ib_qp *ibqp;\n-\tstruct i40iw_qp *iwqp;\n-\tstruct i40iw_device *iwdev;\n-\tstruct i40iw_cm_node *cm_node;\n-\tstruct i40iw_cm_info cm_info;\n-\tstruct sockaddr_in *laddr;\n-\tstruct sockaddr_in *raddr;\n-\tstruct sockaddr_in6 *laddr6;\n-\tstruct sockaddr_in6 *raddr6;\n-\tint ret = 0;\n-\n-\tibqp = i40iw_get_qp(cm_id->device, conn_param->qpn);\n-\tif (!ibqp)\n-\t\treturn -EINVAL;\n-\tiwqp = to_iwqp(ibqp);\n-\tif (!iwqp)\n-\t\treturn -EINVAL;\n-\tiwdev = to_iwdev(iwqp->ibqp.device);\n-\tif (!iwdev)\n-\t\treturn -EINVAL;\n-\n-\tladdr = (struct sockaddr_in *)&cm_id->m_local_addr;\n-\traddr = (struct sockaddr_in *)&cm_id->m_remote_addr;\n-\tladdr6 = (struct sockaddr_in6 *)&cm_id->m_local_addr;\n-\traddr6 = (struct sockaddr_in6 *)&cm_id->m_remote_addr;\n-\n-\tif (!(laddr->sin_port) || !(raddr->sin_port))\n-\t\treturn -EINVAL;\n-\n-\tiwqp->active_conn = 1;\n-\tiwqp->cm_id = NULL;\n-\tcm_id->provider_data = iwqp;\n-\n-\t/* set up the connection params for the node */\n-\tif (cm_id->remote_addr.ss_family == AF_INET) {\n-\t\tcm_info.ipv4 = true;\n-\t\tmemset(cm_info.loc_addr, 0, sizeof(cm_info.loc_addr));\n-\t\tmemset(cm_info.rem_addr, 0, sizeof(cm_info.rem_addr));\n-\t\tcm_info.loc_addr[0] = ntohl(laddr->sin_addr.s_addr);\n-\t\tcm_info.rem_addr[0] = ntohl(raddr->sin_addr.s_addr);\n-\t\tcm_info.loc_port = ntohs(laddr->sin_port);\n-\t\tcm_info.rem_port = ntohs(raddr->sin_port);\n-\t\tcm_info.vlan_id = i40iw_get_vlan_ipv4(cm_info.loc_addr);\n-\t} else {\n-\t\tcm_info.ipv4 = false;\n-\t\ti40iw_copy_ip_ntohl(cm_info.loc_addr,\n-\t\t\t\t laddr6->sin6_addr.in6_u.u6_addr32);\n-\t\ti40iw_copy_ip_ntohl(cm_info.rem_addr,\n-\t\t\t\t raddr6->sin6_addr.in6_u.u6_addr32);\n-\t\tcm_info.loc_port = ntohs(laddr6->sin6_port);\n-\t\tcm_info.rem_port = ntohs(raddr6->sin6_port);\n-\t\ti40iw_netdev_vlan_ipv6(cm_info.loc_addr, &cm_info.vlan_id);\n-\t}\n-\tcm_info.cm_id = cm_id;\n-\tcm_info.tos = cm_id->tos;\n-\tcm_info.user_pri = rt_tos2priority(cm_id->tos);\n-\ti40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_DCB, \"%s TOS:[%d] UP:[%d]\\n\",\n-\t\t __func__, cm_id->tos, cm_info.user_pri);\n-\tcm_id->add_ref(cm_id);\n-\tcm_node = i40iw_create_cm_node(&iwdev->cm_core, iwdev,\n-\t\t\t\t conn_param, &cm_info);\n-\n-\tif (IS_ERR(cm_node)) {\n-\t\tret = PTR_ERR(cm_node);\n-\t\tcm_id->rem_ref(cm_id);\n-\t\treturn ret;\n-\t}\n-\n-\tif ((cm_info.ipv4 && (laddr->sin_addr.s_addr != raddr->sin_addr.s_addr)) ||\n-\t (!cm_info.ipv4 && memcmp(laddr6->sin6_addr.in6_u.u6_addr32,\n-\t\t\t\t raddr6->sin6_addr.in6_u.u6_addr32,\n-\t\t\t\t sizeof(laddr6->sin6_addr.in6_u.u6_addr32)))) {\n-\t\tif (i40iw_manage_qhash(iwdev, &cm_info, I40IW_QHASH_TYPE_TCP_ESTABLISHED,\n-\t\t\t\t I40IW_QHASH_MANAGE_TYPE_ADD, NULL, true)) {\n-\t\t\tret = -EINVAL;\n-\t\t\tgoto err;\n-\t\t}\n-\t\tcm_node->qhash_set = true;\n-\t}\n-\n-\tif (i40iw_manage_apbvt(iwdev, cm_info.loc_port,\n-\t\t\t I40IW_MANAGE_APBVT_ADD)) {\n-\t\tret = -EINVAL;\n-\t\tgoto 
err;\n-\t}\n-\n-\tcm_node->apbvt_set = true;\n-\tiwqp->cm_node = cm_node;\n-\tcm_node->iwqp = iwqp;\n-\tiwqp->cm_id = cm_id;\n-\ti40iw_add_ref(&iwqp->ibqp);\n-\n-\tif (cm_node->state != I40IW_CM_STATE_OFFLOADED) {\n-\t\tcm_node->state = I40IW_CM_STATE_SYN_SENT;\n-\t\tret = i40iw_send_syn(cm_node, 0);\n-\t\tif (ret)\n-\t\t\tgoto err;\n-\t}\n-\n-\tif (cm_node->loopbackpartner) {\n-\t\tcm_node->loopbackpartner->state = I40IW_CM_STATE_MPAREQ_RCVD;\n-\t\ti40iw_create_event(cm_node->loopbackpartner,\n-\t\t\t\t I40IW_CM_EVENT_MPA_REQ);\n-\t}\n-\n-\ti40iw_debug(cm_node->dev,\n-\t\t I40IW_DEBUG_CM,\n-\t\t \"Api - connect(): port=0x%04x, cm_node=%p, cm_id = %p.\\n\",\n-\t\t cm_node->rem_port,\n-\t\t cm_node,\n-\t\t cm_node->cm_id);\n-\n-\treturn 0;\n-\n-err:\n-\tif (cm_info.ipv4)\n-\t\ti40iw_debug(&iwdev->sc_dev,\n-\t\t\t I40IW_DEBUG_CM,\n-\t\t\t \"Api - connect() FAILED: dest addr=%pI4\",\n-\t\t\t cm_info.rem_addr);\n-\telse\n-\t\ti40iw_debug(&iwdev->sc_dev,\n-\t\t\t I40IW_DEBUG_CM,\n-\t\t\t \"Api - connect() FAILED: dest addr=%pI6\",\n-\t\t\t cm_info.rem_addr);\n-\n-\ti40iw_rem_ref_cm_node(cm_node);\n-\tcm_id->rem_ref(cm_id);\n-\tiwdev->cm_core.stats_connect_errs++;\n-\treturn ret;\n-}\n-\n-/**\n- * i40iw_create_listen - registered call creating listener\n- * @cm_id: cm information for passive connection\n- * @backlog: to max accept pending count\n- */\n-int i40iw_create_listen(struct iw_cm_id *cm_id, int backlog)\n-{\n-\tstruct i40iw_device *iwdev;\n-\tstruct i40iw_cm_listener *cm_listen_node;\n-\tstruct i40iw_cm_info cm_info;\n-\tenum i40iw_status_code ret;\n-\tstruct sockaddr_in *laddr;\n-\tstruct sockaddr_in6 *laddr6;\n-\tbool wildcard = false;\n-\n-\tiwdev = to_iwdev(cm_id->device);\n-\tif (!iwdev)\n-\t\treturn -EINVAL;\n-\n-\tladdr = (struct sockaddr_in *)&cm_id->m_local_addr;\n-\tladdr6 = (struct sockaddr_in6 *)&cm_id->m_local_addr;\n-\tmemset(&cm_info, 0, sizeof(cm_info));\n-\tif (laddr->sin_family == AF_INET) {\n-\t\tcm_info.ipv4 = true;\n-\t\tcm_info.loc_addr[0] = ntohl(laddr->sin_addr.s_addr);\n-\t\tcm_info.loc_port = ntohs(laddr->sin_port);\n-\n-\t\tif (laddr->sin_addr.s_addr != INADDR_ANY)\n-\t\t\tcm_info.vlan_id = i40iw_get_vlan_ipv4(cm_info.loc_addr);\n-\t\telse\n-\t\t\twildcard = true;\n-\n-\t} else {\n-\t\tcm_info.ipv4 = false;\n-\t\ti40iw_copy_ip_ntohl(cm_info.loc_addr,\n-\t\t\t\t laddr6->sin6_addr.in6_u.u6_addr32);\n-\t\tcm_info.loc_port = ntohs(laddr6->sin6_port);\n-\t\tif (ipv6_addr_type(&laddr6->sin6_addr) != IPV6_ADDR_ANY)\n-\t\t\ti40iw_netdev_vlan_ipv6(cm_info.loc_addr,\n-\t\t\t\t\t &cm_info.vlan_id);\n-\t\telse\n-\t\t\twildcard = true;\n-\t}\n-\tcm_info.backlog = backlog;\n-\tcm_info.cm_id = cm_id;\n-\n-\tcm_listen_node = i40iw_make_listen_node(&iwdev->cm_core, iwdev, &cm_info);\n-\tif (!cm_listen_node) {\n-\t\ti40iw_pr_err(\"cm_listen_node == NULL\\n\");\n-\t\treturn -ENOMEM;\n-\t}\n-\n-\tcm_id->provider_data = cm_listen_node;\n-\n-\tcm_listen_node->tos = cm_id->tos;\n-\tcm_listen_node->user_pri = rt_tos2priority(cm_id->tos);\n-\tcm_info.user_pri = cm_listen_node->user_pri;\n-\n-\tif (!cm_listen_node->reused_node) {\n-\t\tif (wildcard) {\n-\t\t\tif (cm_info.ipv4)\n-\t\t\t\tret = i40iw_add_mqh_4(iwdev,\n-\t\t\t\t\t\t &cm_info,\n-\t\t\t\t\t\t cm_listen_node);\n-\t\t\telse\n-\t\t\t\tret = i40iw_add_mqh_6(iwdev,\n-\t\t\t\t\t\t &cm_info,\n-\t\t\t\t\t\t cm_listen_node);\n-\t\t\tif (ret)\n-\t\t\t\tgoto error;\n-\n-\t\t\tret = i40iw_manage_apbvt(iwdev,\n-\t\t\t\t\t\t cm_info.loc_port,\n-\t\t\t\t\t\t I40IW_MANAGE_APBVT_ADD);\n-\n-\t\t\tif (ret)\n-\t\t\t\tgoto error;\n-\t\t} else 
{\n-\t\t\tret = i40iw_manage_qhash(iwdev,\n-\t\t\t\t\t\t &cm_info,\n-\t\t\t\t\t\t I40IW_QHASH_TYPE_TCP_SYN,\n-\t\t\t\t\t\t I40IW_QHASH_MANAGE_TYPE_ADD,\n-\t\t\t\t\t\t NULL,\n-\t\t\t\t\t\t true);\n-\t\t\tif (ret)\n-\t\t\t\tgoto error;\n-\t\t\tcm_listen_node->qhash_set = true;\n-\t\t\tret = i40iw_manage_apbvt(iwdev,\n-\t\t\t\t\t\t cm_info.loc_port,\n-\t\t\t\t\t\t I40IW_MANAGE_APBVT_ADD);\n-\t\t\tif (ret)\n-\t\t\t\tgoto error;\n-\t\t}\n-\t}\n-\tcm_id->add_ref(cm_id);\n-\tcm_listen_node->cm_core->stats_listen_created++;\n-\treturn 0;\n- error:\n-\ti40iw_cm_del_listen(&iwdev->cm_core, (void *)cm_listen_node, false);\n-\treturn -EINVAL;\n-}\n-\n-/**\n- * i40iw_destroy_listen - registered call to destroy listener\n- * @cm_id: cm information for passive connection\n- */\n-int i40iw_destroy_listen(struct iw_cm_id *cm_id)\n-{\n-\tstruct i40iw_device *iwdev;\n-\n-\tiwdev = to_iwdev(cm_id->device);\n-\tif (cm_id->provider_data)\n-\t\ti40iw_cm_del_listen(&iwdev->cm_core, cm_id->provider_data, true);\n-\telse\n-\t\ti40iw_pr_err(\"cm_id->provider_data was NULL\\n\");\n-\n-\tcm_id->rem_ref(cm_id);\n-\n-\treturn 0;\n-}\n-\n-/**\n- * i40iw_cm_event_connected - handle connected active node\n- * @event: the info for cm_node of connection\n- */\n-static void i40iw_cm_event_connected(struct i40iw_cm_event *event)\n-{\n-\tstruct i40iw_qp *iwqp;\n-\tstruct i40iw_device *iwdev;\n-\tstruct i40iw_cm_core *cm_core;\n-\tstruct i40iw_cm_node *cm_node;\n-\tstruct i40iw_sc_dev *dev;\n-\tstruct ib_qp_attr attr;\n-\tstruct iw_cm_id *cm_id;\n-\tunsigned long flags;\n-\tint status;\n-\tbool read0;\n-\n-\tcm_node = event->cm_node;\n-\tcm_id = cm_node->cm_id;\n-\tiwqp = (struct i40iw_qp *)cm_id->provider_data;\n-\tiwdev = to_iwdev(iwqp->ibqp.device);\n-\tdev = &iwdev->sc_dev;\n-\tcm_core = &iwdev->cm_core;\n-\n-\tif (iwqp->destroyed) {\n-\t\tstatus = -ETIMEDOUT;\n-\t\tgoto error;\n-\t}\n-\ti40iw_cm_init_tsa_conn(iwqp, cm_node);\n-\tread0 = (cm_node->send_rdma0_op == SEND_RDMA_READ_ZERO);\n-\tif (iwqp->page)\n-\t\tiwqp->sc_qp.qp_uk.sq_base = kmap(iwqp->page);\n-\tdev->iw_priv_qp_ops->qp_send_rtt(&iwqp->sc_qp, read0);\n-\tif (iwqp->page)\n-\t\tkunmap(iwqp->page);\n-\n-\tmemset(&attr, 0, sizeof(attr));\n-\tattr.qp_state = IB_QPS_RTS;\n-\tcm_node->qhash_set = false;\n-\ti40iw_modify_qp(&iwqp->ibqp, &attr, IB_QP_STATE, NULL);\n-\n-\tcm_node->accelerated = true;\n-\tspin_lock_irqsave(&cm_core->ht_lock, flags);\n-\tlist_move_tail(&cm_node->list, &cm_core->accelerated_list);\n-\tspin_unlock_irqrestore(&cm_core->ht_lock, flags);\n-\tstatus = i40iw_send_cm_event(cm_node, cm_id, IW_CM_EVENT_CONNECT_REPLY,\n-\t\t\t\t 0);\n-\tif (status)\n-\t\ti40iw_debug(dev, I40IW_DEBUG_CM, \"error sending cm event - CONNECT_REPLY\\n\");\n-\n-\treturn;\n-\n-error:\n-\tiwqp->cm_id = NULL;\n-\tcm_id->provider_data = NULL;\n-\ti40iw_send_cm_event(event->cm_node,\n-\t\t\t cm_id,\n-\t\t\t IW_CM_EVENT_CONNECT_REPLY,\n-\t\t\t status);\n-\tcm_id->rem_ref(cm_id);\n-\ti40iw_rem_ref_cm_node(event->cm_node);\n-}\n-\n-/**\n- * i40iw_cm_event_reset - handle reset\n- * @event: the info for cm_node of connection\n- */\n-static void i40iw_cm_event_reset(struct i40iw_cm_event *event)\n-{\n-\tstruct i40iw_cm_node *cm_node = event->cm_node;\n-\tstruct iw_cm_id *cm_id = cm_node->cm_id;\n-\tstruct i40iw_qp *iwqp;\n-\n-\tif (!cm_id)\n-\t\treturn;\n-\n-\tiwqp = cm_id->provider_data;\n-\tif (!iwqp)\n-\t\treturn;\n-\n-\ti40iw_debug(cm_node->dev,\n-\t\t I40IW_DEBUG_CM,\n-\t\t \"reset event %p - cm_id = %p\\n\",\n-\t\t event->cm_node, cm_id);\n-\tiwqp->cm_id = 
NULL;\n-\n-\ti40iw_send_cm_event(cm_node, cm_node->cm_id, IW_CM_EVENT_DISCONNECT, -ECONNRESET);\n-\ti40iw_send_cm_event(cm_node, cm_node->cm_id, IW_CM_EVENT_CLOSE, 0);\n-}\n-\n-/**\n- * i40iw_cm_event_handler - worker thread callback to send event to cm upper layer\n- * @work: pointer of cm event info.\n- */\n-static void i40iw_cm_event_handler(struct work_struct *work)\n-{\n-\tstruct i40iw_cm_event *event = container_of(work,\n-\t\t\t\t\t\t struct i40iw_cm_event,\n-\t\t\t\t\t\t event_work);\n-\tstruct i40iw_cm_node *cm_node;\n-\n-\tif (!event || !event->cm_node || !event->cm_node->cm_core)\n-\t\treturn;\n-\n-\tcm_node = event->cm_node;\n-\n-\tswitch (event->type) {\n-\tcase I40IW_CM_EVENT_MPA_REQ:\n-\t\ti40iw_send_cm_event(cm_node,\n-\t\t\t\t cm_node->cm_id,\n-\t\t\t\t IW_CM_EVENT_CONNECT_REQUEST,\n-\t\t\t\t 0);\n-\t\tbreak;\n-\tcase I40IW_CM_EVENT_RESET:\n-\t\ti40iw_cm_event_reset(event);\n-\t\tbreak;\n-\tcase I40IW_CM_EVENT_CONNECTED:\n-\t\tif (!event->cm_node->cm_id ||\n-\t\t (event->cm_node->state != I40IW_CM_STATE_OFFLOADED))\n-\t\t\tbreak;\n-\t\ti40iw_cm_event_connected(event);\n-\t\tbreak;\n-\tcase I40IW_CM_EVENT_MPA_REJECT:\n-\t\tif (!event->cm_node->cm_id ||\n-\t\t (cm_node->state == I40IW_CM_STATE_OFFLOADED))\n-\t\t\tbreak;\n-\t\ti40iw_send_cm_event(cm_node,\n-\t\t\t\t cm_node->cm_id,\n-\t\t\t\t IW_CM_EVENT_CONNECT_REPLY,\n-\t\t\t\t -ECONNREFUSED);\n-\t\tbreak;\n-\tcase I40IW_CM_EVENT_ABORTED:\n-\t\tif (!event->cm_node->cm_id ||\n-\t\t (event->cm_node->state == I40IW_CM_STATE_OFFLOADED))\n-\t\t\tbreak;\n-\t\ti40iw_event_connect_error(event);\n-\t\tbreak;\n-\tdefault:\n-\t\ti40iw_pr_err(\"event type = %d\\n\", event->type);\n-\t\tbreak;\n-\t}\n-\n-\tevent->cm_info.cm_id->rem_ref(event->cm_info.cm_id);\n-\ti40iw_rem_ref_cm_node(event->cm_node);\n-\tkfree(event);\n-}\n-\n-/**\n- * i40iw_cm_post_event - queue event request for worker thread\n- * @event: cm node's info for up event call\n- */\n-static void i40iw_cm_post_event(struct i40iw_cm_event *event)\n-{\n-\tatomic_inc(&event->cm_node->ref_count);\n-\tevent->cm_info.cm_id->add_ref(event->cm_info.cm_id);\n-\tINIT_WORK(&event->event_work, i40iw_cm_event_handler);\n-\n-\tqueue_work(event->cm_node->cm_core->event_wq, &event->event_work);\n-}\n-\n-/**\n- * i40iw_qhash_ctrl - enable/disable qhash for list\n- * @iwdev: device pointer\n- * @parent_listen_node: parent listen node\n- * @nfo: cm info node\n- * @ipaddr: Pointer to IPv4 or IPv6 address\n- * @ipv4: flag indicating IPv4 when true\n- * @ifup: flag indicating interface up when true\n- *\n- * Enables or disables the qhash for the node in the child\n- * listen list that matches ipaddr. If no matching IP was found\n- * it will allocate and add a new child listen node to the\n- * parent listen node. The listen_list_lock is assumed to be\n- * held when called.\n- */\n-static void i40iw_qhash_ctrl(struct i40iw_device *iwdev,\n-\t\t\t struct i40iw_cm_listener *parent_listen_node,\n-\t\t\t struct i40iw_cm_info *nfo,\n-\t\t\t u32 *ipaddr, bool ipv4, bool ifup)\n-{\n-\tstruct list_head *child_listen_list = &parent_listen_node->child_listen_list;\n-\tstruct i40iw_cm_listener *child_listen_node;\n-\tstruct list_head *pos, *tpos;\n-\tenum i40iw_status_code ret;\n-\tbool node_allocated = false;\n-\tenum i40iw_quad_hash_manage_type op =\n-\t\tifup ? 
I40IW_QHASH_MANAGE_TYPE_ADD : I40IW_QHASH_MANAGE_TYPE_DELETE;\n-\n-\tlist_for_each_safe(pos, tpos, child_listen_list) {\n-\t\tchild_listen_node =\n-\t\t\tlist_entry(pos,\n-\t\t\t\t struct i40iw_cm_listener,\n-\t\t\t\t child_listen_list);\n-\t\tif (!memcmp(child_listen_node->loc_addr, ipaddr, ipv4 ? 4 : 16))\n-\t\t\tgoto set_qhash;\n-\t}\n-\n-\t/* if not found then add a child listener if interface is going up */\n-\tif (!ifup)\n-\t\treturn;\n-\tchild_listen_node = kmemdup(parent_listen_node,\n-\t\t\tsizeof(*child_listen_node), GFP_ATOMIC);\n-\tif (!child_listen_node)\n-\t\treturn;\n-\tnode_allocated = true;\n-\n-\tmemcpy(child_listen_node->loc_addr, ipaddr, ipv4 ? 4 : 16);\n-\n-set_qhash:\n-\tmemcpy(nfo->loc_addr,\n-\t child_listen_node->loc_addr,\n-\t sizeof(nfo->loc_addr));\n-\tnfo->vlan_id = child_listen_node->vlan_id;\n-\tret = i40iw_manage_qhash(iwdev, nfo,\n-\t\t\t\t I40IW_QHASH_TYPE_TCP_SYN,\n-\t\t\t\t op,\n-\t\t\t\t NULL, false);\n-\tif (!ret) {\n-\t\tchild_listen_node->qhash_set = ifup;\n-\t\tif (node_allocated)\n-\t\t\tlist_add(&child_listen_node->child_listen_list,\n-\t\t\t\t &parent_listen_node->child_listen_list);\n-\t} else if (node_allocated) {\n-\t\tkfree(child_listen_node);\n-\t}\n-}\n-\n-/**\n- * i40iw_cm_teardown_connections - teardown QPs\n- * @iwdev: device pointer\n- * @ipaddr: Pointer to IPv4 or IPv6 address\n- * @ipv4: flag indicating IPv4 when true\n- * @disconnect_all: flag indicating disconnect all QPs\n- * teardown QPs where source or destination addr matches ip addr\n- */\n-void i40iw_cm_teardown_connections(struct i40iw_device *iwdev, u32 *ipaddr,\n-\t\t\t\t struct i40iw_cm_info *nfo,\n-\t\t\t\t bool disconnect_all)\n-{\n-\tstruct i40iw_cm_core *cm_core = &iwdev->cm_core;\n-\tstruct list_head *list_core_temp;\n-\tstruct list_head *list_node;\n-\tstruct i40iw_cm_node *cm_node;\n-\tunsigned long flags;\n-\tstruct list_head teardown_list;\n-\tstruct ib_qp_attr attr;\n-\n-\tINIT_LIST_HEAD(&teardown_list);\n-\tspin_lock_irqsave(&cm_core->ht_lock, flags);\n-\tlist_for_each_safe(list_node, list_core_temp,\n-\t\t\t &cm_core->accelerated_list) {\n-\t\tcm_node = container_of(list_node, struct i40iw_cm_node, list);\n-\t\tif (disconnect_all ||\n-\t\t (nfo->vlan_id == cm_node->vlan_id &&\n-\t\t (!memcmp(cm_node->loc_addr, ipaddr, nfo->ipv4 ? 4 : 16) ||\n-\t\t !memcmp(cm_node->rem_addr, ipaddr, nfo->ipv4 ? 4 : 16)))) {\n-\t\t\tatomic_inc(&cm_node->ref_count);\n-\t\t\tlist_add(&cm_node->teardown_entry, &teardown_list);\n-\t\t}\n-\t}\n-\tlist_for_each_safe(list_node, list_core_temp,\n-\t\t\t &cm_core->non_accelerated_list) {\n-\t\tcm_node = container_of(list_node, struct i40iw_cm_node, list);\n-\t\tif (disconnect_all ||\n-\t\t (nfo->vlan_id == cm_node->vlan_id &&\n-\t\t (!memcmp(cm_node->loc_addr, ipaddr, nfo->ipv4 ? 4 : 16) ||\n-\t\t !memcmp(cm_node->rem_addr, ipaddr, nfo->ipv4 ? 
4 : 16)))) {\n-\t\t\tatomic_inc(&cm_node->ref_count);\n-\t\t\tlist_add(&cm_node->teardown_entry, &teardown_list);\n-\t\t}\n-\t}\n-\tspin_unlock_irqrestore(&cm_core->ht_lock, flags);\n-\n-\tlist_for_each_safe(list_node, list_core_temp, &teardown_list) {\n-\t\tcm_node = container_of(list_node, struct i40iw_cm_node,\n-\t\t\t\t teardown_entry);\n-\t\tattr.qp_state = IB_QPS_ERR;\n-\t\ti40iw_modify_qp(&cm_node->iwqp->ibqp, &attr, IB_QP_STATE, NULL);\n-\t\tif (iwdev->reset)\n-\t\t\ti40iw_cm_disconn(cm_node->iwqp);\n-\t\ti40iw_rem_ref_cm_node(cm_node);\n-\t}\n-}\n-\n-/**\n- * i40iw_ifdown_notify - process an ifdown on an interface\n- * @iwdev: device pointer\n- * @ipaddr: Pointer to IPv4 or IPv6 address\n- * @ipv4: flag indicating IPv4 when true\n- * @ifup: flag indicating interface up when true\n- */\n-void i40iw_if_notify(struct i40iw_device *iwdev, struct net_device *netdev,\n-\t\t u32 *ipaddr, bool ipv4, bool ifup)\n-{\n-\tstruct i40iw_cm_core *cm_core = &iwdev->cm_core;\n-\tunsigned long flags;\n-\tstruct i40iw_cm_listener *listen_node;\n-\tstatic const u32 ip_zero[4] = { 0, 0, 0, 0 };\n-\tstruct i40iw_cm_info nfo;\n-\tu16 vlan_id = rdma_vlan_dev_vlan_id(netdev);\n-\tenum i40iw_status_code ret;\n-\tenum i40iw_quad_hash_manage_type op =\n-\t\tifup ? I40IW_QHASH_MANAGE_TYPE_ADD : I40IW_QHASH_MANAGE_TYPE_DELETE;\n-\n-\tnfo.vlan_id = vlan_id;\n-\tnfo.ipv4 = ipv4;\n-\n-\t/* Disable or enable qhash for listeners */\n-\tspin_lock_irqsave(&cm_core->listen_list_lock, flags);\n-\tlist_for_each_entry(listen_node, &cm_core->listen_nodes, list) {\n-\t\tif (vlan_id == listen_node->vlan_id &&\n-\t\t (!memcmp(listen_node->loc_addr, ipaddr, ipv4 ? 4 : 16) ||\n-\t\t !memcmp(listen_node->loc_addr, ip_zero, ipv4 ? 4 : 16))) {\n-\t\t\tmemcpy(nfo.loc_addr, listen_node->loc_addr,\n-\t\t\t sizeof(nfo.loc_addr));\n-\t\t\tnfo.loc_port = listen_node->loc_port;\n-\t\t\tnfo.user_pri = listen_node->user_pri;\n-\t\t\tif (!list_empty(&listen_node->child_listen_list)) {\n-\t\t\t\ti40iw_qhash_ctrl(iwdev,\n-\t\t\t\t\t\t listen_node,\n-\t\t\t\t\t\t &nfo,\n-\t\t\t\t\t\t ipaddr, ipv4, ifup);\n-\t\t\t} else if (memcmp(listen_node->loc_addr, ip_zero,\n-\t\t\t\t\t ipv4 ? 4 : 16)) {\n-\t\t\t\tret = i40iw_manage_qhash(iwdev,\n-\t\t\t\t\t\t\t &nfo,\n-\t\t\t\t\t\t\t I40IW_QHASH_TYPE_TCP_SYN,\n-\t\t\t\t\t\t\t op,\n-\t\t\t\t\t\t\t NULL,\n-\t\t\t\t\t\t\t false);\n-\t\t\t\tif (!ret)\n-\t\t\t\t\tlisten_node->qhash_set = ifup;\n-\t\t\t}\n-\t\t}\n-\t}\n-\tspin_unlock_irqrestore(&cm_core->listen_list_lock, flags);\n-\n-\t/* teardown connected qp's on ifdown */\n-\tif (!ifup)\n-\t\ti40iw_cm_teardown_connections(iwdev, ipaddr, &nfo, false);\n-}\ndiff --git a/drivers/infiniband/hw/i40iw/i40iw_cm.h b/drivers/infiniband/hw/i40iw/i40iw_cm.h\ndeleted file mode 100644\nindex 66dc1ba..0000000\n--- a/drivers/infiniband/hw/i40iw/i40iw_cm.h\n+++ /dev/null\n@@ -1,462 +0,0 @@\n-/*******************************************************************************\n-*\n-* Copyright (c) 2015-2016 Intel Corporation. All rights reserved.\n-*\n-* This software is available to you under a choice of one of two\n-* licenses. 
You may choose to be licensed under the terms of the GNU\n-* General Public License (GPL) Version 2, available from the file\n-* COPYING in the main directory of this source tree, or the\n-* OpenFabrics.org BSD license below:\n-*\n-* Redistribution and use in source and binary forms, with or\n-* without modification, are permitted provided that the following\n-* conditions are met:\n-*\n-* - Redistributions of source code must retain the above\n-*\tcopyright notice, this list of conditions and the following\n-*\tdisclaimer.\n-*\n-* - Redistributions in binary form must reproduce the above\n-*\tcopyright notice, this list of conditions and the following\n-*\tdisclaimer in the documentation and/or other materials\n-*\tprovided with the distribution.\n-*\n-* THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,\n-* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\n-* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\n-* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS\n-* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN\n-* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\n-* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n-* SOFTWARE.\n-*\n-*******************************************************************************/\n-\n-#ifndef I40IW_CM_H\n-#define I40IW_CM_H\n-\n-#define QUEUE_EVENTS\n-\n-#define I40IW_MANAGE_APBVT_DEL 0\n-#define I40IW_MANAGE_APBVT_ADD 1\n-\n-#define I40IW_MPA_REQUEST_ACCEPT 1\n-#define I40IW_MPA_REQUEST_REJECT 2\n-\n-/* IETF MPA -- defines, enums, structs */\n-#define IEFT_MPA_KEY_REQ \"MPA ID Req Frame\"\n-#define IEFT_MPA_KEY_REP \"MPA ID Rep Frame\"\n-#define IETF_MPA_KEY_SIZE 16\n-#define IETF_MPA_VERSION 1\n-#define IETF_MAX_PRIV_DATA_LEN 512\n-#define IETF_MPA_FRAME_SIZE 20\n-#define IETF_RTR_MSG_SIZE 4\n-#define IETF_MPA_V2_FLAG 0x10\n-#define SNDMARKER_SEQNMASK 0x000001FF\n-\n-#define I40IW_MAX_IETF_SIZE 32\n-\n-/* IETF RTR MSG Fields */\n-#define IETF_PEER_TO_PEER 0x8000\n-#define IETF_FLPDU_ZERO_LEN 0x4000\n-#define IETF_RDMA0_WRITE 0x8000\n-#define IETF_RDMA0_READ 0x4000\n-#define IETF_NO_IRD_ORD 0x3FFF\n-\n-/* HW-supported IRD sizes*/\n-#define\tI40IW_HW_IRD_SETTING_2\t2\n-#define\tI40IW_HW_IRD_SETTING_4\t4\n-#define\tI40IW_HW_IRD_SETTING_8\t8\n-#define\tI40IW_HW_IRD_SETTING_16\t16\n-#define\tI40IW_HW_IRD_SETTING_32\t32\n-#define\tI40IW_HW_IRD_SETTING_64\t64\n-\n-#define MAX_PORTS\t\t65536\n-#define I40IW_VLAN_PRIO_SHIFT 13\n-\n-enum ietf_mpa_flags {\n-\tIETF_MPA_FLAGS_MARKERS = 0x80,\t/* receive Markers */\n-\tIETF_MPA_FLAGS_CRC = 0x40,\t/* receive Markers */\n-\tIETF_MPA_FLAGS_REJECT = 0x20,\t/* Reject */\n-};\n-\n-struct ietf_mpa_v1 {\n-\tu8 key[IETF_MPA_KEY_SIZE];\n-\tu8 flags;\n-\tu8 rev;\n-\t__be16 priv_data_len;\n-\tu8 priv_data[0];\n-};\n-\n-#define ietf_mpa_req_resp_frame ietf_mpa_frame\n-\n-struct ietf_rtr_msg {\n-\t__be16 ctrl_ird;\n-\t__be16 ctrl_ord;\n-};\n-\n-struct ietf_mpa_v2 {\n-\tu8 key[IETF_MPA_KEY_SIZE];\n-\tu8 flags;\n-\tu8 rev;\n-\t__be16 priv_data_len;\n-\tstruct ietf_rtr_msg rtr_msg;\n-\tu8 priv_data[0];\n-};\n-\n-struct i40iw_cm_node;\n-enum i40iw_timer_type {\n-\tI40IW_TIMER_TYPE_SEND,\n-\tI40IW_TIMER_TYPE_RECV,\n-\tI40IW_TIMER_NODE_CLEANUP,\n-\tI40IW_TIMER_TYPE_CLOSE,\n-};\n-\n-#define I40IW_PASSIVE_STATE_INDICATED 0\n-#define I40IW_DO_NOT_SEND_RESET_EVENT 1\n-#define I40IW_SEND_RESET_EVENT 2\n-\n-#define MAX_I40IW_IFS 4\n-\n-#define SET_ACK 0x1\n-#define SET_SYN 0x2\n-#define SET_FIN 0x4\n-#define SET_RST 0x8\n-\n-#define 
TCP_OPTIONS_PADDING 3\n-\n-struct option_base {\n-\tu8 optionnum;\n-\tu8 length;\n-};\n-\n-enum option_numbers {\n-\tOPTION_NUMBER_END,\n-\tOPTION_NUMBER_NONE,\n-\tOPTION_NUMBER_MSS,\n-\tOPTION_NUMBER_WINDOW_SCALE,\n-\tOPTION_NUMBER_SACK_PERM,\n-\tOPTION_NUMBER_SACK,\n-\tOPTION_NUMBER_WRITE0 = 0xbc\n-};\n-\n-struct option_mss {\n-\tu8 optionnum;\n-\tu8 length;\n-\t__be16 mss;\n-};\n-\n-struct option_windowscale {\n-\tu8 optionnum;\n-\tu8 length;\n-\tu8 shiftcount;\n-};\n-\n-union all_known_options {\n-\tchar as_end;\n-\tstruct option_base as_base;\n-\tstruct option_mss as_mss;\n-\tstruct option_windowscale as_windowscale;\n-};\n-\n-struct i40iw_timer_entry {\n-\tstruct list_head list;\n-\tunsigned long timetosend;\t/* jiffies */\n-\tstruct i40iw_puda_buf *sqbuf;\n-\tu32 type;\n-\tu32 retrycount;\n-\tu32 retranscount;\n-\tu32 context;\n-\tu32 send_retrans;\n-\tint close_when_complete;\n-};\n-\n-#define I40IW_DEFAULT_RETRYS\t64\n-#define I40IW_DEFAULT_RETRANS\t8\n-#define I40IW_DEFAULT_TTL\t0x40\n-#define I40IW_DEFAULT_RTT_VAR\t0x6\n-#define I40IW_DEFAULT_SS_THRESH 0x3FFFFFFF\n-#define I40IW_DEFAULT_REXMIT_THRESH 8\n-\n-#define I40IW_RETRY_TIMEOUT HZ\n-#define I40IW_SHORT_TIME 10\n-#define I40IW_LONG_TIME (2 * HZ)\n-#define I40IW_MAX_TIMEOUT ((unsigned long)(12 * HZ))\n-\n-#define I40IW_CM_HASHTABLE_SIZE 1024\n-#define I40IW_CM_TCP_TIMER_INTERVAL 3000\n-#define I40IW_CM_DEFAULT_MTU 1540\n-#define I40IW_CM_DEFAULT_FRAME_CNT 10\n-#define I40IW_CM_THREAD_STACK_SIZE 256\n-#define I40IW_CM_DEFAULT_RCV_WND 64240\n-#define I40IW_CM_DEFAULT_RCV_WND_SCALED 0x3fffc\n-#define I40IW_CM_DEFAULT_RCV_WND_SCALE 2\n-#define I40IW_CM_DEFAULT_FREE_PKTS 0x000A\n-#define I40IW_CM_FREE_PKT_LO_WATERMARK 2\n-\n-#define I40IW_CM_DEFAULT_MSS 536\n-\n-#define I40IW_CM_DEF_SEQ 0x159bf75f\n-#define I40IW_CM_DEF_LOCAL_ID 0x3b47\n-\n-#define I40IW_CM_DEF_SEQ2 0x18ed5740\n-#define I40IW_CM_DEF_LOCAL_ID2 0xb807\n-#define MAX_CM_BUFFER (I40IW_MAX_IETF_SIZE + IETF_MAX_PRIV_DATA_LEN)\n-\n-typedef u32 i40iw_addr_t;\n-\n-#define i40iw_cm_tsa_context i40iw_qp_context\n-\n-struct i40iw_qp;\n-\n-/* cm node transition states */\n-enum i40iw_cm_node_state {\n-\tI40IW_CM_STATE_UNKNOWN,\n-\tI40IW_CM_STATE_INITED,\n-\tI40IW_CM_STATE_LISTENING,\n-\tI40IW_CM_STATE_SYN_RCVD,\n-\tI40IW_CM_STATE_SYN_SENT,\n-\tI40IW_CM_STATE_ONE_SIDE_ESTABLISHED,\n-\tI40IW_CM_STATE_ESTABLISHED,\n-\tI40IW_CM_STATE_ACCEPTING,\n-\tI40IW_CM_STATE_MPAREQ_SENT,\n-\tI40IW_CM_STATE_MPAREQ_RCVD,\n-\tI40IW_CM_STATE_MPAREJ_RCVD,\n-\tI40IW_CM_STATE_OFFLOADED,\n-\tI40IW_CM_STATE_FIN_WAIT1,\n-\tI40IW_CM_STATE_FIN_WAIT2,\n-\tI40IW_CM_STATE_CLOSE_WAIT,\n-\tI40IW_CM_STATE_TIME_WAIT,\n-\tI40IW_CM_STATE_LAST_ACK,\n-\tI40IW_CM_STATE_CLOSING,\n-\tI40IW_CM_STATE_LISTENER_DESTROYED,\n-\tI40IW_CM_STATE_CLOSED\n-};\n-\n-enum mpa_frame_version {\n-\tIETF_MPA_V1 = 1,\n-\tIETF_MPA_V2 = 2\n-};\n-\n-enum mpa_frame_key {\n-\tMPA_KEY_REQUEST,\n-\tMPA_KEY_REPLY\n-};\n-\n-enum send_rdma0 {\n-\tSEND_RDMA_READ_ZERO = 1,\n-\tSEND_RDMA_WRITE_ZERO = 2\n-};\n-\n-enum i40iw_tcpip_pkt_type {\n-\tI40IW_PKT_TYPE_UNKNOWN,\n-\tI40IW_PKT_TYPE_SYN,\n-\tI40IW_PKT_TYPE_SYNACK,\n-\tI40IW_PKT_TYPE_ACK,\n-\tI40IW_PKT_TYPE_FIN,\n-\tI40IW_PKT_TYPE_RST\n-};\n-\n-/* CM context params */\n-struct i40iw_cm_tcp_context {\n-\tu8 client;\n-\n-\tu32 loc_seq_num;\n-\tu32 loc_ack_num;\n-\tu32 rem_ack_num;\n-\tu32 rcv_nxt;\n-\n-\tu32 loc_id;\n-\tu32 rem_id;\n-\n-\tu32 snd_wnd;\n-\tu32 max_snd_wnd;\n-\n-\tu32 rcv_wnd;\n-\tu32 mss;\n-\tu8 snd_wscale;\n-\tu8 rcv_wscale;\n-};\n-\n-enum i40iw_cm_listener_state 
{\n-\tI40IW_CM_LISTENER_PASSIVE_STATE = 1,\n-\tI40IW_CM_LISTENER_ACTIVE_STATE = 2,\n-\tI40IW_CM_LISTENER_EITHER_STATE = 3\n-};\n-\n-struct i40iw_cm_listener {\n-\tstruct list_head list;\n-\tstruct i40iw_cm_core *cm_core;\n-\tu8 loc_mac[ETH_ALEN];\n-\tu32 loc_addr[4];\n-\tu16 loc_port;\n-\tstruct iw_cm_id *cm_id;\n-\tatomic_t ref_count;\n-\tstruct i40iw_device *iwdev;\n-\tatomic_t pend_accepts_cnt;\n-\tint backlog;\n-\tenum i40iw_cm_listener_state listener_state;\n-\tu32 reused_node;\n-\tu8 user_pri;\n-\tu8 tos;\n-\tu16 vlan_id;\n-\tbool qhash_set;\n-\tbool ipv4;\n-\tstruct list_head child_listen_list;\n-\n-};\n-\n-struct i40iw_kmem_info {\n-\tvoid *addr;\n-\tu32 size;\n-};\n-\n-/* per connection node and node state information */\n-struct i40iw_cm_node {\n-\tu32 loc_addr[4], rem_addr[4];\n-\tu16 loc_port, rem_port;\n-\tu16 vlan_id;\n-\tenum i40iw_cm_node_state state;\n-\tu8 loc_mac[ETH_ALEN];\n-\tu8 rem_mac[ETH_ALEN];\n-\tatomic_t ref_count;\n-\tstruct i40iw_qp *iwqp;\n-\tstruct i40iw_device *iwdev;\n-\tstruct i40iw_sc_dev *dev;\n-\tstruct i40iw_cm_tcp_context tcp_cntxt;\n-\tstruct i40iw_cm_core *cm_core;\n-\tstruct i40iw_cm_node *loopbackpartner;\n-\tstruct i40iw_timer_entry *send_entry;\n-\tstruct i40iw_timer_entry *close_entry;\n-\tspinlock_t retrans_list_lock; /* cm transmit packet */\n-\tenum send_rdma0 send_rdma0_op;\n-\tu16 ird_size;\n-\tu16 ord_size;\n-\tu16 mpav2_ird_ord;\n-\tstruct iw_cm_id *cm_id;\n-\tstruct list_head list;\n-\tbool accelerated;\n-\tstruct i40iw_cm_listener *listener;\n-\tint apbvt_set;\n-\tint accept_pend;\n-\tstruct list_head timer_entry;\n-\tstruct list_head reset_entry;\n-\tstruct list_head teardown_entry;\n-\tatomic_t passive_state;\n-\tbool qhash_set;\n-\tu8 user_pri;\n-\tu8 tos;\n-\tbool ipv4;\n-\tbool snd_mark_en;\n-\tu16 lsmm_size;\n-\tenum mpa_frame_version mpa_frame_rev;\n-\tstruct i40iw_kmem_info pdata;\n-\tunion {\n-\t\tstruct ietf_mpa_v1 mpa_frame;\n-\t\tstruct ietf_mpa_v2 mpa_v2_frame;\n-\t};\n-\n-\tu8 pdata_buf[IETF_MAX_PRIV_DATA_LEN];\n-\tstruct i40iw_kmem_info mpa_hdr;\n-\tbool ack_rcvd;\n-};\n-\n-/* structure for client or CM to fill when making CM api calls. */\n-/*\t- only need to set relevant data, based on op. 
*/\n-struct i40iw_cm_info {\n-\tstruct iw_cm_id *cm_id;\n-\tu16 loc_port;\n-\tu16 rem_port;\n-\tu32 loc_addr[4];\n-\tu32 rem_addr[4];\n-\tu16 vlan_id;\n-\tint backlog;\n-\tu8 user_pri;\n-\tu8 tos;\n-\tbool ipv4;\n-};\n-\n-/* CM event codes */\n-enum i40iw_cm_event_type {\n-\tI40IW_CM_EVENT_UNKNOWN,\n-\tI40IW_CM_EVENT_ESTABLISHED,\n-\tI40IW_CM_EVENT_MPA_REQ,\n-\tI40IW_CM_EVENT_MPA_CONNECT,\n-\tI40IW_CM_EVENT_MPA_ACCEPT,\n-\tI40IW_CM_EVENT_MPA_REJECT,\n-\tI40IW_CM_EVENT_MPA_ESTABLISHED,\n-\tI40IW_CM_EVENT_CONNECTED,\n-\tI40IW_CM_EVENT_RESET,\n-\tI40IW_CM_EVENT_ABORTED\n-};\n-\n-/* event to post to CM event handler */\n-struct i40iw_cm_event {\n-\tenum i40iw_cm_event_type type;\n-\tstruct i40iw_cm_info cm_info;\n-\tstruct work_struct event_work;\n-\tstruct i40iw_cm_node *cm_node;\n-};\n-\n-struct i40iw_cm_core {\n-\tstruct i40iw_device *iwdev;\n-\tstruct i40iw_sc_dev *dev;\n-\n-\tstruct list_head listen_nodes;\n-\tstruct list_head accelerated_list;\n-\tstruct list_head non_accelerated_list;\n-\n-\tstruct timer_list tcp_timer;\n-\n-\tstruct workqueue_struct *event_wq;\n-\tstruct workqueue_struct *disconn_wq;\n-\n-\tspinlock_t ht_lock; /* manage hash table */\n-\tspinlock_t listen_list_lock; /* listen list */\n-\tspinlock_t apbvt_lock; /*manage apbvt entries*/\n-\n-\tunsigned long ports_in_use[BITS_TO_LONGS(MAX_PORTS)];\n-\n-\tu64\tstats_nodes_created;\n-\tu64\tstats_nodes_destroyed;\n-\tu64\tstats_listen_created;\n-\tu64\tstats_listen_destroyed;\n-\tu64\tstats_listen_nodes_created;\n-\tu64\tstats_listen_nodes_destroyed;\n-\tu64\tstats_loopbacks;\n-\tu64\tstats_accepts;\n-\tu64\tstats_rejects;\n-\tu64\tstats_connect_errs;\n-\tu64\tstats_passive_errs;\n-\tu64\tstats_pkt_retrans;\n-\tu64\tstats_backlog_drops;\n-};\n-\n-int i40iw_schedule_cm_timer(struct i40iw_cm_node *cm_node,\n-\t\t\t struct i40iw_puda_buf *sqbuf,\n-\t\t\t enum i40iw_timer_type type,\n-\t\t\t int send_retrans,\n-\t\t\t int close_when_complete);\n-\n-int i40iw_accept(struct iw_cm_id *, struct iw_cm_conn_param *);\n-int i40iw_reject(struct iw_cm_id *, const void *, u8);\n-int i40iw_connect(struct iw_cm_id *, struct iw_cm_conn_param *);\n-int i40iw_create_listen(struct iw_cm_id *, int);\n-int i40iw_destroy_listen(struct iw_cm_id *);\n-\n-int i40iw_cm_start(struct i40iw_device *);\n-int i40iw_cm_stop(struct i40iw_device *);\n-\n-int i40iw_arp_table(struct i40iw_device *iwdev,\n-\t\t u32 *ip_addr,\n-\t\t bool ipv4,\n-\t\t u8 *mac_addr,\n-\t\t u32 action);\n-\n-void i40iw_if_notify(struct i40iw_device *iwdev, struct net_device *netdev,\n-\t\t u32 *ipaddr, bool ipv4, bool ifup);\n-void i40iw_cm_teardown_connections(struct i40iw_device *iwdev, u32 *ipaddr,\n-\t\t\t\t struct i40iw_cm_info *nfo,\n-\t\t\t\t bool disconnect_all);\n-bool i40iw_port_in_use(struct i40iw_cm_core *cm_core, u16 port);\n-#endif /* I40IW_CM_H */\ndiff --git a/drivers/infiniband/hw/i40iw/i40iw_ctrl.c b/drivers/infiniband/hw/i40iw/i40iw_ctrl.c\ndeleted file mode 100644\nindex 4d841a3..0000000\n--- a/drivers/infiniband/hw/i40iw/i40iw_ctrl.c\n+++ /dev/null\n@@ -1,5198 +0,0 @@\n-/*******************************************************************************\n-*\n-* Copyright (c) 2015-2016 Intel Corporation. All rights reserved.\n-*\n-* This software is available to you under a choice of one of two\n-* licenses. 
You may choose to be licensed under the terms of the GNU\n-* General Public License (GPL) Version 2, available from the file\n-* COPYING in the main directory of this source tree, or the\n-* OpenFabrics.org BSD license below:\n-*\n-* Redistribution and use in source and binary forms, with or\n-* without modification, are permitted provided that the following\n-* conditions are met:\n-*\n-* - Redistributions of source code must retain the above\n-*\tcopyright notice, this list of conditions and the following\n-*\tdisclaimer.\n-*\n-* - Redistributions in binary form must reproduce the above\n-*\tcopyright notice, this list of conditions and the following\n-*\tdisclaimer in the documentation and/or other materials\n-*\tprovided with the distribution.\n-*\n-* THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,\n-* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\n-* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\n-* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS\n-* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN\n-* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\n-* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n-* SOFTWARE.\n-*\n-*******************************************************************************/\n-\n-#include \"i40iw_osdep.h\"\n-#include \"i40iw_register.h\"\n-#include \"i40iw_status.h\"\n-#include \"i40iw_hmc.h\"\n-\n-#include \"i40iw_d.h\"\n-#include \"i40iw_type.h\"\n-#include \"i40iw_p.h\"\n-#include \"i40iw_vf.h\"\n-#include \"i40iw_virtchnl.h\"\n-\n-/**\n- * i40iw_insert_wqe_hdr - write wqe header\n- * @wqe: cqp wqe for header\n- * @header: header for the cqp wqe\n- */\n-void i40iw_insert_wqe_hdr(u64 *wqe, u64 header)\n-{\n-\twmb(); /* make sure WQE is populated before polarity is set */\n-\tset_64bit_val(wqe, 24, header);\n-}\n-\n-void i40iw_check_cqp_progress(struct i40iw_cqp_timeout *cqp_timeout, struct i40iw_sc_dev *dev)\n-{\n-\tif (cqp_timeout->compl_cqp_cmds != dev->cqp_cmd_stats[OP_COMPLETED_COMMANDS]) {\n-\t\tcqp_timeout->compl_cqp_cmds = dev->cqp_cmd_stats[OP_COMPLETED_COMMANDS];\n-\t\tcqp_timeout->count = 0;\n-\t} else {\n-\t\tif (dev->cqp_cmd_stats[OP_REQUESTED_COMMANDS] != cqp_timeout->compl_cqp_cmds)\n-\t\t\tcqp_timeout->count++;\n-\t}\n-}\n-\n-/**\n- * i40iw_get_cqp_reg_info - get head and tail for cqp using registers\n- * @cqp: struct for cqp hw\n- * @val: cqp tail register value\n- * @tail:wqtail register value\n- * @error: cqp processing err\n- */\n-static inline void i40iw_get_cqp_reg_info(struct i40iw_sc_cqp *cqp,\n-\t\t\t\t\t u32 *val,\n-\t\t\t\t\t u32 *tail,\n-\t\t\t\t\t u32 *error)\n-{\n-\tif (cqp->dev->is_pf) {\n-\t\t*val = i40iw_rd32(cqp->dev->hw, I40E_PFPE_CQPTAIL);\n-\t\t*tail = RS_32(*val, I40E_PFPE_CQPTAIL_WQTAIL);\n-\t\t*error = RS_32(*val, I40E_PFPE_CQPTAIL_CQP_OP_ERR);\n-\t} else {\n-\t\t*val = i40iw_rd32(cqp->dev->hw, I40E_VFPE_CQPTAIL1);\n-\t\t*tail = RS_32(*val, I40E_VFPE_CQPTAIL_WQTAIL);\n-\t\t*error = RS_32(*val, I40E_VFPE_CQPTAIL_CQP_OP_ERR);\n-\t}\n-}\n-\n-/**\n- * i40iw_cqp_poll_registers - poll cqp registers\n- * @cqp: struct for cqp hw\n- * @tail:wqtail register value\n- * @count: how many times to try for completion\n- */\n-static enum i40iw_status_code i40iw_cqp_poll_registers(\n-\t\t\t\t\t\tstruct i40iw_sc_cqp *cqp,\n-\t\t\t\t\t\tu32 tail,\n-\t\t\t\t\t\tu32 count)\n-{\n-\tu32 i = 0;\n-\tu32 newtail, error, val;\n-\n-\twhile (i < count) {\n-\t\ti++;\n-\t\ti40iw_get_cqp_reg_info(cqp, &val, &newtail, 
&error);\n-\t\tif (error) {\n-\t\t\terror = (cqp->dev->is_pf) ?\n-\t\t\t\t i40iw_rd32(cqp->dev->hw, I40E_PFPE_CQPERRCODES) :\n-\t\t\t\t i40iw_rd32(cqp->dev->hw, I40E_VFPE_CQPERRCODES1);\n-\t\t\treturn I40IW_ERR_CQP_COMPL_ERROR;\n-\t\t}\n-\t\tif (newtail != tail) {\n-\t\t\t/* SUCCESS */\n-\t\t\tI40IW_RING_MOVE_TAIL(cqp->sq_ring);\n-\t\t\tcqp->dev->cqp_cmd_stats[OP_COMPLETED_COMMANDS]++;\n-\t\t\treturn 0;\n-\t\t}\n-\t\tudelay(I40IW_SLEEP_COUNT);\n-\t}\n-\treturn I40IW_ERR_TIMEOUT;\n-}\n-\n-/**\n- * i40iw_sc_parse_fpm_commit_buf - parse fpm commit buffer\n- * @buf: ptr to fpm commit buffer\n- * @info: ptr to i40iw_hmc_obj_info struct\n- * @sd: number of SDs for HMC objects\n- *\n- * parses fpm commit info and copy base value\n- * of hmc objects in hmc_info\n- */\n-static enum i40iw_status_code i40iw_sc_parse_fpm_commit_buf(\n-\t\t\t\tu64 *buf,\n-\t\t\t\tstruct i40iw_hmc_obj_info *info,\n-\t\t\t\tu32 *sd)\n-{\n-\tu64 temp;\n-\tu64 size;\n-\tu64 base = 0;\n-\tu32 i, j;\n-\tu32 k = 0;\n-\n-\t/* copy base values in obj_info */\n-\tfor (i = I40IW_HMC_IW_QP, j = 0; i <= I40IW_HMC_IW_PBLE; i++, j += 8) {\n-\t\tif ((i == I40IW_HMC_IW_SRQ) ||\n-\t\t\t(i == I40IW_HMC_IW_FSIMC) ||\n-\t\t\t(i == I40IW_HMC_IW_FSIAV)) {\n-\t\t\tinfo[i].base = 0;\n-\t\t\tinfo[i].cnt = 0;\n-\t\t\tcontinue;\n-\t\t}\n-\t\tget_64bit_val(buf, j, &temp);\n-\t\tinfo[i].base = RS_64_1(temp, 32) * 512;\n-\t\tif (info[i].base > base) {\n-\t\t\tbase = info[i].base;\n-\t\t\tk = i;\n-\t\t}\n-\t\tif (i == I40IW_HMC_IW_APBVT_ENTRY) {\n-\t\t\tinfo[i].cnt = 1;\n-\t\t\tcontinue;\n-\t\t}\n-\t\tif (i == I40IW_HMC_IW_QP)\n-\t\t\tinfo[i].cnt = (u32)RS_64(temp, I40IW_QUERY_FPM_MAX_QPS);\n-\t\telse if (i == I40IW_HMC_IW_CQ)\n-\t\t\tinfo[i].cnt = (u32)RS_64(temp, I40IW_QUERY_FPM_MAX_CQS);\n-\t\telse\n-\t\t\tinfo[i].cnt = (u32)(temp);\n-\t}\n-\tsize = info[k].cnt * info[k].size + info[k].base;\n-\tif (size & 0x1FFFFF)\n-\t\t*sd = (u32)((size >> 21) + 1); /* add 1 for remainder */\n-\telse\n-\t\t*sd = (u32)(size >> 21);\n-\n-\treturn 0;\n-}\n-\n-/**\n- * i40iw_sc_decode_fpm_query() - Decode a 64 bit value into max count and size\n- * @buf: ptr to fpm query buffer\n- * @buf_idx: index into buf\n- * @info: ptr to i40iw_hmc_obj_info struct\n- * @rsrc_idx: resource index into info\n- *\n- * Decode a 64 bit value from fpm query buffer into max count and size\n- */\n-static u64 i40iw_sc_decode_fpm_query(u64 *buf,\n-\t\t\t\t\t u32 buf_idx,\n-\t\t\t\t\t struct i40iw_hmc_obj_info *obj_info,\n-\t\t\t\t\t u32 rsrc_idx)\n-{\n-\tu64 temp;\n-\tu32 size;\n-\n-\tget_64bit_val(buf, buf_idx, &temp);\n-\tobj_info[rsrc_idx].max_cnt = (u32)temp;\n-\tsize = (u32)RS_64_1(temp, 32);\n-\tobj_info[rsrc_idx].size = LS_64_1(1, size);\n-\n-\treturn temp;\n-}\n-\n-/**\n- * i40iw_sc_parse_fpm_query_buf() - parses fpm query buffer\n- * @buf: ptr to fpm query buffer\n- * @info: ptr to i40iw_hmc_obj_info struct\n- * @hmc_fpm_misc: ptr to fpm data\n- *\n- * parses fpm query buffer and copy max_cnt and\n- * size value of hmc objects in hmc_info\n- */\n-static enum i40iw_status_code i40iw_sc_parse_fpm_query_buf(\n-\t\t\t\tu64 *buf,\n-\t\t\t\tstruct i40iw_hmc_info *hmc_info,\n-\t\t\t\tstruct i40iw_hmc_fpm_misc *hmc_fpm_misc)\n-{\n-\tstruct i40iw_hmc_obj_info *obj_info;\n-\tu64 temp;\n-\tu32 size;\n-\tu16 max_pe_sds;\n-\n-\tobj_info = hmc_info->hmc_obj;\n-\n-\tget_64bit_val(buf, 0, &temp);\n-\thmc_info->first_sd_index = (u16)RS_64(temp, I40IW_QUERY_FPM_FIRST_PE_SD_INDEX);\n-\tmax_pe_sds = (u16)RS_64(temp, I40IW_QUERY_FPM_MAX_PE_SDS);\n-\n-\t/* Reduce SD count for VFs by 1 to account for 
PBLE backing page rounding */\n-\tif (hmc_info->hmc_fn_id >= I40IW_FIRST_VF_FPM_ID)\n-\t\tmax_pe_sds--;\n-\thmc_fpm_misc->max_sds = max_pe_sds;\n-\thmc_info->sd_table.sd_cnt = max_pe_sds + hmc_info->first_sd_index;\n-\n-\tget_64bit_val(buf, 8, &temp);\n-\tobj_info[I40IW_HMC_IW_QP].max_cnt = (u32)RS_64(temp, I40IW_QUERY_FPM_MAX_QPS);\n-\tsize = (u32)RS_64_1(temp, 32);\n-\tobj_info[I40IW_HMC_IW_QP].size = LS_64_1(1, size);\n-\n-\tget_64bit_val(buf, 16, &temp);\n-\tobj_info[I40IW_HMC_IW_CQ].max_cnt = (u32)RS_64(temp, I40IW_QUERY_FPM_MAX_CQS);\n-\tsize = (u32)RS_64_1(temp, 32);\n-\tobj_info[I40IW_HMC_IW_CQ].size = LS_64_1(1, size);\n-\n-\ti40iw_sc_decode_fpm_query(buf, 32, obj_info, I40IW_HMC_IW_HTE);\n-\ti40iw_sc_decode_fpm_query(buf, 40, obj_info, I40IW_HMC_IW_ARP);\n-\n-\tobj_info[I40IW_HMC_IW_APBVT_ENTRY].size = 8192;\n-\tobj_info[I40IW_HMC_IW_APBVT_ENTRY].max_cnt = 1;\n-\n-\ti40iw_sc_decode_fpm_query(buf, 48, obj_info, I40IW_HMC_IW_MR);\n-\ti40iw_sc_decode_fpm_query(buf, 56, obj_info, I40IW_HMC_IW_XF);\n-\n-\tget_64bit_val(buf, 64, &temp);\n-\tobj_info[I40IW_HMC_IW_XFFL].max_cnt = (u32)temp;\n-\tobj_info[I40IW_HMC_IW_XFFL].size = 4;\n-\thmc_fpm_misc->xf_block_size = RS_64(temp, I40IW_QUERY_FPM_XFBLOCKSIZE);\n-\tif (!hmc_fpm_misc->xf_block_size)\n-\t\treturn I40IW_ERR_INVALID_SIZE;\n-\n-\ti40iw_sc_decode_fpm_query(buf, 72, obj_info, I40IW_HMC_IW_Q1);\n-\n-\tget_64bit_val(buf, 80, &temp);\n-\tobj_info[I40IW_HMC_IW_Q1FL].max_cnt = (u32)temp;\n-\tobj_info[I40IW_HMC_IW_Q1FL].size = 4;\n-\thmc_fpm_misc->q1_block_size = RS_64(temp, I40IW_QUERY_FPM_Q1BLOCKSIZE);\n-\tif (!hmc_fpm_misc->q1_block_size)\n-\t\treturn I40IW_ERR_INVALID_SIZE;\n-\n-\ti40iw_sc_decode_fpm_query(buf, 88, obj_info, I40IW_HMC_IW_TIMER);\n-\n-\tget_64bit_val(buf, 112, &temp);\n-\tobj_info[I40IW_HMC_IW_PBLE].max_cnt = (u32)temp;\n-\tobj_info[I40IW_HMC_IW_PBLE].size = 8;\n-\n-\tget_64bit_val(buf, 120, &temp);\n-\thmc_fpm_misc->max_ceqs = (u8)RS_64(temp, I40IW_QUERY_FPM_MAX_CEQS);\n-\thmc_fpm_misc->ht_multiplier = RS_64(temp, I40IW_QUERY_FPM_HTMULTIPLIER);\n-\thmc_fpm_misc->timer_bucket = RS_64(temp, I40IW_QUERY_FPM_TIMERBUCKET);\n-\n-\treturn 0;\n-}\n-\n-/**\n- * i40iw_fill_qos_list - Change all unknown qs handles to available ones\n- * @qs_list: list of qs_handles to be fixed with valid qs_handles\n- */\n-static void i40iw_fill_qos_list(u16 *qs_list)\n-{\n-\tu16 qshandle = qs_list[0];\n-\tint i;\n-\n-\tfor (i = 0; i < I40IW_MAX_USER_PRIORITY; i++) {\n-\t\tif (qs_list[i] == QS_HANDLE_UNKNOWN)\n-\t\t\tqs_list[i] = qshandle;\n-\t\telse\n-\t\t\tqshandle = qs_list[i];\n-\t}\n-}\n-\n-/**\n- * i40iw_qp_from_entry - Given entry, get to the qp structure\n- * @entry: Points to list of qp structure\n- */\n-static struct i40iw_sc_qp *i40iw_qp_from_entry(struct list_head *entry)\n-{\n-\tif (!entry)\n-\t\treturn NULL;\n-\n-\treturn (struct i40iw_sc_qp *)((char *)entry - offsetof(struct i40iw_sc_qp, list));\n-}\n-\n-/**\n- * i40iw_get_qp - get the next qp from the list given current qp\n- * @head: Listhead of qp's\n- * @qp: current qp\n- */\n-static struct i40iw_sc_qp *i40iw_get_qp(struct list_head *head, struct i40iw_sc_qp *qp)\n-{\n-\tstruct list_head *entry = NULL;\n-\tstruct list_head *lastentry;\n-\n-\tif (list_empty(head))\n-\t\treturn NULL;\n-\n-\tif (!qp) {\n-\t\tentry = head->next;\n-\t} else {\n-\t\tlastentry = &qp->list;\n-\t\tentry = (lastentry != head) ? 
lastentry->next : NULL;\n-\t}\n-\n-\treturn i40iw_qp_from_entry(entry);\n-}\n-\n-/**\n- * i40iw_change_l2params - given the new l2 parameters, change all qp\n- * @vsi: pointer to the vsi structure\n- * @l2params: New paramaters from l2\n- */\n-void i40iw_change_l2params(struct i40iw_sc_vsi *vsi, struct i40iw_l2params *l2params)\n-{\n-\tstruct i40iw_sc_dev *dev = vsi->dev;\n-\tstruct i40iw_sc_qp *qp = NULL;\n-\tbool qs_handle_change = false;\n-\tunsigned long flags;\n-\tu16 qs_handle;\n-\tint i;\n-\n-\tif (vsi->mtu != l2params->mtu) {\n-\t\tvsi->mtu = l2params->mtu;\n-\t\ti40iw_reinitialize_ieq(dev);\n-\t}\n-\n-\ti40iw_fill_qos_list(l2params->qs_handle_list);\n-\tfor (i = 0; i < I40IW_MAX_USER_PRIORITY; i++) {\n-\t\tqs_handle = l2params->qs_handle_list[i];\n-\t\tif (vsi->qos[i].qs_handle != qs_handle)\n-\t\t\tqs_handle_change = true;\n-\t\tspin_lock_irqsave(&vsi->qos[i].lock, flags);\n-\t\tqp = i40iw_get_qp(&vsi->qos[i].qplist, qp);\n-\t\twhile (qp) {\n-\t\t\tif (qs_handle_change) {\n-\t\t\t\tqp->qs_handle = qs_handle;\n-\t\t\t\t/* issue cqp suspend command */\n-\t\t\t\ti40iw_qp_suspend_resume(dev, qp, true);\n-\t\t\t}\n-\t\t\tqp = i40iw_get_qp(&vsi->qos[i].qplist, qp);\n-\t\t}\n-\t\tspin_unlock_irqrestore(&vsi->qos[i].lock, flags);\n-\t\tvsi->qos[i].qs_handle = qs_handle;\n-\t}\n-}\n-\n-/**\n- * i40iw_qp_rem_qos - remove qp from qos lists during destroy qp\n- * @qp: qp to be removed from qos\n- */\n-void i40iw_qp_rem_qos(struct i40iw_sc_qp *qp)\n-{\n-\tstruct i40iw_sc_vsi *vsi = qp->vsi;\n-\tunsigned long flags;\n-\n-\tif (!qp->on_qoslist)\n-\t\treturn;\n-\tspin_lock_irqsave(&vsi->qos[qp->user_pri].lock, flags);\n-\tlist_del(&qp->list);\n-\tspin_unlock_irqrestore(&vsi->qos[qp->user_pri].lock, flags);\n-}\n-\n-/**\n- * i40iw_qp_add_qos - called during setctx fot qp to be added to qos\n- * @qp: qp to be added to qos\n- */\n-void i40iw_qp_add_qos(struct i40iw_sc_qp *qp)\n-{\n-\tstruct i40iw_sc_vsi *vsi = qp->vsi;\n-\tunsigned long flags;\n-\n-\tif (qp->on_qoslist)\n-\t\treturn;\n-\tspin_lock_irqsave(&vsi->qos[qp->user_pri].lock, flags);\n-\tqp->qs_handle = vsi->qos[qp->user_pri].qs_handle;\n-\tlist_add(&qp->list, &vsi->qos[qp->user_pri].qplist);\n-\tqp->on_qoslist = true;\n-\tspin_unlock_irqrestore(&vsi->qos[qp->user_pri].lock, flags);\n-}\n-\n-/**\n- * i40iw_sc_pd_init - initialize sc pd struct\n- * @dev: sc device struct\n- * @pd: sc pd ptr\n- * @pd_id: pd_id for allocated pd\n- * @abi_ver: ABI version from user context, -1 if not valid\n- */\n-static void i40iw_sc_pd_init(struct i40iw_sc_dev *dev,\n-\t\t\t struct i40iw_sc_pd *pd,\n-\t\t\t u16 pd_id,\n-\t\t\t int abi_ver)\n-{\n-\tpd->size = sizeof(*pd);\n-\tpd->pd_id = pd_id;\n-\tpd->abi_ver = abi_ver;\n-\tpd->dev = dev;\n-}\n-\n-/**\n- * i40iw_get_encoded_wqe_size - given wq size, returns hardware encoded size\n- * @wqsize: size of the wq (sq, rq, srq) to encoded_size\n- * @cqpsq: encoded size for sq for cqp as its encoded size is 1+ other wq's\n- */\n-u8 i40iw_get_encoded_wqe_size(u32 wqsize, bool cqpsq)\n-{\n-\tu8 encoded_size = 0;\n-\n-\t/* cqp sq's hw coded value starts from 1 for size of 4\n-\t * while it starts from 0 for qp' wq's.\n-\t */\n-\tif (cqpsq)\n-\t\tencoded_size = 1;\n-\twqsize >>= 2;\n-\twhile (wqsize >>= 1)\n-\t\tencoded_size++;\n-\treturn encoded_size;\n-}\n-\n-/**\n- * i40iw_sc_cqp_init - Initialize buffers for a control Queue Pair\n- * @cqp: IWARP control queue pair pointer\n- * @info: IWARP control queue pair init info pointer\n- *\n- * Initializes the object and context buffers for a control Queue Pair.\n- 
*/\n-static enum i40iw_status_code i40iw_sc_cqp_init(struct i40iw_sc_cqp *cqp,\n-\t\t\t\t\t\tstruct i40iw_cqp_init_info *info)\n-{\n-\tu8 hw_sq_size;\n-\n-\tif ((info->sq_size > I40IW_CQP_SW_SQSIZE_2048) ||\n-\t (info->sq_size < I40IW_CQP_SW_SQSIZE_4) ||\n-\t ((info->sq_size & (info->sq_size - 1))))\n-\t\treturn I40IW_ERR_INVALID_SIZE;\n-\n-\thw_sq_size = i40iw_get_encoded_wqe_size(info->sq_size, true);\n-\tcqp->size = sizeof(*cqp);\n-\tcqp->sq_size = info->sq_size;\n-\tcqp->hw_sq_size = hw_sq_size;\n-\tcqp->sq_base = info->sq;\n-\tcqp->host_ctx = info->host_ctx;\n-\tcqp->sq_pa = info->sq_pa;\n-\tcqp->host_ctx_pa = info->host_ctx_pa;\n-\tcqp->dev = info->dev;\n-\tcqp->struct_ver = info->struct_ver;\n-\tcqp->scratch_array = info->scratch_array;\n-\tcqp->polarity = 0;\n-\tcqp->en_datacenter_tcp = info->en_datacenter_tcp;\n-\tcqp->enabled_vf_count = info->enabled_vf_count;\n-\tcqp->hmc_profile = info->hmc_profile;\n-\tinfo->dev->cqp = cqp;\n-\n-\tI40IW_RING_INIT(cqp->sq_ring, cqp->sq_size);\n-\tcqp->dev->cqp_cmd_stats[OP_REQUESTED_COMMANDS] = 0;\n-\tcqp->dev->cqp_cmd_stats[OP_COMPLETED_COMMANDS] = 0;\n-\tINIT_LIST_HEAD(&cqp->dev->cqp_cmd_head); /* for the cqp commands backlog. */\n-\n-\ti40iw_wr32(cqp->dev->hw, I40E_PFPE_CQPTAIL, 0);\n-\ti40iw_wr32(cqp->dev->hw, I40E_PFPE_CQPDB, 0);\n-\n-\ti40iw_debug(cqp->dev, I40IW_DEBUG_WQE,\n-\t\t \"%s: sq_size[%04d] hw_sq_size[%04d] sq_base[%p] sq_pa[%llxh] cqp[%p] polarity[x%04X]\\n\",\n-\t\t __func__, cqp->sq_size, cqp->hw_sq_size,\n-\t\t cqp->sq_base, cqp->sq_pa, cqp, cqp->polarity);\n-\treturn 0;\n-}\n-\n-/**\n- * i40iw_sc_cqp_create - create cqp during bringup\n- * @cqp: struct for cqp hw\n- * @maj_err: If error, major err number\n- * @min_err: If error, minor err number\n- */\n-static enum i40iw_status_code i40iw_sc_cqp_create(struct i40iw_sc_cqp *cqp,\n-\t\t\t\t\t\t u16 *maj_err,\n-\t\t\t\t\t\t u16 *min_err)\n-{\n-\tu64 temp;\n-\tu32 cnt = 0, p1, p2, val = 0, err_code;\n-\tenum i40iw_status_code ret_code;\n-\n-\t*maj_err = 0;\n-\t*min_err = 0;\n-\n-\tret_code = i40iw_allocate_dma_mem(cqp->dev->hw,\n-\t\t\t\t\t &cqp->sdbuf,\n-\t\t\t\t\t I40IW_UPDATE_SD_BUF_SIZE * cqp->sq_size,\n-\t\t\t\t\t I40IW_SD_BUF_ALIGNMENT);\n-\n-\tif (ret_code)\n-\t\tgoto exit;\n-\n-\ttemp = LS_64(cqp->hw_sq_size, I40IW_CQPHC_SQSIZE) |\n-\t LS_64(cqp->struct_ver, I40IW_CQPHC_SVER);\n-\n-\tset_64bit_val(cqp->host_ctx, 0, temp);\n-\tset_64bit_val(cqp->host_ctx, 8, cqp->sq_pa);\n-\ttemp = LS_64(cqp->enabled_vf_count, I40IW_CQPHC_ENABLED_VFS) |\n-\t LS_64(cqp->hmc_profile, I40IW_CQPHC_HMC_PROFILE);\n-\tset_64bit_val(cqp->host_ctx, 16, temp);\n-\tset_64bit_val(cqp->host_ctx, 24, (uintptr_t)cqp);\n-\tset_64bit_val(cqp->host_ctx, 32, 0);\n-\tset_64bit_val(cqp->host_ctx, 40, 0);\n-\tset_64bit_val(cqp->host_ctx, 48, 0);\n-\tset_64bit_val(cqp->host_ctx, 56, 0);\n-\n-\ti40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, \"CQP_HOST_CTX\",\n-\t\t\tcqp->host_ctx, I40IW_CQP_CTX_SIZE * 8);\n-\n-\tp1 = RS_32_1(cqp->host_ctx_pa, 32);\n-\tp2 = (u32)cqp->host_ctx_pa;\n-\n-\tif (cqp->dev->is_pf) {\n-\t\ti40iw_wr32(cqp->dev->hw, I40E_PFPE_CCQPHIGH, p1);\n-\t\ti40iw_wr32(cqp->dev->hw, I40E_PFPE_CCQPLOW, p2);\n-\t} else {\n-\t\ti40iw_wr32(cqp->dev->hw, I40E_VFPE_CCQPHIGH1, p1);\n-\t\ti40iw_wr32(cqp->dev->hw, I40E_VFPE_CCQPLOW1, p2);\n-\t}\n-\tdo {\n-\t\tif (cnt++ > I40IW_DONE_COUNT) {\n-\t\t\ti40iw_free_dma_mem(cqp->dev->hw, &cqp->sdbuf);\n-\t\t\tret_code = I40IW_ERR_TIMEOUT;\n-\t\t\t/*\n-\t\t\t * read PFPE_CQPERRORCODES register to get the minor\n-\t\t\t * and major error code\n-\t\t\t */\n-\t\t\tif 
(cqp->dev->is_pf)\n-\t\t\t\terr_code = i40iw_rd32(cqp->dev->hw, I40E_PFPE_CQPERRCODES);\n-\t\t\telse\n-\t\t\t\terr_code = i40iw_rd32(cqp->dev->hw, I40E_VFPE_CQPERRCODES1);\n-\t\t\t*min_err = RS_32(err_code, I40E_PFPE_CQPERRCODES_CQP_MINOR_CODE);\n-\t\t\t*maj_err = RS_32(err_code, I40E_PFPE_CQPERRCODES_CQP_MAJOR_CODE);\n-\t\t\tgoto exit;\n-\t\t}\n-\t\tudelay(I40IW_SLEEP_COUNT);\n-\t\tif (cqp->dev->is_pf)\n-\t\t\tval = i40iw_rd32(cqp->dev->hw, I40E_PFPE_CCQPSTATUS);\n-\t\telse\n-\t\t\tval = i40iw_rd32(cqp->dev->hw, I40E_VFPE_CCQPSTATUS1);\n-\t} while (!val);\n-\n-exit:\n-\tif (!ret_code)\n-\t\tcqp->process_cqp_sds = i40iw_update_sds_noccq;\n-\treturn ret_code;\n-}\n-\n-/**\n- * i40iw_sc_cqp_post_sq - post of cqp's sq\n- * @cqp: struct for cqp hw\n- */\n-void i40iw_sc_cqp_post_sq(struct i40iw_sc_cqp *cqp)\n-{\n-\tif (cqp->dev->is_pf)\n-\t\ti40iw_wr32(cqp->dev->hw, I40E_PFPE_CQPDB, I40IW_RING_GETCURRENT_HEAD(cqp->sq_ring));\n-\telse\n-\t\ti40iw_wr32(cqp->dev->hw, I40E_VFPE_CQPDB1, I40IW_RING_GETCURRENT_HEAD(cqp->sq_ring));\n-\n-\ti40iw_debug(cqp->dev,\n-\t\t I40IW_DEBUG_WQE,\n-\t\t \"%s: HEAD_TAIL[%04d,%04d,%04d]\\n\",\n-\t\t __func__,\n-\t\t cqp->sq_ring.head,\n-\t\t cqp->sq_ring.tail,\n-\t\t cqp->sq_ring.size);\n-}\n-\n-/**\n- * i40iw_sc_cqp_get_next_send_wqe_idx - get next WQE on CQP SQ and pass back the index\n- * @cqp: pointer to CQP structure\n- * @scratch: private data for CQP WQE\n- * @wqe_idx: WQE index for next WQE on CQP SQ\n- */\n-static u64 *i40iw_sc_cqp_get_next_send_wqe_idx(struct i40iw_sc_cqp *cqp,\n-\t\t\t\t\t u64 scratch, u32 *wqe_idx)\n-{\n-\tu64 *wqe = NULL;\n-\tenum i40iw_status_code ret_code;\n-\n-\tif (I40IW_RING_FULL_ERR(cqp->sq_ring)) {\n-\t\ti40iw_debug(cqp->dev,\n-\t\t\t I40IW_DEBUG_WQE,\n-\t\t\t \"%s: ring is full head %x tail %x size %x\\n\",\n-\t\t\t __func__,\n-\t\t\t cqp->sq_ring.head,\n-\t\t\t cqp->sq_ring.tail,\n-\t\t\t cqp->sq_ring.size);\n-\t\treturn NULL;\n-\t}\n-\tI40IW_ATOMIC_RING_MOVE_HEAD(cqp->sq_ring, *wqe_idx, ret_code);\n-\tcqp->dev->cqp_cmd_stats[OP_REQUESTED_COMMANDS]++;\n-\tif (ret_code)\n-\t\treturn NULL;\n-\tif (!*wqe_idx)\n-\t\tcqp->polarity = !cqp->polarity;\n-\n-\twqe = cqp->sq_base[*wqe_idx].elem;\n-\tcqp->scratch_array[*wqe_idx] = scratch;\n-\tI40IW_CQP_INIT_WQE(wqe);\n-\n-\treturn wqe;\n-}\n-\n-/**\n- * i40iw_sc_cqp_get_next_send_wqe - get next wqe on cqp sq\n- * @cqp: struct for cqp hw\n- * @scratch: private data for CQP WQE\n- */\n-u64 *i40iw_sc_cqp_get_next_send_wqe(struct i40iw_sc_cqp *cqp, u64 scratch)\n-{\n-\tu32 wqe_idx;\n-\n-\treturn i40iw_sc_cqp_get_next_send_wqe_idx(cqp, scratch, &wqe_idx);\n-}\n-\n-/**\n- * i40iw_sc_cqp_destroy - destroy cqp during close\n- * @cqp: struct for cqp hw\n- */\n-static enum i40iw_status_code i40iw_sc_cqp_destroy(struct i40iw_sc_cqp *cqp)\n-{\n-\tu32 cnt = 0, val = 1;\n-\tenum i40iw_status_code ret_code = 0;\n-\tu32 cqpstat_addr;\n-\n-\tif (cqp->dev->is_pf) {\n-\t\ti40iw_wr32(cqp->dev->hw, I40E_PFPE_CCQPHIGH, 0);\n-\t\ti40iw_wr32(cqp->dev->hw, I40E_PFPE_CCQPLOW, 0);\n-\t\tcqpstat_addr = I40E_PFPE_CCQPSTATUS;\n-\t} else {\n-\t\ti40iw_wr32(cqp->dev->hw, I40E_VFPE_CCQPHIGH1, 0);\n-\t\ti40iw_wr32(cqp->dev->hw, I40E_VFPE_CCQPLOW1, 0);\n-\t\tcqpstat_addr = I40E_VFPE_CCQPSTATUS1;\n-\t}\n-\tdo {\n-\t\tif (cnt++ > I40IW_DONE_COUNT) {\n-\t\t\tret_code = I40IW_ERR_TIMEOUT;\n-\t\t\tbreak;\n-\t\t}\n-\t\tudelay(I40IW_SLEEP_COUNT);\n-\t\tval = i40iw_rd32(cqp->dev->hw, cqpstat_addr);\n-\t} while (val);\n-\n-\ti40iw_free_dma_mem(cqp->dev->hw, &cqp->sdbuf);\n-\treturn ret_code;\n-}\n-\n-/**\n- * i40iw_sc_ccq_arm - 
enable intr for control cq\n- * @ccq: ccq sc struct\n- */\n-static void i40iw_sc_ccq_arm(struct i40iw_sc_cq *ccq)\n-{\n-\tu64 temp_val;\n-\tu16 sw_cq_sel;\n-\tu8 arm_next_se;\n-\tu8 arm_seq_num;\n-\n-\t/* write to cq doorbell shadow area */\n-\t/* arm next se should always be zero */\n-\tget_64bit_val(ccq->cq_uk.shadow_area, 32, &temp_val);\n-\n-\tsw_cq_sel = (u16)RS_64(temp_val, I40IW_CQ_DBSA_SW_CQ_SELECT);\n-\tarm_next_se = (u8)RS_64(temp_val, I40IW_CQ_DBSA_ARM_NEXT_SE);\n-\n-\tarm_seq_num = (u8)RS_64(temp_val, I40IW_CQ_DBSA_ARM_SEQ_NUM);\n-\tarm_seq_num++;\n-\n-\ttemp_val = LS_64(arm_seq_num, I40IW_CQ_DBSA_ARM_SEQ_NUM) |\n-\t\t LS_64(sw_cq_sel, I40IW_CQ_DBSA_SW_CQ_SELECT) |\n-\t\t LS_64(arm_next_se, I40IW_CQ_DBSA_ARM_NEXT_SE) |\n-\t\t LS_64(1, I40IW_CQ_DBSA_ARM_NEXT);\n-\n-\tset_64bit_val(ccq->cq_uk.shadow_area, 32, temp_val);\n-\n-\twmb(); /* make sure shadow area is updated before arming */\n-\n-\tif (ccq->dev->is_pf)\n-\t\ti40iw_wr32(ccq->dev->hw, I40E_PFPE_CQARM, ccq->cq_uk.cq_id);\n-\telse\n-\t\ti40iw_wr32(ccq->dev->hw, I40E_VFPE_CQARM1, ccq->cq_uk.cq_id);\n-}\n-\n-/**\n- * i40iw_sc_ccq_get_cqe_info - get ccq's cq entry\n- * @ccq: ccq sc struct\n- * @info: completion q entry to return\n- */\n-static enum i40iw_status_code i40iw_sc_ccq_get_cqe_info(\n-\t\t\t\t\tstruct i40iw_sc_cq *ccq,\n-\t\t\t\t\tstruct i40iw_ccq_cqe_info *info)\n-{\n-\tu64 qp_ctx, temp, temp1;\n-\tu64 *cqe;\n-\tstruct i40iw_sc_cqp *cqp;\n-\tu32 wqe_idx;\n-\tu8 polarity;\n-\tenum i40iw_status_code ret_code = 0;\n-\n-\tif (ccq->cq_uk.avoid_mem_cflct)\n-\t\tcqe = (u64 *)I40IW_GET_CURRENT_EXTENDED_CQ_ELEMENT(&ccq->cq_uk);\n-\telse\n-\t\tcqe = (u64 *)I40IW_GET_CURRENT_CQ_ELEMENT(&ccq->cq_uk);\n-\n-\tget_64bit_val(cqe, 24, &temp);\n-\tpolarity = (u8)RS_64(temp, I40IW_CQ_VALID);\n-\tif (polarity != ccq->cq_uk.polarity)\n-\t\treturn I40IW_ERR_QUEUE_EMPTY;\n-\n-\tget_64bit_val(cqe, 8, &qp_ctx);\n-\tcqp = (struct i40iw_sc_cqp *)(unsigned long)qp_ctx;\n-\tinfo->error = (bool)RS_64(temp, I40IW_CQ_ERROR);\n-\tinfo->min_err_code = (u16)RS_64(temp, I40IW_CQ_MINERR);\n-\tif (info->error) {\n-\t\tinfo->maj_err_code = (u16)RS_64(temp, I40IW_CQ_MAJERR);\n-\t\tinfo->min_err_code = (u16)RS_64(temp, I40IW_CQ_MINERR);\n-\t}\n-\twqe_idx = (u32)RS_64(temp, I40IW_CQ_WQEIDX);\n-\tinfo->scratch = cqp->scratch_array[wqe_idx];\n-\n-\tget_64bit_val(cqe, 16, &temp1);\n-\tinfo->op_ret_val = (u32)RS_64(temp1, I40IW_CCQ_OPRETVAL);\n-\tget_64bit_val(cqp->sq_base[wqe_idx].elem, 24, &temp1);\n-\tinfo->op_code = (u8)RS_64(temp1, I40IW_CQPSQ_OPCODE);\n-\tinfo->cqp = cqp;\n-\n-\t/* move the head for cq */\n-\tI40IW_RING_MOVE_HEAD(ccq->cq_uk.cq_ring, ret_code);\n-\tif (I40IW_RING_GETCURRENT_HEAD(ccq->cq_uk.cq_ring) == 0)\n-\t\tccq->cq_uk.polarity ^= 1;\n-\n-\t/* update cq tail in cq shadow memory also */\n-\tI40IW_RING_MOVE_TAIL(ccq->cq_uk.cq_ring);\n-\tset_64bit_val(ccq->cq_uk.shadow_area,\n-\t\t 0,\n-\t\t I40IW_RING_GETCURRENT_HEAD(ccq->cq_uk.cq_ring));\n-\twmb(); /* write shadow area before tail */\n-\tI40IW_RING_MOVE_TAIL(cqp->sq_ring);\n-\tccq->dev->cqp_cmd_stats[OP_COMPLETED_COMMANDS]++;\n-\n-\treturn ret_code;\n-}\n-\n-/**\n- * i40iw_sc_poll_for_cqp_op_done - Waits for last write to complete in CQP SQ\n- * @cqp: struct for cqp hw\n- * @op_code: cqp opcode for completion\n- * @info: completion q entry to return\n- */\n-static enum i40iw_status_code i40iw_sc_poll_for_cqp_op_done(\n-\t\t\t\t\tstruct i40iw_sc_cqp *cqp,\n-\t\t\t\t\tu8 op_code,\n-\t\t\t\t\tstruct i40iw_ccq_cqe_info *compl_info)\n-{\n-\tstruct i40iw_ccq_cqe_info info;\n-\tstruct 
i40iw_sc_cq *ccq;\n-\tenum i40iw_status_code ret_code = 0;\n-\tu32 cnt = 0;\n-\n-\tmemset(&info, 0, sizeof(info));\n-\tccq = cqp->dev->ccq;\n-\twhile (1) {\n-\t\tif (cnt++ > I40IW_DONE_COUNT)\n-\t\t\treturn I40IW_ERR_TIMEOUT;\n-\n-\t\tif (i40iw_sc_ccq_get_cqe_info(ccq, &info)) {\n-\t\t\tudelay(I40IW_SLEEP_COUNT);\n-\t\t\tcontinue;\n-\t\t}\n-\n-\t\tif (info.error) {\n-\t\t\tret_code = I40IW_ERR_CQP_COMPL_ERROR;\n-\t\t\tbreak;\n-\t\t}\n-\t\t/* check if opcode is cq create */\n-\t\tif (op_code != info.op_code) {\n-\t\t\ti40iw_debug(cqp->dev, I40IW_DEBUG_WQE,\n-\t\t\t\t \"%s: opcode mismatch for my op code 0x%x, returned opcode %x\\n\",\n-\t\t\t\t __func__, op_code, info.op_code);\n-\t\t}\n-\t\t/* success, exit out of the loop */\n-\t\tif (op_code == info.op_code)\n-\t\t\tbreak;\n-\t}\n-\n-\tif (compl_info)\n-\t\tmemcpy(compl_info, &info, sizeof(*compl_info));\n-\n-\treturn ret_code;\n-}\n-\n-/**\n- * i40iw_sc_manage_push_page - Handle push page\n- * @cqp: struct for cqp hw\n- * @info: push page info\n- * @scratch: u64 saved to be used during cqp completion\n- * @post_sq: flag for cqp db to ring\n- */\n-static enum i40iw_status_code i40iw_sc_manage_push_page(\n-\t\t\t\tstruct i40iw_sc_cqp *cqp,\n-\t\t\t\tstruct i40iw_cqp_manage_push_page_info *info,\n-\t\t\t\tu64 scratch,\n-\t\t\t\tbool post_sq)\n-{\n-\tu64 *wqe;\n-\tu64 header;\n-\n-\tif (info->push_idx >= I40IW_MAX_PUSH_PAGE_COUNT)\n-\t\treturn I40IW_ERR_INVALID_PUSH_PAGE_INDEX;\n-\n-\twqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);\n-\tif (!wqe)\n-\t\treturn I40IW_ERR_RING_FULL;\n-\n-\tset_64bit_val(wqe, 16, info->qs_handle);\n-\n-\theader = LS_64(info->push_idx, I40IW_CQPSQ_MPP_PPIDX) |\n-\t\t LS_64(I40IW_CQP_OP_MANAGE_PUSH_PAGES, I40IW_CQPSQ_OPCODE) |\n-\t\t LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID) |\n-\t\t LS_64(info->free_page, I40IW_CQPSQ_MPP_FREE_PAGE);\n-\n-\ti40iw_insert_wqe_hdr(wqe, header);\n-\n-\ti40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, \"MANAGE_PUSH_PAGES WQE\",\n-\t\t\twqe, I40IW_CQP_WQE_SIZE * 8);\n-\n-\tif (post_sq)\n-\t\ti40iw_sc_cqp_post_sq(cqp);\n-\treturn 0;\n-}\n-\n-/**\n- * i40iw_sc_manage_hmc_pm_func_table - manage of function table\n- * @cqp: struct for cqp hw\n- * @scratch: u64 saved to be used during cqp completion\n- * @vf_index: vf index for cqp\n- * @free_pm_fcn: function number\n- * @post_sq: flag for cqp db to ring\n- */\n-static enum i40iw_status_code i40iw_sc_manage_hmc_pm_func_table(\n-\t\t\t\tstruct i40iw_sc_cqp *cqp,\n-\t\t\t\tu64 scratch,\n-\t\t\t\tu8 vf_index,\n-\t\t\t\tbool free_pm_fcn,\n-\t\t\t\tbool post_sq)\n-{\n-\tu64 *wqe;\n-\tu64 header;\n-\n-\tif (vf_index >= I40IW_MAX_VF_PER_PF)\n-\t\treturn I40IW_ERR_INVALID_VF_ID;\n-\twqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);\n-\tif (!wqe)\n-\t\treturn I40IW_ERR_RING_FULL;\n-\n-\theader = LS_64(vf_index, I40IW_CQPSQ_MHMC_VFIDX) |\n-\t\t LS_64(I40IW_CQP_OP_MANAGE_HMC_PM_FUNC_TABLE, I40IW_CQPSQ_OPCODE) |\n-\t\t LS_64(free_pm_fcn, I40IW_CQPSQ_MHMC_FREEPMFN) |\n-\t\t LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);\n-\n-\ti40iw_insert_wqe_hdr(wqe, header);\n-\ti40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, \"MANAGE_HMC_PM_FUNC_TABLE WQE\",\n-\t\t\twqe, I40IW_CQP_WQE_SIZE * 8);\n-\tif (post_sq)\n-\t\ti40iw_sc_cqp_post_sq(cqp);\n-\treturn 0;\n-}\n-\n-/**\n- * i40iw_sc_set_hmc_resource_profile - cqp wqe for hmc profile\n- * @cqp: struct for cqp hw\n- * @scratch: u64 saved to be used during cqp completion\n- * @hmc_profile_type: type of profile to set\n- * @vf_num: vf number for profile\n- * @post_sq: flag for cqp db to ring\n- * @poll_registers: flag to poll 
register for cqp completion\n- */\n-static enum i40iw_status_code i40iw_sc_set_hmc_resource_profile(\n-\t\t\t\tstruct i40iw_sc_cqp *cqp,\n-\t\t\t\tu64 scratch,\n-\t\t\t\tu8 hmc_profile_type,\n-\t\t\t\tu8 vf_num, bool post_sq,\n-\t\t\t\tbool poll_registers)\n-{\n-\tu64 *wqe;\n-\tu64 header;\n-\tu32 val, tail, error;\n-\tenum i40iw_status_code ret_code = 0;\n-\n-\twqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);\n-\tif (!wqe)\n-\t\treturn I40IW_ERR_RING_FULL;\n-\n-\tset_64bit_val(wqe, 16,\n-\t\t (LS_64(hmc_profile_type, I40IW_CQPSQ_SHMCRP_HMC_PROFILE) |\n-\t\t\t\tLS_64(vf_num, I40IW_CQPSQ_SHMCRP_VFNUM)));\n-\n-\theader = LS_64(I40IW_CQP_OP_SET_HMC_RESOURCE_PROFILE, I40IW_CQPSQ_OPCODE) |\n-\t\t LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);\n-\n-\ti40iw_insert_wqe_hdr(wqe, header);\n-\n-\ti40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, \"MANAGE_HMC_PM_FUNC_TABLE WQE\",\n-\t\t\twqe, I40IW_CQP_WQE_SIZE * 8);\n-\n-\ti40iw_get_cqp_reg_info(cqp, &val, &tail, &error);\n-\tif (error)\n-\t\treturn I40IW_ERR_CQP_COMPL_ERROR;\n-\n-\tif (post_sq) {\n-\t\ti40iw_sc_cqp_post_sq(cqp);\n-\t\tif (poll_registers)\n-\t\t\tret_code = i40iw_cqp_poll_registers(cqp, tail, 1000000);\n-\t\telse\n-\t\t\tret_code = i40iw_sc_poll_for_cqp_op_done(cqp,\n-\t\t\t\t\t\t\t\t I40IW_CQP_OP_SHMC_PAGES_ALLOCATED,\n-\t\t\t\t\t\t\t\t NULL);\n-\t}\n-\n-\treturn ret_code;\n-}\n-\n-/**\n- * i40iw_sc_manage_hmc_pm_func_table_done - wait for cqp wqe completion for function table\n- * @cqp: struct for cqp hw\n- */\n-static enum i40iw_status_code i40iw_sc_manage_hmc_pm_func_table_done(struct i40iw_sc_cqp *cqp)\n-{\n-\treturn i40iw_sc_poll_for_cqp_op_done(cqp, I40IW_CQP_OP_MANAGE_HMC_PM_FUNC_TABLE, NULL);\n-}\n-\n-/**\n- * i40iw_sc_commit_fpm_values_done - wait for cqp eqe completion for fpm commit\n- * @cqp: struct for cqp hw\n- */\n-static enum i40iw_status_code i40iw_sc_commit_fpm_values_done(struct i40iw_sc_cqp *cqp)\n-{\n-\treturn i40iw_sc_poll_for_cqp_op_done(cqp, I40IW_CQP_OP_COMMIT_FPM_VALUES, NULL);\n-}\n-\n-/**\n- * i40iw_sc_commit_fpm_values - cqp wqe for commit fpm values\n- * @cqp: struct for cqp hw\n- * @scratch: u64 saved to be used during cqp completion\n- * @hmc_fn_id: hmc function id\n- * @commit_fpm_mem; Memory for fpm values\n- * @post_sq: flag for cqp db to ring\n- * @wait_type: poll ccq or cqp registers for cqp completion\n- */\n-static enum i40iw_status_code i40iw_sc_commit_fpm_values(\n-\t\t\t\t\tstruct i40iw_sc_cqp *cqp,\n-\t\t\t\t\tu64 scratch,\n-\t\t\t\t\tu8 hmc_fn_id,\n-\t\t\t\t\tstruct i40iw_dma_mem *commit_fpm_mem,\n-\t\t\t\t\tbool post_sq,\n-\t\t\t\t\tu8 wait_type)\n-{\n-\tu64 *wqe;\n-\tu64 header;\n-\tu32 tail, val, error;\n-\tenum i40iw_status_code ret_code = 0;\n-\n-\twqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);\n-\tif (!wqe)\n-\t\treturn I40IW_ERR_RING_FULL;\n-\n-\tset_64bit_val(wqe, 16, hmc_fn_id);\n-\tset_64bit_val(wqe, 32, commit_fpm_mem->pa);\n-\n-\theader = LS_64(I40IW_CQP_OP_COMMIT_FPM_VALUES, I40IW_CQPSQ_OPCODE) |\n-\t\t LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);\n-\n-\ti40iw_insert_wqe_hdr(wqe, header);\n-\n-\ti40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, \"COMMIT_FPM_VALUES WQE\",\n-\t\t\twqe, I40IW_CQP_WQE_SIZE * 8);\n-\n-\ti40iw_get_cqp_reg_info(cqp, &val, &tail, &error);\n-\tif (error)\n-\t\treturn I40IW_ERR_CQP_COMPL_ERROR;\n-\n-\tif (post_sq) {\n-\t\ti40iw_sc_cqp_post_sq(cqp);\n-\n-\t\tif (wait_type == I40IW_CQP_WAIT_POLL_REGS)\n-\t\t\tret_code = i40iw_cqp_poll_registers(cqp, tail, I40IW_DONE_COUNT);\n-\t\telse if (wait_type == I40IW_CQP_WAIT_POLL_CQ)\n-\t\t\tret_code = 
i40iw_sc_commit_fpm_values_done(cqp);\n-\t}\n-\n-\treturn ret_code;\n-}\n-\n-/**\n- * i40iw_sc_query_fpm_values_done - poll for cqp wqe completion for query fpm\n- * @cqp: struct for cqp hw\n- */\n-static enum i40iw_status_code i40iw_sc_query_fpm_values_done(struct i40iw_sc_cqp *cqp)\n-{\n-\treturn i40iw_sc_poll_for_cqp_op_done(cqp, I40IW_CQP_OP_QUERY_FPM_VALUES, NULL);\n-}\n-\n-/**\n- * i40iw_sc_query_fpm_values - cqp wqe query fpm values\n- * @cqp: struct for cqp hw\n- * @scratch: u64 saved to be used during cqp completion\n- * @hmc_fn_id: hmc function id\n- * @query_fpm_mem: memory for return fpm values\n- * @post_sq: flag for cqp db to ring\n- * @wait_type: poll ccq or cqp registers for cqp completion\n- */\n-static enum i40iw_status_code i40iw_sc_query_fpm_values(\n-\t\t\t\t\tstruct i40iw_sc_cqp *cqp,\n-\t\t\t\t\tu64 scratch,\n-\t\t\t\t\tu8 hmc_fn_id,\n-\t\t\t\t\tstruct i40iw_dma_mem *query_fpm_mem,\n-\t\t\t\t\tbool post_sq,\n-\t\t\t\t\tu8 wait_type)\n-{\n-\tu64 *wqe;\n-\tu64 header;\n-\tu32 tail, val, error;\n-\tenum i40iw_status_code ret_code = 0;\n-\n-\twqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);\n-\tif (!wqe)\n-\t\treturn I40IW_ERR_RING_FULL;\n-\n-\tset_64bit_val(wqe, 16, hmc_fn_id);\n-\tset_64bit_val(wqe, 32, query_fpm_mem->pa);\n-\n-\theader = LS_64(I40IW_CQP_OP_QUERY_FPM_VALUES, I40IW_CQPSQ_OPCODE) |\n-\t\t LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);\n-\n-\ti40iw_insert_wqe_hdr(wqe, header);\n-\n-\ti40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, \"QUERY_FPM WQE\",\n-\t\t\twqe, I40IW_CQP_WQE_SIZE * 8);\n-\n-\t/* read the tail from CQP_TAIL register */\n-\ti40iw_get_cqp_reg_info(cqp, &val, &tail, &error);\n-\n-\tif (error)\n-\t\treturn I40IW_ERR_CQP_COMPL_ERROR;\n-\n-\tif (post_sq) {\n-\t\ti40iw_sc_cqp_post_sq(cqp);\n-\t\tif (wait_type == I40IW_CQP_WAIT_POLL_REGS)\n-\t\t\tret_code = i40iw_cqp_poll_registers(cqp, tail, I40IW_DONE_COUNT);\n-\t\telse if (wait_type == I40IW_CQP_WAIT_POLL_CQ)\n-\t\t\tret_code = i40iw_sc_query_fpm_values_done(cqp);\n-\t}\n-\n-\treturn ret_code;\n-}\n-\n-/**\n- * i40iw_sc_add_arp_cache_entry - cqp wqe add arp cache entry\n- * @cqp: struct for cqp hw\n- * @info: arp entry information\n- * @scratch: u64 saved to be used during cqp completion\n- * @post_sq: flag for cqp db to ring\n- */\n-static enum i40iw_status_code i40iw_sc_add_arp_cache_entry(\n-\t\t\t\tstruct i40iw_sc_cqp *cqp,\n-\t\t\t\tstruct i40iw_add_arp_cache_entry_info *info,\n-\t\t\t\tu64 scratch,\n-\t\t\t\tbool post_sq)\n-{\n-\tu64 *wqe;\n-\tu64 temp, header;\n-\n-\twqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);\n-\tif (!wqe)\n-\t\treturn I40IW_ERR_RING_FULL;\n-\tset_64bit_val(wqe, 8, info->reach_max);\n-\n-\ttemp = info->mac_addr[5] |\n-\t LS_64_1(info->mac_addr[4], 8) |\n-\t LS_64_1(info->mac_addr[3], 16) |\n-\t LS_64_1(info->mac_addr[2], 24) |\n-\t LS_64_1(info->mac_addr[1], 32) |\n-\t LS_64_1(info->mac_addr[0], 40);\n-\n-\tset_64bit_val(wqe, 16, temp);\n-\n-\theader = info->arp_index |\n-\t\t LS_64(I40IW_CQP_OP_MANAGE_ARP, I40IW_CQPSQ_OPCODE) |\n-\t\t LS_64((info->permanent ? 
1 : 0), I40IW_CQPSQ_MAT_PERMANENT) |\n-\t\t LS_64(1, I40IW_CQPSQ_MAT_ENTRYVALID) |\n-\t\t LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);\n-\n-\ti40iw_insert_wqe_hdr(wqe, header);\n-\n-\ti40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, \"ARP_CACHE_ENTRY WQE\",\n-\t\t\twqe, I40IW_CQP_WQE_SIZE * 8);\n-\n-\tif (post_sq)\n-\t\ti40iw_sc_cqp_post_sq(cqp);\n-\treturn 0;\n-}\n-\n-/**\n- * i40iw_sc_del_arp_cache_entry - dele arp cache entry\n- * @cqp: struct for cqp hw\n- * @scratch: u64 saved to be used during cqp completion\n- * @arp_index: arp index to delete arp entry\n- * @post_sq: flag for cqp db to ring\n- */\n-static enum i40iw_status_code i40iw_sc_del_arp_cache_entry(\n-\t\t\t\t\tstruct i40iw_sc_cqp *cqp,\n-\t\t\t\t\tu64 scratch,\n-\t\t\t\t\tu16 arp_index,\n-\t\t\t\t\tbool post_sq)\n-{\n-\tu64 *wqe;\n-\tu64 header;\n-\n-\twqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);\n-\tif (!wqe)\n-\t\treturn I40IW_ERR_RING_FULL;\n-\n-\theader = arp_index |\n-\t\t LS_64(I40IW_CQP_OP_MANAGE_ARP, I40IW_CQPSQ_OPCODE) |\n-\t\t LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);\n-\ti40iw_insert_wqe_hdr(wqe, header);\n-\n-\ti40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, \"ARP_CACHE_DEL_ENTRY WQE\",\n-\t\t\twqe, I40IW_CQP_WQE_SIZE * 8);\n-\n-\tif (post_sq)\n-\t\ti40iw_sc_cqp_post_sq(cqp);\n-\treturn 0;\n-}\n-\n-/**\n- * i40iw_sc_query_arp_cache_entry - cqp wqe to query arp and arp index\n- * @cqp: struct for cqp hw\n- * @scratch: u64 saved to be used during cqp completion\n- * @arp_index: arp index to delete arp entry\n- * @post_sq: flag for cqp db to ring\n- */\n-static enum i40iw_status_code i40iw_sc_query_arp_cache_entry(\n-\t\t\t\tstruct i40iw_sc_cqp *cqp,\n-\t\t\t\tu64 scratch,\n-\t\t\t\tu16 arp_index,\n-\t\t\t\tbool post_sq)\n-{\n-\tu64 *wqe;\n-\tu64 header;\n-\n-\twqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);\n-\tif (!wqe)\n-\t\treturn I40IW_ERR_RING_FULL;\n-\n-\theader = arp_index |\n-\t\t LS_64(I40IW_CQP_OP_MANAGE_ARP, I40IW_CQPSQ_OPCODE) |\n-\t\t LS_64(1, I40IW_CQPSQ_MAT_QUERY) |\n-\t\t LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);\n-\n-\ti40iw_insert_wqe_hdr(wqe, header);\n-\n-\ti40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, \"QUERY_ARP_CACHE_ENTRY WQE\",\n-\t\t\twqe, I40IW_CQP_WQE_SIZE * 8);\n-\n-\tif (post_sq)\n-\t\ti40iw_sc_cqp_post_sq(cqp);\n-\treturn 0;\n-}\n-\n-/**\n- * i40iw_sc_manage_apbvt_entry - for adding and deleting apbvt entries\n- * @cqp: struct for cqp hw\n- * @info: info for apbvt entry to add or delete\n- * @scratch: u64 saved to be used during cqp completion\n- * @post_sq: flag for cqp db to ring\n- */\n-static enum i40iw_status_code i40iw_sc_manage_apbvt_entry(\n-\t\t\t\tstruct i40iw_sc_cqp *cqp,\n-\t\t\t\tstruct i40iw_apbvt_info *info,\n-\t\t\t\tu64 scratch,\n-\t\t\t\tbool post_sq)\n-{\n-\tu64 *wqe;\n-\tu64 header;\n-\n-\twqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);\n-\tif (!wqe)\n-\t\treturn I40IW_ERR_RING_FULL;\n-\n-\tset_64bit_val(wqe, 16, info->port);\n-\n-\theader = LS_64(I40IW_CQP_OP_MANAGE_APBVT, I40IW_CQPSQ_OPCODE) |\n-\t\t LS_64(info->add, I40IW_CQPSQ_MAPT_ADDPORT) |\n-\t\t LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);\n-\n-\ti40iw_insert_wqe_hdr(wqe, header);\n-\n-\ti40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, \"MANAGE_APBVT WQE\",\n-\t\t\twqe, I40IW_CQP_WQE_SIZE * 8);\n-\n-\tif (post_sq)\n-\t\ti40iw_sc_cqp_post_sq(cqp);\n-\treturn 0;\n-}\n-\n-/**\n- * i40iw_sc_manage_qhash_table_entry - manage quad hash entries\n- * @cqp: struct for cqp hw\n- * @info: info for quad hash to manage\n- * @scratch: u64 saved to be used during cqp completion\n- * @post_sq: flag for cqp db to ring\n- 
*\n- * This is called before connection establishment is started. For passive connections, when\n- * listener is created, it will call with entry type of I40IW_QHASH_TYPE_TCP_SYN with local\n- * ip address and tcp port. When SYN is received (passive connections) or\n- * sent (active connections), this routine is called with entry type of\n- * I40IW_QHASH_TYPE_TCP_ESTABLISHED and quad is passed in info.\n- *\n- * When iwarp connection is done and its state moves to RTS, the quad hash entry in\n- * the hardware will point to iwarp's qp number and requires no calls from the driver.\n- */\n-static enum i40iw_status_code i40iw_sc_manage_qhash_table_entry(\n-\t\t\t\t\tstruct i40iw_sc_cqp *cqp,\n-\t\t\t\t\tstruct i40iw_qhash_table_info *info,\n-\t\t\t\t\tu64 scratch,\n-\t\t\t\t\tbool post_sq)\n-{\n-\tu64 *wqe;\n-\tu64 qw1 = 0;\n-\tu64 qw2 = 0;\n-\tu64 temp;\n-\tstruct i40iw_sc_vsi *vsi = info->vsi;\n-\n-\twqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);\n-\tif (!wqe)\n-\t\treturn I40IW_ERR_RING_FULL;\n-\n-\ttemp = info->mac_addr[5] |\n-\t\tLS_64_1(info->mac_addr[4], 8) |\n-\t\tLS_64_1(info->mac_addr[3], 16) |\n-\t\tLS_64_1(info->mac_addr[2], 24) |\n-\t\tLS_64_1(info->mac_addr[1], 32) |\n-\t\tLS_64_1(info->mac_addr[0], 40);\n-\n-\tset_64bit_val(wqe, 0, temp);\n-\n-\tqw1 = LS_64(info->qp_num, I40IW_CQPSQ_QHASH_QPN) |\n-\t LS_64(info->dest_port, I40IW_CQPSQ_QHASH_DEST_PORT);\n-\tif (info->ipv4_valid) {\n-\t\tset_64bit_val(wqe,\n-\t\t\t 48,\n-\t\t\t LS_64(info->dest_ip[0], I40IW_CQPSQ_QHASH_ADDR3));\n-\t} else {\n-\t\tset_64bit_val(wqe,\n-\t\t\t 56,\n-\t\t\t LS_64(info->dest_ip[0], I40IW_CQPSQ_QHASH_ADDR0) |\n-\t\t\t LS_64(info->dest_ip[1], I40IW_CQPSQ_QHASH_ADDR1));\n-\n-\t\tset_64bit_val(wqe,\n-\t\t\t 48,\n-\t\t\t LS_64(info->dest_ip[2], I40IW_CQPSQ_QHASH_ADDR2) |\n-\t\t\t LS_64(info->dest_ip[3], I40IW_CQPSQ_QHASH_ADDR3));\n-\t}\n-\tqw2 = LS_64(vsi->qos[info->user_pri].qs_handle, I40IW_CQPSQ_QHASH_QS_HANDLE);\n-\tif (info->vlan_valid)\n-\t\tqw2 |= LS_64(info->vlan_id, I40IW_CQPSQ_QHASH_VLANID);\n-\tset_64bit_val(wqe, 16, qw2);\n-\tif (info->entry_type == I40IW_QHASH_TYPE_TCP_ESTABLISHED) {\n-\t\tqw1 |= LS_64(info->src_port, I40IW_CQPSQ_QHASH_SRC_PORT);\n-\t\tif (!info->ipv4_valid) {\n-\t\t\tset_64bit_val(wqe,\n-\t\t\t\t 40,\n-\t\t\t\t LS_64(info->src_ip[0], I40IW_CQPSQ_QHASH_ADDR0) |\n-\t\t\t\t LS_64(info->src_ip[1], I40IW_CQPSQ_QHASH_ADDR1));\n-\t\t\tset_64bit_val(wqe,\n-\t\t\t\t 32,\n-\t\t\t\t LS_64(info->src_ip[2], I40IW_CQPSQ_QHASH_ADDR2) |\n-\t\t\t\t LS_64(info->src_ip[3], I40IW_CQPSQ_QHASH_ADDR3));\n-\t\t} else {\n-\t\t\tset_64bit_val(wqe,\n-\t\t\t\t 32,\n-\t\t\t\t LS_64(info->src_ip[0], I40IW_CQPSQ_QHASH_ADDR3));\n-\t\t}\n-\t}\n-\n-\tset_64bit_val(wqe, 8, qw1);\n-\ttemp = LS_64(cqp->polarity, I40IW_CQPSQ_QHASH_WQEVALID) |\n-\t LS_64(I40IW_CQP_OP_MANAGE_QUAD_HASH_TABLE_ENTRY, I40IW_CQPSQ_QHASH_OPCODE) |\n-\t LS_64(info->manage, I40IW_CQPSQ_QHASH_MANAGE) |\n-\t LS_64(info->ipv4_valid, I40IW_CQPSQ_QHASH_IPV4VALID) |\n-\t LS_64(info->vlan_valid, I40IW_CQPSQ_QHASH_VLANVALID) |\n-\t LS_64(info->entry_type, I40IW_CQPSQ_QHASH_ENTRYTYPE);\n-\n-\ti40iw_insert_wqe_hdr(wqe, temp);\n-\n-\ti40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, \"MANAGE_QHASH WQE\",\n-\t\t\twqe, I40IW_CQP_WQE_SIZE * 8);\n-\n-\tif (post_sq)\n-\t\ti40iw_sc_cqp_post_sq(cqp);\n-\treturn 0;\n-}\n-\n-/**\n- * i40iw_sc_alloc_local_mac_ipaddr_entry - cqp wqe for loc mac entry\n- * @cqp: struct for cqp hw\n- * @scratch: u64 saved to be used during cqp completion\n- * @post_sq: flag for cqp db to ring\n- */\n-static enum i40iw_status_code 
i40iw_sc_alloc_local_mac_ipaddr_entry(\n-\t\t\t\t\tstruct i40iw_sc_cqp *cqp,\n-\t\t\t\t\tu64 scratch,\n-\t\t\t\t\tbool post_sq)\n-{\n-\tu64 *wqe;\n-\tu64 header;\n-\n-\twqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);\n-\tif (!wqe)\n-\t\treturn I40IW_ERR_RING_FULL;\n-\theader = LS_64(I40IW_CQP_OP_ALLOCATE_LOC_MAC_IP_TABLE_ENTRY, I40IW_CQPSQ_OPCODE) |\n-\t\t LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);\n-\n-\ti40iw_insert_wqe_hdr(wqe, header);\n-\ti40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, \"ALLOCATE_LOCAL_MAC_IPADDR WQE\",\n-\t\t\twqe, I40IW_CQP_WQE_SIZE * 8);\n-\tif (post_sq)\n-\t\ti40iw_sc_cqp_post_sq(cqp);\n-\treturn 0;\n-}\n-\n-/**\n- * i40iw_sc_add_local_mac_ipaddr_entry - add mac enry\n- * @cqp: struct for cqp hw\n- * @info:mac addr info\n- * @scratch: u64 saved to be used during cqp completion\n- * @post_sq: flag for cqp db to ring\n- */\n-static enum i40iw_status_code i40iw_sc_add_local_mac_ipaddr_entry(\n-\t\t\t\tstruct i40iw_sc_cqp *cqp,\n-\t\t\t\tstruct i40iw_local_mac_ipaddr_entry_info *info,\n-\t\t\t\tu64 scratch,\n-\t\t\t\tbool post_sq)\n-{\n-\tu64 *wqe;\n-\tu64 temp, header;\n-\n-\twqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);\n-\tif (!wqe)\n-\t\treturn I40IW_ERR_RING_FULL;\n-\ttemp = info->mac_addr[5] |\n-\t\tLS_64_1(info->mac_addr[4], 8) |\n-\t\tLS_64_1(info->mac_addr[3], 16) |\n-\t\tLS_64_1(info->mac_addr[2], 24) |\n-\t\tLS_64_1(info->mac_addr[1], 32) |\n-\t\tLS_64_1(info->mac_addr[0], 40);\n-\n-\tset_64bit_val(wqe, 32, temp);\n-\n-\theader = LS_64(info->entry_idx, I40IW_CQPSQ_MLIPA_IPTABLEIDX) |\n-\t\t LS_64(I40IW_CQP_OP_MANAGE_LOC_MAC_IP_TABLE, I40IW_CQPSQ_OPCODE) |\n-\t\t LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);\n-\n-\ti40iw_insert_wqe_hdr(wqe, header);\n-\n-\ti40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, \"ADD_LOCAL_MAC_IPADDR WQE\",\n-\t\t\twqe, I40IW_CQP_WQE_SIZE * 8);\n-\n-\tif (post_sq)\n-\t\ti40iw_sc_cqp_post_sq(cqp);\n-\treturn 0;\n-}\n-\n-/**\n- * i40iw_sc_del_local_mac_ipaddr_entry - cqp wqe to dele local mac\n- * @cqp: struct for cqp hw\n- * @scratch: u64 saved to be used during cqp completion\n- * @entry_idx: index of mac entry\n- * @ ignore_ref_count: to force mac adde delete\n- * @post_sq: flag for cqp db to ring\n- */\n-static enum i40iw_status_code i40iw_sc_del_local_mac_ipaddr_entry(\n-\t\t\t\tstruct i40iw_sc_cqp *cqp,\n-\t\t\t\tu64 scratch,\n-\t\t\t\tu8 entry_idx,\n-\t\t\t\tu8 ignore_ref_count,\n-\t\t\t\tbool post_sq)\n-{\n-\tu64 *wqe;\n-\tu64 header;\n-\n-\twqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);\n-\tif (!wqe)\n-\t\treturn I40IW_ERR_RING_FULL;\n-\theader = LS_64(entry_idx, I40IW_CQPSQ_MLIPA_IPTABLEIDX) |\n-\t\t LS_64(I40IW_CQP_OP_MANAGE_LOC_MAC_IP_TABLE, I40IW_CQPSQ_OPCODE) |\n-\t\t LS_64(1, I40IW_CQPSQ_MLIPA_FREEENTRY) |\n-\t\t LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID) |\n-\t\t LS_64(ignore_ref_count, I40IW_CQPSQ_MLIPA_IGNORE_REF_CNT);\n-\n-\ti40iw_insert_wqe_hdr(wqe, header);\n-\n-\ti40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, \"DEL_LOCAL_MAC_IPADDR WQE\",\n-\t\t\twqe, I40IW_CQP_WQE_SIZE * 8);\n-\n-\tif (post_sq)\n-\t\ti40iw_sc_cqp_post_sq(cqp);\n-\treturn 0;\n-}\n-\n-/**\n- * i40iw_sc_cqp_nop - send a nop wqe\n- * @cqp: struct for cqp hw\n- * @scratch: u64 saved to be used during cqp completion\n- * @post_sq: flag for cqp db to ring\n- */\n-static enum i40iw_status_code i40iw_sc_cqp_nop(struct i40iw_sc_cqp *cqp,\n-\t\t\t\t\t u64 scratch,\n-\t\t\t\t\t bool post_sq)\n-{\n-\tu64 *wqe;\n-\tu64 header;\n-\n-\twqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);\n-\tif (!wqe)\n-\t\treturn I40IW_ERR_RING_FULL;\n-\theader = 
LS_64(I40IW_CQP_OP_NOP, I40IW_CQPSQ_OPCODE) |\n-\t\t LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);\n-\ti40iw_insert_wqe_hdr(wqe, header);\n-\ti40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, \"NOP WQE\",\n-\t\t\twqe, I40IW_CQP_WQE_SIZE * 8);\n-\n-\tif (post_sq)\n-\t\ti40iw_sc_cqp_post_sq(cqp);\n-\treturn 0;\n-}\n-\n-/**\n- * i40iw_sc_ceq_init - initialize ceq\n- * @ceq: ceq sc structure\n- * @info: ceq initialization info\n- */\n-static enum i40iw_status_code i40iw_sc_ceq_init(struct i40iw_sc_ceq *ceq,\n-\t\t\t\t\t\tstruct i40iw_ceq_init_info *info)\n-{\n-\tu32 pble_obj_cnt;\n-\n-\tif ((info->elem_cnt < I40IW_MIN_CEQ_ENTRIES) ||\n-\t (info->elem_cnt > I40IW_MAX_CEQ_ENTRIES))\n-\t\treturn I40IW_ERR_INVALID_SIZE;\n-\n-\tif (info->ceq_id >= I40IW_MAX_CEQID)\n-\t\treturn I40IW_ERR_INVALID_CEQ_ID;\n-\n-\tpble_obj_cnt = info->dev->hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].cnt;\n-\n-\tif (info->virtual_map && (info->first_pm_pbl_idx >= pble_obj_cnt))\n-\t\treturn I40IW_ERR_INVALID_PBLE_INDEX;\n-\n-\tceq->size = sizeof(*ceq);\n-\tceq->ceqe_base = (struct i40iw_ceqe *)info->ceqe_base;\n-\tceq->ceq_id = info->ceq_id;\n-\tceq->dev = info->dev;\n-\tceq->elem_cnt = info->elem_cnt;\n-\tceq->ceq_elem_pa = info->ceqe_pa;\n-\tceq->virtual_map = info->virtual_map;\n-\n-\tceq->pbl_chunk_size = (ceq->virtual_map ? info->pbl_chunk_size : 0);\n-\tceq->first_pm_pbl_idx = (ceq->virtual_map ? info->first_pm_pbl_idx : 0);\n-\tceq->pbl_list = (ceq->virtual_map ? info->pbl_list : NULL);\n-\n-\tceq->tph_en = info->tph_en;\n-\tceq->tph_val = info->tph_val;\n-\tceq->polarity = 1;\n-\tI40IW_RING_INIT(ceq->ceq_ring, ceq->elem_cnt);\n-\tceq->dev->ceq[info->ceq_id] = ceq;\n-\n-\treturn 0;\n-}\n-\n-/**\n- * i40iw_sc_ceq_create - create ceq wqe\n- * @ceq: ceq sc structure\n- * @scratch: u64 saved to be used during cqp completion\n- * @post_sq: flag for cqp db to ring\n- */\n-static enum i40iw_status_code i40iw_sc_ceq_create(struct i40iw_sc_ceq *ceq,\n-\t\t\t\t\t\t u64 scratch,\n-\t\t\t\t\t\t bool post_sq)\n-{\n-\tstruct i40iw_sc_cqp *cqp;\n-\tu64 *wqe;\n-\tu64 header;\n-\n-\tcqp = ceq->dev->cqp;\n-\twqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);\n-\tif (!wqe)\n-\t\treturn I40IW_ERR_RING_FULL;\n-\tset_64bit_val(wqe, 16, ceq->elem_cnt);\n-\tset_64bit_val(wqe, 32, (ceq->virtual_map ? 0 : ceq->ceq_elem_pa));\n-\tset_64bit_val(wqe, 48, (ceq->virtual_map ? 
ceq->first_pm_pbl_idx : 0));\n-\tset_64bit_val(wqe, 56, LS_64(ceq->tph_val, I40IW_CQPSQ_TPHVAL));\n-\n-\theader = ceq->ceq_id |\n-\t\t LS_64(I40IW_CQP_OP_CREATE_CEQ, I40IW_CQPSQ_OPCODE) |\n-\t\t LS_64(ceq->pbl_chunk_size, I40IW_CQPSQ_CEQ_LPBLSIZE) |\n-\t\t LS_64(ceq->virtual_map, I40IW_CQPSQ_CEQ_VMAP) |\n-\t\t LS_64(ceq->tph_en, I40IW_CQPSQ_TPHEN) |\n-\t\t LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);\n-\n-\ti40iw_insert_wqe_hdr(wqe, header);\n-\n-\ti40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, \"CEQ_CREATE WQE\",\n-\t\t\twqe, I40IW_CQP_WQE_SIZE * 8);\n-\n-\tif (post_sq)\n-\t\ti40iw_sc_cqp_post_sq(cqp);\n-\treturn 0;\n-}\n-\n-/**\n- * i40iw_sc_cceq_create_done - poll for control ceq wqe to complete\n- * @ceq: ceq sc structure\n- */\n-static enum i40iw_status_code i40iw_sc_cceq_create_done(struct i40iw_sc_ceq *ceq)\n-{\n-\tstruct i40iw_sc_cqp *cqp;\n-\n-\tcqp = ceq->dev->cqp;\n-\treturn i40iw_sc_poll_for_cqp_op_done(cqp, I40IW_CQP_OP_CREATE_CEQ, NULL);\n-}\n-\n-/**\n- * i40iw_sc_cceq_destroy_done - poll for destroy cceq to complete\n- * @ceq: ceq sc structure\n- */\n-static enum i40iw_status_code i40iw_sc_cceq_destroy_done(struct i40iw_sc_ceq *ceq)\n-{\n-\tstruct i40iw_sc_cqp *cqp;\n-\n-\tcqp = ceq->dev->cqp;\n-\tcqp->process_cqp_sds = i40iw_update_sds_noccq;\n-\treturn i40iw_sc_poll_for_cqp_op_done(cqp, I40IW_CQP_OP_DESTROY_CEQ, NULL);\n-}\n-\n-/**\n- * i40iw_sc_cceq_create - create cceq\n- * @ceq: ceq sc structure\n- * @scratch: u64 saved to be used during cqp completion\n- */\n-static enum i40iw_status_code i40iw_sc_cceq_create(struct i40iw_sc_ceq *ceq, u64 scratch)\n-{\n-\tenum i40iw_status_code ret_code;\n-\n-\tret_code = i40iw_sc_ceq_create(ceq, scratch, true);\n-\tif (!ret_code)\n-\t\tret_code = i40iw_sc_cceq_create_done(ceq);\n-\treturn ret_code;\n-}\n-\n-/**\n- * i40iw_sc_ceq_destroy - destroy ceq\n- * @ceq: ceq sc structure\n- * @scratch: u64 saved to be used during cqp completion\n- * @post_sq: flag for cqp db to ring\n- */\n-static enum i40iw_status_code i40iw_sc_ceq_destroy(struct i40iw_sc_ceq *ceq,\n-\t\t\t\t\t\t u64 scratch,\n-\t\t\t\t\t\t bool post_sq)\n-{\n-\tstruct i40iw_sc_cqp *cqp;\n-\tu64 *wqe;\n-\tu64 header;\n-\n-\tcqp = ceq->dev->cqp;\n-\twqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);\n-\tif (!wqe)\n-\t\treturn I40IW_ERR_RING_FULL;\n-\tset_64bit_val(wqe, 16, ceq->elem_cnt);\n-\tset_64bit_val(wqe, 48, ceq->first_pm_pbl_idx);\n-\theader = ceq->ceq_id |\n-\t\t LS_64(I40IW_CQP_OP_DESTROY_CEQ, I40IW_CQPSQ_OPCODE) |\n-\t\t LS_64(ceq->pbl_chunk_size, I40IW_CQPSQ_CEQ_LPBLSIZE) |\n-\t\t LS_64(ceq->virtual_map, I40IW_CQPSQ_CEQ_VMAP) |\n-\t\t LS_64(ceq->tph_en, I40IW_CQPSQ_TPHEN) |\n-\t\t LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);\n-\ti40iw_insert_wqe_hdr(wqe, header);\n-\ti40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, \"CEQ_DESTROY WQE\",\n-\t\t\twqe, I40IW_CQP_WQE_SIZE * 8);\n-\n-\tif (post_sq)\n-\t\ti40iw_sc_cqp_post_sq(cqp);\n-\treturn 0;\n-}\n-\n-/**\n- * i40iw_sc_process_ceq - process ceq\n- * @dev: sc device struct\n- * @ceq: ceq sc structure\n- */\n-static void *i40iw_sc_process_ceq(struct i40iw_sc_dev *dev, struct i40iw_sc_ceq *ceq)\n-{\n-\tu64 temp;\n-\tu64 *ceqe;\n-\tstruct i40iw_sc_cq *cq = NULL;\n-\tu8 polarity;\n-\n-\tceqe = (u64 *)I40IW_GET_CURRENT_CEQ_ELEMENT(ceq);\n-\tget_64bit_val(ceqe, 0, &temp);\n-\tpolarity = (u8)RS_64(temp, I40IW_CEQE_VALID);\n-\tif (polarity != ceq->polarity)\n-\t\treturn cq;\n-\n-\tcq = (struct i40iw_sc_cq *)(unsigned long)LS_64_1(temp, 1);\n-\n-\tI40IW_RING_MOVE_TAIL(ceq->ceq_ring);\n-\tif (I40IW_RING_GETCURRENT_TAIL(ceq->ceq_ring) == 
0)\n-\t\tceq->polarity ^= 1;\n-\n-\tif (dev->is_pf)\n-\t\ti40iw_wr32(dev->hw, I40E_PFPE_CQACK, cq->cq_uk.cq_id);\n-\telse\n-\t\ti40iw_wr32(dev->hw, I40E_VFPE_CQACK1, cq->cq_uk.cq_id);\n-\n-\treturn cq;\n-}\n-\n-/**\n- * i40iw_sc_aeq_init - initialize aeq\n- * @aeq: aeq structure ptr\n- * @info: aeq initialization info\n- */\n-static enum i40iw_status_code i40iw_sc_aeq_init(struct i40iw_sc_aeq *aeq,\n-\t\t\t\t\t\tstruct i40iw_aeq_init_info *info)\n-{\n-\tu32 pble_obj_cnt;\n-\n-\tif ((info->elem_cnt < I40IW_MIN_AEQ_ENTRIES) ||\n-\t (info->elem_cnt > I40IW_MAX_AEQ_ENTRIES))\n-\t\treturn I40IW_ERR_INVALID_SIZE;\n-\tpble_obj_cnt = info->dev->hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].cnt;\n-\n-\tif (info->virtual_map && (info->first_pm_pbl_idx >= pble_obj_cnt))\n-\t\treturn I40IW_ERR_INVALID_PBLE_INDEX;\n-\n-\taeq->size = sizeof(*aeq);\n-\taeq->polarity = 1;\n-\taeq->aeqe_base = (struct i40iw_sc_aeqe *)info->aeqe_base;\n-\taeq->dev = info->dev;\n-\taeq->elem_cnt = info->elem_cnt;\n-\n-\taeq->aeq_elem_pa = info->aeq_elem_pa;\n-\tI40IW_RING_INIT(aeq->aeq_ring, aeq->elem_cnt);\n-\tinfo->dev->aeq = aeq;\n-\n-\taeq->virtual_map = info->virtual_map;\n-\taeq->pbl_list = (aeq->virtual_map ? info->pbl_list : NULL);\n-\taeq->pbl_chunk_size = (aeq->virtual_map ? info->pbl_chunk_size : 0);\n-\taeq->first_pm_pbl_idx = (aeq->virtual_map ? info->first_pm_pbl_idx : 0);\n-\tinfo->dev->aeq = aeq;\n-\treturn 0;\n-}\n-\n-/**\n- * i40iw_sc_aeq_create - create aeq\n- * @aeq: aeq structure ptr\n- * @scratch: u64 saved to be used during cqp completion\n- * @post_sq: flag for cqp db to ring\n- */\n-static enum i40iw_status_code i40iw_sc_aeq_create(struct i40iw_sc_aeq *aeq,\n-\t\t\t\t\t\t u64 scratch,\n-\t\t\t\t\t\t bool post_sq)\n-{\n-\tu64 *wqe;\n-\tstruct i40iw_sc_cqp *cqp;\n-\tu64 header;\n-\n-\tcqp = aeq->dev->cqp;\n-\twqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);\n-\tif (!wqe)\n-\t\treturn I40IW_ERR_RING_FULL;\n-\tset_64bit_val(wqe, 16, aeq->elem_cnt);\n-\tset_64bit_val(wqe, 32,\n-\t\t (aeq->virtual_map ? 0 : aeq->aeq_elem_pa));\n-\tset_64bit_val(wqe, 48,\n-\t\t (aeq->virtual_map ? 
aeq->first_pm_pbl_idx : 0));\n-\n-\theader = LS_64(I40IW_CQP_OP_CREATE_AEQ, I40IW_CQPSQ_OPCODE) |\n-\t\t LS_64(aeq->pbl_chunk_size, I40IW_CQPSQ_AEQ_LPBLSIZE) |\n-\t\t LS_64(aeq->virtual_map, I40IW_CQPSQ_AEQ_VMAP) |\n-\t\t LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);\n-\n-\ti40iw_insert_wqe_hdr(wqe, header);\n-\ti40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, \"AEQ_CREATE WQE\",\n-\t\t\twqe, I40IW_CQP_WQE_SIZE * 8);\n-\tif (post_sq)\n-\t\ti40iw_sc_cqp_post_sq(cqp);\n-\treturn 0;\n-}\n-\n-/**\n- * i40iw_sc_aeq_destroy - destroy aeq during close\n- * @aeq: aeq structure ptr\n- * @scratch: u64 saved to be used during cqp completion\n- * @post_sq: flag for cqp db to ring\n- */\n-static enum i40iw_status_code i40iw_sc_aeq_destroy(struct i40iw_sc_aeq *aeq,\n-\t\t\t\t\t\t u64 scratch,\n-\t\t\t\t\t\t bool post_sq)\n-{\n-\tu64 *wqe;\n-\tstruct i40iw_sc_cqp *cqp;\n-\tu64 header;\n-\n-\tcqp = aeq->dev->cqp;\n-\twqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);\n-\tif (!wqe)\n-\t\treturn I40IW_ERR_RING_FULL;\n-\tset_64bit_val(wqe, 16, aeq->elem_cnt);\n-\tset_64bit_val(wqe, 48, aeq->first_pm_pbl_idx);\n-\theader = LS_64(I40IW_CQP_OP_DESTROY_AEQ, I40IW_CQPSQ_OPCODE) |\n-\t\t LS_64(aeq->pbl_chunk_size, I40IW_CQPSQ_AEQ_LPBLSIZE) |\n-\t\t LS_64(aeq->virtual_map, I40IW_CQPSQ_AEQ_VMAP) |\n-\t\t LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);\n-\ti40iw_insert_wqe_hdr(wqe, header);\n-\n-\ti40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, \"AEQ_DESTROY WQE\",\n-\t\t\twqe, I40IW_CQP_WQE_SIZE * 8);\n-\tif (post_sq)\n-\t\ti40iw_sc_cqp_post_sq(cqp);\n-\treturn 0;\n-}\n-\n-/**\n- * i40iw_sc_get_next_aeqe - get next aeq entry\n- * @aeq: aeq structure ptr\n- * @info: aeqe info to be returned\n- */\n-static enum i40iw_status_code i40iw_sc_get_next_aeqe(struct i40iw_sc_aeq *aeq,\n-\t\t\t\t\t\t struct i40iw_aeqe_info *info)\n-{\n-\tu64 temp, compl_ctx;\n-\tu64 *aeqe;\n-\tu16 wqe_idx;\n-\tu8 ae_src;\n-\tu8 polarity;\n-\n-\taeqe = (u64 *)I40IW_GET_CURRENT_AEQ_ELEMENT(aeq);\n-\tget_64bit_val(aeqe, 0, &compl_ctx);\n-\tget_64bit_val(aeqe, 8, &temp);\n-\tpolarity = (u8)RS_64(temp, I40IW_AEQE_VALID);\n-\n-\tif (aeq->polarity != polarity)\n-\t\treturn I40IW_ERR_QUEUE_EMPTY;\n-\n-\ti40iw_debug_buf(aeq->dev, I40IW_DEBUG_WQE, \"AEQ_ENTRY\", aeqe, 16);\n-\n-\tae_src = (u8)RS_64(temp, I40IW_AEQE_AESRC);\n-\twqe_idx = (u16)RS_64(temp, I40IW_AEQE_WQDESCIDX);\n-\tinfo->qp_cq_id = (u32)RS_64(temp, I40IW_AEQE_QPCQID);\n-\tinfo->ae_id = (u16)RS_64(temp, I40IW_AEQE_AECODE);\n-\tinfo->tcp_state = (u8)RS_64(temp, I40IW_AEQE_TCPSTATE);\n-\tinfo->iwarp_state = (u8)RS_64(temp, I40IW_AEQE_IWSTATE);\n-\tinfo->q2_data_written = (u8)RS_64(temp, I40IW_AEQE_Q2DATA);\n-\tinfo->aeqe_overflow = (bool)RS_64(temp, I40IW_AEQE_OVERFLOW);\n-\n-\tswitch (info->ae_id) {\n-\tcase I40IW_AE_PRIV_OPERATION_DENIED:\n-\tcase I40IW_AE_UDA_XMIT_DGRAM_TOO_LONG:\n-\tcase I40IW_AE_UDA_XMIT_DGRAM_TOO_SHORT:\n-\tcase I40IW_AE_BAD_CLOSE:\n-\tcase I40IW_AE_RDMAP_ROE_BAD_LLP_CLOSE:\n-\tcase I40IW_AE_RDMA_READ_WHILE_ORD_ZERO:\n-\tcase I40IW_AE_STAG_ZERO_INVALID:\n-\tcase I40IW_AE_IB_RREQ_AND_Q1_FULL:\n-\tcase I40IW_AE_WQE_UNEXPECTED_OPCODE:\n-\tcase I40IW_AE_DDP_UBE_INVALID_DDP_VERSION:\n-\tcase I40IW_AE_DDP_UBE_INVALID_MO:\n-\tcase I40IW_AE_DDP_UBE_INVALID_QN:\n-\tcase I40IW_AE_DDP_NO_L_BIT:\n-\tcase I40IW_AE_RDMAP_ROE_INVALID_RDMAP_VERSION:\n-\tcase I40IW_AE_RDMAP_ROE_UNEXPECTED_OPCODE:\n-\tcase I40IW_AE_ROE_INVALID_RDMA_READ_REQUEST:\n-\tcase I40IW_AE_ROE_INVALID_RDMA_WRITE_OR_READ_RESP:\n-\tcase I40IW_AE_INVALID_ARP_ENTRY:\n-\tcase I40IW_AE_INVALID_TCP_OPTION_RCVD:\n-\tcase 
I40IW_AE_STALE_ARP_ENTRY:\n-\tcase I40IW_AE_LLP_CLOSE_COMPLETE:\n-\tcase I40IW_AE_LLP_CONNECTION_RESET:\n-\tcase I40IW_AE_LLP_FIN_RECEIVED:\n-\tcase I40IW_AE_LLP_RECEIVED_MPA_CRC_ERROR:\n-\tcase I40IW_AE_LLP_SEGMENT_TOO_SMALL:\n-\tcase I40IW_AE_LLP_SYN_RECEIVED:\n-\tcase I40IW_AE_LLP_TERMINATE_RECEIVED:\n-\tcase I40IW_AE_LLP_TOO_MANY_RETRIES:\n-\tcase I40IW_AE_LLP_DOUBT_REACHABILITY:\n-\tcase I40IW_AE_RESET_SENT:\n-\tcase I40IW_AE_TERMINATE_SENT:\n-\tcase I40IW_AE_RESET_NOT_SENT:\n-\tcase I40IW_AE_LCE_QP_CATASTROPHIC:\n-\tcase I40IW_AE_QP_SUSPEND_COMPLETE:\n-\t\tinfo->qp = true;\n-\t\tinfo->compl_ctx = compl_ctx;\n-\t\tae_src = I40IW_AE_SOURCE_RSVD;\n-\t\tbreak;\n-\tcase I40IW_AE_LCE_CQ_CATASTROPHIC:\n-\t\tinfo->cq = true;\n-\t\tinfo->compl_ctx = LS_64_1(compl_ctx, 1);\n-\t\tae_src = I40IW_AE_SOURCE_RSVD;\n-\t\tbreak;\n-\t}\n-\n-\tswitch (ae_src) {\n-\tcase I40IW_AE_SOURCE_RQ:\n-\tcase I40IW_AE_SOURCE_RQ_0011:\n-\t\tinfo->qp = true;\n-\t\tinfo->wqe_idx = wqe_idx;\n-\t\tinfo->compl_ctx = compl_ctx;\n-\t\tbreak;\n-\tcase I40IW_AE_SOURCE_CQ:\n-\tcase I40IW_AE_SOURCE_CQ_0110:\n-\tcase I40IW_AE_SOURCE_CQ_1010:\n-\tcase I40IW_AE_SOURCE_CQ_1110:\n-\t\tinfo->cq = true;\n-\t\tinfo->compl_ctx = LS_64_1(compl_ctx, 1);\n-\t\tbreak;\n-\tcase I40IW_AE_SOURCE_SQ:\n-\tcase I40IW_AE_SOURCE_SQ_0111:\n-\t\tinfo->qp = true;\n-\t\tinfo->sq = true;\n-\t\tinfo->wqe_idx = wqe_idx;\n-\t\tinfo->compl_ctx = compl_ctx;\n-\t\tbreak;\n-\tcase I40IW_AE_SOURCE_IN_RR_WR:\n-\tcase I40IW_AE_SOURCE_IN_RR_WR_1011:\n-\t\tinfo->qp = true;\n-\t\tinfo->compl_ctx = compl_ctx;\n-\t\tinfo->in_rdrsp_wr = true;\n-\t\tbreak;\n-\tcase I40IW_AE_SOURCE_OUT_RR:\n-\tcase I40IW_AE_SOURCE_OUT_RR_1111:\n-\t\tinfo->qp = true;\n-\t\tinfo->compl_ctx = compl_ctx;\n-\t\tinfo->out_rdrsp = true;\n-\t\tbreak;\n-\tcase I40IW_AE_SOURCE_RSVD:\n-\t\t/* fallthrough */\n-\tdefault:\n-\t\tbreak;\n-\t}\n-\tI40IW_RING_MOVE_TAIL(aeq->aeq_ring);\n-\tif (I40IW_RING_GETCURRENT_TAIL(aeq->aeq_ring) == 0)\n-\t\taeq->polarity ^= 1;\n-\treturn 0;\n-}\n-\n-/**\n- * i40iw_sc_repost_aeq_entries - repost completed aeq entries\n- * @dev: sc device struct\n- * @count: allocate count\n- */\n-static enum i40iw_status_code i40iw_sc_repost_aeq_entries(struct i40iw_sc_dev *dev,\n-\t\t\t\t\t\t\t u32 count)\n-{\n-\n-\tif (dev->is_pf)\n-\t\ti40iw_wr32(dev->hw, I40E_PFPE_AEQALLOC, count);\n-\telse\n-\t\ti40iw_wr32(dev->hw, I40E_VFPE_AEQALLOC1, count);\n-\n-\treturn 0;\n-}\n-\n-/**\n- * i40iw_sc_aeq_create_done - create aeq\n- * @aeq: aeq structure ptr\n- */\n-static enum i40iw_status_code i40iw_sc_aeq_create_done(struct i40iw_sc_aeq *aeq)\n-{\n-\tstruct i40iw_sc_cqp *cqp;\n-\n-\tcqp = aeq->dev->cqp;\n-\treturn i40iw_sc_poll_for_cqp_op_done(cqp, I40IW_CQP_OP_CREATE_AEQ, NULL);\n-}\n-\n-/**\n- * i40iw_sc_aeq_destroy_done - destroy of aeq during close\n- * @aeq: aeq structure ptr\n- */\n-static enum i40iw_status_code i40iw_sc_aeq_destroy_done(struct i40iw_sc_aeq *aeq)\n-{\n-\tstruct i40iw_sc_cqp *cqp;\n-\n-\tcqp = aeq->dev->cqp;\n-\treturn i40iw_sc_poll_for_cqp_op_done(cqp, I40IW_CQP_OP_DESTROY_AEQ, NULL);\n-}\n-\n-/**\n- * i40iw_sc_ccq_init - initialize control cq\n- * @cq: sc's cq ctruct\n- * @info: info for control cq initialization\n- */\n-static enum i40iw_status_code i40iw_sc_ccq_init(struct i40iw_sc_cq *cq,\n-\t\t\t\t\t\tstruct i40iw_ccq_init_info *info)\n-{\n-\tu32 pble_obj_cnt;\n-\n-\tif (info->num_elem < I40IW_MIN_CQ_SIZE || info->num_elem > I40IW_MAX_CQ_SIZE)\n-\t\treturn I40IW_ERR_INVALID_SIZE;\n-\n-\tif (info->ceq_id > I40IW_MAX_CEQID)\n-\t\treturn 
I40IW_ERR_INVALID_CEQ_ID;\n-\n-\tpble_obj_cnt = info->dev->hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].cnt;\n-\n-\tif (info->virtual_map && (info->first_pm_pbl_idx >= pble_obj_cnt))\n-\t\treturn I40IW_ERR_INVALID_PBLE_INDEX;\n-\n-\tcq->cq_pa = info->cq_pa;\n-\tcq->cq_uk.cq_base = info->cq_base;\n-\tcq->shadow_area_pa = info->shadow_area_pa;\n-\tcq->cq_uk.shadow_area = info->shadow_area;\n-\tcq->shadow_read_threshold = info->shadow_read_threshold;\n-\tcq->dev = info->dev;\n-\tcq->ceq_id = info->ceq_id;\n-\tcq->cq_uk.cq_size = info->num_elem;\n-\tcq->cq_type = I40IW_CQ_TYPE_CQP;\n-\tcq->ceqe_mask = info->ceqe_mask;\n-\tI40IW_RING_INIT(cq->cq_uk.cq_ring, info->num_elem);\n-\n-\tcq->cq_uk.cq_id = 0; /* control cq is id 0 always */\n-\tcq->ceq_id_valid = info->ceq_id_valid;\n-\tcq->tph_en = info->tph_en;\n-\tcq->tph_val = info->tph_val;\n-\tcq->cq_uk.avoid_mem_cflct = info->avoid_mem_cflct;\n-\n-\tcq->pbl_list = info->pbl_list;\n-\tcq->virtual_map = info->virtual_map;\n-\tcq->pbl_chunk_size = info->pbl_chunk_size;\n-\tcq->first_pm_pbl_idx = info->first_pm_pbl_idx;\n-\tcq->cq_uk.polarity = true;\n-\n-\t/* following are only for iw cqs so initialize them to zero */\n-\tcq->cq_uk.cqe_alloc_reg = NULL;\n-\tinfo->dev->ccq = cq;\n-\treturn 0;\n-}\n-\n-/**\n- * i40iw_sc_ccq_create_done - poll cqp for ccq create\n- * @ccq: ccq sc struct\n- */\n-static enum i40iw_status_code i40iw_sc_ccq_create_done(struct i40iw_sc_cq *ccq)\n-{\n-\tstruct i40iw_sc_cqp *cqp;\n-\n-\tcqp = ccq->dev->cqp;\n-\treturn\ti40iw_sc_poll_for_cqp_op_done(cqp, I40IW_CQP_OP_CREATE_CQ, NULL);\n-}\n-\n-/**\n- * i40iw_sc_ccq_create - create control cq\n- * @ccq: ccq sc struct\n- * @scratch: u64 saved to be used during cqp completion\n- * @check_overflow: overlow flag for ccq\n- * @post_sq: flag for cqp db to ring\n- */\n-static enum i40iw_status_code i40iw_sc_ccq_create(struct i40iw_sc_cq *ccq,\n-\t\t\t\t\t\t u64 scratch,\n-\t\t\t\t\t\t bool check_overflow,\n-\t\t\t\t\t\t bool post_sq)\n-{\n-\tu64 *wqe;\n-\tstruct i40iw_sc_cqp *cqp;\n-\tu64 header;\n-\tenum i40iw_status_code ret_code;\n-\n-\tcqp = ccq->dev->cqp;\n-\twqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);\n-\tif (!wqe)\n-\t\treturn I40IW_ERR_RING_FULL;\n-\tset_64bit_val(wqe, 0, ccq->cq_uk.cq_size);\n-\tset_64bit_val(wqe, 8, RS_64_1(ccq, 1));\n-\tset_64bit_val(wqe, 16,\n-\t\t LS_64(ccq->shadow_read_threshold, I40IW_CQPSQ_CQ_SHADOW_READ_THRESHOLD));\n-\tset_64bit_val(wqe, 32, (ccq->virtual_map ? 0 : ccq->cq_pa));\n-\tset_64bit_val(wqe, 40, ccq->shadow_area_pa);\n-\tset_64bit_val(wqe, 48,\n-\t\t (ccq->virtual_map ? ccq->first_pm_pbl_idx : 0));\n-\tset_64bit_val(wqe, 56,\n-\t\t LS_64(ccq->tph_val, I40IW_CQPSQ_TPHVAL));\n-\n-\theader = ccq->cq_uk.cq_id |\n-\t\t LS_64((ccq->ceq_id_valid ? 
ccq->ceq_id : 0), I40IW_CQPSQ_CQ_CEQID) |\n-\t\t LS_64(I40IW_CQP_OP_CREATE_CQ, I40IW_CQPSQ_OPCODE) |\n-\t\t LS_64(ccq->pbl_chunk_size, I40IW_CQPSQ_CQ_LPBLSIZE) |\n-\t\t LS_64(check_overflow, I40IW_CQPSQ_CQ_CHKOVERFLOW) |\n-\t\t LS_64(ccq->virtual_map, I40IW_CQPSQ_CQ_VIRTMAP) |\n-\t\t LS_64(ccq->ceqe_mask, I40IW_CQPSQ_CQ_ENCEQEMASK) |\n-\t\t LS_64(ccq->ceq_id_valid, I40IW_CQPSQ_CQ_CEQIDVALID) |\n-\t\t LS_64(ccq->tph_en, I40IW_CQPSQ_TPHEN) |\n-\t\t LS_64(ccq->cq_uk.avoid_mem_cflct, I40IW_CQPSQ_CQ_AVOIDMEMCNFLCT) |\n-\t\t LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);\n-\n-\ti40iw_insert_wqe_hdr(wqe, header);\n-\n-\ti40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, \"CCQ_CREATE WQE\",\n-\t\t\twqe, I40IW_CQP_WQE_SIZE * 8);\n-\n-\tif (post_sq) {\n-\t\ti40iw_sc_cqp_post_sq(cqp);\n-\t\tret_code = i40iw_sc_ccq_create_done(ccq);\n-\t\tif (ret_code)\n-\t\t\treturn ret_code;\n-\t}\n-\tcqp->process_cqp_sds = i40iw_cqp_sds_cmd;\n-\n-\treturn 0;\n-}\n-\n-/**\n- * i40iw_sc_ccq_destroy - destroy ccq during close\n- * @ccq: ccq sc struct\n- * @scratch: u64 saved to be used during cqp completion\n- * @post_sq: flag for cqp db to ring\n- */\n-static enum i40iw_status_code i40iw_sc_ccq_destroy(struct i40iw_sc_cq *ccq,\n-\t\t\t\t\t\t u64 scratch,\n-\t\t\t\t\t\t bool post_sq)\n-{\n-\tstruct i40iw_sc_cqp *cqp;\n-\tu64 *wqe;\n-\tu64 header;\n-\tenum i40iw_status_code ret_code = 0;\n-\tu32 tail, val, error;\n-\n-\tcqp = ccq->dev->cqp;\n-\twqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);\n-\tif (!wqe)\n-\t\treturn I40IW_ERR_RING_FULL;\n-\tset_64bit_val(wqe, 0, ccq->cq_uk.cq_size);\n-\tset_64bit_val(wqe, 8, RS_64_1(ccq, 1));\n-\tset_64bit_val(wqe, 40, ccq->shadow_area_pa);\n-\n-\theader = ccq->cq_uk.cq_id |\n-\t\t LS_64((ccq->ceq_id_valid ? ccq->ceq_id : 0), I40IW_CQPSQ_CQ_CEQID) |\n-\t\t LS_64(I40IW_CQP_OP_DESTROY_CQ, I40IW_CQPSQ_OPCODE) |\n-\t\t LS_64(ccq->ceqe_mask, I40IW_CQPSQ_CQ_ENCEQEMASK) |\n-\t\t LS_64(ccq->ceq_id_valid, I40IW_CQPSQ_CQ_CEQIDVALID) |\n-\t\t LS_64(ccq->tph_en, I40IW_CQPSQ_TPHEN) |\n-\t\t LS_64(ccq->cq_uk.avoid_mem_cflct, I40IW_CQPSQ_CQ_AVOIDMEMCNFLCT) |\n-\t\t LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);\n-\n-\ti40iw_insert_wqe_hdr(wqe, header);\n-\n-\ti40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, \"CCQ_DESTROY WQE\",\n-\t\t\twqe, I40IW_CQP_WQE_SIZE * 8);\n-\n-\ti40iw_get_cqp_reg_info(cqp, &val, &tail, &error);\n-\tif (error)\n-\t\treturn I40IW_ERR_CQP_COMPL_ERROR;\n-\n-\tif (post_sq) {\n-\t\ti40iw_sc_cqp_post_sq(cqp);\n-\t\tret_code = i40iw_cqp_poll_registers(cqp, tail, 1000);\n-\t}\n-\n-\tcqp->process_cqp_sds = i40iw_update_sds_noccq;\n-\n-\treturn ret_code;\n-}\n-\n-/**\n- * i40iw_sc_cq_init - initialize completion q\n- * @cq: cq struct\n- * @info: cq initialization info\n- */\n-static enum i40iw_status_code i40iw_sc_cq_init(struct i40iw_sc_cq *cq,\n-\t\t\t\t\t struct i40iw_cq_init_info *info)\n-{\n-\tu32 __iomem *cqe_alloc_reg = NULL;\n-\tenum i40iw_status_code ret_code;\n-\tu32 pble_obj_cnt;\n-\tu32 arm_offset;\n-\n-\tpble_obj_cnt = info->dev->hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].cnt;\n-\n-\tif (info->virtual_map && (info->first_pm_pbl_idx >= pble_obj_cnt))\n-\t\treturn I40IW_ERR_INVALID_PBLE_INDEX;\n-\n-\tcq->cq_pa = info->cq_base_pa;\n-\tcq->dev = info->dev;\n-\tcq->ceq_id = info->ceq_id;\n-\tarm_offset = (info->dev->is_pf) ? 
I40E_PFPE_CQARM : I40E_VFPE_CQARM1;\n-\tif (i40iw_get_hw_addr(cq->dev))\n-\t\tcqe_alloc_reg = (u32 __iomem *)(i40iw_get_hw_addr(cq->dev) +\n-\t\t\t\t\t arm_offset);\n-\tinfo->cq_uk_init_info.cqe_alloc_reg = cqe_alloc_reg;\n-\tret_code = i40iw_cq_uk_init(&cq->cq_uk, &info->cq_uk_init_info);\n-\tif (ret_code)\n-\t\treturn ret_code;\n-\tcq->virtual_map = info->virtual_map;\n-\tcq->pbl_chunk_size = info->pbl_chunk_size;\n-\tcq->ceqe_mask = info->ceqe_mask;\n-\tcq->cq_type = (info->type) ? info->type : I40IW_CQ_TYPE_IWARP;\n-\n-\tcq->shadow_area_pa = info->shadow_area_pa;\n-\tcq->shadow_read_threshold = info->shadow_read_threshold;\n-\n-\tcq->ceq_id_valid = info->ceq_id_valid;\n-\tcq->tph_en = info->tph_en;\n-\tcq->tph_val = info->tph_val;\n-\n-\tcq->first_pm_pbl_idx = info->first_pm_pbl_idx;\n-\n-\treturn 0;\n-}\n-\n-/**\n- * i40iw_sc_cq_create - create completion q\n- * @cq: cq struct\n- * @scratch: u64 saved to be used during cqp completion\n- * @check_overflow: flag for overflow check\n- * @post_sq: flag for cqp db to ring\n- */\n-static enum i40iw_status_code i40iw_sc_cq_create(struct i40iw_sc_cq *cq,\n-\t\t\t\t\t\t u64 scratch,\n-\t\t\t\t\t\t bool check_overflow,\n-\t\t\t\t\t\t bool post_sq)\n-{\n-\tu64 *wqe;\n-\tstruct i40iw_sc_cqp *cqp;\n-\tu64 header;\n-\n-\tif (cq->cq_uk.cq_id > I40IW_MAX_CQID)\n-\t\treturn I40IW_ERR_INVALID_CQ_ID;\n-\n-\tif (cq->ceq_id > I40IW_MAX_CEQID)\n-\t\treturn I40IW_ERR_INVALID_CEQ_ID;\n-\n-\tcqp = cq->dev->cqp;\n-\twqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);\n-\tif (!wqe)\n-\t\treturn I40IW_ERR_RING_FULL;\n-\n-\tset_64bit_val(wqe, 0, cq->cq_uk.cq_size);\n-\tset_64bit_val(wqe, 8, RS_64_1(cq, 1));\n-\tset_64bit_val(wqe,\n-\t\t 16,\n-\t\t LS_64(cq->shadow_read_threshold, I40IW_CQPSQ_CQ_SHADOW_READ_THRESHOLD));\n-\n-\tset_64bit_val(wqe, 32, (cq->virtual_map ? 0 : cq->cq_pa));\n-\n-\tset_64bit_val(wqe, 40, cq->shadow_area_pa);\n-\tset_64bit_val(wqe, 48, (cq->virtual_map ? cq->first_pm_pbl_idx : 0));\n-\tset_64bit_val(wqe, 56, LS_64(cq->tph_val, I40IW_CQPSQ_TPHVAL));\n-\n-\theader = cq->cq_uk.cq_id |\n-\t\t LS_64((cq->ceq_id_valid ? 
cq->ceq_id : 0), I40IW_CQPSQ_CQ_CEQID) |\n-\t\t LS_64(I40IW_CQP_OP_CREATE_CQ, I40IW_CQPSQ_OPCODE) |\n-\t\t LS_64(cq->pbl_chunk_size, I40IW_CQPSQ_CQ_LPBLSIZE) |\n-\t\t LS_64(check_overflow, I40IW_CQPSQ_CQ_CHKOVERFLOW) |\n-\t\t LS_64(cq->virtual_map, I40IW_CQPSQ_CQ_VIRTMAP) |\n-\t\t LS_64(cq->ceqe_mask, I40IW_CQPSQ_CQ_ENCEQEMASK) |\n-\t\t LS_64(cq->ceq_id_valid, I40IW_CQPSQ_CQ_CEQIDVALID) |\n-\t\t LS_64(cq->tph_en, I40IW_CQPSQ_TPHEN) |\n-\t\t LS_64(cq->cq_uk.avoid_mem_cflct, I40IW_CQPSQ_CQ_AVOIDMEMCNFLCT) |\n-\t\t LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);\n-\n-\ti40iw_insert_wqe_hdr(wqe, header);\n-\n-\ti40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, \"CQ_CREATE WQE\",\n-\t\t\twqe, I40IW_CQP_WQE_SIZE * 8);\n-\n-\tif (post_sq)\n-\t\ti40iw_sc_cqp_post_sq(cqp);\n-\treturn 0;\n-}\n-\n-/**\n- * i40iw_sc_cq_destroy - destroy completion q\n- * @cq: cq struct\n- * @scratch: u64 saved to be used during cqp completion\n- * @post_sq: flag for cqp db to ring\n- */\n-static enum i40iw_status_code i40iw_sc_cq_destroy(struct i40iw_sc_cq *cq,\n-\t\t\t\t\t\t u64 scratch,\n-\t\t\t\t\t\t bool post_sq)\n-{\n-\tstruct i40iw_sc_cqp *cqp;\n-\tu64 *wqe;\n-\tu64 header;\n-\n-\tcqp = cq->dev->cqp;\n-\twqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);\n-\tif (!wqe)\n-\t\treturn I40IW_ERR_RING_FULL;\n-\tset_64bit_val(wqe, 0, cq->cq_uk.cq_size);\n-\tset_64bit_val(wqe, 8, RS_64_1(cq, 1));\n-\tset_64bit_val(wqe, 40, cq->shadow_area_pa);\n-\tset_64bit_val(wqe, 48, (cq->virtual_map ? cq->first_pm_pbl_idx : 0));\n-\n-\theader = cq->cq_uk.cq_id |\n-\t\t LS_64((cq->ceq_id_valid ? cq->ceq_id : 0), I40IW_CQPSQ_CQ_CEQID) |\n-\t\t LS_64(I40IW_CQP_OP_DESTROY_CQ, I40IW_CQPSQ_OPCODE) |\n-\t\t LS_64(cq->pbl_chunk_size, I40IW_CQPSQ_CQ_LPBLSIZE) |\n-\t\t LS_64(cq->virtual_map, I40IW_CQPSQ_CQ_VIRTMAP) |\n-\t\t LS_64(cq->ceqe_mask, I40IW_CQPSQ_CQ_ENCEQEMASK) |\n-\t\t LS_64(cq->ceq_id_valid, I40IW_CQPSQ_CQ_CEQIDVALID) |\n-\t\t LS_64(cq->tph_en, I40IW_CQPSQ_TPHEN) |\n-\t\t LS_64(cq->cq_uk.avoid_mem_cflct, I40IW_CQPSQ_CQ_AVOIDMEMCNFLCT) |\n-\t\t LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);\n-\n-\ti40iw_insert_wqe_hdr(wqe, header);\n-\n-\ti40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, \"CQ_DESTROY WQE\",\n-\t\t\twqe, I40IW_CQP_WQE_SIZE * 8);\n-\n-\tif (post_sq)\n-\t\ti40iw_sc_cqp_post_sq(cqp);\n-\treturn 0;\n-}\n-\n-/**\n- * i40iw_sc_cq_modify - modify a Completion Queue\n- * @cq: cq struct\n- * @info: modification info struct\n- * @scratch:\n- * @post_sq: flag to post to sq\n- */\n-static enum i40iw_status_code i40iw_sc_cq_modify(struct i40iw_sc_cq *cq,\n-\t\t\t\t\t\t struct i40iw_modify_cq_info *info,\n-\t\t\t\t\t\t u64 scratch,\n-\t\t\t\t\t\t bool post_sq)\n-{\n-\tstruct i40iw_sc_cqp *cqp;\n-\tu64 *wqe;\n-\tu64 header;\n-\tu32 cq_size, ceq_id, first_pm_pbl_idx;\n-\tu8 pbl_chunk_size;\n-\tbool virtual_map, ceq_id_valid, check_overflow;\n-\tu32 pble_obj_cnt;\n-\n-\tif (info->ceq_valid && (info->ceq_id > I40IW_MAX_CEQID))\n-\t\treturn I40IW_ERR_INVALID_CEQ_ID;\n-\n-\tpble_obj_cnt = cq->dev->hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].cnt;\n-\n-\tif (info->cq_resize && info->virtual_map &&\n-\t (info->first_pm_pbl_idx >= pble_obj_cnt))\n-\t\treturn I40IW_ERR_INVALID_PBLE_INDEX;\n-\n-\tcqp = cq->dev->cqp;\n-\twqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);\n-\tif (!wqe)\n-\t\treturn I40IW_ERR_RING_FULL;\n-\n-\tcq->pbl_list = info->pbl_list;\n-\tcq->cq_pa = info->cq_pa;\n-\tcq->first_pm_pbl_idx = info->first_pm_pbl_idx;\n-\n-\tcq_size = info->cq_resize ? 
info->cq_size : cq->cq_uk.cq_size;\n-\tif (info->ceq_change) {\n-\t\tceq_id_valid = true;\n-\t\tceq_id = info->ceq_id;\n-\t} else {\n-\t\tceq_id_valid = cq->ceq_id_valid;\n-\t\tceq_id = ceq_id_valid ? cq->ceq_id : 0;\n-\t}\n-\tvirtual_map = info->cq_resize ? info->virtual_map : cq->virtual_map;\n-\tfirst_pm_pbl_idx = (info->cq_resize ?\n-\t\t\t (info->virtual_map ? info->first_pm_pbl_idx : 0) :\n-\t\t\t (cq->virtual_map ? cq->first_pm_pbl_idx : 0));\n-\tpbl_chunk_size = (info->cq_resize ?\n-\t\t\t (info->virtual_map ? info->pbl_chunk_size : 0) :\n-\t\t\t (cq->virtual_map ? cq->pbl_chunk_size : 0));\n-\tcheck_overflow = info->check_overflow_change ? info->check_overflow :\n-\t\t\t cq->check_overflow;\n-\tcq->cq_uk.cq_size = cq_size;\n-\tcq->ceq_id_valid = ceq_id_valid;\n-\tcq->ceq_id = ceq_id;\n-\tcq->virtual_map = virtual_map;\n-\tcq->first_pm_pbl_idx = first_pm_pbl_idx;\n-\tcq->pbl_chunk_size = pbl_chunk_size;\n-\tcq->check_overflow = check_overflow;\n-\n-\tset_64bit_val(wqe, 0, cq_size);\n-\tset_64bit_val(wqe, 8, RS_64_1(cq, 1));\n-\tset_64bit_val(wqe, 16,\n-\t\t LS_64(info->shadow_read_threshold, I40IW_CQPSQ_CQ_SHADOW_READ_THRESHOLD));\n-\tset_64bit_val(wqe, 32, (cq->virtual_map ? 0 : cq->cq_pa));\n-\tset_64bit_val(wqe, 40, cq->shadow_area_pa);\n-\tset_64bit_val(wqe, 48, (cq->virtual_map ? first_pm_pbl_idx : 0));\n-\tset_64bit_val(wqe, 56, LS_64(cq->tph_val, I40IW_CQPSQ_TPHVAL));\n-\n-\theader = cq->cq_uk.cq_id |\n-\t\t LS_64(ceq_id, I40IW_CQPSQ_CQ_CEQID) |\n-\t\t LS_64(I40IW_CQP_OP_MODIFY_CQ, I40IW_CQPSQ_OPCODE) |\n-\t\t LS_64(info->cq_resize, I40IW_CQPSQ_CQ_CQRESIZE) |\n-\t\t LS_64(pbl_chunk_size, I40IW_CQPSQ_CQ_LPBLSIZE) |\n-\t\t LS_64(check_overflow, I40IW_CQPSQ_CQ_CHKOVERFLOW) |\n-\t\t LS_64(virtual_map, I40IW_CQPSQ_CQ_VIRTMAP) |\n-\t\t LS_64(cq->ceqe_mask, I40IW_CQPSQ_CQ_ENCEQEMASK) |\n-\t\t LS_64(ceq_id_valid, I40IW_CQPSQ_CQ_CEQIDVALID) |\n-\t\t LS_64(cq->tph_en, I40IW_CQPSQ_TPHEN) |\n-\t\t LS_64(cq->cq_uk.avoid_mem_cflct, I40IW_CQPSQ_CQ_AVOIDMEMCNFLCT) |\n-\t\t LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);\n-\n-\ti40iw_insert_wqe_hdr(wqe, header);\n-\n-\ti40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, \"CQ_MODIFY WQE\",\n-\t\t\twqe, I40IW_CQP_WQE_SIZE * 8);\n-\n-\tif (post_sq)\n-\t\ti40iw_sc_cqp_post_sq(cqp);\n-\treturn 0;\n-}\n-\n-/**\n- * i40iw_sc_qp_init - initialize qp\n- * @qp: sc qp\n- * @info: initialization qp info\n- */\n-static enum i40iw_status_code i40iw_sc_qp_init(struct i40iw_sc_qp *qp,\n-\t\t\t\t\t struct i40iw_qp_init_info *info)\n-{\n-\tu32 __iomem *wqe_alloc_reg = NULL;\n-\tenum i40iw_status_code ret_code;\n-\tu32 pble_obj_cnt;\n-\tu8 wqe_size;\n-\tu32 offset;\n-\n-\tqp->dev = info->pd->dev;\n-\tqp->vsi = info->vsi;\n-\tqp->sq_pa = info->sq_pa;\n-\tqp->rq_pa = info->rq_pa;\n-\tqp->hw_host_ctx_pa = info->host_ctx_pa;\n-\tqp->q2_pa = info->q2_pa;\n-\tqp->shadow_area_pa = info->shadow_area_pa;\n-\n-\tqp->q2_buf = info->q2;\n-\tqp->pd = info->pd;\n-\tqp->hw_host_ctx = info->host_ctx;\n-\toffset = (qp->pd->dev->is_pf) ? 
I40E_PFPE_WQEALLOC : I40E_VFPE_WQEALLOC1;\n-\tif (i40iw_get_hw_addr(qp->pd->dev))\n-\t\twqe_alloc_reg = (u32 __iomem *)(i40iw_get_hw_addr(qp->pd->dev) +\n-\t\t\t\t\t offset);\n-\n-\tinfo->qp_uk_init_info.wqe_alloc_reg = wqe_alloc_reg;\n-\tinfo->qp_uk_init_info.abi_ver = qp->pd->abi_ver;\n-\tret_code = i40iw_qp_uk_init(&qp->qp_uk, &info->qp_uk_init_info);\n-\tif (ret_code)\n-\t\treturn ret_code;\n-\tqp->virtual_map = info->virtual_map;\n-\n-\tpble_obj_cnt = info->pd->dev->hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].cnt;\n-\n-\tif ((info->virtual_map && (info->sq_pa >= pble_obj_cnt)) ||\n-\t (info->virtual_map && (info->rq_pa >= pble_obj_cnt)))\n-\t\treturn I40IW_ERR_INVALID_PBLE_INDEX;\n-\n-\tqp->llp_stream_handle = (void *)(-1);\n-\tqp->qp_type = (info->type) ? info->type : I40IW_QP_TYPE_IWARP;\n-\n-\tqp->hw_sq_size = i40iw_get_encoded_wqe_size(qp->qp_uk.sq_ring.size,\n-\t\t\t\t\t\t false);\n-\ti40iw_debug(qp->dev, I40IW_DEBUG_WQE, \"%s: hw_sq_size[%04d] sq_ring.size[%04d]\\n\",\n-\t\t __func__, qp->hw_sq_size, qp->qp_uk.sq_ring.size);\n-\n-\tswitch (qp->pd->abi_ver) {\n-\tcase 4:\n-\t\tret_code = i40iw_fragcnt_to_wqesize_rq(qp->qp_uk.max_rq_frag_cnt,\n-\t\t\t\t\t\t &wqe_size);\n-\t\tif (ret_code)\n-\t\t\treturn ret_code;\n-\t\tbreak;\n-\tcase 5: /* fallthrough until next ABI version */\n-\tdefault:\n-\t\tif (qp->qp_uk.max_rq_frag_cnt > I40IW_MAX_WQ_FRAGMENT_COUNT)\n-\t\t\treturn I40IW_ERR_INVALID_FRAG_COUNT;\n-\t\twqe_size = I40IW_MAX_WQE_SIZE_RQ;\n-\t\tbreak;\n-\t}\n-\tqp->hw_rq_size = i40iw_get_encoded_wqe_size(qp->qp_uk.rq_size *\n-\t\t\t\t(wqe_size / I40IW_QP_WQE_MIN_SIZE), false);\n-\ti40iw_debug(qp->dev, I40IW_DEBUG_WQE,\n-\t\t \"%s: hw_rq_size[%04d] qp_uk.rq_size[%04d] wqe_size[%04d]\\n\",\n-\t\t __func__, qp->hw_rq_size, qp->qp_uk.rq_size, wqe_size);\n-\tqp->sq_tph_val = info->sq_tph_val;\n-\tqp->rq_tph_val = info->rq_tph_val;\n-\tqp->sq_tph_en = info->sq_tph_en;\n-\tqp->rq_tph_en = info->rq_tph_en;\n-\tqp->rcv_tph_en = info->rcv_tph_en;\n-\tqp->xmit_tph_en = info->xmit_tph_en;\n-\tqp->qs_handle = qp->vsi->qos[qp->user_pri].qs_handle;\n-\n-\treturn 0;\n-}\n-\n-/**\n- * i40iw_sc_qp_create - create qp\n- * @qp: sc qp\n- * @info: qp create info\n- * @scratch: u64 saved to be used during cqp completion\n- * @post_sq: flag for cqp db to ring\n- */\n-static enum i40iw_status_code i40iw_sc_qp_create(\n-\t\t\t\tstruct i40iw_sc_qp *qp,\n-\t\t\t\tstruct i40iw_create_qp_info *info,\n-\t\t\t\tu64 scratch,\n-\t\t\t\tbool post_sq)\n-{\n-\tstruct i40iw_sc_cqp *cqp;\n-\tu64 *wqe;\n-\tu64 header;\n-\n-\tif ((qp->qp_uk.qp_id < I40IW_MIN_IW_QP_ID) ||\n-\t (qp->qp_uk.qp_id > I40IW_MAX_IW_QP_ID))\n-\t\treturn I40IW_ERR_INVALID_QP_ID;\n-\n-\tcqp = qp->pd->dev->cqp;\n-\twqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);\n-\tif (!wqe)\n-\t\treturn I40IW_ERR_RING_FULL;\n-\n-\tset_64bit_val(wqe, 16, qp->hw_host_ctx_pa);\n-\n-\tset_64bit_val(wqe, 40, qp->shadow_area_pa);\n-\n-\theader = qp->qp_uk.qp_id |\n-\t\t LS_64(I40IW_CQP_OP_CREATE_QP, I40IW_CQPSQ_OPCODE) |\n-\t\t LS_64((info->ord_valid ? 
1 : 0), I40IW_CQPSQ_QP_ORDVALID) |\n-\t\t LS_64(info->tcp_ctx_valid, I40IW_CQPSQ_QP_TOECTXVALID) |\n-\t\t LS_64(qp->qp_type, I40IW_CQPSQ_QP_QPTYPE) |\n-\t\t LS_64(qp->virtual_map, I40IW_CQPSQ_QP_VQ) |\n-\t\t LS_64(info->cq_num_valid, I40IW_CQPSQ_QP_CQNUMVALID) |\n-\t\t LS_64(info->arp_cache_idx_valid, I40IW_CQPSQ_QP_ARPTABIDXVALID) |\n-\t\t LS_64(info->next_iwarp_state, I40IW_CQPSQ_QP_NEXTIWSTATE) |\n-\t\t LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);\n-\n-\ti40iw_insert_wqe_hdr(wqe, header);\n-\ti40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, \"QP_CREATE WQE\",\n-\t\t\twqe, I40IW_CQP_WQE_SIZE * 8);\n-\n-\tif (post_sq)\n-\t\ti40iw_sc_cqp_post_sq(cqp);\n-\treturn 0;\n-}\n-\n-/**\n- * i40iw_sc_qp_modify - modify qp cqp wqe\n- * @qp: sc qp\n- * @info: modify qp info\n- * @scratch: u64 saved to be used during cqp completion\n- * @post_sq: flag for cqp db to ring\n- */\n-static enum i40iw_status_code i40iw_sc_qp_modify(\n-\t\t\t\tstruct i40iw_sc_qp *qp,\n-\t\t\t\tstruct i40iw_modify_qp_info *info,\n-\t\t\t\tu64 scratch,\n-\t\t\t\tbool post_sq)\n-{\n-\tu64 *wqe;\n-\tstruct i40iw_sc_cqp *cqp;\n-\tu64 header;\n-\tu8 term_actions = 0;\n-\tu8 term_len = 0;\n-\n-\tcqp = qp->pd->dev->cqp;\n-\twqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);\n-\tif (!wqe)\n-\t\treturn I40IW_ERR_RING_FULL;\n-\tif (info->next_iwarp_state == I40IW_QP_STATE_TERMINATE) {\n-\t\tif (info->dont_send_fin)\n-\t\t\tterm_actions += I40IWQP_TERM_SEND_TERM_ONLY;\n-\t\tif (info->dont_send_term)\n-\t\t\tterm_actions += I40IWQP_TERM_SEND_FIN_ONLY;\n-\t\tif ((term_actions == I40IWQP_TERM_SEND_TERM_AND_FIN) ||\n-\t\t (term_actions == I40IWQP_TERM_SEND_TERM_ONLY))\n-\t\t\tterm_len = info->termlen;\n-\t}\n-\n-\tset_64bit_val(wqe,\n-\t\t 8,\n-\t\t LS_64(term_len, I40IW_CQPSQ_QP_TERMLEN));\n-\n-\tset_64bit_val(wqe, 16, qp->hw_host_ctx_pa);\n-\tset_64bit_val(wqe, 40, qp->shadow_area_pa);\n-\n-\theader = qp->qp_uk.qp_id |\n-\t\t LS_64(I40IW_CQP_OP_MODIFY_QP, I40IW_CQPSQ_OPCODE) |\n-\t\t LS_64(info->ord_valid, I40IW_CQPSQ_QP_ORDVALID) |\n-\t\t LS_64(info->tcp_ctx_valid, I40IW_CQPSQ_QP_TOECTXVALID) |\n-\t\t LS_64(info->cached_var_valid, I40IW_CQPSQ_QP_CACHEDVARVALID) |\n-\t\t LS_64(qp->virtual_map, I40IW_CQPSQ_QP_VQ) |\n-\t\t LS_64(info->cq_num_valid, I40IW_CQPSQ_QP_CQNUMVALID) |\n-\t\t LS_64(info->force_loopback, I40IW_CQPSQ_QP_FORCELOOPBACK) |\n-\t\t LS_64(qp->qp_type, I40IW_CQPSQ_QP_QPTYPE) |\n-\t\t LS_64(info->remove_hash_idx, I40IW_CQPSQ_QP_REMOVEHASHENTRY) |\n-\t\t LS_64(term_actions, I40IW_CQPSQ_QP_TERMACT) |\n-\t\t LS_64(info->reset_tcp_conn, I40IW_CQPSQ_QP_RESETCON) |\n-\t\t LS_64(info->arp_cache_idx_valid, I40IW_CQPSQ_QP_ARPTABIDXVALID) |\n-\t\t LS_64(info->next_iwarp_state, I40IW_CQPSQ_QP_NEXTIWSTATE) |\n-\t\t LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);\n-\n-\ti40iw_insert_wqe_hdr(wqe, header);\n-\n-\ti40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, \"QP_MODIFY WQE\",\n-\t\t\twqe, I40IW_CQP_WQE_SIZE * 8);\n-\n-\tif (post_sq)\n-\t\ti40iw_sc_cqp_post_sq(cqp);\n-\treturn 0;\n-}\n-\n-/**\n- * i40iw_sc_qp_destroy - cqp destroy qp\n- * @qp: sc qp\n- * @scratch: u64 saved to be used during cqp completion\n- * @remove_hash_idx: flag if to remove hash idx\n- * @ignore_mw_bnd: memory window bind flag\n- * @post_sq: flag for cqp db to ring\n- */\n-static enum i40iw_status_code i40iw_sc_qp_destroy(\n-\t\t\t\t\tstruct i40iw_sc_qp *qp,\n-\t\t\t\t\tu64 scratch,\n-\t\t\t\t\tbool remove_hash_idx,\n-\t\t\t\t\tbool ignore_mw_bnd,\n-\t\t\t\t\tbool post_sq)\n-{\n-\tu64 *wqe;\n-\tstruct i40iw_sc_cqp *cqp;\n-\tu64 header;\n-\n-\ti40iw_qp_rem_qos(qp);\n-\tcqp = 
qp->pd->dev->cqp;\n-\twqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);\n-\tif (!wqe)\n-\t\treturn I40IW_ERR_RING_FULL;\n-\tset_64bit_val(wqe, 16, qp->hw_host_ctx_pa);\n-\tset_64bit_val(wqe, 40, qp->shadow_area_pa);\n-\n-\theader = qp->qp_uk.qp_id |\n-\t\t LS_64(I40IW_CQP_OP_DESTROY_QP, I40IW_CQPSQ_OPCODE) |\n-\t\t LS_64(qp->qp_type, I40IW_CQPSQ_QP_QPTYPE) |\n-\t\t LS_64(ignore_mw_bnd, I40IW_CQPSQ_QP_IGNOREMWBOUND) |\n-\t\t LS_64(remove_hash_idx, I40IW_CQPSQ_QP_REMOVEHASHENTRY) |\n-\t\t LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);\n-\n-\ti40iw_insert_wqe_hdr(wqe, header);\n-\ti40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, \"QP_DESTROY WQE\",\n-\t\t\twqe, I40IW_CQP_WQE_SIZE * 8);\n-\n-\tif (post_sq)\n-\t\ti40iw_sc_cqp_post_sq(cqp);\n-\treturn 0;\n-}\n-\n-/**\n- * i40iw_sc_qp_flush_wqes - flush qp's wqe\n- * @qp: sc qp\n- * @info: dlush information\n- * @scratch: u64 saved to be used during cqp completion\n- * @post_sq: flag for cqp db to ring\n- */\n-static enum i40iw_status_code i40iw_sc_qp_flush_wqes(\n-\t\t\t\tstruct i40iw_sc_qp *qp,\n-\t\t\t\tstruct i40iw_qp_flush_info *info,\n-\t\t\t\tu64 scratch,\n-\t\t\t\tbool post_sq)\n-{\n-\tu64 temp = 0;\n-\tu64 *wqe;\n-\tstruct i40iw_sc_cqp *cqp;\n-\tu64 header;\n-\tbool flush_sq = false, flush_rq = false;\n-\n-\tif (info->rq && !qp->flush_rq)\n-\t\tflush_rq = true;\n-\n-\tif (info->sq && !qp->flush_sq)\n-\t\tflush_sq = true;\n-\n-\tqp->flush_sq |= flush_sq;\n-\tqp->flush_rq |= flush_rq;\n-\tif (!flush_sq && !flush_rq)\n-\t\treturn 0;\n-\n-\tcqp = qp->pd->dev->cqp;\n-\twqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);\n-\tif (!wqe)\n-\t\treturn I40IW_ERR_RING_FULL;\n-\tif (info->userflushcode) {\n-\t\tif (flush_rq) {\n-\t\t\ttemp |= LS_64(info->rq_minor_code, I40IW_CQPSQ_FWQE_RQMNERR) |\n-\t\t\t\tLS_64(info->rq_major_code, I40IW_CQPSQ_FWQE_RQMJERR);\n-\t\t}\n-\t\tif (flush_sq) {\n-\t\t\ttemp |= LS_64(info->sq_minor_code, I40IW_CQPSQ_FWQE_SQMNERR) |\n-\t\t\t\tLS_64(info->sq_major_code, I40IW_CQPSQ_FWQE_SQMJERR);\n-\t\t}\n-\t}\n-\tset_64bit_val(wqe, 16, temp);\n-\n-\ttemp = (info->generate_ae) ?\n-\t\tinfo->ae_code | LS_64(info->ae_source, I40IW_CQPSQ_FWQE_AESOURCE) : 0;\n-\n-\tset_64bit_val(wqe, 8, temp);\n-\n-\theader = qp->qp_uk.qp_id |\n-\t\t LS_64(I40IW_CQP_OP_FLUSH_WQES, I40IW_CQPSQ_OPCODE) |\n-\t\t LS_64(info->generate_ae, I40IW_CQPSQ_FWQE_GENERATE_AE) |\n-\t\t LS_64(info->userflushcode, I40IW_CQPSQ_FWQE_USERFLCODE) |\n-\t\t LS_64(flush_sq, I40IW_CQPSQ_FWQE_FLUSHSQ) |\n-\t\t LS_64(flush_rq, I40IW_CQPSQ_FWQE_FLUSHRQ) |\n-\t\t LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);\n-\n-\ti40iw_insert_wqe_hdr(wqe, header);\n-\n-\ti40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, \"QP_FLUSH WQE\",\n-\t\t\twqe, I40IW_CQP_WQE_SIZE * 8);\n-\n-\tif (post_sq)\n-\t\ti40iw_sc_cqp_post_sq(cqp);\n-\treturn 0;\n-}\n-\n-/**\n- * i40iw_sc_gen_ae - generate AE, currently uses flush WQE CQP OP\n- * @qp: sc qp\n- * @info: gen ae information\n- * @scratch: u64 saved to be used during cqp completion\n- * @post_sq: flag for cqp db to ring\n- */\n-static enum i40iw_status_code i40iw_sc_gen_ae(\n-\t\t\t\tstruct i40iw_sc_qp *qp,\n-\t\t\t\tstruct i40iw_gen_ae_info *info,\n-\t\t\t\tu64 scratch,\n-\t\t\t\tbool post_sq)\n-{\n-\tu64 temp;\n-\tu64 *wqe;\n-\tstruct i40iw_sc_cqp *cqp;\n-\tu64 header;\n-\n-\tcqp = qp->pd->dev->cqp;\n-\twqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);\n-\tif (!wqe)\n-\t\treturn I40IW_ERR_RING_FULL;\n-\n-\ttemp = info->ae_code |\n-\t LS_64(info->ae_source, I40IW_CQPSQ_FWQE_AESOURCE);\n-\n-\tset_64bit_val(wqe, 8, temp);\n-\n-\theader = qp->qp_uk.qp_id |\n-\t\t 
LS_64(I40IW_CQP_OP_GEN_AE, I40IW_CQPSQ_OPCODE) |\n-\t\t LS_64(1, I40IW_CQPSQ_FWQE_GENERATE_AE) |\n-\t\t LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);\n-\n-\ti40iw_insert_wqe_hdr(wqe, header);\n-\n-\ti40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, \"GEN_AE WQE\",\n-\t\t\twqe, I40IW_CQP_WQE_SIZE * 8);\n-\n-\tif (post_sq)\n-\t\ti40iw_sc_cqp_post_sq(cqp);\n-\treturn 0;\n-}\n-\n-/**\n- * i40iw_sc_qp_upload_context - upload qp's context\n- * @dev: sc device struct\n- * @info: upload context info ptr for return\n- * @scratch: u64 saved to be used during cqp completion\n- * @post_sq: flag for cqp db to ring\n- */\n-static enum i40iw_status_code i40iw_sc_qp_upload_context(\n-\t\t\t\t\tstruct i40iw_sc_dev *dev,\n-\t\t\t\t\tstruct i40iw_upload_context_info *info,\n-\t\t\t\t\tu64 scratch,\n-\t\t\t\t\tbool post_sq)\n-{\n-\tu64 *wqe;\n-\tstruct i40iw_sc_cqp *cqp;\n-\tu64 header;\n-\n-\tcqp = dev->cqp;\n-\twqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);\n-\tif (!wqe)\n-\t\treturn I40IW_ERR_RING_FULL;\n-\tset_64bit_val(wqe, 16, info->buf_pa);\n-\n-\theader = LS_64(info->qp_id, I40IW_CQPSQ_UCTX_QPID) |\n-\t\t LS_64(I40IW_CQP_OP_UPLOAD_CONTEXT, I40IW_CQPSQ_OPCODE) |\n-\t\t LS_64(info->qp_type, I40IW_CQPSQ_UCTX_QPTYPE) |\n-\t\t LS_64(info->raw_format, I40IW_CQPSQ_UCTX_RAWFORMAT) |\n-\t\t LS_64(info->freeze_qp, I40IW_CQPSQ_UCTX_FREEZEQP) |\n-\t\t LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);\n-\n-\ti40iw_insert_wqe_hdr(wqe, header);\n-\n-\ti40iw_debug_buf(dev, I40IW_DEBUG_WQE, \"QP_UPLOAD_CTX WQE\",\n-\t\t\twqe, I40IW_CQP_WQE_SIZE * 8);\n-\n-\tif (post_sq)\n-\t\ti40iw_sc_cqp_post_sq(cqp);\n-\treturn 0;\n-}\n-\n-/**\n- * i40iw_sc_qp_setctx - set qp's context\n- * @qp: sc qp\n- * @qp_ctx: context ptr\n- * @info: ctx info\n- */\n-static enum i40iw_status_code i40iw_sc_qp_setctx(\n-\t\t\t\tstruct i40iw_sc_qp *qp,\n-\t\t\t\tu64 *qp_ctx,\n-\t\t\t\tstruct i40iw_qp_host_ctx_info *info)\n-{\n-\tstruct i40iwarp_offload_info *iw;\n-\tstruct i40iw_tcp_offload_info *tcp;\n-\tstruct i40iw_sc_vsi *vsi;\n-\tstruct i40iw_sc_dev *dev;\n-\tu64 qw0, qw3, qw7 = 0;\n-\n-\tiw = info->iwarp_info;\n-\ttcp = info->tcp_info;\n-\tvsi = qp->vsi;\n-\tdev = qp->dev;\n-\tif (info->add_to_qoslist) {\n-\t\tqp->user_pri = info->user_pri;\n-\t\ti40iw_qp_add_qos(qp);\n-\t\ti40iw_debug(qp->dev, I40IW_DEBUG_DCB, \"%s qp[%d] UP[%d] qset[%d]\\n\",\n-\t\t\t __func__, qp->qp_uk.qp_id, qp->user_pri, qp->qs_handle);\n-\t}\n-\tqw0 = LS_64(qp->qp_uk.rq_wqe_size, I40IWQPC_RQWQESIZE) |\n-\t LS_64(info->err_rq_idx_valid, I40IWQPC_ERR_RQ_IDX_VALID) |\n-\t LS_64(qp->rcv_tph_en, I40IWQPC_RCVTPHEN) |\n-\t LS_64(qp->xmit_tph_en, I40IWQPC_XMITTPHEN) |\n-\t LS_64(qp->rq_tph_en, I40IWQPC_RQTPHEN) |\n-\t LS_64(qp->sq_tph_en, I40IWQPC_SQTPHEN) |\n-\t LS_64(info->push_idx, I40IWQPC_PPIDX) |\n-\t LS_64(info->push_mode_en, I40IWQPC_PMENA);\n-\n-\tset_64bit_val(qp_ctx, 8, qp->sq_pa);\n-\tset_64bit_val(qp_ctx, 16, qp->rq_pa);\n-\n-\tqw3 = LS_64(qp->src_mac_addr_idx, I40IWQPC_SRCMACADDRIDX) |\n-\t LS_64(qp->hw_rq_size, I40IWQPC_RQSIZE) |\n-\t LS_64(qp->hw_sq_size, I40IWQPC_SQSIZE);\n-\n-\tset_64bit_val(qp_ctx,\n-\t\t 128,\n-\t\t LS_64(info->err_rq_idx, I40IWQPC_ERR_RQ_IDX));\n-\n-\tset_64bit_val(qp_ctx,\n-\t\t 136,\n-\t\t LS_64(info->send_cq_num, I40IWQPC_TXCQNUM) |\n-\t\t LS_64(info->rcv_cq_num, I40IWQPC_RXCQNUM));\n-\n-\tset_64bit_val(qp_ctx,\n-\t\t 168,\n-\t\t LS_64(info->qp_compl_ctx, I40IWQPC_QPCOMPCTX));\n-\tset_64bit_val(qp_ctx,\n-\t\t 176,\n-\t\t LS_64(qp->sq_tph_val, I40IWQPC_SQTPHVAL) |\n-\t\t LS_64(qp->rq_tph_val, I40IWQPC_RQTPHVAL) |\n-\t\t LS_64(qp->qs_handle, 
I40IWQPC_QSHANDLE) |\n-\t\t LS_64(vsi->exception_lan_queue, I40IWQPC_EXCEPTION_LAN_QUEUE));\n-\n-\tif (info->iwarp_info_valid) {\n-\t\tqw0 |= LS_64(iw->ddp_ver, I40IWQPC_DDP_VER) |\n-\t\t LS_64(iw->rdmap_ver, I40IWQPC_RDMAP_VER);\n-\n-\t\tqw7 |= LS_64(iw->pd_id, I40IWQPC_PDIDX);\n-\t\tset_64bit_val(qp_ctx,\n-\t\t\t 144,\n-\t\t\t LS_64(qp->q2_pa, I40IWQPC_Q2ADDR) |\n-\t\t\t LS_64(vsi->fcn_id, I40IWQPC_STAT_INDEX));\n-\t\tset_64bit_val(qp_ctx,\n-\t\t\t 152,\n-\t\t\t LS_64(iw->last_byte_sent, I40IWQPC_LASTBYTESENT));\n-\n-\t\tset_64bit_val(qp_ctx,\n-\t\t\t 160,\n-\t\t\t LS_64(iw->ord_size, I40IWQPC_ORDSIZE) |\n-\t\t\t LS_64(iw->ird_size, I40IWQPC_IRDSIZE) |\n-\t\t\t LS_64(iw->wr_rdresp_en, I40IWQPC_WRRDRSPOK) |\n-\t\t\t LS_64(iw->rd_enable, I40IWQPC_RDOK) |\n-\t\t\t LS_64(iw->snd_mark_en, I40IWQPC_SNDMARKERS) |\n-\t\t\t LS_64(iw->bind_en, I40IWQPC_BINDEN) |\n-\t\t\t LS_64(iw->fast_reg_en, I40IWQPC_FASTREGEN) |\n-\t\t\t LS_64(iw->priv_mode_en, I40IWQPC_PRIVEN) |\n-\t\t\t LS_64((((vsi->stats_fcn_id_alloc) &&\n-\t\t\t\t (dev->is_pf) && (vsi->fcn_id >= I40IW_FIRST_NON_PF_STAT)) ? 1 : 0),\n-\t\t\t\t I40IWQPC_USESTATSINSTANCE) |\n-\t\t\t LS_64(1, I40IWQPC_IWARPMODE) |\n-\t\t\t LS_64(iw->rcv_mark_en, I40IWQPC_RCVMARKERS) |\n-\t\t\t LS_64(iw->align_hdrs, I40IWQPC_ALIGNHDRS) |\n-\t\t\t LS_64(iw->rcv_no_mpa_crc, I40IWQPC_RCVNOMPACRC) |\n-\t\t\t LS_64(iw->rcv_mark_offset, I40IWQPC_RCVMARKOFFSET) |\n-\t\t\t LS_64(iw->snd_mark_offset, I40IWQPC_SNDMARKOFFSET));\n-\t}\n-\tif (info->tcp_info_valid) {\n-\t\tqw0 |= LS_64(tcp->ipv4, I40IWQPC_IPV4) |\n-\t\t LS_64(tcp->no_nagle, I40IWQPC_NONAGLE) |\n-\t\t LS_64(tcp->insert_vlan_tag, I40IWQPC_INSERTVLANTAG) |\n-\t\t LS_64(tcp->time_stamp, I40IWQPC_TIMESTAMP) |\n-\t\t LS_64(tcp->cwnd_inc_limit, I40IWQPC_LIMIT) |\n-\t\t LS_64(tcp->drop_ooo_seg, I40IWQPC_DROPOOOSEG) |\n-\t\t LS_64(tcp->dup_ack_thresh, I40IWQPC_DUPACK_THRESH);\n-\n-\t\tqw3 |= LS_64(tcp->ttl, I40IWQPC_TTL) |\n-\t\t LS_64(tcp->src_mac_addr_idx, I40IWQPC_SRCMACADDRIDX) |\n-\t\t LS_64(tcp->avoid_stretch_ack, I40IWQPC_AVOIDSTRETCHACK) |\n-\t\t LS_64(tcp->tos, I40IWQPC_TOS) |\n-\t\t LS_64(tcp->src_port, I40IWQPC_SRCPORTNUM) |\n-\t\t LS_64(tcp->dst_port, I40IWQPC_DESTPORTNUM);\n-\n-\t\tqp->src_mac_addr_idx = tcp->src_mac_addr_idx;\n-\t\tset_64bit_val(qp_ctx,\n-\t\t\t 32,\n-\t\t\t LS_64(tcp->dest_ip_addr2, I40IWQPC_DESTIPADDR2) |\n-\t\t\t LS_64(tcp->dest_ip_addr3, I40IWQPC_DESTIPADDR3));\n-\n-\t\tset_64bit_val(qp_ctx,\n-\t\t\t 40,\n-\t\t\t LS_64(tcp->dest_ip_addr0, I40IWQPC_DESTIPADDR0) |\n-\t\t\t LS_64(tcp->dest_ip_addr1, I40IWQPC_DESTIPADDR1));\n-\n-\t\tset_64bit_val(qp_ctx,\n-\t\t\t 48,\n-\t\t\t LS_64(tcp->snd_mss, I40IWQPC_SNDMSS) |\n-\t\t\t\tLS_64(tcp->vlan_tag, I40IWQPC_VLANTAG) |\n-\t\t\t\tLS_64(tcp->arp_idx, I40IWQPC_ARPIDX));\n-\n-\t\tqw7 |= LS_64(tcp->flow_label, I40IWQPC_FLOWLABEL) |\n-\t\t LS_64(tcp->wscale, I40IWQPC_WSCALE) |\n-\t\t LS_64(tcp->ignore_tcp_opt, I40IWQPC_IGNORE_TCP_OPT) |\n-\t\t LS_64(tcp->ignore_tcp_uns_opt, I40IWQPC_IGNORE_TCP_UNS_OPT) |\n-\t\t LS_64(tcp->tcp_state, I40IWQPC_TCPSTATE) |\n-\t\t LS_64(tcp->rcv_wscale, I40IWQPC_RCVSCALE) |\n-\t\t LS_64(tcp->snd_wscale, I40IWQPC_SNDSCALE);\n-\n-\t\tset_64bit_val(qp_ctx,\n-\t\t\t 72,\n-\t\t\t LS_64(tcp->time_stamp_recent, I40IWQPC_TIMESTAMP_RECENT) |\n-\t\t\t LS_64(tcp->time_stamp_age, I40IWQPC_TIMESTAMP_AGE));\n-\t\tset_64bit_val(qp_ctx,\n-\t\t\t 80,\n-\t\t\t LS_64(tcp->snd_nxt, I40IWQPC_SNDNXT) |\n-\t\t\t LS_64(tcp->snd_wnd, I40IWQPC_SNDWND));\n-\n-\t\tset_64bit_val(qp_ctx,\n-\t\t\t 88,\n-\t\t\t LS_64(tcp->rcv_nxt, I40IWQPC_RCVNXT) 
|\n-\t\t\t LS_64(tcp->rcv_wnd, I40IWQPC_RCVWND));\n-\t\tset_64bit_val(qp_ctx,\n-\t\t\t 96,\n-\t\t\t LS_64(tcp->snd_max, I40IWQPC_SNDMAX) |\n-\t\t\t LS_64(tcp->snd_una, I40IWQPC_SNDUNA));\n-\t\tset_64bit_val(qp_ctx,\n-\t\t\t 104,\n-\t\t\t LS_64(tcp->srtt, I40IWQPC_SRTT) |\n-\t\t\t LS_64(tcp->rtt_var, I40IWQPC_RTTVAR));\n-\t\tset_64bit_val(qp_ctx,\n-\t\t\t 112,\n-\t\t\t LS_64(tcp->ss_thresh, I40IWQPC_SSTHRESH) |\n-\t\t\t LS_64(tcp->cwnd, I40IWQPC_CWND));\n-\t\tset_64bit_val(qp_ctx,\n-\t\t\t 120,\n-\t\t\t LS_64(tcp->snd_wl1, I40IWQPC_SNDWL1) |\n-\t\t\t LS_64(tcp->snd_wl2, I40IWQPC_SNDWL2));\n-\t\tset_64bit_val(qp_ctx,\n-\t\t\t 128,\n-\t\t\t LS_64(tcp->max_snd_window, I40IWQPC_MAXSNDWND) |\n-\t\t\t LS_64(tcp->rexmit_thresh, I40IWQPC_REXMIT_THRESH));\n-\t\tset_64bit_val(qp_ctx,\n-\t\t\t 184,\n-\t\t\t LS_64(tcp->local_ipaddr3, I40IWQPC_LOCAL_IPADDR3) |\n-\t\t\t LS_64(tcp->local_ipaddr2, I40IWQPC_LOCAL_IPADDR2));\n-\t\tset_64bit_val(qp_ctx,\n-\t\t\t 192,\n-\t\t\t LS_64(tcp->local_ipaddr1, I40IWQPC_LOCAL_IPADDR1) |\n-\t\t\t LS_64(tcp->local_ipaddr0, I40IWQPC_LOCAL_IPADDR0));\n-\t}\n-\n-\tset_64bit_val(qp_ctx, 0, qw0);\n-\tset_64bit_val(qp_ctx, 24, qw3);\n-\tset_64bit_val(qp_ctx, 56, qw7);\n-\n-\ti40iw_debug_buf(qp->dev, I40IW_DEBUG_WQE, \"QP_HOST)CTX WQE\",\n-\t\t\tqp_ctx, I40IW_QP_CTX_SIZE);\n-\treturn 0;\n-}\n-\n-/**\n- * i40iw_sc_alloc_stag - mr stag alloc\n- * @dev: sc device struct\n- * @info: stag info\n- * @scratch: u64 saved to be used during cqp completion\n- * @post_sq: flag for cqp db to ring\n- */\n-static enum i40iw_status_code i40iw_sc_alloc_stag(\n-\t\t\t\tstruct i40iw_sc_dev *dev,\n-\t\t\t\tstruct i40iw_allocate_stag_info *info,\n-\t\t\t\tu64 scratch,\n-\t\t\t\tbool post_sq)\n-{\n-\tu64 *wqe;\n-\tstruct i40iw_sc_cqp *cqp;\n-\tu64 header;\n-\tenum i40iw_page_size page_size;\n-\n-\tpage_size = (info->page_size == 0x200000) ? 
I40IW_PAGE_SIZE_2M : I40IW_PAGE_SIZE_4K;\n-\tcqp = dev->cqp;\n-\twqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);\n-\tif (!wqe)\n-\t\treturn I40IW_ERR_RING_FULL;\n-\tset_64bit_val(wqe,\n-\t\t 8,\n-\t\t LS_64(info->pd_id, I40IW_CQPSQ_STAG_PDID) |\n-\t\t LS_64(info->total_len, I40IW_CQPSQ_STAG_STAGLEN));\n-\tset_64bit_val(wqe,\n-\t\t 16,\n-\t\t LS_64(info->stag_idx, I40IW_CQPSQ_STAG_IDX));\n-\tset_64bit_val(wqe,\n-\t\t 40,\n-\t\t LS_64(info->hmc_fcn_index, I40IW_CQPSQ_STAG_HMCFNIDX));\n-\n-\theader = LS_64(I40IW_CQP_OP_ALLOC_STAG, I40IW_CQPSQ_OPCODE) |\n-\t\t LS_64(1, I40IW_CQPSQ_STAG_MR) |\n-\t\t LS_64(info->access_rights, I40IW_CQPSQ_STAG_ARIGHTS) |\n-\t\t LS_64(info->chunk_size, I40IW_CQPSQ_STAG_LPBLSIZE) |\n-\t\t LS_64(page_size, I40IW_CQPSQ_STAG_HPAGESIZE) |\n-\t\t LS_64(info->remote_access, I40IW_CQPSQ_STAG_REMACCENABLED) |\n-\t\t LS_64(info->use_hmc_fcn_index, I40IW_CQPSQ_STAG_USEHMCFNIDX) |\n-\t\t LS_64(info->use_pf_rid, I40IW_CQPSQ_STAG_USEPFRID) |\n-\t\t LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);\n-\n-\ti40iw_insert_wqe_hdr(wqe, header);\n-\n-\ti40iw_debug_buf(dev, I40IW_DEBUG_WQE, \"ALLOC_STAG WQE\",\n-\t\t\twqe, I40IW_CQP_WQE_SIZE * 8);\n-\n-\tif (post_sq)\n-\t\ti40iw_sc_cqp_post_sq(cqp);\n-\treturn 0;\n-}\n-\n-/**\n- * i40iw_sc_mr_reg_non_shared - non-shared mr registration\n- * @dev: sc device struct\n- * @info: mr info\n- * @scratch: u64 saved to be used during cqp completion\n- * @post_sq: flag for cqp db to ring\n- */\n-static enum i40iw_status_code i40iw_sc_mr_reg_non_shared(\n-\t\t\t\tstruct i40iw_sc_dev *dev,\n-\t\t\t\tstruct i40iw_reg_ns_stag_info *info,\n-\t\t\t\tu64 scratch,\n-\t\t\t\tbool post_sq)\n-{\n-\tu64 *wqe;\n-\tu64 temp;\n-\tstruct i40iw_sc_cqp *cqp;\n-\tu64 header;\n-\tu32 pble_obj_cnt;\n-\tbool remote_access;\n-\tu8 addr_type;\n-\tenum i40iw_page_size page_size;\n-\n-\tpage_size = (info->page_size == 0x200000) ? I40IW_PAGE_SIZE_2M : I40IW_PAGE_SIZE_4K;\n-\tif (info->access_rights & (I40IW_ACCESS_FLAGS_REMOTEREAD_ONLY |\n-\t\t\t\t I40IW_ACCESS_FLAGS_REMOTEWRITE_ONLY))\n-\t\tremote_access = true;\n-\telse\n-\t\tremote_access = false;\n-\n-\tpble_obj_cnt = dev->hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].cnt;\n-\n-\tif (info->chunk_size && (info->first_pm_pbl_index >= pble_obj_cnt))\n-\t\treturn I40IW_ERR_INVALID_PBLE_INDEX;\n-\n-\tcqp = dev->cqp;\n-\twqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);\n-\tif (!wqe)\n-\t\treturn I40IW_ERR_RING_FULL;\n-\n-\ttemp = (info->addr_type == I40IW_ADDR_TYPE_VA_BASED) ? (uintptr_t)info->va : info->fbo;\n-\tset_64bit_val(wqe, 0, temp);\n-\n-\tset_64bit_val(wqe,\n-\t\t 8,\n-\t\t LS_64(info->total_len, I40IW_CQPSQ_STAG_STAGLEN) |\n-\t\t LS_64(info->pd_id, I40IW_CQPSQ_STAG_PDID));\n-\n-\tset_64bit_val(wqe,\n-\t\t 16,\n-\t\t LS_64(info->stag_key, I40IW_CQPSQ_STAG_KEY) |\n-\t\t LS_64(info->stag_idx, I40IW_CQPSQ_STAG_IDX));\n-\tif (!info->chunk_size) {\n-\t\tset_64bit_val(wqe, 32, info->reg_addr_pa);\n-\t\tset_64bit_val(wqe, 48, 0);\n-\t} else {\n-\t\tset_64bit_val(wqe, 32, 0);\n-\t\tset_64bit_val(wqe, 48, info->first_pm_pbl_index);\n-\t}\n-\tset_64bit_val(wqe, 40, info->hmc_fcn_index);\n-\tset_64bit_val(wqe, 56, 0);\n-\n-\taddr_type = (info->addr_type == I40IW_ADDR_TYPE_VA_BASED) ? 
1 : 0;\n-\theader = LS_64(I40IW_CQP_OP_REG_MR, I40IW_CQPSQ_OPCODE) |\n-\t\t LS_64(1, I40IW_CQPSQ_STAG_MR) |\n-\t\t LS_64(info->chunk_size, I40IW_CQPSQ_STAG_LPBLSIZE) |\n-\t\t LS_64(page_size, I40IW_CQPSQ_STAG_HPAGESIZE) |\n-\t\t LS_64(info->access_rights, I40IW_CQPSQ_STAG_ARIGHTS) |\n-\t\t LS_64(remote_access, I40IW_CQPSQ_STAG_REMACCENABLED) |\n-\t\t LS_64(addr_type, I40IW_CQPSQ_STAG_VABASEDTO) |\n-\t\t LS_64(info->use_hmc_fcn_index, I40IW_CQPSQ_STAG_USEHMCFNIDX) |\n-\t\t LS_64(info->use_pf_rid, I40IW_CQPSQ_STAG_USEPFRID) |\n-\t\t LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);\n-\n-\ti40iw_insert_wqe_hdr(wqe, header);\n-\n-\ti40iw_debug_buf(dev, I40IW_DEBUG_WQE, \"MR_REG_NS WQE\",\n-\t\t\twqe, I40IW_CQP_WQE_SIZE * 8);\n-\n-\tif (post_sq)\n-\t\ti40iw_sc_cqp_post_sq(cqp);\n-\treturn 0;\n-}\n-\n-/**\n- * i40iw_sc_mr_reg_shared - registered shared memory region\n- * @dev: sc device struct\n- * @info: info for shared memory registeration\n- * @scratch: u64 saved to be used during cqp completion\n- * @post_sq: flag for cqp db to ring\n- */\n-static enum i40iw_status_code i40iw_sc_mr_reg_shared(\n-\t\t\t\t\tstruct i40iw_sc_dev *dev,\n-\t\t\t\t\tstruct i40iw_register_shared_stag *info,\n-\t\t\t\t\tu64 scratch,\n-\t\t\t\t\tbool post_sq)\n-{\n-\tu64 *wqe;\n-\tstruct i40iw_sc_cqp *cqp;\n-\tu64 temp, va64, fbo, header;\n-\tu32 va32;\n-\tbool remote_access;\n-\tu8 addr_type;\n-\n-\tif (info->access_rights & (I40IW_ACCESS_FLAGS_REMOTEREAD_ONLY |\n-\t\t\t\t I40IW_ACCESS_FLAGS_REMOTEWRITE_ONLY))\n-\t\tremote_access = true;\n-\telse\n-\t\tremote_access = false;\n-\tcqp = dev->cqp;\n-\twqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);\n-\tif (!wqe)\n-\t\treturn I40IW_ERR_RING_FULL;\n-\tva64 = (uintptr_t)(info->va);\n-\tva32 = (u32)(va64 & 0x00000000FFFFFFFF);\n-\tfbo = (u64)(va32 & (4096 - 1));\n-\n-\tset_64bit_val(wqe,\n-\t\t 0,\n-\t\t (info->addr_type == I40IW_ADDR_TYPE_VA_BASED ? (uintptr_t)info->va : fbo));\n-\n-\tset_64bit_val(wqe,\n-\t\t 8,\n-\t\t LS_64(info->pd_id, I40IW_CQPSQ_STAG_PDID));\n-\ttemp = LS_64(info->new_stag_key, I40IW_CQPSQ_STAG_KEY) |\n-\t LS_64(info->new_stag_idx, I40IW_CQPSQ_STAG_IDX) |\n-\t LS_64(info->parent_stag_idx, I40IW_CQPSQ_STAG_PARENTSTAGIDX);\n-\tset_64bit_val(wqe, 16, temp);\n-\n-\taddr_type = (info->addr_type == I40IW_ADDR_TYPE_VA_BASED) ? 
1 : 0;\n-\theader = LS_64(I40IW_CQP_OP_REG_SMR, I40IW_CQPSQ_OPCODE) |\n-\t\t LS_64(1, I40IW_CQPSQ_STAG_MR) |\n-\t\t LS_64(info->access_rights, I40IW_CQPSQ_STAG_ARIGHTS) |\n-\t\t LS_64(remote_access, I40IW_CQPSQ_STAG_REMACCENABLED) |\n-\t\t LS_64(addr_type, I40IW_CQPSQ_STAG_VABASEDTO) |\n-\t\t LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);\n-\n-\ti40iw_insert_wqe_hdr(wqe, header);\n-\n-\ti40iw_debug_buf(dev, I40IW_DEBUG_WQE, \"MR_REG_SHARED WQE\",\n-\t\t\twqe, I40IW_CQP_WQE_SIZE * 8);\n-\n-\tif (post_sq)\n-\t\ti40iw_sc_cqp_post_sq(cqp);\n-\treturn 0;\n-}\n-\n-/**\n- * i40iw_sc_dealloc_stag - deallocate stag\n- * @dev: sc device struct\n- * @info: dealloc stag info\n- * @scratch: u64 saved to be used during cqp completion\n- * @post_sq: flag for cqp db to ring\n- */\n-static enum i40iw_status_code i40iw_sc_dealloc_stag(\n-\t\t\t\t\tstruct i40iw_sc_dev *dev,\n-\t\t\t\t\tstruct i40iw_dealloc_stag_info *info,\n-\t\t\t\t\tu64 scratch,\n-\t\t\t\t\tbool post_sq)\n-{\n-\tu64 header;\n-\tu64 *wqe;\n-\tstruct i40iw_sc_cqp *cqp;\n-\n-\tcqp = dev->cqp;\n-\twqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);\n-\tif (!wqe)\n-\t\treturn I40IW_ERR_RING_FULL;\n-\tset_64bit_val(wqe,\n-\t\t 8,\n-\t\t LS_64(info->pd_id, I40IW_CQPSQ_STAG_PDID));\n-\tset_64bit_val(wqe,\n-\t\t 16,\n-\t\t LS_64(info->stag_idx, I40IW_CQPSQ_STAG_IDX));\n-\n-\theader = LS_64(I40IW_CQP_OP_DEALLOC_STAG, I40IW_CQPSQ_OPCODE) |\n-\t\t LS_64(info->mr, I40IW_CQPSQ_STAG_MR) |\n-\t\t LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);\n-\n-\ti40iw_insert_wqe_hdr(wqe, header);\n-\n-\ti40iw_debug_buf(dev, I40IW_DEBUG_WQE, \"DEALLOC_STAG WQE\",\n-\t\t\twqe, I40IW_CQP_WQE_SIZE * 8);\n-\n-\tif (post_sq)\n-\t\ti40iw_sc_cqp_post_sq(cqp);\n-\treturn 0;\n-}\n-\n-/**\n- * i40iw_sc_query_stag - query hardware for stag\n- * @dev: sc device struct\n- * @scratch: u64 saved to be used during cqp completion\n- * @stag_index: stag index for query\n- * @post_sq: flag for cqp db to ring\n- */\n-static enum i40iw_status_code i40iw_sc_query_stag(struct i40iw_sc_dev *dev,\n-\t\t\t\t\t\t u64 scratch,\n-\t\t\t\t\t\t u32 stag_index,\n-\t\t\t\t\t\t bool post_sq)\n-{\n-\tu64 header;\n-\tu64 *wqe;\n-\tstruct i40iw_sc_cqp *cqp;\n-\n-\tcqp = dev->cqp;\n-\twqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);\n-\tif (!wqe)\n-\t\treturn I40IW_ERR_RING_FULL;\n-\tset_64bit_val(wqe,\n-\t\t 16,\n-\t\t LS_64(stag_index, I40IW_CQPSQ_QUERYSTAG_IDX));\n-\n-\theader = LS_64(I40IW_CQP_OP_QUERY_STAG, I40IW_CQPSQ_OPCODE) |\n-\t\t LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);\n-\n-\ti40iw_insert_wqe_hdr(wqe, header);\n-\n-\ti40iw_debug_buf(dev, I40IW_DEBUG_WQE, \"QUERY_STAG WQE\",\n-\t\t\twqe, I40IW_CQP_WQE_SIZE * 8);\n-\n-\tif (post_sq)\n-\t\ti40iw_sc_cqp_post_sq(cqp);\n-\treturn 0;\n-}\n-\n-/**\n- * i40iw_sc_mw_alloc - mw allocate\n- * @dev: sc device struct\n- * @scratch: u64 saved to be used during cqp completion\n- * @mw_stag_index:stag index\n- * @pd_id: pd is for this mw\n- * @post_sq: flag for cqp db to ring\n- */\n-static enum i40iw_status_code i40iw_sc_mw_alloc(\n-\t\t\t\t\tstruct i40iw_sc_dev *dev,\n-\t\t\t\t\tu64 scratch,\n-\t\t\t\t\tu32 mw_stag_index,\n-\t\t\t\t\tu16 pd_id,\n-\t\t\t\t\tbool post_sq)\n-{\n-\tu64 header;\n-\tstruct i40iw_sc_cqp *cqp;\n-\tu64 *wqe;\n-\n-\tcqp = dev->cqp;\n-\twqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);\n-\tif (!wqe)\n-\t\treturn I40IW_ERR_RING_FULL;\n-\tset_64bit_val(wqe, 8, LS_64(pd_id, I40IW_CQPSQ_STAG_PDID));\n-\tset_64bit_val(wqe,\n-\t\t 16,\n-\t\t LS_64(mw_stag_index, I40IW_CQPSQ_STAG_IDX));\n-\n-\theader = LS_64(I40IW_CQP_OP_ALLOC_STAG, 
I40IW_CQPSQ_OPCODE) |\n-\t\t LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);\n-\n-\ti40iw_insert_wqe_hdr(wqe, header);\n-\n-\ti40iw_debug_buf(dev, I40IW_DEBUG_WQE, \"MW_ALLOC WQE\",\n-\t\t\twqe, I40IW_CQP_WQE_SIZE * 8);\n-\n-\tif (post_sq)\n-\t\ti40iw_sc_cqp_post_sq(cqp);\n-\treturn 0;\n-}\n-\n-/**\n- * i40iw_sc_mr_fast_register - Posts RDMA fast register mr WR to iwarp qp\n- * @qp: sc qp struct\n- * @info: fast mr info\n- * @post_sq: flag for cqp db to ring\n- */\n-enum i40iw_status_code i40iw_sc_mr_fast_register(\n-\t\t\t\tstruct i40iw_sc_qp *qp,\n-\t\t\t\tstruct i40iw_fast_reg_stag_info *info,\n-\t\t\t\tbool post_sq)\n-{\n-\tu64 temp, header;\n-\tu64 *wqe;\n-\tu32 wqe_idx;\n-\tenum i40iw_page_size page_size;\n-\n-\tpage_size = (info->page_size == 0x200000) ? I40IW_PAGE_SIZE_2M : I40IW_PAGE_SIZE_4K;\n-\twqe = i40iw_qp_get_next_send_wqe(&qp->qp_uk, &wqe_idx, I40IW_QP_WQE_MIN_SIZE,\n-\t\t\t\t\t 0, info->wr_id);\n-\tif (!wqe)\n-\t\treturn I40IW_ERR_QP_TOOMANY_WRS_POSTED;\n-\n-\ti40iw_debug(qp->dev, I40IW_DEBUG_MR, \"%s: wr_id[%llxh] wqe_idx[%04d] location[%p]\\n\",\n-\t\t __func__, info->wr_id, wqe_idx,\n-\t\t &qp->qp_uk.sq_wrtrk_array[wqe_idx].wrid);\n-\ttemp = (info->addr_type == I40IW_ADDR_TYPE_VA_BASED) ? (uintptr_t)info->va : info->fbo;\n-\tset_64bit_val(wqe, 0, temp);\n-\n-\ttemp = RS_64(info->first_pm_pbl_index >> 16, I40IWQPSQ_FIRSTPMPBLIDXHI);\n-\tset_64bit_val(wqe,\n-\t\t 8,\n-\t\t LS_64(temp, I40IWQPSQ_FIRSTPMPBLIDXHI) |\n-\t\t LS_64(info->reg_addr_pa >> I40IWQPSQ_PBLADDR_SHIFT, I40IWQPSQ_PBLADDR));\n-\n-\tset_64bit_val(wqe,\n-\t\t 16,\n-\t\t info->total_len |\n-\t\t LS_64(info->first_pm_pbl_index, I40IWQPSQ_FIRSTPMPBLIDXLO));\n-\n-\theader = LS_64(info->stag_key, I40IWQPSQ_STAGKEY) |\n-\t\t LS_64(info->stag_idx, I40IWQPSQ_STAGINDEX) |\n-\t\t LS_64(I40IWQP_OP_FAST_REGISTER, I40IWQPSQ_OPCODE) |\n-\t\t LS_64(info->chunk_size, I40IWQPSQ_LPBLSIZE) |\n-\t\t LS_64(page_size, I40IWQPSQ_HPAGESIZE) |\n-\t\t LS_64(info->access_rights, I40IWQPSQ_STAGRIGHTS) |\n-\t\t LS_64(info->addr_type, I40IWQPSQ_VABASEDTO) |\n-\t\t LS_64(info->read_fence, I40IWQPSQ_READFENCE) |\n-\t\t LS_64(info->local_fence, I40IWQPSQ_LOCALFENCE) |\n-\t\t LS_64(info->signaled, I40IWQPSQ_SIGCOMPL) |\n-\t\t LS_64(qp->qp_uk.swqe_polarity, I40IWQPSQ_VALID);\n-\n-\ti40iw_insert_wqe_hdr(wqe, header);\n-\n-\ti40iw_debug_buf(qp->dev, I40IW_DEBUG_WQE, \"FAST_REG WQE\",\n-\t\t\twqe, I40IW_QP_WQE_MIN_SIZE);\n-\n-\tif (post_sq)\n-\t\ti40iw_qp_post_wr(&qp->qp_uk);\n-\treturn 0;\n-}\n-\n-/**\n- * i40iw_sc_send_lsmm - send last streaming mode message\n- * @qp: sc qp struct\n- * @lsmm_buf: buffer with lsmm message\n- * @size: size of lsmm buffer\n- * @stag: stag of lsmm buffer\n- */\n-static void i40iw_sc_send_lsmm(struct i40iw_sc_qp *qp,\n-\t\t\t void *lsmm_buf,\n-\t\t\t u32 size,\n-\t\t\t i40iw_stag stag)\n-{\n-\tu64 *wqe;\n-\tu64 header;\n-\tstruct i40iw_qp_uk *qp_uk;\n-\n-\tqp_uk = &qp->qp_uk;\n-\twqe = qp_uk->sq_base->elem;\n-\n-\tset_64bit_val(wqe, 0, (uintptr_t)lsmm_buf);\n-\n-\tset_64bit_val(wqe, 8, (size | LS_64(stag, I40IWQPSQ_FRAG_STAG)));\n-\n-\tset_64bit_val(wqe, 16, 0);\n-\n-\theader = LS_64(I40IWQP_OP_RDMA_SEND, I40IWQPSQ_OPCODE) |\n-\t\t LS_64(1, I40IWQPSQ_STREAMMODE) |\n-\t\t LS_64(1, I40IWQPSQ_WAITFORRCVPDU) |\n-\t\t LS_64(qp->qp_uk.swqe_polarity, I40IWQPSQ_VALID);\n-\n-\ti40iw_insert_wqe_hdr(wqe, header);\n-\n-\ti40iw_debug_buf(qp->dev, I40IW_DEBUG_QP, \"SEND_LSMM WQE\",\n-\t\t\twqe, I40IW_QP_WQE_MIN_SIZE);\n-}\n-\n-/**\n- * i40iw_sc_send_lsmm_nostag - for privilege qp\n- * @qp: sc qp struct\n- * @lsmm_buf: buffer with 
lsmm message\n- * @size: size of lsmm buffer\n- */\n-static void i40iw_sc_send_lsmm_nostag(struct i40iw_sc_qp *qp,\n-\t\t\t\t void *lsmm_buf,\n-\t\t\t\t u32 size)\n-{\n-\tu64 *wqe;\n-\tu64 header;\n-\tstruct i40iw_qp_uk *qp_uk;\n-\n-\tqp_uk = &qp->qp_uk;\n-\twqe = qp_uk->sq_base->elem;\n-\n-\tset_64bit_val(wqe, 0, (uintptr_t)lsmm_buf);\n-\n-\tset_64bit_val(wqe, 8, size);\n-\n-\tset_64bit_val(wqe, 16, 0);\n-\n-\theader = LS_64(I40IWQP_OP_RDMA_SEND, I40IWQPSQ_OPCODE) |\n-\t\t LS_64(1, I40IWQPSQ_STREAMMODE) |\n-\t\t LS_64(1, I40IWQPSQ_WAITFORRCVPDU) |\n-\t\t LS_64(qp->qp_uk.swqe_polarity, I40IWQPSQ_VALID);\n-\n-\ti40iw_insert_wqe_hdr(wqe, header);\n-\n-\ti40iw_debug_buf(qp->dev, I40IW_DEBUG_WQE, \"SEND_LSMM_NOSTAG WQE\",\n-\t\t\twqe, I40IW_QP_WQE_MIN_SIZE);\n-}\n-\n-/**\n- * i40iw_sc_send_rtt - send last read0 or write0\n- * @qp: sc qp struct\n- * @read: Do read0 or write0\n- */\n-static void i40iw_sc_send_rtt(struct i40iw_sc_qp *qp, bool read)\n-{\n-\tu64 *wqe;\n-\tu64 header;\n-\tstruct i40iw_qp_uk *qp_uk;\n-\n-\tqp_uk = &qp->qp_uk;\n-\twqe = qp_uk->sq_base->elem;\n-\n-\tset_64bit_val(wqe, 0, 0);\n-\tset_64bit_val(wqe, 8, 0);\n-\tset_64bit_val(wqe, 16, 0);\n-\tif (read) {\n-\t\theader = LS_64(0x1234, I40IWQPSQ_REMSTAG) |\n-\t\t\t LS_64(I40IWQP_OP_RDMA_READ, I40IWQPSQ_OPCODE) |\n-\t\t\t LS_64(qp->qp_uk.swqe_polarity, I40IWQPSQ_VALID);\n-\t\tset_64bit_val(wqe, 8, ((u64)0xabcd << 32));\n-\t} else {\n-\t\theader = LS_64(I40IWQP_OP_RDMA_WRITE, I40IWQPSQ_OPCODE) |\n-\t\t\t LS_64(qp->qp_uk.swqe_polarity, I40IWQPSQ_VALID);\n-\t}\n-\n-\ti40iw_insert_wqe_hdr(wqe, header);\n-\n-\ti40iw_debug_buf(qp->dev, I40IW_DEBUG_WQE, \"RTR WQE\",\n-\t\t\twqe, I40IW_QP_WQE_MIN_SIZE);\n-}\n-\n-/**\n- * i40iw_sc_post_wqe0 - send wqe with opcode\n- * @qp: sc qp struct\n- * @opcode: opcode to use for wqe0\n- */\n-static enum i40iw_status_code i40iw_sc_post_wqe0(struct i40iw_sc_qp *qp, u8 opcode)\n-{\n-\tu64 *wqe;\n-\tu64 header;\n-\tstruct i40iw_qp_uk *qp_uk;\n-\n-\tqp_uk = &qp->qp_uk;\n-\twqe = qp_uk->sq_base->elem;\n-\n-\tif (!wqe)\n-\t\treturn I40IW_ERR_QP_TOOMANY_WRS_POSTED;\n-\tswitch (opcode) {\n-\tcase I40IWQP_OP_NOP:\n-\t\tset_64bit_val(wqe, 0, 0);\n-\t\tset_64bit_val(wqe, 8, 0);\n-\t\tset_64bit_val(wqe, 16, 0);\n-\t\theader = LS_64(I40IWQP_OP_NOP, I40IWQPSQ_OPCODE) |\n-\t\t\t LS_64(qp->qp_uk.swqe_polarity, I40IWQPSQ_VALID);\n-\n-\t\ti40iw_insert_wqe_hdr(wqe, header);\n-\t\tbreak;\n-\tcase I40IWQP_OP_RDMA_SEND:\n-\t\tset_64bit_val(wqe, 0, 0);\n-\t\tset_64bit_val(wqe, 8, 0);\n-\t\tset_64bit_val(wqe, 16, 0);\n-\t\theader = LS_64(I40IWQP_OP_RDMA_SEND, I40IWQPSQ_OPCODE) |\n-\t\t\t LS_64(qp->qp_uk.swqe_polarity, I40IWQPSQ_VALID) |\n-\t\t\t LS_64(1, I40IWQPSQ_STREAMMODE) |\n-\t\t\t LS_64(1, I40IWQPSQ_WAITFORRCVPDU);\n-\n-\t\ti40iw_insert_wqe_hdr(wqe, header);\n-\t\tbreak;\n-\tdefault:\n-\t\ti40iw_debug(qp->dev, I40IW_DEBUG_QP, \"%s: Invalid WQE zero opcode\\n\",\n-\t\t\t __func__);\n-\t\tbreak;\n-\t}\n-\treturn 0;\n-}\n-\n-/**\n- * i40iw_sc_init_iw_hmc() - queries fpm values using cqp and populates hmc_info\n- * @dev : ptr to i40iw_dev struct\n- * @hmc_fn_id: hmc function id\n- */\n-enum i40iw_status_code i40iw_sc_init_iw_hmc(struct i40iw_sc_dev *dev, u8 hmc_fn_id)\n-{\n-\tstruct i40iw_hmc_info *hmc_info;\n-\tstruct i40iw_dma_mem query_fpm_mem;\n-\tstruct i40iw_virt_mem virt_mem;\n-\tstruct i40iw_vfdev *vf_dev = NULL;\n-\tu32 mem_size;\n-\tenum i40iw_status_code ret_code = 0;\n-\tbool poll_registers = true;\n-\tu16 iw_vf_idx;\n-\tu8 wait_type;\n-\n-\tif (hmc_fn_id >= I40IW_MAX_VF_FPM_ID ||\n-\t (dev->hmc_fn_id != 
hmc_fn_id && hmc_fn_id < I40IW_FIRST_VF_FPM_ID))\n-\t\treturn I40IW_ERR_INVALID_HMCFN_ID;\n-\n-\ti40iw_debug(dev, I40IW_DEBUG_HMC, \"hmc_fn_id %u, dev->hmc_fn_id %u\\n\", hmc_fn_id,\n-\t\t dev->hmc_fn_id);\n-\tif (hmc_fn_id == dev->hmc_fn_id) {\n-\t\thmc_info = dev->hmc_info;\n-\t\tquery_fpm_mem.pa = dev->fpm_query_buf_pa;\n-\t\tquery_fpm_mem.va = dev->fpm_query_buf;\n-\t} else {\n-\t\tvf_dev = i40iw_vfdev_from_fpm(dev, hmc_fn_id);\n-\t\tif (!vf_dev)\n-\t\t\treturn I40IW_ERR_INVALID_VF_ID;\n-\n-\t\thmc_info = &vf_dev->hmc_info;\n-\t\tiw_vf_idx = vf_dev->iw_vf_idx;\n-\t\ti40iw_debug(dev, I40IW_DEBUG_HMC, \"vf_dev %p, hmc_info %p, hmc_obj %p\\n\", vf_dev,\n-\t\t\t hmc_info, hmc_info->hmc_obj);\n-\t\tif (!vf_dev->fpm_query_buf) {\n-\t\t\tif (!dev->vf_fpm_query_buf[iw_vf_idx].va) {\n-\t\t\t\tret_code = i40iw_alloc_query_fpm_buf(dev,\n-\t\t\t\t\t\t\t\t &dev->vf_fpm_query_buf[iw_vf_idx]);\n-\t\t\t\tif (ret_code)\n-\t\t\t\t\treturn ret_code;\n-\t\t\t}\n-\t\t\tvf_dev->fpm_query_buf = dev->vf_fpm_query_buf[iw_vf_idx].va;\n-\t\t\tvf_dev->fpm_query_buf_pa = dev->vf_fpm_query_buf[iw_vf_idx].pa;\n-\t\t}\n-\t\tquery_fpm_mem.pa = vf_dev->fpm_query_buf_pa;\n-\t\tquery_fpm_mem.va = vf_dev->fpm_query_buf;\n-\t\t/**\n-\t\t * It is HARDWARE specific:\n-\t\t * this call is done by PF for VF and\n-\t\t * i40iw_sc_query_fpm_values needs ccq poll\n-\t\t * because PF ccq is already created.\n-\t\t */\n-\t\tpoll_registers = false;\n-\t}\n-\n-\thmc_info->hmc_fn_id = hmc_fn_id;\n-\n-\tif (hmc_fn_id != dev->hmc_fn_id) {\n-\t\tret_code =\n-\t\t\ti40iw_cqp_query_fpm_values_cmd(dev, &query_fpm_mem, hmc_fn_id);\n-\t} else {\n-\t\twait_type = poll_registers ? (u8)I40IW_CQP_WAIT_POLL_REGS :\n-\t\t\t (u8)I40IW_CQP_WAIT_POLL_CQ;\n-\n-\t\tret_code = i40iw_sc_query_fpm_values(\n-\t\t\t\t\tdev->cqp,\n-\t\t\t\t\t0,\n-\t\t\t\t\thmc_info->hmc_fn_id,\n-\t\t\t\t\t&query_fpm_mem,\n-\t\t\t\t\ttrue,\n-\t\t\t\t\twait_type);\n-\t}\n-\tif (ret_code)\n-\t\treturn ret_code;\n-\n-\t/* parse the fpm_query_buf and fill hmc obj info */\n-\tret_code =\n-\t\ti40iw_sc_parse_fpm_query_buf((u64 *)query_fpm_mem.va,\n-\t\t\t\t\t hmc_info,\n-\t\t\t\t\t &dev->hmc_fpm_misc);\n-\tif (ret_code)\n-\t\treturn ret_code;\n-\ti40iw_debug_buf(dev, I40IW_DEBUG_HMC, \"QUERY FPM BUFFER\",\n-\t\t\tquery_fpm_mem.va, I40IW_QUERY_FPM_BUF_SIZE);\n-\n-\tif (hmc_fn_id != dev->hmc_fn_id) {\n-\t\ti40iw_cqp_commit_fpm_values_cmd(dev, &query_fpm_mem, hmc_fn_id);\n-\n-\t\t/* parse the fpm_commit_buf and fill hmc obj info */\n-\t\ti40iw_sc_parse_fpm_commit_buf((u64 *)query_fpm_mem.va, hmc_info->hmc_obj, &hmc_info->sd_table.sd_cnt);\n-\t\tmem_size = sizeof(struct i40iw_hmc_sd_entry) *\n-\t\t\t (hmc_info->sd_table.sd_cnt + hmc_info->first_sd_index);\n-\t\tret_code = i40iw_allocate_virt_mem(dev->hw, &virt_mem, mem_size);\n-\t\tif (ret_code)\n-\t\t\treturn ret_code;\n-\t\thmc_info->sd_table.sd_entry = virt_mem.va;\n-\t}\n-\n-\treturn ret_code;\n-}\n-\n-/**\n- * i40iw_sc_configure_iw_fpm() - commits hmc obj cnt values using cqp command and\n- * populates fpm base address in hmc_info\n- * @dev : ptr to i40iw_dev struct\n- * @hmc_fn_id: hmc function id\n- */\n-static enum i40iw_status_code i40iw_sc_configure_iw_fpm(struct i40iw_sc_dev *dev,\n-\t\t\t\t\t\t\tu8 hmc_fn_id)\n-{\n-\tstruct i40iw_hmc_info *hmc_info;\n-\tstruct i40iw_hmc_obj_info *obj_info;\n-\tu64 *buf;\n-\tstruct i40iw_dma_mem commit_fpm_mem;\n-\tu32 i, j;\n-\tenum i40iw_status_code ret_code = 0;\n-\tbool poll_registers = true;\n-\tu8 wait_type;\n-\n-\tif (hmc_fn_id >= I40IW_MAX_VF_FPM_ID ||\n-\t (dev->hmc_fn_id != 
hmc_fn_id && hmc_fn_id < I40IW_FIRST_VF_FPM_ID))\n-\t\treturn I40IW_ERR_INVALID_HMCFN_ID;\n-\n-\tif (hmc_fn_id == dev->hmc_fn_id) {\n-\t\thmc_info = dev->hmc_info;\n-\t} else {\n-\t\thmc_info = i40iw_vf_hmcinfo_from_fpm(dev, hmc_fn_id);\n-\t\tpoll_registers = false;\n-\t}\n-\tif (!hmc_info)\n-\t\treturn I40IW_ERR_BAD_PTR;\n-\n-\tobj_info = hmc_info->hmc_obj;\n-\tbuf = dev->fpm_commit_buf;\n-\n-\t/* copy cnt values in commit buf */\n-\tfor (i = I40IW_HMC_IW_QP, j = 0; i <= I40IW_HMC_IW_PBLE;\n-\t i++, j += 8)\n-\t\tset_64bit_val(buf, j, (u64)obj_info[i].cnt);\n-\n-\tset_64bit_val(buf, 40, 0); /* APBVT rsvd */\n-\n-\tcommit_fpm_mem.pa = dev->fpm_commit_buf_pa;\n-\tcommit_fpm_mem.va = dev->fpm_commit_buf;\n-\twait_type = poll_registers ? (u8)I40IW_CQP_WAIT_POLL_REGS :\n-\t\t\t(u8)I40IW_CQP_WAIT_POLL_CQ;\n-\tret_code = i40iw_sc_commit_fpm_values(\n-\t\t\t\t\tdev->cqp,\n-\t\t\t\t\t0,\n-\t\t\t\t\thmc_info->hmc_fn_id,\n-\t\t\t\t\t&commit_fpm_mem,\n-\t\t\t\t\ttrue,\n-\t\t\t\t\twait_type);\n-\n-\t/* parse the fpm_commit_buf and fill hmc obj info */\n-\tif (!ret_code)\n-\t\tret_code = i40iw_sc_parse_fpm_commit_buf(dev->fpm_commit_buf,\n-\t\t\t\t\t\t\t hmc_info->hmc_obj,\n-\t\t\t\t\t\t\t &hmc_info->sd_table.sd_cnt);\n-\n-\ti40iw_debug_buf(dev, I40IW_DEBUG_HMC, \"COMMIT FPM BUFFER\",\n-\t\t\tcommit_fpm_mem.va, I40IW_COMMIT_FPM_BUF_SIZE);\n-\n-\treturn ret_code;\n-}\n-\n-/**\n- * cqp_sds_wqe_fill - fill cqp wqe doe sd\n- * @cqp: struct for cqp hw\n- * @info; sd info for wqe\n- * @scratch: u64 saved to be used during cqp completion\n- */\n-static enum i40iw_status_code cqp_sds_wqe_fill(struct i40iw_sc_cqp *cqp,\n-\t\t\t\t\t struct i40iw_update_sds_info *info,\n-\t\t\t\t\t u64 scratch)\n-{\n-\tu64 data;\n-\tu64 header;\n-\tu64 *wqe;\n-\tint mem_entries, wqe_entries;\n-\tstruct i40iw_dma_mem *sdbuf = &cqp->sdbuf;\n-\tu64 offset;\n-\tu32 wqe_idx;\n-\n-\twqe = i40iw_sc_cqp_get_next_send_wqe_idx(cqp, scratch, &wqe_idx);\n-\tif (!wqe)\n-\t\treturn I40IW_ERR_RING_FULL;\n-\n-\tI40IW_CQP_INIT_WQE(wqe);\n-\twqe_entries = (info->cnt > 3) ? 
3 : info->cnt;\n-\tmem_entries = info->cnt - wqe_entries;\n-\n-\theader = LS_64(I40IW_CQP_OP_UPDATE_PE_SDS, I40IW_CQPSQ_OPCODE) |\n-\t\t LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID) |\n-\t\t LS_64(mem_entries, I40IW_CQPSQ_UPESD_ENTRY_COUNT);\n-\n-\tif (mem_entries) {\n-\t\toffset = wqe_idx * I40IW_UPDATE_SD_BUF_SIZE;\n-\t\tmemcpy((char *)sdbuf->va + offset, &info->entry[3],\n-\t\t mem_entries << 4);\n-\t\tdata = (u64)sdbuf->pa + offset;\n-\t} else {\n-\t\tdata = 0;\n-\t}\n-\tdata |= LS_64(info->hmc_fn_id, I40IW_CQPSQ_UPESD_HMCFNID);\n-\n-\tset_64bit_val(wqe, 16, data);\n-\n-\tswitch (wqe_entries) {\n-\tcase 3:\n-\t\tset_64bit_val(wqe, 48,\n-\t\t\t (LS_64(info->entry[2].cmd, I40IW_CQPSQ_UPESD_SDCMD) |\n-\t\t\t\t\tLS_64(1, I40IW_CQPSQ_UPESD_ENTRY_VALID)));\n-\n-\t\tset_64bit_val(wqe, 56, info->entry[2].data);\n-\t\t/* fallthrough */\n-\tcase 2:\n-\t\tset_64bit_val(wqe, 32,\n-\t\t\t (LS_64(info->entry[1].cmd, I40IW_CQPSQ_UPESD_SDCMD) |\n-\t\t\t\t\tLS_64(1, I40IW_CQPSQ_UPESD_ENTRY_VALID)));\n-\n-\t\tset_64bit_val(wqe, 40, info->entry[1].data);\n-\t\t/* fallthrough */\n-\tcase 1:\n-\t\tset_64bit_val(wqe, 0,\n-\t\t\t LS_64(info->entry[0].cmd, I40IW_CQPSQ_UPESD_SDCMD));\n-\n-\t\tset_64bit_val(wqe, 8, info->entry[0].data);\n-\t\tbreak;\n-\tdefault:\n-\t\tbreak;\n-\t}\n-\n-\ti40iw_insert_wqe_hdr(wqe, header);\n-\n-\ti40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, \"UPDATE_PE_SDS WQE\",\n-\t\t\twqe, I40IW_CQP_WQE_SIZE * 8);\n-\treturn 0;\n-}\n-\n-/**\n- * i40iw_update_pe_sds - cqp wqe for sd\n- * @dev: ptr to i40iw_dev struct\n- * @info: sd info for sd's\n- * @scratch: u64 saved to be used during cqp completion\n- */\n-static enum i40iw_status_code i40iw_update_pe_sds(struct i40iw_sc_dev *dev,\n-\t\t\t\t\t\t struct i40iw_update_sds_info *info,\n-\t\t\t\t\t\t u64 scratch)\n-{\n-\tstruct i40iw_sc_cqp *cqp = dev->cqp;\n-\tenum i40iw_status_code ret_code;\n-\n-\tret_code = cqp_sds_wqe_fill(cqp, info, scratch);\n-\tif (!ret_code)\n-\t\ti40iw_sc_cqp_post_sq(cqp);\n-\n-\treturn ret_code;\n-}\n-\n-/**\n- * i40iw_update_sds_noccq - update sd before ccq created\n- * @dev: sc device struct\n- * @info: sd info for sd's\n- */\n-enum i40iw_status_code i40iw_update_sds_noccq(struct i40iw_sc_dev *dev,\n-\t\t\t\t\t struct i40iw_update_sds_info *info)\n-{\n-\tu32 error, val, tail;\n-\tstruct i40iw_sc_cqp *cqp = dev->cqp;\n-\tenum i40iw_status_code ret_code;\n-\n-\tret_code = cqp_sds_wqe_fill(cqp, info, 0);\n-\tif (ret_code)\n-\t\treturn ret_code;\n-\ti40iw_get_cqp_reg_info(cqp, &val, &tail, &error);\n-\tif (error)\n-\t\treturn I40IW_ERR_CQP_COMPL_ERROR;\n-\n-\ti40iw_sc_cqp_post_sq(cqp);\n-\tret_code = i40iw_cqp_poll_registers(cqp, tail, I40IW_DONE_COUNT);\n-\n-\treturn ret_code;\n-}\n-\n-/**\n- * i40iw_sc_suspend_qp - suspend qp for param change\n- * @cqp: struct for cqp hw\n- * @qp: sc qp struct\n- * @scratch: u64 saved to be used during cqp completion\n- */\n-enum i40iw_status_code i40iw_sc_suspend_qp(struct i40iw_sc_cqp *cqp,\n-\t\t\t\t\t struct i40iw_sc_qp *qp,\n-\t\t\t\t\t u64 scratch)\n-{\n-\tu64 header;\n-\tu64 *wqe;\n-\n-\twqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);\n-\tif (!wqe)\n-\t\treturn I40IW_ERR_RING_FULL;\n-\theader = LS_64(qp->qp_uk.qp_id, I40IW_CQPSQ_SUSPENDQP_QPID) |\n-\t\t LS_64(I40IW_CQP_OP_SUSPEND_QP, I40IW_CQPSQ_OPCODE) |\n-\t\t LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);\n-\n-\ti40iw_insert_wqe_hdr(wqe, header);\n-\n-\ti40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, \"SUSPEND_QP WQE\",\n-\t\t\twqe, I40IW_CQP_WQE_SIZE * 8);\n-\n-\ti40iw_sc_cqp_post_sq(cqp);\n-\treturn 0;\n-}\n-\n-/**\n- * 
i40iw_sc_resume_qp - resume qp after suspend\n- * @cqp: struct for cqp hw\n- * @qp: sc qp struct\n- * @scratch: u64 saved to be used during cqp completion\n- */\n-enum i40iw_status_code i40iw_sc_resume_qp(struct i40iw_sc_cqp *cqp,\n-\t\t\t\t\t struct i40iw_sc_qp *qp,\n-\t\t\t\t\t u64 scratch)\n-{\n-\tu64 header;\n-\tu64 *wqe;\n-\n-\twqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);\n-\tif (!wqe)\n-\t\treturn I40IW_ERR_RING_FULL;\n-\tset_64bit_val(wqe,\n-\t\t 16,\n-\t\t\tLS_64(qp->qs_handle, I40IW_CQPSQ_RESUMEQP_QSHANDLE));\n-\n-\theader = LS_64(qp->qp_uk.qp_id, I40IW_CQPSQ_RESUMEQP_QPID) |\n-\t\t LS_64(I40IW_CQP_OP_RESUME_QP, I40IW_CQPSQ_OPCODE) |\n-\t\t LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);\n-\n-\ti40iw_insert_wqe_hdr(wqe, header);\n-\n-\ti40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, \"RESUME_QP WQE\",\n-\t\t\twqe, I40IW_CQP_WQE_SIZE * 8);\n-\n-\ti40iw_sc_cqp_post_sq(cqp);\n-\treturn 0;\n-}\n-\n-/**\n- * i40iw_sc_static_hmc_pages_allocated - cqp wqe to allocate hmc pages\n- * @cqp: struct for cqp hw\n- * @scratch: u64 saved to be used during cqp completion\n- * @hmc_fn_id: hmc function id\n- * @post_sq: flag for cqp db to ring\n- * @poll_registers: flag to poll register for cqp completion\n- */\n-enum i40iw_status_code i40iw_sc_static_hmc_pages_allocated(\n-\t\t\t\t\tstruct i40iw_sc_cqp *cqp,\n-\t\t\t\t\tu64 scratch,\n-\t\t\t\t\tu8 hmc_fn_id,\n-\t\t\t\t\tbool post_sq,\n-\t\t\t\t\tbool poll_registers)\n-{\n-\tu64 header;\n-\tu64 *wqe;\n-\tu32 tail, val, error;\n-\tenum i40iw_status_code ret_code = 0;\n-\n-\twqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);\n-\tif (!wqe)\n-\t\treturn I40IW_ERR_RING_FULL;\n-\tset_64bit_val(wqe,\n-\t\t 16,\n-\t\t LS_64(hmc_fn_id, I40IW_SHMC_PAGE_ALLOCATED_HMC_FN_ID));\n-\n-\theader = LS_64(I40IW_CQP_OP_SHMC_PAGES_ALLOCATED, I40IW_CQPSQ_OPCODE) |\n-\t\t LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);\n-\n-\ti40iw_insert_wqe_hdr(wqe, header);\n-\n-\ti40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, \"SHMC_PAGES_ALLOCATED WQE\",\n-\t\t\twqe, I40IW_CQP_WQE_SIZE * 8);\n-\ti40iw_get_cqp_reg_info(cqp, &val, &tail, &error);\n-\tif (error) {\n-\t\tret_code = I40IW_ERR_CQP_COMPL_ERROR;\n-\t\treturn ret_code;\n-\t}\n-\tif (post_sq) {\n-\t\ti40iw_sc_cqp_post_sq(cqp);\n-\t\tif (poll_registers)\n-\t\t\t/* check for cqp sq tail update */\n-\t\t\tret_code = i40iw_cqp_poll_registers(cqp, tail, 1000);\n-\t\telse\n-\t\t\tret_code = i40iw_sc_poll_for_cqp_op_done(cqp,\n-\t\t\t\t\t\t\t\t I40IW_CQP_OP_SHMC_PAGES_ALLOCATED,\n-\t\t\t\t\t\t\t\t NULL);\n-\t}\n-\n-\treturn ret_code;\n-}\n-\n-/**\n- * i40iw_ring_full - check if cqp ring is full\n- * @cqp: struct for cqp hw\n- */\n-static bool i40iw_ring_full(struct i40iw_sc_cqp *cqp)\n-{\n-\treturn I40IW_RING_FULL_ERR(cqp->sq_ring);\n-}\n-\n-/**\n- * i40iw_est_sd - returns approximate number of SDs for HMC\n- * @dev: sc device struct\n- * @hmc_info: hmc structure, size and count for HMC objects\n- */\n-static u64 i40iw_est_sd(struct i40iw_sc_dev *dev, struct i40iw_hmc_info *hmc_info)\n-{\n-\tint i;\n-\tu64 size = 0;\n-\tu64 sd;\n-\n-\tfor (i = I40IW_HMC_IW_QP; i < I40IW_HMC_IW_PBLE; i++)\n-\t\tsize += hmc_info->hmc_obj[i].cnt * hmc_info->hmc_obj[i].size;\n-\n-\tif (dev->is_pf)\n-\t\tsize += hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].cnt * hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].size;\n-\n-\tif (size & 0x1FFFFF)\n-\t\tsd = (size >> 21) + 1; /* add 1 for remainder */\n-\telse\n-\t\tsd = size >> 21;\n-\n-\tif (!dev->is_pf) {\n-\t\t/* 2MB alignment for VF PBLE HMC */\n-\t\tsize = hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].cnt * 
hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].size;\n-\t\tif (size & 0x1FFFFF)\n-\t\t\tsd += (size >> 21) + 1; /* add 1 for remainder */\n-\t\telse\n-\t\t\tsd += size >> 21;\n-\t}\n-\n-\treturn sd;\n-}\n-\n-/**\n- * i40iw_config_fpm_values - configure HMC objects\n- * @dev: sc device struct\n- * @qp_count: desired qp count\n- */\n-enum i40iw_status_code i40iw_config_fpm_values(struct i40iw_sc_dev *dev, u32 qp_count)\n-{\n-\tstruct i40iw_virt_mem virt_mem;\n-\tu32 i, mem_size;\n-\tu32 qpwantedoriginal, qpwanted, mrwanted, pblewanted;\n-\tu64 sd_needed;\n-\tu32 loop_count = 0;\n-\n-\tstruct i40iw_hmc_info *hmc_info;\n-\tstruct i40iw_hmc_fpm_misc *hmc_fpm_misc;\n-\tenum i40iw_status_code ret_code = 0;\n-\n-\thmc_info = dev->hmc_info;\n-\thmc_fpm_misc = &dev->hmc_fpm_misc;\n-\n-\tret_code = i40iw_sc_init_iw_hmc(dev, dev->hmc_fn_id);\n-\tif (ret_code) {\n-\t\ti40iw_debug(dev, I40IW_DEBUG_HMC,\n-\t\t\t \"i40iw_sc_init_iw_hmc returned error_code = %d\\n\",\n-\t\t\t ret_code);\n-\t\treturn ret_code;\n-\t}\n-\n-\tfor (i = I40IW_HMC_IW_QP; i < I40IW_HMC_IW_MAX; i++)\n-\t\thmc_info->hmc_obj[i].cnt = hmc_info->hmc_obj[i].max_cnt;\n-\tsd_needed = i40iw_est_sd(dev, hmc_info);\n-\ti40iw_debug(dev, I40IW_DEBUG_HMC,\n-\t\t \"%s: FW initial max sd_count[%08lld] first_sd_index[%04d]\\n\",\n-\t\t __func__, sd_needed, hmc_info->first_sd_index);\n-\ti40iw_debug(dev, I40IW_DEBUG_HMC,\n-\t\t \"%s: sd count %d where max sd is %d\\n\",\n-\t\t __func__, hmc_info->sd_table.sd_cnt,\n-\t\t hmc_fpm_misc->max_sds);\n-\n-\tqpwanted = min(qp_count, hmc_info->hmc_obj[I40IW_HMC_IW_QP].max_cnt);\n-\tqpwantedoriginal = qpwanted;\n-\tmrwanted = hmc_info->hmc_obj[I40IW_HMC_IW_MR].max_cnt;\n-\tpblewanted = hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].max_cnt;\n-\n-\ti40iw_debug(dev, I40IW_DEBUG_HMC,\n-\t\t \"req_qp=%d max_sd=%d, max_qp = %d, max_cq=%d, max_mr=%d, max_pble=%d\\n\",\n-\t\t qp_count, hmc_fpm_misc->max_sds,\n-\t\t hmc_info->hmc_obj[I40IW_HMC_IW_QP].max_cnt,\n-\t\t hmc_info->hmc_obj[I40IW_HMC_IW_CQ].max_cnt,\n-\t\t hmc_info->hmc_obj[I40IW_HMC_IW_MR].max_cnt,\n-\t\t hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].max_cnt);\n-\n-\tdo {\n-\t\t++loop_count;\n-\t\thmc_info->hmc_obj[I40IW_HMC_IW_QP].cnt = qpwanted;\n-\t\thmc_info->hmc_obj[I40IW_HMC_IW_CQ].cnt =\n-\t\t\tmin(2 * qpwanted, hmc_info->hmc_obj[I40IW_HMC_IW_CQ].cnt);\n-\t\thmc_info->hmc_obj[I40IW_HMC_IW_SRQ].cnt = 0x00; /* Reserved */\n-\t\thmc_info->hmc_obj[I40IW_HMC_IW_HTE].cnt =\n-\t\t\t\t\tqpwanted * hmc_fpm_misc->ht_multiplier;\n-\t\thmc_info->hmc_obj[I40IW_HMC_IW_ARP].cnt =\n-\t\t\thmc_info->hmc_obj[I40IW_HMC_IW_ARP].max_cnt;\n-\t\thmc_info->hmc_obj[I40IW_HMC_IW_APBVT_ENTRY].cnt = 1;\n-\t\thmc_info->hmc_obj[I40IW_HMC_IW_MR].cnt = mrwanted;\n-\n-\t\thmc_info->hmc_obj[I40IW_HMC_IW_XF].cnt =\n-\t\t\troundup_pow_of_two(I40IW_MAX_WQ_ENTRIES * qpwanted);\n-\t\thmc_info->hmc_obj[I40IW_HMC_IW_Q1].cnt =\n-\t\t\troundup_pow_of_two(2 * I40IW_MAX_IRD_SIZE * qpwanted);\n-\t\thmc_info->hmc_obj[I40IW_HMC_IW_XFFL].cnt =\n-\t\t\thmc_info->hmc_obj[I40IW_HMC_IW_XF].cnt / hmc_fpm_misc->xf_block_size;\n-\t\thmc_info->hmc_obj[I40IW_HMC_IW_Q1FL].cnt =\n-\t\t\thmc_info->hmc_obj[I40IW_HMC_IW_Q1].cnt / hmc_fpm_misc->q1_block_size;\n-\t\thmc_info->hmc_obj[I40IW_HMC_IW_TIMER].cnt =\n-\t\t\t((qpwanted) / 512 + 1) * hmc_fpm_misc->timer_bucket;\n-\t\thmc_info->hmc_obj[I40IW_HMC_IW_FSIMC].cnt = 0x00;\n-\t\thmc_info->hmc_obj[I40IW_HMC_IW_FSIAV].cnt = 0x00;\n-\t\thmc_info->hmc_obj[I40IW_HMC_IW_PBLE].cnt = pblewanted;\n-\n-\t\t/* How much memory is needed for all the objects. 
*/\n-\t\tsd_needed = i40iw_est_sd(dev, hmc_info);\n-\t\tif ((loop_count > 1000) ||\n-\t\t ((!(loop_count % 10)) &&\n-\t\t (qpwanted > qpwantedoriginal * 2 / 3))) {\n-\t\t\tif (qpwanted > FPM_MULTIPLIER)\n-\t\t\t\tqpwanted = roundup_pow_of_two(qpwanted -\n-\t\t\t\t\t\t\t FPM_MULTIPLIER);\n-\t\t\tqpwanted >>= 1;\n-\t\t}\n-\t\tif (mrwanted > FPM_MULTIPLIER * 10)\n-\t\t\tmrwanted -= FPM_MULTIPLIER * 10;\n-\t\tif (pblewanted > FPM_MULTIPLIER * 1000)\n-\t\t\tpblewanted -= FPM_MULTIPLIER * 1000;\n-\t} while (sd_needed > hmc_fpm_misc->max_sds && loop_count < 2000);\n-\n-\ti40iw_debug(dev, I40IW_DEBUG_HMC,\n-\t\t \"loop_cnt=%d, sd_needed=%lld, qpcnt = %d, cqcnt=%d, mrcnt=%d, pblecnt=%d\\n\",\n-\t\t loop_count, sd_needed,\n-\t\t hmc_info->hmc_obj[I40IW_HMC_IW_QP].cnt,\n-\t\t hmc_info->hmc_obj[I40IW_HMC_IW_CQ].cnt,\n-\t\t hmc_info->hmc_obj[I40IW_HMC_IW_MR].cnt,\n-\t\t hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].cnt);\n-\n-\tret_code = i40iw_sc_configure_iw_fpm(dev, dev->hmc_fn_id);\n-\tif (ret_code) {\n-\t\ti40iw_debug(dev, I40IW_DEBUG_HMC,\n-\t\t\t \"configure_iw_fpm returned error_code[x%08X]\\n\",\n-\t\t\t i40iw_rd32(dev->hw, dev->is_pf ? I40E_PFPE_CQPERRCODES : I40E_VFPE_CQPERRCODES1));\n-\t\treturn ret_code;\n-\t}\n-\n-\tmem_size = sizeof(struct i40iw_hmc_sd_entry) *\n-\t\t (hmc_info->sd_table.sd_cnt + hmc_info->first_sd_index + 1);\n-\tret_code = i40iw_allocate_virt_mem(dev->hw, &virt_mem, mem_size);\n-\tif (ret_code) {\n-\t\ti40iw_debug(dev, I40IW_DEBUG_HMC,\n-\t\t\t \"%s: failed to allocate memory for sd_entry buffer\\n\",\n-\t\t\t __func__);\n-\t\treturn ret_code;\n-\t}\n-\thmc_info->sd_table.sd_entry = virt_mem.va;\n-\n-\treturn ret_code;\n-}\n-\n-/**\n- * i40iw_exec_cqp_cmd - execute cqp cmd when wqe are available\n- * @dev: rdma device\n- * @pcmdinfo: cqp command info\n- */\n-static enum i40iw_status_code i40iw_exec_cqp_cmd(struct i40iw_sc_dev *dev,\n-\t\t\t\t\t\t struct cqp_commands_info *pcmdinfo)\n-{\n-\tenum i40iw_status_code status;\n-\tstruct i40iw_dma_mem values_mem;\n-\n-\tdev->cqp_cmd_stats[pcmdinfo->cqp_cmd]++;\n-\tswitch (pcmdinfo->cqp_cmd) {\n-\tcase OP_DELETE_LOCAL_MAC_IPADDR_ENTRY:\n-\t\tstatus = i40iw_sc_del_local_mac_ipaddr_entry(\n-\t\t\t\tpcmdinfo->in.u.del_local_mac_ipaddr_entry.cqp,\n-\t\t\t\tpcmdinfo->in.u.del_local_mac_ipaddr_entry.scratch,\n-\t\t\t\tpcmdinfo->in.u.del_local_mac_ipaddr_entry.entry_idx,\n-\t\t\t\tpcmdinfo->in.u.del_local_mac_ipaddr_entry.ignore_ref_count,\n-\t\t\t\tpcmdinfo->post_sq);\n-\t\tbreak;\n-\tcase OP_CEQ_DESTROY:\n-\t\tstatus = i40iw_sc_ceq_destroy(pcmdinfo->in.u.ceq_destroy.ceq,\n-\t\t\t\t\t pcmdinfo->in.u.ceq_destroy.scratch,\n-\t\t\t\t\t pcmdinfo->post_sq);\n-\t\tbreak;\n-\tcase OP_AEQ_DESTROY:\n-\t\tstatus = i40iw_sc_aeq_destroy(pcmdinfo->in.u.aeq_destroy.aeq,\n-\t\t\t\t\t pcmdinfo->in.u.aeq_destroy.scratch,\n-\t\t\t\t\t pcmdinfo->post_sq);\n-\n-\t\tbreak;\n-\tcase OP_DELETE_ARP_CACHE_ENTRY:\n-\t\tstatus = i40iw_sc_del_arp_cache_entry(\n-\t\t\t\tpcmdinfo->in.u.del_arp_cache_entry.cqp,\n-\t\t\t\tpcmdinfo->in.u.del_arp_cache_entry.scratch,\n-\t\t\t\tpcmdinfo->in.u.del_arp_cache_entry.arp_index,\n-\t\t\t\tpcmdinfo->post_sq);\n-\t\tbreak;\n-\tcase OP_MANAGE_APBVT_ENTRY:\n-\t\tstatus = i40iw_sc_manage_apbvt_entry(\n-\t\t\t\tpcmdinfo->in.u.manage_apbvt_entry.cqp,\n-\t\t\t\t&pcmdinfo->in.u.manage_apbvt_entry.info,\n-\t\t\t\tpcmdinfo->in.u.manage_apbvt_entry.scratch,\n-\t\t\t\tpcmdinfo->post_sq);\n-\t\tbreak;\n-\tcase OP_CEQ_CREATE:\n-\t\tstatus = i40iw_sc_ceq_create(pcmdinfo->in.u.ceq_create.ceq,\n-\t\t\t\t\t 
pcmdinfo->in.u.ceq_create.scratch,\n-\t\t\t\t\t pcmdinfo->post_sq);\n-\t\tbreak;\n-\tcase OP_AEQ_CREATE:\n-\t\tstatus = i40iw_sc_aeq_create(pcmdinfo->in.u.aeq_create.aeq,\n-\t\t\t\t\t pcmdinfo->in.u.aeq_create.scratch,\n-\t\t\t\t\t pcmdinfo->post_sq);\n-\t\tbreak;\n-\tcase OP_ALLOC_LOCAL_MAC_IPADDR_ENTRY:\n-\t\tstatus = i40iw_sc_alloc_local_mac_ipaddr_entry(\n-\t\t\t\tpcmdinfo->in.u.alloc_local_mac_ipaddr_entry.cqp,\n-\t\t\t\tpcmdinfo->in.u.alloc_local_mac_ipaddr_entry.scratch,\n-\t\t\t\tpcmdinfo->post_sq);\n-\t\tbreak;\n-\tcase OP_ADD_LOCAL_MAC_IPADDR_ENTRY:\n-\t\tstatus = i40iw_sc_add_local_mac_ipaddr_entry(\n-\t\t\t\tpcmdinfo->in.u.add_local_mac_ipaddr_entry.cqp,\n-\t\t\t\t&pcmdinfo->in.u.add_local_mac_ipaddr_entry.info,\n-\t\t\t\tpcmdinfo->in.u.add_local_mac_ipaddr_entry.scratch,\n-\t\t\t\tpcmdinfo->post_sq);\n-\t\tbreak;\n-\tcase OP_MANAGE_QHASH_TABLE_ENTRY:\n-\t\tstatus = i40iw_sc_manage_qhash_table_entry(\n-\t\t\t\tpcmdinfo->in.u.manage_qhash_table_entry.cqp,\n-\t\t\t\t&pcmdinfo->in.u.manage_qhash_table_entry.info,\n-\t\t\t\tpcmdinfo->in.u.manage_qhash_table_entry.scratch,\n-\t\t\t\tpcmdinfo->post_sq);\n-\n-\t\tbreak;\n-\tcase OP_QP_MODIFY:\n-\t\tstatus = i40iw_sc_qp_modify(\n-\t\t\t\tpcmdinfo->in.u.qp_modify.qp,\n-\t\t\t\t&pcmdinfo->in.u.qp_modify.info,\n-\t\t\t\tpcmdinfo->in.u.qp_modify.scratch,\n-\t\t\t\tpcmdinfo->post_sq);\n-\n-\t\tbreak;\n-\tcase OP_QP_UPLOAD_CONTEXT:\n-\t\tstatus = i40iw_sc_qp_upload_context(\n-\t\t\t\tpcmdinfo->in.u.qp_upload_context.dev,\n-\t\t\t\t&pcmdinfo->in.u.qp_upload_context.info,\n-\t\t\t\tpcmdinfo->in.u.qp_upload_context.scratch,\n-\t\t\t\tpcmdinfo->post_sq);\n-\n-\t\tbreak;\n-\tcase OP_CQ_CREATE:\n-\t\tstatus = i40iw_sc_cq_create(\n-\t\t\t\tpcmdinfo->in.u.cq_create.cq,\n-\t\t\t\tpcmdinfo->in.u.cq_create.scratch,\n-\t\t\t\tpcmdinfo->in.u.cq_create.check_overflow,\n-\t\t\t\tpcmdinfo->post_sq);\n-\t\tbreak;\n-\tcase OP_CQ_DESTROY:\n-\t\tstatus = i40iw_sc_cq_destroy(\n-\t\t\t\tpcmdinfo->in.u.cq_destroy.cq,\n-\t\t\t\tpcmdinfo->in.u.cq_destroy.scratch,\n-\t\t\t\tpcmdinfo->post_sq);\n-\n-\t\tbreak;\n-\tcase OP_QP_CREATE:\n-\t\tstatus = i40iw_sc_qp_create(\n-\t\t\t\tpcmdinfo->in.u.qp_create.qp,\n-\t\t\t\t&pcmdinfo->in.u.qp_create.info,\n-\t\t\t\tpcmdinfo->in.u.qp_create.scratch,\n-\t\t\t\tpcmdinfo->post_sq);\n-\t\tbreak;\n-\tcase OP_QP_DESTROY:\n-\t\tstatus = i40iw_sc_qp_destroy(\n-\t\t\t\tpcmdinfo->in.u.qp_destroy.qp,\n-\t\t\t\tpcmdinfo->in.u.qp_destroy.scratch,\n-\t\t\t\tpcmdinfo->in.u.qp_destroy.remove_hash_idx,\n-\t\t\t\tpcmdinfo->in.u.qp_destroy.\n-\t\t\t\tignore_mw_bnd,\n-\t\t\t\tpcmdinfo->post_sq);\n-\n-\t\tbreak;\n-\tcase OP_ALLOC_STAG:\n-\t\tstatus = i40iw_sc_alloc_stag(\n-\t\t\t\tpcmdinfo->in.u.alloc_stag.dev,\n-\t\t\t\t&pcmdinfo->in.u.alloc_stag.info,\n-\t\t\t\tpcmdinfo->in.u.alloc_stag.scratch,\n-\t\t\t\tpcmdinfo->post_sq);\n-\t\tbreak;\n-\tcase OP_MR_REG_NON_SHARED:\n-\t\tstatus = i40iw_sc_mr_reg_non_shared(\n-\t\t\t\tpcmdinfo->in.u.mr_reg_non_shared.dev,\n-\t\t\t\t&pcmdinfo->in.u.mr_reg_non_shared.info,\n-\t\t\t\tpcmdinfo->in.u.mr_reg_non_shared.scratch,\n-\t\t\t\tpcmdinfo->post_sq);\n-\n-\t\tbreak;\n-\tcase OP_DEALLOC_STAG:\n-\t\tstatus = i40iw_sc_dealloc_stag(\n-\t\t\t\tpcmdinfo->in.u.dealloc_stag.dev,\n-\t\t\t\t&pcmdinfo->in.u.dealloc_stag.info,\n-\t\t\t\tpcmdinfo->in.u.dealloc_stag.scratch,\n-\t\t\t\tpcmdinfo->post_sq);\n-\n-\t\tbreak;\n-\tcase OP_MW_ALLOC:\n-\t\tstatus = 
i40iw_sc_mw_alloc(\n-\t\t\t\tpcmdinfo->in.u.mw_alloc.dev,\n-\t\t\t\tpcmdinfo->in.u.mw_alloc.scratch,\n-\t\t\t\tpcmdinfo->in.u.mw_alloc.mw_stag_index,\n-\t\t\t\tpcmdinfo->in.u.mw_alloc.pd_id,\n-\t\t\t\tpcmdinfo->post_sq);\n-\n-\t\tbreak;\n-\tcase OP_QP_FLUSH_WQES:\n-\t\tstatus = i40iw_sc_qp_flush_wqes(\n-\t\t\t\tpcmdinfo->in.u.qp_flush_wqes.qp,\n-\t\t\t\t&pcmdinfo->in.u.qp_flush_wqes.info,\n-\t\t\t\tpcmdinfo->in.u.qp_flush_wqes.\n-\t\t\t\tscratch, pcmdinfo->post_sq);\n-\t\tbreak;\n-\tcase OP_GEN_AE:\n-\t\tstatus = i40iw_sc_gen_ae(\n-\t\t\t\tpcmdinfo->in.u.gen_ae.qp,\n-\t\t\t\t&pcmdinfo->in.u.gen_ae.info,\n-\t\t\t\tpcmdinfo->in.u.gen_ae.scratch,\n-\t\t\t\tpcmdinfo->post_sq);\n-\t\tbreak;\n-\tcase OP_ADD_ARP_CACHE_ENTRY:\n-\t\tstatus = i40iw_sc_add_arp_cache_entry(\n-\t\t\t\tpcmdinfo->in.u.add_arp_cache_entry.cqp,\n-\t\t\t\t&pcmdinfo->in.u.add_arp_cache_entry.info,\n-\t\t\t\tpcmdinfo->in.u.add_arp_cache_entry.scratch,\n-\t\t\t\tpcmdinfo->post_sq);\n-\t\tbreak;\n-\tcase OP_MANAGE_PUSH_PAGE:\n-\t\tstatus = i40iw_sc_manage_push_page(\n-\t\t\t\tpcmdinfo->in.u.manage_push_page.cqp,\n-\t\t\t\t&pcmdinfo->in.u.manage_push_page.info,\n-\t\t\t\tpcmdinfo->in.u.manage_push_page.scratch,\n-\t\t\t\tpcmdinfo->post_sq);\n-\t\tbreak;\n-\tcase OP_UPDATE_PE_SDS:\n-\t\t/* case I40IW_CQP_OP_UPDATE_PE_SDS */\n-\t\tstatus = i40iw_update_pe_sds(\n-\t\t\t\tpcmdinfo->in.u.update_pe_sds.dev,\n-\t\t\t\t&pcmdinfo->in.u.update_pe_sds.info,\n-\t\t\t\tpcmdinfo->in.u.update_pe_sds.\n-\t\t\t\tscratch);\n-\n-\t\tbreak;\n-\tcase OP_MANAGE_HMC_PM_FUNC_TABLE:\n-\t\tstatus = i40iw_sc_manage_hmc_pm_func_table(\n-\t\t\t\tpcmdinfo->in.u.manage_hmc_pm.dev->cqp,\n-\t\t\t\tpcmdinfo->in.u.manage_hmc_pm.scratch,\n-\t\t\t\t(u8)pcmdinfo->in.u.manage_hmc_pm.info.vf_id,\n-\t\t\t\tpcmdinfo->in.u.manage_hmc_pm.info.free_fcn,\n-\t\t\t\ttrue);\n-\t\tbreak;\n-\tcase OP_SUSPEND:\n-\t\tstatus = i40iw_sc_suspend_qp(\n-\t\t\t\tpcmdinfo->in.u.suspend_resume.cqp,\n-\t\t\t\tpcmdinfo->in.u.suspend_resume.qp,\n-\t\t\t\tpcmdinfo->in.u.suspend_resume.scratch);\n-\t\tbreak;\n-\tcase OP_RESUME:\n-\t\tstatus = i40iw_sc_resume_qp(\n-\t\t\t\tpcmdinfo->in.u.suspend_resume.cqp,\n-\t\t\t\tpcmdinfo->in.u.suspend_resume.qp,\n-\t\t\t\tpcmdinfo->in.u.suspend_resume.scratch);\n-\t\tbreak;\n-\tcase OP_MANAGE_VF_PBLE_BP:\n-\t\tstatus = i40iw_manage_vf_pble_bp(\n-\t\t\t\tpcmdinfo->in.u.manage_vf_pble_bp.cqp,\n-\t\t\t\t&pcmdinfo->in.u.manage_vf_pble_bp.info,\n-\t\t\t\tpcmdinfo->in.u.manage_vf_pble_bp.scratch, true);\n-\t\tbreak;\n-\tcase OP_QUERY_FPM_VALUES:\n-\t\tvalues_mem.pa = pcmdinfo->in.u.query_fpm_values.fpm_values_pa;\n-\t\tvalues_mem.va = pcmdinfo->in.u.query_fpm_values.fpm_values_va;\n-\t\tstatus = i40iw_sc_query_fpm_values(\n-\t\t\t\tpcmdinfo->in.u.query_fpm_values.cqp,\n-\t\t\t\tpcmdinfo->in.u.query_fpm_values.scratch,\n-\t\t\t\tpcmdinfo->in.u.query_fpm_values.hmc_fn_id,\n-\t\t\t\t&values_mem, true, I40IW_CQP_WAIT_EVENT);\n-\t\tbreak;\n-\tcase OP_COMMIT_FPM_VALUES:\n-\t\tvalues_mem.pa = pcmdinfo->in.u.commit_fpm_values.fpm_values_pa;\n-\t\tvalues_mem.va = pcmdinfo->in.u.commit_fpm_values.fpm_values_va;\n-\t\tstatus = i40iw_sc_commit_fpm_values(\n-\t\t\t\tpcmdinfo->in.u.commit_fpm_values.cqp,\n-\t\t\t\tpcmdinfo->in.u.commit_fpm_values.scratch,\n-\t\t\t\tpcmdinfo->in.u.commit_fpm_values.hmc_fn_id,\n-\t\t\t\t&values_mem,\n-\t\t\t\ttrue,\n-\t\t\t\tI40IW_CQP_WAIT_EVENT);\n-\t\tbreak;\n-\tdefault:\n-\t\tstatus = I40IW_NOT_SUPPORTED;\n-\t\tbreak;\n-\t}\n-\n-\treturn status;\n-}\n-\n-/**\n- * i40iw_process_cqp_cmd - process all cqp commands\n- * @dev: sc device struct\n- 
* @pcmdinfo: cqp command info\n- */\n-enum i40iw_status_code i40iw_process_cqp_cmd(struct i40iw_sc_dev *dev,\n-\t\t\t\t\t struct cqp_commands_info *pcmdinfo)\n-{\n-\tenum i40iw_status_code status = 0;\n-\tunsigned long flags;\n-\n-\tspin_lock_irqsave(&dev->cqp_lock, flags);\n-\tif (list_empty(&dev->cqp_cmd_head) && !i40iw_ring_full(dev->cqp))\n-\t\tstatus = i40iw_exec_cqp_cmd(dev, pcmdinfo);\n-\telse\n-\t\tlist_add_tail(&pcmdinfo->cqp_cmd_entry, &dev->cqp_cmd_head);\n-\tspin_unlock_irqrestore(&dev->cqp_lock, flags);\n-\treturn status;\n-}\n-\n-/**\n- * i40iw_process_bh - called from tasklet for cqp list\n- * @dev: sc device struct\n- */\n-enum i40iw_status_code i40iw_process_bh(struct i40iw_sc_dev *dev)\n-{\n-\tenum i40iw_status_code status = 0;\n-\tstruct cqp_commands_info *pcmdinfo;\n-\tunsigned long flags;\n-\n-\tspin_lock_irqsave(&dev->cqp_lock, flags);\n-\twhile (!list_empty(&dev->cqp_cmd_head) && !i40iw_ring_full(dev->cqp)) {\n-\t\tpcmdinfo = (struct cqp_commands_info *)i40iw_remove_head(&dev->cqp_cmd_head);\n-\n-\t\tstatus = i40iw_exec_cqp_cmd(dev, pcmdinfo);\n-\t\tif (status)\n-\t\t\tbreak;\n-\t}\n-\tspin_unlock_irqrestore(&dev->cqp_lock, flags);\n-\treturn status;\n-}\n-\n-/**\n- * i40iw_iwarp_opcode - determine if incoming is rdma layer\n- * @info: aeq info for the packet\n- * @pkt: packet for error\n- */\n-static u32 i40iw_iwarp_opcode(struct i40iw_aeqe_info *info, u8 *pkt)\n-{\n-\t__be16 *mpa;\n-\tu32 opcode = 0xffffffff;\n-\n-\tif (info->q2_data_written) {\n-\t\tmpa = (__be16 *)pkt;\n-\t\topcode = ntohs(mpa[1]) & 0xf;\n-\t}\n-\treturn opcode;\n-}\n-\n-/**\n- * i40iw_locate_mpa - return pointer to mpa in the pkt\n- * @pkt: packet with data\n- */\n-static u8 *i40iw_locate_mpa(u8 *pkt)\n-{\n-\t/* skip over ethernet header */\n-\tpkt += I40IW_MAC_HLEN;\n-\n-\t/* Skip over IP and TCP headers */\n-\tpkt += 4 * (pkt[0] & 0x0f);\n-\tpkt += 4 * ((pkt[12] >> 4) & 0x0f);\n-\treturn pkt;\n-}\n-\n-/**\n- * i40iw_setup_termhdr - termhdr for terminate pkt\n- * @qp: sc qp ptr for pkt\n- * @hdr: term hdr\n- * @opcode: flush opcode for termhdr\n- * @layer_etype: error layer + error type\n- * @err: error cod ein the header\n- */\n-static void i40iw_setup_termhdr(struct i40iw_sc_qp *qp,\n-\t\t\t\tstruct i40iw_terminate_hdr *hdr,\n-\t\t\t\tenum i40iw_flush_opcode opcode,\n-\t\t\t\tu8 layer_etype,\n-\t\t\t\tu8 err)\n-{\n-\tqp->flush_code = opcode;\n-\thdr->layer_etype = layer_etype;\n-\thdr->error_code = err;\n-}\n-\n-/**\n- * i40iw_bld_terminate_hdr - build terminate message header\n- * @qp: qp associated with received terminate AE\n- * @info: the struct contiaing AE information\n- */\n-static int i40iw_bld_terminate_hdr(struct i40iw_sc_qp *qp,\n-\t\t\t\t struct i40iw_aeqe_info *info)\n-{\n-\tu8 *pkt = qp->q2_buf + Q2_BAD_FRAME_OFFSET;\n-\tu16 ddp_seg_len;\n-\tint copy_len = 0;\n-\tu8 is_tagged = 0;\n-\tu32 opcode;\n-\tstruct i40iw_terminate_hdr *termhdr;\n-\n-\ttermhdr = (struct i40iw_terminate_hdr *)qp->q2_buf;\n-\tmemset(termhdr, 0, Q2_BAD_FRAME_OFFSET);\n-\n-\tif (info->q2_data_written) {\n-\t\t/* Use data from offending packet to fill in ddp & rdma hdrs */\n-\t\tpkt = i40iw_locate_mpa(pkt);\n-\t\tddp_seg_len = ntohs(*(__be16 *)pkt);\n-\t\tif (ddp_seg_len) {\n-\t\t\tcopy_len = 2;\n-\t\t\ttermhdr->hdrct = DDP_LEN_FLAG;\n-\t\t\tif (pkt[2] & 0x80) {\n-\t\t\t\tis_tagged = 1;\n-\t\t\t\tif (ddp_seg_len >= TERM_DDP_LEN_TAGGED) {\n-\t\t\t\t\tcopy_len += TERM_DDP_LEN_TAGGED;\n-\t\t\t\t\ttermhdr->hdrct |= DDP_HDR_FLAG;\n-\t\t\t\t}\n-\t\t\t} else {\n-\t\t\t\tif (ddp_seg_len >= TERM_DDP_LEN_UNTAGGED) 
{\n-\t\t\t\t\tcopy_len += TERM_DDP_LEN_UNTAGGED;\n-\t\t\t\t\ttermhdr->hdrct |= DDP_HDR_FLAG;\n-\t\t\t\t}\n-\n-\t\t\t\tif (ddp_seg_len >= (TERM_DDP_LEN_UNTAGGED + TERM_RDMA_LEN)) {\n-\t\t\t\t\tif ((pkt[3] & RDMA_OPCODE_MASK) == RDMA_READ_REQ_OPCODE) {\n-\t\t\t\t\t\tcopy_len += TERM_RDMA_LEN;\n-\t\t\t\t\t\ttermhdr->hdrct |= RDMA_HDR_FLAG;\n-\t\t\t\t\t}\n-\t\t\t\t}\n-\t\t\t}\n-\t\t}\n-\t}\n-\n-\topcode = i40iw_iwarp_opcode(info, pkt);\n-\n-\tswitch (info->ae_id) {\n-\tcase I40IW_AE_AMP_UNALLOCATED_STAG:\n-\t\tqp->eventtype = TERM_EVENT_QP_ACCESS_ERR;\n-\t\tif (opcode == I40IW_OP_TYPE_RDMA_WRITE)\n-\t\t\ti40iw_setup_termhdr(qp, termhdr, FLUSH_PROT_ERR,\n-\t\t\t\t\t (LAYER_DDP << 4) | DDP_TAGGED_BUFFER, DDP_TAGGED_INV_STAG);\n-\t\telse\n-\t\t\ti40iw_setup_termhdr(qp, termhdr, FLUSH_REM_ACCESS_ERR,\n-\t\t\t\t\t (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT, RDMAP_INV_STAG);\n-\t\tbreak;\n-\tcase I40IW_AE_AMP_BOUNDS_VIOLATION:\n-\t\tqp->eventtype = TERM_EVENT_QP_ACCESS_ERR;\n-\t\tif (info->q2_data_written)\n-\t\t\ti40iw_setup_termhdr(qp, termhdr, FLUSH_PROT_ERR,\n-\t\t\t\t\t (LAYER_DDP << 4) | DDP_TAGGED_BUFFER, DDP_TAGGED_BOUNDS);\n-\t\telse\n-\t\t\ti40iw_setup_termhdr(qp, termhdr, FLUSH_REM_ACCESS_ERR,\n-\t\t\t\t\t (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT, RDMAP_INV_BOUNDS);\n-\t\tbreak;\n-\tcase I40IW_AE_AMP_BAD_PD:\n-\t\tswitch (opcode) {\n-\t\tcase I40IW_OP_TYPE_RDMA_WRITE:\n-\t\t\ti40iw_setup_termhdr(qp, termhdr, FLUSH_PROT_ERR,\n-\t\t\t\t\t (LAYER_DDP << 4) | DDP_TAGGED_BUFFER, DDP_TAGGED_UNASSOC_STAG);\n-\t\t\tbreak;\n-\t\tcase I40IW_OP_TYPE_SEND_INV:\n-\t\tcase I40IW_OP_TYPE_SEND_SOL_INV:\n-\t\t\ti40iw_setup_termhdr(qp, termhdr, FLUSH_REM_ACCESS_ERR,\n-\t\t\t\t\t (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT, RDMAP_CANT_INV_STAG);\n-\t\t\tbreak;\n-\t\tdefault:\n-\t\t\ti40iw_setup_termhdr(qp, termhdr, FLUSH_REM_ACCESS_ERR,\n-\t\t\t\t\t (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT, RDMAP_UNASSOC_STAG);\n-\t\t}\n-\t\tbreak;\n-\tcase I40IW_AE_AMP_INVALID_STAG:\n-\t\tqp->eventtype = TERM_EVENT_QP_ACCESS_ERR;\n-\t\ti40iw_setup_termhdr(qp, termhdr, FLUSH_REM_ACCESS_ERR,\n-\t\t\t\t (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT, RDMAP_INV_STAG);\n-\t\tbreak;\n-\tcase I40IW_AE_AMP_BAD_QP:\n-\t\ti40iw_setup_termhdr(qp, termhdr, FLUSH_LOC_QP_OP_ERR,\n-\t\t\t\t (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER, DDP_UNTAGGED_INV_QN);\n-\t\tbreak;\n-\tcase I40IW_AE_AMP_BAD_STAG_KEY:\n-\tcase I40IW_AE_AMP_BAD_STAG_INDEX:\n-\t\tqp->eventtype = TERM_EVENT_QP_ACCESS_ERR;\n-\t\tswitch (opcode) {\n-\t\tcase I40IW_OP_TYPE_SEND_INV:\n-\t\tcase I40IW_OP_TYPE_SEND_SOL_INV:\n-\t\t\ti40iw_setup_termhdr(qp, termhdr, FLUSH_REM_OP_ERR,\n-\t\t\t\t\t (LAYER_RDMA << 4) | RDMAP_REMOTE_OP, RDMAP_CANT_INV_STAG);\n-\t\t\tbreak;\n-\t\tdefault:\n-\t\t\ti40iw_setup_termhdr(qp, termhdr, FLUSH_REM_ACCESS_ERR,\n-\t\t\t\t\t (LAYER_RDMA << 4) | RDMAP_REMOTE_OP, RDMAP_INV_STAG);\n-\t\t}\n-\t\tbreak;\n-\tcase I40IW_AE_AMP_RIGHTS_VIOLATION:\n-\tcase I40IW_AE_AMP_INVALIDATE_NO_REMOTE_ACCESS_RIGHTS:\n-\tcase I40IW_AE_PRIV_OPERATION_DENIED:\n-\t\tqp->eventtype = TERM_EVENT_QP_ACCESS_ERR;\n-\t\ti40iw_setup_termhdr(qp, termhdr, FLUSH_REM_ACCESS_ERR,\n-\t\t\t\t (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT, RDMAP_ACCESS);\n-\t\tbreak;\n-\tcase I40IW_AE_AMP_TO_WRAP:\n-\t\tqp->eventtype = TERM_EVENT_QP_ACCESS_ERR;\n-\t\ti40iw_setup_termhdr(qp, termhdr, FLUSH_REM_ACCESS_ERR,\n-\t\t\t\t (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT, RDMAP_TO_WRAP);\n-\t\tbreak;\n-\tcase I40IW_AE_LLP_RECEIVED_MPA_CRC_ERROR:\n-\t\ti40iw_setup_termhdr(qp, termhdr, FLUSH_GENERAL_ERR,\n-\t\t\t\t (LAYER_MPA << 4) | DDP_LLP, 
MPA_CRC);\n-\t\tbreak;\n-\tcase I40IW_AE_LLP_SEGMENT_TOO_LARGE:\n-\tcase I40IW_AE_LLP_SEGMENT_TOO_SMALL:\n-\t\ti40iw_setup_termhdr(qp, termhdr, FLUSH_LOC_LEN_ERR,\n-\t\t\t\t (LAYER_DDP << 4) | DDP_CATASTROPHIC, DDP_CATASTROPHIC_LOCAL);\n-\t\tbreak;\n-\tcase I40IW_AE_LCE_QP_CATASTROPHIC:\n-\tcase I40IW_AE_DDP_NO_L_BIT:\n-\t\ti40iw_setup_termhdr(qp, termhdr, FLUSH_FATAL_ERR,\n-\t\t\t\t (LAYER_DDP << 4) | DDP_CATASTROPHIC, DDP_CATASTROPHIC_LOCAL);\n-\t\tbreak;\n-\tcase I40IW_AE_DDP_INVALID_MSN_GAP_IN_MSN:\n-\t\ti40iw_setup_termhdr(qp, termhdr, FLUSH_GENERAL_ERR,\n-\t\t\t\t (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER, DDP_UNTAGGED_INV_MSN_RANGE);\n-\t\tbreak;\n-\tcase I40IW_AE_DDP_UBE_DDP_MESSAGE_TOO_LONG_FOR_AVAILABLE_BUFFER:\n-\t\tqp->eventtype = TERM_EVENT_QP_ACCESS_ERR;\n-\t\ti40iw_setup_termhdr(qp, termhdr, FLUSH_LOC_LEN_ERR,\n-\t\t\t\t (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER, DDP_UNTAGGED_INV_TOO_LONG);\n-\t\tbreak;\n-\tcase I40IW_AE_DDP_UBE_INVALID_DDP_VERSION:\n-\t\tif (is_tagged)\n-\t\t\ti40iw_setup_termhdr(qp, termhdr, FLUSH_GENERAL_ERR,\n-\t\t\t\t\t (LAYER_DDP << 4) | DDP_TAGGED_BUFFER, DDP_TAGGED_INV_DDP_VER);\n-\t\telse\n-\t\t\ti40iw_setup_termhdr(qp, termhdr, FLUSH_GENERAL_ERR,\n-\t\t\t\t\t (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER, DDP_UNTAGGED_INV_DDP_VER);\n-\t\tbreak;\n-\tcase I40IW_AE_DDP_UBE_INVALID_MO:\n-\t\ti40iw_setup_termhdr(qp, termhdr, FLUSH_GENERAL_ERR,\n-\t\t\t\t (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER, DDP_UNTAGGED_INV_MO);\n-\t\tbreak;\n-\tcase I40IW_AE_DDP_UBE_INVALID_MSN_NO_BUFFER_AVAILABLE:\n-\t\ti40iw_setup_termhdr(qp, termhdr, FLUSH_REM_OP_ERR,\n-\t\t\t\t (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER, DDP_UNTAGGED_INV_MSN_NO_BUF);\n-\t\tbreak;\n-\tcase I40IW_AE_DDP_UBE_INVALID_QN:\n-\t\ti40iw_setup_termhdr(qp, termhdr, FLUSH_GENERAL_ERR,\n-\t\t\t\t (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER, DDP_UNTAGGED_INV_QN);\n-\t\tbreak;\n-\tcase I40IW_AE_RDMAP_ROE_INVALID_RDMAP_VERSION:\n-\t\ti40iw_setup_termhdr(qp, termhdr, FLUSH_GENERAL_ERR,\n-\t\t\t\t (LAYER_RDMA << 4) | RDMAP_REMOTE_OP, RDMAP_INV_RDMAP_VER);\n-\t\tbreak;\n-\tcase I40IW_AE_RDMAP_ROE_UNEXPECTED_OPCODE:\n-\t\ti40iw_setup_termhdr(qp, termhdr, FLUSH_LOC_QP_OP_ERR,\n-\t\t\t\t (LAYER_RDMA << 4) | RDMAP_REMOTE_OP, RDMAP_UNEXPECTED_OP);\n-\t\tbreak;\n-\tdefault:\n-\t\ti40iw_setup_termhdr(qp, termhdr, FLUSH_FATAL_ERR,\n-\t\t\t\t (LAYER_RDMA << 4) | RDMAP_REMOTE_OP, RDMAP_UNSPECIFIED);\n-\t\tbreak;\n-\t}\n-\n-\tif (copy_len)\n-\t\tmemcpy(termhdr + 1, pkt, copy_len);\n-\n-\treturn sizeof(struct i40iw_terminate_hdr) + copy_len;\n-}\n-\n-/**\n- * i40iw_terminate_send_fin() - Send fin for terminate message\n- * @qp: qp associated with received terminate AE\n- */\n-void i40iw_terminate_send_fin(struct i40iw_sc_qp *qp)\n-{\n-\t/* Send the fin only */\n-\ti40iw_term_modify_qp(qp,\n-\t\t\t I40IW_QP_STATE_TERMINATE,\n-\t\t\t I40IWQP_TERM_SEND_FIN_ONLY,\n-\t\t\t 0);\n-}\n-\n-/**\n- * i40iw_terminate_connection() - Bad AE and send terminate to remote QP\n- * @qp: qp associated with received terminate AE\n- * @info: the struct contiaing AE information\n- */\n-void i40iw_terminate_connection(struct i40iw_sc_qp *qp, struct i40iw_aeqe_info *info)\n-{\n-\tu8 termlen = 0;\n-\n-\tif (qp->term_flags & I40IW_TERM_SENT)\n-\t\treturn; /* Sanity check */\n-\n-\t/* Eventtype can change from bld_terminate_hdr */\n-\tqp->eventtype = TERM_EVENT_QP_FATAL;\n-\ttermlen = i40iw_bld_terminate_hdr(qp, info);\n-\ti40iw_terminate_start_timer(qp);\n-\tqp->term_flags |= I40IW_TERM_SENT;\n-\ti40iw_term_modify_qp(qp, I40IW_QP_STATE_TERMINATE,\n-\t\t\t 
I40IWQP_TERM_SEND_TERM_ONLY, termlen);\n-}\n-\n-/**\n- * i40iw_terminate_received - handle terminate received AE\n- * @qp: qp associated with received terminate AE\n- * @info: the struct contiaing AE information\n- */\n-void i40iw_terminate_received(struct i40iw_sc_qp *qp, struct i40iw_aeqe_info *info)\n-{\n-\tu8 *pkt = qp->q2_buf + Q2_BAD_FRAME_OFFSET;\n-\t__be32 *mpa;\n-\tu8 ddp_ctl;\n-\tu8 rdma_ctl;\n-\tu16 aeq_id = 0;\n-\tstruct i40iw_terminate_hdr *termhdr;\n-\n-\tmpa = (__be32 *)i40iw_locate_mpa(pkt);\n-\tif (info->q2_data_written) {\n-\t\t/* did not validate the frame - do it now */\n-\t\tddp_ctl = (ntohl(mpa[0]) >> 8) & 0xff;\n-\t\trdma_ctl = ntohl(mpa[0]) & 0xff;\n-\t\tif ((ddp_ctl & 0xc0) != 0x40)\n-\t\t\taeq_id = I40IW_AE_LCE_QP_CATASTROPHIC;\n-\t\telse if ((ddp_ctl & 0x03) != 1)\n-\t\t\taeq_id = I40IW_AE_DDP_UBE_INVALID_DDP_VERSION;\n-\t\telse if (ntohl(mpa[2]) != 2)\n-\t\t\taeq_id = I40IW_AE_DDP_UBE_INVALID_QN;\n-\t\telse if (ntohl(mpa[3]) != 1)\n-\t\t\taeq_id = I40IW_AE_DDP_INVALID_MSN_GAP_IN_MSN;\n-\t\telse if (ntohl(mpa[4]) != 0)\n-\t\t\taeq_id = I40IW_AE_DDP_UBE_INVALID_MO;\n-\t\telse if ((rdma_ctl & 0xc0) != 0x40)\n-\t\t\taeq_id = I40IW_AE_RDMAP_ROE_INVALID_RDMAP_VERSION;\n-\n-\t\tinfo->ae_id = aeq_id;\n-\t\tif (info->ae_id) {\n-\t\t\t/* Bad terminate recvd - send back a terminate */\n-\t\t\ti40iw_terminate_connection(qp, info);\n-\t\t\treturn;\n-\t\t}\n-\t}\n-\n-\tqp->term_flags |= I40IW_TERM_RCVD;\n-\tqp->eventtype = TERM_EVENT_QP_FATAL;\n-\ttermhdr = (struct i40iw_terminate_hdr *)&mpa[5];\n-\tif (termhdr->layer_etype == RDMAP_REMOTE_PROT ||\n-\t termhdr->layer_etype == RDMAP_REMOTE_OP) {\n-\t\ti40iw_terminate_done(qp, 0);\n-\t} else {\n-\t\ti40iw_terminate_start_timer(qp);\n-\t\ti40iw_terminate_send_fin(qp);\n-\t}\n-}\n-\n-/**\n- * i40iw_sc_vsi_init - Initialize virtual device\n- * @vsi: pointer to the vsi structure\n- * @info: parameters to initialize vsi\n- **/\n-void i40iw_sc_vsi_init(struct i40iw_sc_vsi *vsi, struct i40iw_vsi_init_info *info)\n-{\n-\tint i;\n-\n-\tvsi->dev = info->dev;\n-\tvsi->back_vsi = info->back_vsi;\n-\tvsi->mtu = info->params->mtu;\n-\tvsi->exception_lan_queue = info->exception_lan_queue;\n-\ti40iw_fill_qos_list(info->params->qs_handle_list);\n-\n-\tfor (i = 0; i < I40IW_MAX_USER_PRIORITY; i++) {\n-\t\tvsi->qos[i].qs_handle = info->params->qs_handle_list[i];\n-\t\ti40iw_debug(vsi->dev, I40IW_DEBUG_DCB, \"qset[%d]: %d\\n\", i,\n-\t\t\t vsi->qos[i].qs_handle);\n-\t\tspin_lock_init(&vsi->qos[i].lock);\n-\t\tINIT_LIST_HEAD(&vsi->qos[i].qplist);\n-\t}\n-}\n-\n-/**\n- * i40iw_hw_stats_init - Initiliaze HW stats table\n- * @stats: pestat struct\n- * @fcn_idx: PCI fn id\n- * @is_pf: Is it a PF?\n- *\n- * Populate the HW stats table with register offset addr for each\n- * stats. 
And start the perioidic stats timer.\n- */\n-void i40iw_hw_stats_init(struct i40iw_vsi_pestat *stats, u8 fcn_idx, bool is_pf)\n-{\n-\tu32 stats_reg_offset;\n-\tu32 stats_index;\n-\tstruct i40iw_dev_hw_stats_offsets *stats_table =\n-\t\t&stats->hw_stats_offsets;\n-\tstruct i40iw_dev_hw_stats *last_rd_stats = &stats->last_read_hw_stats;\n-\n-\tif (is_pf) {\n-\t\tstats_table->stats_offset_32[I40IW_HW_STAT_INDEX_IP4RXDISCARD] =\n-\t\t\t\tI40E_GLPES_PFIP4RXDISCARD(fcn_idx);\n-\t\tstats_table->stats_offset_32[I40IW_HW_STAT_INDEX_IP4RXTRUNC] =\n-\t\t\t\tI40E_GLPES_PFIP4RXTRUNC(fcn_idx);\n-\t\tstats_table->stats_offset_32[I40IW_HW_STAT_INDEX_IP4TXNOROUTE] =\n-\t\t\t\tI40E_GLPES_PFIP4TXNOROUTE(fcn_idx);\n-\t\tstats_table->stats_offset_32[I40IW_HW_STAT_INDEX_IP6RXDISCARD] =\n-\t\t\t\tI40E_GLPES_PFIP6RXDISCARD(fcn_idx);\n-\t\tstats_table->stats_offset_32[I40IW_HW_STAT_INDEX_IP6RXTRUNC] =\n-\t\t\t\tI40E_GLPES_PFIP6RXTRUNC(fcn_idx);\n-\t\tstats_table->stats_offset_32[I40IW_HW_STAT_INDEX_IP6TXNOROUTE] =\n-\t\t\t\tI40E_GLPES_PFIP6TXNOROUTE(fcn_idx);\n-\t\tstats_table->stats_offset_32[I40IW_HW_STAT_INDEX_TCPRTXSEG] =\n-\t\t\t\tI40E_GLPES_PFTCPRTXSEG(fcn_idx);\n-\t\tstats_table->stats_offset_32[I40IW_HW_STAT_INDEX_TCPRXOPTERR] =\n-\t\t\t\tI40E_GLPES_PFTCPRXOPTERR(fcn_idx);\n-\t\tstats_table->stats_offset_32[I40IW_HW_STAT_INDEX_TCPRXPROTOERR] =\n-\t\t\t\tI40E_GLPES_PFTCPRXPROTOERR(fcn_idx);\n-\n-\t\tstats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4RXOCTS] =\n-\t\t\t\tI40E_GLPES_PFIP4RXOCTSLO(fcn_idx);\n-\t\tstats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4RXPKTS] =\n-\t\t\t\tI40E_GLPES_PFIP4RXPKTSLO(fcn_idx);\n-\t\tstats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4RXFRAGS] =\n-\t\t\t\tI40E_GLPES_PFIP4RXFRAGSLO(fcn_idx);\n-\t\tstats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4RXMCPKTS] =\n-\t\t\t\tI40E_GLPES_PFIP4RXMCPKTSLO(fcn_idx);\n-\t\tstats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4TXOCTS] =\n-\t\t\t\tI40E_GLPES_PFIP4TXOCTSLO(fcn_idx);\n-\t\tstats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4TXPKTS] =\n-\t\t\t\tI40E_GLPES_PFIP4TXPKTSLO(fcn_idx);\n-\t\tstats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4TXFRAGS] =\n-\t\t\t\tI40E_GLPES_PFIP4TXFRAGSLO(fcn_idx);\n-\t\tstats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4TXMCPKTS] =\n-\t\t\t\tI40E_GLPES_PFIP4TXMCPKTSLO(fcn_idx);\n-\t\tstats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP6RXOCTS] =\n-\t\t\t\tI40E_GLPES_PFIP6RXOCTSLO(fcn_idx);\n-\t\tstats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP6RXPKTS] =\n-\t\t\t\tI40E_GLPES_PFIP6RXPKTSLO(fcn_idx);\n-\t\tstats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP6RXFRAGS] =\n-\t\t\t\tI40E_GLPES_PFIP6RXFRAGSLO(fcn_idx);\n-\t\tstats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP6RXMCPKTS] =\n-\t\t\t\tI40E_GLPES_PFIP6RXMCPKTSLO(fcn_idx);\n-\t\tstats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP6TXOCTS] =\n-\t\t\t\tI40E_GLPES_PFIP6TXOCTSLO(fcn_idx);\n-\t\tstats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP6TXPKTS] =\n-\t\t\t\tI40E_GLPES_PFIP6TXPKTSLO(fcn_idx);\n-\t\tstats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP6TXPKTS] =\n-\t\t\t\tI40E_GLPES_PFIP6TXPKTSLO(fcn_idx);\n-\t\tstats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP6TXFRAGS] =\n-\t\t\t\tI40E_GLPES_PFIP6TXFRAGSLO(fcn_idx);\n-\t\tstats_table->stats_offset_64[I40IW_HW_STAT_INDEX_TCPRXSEGS] =\n-\t\t\t\tI40E_GLPES_PFTCPRXSEGSLO(fcn_idx);\n-\t\tstats_table->stats_offset_64[I40IW_HW_STAT_INDEX_TCPTXSEG] =\n-\t\t\t\tI40E_GLPES_PFTCPTXSEGLO(fcn_idx);\n-\t\tstats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMARXRDS] 
=\n-\t\t\t\tI40E_GLPES_PFRDMARXRDSLO(fcn_idx);\n-\t\tstats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMARXSNDS] =\n-\t\t\t\tI40E_GLPES_PFRDMARXSNDSLO(fcn_idx);\n-\t\tstats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMARXWRS] =\n-\t\t\t\tI40E_GLPES_PFRDMARXWRSLO(fcn_idx);\n-\t\tstats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMATXRDS] =\n-\t\t\t\tI40E_GLPES_PFRDMATXRDSLO(fcn_idx);\n-\t\tstats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMATXSNDS] =\n-\t\t\t\tI40E_GLPES_PFRDMATXSNDSLO(fcn_idx);\n-\t\tstats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMATXWRS] =\n-\t\t\t\tI40E_GLPES_PFRDMATXWRSLO(fcn_idx);\n-\t\tstats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMAVBND] =\n-\t\t\t\tI40E_GLPES_PFRDMAVBNDLO(fcn_idx);\n-\t\tstats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMAVINV] =\n-\t\t\t\tI40E_GLPES_PFRDMAVINVLO(fcn_idx);\n-\t} else {\n-\t\tstats_table->stats_offset_32[I40IW_HW_STAT_INDEX_IP4RXDISCARD] =\n-\t\t\t\tI40E_GLPES_VFIP4RXDISCARD(fcn_idx);\n-\t\tstats_table->stats_offset_32[I40IW_HW_STAT_INDEX_IP4RXTRUNC] =\n-\t\t\t\tI40E_GLPES_VFIP4RXTRUNC(fcn_idx);\n-\t\tstats_table->stats_offset_32[I40IW_HW_STAT_INDEX_IP4TXNOROUTE] =\n-\t\t\t\tI40E_GLPES_VFIP4TXNOROUTE(fcn_idx);\n-\t\tstats_table->stats_offset_32[I40IW_HW_STAT_INDEX_IP6RXDISCARD] =\n-\t\t\t\tI40E_GLPES_VFIP6RXDISCARD(fcn_idx);\n-\t\tstats_table->stats_offset_32[I40IW_HW_STAT_INDEX_IP6RXTRUNC] =\n-\t\t\t\tI40E_GLPES_VFIP6RXTRUNC(fcn_idx);\n-\t\tstats_table->stats_offset_32[I40IW_HW_STAT_INDEX_IP6TXNOROUTE] =\n-\t\t\t\tI40E_GLPES_VFIP6TXNOROUTE(fcn_idx);\n-\t\tstats_table->stats_offset_32[I40IW_HW_STAT_INDEX_TCPRTXSEG] =\n-\t\t\t\tI40E_GLPES_VFTCPRTXSEG(fcn_idx);\n-\t\tstats_table->stats_offset_32[I40IW_HW_STAT_INDEX_TCPRXOPTERR] =\n-\t\t\t\tI40E_GLPES_VFTCPRXOPTERR(fcn_idx);\n-\t\tstats_table->stats_offset_32[I40IW_HW_STAT_INDEX_TCPRXPROTOERR] =\n-\t\t\t\tI40E_GLPES_VFTCPRXPROTOERR(fcn_idx);\n-\n-\t\tstats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4RXOCTS] =\n-\t\t\t\tI40E_GLPES_VFIP4RXOCTSLO(fcn_idx);\n-\t\tstats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4RXPKTS] =\n-\t\t\t\tI40E_GLPES_VFIP4RXPKTSLO(fcn_idx);\n-\t\tstats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4RXFRAGS] =\n-\t\t\t\tI40E_GLPES_VFIP4RXFRAGSLO(fcn_idx);\n-\t\tstats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4RXMCPKTS] =\n-\t\t\t\tI40E_GLPES_VFIP4RXMCPKTSLO(fcn_idx);\n-\t\tstats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4TXOCTS] =\n-\t\t\t\tI40E_GLPES_VFIP4TXOCTSLO(fcn_idx);\n-\t\tstats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4TXPKTS] =\n-\t\t\t\tI40E_GLPES_VFIP4TXPKTSLO(fcn_idx);\n-\t\tstats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4TXFRAGS] =\n-\t\t\t\tI40E_GLPES_VFIP4TXFRAGSLO(fcn_idx);\n-\t\tstats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4TXMCPKTS] =\n-\t\t\t\tI40E_GLPES_VFIP4TXMCPKTSLO(fcn_idx);\n-\t\tstats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP6RXOCTS] =\n-\t\t\t\tI40E_GLPES_VFIP6RXOCTSLO(fcn_idx);\n-\t\tstats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP6RXPKTS] =\n-\t\t\t\tI40E_GLPES_VFIP6RXPKTSLO(fcn_idx);\n-\t\tstats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP6RXFRAGS] =\n-\t\t\t\tI40E_GLPES_VFIP6RXFRAGSLO(fcn_idx);\n-\t\tstats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP6RXMCPKTS] =\n-\t\t\t\tI40E_GLPES_VFIP6RXMCPKTSLO(fcn_idx);\n-\t\tstats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP6TXOCTS] =\n-\t\t\t\tI40E_GLPES_VFIP6TXOCTSLO(fcn_idx);\n-\t\tstats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP6TXPKTS] 
=\n-\t\t\t\tI40E_GLPES_VFIP6TXPKTSLO(fcn_idx);\n-\t\tstats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP6TXPKTS] =\n-\t\t\t\tI40E_GLPES_VFIP6TXPKTSLO(fcn_idx);\n-\t\tstats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP6TXFRAGS] =\n-\t\t\t\tI40E_GLPES_VFIP6TXFRAGSLO(fcn_idx);\n-\t\tstats_table->stats_offset_64[I40IW_HW_STAT_INDEX_TCPRXSEGS] =\n-\t\t\t\tI40E_GLPES_VFTCPRXSEGSLO(fcn_idx);\n-\t\tstats_table->stats_offset_64[I40IW_HW_STAT_INDEX_TCPTXSEG] =\n-\t\t\t\tI40E_GLPES_VFTCPTXSEGLO(fcn_idx);\n-\t\tstats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMARXRDS] =\n-\t\t\t\tI40E_GLPES_VFRDMARXRDSLO(fcn_idx);\n-\t\tstats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMARXSNDS] =\n-\t\t\t\tI40E_GLPES_VFRDMARXSNDSLO(fcn_idx);\n-\t\tstats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMARXWRS] =\n-\t\t\t\tI40E_GLPES_VFRDMARXWRSLO(fcn_idx);\n-\t\tstats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMATXRDS] =\n-\t\t\t\tI40E_GLPES_VFRDMATXRDSLO(fcn_idx);\n-\t\tstats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMATXSNDS] =\n-\t\t\t\tI40E_GLPES_VFRDMATXSNDSLO(fcn_idx);\n-\t\tstats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMATXWRS] =\n-\t\t\t\tI40E_GLPES_VFRDMATXWRSLO(fcn_idx);\n-\t\tstats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMAVBND] =\n-\t\t\t\tI40E_GLPES_VFRDMAVBNDLO(fcn_idx);\n-\t\tstats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMAVINV] =\n-\t\t\t\tI40E_GLPES_VFRDMAVINVLO(fcn_idx);\n-\t}\n-\n-\tfor (stats_index = 0; stats_index < I40IW_HW_STAT_INDEX_MAX_64;\n-\t stats_index++) {\n-\t\tstats_reg_offset = stats_table->stats_offset_64[stats_index];\n-\t\tlast_rd_stats->stats_value_64[stats_index] =\n-\t\t\treadq(stats->hw->hw_addr + stats_reg_offset);\n-\t}\n-\n-\tfor (stats_index = 0; stats_index < I40IW_HW_STAT_INDEX_MAX_32;\n-\t stats_index++) {\n-\t\tstats_reg_offset = stats_table->stats_offset_32[stats_index];\n-\t\tlast_rd_stats->stats_value_32[stats_index] =\n-\t\t\ti40iw_rd32(stats->hw, stats_reg_offset);\n-\t}\n-}\n-\n-/**\n- * i40iw_hw_stats_read_32 - Read 32-bit HW stats counters and accommodates for roll-overs.\n- * @stat: pestat struct\n- * @index: index in HW stats table which contains offset reg-addr\n- * @value: hw stats value\n- */\n-void i40iw_hw_stats_read_32(struct i40iw_vsi_pestat *stats,\n-\t\t\t enum i40iw_hw_stats_index_32b index,\n-\t\t\t u64 *value)\n-{\n-\tstruct i40iw_dev_hw_stats_offsets *stats_table =\n-\t\t&stats->hw_stats_offsets;\n-\tstruct i40iw_dev_hw_stats *last_rd_stats = &stats->last_read_hw_stats;\n-\tstruct i40iw_dev_hw_stats *hw_stats = &stats->hw_stats;\n-\tu64 new_stats_value = 0;\n-\tu32 stats_reg_offset = stats_table->stats_offset_32[index];\n-\n-\tnew_stats_value = i40iw_rd32(stats->hw, stats_reg_offset);\n-\t/*roll-over case */\n-\tif (new_stats_value < last_rd_stats->stats_value_32[index])\n-\t\thw_stats->stats_value_32[index] += new_stats_value;\n-\telse\n-\t\thw_stats->stats_value_32[index] +=\n-\t\t\tnew_stats_value - last_rd_stats->stats_value_32[index];\n-\tlast_rd_stats->stats_value_32[index] = new_stats_value;\n-\t*value = hw_stats->stats_value_32[index];\n-}\n-\n-/**\n- * i40iw_hw_stats_read_64 - Read HW stats counters (greater than 32-bit) and accommodates for roll-overs.\n- * @stats: pestat struct\n- * @index: index in HW stats table which contains offset reg-addr\n- * @value: hw stats value\n- */\n-void i40iw_hw_stats_read_64(struct i40iw_vsi_pestat *stats,\n-\t\t\t enum i40iw_hw_stats_index_64b index,\n-\t\t\t u64 *value)\n-{\n-\tstruct i40iw_dev_hw_stats_offsets *stats_table =\n-\t\t&stats->hw_stats_offsets;\n-\tstruct 
i40iw_dev_hw_stats *last_rd_stats = &stats->last_read_hw_stats;\n-\tstruct i40iw_dev_hw_stats *hw_stats = &stats->hw_stats;\n-\tu64 new_stats_value = 0;\n-\tu32 stats_reg_offset = stats_table->stats_offset_64[index];\n-\n-\tnew_stats_value = readq(stats->hw->hw_addr + stats_reg_offset);\n-\t/*roll-over case */\n-\tif (new_stats_value < last_rd_stats->stats_value_64[index])\n-\t\thw_stats->stats_value_64[index] += new_stats_value;\n-\telse\n-\t\thw_stats->stats_value_64[index] +=\n-\t\t\tnew_stats_value - last_rd_stats->stats_value_64[index];\n-\tlast_rd_stats->stats_value_64[index] = new_stats_value;\n-\t*value = hw_stats->stats_value_64[index];\n-}\n-\n-/**\n- * i40iw_hw_stats_read_all - read all HW stat counters\n- * @stats: pestat struct\n- * @stats_values: hw stats structure\n- *\n- * Read all the HW stat counters and populates hw_stats structure\n- * of passed-in vsi's pestat as well as copy created in stat_values.\n- */\n-void i40iw_hw_stats_read_all(struct i40iw_vsi_pestat *stats,\n-\t\t\t struct i40iw_dev_hw_stats *stats_values)\n-{\n-\tu32 stats_index;\n-\tunsigned long flags;\n-\n-\tspin_lock_irqsave(&stats->lock, flags);\n-\n-\tfor (stats_index = 0; stats_index < I40IW_HW_STAT_INDEX_MAX_32;\n-\t stats_index++)\n-\t\ti40iw_hw_stats_read_32(stats, stats_index,\n-\t\t\t\t &stats_values->stats_value_32[stats_index]);\n-\tfor (stats_index = 0; stats_index < I40IW_HW_STAT_INDEX_MAX_64;\n-\t stats_index++)\n-\t\ti40iw_hw_stats_read_64(stats, stats_index,\n-\t\t\t\t &stats_values->stats_value_64[stats_index]);\n-\tspin_unlock_irqrestore(&stats->lock, flags);\n-}\n-\n-/**\n- * i40iw_hw_stats_refresh_all - Update all HW stats structs\n- * @stats: pestat struct\n- *\n- * Read all the HW stats counters to refresh values in hw_stats structure\n- * of passed-in dev's pestat\n- */\n-void i40iw_hw_stats_refresh_all(struct i40iw_vsi_pestat *stats)\n-{\n-\tu64 stats_value;\n-\tu32 stats_index;\n-\tunsigned long flags;\n-\n-\tspin_lock_irqsave(&stats->lock, flags);\n-\n-\tfor (stats_index = 0; stats_index < I40IW_HW_STAT_INDEX_MAX_32;\n-\t stats_index++)\n-\t\ti40iw_hw_stats_read_32(stats, stats_index, &stats_value);\n-\tfor (stats_index = 0; stats_index < I40IW_HW_STAT_INDEX_MAX_64;\n-\t stats_index++)\n-\t\ti40iw_hw_stats_read_64(stats, stats_index, &stats_value);\n-\tspin_unlock_irqrestore(&stats->lock, flags);\n-}\n-\n-/**\n- * i40iw_get_fcn_id - Return the function id\n- * @dev: pointer to the device\n- */\n-static u8 i40iw_get_fcn_id(struct i40iw_sc_dev *dev)\n-{\n-\tu8 fcn_id = I40IW_INVALID_FCN_ID;\n-\tu8 i;\n-\n-\tfor (i = I40IW_FIRST_NON_PF_STAT; i < I40IW_MAX_STATS_COUNT; i++)\n-\t\tif (!dev->fcn_id_array[i]) {\n-\t\t\tfcn_id = i;\n-\t\t\tdev->fcn_id_array[i] = true;\n-\t\t\tbreak;\n-\t\t}\n-\treturn fcn_id;\n-}\n-\n-/**\n- * i40iw_vsi_stats_init - Initialize the vsi statistics\n- * @vsi: pointer to the vsi structure\n- * @info: The info structure used for initialization\n- */\n-enum i40iw_status_code i40iw_vsi_stats_init(struct i40iw_sc_vsi *vsi, struct i40iw_vsi_stats_info *info)\n-{\n-\tu8 fcn_id = info->fcn_id;\n-\n-\tif (info->alloc_fcn_id)\n-\t\tfcn_id = i40iw_get_fcn_id(vsi->dev);\n-\n-\tif (fcn_id == I40IW_INVALID_FCN_ID)\n-\t\treturn I40IW_ERR_NOT_READY;\n-\n-\tvsi->pestat = info->pestat;\n-\tvsi->pestat->hw = vsi->dev->hw;\n-\tvsi->pestat->vsi = vsi;\n-\n-\tif (info->stats_initialize) {\n-\t\ti40iw_hw_stats_init(vsi->pestat, fcn_id, true);\n-\t\tspin_lock_init(&vsi->pestat->lock);\n-\t\ti40iw_hw_stats_start_timer(vsi);\n-\t}\n-\tvsi->stats_fcn_id_alloc = 
info->alloc_fcn_id;\n-\tvsi->fcn_id = fcn_id;\n-\treturn I40IW_SUCCESS;\n-}\n-\n-/**\n- * i40iw_vsi_stats_free - Free the vsi stats\n- * @vsi: pointer to the vsi structure\n- */\n-void i40iw_vsi_stats_free(struct i40iw_sc_vsi *vsi)\n-{\n-\tu8 fcn_id = vsi->fcn_id;\n-\n-\tif (vsi->stats_fcn_id_alloc && fcn_id < I40IW_MAX_STATS_COUNT)\n-\t\tvsi->dev->fcn_id_array[fcn_id] = false;\n-\ti40iw_hw_stats_stop_timer(vsi);\n-}\n-\n-static struct i40iw_cqp_ops iw_cqp_ops = {\n-\t.cqp_init = i40iw_sc_cqp_init,\n-\t.cqp_create = i40iw_sc_cqp_create,\n-\t.cqp_post_sq = i40iw_sc_cqp_post_sq,\n-\t.cqp_get_next_send_wqe = i40iw_sc_cqp_get_next_send_wqe,\n-\t.cqp_destroy = i40iw_sc_cqp_destroy,\n-\t.poll_for_cqp_op_done = i40iw_sc_poll_for_cqp_op_done\n-};\n-\n-static struct i40iw_ccq_ops iw_ccq_ops = {\n-\t.ccq_init = i40iw_sc_ccq_init,\n-\t.ccq_create = i40iw_sc_ccq_create,\n-\t.ccq_destroy = i40iw_sc_ccq_destroy,\n-\t.ccq_create_done = i40iw_sc_ccq_create_done,\n-\t.ccq_get_cqe_info = i40iw_sc_ccq_get_cqe_info,\n-\t.ccq_arm = i40iw_sc_ccq_arm\n-};\n-\n-static struct i40iw_ceq_ops iw_ceq_ops = {\n-\t.ceq_init = i40iw_sc_ceq_init,\n-\t.ceq_create = i40iw_sc_ceq_create,\n-\t.cceq_create_done = i40iw_sc_cceq_create_done,\n-\t.cceq_destroy_done = i40iw_sc_cceq_destroy_done,\n-\t.cceq_create = i40iw_sc_cceq_create,\n-\t.ceq_destroy = i40iw_sc_ceq_destroy,\n-\t.process_ceq = i40iw_sc_process_ceq\n-};\n-\n-static struct i40iw_aeq_ops iw_aeq_ops = {\n-\t.aeq_init = i40iw_sc_aeq_init,\n-\t.aeq_create = i40iw_sc_aeq_create,\n-\t.aeq_destroy = i40iw_sc_aeq_destroy,\n-\t.get_next_aeqe = i40iw_sc_get_next_aeqe,\n-\t.repost_aeq_entries = i40iw_sc_repost_aeq_entries,\n-\t.aeq_create_done = i40iw_sc_aeq_create_done,\n-\t.aeq_destroy_done = i40iw_sc_aeq_destroy_done\n-};\n-\n-/* iwarp pd ops */\n-static struct i40iw_pd_ops iw_pd_ops = {\n-\t.pd_init = i40iw_sc_pd_init,\n-};\n-\n-static struct i40iw_priv_qp_ops iw_priv_qp_ops = {\n-\t.qp_init = i40iw_sc_qp_init,\n-\t.qp_create = i40iw_sc_qp_create,\n-\t.qp_modify = i40iw_sc_qp_modify,\n-\t.qp_destroy = i40iw_sc_qp_destroy,\n-\t.qp_flush_wqes = i40iw_sc_qp_flush_wqes,\n-\t.qp_upload_context = i40iw_sc_qp_upload_context,\n-\t.qp_setctx = i40iw_sc_qp_setctx,\n-\t.qp_send_lsmm = i40iw_sc_send_lsmm,\n-\t.qp_send_lsmm_nostag = i40iw_sc_send_lsmm_nostag,\n-\t.qp_send_rtt = i40iw_sc_send_rtt,\n-\t.qp_post_wqe0 = i40iw_sc_post_wqe0,\n-\t.iw_mr_fast_register = i40iw_sc_mr_fast_register\n-};\n-\n-static struct i40iw_priv_cq_ops iw_priv_cq_ops = {\n-\t.cq_init = i40iw_sc_cq_init,\n-\t.cq_create = i40iw_sc_cq_create,\n-\t.cq_destroy = i40iw_sc_cq_destroy,\n-\t.cq_modify = i40iw_sc_cq_modify,\n-};\n-\n-static struct i40iw_mr_ops iw_mr_ops = {\n-\t.alloc_stag = i40iw_sc_alloc_stag,\n-\t.mr_reg_non_shared = i40iw_sc_mr_reg_non_shared,\n-\t.mr_reg_shared = i40iw_sc_mr_reg_shared,\n-\t.dealloc_stag = i40iw_sc_dealloc_stag,\n-\t.query_stag = i40iw_sc_query_stag,\n-\t.mw_alloc = i40iw_sc_mw_alloc\n-};\n-\n-static struct i40iw_cqp_misc_ops iw_cqp_misc_ops = {\n-\t.manage_push_page = i40iw_sc_manage_push_page,\n-\t.manage_hmc_pm_func_table = i40iw_sc_manage_hmc_pm_func_table,\n-\t.set_hmc_resource_profile = i40iw_sc_set_hmc_resource_profile,\n-\t.commit_fpm_values = i40iw_sc_commit_fpm_values,\n-\t.query_fpm_values = i40iw_sc_query_fpm_values,\n-\t.static_hmc_pages_allocated = i40iw_sc_static_hmc_pages_allocated,\n-\t.add_arp_cache_entry = i40iw_sc_add_arp_cache_entry,\n-\t.del_arp_cache_entry = i40iw_sc_del_arp_cache_entry,\n-\t.query_arp_cache_entry = 
i40iw_sc_query_arp_cache_entry,\n-\t.manage_apbvt_entry = i40iw_sc_manage_apbvt_entry,\n-\t.manage_qhash_table_entry = i40iw_sc_manage_qhash_table_entry,\n-\t.alloc_local_mac_ipaddr_table_entry = i40iw_sc_alloc_local_mac_ipaddr_entry,\n-\t.add_local_mac_ipaddr_entry = i40iw_sc_add_local_mac_ipaddr_entry,\n-\t.del_local_mac_ipaddr_entry = i40iw_sc_del_local_mac_ipaddr_entry,\n-\t.cqp_nop = i40iw_sc_cqp_nop,\n-\t.commit_fpm_values_done = i40iw_sc_commit_fpm_values_done,\n-\t.query_fpm_values_done = i40iw_sc_query_fpm_values_done,\n-\t.manage_hmc_pm_func_table_done = i40iw_sc_manage_hmc_pm_func_table_done,\n-\t.update_suspend_qp = i40iw_sc_suspend_qp,\n-\t.update_resume_qp = i40iw_sc_resume_qp\n-};\n-\n-static struct i40iw_hmc_ops iw_hmc_ops = {\n-\t.init_iw_hmc = i40iw_sc_init_iw_hmc,\n-\t.parse_fpm_query_buf = i40iw_sc_parse_fpm_query_buf,\n-\t.configure_iw_fpm = i40iw_sc_configure_iw_fpm,\n-\t.parse_fpm_commit_buf = i40iw_sc_parse_fpm_commit_buf,\n-\t.create_hmc_object = i40iw_sc_create_hmc_obj,\n-\t.del_hmc_object = i40iw_sc_del_hmc_obj\n-};\n-\n-/**\n- * i40iw_device_init - Initialize IWARP device\n- * @dev: IWARP device pointer\n- * @info: IWARP init info\n- */\n-enum i40iw_status_code i40iw_device_init(struct i40iw_sc_dev *dev,\n-\t\t\t\t\t struct i40iw_device_init_info *info)\n-{\n-\tu32 val;\n-\tu32 vchnl_ver = 0;\n-\tu16 hmc_fcn = 0;\n-\tenum i40iw_status_code ret_code = 0;\n-\tu8 db_size;\n-\n-\tspin_lock_init(&dev->cqp_lock);\n-\n-\ti40iw_device_init_uk(&dev->dev_uk);\n-\n-\tdev->debug_mask = info->debug_mask;\n-\n-\tdev->hmc_fn_id = info->hmc_fn_id;\n-\tdev->is_pf = info->is_pf;\n-\n-\tdev->fpm_query_buf_pa = info->fpm_query_buf_pa;\n-\tdev->fpm_query_buf = info->fpm_query_buf;\n-\n-\tdev->fpm_commit_buf_pa = info->fpm_commit_buf_pa;\n-\tdev->fpm_commit_buf = info->fpm_commit_buf;\n-\n-\tdev->hw = info->hw;\n-\tdev->hw->hw_addr = info->bar0;\n-\n-\tif (dev->is_pf) {\n-\t\tval = i40iw_rd32(dev->hw, I40E_GLPCI_DREVID);\n-\t\tdev->hw_rev = (u8)RS_32(val, I40E_GLPCI_DREVID_DEFAULT_REVID);\n-\n-\t\tval = i40iw_rd32(dev->hw, I40E_GLPCI_LBARCTRL);\n-\t\tdb_size = (u8)RS_32(val, I40E_GLPCI_LBARCTRL_PE_DB_SIZE);\n-\t\tif ((db_size != I40IW_PE_DB_SIZE_4M) &&\n-\t\t (db_size != I40IW_PE_DB_SIZE_8M)) {\n-\t\t\ti40iw_debug(dev, I40IW_DEBUG_DEV,\n-\t\t\t\t \"%s: PE doorbell is not enabled in CSR val 0x%x\\n\",\n-\t\t\t\t __func__, val);\n-\t\t\tret_code = I40IW_ERR_PE_DOORBELL_NOT_ENABLED;\n-\t\t\treturn ret_code;\n-\t\t}\n-\t\tdev->db_addr = dev->hw->hw_addr + I40IW_DB_ADDR_OFFSET;\n-\t\tdev->vchnl_if.vchnl_recv = i40iw_vchnl_recv_pf;\n-\t} else {\n-\t\tdev->db_addr = dev->hw->hw_addr + I40IW_VF_DB_ADDR_OFFSET;\n-\t}\n-\n-\tdev->cqp_ops = &iw_cqp_ops;\n-\tdev->ccq_ops = &iw_ccq_ops;\n-\tdev->ceq_ops = &iw_ceq_ops;\n-\tdev->aeq_ops = &iw_aeq_ops;\n-\tdev->cqp_misc_ops = &iw_cqp_misc_ops;\n-\tdev->iw_pd_ops = &iw_pd_ops;\n-\tdev->iw_priv_qp_ops = &iw_priv_qp_ops;\n-\tdev->iw_priv_cq_ops = &iw_priv_cq_ops;\n-\tdev->mr_ops = &iw_mr_ops;\n-\tdev->hmc_ops = &iw_hmc_ops;\n-\tdev->vchnl_if.vchnl_send = info->vchnl_send;\n-\tif (dev->vchnl_if.vchnl_send)\n-\t\tdev->vchnl_up = true;\n-\telse\n-\t\tdev->vchnl_up = false;\n-\tif (!dev->is_pf) {\n-\t\tdev->vchnl_if.vchnl_recv = i40iw_vchnl_recv_vf;\n-\t\tret_code = i40iw_vchnl_vf_get_ver(dev, &vchnl_ver);\n-\t\tif (!ret_code) {\n-\t\t\ti40iw_debug(dev, I40IW_DEBUG_DEV,\n-\t\t\t\t \"%s: Get Channel version rc = 0x%0x, version is %u\\n\",\n-\t\t\t\t__func__, ret_code, vchnl_ver);\n-\t\t\tret_code = i40iw_vchnl_vf_get_hmc_fcn(dev, &hmc_fcn);\n-\t\t\tif 
(!ret_code) {\n-\t\t\t\ti40iw_debug(dev, I40IW_DEBUG_DEV,\n-\t\t\t\t\t \"%s Get HMC function rc = 0x%0x, hmc fcn is %u\\n\",\n-\t\t\t\t\t __func__, ret_code, hmc_fcn);\n-\t\t\t\tdev->hmc_fn_id = (u8)hmc_fcn;\n-\t\t\t}\n-\t\t}\n-\t}\n-\tdev->iw_vf_cqp_ops = &iw_vf_cqp_ops;\n-\n-\treturn ret_code;\n-}\ndiff --git a/drivers/infiniband/hw/i40iw/i40iw_d.h b/drivers/infiniband/hw/i40iw/i40iw_d.h\ndeleted file mode 100644\nindex 6ddaeec..0000000\n--- a/drivers/infiniband/hw/i40iw/i40iw_d.h\n+++ /dev/null\n@@ -1,1737 +0,0 @@\n-/*******************************************************************************\n-*\n-* Copyright (c) 2015-2016 Intel Corporation. All rights reserved.\n-*\n-* This software is available to you under a choice of one of two\n-* licenses. You may choose to be licensed under the terms of the GNU\n-* General Public License (GPL) Version 2, available from the file\n-* COPYING in the main directory of this source tree, or the\n-* OpenFabrics.org BSD license below:\n-*\n-* Redistribution and use in source and binary forms, with or\n-* without modification, are permitted provided that the following\n-* conditions are met:\n-*\n-* - Redistributions of source code must retain the above\n-*\tcopyright notice, this list of conditions and the following\n-*\tdisclaimer.\n-*\n-* - Redistributions in binary form must reproduce the above\n-*\tcopyright notice, this list of conditions and the following\n-*\tdisclaimer in the documentation and/or other materials\n-*\tprovided with the distribution.\n-*\n-* THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,\n-* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\n-* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\n-* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS\n-* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN\n-* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\n-* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n-* SOFTWARE.\n-*\n-*******************************************************************************/\n-\n-#ifndef I40IW_D_H\n-#define I40IW_D_H\n-\n-#define I40IW_FIRST_USER_QP_ID 2\n-\n-#define I40IW_DB_ADDR_OFFSET (4 * 1024 * 1024 - 64 * 1024)\n-#define I40IW_VF_DB_ADDR_OFFSET (64 * 1024)\n-\n-#define I40IW_PUSH_OFFSET (4 * 1024 * 1024)\n-#define I40IW_PF_FIRST_PUSH_PAGE_INDEX 16\n-#define I40IW_VF_PUSH_OFFSET ((8 + 64) * 1024)\n-#define I40IW_VF_FIRST_PUSH_PAGE_INDEX 2\n-\n-#define I40IW_PE_DB_SIZE_4M 1\n-#define I40IW_PE_DB_SIZE_8M 2\n-\n-#define I40IW_DDP_VER 1\n-#define I40IW_RDMAP_VER 1\n-\n-#define I40IW_RDMA_MODE_RDMAC 0\n-#define I40IW_RDMA_MODE_IETF 1\n-\n-#define I40IW_QP_STATE_INVALID 0\n-#define I40IW_QP_STATE_IDLE 1\n-#define I40IW_QP_STATE_RTS 2\n-#define I40IW_QP_STATE_CLOSING 3\n-#define I40IW_QP_STATE_RESERVED 4\n-#define I40IW_QP_STATE_TERMINATE 5\n-#define I40IW_QP_STATE_ERROR 6\n-\n-#define I40IW_STAG_STATE_INVALID 0\n-#define I40IW_STAG_STATE_VALID 1\n-\n-#define I40IW_STAG_TYPE_SHARED 0\n-#define I40IW_STAG_TYPE_NONSHARED 1\n-\n-#define I40IW_MAX_USER_PRIORITY 8\n-#define I40IW_MAX_STATS_COUNT 16\n-#define I40IW_FIRST_NON_PF_STAT\t4\n-\n-\n-#define I40IW_MTU_TO_MSS_IPV4\t\t40\n-#define I40IW_MTU_TO_MSS_IPV6\t\t60\n-#define I40IW_DEFAULT_MTU\t\t1500\n-\n-#define LS_64_1(val, bits) ((u64)(uintptr_t)val << bits)\n-#define RS_64_1(val, bits) ((u64)(uintptr_t)val >> bits)\n-#define LS_32_1(val, bits) (u32)(val << bits)\n-#define RS_32_1(val, bits) (u32)(val >> bits)\n-#define I40E_HI_DWORD(x) 
((u32)((((x) >> 16) >> 16) & 0xFFFFFFFF))\n-\n-#define QS_HANDLE_UNKNOWN 0xffff\n-\n-#define LS_64(val, field) (((u64)val << field ## _SHIFT) & (field ## _MASK))\n-\n-#define RS_64(val, field) ((u64)(val & field ## _MASK) >> field ## _SHIFT)\n-#define LS_32(val, field) ((val << field ## _SHIFT) & (field ## _MASK))\n-#define RS_32(val, field) ((val & field ## _MASK) >> field ## _SHIFT)\n-\n-#define TERM_DDP_LEN_TAGGED 14\n-#define TERM_DDP_LEN_UNTAGGED 18\n-#define TERM_RDMA_LEN 28\n-#define RDMA_OPCODE_MASK 0x0f\n-#define RDMA_READ_REQ_OPCODE 1\n-#define Q2_BAD_FRAME_OFFSET 72\n-#define Q2_FPSN_OFFSET 64\n-#define CQE_MAJOR_DRV 0x8000\n-\n-#define I40IW_TERM_SENT 0x01\n-#define I40IW_TERM_RCVD 0x02\n-#define I40IW_TERM_DONE 0x04\n-#define I40IW_MAC_HLEN 14\n-\n-#define I40IW_INVALID_WQE_INDEX 0xffffffff\n-\n-#define I40IW_CQP_WAIT_POLL_REGS 1\n-#define I40IW_CQP_WAIT_POLL_CQ 2\n-#define I40IW_CQP_WAIT_EVENT 3\n-\n-#define I40IW_CQP_INIT_WQE(wqe) memset(wqe, 0, 64)\n-\n-#define I40IW_GET_CURRENT_CQ_ELEMENT(_cq) \\\n-\t( \\\n-\t\t&((_cq)->cq_base[I40IW_RING_GETCURRENT_HEAD((_cq)->cq_ring)]) \\\n-\t)\n-#define I40IW_GET_CURRENT_EXTENDED_CQ_ELEMENT(_cq) \\\n-\t( \\\n-\t\t&(((struct i40iw_extended_cqe *) \\\n-\t\t ((_cq)->cq_base))[I40IW_RING_GETCURRENT_HEAD((_cq)->cq_ring)]) \\\n-\t)\n-\n-#define I40IW_GET_CURRENT_AEQ_ELEMENT(_aeq) \\\n-\t( \\\n-\t\t&_aeq->aeqe_base[I40IW_RING_GETCURRENT_TAIL(_aeq->aeq_ring)] \\\n-\t)\n-\n-#define I40IW_GET_CURRENT_CEQ_ELEMENT(_ceq) \\\n-\t( \\\n-\t\t&_ceq->ceqe_base[I40IW_RING_GETCURRENT_TAIL(_ceq->ceq_ring)] \\\n-\t)\n-\n-#define I40IW_AE_SOURCE_RSVD 0x0\n-#define I40IW_AE_SOURCE_RQ 0x1\n-#define I40IW_AE_SOURCE_RQ_0011 0x3\n-\n-#define I40IW_AE_SOURCE_CQ 0x2\n-#define I40IW_AE_SOURCE_CQ_0110 0x6\n-#define I40IW_AE_SOURCE_CQ_1010 0xA\n-#define I40IW_AE_SOURCE_CQ_1110 0xE\n-\n-#define I40IW_AE_SOURCE_SQ 0x5\n-#define I40IW_AE_SOURCE_SQ_0111 0x7\n-\n-#define I40IW_AE_SOURCE_IN_RR_WR 0x9\n-#define I40IW_AE_SOURCE_IN_RR_WR_1011 0xB\n-#define I40IW_AE_SOURCE_OUT_RR 0xD\n-#define I40IW_AE_SOURCE_OUT_RR_1111 0xF\n-\n-#define I40IW_TCP_STATE_NON_EXISTENT 0\n-#define I40IW_TCP_STATE_CLOSED 1\n-#define I40IW_TCP_STATE_LISTEN 2\n-#define I40IW_STATE_SYN_SEND 3\n-#define I40IW_TCP_STATE_SYN_RECEIVED 4\n-#define I40IW_TCP_STATE_ESTABLISHED 5\n-#define I40IW_TCP_STATE_CLOSE_WAIT 6\n-#define I40IW_TCP_STATE_FIN_WAIT_1 7\n-#define I40IW_TCP_STATE_CLOSING 8\n-#define I40IW_TCP_STATE_LAST_ACK 9\n-#define I40IW_TCP_STATE_FIN_WAIT_2 10\n-#define I40IW_TCP_STATE_TIME_WAIT 11\n-#define I40IW_TCP_STATE_RESERVED_1 12\n-#define I40IW_TCP_STATE_RESERVED_2 13\n-#define I40IW_TCP_STATE_RESERVED_3 14\n-#define I40IW_TCP_STATE_RESERVED_4 15\n-\n-/* ILQ CQP hash table fields */\n-#define I40IW_CQPSQ_QHASH_VLANID_SHIFT 32\n-#define I40IW_CQPSQ_QHASH_VLANID_MASK \\\n-\t((u64)0xfff << I40IW_CQPSQ_QHASH_VLANID_SHIFT)\n-\n-#define I40IW_CQPSQ_QHASH_QPN_SHIFT 32\n-#define I40IW_CQPSQ_QHASH_QPN_MASK \\\n-\t((u64)0x3ffff << I40IW_CQPSQ_QHASH_QPN_SHIFT)\n-\n-#define I40IW_CQPSQ_QHASH_QS_HANDLE_SHIFT 0\n-#define I40IW_CQPSQ_QHASH_QS_HANDLE_MASK ((u64)0x3ff << I40IW_CQPSQ_QHASH_QS_HANDLE_SHIFT)\n-\n-#define I40IW_CQPSQ_QHASH_SRC_PORT_SHIFT 16\n-#define I40IW_CQPSQ_QHASH_SRC_PORT_MASK \\\n-\t((u64)0xffff << I40IW_CQPSQ_QHASH_SRC_PORT_SHIFT)\n-\n-#define I40IW_CQPSQ_QHASH_DEST_PORT_SHIFT 0\n-#define I40IW_CQPSQ_QHASH_DEST_PORT_MASK \\\n-\t((u64)0xffff << I40IW_CQPSQ_QHASH_DEST_PORT_SHIFT)\n-\n-#define I40IW_CQPSQ_QHASH_ADDR0_SHIFT 32\n-#define I40IW_CQPSQ_QHASH_ADDR0_MASK \\\n-\t((u64)0xffffffff << 
I40IW_CQPSQ_QHASH_ADDR0_SHIFT)\n-\n-#define I40IW_CQPSQ_QHASH_ADDR1_SHIFT 0\n-#define I40IW_CQPSQ_QHASH_ADDR1_MASK \\\n-\t((u64)0xffffffff << I40IW_CQPSQ_QHASH_ADDR1_SHIFT)\n-\n-#define I40IW_CQPSQ_QHASH_ADDR2_SHIFT 32\n-#define I40IW_CQPSQ_QHASH_ADDR2_MASK \\\n-\t((u64)0xffffffff << I40IW_CQPSQ_QHASH_ADDR2_SHIFT)\n-\n-#define I40IW_CQPSQ_QHASH_ADDR3_SHIFT 0\n-#define I40IW_CQPSQ_QHASH_ADDR3_MASK \\\n-\t((u64)0xffffffff << I40IW_CQPSQ_QHASH_ADDR3_SHIFT)\n-\n-#define I40IW_CQPSQ_QHASH_WQEVALID_SHIFT 63\n-#define I40IW_CQPSQ_QHASH_WQEVALID_MASK \\\n-\t((u64)0x1 << I40IW_CQPSQ_QHASH_WQEVALID_SHIFT)\n-#define I40IW_CQPSQ_QHASH_OPCODE_SHIFT 32\n-#define I40IW_CQPSQ_QHASH_OPCODE_MASK \\\n-\t((u64)0x3f << I40IW_CQPSQ_QHASH_OPCODE_SHIFT)\n-\n-#define I40IW_CQPSQ_QHASH_MANAGE_SHIFT 61\n-#define I40IW_CQPSQ_QHASH_MANAGE_MASK \\\n-\t((u64)0x3 << I40IW_CQPSQ_QHASH_MANAGE_SHIFT)\n-\n-#define I40IW_CQPSQ_QHASH_IPV4VALID_SHIFT 60\n-#define I40IW_CQPSQ_QHASH_IPV4VALID_MASK \\\n-\t((u64)0x1 << I40IW_CQPSQ_QHASH_IPV4VALID_SHIFT)\n-\n-#define I40IW_CQPSQ_QHASH_VLANVALID_SHIFT 59\n-#define I40IW_CQPSQ_QHASH_VLANVALID_MASK \\\n-\t((u64)0x1 << I40IW_CQPSQ_QHASH_VLANVALID_SHIFT)\n-\n-#define I40IW_CQPSQ_QHASH_ENTRYTYPE_SHIFT 42\n-#define I40IW_CQPSQ_QHASH_ENTRYTYPE_MASK \\\n-\t((u64)0x7 << I40IW_CQPSQ_QHASH_ENTRYTYPE_SHIFT)\n-/* CQP Host Context */\n-#define I40IW_CQPHC_EN_DC_TCP_SHIFT 0\n-#define I40IW_CQPHC_EN_DC_TCP_MASK (1UL << I40IW_CQPHC_EN_DC_TCP_SHIFT)\n-\n-#define I40IW_CQPHC_SQSIZE_SHIFT 8\n-#define I40IW_CQPHC_SQSIZE_MASK (0xfUL << I40IW_CQPHC_SQSIZE_SHIFT)\n-\n-#define I40IW_CQPHC_DISABLE_PFPDUS_SHIFT 1\n-#define I40IW_CQPHC_DISABLE_PFPDUS_MASK (0x1UL << I40IW_CQPHC_DISABLE_PFPDUS_SHIFT)\n-\n-#define I40IW_CQPHC_ENABLED_VFS_SHIFT 32\n-#define I40IW_CQPHC_ENABLED_VFS_MASK (0x3fULL << I40IW_CQPHC_ENABLED_VFS_SHIFT)\n-\n-#define I40IW_CQPHC_HMC_PROFILE_SHIFT 0\n-#define I40IW_CQPHC_HMC_PROFILE_MASK (0x7ULL << I40IW_CQPHC_HMC_PROFILE_SHIFT)\n-\n-#define I40IW_CQPHC_SVER_SHIFT 24\n-#define I40IW_CQPHC_SVER_MASK (0xffUL << I40IW_CQPHC_SVER_SHIFT)\n-\n-#define I40IW_CQPHC_SQBASE_SHIFT 9\n-#define I40IW_CQPHC_SQBASE_MASK \\\n-\t(0xfffffffffffffeULL << I40IW_CQPHC_SQBASE_SHIFT)\n-\n-#define I40IW_CQPHC_QPCTX_SHIFT 0\n-#define I40IW_CQPHC_QPCTX_MASK \\\n-\t(0xffffffffffffffffULL << I40IW_CQPHC_QPCTX_SHIFT)\n-#define I40IW_CQPHC_SVER 1\n-\n-#define I40IW_CQP_SW_SQSIZE_4 4\n-#define I40IW_CQP_SW_SQSIZE_2048 2048\n-\n-/* iWARP QP Doorbell shadow area */\n-#define I40IW_QP_DBSA_HW_SQ_TAIL_SHIFT 0\n-#define I40IW_QP_DBSA_HW_SQ_TAIL_MASK \\\n-\t(0x3fffUL << I40IW_QP_DBSA_HW_SQ_TAIL_SHIFT)\n-\n-/* Completion Queue Doorbell shadow area */\n-#define I40IW_CQ_DBSA_CQEIDX_SHIFT 0\n-#define I40IW_CQ_DBSA_CQEIDX_MASK (0xfffffUL << I40IW_CQ_DBSA_CQEIDX_SHIFT)\n-\n-#define I40IW_CQ_DBSA_SW_CQ_SELECT_SHIFT 0\n-#define I40IW_CQ_DBSA_SW_CQ_SELECT_MASK \\\n-\t(0x3fffUL << I40IW_CQ_DBSA_SW_CQ_SELECT_SHIFT)\n-\n-#define I40IW_CQ_DBSA_ARM_NEXT_SHIFT 14\n-#define I40IW_CQ_DBSA_ARM_NEXT_MASK (1UL << I40IW_CQ_DBSA_ARM_NEXT_SHIFT)\n-\n-#define I40IW_CQ_DBSA_ARM_NEXT_SE_SHIFT 15\n-#define I40IW_CQ_DBSA_ARM_NEXT_SE_MASK (1UL << I40IW_CQ_DBSA_ARM_NEXT_SE_SHIFT)\n-\n-#define I40IW_CQ_DBSA_ARM_SEQ_NUM_SHIFT 16\n-#define I40IW_CQ_DBSA_ARM_SEQ_NUM_MASK \\\n-\t(0x3UL << I40IW_CQ_DBSA_ARM_SEQ_NUM_SHIFT)\n-\n-/* CQP and iWARP Completion Queue */\n-#define I40IW_CQ_QPCTX_SHIFT I40IW_CQPHC_QPCTX_SHIFT\n-#define I40IW_CQ_QPCTX_MASK I40IW_CQPHC_QPCTX_MASK\n-\n-#define I40IW_CCQ_OPRETVAL_SHIFT 0\n-#define I40IW_CCQ_OPRETVAL_MASK (0xffffffffUL << 
I40IW_CCQ_OPRETVAL_SHIFT)\n-\n-#define I40IW_CQ_MINERR_SHIFT 0\n-#define I40IW_CQ_MINERR_MASK (0xffffUL << I40IW_CQ_MINERR_SHIFT)\n-\n-#define I40IW_CQ_MAJERR_SHIFT 16\n-#define I40IW_CQ_MAJERR_MASK (0xffffUL << I40IW_CQ_MAJERR_SHIFT)\n-\n-#define I40IW_CQ_WQEIDX_SHIFT 32\n-#define I40IW_CQ_WQEIDX_MASK (0x3fffULL << I40IW_CQ_WQEIDX_SHIFT)\n-\n-#define I40IW_CQ_ERROR_SHIFT 55\n-#define I40IW_CQ_ERROR_MASK (1ULL << I40IW_CQ_ERROR_SHIFT)\n-\n-#define I40IW_CQ_SQ_SHIFT 62\n-#define I40IW_CQ_SQ_MASK (1ULL << I40IW_CQ_SQ_SHIFT)\n-\n-#define I40IW_CQ_VALID_SHIFT 63\n-#define I40IW_CQ_VALID_MASK (1ULL << I40IW_CQ_VALID_SHIFT)\n-\n-#define I40IWCQ_PAYLDLEN_SHIFT 0\n-#define I40IWCQ_PAYLDLEN_MASK (0xffffffffUL << I40IWCQ_PAYLDLEN_SHIFT)\n-\n-#define I40IWCQ_TCPSEQNUM_SHIFT 32\n-#define I40IWCQ_TCPSEQNUM_MASK (0xffffffffULL << I40IWCQ_TCPSEQNUM_SHIFT)\n-\n-#define I40IWCQ_INVSTAG_SHIFT 0\n-#define I40IWCQ_INVSTAG_MASK (0xffffffffUL << I40IWCQ_INVSTAG_SHIFT)\n-\n-#define I40IWCQ_QPID_SHIFT 32\n-#define I40IWCQ_QPID_MASK (0x3ffffULL << I40IWCQ_QPID_SHIFT)\n-\n-#define I40IWCQ_PSHDROP_SHIFT 51\n-#define I40IWCQ_PSHDROP_MASK (1ULL << I40IWCQ_PSHDROP_SHIFT)\n-\n-#define I40IWCQ_SRQ_SHIFT 52\n-#define I40IWCQ_SRQ_MASK (1ULL << I40IWCQ_SRQ_SHIFT)\n-\n-#define I40IWCQ_STAG_SHIFT 53\n-#define I40IWCQ_STAG_MASK (1ULL << I40IWCQ_STAG_SHIFT)\n-\n-#define I40IWCQ_SOEVENT_SHIFT 54\n-#define I40IWCQ_SOEVENT_MASK (1ULL << I40IWCQ_SOEVENT_SHIFT)\n-\n-#define I40IWCQ_OP_SHIFT 56\n-#define I40IWCQ_OP_MASK (0x3fULL << I40IWCQ_OP_SHIFT)\n-\n-/* CEQE format */\n-#define I40IW_CEQE_CQCTX_SHIFT 0\n-#define I40IW_CEQE_CQCTX_MASK \\\n-\t(0x7fffffffffffffffULL << I40IW_CEQE_CQCTX_SHIFT)\n-\n-#define I40IW_CEQE_VALID_SHIFT 63\n-#define I40IW_CEQE_VALID_MASK (1ULL << I40IW_CEQE_VALID_SHIFT)\n-\n-/* AEQE format */\n-#define I40IW_AEQE_COMPCTX_SHIFT I40IW_CQPHC_QPCTX_SHIFT\n-#define I40IW_AEQE_COMPCTX_MASK I40IW_CQPHC_QPCTX_MASK\n-\n-#define I40IW_AEQE_QPCQID_SHIFT 0\n-#define I40IW_AEQE_QPCQID_MASK (0x3ffffUL << I40IW_AEQE_QPCQID_SHIFT)\n-\n-#define I40IW_AEQE_WQDESCIDX_SHIFT 18\n-#define I40IW_AEQE_WQDESCIDX_MASK (0x3fffULL << I40IW_AEQE_WQDESCIDX_SHIFT)\n-\n-#define I40IW_AEQE_OVERFLOW_SHIFT 33\n-#define I40IW_AEQE_OVERFLOW_MASK (1ULL << I40IW_AEQE_OVERFLOW_SHIFT)\n-\n-#define I40IW_AEQE_AECODE_SHIFT 34\n-#define I40IW_AEQE_AECODE_MASK (0xffffULL << I40IW_AEQE_AECODE_SHIFT)\n-\n-#define I40IW_AEQE_AESRC_SHIFT 50\n-#define I40IW_AEQE_AESRC_MASK (0xfULL << I40IW_AEQE_AESRC_SHIFT)\n-\n-#define I40IW_AEQE_IWSTATE_SHIFT 54\n-#define I40IW_AEQE_IWSTATE_MASK (0x7ULL << I40IW_AEQE_IWSTATE_SHIFT)\n-\n-#define I40IW_AEQE_TCPSTATE_SHIFT 57\n-#define I40IW_AEQE_TCPSTATE_MASK (0xfULL << I40IW_AEQE_TCPSTATE_SHIFT)\n-\n-#define I40IW_AEQE_Q2DATA_SHIFT 61\n-#define I40IW_AEQE_Q2DATA_MASK (0x3ULL << I40IW_AEQE_Q2DATA_SHIFT)\n-\n-#define I40IW_AEQE_VALID_SHIFT 63\n-#define I40IW_AEQE_VALID_MASK (1ULL << I40IW_AEQE_VALID_SHIFT)\n-\n-/* CQP SQ WQES */\n-#define I40IW_QP_TYPE_IWARP 1\n-#define I40IW_QP_TYPE_UDA 2\n-#define I40IW_QP_TYPE_CQP 4\n-\n-#define I40IW_CQ_TYPE_IWARP 1\n-#define I40IW_CQ_TYPE_ILQ 2\n-#define I40IW_CQ_TYPE_IEQ 3\n-#define I40IW_CQ_TYPE_CQP 4\n-\n-#define I40IWQP_TERM_SEND_TERM_AND_FIN 0\n-#define I40IWQP_TERM_SEND_TERM_ONLY 1\n-#define I40IWQP_TERM_SEND_FIN_ONLY 2\n-#define I40IWQP_TERM_DONOT_SEND_TERM_OR_FIN 3\n-\n-#define I40IW_CQP_OP_CREATE_QP 0\n-#define I40IW_CQP_OP_MODIFY_QP 0x1\n-#define I40IW_CQP_OP_DESTROY_QP 0x02\n-#define I40IW_CQP_OP_CREATE_CQ 0x03\n-#define I40IW_CQP_OP_MODIFY_CQ 0x04\n-#define 
I40IW_CQP_OP_DESTROY_CQ 0x05\n-#define I40IW_CQP_OP_CREATE_SRQ 0x06\n-#define I40IW_CQP_OP_MODIFY_SRQ 0x07\n-#define I40IW_CQP_OP_DESTROY_SRQ 0x08\n-#define I40IW_CQP_OP_ALLOC_STAG 0x09\n-#define I40IW_CQP_OP_REG_MR 0x0a\n-#define I40IW_CQP_OP_QUERY_STAG 0x0b\n-#define I40IW_CQP_OP_REG_SMR 0x0c\n-#define I40IW_CQP_OP_DEALLOC_STAG 0x0d\n-#define I40IW_CQP_OP_MANAGE_LOC_MAC_IP_TABLE 0x0e\n-#define I40IW_CQP_OP_MANAGE_ARP 0x0f\n-#define I40IW_CQP_OP_MANAGE_VF_PBLE_BP 0x10\n-#define I40IW_CQP_OP_MANAGE_PUSH_PAGES 0x11\n-#define I40IW_CQP_OP_MANAGE_PE_TEAM 0x12\n-#define I40IW_CQP_OP_UPLOAD_CONTEXT 0x13\n-#define I40IW_CQP_OP_ALLOCATE_LOC_MAC_IP_TABLE_ENTRY 0x14\n-#define I40IW_CQP_OP_MANAGE_HMC_PM_FUNC_TABLE 0x15\n-#define I40IW_CQP_OP_CREATE_CEQ 0x16\n-#define I40IW_CQP_OP_DESTROY_CEQ 0x18\n-#define I40IW_CQP_OP_CREATE_AEQ 0x19\n-#define I40IW_CQP_OP_DESTROY_AEQ 0x1b\n-#define I40IW_CQP_OP_CREATE_ADDR_VECT 0x1c\n-#define I40IW_CQP_OP_MODIFY_ADDR_VECT 0x1d\n-#define I40IW_CQP_OP_DESTROY_ADDR_VECT 0x1e\n-#define I40IW_CQP_OP_UPDATE_PE_SDS 0x1f\n-#define I40IW_CQP_OP_QUERY_FPM_VALUES 0x20\n-#define I40IW_CQP_OP_COMMIT_FPM_VALUES 0x21\n-#define I40IW_CQP_OP_FLUSH_WQES 0x22\n-/* I40IW_CQP_OP_GEN_AE is the same value as I40IW_CQP_OP_FLUSH_WQES */\n-#define I40IW_CQP_OP_GEN_AE 0x22\n-#define I40IW_CQP_OP_MANAGE_APBVT 0x23\n-#define I40IW_CQP_OP_NOP 0x24\n-#define I40IW_CQP_OP_MANAGE_QUAD_HASH_TABLE_ENTRY 0x25\n-#define I40IW_CQP_OP_CREATE_UDA_MCAST_GROUP 0x26\n-#define I40IW_CQP_OP_MODIFY_UDA_MCAST_GROUP 0x27\n-#define I40IW_CQP_OP_DESTROY_UDA_MCAST_GROUP 0x28\n-#define I40IW_CQP_OP_SUSPEND_QP 0x29\n-#define I40IW_CQP_OP_RESUME_QP 0x2a\n-#define I40IW_CQP_OP_SHMC_PAGES_ALLOCATED 0x2b\n-#define I40IW_CQP_OP_SET_HMC_RESOURCE_PROFILE 0x2d\n-\n-#define I40IW_UDA_QPSQ_NEXT_HEADER_SHIFT 16\n-#define I40IW_UDA_QPSQ_NEXT_HEADER_MASK ((u64)0xff << I40IW_UDA_QPSQ_NEXT_HEADER_SHIFT)\n-\n-#define I40IW_UDA_QPSQ_OPCODE_SHIFT 32\n-#define I40IW_UDA_QPSQ_OPCODE_MASK ((u64)0x3f << I40IW_UDA_QPSQ_OPCODE_SHIFT)\n-\n-#define I40IW_UDA_QPSQ_MACLEN_SHIFT 56\n-#define I40IW_UDA_QPSQ_MACLEN_MASK \\\n-\t((u64)0x7f << I40IW_UDA_QPSQ_MACLEN_SHIFT)\n-\n-#define I40IW_UDA_QPSQ_IPLEN_SHIFT 48\n-#define I40IW_UDA_QPSQ_IPLEN_MASK \\\n-\t((u64)0x7f << I40IW_UDA_QPSQ_IPLEN_SHIFT)\n-\n-#define I40IW_UDA_QPSQ_L4T_SHIFT 30\n-#define I40IW_UDA_QPSQ_L4T_MASK \\\n-\t((u64)0x3 << I40IW_UDA_QPSQ_L4T_SHIFT)\n-\n-#define I40IW_UDA_QPSQ_IIPT_SHIFT 28\n-#define I40IW_UDA_QPSQ_IIPT_MASK \\\n-\t((u64)0x3 << I40IW_UDA_QPSQ_IIPT_SHIFT)\n-\n-#define I40IW_UDA_QPSQ_L4LEN_SHIFT 24\n-#define I40IW_UDA_QPSQ_L4LEN_MASK ((u64)0xf << I40IW_UDA_QPSQ_L4LEN_SHIFT)\n-\n-#define I40IW_UDA_QPSQ_AVIDX_SHIFT 0\n-#define I40IW_UDA_QPSQ_AVIDX_MASK ((u64)0xffff << I40IW_UDA_QPSQ_AVIDX_SHIFT)\n-\n-#define I40IW_UDA_QPSQ_VALID_SHIFT 63\n-#define I40IW_UDA_QPSQ_VALID_MASK \\\n-\t((u64)0x1 << I40IW_UDA_QPSQ_VALID_SHIFT)\n-\n-#define I40IW_UDA_QPSQ_SIGCOMPL_SHIFT 62\n-#define I40IW_UDA_QPSQ_SIGCOMPL_MASK ((u64)0x1 << I40IW_UDA_QPSQ_SIGCOMPL_SHIFT)\n-\n-#define I40IW_UDA_PAYLOADLEN_SHIFT 0\n-#define I40IW_UDA_PAYLOADLEN_MASK ((u64)0x3fff << I40IW_UDA_PAYLOADLEN_SHIFT)\n-\n-#define I40IW_UDA_HDRLEN_SHIFT 16\n-#define I40IW_UDA_HDRLEN_MASK ((u64)0x1ff << I40IW_UDA_HDRLEN_SHIFT)\n-\n-#define I40IW_VLAN_TAG_VALID_SHIFT 50\n-#define I40IW_VLAN_TAG_VALID_MASK ((u64)0x1 << I40IW_VLAN_TAG_VALID_SHIFT)\n-\n-#define I40IW_UDA_L3PROTO_SHIFT 0\n-#define I40IW_UDA_L3PROTO_MASK ((u64)0x3 << I40IW_UDA_L3PROTO_SHIFT)\n-\n-#define I40IW_UDA_L4PROTO_SHIFT 16\n-#define 
I40IW_UDA_L4PROTO_MASK ((u64)0x3 << I40IW_UDA_L4PROTO_SHIFT)\n-\n-#define I40IW_UDA_QPSQ_DOLOOPBACK_SHIFT 44\n-#define I40IW_UDA_QPSQ_DOLOOPBACK_MASK \\\n-\t((u64)0x1 << I40IW_UDA_QPSQ_DOLOOPBACK_SHIFT)\n-\n-/* CQP SQ WQE common fields */\n-#define I40IW_CQPSQ_OPCODE_SHIFT 32\n-#define I40IW_CQPSQ_OPCODE_MASK (0x3fULL << I40IW_CQPSQ_OPCODE_SHIFT)\n-\n-#define I40IW_CQPSQ_WQEVALID_SHIFT 63\n-#define I40IW_CQPSQ_WQEVALID_MASK (1ULL << I40IW_CQPSQ_WQEVALID_SHIFT)\n-\n-#define I40IW_CQPSQ_TPHVAL_SHIFT 0\n-#define I40IW_CQPSQ_TPHVAL_MASK (0xffUL << I40IW_CQPSQ_TPHVAL_SHIFT)\n-\n-#define I40IW_CQPSQ_TPHEN_SHIFT 60\n-#define I40IW_CQPSQ_TPHEN_MASK (1ULL << I40IW_CQPSQ_TPHEN_SHIFT)\n-\n-#define I40IW_CQPSQ_PBUFADDR_SHIFT I40IW_CQPHC_QPCTX_SHIFT\n-#define I40IW_CQPSQ_PBUFADDR_MASK I40IW_CQPHC_QPCTX_MASK\n-\n-/* Create/Modify/Destroy QP */\n-\n-#define I40IW_CQPSQ_QP_NEWMSS_SHIFT 32\n-#define I40IW_CQPSQ_QP_NEWMSS_MASK (0x3fffULL << I40IW_CQPSQ_QP_NEWMSS_SHIFT)\n-\n-#define I40IW_CQPSQ_QP_TERMLEN_SHIFT 48\n-#define I40IW_CQPSQ_QP_TERMLEN_MASK (0xfULL << I40IW_CQPSQ_QP_TERMLEN_SHIFT)\n-\n-#define I40IW_CQPSQ_QP_QPCTX_SHIFT I40IW_CQPHC_QPCTX_SHIFT\n-#define I40IW_CQPSQ_QP_QPCTX_MASK I40IW_CQPHC_QPCTX_MASK\n-\n-#define I40IW_CQPSQ_QP_QPID_SHIFT 0\n-#define I40IW_CQPSQ_QP_QPID_MASK (0x3FFFFUL)\n-/* I40IWCQ_QPID_MASK */\n-\n-#define I40IW_CQPSQ_QP_OP_SHIFT 32\n-#define I40IW_CQPSQ_QP_OP_MASK I40IWCQ_OP_MASK\n-\n-#define I40IW_CQPSQ_QP_ORDVALID_SHIFT 42\n-#define I40IW_CQPSQ_QP_ORDVALID_MASK (1ULL << I40IW_CQPSQ_QP_ORDVALID_SHIFT)\n-\n-#define I40IW_CQPSQ_QP_TOECTXVALID_SHIFT 43\n-#define I40IW_CQPSQ_QP_TOECTXVALID_MASK \\\n-\t(1ULL << I40IW_CQPSQ_QP_TOECTXVALID_SHIFT)\n-\n-#define I40IW_CQPSQ_QP_CACHEDVARVALID_SHIFT 44\n-#define I40IW_CQPSQ_QP_CACHEDVARVALID_MASK \\\n-\t(1ULL << I40IW_CQPSQ_QP_CACHEDVARVALID_SHIFT)\n-\n-#define I40IW_CQPSQ_QP_VQ_SHIFT 45\n-#define I40IW_CQPSQ_QP_VQ_MASK (1ULL << I40IW_CQPSQ_QP_VQ_SHIFT)\n-\n-#define I40IW_CQPSQ_QP_FORCELOOPBACK_SHIFT 46\n-#define I40IW_CQPSQ_QP_FORCELOOPBACK_MASK \\\n-\t(1ULL << I40IW_CQPSQ_QP_FORCELOOPBACK_SHIFT)\n-\n-#define I40IW_CQPSQ_QP_CQNUMVALID_SHIFT 47\n-#define I40IW_CQPSQ_QP_CQNUMVALID_MASK \\\n-\t(1ULL << I40IW_CQPSQ_QP_CQNUMVALID_SHIFT)\n-\n-#define I40IW_CQPSQ_QP_QPTYPE_SHIFT 48\n-#define I40IW_CQPSQ_QP_QPTYPE_MASK (0x3ULL << I40IW_CQPSQ_QP_QPTYPE_SHIFT)\n-\n-#define I40IW_CQPSQ_QP_MSSCHANGE_SHIFT 52\n-#define I40IW_CQPSQ_QP_MSSCHANGE_MASK (1ULL << I40IW_CQPSQ_QP_MSSCHANGE_SHIFT)\n-\n-#define I40IW_CQPSQ_QP_IGNOREMWBOUND_SHIFT 54\n-#define I40IW_CQPSQ_QP_IGNOREMWBOUND_MASK \\\n-\t(1ULL << I40IW_CQPSQ_QP_IGNOREMWBOUND_SHIFT)\n-\n-#define I40IW_CQPSQ_QP_REMOVEHASHENTRY_SHIFT 55\n-#define I40IW_CQPSQ_QP_REMOVEHASHENTRY_MASK \\\n-\t(1ULL << I40IW_CQPSQ_QP_REMOVEHASHENTRY_SHIFT)\n-\n-#define I40IW_CQPSQ_QP_TERMACT_SHIFT 56\n-#define I40IW_CQPSQ_QP_TERMACT_MASK (0x3ULL << I40IW_CQPSQ_QP_TERMACT_SHIFT)\n-\n-#define I40IW_CQPSQ_QP_RESETCON_SHIFT 58\n-#define I40IW_CQPSQ_QP_RESETCON_MASK (1ULL << I40IW_CQPSQ_QP_RESETCON_SHIFT)\n-\n-#define I40IW_CQPSQ_QP_ARPTABIDXVALID_SHIFT 59\n-#define I40IW_CQPSQ_QP_ARPTABIDXVALID_MASK \\\n-\t(1ULL << I40IW_CQPSQ_QP_ARPTABIDXVALID_SHIFT)\n-\n-#define I40IW_CQPSQ_QP_NEXTIWSTATE_SHIFT 60\n-#define I40IW_CQPSQ_QP_NEXTIWSTATE_MASK \\\n-\t(0x7ULL << I40IW_CQPSQ_QP_NEXTIWSTATE_SHIFT)\n-\n-#define I40IW_CQPSQ_QP_DBSHADOWADDR_SHIFT I40IW_CQPHC_QPCTX_SHIFT\n-#define I40IW_CQPSQ_QP_DBSHADOWADDR_MASK I40IW_CQPHC_QPCTX_MASK\n-\n-/* Create/Modify/Destroy CQ */\n-#define I40IW_CQPSQ_CQ_CQSIZE_SHIFT 0\n-#define 
I40IW_CQPSQ_CQ_CQSIZE_MASK (0x3ffffUL << I40IW_CQPSQ_CQ_CQSIZE_SHIFT)\n-\n-#define I40IW_CQPSQ_CQ_CQCTX_SHIFT 0\n-#define I40IW_CQPSQ_CQ_CQCTX_MASK \\\n-\t(0x7fffffffffffffffULL << I40IW_CQPSQ_CQ_CQCTX_SHIFT)\n-\n-#define I40IW_CQPSQ_CQ_CQCTX_SHIFT 0\n-#define I40IW_CQPSQ_CQ_CQCTX_MASK \\\n-\t(0x7fffffffffffffffULL << I40IW_CQPSQ_CQ_CQCTX_SHIFT)\n-\n-#define I40IW_CQPSQ_CQ_SHADOW_READ_THRESHOLD_SHIFT 0\n-#define I40IW_CQPSQ_CQ_SHADOW_READ_THRESHOLD_MASK \\\n-\t(0x3ffff << I40IW_CQPSQ_CQ_SHADOW_READ_THRESHOLD_SHIFT)\n-\n-#define I40IW_CQPSQ_CQ_CEQID_SHIFT 24\n-#define I40IW_CQPSQ_CQ_CEQID_MASK (0x7fUL << I40IW_CQPSQ_CQ_CEQID_SHIFT)\n-\n-#define I40IW_CQPSQ_CQ_OP_SHIFT 32\n-#define I40IW_CQPSQ_CQ_OP_MASK (0x3fULL << I40IW_CQPSQ_CQ_OP_SHIFT)\n-\n-#define I40IW_CQPSQ_CQ_CQRESIZE_SHIFT 43\n-#define I40IW_CQPSQ_CQ_CQRESIZE_MASK (1ULL << I40IW_CQPSQ_CQ_CQRESIZE_SHIFT)\n-\n-#define I40IW_CQPSQ_CQ_LPBLSIZE_SHIFT 44\n-#define I40IW_CQPSQ_CQ_LPBLSIZE_MASK (3ULL << I40IW_CQPSQ_CQ_LPBLSIZE_SHIFT)\n-\n-#define I40IW_CQPSQ_CQ_CHKOVERFLOW_SHIFT 46\n-#define I40IW_CQPSQ_CQ_CHKOVERFLOW_MASK \\\n-\t(1ULL << I40IW_CQPSQ_CQ_CHKOVERFLOW_SHIFT)\n-\n-#define I40IW_CQPSQ_CQ_VIRTMAP_SHIFT 47\n-#define I40IW_CQPSQ_CQ_VIRTMAP_MASK (1ULL << I40IW_CQPSQ_CQ_VIRTMAP_SHIFT)\n-\n-#define I40IW_CQPSQ_CQ_ENCEQEMASK_SHIFT 48\n-#define I40IW_CQPSQ_CQ_ENCEQEMASK_MASK \\\n-\t(1ULL << I40IW_CQPSQ_CQ_ENCEQEMASK_SHIFT)\n-\n-#define I40IW_CQPSQ_CQ_CEQIDVALID_SHIFT 49\n-#define I40IW_CQPSQ_CQ_CEQIDVALID_MASK \\\n-\t(1ULL << I40IW_CQPSQ_CQ_CEQIDVALID_SHIFT)\n-\n-#define I40IW_CQPSQ_CQ_AVOIDMEMCNFLCT_SHIFT 61\n-#define I40IW_CQPSQ_CQ_AVOIDMEMCNFLCT_MASK \\\n-\t(1ULL << I40IW_CQPSQ_CQ_AVOIDMEMCNFLCT_SHIFT)\n-\n-/* Create/Modify/Destroy Shared Receive Queue */\n-\n-#define I40IW_CQPSQ_SRQ_RQSIZE_SHIFT 0\n-#define I40IW_CQPSQ_SRQ_RQSIZE_MASK (0xfUL << I40IW_CQPSQ_SRQ_RQSIZE_SHIFT)\n-\n-#define I40IW_CQPSQ_SRQ_RQWQESIZE_SHIFT 4\n-#define I40IW_CQPSQ_SRQ_RQWQESIZE_MASK \\\n-\t(0x7UL << I40IW_CQPSQ_SRQ_RQWQESIZE_SHIFT)\n-\n-#define I40IW_CQPSQ_SRQ_SRQLIMIT_SHIFT 32\n-#define I40IW_CQPSQ_SRQ_SRQLIMIT_MASK \\\n-\t(0xfffULL << I40IW_CQPSQ_SRQ_SRQLIMIT_SHIFT)\n-\n-#define I40IW_CQPSQ_SRQ_SRQCTX_SHIFT I40IW_CQPHC_QPCTX_SHIFT\n-#define I40IW_CQPSQ_SRQ_SRQCTX_MASK I40IW_CQPHC_QPCTX_MASK\n-\n-#define I40IW_CQPSQ_SRQ_PDID_SHIFT 16\n-#define I40IW_CQPSQ_SRQ_PDID_MASK \\\n-\t(0x7fffULL << I40IW_CQPSQ_SRQ_PDID_SHIFT)\n-\n-#define I40IW_CQPSQ_SRQ_SRQID_SHIFT 0\n-#define I40IW_CQPSQ_SRQ_SRQID_MASK (0x7fffUL << I40IW_CQPSQ_SRQ_SRQID_SHIFT)\n-\n-#define I40IW_CQPSQ_SRQ_LPBLSIZE_SHIFT I40IW_CQPSQ_CQ_LPBLSIZE_SHIFT\n-#define I40IW_CQPSQ_SRQ_LPBLSIZE_MASK I40IW_CQPSQ_CQ_LPBLSIZE_MASK\n-\n-#define I40IW_CQPSQ_SRQ_VIRTMAP_SHIFT I40IW_CQPSQ_CQ_VIRTMAP_SHIFT\n-#define I40IW_CQPSQ_SRQ_VIRTMAP_MASK I40IW_CQPSQ_CQ_VIRTMAP_MASK\n-\n-#define I40IW_CQPSQ_SRQ_TPHEN_SHIFT I40IW_CQPSQ_TPHEN_SHIFT\n-#define I40IW_CQPSQ_SRQ_TPHEN_MASK I40IW_CQPSQ_TPHEN_MASK\n-\n-#define I40IW_CQPSQ_SRQ_ARMLIMITEVENT_SHIFT 61\n-#define I40IW_CQPSQ_SRQ_ARMLIMITEVENT_MASK \\\n-\t(1ULL << I40IW_CQPSQ_SRQ_ARMLIMITEVENT_SHIFT)\n-\n-#define I40IW_CQPSQ_SRQ_DBSHADOWAREA_SHIFT 6\n-#define I40IW_CQPSQ_SRQ_DBSHADOWAREA_MASK \\\n-\t(0x3ffffffffffffffULL << I40IW_CQPSQ_SRQ_DBSHADOWAREA_SHIFT)\n-\n-#define I40IW_CQPSQ_SRQ_FIRSTPMPBLIDX_SHIFT 0\n-#define I40IW_CQPSQ_SRQ_FIRSTPMPBLIDX_MASK \\\n-\t(0xfffffffUL << I40IW_CQPSQ_SRQ_FIRSTPMPBLIDX_SHIFT)\n-\n-/* Allocate/Register/Register Shared/Deallocate Stag */\n-#define I40IW_CQPSQ_STAG_VA_FBO_SHIFT I40IW_CQPHC_QPCTX_SHIFT\n-#define 
I40IW_CQPSQ_STAG_VA_FBO_MASK I40IW_CQPHC_QPCTX_MASK\n-\n-#define I40IW_CQPSQ_STAG_STAGLEN_SHIFT 0\n-#define I40IW_CQPSQ_STAG_STAGLEN_MASK \\\n-\t(0x3fffffffffffULL << I40IW_CQPSQ_STAG_STAGLEN_SHIFT)\n-\n-#define I40IW_CQPSQ_STAG_PDID_SHIFT 48\n-#define I40IW_CQPSQ_STAG_PDID_MASK (0x7fffULL << I40IW_CQPSQ_STAG_PDID_SHIFT)\n-\n-#define I40IW_CQPSQ_STAG_KEY_SHIFT 0\n-#define I40IW_CQPSQ_STAG_KEY_MASK (0xffUL << I40IW_CQPSQ_STAG_KEY_SHIFT)\n-\n-#define I40IW_CQPSQ_STAG_IDX_SHIFT 8\n-#define I40IW_CQPSQ_STAG_IDX_MASK (0xffffffUL << I40IW_CQPSQ_STAG_IDX_SHIFT)\n-\n-#define I40IW_CQPSQ_STAG_PARENTSTAGIDX_SHIFT 32\n-#define I40IW_CQPSQ_STAG_PARENTSTAGIDX_MASK \\\n-\t(0xffffffULL << I40IW_CQPSQ_STAG_PARENTSTAGIDX_SHIFT)\n-\n-#define I40IW_CQPSQ_STAG_MR_SHIFT 43\n-#define I40IW_CQPSQ_STAG_MR_MASK (1ULL << I40IW_CQPSQ_STAG_MR_SHIFT)\n-\n-#define I40IW_CQPSQ_STAG_LPBLSIZE_SHIFT I40IW_CQPSQ_CQ_LPBLSIZE_SHIFT\n-#define I40IW_CQPSQ_STAG_LPBLSIZE_MASK I40IW_CQPSQ_CQ_LPBLSIZE_MASK\n-\n-#define I40IW_CQPSQ_STAG_HPAGESIZE_SHIFT 46\n-#define I40IW_CQPSQ_STAG_HPAGESIZE_MASK \\\n-\t(1ULL << I40IW_CQPSQ_STAG_HPAGESIZE_SHIFT)\n-\n-#define I40IW_CQPSQ_STAG_ARIGHTS_SHIFT 48\n-#define I40IW_CQPSQ_STAG_ARIGHTS_MASK \\\n-\t(0x1fULL << I40IW_CQPSQ_STAG_ARIGHTS_SHIFT)\n-\n-#define I40IW_CQPSQ_STAG_REMACCENABLED_SHIFT 53\n-#define I40IW_CQPSQ_STAG_REMACCENABLED_MASK \\\n-\t(1ULL << I40IW_CQPSQ_STAG_REMACCENABLED_SHIFT)\n-\n-#define I40IW_CQPSQ_STAG_VABASEDTO_SHIFT 59\n-#define I40IW_CQPSQ_STAG_VABASEDTO_MASK \\\n-\t(1ULL << I40IW_CQPSQ_STAG_VABASEDTO_SHIFT)\n-\n-#define I40IW_CQPSQ_STAG_USEHMCFNIDX_SHIFT 60\n-#define I40IW_CQPSQ_STAG_USEHMCFNIDX_MASK \\\n-\t(1ULL << I40IW_CQPSQ_STAG_USEHMCFNIDX_SHIFT)\n-\n-#define I40IW_CQPSQ_STAG_USEPFRID_SHIFT 61\n-#define I40IW_CQPSQ_STAG_USEPFRID_MASK \\\n-\t(1ULL << I40IW_CQPSQ_STAG_USEPFRID_SHIFT)\n-\n-#define I40IW_CQPSQ_STAG_PBA_SHIFT I40IW_CQPHC_QPCTX_SHIFT\n-#define I40IW_CQPSQ_STAG_PBA_MASK I40IW_CQPHC_QPCTX_MASK\n-\n-#define I40IW_CQPSQ_STAG_HMCFNIDX_SHIFT 0\n-#define I40IW_CQPSQ_STAG_HMCFNIDX_MASK \\\n-\t(0x3fUL << I40IW_CQPSQ_STAG_HMCFNIDX_SHIFT)\n-\n-#define I40IW_CQPSQ_STAG_FIRSTPMPBLIDX_SHIFT 0\n-#define I40IW_CQPSQ_STAG_FIRSTPMPBLIDX_MASK \\\n-\t(0xfffffffUL << I40IW_CQPSQ_STAG_FIRSTPMPBLIDX_SHIFT)\n-\n-/* Query stag */\n-#define I40IW_CQPSQ_QUERYSTAG_IDX_SHIFT I40IW_CQPSQ_STAG_IDX_SHIFT\n-#define I40IW_CQPSQ_QUERYSTAG_IDX_MASK I40IW_CQPSQ_STAG_IDX_MASK\n-\n-/* Allocate Local IP Address Entry */\n-\n-/* Manage Local IP Address Table - MLIPA */\n-#define I40IW_CQPSQ_MLIPA_IPV6LO_SHIFT I40IW_CQPHC_QPCTX_SHIFT\n-#define I40IW_CQPSQ_MLIPA_IPV6LO_MASK I40IW_CQPHC_QPCTX_MASK\n-\n-#define I40IW_CQPSQ_MLIPA_IPV6HI_SHIFT I40IW_CQPHC_QPCTX_SHIFT\n-#define I40IW_CQPSQ_MLIPA_IPV6HI_MASK I40IW_CQPHC_QPCTX_MASK\n-\n-#define I40IW_CQPSQ_MLIPA_IPV4_SHIFT 0\n-#define I40IW_CQPSQ_MLIPA_IPV4_MASK \\\n-\t(0xffffffffUL << I40IW_CQPSQ_MLIPA_IPV4_SHIFT)\n-\n-#define I40IW_CQPSQ_MLIPA_IPTABLEIDX_SHIFT 0\n-#define I40IW_CQPSQ_MLIPA_IPTABLEIDX_MASK \\\n-\t(0x3fUL << I40IW_CQPSQ_MLIPA_IPTABLEIDX_SHIFT)\n-\n-#define I40IW_CQPSQ_MLIPA_IPV4VALID_SHIFT 42\n-#define I40IW_CQPSQ_MLIPA_IPV4VALID_MASK \\\n-\t(1ULL << I40IW_CQPSQ_MLIPA_IPV4VALID_SHIFT)\n-\n-#define I40IW_CQPSQ_MLIPA_IPV6VALID_SHIFT 43\n-#define I40IW_CQPSQ_MLIPA_IPV6VALID_MASK \\\n-\t(1ULL << I40IW_CQPSQ_MLIPA_IPV6VALID_SHIFT)\n-\n-#define I40IW_CQPSQ_MLIPA_FREEENTRY_SHIFT 62\n-#define I40IW_CQPSQ_MLIPA_FREEENTRY_MASK \\\n-\t(1ULL << I40IW_CQPSQ_MLIPA_FREEENTRY_SHIFT)\n-\n-#define I40IW_CQPSQ_MLIPA_IGNORE_REF_CNT_SHIFT 61\n-#define 
I40IW_CQPSQ_MLIPA_IGNORE_REF_CNT_MASK \\\n-\t(1ULL << I40IW_CQPSQ_MLIPA_IGNORE_REF_CNT_SHIFT)\n-\n-#define I40IW_CQPSQ_MLIPA_MAC0_SHIFT 0\n-#define I40IW_CQPSQ_MLIPA_MAC0_MASK (0xffUL << I40IW_CQPSQ_MLIPA_MAC0_SHIFT)\n-\n-#define I40IW_CQPSQ_MLIPA_MAC1_SHIFT 8\n-#define I40IW_CQPSQ_MLIPA_MAC1_MASK (0xffUL << I40IW_CQPSQ_MLIPA_MAC1_SHIFT)\n-\n-#define I40IW_CQPSQ_MLIPA_MAC2_SHIFT 16\n-#define I40IW_CQPSQ_MLIPA_MAC2_MASK (0xffUL << I40IW_CQPSQ_MLIPA_MAC2_SHIFT)\n-\n-#define I40IW_CQPSQ_MLIPA_MAC3_SHIFT 24\n-#define I40IW_CQPSQ_MLIPA_MAC3_MASK (0xffUL << I40IW_CQPSQ_MLIPA_MAC3_SHIFT)\n-\n-#define I40IW_CQPSQ_MLIPA_MAC4_SHIFT 32\n-#define I40IW_CQPSQ_MLIPA_MAC4_MASK (0xffULL << I40IW_CQPSQ_MLIPA_MAC4_SHIFT)\n-\n-#define I40IW_CQPSQ_MLIPA_MAC5_SHIFT 40\n-#define I40IW_CQPSQ_MLIPA_MAC5_MASK (0xffULL << I40IW_CQPSQ_MLIPA_MAC5_SHIFT)\n-\n-/* Manage ARP Table - MAT */\n-#define I40IW_CQPSQ_MAT_REACHMAX_SHIFT 0\n-#define I40IW_CQPSQ_MAT_REACHMAX_MASK \\\n-\t(0xffffffffUL << I40IW_CQPSQ_MAT_REACHMAX_SHIFT)\n-\n-#define I40IW_CQPSQ_MAT_MACADDR_SHIFT 0\n-#define I40IW_CQPSQ_MAT_MACADDR_MASK \\\n-\t(0xffffffffffffULL << I40IW_CQPSQ_MAT_MACADDR_SHIFT)\n-\n-#define I40IW_CQPSQ_MAT_ARPENTRYIDX_SHIFT 0\n-#define I40IW_CQPSQ_MAT_ARPENTRYIDX_MASK \\\n-\t(0xfffUL << I40IW_CQPSQ_MAT_ARPENTRYIDX_SHIFT)\n-\n-#define I40IW_CQPSQ_MAT_ENTRYVALID_SHIFT 42\n-#define I40IW_CQPSQ_MAT_ENTRYVALID_MASK \\\n-\t(1ULL << I40IW_CQPSQ_MAT_ENTRYVALID_SHIFT)\n-\n-#define I40IW_CQPSQ_MAT_PERMANENT_SHIFT 43\n-#define I40IW_CQPSQ_MAT_PERMANENT_MASK \\\n-\t(1ULL << I40IW_CQPSQ_MAT_PERMANENT_SHIFT)\n-\n-#define I40IW_CQPSQ_MAT_QUERY_SHIFT 44\n-#define I40IW_CQPSQ_MAT_QUERY_MASK (1ULL << I40IW_CQPSQ_MAT_QUERY_SHIFT)\n-\n-/* Manage VF PBLE Backing Pages - MVPBP*/\n-#define I40IW_CQPSQ_MVPBP_PD_ENTRY_CNT_SHIFT 0\n-#define I40IW_CQPSQ_MVPBP_PD_ENTRY_CNT_MASK \\\n-\t(0x3ffULL << I40IW_CQPSQ_MVPBP_PD_ENTRY_CNT_SHIFT)\n-\n-#define I40IW_CQPSQ_MVPBP_FIRST_PD_INX_SHIFT 16\n-#define I40IW_CQPSQ_MVPBP_FIRST_PD_INX_MASK \\\n-\t(0x1ffULL << I40IW_CQPSQ_MVPBP_FIRST_PD_INX_SHIFT)\n-\n-#define I40IW_CQPSQ_MVPBP_SD_INX_SHIFT 32\n-#define I40IW_CQPSQ_MVPBP_SD_INX_MASK \\\n-\t(0xfffULL << I40IW_CQPSQ_MVPBP_SD_INX_SHIFT)\n-\n-#define I40IW_CQPSQ_MVPBP_INV_PD_ENT_SHIFT 62\n-#define I40IW_CQPSQ_MVPBP_INV_PD_ENT_MASK \\\n-\t(0x1ULL << I40IW_CQPSQ_MVPBP_INV_PD_ENT_SHIFT)\n-\n-#define I40IW_CQPSQ_MVPBP_PD_PLPBA_SHIFT 3\n-#define I40IW_CQPSQ_MVPBP_PD_PLPBA_MASK \\\n-\t(0x1fffffffffffffffULL << I40IW_CQPSQ_MVPBP_PD_PLPBA_SHIFT)\n-\n-/* Manage Push Page - MPP */\n-#define I40IW_INVALID_PUSH_PAGE_INDEX 0xffff\n-\n-#define I40IW_CQPSQ_MPP_QS_HANDLE_SHIFT 0\n-#define I40IW_CQPSQ_MPP_QS_HANDLE_MASK (0xffffUL << \\\n-\t\t\t\t\tI40IW_CQPSQ_MPP_QS_HANDLE_SHIFT)\n-\n-#define I40IW_CQPSQ_MPP_PPIDX_SHIFT 0\n-#define I40IW_CQPSQ_MPP_PPIDX_MASK (0x3ffUL << I40IW_CQPSQ_MPP_PPIDX_SHIFT)\n-\n-#define I40IW_CQPSQ_MPP_FREE_PAGE_SHIFT 62\n-#define I40IW_CQPSQ_MPP_FREE_PAGE_MASK (1ULL << I40IW_CQPSQ_MPP_FREE_PAGE_SHIFT)\n-\n-/* Upload Context - UCTX */\n-#define I40IW_CQPSQ_UCTX_QPCTXADDR_SHIFT I40IW_CQPHC_QPCTX_SHIFT\n-#define I40IW_CQPSQ_UCTX_QPCTXADDR_MASK I40IW_CQPHC_QPCTX_MASK\n-\n-#define I40IW_CQPSQ_UCTX_QPID_SHIFT 0\n-#define I40IW_CQPSQ_UCTX_QPID_MASK (0x3ffffUL << I40IW_CQPSQ_UCTX_QPID_SHIFT)\n-\n-#define I40IW_CQPSQ_UCTX_QPTYPE_SHIFT 48\n-#define I40IW_CQPSQ_UCTX_QPTYPE_MASK (0xfULL << I40IW_CQPSQ_UCTX_QPTYPE_SHIFT)\n-\n-#define I40IW_CQPSQ_UCTX_RAWFORMAT_SHIFT 61\n-#define I40IW_CQPSQ_UCTX_RAWFORMAT_MASK \\\n-\t(1ULL << I40IW_CQPSQ_UCTX_RAWFORMAT_SHIFT)\n-\n-#define 
I40IW_CQPSQ_UCTX_FREEZEQP_SHIFT 62\n-#define I40IW_CQPSQ_UCTX_FREEZEQP_MASK \\\n-\t(1ULL << I40IW_CQPSQ_UCTX_FREEZEQP_SHIFT)\n-\n-/* Manage HMC PM Function Table - MHMC */\n-#define I40IW_CQPSQ_MHMC_VFIDX_SHIFT 0\n-#define I40IW_CQPSQ_MHMC_VFIDX_MASK (0x7fUL << I40IW_CQPSQ_MHMC_VFIDX_SHIFT)\n-\n-#define I40IW_CQPSQ_MHMC_FREEPMFN_SHIFT 62\n-#define I40IW_CQPSQ_MHMC_FREEPMFN_MASK \\\n-\t(1ULL << I40IW_CQPSQ_MHMC_FREEPMFN_SHIFT)\n-\n-/* Set HMC Resource Profile - SHMCRP */\n-#define I40IW_CQPSQ_SHMCRP_HMC_PROFILE_SHIFT 0\n-#define I40IW_CQPSQ_SHMCRP_HMC_PROFILE_MASK \\\n-\t(0x7ULL << I40IW_CQPSQ_SHMCRP_HMC_PROFILE_SHIFT)\n-#define I40IW_CQPSQ_SHMCRP_VFNUM_SHIFT 32\n-#define I40IW_CQPSQ_SHMCRP_VFNUM_MASK (0x3fULL << I40IW_CQPSQ_SHMCRP_VFNUM_SHIFT)\n-\n-/* Create/Destroy CEQ */\n-#define I40IW_CQPSQ_CEQ_CEQSIZE_SHIFT 0\n-#define I40IW_CQPSQ_CEQ_CEQSIZE_MASK \\\n-\t(0x1ffffUL << I40IW_CQPSQ_CEQ_CEQSIZE_SHIFT)\n-\n-#define I40IW_CQPSQ_CEQ_CEQID_SHIFT 0\n-#define I40IW_CQPSQ_CEQ_CEQID_MASK (0x7fUL << I40IW_CQPSQ_CEQ_CEQID_SHIFT)\n-\n-#define I40IW_CQPSQ_CEQ_LPBLSIZE_SHIFT I40IW_CQPSQ_CQ_LPBLSIZE_SHIFT\n-#define I40IW_CQPSQ_CEQ_LPBLSIZE_MASK I40IW_CQPSQ_CQ_LPBLSIZE_MASK\n-\n-#define I40IW_CQPSQ_CEQ_VMAP_SHIFT 47\n-#define I40IW_CQPSQ_CEQ_VMAP_MASK (1ULL << I40IW_CQPSQ_CEQ_VMAP_SHIFT)\n-\n-#define I40IW_CQPSQ_CEQ_FIRSTPMPBLIDX_SHIFT 0\n-#define I40IW_CQPSQ_CEQ_FIRSTPMPBLIDX_MASK \\\n-\t(0xfffffffUL << I40IW_CQPSQ_CEQ_FIRSTPMPBLIDX_SHIFT)\n-\n-/* Create/Destroy AEQ */\n-#define I40IW_CQPSQ_AEQ_AEQECNT_SHIFT 0\n-#define I40IW_CQPSQ_AEQ_AEQECNT_MASK \\\n-\t(0x7ffffUL << I40IW_CQPSQ_AEQ_AEQECNT_SHIFT)\n-\n-#define I40IW_CQPSQ_AEQ_LPBLSIZE_SHIFT I40IW_CQPSQ_CQ_LPBLSIZE_SHIFT\n-#define I40IW_CQPSQ_AEQ_LPBLSIZE_MASK I40IW_CQPSQ_CQ_LPBLSIZE_MASK\n-\n-#define I40IW_CQPSQ_AEQ_VMAP_SHIFT 47\n-#define I40IW_CQPSQ_AEQ_VMAP_MASK (1ULL << I40IW_CQPSQ_AEQ_VMAP_SHIFT)\n-\n-#define I40IW_CQPSQ_AEQ_FIRSTPMPBLIDX_SHIFT 0\n-#define I40IW_CQPSQ_AEQ_FIRSTPMPBLIDX_MASK \\\n-\t(0xfffffffUL << I40IW_CQPSQ_AEQ_FIRSTPMPBLIDX_SHIFT)\n-\n-/* Commit FPM Values - CFPM */\n-#define I40IW_CQPSQ_CFPM_HMCFNID_SHIFT 0\n-#define I40IW_CQPSQ_CFPM_HMCFNID_MASK (0x3fUL << I40IW_CQPSQ_CFPM_HMCFNID_SHIFT)\n-\n-/* Flush WQEs - FWQE */\n-#define I40IW_CQPSQ_FWQE_AECODE_SHIFT 0\n-#define I40IW_CQPSQ_FWQE_AECODE_MASK (0xffffUL << I40IW_CQPSQ_FWQE_AECODE_SHIFT)\n-\n-#define I40IW_CQPSQ_FWQE_AESOURCE_SHIFT 16\n-#define I40IW_CQPSQ_FWQE_AESOURCE_MASK \\\n-\t(0xfUL << I40IW_CQPSQ_FWQE_AESOURCE_SHIFT)\n-\n-#define I40IW_CQPSQ_FWQE_RQMNERR_SHIFT 0\n-#define I40IW_CQPSQ_FWQE_RQMNERR_MASK \\\n-\t(0xffffUL << I40IW_CQPSQ_FWQE_RQMNERR_SHIFT)\n-\n-#define I40IW_CQPSQ_FWQE_RQMJERR_SHIFT 16\n-#define I40IW_CQPSQ_FWQE_RQMJERR_MASK \\\n-\t(0xffffUL << I40IW_CQPSQ_FWQE_RQMJERR_SHIFT)\n-\n-#define I40IW_CQPSQ_FWQE_SQMNERR_SHIFT 32\n-#define I40IW_CQPSQ_FWQE_SQMNERR_MASK \\\n-\t(0xffffULL << I40IW_CQPSQ_FWQE_SQMNERR_SHIFT)\n-\n-#define I40IW_CQPSQ_FWQE_SQMJERR_SHIFT 48\n-#define I40IW_CQPSQ_FWQE_SQMJERR_MASK \\\n-\t(0xffffULL << I40IW_CQPSQ_FWQE_SQMJERR_SHIFT)\n-\n-#define I40IW_CQPSQ_FWQE_QPID_SHIFT 0\n-#define I40IW_CQPSQ_FWQE_QPID_MASK (0x3ffffULL << I40IW_CQPSQ_FWQE_QPID_SHIFT)\n-\n-#define I40IW_CQPSQ_FWQE_GENERATE_AE_SHIFT 59\n-#define I40IW_CQPSQ_FWQE_GENERATE_AE_MASK (1ULL << \\\n-\t\t\t\t\t I40IW_CQPSQ_FWQE_GENERATE_AE_SHIFT)\n-\n-#define I40IW_CQPSQ_FWQE_USERFLCODE_SHIFT 60\n-#define I40IW_CQPSQ_FWQE_USERFLCODE_MASK \\\n-\t(1ULL << I40IW_CQPSQ_FWQE_USERFLCODE_SHIFT)\n-\n-#define I40IW_CQPSQ_FWQE_FLUSHSQ_SHIFT 61\n-#define I40IW_CQPSQ_FWQE_FLUSHSQ_MASK 
(1ULL << I40IW_CQPSQ_FWQE_FLUSHSQ_SHIFT)\n-\n-#define I40IW_CQPSQ_FWQE_FLUSHRQ_SHIFT 62\n-#define I40IW_CQPSQ_FWQE_FLUSHRQ_MASK (1ULL << I40IW_CQPSQ_FWQE_FLUSHRQ_SHIFT)\n-\n-/* Manage Accelerated Port Table - MAPT */\n-#define I40IW_CQPSQ_MAPT_PORT_SHIFT 0\n-#define I40IW_CQPSQ_MAPT_PORT_MASK (0xffffUL << I40IW_CQPSQ_MAPT_PORT_SHIFT)\n-\n-#define I40IW_CQPSQ_MAPT_ADDPORT_SHIFT 62\n-#define I40IW_CQPSQ_MAPT_ADDPORT_MASK (1ULL << I40IW_CQPSQ_MAPT_ADDPORT_SHIFT)\n-\n-/* Update Protocol Engine SDs */\n-#define I40IW_CQPSQ_UPESD_SDCMD_SHIFT 0\n-#define I40IW_CQPSQ_UPESD_SDCMD_MASK (0xffffffffUL << I40IW_CQPSQ_UPESD_SDCMD_SHIFT)\n-\n-#define I40IW_CQPSQ_UPESD_SDDATALOW_SHIFT 0\n-#define I40IW_CQPSQ_UPESD_SDDATALOW_MASK \\\n-\t(0xffffffffUL << I40IW_CQPSQ_UPESD_SDDATALOW_SHIFT)\n-\n-#define I40IW_CQPSQ_UPESD_SDDATAHI_SHIFT 32\n-#define I40IW_CQPSQ_UPESD_SDDATAHI_MASK \\\n-\t(0xffffffffULL << I40IW_CQPSQ_UPESD_SDDATAHI_SHIFT)\n-#define I40IW_CQPSQ_UPESD_HMCFNID_SHIFT 0\n-#define I40IW_CQPSQ_UPESD_HMCFNID_MASK \\\n-\t(0x3fUL << I40IW_CQPSQ_UPESD_HMCFNID_SHIFT)\n-\n-#define I40IW_CQPSQ_UPESD_ENTRY_VALID_SHIFT 63\n-#define I40IW_CQPSQ_UPESD_ENTRY_VALID_MASK \\\n-\t((u64)1 << I40IW_CQPSQ_UPESD_ENTRY_VALID_SHIFT)\n-\n-#define I40IW_CQPSQ_UPESD_ENTRY_COUNT_SHIFT 0\n-#define I40IW_CQPSQ_UPESD_ENTRY_COUNT_MASK \\\n-\t(0xfUL << I40IW_CQPSQ_UPESD_ENTRY_COUNT_SHIFT)\n-\n-#define I40IW_CQPSQ_UPESD_SKIP_ENTRY_SHIFT 7\n-#define I40IW_CQPSQ_UPESD_SKIP_ENTRY_MASK \\\n-\t(0x1UL << I40IW_CQPSQ_UPESD_SKIP_ENTRY_SHIFT)\n-\n-/* Suspend QP */\n-#define I40IW_CQPSQ_SUSPENDQP_QPID_SHIFT 0\n-#define I40IW_CQPSQ_SUSPENDQP_QPID_MASK (0x3FFFFUL)\n-/* I40IWCQ_QPID_MASK */\n-\n-/* Resume QP */\n-#define I40IW_CQPSQ_RESUMEQP_QSHANDLE_SHIFT 0\n-#define I40IW_CQPSQ_RESUMEQP_QSHANDLE_MASK \\\n-\t(0xffffffffUL << I40IW_CQPSQ_RESUMEQP_QSHANDLE_SHIFT)\n-\n-#define I40IW_CQPSQ_RESUMEQP_QPID_SHIFT 0\n-#define I40IW_CQPSQ_RESUMEQP_QPID_MASK (0x3FFFFUL)\n-/* I40IWCQ_QPID_MASK */\n-\n-/* IW QP Context */\n-#define I40IWQPC_DDP_VER_SHIFT 0\n-#define I40IWQPC_DDP_VER_MASK (3UL << I40IWQPC_DDP_VER_SHIFT)\n-\n-#define I40IWQPC_SNAP_SHIFT 2\n-#define I40IWQPC_SNAP_MASK (1UL << I40IWQPC_SNAP_SHIFT)\n-\n-#define I40IWQPC_IPV4_SHIFT 3\n-#define I40IWQPC_IPV4_MASK (1UL << I40IWQPC_IPV4_SHIFT)\n-\n-#define I40IWQPC_NONAGLE_SHIFT 4\n-#define I40IWQPC_NONAGLE_MASK (1UL << I40IWQPC_NONAGLE_SHIFT)\n-\n-#define I40IWQPC_INSERTVLANTAG_SHIFT 5\n-#define I40IWQPC_INSERTVLANTAG_MASK (1 << I40IWQPC_INSERTVLANTAG_SHIFT)\n-\n-#define I40IWQPC_USESRQ_SHIFT 6\n-#define I40IWQPC_USESRQ_MASK (1UL << I40IWQPC_USESRQ_SHIFT)\n-\n-#define I40IWQPC_TIMESTAMP_SHIFT 7\n-#define I40IWQPC_TIMESTAMP_MASK (1UL << I40IWQPC_TIMESTAMP_SHIFT)\n-\n-#define I40IWQPC_RQWQESIZE_SHIFT 8\n-#define I40IWQPC_RQWQESIZE_MASK (3UL << I40IWQPC_RQWQESIZE_SHIFT)\n-\n-#define I40IWQPC_INSERTL2TAG2_SHIFT 11\n-#define I40IWQPC_INSERTL2TAG2_MASK (1UL << I40IWQPC_INSERTL2TAG2_SHIFT)\n-\n-#define I40IWQPC_LIMIT_SHIFT 12\n-#define I40IWQPC_LIMIT_MASK (3UL << I40IWQPC_LIMIT_SHIFT)\n-\n-#define I40IWQPC_DROPOOOSEG_SHIFT 15\n-#define I40IWQPC_DROPOOOSEG_MASK (1UL << I40IWQPC_DROPOOOSEG_SHIFT)\n-\n-#define I40IWQPC_DUPACK_THRESH_SHIFT 16\n-#define I40IWQPC_DUPACK_THRESH_MASK (7UL << I40IWQPC_DUPACK_THRESH_SHIFT)\n-\n-#define I40IWQPC_ERR_RQ_IDX_VALID_SHIFT 19\n-#define I40IWQPC_ERR_RQ_IDX_VALID_MASK (1UL << I40IWQPC_ERR_RQ_IDX_VALID_SHIFT)\n-\n-#define I40IWQPC_DIS_VLAN_CHECKS_SHIFT 19\n-#define I40IWQPC_DIS_VLAN_CHECKS_MASK (7UL << I40IWQPC_DIS_VLAN_CHECKS_SHIFT)\n-\n-#define I40IWQPC_RCVTPHEN_SHIFT 
28\n-#define I40IWQPC_RCVTPHEN_MASK (1UL << I40IWQPC_RCVTPHEN_SHIFT)\n-\n-#define I40IWQPC_XMITTPHEN_SHIFT 29\n-#define I40IWQPC_XMITTPHEN_MASK (1ULL << I40IWQPC_XMITTPHEN_SHIFT)\n-\n-#define I40IWQPC_RQTPHEN_SHIFT 30\n-#define I40IWQPC_RQTPHEN_MASK (1UL << I40IWQPC_RQTPHEN_SHIFT)\n-\n-#define I40IWQPC_SQTPHEN_SHIFT 31\n-#define I40IWQPC_SQTPHEN_MASK (1ULL << I40IWQPC_SQTPHEN_SHIFT)\n-\n-#define I40IWQPC_PPIDX_SHIFT 32\n-#define I40IWQPC_PPIDX_MASK (0x3ffULL << I40IWQPC_PPIDX_SHIFT)\n-\n-#define I40IWQPC_PMENA_SHIFT 47\n-#define I40IWQPC_PMENA_MASK (1ULL << I40IWQPC_PMENA_SHIFT)\n-\n-#define I40IWQPC_RDMAP_VER_SHIFT 62\n-#define I40IWQPC_RDMAP_VER_MASK (3ULL << I40IWQPC_RDMAP_VER_SHIFT)\n-\n-#define I40IWQPC_SQADDR_SHIFT I40IW_CQPHC_QPCTX_SHIFT\n-#define I40IWQPC_SQADDR_MASK I40IW_CQPHC_QPCTX_MASK\n-\n-#define I40IWQPC_RQADDR_SHIFT I40IW_CQPHC_QPCTX_SHIFT\n-#define I40IWQPC_RQADDR_MASK I40IW_CQPHC_QPCTX_MASK\n-\n-#define I40IWQPC_TTL_SHIFT 0\n-#define I40IWQPC_TTL_MASK (0xffUL << I40IWQPC_TTL_SHIFT)\n-\n-#define I40IWQPC_RQSIZE_SHIFT 8\n-#define I40IWQPC_RQSIZE_MASK (0xfUL << I40IWQPC_RQSIZE_SHIFT)\n-\n-#define I40IWQPC_SQSIZE_SHIFT 12\n-#define I40IWQPC_SQSIZE_MASK (0xfUL << I40IWQPC_SQSIZE_SHIFT)\n-\n-#define I40IWQPC_SRCMACADDRIDX_SHIFT 16\n-#define I40IWQPC_SRCMACADDRIDX_MASK (0x3fUL << I40IWQPC_SRCMACADDRIDX_SHIFT)\n-\n-#define I40IWQPC_AVOIDSTRETCHACK_SHIFT 23\n-#define I40IWQPC_AVOIDSTRETCHACK_MASK (1UL << I40IWQPC_AVOIDSTRETCHACK_SHIFT)\n-\n-#define I40IWQPC_TOS_SHIFT 24\n-#define I40IWQPC_TOS_MASK (0xffUL << I40IWQPC_TOS_SHIFT)\n-\n-#define I40IWQPC_SRCPORTNUM_SHIFT 32\n-#define I40IWQPC_SRCPORTNUM_MASK (0xffffULL << I40IWQPC_SRCPORTNUM_SHIFT)\n-\n-#define I40IWQPC_DESTPORTNUM_SHIFT 48\n-#define I40IWQPC_DESTPORTNUM_MASK (0xffffULL << I40IWQPC_DESTPORTNUM_SHIFT)\n-\n-#define I40IWQPC_DESTIPADDR0_SHIFT 32\n-#define I40IWQPC_DESTIPADDR0_MASK \\\n-\t(0xffffffffULL << I40IWQPC_DESTIPADDR0_SHIFT)\n-\n-#define I40IWQPC_DESTIPADDR1_SHIFT 0\n-#define I40IWQPC_DESTIPADDR1_MASK \\\n-\t(0xffffffffULL << I40IWQPC_DESTIPADDR1_SHIFT)\n-\n-#define I40IWQPC_DESTIPADDR2_SHIFT 32\n-#define I40IWQPC_DESTIPADDR2_MASK \\\n-\t(0xffffffffULL << I40IWQPC_DESTIPADDR2_SHIFT)\n-\n-#define I40IWQPC_DESTIPADDR3_SHIFT 0\n-#define I40IWQPC_DESTIPADDR3_MASK \\\n-\t(0xffffffffULL << I40IWQPC_DESTIPADDR3_SHIFT)\n-\n-#define I40IWQPC_SNDMSS_SHIFT 16\n-#define I40IWQPC_SNDMSS_MASK (0x3fffUL << I40IWQPC_SNDMSS_SHIFT)\n-\n-#define I40IW_UDA_QPC_MAXFRAMESIZE_SHIFT 16\n-#define I40IW_UDA_QPC_MAXFRAMESIZE_MASK (0x3fffUL << I40IW_UDA_QPC_MAXFRAMESIZE_SHIFT)\n-\n-#define I40IWQPC_VLANTAG_SHIFT 32\n-#define I40IWQPC_VLANTAG_MASK (0xffffULL << I40IWQPC_VLANTAG_SHIFT)\n-\n-#define I40IWQPC_ARPIDX_SHIFT 48\n-#define I40IWQPC_ARPIDX_MASK (0xffffULL << I40IWQPC_ARPIDX_SHIFT)\n-\n-#define I40IWQPC_FLOWLABEL_SHIFT 0\n-#define I40IWQPC_FLOWLABEL_MASK (0xfffffUL << I40IWQPC_FLOWLABEL_SHIFT)\n-\n-#define I40IWQPC_WSCALE_SHIFT 20\n-#define I40IWQPC_WSCALE_MASK (1UL << I40IWQPC_WSCALE_SHIFT)\n-\n-#define I40IWQPC_KEEPALIVE_SHIFT 21\n-#define I40IWQPC_KEEPALIVE_MASK (1UL << I40IWQPC_KEEPALIVE_SHIFT)\n-\n-#define I40IWQPC_IGNORE_TCP_OPT_SHIFT 22\n-#define I40IWQPC_IGNORE_TCP_OPT_MASK (1UL << I40IWQPC_IGNORE_TCP_OPT_SHIFT)\n-\n-#define I40IWQPC_IGNORE_TCP_UNS_OPT_SHIFT 23\n-#define I40IWQPC_IGNORE_TCP_UNS_OPT_MASK \\\n-\t(1UL << I40IWQPC_IGNORE_TCP_UNS_OPT_SHIFT)\n-\n-#define I40IWQPC_TCPSTATE_SHIFT 28\n-#define I40IWQPC_TCPSTATE_MASK (0xfUL << I40IWQPC_TCPSTATE_SHIFT)\n-\n-#define I40IWQPC_RCVSCALE_SHIFT 32\n-#define 
I40IWQPC_RCVSCALE_MASK (0xfULL << I40IWQPC_RCVSCALE_SHIFT)\n-\n-#define I40IWQPC_SNDSCALE_SHIFT 40\n-#define I40IWQPC_SNDSCALE_MASK (0xfULL << I40IWQPC_SNDSCALE_SHIFT)\n-\n-#define I40IWQPC_PDIDX_SHIFT 48\n-#define I40IWQPC_PDIDX_MASK (0x7fffULL << I40IWQPC_PDIDX_SHIFT)\n-\n-#define I40IWQPC_KALIVE_TIMER_MAX_PROBES_SHIFT 16\n-#define I40IWQPC_KALIVE_TIMER_MAX_PROBES_MASK \\\n-\t(0xffUL << I40IWQPC_KALIVE_TIMER_MAX_PROBES_SHIFT)\n-\n-#define I40IWQPC_KEEPALIVE_INTERVAL_SHIFT 24\n-#define I40IWQPC_KEEPALIVE_INTERVAL_MASK \\\n-\t(0xffUL << I40IWQPC_KEEPALIVE_INTERVAL_SHIFT)\n-\n-#define I40IWQPC_TIMESTAMP_RECENT_SHIFT 0\n-#define I40IWQPC_TIMESTAMP_RECENT_MASK \\\n-\t(0xffffffffUL << I40IWQPC_TIMESTAMP_RECENT_SHIFT)\n-\n-#define I40IWQPC_TIMESTAMP_AGE_SHIFT 32\n-#define I40IWQPC_TIMESTAMP_AGE_MASK \\\n-\t(0xffffffffULL << I40IWQPC_TIMESTAMP_AGE_SHIFT)\n-\n-#define I40IWQPC_SNDNXT_SHIFT 0\n-#define I40IWQPC_SNDNXT_MASK (0xffffffffUL << I40IWQPC_SNDNXT_SHIFT)\n-\n-#define I40IWQPC_SNDWND_SHIFT 32\n-#define I40IWQPC_SNDWND_MASK (0xffffffffULL << I40IWQPC_SNDWND_SHIFT)\n-\n-#define I40IWQPC_RCVNXT_SHIFT 0\n-#define I40IWQPC_RCVNXT_MASK (0xffffffffUL << I40IWQPC_RCVNXT_SHIFT)\n-\n-#define I40IWQPC_RCVWND_SHIFT 32\n-#define I40IWQPC_RCVWND_MASK (0xffffffffULL << I40IWQPC_RCVWND_SHIFT)\n-\n-#define I40IWQPC_SNDMAX_SHIFT 0\n-#define I40IWQPC_SNDMAX_MASK (0xffffffffUL << I40IWQPC_SNDMAX_SHIFT)\n-\n-#define I40IWQPC_SNDUNA_SHIFT 32\n-#define I40IWQPC_SNDUNA_MASK (0xffffffffULL << I40IWQPC_SNDUNA_SHIFT)\n-\n-#define I40IWQPC_SRTT_SHIFT 0\n-#define I40IWQPC_SRTT_MASK (0xffffffffUL << I40IWQPC_SRTT_SHIFT)\n-\n-#define I40IWQPC_RTTVAR_SHIFT 32\n-#define I40IWQPC_RTTVAR_MASK (0xffffffffULL << I40IWQPC_RTTVAR_SHIFT)\n-\n-#define I40IWQPC_SSTHRESH_SHIFT 0\n-#define I40IWQPC_SSTHRESH_MASK (0xffffffffUL << I40IWQPC_SSTHRESH_SHIFT)\n-\n-#define I40IWQPC_CWND_SHIFT 32\n-#define I40IWQPC_CWND_MASK (0xffffffffULL << I40IWQPC_CWND_SHIFT)\n-\n-#define I40IWQPC_SNDWL1_SHIFT 0\n-#define I40IWQPC_SNDWL1_MASK (0xffffffffUL << I40IWQPC_SNDWL1_SHIFT)\n-\n-#define I40IWQPC_SNDWL2_SHIFT 32\n-#define I40IWQPC_SNDWL2_MASK (0xffffffffULL << I40IWQPC_SNDWL2_SHIFT)\n-\n-#define I40IWQPC_ERR_RQ_IDX_SHIFT 32\n-#define I40IWQPC_ERR_RQ_IDX_MASK (0x3fffULL << I40IWQPC_ERR_RQ_IDX_SHIFT)\n-\n-#define I40IWQPC_MAXSNDWND_SHIFT 0\n-#define I40IWQPC_MAXSNDWND_MASK (0xffffffffUL << I40IWQPC_MAXSNDWND_SHIFT)\n-\n-#define I40IWQPC_REXMIT_THRESH_SHIFT 48\n-#define I40IWQPC_REXMIT_THRESH_MASK (0x3fULL << I40IWQPC_REXMIT_THRESH_SHIFT)\n-\n-#define I40IWQPC_TXCQNUM_SHIFT 0\n-#define I40IWQPC_TXCQNUM_MASK (0x1ffffUL << I40IWQPC_TXCQNUM_SHIFT)\n-\n-#define I40IWQPC_RXCQNUM_SHIFT 32\n-#define I40IWQPC_RXCQNUM_MASK (0x1ffffULL << I40IWQPC_RXCQNUM_SHIFT)\n-\n-#define I40IWQPC_STAT_INDEX_SHIFT 0\n-#define I40IWQPC_STAT_INDEX_MASK (0x1fULL << I40IWQPC_STAT_INDEX_SHIFT)\n-\n-#define I40IWQPC_Q2ADDR_SHIFT 0\n-#define I40IWQPC_Q2ADDR_MASK (0xffffffffffffff00ULL << I40IWQPC_Q2ADDR_SHIFT)\n-\n-#define I40IWQPC_LASTBYTESENT_SHIFT 0\n-#define I40IWQPC_LASTBYTESENT_MASK (0xffUL << I40IWQPC_LASTBYTESENT_SHIFT)\n-\n-#define I40IWQPC_SRQID_SHIFT 32\n-#define I40IWQPC_SRQID_MASK (0xffULL << I40IWQPC_SRQID_SHIFT)\n-\n-#define I40IWQPC_ORDSIZE_SHIFT 0\n-#define I40IWQPC_ORDSIZE_MASK (0x7fUL << I40IWQPC_ORDSIZE_SHIFT)\n-\n-#define I40IWQPC_IRDSIZE_SHIFT 16\n-#define I40IWQPC_IRDSIZE_MASK (0x3UL << I40IWQPC_IRDSIZE_SHIFT)\n-\n-#define I40IWQPC_WRRDRSPOK_SHIFT 20\n-#define I40IWQPC_WRRDRSPOK_MASK (1UL << I40IWQPC_WRRDRSPOK_SHIFT)\n-\n-#define I40IWQPC_RDOK_SHIFT 
21\n-#define I40IWQPC_RDOK_MASK (1UL << I40IWQPC_RDOK_SHIFT)\n-\n-#define I40IWQPC_SNDMARKERS_SHIFT 22\n-#define I40IWQPC_SNDMARKERS_MASK (1UL << I40IWQPC_SNDMARKERS_SHIFT)\n-\n-#define I40IWQPC_BINDEN_SHIFT 23\n-#define I40IWQPC_BINDEN_MASK (1UL << I40IWQPC_BINDEN_SHIFT)\n-\n-#define I40IWQPC_FASTREGEN_SHIFT 24\n-#define I40IWQPC_FASTREGEN_MASK (1UL << I40IWQPC_FASTREGEN_SHIFT)\n-\n-#define I40IWQPC_PRIVEN_SHIFT 25\n-#define I40IWQPC_PRIVEN_MASK (1UL << I40IWQPC_PRIVEN_SHIFT)\n-\n-#define I40IWQPC_USESTATSINSTANCE_SHIFT 26\n-#define I40IWQPC_USESTATSINSTANCE_MASK (1UL << I40IWQPC_USESTATSINSTANCE_SHIFT)\n-\n-#define I40IWQPC_IWARPMODE_SHIFT 28\n-#define I40IWQPC_IWARPMODE_MASK (1UL << I40IWQPC_IWARPMODE_SHIFT)\n-\n-#define I40IWQPC_RCVMARKERS_SHIFT 29\n-#define I40IWQPC_RCVMARKERS_MASK (1UL << I40IWQPC_RCVMARKERS_SHIFT)\n-\n-#define I40IWQPC_ALIGNHDRS_SHIFT 30\n-#define I40IWQPC_ALIGNHDRS_MASK (1UL << I40IWQPC_ALIGNHDRS_SHIFT)\n-\n-#define I40IWQPC_RCVNOMPACRC_SHIFT 31\n-#define I40IWQPC_RCVNOMPACRC_MASK (1UL << I40IWQPC_RCVNOMPACRC_SHIFT)\n-\n-#define I40IWQPC_RCVMARKOFFSET_SHIFT 33\n-#define I40IWQPC_RCVMARKOFFSET_MASK (0x1ffULL << I40IWQPC_RCVMARKOFFSET_SHIFT)\n-\n-#define I40IWQPC_SNDMARKOFFSET_SHIFT 48\n-#define I40IWQPC_SNDMARKOFFSET_MASK (0x1ffULL << I40IWQPC_SNDMARKOFFSET_SHIFT)\n-\n-#define I40IWQPC_QPCOMPCTX_SHIFT I40IW_CQPHC_QPCTX_SHIFT\n-#define I40IWQPC_QPCOMPCTX_MASK I40IW_CQPHC_QPCTX_MASK\n-\n-#define I40IWQPC_SQTPHVAL_SHIFT 0\n-#define I40IWQPC_SQTPHVAL_MASK (0xffUL << I40IWQPC_SQTPHVAL_SHIFT)\n-\n-#define I40IWQPC_RQTPHVAL_SHIFT 8\n-#define I40IWQPC_RQTPHVAL_MASK (0xffUL << I40IWQPC_RQTPHVAL_SHIFT)\n-\n-#define I40IWQPC_QSHANDLE_SHIFT 16\n-#define I40IWQPC_QSHANDLE_MASK (0x3ffUL << I40IWQPC_QSHANDLE_SHIFT)\n-\n-#define I40IWQPC_EXCEPTION_LAN_QUEUE_SHIFT 32\n-#define I40IWQPC_EXCEPTION_LAN_QUEUE_MASK (0xfffULL << \\\n-\t\t\t\t\t I40IWQPC_EXCEPTION_LAN_QUEUE_SHIFT)\n-\n-#define I40IWQPC_LOCAL_IPADDR3_SHIFT 0\n-#define I40IWQPC_LOCAL_IPADDR3_MASK \\\n-\t(0xffffffffUL << I40IWQPC_LOCAL_IPADDR3_SHIFT)\n-\n-#define I40IWQPC_LOCAL_IPADDR2_SHIFT 32\n-#define I40IWQPC_LOCAL_IPADDR2_MASK \\\n-\t(0xffffffffULL << I40IWQPC_LOCAL_IPADDR2_SHIFT)\n-\n-#define I40IWQPC_LOCAL_IPADDR1_SHIFT 0\n-#define I40IWQPC_LOCAL_IPADDR1_MASK \\\n-\t(0xffffffffUL << I40IWQPC_LOCAL_IPADDR1_SHIFT)\n-\n-#define I40IWQPC_LOCAL_IPADDR0_SHIFT 32\n-#define I40IWQPC_LOCAL_IPADDR0_MASK \\\n-\t(0xffffffffULL << I40IWQPC_LOCAL_IPADDR0_SHIFT)\n-\n-/* wqe size considering 32 bytes per wqe*/\n-#define I40IW_QP_SW_MIN_WQSIZE 4\t\t/*in WRs*/\n-#define I40IW_SQ_RSVD 2\n-#define I40IW_RQ_RSVD 1\n-#define I40IW_MAX_QUANTAS_PER_WR 2\n-#define I40IW_QP_SW_MAX_SQ_QUANTAS 2048\n-#define I40IW_QP_SW_MAX_RQ_QUANTAS 16384\n-#define I40IW_MAX_QP_WRS ((I40IW_QP_SW_MAX_SQ_QUANTAS / I40IW_MAX_QUANTAS_PER_WR) - 1)\n-\n-#define I40IWQP_OP_RDMA_WRITE 0\n-#define I40IWQP_OP_RDMA_READ 1\n-#define I40IWQP_OP_RDMA_SEND 3\n-#define I40IWQP_OP_RDMA_SEND_INV 4\n-#define I40IWQP_OP_RDMA_SEND_SOL_EVENT 5\n-#define I40IWQP_OP_RDMA_SEND_SOL_EVENT_INV 6\n-#define I40IWQP_OP_BIND_MW 8\n-#define I40IWQP_OP_FAST_REGISTER 9\n-#define I40IWQP_OP_LOCAL_INVALIDATE 10\n-#define I40IWQP_OP_RDMA_READ_LOC_INV 11\n-#define I40IWQP_OP_NOP 12\n-\n-#define I40IW_RSVD_SHIFT 41\n-#define I40IW_RSVD_MASK (0x7fffULL << I40IW_RSVD_SHIFT)\n-\n-/* iwarp QP SQ WQE common fields */\n-#define I40IWQPSQ_OPCODE_SHIFT 32\n-#define I40IWQPSQ_OPCODE_MASK (0x3fULL << I40IWQPSQ_OPCODE_SHIFT)\n-\n-#define I40IWQPSQ_ADDFRAGCNT_SHIFT 38\n-#define I40IWQPSQ_ADDFRAGCNT_MASK (0x7ULL 
<< I40IWQPSQ_ADDFRAGCNT_SHIFT)\n-\n-#define I40IWQPSQ_PUSHWQE_SHIFT 56\n-#define I40IWQPSQ_PUSHWQE_MASK (1ULL << I40IWQPSQ_PUSHWQE_SHIFT)\n-\n-#define I40IWQPSQ_STREAMMODE_SHIFT 58\n-#define I40IWQPSQ_STREAMMODE_MASK (1ULL << I40IWQPSQ_STREAMMODE_SHIFT)\n-\n-#define I40IWQPSQ_WAITFORRCVPDU_SHIFT 59\n-#define I40IWQPSQ_WAITFORRCVPDU_MASK (1ULL << I40IWQPSQ_WAITFORRCVPDU_SHIFT)\n-\n-#define I40IWQPSQ_READFENCE_SHIFT 60\n-#define I40IWQPSQ_READFENCE_MASK (1ULL << I40IWQPSQ_READFENCE_SHIFT)\n-\n-#define I40IWQPSQ_LOCALFENCE_SHIFT 61\n-#define I40IWQPSQ_LOCALFENCE_MASK (1ULL << I40IWQPSQ_LOCALFENCE_SHIFT)\n-\n-#define I40IWQPSQ_SIGCOMPL_SHIFT 62\n-#define I40IWQPSQ_SIGCOMPL_MASK (1ULL << I40IWQPSQ_SIGCOMPL_SHIFT)\n-\n-#define I40IWQPSQ_VALID_SHIFT 63\n-#define I40IWQPSQ_VALID_MASK (1ULL << I40IWQPSQ_VALID_SHIFT)\n-\n-#define I40IWQPSQ_FRAG_TO_SHIFT I40IW_CQPHC_QPCTX_SHIFT\n-#define I40IWQPSQ_FRAG_TO_MASK I40IW_CQPHC_QPCTX_MASK\n-\n-#define I40IWQPSQ_FRAG_LEN_SHIFT 0\n-#define I40IWQPSQ_FRAG_LEN_MASK (0xffffffffUL << I40IWQPSQ_FRAG_LEN_SHIFT)\n-\n-#define I40IWQPSQ_FRAG_STAG_SHIFT 32\n-#define I40IWQPSQ_FRAG_STAG_MASK (0xffffffffULL << I40IWQPSQ_FRAG_STAG_SHIFT)\n-\n-#define I40IWQPSQ_REMSTAGINV_SHIFT 0\n-#define I40IWQPSQ_REMSTAGINV_MASK (0xffffffffUL << I40IWQPSQ_REMSTAGINV_SHIFT)\n-\n-#define I40IWQPSQ_INLINEDATAFLAG_SHIFT 57\n-#define I40IWQPSQ_INLINEDATAFLAG_MASK (1ULL << I40IWQPSQ_INLINEDATAFLAG_SHIFT)\n-\n-#define I40IWQPSQ_INLINEDATALEN_SHIFT 48\n-#define I40IWQPSQ_INLINEDATALEN_MASK \\\n-\t(0x7fULL << I40IWQPSQ_INLINEDATALEN_SHIFT)\n-\n-/* iwarp send with push mode */\n-#define I40IWQPSQ_WQDESCIDX_SHIFT 0\n-#define I40IWQPSQ_WQDESCIDX_MASK (0x3fffUL << I40IWQPSQ_WQDESCIDX_SHIFT)\n-\n-/* rdma write */\n-#define I40IWQPSQ_REMSTAG_SHIFT 0\n-#define I40IWQPSQ_REMSTAG_MASK (0xffffffffUL << I40IWQPSQ_REMSTAG_SHIFT)\n-\n-#define I40IWQPSQ_REMTO_SHIFT I40IW_CQPHC_QPCTX_SHIFT\n-#define I40IWQPSQ_REMTO_MASK I40IW_CQPHC_QPCTX_MASK\n-\n-/* memory window */\n-#define I40IWQPSQ_STAGRIGHTS_SHIFT 48\n-#define I40IWQPSQ_STAGRIGHTS_MASK (0x1fULL << I40IWQPSQ_STAGRIGHTS_SHIFT)\n-\n-#define I40IWQPSQ_VABASEDTO_SHIFT 53\n-#define I40IWQPSQ_VABASEDTO_MASK (1ULL << I40IWQPSQ_VABASEDTO_SHIFT)\n-\n-#define I40IWQPSQ_MWLEN_SHIFT I40IW_CQPHC_QPCTX_SHIFT\n-#define I40IWQPSQ_MWLEN_MASK I40IW_CQPHC_QPCTX_MASK\n-\n-#define I40IWQPSQ_PARENTMRSTAG_SHIFT 0\n-#define I40IWQPSQ_PARENTMRSTAG_MASK \\\n-\t(0xffffffffUL << I40IWQPSQ_PARENTMRSTAG_SHIFT)\n-\n-#define I40IWQPSQ_MWSTAG_SHIFT 32\n-#define I40IWQPSQ_MWSTAG_MASK (0xffffffffULL << I40IWQPSQ_MWSTAG_SHIFT)\n-\n-#define I40IWQPSQ_BASEVA_TO_FBO_SHIFT I40IW_CQPHC_QPCTX_SHIFT\n-#define I40IWQPSQ_BASEVA_TO_FBO_MASK I40IW_CQPHC_QPCTX_MASK\n-\n-/* Local Invalidate */\n-#define I40IWQPSQ_LOCSTAG_SHIFT 32\n-#define I40IWQPSQ_LOCSTAG_MASK (0xffffffffULL << I40IWQPSQ_LOCSTAG_SHIFT)\n-\n-/* Fast Register */\n-#define I40IWQPSQ_STAGKEY_SHIFT 0\n-#define I40IWQPSQ_STAGKEY_MASK (0xffUL << I40IWQPSQ_STAGKEY_SHIFT)\n-\n-#define I40IWQPSQ_STAGINDEX_SHIFT 8\n-#define I40IWQPSQ_STAGINDEX_MASK (0xffffffUL << I40IWQPSQ_STAGINDEX_SHIFT)\n-\n-#define I40IWQPSQ_COPYHOSTPBLS_SHIFT 43\n-#define I40IWQPSQ_COPYHOSTPBLS_MASK (1ULL << I40IWQPSQ_COPYHOSTPBLS_SHIFT)\n-\n-#define I40IWQPSQ_LPBLSIZE_SHIFT 44\n-#define I40IWQPSQ_LPBLSIZE_MASK (3ULL << I40IWQPSQ_LPBLSIZE_SHIFT)\n-\n-#define I40IWQPSQ_HPAGESIZE_SHIFT 46\n-#define I40IWQPSQ_HPAGESIZE_MASK (3ULL << I40IWQPSQ_HPAGESIZE_SHIFT)\n-\n-#define I40IWQPSQ_STAGLEN_SHIFT 0\n-#define I40IWQPSQ_STAGLEN_MASK (0x1ffffffffffULL << 
I40IWQPSQ_STAGLEN_SHIFT)\n-\n-#define I40IWQPSQ_FIRSTPMPBLIDXLO_SHIFT 48\n-#define I40IWQPSQ_FIRSTPMPBLIDXLO_MASK \\\n-\t(0xffffULL << I40IWQPSQ_FIRSTPMPBLIDXLO_SHIFT)\n-\n-#define I40IWQPSQ_FIRSTPMPBLIDXHI_SHIFT 0\n-#define I40IWQPSQ_FIRSTPMPBLIDXHI_MASK \\\n-\t(0xfffUL << I40IWQPSQ_FIRSTPMPBLIDXHI_SHIFT)\n-\n-#define I40IWQPSQ_PBLADDR_SHIFT 12\n-#define I40IWQPSQ_PBLADDR_MASK (0xfffffffffffffULL << I40IWQPSQ_PBLADDR_SHIFT)\n-\n-/* iwarp QP RQ WQE common fields */\n-#define I40IWQPRQ_ADDFRAGCNT_SHIFT I40IWQPSQ_ADDFRAGCNT_SHIFT\n-#define I40IWQPRQ_ADDFRAGCNT_MASK I40IWQPSQ_ADDFRAGCNT_MASK\n-\n-#define I40IWQPRQ_VALID_SHIFT I40IWQPSQ_VALID_SHIFT\n-#define I40IWQPRQ_VALID_MASK I40IWQPSQ_VALID_MASK\n-\n-#define I40IWQPRQ_COMPLCTX_SHIFT I40IW_CQPHC_QPCTX_SHIFT\n-#define I40IWQPRQ_COMPLCTX_MASK I40IW_CQPHC_QPCTX_MASK\n-\n-#define I40IWQPRQ_FRAG_LEN_SHIFT I40IWQPSQ_FRAG_LEN_SHIFT\n-#define I40IWQPRQ_FRAG_LEN_MASK I40IWQPSQ_FRAG_LEN_MASK\n-\n-#define I40IWQPRQ_STAG_SHIFT I40IWQPSQ_FRAG_STAG_SHIFT\n-#define I40IWQPRQ_STAG_MASK I40IWQPSQ_FRAG_STAG_MASK\n-\n-#define I40IWQPRQ_TO_SHIFT I40IWQPSQ_FRAG_TO_SHIFT\n-#define I40IWQPRQ_TO_MASK I40IWQPSQ_FRAG_TO_MASK\n-\n-/* Query FPM CQP buf */\n-#define I40IW_QUERY_FPM_MAX_QPS_SHIFT 0\n-#define I40IW_QUERY_FPM_MAX_QPS_MASK \\\n-\t(0x7ffffUL << I40IW_QUERY_FPM_MAX_QPS_SHIFT)\n-\n-#define I40IW_QUERY_FPM_MAX_CQS_SHIFT 0\n-#define I40IW_QUERY_FPM_MAX_CQS_MASK \\\n-\t(0x3ffffUL << I40IW_QUERY_FPM_MAX_CQS_SHIFT)\n-\n-#define I40IW_QUERY_FPM_FIRST_PE_SD_INDEX_SHIFT 0\n-#define I40IW_QUERY_FPM_FIRST_PE_SD_INDEX_MASK \\\n-\t(0x3fffUL << I40IW_QUERY_FPM_FIRST_PE_SD_INDEX_SHIFT)\n-\n-#define I40IW_QUERY_FPM_MAX_PE_SDS_SHIFT 32\n-#define I40IW_QUERY_FPM_MAX_PE_SDS_MASK \\\n-\t(0x3fffULL << I40IW_QUERY_FPM_MAX_PE_SDS_SHIFT)\n-\n-#define I40IW_QUERY_FPM_MAX_QPS_SHIFT 0\n-#define I40IW_QUERY_FPM_MAX_QPS_MASK \\\n-\t(0x7ffffUL << I40IW_QUERY_FPM_MAX_QPS_SHIFT)\n-\n-#define I40IW_QUERY_FPM_MAX_CQS_SHIFT 0\n-#define I40IW_QUERY_FPM_MAX_CQS_MASK \\\n-\t(0x3ffffUL << I40IW_QUERY_FPM_MAX_CQS_SHIFT)\n-\n-#define I40IW_QUERY_FPM_MAX_CEQS_SHIFT 0\n-#define I40IW_QUERY_FPM_MAX_CEQS_MASK \\\n-\t(0xffUL << I40IW_QUERY_FPM_MAX_CEQS_SHIFT)\n-\n-#define I40IW_QUERY_FPM_XFBLOCKSIZE_SHIFT 32\n-#define I40IW_QUERY_FPM_XFBLOCKSIZE_MASK \\\n-\t(0xffffffffULL << I40IW_QUERY_FPM_XFBLOCKSIZE_SHIFT)\n-\n-#define I40IW_QUERY_FPM_Q1BLOCKSIZE_SHIFT 32\n-#define I40IW_QUERY_FPM_Q1BLOCKSIZE_MASK \\\n-\t(0xffffffffULL << I40IW_QUERY_FPM_Q1BLOCKSIZE_SHIFT)\n-\n-#define I40IW_QUERY_FPM_HTMULTIPLIER_SHIFT 16\n-#define I40IW_QUERY_FPM_HTMULTIPLIER_MASK \\\n-\t(0xfUL << I40IW_QUERY_FPM_HTMULTIPLIER_SHIFT)\n-\n-#define I40IW_QUERY_FPM_TIMERBUCKET_SHIFT 32\n-#define I40IW_QUERY_FPM_TIMERBUCKET_MASK \\\n-\t(0xffFFULL << I40IW_QUERY_FPM_TIMERBUCKET_SHIFT)\n-\n-/* Static HMC pages allocated buf */\n-#define I40IW_SHMC_PAGE_ALLOCATED_HMC_FN_ID_SHIFT 0\n-#define I40IW_SHMC_PAGE_ALLOCATED_HMC_FN_ID_MASK \\\n-\t(0x3fUL << I40IW_SHMC_PAGE_ALLOCATED_HMC_FN_ID_SHIFT)\n-\n-#define I40IW_HW_PAGE_SIZE\t4096\n-#define I40IW_DONE_COUNT\t1000\n-#define I40IW_SLEEP_COUNT\t10\n-\n-enum {\n-\tI40IW_QUEUES_ALIGNMENT_MASK =\t\t(128 - 1),\n-\tI40IW_AEQ_ALIGNMENT_MASK =\t\t(256 - 1),\n-\tI40IW_Q2_ALIGNMENT_MASK =\t\t(256 - 1),\n-\tI40IW_CEQ_ALIGNMENT_MASK =\t\t(256 - 1),\n-\tI40IW_CQ0_ALIGNMENT_MASK =\t\t(256 - 1),\n-\tI40IW_HOST_CTX_ALIGNMENT_MASK =\t\t(4 - 1),\n-\tI40IW_SHADOWAREA_MASK =\t\t\t(128 - 1),\n-\tI40IW_FPM_QUERY_BUF_ALIGNMENT_MASK =\t(4 - 1),\n-\tI40IW_FPM_COMMIT_BUF_ALIGNMENT_MASK =\t(4 - 1)\n-};\n-\n-enum 
i40iw_alignment {\n-\tI40IW_CQP_ALIGNMENT =\t\t0x200,\n-\tI40IW_AEQ_ALIGNMENT =\t\t0x100,\n-\tI40IW_CEQ_ALIGNMENT =\t\t0x100,\n-\tI40IW_CQ0_ALIGNMENT =\t\t0x100,\n-\tI40IW_SD_BUF_ALIGNMENT =\t0x80\n-};\n-\n-#define I40IW_WQE_SIZE_64\t64\n-\n-#define I40IW_QP_WQE_MIN_SIZE\t32\n-#define I40IW_QP_WQE_MAX_SIZE\t128\n-\n-#define I40IW_UPDATE_SD_BUF_SIZE 128\n-\n-#define I40IW_CQE_QTYPE_RQ 0\n-#define I40IW_CQE_QTYPE_SQ 1\n-\n-#define I40IW_RING_INIT(_ring, _size) \\\n-\t{ \\\n-\t\t(_ring).head = 0; \\\n-\t\t(_ring).tail = 0; \\\n-\t\t(_ring).size = (_size); \\\n-\t}\n-#define I40IW_RING_GETSIZE(_ring) ((_ring).size)\n-#define I40IW_RING_GETCURRENT_HEAD(_ring) ((_ring).head)\n-#define I40IW_RING_GETCURRENT_TAIL(_ring) ((_ring).tail)\n-\n-#define I40IW_RING_MOVE_HEAD(_ring, _retcode) \\\n-\t{ \\\n-\t\tregister u32 size; \\\n-\t\tsize = (_ring).size; \\\n-\t\tif (!I40IW_RING_FULL_ERR(_ring)) { \\\n-\t\t\t(_ring).head = ((_ring).head + 1) % size; \\\n-\t\t\t(_retcode) = 0; \\\n-\t\t} else { \\\n-\t\t\t(_retcode) = I40IW_ERR_RING_FULL; \\\n-\t\t} \\\n-\t}\n-\n-#define I40IW_RING_MOVE_HEAD_BY_COUNT(_ring, _count, _retcode) \\\n-\t{ \\\n-\t\tregister u32 size; \\\n-\t\tsize = (_ring).size; \\\n-\t\tif ((I40IW_RING_WORK_AVAILABLE(_ring) + (_count)) < size) { \\\n-\t\t\t(_ring).head = ((_ring).head + (_count)) % size; \\\n-\t\t\t(_retcode) = 0; \\\n-\t\t} else { \\\n-\t\t\t(_retcode) = I40IW_ERR_RING_FULL; \\\n-\t\t} \\\n-\t}\n-\n-#define I40IW_RING_MOVE_TAIL(_ring) \\\n-\t(_ring).tail = ((_ring).tail + 1) % (_ring).size\n-\n-#define I40IW_RING_MOVE_HEAD_NOCHECK(_ring) \\\n-\t(_ring).head = ((_ring).head + 1) % (_ring).size\n-\n-#define I40IW_RING_MOVE_TAIL_BY_COUNT(_ring, _count) \\\n-\t(_ring).tail = ((_ring).tail + (_count)) % (_ring).size\n-\n-#define I40IW_RING_SET_TAIL(_ring, _pos) \\\n-\t(_ring).tail = (_pos) % (_ring).size\n-\n-#define I40IW_RING_FULL_ERR(_ring) \\\n-\t( \\\n-\t\t(I40IW_RING_WORK_AVAILABLE(_ring) == ((_ring).size - 1)) \\\n-\t)\n-\n-#define I40IW_ERR_RING_FULL2(_ring) \\\n-\t( \\\n-\t\t(I40IW_RING_WORK_AVAILABLE(_ring) == ((_ring).size - 2)) \\\n-\t)\n-\n-#define I40IW_ERR_RING_FULL3(_ring) \\\n-\t( \\\n-\t\t(I40IW_RING_WORK_AVAILABLE(_ring) == ((_ring).size - 3)) \\\n-\t)\n-\n-#define I40IW_RING_MORE_WORK(_ring) \\\n-\t( \\\n-\t\t(I40IW_RING_WORK_AVAILABLE(_ring) != 0) \\\n-\t)\n-\n-#define I40IW_RING_WORK_AVAILABLE(_ring) \\\n-\t( \\\n-\t\t(((_ring).head + (_ring).size - (_ring).tail) % (_ring).size) \\\n-\t)\n-\n-#define I40IW_RING_GET_WQES_AVAILABLE(_ring) \\\n-\t( \\\n-\t\t((_ring).size - I40IW_RING_WORK_AVAILABLE(_ring) - 1) \\\n-\t)\n-\n-#define I40IW_ATOMIC_RING_MOVE_HEAD(_ring, index, _retcode) \\\n-\t{ \\\n-\t\tindex = I40IW_RING_GETCURRENT_HEAD(_ring); \\\n-\t\tI40IW_RING_MOVE_HEAD(_ring, _retcode); \\\n-\t}\n-\n-/* Async Events codes */\n-#define I40IW_AE_AMP_UNALLOCATED_STAG 0x0102\n-#define I40IW_AE_AMP_INVALID_STAG 0x0103\n-#define I40IW_AE_AMP_BAD_QP 0x0104\n-#define I40IW_AE_AMP_BAD_PD 0x0105\n-#define I40IW_AE_AMP_BAD_STAG_KEY 0x0106\n-#define I40IW_AE_AMP_BAD_STAG_INDEX 0x0107\n-#define I40IW_AE_AMP_BOUNDS_VIOLATION 0x0108\n-#define I40IW_AE_AMP_RIGHTS_VIOLATION 0x0109\n-#define I40IW_AE_AMP_TO_WRAP 0x010a\n-#define I40IW_AE_AMP_FASTREG_SHARED 0x010b\n-#define I40IW_AE_AMP_FASTREG_VALID_STAG 0x010c\n-#define I40IW_AE_AMP_FASTREG_MW_STAG 0x010d\n-#define I40IW_AE_AMP_FASTREG_INVALID_RIGHTS 0x010e\n-#define I40IW_AE_AMP_FASTREG_PBL_TABLE_OVERFLOW 0x010f\n-#define I40IW_AE_AMP_FASTREG_INVALID_LENGTH 0x0110\n-#define I40IW_AE_AMP_INVALIDATE_SHARED 0x0111\n-#define 
I40IW_AE_AMP_INVALIDATE_NO_REMOTE_ACCESS_RIGHTS 0x0112\n-#define I40IW_AE_AMP_INVALIDATE_MR_WITH_BOUND_WINDOWS 0x0113\n-#define I40IW_AE_AMP_MWBIND_VALID_STAG 0x0114\n-#define I40IW_AE_AMP_MWBIND_OF_MR_STAG 0x0115\n-#define I40IW_AE_AMP_MWBIND_TO_ZERO_BASED_STAG 0x0116\n-#define I40IW_AE_AMP_MWBIND_TO_MW_STAG 0x0117\n-#define I40IW_AE_AMP_MWBIND_INVALID_RIGHTS 0x0118\n-#define I40IW_AE_AMP_MWBIND_INVALID_BOUNDS 0x0119\n-#define I40IW_AE_AMP_MWBIND_TO_INVALID_PARENT 0x011a\n-#define I40IW_AE_AMP_MWBIND_BIND_DISABLED 0x011b\n-#define I40IW_AE_UDA_XMIT_DGRAM_TOO_LONG 0x0132\n-#define I40IW_AE_UDA_XMIT_DGRAM_TOO_SHORT 0x0134\n-#define I40IW_AE_BAD_CLOSE 0x0201\n-#define I40IW_AE_RDMAP_ROE_BAD_LLP_CLOSE 0x0202\n-#define I40IW_AE_CQ_OPERATION_ERROR 0x0203\n-#define I40IW_AE_PRIV_OPERATION_DENIED 0x011c\n-#define I40IW_AE_RDMA_READ_WHILE_ORD_ZERO 0x0205\n-#define I40IW_AE_STAG_ZERO_INVALID 0x0206\n-#define I40IW_AE_IB_RREQ_AND_Q1_FULL 0x0207\n-#define I40IW_AE_WQE_UNEXPECTED_OPCODE 0x020a\n-#define I40IW_AE_WQE_INVALID_PARAMETER 0x020b\n-#define I40IW_AE_WQE_LSMM_TOO_LONG 0x0220\n-#define I40IW_AE_DDP_INVALID_MSN_GAP_IN_MSN 0x0301\n-#define I40IW_AE_DDP_UBE_DDP_MESSAGE_TOO_LONG_FOR_AVAILABLE_BUFFER 0x0303\n-#define I40IW_AE_DDP_UBE_INVALID_DDP_VERSION 0x0304\n-#define I40IW_AE_DDP_UBE_INVALID_MO 0x0305\n-#define I40IW_AE_DDP_UBE_INVALID_MSN_NO_BUFFER_AVAILABLE 0x0306\n-#define I40IW_AE_DDP_UBE_INVALID_QN 0x0307\n-#define I40IW_AE_DDP_NO_L_BIT 0x0308\n-#define I40IW_AE_RDMAP_ROE_INVALID_RDMAP_VERSION 0x0311\n-#define I40IW_AE_RDMAP_ROE_UNEXPECTED_OPCODE 0x0312\n-#define I40IW_AE_ROE_INVALID_RDMA_READ_REQUEST 0x0313\n-#define I40IW_AE_ROE_INVALID_RDMA_WRITE_OR_READ_RESP 0x0314\n-#define I40IW_AE_INVALID_ARP_ENTRY 0x0401\n-#define I40IW_AE_INVALID_TCP_OPTION_RCVD 0x0402\n-#define I40IW_AE_STALE_ARP_ENTRY 0x0403\n-#define I40IW_AE_INVALID_MAC_ENTRY 0x0405\n-#define I40IW_AE_LLP_CLOSE_COMPLETE 0x0501\n-#define I40IW_AE_LLP_CONNECTION_RESET 0x0502\n-#define I40IW_AE_LLP_FIN_RECEIVED 0x0503\n-#define I40IW_AE_LLP_RECEIVED_MPA_CRC_ERROR 0x0505\n-#define I40IW_AE_LLP_SEGMENT_TOO_LARGE 0x0506\n-#define I40IW_AE_LLP_SEGMENT_TOO_SMALL 0x0507\n-#define I40IW_AE_LLP_SYN_RECEIVED 0x0508\n-#define I40IW_AE_LLP_TERMINATE_RECEIVED 0x0509\n-#define I40IW_AE_LLP_TOO_MANY_RETRIES 0x050a\n-#define I40IW_AE_LLP_TOO_MANY_KEEPALIVE_RETRIES 0x050b\n-#define I40IW_AE_LLP_DOUBT_REACHABILITY 0x050c\n-#define I40IW_AE_LLP_RX_VLAN_MISMATCH 0x050d\n-#define I40IW_AE_RESOURCE_EXHAUSTION 0x0520\n-#define I40IW_AE_RESET_SENT 0x0601\n-#define I40IW_AE_TERMINATE_SENT 0x0602\n-#define I40IW_AE_RESET_NOT_SENT 0x0603\n-#define I40IW_AE_LCE_QP_CATASTROPHIC 0x0700\n-#define I40IW_AE_LCE_FUNCTION_CATASTROPHIC 0x0701\n-#define I40IW_AE_LCE_CQ_CATASTROPHIC 0x0702\n-#define I40IW_AE_QP_SUSPEND_COMPLETE 0x0900\n-\n-#define OP_DELETE_LOCAL_MAC_IPADDR_ENTRY 1\n-#define OP_CEQ_DESTROY 2\n-#define OP_AEQ_DESTROY 3\n-#define OP_DELETE_ARP_CACHE_ENTRY 4\n-#define OP_MANAGE_APBVT_ENTRY 5\n-#define OP_CEQ_CREATE 6\n-#define OP_AEQ_CREATE 7\n-#define OP_ALLOC_LOCAL_MAC_IPADDR_ENTRY 8\n-#define OP_ADD_LOCAL_MAC_IPADDR_ENTRY 9\n-#define OP_MANAGE_QHASH_TABLE_ENTRY 10\n-#define OP_QP_MODIFY 11\n-#define OP_QP_UPLOAD_CONTEXT 12\n-#define OP_CQ_CREATE 13\n-#define OP_CQ_DESTROY 14\n-#define OP_QP_CREATE 15\n-#define OP_QP_DESTROY 16\n-#define OP_ALLOC_STAG 17\n-#define OP_MR_REG_NON_SHARED 18\n-#define OP_DEALLOC_STAG 19\n-#define OP_MW_ALLOC 20\n-#define OP_QP_FLUSH_WQES 21\n-#define OP_ADD_ARP_CACHE_ENTRY 22\n-#define OP_MANAGE_PUSH_PAGE 23\n-#define 
OP_UPDATE_PE_SDS 24\n-#define OP_MANAGE_HMC_PM_FUNC_TABLE 25\n-#define OP_SUSPEND 26\n-#define OP_RESUME 27\n-#define OP_MANAGE_VF_PBLE_BP 28\n-#define OP_QUERY_FPM_VALUES 29\n-#define OP_COMMIT_FPM_VALUES 30\n-#define OP_REQUESTED_COMMANDS 31\n-#define OP_COMPLETED_COMMANDS 32\n-#define OP_GEN_AE 33\n-#define OP_SIZE_CQP_STAT_ARRAY 34\n-\n-#endif\ndiff --git a/drivers/infiniband/hw/i40iw/i40iw_hmc.c b/drivers/infiniband/hw/i40iw/i40iw_hmc.c\ndeleted file mode 100644\nindex 5484cbf..0000000\n--- a/drivers/infiniband/hw/i40iw/i40iw_hmc.c\n+++ /dev/null\n@@ -1,821 +0,0 @@\n-/*******************************************************************************\n-*\n-* Copyright (c) 2015-2016 Intel Corporation. All rights reserved.\n-*\n-* This software is available to you under a choice of one of two\n-* licenses. You may choose to be licensed under the terms of the GNU\n-* General Public License (GPL) Version 2, available from the file\n-* COPYING in the main directory of this source tree, or the\n-* OpenFabrics.org BSD license below:\n-*\n-* Redistribution and use in source and binary forms, with or\n-* without modification, are permitted provided that the following\n-* conditions are met:\n-*\n-* - Redistributions of source code must retain the above\n-*\tcopyright notice, this list of conditions and the following\n-*\tdisclaimer.\n-*\n-* - Redistributions in binary form must reproduce the above\n-*\tcopyright notice, this list of conditions and the following\n-*\tdisclaimer in the documentation and/or other materials\n-*\tprovided with the distribution.\n-*\n-* THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,\n-* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\n-* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\n-* NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS\n-* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN\n-* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\n-* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n-* SOFTWARE.\n-*\n-*******************************************************************************/\n-\n-#include \"i40iw_osdep.h\"\n-#include \"i40iw_register.h\"\n-#include \"i40iw_status.h\"\n-#include \"i40iw_hmc.h\"\n-#include \"i40iw_d.h\"\n-#include \"i40iw_type.h\"\n-#include \"i40iw_p.h\"\n-#include \"i40iw_vf.h\"\n-#include \"i40iw_virtchnl.h\"\n-\n-/**\n- * i40iw_find_sd_index_limit - finds segment descriptor index limit\n- * @hmc_info: pointer to the HMC configuration information structure\n- * @type: type of HMC resources we're searching\n- * @index: starting index for the object\n- * @cnt: number of objects we're trying to create\n- * @sd_idx: pointer to return index of the segment descriptor in question\n- * @sd_limit: pointer to return the maximum number of segment descriptors\n- *\n- * This function calculates the segment descriptor index and index limit\n- * for the resource defined by i40iw_hmc_rsrc_type.\n- */\n-\n-static inline void i40iw_find_sd_index_limit(struct i40iw_hmc_info *hmc_info,\n-\t\t\t\t\t u32 type,\n-\t\t\t\t\t u32 idx,\n-\t\t\t\t\t u32 cnt,\n-\t\t\t\t\t u32 *sd_idx,\n-\t\t\t\t\t u32 *sd_limit)\n-{\n-\tu64 fpm_addr, fpm_limit;\n-\n-\tfpm_addr = hmc_info->hmc_obj[(type)].base +\n-\t\t\thmc_info->hmc_obj[type].size * idx;\n-\tfpm_limit = fpm_addr + hmc_info->hmc_obj[type].size * cnt;\n-\t*sd_idx = (u32)(fpm_addr / I40IW_HMC_DIRECT_BP_SIZE);\n-\t*sd_limit = (u32)((fpm_limit - 1) / I40IW_HMC_DIRECT_BP_SIZE);\n-\t*sd_limit += 1;\n-}\n-\n-/**\n- * i40iw_find_pd_index_limit - finds page descriptor index limit\n- * @hmc_info: pointer to the HMC configuration information struct\n- * @type: HMC resource type we're examining\n- * @idx: starting index for the object\n- * @cnt: number of objects we're trying to create\n- * @pd_index: pointer to return page descriptor index\n- * @pd_limit: pointer to return page descriptor index limit\n- *\n- * Calculates the page descriptor index and index limit for the resource\n- * defined by i40iw_hmc_rsrc_type.\n- */\n-\n-static inline void i40iw_find_pd_index_limit(struct i40iw_hmc_info *hmc_info,\n-\t\t\t\t\t u32 type,\n-\t\t\t\t\t u32 idx,\n-\t\t\t\t\t u32 cnt,\n-\t\t\t\t\t u32 *pd_idx,\n-\t\t\t\t\t u32 *pd_limit)\n-{\n-\tu64 fpm_adr, fpm_limit;\n-\n-\tfpm_adr = hmc_info->hmc_obj[type].base +\n-\t\t\thmc_info->hmc_obj[type].size * idx;\n-\tfpm_limit = fpm_adr + (hmc_info)->hmc_obj[(type)].size * (cnt);\n-\t*(pd_idx) = (u32)(fpm_adr / I40IW_HMC_PAGED_BP_SIZE);\n-\t*(pd_limit) = (u32)((fpm_limit - 1) / I40IW_HMC_PAGED_BP_SIZE);\n-\t*(pd_limit) += 1;\n-}\n-\n-/**\n- * i40iw_set_sd_entry - setup entry for sd programming\n- * @pa: physical addr\n- * @idx: sd index\n- * @type: paged or direct sd\n- * @entry: sd entry ptr\n- */\n-static inline void i40iw_set_sd_entry(u64 pa,\n-\t\t\t\t u32 idx,\n-\t\t\t\t enum i40iw_sd_entry_type type,\n-\t\t\t\t struct update_sd_entry *entry)\n-{\n-\tentry->data = pa | (I40IW_HMC_MAX_BP_COUNT << I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) |\n-\t\t\t(((type == I40IW_SD_TYPE_PAGED) ? 
0 : 1) <<\n-\t\t\t\tI40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT) |\n-\t\t\t(1 << I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT);\n-\tentry->cmd = (idx | (1 << I40E_PFHMC_SDCMD_PMSDWR_SHIFT) | (1 << 15));\n-}\n-\n-/**\n- * i40iw_clr_sd_entry - setup entry for sd clear\n- * @idx: sd index\n- * @type: paged or direct sd\n- * @entry: sd entry ptr\n- */\n-static inline void i40iw_clr_sd_entry(u32 idx, enum i40iw_sd_entry_type type,\n-\t\t\t\t struct update_sd_entry *entry)\n-{\n-\tentry->data = (I40IW_HMC_MAX_BP_COUNT <<\n-\t\t\tI40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) |\n-\t\t\t(((type == I40IW_SD_TYPE_PAGED) ? 0 : 1) <<\n-\t\t\t\tI40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT);\n-\tentry->cmd = (idx | (1 << I40E_PFHMC_SDCMD_PMSDWR_SHIFT) | (1 << 15));\n-}\n-\n-/**\n- * i40iw_hmc_sd_one - setup 1 sd entry for cqp\n- * @dev: pointer to the device structure\n- * @hmc_fn_id: hmc's function id\n- * @pa: physical addr\n- * @sd_idx: sd index\n- * @type: paged or direct sd\n- * @setsd: flag to set or clear sd\n- */\n-enum i40iw_status_code i40iw_hmc_sd_one(struct i40iw_sc_dev *dev,\n-\t\t\t\t\tu8 hmc_fn_id,\n-\t\t\t\t\tu64 pa, u32 sd_idx,\n-\t\t\t\t\tenum i40iw_sd_entry_type type,\n-\t\t\t\t\tbool setsd)\n-{\n-\tstruct i40iw_update_sds_info sdinfo;\n-\n-\tsdinfo.cnt = 1;\n-\tsdinfo.hmc_fn_id = hmc_fn_id;\n-\tif (setsd)\n-\t\ti40iw_set_sd_entry(pa, sd_idx, type, sdinfo.entry);\n-\telse\n-\t\ti40iw_clr_sd_entry(sd_idx, type, sdinfo.entry);\n-\n-\treturn dev->cqp->process_cqp_sds(dev, &sdinfo);\n-}\n-\n-/**\n- * i40iw_hmc_sd_grp - setup group od sd entries for cqp\n- * @dev: pointer to the device structure\n- * @hmc_info: pointer to the HMC configuration information struct\n- * @sd_index: sd index\n- * @sd_cnt: number of sd entries\n- * @setsd: flag to set or clear sd\n- */\n-static enum i40iw_status_code i40iw_hmc_sd_grp(struct i40iw_sc_dev *dev,\n-\t\t\t\t\t struct i40iw_hmc_info *hmc_info,\n-\t\t\t\t\t u32 sd_index,\n-\t\t\t\t\t u32 sd_cnt,\n-\t\t\t\t\t bool setsd)\n-{\n-\tstruct i40iw_hmc_sd_entry *sd_entry;\n-\tstruct i40iw_update_sds_info sdinfo;\n-\tu64 pa;\n-\tu32 i;\n-\tenum i40iw_status_code ret_code = 0;\n-\n-\tmemset(&sdinfo, 0, sizeof(sdinfo));\n-\tsdinfo.hmc_fn_id = hmc_info->hmc_fn_id;\n-\tfor (i = sd_index; i < sd_index + sd_cnt; i++) {\n-\t\tsd_entry = &hmc_info->sd_table.sd_entry[i];\n-\t\tif (!sd_entry ||\n-\t\t (!sd_entry->valid && setsd) ||\n-\t\t (sd_entry->valid && !setsd))\n-\t\t\tcontinue;\n-\t\tif (setsd) {\n-\t\t\tpa = (sd_entry->entry_type == I40IW_SD_TYPE_PAGED) ?\n-\t\t\t sd_entry->u.pd_table.pd_page_addr.pa :\n-\t\t\t sd_entry->u.bp.addr.pa;\n-\t\t\ti40iw_set_sd_entry(pa, i, sd_entry->entry_type,\n-\t\t\t\t\t &sdinfo.entry[sdinfo.cnt]);\n-\t\t} else {\n-\t\t\ti40iw_clr_sd_entry(i, sd_entry->entry_type,\n-\t\t\t\t\t &sdinfo.entry[sdinfo.cnt]);\n-\t\t}\n-\t\tsdinfo.cnt++;\n-\t\tif (sdinfo.cnt == I40IW_MAX_SD_ENTRIES) {\n-\t\t\tret_code = dev->cqp->process_cqp_sds(dev, &sdinfo);\n-\t\t\tif (ret_code) {\n-\t\t\t\ti40iw_debug(dev, I40IW_DEBUG_HMC,\n-\t\t\t\t\t \"i40iw_hmc_sd_grp: sd_programming failed err=%d\\n\",\n-\t\t\t\t\t ret_code);\n-\t\t\t\treturn ret_code;\n-\t\t\t}\n-\t\t\tsdinfo.cnt = 0;\n-\t\t}\n-\t}\n-\tif (sdinfo.cnt)\n-\t\tret_code = dev->cqp->process_cqp_sds(dev, &sdinfo);\n-\n-\treturn ret_code;\n-}\n-\n-/**\n- * i40iw_vfdev_from_fpm - return vf dev ptr for hmc function id\n- * @dev: pointer to the device structure\n- * @hmc_fn_id: hmc's function id\n- */\n-struct i40iw_vfdev *i40iw_vfdev_from_fpm(struct i40iw_sc_dev *dev, u8 hmc_fn_id)\n-{\n-\tstruct i40iw_vfdev *vf_dev = NULL;\n-\tu16 
idx;\n-\n-\tfor (idx = 0; idx < I40IW_MAX_PE_ENABLED_VF_COUNT; idx++) {\n-\t\tif (dev->vf_dev[idx] &&\n-\t\t ((u8)dev->vf_dev[idx]->pmf_index == hmc_fn_id)) {\n-\t\t\tvf_dev = dev->vf_dev[idx];\n-\t\t\tbreak;\n-\t\t}\n-\t}\n-\treturn vf_dev;\n-}\n-\n-/**\n- * i40iw_vf_hmcinfo_from_fpm - get ptr to hmc for func_id\n- * @dev: pointer to the device structure\n- * @hmc_fn_id: hmc's function id\n- */\n-struct i40iw_hmc_info *i40iw_vf_hmcinfo_from_fpm(struct i40iw_sc_dev *dev,\n-\t\t\t\t\t\t u8 hmc_fn_id)\n-{\n-\tstruct i40iw_hmc_info *hmc_info = NULL;\n-\tu16 idx;\n-\n-\tfor (idx = 0; idx < I40IW_MAX_PE_ENABLED_VF_COUNT; idx++) {\n-\t\tif (dev->vf_dev[idx] &&\n-\t\t ((u8)dev->vf_dev[idx]->pmf_index == hmc_fn_id)) {\n-\t\t\thmc_info = &dev->vf_dev[idx]->hmc_info;\n-\t\t\tbreak;\n-\t\t}\n-\t}\n-\treturn hmc_info;\n-}\n-\n-/**\n- * i40iw_hmc_finish_add_sd_reg - program sd entries for objects\n- * @dev: pointer to the device structure\n- * @info: create obj info\n- */\n-static enum i40iw_status_code i40iw_hmc_finish_add_sd_reg(struct i40iw_sc_dev *dev,\n-\t\t\t\t\t\t\t struct i40iw_hmc_create_obj_info *info)\n-{\n-\tif (info->start_idx >= info->hmc_info->hmc_obj[info->rsrc_type].cnt)\n-\t\treturn I40IW_ERR_INVALID_HMC_OBJ_INDEX;\n-\n-\tif ((info->start_idx + info->count) >\n-\t\t\tinfo->hmc_info->hmc_obj[info->rsrc_type].cnt)\n-\t\treturn I40IW_ERR_INVALID_HMC_OBJ_COUNT;\n-\n-\tif (!info->add_sd_cnt)\n-\t\treturn 0;\n-\n-\treturn i40iw_hmc_sd_grp(dev, info->hmc_info,\n-\t\t\t\tinfo->hmc_info->sd_indexes[0],\n-\t\t\t\tinfo->add_sd_cnt, true);\n-}\n-\n-/**\n- * i40iw_create_iw_hmc_obj - allocate backing store for hmc objects\n- * @dev: pointer to the device structure\n- * @info: pointer to i40iw_hmc_iw_create_obj_info struct\n- *\n- * This will allocate memory for PDs and backing pages and populate\n- * the sd and pd entries.\n- */\n-enum i40iw_status_code i40iw_sc_create_hmc_obj(struct i40iw_sc_dev *dev,\n-\t\t\t\t\t struct i40iw_hmc_create_obj_info *info)\n-{\n-\tstruct i40iw_hmc_sd_entry *sd_entry;\n-\tu32 sd_idx, sd_lmt;\n-\tu32 pd_idx = 0, pd_lmt = 0;\n-\tu32 pd_idx1 = 0, pd_lmt1 = 0;\n-\tu32 i, j;\n-\tbool pd_error = false;\n-\tenum i40iw_status_code ret_code = 0;\n-\n-\tif (info->start_idx >= info->hmc_info->hmc_obj[info->rsrc_type].cnt)\n-\t\treturn I40IW_ERR_INVALID_HMC_OBJ_INDEX;\n-\n-\tif ((info->start_idx + info->count) >\n-\t info->hmc_info->hmc_obj[info->rsrc_type].cnt) {\n-\t\ti40iw_debug(dev, I40IW_DEBUG_HMC,\n-\t\t\t \"%s: error type %u, start = %u, req cnt %u, cnt = %u\\n\",\n-\t\t\t __func__, info->rsrc_type, info->start_idx, info->count,\n-\t\t\t info->hmc_info->hmc_obj[info->rsrc_type].cnt);\n-\t\treturn I40IW_ERR_INVALID_HMC_OBJ_COUNT;\n-\t}\n-\n-\tif (!dev->is_pf)\n-\t\treturn i40iw_vchnl_vf_add_hmc_objs(dev, info->rsrc_type, 0, info->count);\n-\n-\ti40iw_find_sd_index_limit(info->hmc_info, info->rsrc_type,\n-\t\t\t\t info->start_idx, info->count,\n-\t\t\t\t &sd_idx, &sd_lmt);\n-\tif (sd_idx >= info->hmc_info->sd_table.sd_cnt ||\n-\t sd_lmt > info->hmc_info->sd_table.sd_cnt) {\n-\t\treturn I40IW_ERR_INVALID_SD_INDEX;\n-\t}\n-\ti40iw_find_pd_index_limit(info->hmc_info, info->rsrc_type,\n-\t\t\t\t info->start_idx, info->count, &pd_idx, &pd_lmt);\n-\n-\tfor (j = sd_idx; j < sd_lmt; j++) {\n-\t\tret_code = i40iw_add_sd_table_entry(dev->hw, info->hmc_info,\n-\t\t\t\t\t\t j,\n-\t\t\t\t\t\t info->entry_type,\n-\t\t\t\t\t\t I40IW_HMC_DIRECT_BP_SIZE);\n-\t\tif (ret_code)\n-\t\t\tgoto exit_sd_error;\n-\t\tsd_entry = &info->hmc_info->sd_table.sd_entry[j];\n-\n-\t\tif 
((sd_entry->entry_type == I40IW_SD_TYPE_PAGED) &&\n-\t\t ((dev->hmc_info == info->hmc_info) &&\n-\t\t (info->rsrc_type != I40IW_HMC_IW_PBLE))) {\n-\t\t\tpd_idx1 = max(pd_idx, (j * I40IW_HMC_MAX_BP_COUNT));\n-\t\t\tpd_lmt1 = min(pd_lmt,\n-\t\t\t\t (j + 1) * I40IW_HMC_MAX_BP_COUNT);\n-\t\t\tfor (i = pd_idx1; i < pd_lmt1; i++) {\n-\t\t\t\t/* update the pd table entry */\n-\t\t\t\tret_code = i40iw_add_pd_table_entry(dev->hw, info->hmc_info,\n-\t\t\t\t\t\t\t\t i, NULL);\n-\t\t\t\tif (ret_code) {\n-\t\t\t\t\tpd_error = true;\n-\t\t\t\t\tbreak;\n-\t\t\t\t}\n-\t\t\t}\n-\t\t\tif (pd_error) {\n-\t\t\t\twhile (i && (i > pd_idx1)) {\n-\t\t\t\t\ti40iw_remove_pd_bp(dev->hw, info->hmc_info, (i - 1),\n-\t\t\t\t\t\t\t info->is_pf);\n-\t\t\t\t\ti--;\n-\t\t\t\t}\n-\t\t\t}\n-\t\t}\n-\t\tif (sd_entry->valid)\n-\t\t\tcontinue;\n-\n-\t\tinfo->hmc_info->sd_indexes[info->add_sd_cnt] = (u16)j;\n-\t\tinfo->add_sd_cnt++;\n-\t\tsd_entry->valid = true;\n-\t}\n-\treturn i40iw_hmc_finish_add_sd_reg(dev, info);\n-\n-exit_sd_error:\n-\twhile (j && (j > sd_idx)) {\n-\t\tsd_entry = &info->hmc_info->sd_table.sd_entry[j - 1];\n-\t\tswitch (sd_entry->entry_type) {\n-\t\tcase I40IW_SD_TYPE_PAGED:\n-\t\t\tpd_idx1 = max(pd_idx,\n-\t\t\t\t (j - 1) * I40IW_HMC_MAX_BP_COUNT);\n-\t\t\tpd_lmt1 = min(pd_lmt, (j * I40IW_HMC_MAX_BP_COUNT));\n-\t\t\tfor (i = pd_idx1; i < pd_lmt1; i++)\n-\t\t\t\ti40iw_prep_remove_pd_page(info->hmc_info, i);\n-\t\t\tbreak;\n-\t\tcase I40IW_SD_TYPE_DIRECT:\n-\t\t\ti40iw_prep_remove_pd_page(info->hmc_info, (j - 1));\n-\t\t\tbreak;\n-\t\tdefault:\n-\t\t\tret_code = I40IW_ERR_INVALID_SD_TYPE;\n-\t\t\tbreak;\n-\t\t}\n-\t\tj--;\n-\t}\n-\n-\treturn ret_code;\n-}\n-\n-/**\n- * i40iw_finish_del_sd_reg - delete sd entries for objects\n- * @dev: pointer to the device structure\n- * @info: dele obj info\n- * @reset: true if called before reset\n- */\n-static enum i40iw_status_code i40iw_finish_del_sd_reg(struct i40iw_sc_dev *dev,\n-\t\t\t\t\t\t struct i40iw_hmc_del_obj_info *info,\n-\t\t\t\t\t\t bool reset)\n-{\n-\tstruct i40iw_hmc_sd_entry *sd_entry;\n-\tenum i40iw_status_code ret_code = 0;\n-\tu32 i, sd_idx;\n-\tstruct i40iw_dma_mem *mem;\n-\n-\tif (dev->is_pf && !reset)\n-\t\tret_code = i40iw_hmc_sd_grp(dev, info->hmc_info,\n-\t\t\t\t\t info->hmc_info->sd_indexes[0],\n-\t\t\t\t\t info->del_sd_cnt, false);\n-\n-\tif (ret_code)\n-\t\ti40iw_debug(dev, I40IW_DEBUG_HMC, \"%s: error cqp sd sd_grp\\n\", __func__);\n-\n-\tfor (i = 0; i < info->del_sd_cnt; i++) {\n-\t\tsd_idx = info->hmc_info->sd_indexes[i];\n-\t\tsd_entry = &info->hmc_info->sd_table.sd_entry[sd_idx];\n-\t\tif (!sd_entry)\n-\t\t\tcontinue;\n-\t\tmem = (sd_entry->entry_type == I40IW_SD_TYPE_PAGED) ?\n-\t\t\t&sd_entry->u.pd_table.pd_page_addr :\n-\t\t\t&sd_entry->u.bp.addr;\n-\n-\t\tif (!mem || !mem->va)\n-\t\t\ti40iw_debug(dev, I40IW_DEBUG_HMC, \"%s: error cqp sd mem\\n\", __func__);\n-\t\telse\n-\t\t\ti40iw_free_dma_mem(dev->hw, mem);\n-\t}\n-\treturn ret_code;\n-}\n-\n-/**\n- * i40iw_del_iw_hmc_obj - remove pe hmc objects\n- * @dev: pointer to the device structure\n- * @info: pointer to i40iw_hmc_del_obj_info struct\n- * @reset: true if called before reset\n- *\n- * This will de-populate the SDs and PDs. It frees\n- * the memory for PDS and backing storage. 
After this function is returned,\n- * caller should deallocate memory allocated previously for\n- * book-keeping information about PDs and backing storage.\n- */\n-enum i40iw_status_code i40iw_sc_del_hmc_obj(struct i40iw_sc_dev *dev,\n-\t\t\t\t\t struct i40iw_hmc_del_obj_info *info,\n-\t\t\t\t\t bool reset)\n-{\n-\tstruct i40iw_hmc_pd_table *pd_table;\n-\tu32 sd_idx, sd_lmt;\n-\tu32 pd_idx, pd_lmt, rel_pd_idx;\n-\tu32 i, j;\n-\tenum i40iw_status_code ret_code = 0;\n-\n-\tif (info->start_idx >= info->hmc_info->hmc_obj[info->rsrc_type].cnt) {\n-\t\ti40iw_debug(dev, I40IW_DEBUG_HMC,\n-\t\t\t \"%s: error start_idx[%04d] >= [type %04d].cnt[%04d]\\n\",\n-\t\t\t __func__, info->start_idx, info->rsrc_type,\n-\t\t\t info->hmc_info->hmc_obj[info->rsrc_type].cnt);\n-\t\treturn I40IW_ERR_INVALID_HMC_OBJ_INDEX;\n-\t}\n-\n-\tif ((info->start_idx + info->count) >\n-\t info->hmc_info->hmc_obj[info->rsrc_type].cnt) {\n-\t\ti40iw_debug(dev, I40IW_DEBUG_HMC,\n-\t\t\t \"%s: error start_idx[%04d] + count %04d >= [type %04d].cnt[%04d]\\n\",\n-\t\t\t __func__, info->start_idx, info->count,\n-\t\t\t info->rsrc_type,\n-\t\t\t info->hmc_info->hmc_obj[info->rsrc_type].cnt);\n-\t\treturn I40IW_ERR_INVALID_HMC_OBJ_COUNT;\n-\t}\n-\tif (!dev->is_pf) {\n-\t\tret_code = i40iw_vchnl_vf_del_hmc_obj(dev, info->rsrc_type, 0,\n-\t\t\t\t\t\t info->count);\n-\t\tif (info->rsrc_type != I40IW_HMC_IW_PBLE)\n-\t\t\treturn ret_code;\n-\t}\n-\n-\ti40iw_find_pd_index_limit(info->hmc_info, info->rsrc_type,\n-\t\t\t\t info->start_idx, info->count, &pd_idx, &pd_lmt);\n-\n-\tfor (j = pd_idx; j < pd_lmt; j++) {\n-\t\tsd_idx = j / I40IW_HMC_PD_CNT_IN_SD;\n-\n-\t\tif (info->hmc_info->sd_table.sd_entry[sd_idx].entry_type !=\n-\t\t I40IW_SD_TYPE_PAGED)\n-\t\t\tcontinue;\n-\n-\t\trel_pd_idx = j % I40IW_HMC_PD_CNT_IN_SD;\n-\t\tpd_table = &info->hmc_info->sd_table.sd_entry[sd_idx].u.pd_table;\n-\t\tif (pd_table->pd_entry[rel_pd_idx].valid) {\n-\t\t\tret_code = i40iw_remove_pd_bp(dev->hw, info->hmc_info, j,\n-\t\t\t\t\t\t info->is_pf);\n-\t\t\tif (ret_code) {\n-\t\t\t\ti40iw_debug(dev, I40IW_DEBUG_HMC, \"%s: error\\n\", __func__);\n-\t\t\t\treturn ret_code;\n-\t\t\t}\n-\t\t}\n-\t}\n-\n-\ti40iw_find_sd_index_limit(info->hmc_info, info->rsrc_type,\n-\t\t\t\t info->start_idx, info->count, &sd_idx, &sd_lmt);\n-\tif (sd_idx >= info->hmc_info->sd_table.sd_cnt ||\n-\t sd_lmt > info->hmc_info->sd_table.sd_cnt) {\n-\t\ti40iw_debug(dev, I40IW_DEBUG_HMC, \"%s: error invalid sd_idx\\n\", __func__);\n-\t\treturn I40IW_ERR_INVALID_SD_INDEX;\n-\t}\n-\n-\tfor (i = sd_idx; i < sd_lmt; i++) {\n-\t\tif (!info->hmc_info->sd_table.sd_entry[i].valid)\n-\t\t\tcontinue;\n-\t\tswitch (info->hmc_info->sd_table.sd_entry[i].entry_type) {\n-\t\tcase I40IW_SD_TYPE_DIRECT:\n-\t\t\tret_code = i40iw_prep_remove_sd_bp(info->hmc_info, i);\n-\t\t\tif (!ret_code) {\n-\t\t\t\tinfo->hmc_info->sd_indexes[info->del_sd_cnt] = (u16)i;\n-\t\t\t\tinfo->del_sd_cnt++;\n-\t\t\t}\n-\t\t\tbreak;\n-\t\tcase I40IW_SD_TYPE_PAGED:\n-\t\t\tret_code = i40iw_prep_remove_pd_page(info->hmc_info, i);\n-\t\t\tif (!ret_code) {\n-\t\t\t\tinfo->hmc_info->sd_indexes[info->del_sd_cnt] = (u16)i;\n-\t\t\t\tinfo->del_sd_cnt++;\n-\t\t\t}\n-\t\t\tbreak;\n-\t\tdefault:\n-\t\t\tbreak;\n-\t\t}\n-\t}\n-\treturn i40iw_finish_del_sd_reg(dev, info, reset);\n-}\n-\n-/**\n- * i40iw_add_sd_table_entry - Adds a segment descriptor to the table\n- * @hw: pointer to our hw struct\n- * @hmc_info: pointer to the HMC configuration information struct\n- * @sd_index: segment descriptor index to manipulate\n- * @type: what type of 
segment descriptor we're manipulating\n- * @direct_mode_sz: size to alloc in direct mode\n- */\n-enum i40iw_status_code i40iw_add_sd_table_entry(struct i40iw_hw *hw,\n-\t\t\t\t\t\tstruct i40iw_hmc_info *hmc_info,\n-\t\t\t\t\t\tu32 sd_index,\n-\t\t\t\t\t\tenum i40iw_sd_entry_type type,\n-\t\t\t\t\t\tu64 direct_mode_sz)\n-{\n-\tenum i40iw_status_code ret_code = 0;\n-\tstruct i40iw_hmc_sd_entry *sd_entry;\n-\tbool dma_mem_alloc_done = false;\n-\tstruct i40iw_dma_mem mem;\n-\tu64 alloc_len;\n-\n-\tsd_entry = &hmc_info->sd_table.sd_entry[sd_index];\n-\tif (!sd_entry->valid) {\n-\t\tif (type == I40IW_SD_TYPE_PAGED)\n-\t\t\talloc_len = I40IW_HMC_PAGED_BP_SIZE;\n-\t\telse\n-\t\t\talloc_len = direct_mode_sz;\n-\n-\t\t/* allocate a 4K pd page or 2M backing page */\n-\t\tret_code = i40iw_allocate_dma_mem(hw, &mem, alloc_len,\n-\t\t\t\t\t\t I40IW_HMC_PD_BP_BUF_ALIGNMENT);\n-\t\tif (ret_code)\n-\t\t\tgoto exit;\n-\t\tdma_mem_alloc_done = true;\n-\t\tif (type == I40IW_SD_TYPE_PAGED) {\n-\t\t\tret_code = i40iw_allocate_virt_mem(hw,\n-\t\t\t\t\t\t\t &sd_entry->u.pd_table.pd_entry_virt_mem,\n-\t\t\t\t\t\t\t sizeof(struct i40iw_hmc_pd_entry) * 512);\n-\t\t\tif (ret_code)\n-\t\t\t\tgoto exit;\n-\t\t\tsd_entry->u.pd_table.pd_entry = (struct i40iw_hmc_pd_entry *)\n-\t\t\t\t\t\t\t sd_entry->u.pd_table.pd_entry_virt_mem.va;\n-\n-\t\t\tmemcpy(&sd_entry->u.pd_table.pd_page_addr, &mem, sizeof(struct i40iw_dma_mem));\n-\t\t} else {\n-\t\t\tmemcpy(&sd_entry->u.bp.addr, &mem, sizeof(struct i40iw_dma_mem));\n-\t\t\tsd_entry->u.bp.sd_pd_index = sd_index;\n-\t\t}\n-\n-\t\thmc_info->sd_table.sd_entry[sd_index].entry_type = type;\n-\n-\t\tI40IW_INC_SD_REFCNT(&hmc_info->sd_table);\n-\t}\n-\tif (sd_entry->entry_type == I40IW_SD_TYPE_DIRECT)\n-\t\tI40IW_INC_BP_REFCNT(&sd_entry->u.bp);\n-exit:\n-\tif (ret_code)\n-\t\tif (dma_mem_alloc_done)\n-\t\t\ti40iw_free_dma_mem(hw, &mem);\n-\n-\treturn ret_code;\n-}\n-\n-/**\n- * i40iw_add_pd_table_entry - Adds page descriptor to the specified table\n- * @hw: pointer to our HW structure\n- * @hmc_info: pointer to the HMC configuration information structure\n- * @pd_index: which page descriptor index to manipulate\n- * @rsrc_pg: if not NULL, use preallocated page instead of allocating new one.\n- *\n- * This function:\n- *\t1. Initializes the pd entry\n- *\t2. Adds pd_entry in the pd_table\n- *\t3. Mark the entry valid in i40iw_hmc_pd_entry structure\n- *\t4. Initializes the pd_entry's ref count to 1\n- * assumptions:\n- *\t1. The memory for pd should be pinned down, physically contiguous and\n- *\t aligned on 4K boundary and zeroed memory.\n- *\t2. 
It should be 4K in size.\n- */\n-enum i40iw_status_code i40iw_add_pd_table_entry(struct i40iw_hw *hw,\n-\t\t\t\t\t\tstruct i40iw_hmc_info *hmc_info,\n-\t\t\t\t\t\tu32 pd_index,\n-\t\t\t\t\t\tstruct i40iw_dma_mem *rsrc_pg)\n-{\n-\tenum i40iw_status_code ret_code = 0;\n-\tstruct i40iw_hmc_pd_table *pd_table;\n-\tstruct i40iw_hmc_pd_entry *pd_entry;\n-\tstruct i40iw_dma_mem mem;\n-\tstruct i40iw_dma_mem *page = &mem;\n-\tu32 sd_idx, rel_pd_idx;\n-\tu64 *pd_addr;\n-\tu64 page_desc;\n-\n-\tif (pd_index / I40IW_HMC_PD_CNT_IN_SD >= hmc_info->sd_table.sd_cnt)\n-\t\treturn I40IW_ERR_INVALID_PAGE_DESC_INDEX;\n-\n-\tsd_idx = (pd_index / I40IW_HMC_PD_CNT_IN_SD);\n-\tif (hmc_info->sd_table.sd_entry[sd_idx].entry_type != I40IW_SD_TYPE_PAGED)\n-\t\treturn 0;\n-\n-\trel_pd_idx = (pd_index % I40IW_HMC_PD_CNT_IN_SD);\n-\tpd_table = &hmc_info->sd_table.sd_entry[sd_idx].u.pd_table;\n-\tpd_entry = &pd_table->pd_entry[rel_pd_idx];\n-\tif (!pd_entry->valid) {\n-\t\tif (rsrc_pg) {\n-\t\t\tpd_entry->rsrc_pg = true;\n-\t\t\tpage = rsrc_pg;\n-\t\t} else {\n-\t\t\tret_code = i40iw_allocate_dma_mem(hw, page,\n-\t\t\t\t\t\t\t I40IW_HMC_PAGED_BP_SIZE,\n-\t\t\t\t\t\t\t I40IW_HMC_PD_BP_BUF_ALIGNMENT);\n-\t\t\tif (ret_code)\n-\t\t\t\treturn ret_code;\n-\t\t\tpd_entry->rsrc_pg = false;\n-\t\t}\n-\n-\t\tmemcpy(&pd_entry->bp.addr, page, sizeof(struct i40iw_dma_mem));\n-\t\tpd_entry->bp.sd_pd_index = pd_index;\n-\t\tpd_entry->bp.entry_type = I40IW_SD_TYPE_PAGED;\n-\t\tpage_desc = page->pa | 0x1;\n-\n-\t\tpd_addr = (u64 *)pd_table->pd_page_addr.va;\n-\t\tpd_addr += rel_pd_idx;\n-\n-\t\tmemcpy(pd_addr, &page_desc, sizeof(*pd_addr));\n-\n-\t\tpd_entry->sd_index = sd_idx;\n-\t\tpd_entry->valid = true;\n-\t\tI40IW_INC_PD_REFCNT(pd_table);\n-\t\tif (hmc_info->hmc_fn_id < I40IW_FIRST_VF_FPM_ID)\n-\t\t\tI40IW_INVALIDATE_PF_HMC_PD(hw, sd_idx, rel_pd_idx);\n-\t\telse if (hw->hmc.hmc_fn_id != hmc_info->hmc_fn_id)\n-\t\t\tI40IW_INVALIDATE_VF_HMC_PD(hw, sd_idx, rel_pd_idx,\n-\t\t\t\t\t\t hmc_info->hmc_fn_id);\n-\t}\n-\tI40IW_INC_BP_REFCNT(&pd_entry->bp);\n-\n-\treturn 0;\n-}\n-\n-/**\n- * i40iw_remove_pd_bp - remove a backing page from a page descriptor\n- * @hw: pointer to our HW structure\n- * @hmc_info: pointer to the HMC configuration information structure\n- * @idx: the page index\n- * @is_pf: distinguishes a VF from a PF\n- *\n- * This function:\n- *\t1. Marks the entry in pd table (for paged address mode) or in sd table\n- *\t (for direct address mode) invalid.\n- *\t2. Write to register PMPDINV to invalidate the backing page in FV cache\n- *\t3. Decrement the ref count for the pd _entry\n- * assumptions:\n- *\t1. 
Caller can deallocate the memory used by backing storage after this\n- *\t function returns.\n- */\n-enum i40iw_status_code i40iw_remove_pd_bp(struct i40iw_hw *hw,\n-\t\t\t\t\t struct i40iw_hmc_info *hmc_info,\n-\t\t\t\t\t u32 idx,\n-\t\t\t\t\t bool is_pf)\n-{\n-\tstruct i40iw_hmc_pd_entry *pd_entry;\n-\tstruct i40iw_hmc_pd_table *pd_table;\n-\tstruct i40iw_hmc_sd_entry *sd_entry;\n-\tu32 sd_idx, rel_pd_idx;\n-\tstruct i40iw_dma_mem *mem;\n-\tu64 *pd_addr;\n-\n-\tsd_idx = idx / I40IW_HMC_PD_CNT_IN_SD;\n-\trel_pd_idx = idx % I40IW_HMC_PD_CNT_IN_SD;\n-\tif (sd_idx >= hmc_info->sd_table.sd_cnt)\n-\t\treturn I40IW_ERR_INVALID_PAGE_DESC_INDEX;\n-\n-\tsd_entry = &hmc_info->sd_table.sd_entry[sd_idx];\n-\tif (sd_entry->entry_type != I40IW_SD_TYPE_PAGED)\n-\t\treturn I40IW_ERR_INVALID_SD_TYPE;\n-\n-\tpd_table = &hmc_info->sd_table.sd_entry[sd_idx].u.pd_table;\n-\tpd_entry = &pd_table->pd_entry[rel_pd_idx];\n-\tI40IW_DEC_BP_REFCNT(&pd_entry->bp);\n-\tif (pd_entry->bp.ref_cnt)\n-\t\treturn 0;\n-\n-\tpd_entry->valid = false;\n-\tI40IW_DEC_PD_REFCNT(pd_table);\n-\tpd_addr = (u64 *)pd_table->pd_page_addr.va;\n-\tpd_addr += rel_pd_idx;\n-\tmemset(pd_addr, 0, sizeof(u64));\n-\tif (is_pf)\n-\t\tI40IW_INVALIDATE_PF_HMC_PD(hw, sd_idx, idx);\n-\telse\n-\t\tI40IW_INVALIDATE_VF_HMC_PD(hw, sd_idx, idx,\n-\t\t\t\t\t hmc_info->hmc_fn_id);\n-\n-\tif (!pd_entry->rsrc_pg) {\n-\t\tmem = &pd_entry->bp.addr;\n-\t\tif (!mem || !mem->va)\n-\t\t\treturn I40IW_ERR_PARAM;\n-\t\ti40iw_free_dma_mem(hw, mem);\n-\t}\n-\tif (!pd_table->ref_cnt)\n-\t\ti40iw_free_virt_mem(hw, &pd_table->pd_entry_virt_mem);\n-\n-\treturn 0;\n-}\n-\n-/**\n- * i40iw_prep_remove_sd_bp - Prepares to remove a backing page from a sd entry\n- * @hmc_info: pointer to the HMC configuration information structure\n- * @idx: the page index\n- */\n-enum i40iw_status_code i40iw_prep_remove_sd_bp(struct i40iw_hmc_info *hmc_info, u32 idx)\n-{\n-\tstruct i40iw_hmc_sd_entry *sd_entry;\n-\n-\tsd_entry = &hmc_info->sd_table.sd_entry[idx];\n-\tI40IW_DEC_BP_REFCNT(&sd_entry->u.bp);\n-\tif (sd_entry->u.bp.ref_cnt)\n-\t\treturn I40IW_ERR_NOT_READY;\n-\n-\tI40IW_DEC_SD_REFCNT(&hmc_info->sd_table);\n-\tsd_entry->valid = false;\n-\n-\treturn 0;\n-}\n-\n-/**\n- * i40iw_prep_remove_pd_page - Prepares to remove a PD page from sd entry.\n- * @hmc_info: pointer to the HMC configuration information structure\n- * @idx: segment descriptor index to find the relevant page descriptor\n- */\n-enum i40iw_status_code i40iw_prep_remove_pd_page(struct i40iw_hmc_info *hmc_info,\n-\t\t\t\t\t\t u32 idx)\n-{\n-\tstruct i40iw_hmc_sd_entry *sd_entry;\n-\n-\tsd_entry = &hmc_info->sd_table.sd_entry[idx];\n-\n-\tif (sd_entry->u.pd_table.ref_cnt)\n-\t\treturn I40IW_ERR_NOT_READY;\n-\n-\tsd_entry->valid = false;\n-\tI40IW_DEC_SD_REFCNT(&hmc_info->sd_table);\n-\n-\treturn 0;\n-}\n-\n-/**\n- * i40iw_pf_init_vfhmc -\n- * @vf_cnt_array: array of cnt values of iwarp hmc objects\n- * @vf_hmc_fn_id: hmc function id ofr vf driver\n- * @dev: pointer to i40iw_dev struct\n- *\n- * Called by pf driver to initialize hmc_info for vf driver instance.\n- */\n-enum i40iw_status_code i40iw_pf_init_vfhmc(struct i40iw_sc_dev *dev,\n-\t\t\t\t\t u8 vf_hmc_fn_id,\n-\t\t\t\t\t u32 *vf_cnt_array)\n-{\n-\tstruct i40iw_hmc_info *hmc_info;\n-\tenum i40iw_status_code ret_code = 0;\n-\tu32 i;\n-\n-\tif ((vf_hmc_fn_id < I40IW_FIRST_VF_FPM_ID) ||\n-\t (vf_hmc_fn_id >= I40IW_FIRST_VF_FPM_ID +\n-\t I40IW_MAX_PE_ENABLED_VF_COUNT)) {\n-\t\ti40iw_debug(dev, I40IW_DEBUG_HMC, \"%s: invalid vf_hmc_fn_id 0x%x\\n\",\n-\t\t\t __func__, 
vf_hmc_fn_id);\n-\t\treturn I40IW_ERR_INVALID_HMCFN_ID;\n-\t}\n-\n-\tret_code = i40iw_sc_init_iw_hmc(dev, vf_hmc_fn_id);\n-\tif (ret_code)\n-\t\treturn ret_code;\n-\n-\thmc_info = i40iw_vf_hmcinfo_from_fpm(dev, vf_hmc_fn_id);\n-\n-\tfor (i = I40IW_HMC_IW_QP; i < I40IW_HMC_IW_MAX; i++)\n-\t\tif (vf_cnt_array)\n-\t\t\thmc_info->hmc_obj[i].cnt =\n-\t\t\t vf_cnt_array[i - I40IW_HMC_IW_QP];\n-\t\telse\n-\t\t\thmc_info->hmc_obj[i].cnt = hmc_info->hmc_obj[i].max_cnt;\n-\n-\treturn 0;\n-}\ndiff --git a/drivers/infiniband/hw/i40iw/i40iw_hmc.h b/drivers/infiniband/hw/i40iw/i40iw_hmc.h\ndeleted file mode 100644\nindex 4c3fdd8..0000000\n--- a/drivers/infiniband/hw/i40iw/i40iw_hmc.h\n+++ /dev/null\n@@ -1,241 +0,0 @@\n-/*******************************************************************************\n-*\n-* Copyright (c) 2015-2016 Intel Corporation. All rights reserved.\n-*\n-* This software is available to you under a choice of one of two\n-* licenses. You may choose to be licensed under the terms of the GNU\n-* General Public License (GPL) Version 2, available from the file\n-* COPYING in the main directory of this source tree, or the\n-* OpenFabrics.org BSD license below:\n-*\n-* Redistribution and use in source and binary forms, with or\n-* without modification, are permitted provided that the following\n-* conditions are met:\n-*\n-* - Redistributions of source code must retain the above\n-*\tcopyright notice, this list of conditions and the following\n-*\tdisclaimer.\n-*\n-* - Redistributions in binary form must reproduce the above\n-*\tcopyright notice, this list of conditions and the following\n-*\tdisclaimer in the documentation and/or other materials\n-*\tprovided with the distribution.\n-*\n-* THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,\n-* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\n-* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\n-* NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS\n-* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN\n-* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\n-* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n-* SOFTWARE.\n-*\n-*******************************************************************************/\n-\n-#ifndef I40IW_HMC_H\n-#define I40IW_HMC_H\n-\n-#include \"i40iw_d.h\"\n-\n-struct i40iw_hw;\n-enum i40iw_status_code;\n-\n-#define I40IW_HMC_MAX_BP_COUNT 512\n-#define I40IW_MAX_SD_ENTRIES 11\n-#define I40IW_HW_DBG_HMC_INVALID_BP_MARK 0xCA\n-\n-#define I40IW_HMC_INFO_SIGNATURE\t0x484D5347\n-#define I40IW_HMC_PD_CNT_IN_SD\t\t512\n-#define I40IW_HMC_DIRECT_BP_SIZE\t0x200000\n-#define I40IW_HMC_MAX_SD_COUNT\t\t4096\n-#define I40IW_HMC_PAGED_BP_SIZE\t\t4096\n-#define I40IW_HMC_PD_BP_BUF_ALIGNMENT\t4096\n-#define I40IW_FIRST_VF_FPM_ID\t\t16\n-#define FPM_MULTIPLIER\t\t\t1024\n-\n-#define I40IW_INC_SD_REFCNT(sd_table) ((sd_table)->ref_cnt++)\n-#define I40IW_INC_PD_REFCNT(pd_table) ((pd_table)->ref_cnt++)\n-#define I40IW_INC_BP_REFCNT(bp) ((bp)->ref_cnt++)\n-\n-#define I40IW_DEC_SD_REFCNT(sd_table) ((sd_table)->ref_cnt--)\n-#define I40IW_DEC_PD_REFCNT(pd_table) ((pd_table)->ref_cnt--)\n-#define I40IW_DEC_BP_REFCNT(bp) ((bp)->ref_cnt--)\n-\n-/**\n- * I40IW_INVALIDATE_PF_HMC_PD - Invalidates the pd cache in the hardware\n- * @hw: pointer to our hw struct\n- * @sd_idx: segment descriptor index\n- * @pd_idx: page descriptor index\n- */\n-#define I40IW_INVALIDATE_PF_HMC_PD(hw, sd_idx, pd_idx) \\\n-\ti40iw_wr32((hw), I40E_PFHMC_PDINV, \\\n-\t\t(((sd_idx) << I40E_PFHMC_PDINV_PMSDIDX_SHIFT) | \\\n-\t\t(0x1 << I40E_PFHMC_PDINV_PMSDPARTSEL_SHIFT) | \\\n-\t\t((pd_idx) << I40E_PFHMC_PDINV_PMPDIDX_SHIFT)))\n-\n-/**\n- * I40IW_INVALIDATE_VF_HMC_PD - Invalidates the pd cache in the hardware\n- * @hw: pointer to our hw struct\n- * @sd_idx: segment descriptor index\n- * @pd_idx: page descriptor index\n- * @hmc_fn_id: VF's function id\n- */\n-#define I40IW_INVALIDATE_VF_HMC_PD(hw, sd_idx, pd_idx, hmc_fn_id) \\\n-\ti40iw_wr32(hw, I40E_GLHMC_VFPDINV(hmc_fn_id - I40IW_FIRST_VF_FPM_ID), \\\n-\t ((sd_idx << I40E_PFHMC_PDINV_PMSDIDX_SHIFT) | \\\n-\t (pd_idx << I40E_PFHMC_PDINV_PMPDIDX_SHIFT)))\n-\n-struct i40iw_hmc_obj_info {\n-\tu64 base;\n-\tu32 max_cnt;\n-\tu32 cnt;\n-\tu64 size;\n-};\n-\n-enum i40iw_sd_entry_type {\n-\tI40IW_SD_TYPE_INVALID = 0,\n-\tI40IW_SD_TYPE_PAGED = 1,\n-\tI40IW_SD_TYPE_DIRECT = 2\n-};\n-\n-struct i40iw_hmc_bp {\n-\tenum i40iw_sd_entry_type entry_type;\n-\tstruct i40iw_dma_mem addr;\n-\tu32 sd_pd_index;\n-\tu32 ref_cnt;\n-};\n-\n-struct i40iw_hmc_pd_entry {\n-\tstruct i40iw_hmc_bp bp;\n-\tu32 sd_index;\n-\tbool rsrc_pg;\n-\tbool valid;\n-};\n-\n-struct i40iw_hmc_pd_table {\n-\tstruct i40iw_dma_mem pd_page_addr;\n-\tstruct i40iw_hmc_pd_entry *pd_entry;\n-\tstruct i40iw_virt_mem pd_entry_virt_mem;\n-\tu32 ref_cnt;\n-\tu32 sd_index;\n-};\n-\n-struct i40iw_hmc_sd_entry {\n-\tenum i40iw_sd_entry_type entry_type;\n-\tbool valid;\n-\n-\tunion {\n-\t\tstruct i40iw_hmc_pd_table pd_table;\n-\t\tstruct i40iw_hmc_bp bp;\n-\t} u;\n-};\n-\n-struct i40iw_hmc_sd_table {\n-\tstruct i40iw_virt_mem addr;\n-\tu32 sd_cnt;\n-\tu32 ref_cnt;\n-\tstruct i40iw_hmc_sd_entry *sd_entry;\n-};\n-\n-struct i40iw_hmc_info {\n-\tu32 signature;\n-\tu8 hmc_fn_id;\n-\tu16 first_sd_index;\n-\n-\tstruct i40iw_hmc_obj_info *hmc_obj;\n-\tstruct i40iw_virt_mem hmc_obj_virt_mem;\n-\tstruct i40iw_hmc_sd_table sd_table;\n-\tu16 
sd_indexes[I40IW_HMC_MAX_SD_COUNT];\n-};\n-\n-struct update_sd_entry {\n-\tu64 cmd;\n-\tu64 data;\n-};\n-\n-struct i40iw_update_sds_info {\n-\tu32 cnt;\n-\tu8 hmc_fn_id;\n-\tstruct update_sd_entry entry[I40IW_MAX_SD_ENTRIES];\n-};\n-\n-struct i40iw_ccq_cqe_info;\n-struct i40iw_hmc_fcn_info {\n-\tvoid (*callback_fcn)(struct i40iw_sc_dev *, void *,\n-\t\t\t struct i40iw_ccq_cqe_info *);\n-\tvoid *cqp_callback_param;\n-\tu32 vf_id;\n-\tu16 iw_vf_idx;\n-\tbool free_fcn;\n-};\n-\n-enum i40iw_hmc_rsrc_type {\n-\tI40IW_HMC_IW_QP = 0,\n-\tI40IW_HMC_IW_CQ = 1,\n-\tI40IW_HMC_IW_SRQ = 2,\n-\tI40IW_HMC_IW_HTE = 3,\n-\tI40IW_HMC_IW_ARP = 4,\n-\tI40IW_HMC_IW_APBVT_ENTRY = 5,\n-\tI40IW_HMC_IW_MR = 6,\n-\tI40IW_HMC_IW_XF = 7,\n-\tI40IW_HMC_IW_XFFL = 8,\n-\tI40IW_HMC_IW_Q1 = 9,\n-\tI40IW_HMC_IW_Q1FL = 10,\n-\tI40IW_HMC_IW_TIMER = 11,\n-\tI40IW_HMC_IW_FSIMC = 12,\n-\tI40IW_HMC_IW_FSIAV = 13,\n-\tI40IW_HMC_IW_PBLE = 14,\n-\tI40IW_HMC_IW_MAX = 15,\n-};\n-\n-struct i40iw_hmc_create_obj_info {\n-\tstruct i40iw_hmc_info *hmc_info;\n-\tstruct i40iw_virt_mem add_sd_virt_mem;\n-\tu32 rsrc_type;\n-\tu32 start_idx;\n-\tu32 count;\n-\tu32 add_sd_cnt;\n-\tenum i40iw_sd_entry_type entry_type;\n-\tbool is_pf;\n-};\n-\n-struct i40iw_hmc_del_obj_info {\n-\tstruct i40iw_hmc_info *hmc_info;\n-\tstruct i40iw_virt_mem del_sd_virt_mem;\n-\tu32 rsrc_type;\n-\tu32 start_idx;\n-\tu32 count;\n-\tu32 del_sd_cnt;\n-\tbool is_pf;\n-};\n-\n-enum i40iw_status_code i40iw_copy_dma_mem(struct i40iw_hw *hw, void *dest_buf,\n-\t\t\t\t\t struct i40iw_dma_mem *src_mem, u64 src_offset, u64 size);\n-enum i40iw_status_code i40iw_sc_create_hmc_obj(struct i40iw_sc_dev *dev,\n-\t\t\t\t\t struct i40iw_hmc_create_obj_info *info);\n-enum i40iw_status_code i40iw_sc_del_hmc_obj(struct i40iw_sc_dev *dev,\n-\t\t\t\t\t struct i40iw_hmc_del_obj_info *info,\n-\t\t\t\t\t bool reset);\n-enum i40iw_status_code i40iw_hmc_sd_one(struct i40iw_sc_dev *dev, u8 hmc_fn_id,\n-\t\t\t\t\tu64 pa, u32 sd_idx, enum i40iw_sd_entry_type type,\n-\t\t\t\t\tbool setsd);\n-enum i40iw_status_code i40iw_update_sds_noccq(struct i40iw_sc_dev *dev,\n-\t\t\t\t\t struct i40iw_update_sds_info *info);\n-struct i40iw_vfdev *i40iw_vfdev_from_fpm(struct i40iw_sc_dev *dev, u8 hmc_fn_id);\n-struct i40iw_hmc_info *i40iw_vf_hmcinfo_from_fpm(struct i40iw_sc_dev *dev,\n-\t\t\t\t\t\t u8 hmc_fn_id);\n-enum i40iw_status_code i40iw_add_sd_table_entry(struct i40iw_hw *hw,\n-\t\t\t\t\t\tstruct i40iw_hmc_info *hmc_info, u32 sd_index,\n-\t\t\t\t\t\tenum i40iw_sd_entry_type type, u64 direct_mode_sz);\n-enum i40iw_status_code i40iw_add_pd_table_entry(struct i40iw_hw *hw,\n-\t\t\t\t\t\tstruct i40iw_hmc_info *hmc_info, u32 pd_index,\n-\t\t\t\t\t\tstruct i40iw_dma_mem *rsrc_pg);\n-enum i40iw_status_code i40iw_remove_pd_bp(struct i40iw_hw *hw,\n-\t\t\t\t\t struct i40iw_hmc_info *hmc_info, u32 idx, bool is_pf);\n-enum i40iw_status_code i40iw_prep_remove_sd_bp(struct i40iw_hmc_info *hmc_info, u32 idx);\n-enum i40iw_status_code i40iw_prep_remove_pd_page(struct i40iw_hmc_info *hmc_info, u32 idx);\n-\n-#define ENTER_SHARED_FUNCTION()\n-#define EXIT_SHARED_FUNCTION()\n-\n-#endif\t\t\t\t/* I40IW_HMC_H */\ndiff --git a/drivers/infiniband/hw/i40iw/i40iw_hw.c b/drivers/infiniband/hw/i40iw/i40iw_hw.c\ndeleted file mode 100644\nindex 55a1fbf..0000000\n--- a/drivers/infiniband/hw/i40iw/i40iw_hw.c\n+++ /dev/null\n@@ -1,852 +0,0 @@\n-/*******************************************************************************\n-*\n-* Copyright (c) 2015-2016 Intel Corporation. 
All rights reserved.\n-*\n-* This software is available to you under a choice of one of two\n-* licenses. You may choose to be licensed under the terms of the GNU\n-* General Public License (GPL) Version 2, available from the file\n-* COPYING in the main directory of this source tree, or the\n-* OpenFabrics.org BSD license below:\n-*\n-* Redistribution and use in source and binary forms, with or\n-* without modification, are permitted provided that the following\n-* conditions are met:\n-*\n-* - Redistributions of source code must retain the above\n-*\tcopyright notice, this list of conditions and the following\n-*\tdisclaimer.\n-*\n-* - Redistributions in binary form must reproduce the above\n-*\tcopyright notice, this list of conditions and the following\n-*\tdisclaimer in the documentation and/or other materials\n-*\tprovided with the distribution.\n-*\n-* THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,\n-* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\n-* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\n-* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS\n-* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN\n-* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\n-* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n-* SOFTWARE.\n-*\n-*******************************************************************************/\n-\n-#include <linux/module.h>\n-#include <linux/moduleparam.h>\n-#include <linux/netdevice.h>\n-#include <linux/etherdevice.h>\n-#include <linux/ip.h>\n-#include <linux/tcp.h>\n-#include <linux/if_vlan.h>\n-\n-#include \"i40iw.h\"\n-\n-/**\n- * i40iw_initialize_hw_resources - initialize hw resource during open\n- * @iwdev: iwarp device\n- */\n-u32 i40iw_initialize_hw_resources(struct i40iw_device *iwdev)\n-{\n-\tunsigned long num_pds;\n-\tu32 resources_size;\n-\tu32 max_mr;\n-\tu32 max_qp;\n-\tu32 max_cq;\n-\tu32 arp_table_size;\n-\tu32 mrdrvbits;\n-\tvoid *resource_ptr;\n-\n-\tmax_qp = iwdev->sc_dev.hmc_info->hmc_obj[I40IW_HMC_IW_QP].cnt;\n-\tmax_cq = iwdev->sc_dev.hmc_info->hmc_obj[I40IW_HMC_IW_CQ].cnt;\n-\tmax_mr = iwdev->sc_dev.hmc_info->hmc_obj[I40IW_HMC_IW_MR].cnt;\n-\tarp_table_size = iwdev->sc_dev.hmc_info->hmc_obj[I40IW_HMC_IW_ARP].cnt;\n-\tiwdev->max_cqe = 0xFFFFF;\n-\tnum_pds = I40IW_MAX_PDS;\n-\tresources_size = sizeof(struct i40iw_arp_entry) * arp_table_size;\n-\tresources_size += sizeof(unsigned long) * BITS_TO_LONGS(max_qp);\n-\tresources_size += sizeof(unsigned long) * BITS_TO_LONGS(max_mr);\n-\tresources_size += sizeof(unsigned long) * BITS_TO_LONGS(max_cq);\n-\tresources_size += sizeof(unsigned long) * BITS_TO_LONGS(num_pds);\n-\tresources_size += sizeof(unsigned long) * BITS_TO_LONGS(arp_table_size);\n-\tresources_size += sizeof(struct i40iw_qp **) * max_qp;\n-\tiwdev->mem_resources = kzalloc(resources_size, GFP_KERNEL);\n-\n-\tif (!iwdev->mem_resources)\n-\t\treturn -ENOMEM;\n-\n-\tiwdev->max_qp = max_qp;\n-\tiwdev->max_mr = max_mr;\n-\tiwdev->max_cq = max_cq;\n-\tiwdev->max_pd = num_pds;\n-\tiwdev->arp_table_size = arp_table_size;\n-\tiwdev->arp_table = (struct i40iw_arp_entry *)iwdev->mem_resources;\n-\tresource_ptr = iwdev->mem_resources + (sizeof(struct i40iw_arp_entry) * arp_table_size);\n-\n-\tiwdev->device_cap_flags = IB_DEVICE_LOCAL_DMA_LKEY |\n-\t IB_DEVICE_MEM_WINDOW | IB_DEVICE_MEM_MGT_EXTENSIONS;\n-\n-\tiwdev->allocated_qps = resource_ptr;\n-\tiwdev->allocated_cqs = &iwdev->allocated_qps[BITS_TO_LONGS(max_qp)];\n-\tiwdev->allocated_mrs 
= &iwdev->allocated_cqs[BITS_TO_LONGS(max_cq)];\n-\tiwdev->allocated_pds = &iwdev->allocated_mrs[BITS_TO_LONGS(max_mr)];\n-\tiwdev->allocated_arps = &iwdev->allocated_pds[BITS_TO_LONGS(num_pds)];\n-\tiwdev->qp_table = (struct i40iw_qp **)(&iwdev->allocated_arps[BITS_TO_LONGS(arp_table_size)]);\n-\tset_bit(0, iwdev->allocated_mrs);\n-\tset_bit(0, iwdev->allocated_qps);\n-\tset_bit(0, iwdev->allocated_cqs);\n-\tset_bit(0, iwdev->allocated_pds);\n-\tset_bit(0, iwdev->allocated_arps);\n-\n-\t/* Following for ILQ/IEQ */\n-\tset_bit(1, iwdev->allocated_qps);\n-\tset_bit(1, iwdev->allocated_cqs);\n-\tset_bit(1, iwdev->allocated_pds);\n-\tset_bit(2, iwdev->allocated_cqs);\n-\tset_bit(2, iwdev->allocated_pds);\n-\n-\tspin_lock_init(&iwdev->resource_lock);\n-\tspin_lock_init(&iwdev->qptable_lock);\n-\t/* stag index mask has a minimum of 14 bits */\n-\tmrdrvbits = 24 - max(get_count_order(iwdev->max_mr), 14);\n-\tiwdev->mr_stagmask = ~(((1 << mrdrvbits) - 1) << (32 - mrdrvbits));\n-\treturn 0;\n-}\n-\n-/**\n- * i40iw_cqp_ce_handler - handle cqp completions\n- * @iwdev: iwarp device\n- * @arm: flag to arm after completions\n- * @cq: cq for cqp completions\n- */\n-static void i40iw_cqp_ce_handler(struct i40iw_device *iwdev, struct i40iw_sc_cq *cq, bool arm)\n-{\n-\tstruct i40iw_cqp_request *cqp_request;\n-\tstruct i40iw_sc_dev *dev = &iwdev->sc_dev;\n-\tu32 cqe_count = 0;\n-\tstruct i40iw_ccq_cqe_info info;\n-\tint ret;\n-\n-\tdo {\n-\t\tmemset(&info, 0, sizeof(info));\n-\t\tret = dev->ccq_ops->ccq_get_cqe_info(cq, &info);\n-\t\tif (ret)\n-\t\t\tbreak;\n-\t\tcqp_request = (struct i40iw_cqp_request *)(unsigned long)info.scratch;\n-\t\tif (info.error)\n-\t\t\ti40iw_pr_err(\"opcode = 0x%x maj_err_code = 0x%x min_err_code = 0x%x\\n\",\n-\t\t\t\t info.op_code, info.maj_err_code, info.min_err_code);\n-\t\tif (cqp_request) {\n-\t\t\tcqp_request->compl_info.maj_err_code = info.maj_err_code;\n-\t\t\tcqp_request->compl_info.min_err_code = info.min_err_code;\n-\t\t\tcqp_request->compl_info.op_ret_val = info.op_ret_val;\n-\t\t\tcqp_request->compl_info.error = info.error;\n-\n-\t\t\tif (cqp_request->waiting) {\n-\t\t\t\tcqp_request->request_done = true;\n-\t\t\t\twake_up(&cqp_request->waitq);\n-\t\t\t\ti40iw_put_cqp_request(&iwdev->cqp, cqp_request);\n-\t\t\t} else {\n-\t\t\t\tif (cqp_request->callback_fcn)\n-\t\t\t\t\tcqp_request->callback_fcn(cqp_request, 1);\n-\t\t\t\ti40iw_put_cqp_request(&iwdev->cqp, cqp_request);\n-\t\t\t}\n-\t\t}\n-\n-\t\tcqe_count++;\n-\t} while (1);\n-\n-\tif (arm && cqe_count) {\n-\t\ti40iw_process_bh(dev);\n-\t\tdev->ccq_ops->ccq_arm(cq);\n-\t}\n-}\n-\n-/**\n- * i40iw_iwarp_ce_handler - handle iwarp completions\n- * @iwdev: iwarp device\n- * @iwcp: iwarp cq receiving event\n- */\n-static void i40iw_iwarp_ce_handler(struct i40iw_device *iwdev,\n-\t\t\t\t struct i40iw_sc_cq *iwcq)\n-{\n-\tstruct i40iw_cq *i40iwcq = iwcq->back_cq;\n-\n-\tif (i40iwcq->ibcq.comp_handler)\n-\t\ti40iwcq->ibcq.comp_handler(&i40iwcq->ibcq,\n-\t\t\t\t\t i40iwcq->ibcq.cq_context);\n-}\n-\n-/**\n- * i40iw_puda_ce_handler - handle puda completion events\n- * @iwdev: iwarp device\n- * @cq: puda completion q for event\n- */\n-static void i40iw_puda_ce_handler(struct i40iw_device *iwdev,\n-\t\t\t\t struct i40iw_sc_cq *cq)\n-{\n-\tstruct i40iw_sc_dev *dev = (struct i40iw_sc_dev *)&iwdev->sc_dev;\n-\tenum i40iw_status_code status;\n-\tu32 compl_error;\n-\n-\tdo {\n-\t\tstatus = i40iw_puda_poll_completion(dev, cq, &compl_error);\n-\t\tif (status == I40IW_ERR_QUEUE_EMPTY)\n-\t\t\tbreak;\n-\t\tif (status) 
{\n-\t\t\ti40iw_pr_err(\"puda status = %d\\n\", status);\n-\t\t\tbreak;\n-\t\t}\n-\t\tif (compl_error) {\n-\t\t\ti40iw_pr_err(\"puda compl_err =0x%x\\n\", compl_error);\n-\t\t\tbreak;\n-\t\t}\n-\t} while (1);\n-\n-\tdev->ccq_ops->ccq_arm(cq);\n-}\n-\n-/**\n- * i40iw_process_ceq - handle ceq for completions\n- * @iwdev: iwarp device\n- * @ceq: ceq having cq for completion\n- */\n-void i40iw_process_ceq(struct i40iw_device *iwdev, struct i40iw_ceq *ceq)\n-{\n-\tstruct i40iw_sc_dev *dev = &iwdev->sc_dev;\n-\tstruct i40iw_sc_ceq *sc_ceq;\n-\tstruct i40iw_sc_cq *cq;\n-\tbool arm = true;\n-\n-\tsc_ceq = &ceq->sc_ceq;\n-\tdo {\n-\t\tcq = dev->ceq_ops->process_ceq(dev, sc_ceq);\n-\t\tif (!cq)\n-\t\t\tbreak;\n-\n-\t\tif (cq->cq_type == I40IW_CQ_TYPE_CQP)\n-\t\t\ti40iw_cqp_ce_handler(iwdev, cq, arm);\n-\t\telse if (cq->cq_type == I40IW_CQ_TYPE_IWARP)\n-\t\t\ti40iw_iwarp_ce_handler(iwdev, cq);\n-\t\telse if ((cq->cq_type == I40IW_CQ_TYPE_ILQ) ||\n-\t\t\t (cq->cq_type == I40IW_CQ_TYPE_IEQ))\n-\t\t\ti40iw_puda_ce_handler(iwdev, cq);\n-\t} while (1);\n-}\n-\n-/**\n- * i40iw_next_iw_state - modify qp state\n- * @iwqp: iwarp qp to modify\n- * @state: next state for qp\n- * @del_hash: del hash\n- * @term: term message\n- * @termlen: length of term message\n- */\n-void i40iw_next_iw_state(struct i40iw_qp *iwqp,\n-\t\t\t u8 state,\n-\t\t\t u8 del_hash,\n-\t\t\t u8 term,\n-\t\t\t u8 termlen)\n-{\n-\tstruct i40iw_modify_qp_info info;\n-\n-\tmemset(&info, 0, sizeof(info));\n-\tinfo.next_iwarp_state = state;\n-\tinfo.remove_hash_idx = del_hash;\n-\tinfo.cq_num_valid = true;\n-\tinfo.arp_cache_idx_valid = true;\n-\tinfo.dont_send_term = true;\n-\tinfo.dont_send_fin = true;\n-\tinfo.termlen = termlen;\n-\n-\tif (term & I40IWQP_TERM_SEND_TERM_ONLY)\n-\t\tinfo.dont_send_term = false;\n-\tif (term & I40IWQP_TERM_SEND_FIN_ONLY)\n-\t\tinfo.dont_send_fin = false;\n-\tif (iwqp->sc_qp.term_flags && (state == I40IW_QP_STATE_ERROR))\n-\t\tinfo.reset_tcp_conn = true;\n-\tiwqp->hw_iwarp_state = state;\n-\ti40iw_hw_modify_qp(iwqp->iwdev, iwqp, &info, 0);\n-}\n-\n-/**\n- * i40iw_process_aeq - handle aeq events\n- * @iwdev: iwarp device\n- */\n-void i40iw_process_aeq(struct i40iw_device *iwdev)\n-{\n-\tstruct i40iw_sc_dev *dev = &iwdev->sc_dev;\n-\tstruct i40iw_aeq *aeq = &iwdev->aeq;\n-\tstruct i40iw_sc_aeq *sc_aeq = &aeq->sc_aeq;\n-\tstruct i40iw_aeqe_info aeinfo;\n-\tstruct i40iw_aeqe_info *info = &aeinfo;\n-\tint ret;\n-\tstruct i40iw_qp *iwqp = NULL;\n-\tstruct i40iw_sc_cq *cq = NULL;\n-\tstruct i40iw_cq *iwcq = NULL;\n-\tstruct i40iw_sc_qp *qp = NULL;\n-\tstruct i40iw_qp_host_ctx_info *ctx_info = NULL;\n-\tunsigned long flags;\n-\n-\tu32 aeqcnt = 0;\n-\n-\tif (!sc_aeq->size)\n-\t\treturn;\n-\n-\tdo {\n-\t\tmemset(info, 0, sizeof(*info));\n-\t\tret = dev->aeq_ops->get_next_aeqe(sc_aeq, info);\n-\t\tif (ret)\n-\t\t\tbreak;\n-\n-\t\taeqcnt++;\n-\t\ti40iw_debug(dev, I40IW_DEBUG_AEQ,\n-\t\t\t \"%s ae_id = 0x%x bool qp=%d qp_id = %d\\n\",\n-\t\t\t __func__, info->ae_id, info->qp, info->qp_cq_id);\n-\t\tif (info->qp) {\n-\t\t\tspin_lock_irqsave(&iwdev->qptable_lock, flags);\n-\t\t\tiwqp = iwdev->qp_table[info->qp_cq_id];\n-\t\t\tif (!iwqp) {\n-\t\t\t\tspin_unlock_irqrestore(&iwdev->qptable_lock, flags);\n-\t\t\t\ti40iw_debug(dev, I40IW_DEBUG_AEQ,\n-\t\t\t\t\t \"%s qp_id %d is already freed\\n\",\n-\t\t\t\t\t __func__, info->qp_cq_id);\n-\t\t\t\tcontinue;\n-\t\t\t}\n-\t\t\ti40iw_add_ref(&iwqp->ibqp);\n-\t\t\tspin_unlock_irqrestore(&iwdev->qptable_lock, flags);\n-\t\t\tqp = &iwqp->sc_qp;\n-\t\t\tspin_lock_irqsave(&iwqp->lock, 
flags);\n-\t\t\tiwqp->hw_tcp_state = info->tcp_state;\n-\t\t\tiwqp->hw_iwarp_state = info->iwarp_state;\n-\t\t\tiwqp->last_aeq = info->ae_id;\n-\t\t\tspin_unlock_irqrestore(&iwqp->lock, flags);\n-\t\t\tctx_info = &iwqp->ctx_info;\n-\t\t\tctx_info->err_rq_idx_valid = true;\n-\t\t} else {\n-\t\t\tif (info->ae_id != I40IW_AE_CQ_OPERATION_ERROR)\n-\t\t\t\tcontinue;\n-\t\t}\n-\n-\t\tswitch (info->ae_id) {\n-\t\tcase I40IW_AE_LLP_FIN_RECEIVED:\n-\t\t\tif (qp->term_flags)\n-\t\t\t\tbreak;\n-\t\t\tif (atomic_inc_return(&iwqp->close_timer_started) == 1) {\n-\t\t\t\tiwqp->hw_tcp_state = I40IW_TCP_STATE_CLOSE_WAIT;\n-\t\t\t\tif ((iwqp->hw_tcp_state == I40IW_TCP_STATE_CLOSE_WAIT) &&\n-\t\t\t\t (iwqp->ibqp_state == IB_QPS_RTS)) {\n-\t\t\t\t\ti40iw_next_iw_state(iwqp,\n-\t\t\t\t\t\t\t I40IW_QP_STATE_CLOSING, 0, 0, 0);\n-\t\t\t\t\ti40iw_cm_disconn(iwqp);\n-\t\t\t\t}\n-\t\t\t\tiwqp->cm_id->add_ref(iwqp->cm_id);\n-\t\t\t\ti40iw_schedule_cm_timer(iwqp->cm_node,\n-\t\t\t\t\t\t\t(struct i40iw_puda_buf *)iwqp,\n-\t\t\t\t\t\t\tI40IW_TIMER_TYPE_CLOSE, 1, 0);\n-\t\t\t}\n-\t\t\tbreak;\n-\t\tcase I40IW_AE_LLP_CLOSE_COMPLETE:\n-\t\t\tif (qp->term_flags)\n-\t\t\t\ti40iw_terminate_done(qp, 0);\n-\t\t\telse\n-\t\t\t\ti40iw_cm_disconn(iwqp);\n-\t\t\tbreak;\n-\t\tcase I40IW_AE_BAD_CLOSE:\n-\t\t\t/* fall through */\n-\t\tcase I40IW_AE_RESET_SENT:\n-\t\t\ti40iw_next_iw_state(iwqp, I40IW_QP_STATE_ERROR, 1, 0, 0);\n-\t\t\ti40iw_cm_disconn(iwqp);\n-\t\t\tbreak;\n-\t\tcase I40IW_AE_LLP_CONNECTION_RESET:\n-\t\t\tif (atomic_read(&iwqp->close_timer_started))\n-\t\t\t\tbreak;\n-\t\t\ti40iw_cm_disconn(iwqp);\n-\t\t\tbreak;\n-\t\tcase I40IW_AE_QP_SUSPEND_COMPLETE:\n-\t\t\ti40iw_qp_suspend_resume(dev, &iwqp->sc_qp, false);\n-\t\t\tbreak;\n-\t\tcase I40IW_AE_TERMINATE_SENT:\n-\t\t\ti40iw_terminate_send_fin(qp);\n-\t\t\tbreak;\n-\t\tcase I40IW_AE_LLP_TERMINATE_RECEIVED:\n-\t\t\ti40iw_terminate_received(qp, info);\n-\t\t\tbreak;\n-\t\tcase I40IW_AE_CQ_OPERATION_ERROR:\n-\t\t\ti40iw_pr_err(\"Processing an iWARP related AE for CQ misc = 0x%04X\\n\",\n-\t\t\t\t info->ae_id);\n-\t\t\tcq = (struct i40iw_sc_cq *)(unsigned long)info->compl_ctx;\n-\t\t\tiwcq = (struct i40iw_cq *)cq->back_cq;\n-\n-\t\t\tif (iwcq->ibcq.event_handler) {\n-\t\t\t\tstruct ib_event ibevent;\n-\n-\t\t\t\tibevent.device = iwcq->ibcq.device;\n-\t\t\t\tibevent.event = IB_EVENT_CQ_ERR;\n-\t\t\t\tibevent.element.cq = &iwcq->ibcq;\n-\t\t\t\tiwcq->ibcq.event_handler(&ibevent, iwcq->ibcq.cq_context);\n-\t\t\t}\n-\t\t\tbreak;\n-\t\tcase I40IW_AE_LLP_DOUBT_REACHABILITY:\n-\t\t\tbreak;\n-\t\tcase I40IW_AE_PRIV_OPERATION_DENIED:\n-\t\tcase I40IW_AE_STAG_ZERO_INVALID:\n-\t\tcase I40IW_AE_IB_RREQ_AND_Q1_FULL:\n-\t\tcase I40IW_AE_DDP_UBE_INVALID_DDP_VERSION:\n-\t\tcase I40IW_AE_DDP_UBE_INVALID_MO:\n-\t\tcase I40IW_AE_DDP_UBE_INVALID_QN:\n-\t\tcase I40IW_AE_DDP_NO_L_BIT:\n-\t\tcase I40IW_AE_RDMAP_ROE_INVALID_RDMAP_VERSION:\n-\t\tcase I40IW_AE_RDMAP_ROE_UNEXPECTED_OPCODE:\n-\t\tcase I40IW_AE_ROE_INVALID_RDMA_READ_REQUEST:\n-\t\tcase I40IW_AE_ROE_INVALID_RDMA_WRITE_OR_READ_RESP:\n-\t\tcase I40IW_AE_INVALID_ARP_ENTRY:\n-\t\tcase I40IW_AE_INVALID_TCP_OPTION_RCVD:\n-\t\tcase I40IW_AE_STALE_ARP_ENTRY:\n-\t\tcase I40IW_AE_LLP_RECEIVED_MPA_CRC_ERROR:\n-\t\tcase I40IW_AE_LLP_SEGMENT_TOO_SMALL:\n-\t\tcase I40IW_AE_LLP_SYN_RECEIVED:\n-\t\tcase I40IW_AE_LLP_TOO_MANY_RETRIES:\n-\t\tcase I40IW_AE_LCE_QP_CATASTROPHIC:\n-\t\tcase I40IW_AE_LCE_FUNCTION_CATASTROPHIC:\n-\t\tcase I40IW_AE_LCE_CQ_CATASTROPHIC:\n-\t\tcase I40IW_AE_UDA_XMIT_DGRAM_TOO_LONG:\n-\t\tcase 
I40IW_AE_UDA_XMIT_DGRAM_TOO_SHORT:\n-\t\t\tctx_info->err_rq_idx_valid = false;\n-\t\t\t/* fall through */\n-\t\tdefault:\n-\t\t\tif (!info->sq && ctx_info->err_rq_idx_valid) {\n-\t\t\t\tctx_info->err_rq_idx = info->wqe_idx;\n-\t\t\t\tctx_info->tcp_info_valid = false;\n-\t\t\t\tctx_info->iwarp_info_valid = false;\n-\t\t\t\tret = dev->iw_priv_qp_ops->qp_setctx(&iwqp->sc_qp,\n-\t\t\t\t\t\t\t\t iwqp->host_ctx.va,\n-\t\t\t\t\t\t\t\t ctx_info);\n-\t\t\t}\n-\t\t\ti40iw_terminate_connection(qp, info);\n-\t\t\tbreak;\n-\t\t}\n-\t\tif (info->qp)\n-\t\t\ti40iw_rem_ref(&iwqp->ibqp);\n-\t} while (1);\n-\n-\tif (aeqcnt)\n-\t\tdev->aeq_ops->repost_aeq_entries(dev, aeqcnt);\n-}\n-\n-/**\n- * i40iw_cqp_manage_abvpt_cmd - send cqp command manage abpvt\n- * @iwdev: iwarp device\n- * @accel_local_port: port for apbvt\n- * @add_port: add or delete port\n- */\n-static enum i40iw_status_code\n-i40iw_cqp_manage_abvpt_cmd(struct i40iw_device *iwdev,\n-\t\t\t u16 accel_local_port,\n-\t\t\t bool add_port)\n-{\n-\tstruct i40iw_apbvt_info *info;\n-\tstruct i40iw_cqp_request *cqp_request;\n-\tstruct cqp_commands_info *cqp_info;\n-\tenum i40iw_status_code status;\n-\n-\tcqp_request = i40iw_get_cqp_request(&iwdev->cqp, add_port);\n-\tif (!cqp_request)\n-\t\treturn I40IW_ERR_NO_MEMORY;\n-\n-\tcqp_info = &cqp_request->info;\n-\tinfo = &cqp_info->in.u.manage_apbvt_entry.info;\n-\n-\tmemset(info, 0, sizeof(*info));\n-\tinfo->add = add_port;\n-\tinfo->port = cpu_to_le16(accel_local_port);\n-\n-\tcqp_info->cqp_cmd = OP_MANAGE_APBVT_ENTRY;\n-\tcqp_info->post_sq = 1;\n-\tcqp_info->in.u.manage_apbvt_entry.cqp = &iwdev->cqp.sc_cqp;\n-\tcqp_info->in.u.manage_apbvt_entry.scratch = (uintptr_t)cqp_request;\n-\tstatus = i40iw_handle_cqp_op(iwdev, cqp_request);\n-\tif (status)\n-\t\ti40iw_pr_err(\"CQP-OP Manage APBVT entry fail\");\n-\n-\treturn status;\n-}\n-\n-/**\n- * i40iw_manage_apbvt - add or delete tcp port\n- * @iwdev: iwarp device\n- * @accel_local_port: port for apbvt\n- * @add_port: add or delete port\n- */\n-enum i40iw_status_code i40iw_manage_apbvt(struct i40iw_device *iwdev,\n-\t\t\t\t\t u16 accel_local_port,\n-\t\t\t\t\t bool add_port)\n-{\n-\tstruct i40iw_cm_core *cm_core = &iwdev->cm_core;\n-\tenum i40iw_status_code status;\n-\tunsigned long flags;\n-\tbool in_use;\n-\n-\t/* apbvt_lock is held across CQP delete APBVT OP (non-waiting) to\n-\t * protect against race where add APBVT CQP can race ahead of the delete\n-\t * APBVT for same port.\n-\t */\n-\tif (add_port) {\n-\t\tspin_lock_irqsave(&cm_core->apbvt_lock, flags);\n-\t\tin_use = __test_and_set_bit(accel_local_port,\n-\t\t\t\t\t cm_core->ports_in_use);\n-\t\tspin_unlock_irqrestore(&cm_core->apbvt_lock, flags);\n-\t\tif (in_use)\n-\t\t\treturn 0;\n-\t\treturn i40iw_cqp_manage_abvpt_cmd(iwdev, accel_local_port,\n-\t\t\t\t\t\t true);\n-\t} else {\n-\t\tspin_lock_irqsave(&cm_core->apbvt_lock, flags);\n-\t\tin_use = i40iw_port_in_use(cm_core, accel_local_port);\n-\t\tif (in_use) {\n-\t\t\tspin_unlock_irqrestore(&cm_core->apbvt_lock, flags);\n-\t\t\treturn 0;\n-\t\t}\n-\t\t__clear_bit(accel_local_port, cm_core->ports_in_use);\n-\t\tstatus = i40iw_cqp_manage_abvpt_cmd(iwdev, accel_local_port,\n-\t\t\t\t\t\t false);\n-\t\tspin_unlock_irqrestore(&cm_core->apbvt_lock, flags);\n-\t\treturn status;\n-\t}\n-}\n-\n-/**\n- * i40iw_manage_arp_cache - manage hw arp cache\n- * @iwdev: iwarp device\n- * @mac_addr: mac address ptr\n- * @ip_addr: ip addr for arp cache\n- * @action: add, delete or modify\n- */\n-void i40iw_manage_arp_cache(struct i40iw_device *iwdev,\n-\t\t\t unsigned 
char *mac_addr,\n-\t\t\t u32 *ip_addr,\n-\t\t\t bool ipv4,\n-\t\t\t u32 action)\n-{\n-\tstruct i40iw_add_arp_cache_entry_info *info;\n-\tstruct i40iw_cqp_request *cqp_request;\n-\tstruct cqp_commands_info *cqp_info;\n-\tint arp_index;\n-\n-\tarp_index = i40iw_arp_table(iwdev, ip_addr, ipv4, mac_addr, action);\n-\tif (arp_index == -1)\n-\t\treturn;\n-\tcqp_request = i40iw_get_cqp_request(&iwdev->cqp, false);\n-\tif (!cqp_request)\n-\t\treturn;\n-\n-\tcqp_info = &cqp_request->info;\n-\tif (action == I40IW_ARP_ADD) {\n-\t\tcqp_info->cqp_cmd = OP_ADD_ARP_CACHE_ENTRY;\n-\t\tinfo = &cqp_info->in.u.add_arp_cache_entry.info;\n-\t\tmemset(info, 0, sizeof(*info));\n-\t\tinfo->arp_index = cpu_to_le16((u16)arp_index);\n-\t\tinfo->permanent = true;\n-\t\tether_addr_copy(info->mac_addr, mac_addr);\n-\t\tcqp_info->in.u.add_arp_cache_entry.scratch = (uintptr_t)cqp_request;\n-\t\tcqp_info->in.u.add_arp_cache_entry.cqp = &iwdev->cqp.sc_cqp;\n-\t} else {\n-\t\tcqp_info->cqp_cmd = OP_DELETE_ARP_CACHE_ENTRY;\n-\t\tcqp_info->in.u.del_arp_cache_entry.scratch = (uintptr_t)cqp_request;\n-\t\tcqp_info->in.u.del_arp_cache_entry.cqp = &iwdev->cqp.sc_cqp;\n-\t\tcqp_info->in.u.del_arp_cache_entry.arp_index = arp_index;\n-\t}\n-\n-\tcqp_info->in.u.add_arp_cache_entry.cqp = &iwdev->cqp.sc_cqp;\n-\tcqp_info->in.u.add_arp_cache_entry.scratch = (uintptr_t)cqp_request;\n-\tcqp_info->post_sq = 1;\n-\tif (i40iw_handle_cqp_op(iwdev, cqp_request))\n-\t\ti40iw_pr_err(\"CQP-OP Add/Del Arp Cache entry fail\");\n-}\n-\n-/**\n- * i40iw_send_syn_cqp_callback - do syn/ack after qhash\n- * @cqp_request: qhash cqp completion\n- * @send_ack: flag send ack\n- */\n-static void i40iw_send_syn_cqp_callback(struct i40iw_cqp_request *cqp_request, u32 send_ack)\n-{\n-\ti40iw_send_syn(cqp_request->param, send_ack);\n-}\n-\n-/**\n- * i40iw_manage_qhash - add or modify qhash\n- * @iwdev: iwarp device\n- * @cminfo: cm info for qhash\n- * @etype: type (syn or quad)\n- * @mtype: type of qhash\n- * @cmnode: cmnode associated with connection\n- * @wait: wait for completion\n- * @user_pri:user pri of the connection\n- */\n-enum i40iw_status_code i40iw_manage_qhash(struct i40iw_device *iwdev,\n-\t\t\t\t\t struct i40iw_cm_info *cminfo,\n-\t\t\t\t\t enum i40iw_quad_entry_type etype,\n-\t\t\t\t\t enum i40iw_quad_hash_manage_type mtype,\n-\t\t\t\t\t void *cmnode,\n-\t\t\t\t\t bool wait)\n-{\n-\tstruct i40iw_qhash_table_info *info;\n-\tstruct i40iw_sc_dev *dev = &iwdev->sc_dev;\n-\tstruct i40iw_sc_vsi *vsi = &iwdev->vsi;\n-\tenum i40iw_status_code status;\n-\tstruct i40iw_cqp *iwcqp = &iwdev->cqp;\n-\tstruct i40iw_cqp_request *cqp_request;\n-\tstruct cqp_commands_info *cqp_info;\n-\n-\tcqp_request = i40iw_get_cqp_request(iwcqp, wait);\n-\tif (!cqp_request)\n-\t\treturn I40IW_ERR_NO_MEMORY;\n-\tcqp_info = &cqp_request->info;\n-\tinfo = &cqp_info->in.u.manage_qhash_table_entry.info;\n-\tmemset(info, 0, sizeof(*info));\n-\n-\tinfo->vsi = &iwdev->vsi;\n-\tinfo->manage = mtype;\n-\tinfo->entry_type = etype;\n-\tif (cminfo->vlan_id != 0xFFFF) {\n-\t\tinfo->vlan_valid = true;\n-\t\tinfo->vlan_id = cpu_to_le16(cminfo->vlan_id);\n-\t} else {\n-\t\tinfo->vlan_valid = false;\n-\t}\n-\n-\tinfo->ipv4_valid = cminfo->ipv4;\n-\tinfo->user_pri = cminfo->user_pri;\n-\tether_addr_copy(info->mac_addr, iwdev->netdev->dev_addr);\n-\tinfo->qp_num = cpu_to_le32(vsi->ilq->qp_id);\n-\tinfo->dest_port = cpu_to_le16(cminfo->loc_port);\n-\tinfo->dest_ip[0] = cpu_to_le32(cminfo->loc_addr[0]);\n-\tinfo->dest_ip[1] = cpu_to_le32(cminfo->loc_addr[1]);\n-\tinfo->dest_ip[2] = 
cpu_to_le32(cminfo->loc_addr[2]);\n-\tinfo->dest_ip[3] = cpu_to_le32(cminfo->loc_addr[3]);\n-\tif (etype == I40IW_QHASH_TYPE_TCP_ESTABLISHED) {\n-\t\tinfo->src_port = cpu_to_le16(cminfo->rem_port);\n-\t\tinfo->src_ip[0] = cpu_to_le32(cminfo->rem_addr[0]);\n-\t\tinfo->src_ip[1] = cpu_to_le32(cminfo->rem_addr[1]);\n-\t\tinfo->src_ip[2] = cpu_to_le32(cminfo->rem_addr[2]);\n-\t\tinfo->src_ip[3] = cpu_to_le32(cminfo->rem_addr[3]);\n-\t}\n-\tif (cmnode) {\n-\t\tcqp_request->callback_fcn = i40iw_send_syn_cqp_callback;\n-\t\tcqp_request->param = (void *)cmnode;\n-\t}\n-\n-\tif (info->ipv4_valid)\n-\t\ti40iw_debug(dev, I40IW_DEBUG_CM,\n-\t\t\t \"%s:%s IP=%pI4, port=%d, mac=%pM, vlan_id=%d\\n\",\n-\t\t\t __func__, (!mtype) ? \"DELETE\" : \"ADD\",\n-\t\t\t info->dest_ip,\n-\t\t\t info->dest_port, info->mac_addr, cminfo->vlan_id);\n-\telse\n-\t\ti40iw_debug(dev, I40IW_DEBUG_CM,\n-\t\t\t \"%s:%s IP=%pI6, port=%d, mac=%pM, vlan_id=%d\\n\",\n-\t\t\t __func__, (!mtype) ? \"DELETE\" : \"ADD\",\n-\t\t\t info->dest_ip,\n-\t\t\t info->dest_port, info->mac_addr, cminfo->vlan_id);\n-\tcqp_info->in.u.manage_qhash_table_entry.cqp = &iwdev->cqp.sc_cqp;\n-\tcqp_info->in.u.manage_qhash_table_entry.scratch = (uintptr_t)cqp_request;\n-\tcqp_info->cqp_cmd = OP_MANAGE_QHASH_TABLE_ENTRY;\n-\tcqp_info->post_sq = 1;\n-\tstatus = i40iw_handle_cqp_op(iwdev, cqp_request);\n-\tif (status)\n-\t\ti40iw_pr_err(\"CQP-OP Manage Qhash Entry fail\");\n-\treturn status;\n-}\n-\n-/**\n- * i40iw_hw_flush_wqes - flush qp's wqe\n- * @iwdev: iwarp device\n- * @qp: hardware control qp\n- * @info: info for flush\n- * @wait: flag wait for completion\n- */\n-enum i40iw_status_code i40iw_hw_flush_wqes(struct i40iw_device *iwdev,\n-\t\t\t\t\t struct i40iw_sc_qp *qp,\n-\t\t\t\t\t struct i40iw_qp_flush_info *info,\n-\t\t\t\t\t bool wait)\n-{\n-\tenum i40iw_status_code status;\n-\tstruct i40iw_qp_flush_info *hw_info;\n-\tstruct i40iw_cqp_request *cqp_request;\n-\tstruct cqp_commands_info *cqp_info;\n-\tstruct i40iw_qp *iwqp = (struct i40iw_qp *)qp->back_qp;\n-\n-\tcqp_request = i40iw_get_cqp_request(&iwdev->cqp, wait);\n-\tif (!cqp_request)\n-\t\treturn I40IW_ERR_NO_MEMORY;\n-\n-\tcqp_info = &cqp_request->info;\n-\thw_info = &cqp_request->info.in.u.qp_flush_wqes.info;\n-\tmemcpy(hw_info, info, sizeof(*hw_info));\n-\n-\tcqp_info->cqp_cmd = OP_QP_FLUSH_WQES;\n-\tcqp_info->post_sq = 1;\n-\tcqp_info->in.u.qp_flush_wqes.qp = qp;\n-\tcqp_info->in.u.qp_flush_wqes.scratch = (uintptr_t)cqp_request;\n-\tstatus = i40iw_handle_cqp_op(iwdev, cqp_request);\n-\tif (status) {\n-\t\ti40iw_pr_err(\"CQP-OP Flush WQE's fail\");\n-\t\tcomplete(&iwqp->sq_drained);\n-\t\tcomplete(&iwqp->rq_drained);\n-\t\treturn status;\n-\t}\n-\tif (!cqp_request->compl_info.maj_err_code) {\n-\t\tswitch (cqp_request->compl_info.min_err_code) {\n-\t\tcase I40IW_CQP_COMPL_RQ_WQE_FLUSHED:\n-\t\t\tcomplete(&iwqp->sq_drained);\n-\t\t\tbreak;\n-\t\tcase I40IW_CQP_COMPL_SQ_WQE_FLUSHED:\n-\t\t\tcomplete(&iwqp->rq_drained);\n-\t\t\tbreak;\n-\t\tcase I40IW_CQP_COMPL_RQ_SQ_WQE_FLUSHED:\n-\t\t\tbreak;\n-\t\tdefault:\n-\t\t\tcomplete(&iwqp->sq_drained);\n-\t\t\tcomplete(&iwqp->rq_drained);\n-\t\t\tbreak;\n-\t\t}\n-\t}\n-\n-\treturn 0;\n-}\n-\n-/**\n- * i40iw_gen_ae - generate AE\n- * @iwdev: iwarp device\n- * @qp: qp associated with AE\n- * @info: info for ae\n- * @wait: wait for completion\n- */\n-void i40iw_gen_ae(struct i40iw_device *iwdev,\n-\t\t struct i40iw_sc_qp *qp,\n-\t\t struct i40iw_gen_ae_info *info,\n-\t\t bool wait)\n-{\n-\tstruct i40iw_gen_ae_info *ae_info;\n-\tstruct i40iw_cqp_request 
*cqp_request;\n-\tstruct cqp_commands_info *cqp_info;\n-\n-\tcqp_request = i40iw_get_cqp_request(&iwdev->cqp, wait);\n-\tif (!cqp_request)\n-\t\treturn;\n-\n-\tcqp_info = &cqp_request->info;\n-\tae_info = &cqp_request->info.in.u.gen_ae.info;\n-\tmemcpy(ae_info, info, sizeof(*ae_info));\n-\n-\tcqp_info->cqp_cmd = OP_GEN_AE;\n-\tcqp_info->post_sq = 1;\n-\tcqp_info->in.u.gen_ae.qp = qp;\n-\tcqp_info->in.u.gen_ae.scratch = (uintptr_t)cqp_request;\n-\tif (i40iw_handle_cqp_op(iwdev, cqp_request))\n-\t\ti40iw_pr_err(\"CQP OP failed attempting to generate ae_code=0x%x\\n\",\n-\t\t\t info->ae_code);\n-}\n-\n-/**\n- * i40iw_hw_manage_vf_pble_bp - manage vf pbles\n- * @iwdev: iwarp device\n- * @info: info for managing pble\n- * @wait: flag wait for completion\n- */\n-enum i40iw_status_code i40iw_hw_manage_vf_pble_bp(struct i40iw_device *iwdev,\n-\t\t\t\t\t\t struct i40iw_manage_vf_pble_info *info,\n-\t\t\t\t\t\t bool wait)\n-{\n-\tenum i40iw_status_code status;\n-\tstruct i40iw_manage_vf_pble_info *hw_info;\n-\tstruct i40iw_cqp_request *cqp_request;\n-\tstruct cqp_commands_info *cqp_info;\n-\n-\tif ((iwdev->init_state < CCQ_CREATED) && wait)\n-\t\twait = false;\n-\n-\tcqp_request = i40iw_get_cqp_request(&iwdev->cqp, wait);\n-\tif (!cqp_request)\n-\t\treturn I40IW_ERR_NO_MEMORY;\n-\n-\tcqp_info = &cqp_request->info;\n-\thw_info = &cqp_request->info.in.u.manage_vf_pble_bp.info;\n-\tmemcpy(hw_info, info, sizeof(*hw_info));\n-\n-\tcqp_info->cqp_cmd = OP_MANAGE_VF_PBLE_BP;\n-\tcqp_info->post_sq = 1;\n-\tcqp_info->in.u.manage_vf_pble_bp.cqp = &iwdev->cqp.sc_cqp;\n-\tcqp_info->in.u.manage_vf_pble_bp.scratch = (uintptr_t)cqp_request;\n-\tstatus = i40iw_handle_cqp_op(iwdev, cqp_request);\n-\tif (status)\n-\t\ti40iw_pr_err(\"CQP-OP Manage VF pble_bp fail\");\n-\treturn status;\n-}\n-\n-/**\n- * i40iw_get_ib_wc - return change flush code to IB's\n- * @opcode: iwarp flush code\n- */\n-static enum ib_wc_status i40iw_get_ib_wc(enum i40iw_flush_opcode opcode)\n-{\n-\tswitch (opcode) {\n-\tcase FLUSH_PROT_ERR:\n-\t\treturn IB_WC_LOC_PROT_ERR;\n-\tcase FLUSH_REM_ACCESS_ERR:\n-\t\treturn IB_WC_REM_ACCESS_ERR;\n-\tcase FLUSH_LOC_QP_OP_ERR:\n-\t\treturn IB_WC_LOC_QP_OP_ERR;\n-\tcase FLUSH_REM_OP_ERR:\n-\t\treturn IB_WC_REM_OP_ERR;\n-\tcase FLUSH_LOC_LEN_ERR:\n-\t\treturn IB_WC_LOC_LEN_ERR;\n-\tcase FLUSH_GENERAL_ERR:\n-\t\treturn IB_WC_GENERAL_ERR;\n-\tcase FLUSH_FATAL_ERR:\n-\tdefault:\n-\t\treturn IB_WC_FATAL_ERR;\n-\t}\n-}\n-\n-/**\n- * i40iw_set_flush_info - set flush info\n- * @pinfo: set flush info\n- * @min: minor err\n- * @maj: major err\n- * @opcode: flush error code\n- */\n-static void i40iw_set_flush_info(struct i40iw_qp_flush_info *pinfo,\n-\t\t\t\t u16 *min,\n-\t\t\t\t u16 *maj,\n-\t\t\t\t enum i40iw_flush_opcode opcode)\n-{\n-\t*min = (u16)i40iw_get_ib_wc(opcode);\n-\t*maj = CQE_MAJOR_DRV;\n-\tpinfo->userflushcode = true;\n-}\n-\n-/**\n- * i40iw_flush_wqes - flush wqe for qp\n- * @iwdev: iwarp device\n- * @iwqp: qp to flush wqes\n- */\n-void i40iw_flush_wqes(struct i40iw_device *iwdev, struct i40iw_qp *iwqp)\n-{\n-\tstruct i40iw_qp_flush_info info;\n-\tstruct i40iw_qp_flush_info *pinfo = &info;\n-\n-\tstruct i40iw_sc_qp *qp = &iwqp->sc_qp;\n-\n-\tmemset(pinfo, 0, sizeof(*pinfo));\n-\tinfo.sq = true;\n-\tinfo.rq = true;\n-\tif (qp->term_flags) {\n-\t\ti40iw_set_flush_info(pinfo, &pinfo->sq_minor_code,\n-\t\t\t\t &pinfo->sq_major_code, qp->flush_code);\n-\t\ti40iw_set_flush_info(pinfo, &pinfo->rq_minor_code,\n-\t\t\t\t &pinfo->rq_major_code, qp->flush_code);\n-\t}\n-\t(void)i40iw_hw_flush_wqes(iwdev, 
&iwqp->sc_qp, &info, true);\n-}\ndiff --git a/drivers/infiniband/hw/i40iw/i40iw_main.c b/drivers/infiniband/hw/i40iw/i40iw_main.c\ndeleted file mode 100644\nindex d44cf33d..0000000\n--- a/drivers/infiniband/hw/i40iw/i40iw_main.c\n+++ /dev/null\n@@ -1,2068 +0,0 @@\n-/*******************************************************************************\n-*\n-* Copyright (c) 2015-2016 Intel Corporation. All rights reserved.\n-*\n-* This software is available to you under a choice of one of two\n-* licenses. You may choose to be licensed under the terms of the GNU\n-* General Public License (GPL) Version 2, available from the file\n-* COPYING in the main directory of this source tree, or the\n-* OpenFabrics.org BSD license below:\n-*\n-* Redistribution and use in source and binary forms, with or\n-* without modification, are permitted provided that the following\n-* conditions are met:\n-*\n-* - Redistributions of source code must retain the above\n-*\tcopyright notice, this list of conditions and the following\n-*\tdisclaimer.\n-*\n-* - Redistributions in binary form must reproduce the above\n-*\tcopyright notice, this list of conditions and the following\n-*\tdisclaimer in the documentation and/or other materials\n-*\tprovided with the distribution.\n-*\n-* THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,\n-* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\n-* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\n-* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS\n-* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN\n-* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\n-* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n-* SOFTWARE.\n-*\n-*******************************************************************************/\n-\n-#include <linux/module.h>\n-#include <linux/moduleparam.h>\n-#include <linux/netdevice.h>\n-#include <linux/etherdevice.h>\n-#include <linux/ip.h>\n-#include <linux/tcp.h>\n-#include <linux/if_vlan.h>\n-#include <net/addrconf.h>\n-\n-#include \"i40iw.h\"\n-#include \"i40iw_register.h\"\n-#include <net/netevent.h>\n-#define CLIENT_IW_INTERFACE_VERSION_MAJOR 0\n-#define CLIENT_IW_INTERFACE_VERSION_MINOR 01\n-#define CLIENT_IW_INTERFACE_VERSION_BUILD 00\n-\n-#define DRV_VERSION_MAJOR 0\n-#define DRV_VERSION_MINOR 5\n-#define DRV_VERSION_BUILD 123\n-#define DRV_VERSION\t__stringify(DRV_VERSION_MAJOR) \".\"\t\t\\\n-\t__stringify(DRV_VERSION_MINOR) \".\" __stringify(DRV_VERSION_BUILD)\n-\n-static int push_mode;\n-module_param(push_mode, int, 0644);\n-MODULE_PARM_DESC(push_mode, \"Low latency mode: 0=disabled (default), 1=enabled)\");\n-\n-static int debug;\n-module_param(debug, int, 0644);\n-MODULE_PARM_DESC(debug, \"debug flags: 0=disabled (default), 0x7fffffff=all\");\n-\n-static int resource_profile;\n-module_param(resource_profile, int, 0644);\n-MODULE_PARM_DESC(resource_profile,\n-\t\t \"Resource Profile: 0=no VF RDMA support (default), 1=Weighted VF, 2=Even Distribution\");\n-\n-static int max_rdma_vfs = 32;\n-module_param(max_rdma_vfs, int, 0644);\n-MODULE_PARM_DESC(max_rdma_vfs, \"Maximum VF count: 0-32 32=default\");\n-static int mpa_version = 2;\n-module_param(mpa_version, int, 0644);\n-MODULE_PARM_DESC(mpa_version, \"MPA version to be used in MPA Req/Resp 1 or 2\");\n-\n-MODULE_AUTHOR(\"Intel Corporation, <e1000-rdma@lists.sourceforge.net>\");\n-MODULE_DESCRIPTION(\"Intel(R) Ethernet Connection X722 iWARP RDMA Driver\");\n-MODULE_LICENSE(\"Dual BSD/GPL\");\n-\n-static 
struct i40e_client i40iw_client;\n-static char i40iw_client_name[I40E_CLIENT_STR_LENGTH] = \"i40iw\";\n-\n-static LIST_HEAD(i40iw_handlers);\n-static spinlock_t i40iw_handler_lock;\n-\n-static enum i40iw_status_code i40iw_virtchnl_send(struct i40iw_sc_dev *dev,\n-\t\t\t\t\t\t u32 vf_id, u8 *msg, u16 len);\n-\n-static struct notifier_block i40iw_inetaddr_notifier = {\n-\t.notifier_call = i40iw_inetaddr_event\n-};\n-\n-static struct notifier_block i40iw_inetaddr6_notifier = {\n-\t.notifier_call = i40iw_inet6addr_event\n-};\n-\n-static struct notifier_block i40iw_net_notifier = {\n-\t.notifier_call = i40iw_net_event\n-};\n-\n-static struct notifier_block i40iw_netdevice_notifier = {\n-\t.notifier_call = i40iw_netdevice_event\n-};\n-\n-/**\n- * i40iw_find_i40e_handler - find a handler given a client info\n- * @ldev: pointer to a client info\n- */\n-static struct i40iw_handler *i40iw_find_i40e_handler(struct i40e_info *ldev)\n-{\n-\tstruct i40iw_handler *hdl;\n-\tunsigned long flags;\n-\n-\tspin_lock_irqsave(&i40iw_handler_lock, flags);\n-\tlist_for_each_entry(hdl, &i40iw_handlers, list) {\n-\t\tif (hdl->ldev.netdev == ldev->netdev) {\n-\t\t\tspin_unlock_irqrestore(&i40iw_handler_lock, flags);\n-\t\t\treturn hdl;\n-\t\t}\n-\t}\n-\tspin_unlock_irqrestore(&i40iw_handler_lock, flags);\n-\treturn NULL;\n-}\n-\n-/**\n- * i40iw_find_netdev - find a handler given a netdev\n- * @netdev: pointer to net_device\n- */\n-struct i40iw_handler *i40iw_find_netdev(struct net_device *netdev)\n-{\n-\tstruct i40iw_handler *hdl;\n-\tunsigned long flags;\n-\n-\tspin_lock_irqsave(&i40iw_handler_lock, flags);\n-\tlist_for_each_entry(hdl, &i40iw_handlers, list) {\n-\t\tif (hdl->ldev.netdev == netdev) {\n-\t\t\tspin_unlock_irqrestore(&i40iw_handler_lock, flags);\n-\t\t\treturn hdl;\n-\t\t}\n-\t}\n-\tspin_unlock_irqrestore(&i40iw_handler_lock, flags);\n-\treturn NULL;\n-}\n-\n-/**\n- * i40iw_add_handler - add a handler to the list\n- * @hdl: handler to be added to the handler list\n- */\n-static void i40iw_add_handler(struct i40iw_handler *hdl)\n-{\n-\tunsigned long flags;\n-\n-\tspin_lock_irqsave(&i40iw_handler_lock, flags);\n-\tlist_add(&hdl->list, &i40iw_handlers);\n-\tspin_unlock_irqrestore(&i40iw_handler_lock, flags);\n-}\n-\n-/**\n- * i40iw_del_handler - delete a handler from the list\n- * @hdl: handler to be deleted from the handler list\n- */\n-static int i40iw_del_handler(struct i40iw_handler *hdl)\n-{\n-\tunsigned long flags;\n-\n-\tspin_lock_irqsave(&i40iw_handler_lock, flags);\n-\tlist_del(&hdl->list);\n-\tspin_unlock_irqrestore(&i40iw_handler_lock, flags);\n-\treturn 0;\n-}\n-\n-/**\n- * i40iw_enable_intr - set up device interrupts\n- * @dev: hardware control device structure\n- * @msix_id: id of the interrupt to be enabled\n- */\n-static void i40iw_enable_intr(struct i40iw_sc_dev *dev, u32 msix_id)\n-{\n-\tu32 val;\n-\n-\tval = I40E_PFINT_DYN_CTLN_INTENA_MASK |\n-\t\tI40E_PFINT_DYN_CTLN_CLEARPBA_MASK |\n-\t\t(3 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);\n-\tif (dev->is_pf)\n-\t\ti40iw_wr32(dev->hw, I40E_PFINT_DYN_CTLN(msix_id - 1), val);\n-\telse\n-\t\ti40iw_wr32(dev->hw, I40E_VFINT_DYN_CTLN1(msix_id - 1), val);\n-}\n-\n-/**\n- * i40iw_dpc - tasklet for aeq and ceq 0\n- * @data: iwarp device\n- */\n-static void i40iw_dpc(unsigned long data)\n-{\n-\tstruct i40iw_device *iwdev = (struct i40iw_device *)data;\n-\n-\tif (iwdev->msix_shared)\n-\t\ti40iw_process_ceq(iwdev, iwdev->ceqlist);\n-\ti40iw_process_aeq(iwdev);\n-\ti40iw_enable_intr(&iwdev->sc_dev, iwdev->iw_msixtbl[0].idx);\n-}\n-\n-/**\n- * i40iw_ceq_dpc - 
dpc handler for CEQ\n- * @data: data points to CEQ\n- */\n-static void i40iw_ceq_dpc(unsigned long data)\n-{\n-\tstruct i40iw_ceq *iwceq = (struct i40iw_ceq *)data;\n-\tstruct i40iw_device *iwdev = iwceq->iwdev;\n-\n-\ti40iw_process_ceq(iwdev, iwceq);\n-\ti40iw_enable_intr(&iwdev->sc_dev, iwceq->msix_idx);\n-}\n-\n-/**\n- * i40iw_irq_handler - interrupt handler for aeq and ceq0\n- * @irq: Interrupt request number\n- * @data: iwarp device\n- */\n-static irqreturn_t i40iw_irq_handler(int irq, void *data)\n-{\n-\tstruct i40iw_device *iwdev = (struct i40iw_device *)data;\n-\n-\ttasklet_schedule(&iwdev->dpc_tasklet);\n-\treturn IRQ_HANDLED;\n-}\n-\n-/**\n- * i40iw_destroy_cqp - destroy control qp\n- * @iwdev: iwarp device\n- * @create_done: 1 if cqp create poll was success\n- *\n- * Issue destroy cqp request and\n- * free the resources associated with the cqp\n- */\n-static void i40iw_destroy_cqp(struct i40iw_device *iwdev, bool free_hwcqp)\n-{\n-\tstruct i40iw_sc_dev *dev = &iwdev->sc_dev;\n-\tstruct i40iw_cqp *cqp = &iwdev->cqp;\n-\n-\tif (free_hwcqp)\n-\t\tdev->cqp_ops->cqp_destroy(dev->cqp);\n-\n-\ti40iw_cleanup_pending_cqp_op(iwdev);\n-\n-\ti40iw_free_dma_mem(dev->hw, &cqp->sq);\n-\tkfree(cqp->scratch_array);\n-\tiwdev->cqp.scratch_array = NULL;\n-\n-\tkfree(cqp->cqp_requests);\n-\tcqp->cqp_requests = NULL;\n-}\n-\n-/**\n- * i40iw_disable_irqs - disable device interrupts\n- * @dev: hardware control device structure\n- * @msic_vec: msix vector to disable irq\n- * @dev_id: parameter to pass to free_irq (used during irq setup)\n- *\n- * The function is called when destroying aeq/ceq\n- */\n-static void i40iw_disable_irq(struct i40iw_sc_dev *dev,\n-\t\t\t struct i40iw_msix_vector *msix_vec,\n-\t\t\t void *dev_id)\n-{\n-\tif (dev->is_pf)\n-\t\ti40iw_wr32(dev->hw, I40E_PFINT_DYN_CTLN(msix_vec->idx - 1), 0);\n-\telse\n-\t\ti40iw_wr32(dev->hw, I40E_VFINT_DYN_CTLN1(msix_vec->idx - 1), 0);\n-\tirq_set_affinity_hint(msix_vec->irq, NULL);\n-\tfree_irq(msix_vec->irq, dev_id);\n-}\n-\n-/**\n- * i40iw_destroy_aeq - destroy aeq\n- * @iwdev: iwarp device\n- *\n- * Issue a destroy aeq request and\n- * free the resources associated with the aeq\n- * The function is called during driver unload\n- */\n-static void i40iw_destroy_aeq(struct i40iw_device *iwdev)\n-{\n-\tenum i40iw_status_code status = I40IW_ERR_NOT_READY;\n-\tstruct i40iw_sc_dev *dev = &iwdev->sc_dev;\n-\tstruct i40iw_aeq *aeq = &iwdev->aeq;\n-\n-\tif (!iwdev->msix_shared)\n-\t\ti40iw_disable_irq(dev, iwdev->iw_msixtbl, (void *)iwdev);\n-\tif (iwdev->reset)\n-\t\tgoto exit;\n-\n-\tif (!dev->aeq_ops->aeq_destroy(&aeq->sc_aeq, 0, 1))\n-\t\tstatus = dev->aeq_ops->aeq_destroy_done(&aeq->sc_aeq);\n-\tif (status)\n-\t\ti40iw_pr_err(\"destroy aeq failed %d\\n\", status);\n-\n-exit:\n-\ti40iw_free_dma_mem(dev->hw, &aeq->mem);\n-}\n-\n-/**\n- * i40iw_destroy_ceq - destroy ceq\n- * @iwdev: iwarp device\n- * @iwceq: ceq to be destroyed\n- *\n- * Issue a destroy ceq request and\n- * free the resources associated with the ceq\n- */\n-static void i40iw_destroy_ceq(struct i40iw_device *iwdev,\n-\t\t\t struct i40iw_ceq *iwceq)\n-{\n-\tenum i40iw_status_code status;\n-\tstruct i40iw_sc_dev *dev = &iwdev->sc_dev;\n-\n-\tif (iwdev->reset)\n-\t\tgoto exit;\n-\n-\tstatus = dev->ceq_ops->ceq_destroy(&iwceq->sc_ceq, 0, 1);\n-\tif (status) {\n-\t\ti40iw_pr_err(\"ceq destroy command failed %d\\n\", status);\n-\t\tgoto exit;\n-\t}\n-\n-\tstatus = dev->ceq_ops->cceq_destroy_done(&iwceq->sc_ceq);\n-\tif (status)\n-\t\ti40iw_pr_err(\"ceq destroy completion failed %d\\n\", 
status);\n-exit:\n-\ti40iw_free_dma_mem(dev->hw, &iwceq->mem);\n-}\n-\n-/**\n- * i40iw_dele_ceqs - destroy all ceq's\n- * @iwdev: iwarp device\n- *\n- * Go through all of the device ceq's and for each ceq\n- * disable the ceq interrupt and destroy the ceq\n- */\n-static void i40iw_dele_ceqs(struct i40iw_device *iwdev)\n-{\n-\tu32 i = 0;\n-\tstruct i40iw_sc_dev *dev = &iwdev->sc_dev;\n-\tstruct i40iw_ceq *iwceq = iwdev->ceqlist;\n-\tstruct i40iw_msix_vector *msix_vec = iwdev->iw_msixtbl;\n-\n-\tif (iwdev->msix_shared) {\n-\t\ti40iw_disable_irq(dev, msix_vec, (void *)iwdev);\n-\t\ti40iw_destroy_ceq(iwdev, iwceq);\n-\t\tiwceq++;\n-\t\ti++;\n-\t}\n-\n-\tfor (msix_vec++; i < iwdev->ceqs_count; i++, msix_vec++, iwceq++) {\n-\t\ti40iw_disable_irq(dev, msix_vec, (void *)iwceq);\n-\t\ti40iw_destroy_ceq(iwdev, iwceq);\n-\t}\n-\n-\tiwdev->sc_dev.ceq_valid = false;\n-}\n-\n-/**\n- * i40iw_destroy_ccq - destroy control cq\n- * @iwdev: iwarp device\n- *\n- * Issue destroy ccq request and\n- * free the resources associated with the ccq\n- */\n-static void i40iw_destroy_ccq(struct i40iw_device *iwdev)\n-{\n-\tstruct i40iw_sc_dev *dev = &iwdev->sc_dev;\n-\tstruct i40iw_ccq *ccq = &iwdev->ccq;\n-\tenum i40iw_status_code status = 0;\n-\n-\tif (!iwdev->reset)\n-\t\tstatus = dev->ccq_ops->ccq_destroy(dev->ccq, 0, true);\n-\tif (status)\n-\t\ti40iw_pr_err(\"ccq destroy failed %d\\n\", status);\n-\ti40iw_free_dma_mem(dev->hw, &ccq->mem_cq);\n-}\n-\n-/* types of hmc objects */\n-static enum i40iw_hmc_rsrc_type iw_hmc_obj_types[] = {\n-\tI40IW_HMC_IW_QP,\n-\tI40IW_HMC_IW_CQ,\n-\tI40IW_HMC_IW_HTE,\n-\tI40IW_HMC_IW_ARP,\n-\tI40IW_HMC_IW_APBVT_ENTRY,\n-\tI40IW_HMC_IW_MR,\n-\tI40IW_HMC_IW_XF,\n-\tI40IW_HMC_IW_XFFL,\n-\tI40IW_HMC_IW_Q1,\n-\tI40IW_HMC_IW_Q1FL,\n-\tI40IW_HMC_IW_TIMER,\n-};\n-\n-/**\n- * i40iw_close_hmc_objects_type - delete hmc objects of a given type\n- * @iwdev: iwarp device\n- * @obj_type: the hmc object type to be deleted\n- * @is_pf: true if the function is PF otherwise false\n- * @reset: true if called before reset\n- */\n-static void i40iw_close_hmc_objects_type(struct i40iw_sc_dev *dev,\n-\t\t\t\t\t enum i40iw_hmc_rsrc_type obj_type,\n-\t\t\t\t\t struct i40iw_hmc_info *hmc_info,\n-\t\t\t\t\t bool is_pf,\n-\t\t\t\t\t bool reset)\n-{\n-\tstruct i40iw_hmc_del_obj_info info;\n-\n-\tmemset(&info, 0, sizeof(info));\n-\tinfo.hmc_info = hmc_info;\n-\tinfo.rsrc_type = obj_type;\n-\tinfo.count = hmc_info->hmc_obj[obj_type].cnt;\n-\tinfo.is_pf = is_pf;\n-\tif (dev->hmc_ops->del_hmc_object(dev, &info, reset))\n-\t\ti40iw_pr_err(\"del obj of type %d failed\\n\", obj_type);\n-}\n-\n-/**\n- * i40iw_del_hmc_objects - remove all device hmc objects\n- * @dev: iwarp device\n- * @hmc_info: hmc_info to free\n- * @is_pf: true if hmc_info belongs to PF, not vf nor allocated\n- *\t by PF on behalf of VF\n- * @reset: true if called before reset\n- */\n-static void i40iw_del_hmc_objects(struct i40iw_sc_dev *dev,\n-\t\t\t\t struct i40iw_hmc_info *hmc_info,\n-\t\t\t\t bool is_pf,\n-\t\t\t\t bool reset)\n-{\n-\tunsigned int i;\n-\n-\tfor (i = 0; i < IW_HMC_OBJ_TYPE_NUM; i++)\n-\t\ti40iw_close_hmc_objects_type(dev, iw_hmc_obj_types[i], hmc_info, is_pf, reset);\n-}\n-\n-/**\n- * i40iw_ceq_handler - interrupt handler for ceq\n- * @data: ceq pointer\n- */\n-static irqreturn_t i40iw_ceq_handler(int irq, void *data)\n-{\n-\tstruct i40iw_ceq *iwceq = (struct i40iw_ceq *)data;\n-\n-\tif (iwceq->irq != irq)\n-\t\ti40iw_pr_err(\"expected irq = %d received irq = %d\\n\", iwceq->irq, 
irq);\n-\ttasklet_schedule(&iwceq->dpc_tasklet);\n-\treturn IRQ_HANDLED;\n-}\n-\n-/**\n- * i40iw_create_hmc_obj_type - create hmc object of a given type\n- * @dev: hardware control device structure\n- * @info: information for the hmc object to create\n- */\n-static enum i40iw_status_code i40iw_create_hmc_obj_type(struct i40iw_sc_dev *dev,\n-\t\t\t\t\t\t\tstruct i40iw_hmc_create_obj_info *info)\n-{\n-\treturn dev->hmc_ops->create_hmc_object(dev, info);\n-}\n-\n-/**\n- * i40iw_create_hmc_objs - create all hmc objects for the device\n- * @iwdev: iwarp device\n- * @is_pf: true if the function is PF otherwise false\n- *\n- * Create the device hmc objects and allocate hmc pages\n- * Return 0 if successful, otherwise clean up and return error\n- */\n-static enum i40iw_status_code i40iw_create_hmc_objs(struct i40iw_device *iwdev,\n-\t\t\t\t\t\t bool is_pf)\n-{\n-\tstruct i40iw_sc_dev *dev = &iwdev->sc_dev;\n-\tstruct i40iw_hmc_create_obj_info info;\n-\tenum i40iw_status_code status;\n-\tint i;\n-\n-\tmemset(&info, 0, sizeof(info));\n-\tinfo.hmc_info = dev->hmc_info;\n-\tinfo.is_pf = is_pf;\n-\tinfo.entry_type = iwdev->sd_type;\n-\tfor (i = 0; i < IW_HMC_OBJ_TYPE_NUM; i++) {\n-\t\tinfo.rsrc_type = iw_hmc_obj_types[i];\n-\t\tinfo.count = dev->hmc_info->hmc_obj[info.rsrc_type].cnt;\n-\t\tinfo.add_sd_cnt = 0;\n-\t\tstatus = i40iw_create_hmc_obj_type(dev, &info);\n-\t\tif (status) {\n-\t\t\ti40iw_pr_err(\"create obj type %d status = %d\\n\",\n-\t\t\t\t iw_hmc_obj_types[i], status);\n-\t\t\tbreak;\n-\t\t}\n-\t}\n-\tif (!status)\n-\t\treturn (dev->cqp_misc_ops->static_hmc_pages_allocated(dev->cqp, 0,\n-\t\t\t\t\t\t\t\t dev->hmc_fn_id,\n-\t\t\t\t\t\t\t\t true, true));\n-\n-\twhile (i) {\n-\t\ti--;\n-\t\t/* destroy the hmc objects of a given type */\n-\t\ti40iw_close_hmc_objects_type(dev,\n-\t\t\t\t\t iw_hmc_obj_types[i],\n-\t\t\t\t\t dev->hmc_info,\n-\t\t\t\t\t is_pf,\n-\t\t\t\t\t false);\n-\t}\n-\treturn status;\n-}\n-\n-/**\n- * i40iw_obj_aligned_mem - get aligned memory from device allocated memory\n- * @iwdev: iwarp device\n- * @memptr: points to the memory addresses\n- * @size: size of memory needed\n- * @mask: mask for the aligned memory\n- *\n- * Get aligned memory of the requested size and\n- * update the memptr to point to the new aligned memory\n- * Return 0 if successful, otherwise return no memory error\n- */\n-enum i40iw_status_code i40iw_obj_aligned_mem(struct i40iw_device *iwdev,\n-\t\t\t\t\t struct i40iw_dma_mem *memptr,\n-\t\t\t\t\t u32 size,\n-\t\t\t\t\t u32 mask)\n-{\n-\tunsigned long va, newva;\n-\tunsigned long extra;\n-\n-\tva = (unsigned long)iwdev->obj_next.va;\n-\tnewva = va;\n-\tif (mask)\n-\t\tnewva = ALIGN(va, (mask + 1));\n-\textra = newva - va;\n-\tmemptr->va = (u8 *)va + extra;\n-\tmemptr->pa = iwdev->obj_next.pa + extra;\n-\tmemptr->size = size;\n-\tif ((memptr->va + size) > (iwdev->obj_mem.va + iwdev->obj_mem.size))\n-\t\treturn I40IW_ERR_NO_MEMORY;\n-\n-\tiwdev->obj_next.va = memptr->va + size;\n-\tiwdev->obj_next.pa = memptr->pa + size;\n-\treturn 0;\n-}\n-\n-/**\n- * i40iw_create_cqp - create control qp\n- * @iwdev: iwarp device\n- *\n- * Return 0, if the cqp and all the resources associated with it\n- * are successfully created, otherwise return error\n- */\n-static enum i40iw_status_code i40iw_create_cqp(struct i40iw_device *iwdev)\n-{\n-\tenum i40iw_status_code status;\n-\tu32 sqsize = I40IW_CQP_SW_SQSIZE_2048;\n-\tstruct i40iw_dma_mem mem;\n-\tstruct i40iw_sc_dev *dev = &iwdev->sc_dev;\n-\tstruct i40iw_cqp_init_info cqp_init_info;\n-\tstruct i40iw_cqp *cqp = 
&iwdev->cqp;\n-\tu16 maj_err, min_err;\n-\tint i;\n-\n-\tcqp->cqp_requests = kcalloc(sqsize, sizeof(*cqp->cqp_requests), GFP_KERNEL);\n-\tif (!cqp->cqp_requests)\n-\t\treturn I40IW_ERR_NO_MEMORY;\n-\tcqp->scratch_array = kcalloc(sqsize, sizeof(*cqp->scratch_array), GFP_KERNEL);\n-\tif (!cqp->scratch_array) {\n-\t\tkfree(cqp->cqp_requests);\n-\t\treturn I40IW_ERR_NO_MEMORY;\n-\t}\n-\tdev->cqp = &cqp->sc_cqp;\n-\tdev->cqp->dev = dev;\n-\tmemset(&cqp_init_info, 0, sizeof(cqp_init_info));\n-\tstatus = i40iw_allocate_dma_mem(dev->hw, &cqp->sq,\n-\t\t\t\t\t(sizeof(struct i40iw_cqp_sq_wqe) * sqsize),\n-\t\t\t\t\tI40IW_CQP_ALIGNMENT);\n-\tif (status)\n-\t\tgoto exit;\n-\tstatus = i40iw_obj_aligned_mem(iwdev, &mem, sizeof(struct i40iw_cqp_ctx),\n-\t\t\t\t I40IW_HOST_CTX_ALIGNMENT_MASK);\n-\tif (status)\n-\t\tgoto exit;\n-\tdev->cqp->host_ctx_pa = mem.pa;\n-\tdev->cqp->host_ctx = mem.va;\n-\t/* populate the cqp init info */\n-\tcqp_init_info.dev = dev;\n-\tcqp_init_info.sq_size = sqsize;\n-\tcqp_init_info.sq = cqp->sq.va;\n-\tcqp_init_info.sq_pa = cqp->sq.pa;\n-\tcqp_init_info.host_ctx_pa = mem.pa;\n-\tcqp_init_info.host_ctx = mem.va;\n-\tcqp_init_info.hmc_profile = iwdev->resource_profile;\n-\tcqp_init_info.enabled_vf_count = iwdev->max_rdma_vfs;\n-\tcqp_init_info.scratch_array = cqp->scratch_array;\n-\tstatus = dev->cqp_ops->cqp_init(dev->cqp, &cqp_init_info);\n-\tif (status) {\n-\t\ti40iw_pr_err(\"cqp init status %d\\n\", status);\n-\t\tgoto exit;\n-\t}\n-\tstatus = dev->cqp_ops->cqp_create(dev->cqp, &maj_err, &min_err);\n-\tif (status) {\n-\t\ti40iw_pr_err(\"cqp create status %d maj_err %d min_err %d\\n\",\n-\t\t\t status, maj_err, min_err);\n-\t\tgoto exit;\n-\t}\n-\tspin_lock_init(&cqp->req_lock);\n-\tINIT_LIST_HEAD(&cqp->cqp_avail_reqs);\n-\tINIT_LIST_HEAD(&cqp->cqp_pending_reqs);\n-\t/* init the waitq of the cqp_requests and add them to the list */\n-\tfor (i = 0; i < sqsize; i++) {\n-\t\tinit_waitqueue_head(&cqp->cqp_requests[i].waitq);\n-\t\tlist_add_tail(&cqp->cqp_requests[i].list, &cqp->cqp_avail_reqs);\n-\t}\n-\treturn 0;\n-exit:\n-\t/* clean up the created resources */\n-\ti40iw_destroy_cqp(iwdev, false);\n-\treturn status;\n-}\n-\n-/**\n- * i40iw_create_ccq - create control cq\n- * @iwdev: iwarp device\n- *\n- * Return 0, if the ccq and the resources associated with it\n- * are successfully created, otherwise return error\n- */\n-static enum i40iw_status_code i40iw_create_ccq(struct i40iw_device *iwdev)\n-{\n-\tstruct i40iw_sc_dev *dev = &iwdev->sc_dev;\n-\tstruct i40iw_dma_mem mem;\n-\tenum i40iw_status_code status;\n-\tstruct i40iw_ccq_init_info info;\n-\tstruct i40iw_ccq *ccq = &iwdev->ccq;\n-\n-\tmemset(&info, 0, sizeof(info));\n-\tdev->ccq = &ccq->sc_cq;\n-\tdev->ccq->dev = dev;\n-\tinfo.dev = dev;\n-\tccq->shadow_area.size = sizeof(struct i40iw_cq_shadow_area);\n-\tccq->mem_cq.size = sizeof(struct i40iw_cqe) * IW_CCQ_SIZE;\n-\tstatus = i40iw_allocate_dma_mem(dev->hw, &ccq->mem_cq,\n-\t\t\t\t\tccq->mem_cq.size, I40IW_CQ0_ALIGNMENT);\n-\tif (status)\n-\t\tgoto exit;\n-\tstatus = i40iw_obj_aligned_mem(iwdev, &mem, ccq->shadow_area.size,\n-\t\t\t\t I40IW_SHADOWAREA_MASK);\n-\tif (status)\n-\t\tgoto exit;\n-\tccq->sc_cq.back_cq = (void *)ccq;\n-\t/* populate the ccq init info */\n-\tinfo.cq_base = ccq->mem_cq.va;\n-\tinfo.cq_pa = ccq->mem_cq.pa;\n-\tinfo.num_elem = IW_CCQ_SIZE;\n-\tinfo.shadow_area = mem.va;\n-\tinfo.shadow_area_pa = mem.pa;\n-\tinfo.ceqe_mask = false;\n-\tinfo.ceq_id_valid = true;\n-\tinfo.shadow_read_threshold = 16;\n-\tstatus = dev->ccq_ops->ccq_init(dev->ccq, 
&info);\n-\tif (!status)\n-\t\tstatus = dev->ccq_ops->ccq_create(dev->ccq, 0, true, true);\n-exit:\n-\tif (status)\n-\t\ti40iw_free_dma_mem(dev->hw, &ccq->mem_cq);\n-\treturn status;\n-}\n-\n-/**\n- * i40iw_configure_ceq_vector - set up the msix interrupt vector for ceq\n- * @iwdev: iwarp device\n- * @msix_vec: interrupt vector information\n- * @iwceq: ceq associated with the vector\n- * @ceq_id: the id number of the iwceq\n- *\n- * Allocate interrupt resources and enable irq handling\n- * Return 0 if successful, otherwise return error\n- */\n-static enum i40iw_status_code i40iw_configure_ceq_vector(struct i40iw_device *iwdev,\n-\t\t\t\t\t\t\t struct i40iw_ceq *iwceq,\n-\t\t\t\t\t\t\t u32 ceq_id,\n-\t\t\t\t\t\t\t struct i40iw_msix_vector *msix_vec)\n-{\n-\tenum i40iw_status_code status;\n-\n-\tif (iwdev->msix_shared && !ceq_id) {\n-\t\ttasklet_init(&iwdev->dpc_tasklet, i40iw_dpc, (unsigned long)iwdev);\n-\t\tstatus = request_irq(msix_vec->irq, i40iw_irq_handler, 0, \"AEQCEQ\", iwdev);\n-\t} else {\n-\t\ttasklet_init(&iwceq->dpc_tasklet, i40iw_ceq_dpc, (unsigned long)iwceq);\n-\t\tstatus = request_irq(msix_vec->irq, i40iw_ceq_handler, 0, \"CEQ\", iwceq);\n-\t}\n-\n-\tcpumask_clear(&msix_vec->mask);\n-\tcpumask_set_cpu(msix_vec->cpu_affinity, &msix_vec->mask);\n-\tirq_set_affinity_hint(msix_vec->irq, &msix_vec->mask);\n-\n-\tif (status) {\n-\t\ti40iw_pr_err(\"ceq irq config fail\\n\");\n-\t\treturn I40IW_ERR_CONFIG;\n-\t}\n-\tmsix_vec->ceq_id = ceq_id;\n-\n-\treturn 0;\n-}\n-\n-/**\n- * i40iw_create_ceq - create completion event queue\n- * @iwdev: iwarp device\n- * @iwceq: pointer to the ceq resources to be created\n- * @ceq_id: the id number of the iwceq\n- *\n- * Return 0, if the ceq and the resources associated with it\n- * are successfully created, otherwise return error\n- */\n-static enum i40iw_status_code i40iw_create_ceq(struct i40iw_device *iwdev,\n-\t\t\t\t\t struct i40iw_ceq *iwceq,\n-\t\t\t\t\t u32 ceq_id)\n-{\n-\tenum i40iw_status_code status;\n-\tstruct i40iw_ceq_init_info info;\n-\tstruct i40iw_sc_dev *dev = &iwdev->sc_dev;\n-\tu64 scratch;\n-\n-\tmemset(&info, 0, sizeof(info));\n-\tinfo.ceq_id = ceq_id;\n-\tiwceq->iwdev = iwdev;\n-\tiwceq->mem.size = sizeof(struct i40iw_ceqe) *\n-\t\tiwdev->sc_dev.hmc_info->hmc_obj[I40IW_HMC_IW_CQ].cnt;\n-\tstatus = i40iw_allocate_dma_mem(dev->hw, &iwceq->mem, iwceq->mem.size,\n-\t\t\t\t\tI40IW_CEQ_ALIGNMENT);\n-\tif (status)\n-\t\tgoto exit;\n-\tinfo.ceq_id = ceq_id;\n-\tinfo.ceqe_base = iwceq->mem.va;\n-\tinfo.ceqe_pa = iwceq->mem.pa;\n-\n-\tinfo.elem_cnt = iwdev->sc_dev.hmc_info->hmc_obj[I40IW_HMC_IW_CQ].cnt;\n-\tiwceq->sc_ceq.ceq_id = ceq_id;\n-\tinfo.dev = dev;\n-\tscratch = (uintptr_t)&iwdev->cqp.sc_cqp;\n-\tstatus = dev->ceq_ops->ceq_init(&iwceq->sc_ceq, &info);\n-\tif (!status)\n-\t\tstatus = dev->ceq_ops->cceq_create(&iwceq->sc_ceq, scratch);\n-\n-exit:\n-\tif (status)\n-\t\ti40iw_free_dma_mem(dev->hw, &iwceq->mem);\n-\treturn status;\n-}\n-\n-void i40iw_request_reset(struct i40iw_device *iwdev)\n-{\n-\tstruct i40e_info *ldev = iwdev->ldev;\n-\n-\tldev->ops->request_reset(ldev, iwdev->client, 1);\n-}\n-\n-/**\n- * i40iw_setup_ceqs - manage the device ceq's and their interrupt resources\n- * @iwdev: iwarp device\n- * @ldev: i40e lan device\n- *\n- * Allocate a list for all device completion event queues\n- * Create the ceq's and configure their msix interrupt vectors\n- * Return 0, if at least one ceq is successfully set up, otherwise return error\n- */\n-static enum i40iw_status_code i40iw_setup_ceqs(struct i40iw_device 
*iwdev,\n-\t\t\t\t\t struct i40e_info *ldev)\n-{\n-\tu32 i;\n-\tu32 ceq_id;\n-\tstruct i40iw_ceq *iwceq;\n-\tstruct i40iw_msix_vector *msix_vec;\n-\tenum i40iw_status_code status = 0;\n-\tu32 num_ceqs;\n-\n-\tif (ldev && ldev->ops && ldev->ops->setup_qvlist) {\n-\t\tstatus = ldev->ops->setup_qvlist(ldev, &i40iw_client,\n-\t\t\t\t\t\t iwdev->iw_qvlist);\n-\t\tif (status)\n-\t\t\tgoto exit;\n-\t} else {\n-\t\tstatus = I40IW_ERR_BAD_PTR;\n-\t\tgoto exit;\n-\t}\n-\n-\tnum_ceqs = min(iwdev->msix_count, iwdev->sc_dev.hmc_fpm_misc.max_ceqs);\n-\tiwdev->ceqlist = kcalloc(num_ceqs, sizeof(*iwdev->ceqlist), GFP_KERNEL);\n-\tif (!iwdev->ceqlist) {\n-\t\tstatus = I40IW_ERR_NO_MEMORY;\n-\t\tgoto exit;\n-\t}\n-\ti = (iwdev->msix_shared) ? 0 : 1;\n-\tfor (ceq_id = 0; i < num_ceqs; i++, ceq_id++) {\n-\t\tiwceq = &iwdev->ceqlist[ceq_id];\n-\t\tstatus = i40iw_create_ceq(iwdev, iwceq, ceq_id);\n-\t\tif (status) {\n-\t\t\ti40iw_pr_err(\"create ceq status = %d\\n\", status);\n-\t\t\tbreak;\n-\t\t}\n-\n-\t\tmsix_vec = &iwdev->iw_msixtbl[i];\n-\t\tiwceq->irq = msix_vec->irq;\n-\t\tiwceq->msix_idx = msix_vec->idx;\n-\t\tstatus = i40iw_configure_ceq_vector(iwdev, iwceq, ceq_id, msix_vec);\n-\t\tif (status) {\n-\t\t\ti40iw_destroy_ceq(iwdev, iwceq);\n-\t\t\tbreak;\n-\t\t}\n-\t\ti40iw_enable_intr(&iwdev->sc_dev, msix_vec->idx);\n-\t\tiwdev->ceqs_count++;\n-\t}\n-exit:\n-\tif (status && !iwdev->ceqs_count) {\n-\t\tkfree(iwdev->ceqlist);\n-\t\tiwdev->ceqlist = NULL;\n-\t\treturn status;\n-\t} else {\n-\t\tiwdev->sc_dev.ceq_valid = true;\n-\t\treturn 0;\n-\t}\n-\n-}\n-\n-/**\n- * i40iw_configure_aeq_vector - set up the msix vector for aeq\n- * @iwdev: iwarp device\n- *\n- * Allocate interrupt resources and enable irq handling\n- * Return 0 if successful, otherwise return error\n- */\n-static enum i40iw_status_code i40iw_configure_aeq_vector(struct i40iw_device *iwdev)\n-{\n-\tstruct i40iw_msix_vector *msix_vec = iwdev->iw_msixtbl;\n-\tu32 ret = 0;\n-\n-\tif (!iwdev->msix_shared) {\n-\t\ttasklet_init(&iwdev->dpc_tasklet, i40iw_dpc, (unsigned long)iwdev);\n-\t\tret = request_irq(msix_vec->irq, i40iw_irq_handler, 0, \"i40iw\", iwdev);\n-\t}\n-\tif (ret) {\n-\t\ti40iw_pr_err(\"aeq irq config fail\\n\");\n-\t\treturn I40IW_ERR_CONFIG;\n-\t}\n-\n-\treturn 0;\n-}\n-\n-/**\n- * i40iw_create_aeq - create async event queue\n- * @iwdev: iwarp device\n- *\n- * Return 0, if the aeq and the resources associated with it\n- * are successfully created, otherwise return error\n- */\n-static enum i40iw_status_code i40iw_create_aeq(struct i40iw_device *iwdev)\n-{\n-\tenum i40iw_status_code status;\n-\tstruct i40iw_aeq_init_info info;\n-\tstruct i40iw_sc_dev *dev = &iwdev->sc_dev;\n-\tstruct i40iw_aeq *aeq = &iwdev->aeq;\n-\tu64 scratch = 0;\n-\tu32 aeq_size;\n-\n-\taeq_size = 2 * iwdev->sc_dev.hmc_info->hmc_obj[I40IW_HMC_IW_QP].cnt +\n-\t\tiwdev->sc_dev.hmc_info->hmc_obj[I40IW_HMC_IW_CQ].cnt;\n-\tmemset(&info, 0, sizeof(info));\n-\taeq->mem.size = sizeof(struct i40iw_sc_aeqe) * aeq_size;\n-\tstatus = i40iw_allocate_dma_mem(dev->hw, &aeq->mem, aeq->mem.size,\n-\t\t\t\t\tI40IW_AEQ_ALIGNMENT);\n-\tif (status)\n-\t\tgoto exit;\n-\n-\tinfo.aeqe_base = aeq->mem.va;\n-\tinfo.aeq_elem_pa = aeq->mem.pa;\n-\tinfo.elem_cnt = aeq_size;\n-\tinfo.dev = dev;\n-\tstatus = dev->aeq_ops->aeq_init(&aeq->sc_aeq, &info);\n-\tif (status)\n-\t\tgoto exit;\n-\tstatus = dev->aeq_ops->aeq_create(&aeq->sc_aeq, scratch, 1);\n-\tif (!status)\n-\t\tstatus = dev->aeq_ops->aeq_create_done(&aeq->sc_aeq);\n-exit:\n-\tif (status)\n-\t\ti40iw_free_dma_mem(dev->hw, 
&aeq->mem);\n-\treturn status;\n-}\n-\n-/**\n- * i40iw_setup_aeq - set up the device aeq\n- * @iwdev: iwarp device\n- *\n- * Create the aeq and configure its msix interrupt vector\n- * Return 0 if successful, otherwise return error\n- */\n-static enum i40iw_status_code i40iw_setup_aeq(struct i40iw_device *iwdev)\n-{\n-\tstruct i40iw_sc_dev *dev = &iwdev->sc_dev;\n-\tenum i40iw_status_code status;\n-\n-\tstatus = i40iw_create_aeq(iwdev);\n-\tif (status)\n-\t\treturn status;\n-\n-\tstatus = i40iw_configure_aeq_vector(iwdev);\n-\tif (status) {\n-\t\ti40iw_destroy_aeq(iwdev);\n-\t\treturn status;\n-\t}\n-\n-\tif (!iwdev->msix_shared)\n-\t\ti40iw_enable_intr(dev, iwdev->iw_msixtbl[0].idx);\n-\treturn 0;\n-}\n-\n-/**\n- * i40iw_initialize_ilq - create iwarp local queue for cm\n- * @iwdev: iwarp device\n- *\n- * Return 0 if successful, otherwise return error\n- */\n-static enum i40iw_status_code i40iw_initialize_ilq(struct i40iw_device *iwdev)\n-{\n-\tstruct i40iw_puda_rsrc_info info;\n-\tenum i40iw_status_code status;\n-\n-\tmemset(&info, 0, sizeof(info));\n-\tinfo.type = I40IW_PUDA_RSRC_TYPE_ILQ;\n-\tinfo.cq_id = 1;\n-\tinfo.qp_id = 0;\n-\tinfo.count = 1;\n-\tinfo.pd_id = 1;\n-\tinfo.sq_size = 8192;\n-\tinfo.rq_size = 8192;\n-\tinfo.buf_size = 1024;\n-\tinfo.tx_buf_cnt = 16384;\n-\tinfo.receive = i40iw_receive_ilq;\n-\tinfo.xmit_complete = i40iw_free_sqbuf;\n-\tstatus = i40iw_puda_create_rsrc(&iwdev->vsi, &info);\n-\tif (status)\n-\t\ti40iw_pr_err(\"ilq create fail\\n\");\n-\treturn status;\n-}\n-\n-/**\n- * i40iw_initialize_ieq - create iwarp exception queue\n- * @iwdev: iwarp device\n- *\n- * Return 0 if successful, otherwise return error\n- */\n-static enum i40iw_status_code i40iw_initialize_ieq(struct i40iw_device *iwdev)\n-{\n-\tstruct i40iw_puda_rsrc_info info;\n-\tenum i40iw_status_code status;\n-\n-\tmemset(&info, 0, sizeof(info));\n-\tinfo.type = I40IW_PUDA_RSRC_TYPE_IEQ;\n-\tinfo.cq_id = 2;\n-\tinfo.qp_id = iwdev->vsi.exception_lan_queue;\n-\tinfo.count = 1;\n-\tinfo.pd_id = 2;\n-\tinfo.sq_size = 8192;\n-\tinfo.rq_size = 8192;\n-\tinfo.buf_size = iwdev->vsi.mtu + VLAN_ETH_HLEN;\n-\tinfo.tx_buf_cnt = 4096;\n-\tstatus = i40iw_puda_create_rsrc(&iwdev->vsi, &info);\n-\tif (status)\n-\t\ti40iw_pr_err(\"ieq create fail\\n\");\n-\treturn status;\n-}\n-\n-/**\n- * i40iw_reinitialize_ieq - destroy and re-create ieq\n- * @dev: iwarp device\n- */\n-void i40iw_reinitialize_ieq(struct i40iw_sc_dev *dev)\n-{\n-\tstruct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev;\n-\n-\ti40iw_puda_dele_resources(&iwdev->vsi, I40IW_PUDA_RSRC_TYPE_IEQ, false);\n-\tif (i40iw_initialize_ieq(iwdev)) {\n-\t\tiwdev->reset = true;\n-\t\ti40iw_request_reset(iwdev);\n-\t}\n-}\n-\n-/**\n- * i40iw_hmc_setup - create hmc objects for the device\n- * @iwdev: iwarp device\n- *\n- * Set up the device private memory space for the number and size of\n- * the hmc objects and create the objects\n- * Return 0 if successful, otherwise return error\n- */\n-static enum i40iw_status_code i40iw_hmc_setup(struct i40iw_device *iwdev)\n-{\n-\tenum i40iw_status_code status;\n-\n-\tiwdev->sd_type = I40IW_SD_TYPE_DIRECT;\n-\tstatus = i40iw_config_fpm_values(&iwdev->sc_dev, IW_CFG_FPM_QP_COUNT);\n-\tif (status)\n-\t\tgoto exit;\n-\tstatus = i40iw_create_hmc_objs(iwdev, true);\n-\tif (status)\n-\t\tgoto exit;\n-\tiwdev->init_state = HMC_OBJS_CREATED;\n-exit:\n-\treturn status;\n-}\n-\n-/**\n- * i40iw_del_init_mem - deallocate memory resources\n- * @iwdev: iwarp device\n- */\n-static void i40iw_del_init_mem(struct i40iw_device 
*iwdev)\n-{\n-\tstruct i40iw_sc_dev *dev = &iwdev->sc_dev;\n-\n-\ti40iw_free_dma_mem(&iwdev->hw, &iwdev->obj_mem);\n-\tkfree(dev->hmc_info->sd_table.sd_entry);\n-\tdev->hmc_info->sd_table.sd_entry = NULL;\n-\tkfree(iwdev->mem_resources);\n-\tiwdev->mem_resources = NULL;\n-\tkfree(iwdev->ceqlist);\n-\tiwdev->ceqlist = NULL;\n-\tkfree(iwdev->iw_msixtbl);\n-\tiwdev->iw_msixtbl = NULL;\n-\tkfree(iwdev->hmc_info_mem);\n-\tiwdev->hmc_info_mem = NULL;\n-}\n-\n-/**\n- * i40iw_del_macip_entry - remove a mac ip address entry from the hw table\n- * @iwdev: iwarp device\n- * @idx: the index of the mac ip address to delete\n- */\n-static void i40iw_del_macip_entry(struct i40iw_device *iwdev, u8 idx)\n-{\n-\tstruct i40iw_cqp *iwcqp = &iwdev->cqp;\n-\tstruct i40iw_cqp_request *cqp_request;\n-\tstruct cqp_commands_info *cqp_info;\n-\tenum i40iw_status_code status = 0;\n-\n-\tcqp_request = i40iw_get_cqp_request(iwcqp, true);\n-\tif (!cqp_request) {\n-\t\ti40iw_pr_err(\"cqp_request memory failed\\n\");\n-\t\treturn;\n-\t}\n-\tcqp_info = &cqp_request->info;\n-\tcqp_info->cqp_cmd = OP_DELETE_LOCAL_MAC_IPADDR_ENTRY;\n-\tcqp_info->post_sq = 1;\n-\tcqp_info->in.u.del_local_mac_ipaddr_entry.cqp = &iwcqp->sc_cqp;\n-\tcqp_info->in.u.del_local_mac_ipaddr_entry.scratch = (uintptr_t)cqp_request;\n-\tcqp_info->in.u.del_local_mac_ipaddr_entry.entry_idx = idx;\n-\tcqp_info->in.u.del_local_mac_ipaddr_entry.ignore_ref_count = 0;\n-\tstatus = i40iw_handle_cqp_op(iwdev, cqp_request);\n-\tif (status)\n-\t\ti40iw_pr_err(\"CQP-OP Del MAC Ip entry fail\");\n-}\n-\n-/**\n- * i40iw_add_mac_ipaddr_entry - add a mac ip address entry to the hw table\n- * @iwdev: iwarp device\n- * @mac_addr: pointer to mac address\n- * @idx: the index of the mac ip address to add\n- */\n-static enum i40iw_status_code i40iw_add_mac_ipaddr_entry(struct i40iw_device *iwdev,\n-\t\t\t\t\t\t\t u8 *mac_addr,\n-\t\t\t\t\t\t\t u8 idx)\n-{\n-\tstruct i40iw_local_mac_ipaddr_entry_info *info;\n-\tstruct i40iw_cqp *iwcqp = &iwdev->cqp;\n-\tstruct i40iw_cqp_request *cqp_request;\n-\tstruct cqp_commands_info *cqp_info;\n-\tenum i40iw_status_code status = 0;\n-\n-\tcqp_request = i40iw_get_cqp_request(iwcqp, true);\n-\tif (!cqp_request) {\n-\t\ti40iw_pr_err(\"cqp_request memory failed\\n\");\n-\t\treturn I40IW_ERR_NO_MEMORY;\n-\t}\n-\n-\tcqp_info = &cqp_request->info;\n-\n-\tcqp_info->post_sq = 1;\n-\tinfo = &cqp_info->in.u.add_local_mac_ipaddr_entry.info;\n-\tether_addr_copy(info->mac_addr, mac_addr);\n-\tinfo->entry_idx = idx;\n-\tcqp_info->in.u.add_local_mac_ipaddr_entry.scratch = (uintptr_t)cqp_request;\n-\tcqp_info->cqp_cmd = OP_ADD_LOCAL_MAC_IPADDR_ENTRY;\n-\tcqp_info->in.u.add_local_mac_ipaddr_entry.cqp = &iwcqp->sc_cqp;\n-\tcqp_info->in.u.add_local_mac_ipaddr_entry.scratch = (uintptr_t)cqp_request;\n-\tstatus = i40iw_handle_cqp_op(iwdev, cqp_request);\n-\tif (status)\n-\t\ti40iw_pr_err(\"CQP-OP Add MAC Ip entry fail\");\n-\treturn status;\n-}\n-\n-/**\n- * i40iw_alloc_local_mac_ipaddr_entry - allocate a mac ip address entry\n- * @iwdev: iwarp device\n- * @mac_ip_tbl_idx: the index of the new mac ip address\n- *\n- * Allocate a mac ip address entry and update the mac_ip_tbl_idx\n- * to hold the index of the newly created mac ip address\n- * Return 0 if successful, otherwise return error\n- */\n-static enum i40iw_status_code i40iw_alloc_local_mac_ipaddr_entry(struct i40iw_device *iwdev,\n-\t\t\t\t\t\t\t\t u16 *mac_ip_tbl_idx)\n-{\n-\tstruct i40iw_cqp *iwcqp = &iwdev->cqp;\n-\tstruct i40iw_cqp_request *cqp_request;\n-\tstruct cqp_commands_info *cqp_info;\n-\tenum 
i40iw_status_code status = 0;\n-\n-\tcqp_request = i40iw_get_cqp_request(iwcqp, true);\n-\tif (!cqp_request) {\n-\t\ti40iw_pr_err(\"cqp_request memory failed\\n\");\n-\t\treturn I40IW_ERR_NO_MEMORY;\n-\t}\n-\n-\t/* increment refcount, because we need the cqp request ret value */\n-\tatomic_inc(&cqp_request->refcount);\n-\n-\tcqp_info = &cqp_request->info;\n-\tcqp_info->cqp_cmd = OP_ALLOC_LOCAL_MAC_IPADDR_ENTRY;\n-\tcqp_info->post_sq = 1;\n-\tcqp_info->in.u.alloc_local_mac_ipaddr_entry.cqp = &iwcqp->sc_cqp;\n-\tcqp_info->in.u.alloc_local_mac_ipaddr_entry.scratch = (uintptr_t)cqp_request;\n-\tstatus = i40iw_handle_cqp_op(iwdev, cqp_request);\n-\tif (!status)\n-\t\t*mac_ip_tbl_idx = cqp_request->compl_info.op_ret_val;\n-\telse\n-\t\ti40iw_pr_err(\"CQP-OP Alloc MAC Ip entry fail\");\n-\t/* decrement refcount and free the cqp request, if no longer used */\n-\ti40iw_put_cqp_request(iwcqp, cqp_request);\n-\treturn status;\n-}\n-\n-/**\n- * i40iw_alloc_set_mac_ipaddr - set up a mac ip address table entry\n- * @iwdev: iwarp device\n- * @macaddr: pointer to mac address\n- *\n- * Allocate a mac ip address entry and add it to the hw table\n- * Return 0 if successful, otherwise return error\n- */\n-static enum i40iw_status_code i40iw_alloc_set_mac_ipaddr(struct i40iw_device *iwdev,\n-\t\t\t\t\t\t\t u8 *macaddr)\n-{\n-\tenum i40iw_status_code status;\n-\n-\tstatus = i40iw_alloc_local_mac_ipaddr_entry(iwdev, &iwdev->mac_ip_table_idx);\n-\tif (!status) {\n-\t\tstatus = i40iw_add_mac_ipaddr_entry(iwdev, macaddr,\n-\t\t\t\t\t\t (u8)iwdev->mac_ip_table_idx);\n-\t\tif (status)\n-\t\t\ti40iw_del_macip_entry(iwdev, (u8)iwdev->mac_ip_table_idx);\n-\t}\n-\treturn status;\n-}\n-\n-/**\n- * i40iw_add_ipv6_addr - add ipv6 address to the hw arp table\n- * @iwdev: iwarp device\n- */\n-static void i40iw_add_ipv6_addr(struct i40iw_device *iwdev)\n-{\n-\tstruct net_device *ip_dev;\n-\tstruct inet6_dev *idev;\n-\tstruct inet6_ifaddr *ifp, *tmp;\n-\tu32 local_ipaddr6[4];\n-\n-\trcu_read_lock();\n-\tfor_each_netdev_rcu(&init_net, ip_dev) {\n-\t\tif ((((rdma_vlan_dev_vlan_id(ip_dev) < 0xFFFF) &&\n-\t\t (rdma_vlan_dev_real_dev(ip_dev) == iwdev->netdev)) ||\n-\t\t (ip_dev == iwdev->netdev)) && (ip_dev->flags & IFF_UP)) {\n-\t\t\tidev = __in6_dev_get(ip_dev);\n-\t\t\tif (!idev) {\n-\t\t\t\ti40iw_pr_err(\"ipv6 inet device not found\\n\");\n-\t\t\t\tbreak;\n-\t\t\t}\n-\t\t\tlist_for_each_entry_safe(ifp, tmp, &idev->addr_list, if_list) {\n-\t\t\t\ti40iw_pr_info(\"IP=%pI6, vlan_id=%d, MAC=%pM\\n\", &ifp->addr,\n-\t\t\t\t\t rdma_vlan_dev_vlan_id(ip_dev), ip_dev->dev_addr);\n-\t\t\t\ti40iw_copy_ip_ntohl(local_ipaddr6,\n-\t\t\t\t\t\t ifp->addr.in6_u.u6_addr32);\n-\t\t\t\ti40iw_manage_arp_cache(iwdev,\n-\t\t\t\t\t\t ip_dev->dev_addr,\n-\t\t\t\t\t\t local_ipaddr6,\n-\t\t\t\t\t\t false,\n-\t\t\t\t\t\t I40IW_ARP_ADD);\n-\t\t\t}\n-\t\t}\n-\t}\n-\trcu_read_unlock();\n-}\n-\n-/**\n- * i40iw_add_ipv4_addr - add ipv4 address to the hw arp table\n- * @iwdev: iwarp device\n- */\n-static void i40iw_add_ipv4_addr(struct i40iw_device *iwdev)\n-{\n-\tstruct net_device *dev;\n-\tstruct in_device *idev;\n-\tbool got_lock = true;\n-\tu32 ip_addr;\n-\n-\tif (!rtnl_trylock())\n-\t\tgot_lock = false;\n-\n-\tfor_each_netdev(&init_net, dev) {\n-\t\tif ((((rdma_vlan_dev_vlan_id(dev) < 0xFFFF) &&\n-\t\t (rdma_vlan_dev_real_dev(dev) == iwdev->netdev)) ||\n-\t\t (dev == iwdev->netdev)) && (dev->flags & IFF_UP)) {\n-\t\t\tconst struct in_ifaddr *ifa;\n-\n-\t\t\tidev = in_dev_get(dev);\n-\t\t\tin_dev_for_each_ifa_rtnl(ifa, idev) 
{\n-\t\t\t\ti40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_CM,\n-\t\t\t\t\t \"IP=%pI4, vlan_id=%d, MAC=%pM\\n\", &ifa->ifa_address,\n-\t\t\t\t\t rdma_vlan_dev_vlan_id(dev), dev->dev_addr);\n-\n-\t\t\t\tip_addr = ntohl(ifa->ifa_address);\n-\t\t\t\ti40iw_manage_arp_cache(iwdev,\n-\t\t\t\t\t\t dev->dev_addr,\n-\t\t\t\t\t\t &ip_addr,\n-\t\t\t\t\t\t true,\n-\t\t\t\t\t\t I40IW_ARP_ADD);\n-\t\t\t}\n-\n-\t\t\tin_dev_put(idev);\n-\t\t}\n-\t}\n-\tif (got_lock)\n-\t\trtnl_unlock();\n-}\n-\n-/**\n- * i40iw_add_mac_ip - add mac and ip addresses\n- * @iwdev: iwarp device\n- *\n- * Create and add a mac ip address entry to the hw table and\n- * ipv4/ipv6 addresses to the arp cache\n- * Return 0 if successful, otherwise return error\n- */\n-static enum i40iw_status_code i40iw_add_mac_ip(struct i40iw_device *iwdev)\n-{\n-\tstruct net_device *netdev = iwdev->netdev;\n-\tenum i40iw_status_code status;\n-\n-\tstatus = i40iw_alloc_set_mac_ipaddr(iwdev, (u8 *)netdev->dev_addr);\n-\tif (status)\n-\t\treturn status;\n-\ti40iw_add_ipv4_addr(iwdev);\n-\ti40iw_add_ipv6_addr(iwdev);\n-\treturn 0;\n-}\n-\n-/**\n- * i40iw_wait_pe_ready - Check if firmware is ready\n- * @hw: provides access to registers\n- */\n-static void i40iw_wait_pe_ready(struct i40iw_hw *hw)\n-{\n-\tu32 statusfw;\n-\tu32 statuscpu0;\n-\tu32 statuscpu1;\n-\tu32 statuscpu2;\n-\tu32 retrycount = 0;\n-\n-\tdo {\n-\t\tstatusfw = i40iw_rd32(hw, I40E_GLPE_FWLDSTATUS);\n-\t\ti40iw_pr_info(\"[%04d] fm load status[x%04X]\\n\", __LINE__, statusfw);\n-\t\tstatuscpu0 = i40iw_rd32(hw, I40E_GLPE_CPUSTATUS0);\n-\t\ti40iw_pr_info(\"[%04d] CSR_CQP status[x%04X]\\n\", __LINE__, statuscpu0);\n-\t\tstatuscpu1 = i40iw_rd32(hw, I40E_GLPE_CPUSTATUS1);\n-\t\ti40iw_pr_info(\"[%04d] I40E_GLPE_CPUSTATUS1 status[x%04X]\\n\",\n-\t\t\t __LINE__, statuscpu1);\n-\t\tstatuscpu2 = i40iw_rd32(hw, I40E_GLPE_CPUSTATUS2);\n-\t\ti40iw_pr_info(\"[%04d] I40E_GLPE_CPUSTATUS2 status[x%04X]\\n\",\n-\t\t\t __LINE__, statuscpu2);\n-\t\tif ((statuscpu0 == 0x80) && (statuscpu1 == 0x80) && (statuscpu2 == 0x80))\n-\t\t\tbreak;\t/* SUCCESS */\n-\t\tmsleep(1000);\n-\t\tretrycount++;\n-\t} while (retrycount < 14);\n-\ti40iw_wr32(hw, 0xb4040, 0x4C104C5);\n-}\n-\n-/**\n- * i40iw_initialize_dev - initialize device\n- * @iwdev: iwarp device\n- * @ldev: lan device information\n- *\n- * Allocate memory for the hmc objects and initialize iwdev\n- * Return 0 if successful, otherwise clean up the resources\n- * and return error\n- */\n-static enum i40iw_status_code i40iw_initialize_dev(struct i40iw_device *iwdev,\n-\t\t\t\t\t\t struct i40e_info *ldev)\n-{\n-\tenum i40iw_status_code status;\n-\tstruct i40iw_sc_dev *dev = &iwdev->sc_dev;\n-\tstruct i40iw_device_init_info info;\n-\tstruct i40iw_vsi_init_info vsi_info;\n-\tstruct i40iw_dma_mem mem;\n-\tstruct i40iw_l2params l2params;\n-\tu32 size;\n-\tstruct i40iw_vsi_stats_info stats_info;\n-\tu16 last_qset = I40IW_NO_QSET;\n-\tu16 qset;\n-\tu32 i;\n-\n-\tmemset(&l2params, 0, sizeof(l2params));\n-\tmemset(&info, 0, sizeof(info));\n-\tsize = sizeof(struct i40iw_hmc_pble_rsrc) + sizeof(struct i40iw_hmc_info) +\n-\t\t\t\t(sizeof(struct i40iw_hmc_obj_info) * I40IW_HMC_IW_MAX);\n-\tiwdev->hmc_info_mem = kzalloc(size, GFP_KERNEL);\n-\tif (!iwdev->hmc_info_mem)\n-\t\treturn I40IW_ERR_NO_MEMORY;\n-\n-\tiwdev->pble_rsrc = (struct i40iw_hmc_pble_rsrc *)iwdev->hmc_info_mem;\n-\tdev->hmc_info = &iwdev->hw.hmc;\n-\tdev->hmc_info->hmc_obj = (struct i40iw_hmc_obj_info *)(iwdev->pble_rsrc + 1);\n-\tstatus = i40iw_obj_aligned_mem(iwdev, &mem, I40IW_QUERY_FPM_BUF_SIZE,\n-\t\t\t\t 
I40IW_FPM_QUERY_BUF_ALIGNMENT_MASK);\n-\tif (status)\n-\t\tgoto error;\n-\tinfo.fpm_query_buf_pa = mem.pa;\n-\tinfo.fpm_query_buf = mem.va;\n-\tstatus = i40iw_obj_aligned_mem(iwdev, &mem, I40IW_COMMIT_FPM_BUF_SIZE,\n-\t\t\t\t I40IW_FPM_COMMIT_BUF_ALIGNMENT_MASK);\n-\tif (status)\n-\t\tgoto error;\n-\tinfo.fpm_commit_buf_pa = mem.pa;\n-\tinfo.fpm_commit_buf = mem.va;\n-\tinfo.hmc_fn_id = ldev->fid;\n-\tinfo.is_pf = (ldev->ftype) ? false : true;\n-\tinfo.bar0 = ldev->hw_addr;\n-\tinfo.hw = &iwdev->hw;\n-\tinfo.debug_mask = debug;\n-\tl2params.mtu =\n-\t\t(ldev->params.mtu) ? ldev->params.mtu : I40IW_DEFAULT_MTU;\n-\tfor (i = 0; i < I40E_CLIENT_MAX_USER_PRIORITY; i++) {\n-\t\tqset = ldev->params.qos.prio_qos[i].qs_handle;\n-\t\tl2params.qs_handle_list[i] = qset;\n-\t\tif (last_qset == I40IW_NO_QSET)\n-\t\t\tlast_qset = qset;\n-\t\telse if ((qset != last_qset) && (qset != I40IW_NO_QSET))\n-\t\t\tiwdev->dcb = true;\n-\t}\n-\ti40iw_pr_info(\"DCB is set/clear = %d\\n\", iwdev->dcb);\n-\tinfo.vchnl_send = i40iw_virtchnl_send;\n-\tstatus = i40iw_device_init(&iwdev->sc_dev, &info);\n-\n-\tif (status)\n-\t\tgoto error;\n-\tmemset(&vsi_info, 0, sizeof(vsi_info));\n-\tvsi_info.dev = &iwdev->sc_dev;\n-\tvsi_info.back_vsi = (void *)iwdev;\n-\tvsi_info.params = &l2params;\n-\tvsi_info.exception_lan_queue = 1;\n-\ti40iw_sc_vsi_init(&iwdev->vsi, &vsi_info);\n-\n-\tif (dev->is_pf) {\n-\t\tmemset(&stats_info, 0, sizeof(stats_info));\n-\t\tstats_info.fcn_id = ldev->fid;\n-\t\tstats_info.pestat = kzalloc(sizeof(*stats_info.pestat), GFP_KERNEL);\n-\t\tif (!stats_info.pestat) {\n-\t\t\tstatus = I40IW_ERR_NO_MEMORY;\n-\t\t\tgoto error;\n-\t\t}\n-\t\tstats_info.stats_initialize = true;\n-\t\tif (stats_info.pestat)\n-\t\t\ti40iw_vsi_stats_init(&iwdev->vsi, &stats_info);\n-\t}\n-\treturn status;\n-error:\n-\tkfree(iwdev->hmc_info_mem);\n-\tiwdev->hmc_info_mem = NULL;\n-\treturn status;\n-}\n-\n-/**\n- * i40iw_register_notifiers - register tcp ip notifiers\n- */\n-static void i40iw_register_notifiers(void)\n-{\n-\tregister_inetaddr_notifier(&i40iw_inetaddr_notifier);\n-\tregister_inet6addr_notifier(&i40iw_inetaddr6_notifier);\n-\tregister_netevent_notifier(&i40iw_net_notifier);\n-\tregister_netdevice_notifier(&i40iw_netdevice_notifier);\n-}\n-\n-/**\n- * i40iw_unregister_notifiers - unregister tcp ip notifiers\n- */\n-\n-static void i40iw_unregister_notifiers(void)\n-{\n-\tunregister_netevent_notifier(&i40iw_net_notifier);\n-\tunregister_inetaddr_notifier(&i40iw_inetaddr_notifier);\n-\tunregister_inet6addr_notifier(&i40iw_inetaddr6_notifier);\n-\tunregister_netdevice_notifier(&i40iw_netdevice_notifier);\n-}\n-\n-/**\n- * i40iw_save_msix_info - copy msix vector information to iwarp device\n- * @iwdev: iwarp device\n- * @ldev: lan device information\n- *\n- * Allocate iwdev msix table and copy the ldev msix info to the table\n- * Return 0 if successful, otherwise return error\n- */\n-static enum i40iw_status_code i40iw_save_msix_info(struct i40iw_device *iwdev,\n-\t\t\t\t\t\t struct i40e_info *ldev)\n-{\n-\tstruct i40e_qvlist_info *iw_qvlist;\n-\tstruct i40e_qv_info *iw_qvinfo;\n-\tu32 ceq_idx;\n-\tu32 i;\n-\tu32 size;\n-\n-\tif (!ldev->msix_count) {\n-\t\ti40iw_pr_err(\"No MSI-X vectors\\n\");\n-\t\treturn I40IW_ERR_CONFIG;\n-\t}\n-\n-\tiwdev->msix_count = ldev->msix_count;\n-\n-\tsize = sizeof(struct i40iw_msix_vector) * iwdev->msix_count;\n-\tsize += sizeof(struct i40e_qvlist_info);\n-\tsize += sizeof(struct i40e_qv_info) * iwdev->msix_count - 1;\n-\tiwdev->iw_msixtbl = kzalloc(size, GFP_KERNEL);\n-\n-\tif 
(!iwdev->iw_msixtbl)\n-\t\treturn I40IW_ERR_NO_MEMORY;\n-\tiwdev->iw_qvlist = (struct i40e_qvlist_info *)(&iwdev->iw_msixtbl[iwdev->msix_count]);\n-\tiw_qvlist = iwdev->iw_qvlist;\n-\tiw_qvinfo = iw_qvlist->qv_info;\n-\tiw_qvlist->num_vectors = iwdev->msix_count;\n-\tif (iwdev->msix_count <= num_online_cpus())\n-\t\tiwdev->msix_shared = true;\n-\tfor (i = 0, ceq_idx = 0; i < iwdev->msix_count; i++, iw_qvinfo++) {\n-\t\tiwdev->iw_msixtbl[i].idx = ldev->msix_entries[i].entry;\n-\t\tiwdev->iw_msixtbl[i].irq = ldev->msix_entries[i].vector;\n-\t\tiwdev->iw_msixtbl[i].cpu_affinity = ceq_idx;\n-\t\tif (i == 0) {\n-\t\t\tiw_qvinfo->aeq_idx = 0;\n-\t\t\tif (iwdev->msix_shared)\n-\t\t\t\tiw_qvinfo->ceq_idx = ceq_idx++;\n-\t\t\telse\n-\t\t\t\tiw_qvinfo->ceq_idx = I40E_QUEUE_INVALID_IDX;\n-\t\t} else {\n-\t\t\tiw_qvinfo->aeq_idx = I40E_QUEUE_INVALID_IDX;\n-\t\t\tiw_qvinfo->ceq_idx = ceq_idx++;\n-\t\t}\n-\t\tiw_qvinfo->itr_idx = 3;\n-\t\tiw_qvinfo->v_idx = iwdev->iw_msixtbl[i].idx;\n-\t}\n-\treturn 0;\n-}\n-\n-/**\n- * i40iw_deinit_device - clean up the device resources\n- * @iwdev: iwarp device\n- *\n- * Destroy the ib device interface, remove the mac ip entry and ipv4/ipv6 addresses,\n- * destroy the device queues and free the pble and the hmc objects\n- */\n-static void i40iw_deinit_device(struct i40iw_device *iwdev)\n-{\n-\tstruct i40e_info *ldev = iwdev->ldev;\n-\n-\tstruct i40iw_sc_dev *dev = &iwdev->sc_dev;\n-\n-\ti40iw_pr_info(\"state = %d\\n\", iwdev->init_state);\n-\tif (iwdev->param_wq)\n-\t\tdestroy_workqueue(iwdev->param_wq);\n-\n-\tswitch (iwdev->init_state) {\n-\tcase RDMA_DEV_REGISTERED:\n-\t\tiwdev->iw_status = 0;\n-\t\ti40iw_port_ibevent(iwdev);\n-\t\ti40iw_destroy_rdma_device(iwdev->iwibdev);\n-\t\t/* fallthrough */\n-\tcase IP_ADDR_REGISTERED:\n-\t\tif (!iwdev->reset)\n-\t\t\ti40iw_del_macip_entry(iwdev, (u8)iwdev->mac_ip_table_idx);\n-\t\t/* fallthrough */\n-\t\t/* fallthrough */\n-\tcase PBLE_CHUNK_MEM:\n-\t\ti40iw_destroy_pble_pool(dev, iwdev->pble_rsrc);\n-\t\t/* fallthrough */\n-\tcase CEQ_CREATED:\n-\t\ti40iw_dele_ceqs(iwdev);\n-\t\t/* fallthrough */\n-\tcase AEQ_CREATED:\n-\t\ti40iw_destroy_aeq(iwdev);\n-\t\t/* fallthrough */\n-\tcase IEQ_CREATED:\n-\t\ti40iw_puda_dele_resources(&iwdev->vsi, I40IW_PUDA_RSRC_TYPE_IEQ, iwdev->reset);\n-\t\t/* fallthrough */\n-\tcase ILQ_CREATED:\n-\t\ti40iw_puda_dele_resources(&iwdev->vsi, I40IW_PUDA_RSRC_TYPE_ILQ, iwdev->reset);\n-\t\t/* fallthrough */\n-\tcase CCQ_CREATED:\n-\t\ti40iw_destroy_ccq(iwdev);\n-\t\t/* fallthrough */\n-\tcase HMC_OBJS_CREATED:\n-\t\ti40iw_del_hmc_objects(dev, dev->hmc_info, true, iwdev->reset);\n-\t\t/* fallthrough */\n-\tcase CQP_CREATED:\n-\t\ti40iw_destroy_cqp(iwdev, true);\n-\t\t/* fallthrough */\n-\tcase INITIAL_STATE:\n-\t\ti40iw_cleanup_cm_core(&iwdev->cm_core);\n-\t\tif (iwdev->vsi.pestat) {\n-\t\t\ti40iw_vsi_stats_free(&iwdev->vsi);\n-\t\t\tkfree(iwdev->vsi.pestat);\n-\t\t}\n-\t\ti40iw_del_init_mem(iwdev);\n-\t\tbreak;\n-\tcase INVALID_STATE:\n-\t\t/* fallthrough */\n-\tdefault:\n-\t\ti40iw_pr_err(\"bad init_state = %d\\n\", iwdev->init_state);\n-\t\tbreak;\n-\t}\n-\n-\ti40iw_del_handler(i40iw_find_i40e_handler(ldev));\n-\tkfree(iwdev->hdl);\n-}\n-\n-/**\n- * i40iw_setup_init_state - set up the initial device struct\n- * @hdl: handler for iwarp device - one per instance\n- * @ldev: lan device information\n- * @client: iwarp client information, provided during registration\n- *\n- * Initialize the iwarp device and its hdl information\n- * using the ldev and client information\n- * Return 0 if successful, 
otherwise return error\n- */\n-static enum i40iw_status_code i40iw_setup_init_state(struct i40iw_handler *hdl,\n-\t\t\t\t\t\t struct i40e_info *ldev,\n-\t\t\t\t\t\t struct i40e_client *client)\n-{\n-\tstruct i40iw_device *iwdev = &hdl->device;\n-\tstruct i40iw_sc_dev *dev = &iwdev->sc_dev;\n-\tenum i40iw_status_code status;\n-\n-\tmemcpy(&hdl->ldev, ldev, sizeof(*ldev));\n-\n-\tiwdev->mpa_version = mpa_version;\n-\tiwdev->resource_profile = (resource_profile < I40IW_HMC_PROFILE_EQUAL) ?\n-\t (u8)resource_profile + I40IW_HMC_PROFILE_DEFAULT :\n-\t I40IW_HMC_PROFILE_DEFAULT;\n-\tiwdev->max_rdma_vfs =\n-\t\t(iwdev->resource_profile != I40IW_HMC_PROFILE_DEFAULT) ? max_rdma_vfs : 0;\n-\tiwdev->max_enabled_vfs = iwdev->max_rdma_vfs;\n-\tiwdev->netdev = ldev->netdev;\n-\thdl->client = client;\n-\tif (!ldev->ftype)\n-\t\tiwdev->db_start = pci_resource_start(ldev->pcidev, 0) + I40IW_DB_ADDR_OFFSET;\n-\telse\n-\t\tiwdev->db_start = pci_resource_start(ldev->pcidev, 0) + I40IW_VF_DB_ADDR_OFFSET;\n-\n-\tstatus = i40iw_save_msix_info(iwdev, ldev);\n-\tif (status)\n-\t\treturn status;\n-\tiwdev->hw.dev_context = (void *)ldev->pcidev;\n-\tiwdev->hw.hw_addr = ldev->hw_addr;\n-\tstatus = i40iw_allocate_dma_mem(&iwdev->hw,\n-\t\t\t\t\t&iwdev->obj_mem, 8192, 4096);\n-\tif (status)\n-\t\tgoto exit;\n-\tiwdev->obj_next = iwdev->obj_mem;\n-\tiwdev->push_mode = push_mode;\n-\n-\tinit_waitqueue_head(&iwdev->vchnl_waitq);\n-\tinit_waitqueue_head(&dev->vf_reqs);\n-\tinit_waitqueue_head(&iwdev->close_wq);\n-\n-\tstatus = i40iw_initialize_dev(iwdev, ldev);\n-exit:\n-\tif (status) {\n-\t\tkfree(iwdev->iw_msixtbl);\n-\t\ti40iw_free_dma_mem(dev->hw, &iwdev->obj_mem);\n-\t\tiwdev->iw_msixtbl = NULL;\n-\t}\n-\treturn status;\n-}\n-\n-/**\n- * i40iw_get_used_rsrc - determine resources used internally\n- * @iwdev: iwarp device\n- *\n- * Called after internal allocations\n- */\n-static void i40iw_get_used_rsrc(struct i40iw_device *iwdev)\n-{\n-\tiwdev->used_pds = find_next_zero_bit(iwdev->allocated_pds, iwdev->max_pd, 0);\n-\tiwdev->used_qps = find_next_zero_bit(iwdev->allocated_qps, iwdev->max_qp, 0);\n-\tiwdev->used_cqs = find_next_zero_bit(iwdev->allocated_cqs, iwdev->max_cq, 0);\n-\tiwdev->used_mrs = find_next_zero_bit(iwdev->allocated_mrs, iwdev->max_mr, 0);\n-}\n-\n-/**\n- * i40iw_open - client interface operation open for iwarp/uda device\n- * @ldev: lan device information\n- * @client: iwarp client information, provided during registration\n- *\n- * Called by the lan driver during the processing of client register\n- * Create device resources, set up queues, pble and hmc objects and\n- * register the device with the ib verbs interface\n- * Return 0 if successful, otherwise return error\n- */\n-static int i40iw_open(struct i40e_info *ldev, struct i40e_client *client)\n-{\n-\tstruct i40iw_device *iwdev;\n-\tstruct i40iw_sc_dev *dev;\n-\tenum i40iw_status_code status;\n-\tstruct i40iw_handler *hdl;\n-\n-\thdl = i40iw_find_netdev(ldev->netdev);\n-\tif (hdl)\n-\t\treturn 0;\n-\n-\thdl = kzalloc(sizeof(*hdl), GFP_KERNEL);\n-\tif (!hdl)\n-\t\treturn -ENOMEM;\n-\tiwdev = &hdl->device;\n-\tiwdev->hdl = hdl;\n-\tdev = &iwdev->sc_dev;\n-\tif (i40iw_setup_cm_core(iwdev)) {\n-\t\tkfree(iwdev->hdl);\n-\t\treturn -ENOMEM;\n-\t}\n-\n-\tdev->back_dev = (void *)iwdev;\n-\tiwdev->ldev = &hdl->ldev;\n-\tiwdev->client = client;\n-\tmutex_init(&iwdev->pbl_mutex);\n-\ti40iw_add_handler(hdl);\n-\n-\tdo {\n-\t\tstatus = i40iw_setup_init_state(hdl, ldev, client);\n-\t\tif (status)\n-\t\t\tbreak;\n-\t\tiwdev->init_state = 
INITIAL_STATE;\n-\t\tif (dev->is_pf)\n-\t\t\ti40iw_wait_pe_ready(dev->hw);\n-\t\tstatus = i40iw_create_cqp(iwdev);\n-\t\tif (status)\n-\t\t\tbreak;\n-\t\tiwdev->init_state = CQP_CREATED;\n-\t\tstatus = i40iw_hmc_setup(iwdev);\n-\t\tif (status)\n-\t\t\tbreak;\n-\t\tstatus = i40iw_create_ccq(iwdev);\n-\t\tif (status)\n-\t\t\tbreak;\n-\t\tiwdev->init_state = CCQ_CREATED;\n-\t\tstatus = i40iw_initialize_ilq(iwdev);\n-\t\tif (status)\n-\t\t\tbreak;\n-\t\tiwdev->init_state = ILQ_CREATED;\n-\t\tstatus = i40iw_initialize_ieq(iwdev);\n-\t\tif (status)\n-\t\t\tbreak;\n-\t\tiwdev->init_state = IEQ_CREATED;\n-\t\tstatus = i40iw_setup_aeq(iwdev);\n-\t\tif (status)\n-\t\t\tbreak;\n-\t\tiwdev->init_state = AEQ_CREATED;\n-\t\tstatus = i40iw_setup_ceqs(iwdev, ldev);\n-\t\tif (status)\n-\t\t\tbreak;\n-\t\tiwdev->init_state = CEQ_CREATED;\n-\t\tstatus = i40iw_initialize_hw_resources(iwdev);\n-\t\tif (status)\n-\t\t\tbreak;\n-\t\ti40iw_get_used_rsrc(iwdev);\n-\t\tdev->ccq_ops->ccq_arm(dev->ccq);\n-\t\tstatus = i40iw_hmc_init_pble(&iwdev->sc_dev, iwdev->pble_rsrc);\n-\t\tif (status)\n-\t\t\tbreak;\n-\t\tiwdev->init_state = PBLE_CHUNK_MEM;\n-\t\tiwdev->virtchnl_wq = alloc_ordered_workqueue(\"iwvch\", WQ_MEM_RECLAIM);\n-\t\tstatus = i40iw_add_mac_ip(iwdev);\n-\t\tif (status)\n-\t\t\tbreak;\n-\t\tiwdev->init_state = IP_ADDR_REGISTERED;\n-\t\tif (i40iw_register_rdma_device(iwdev)) {\n-\t\t\ti40iw_pr_err(\"register rdma device fail\\n\");\n-\t\t\tbreak;\n-\t\t};\n-\n-\t\tiwdev->init_state = RDMA_DEV_REGISTERED;\n-\t\tiwdev->iw_status = 1;\n-\t\ti40iw_port_ibevent(iwdev);\n-\t\tiwdev->param_wq = alloc_ordered_workqueue(\"l2params\", WQ_MEM_RECLAIM);\n-\t\tif(iwdev->param_wq == NULL)\n-\t\t\tbreak;\n-\t\ti40iw_pr_info(\"i40iw_open completed\\n\");\n-\t\treturn 0;\n-\t} while (0);\n-\n-\ti40iw_pr_err(\"status = %d last completion = %d\\n\", status, iwdev->init_state);\n-\ti40iw_deinit_device(iwdev);\n-\treturn -ERESTART;\n-}\n-\n-/**\n- * i40iw_l2params_worker - worker for l2 params change\n- * @work: work pointer for l2 params\n- */\n-static void i40iw_l2params_worker(struct work_struct *work)\n-{\n-\tstruct l2params_work *dwork =\n-\t container_of(work, struct l2params_work, work);\n-\tstruct i40iw_device *iwdev = dwork->iwdev;\n-\n-\ti40iw_change_l2params(&iwdev->vsi, &dwork->l2params);\n-\tatomic_dec(&iwdev->params_busy);\n-\tkfree(work);\n-}\n-\n-/**\n- * i40iw_l2param_change - handle qs handles for qos and mss change\n- * @ldev: lan device information\n- * @client: client for paramater change\n- * @params: new parameters from L2\n- */\n-static void i40iw_l2param_change(struct i40e_info *ldev, struct i40e_client *client,\n-\t\t\t\t struct i40e_params *params)\n-{\n-\tstruct i40iw_handler *hdl;\n-\tstruct i40iw_l2params *l2params;\n-\tstruct l2params_work *work;\n-\tstruct i40iw_device *iwdev;\n-\tint i;\n-\n-\thdl = i40iw_find_i40e_handler(ldev);\n-\tif (!hdl)\n-\t\treturn;\n-\n-\tiwdev = &hdl->device;\n-\n-\tif (atomic_read(&iwdev->params_busy))\n-\t\treturn;\n-\n-\n-\twork = kzalloc(sizeof(*work), GFP_KERNEL);\n-\tif (!work)\n-\t\treturn;\n-\n-\tatomic_inc(&iwdev->params_busy);\n-\n-\twork->iwdev = iwdev;\n-\tl2params = &work->l2params;\n-\tfor (i = 0; i < I40E_CLIENT_MAX_USER_PRIORITY; i++)\n-\t\tl2params->qs_handle_list[i] = params->qos.prio_qos[i].qs_handle;\n-\n-\tl2params->mtu = (params->mtu) ? 
params->mtu : iwdev->vsi.mtu;\n-\n-\tINIT_WORK(&work->work, i40iw_l2params_worker);\n-\tqueue_work(iwdev->param_wq, &work->work);\n-}\n-\n-/**\n- * i40iw_close - client interface operation close for iwarp/uda device\n- * @ldev: lan device information\n- * @client: client to close\n- *\n- * Called by the lan driver during the processing of client unregister\n- * Destroy and clean up the driver resources\n- */\n-static void i40iw_close(struct i40e_info *ldev, struct i40e_client *client, bool reset)\n-{\n-\tstruct i40iw_device *iwdev;\n-\tstruct i40iw_handler *hdl;\n-\n-\thdl = i40iw_find_i40e_handler(ldev);\n-\tif (!hdl)\n-\t\treturn;\n-\n-\tiwdev = &hdl->device;\n-\tiwdev->closing = true;\n-\n-\tif (reset)\n-\t\tiwdev->reset = true;\n-\n-\ti40iw_cm_teardown_connections(iwdev, NULL, NULL, true);\n-\tdestroy_workqueue(iwdev->virtchnl_wq);\n-\ti40iw_deinit_device(iwdev);\n-}\n-\n-/**\n- * i40iw_vf_reset - process VF reset\n- * @ldev: lan device information\n- * @client: client interface instance\n- * @vf_id: virtual function id\n- *\n- * Called when a VF is reset by the PF\n- * Destroy and clean up the VF resources\n- */\n-static void i40iw_vf_reset(struct i40e_info *ldev, struct i40e_client *client, u32 vf_id)\n-{\n-\tstruct i40iw_handler *hdl;\n-\tstruct i40iw_sc_dev *dev;\n-\tstruct i40iw_hmc_fcn_info hmc_fcn_info;\n-\tstruct i40iw_virt_mem vf_dev_mem;\n-\tstruct i40iw_vfdev *tmp_vfdev;\n-\tunsigned int i;\n-\tunsigned long flags;\n-\tstruct i40iw_device *iwdev;\n-\n-\thdl = i40iw_find_i40e_handler(ldev);\n-\tif (!hdl)\n-\t\treturn;\n-\n-\tdev = &hdl->device.sc_dev;\n-\tiwdev = (struct i40iw_device *)dev->back_dev;\n-\n-\tfor (i = 0; i < I40IW_MAX_PE_ENABLED_VF_COUNT; i++) {\n-\t\tif (!dev->vf_dev[i] || (dev->vf_dev[i]->vf_id != vf_id))\n-\t\t\tcontinue;\n-\t\t/* free all resources allocated on behalf of vf */\n-\t\ttmp_vfdev = dev->vf_dev[i];\n-\t\tspin_lock_irqsave(&iwdev->vsi.pestat->lock, flags);\n-\t\tdev->vf_dev[i] = NULL;\n-\t\tspin_unlock_irqrestore(&iwdev->vsi.pestat->lock, flags);\n-\t\ti40iw_del_hmc_objects(dev, &tmp_vfdev->hmc_info, false, false);\n-\t\t/* remove vf hmc function */\n-\t\tmemset(&hmc_fcn_info, 0, sizeof(hmc_fcn_info));\n-\t\thmc_fcn_info.vf_id = vf_id;\n-\t\thmc_fcn_info.iw_vf_idx = tmp_vfdev->iw_vf_idx;\n-\t\thmc_fcn_info.free_fcn = true;\n-\t\ti40iw_cqp_manage_hmc_fcn_cmd(dev, &hmc_fcn_info);\n-\t\t/* free vf_dev */\n-\t\tvf_dev_mem.va = tmp_vfdev;\n-\t\tvf_dev_mem.size = sizeof(struct i40iw_vfdev) +\n-\t\t\t\t\tsizeof(struct i40iw_hmc_obj_info) * I40IW_HMC_IW_MAX;\n-\t\ti40iw_free_virt_mem(dev->hw, &vf_dev_mem);\n-\t\tbreak;\n-\t}\n-}\n-\n-/**\n- * i40iw_vf_enable - enable a number of VFs\n- * @ldev: lan device information\n- * @client: client interface instance\n- * @num_vfs: number of VFs for the PF\n- *\n- * Called when the number of VFs changes\n- */\n-static void i40iw_vf_enable(struct i40e_info *ldev,\n-\t\t\t struct i40e_client *client,\n-\t\t\t u32 num_vfs)\n-{\n-\tstruct i40iw_handler *hdl;\n-\n-\thdl = i40iw_find_i40e_handler(ldev);\n-\tif (!hdl)\n-\t\treturn;\n-\n-\tif (num_vfs > I40IW_MAX_PE_ENABLED_VF_COUNT)\n-\t\thdl->device.max_enabled_vfs = I40IW_MAX_PE_ENABLED_VF_COUNT;\n-\telse\n-\t\thdl->device.max_enabled_vfs = num_vfs;\n-}\n-\n-/**\n- * i40iw_vf_capable - check if VF capable\n- * @ldev: lan device information\n- * @client: client interface instance\n- * @vf_id: virtual function id\n- *\n- * Return 1 if a VF slot is available or if VF is already RDMA enabled\n- * Return 0 otherwise\n- */\n-static int i40iw_vf_capable(struct i40e_info 
*ldev,\n-\t\t\t struct i40e_client *client,\n-\t\t\t u32 vf_id)\n-{\n-\tstruct i40iw_handler *hdl;\n-\tstruct i40iw_sc_dev *dev;\n-\tunsigned int i;\n-\n-\thdl = i40iw_find_i40e_handler(ldev);\n-\tif (!hdl)\n-\t\treturn 0;\n-\n-\tdev = &hdl->device.sc_dev;\n-\n-\tfor (i = 0; i < hdl->device.max_enabled_vfs; i++) {\n-\t\tif (!dev->vf_dev[i] || (dev->vf_dev[i]->vf_id == vf_id))\n-\t\t\treturn 1;\n-\t}\n-\n-\treturn 0;\n-}\n-\n-/**\n- * i40iw_virtchnl_receive - receive a message through the virtual channel\n- * @ldev: lan device information\n- * @client: client interface instance\n- * @vf_id: virtual function id associated with the message\n- * @msg: message buffer pointer\n- * @len: length of the message\n- *\n- * Invoke virtual channel receive operation for the given msg\n- * Return 0 if successful, otherwise return error\n- */\n-static int i40iw_virtchnl_receive(struct i40e_info *ldev,\n-\t\t\t\t struct i40e_client *client,\n-\t\t\t\t u32 vf_id,\n-\t\t\t\t u8 *msg,\n-\t\t\t\t u16 len)\n-{\n-\tstruct i40iw_handler *hdl;\n-\tstruct i40iw_sc_dev *dev;\n-\tstruct i40iw_device *iwdev;\n-\tint ret_code = I40IW_NOT_SUPPORTED;\n-\n-\tif (!len || !msg)\n-\t\treturn I40IW_ERR_PARAM;\n-\n-\thdl = i40iw_find_i40e_handler(ldev);\n-\tif (!hdl)\n-\t\treturn I40IW_ERR_PARAM;\n-\n-\tdev = &hdl->device.sc_dev;\n-\tiwdev = dev->back_dev;\n-\n-\tif (dev->vchnl_if.vchnl_recv) {\n-\t\tret_code = dev->vchnl_if.vchnl_recv(dev, vf_id, msg, len);\n-\t\tif (!dev->is_pf) {\n-\t\t\tatomic_dec(&iwdev->vchnl_msgs);\n-\t\t\twake_up(&iwdev->vchnl_waitq);\n-\t\t}\n-\t}\n-\treturn ret_code;\n-}\n-\n-/**\n- * i40iw_vf_clear_to_send - wait to send virtual channel message\n- * @dev: iwarp device *\n- * Wait for until virtual channel is clear\n- * before sending the next message\n- *\n- * Returns false if error\n- * Returns true if clear to send\n- */\n-bool i40iw_vf_clear_to_send(struct i40iw_sc_dev *dev)\n-{\n-\tstruct i40iw_device *iwdev;\n-\twait_queue_entry_t wait;\n-\n-\tiwdev = dev->back_dev;\n-\n-\tif (!wq_has_sleeper(&dev->vf_reqs) &&\n-\t (atomic_read(&iwdev->vchnl_msgs) == 0))\n-\t\treturn true; /* virtual channel is clear */\n-\n-\tinit_wait(&wait);\n-\tadd_wait_queue_exclusive(&dev->vf_reqs, &wait);\n-\n-\tif (!wait_event_timeout(dev->vf_reqs,\n-\t\t\t\t(atomic_read(&iwdev->vchnl_msgs) == 0),\n-\t\t\t\tI40IW_VCHNL_EVENT_TIMEOUT))\n-\t\tdev->vchnl_up = false;\n-\n-\tremove_wait_queue(&dev->vf_reqs, &wait);\n-\n-\treturn dev->vchnl_up;\n-}\n-\n-/**\n- * i40iw_virtchnl_send - send a message through the virtual channel\n- * @dev: iwarp device\n- * @vf_id: virtual function id associated with the message\n- * @msg: virtual channel message buffer pointer\n- * @len: length of the message\n- *\n- * Invoke virtual channel send operation for the given msg\n- * Return 0 if successful, otherwise return error\n- */\n-static enum i40iw_status_code i40iw_virtchnl_send(struct i40iw_sc_dev *dev,\n-\t\t\t\t\t\t u32 vf_id,\n-\t\t\t\t\t\t u8 *msg,\n-\t\t\t\t\t\t u16 len)\n-{\n-\tstruct i40iw_device *iwdev;\n-\tstruct i40e_info *ldev;\n-\n-\tif (!dev || !dev->back_dev)\n-\t\treturn I40IW_ERR_BAD_PTR;\n-\n-\tiwdev = dev->back_dev;\n-\tldev = iwdev->ldev;\n-\n-\tif (ldev && ldev->ops && ldev->ops->virtchnl_send)\n-\t\treturn ldev->ops->virtchnl_send(ldev, &i40iw_client, vf_id, msg, len);\n-\treturn I40IW_ERR_BAD_PTR;\n-}\n-\n-/* client interface functions */\n-static const struct i40e_client_ops i40e_ops = {\n-\t.open = i40iw_open,\n-\t.close = i40iw_close,\n-\t.l2_param_change = i40iw_l2param_change,\n-\t.virtchnl_receive = 
i40iw_virtchnl_receive,\n-\t.vf_reset = i40iw_vf_reset,\n-\t.vf_enable = i40iw_vf_enable,\n-\t.vf_capable = i40iw_vf_capable\n-};\n-\n-/**\n- * i40iw_init_module - driver initialization function\n- *\n- * First function to call when the driver is loaded\n- * Register the driver as i40e client and port mapper client\n- */\n-static int __init i40iw_init_module(void)\n-{\n-\tint ret;\n-\n-\tmemset(&i40iw_client, 0, sizeof(i40iw_client));\n-\ti40iw_client.version.major = CLIENT_IW_INTERFACE_VERSION_MAJOR;\n-\ti40iw_client.version.minor = CLIENT_IW_INTERFACE_VERSION_MINOR;\n-\ti40iw_client.version.build = CLIENT_IW_INTERFACE_VERSION_BUILD;\n-\ti40iw_client.ops = &i40e_ops;\n-\tmemcpy(i40iw_client.name, i40iw_client_name, I40E_CLIENT_STR_LENGTH);\n-\ti40iw_client.type = I40E_CLIENT_IWARP;\n-\tspin_lock_init(&i40iw_handler_lock);\n-\tret = i40e_register_client(&i40iw_client);\n-\ti40iw_register_notifiers();\n-\n-\treturn ret;\n-}\n-\n-/**\n- * i40iw_exit_module - driver exit clean up function\n- *\n- * The function is called just before the driver is unloaded\n- * Unregister the driver as i40e client and port mapper client\n- */\n-static void __exit i40iw_exit_module(void)\n-{\n-\ti40iw_unregister_notifiers();\n-\ti40e_unregister_client(&i40iw_client);\n-}\n-\n-module_init(i40iw_init_module);\n-module_exit(i40iw_exit_module);\ndiff --git a/drivers/infiniband/hw/i40iw/i40iw_osdep.h b/drivers/infiniband/hw/i40iw/i40iw_osdep.h\ndeleted file mode 100644\nindex d474aad..0000000\n--- a/drivers/infiniband/hw/i40iw/i40iw_osdep.h\n+++ /dev/null\n@@ -1,217 +0,0 @@\n-/*******************************************************************************\n-*\n-* Copyright (c) 2015-2016 Intel Corporation. All rights reserved.\n-*\n-* This software is available to you under a choice of one of two\n-* licenses. You may choose to be licensed under the terms of the GNU\n-* General Public License (GPL) Version 2, available from the file\n-* COPYING in the main directory of this source tree, or the\n-* OpenFabrics.org BSD license below:\n-*\n-* Redistribution and use in source and binary forms, with or\n-* without modification, are permitted provided that the following\n-* conditions are met:\n-*\n-* - Redistributions of source code must retain the above\n-*\tcopyright notice, this list of conditions and the following\n-*\tdisclaimer.\n-*\n-* - Redistributions in binary form must reproduce the above\n-*\tcopyright notice, this list of conditions and the following\n-*\tdisclaimer in the documentation and/or other materials\n-*\tprovided with the distribution.\n-*\n-* THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,\n-* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\n-* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\n-* NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS\n-* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN\n-* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\n-* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n-* SOFTWARE.\n-*\n-*******************************************************************************/\n-\n-#ifndef I40IW_OSDEP_H\n-#define I40IW_OSDEP_H\n-\n-#include <linux/kernel.h>\n-#include <linux/string.h>\n-#include <linux/bitops.h>\n-#include <net/tcp.h>\n-#include <crypto/hash.h>\n-/* get readq/writeq support for 32 bit kernels, use the low-first version */\n-#include <linux/io-64-nonatomic-lo-hi.h>\n-\n-#define STATS_TIMER_DELAY 1000\n-\n-static inline void set_64bit_val(u64 *wqe_words, u32 byte_index, u64 value)\n-{\n-\twqe_words[byte_index >> 3] = value;\n-}\n-\n-/**\n- * set_32bit_val - set 32 value to hw wqe\n- * @wqe_words: wqe addr to write\n- * @byte_index: index in wqe\n- * @value: value to write\n- **/\n-static inline void set_32bit_val(u32 *wqe_words, u32 byte_index, u32 value)\n-{\n-\twqe_words[byte_index >> 2] = value;\n-}\n-\n-/**\n- * get_64bit_val - read 64 bit value from wqe\n- * @wqe_words: wqe addr\n- * @byte_index: index to read from\n- * @value: read value\n- **/\n-static inline void get_64bit_val(u64 *wqe_words, u32 byte_index, u64 *value)\n-{\n-\t*value = wqe_words[byte_index >> 3];\n-}\n-\n-/**\n- * get_32bit_val - read 32 bit value from wqe\n- * @wqe_words: wqe addr\n- * @byte_index: index to reaad from\n- * @value: return 32 bit value\n- **/\n-static inline void get_32bit_val(u32 *wqe_words, u32 byte_index, u32 *value)\n-{\n-\t*value = wqe_words[byte_index >> 2];\n-}\n-\n-struct i40iw_dma_mem {\n-\tvoid *va;\n-\tdma_addr_t pa;\n-\tu32 size;\n-} __packed;\n-\n-struct i40iw_virt_mem {\n-\tvoid *va;\n-\tu32 size;\n-} __packed;\n-\n-#define i40iw_debug(h, m, s, ...) 
\\\n-do { \\\n-\tif (((m) & (h)->debug_mask)) \\\n-\t\tpr_info(\"i40iw \" s, ##__VA_ARGS__); \\\n-} while (0)\n-\n-#define i40iw_flush(a) readl((a)->hw_addr + I40E_GLGEN_STAT)\n-\n-#define I40E_GLHMC_VFSDCMD(_i) (0x000C8000 + ((_i) * 4)) \\\n-\t\t\t\t/* _i=0...31 */\n-#define I40E_GLHMC_VFSDCMD_MAX_INDEX 31\n-#define I40E_GLHMC_VFSDCMD_PMSDIDX_SHIFT 0\n-#define I40E_GLHMC_VFSDCMD_PMSDIDX_MASK (0xFFF \\\n-\t\t\t\t\t << I40E_GLHMC_VFSDCMD_PMSDIDX_SHIFT)\n-#define I40E_GLHMC_VFSDCMD_PF_SHIFT 16\n-#define I40E_GLHMC_VFSDCMD_PF_MASK (0xF << I40E_GLHMC_VFSDCMD_PF_SHIFT)\n-#define I40E_GLHMC_VFSDCMD_VF_SHIFT 20\n-#define I40E_GLHMC_VFSDCMD_VF_MASK (0x1FF << I40E_GLHMC_VFSDCMD_VF_SHIFT)\n-#define I40E_GLHMC_VFSDCMD_PMF_TYPE_SHIFT 29\n-#define I40E_GLHMC_VFSDCMD_PMF_TYPE_MASK (0x3 \\\n-\t\t\t\t\t << I40E_GLHMC_VFSDCMD_PMF_TYPE_SHIFT)\n-#define I40E_GLHMC_VFSDCMD_PMSDWR_SHIFT 31\n-#define I40E_GLHMC_VFSDCMD_PMSDWR_MASK (0x1 << I40E_GLHMC_VFSDCMD_PMSDWR_SHIFT)\n-\n-#define I40E_GLHMC_VFSDDATAHIGH(_i) (0x000C8200 + ((_i) * 4)) \\\n-\t\t\t\t/* _i=0...31 */\n-#define I40E_GLHMC_VFSDDATAHIGH_MAX_INDEX 31\n-#define I40E_GLHMC_VFSDDATAHIGH_PMSDDATAHIGH_SHIFT 0\n-#define I40E_GLHMC_VFSDDATAHIGH_PMSDDATAHIGH_MASK (0xFFFFFFFF \\\n-\t\t\t<< I40E_GLHMC_VFSDDATAHIGH_PMSDDATAHIGH_SHIFT)\n-\n-#define I40E_GLHMC_VFSDDATALOW(_i) (0x000C8100 + ((_i) * 4)) \\\n-\t\t\t\t/* _i=0...31 */\n-#define I40E_GLHMC_VFSDDATALOW_MAX_INDEX 31\n-#define I40E_GLHMC_VFSDDATALOW_PMSDVALID_SHIFT 0\n-#define I40E_GLHMC_VFSDDATALOW_PMSDVALID_MASK (0x1 \\\n-\t\t\t<< I40E_GLHMC_VFSDDATALOW_PMSDVALID_SHIFT)\n-#define I40E_GLHMC_VFSDDATALOW_PMSDTYPE_SHIFT 1\n-#define I40E_GLHMC_VFSDDATALOW_PMSDTYPE_MASK (0x1 \\\n-\t\t\t<< I40E_GLHMC_VFSDDATALOW_PMSDTYPE_SHIFT)\n-#define I40E_GLHMC_VFSDDATALOW_PMSDBPCOUNT_SHIFT 2\n-#define I40E_GLHMC_VFSDDATALOW_PMSDBPCOUNT_MASK (0x3FF \\\n-\t\t\t<< I40E_GLHMC_VFSDDATALOW_PMSDBPCOUNT_SHIFT)\n-#define I40E_GLHMC_VFSDDATALOW_PMSDDATALOW_SHIFT 12\n-#define I40E_GLHMC_VFSDDATALOW_PMSDDATALOW_MASK (0xFFFFF \\\n-\t\t\t<< I40E_GLHMC_VFSDDATALOW_PMSDDATALOW_SHIFT)\n-\n-#define I40E_GLPE_FWLDSTATUS 0x0000D200\n-#define I40E_GLPE_FWLDSTATUS_LOAD_REQUESTED_SHIFT 0\n-#define I40E_GLPE_FWLDSTATUS_LOAD_REQUESTED_MASK (0x1 \\\n-\t\t\t<< I40E_GLPE_FWLDSTATUS_LOAD_REQUESTED_SHIFT)\n-#define I40E_GLPE_FWLDSTATUS_DONE_SHIFT 1\n-#define I40E_GLPE_FWLDSTATUS_DONE_MASK (0x1 << I40E_GLPE_FWLDSTATUS_DONE_SHIFT)\n-#define I40E_GLPE_FWLDSTATUS_CQP_FAIL_SHIFT 2\n-#define I40E_GLPE_FWLDSTATUS_CQP_FAIL_MASK (0x1 \\\n-\t\t\t << I40E_GLPE_FWLDSTATUS_CQP_FAIL_SHIFT)\n-#define I40E_GLPE_FWLDSTATUS_TEP_FAIL_SHIFT 3\n-#define I40E_GLPE_FWLDSTATUS_TEP_FAIL_MASK (0x1 \\\n-\t\t\t << I40E_GLPE_FWLDSTATUS_TEP_FAIL_SHIFT)\n-#define I40E_GLPE_FWLDSTATUS_OOP_FAIL_SHIFT 4\n-#define I40E_GLPE_FWLDSTATUS_OOP_FAIL_MASK (0x1 \\\n-\t\t\t << I40E_GLPE_FWLDSTATUS_OOP_FAIL_SHIFT)\n-\n-struct i40iw_sc_dev;\n-struct i40iw_sc_qp;\n-struct i40iw_puda_buf;\n-struct i40iw_puda_completion_info;\n-struct i40iw_update_sds_info;\n-struct i40iw_hmc_fcn_info;\n-struct i40iw_virtchnl_work_info;\n-struct i40iw_manage_vf_pble_info;\n-struct i40iw_device;\n-struct i40iw_hmc_info;\n-struct i40iw_hw;\n-\n-u8 __iomem *i40iw_get_hw_addr(void *dev);\n-void i40iw_ieq_mpa_crc_ae(struct i40iw_sc_dev *dev, struct i40iw_sc_qp *qp);\n-enum i40iw_status_code i40iw_vf_wait_vchnl_resp(struct i40iw_sc_dev *dev);\n-bool i40iw_vf_clear_to_send(struct i40iw_sc_dev *dev);\n-enum i40iw_status_code i40iw_ieq_check_mpacrc(struct shash_desc *desc, void *addr,\n-\t\t\t\t\t u32 length, u32 
value);\n-struct i40iw_sc_qp *i40iw_ieq_get_qp(struct i40iw_sc_dev *dev, struct i40iw_puda_buf *buf);\n-void i40iw_ieq_update_tcpip_info(struct i40iw_puda_buf *buf, u16 length, u32 seqnum);\n-void i40iw_free_hash_desc(struct shash_desc *);\n-enum i40iw_status_code i40iw_init_hash_desc(struct shash_desc **);\n-enum i40iw_status_code i40iw_puda_get_tcpip_info(struct i40iw_puda_completion_info *info,\n-\t\t\t\t\t\t struct i40iw_puda_buf *buf);\n-enum i40iw_status_code i40iw_cqp_sds_cmd(struct i40iw_sc_dev *dev,\n-\t\t\t\t\t struct i40iw_update_sds_info *info);\n-enum i40iw_status_code i40iw_cqp_manage_hmc_fcn_cmd(struct i40iw_sc_dev *dev,\n-\t\t\t\t\t\t struct i40iw_hmc_fcn_info *hmcfcninfo);\n-enum i40iw_status_code i40iw_cqp_query_fpm_values_cmd(struct i40iw_sc_dev *dev,\n-\t\t\t\t\t\t struct i40iw_dma_mem *values_mem,\n-\t\t\t\t\t\t u8 hmc_fn_id);\n-enum i40iw_status_code i40iw_cqp_commit_fpm_values_cmd(struct i40iw_sc_dev *dev,\n-\t\t\t\t\t\t struct i40iw_dma_mem *values_mem,\n-\t\t\t\t\t\t u8 hmc_fn_id);\n-enum i40iw_status_code i40iw_alloc_query_fpm_buf(struct i40iw_sc_dev *dev,\n-\t\t\t\t\t\t struct i40iw_dma_mem *mem);\n-enum i40iw_status_code i40iw_cqp_manage_vf_pble_bp(struct i40iw_sc_dev *dev,\n-\t\t\t\t\t\t struct i40iw_manage_vf_pble_info *info);\n-void i40iw_cqp_spawn_worker(struct i40iw_sc_dev *dev,\n-\t\t\t struct i40iw_virtchnl_work_info *work_info, u32 iw_vf_idx);\n-void *i40iw_remove_head(struct list_head *list);\n-void i40iw_qp_suspend_resume(struct i40iw_sc_dev *dev, struct i40iw_sc_qp *qp, bool suspend);\n-\n-void i40iw_term_modify_qp(struct i40iw_sc_qp *qp, u8 next_state, u8 term, u8 term_len);\n-void i40iw_terminate_done(struct i40iw_sc_qp *qp, int timeout_occurred);\n-void i40iw_terminate_start_timer(struct i40iw_sc_qp *qp);\n-void i40iw_terminate_del_timer(struct i40iw_sc_qp *qp);\n-\n-enum i40iw_status_code i40iw_hw_manage_vf_pble_bp(struct i40iw_device *iwdev,\n-\t\t\t\t\t\t struct i40iw_manage_vf_pble_info *info,\n-\t\t\t\t\t\t bool wait);\n-struct i40iw_sc_vsi;\n-void i40iw_hw_stats_start_timer(struct i40iw_sc_vsi *vsi);\n-void i40iw_hw_stats_stop_timer(struct i40iw_sc_vsi *vsi);\n-#define i40iw_mmiowb() do { } while (0)\n-void i40iw_wr32(struct i40iw_hw *hw, u32 reg, u32 value);\n-u32 i40iw_rd32(struct i40iw_hw *hw, u32 reg);\n-#endif\t\t\t\t/* _I40IW_OSDEP_H_ */\ndiff --git a/drivers/infiniband/hw/i40iw/i40iw_p.h b/drivers/infiniband/hw/i40iw/i40iw_p.h\ndeleted file mode 100644\nindex 11d3a2a..0000000\n--- a/drivers/infiniband/hw/i40iw/i40iw_p.h\n+++ /dev/null\n@@ -1,128 +0,0 @@\n-/*******************************************************************************\n-*\n-* Copyright (c) 2015-2016 Intel Corporation. All rights reserved.\n-*\n-* This software is available to you under a choice of one of two\n-* licenses. 
You may choose to be licensed under the terms of the GNU\n-* General Public License (GPL) Version 2, available from the file\n-* COPYING in the main directory of this source tree, or the\n-* OpenFabrics.org BSD license below:\n-*\n-* Redistribution and use in source and binary forms, with or\n-* without modification, are permitted provided that the following\n-* conditions are met:\n-*\n-* - Redistributions of source code must retain the above\n-*\tcopyright notice, this list of conditions and the following\n-*\tdisclaimer.\n-*\n-* - Redistributions in binary form must reproduce the above\n-*\tcopyright notice, this list of conditions and the following\n-*\tdisclaimer in the documentation and/or other materials\n-*\tprovided with the distribution.\n-*\n-* THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,\n-* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\n-* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\n-* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS\n-* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN\n-* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\n-* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n-* SOFTWARE.\n-*\n-*******************************************************************************/\n-\n-#ifndef I40IW_P_H\n-#define I40IW_P_H\n-\n-#define PAUSE_TIMER_VALUE 0xFFFF\n-#define REFRESH_THRESHOLD 0x7FFF\n-#define HIGH_THRESHOLD 0x800\n-#define LOW_THRESHOLD 0x200\n-#define ALL_TC2PFC 0xFF\n-#define CQP_COMPL_WAIT_TIME 0x3E8\n-#define CQP_TIMEOUT_THRESHOLD 5\n-\n-void i40iw_debug_buf(struct i40iw_sc_dev *dev, enum i40iw_debug_flag mask,\n-\t\t char *desc, u64 *buf, u32 size);\n-/* init operations */\n-enum i40iw_status_code i40iw_device_init(struct i40iw_sc_dev *dev,\n-\t\t\t\t\t struct i40iw_device_init_info *info);\n-\n-void i40iw_sc_cqp_post_sq(struct i40iw_sc_cqp *cqp);\n-\n-u64 *i40iw_sc_cqp_get_next_send_wqe(struct i40iw_sc_cqp *cqp, u64 scratch);\n-\n-void i40iw_check_cqp_progress(struct i40iw_cqp_timeout *cqp_timeout, struct i40iw_sc_dev *dev);\n-\n-enum i40iw_status_code i40iw_sc_mr_fast_register(struct i40iw_sc_qp *qp,\n-\t\t\t\t\t\t struct i40iw_fast_reg_stag_info *info,\n-\t\t\t\t\t\t bool post_sq);\n-\n-void i40iw_insert_wqe_hdr(u64 *wqe, u64 header);\n-\n-/* HMC/FPM functions */\n-enum i40iw_status_code i40iw_sc_init_iw_hmc(struct i40iw_sc_dev *dev,\n-\t\t\t\t\t u8 hmc_fn_id);\n-\n-enum i40iw_status_code i40iw_pf_init_vfhmc(struct i40iw_sc_dev *dev, u8 vf_hmc_fn_id,\n-\t\t\t\t\t u32 *vf_cnt_array);\n-\n-/* stats functions */\n-void i40iw_hw_stats_refresh_all(struct i40iw_vsi_pestat *stats);\n-void i40iw_hw_stats_read_all(struct i40iw_vsi_pestat *stats, struct i40iw_dev_hw_stats *stats_values);\n-void i40iw_hw_stats_read_32(struct i40iw_vsi_pestat *stats,\n-\t\t\t enum i40iw_hw_stats_index_32b index,\n-\t\t\t u64 *value);\n-void i40iw_hw_stats_read_64(struct i40iw_vsi_pestat *stats,\n-\t\t\t enum i40iw_hw_stats_index_64b index,\n-\t\t\t u64 *value);\n-void i40iw_hw_stats_init(struct i40iw_vsi_pestat *stats, u8 index, bool is_pf);\n-\n-/* vsi misc functions */\n-enum i40iw_status_code i40iw_vsi_stats_init(struct i40iw_sc_vsi *vsi, struct i40iw_vsi_stats_info *info);\n-void i40iw_vsi_stats_free(struct i40iw_sc_vsi *vsi);\n-void i40iw_sc_vsi_init(struct i40iw_sc_vsi *vsi, struct i40iw_vsi_init_info *info);\n-\n-void i40iw_change_l2params(struct i40iw_sc_vsi *vsi, struct i40iw_l2params *l2params);\n-void i40iw_qp_add_qos(struct i40iw_sc_qp 
*qp);\n-void i40iw_qp_rem_qos(struct i40iw_sc_qp *qp);\n-void i40iw_terminate_send_fin(struct i40iw_sc_qp *qp);\n-\n-void i40iw_terminate_connection(struct i40iw_sc_qp *qp, struct i40iw_aeqe_info *info);\n-\n-void i40iw_terminate_received(struct i40iw_sc_qp *qp, struct i40iw_aeqe_info *info);\n-\n-enum i40iw_status_code i40iw_sc_suspend_qp(struct i40iw_sc_cqp *cqp,\n-\t\t\t\t\t struct i40iw_sc_qp *qp, u64 scratch);\n-\n-enum i40iw_status_code i40iw_sc_resume_qp(struct i40iw_sc_cqp *cqp,\n-\t\t\t\t\t struct i40iw_sc_qp *qp, u64 scratch);\n-\n-enum i40iw_status_code i40iw_sc_static_hmc_pages_allocated(struct i40iw_sc_cqp *cqp,\n-\t\t\t\t\t\t\t u64 scratch, u8 hmc_fn_id,\n-\t\t\t\t\t\t\t bool post_sq,\n-\t\t\t\t\t\t\t bool poll_registers);\n-\n-enum i40iw_status_code i40iw_config_fpm_values(struct i40iw_sc_dev *dev, u32 qp_count);\n-\n-void free_sd_mem(struct i40iw_sc_dev *dev);\n-\n-enum i40iw_status_code i40iw_process_cqp_cmd(struct i40iw_sc_dev *dev,\n-\t\t\t\t\t struct cqp_commands_info *pcmdinfo);\n-\n-enum i40iw_status_code i40iw_process_bh(struct i40iw_sc_dev *dev);\n-\n-/* prototype for functions used for dynamic memory allocation */\n-enum i40iw_status_code i40iw_allocate_dma_mem(struct i40iw_hw *hw,\n-\t\t\t\t\t struct i40iw_dma_mem *mem, u64 size,\n-\t\t\t\t\t u32 alignment);\n-void i40iw_free_dma_mem(struct i40iw_hw *hw, struct i40iw_dma_mem *mem);\n-enum i40iw_status_code i40iw_allocate_virt_mem(struct i40iw_hw *hw,\n-\t\t\t\t\t struct i40iw_virt_mem *mem, u32 size);\n-enum i40iw_status_code i40iw_free_virt_mem(struct i40iw_hw *hw,\n-\t\t\t\t\t struct i40iw_virt_mem *mem);\n-u8 i40iw_get_encoded_wqe_size(u32 wqsize, bool cqpsq);\n-void i40iw_reinitialize_ieq(struct i40iw_sc_dev *dev);\n-\n-#endif\ndiff --git a/drivers/infiniband/hw/i40iw/i40iw_pble.c b/drivers/infiniband/hw/i40iw/i40iw_pble.c\ndeleted file mode 100644\nindex 540aab5..0000000\n--- a/drivers/infiniband/hw/i40iw/i40iw_pble.c\n+++ /dev/null\n@@ -1,612 +0,0 @@\n-/*******************************************************************************\n-*\n-* Copyright (c) 2015-2016 Intel Corporation. All rights reserved.\n-*\n-* This software is available to you under a choice of one of two\n-* licenses. You may choose to be licensed under the terms of the GNU\n-* General Public License (GPL) Version 2, available from the file\n-* COPYING in the main directory of this source tree, or the\n-* OpenFabrics.org BSD license below:\n-*\n-* Redistribution and use in source and binary forms, with or\n-* without modification, are permitted provided that the following\n-* conditions are met:\n-*\n-* - Redistributions of source code must retain the above\n-*\tcopyright notice, this list of conditions and the following\n-*\tdisclaimer.\n-*\n-* - Redistributions in binary form must reproduce the above\n-*\tcopyright notice, this list of conditions and the following\n-*\tdisclaimer in the documentation and/or other materials\n-*\tprovided with the distribution.\n-*\n-* THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,\n-* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\n-* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\n-* NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS\n-* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN\n-* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\n-* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n-* SOFTWARE.\n-*\n-*******************************************************************************/\n-\n-#include \"i40iw_status.h\"\n-#include \"i40iw_osdep.h\"\n-#include \"i40iw_register.h\"\n-#include \"i40iw_hmc.h\"\n-\n-#include \"i40iw_d.h\"\n-#include \"i40iw_type.h\"\n-#include \"i40iw_p.h\"\n-\n-#include <linux/pci.h>\n-#include <linux/genalloc.h>\n-#include <linux/vmalloc.h>\n-#include \"i40iw_pble.h\"\n-#include \"i40iw.h\"\n-\n-struct i40iw_device;\n-static enum i40iw_status_code add_pble_pool(struct i40iw_sc_dev *dev,\n-\t\t\t\t\t struct i40iw_hmc_pble_rsrc *pble_rsrc);\n-static void i40iw_free_vmalloc_mem(struct i40iw_hw *hw, struct i40iw_chunk *chunk);\n-\n-/**\n- * i40iw_destroy_pble_pool - destroy pool during module unload\n- * @pble_rsrc:\tpble resources\n- */\n-void i40iw_destroy_pble_pool(struct i40iw_sc_dev *dev, struct i40iw_hmc_pble_rsrc *pble_rsrc)\n-{\n-\tstruct list_head *clist;\n-\tstruct list_head *tlist;\n-\tstruct i40iw_chunk *chunk;\n-\tstruct i40iw_pble_pool *pinfo = &pble_rsrc->pinfo;\n-\n-\tif (pinfo->pool) {\n-\t\tlist_for_each_safe(clist, tlist, &pinfo->clist) {\n-\t\t\tchunk = list_entry(clist, struct i40iw_chunk, list);\n-\t\t\tif (chunk->type == I40IW_VMALLOC)\n-\t\t\t\ti40iw_free_vmalloc_mem(dev->hw, chunk);\n-\t\t\tkfree(chunk);\n-\t\t}\n-\t\tgen_pool_destroy(pinfo->pool);\n-\t}\n-}\n-\n-/**\n- * i40iw_hmc_init_pble - Initialize pble resources during module load\n- * @dev: i40iw_sc_dev struct\n- * @pble_rsrc:\tpble resources\n- */\n-enum i40iw_status_code i40iw_hmc_init_pble(struct i40iw_sc_dev *dev,\n-\t\t\t\t\t struct i40iw_hmc_pble_rsrc *pble_rsrc)\n-{\n-\tstruct i40iw_hmc_info *hmc_info;\n-\tu32 fpm_idx = 0;\n-\n-\thmc_info = dev->hmc_info;\n-\tpble_rsrc->fpm_base_addr = hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].base;\n-\t/* Now start the pble' on 4k boundary */\n-\tif (pble_rsrc->fpm_base_addr & 0xfff)\n-\t\tfpm_idx = (PAGE_SIZE - (pble_rsrc->fpm_base_addr & 0xfff)) >> 3;\n-\n-\tpble_rsrc->unallocated_pble =\n-\t hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].cnt - fpm_idx;\n-\tpble_rsrc->next_fpm_addr = pble_rsrc->fpm_base_addr + (fpm_idx << 3);\n-\n-\tpble_rsrc->pinfo.pool_shift = POOL_SHIFT;\n-\tpble_rsrc->pinfo.pool = gen_pool_create(pble_rsrc->pinfo.pool_shift, -1);\n-\tINIT_LIST_HEAD(&pble_rsrc->pinfo.clist);\n-\tif (!pble_rsrc->pinfo.pool)\n-\t\tgoto error;\n-\n-\tif (add_pble_pool(dev, pble_rsrc))\n-\t\tgoto error;\n-\n-\treturn 0;\n-\n- error:i40iw_destroy_pble_pool(dev, pble_rsrc);\n-\treturn I40IW_ERR_NO_MEMORY;\n-}\n-\n-/**\n- * get_sd_pd_idx - Returns sd index, pd index and rel_pd_idx from fpm address\n- * @ pble_rsrc:\tstructure containing fpm address\n- * @ idx: where to return indexes\n- */\n-static inline void get_sd_pd_idx(struct i40iw_hmc_pble_rsrc *pble_rsrc,\n-\t\t\t\t struct sd_pd_idx *idx)\n-{\n-\tidx->sd_idx = (u32)(pble_rsrc->next_fpm_addr) / I40IW_HMC_DIRECT_BP_SIZE;\n-\tidx->pd_idx = (u32)(pble_rsrc->next_fpm_addr) / I40IW_HMC_PAGED_BP_SIZE;\n-\tidx->rel_pd_idx = (idx->pd_idx % I40IW_HMC_PD_CNT_IN_SD);\n-}\n-\n-/**\n- * add_sd_direct - add sd direct for pble\n- * @dev: hardware control device structure\n- * @pble_rsrc: pble resource ptr\n- * @info: page info for sd\n- */\n-static enum i40iw_status_code add_sd_direct(struct i40iw_sc_dev *dev,\n-\t\t\t\t\t struct 
i40iw_hmc_pble_rsrc *pble_rsrc,\n-\t\t\t\t\t struct i40iw_add_page_info *info)\n-{\n-\tenum i40iw_status_code ret_code = 0;\n-\tstruct sd_pd_idx *idx = &info->idx;\n-\tstruct i40iw_chunk *chunk = info->chunk;\n-\tstruct i40iw_hmc_info *hmc_info = info->hmc_info;\n-\tstruct i40iw_hmc_sd_entry *sd_entry = info->sd_entry;\n-\tu32 offset = 0;\n-\n-\tif (!sd_entry->valid) {\n-\t\tif (dev->is_pf) {\n-\t\t\tret_code = i40iw_add_sd_table_entry(dev->hw, hmc_info,\n-\t\t\t\t\t\t\t info->idx.sd_idx,\n-\t\t\t\t\t\t\t I40IW_SD_TYPE_DIRECT,\n-\t\t\t\t\t\t\t I40IW_HMC_DIRECT_BP_SIZE);\n-\t\t\tif (ret_code)\n-\t\t\t\treturn ret_code;\n-\t\t\tchunk->type = I40IW_DMA_COHERENT;\n-\t\t}\n-\t}\n-\toffset = idx->rel_pd_idx << I40IW_HMC_PAGED_BP_SHIFT;\n-\tchunk->size = info->pages << I40IW_HMC_PAGED_BP_SHIFT;\n-\tchunk->vaddr = ((u8 *)sd_entry->u.bp.addr.va + offset);\n-\tchunk->fpm_addr = pble_rsrc->next_fpm_addr;\n-\ti40iw_debug(dev, I40IW_DEBUG_PBLE, \"chunk_size[%d] = 0x%x vaddr=%p fpm_addr = %llx\\n\",\n-\t\t chunk->size, chunk->size, chunk->vaddr, chunk->fpm_addr);\n-\treturn 0;\n-}\n-\n-/**\n- * i40iw_free_vmalloc_mem - free vmalloc during close\n- * @hw: hw struct\n- * @chunk: chunk information for vmalloc\n- */\n-static void i40iw_free_vmalloc_mem(struct i40iw_hw *hw, struct i40iw_chunk *chunk)\n-{\n-\tstruct pci_dev *pcidev = (struct pci_dev *)hw->dev_context;\n-\tint i;\n-\n-\tif (!chunk->pg_cnt)\n-\t\tgoto done;\n-\tfor (i = 0; i < chunk->pg_cnt; i++)\n-\t\tdma_unmap_page(&pcidev->dev, chunk->dmaaddrs[i], PAGE_SIZE, DMA_BIDIRECTIONAL);\n-\n- done:\n-\tkfree(chunk->dmaaddrs);\n-\tchunk->dmaaddrs = NULL;\n-\tvfree(chunk->vaddr);\n-\tchunk->vaddr = NULL;\n-\tchunk->type = 0;\n-}\n-\n-/**\n- * i40iw_get_vmalloc_mem - get 2M page for sd\n- * @hw: hardware address\n- * @chunk: chunk to adf\n- * @pg_cnt: #of 4 K pages\n- */\n-static enum i40iw_status_code i40iw_get_vmalloc_mem(struct i40iw_hw *hw,\n-\t\t\t\t\t\t struct i40iw_chunk *chunk,\n-\t\t\t\t\t\t int pg_cnt)\n-{\n-\tstruct pci_dev *pcidev = (struct pci_dev *)hw->dev_context;\n-\tstruct page *page;\n-\tu8 *addr;\n-\tu32 size;\n-\tint i;\n-\n-\tchunk->dmaaddrs = kzalloc(pg_cnt << 3, GFP_KERNEL);\n-\tif (!chunk->dmaaddrs)\n-\t\treturn I40IW_ERR_NO_MEMORY;\n-\tsize = PAGE_SIZE * pg_cnt;\n-\tchunk->vaddr = vmalloc(size);\n-\tif (!chunk->vaddr) {\n-\t\tkfree(chunk->dmaaddrs);\n-\t\tchunk->dmaaddrs = NULL;\n-\t\treturn I40IW_ERR_NO_MEMORY;\n-\t}\n-\tchunk->size = size;\n-\taddr = (u8 *)chunk->vaddr;\n-\tfor (i = 0; i < pg_cnt; i++) {\n-\t\tpage = vmalloc_to_page((void *)addr);\n-\t\tif (!page)\n-\t\t\tbreak;\n-\t\tchunk->dmaaddrs[i] = dma_map_page(&pcidev->dev, page, 0,\n-\t\t\t\t\t\t PAGE_SIZE, DMA_BIDIRECTIONAL);\n-\t\tif (dma_mapping_error(&pcidev->dev, chunk->dmaaddrs[i]))\n-\t\t\tbreak;\n-\t\taddr += PAGE_SIZE;\n-\t}\n-\n-\tchunk->pg_cnt = i;\n-\tchunk->type = I40IW_VMALLOC;\n-\tif (i == pg_cnt)\n-\t\treturn 0;\n-\n-\ti40iw_free_vmalloc_mem(hw, chunk);\n-\treturn I40IW_ERR_NO_MEMORY;\n-}\n-\n-/**\n- * fpm_to_idx - given fpm address, get pble index\n- * @pble_rsrc: pble resource management\n- * @addr: fpm address for index\n- */\n-static inline u32 fpm_to_idx(struct i40iw_hmc_pble_rsrc *pble_rsrc, u64 addr)\n-{\n-\treturn (addr - (pble_rsrc->fpm_base_addr)) >> 3;\n-}\n-\n-/**\n- * add_bp_pages - add backing pages for sd\n- * @dev: hardware control device structure\n- * @pble_rsrc: pble resource management\n- * @info: page info for sd\n- */\n-static enum i40iw_status_code add_bp_pages(struct i40iw_sc_dev *dev,\n-\t\t\t\t\t struct i40iw_hmc_pble_rsrc 
*pble_rsrc,\n-\t\t\t\t\t struct i40iw_add_page_info *info)\n-{\n-\tu8 *addr;\n-\tstruct i40iw_dma_mem mem;\n-\tstruct i40iw_hmc_pd_entry *pd_entry;\n-\tstruct i40iw_hmc_sd_entry *sd_entry = info->sd_entry;\n-\tstruct i40iw_hmc_info *hmc_info = info->hmc_info;\n-\tstruct i40iw_chunk *chunk = info->chunk;\n-\tstruct i40iw_manage_vf_pble_info vf_pble_info;\n-\tenum i40iw_status_code status = 0;\n-\tu32 rel_pd_idx = info->idx.rel_pd_idx;\n-\tu32 pd_idx = info->idx.pd_idx;\n-\tu32 i;\n-\n-\tstatus = i40iw_get_vmalloc_mem(dev->hw, chunk, info->pages);\n-\tif (status)\n-\t\treturn I40IW_ERR_NO_MEMORY;\n-\tstatus = i40iw_add_sd_table_entry(dev->hw, hmc_info,\n-\t\t\t\t\t info->idx.sd_idx, I40IW_SD_TYPE_PAGED,\n-\t\t\t\t\t I40IW_HMC_DIRECT_BP_SIZE);\n-\tif (status)\n-\t\tgoto error;\n-\tif (!dev->is_pf) {\n-\t\tstatus = i40iw_vchnl_vf_add_hmc_objs(dev, I40IW_HMC_IW_PBLE,\n-\t\t\t\t\t\t fpm_to_idx(pble_rsrc,\n-\t\t\t\t\t\t\t\tpble_rsrc->next_fpm_addr),\n-\t\t\t\t\t\t (info->pages << PBLE_512_SHIFT));\n-\t\tif (status) {\n-\t\t\ti40iw_pr_err(\"allocate PBLEs in the PF. Error %i\\n\", status);\n-\t\t\tgoto error;\n-\t\t}\n-\t}\n-\taddr = chunk->vaddr;\n-\tfor (i = 0; i < info->pages; i++) {\n-\t\tmem.pa = chunk->dmaaddrs[i];\n-\t\tmem.size = PAGE_SIZE;\n-\t\tmem.va = (void *)(addr);\n-\t\tpd_entry = &sd_entry->u.pd_table.pd_entry[rel_pd_idx++];\n-\t\tif (!pd_entry->valid) {\n-\t\t\tstatus = i40iw_add_pd_table_entry(dev->hw, hmc_info, pd_idx++, &mem);\n-\t\t\tif (status)\n-\t\t\t\tgoto error;\n-\t\t\taddr += PAGE_SIZE;\n-\t\t} else {\n-\t\t\ti40iw_pr_err(\"pd entry is valid expecting to be invalid\\n\");\n-\t\t}\n-\t}\n-\tif (!dev->is_pf) {\n-\t\tvf_pble_info.first_pd_index = info->idx.rel_pd_idx;\n-\t\tvf_pble_info.inv_pd_ent = false;\n-\t\tvf_pble_info.pd_entry_cnt = PBLE_PER_PAGE;\n-\t\tvf_pble_info.pd_pl_pba = sd_entry->u.pd_table.pd_page_addr.pa;\n-\t\tvf_pble_info.sd_index = info->idx.sd_idx;\n-\t\tstatus = i40iw_hw_manage_vf_pble_bp(dev->back_dev,\n-\t\t\t\t\t\t &vf_pble_info, true);\n-\t\tif (status) {\n-\t\t\ti40iw_pr_err(\"CQP manage VF PBLE BP failed. %i\\n\", status);\n-\t\t\tgoto error;\n-\t\t}\n-\t}\n-\tchunk->fpm_addr = pble_rsrc->next_fpm_addr;\n-\treturn 0;\n-error:\n-\ti40iw_free_vmalloc_mem(dev->hw, chunk);\n-\treturn status;\n-}\n-\n-/**\n- * add_pble_pool - add a sd entry for pble resoure\n- * @dev: hardware control device structure\n- * @pble_rsrc: pble resource management\n- */\n-static enum i40iw_status_code add_pble_pool(struct i40iw_sc_dev *dev,\n-\t\t\t\t\t struct i40iw_hmc_pble_rsrc *pble_rsrc)\n-{\n-\tstruct i40iw_hmc_sd_entry *sd_entry;\n-\tstruct i40iw_hmc_info *hmc_info;\n-\tstruct i40iw_chunk *chunk;\n-\tstruct i40iw_add_page_info info;\n-\tstruct sd_pd_idx *idx = &info.idx;\n-\tenum i40iw_status_code ret_code = 0;\n-\tenum i40iw_sd_entry_type sd_entry_type;\n-\tu64 sd_reg_val = 0;\n-\tu32 pages;\n-\n-\tif (pble_rsrc->unallocated_pble < PBLE_PER_PAGE)\n-\t\treturn I40IW_ERR_NO_MEMORY;\n-\tif (pble_rsrc->next_fpm_addr & 0xfff) {\n-\t\ti40iw_pr_err(\"next fpm_addr %llx\\n\", pble_rsrc->next_fpm_addr);\n-\t\treturn I40IW_ERR_INVALID_PAGE_DESC_INDEX;\n-\t}\n-\tchunk = kzalloc(sizeof(*chunk), GFP_KERNEL);\n-\tif (!chunk)\n-\t\treturn I40IW_ERR_NO_MEMORY;\n-\thmc_info = dev->hmc_info;\n-\tchunk->fpm_addr = pble_rsrc->next_fpm_addr;\n-\tget_sd_pd_idx(pble_rsrc, idx);\n-\tsd_entry = &hmc_info->sd_table.sd_entry[idx->sd_idx];\n-\tpages = (idx->rel_pd_idx) ? 
(I40IW_HMC_PD_CNT_IN_SD -\n-\t\t\tidx->rel_pd_idx) : I40IW_HMC_PD_CNT_IN_SD;\n-\tpages = min(pages, pble_rsrc->unallocated_pble >> PBLE_512_SHIFT);\n-\tinfo.chunk = chunk;\n-\tinfo.hmc_info = hmc_info;\n-\tinfo.pages = pages;\n-\tinfo.sd_entry = sd_entry;\n-\tif (!sd_entry->valid) {\n-\t\tsd_entry_type = (!idx->rel_pd_idx &&\n-\t\t\t\t (pages == I40IW_HMC_PD_CNT_IN_SD) &&\n-\t\t\t\t dev->is_pf) ? I40IW_SD_TYPE_DIRECT : I40IW_SD_TYPE_PAGED;\n-\t} else {\n-\t\tsd_entry_type = sd_entry->entry_type;\n-\t}\n-\ti40iw_debug(dev, I40IW_DEBUG_PBLE,\n-\t\t \"pages = %d, unallocated_pble[%u] current_fpm_addr = %llx\\n\",\n-\t\t pages, pble_rsrc->unallocated_pble, pble_rsrc->next_fpm_addr);\n-\ti40iw_debug(dev, I40IW_DEBUG_PBLE, \"sd_entry_type = %d sd_entry valid = %d\\n\",\n-\t\t sd_entry_type, sd_entry->valid);\n-\n-\tif (sd_entry_type == I40IW_SD_TYPE_DIRECT)\n-\t\tret_code = add_sd_direct(dev, pble_rsrc, &info);\n-\tif (ret_code)\n-\t\tsd_entry_type = I40IW_SD_TYPE_PAGED;\n-\telse\n-\t\tpble_rsrc->stats_direct_sds++;\n-\n-\tif (sd_entry_type == I40IW_SD_TYPE_PAGED) {\n-\t\tret_code = add_bp_pages(dev, pble_rsrc, &info);\n-\t\tif (ret_code)\n-\t\t\tgoto error;\n-\t\telse\n-\t\t\tpble_rsrc->stats_paged_sds++;\n-\t}\n-\n-\tif (gen_pool_add_virt(pble_rsrc->pinfo.pool, (unsigned long)chunk->vaddr,\n-\t\t\t (phys_addr_t)chunk->fpm_addr, chunk->size, -1)) {\n-\t\ti40iw_pr_err(\"could not allocate memory by gen_pool_addr_virt()\\n\");\n-\t\tret_code = I40IW_ERR_NO_MEMORY;\n-\t\tgoto error;\n-\t}\n-\tpble_rsrc->next_fpm_addr += chunk->size;\n-\ti40iw_debug(dev, I40IW_DEBUG_PBLE, \"next_fpm_addr = %llx chunk_size[%u] = 0x%x\\n\",\n-\t\t pble_rsrc->next_fpm_addr, chunk->size, chunk->size);\n-\tpble_rsrc->unallocated_pble -= (chunk->size >> 3);\n-\tlist_add(&chunk->list, &pble_rsrc->pinfo.clist);\n-\tsd_reg_val = (sd_entry_type == I40IW_SD_TYPE_PAGED) ?\n-\t\t\tsd_entry->u.pd_table.pd_page_addr.pa : sd_entry->u.bp.addr.pa;\n-\tif (sd_entry->valid)\n-\t\treturn 0;\n-\tif (dev->is_pf) {\n-\t\tret_code = i40iw_hmc_sd_one(dev, hmc_info->hmc_fn_id,\n-\t\t\t\t\t sd_reg_val, idx->sd_idx,\n-\t\t\t\t\t sd_entry->entry_type, true);\n-\t\tif (ret_code) {\n-\t\t\ti40iw_pr_err(\"cqp cmd failed for sd (pbles)\\n\");\n-\t\t\tgoto error;\n-\t\t}\n-\t}\n-\n-\tsd_entry->valid = true;\n-\treturn 0;\n- error:\n-\tkfree(chunk);\n-\treturn ret_code;\n-}\n-\n-/**\n- * free_lvl2 - fee level 2 pble\n- * @pble_rsrc: pble resource management\n- * @palloc: level 2 pble allocation\n- */\n-static void free_lvl2(struct i40iw_hmc_pble_rsrc *pble_rsrc,\n-\t\t struct i40iw_pble_alloc *palloc)\n-{\n-\tu32 i;\n-\tstruct gen_pool *pool;\n-\tstruct i40iw_pble_level2 *lvl2 = &palloc->level2;\n-\tstruct i40iw_pble_info *root = &lvl2->root;\n-\tstruct i40iw_pble_info *leaf = lvl2->leaf;\n-\n-\tpool = pble_rsrc->pinfo.pool;\n-\n-\tfor (i = 0; i < lvl2->leaf_cnt; i++, leaf++) {\n-\t\tif (leaf->addr)\n-\t\t\tgen_pool_free(pool, leaf->addr, (leaf->cnt << 3));\n-\t\telse\n-\t\t\tbreak;\n-\t}\n-\n-\tif (root->addr)\n-\t\tgen_pool_free(pool, root->addr, (root->cnt << 3));\n-\n-\tkfree(lvl2->leaf);\n-\tlvl2->leaf = NULL;\n-}\n-\n-/**\n- * get_lvl2_pble - get level 2 pble resource\n- * @pble_rsrc: pble resource management\n- * @palloc: level 2 pble allocation\n- * @pool: pool pointer\n- */\n-static enum i40iw_status_code get_lvl2_pble(struct i40iw_hmc_pble_rsrc *pble_rsrc,\n-\t\t\t\t\t struct i40iw_pble_alloc *palloc,\n-\t\t\t\t\t struct gen_pool *pool)\n-{\n-\tu32 lf4k, lflast, total, i;\n-\tu32 pblcnt = PBLE_PER_PAGE;\n-\tu64 *addr;\n-\tstruct 
i40iw_pble_level2 *lvl2 = &palloc->level2;\n-\tstruct i40iw_pble_info *root = &lvl2->root;\n-\tstruct i40iw_pble_info *leaf;\n-\n-\t/* number of full 512 (4K) leafs) */\n-\tlf4k = palloc->total_cnt >> 9;\n-\tlflast = palloc->total_cnt % PBLE_PER_PAGE;\n-\ttotal = (lflast == 0) ? lf4k : lf4k + 1;\n-\tlvl2->leaf_cnt = total;\n-\n-\tleaf = kzalloc((sizeof(*leaf) * total), GFP_ATOMIC);\n-\tif (!leaf)\n-\t\treturn I40IW_ERR_NO_MEMORY;\n-\tlvl2->leaf = leaf;\n-\t/* allocate pbles for the root */\n-\troot->addr = gen_pool_alloc(pool, (total << 3));\n-\tif (!root->addr) {\n-\t\tkfree(lvl2->leaf);\n-\t\tlvl2->leaf = NULL;\n-\t\treturn I40IW_ERR_NO_MEMORY;\n-\t}\n-\troot->idx = fpm_to_idx(pble_rsrc,\n-\t\t\t (u64)gen_pool_virt_to_phys(pool, root->addr));\n-\troot->cnt = total;\n-\taddr = (u64 *)root->addr;\n-\tfor (i = 0; i < total; i++, leaf++) {\n-\t\tpblcnt = (lflast && ((i + 1) == total)) ? lflast : PBLE_PER_PAGE;\n-\t\tleaf->addr = gen_pool_alloc(pool, (pblcnt << 3));\n-\t\tif (!leaf->addr)\n-\t\t\tgoto error;\n-\t\tleaf->idx = fpm_to_idx(pble_rsrc, (u64)gen_pool_virt_to_phys(pool, leaf->addr));\n-\n-\t\tleaf->cnt = pblcnt;\n-\t\t*addr = (u64)leaf->idx;\n-\t\taddr++;\n-\t}\n-\tpalloc->level = I40IW_LEVEL_2;\n-\tpble_rsrc->stats_lvl2++;\n-\treturn 0;\n- error:\n-\tfree_lvl2(pble_rsrc, palloc);\n-\treturn I40IW_ERR_NO_MEMORY;\n-}\n-\n-/**\n- * get_lvl1_pble - get level 1 pble resource\n- * @dev: hardware control device structure\n- * @pble_rsrc: pble resource management\n- * @palloc: level 1 pble allocation\n- */\n-static enum i40iw_status_code get_lvl1_pble(struct i40iw_sc_dev *dev,\n-\t\t\t\t\t struct i40iw_hmc_pble_rsrc *pble_rsrc,\n-\t\t\t\t\t struct i40iw_pble_alloc *palloc)\n-{\n-\tu64 *addr;\n-\tstruct gen_pool *pool;\n-\tstruct i40iw_pble_info *lvl1 = &palloc->level1;\n-\n-\tpool = pble_rsrc->pinfo.pool;\n-\taddr = (u64 *)gen_pool_alloc(pool, (palloc->total_cnt << 3));\n-\n-\tif (!addr)\n-\t\treturn I40IW_ERR_NO_MEMORY;\n-\n-\tpalloc->level = I40IW_LEVEL_1;\n-\tlvl1->addr = (unsigned long)addr;\n-\tlvl1->idx = fpm_to_idx(pble_rsrc, (u64)gen_pool_virt_to_phys(pool,\n-\t\t\t (unsigned long)addr));\n-\tlvl1->cnt = palloc->total_cnt;\n-\tpble_rsrc->stats_lvl1++;\n-\treturn 0;\n-}\n-\n-/**\n- * get_lvl1_lvl2_pble - calls get_lvl1 and get_lvl2 pble routine\n- * @dev: i40iw_sc_dev struct\n- * @pble_rsrc:\tpble resources\n- * @palloc: contains all inforamtion regarding pble (idx + pble addr)\n- * @pool: pointer to general purpose special memory pool descriptor\n- */\n-static inline enum i40iw_status_code get_lvl1_lvl2_pble(struct i40iw_sc_dev *dev,\n-\t\t\t\t\t\t\tstruct i40iw_hmc_pble_rsrc *pble_rsrc,\n-\t\t\t\t\t\t\tstruct i40iw_pble_alloc *palloc,\n-\t\t\t\t\t\t\tstruct gen_pool *pool)\n-{\n-\tenum i40iw_status_code status = 0;\n-\n-\tstatus = get_lvl1_pble(dev, pble_rsrc, palloc);\n-\tif (status && (palloc->total_cnt > PBLE_PER_PAGE))\n-\t\tstatus = get_lvl2_pble(pble_rsrc, palloc, pool);\n-\treturn status;\n-}\n-\n-/**\n- * i40iw_get_pble - allocate pbles from the pool\n- * @dev: i40iw_sc_dev struct\n- * @pble_rsrc:\tpble resources\n- * @palloc: contains all inforamtion regarding pble (idx + pble addr)\n- * @pble_cnt: #of pbles requested\n- */\n-enum i40iw_status_code i40iw_get_pble(struct i40iw_sc_dev *dev,\n-\t\t\t\t struct i40iw_hmc_pble_rsrc *pble_rsrc,\n-\t\t\t\t struct i40iw_pble_alloc *palloc,\n-\t\t\t\t u32 pble_cnt)\n-{\n-\tstruct gen_pool *pool;\n-\tenum i40iw_status_code status = 0;\n-\tu32 max_sds = 0;\n-\tint i;\n-\n-\tpool = pble_rsrc->pinfo.pool;\n-\tpalloc->total_cnt = 
pble_cnt;\n-\tpalloc->level = I40IW_LEVEL_0;\n-\t/*check first to see if we can get pble's without acquiring additional sd's */\n-\tstatus = get_lvl1_lvl2_pble(dev, pble_rsrc, palloc, pool);\n-\tif (!status)\n-\t\tgoto exit;\n-\tmax_sds = (palloc->total_cnt >> 18) + 1;\n-\tfor (i = 0; i < max_sds; i++) {\n-\t\tstatus = add_pble_pool(dev, pble_rsrc);\n-\t\tif (status)\n-\t\t\tbreak;\n-\t\tstatus = get_lvl1_lvl2_pble(dev, pble_rsrc, palloc, pool);\n-\t\tif (!status)\n-\t\t\tbreak;\n-\t}\n-exit:\n-\tif (!status)\n-\t\tpble_rsrc->stats_alloc_ok++;\n-\telse\n-\t\tpble_rsrc->stats_alloc_fail++;\n-\n-\treturn status;\n-}\n-\n-/**\n- * i40iw_free_pble - put pbles back into pool\n- * @pble_rsrc:\tpble resources\n- * @palloc: contains all inforamtion regarding pble resource being freed\n- */\n-void i40iw_free_pble(struct i40iw_hmc_pble_rsrc *pble_rsrc,\n-\t\t struct i40iw_pble_alloc *palloc)\n-{\n-\tstruct gen_pool *pool;\n-\n-\tpool = pble_rsrc->pinfo.pool;\n-\tif (palloc->level == I40IW_LEVEL_2)\n-\t\tfree_lvl2(pble_rsrc, palloc);\n-\telse\n-\t\tgen_pool_free(pool, palloc->level1.addr,\n-\t\t\t (palloc->level1.cnt << 3));\n-\tpble_rsrc->stats_alloc_freed++;\n-}\ndiff --git a/drivers/infiniband/hw/i40iw/i40iw_pble.h b/drivers/infiniband/hw/i40iw/i40iw_pble.h\ndeleted file mode 100644\nindex 7b1851d..0000000\n--- a/drivers/infiniband/hw/i40iw/i40iw_pble.h\n+++ /dev/null\n@@ -1,131 +0,0 @@\n-/*******************************************************************************\n-*\n-* Copyright (c) 2015-2016 Intel Corporation. All rights reserved.\n-*\n-* This software is available to you under a choice of one of two\n-* licenses. You may choose to be licensed under the terms of the GNU\n-* General Public License (GPL) Version 2, available from the file\n-* COPYING in the main directory of this source tree, or the\n-* OpenFabrics.org BSD license below:\n-*\n-* Redistribution and use in source and binary forms, with or\n-* without modification, are permitted provided that the following\n-* conditions are met:\n-*\n-* - Redistributions of source code must retain the above\n-*\tcopyright notice, this list of conditions and the following\n-*\tdisclaimer.\n-*\n-* - Redistributions in binary form must reproduce the above\n-*\tcopyright notice, this list of conditions and the following\n-*\tdisclaimer in the documentation and/or other materials\n-*\tprovided with the distribution.\n-*\n-* THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,\n-* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\n-* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\n-* NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS\n-* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN\n-* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\n-* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n-* SOFTWARE.\n-*\n-*******************************************************************************/\n-\n-#ifndef I40IW_PBLE_H\n-#define I40IW_PBLE_H\n-\n-#define POOL_SHIFT 6\n-#define PBLE_PER_PAGE 512\n-#define I40IW_HMC_PAGED_BP_SHIFT 12\n-#define PBLE_512_SHIFT 9\n-\n-enum i40iw_pble_level {\n-\tI40IW_LEVEL_0 = 0,\n-\tI40IW_LEVEL_1 = 1,\n-\tI40IW_LEVEL_2 = 2\n-};\n-\n-enum i40iw_alloc_type {\n-\tI40IW_NO_ALLOC = 0,\n-\tI40IW_DMA_COHERENT = 1,\n-\tI40IW_VMALLOC = 2\n-};\n-\n-struct i40iw_pble_info {\n-\tunsigned long addr;\n-\tu32 idx;\n-\tu32 cnt;\n-};\n-\n-struct i40iw_pble_level2 {\n-\tstruct i40iw_pble_info root;\n-\tstruct i40iw_pble_info *leaf;\n-\tu32 leaf_cnt;\n-};\n-\n-struct i40iw_pble_alloc {\n-\tu32 total_cnt;\n-\tenum i40iw_pble_level level;\n-\tunion {\n-\t\tstruct i40iw_pble_info level1;\n-\t\tstruct i40iw_pble_level2 level2;\n-\t};\n-};\n-\n-struct sd_pd_idx {\n-\tu32 sd_idx;\n-\tu32 pd_idx;\n-\tu32 rel_pd_idx;\n-};\n-\n-struct i40iw_add_page_info {\n-\tstruct i40iw_chunk *chunk;\n-\tstruct i40iw_hmc_sd_entry *sd_entry;\n-\tstruct i40iw_hmc_info *hmc_info;\n-\tstruct sd_pd_idx idx;\n-\tu32 pages;\n-};\n-\n-struct i40iw_chunk {\n-\tstruct list_head list;\n-\tu32 size;\n-\tvoid *vaddr;\n-\tu64 fpm_addr;\n-\tu32 pg_cnt;\n-\tdma_addr_t *dmaaddrs;\n-\tenum i40iw_alloc_type type;\n-};\n-\n-struct i40iw_pble_pool {\n-\tstruct gen_pool *pool;\n-\tstruct list_head clist;\n-\tu32 total_pble_alloc;\n-\tu32 free_pble_cnt;\n-\tu32 pool_shift;\n-};\n-\n-struct i40iw_hmc_pble_rsrc {\n-\tu32 unallocated_pble;\n-\tu64 fpm_base_addr;\n-\tu64 next_fpm_addr;\n-\tstruct i40iw_pble_pool pinfo;\n-\n-\tu32 stats_direct_sds;\n-\tu32 stats_paged_sds;\n-\tu64 stats_alloc_ok;\n-\tu64 stats_alloc_fail;\n-\tu64 stats_alloc_freed;\n-\tu64 stats_lvl1;\n-\tu64 stats_lvl2;\n-};\n-\n-void i40iw_destroy_pble_pool(struct i40iw_sc_dev *dev, struct i40iw_hmc_pble_rsrc *pble_rsrc);\n-enum i40iw_status_code i40iw_hmc_init_pble(struct i40iw_sc_dev *dev,\n-\t\t\t\t\t struct i40iw_hmc_pble_rsrc *pble_rsrc);\n-void i40iw_free_pble(struct i40iw_hmc_pble_rsrc *pble_rsrc, struct i40iw_pble_alloc *palloc);\n-enum i40iw_status_code i40iw_get_pble(struct i40iw_sc_dev *dev,\n-\t\t\t\t struct i40iw_hmc_pble_rsrc *pble_rsrc,\n-\t\t\t\t struct i40iw_pble_alloc *palloc,\n-\t\t\t\t u32 pble_cnt);\n-#endif\ndiff --git a/drivers/infiniband/hw/i40iw/i40iw_puda.c b/drivers/infiniband/hw/i40iw/i40iw_puda.c\ndeleted file mode 100644\nindex d9c7ae6..0000000\n--- a/drivers/infiniband/hw/i40iw/i40iw_puda.c\n+++ /dev/null\n@@ -1,1493 +0,0 @@\n-/*******************************************************************************\n-*\n-* Copyright (c) 2015-2016 Intel Corporation. All rights reserved.\n-*\n-* This software is available to you under a choice of one of two\n-* licenses. 
You may choose to be licensed under the terms of the GNU\n-* General Public License (GPL) Version 2, available from the file\n-* COPYING in the main directory of this source tree, or the\n-* OpenFabrics.org BSD license below:\n-*\n-* Redistribution and use in source and binary forms, with or\n-* without modification, are permitted provided that the following\n-* conditions are met:\n-*\n-* - Redistributions of source code must retain the above\n-*\tcopyright notice, this list of conditions and the following\n-*\tdisclaimer.\n-*\n-* - Redistributions in binary form must reproduce the above\n-*\tcopyright notice, this list of conditions and the following\n-*\tdisclaimer in the documentation and/or other materials\n-*\tprovided with the distribution.\n-*\n-* THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,\n-* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\n-* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\n-* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS\n-* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN\n-* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\n-* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n-* SOFTWARE.\n-*\n-*******************************************************************************/\n-\n-#include \"i40iw_osdep.h\"\n-#include \"i40iw_register.h\"\n-#include \"i40iw_status.h\"\n-#include \"i40iw_hmc.h\"\n-\n-#include \"i40iw_d.h\"\n-#include \"i40iw_type.h\"\n-#include \"i40iw_p.h\"\n-#include \"i40iw_puda.h\"\n-\n-static void i40iw_ieq_receive(struct i40iw_sc_vsi *vsi,\n-\t\t\t struct i40iw_puda_buf *buf);\n-static void i40iw_ieq_tx_compl(struct i40iw_sc_vsi *vsi, void *sqwrid);\n-static void i40iw_ilq_putback_rcvbuf(struct i40iw_sc_qp *qp, u32 wqe_idx);\n-static enum i40iw_status_code i40iw_puda_replenish_rq(struct i40iw_puda_rsrc\n-\t\t\t\t\t\t *rsrc, bool initial);\n-/**\n- * i40iw_puda_get_listbuf - get buffer from puda list\n- * @list: list to use for buffers (ILQ or IEQ)\n- */\n-static struct i40iw_puda_buf *i40iw_puda_get_listbuf(struct list_head *list)\n-{\n-\tstruct i40iw_puda_buf *buf = NULL;\n-\n-\tif (!list_empty(list)) {\n-\t\tbuf = (struct i40iw_puda_buf *)list->next;\n-\t\tlist_del((struct list_head *)&buf->list);\n-\t}\n-\treturn buf;\n-}\n-\n-/**\n- * i40iw_puda_get_bufpool - return buffer from resource\n- * @rsrc: resource to use for buffer\n- */\n-struct i40iw_puda_buf *i40iw_puda_get_bufpool(struct i40iw_puda_rsrc *rsrc)\n-{\n-\tstruct i40iw_puda_buf *buf = NULL;\n-\tstruct list_head *list = &rsrc->bufpool;\n-\tunsigned long\tflags;\n-\n-\tspin_lock_irqsave(&rsrc->bufpool_lock, flags);\n-\tbuf = i40iw_puda_get_listbuf(list);\n-\tif (buf)\n-\t\trsrc->avail_buf_count--;\n-\telse\n-\t\trsrc->stats_buf_alloc_fail++;\n-\tspin_unlock_irqrestore(&rsrc->bufpool_lock, flags);\n-\treturn buf;\n-}\n-\n-/**\n- * i40iw_puda_ret_bufpool - return buffer to rsrc list\n- * @rsrc: resource to use for buffer\n- * @buf: buffe to return to resouce\n- */\n-void i40iw_puda_ret_bufpool(struct i40iw_puda_rsrc *rsrc,\n-\t\t\t struct i40iw_puda_buf *buf)\n-{\n-\tunsigned long\tflags;\n-\n-\tspin_lock_irqsave(&rsrc->bufpool_lock, flags);\n-\tlist_add(&buf->list, &rsrc->bufpool);\n-\tspin_unlock_irqrestore(&rsrc->bufpool_lock, flags);\n-\trsrc->avail_buf_count++;\n-}\n-\n-/**\n- * i40iw_puda_post_recvbuf - set wqe for rcv buffer\n- * @rsrc: resource ptr\n- * @wqe_idx: wqe index to use\n- * @buf: puda buffer for rcv q\n- * @initial: flag if during init time\n- 
*/\n-static void i40iw_puda_post_recvbuf(struct i40iw_puda_rsrc *rsrc, u32 wqe_idx,\n-\t\t\t\t struct i40iw_puda_buf *buf, bool initial)\n-{\n-\tu64 *wqe;\n-\tstruct i40iw_sc_qp *qp = &rsrc->qp;\n-\tu64 offset24 = 0;\n-\n-\tqp->qp_uk.rq_wrid_array[wqe_idx] = (uintptr_t)buf;\n-\twqe = qp->qp_uk.rq_base[wqe_idx].elem;\n-\ti40iw_debug(rsrc->dev, I40IW_DEBUG_PUDA,\n-\t\t \"%s: wqe_idx= %d buf = %p wqe = %p\\n\", __func__,\n-\t\t wqe_idx, buf, wqe);\n-\tif (!initial)\n-\t\tget_64bit_val(wqe, 24, &offset24);\n-\n-\toffset24 = (offset24) ? 0 : LS_64(1, I40IWQPSQ_VALID);\n-\n-\tset_64bit_val(wqe, 0, buf->mem.pa);\n-\tset_64bit_val(wqe, 8,\n-\t\t LS_64(buf->mem.size, I40IWQPSQ_FRAG_LEN));\n-\ti40iw_insert_wqe_hdr(wqe, offset24);\n-}\n-\n-/**\n- * i40iw_puda_replenish_rq - post rcv buffers\n- * @rsrc: resource to use for buffer\n- * @initial: flag if during init time\n- */\n-static enum i40iw_status_code i40iw_puda_replenish_rq(struct i40iw_puda_rsrc *rsrc,\n-\t\t\t\t\t\t bool initial)\n-{\n-\tu32 i;\n-\tu32 invalid_cnt = rsrc->rxq_invalid_cnt;\n-\tstruct i40iw_puda_buf *buf = NULL;\n-\n-\tfor (i = 0; i < invalid_cnt; i++) {\n-\t\tbuf = i40iw_puda_get_bufpool(rsrc);\n-\t\tif (!buf)\n-\t\t\treturn I40IW_ERR_list_empty;\n-\t\ti40iw_puda_post_recvbuf(rsrc, rsrc->rx_wqe_idx, buf,\n-\t\t\t\t\tinitial);\n-\t\trsrc->rx_wqe_idx =\n-\t\t ((rsrc->rx_wqe_idx + 1) % rsrc->rq_size);\n-\t\trsrc->rxq_invalid_cnt--;\n-\t}\n-\treturn 0;\n-}\n-\n-/**\n- * i40iw_puda_alloc_buf - allocate mem for buffer\n- * @dev: iwarp device\n- * @length: length of buffer\n- */\n-static struct i40iw_puda_buf *i40iw_puda_alloc_buf(struct i40iw_sc_dev *dev,\n-\t\t\t\t\t\t u32 length)\n-{\n-\tstruct i40iw_puda_buf *buf = NULL;\n-\tstruct i40iw_virt_mem buf_mem;\n-\tenum i40iw_status_code ret;\n-\n-\tret = i40iw_allocate_virt_mem(dev->hw, &buf_mem,\n-\t\t\t\t sizeof(struct i40iw_puda_buf));\n-\tif (ret) {\n-\t\ti40iw_debug(dev, I40IW_DEBUG_PUDA,\n-\t\t\t \"%s: error mem for buf\\n\", __func__);\n-\t\treturn NULL;\n-\t}\n-\tbuf = (struct i40iw_puda_buf *)buf_mem.va;\n-\tret = i40iw_allocate_dma_mem(dev->hw, &buf->mem, length, 1);\n-\tif (ret) {\n-\t\ti40iw_debug(dev, I40IW_DEBUG_PUDA,\n-\t\t\t \"%s: error dma mem for buf\\n\", __func__);\n-\t\ti40iw_free_virt_mem(dev->hw, &buf_mem);\n-\t\treturn NULL;\n-\t}\n-\tbuf->buf_mem.va = buf_mem.va;\n-\tbuf->buf_mem.size = buf_mem.size;\n-\treturn buf;\n-}\n-\n-/**\n- * i40iw_puda_dele_buf - delete buffer back to system\n- * @dev: iwarp device\n- * @buf: buffer to free\n- */\n-static void i40iw_puda_dele_buf(struct i40iw_sc_dev *dev,\n-\t\t\t\tstruct i40iw_puda_buf *buf)\n-{\n-\ti40iw_free_dma_mem(dev->hw, &buf->mem);\n-\ti40iw_free_virt_mem(dev->hw, &buf->buf_mem);\n-}\n-\n-/**\n- * i40iw_puda_get_next_send_wqe - return next wqe for processing\n- * @qp: puda qp for wqe\n- * @wqe_idx: wqe index for caller\n- */\n-static u64 *i40iw_puda_get_next_send_wqe(struct i40iw_qp_uk *qp, u32 *wqe_idx)\n-{\n-\tu64 *wqe = NULL;\n-\tenum i40iw_status_code ret_code = 0;\n-\n-\t*wqe_idx = I40IW_RING_GETCURRENT_HEAD(qp->sq_ring);\n-\tif (!*wqe_idx)\n-\t\tqp->swqe_polarity = !qp->swqe_polarity;\n-\tI40IW_RING_MOVE_HEAD(qp->sq_ring, ret_code);\n-\tif (ret_code)\n-\t\treturn wqe;\n-\twqe = qp->sq_base[*wqe_idx].elem;\n-\n-\treturn wqe;\n-}\n-\n-/**\n- * i40iw_puda_poll_info - poll cq for completion\n- * @cq: cq for poll\n- * @info: info return for successful completion\n- */\n-static enum i40iw_status_code i40iw_puda_poll_info(struct i40iw_sc_cq *cq,\n-\t\t\t\t\t\t struct i40iw_puda_completion_info *info)\n-{\n-\tu64 
qword0, qword2, qword3;\n-\tu64 *cqe;\n-\tu64 comp_ctx;\n-\tbool valid_bit;\n-\tu32 major_err, minor_err;\n-\tbool error;\n-\n-\tcqe = (u64 *)I40IW_GET_CURRENT_CQ_ELEMENT(&cq->cq_uk);\n-\tget_64bit_val(cqe, 24, &qword3);\n-\tvalid_bit = (bool)RS_64(qword3, I40IW_CQ_VALID);\n-\n-\tif (valid_bit != cq->cq_uk.polarity)\n-\t\treturn I40IW_ERR_QUEUE_EMPTY;\n-\n-\ti40iw_debug_buf(cq->dev, I40IW_DEBUG_PUDA, \"PUDA CQE\", cqe, 32);\n-\terror = (bool)RS_64(qword3, I40IW_CQ_ERROR);\n-\tif (error) {\n-\t\ti40iw_debug(cq->dev, I40IW_DEBUG_PUDA, \"%s receive error\\n\", __func__);\n-\t\tmajor_err = (u32)(RS_64(qword3, I40IW_CQ_MAJERR));\n-\t\tminor_err = (u32)(RS_64(qword3, I40IW_CQ_MINERR));\n-\t\tinfo->compl_error = major_err << 16 | minor_err;\n-\t\treturn I40IW_ERR_CQ_COMPL_ERROR;\n-\t}\n-\n-\tget_64bit_val(cqe, 0, &qword0);\n-\tget_64bit_val(cqe, 16, &qword2);\n-\n-\tinfo->q_type = (u8)RS_64(qword3, I40IW_CQ_SQ);\n-\tinfo->qp_id = (u32)RS_64(qword2, I40IWCQ_QPID);\n-\n-\tget_64bit_val(cqe, 8, &comp_ctx);\n-\tinfo->qp = (struct i40iw_qp_uk *)(unsigned long)comp_ctx;\n-\tinfo->wqe_idx = (u32)RS_64(qword3, I40IW_CQ_WQEIDX);\n-\n-\tif (info->q_type == I40IW_CQE_QTYPE_RQ) {\n-\t\tinfo->vlan_valid = (bool)RS_64(qword3, I40IW_VLAN_TAG_VALID);\n-\t\tinfo->l4proto = (u8)RS_64(qword2, I40IW_UDA_L4PROTO);\n-\t\tinfo->l3proto = (u8)RS_64(qword2, I40IW_UDA_L3PROTO);\n-\t\tinfo->payload_len = (u16)RS_64(qword0, I40IW_UDA_PAYLOADLEN);\n-\t}\n-\n-\treturn 0;\n-}\n-\n-/**\n- * i40iw_puda_poll_completion - processes completion for cq\n- * @dev: iwarp device\n- * @cq: cq getting interrupt\n- * @compl_err: return any completion err\n- */\n-enum i40iw_status_code i40iw_puda_poll_completion(struct i40iw_sc_dev *dev,\n-\t\t\t\t\t\t struct i40iw_sc_cq *cq, u32 *compl_err)\n-{\n-\tstruct i40iw_qp_uk *qp;\n-\tstruct i40iw_cq_uk *cq_uk = &cq->cq_uk;\n-\tstruct i40iw_puda_completion_info info;\n-\tenum i40iw_status_code ret = 0;\n-\tstruct i40iw_puda_buf *buf;\n-\tstruct i40iw_puda_rsrc *rsrc;\n-\tvoid *sqwrid;\n-\tu8 cq_type = cq->cq_type;\n-\tunsigned long\tflags;\n-\n-\tif ((cq_type == I40IW_CQ_TYPE_ILQ) || (cq_type == I40IW_CQ_TYPE_IEQ)) {\n-\t\trsrc = (cq_type == I40IW_CQ_TYPE_ILQ) ? 
cq->vsi->ilq : cq->vsi->ieq;\n-\t} else {\n-\t\ti40iw_debug(dev, I40IW_DEBUG_PUDA, \"%s qp_type error\\n\", __func__);\n-\t\treturn I40IW_ERR_BAD_PTR;\n-\t}\n-\tmemset(&info, 0, sizeof(info));\n-\tret = i40iw_puda_poll_info(cq, &info);\n-\t*compl_err = info.compl_error;\n-\tif (ret == I40IW_ERR_QUEUE_EMPTY)\n-\t\treturn ret;\n-\tif (ret)\n-\t\tgoto done;\n-\n-\tqp = info.qp;\n-\tif (!qp || !rsrc) {\n-\t\tret = I40IW_ERR_BAD_PTR;\n-\t\tgoto done;\n-\t}\n-\n-\tif (qp->qp_id != rsrc->qp_id) {\n-\t\tret = I40IW_ERR_BAD_PTR;\n-\t\tgoto done;\n-\t}\n-\n-\tif (info.q_type == I40IW_CQE_QTYPE_RQ) {\n-\t\tbuf = (struct i40iw_puda_buf *)(uintptr_t)qp->rq_wrid_array[info.wqe_idx];\n-\t\t/* Get all the tcpip information in the buf header */\n-\t\tret = i40iw_puda_get_tcpip_info(&info, buf);\n-\t\tif (ret) {\n-\t\t\trsrc->stats_rcvd_pkt_err++;\n-\t\t\tif (cq_type == I40IW_CQ_TYPE_ILQ) {\n-\t\t\t\ti40iw_ilq_putback_rcvbuf(&rsrc->qp,\n-\t\t\t\t\t\t\t info.wqe_idx);\n-\t\t\t} else {\n-\t\t\t\ti40iw_puda_ret_bufpool(rsrc, buf);\n-\t\t\t\ti40iw_puda_replenish_rq(rsrc, false);\n-\t\t\t}\n-\t\t\tgoto done;\n-\t\t}\n-\n-\t\trsrc->stats_pkt_rcvd++;\n-\t\trsrc->compl_rxwqe_idx = info.wqe_idx;\n-\t\ti40iw_debug(dev, I40IW_DEBUG_PUDA, \"%s RQ completion\\n\", __func__);\n-\t\trsrc->receive(rsrc->vsi, buf);\n-\t\tif (cq_type == I40IW_CQ_TYPE_ILQ)\n-\t\t\ti40iw_ilq_putback_rcvbuf(&rsrc->qp, info.wqe_idx);\n-\t\telse\n-\t\t\ti40iw_puda_replenish_rq(rsrc, false);\n-\n-\t} else {\n-\t\ti40iw_debug(dev, I40IW_DEBUG_PUDA, \"%s SQ completion\\n\", __func__);\n-\t\tsqwrid = (void *)(uintptr_t)qp->sq_wrtrk_array[info.wqe_idx].wrid;\n-\t\tI40IW_RING_SET_TAIL(qp->sq_ring, info.wqe_idx);\n-\t\trsrc->xmit_complete(rsrc->vsi, sqwrid);\n-\t\tspin_lock_irqsave(&rsrc->bufpool_lock, flags);\n-\t\trsrc->tx_wqe_avail_cnt++;\n-\t\tspin_unlock_irqrestore(&rsrc->bufpool_lock, flags);\n-\t\tif (!list_empty(&rsrc->txpend))\n-\t\t\ti40iw_puda_send_buf(rsrc, NULL);\n-\t}\n-\n-done:\n-\tI40IW_RING_MOVE_HEAD(cq_uk->cq_ring, ret);\n-\tif (I40IW_RING_GETCURRENT_HEAD(cq_uk->cq_ring) == 0)\n-\t\tcq_uk->polarity = !cq_uk->polarity;\n-\t/* update cq tail in cq shadow memory also */\n-\tI40IW_RING_MOVE_TAIL(cq_uk->cq_ring);\n-\tset_64bit_val(cq_uk->shadow_area, 0,\n-\t\t I40IW_RING_GETCURRENT_HEAD(cq_uk->cq_ring));\n-\treturn 0;\n-}\n-\n-/**\n- * i40iw_puda_send - complete send wqe for transmit\n- * @qp: puda qp for send\n- * @info: buffer information for transmit\n- */\n-enum i40iw_status_code i40iw_puda_send(struct i40iw_sc_qp *qp,\n-\t\t\t\t struct i40iw_puda_send_info *info)\n-{\n-\tu64 *wqe;\n-\tu32 iplen, l4len;\n-\tu64 header[2];\n-\tu32 wqe_idx;\n-\tu8 iipt;\n-\n-\t/* number of 32 bits DWORDS in header */\n-\tl4len = info->tcplen >> 2;\n-\tif (info->ipv4) {\n-\t\tiipt = 3;\n-\t\tiplen = 5;\n-\t} else {\n-\t\tiipt = 1;\n-\t\tiplen = 10;\n-\t}\n-\n-\twqe = i40iw_puda_get_next_send_wqe(&qp->qp_uk, &wqe_idx);\n-\tif (!wqe)\n-\t\treturn I40IW_ERR_QP_TOOMANY_WRS_POSTED;\n-\tqp->qp_uk.sq_wrtrk_array[wqe_idx].wrid = (uintptr_t)info->scratch;\n-\t/* Third line of WQE descriptor */\n-\t/* maclen is in words */\n-\theader[0] = LS_64((info->maclen >> 1), I40IW_UDA_QPSQ_MACLEN) |\n-\t\t LS_64(iplen, I40IW_UDA_QPSQ_IPLEN) | LS_64(1, I40IW_UDA_QPSQ_L4T) |\n-\t\t LS_64(iipt, I40IW_UDA_QPSQ_IIPT) |\n-\t\t LS_64(l4len, I40IW_UDA_QPSQ_L4LEN);\n-\t/* Forth line of WQE descriptor */\n-\theader[1] = LS_64(I40IW_OP_TYPE_SEND, I40IW_UDA_QPSQ_OPCODE) |\n-\t\t LS_64(1, I40IW_UDA_QPSQ_SIGCOMPL) |\n-\t\t LS_64(info->doloopback, I40IW_UDA_QPSQ_DOLOOPBACK) |\n-\t\t 
LS_64(qp->qp_uk.swqe_polarity, I40IW_UDA_QPSQ_VALID);\n-\n-\tset_64bit_val(wqe, 0, info->paddr);\n-\tset_64bit_val(wqe, 8, LS_64(info->len, I40IWQPSQ_FRAG_LEN));\n-\tset_64bit_val(wqe, 16, header[0]);\n-\n-\ti40iw_insert_wqe_hdr(wqe, header[1]);\n-\n-\ti40iw_debug_buf(qp->dev, I40IW_DEBUG_PUDA, \"PUDA SEND WQE\", wqe, 32);\n-\ti40iw_qp_post_wr(&qp->qp_uk);\n-\treturn 0;\n-}\n-\n-/**\n- * i40iw_puda_send_buf - transmit puda buffer\n- * @rsrc: resource to use for buffer\n- * @buf: puda buffer to transmit\n- */\n-void i40iw_puda_send_buf(struct i40iw_puda_rsrc *rsrc, struct i40iw_puda_buf *buf)\n-{\n-\tstruct i40iw_puda_send_info info;\n-\tenum i40iw_status_code ret = 0;\n-\tunsigned long\tflags;\n-\n-\tspin_lock_irqsave(&rsrc->bufpool_lock, flags);\n-\t/* if no wqe available or not from a completion and we have\n-\t * pending buffers, we must queue new buffer\n-\t */\n-\tif (!rsrc->tx_wqe_avail_cnt || (buf && !list_empty(&rsrc->txpend))) {\n-\t\tlist_add_tail(&buf->list, &rsrc->txpend);\n-\t\tspin_unlock_irqrestore(&rsrc->bufpool_lock, flags);\n-\t\trsrc->stats_sent_pkt_q++;\n-\t\tif (rsrc->type == I40IW_PUDA_RSRC_TYPE_ILQ)\n-\t\t\ti40iw_debug(rsrc->dev, I40IW_DEBUG_PUDA,\n-\t\t\t\t \"%s: adding to txpend\\n\", __func__);\n-\t\treturn;\n-\t}\n-\trsrc->tx_wqe_avail_cnt--;\n-\t/* if we are coming from a completion and have pending buffers\n-\t * then Get one from pending list\n-\t */\n-\tif (!buf) {\n-\t\tbuf = i40iw_puda_get_listbuf(&rsrc->txpend);\n-\t\tif (!buf)\n-\t\t\tgoto done;\n-\t}\n-\n-\tinfo.scratch = (void *)buf;\n-\tinfo.paddr = buf->mem.pa;\n-\tinfo.len = buf->totallen;\n-\tinfo.tcplen = buf->tcphlen;\n-\tinfo.maclen = buf->maclen;\n-\tinfo.ipv4 = buf->ipv4;\n-\tinfo.doloopback = (rsrc->type == I40IW_PUDA_RSRC_TYPE_IEQ);\n-\n-\tret = i40iw_puda_send(&rsrc->qp, &info);\n-\tif (ret) {\n-\t\trsrc->tx_wqe_avail_cnt++;\n-\t\trsrc->stats_sent_pkt_q++;\n-\t\tlist_add(&buf->list, &rsrc->txpend);\n-\t\tif (rsrc->type == I40IW_PUDA_RSRC_TYPE_ILQ)\n-\t\t\ti40iw_debug(rsrc->dev, I40IW_DEBUG_PUDA,\n-\t\t\t\t \"%s: adding to puda_send\\n\", __func__);\n-\t} else {\n-\t\trsrc->stats_pkt_sent++;\n-\t}\n-done:\n-\tspin_unlock_irqrestore(&rsrc->bufpool_lock, flags);\n-}\n-\n-/**\n- * i40iw_puda_qp_setctx - during init, set qp's context\n- * @rsrc: qp's resource\n- */\n-static void i40iw_puda_qp_setctx(struct i40iw_puda_rsrc *rsrc)\n-{\n-\tstruct i40iw_sc_qp *qp = &rsrc->qp;\n-\tu64 *qp_ctx = qp->hw_host_ctx;\n-\n-\tset_64bit_val(qp_ctx, 8, qp->sq_pa);\n-\tset_64bit_val(qp_ctx, 16, qp->rq_pa);\n-\n-\tset_64bit_val(qp_ctx, 24,\n-\t\t LS_64(qp->hw_rq_size, I40IWQPC_RQSIZE) |\n-\t\t LS_64(qp->hw_sq_size, I40IWQPC_SQSIZE));\n-\n-\tset_64bit_val(qp_ctx, 48, LS_64(rsrc->buf_size, I40IW_UDA_QPC_MAXFRAMESIZE));\n-\tset_64bit_val(qp_ctx, 56, 0);\n-\tset_64bit_val(qp_ctx, 64, 1);\n-\n-\tset_64bit_val(qp_ctx, 136,\n-\t\t LS_64(rsrc->cq_id, I40IWQPC_TXCQNUM) |\n-\t\t LS_64(rsrc->cq_id, I40IWQPC_RXCQNUM));\n-\n-\tset_64bit_val(qp_ctx, 160, LS_64(1, I40IWQPC_PRIVEN));\n-\n-\tset_64bit_val(qp_ctx, 168,\n-\t\t LS_64((uintptr_t)qp, I40IWQPC_QPCOMPCTX));\n-\n-\tset_64bit_val(qp_ctx, 176,\n-\t\t LS_64(qp->sq_tph_val, I40IWQPC_SQTPHVAL) |\n-\t\t LS_64(qp->rq_tph_val, I40IWQPC_RQTPHVAL) |\n-\t\t LS_64(qp->qs_handle, I40IWQPC_QSHANDLE));\n-\n-\ti40iw_debug_buf(rsrc->dev, I40IW_DEBUG_PUDA, \"PUDA QP CONTEXT\",\n-\t\t\tqp_ctx, I40IW_QP_CTX_SIZE);\n-}\n-\n-/**\n- * i40iw_puda_qp_wqe - setup wqe for qp create\n- * @rsrc: resource for qp\n- */\n-static enum i40iw_status_code i40iw_puda_qp_wqe(struct i40iw_sc_dev *dev, struct 
i40iw_sc_qp *qp)\n-{\n-\tstruct i40iw_sc_cqp *cqp;\n-\tu64 *wqe;\n-\tu64 header;\n-\tstruct i40iw_ccq_cqe_info compl_info;\n-\tenum i40iw_status_code status = 0;\n-\n-\tcqp = dev->cqp;\n-\twqe = i40iw_sc_cqp_get_next_send_wqe(cqp, 0);\n-\tif (!wqe)\n-\t\treturn I40IW_ERR_RING_FULL;\n-\n-\tset_64bit_val(wqe, 16, qp->hw_host_ctx_pa);\n-\tset_64bit_val(wqe, 40, qp->shadow_area_pa);\n-\theader = qp->qp_uk.qp_id |\n-\t\t LS_64(I40IW_CQP_OP_CREATE_QP, I40IW_CQPSQ_OPCODE) |\n-\t\t LS_64(I40IW_QP_TYPE_UDA, I40IW_CQPSQ_QP_QPTYPE) |\n-\t\t LS_64(1, I40IW_CQPSQ_QP_CQNUMVALID) |\n-\t\t LS_64(2, I40IW_CQPSQ_QP_NEXTIWSTATE) |\n-\t\t LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);\n-\n-\ti40iw_insert_wqe_hdr(wqe, header);\n-\n-\ti40iw_debug_buf(cqp->dev, I40IW_DEBUG_PUDA, \"PUDA CQE\", wqe, 32);\n-\ti40iw_sc_cqp_post_sq(cqp);\n-\tstatus = dev->cqp_ops->poll_for_cqp_op_done(dev->cqp,\n-\t\t\t\t\t\t I40IW_CQP_OP_CREATE_QP,\n-\t\t\t\t\t\t &compl_info);\n-\treturn status;\n-}\n-\n-/**\n- * i40iw_puda_qp_create - create qp for resource\n- * @rsrc: resource to use for buffer\n- */\n-static enum i40iw_status_code i40iw_puda_qp_create(struct i40iw_puda_rsrc *rsrc)\n-{\n-\tstruct i40iw_sc_qp *qp = &rsrc->qp;\n-\tstruct i40iw_qp_uk *ukqp = &qp->qp_uk;\n-\tenum i40iw_status_code ret = 0;\n-\tu32 sq_size, rq_size, t_size;\n-\tstruct i40iw_dma_mem *mem;\n-\n-\tsq_size = rsrc->sq_size * I40IW_QP_WQE_MIN_SIZE;\n-\trq_size = rsrc->rq_size * I40IW_QP_WQE_MIN_SIZE;\n-\tt_size = (sq_size + rq_size + (I40IW_SHADOW_AREA_SIZE << 3) +\n-\t\t I40IW_QP_CTX_SIZE);\n-\t/* Get page aligned memory */\n-\tret =\n-\t i40iw_allocate_dma_mem(rsrc->dev->hw, &rsrc->qpmem, t_size,\n-\t\t\t\t I40IW_HW_PAGE_SIZE);\n-\tif (ret) {\n-\t\ti40iw_debug(rsrc->dev, I40IW_DEBUG_PUDA, \"%s: error dma mem\\n\", __func__);\n-\t\treturn ret;\n-\t}\n-\n-\tmem = &rsrc->qpmem;\n-\tmemset(mem->va, 0, t_size);\n-\tqp->hw_sq_size = i40iw_get_encoded_wqe_size(rsrc->sq_size, false);\n-\tqp->hw_rq_size = i40iw_get_encoded_wqe_size(rsrc->rq_size, false);\n-\tqp->pd = &rsrc->sc_pd;\n-\tqp->qp_type = I40IW_QP_TYPE_UDA;\n-\tqp->dev = rsrc->dev;\n-\tqp->back_qp = (void *)rsrc;\n-\tqp->sq_pa = mem->pa;\n-\tqp->rq_pa = qp->sq_pa + sq_size;\n-\tqp->vsi = rsrc->vsi;\n-\tukqp->sq_base = mem->va;\n-\tukqp->rq_base = &ukqp->sq_base[rsrc->sq_size];\n-\tukqp->shadow_area = ukqp->rq_base[rsrc->rq_size].elem;\n-\tqp->shadow_area_pa = qp->rq_pa + rq_size;\n-\tqp->hw_host_ctx = ukqp->shadow_area + I40IW_SHADOW_AREA_SIZE;\n-\tqp->hw_host_ctx_pa =\n-\t\tqp->shadow_area_pa + (I40IW_SHADOW_AREA_SIZE << 3);\n-\tukqp->qp_id = rsrc->qp_id;\n-\tukqp->sq_wrtrk_array = rsrc->sq_wrtrk_array;\n-\tukqp->rq_wrid_array = rsrc->rq_wrid_array;\n-\n-\tukqp->qp_id = rsrc->qp_id;\n-\tukqp->sq_size = rsrc->sq_size;\n-\tukqp->rq_size = rsrc->rq_size;\n-\n-\tI40IW_RING_INIT(ukqp->sq_ring, ukqp->sq_size);\n-\tI40IW_RING_INIT(ukqp->initial_ring, ukqp->sq_size);\n-\tI40IW_RING_INIT(ukqp->rq_ring, ukqp->rq_size);\n-\n-\tif (qp->pd->dev->is_pf)\n-\t\tukqp->wqe_alloc_reg = (u32 __iomem *)(i40iw_get_hw_addr(qp->pd->dev) +\n-\t\t\t\t\t\t I40E_PFPE_WQEALLOC);\n-\telse\n-\t\tukqp->wqe_alloc_reg = (u32 __iomem *)(i40iw_get_hw_addr(qp->pd->dev) +\n-\t\t\t\t\t\t I40E_VFPE_WQEALLOC1);\n-\n-\tqp->user_pri = 0;\n-\ti40iw_qp_add_qos(qp);\n-\ti40iw_puda_qp_setctx(rsrc);\n-\tif (rsrc->dev->ceq_valid)\n-\t\tret = i40iw_cqp_qp_create_cmd(rsrc->dev, qp);\n-\telse\n-\t\tret = i40iw_puda_qp_wqe(rsrc->dev, qp);\n-\tif (ret) {\n-\t\ti40iw_qp_rem_qos(qp);\n-\t\ti40iw_free_dma_mem(rsrc->dev->hw, &rsrc->qpmem);\n-\t}\n-\treturn 
ret;\n-}\n-\n-/**\n- * i40iw_puda_cq_wqe - setup wqe for cq create\n- * @rsrc: resource for cq\n- */\n-static enum i40iw_status_code i40iw_puda_cq_wqe(struct i40iw_sc_dev *dev, struct i40iw_sc_cq *cq)\n-{\n-\tu64 *wqe;\n-\tstruct i40iw_sc_cqp *cqp;\n-\tu64 header;\n-\tstruct i40iw_ccq_cqe_info compl_info;\n-\tenum i40iw_status_code status = 0;\n-\n-\tcqp = dev->cqp;\n-\twqe = i40iw_sc_cqp_get_next_send_wqe(cqp, 0);\n-\tif (!wqe)\n-\t\treturn I40IW_ERR_RING_FULL;\n-\n-\tset_64bit_val(wqe, 0, cq->cq_uk.cq_size);\n-\tset_64bit_val(wqe, 8, RS_64_1(cq, 1));\n-\tset_64bit_val(wqe, 16,\n-\t\t LS_64(cq->shadow_read_threshold,\n-\t\t\t I40IW_CQPSQ_CQ_SHADOW_READ_THRESHOLD));\n-\tset_64bit_val(wqe, 32, cq->cq_pa);\n-\n-\tset_64bit_val(wqe, 40, cq->shadow_area_pa);\n-\n-\theader = cq->cq_uk.cq_id |\n-\t LS_64(I40IW_CQP_OP_CREATE_CQ, I40IW_CQPSQ_OPCODE) |\n-\t LS_64(1, I40IW_CQPSQ_CQ_CHKOVERFLOW) |\n-\t LS_64(1, I40IW_CQPSQ_CQ_ENCEQEMASK) |\n-\t LS_64(1, I40IW_CQPSQ_CQ_CEQIDVALID) |\n-\t LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);\n-\ti40iw_insert_wqe_hdr(wqe, header);\n-\n-\ti40iw_debug_buf(dev, I40IW_DEBUG_PUDA, \"PUDA CQE\",\n-\t\t\twqe, I40IW_CQP_WQE_SIZE * 8);\n-\n-\ti40iw_sc_cqp_post_sq(dev->cqp);\n-\tstatus = dev->cqp_ops->poll_for_cqp_op_done(dev->cqp,\n-\t\t\t\t\t\t I40IW_CQP_OP_CREATE_CQ,\n-\t\t\t\t\t\t &compl_info);\n-\treturn status;\n-}\n-\n-/**\n- * i40iw_puda_cq_create - create cq for resource\n- * @rsrc: resource for which cq to create\n- */\n-static enum i40iw_status_code i40iw_puda_cq_create(struct i40iw_puda_rsrc *rsrc)\n-{\n-\tstruct i40iw_sc_dev *dev = rsrc->dev;\n-\tstruct i40iw_sc_cq *cq = &rsrc->cq;\n-\tenum i40iw_status_code ret = 0;\n-\tu32 tsize, cqsize;\n-\tstruct i40iw_dma_mem *mem;\n-\tstruct i40iw_cq_init_info info;\n-\tstruct i40iw_cq_uk_init_info *init_info = &info.cq_uk_init_info;\n-\n-\tcq->vsi = rsrc->vsi;\n-\tcqsize = rsrc->cq_size * (sizeof(struct i40iw_cqe));\n-\ttsize = cqsize + sizeof(struct i40iw_cq_shadow_area);\n-\tret = i40iw_allocate_dma_mem(dev->hw, &rsrc->cqmem, tsize,\n-\t\t\t\t I40IW_CQ0_ALIGNMENT);\n-\tif (ret)\n-\t\treturn ret;\n-\n-\tmem = &rsrc->cqmem;\n-\tmemset(&info, 0, sizeof(info));\n-\tinfo.dev = dev;\n-\tinfo.type = (rsrc->type == I40IW_PUDA_RSRC_TYPE_ILQ) ?\n-\t\t\t I40IW_CQ_TYPE_ILQ : I40IW_CQ_TYPE_IEQ;\n-\tinfo.shadow_read_threshold = rsrc->cq_size >> 2;\n-\tinfo.ceq_id_valid = true;\n-\tinfo.cq_base_pa = mem->pa;\n-\tinfo.shadow_area_pa = mem->pa + cqsize;\n-\tinit_info->cq_base = mem->va;\n-\tinit_info->shadow_area = (u64 *)((u8 *)mem->va + cqsize);\n-\tinit_info->cq_size = rsrc->cq_size;\n-\tinit_info->cq_id = rsrc->cq_id;\n-\tinfo.ceqe_mask = true;\n-\tinfo.ceq_id_valid = true;\n-\tret = dev->iw_priv_cq_ops->cq_init(cq, &info);\n-\tif (ret)\n-\t\tgoto error;\n-\tif (rsrc->dev->ceq_valid)\n-\t\tret = i40iw_cqp_cq_create_cmd(dev, cq);\n-\telse\n-\t\tret = i40iw_puda_cq_wqe(dev, cq);\n-error:\n-\tif (ret)\n-\t\ti40iw_free_dma_mem(dev->hw, &rsrc->cqmem);\n-\treturn ret;\n-}\n-\n-/**\n- * i40iw_puda_free_qp - free qp for resource\n- * @rsrc: resource for which qp to free\n- */\n-static void i40iw_puda_free_qp(struct i40iw_puda_rsrc *rsrc)\n-{\n-\tenum i40iw_status_code ret;\n-\tstruct i40iw_ccq_cqe_info compl_info;\n-\tstruct i40iw_sc_dev *dev = rsrc->dev;\n-\n-\tif (rsrc->dev->ceq_valid) {\n-\t\ti40iw_cqp_qp_destroy_cmd(dev, &rsrc->qp);\n-\t\treturn;\n-\t}\n-\n-\tret = dev->iw_priv_qp_ops->qp_destroy(&rsrc->qp,\n-\t\t\t0, false, true, true);\n-\tif (ret)\n-\t\ti40iw_debug(dev, I40IW_DEBUG_PUDA,\n-\t\t\t \"%s error puda qp destroy 
wqe\\n\",\n-\t\t\t __func__);\n-\n-\tif (!ret) {\n-\t\tret = dev->cqp_ops->poll_for_cqp_op_done(dev->cqp,\n-\t\t\t\tI40IW_CQP_OP_DESTROY_QP,\n-\t\t\t\t&compl_info);\n-\t\tif (ret)\n-\t\t\ti40iw_debug(dev, I40IW_DEBUG_PUDA,\n-\t\t\t\t \"%s error puda qp destroy failed\\n\",\n-\t\t\t\t __func__);\n-\t}\n-}\n-\n-/**\n- * i40iw_puda_free_cq - free cq for resource\n- * @rsrc: resource for which cq to free\n- */\n-static void i40iw_puda_free_cq(struct i40iw_puda_rsrc *rsrc)\n-{\n-\tenum i40iw_status_code ret;\n-\tstruct i40iw_ccq_cqe_info compl_info;\n-\tstruct i40iw_sc_dev *dev = rsrc->dev;\n-\n-\tif (rsrc->dev->ceq_valid) {\n-\t\ti40iw_cqp_cq_destroy_cmd(dev, &rsrc->cq);\n-\t\treturn;\n-\t}\n-\tret = dev->iw_priv_cq_ops->cq_destroy(&rsrc->cq, 0, true);\n-\n-\tif (ret)\n-\t\ti40iw_debug(dev, I40IW_DEBUG_PUDA,\n-\t\t\t \"%s error ieq cq destroy\\n\",\n-\t\t\t __func__);\n-\n-\tif (!ret) {\n-\t\tret = dev->cqp_ops->poll_for_cqp_op_done(dev->cqp,\n-\t\t\t\tI40IW_CQP_OP_DESTROY_CQ,\n-\t\t\t\t&compl_info);\n-\t\tif (ret)\n-\t\t\ti40iw_debug(dev, I40IW_DEBUG_PUDA,\n-\t\t\t\t \"%s error ieq qp destroy done\\n\",\n-\t\t\t\t __func__);\n-\t}\n-}\n-\n-/**\n- * i40iw_puda_dele_resources - delete all resources during close\n- * @dev: iwarp device\n- * @type: type of resource to dele\n- * @reset: true if reset chip\n- */\n-void i40iw_puda_dele_resources(struct i40iw_sc_vsi *vsi,\n-\t\t\t enum puda_resource_type type,\n-\t\t\t bool reset)\n-{\n-\tstruct i40iw_sc_dev *dev = vsi->dev;\n-\tstruct i40iw_puda_rsrc *rsrc;\n-\tstruct i40iw_puda_buf *buf = NULL;\n-\tstruct i40iw_puda_buf *nextbuf = NULL;\n-\tstruct i40iw_virt_mem *vmem;\n-\n-\tswitch (type) {\n-\tcase I40IW_PUDA_RSRC_TYPE_ILQ:\n-\t\trsrc = vsi->ilq;\n-\t\tvmem = &vsi->ilq_mem;\n-\t\tbreak;\n-\tcase I40IW_PUDA_RSRC_TYPE_IEQ:\n-\t\trsrc = vsi->ieq;\n-\t\tvmem = &vsi->ieq_mem;\n-\t\tbreak;\n-\tdefault:\n-\t\ti40iw_debug(dev, I40IW_DEBUG_PUDA, \"%s: error resource type = 0x%x\\n\",\n-\t\t\t __func__, type);\n-\t\treturn;\n-\t}\n-\n-\tswitch (rsrc->completion) {\n-\tcase PUDA_HASH_CRC_COMPLETE:\n-\t\ti40iw_free_hash_desc(rsrc->hash_desc);\n-\t\t/* fall through */\n-\tcase PUDA_QP_CREATED:\n-\t\tif (!reset)\n-\t\t\ti40iw_puda_free_qp(rsrc);\n-\n-\t\ti40iw_free_dma_mem(dev->hw, &rsrc->qpmem);\n-\t\t/* fallthrough */\n-\tcase PUDA_CQ_CREATED:\n-\t\tif (!reset)\n-\t\t\ti40iw_puda_free_cq(rsrc);\n-\n-\t\ti40iw_free_dma_mem(dev->hw, &rsrc->cqmem);\n-\t\tbreak;\n-\tdefault:\n-\t\ti40iw_debug(rsrc->dev, I40IW_DEBUG_PUDA, \"%s error no resources\\n\", __func__);\n-\t\tbreak;\n-\t}\n-\t/* Free all allocated puda buffers for both tx and rx */\n-\tbuf = rsrc->alloclist;\n-\twhile (buf) {\n-\t\tnextbuf = buf->next;\n-\t\ti40iw_puda_dele_buf(dev, buf);\n-\t\tbuf = nextbuf;\n-\t\trsrc->alloc_buf_count--;\n-\t}\n-\ti40iw_free_virt_mem(dev->hw, vmem);\n-}\n-\n-/**\n- * i40iw_puda_allocbufs - allocate buffers for resource\n- * @rsrc: resource for buffer allocation\n- * @count: number of buffers to create\n- */\n-static enum i40iw_status_code i40iw_puda_allocbufs(struct i40iw_puda_rsrc *rsrc,\n-\t\t\t\t\t\t u32 count)\n-{\n-\tu32 i;\n-\tstruct i40iw_puda_buf *buf;\n-\tstruct i40iw_puda_buf *nextbuf;\n-\n-\tfor (i = 0; i < count; i++) {\n-\t\tbuf = i40iw_puda_alloc_buf(rsrc->dev, rsrc->buf_size);\n-\t\tif (!buf) {\n-\t\t\trsrc->stats_buf_alloc_fail++;\n-\t\t\treturn I40IW_ERR_NO_MEMORY;\n-\t\t}\n-\t\ti40iw_puda_ret_bufpool(rsrc, buf);\n-\t\trsrc->alloc_buf_count++;\n-\t\tif (!rsrc->alloclist) {\n-\t\t\trsrc->alloclist = buf;\n-\t\t} else {\n-\t\t\tnextbuf = 
rsrc->alloclist;\n-\t\t\trsrc->alloclist = buf;\n-\t\t\tbuf->next = nextbuf;\n-\t\t}\n-\t}\n-\trsrc->avail_buf_count = rsrc->alloc_buf_count;\n-\treturn 0;\n-}\n-\n-/**\n- * i40iw_puda_create_rsrc - create resouce (ilq or ieq)\n- * @dev: iwarp device\n- * @info: resource information\n- */\n-enum i40iw_status_code i40iw_puda_create_rsrc(struct i40iw_sc_vsi *vsi,\n-\t\t\t\t\t struct i40iw_puda_rsrc_info *info)\n-{\n-\tstruct i40iw_sc_dev *dev = vsi->dev;\n-\tenum i40iw_status_code ret = 0;\n-\tstruct i40iw_puda_rsrc *rsrc;\n-\tu32 pudasize;\n-\tu32 sqwridsize, rqwridsize;\n-\tstruct i40iw_virt_mem *vmem;\n-\n-\tinfo->count = 1;\n-\tpudasize = sizeof(struct i40iw_puda_rsrc);\n-\tsqwridsize = info->sq_size * sizeof(struct i40iw_sq_uk_wr_trk_info);\n-\trqwridsize = info->rq_size * 8;\n-\tswitch (info->type) {\n-\tcase I40IW_PUDA_RSRC_TYPE_ILQ:\n-\t\tvmem = &vsi->ilq_mem;\n-\t\tbreak;\n-\tcase I40IW_PUDA_RSRC_TYPE_IEQ:\n-\t\tvmem = &vsi->ieq_mem;\n-\t\tbreak;\n-\tdefault:\n-\t\treturn I40IW_NOT_SUPPORTED;\n-\t}\n-\tret =\n-\t i40iw_allocate_virt_mem(dev->hw, vmem,\n-\t\t\t\t pudasize + sqwridsize + rqwridsize);\n-\tif (ret)\n-\t\treturn ret;\n-\trsrc = (struct i40iw_puda_rsrc *)vmem->va;\n-\tspin_lock_init(&rsrc->bufpool_lock);\n-\tif (info->type == I40IW_PUDA_RSRC_TYPE_ILQ) {\n-\t\tvsi->ilq = (struct i40iw_puda_rsrc *)vmem->va;\n-\t\tvsi->ilq_count = info->count;\n-\t\trsrc->receive = info->receive;\n-\t\trsrc->xmit_complete = info->xmit_complete;\n-\t} else {\n-\t\tvmem = &vsi->ieq_mem;\n-\t\tvsi->ieq_count = info->count;\n-\t\tvsi->ieq = (struct i40iw_puda_rsrc *)vmem->va;\n-\t\trsrc->receive = i40iw_ieq_receive;\n-\t\trsrc->xmit_complete = i40iw_ieq_tx_compl;\n-\t}\n-\n-\trsrc->type = info->type;\n-\trsrc->sq_wrtrk_array = (struct i40iw_sq_uk_wr_trk_info *)((u8 *)vmem->va + pudasize);\n-\trsrc->rq_wrid_array = (u64 *)((u8 *)vmem->va + pudasize + sqwridsize);\n-\t/* Initialize all ieq lists */\n-\tINIT_LIST_HEAD(&rsrc->bufpool);\n-\tINIT_LIST_HEAD(&rsrc->txpend);\n-\n-\trsrc->tx_wqe_avail_cnt = info->sq_size - 1;\n-\tdev->iw_pd_ops->pd_init(dev, &rsrc->sc_pd, info->pd_id, -1);\n-\trsrc->qp_id = info->qp_id;\n-\trsrc->cq_id = info->cq_id;\n-\trsrc->sq_size = info->sq_size;\n-\trsrc->rq_size = info->rq_size;\n-\trsrc->cq_size = info->rq_size + info->sq_size;\n-\trsrc->buf_size = info->buf_size;\n-\trsrc->dev = dev;\n-\trsrc->vsi = vsi;\n-\n-\tret = i40iw_puda_cq_create(rsrc);\n-\tif (!ret) {\n-\t\trsrc->completion = PUDA_CQ_CREATED;\n-\t\tret = i40iw_puda_qp_create(rsrc);\n-\t}\n-\tif (ret) {\n-\t\ti40iw_debug(dev, I40IW_DEBUG_PUDA, \"[%s] error qp_create\\n\",\n-\t\t\t __func__);\n-\t\tgoto error;\n-\t}\n-\trsrc->completion = PUDA_QP_CREATED;\n-\n-\tret = i40iw_puda_allocbufs(rsrc, info->tx_buf_cnt + info->rq_size);\n-\tif (ret) {\n-\t\ti40iw_debug(dev, I40IW_DEBUG_PUDA, \"[%s] error alloc_buf\\n\",\n-\t\t\t __func__);\n-\t\tgoto error;\n-\t}\n-\n-\trsrc->rxq_invalid_cnt = info->rq_size;\n-\tret = i40iw_puda_replenish_rq(rsrc, true);\n-\tif (ret)\n-\t\tgoto error;\n-\n-\tif (info->type == I40IW_PUDA_RSRC_TYPE_IEQ) {\n-\t\tif (!i40iw_init_hash_desc(&rsrc->hash_desc)) {\n-\t\t\trsrc->check_crc = true;\n-\t\t\trsrc->completion = PUDA_HASH_CRC_COMPLETE;\n-\t\t\tret = 0;\n-\t\t}\n-\t}\n-\n-\tdev->ccq_ops->ccq_arm(&rsrc->cq);\n-\treturn ret;\n- error:\n-\ti40iw_puda_dele_resources(vsi, info->type, false);\n-\n-\treturn ret;\n-}\n-\n-/**\n- * i40iw_ilq_putback_rcvbuf - ilq buffer to put back on rq\n- * @qp: ilq's qp resource\n- * @wqe_idx: wqe index of completed rcvbuf\n- */\n-static void 
i40iw_ilq_putback_rcvbuf(struct i40iw_sc_qp *qp, u32 wqe_idx)\n-{\n-\tu64 *wqe;\n-\tu64 offset24;\n-\n-\twqe = qp->qp_uk.rq_base[wqe_idx].elem;\n-\tget_64bit_val(wqe, 24, &offset24);\n-\toffset24 = (offset24) ? 0 : LS_64(1, I40IWQPSQ_VALID);\n-\tset_64bit_val(wqe, 24, offset24);\n-}\n-\n-/**\n- * i40iw_ieq_get_fpdu - given length return fpdu length\n- * @length: length if fpdu\n- */\n-static u16 i40iw_ieq_get_fpdu_length(u16 length)\n-{\n-\tu16 fpdu_len;\n-\n-\tfpdu_len = length + I40IW_IEQ_MPA_FRAMING;\n-\tfpdu_len = (fpdu_len + 3) & 0xfffffffc;\n-\treturn fpdu_len;\n-}\n-\n-/**\n- * i40iw_ieq_copy_to_txbuf - copydata from rcv buf to tx buf\n- * @buf: rcv buffer with partial\n- * @txbuf: tx buffer for sendign back\n- * @buf_offset: rcv buffer offset to copy from\n- * @txbuf_offset: at offset in tx buf to copy\n- * @length: length of data to copy\n- */\n-static void i40iw_ieq_copy_to_txbuf(struct i40iw_puda_buf *buf,\n-\t\t\t\t struct i40iw_puda_buf *txbuf,\n-\t\t\t\t u16 buf_offset, u32 txbuf_offset,\n-\t\t\t\t u32 length)\n-{\n-\tvoid *mem1 = (u8 *)buf->mem.va + buf_offset;\n-\tvoid *mem2 = (u8 *)txbuf->mem.va + txbuf_offset;\n-\n-\tmemcpy(mem2, mem1, length);\n-}\n-\n-/**\n- * i40iw_ieq_setup_tx_buf - setup tx buffer for partial handling\n- * @buf: reeive buffer with partial\n- * @txbuf: buffer to prepare\n- */\n-static void i40iw_ieq_setup_tx_buf(struct i40iw_puda_buf *buf,\n-\t\t\t\t struct i40iw_puda_buf *txbuf)\n-{\n-\ttxbuf->maclen = buf->maclen;\n-\ttxbuf->tcphlen = buf->tcphlen;\n-\ttxbuf->ipv4 = buf->ipv4;\n-\ttxbuf->hdrlen = buf->hdrlen;\n-\ti40iw_ieq_copy_to_txbuf(buf, txbuf, 0, 0, buf->hdrlen);\n-}\n-\n-/**\n- * i40iw_ieq_check_first_buf - check if rcv buffer's seq is in range\n- * @buf: receive exception buffer\n- * @fps: first partial sequence number\n- */\n-static void i40iw_ieq_check_first_buf(struct i40iw_puda_buf *buf, u32 fps)\n-{\n-\tu32 offset;\n-\n-\tif (buf->seqnum < fps) {\n-\t\toffset = fps - buf->seqnum;\n-\t\tif (offset > buf->datalen)\n-\t\t\treturn;\n-\t\tbuf->data += offset;\n-\t\tbuf->datalen -= (u16)offset;\n-\t\tbuf->seqnum = fps;\n-\t}\n-}\n-\n-/**\n- * i40iw_ieq_compl_pfpdu - write txbuf with full fpdu\n- * @ieq: ieq resource\n- * @rxlist: ieq's received buffer list\n- * @pbufl: temporary list for buffers for fpddu\n- * @txbuf: tx buffer for fpdu\n- * @fpdu_len: total length of fpdu\n- */\n-static void i40iw_ieq_compl_pfpdu(struct i40iw_puda_rsrc *ieq,\n-\t\t\t\t struct list_head *rxlist,\n-\t\t\t\t struct list_head *pbufl,\n-\t\t\t\t struct i40iw_puda_buf *txbuf,\n-\t\t\t\t u16 fpdu_len)\n-{\n-\tstruct i40iw_puda_buf *buf;\n-\tu32 nextseqnum;\n-\tu16 txoffset, bufoffset;\n-\n-\tbuf = i40iw_puda_get_listbuf(pbufl);\n-\tif (!buf)\n-\t\treturn;\n-\tnextseqnum = buf->seqnum + fpdu_len;\n-\ttxbuf->totallen = buf->hdrlen + fpdu_len;\n-\ttxbuf->data = (u8 *)txbuf->mem.va + buf->hdrlen;\n-\ti40iw_ieq_setup_tx_buf(buf, txbuf);\n-\n-\ttxoffset = buf->hdrlen;\n-\tbufoffset = (u16)(buf->data - (u8 *)buf->mem.va);\n-\n-\tdo {\n-\t\tif (buf->datalen >= fpdu_len) {\n-\t\t\t/* copied full fpdu */\n-\t\t\ti40iw_ieq_copy_to_txbuf(buf, txbuf, bufoffset, txoffset, fpdu_len);\n-\t\t\tbuf->datalen -= fpdu_len;\n-\t\t\tbuf->data += fpdu_len;\n-\t\t\tbuf->seqnum = nextseqnum;\n-\t\t\tbreak;\n-\t\t}\n-\t\t/* copy partial fpdu */\n-\t\ti40iw_ieq_copy_to_txbuf(buf, txbuf, bufoffset, txoffset, buf->datalen);\n-\t\ttxoffset += buf->datalen;\n-\t\tfpdu_len -= buf->datalen;\n-\t\ti40iw_puda_ret_bufpool(ieq, buf);\n-\t\tbuf = i40iw_puda_get_listbuf(pbufl);\n-\t\tif 
(!buf)\n-\t\t\treturn;\n-\t\tbufoffset = (u16)(buf->data - (u8 *)buf->mem.va);\n-\t} while (1);\n-\n-\t/* last buffer on the list*/\n-\tif (buf->datalen)\n-\t\tlist_add(&buf->list, rxlist);\n-\telse\n-\t\ti40iw_puda_ret_bufpool(ieq, buf);\n-}\n-\n-/**\n- * i40iw_ieq_create_pbufl - create buffer list for single fpdu\n- * @rxlist: resource list for receive ieq buffes\n- * @pbufl: temp. list for buffers for fpddu\n- * @buf: first receive buffer\n- * @fpdu_len: total length of fpdu\n- */\n-static enum i40iw_status_code i40iw_ieq_create_pbufl(\n-\t\t\t\t\t\t struct i40iw_pfpdu *pfpdu,\n-\t\t\t\t\t\t struct list_head *rxlist,\n-\t\t\t\t\t\t struct list_head *pbufl,\n-\t\t\t\t\t\t struct i40iw_puda_buf *buf,\n-\t\t\t\t\t\t u16 fpdu_len)\n-{\n-\tenum i40iw_status_code status = 0;\n-\tstruct i40iw_puda_buf *nextbuf;\n-\tu32\tnextseqnum;\n-\tu16 plen = fpdu_len - buf->datalen;\n-\tbool done = false;\n-\n-\tnextseqnum = buf->seqnum + buf->datalen;\n-\tdo {\n-\t\tnextbuf = i40iw_puda_get_listbuf(rxlist);\n-\t\tif (!nextbuf) {\n-\t\t\tstatus = I40IW_ERR_list_empty;\n-\t\t\tbreak;\n-\t\t}\n-\t\tlist_add_tail(&nextbuf->list, pbufl);\n-\t\tif (nextbuf->seqnum != nextseqnum) {\n-\t\t\tpfpdu->bad_seq_num++;\n-\t\t\tstatus = I40IW_ERR_SEQ_NUM;\n-\t\t\tbreak;\n-\t\t}\n-\t\tif (nextbuf->datalen >= plen) {\n-\t\t\tdone = true;\n-\t\t} else {\n-\t\t\tplen -= nextbuf->datalen;\n-\t\t\tnextseqnum = nextbuf->seqnum + nextbuf->datalen;\n-\t\t}\n-\n-\t} while (!done);\n-\n-\treturn status;\n-}\n-\n-/**\n- * i40iw_ieq_handle_partial - process partial fpdu buffer\n- * @ieq: ieq resource\n- * @pfpdu: partial management per user qp\n- * @buf: receive buffer\n- * @fpdu_len: fpdu len in the buffer\n- */\n-static enum i40iw_status_code i40iw_ieq_handle_partial(struct i40iw_puda_rsrc *ieq,\n-\t\t\t\t\t\t struct i40iw_pfpdu *pfpdu,\n-\t\t\t\t\t\t struct i40iw_puda_buf *buf,\n-\t\t\t\t\t\t u16 fpdu_len)\n-{\n-\tenum i40iw_status_code status = 0;\n-\tu8 *crcptr;\n-\tu32 mpacrc;\n-\tu32 seqnum = buf->seqnum;\n-\tstruct list_head pbufl;\t/* partial buffer list */\n-\tstruct i40iw_puda_buf *txbuf = NULL;\n-\tstruct list_head *rxlist = &pfpdu->rxlist;\n-\n-\tINIT_LIST_HEAD(&pbufl);\n-\tlist_add(&buf->list, &pbufl);\n-\n-\tstatus = i40iw_ieq_create_pbufl(pfpdu, rxlist, &pbufl, buf, fpdu_len);\n-\tif (status)\n-\t\tgoto error;\n-\n-\ttxbuf = i40iw_puda_get_bufpool(ieq);\n-\tif (!txbuf) {\n-\t\tpfpdu->no_tx_bufs++;\n-\t\tstatus = I40IW_ERR_NO_TXBUFS;\n-\t\tgoto error;\n-\t}\n-\n-\ti40iw_ieq_compl_pfpdu(ieq, rxlist, &pbufl, txbuf, fpdu_len);\n-\ti40iw_ieq_update_tcpip_info(txbuf, fpdu_len, seqnum);\n-\tcrcptr = txbuf->data + fpdu_len - 4;\n-\tmpacrc = *(u32 *)crcptr;\n-\tif (ieq->check_crc) {\n-\t\tstatus = i40iw_ieq_check_mpacrc(ieq->hash_desc, txbuf->data,\n-\t\t\t\t\t\t(fpdu_len - 4), mpacrc);\n-\t\tif (status) {\n-\t\t\ti40iw_debug(ieq->dev, I40IW_DEBUG_IEQ,\n-\t\t\t\t \"%s: error bad crc\\n\", __func__);\n-\t\t\tgoto error;\n-\t\t}\n-\t}\n-\n-\ti40iw_debug_buf(ieq->dev, I40IW_DEBUG_IEQ, \"IEQ TX BUFFER\",\n-\t\t\ttxbuf->mem.va, txbuf->totallen);\n-\ti40iw_puda_send_buf(ieq, txbuf);\n-\tpfpdu->rcv_nxt = seqnum + fpdu_len;\n-\treturn status;\n- error:\n-\twhile (!list_empty(&pbufl)) {\n-\t\tbuf = (struct i40iw_puda_buf *)(pbufl.prev);\n-\t\tlist_del(&buf->list);\n-\t\tlist_add(&buf->list, rxlist);\n-\t}\n-\tif (txbuf)\n-\t\ti40iw_puda_ret_bufpool(ieq, txbuf);\n-\treturn status;\n-}\n-\n-/**\n- * i40iw_ieq_process_buf - process buffer rcvd for ieq\n- * @ieq: ieq resource\n- * @pfpdu: partial management per user qp\n- * @buf: receive 
buffer\n- */\n-static enum i40iw_status_code i40iw_ieq_process_buf(struct i40iw_puda_rsrc *ieq,\n-\t\t\t\t\t\t struct i40iw_pfpdu *pfpdu,\n-\t\t\t\t\t\t struct i40iw_puda_buf *buf)\n-{\n-\tu16 fpdu_len = 0;\n-\tu16 datalen = buf->datalen;\n-\tu8 *datap = buf->data;\n-\tu8 *crcptr;\n-\tu16 ioffset = 0;\n-\tu32 mpacrc;\n-\tu32 seqnum = buf->seqnum;\n-\tu16 length = 0;\n-\tu16 full = 0;\n-\tbool partial = false;\n-\tstruct i40iw_puda_buf *txbuf;\n-\tstruct list_head *rxlist = &pfpdu->rxlist;\n-\tenum i40iw_status_code ret = 0;\n-\tenum i40iw_status_code status = 0;\n-\n-\tioffset = (u16)(buf->data - (u8 *)buf->mem.va);\n-\twhile (datalen) {\n-\t\tfpdu_len = i40iw_ieq_get_fpdu_length(ntohs(*(__be16 *)datap));\n-\t\tif (fpdu_len > pfpdu->max_fpdu_data) {\n-\t\t\ti40iw_debug(ieq->dev, I40IW_DEBUG_IEQ,\n-\t\t\t\t \"%s: error bad fpdu_len\\n\", __func__);\n-\t\t\tstatus = I40IW_ERR_MPA_CRC;\n-\t\t\tlist_add(&buf->list, rxlist);\n-\t\t\treturn status;\n-\t\t}\n-\n-\t\tif (datalen < fpdu_len) {\n-\t\t\tpartial = true;\n-\t\t\tbreak;\n-\t\t}\n-\t\tcrcptr = datap + fpdu_len - 4;\n-\t\tmpacrc = *(u32 *)crcptr;\n-\t\tif (ieq->check_crc)\n-\t\t\tret = i40iw_ieq_check_mpacrc(ieq->hash_desc,\n-\t\t\t\t\t\t datap, fpdu_len - 4, mpacrc);\n-\t\tif (ret) {\n-\t\t\tstatus = I40IW_ERR_MPA_CRC;\n-\t\t\tlist_add(&buf->list, rxlist);\n-\t\t\treturn status;\n-\t\t}\n-\t\tfull++;\n-\t\tpfpdu->fpdu_processed++;\n-\t\tdatap += fpdu_len;\n-\t\tlength += fpdu_len;\n-\t\tdatalen -= fpdu_len;\n-\t}\n-\tif (full) {\n-\t\t/* copy full pdu's in the txbuf and send them out */\n-\t\ttxbuf = i40iw_puda_get_bufpool(ieq);\n-\t\tif (!txbuf) {\n-\t\t\tpfpdu->no_tx_bufs++;\n-\t\t\tstatus = I40IW_ERR_NO_TXBUFS;\n-\t\t\tlist_add(&buf->list, rxlist);\n-\t\t\treturn status;\n-\t\t}\n-\t\t/* modify txbuf's buffer header */\n-\t\ti40iw_ieq_setup_tx_buf(buf, txbuf);\n-\t\t/* copy full fpdu's to new buffer */\n-\t\ti40iw_ieq_copy_to_txbuf(buf, txbuf, ioffset, buf->hdrlen,\n-\t\t\t\t\tlength);\n-\t\ttxbuf->totallen = buf->hdrlen + length;\n-\n-\t\ti40iw_ieq_update_tcpip_info(txbuf, length, buf->seqnum);\n-\t\ti40iw_puda_send_buf(ieq, txbuf);\n-\n-\t\tif (!datalen) {\n-\t\t\tpfpdu->rcv_nxt = buf->seqnum + length;\n-\t\t\ti40iw_puda_ret_bufpool(ieq, buf);\n-\t\t\treturn status;\n-\t\t}\n-\t\tbuf->data = datap;\n-\t\tbuf->seqnum = seqnum + length;\n-\t\tbuf->datalen = datalen;\n-\t\tpfpdu->rcv_nxt = buf->seqnum;\n-\t}\n-\tif (partial)\n-\t\tstatus = i40iw_ieq_handle_partial(ieq, pfpdu, buf, fpdu_len);\n-\n-\treturn status;\n-}\n-\n-/**\n- * i40iw_ieq_process_fpdus - process fpdu's buffers on its list\n- * @qp: qp for which partial fpdus\n- * @ieq: ieq resource\n- */\n-static void i40iw_ieq_process_fpdus(struct i40iw_sc_qp *qp,\n-\t\t\t\t struct i40iw_puda_rsrc *ieq)\n-{\n-\tstruct i40iw_pfpdu *pfpdu = &qp->pfpdu;\n-\tstruct list_head *rxlist = &pfpdu->rxlist;\n-\tstruct i40iw_puda_buf *buf;\n-\tenum i40iw_status_code status;\n-\n-\tdo {\n-\t\tif (list_empty(rxlist))\n-\t\t\tbreak;\n-\t\tbuf = i40iw_puda_get_listbuf(rxlist);\n-\t\tif (!buf) {\n-\t\t\ti40iw_debug(ieq->dev, I40IW_DEBUG_IEQ,\n-\t\t\t\t \"%s: error no buf\\n\", __func__);\n-\t\t\tbreak;\n-\t\t}\n-\t\tif (buf->seqnum != pfpdu->rcv_nxt) {\n-\t\t\t/* This could be out of order or missing packet */\n-\t\t\tpfpdu->out_of_order++;\n-\t\t\tlist_add(&buf->list, rxlist);\n-\t\t\tbreak;\n-\t\t}\n-\t\t/* keep processing buffers from the head of the list */\n-\t\tstatus = i40iw_ieq_process_buf(ieq, pfpdu, buf);\n-\t\tif (status == I40IW_ERR_MPA_CRC) {\n-\t\t\tpfpdu->mpa_crc_err = 
true;\n-\t\t\twhile (!list_empty(rxlist)) {\n-\t\t\t\tbuf = i40iw_puda_get_listbuf(rxlist);\n-\t\t\t\ti40iw_puda_ret_bufpool(ieq, buf);\n-\t\t\t\tpfpdu->crc_err++;\n-\t\t\t}\n-\t\t\t/* create CQP for AE */\n-\t\t\ti40iw_ieq_mpa_crc_ae(ieq->dev, qp);\n-\t\t}\n-\t} while (!status);\n-}\n-\n-/**\n- * i40iw_ieq_handle_exception - handle qp's exception\n- * @ieq: ieq resource\n- * @qp: qp receiving excpetion\n- * @buf: receive buffer\n- */\n-static void i40iw_ieq_handle_exception(struct i40iw_puda_rsrc *ieq,\n-\t\t\t\t struct i40iw_sc_qp *qp,\n-\t\t\t\t struct i40iw_puda_buf *buf)\n-{\n-\tstruct i40iw_puda_buf *tmpbuf = NULL;\n-\tstruct i40iw_pfpdu *pfpdu = &qp->pfpdu;\n-\tu32 *hw_host_ctx = (u32 *)qp->hw_host_ctx;\n-\tu32 rcv_wnd = hw_host_ctx[23];\n-\t/* first partial seq # in q2 */\n-\tu32 fps = *(u32 *)(qp->q2_buf + Q2_FPSN_OFFSET);\n-\tstruct list_head *rxlist = &pfpdu->rxlist;\n-\tstruct list_head *plist;\n-\n-\tpfpdu->total_ieq_bufs++;\n-\n-\tif (pfpdu->mpa_crc_err) {\n-\t\tpfpdu->crc_err++;\n-\t\tgoto error;\n-\t}\n-\tif (pfpdu->mode && (fps != pfpdu->fps)) {\n-\t\t/* clean up qp as it is new partial sequence */\n-\t\ti40iw_ieq_cleanup_qp(ieq, qp);\n-\t\ti40iw_debug(ieq->dev, I40IW_DEBUG_IEQ,\n-\t\t\t \"%s: restarting new partial\\n\", __func__);\n-\t\tpfpdu->mode = false;\n-\t}\n-\n-\tif (!pfpdu->mode) {\n-\t\ti40iw_debug_buf(ieq->dev, I40IW_DEBUG_IEQ, \"Q2 BUFFER\", (u64 *)qp->q2_buf, 128);\n-\t\t/* First_Partial_Sequence_Number check */\n-\t\tpfpdu->rcv_nxt = fps;\n-\t\tpfpdu->fps = fps;\n-\t\tpfpdu->mode = true;\n-\t\tpfpdu->max_fpdu_data = (buf->ipv4) ? (ieq->vsi->mtu - I40IW_MTU_TO_MSS_IPV4) :\n-\t\t\t\t (ieq->vsi->mtu - I40IW_MTU_TO_MSS_IPV6);\n-\t\tpfpdu->pmode_count++;\n-\t\tINIT_LIST_HEAD(rxlist);\n-\t\ti40iw_ieq_check_first_buf(buf, fps);\n-\t}\n-\n-\tif (!(rcv_wnd >= (buf->seqnum - pfpdu->rcv_nxt))) {\n-\t\tpfpdu->bad_seq_num++;\n-\t\tgoto error;\n-\t}\n-\n-\tif (!list_empty(rxlist)) {\n-\t\ttmpbuf = (struct i40iw_puda_buf *)rxlist->next;\n-\t\twhile ((struct list_head *)tmpbuf != rxlist) {\n-\t\t\tif ((int)(buf->seqnum - tmpbuf->seqnum) < 0)\n-\t\t\t\tbreak;\n-\t\t\tplist = &tmpbuf->list;\n-\t\t\ttmpbuf = (struct i40iw_puda_buf *)plist->next;\n-\t\t}\n-\t\t/* Insert buf before tmpbuf */\n-\t\tlist_add_tail(&buf->list, &tmpbuf->list);\n-\t} else {\n-\t\tlist_add_tail(&buf->list, rxlist);\n-\t}\n-\ti40iw_ieq_process_fpdus(qp, ieq);\n-\treturn;\n- error:\n-\ti40iw_puda_ret_bufpool(ieq, buf);\n-}\n-\n-/**\n- * i40iw_ieq_receive - received exception buffer\n- * @dev: iwarp device\n- * @buf: exception buffer received\n- */\n-static void i40iw_ieq_receive(struct i40iw_sc_vsi *vsi,\n-\t\t\t struct i40iw_puda_buf *buf)\n-{\n-\tstruct i40iw_puda_rsrc *ieq = vsi->ieq;\n-\tstruct i40iw_sc_qp *qp = NULL;\n-\tu32 wqe_idx = ieq->compl_rxwqe_idx;\n-\n-\tqp = i40iw_ieq_get_qp(vsi->dev, buf);\n-\tif (!qp) {\n-\t\tieq->stats_bad_qp_id++;\n-\t\ti40iw_puda_ret_bufpool(ieq, buf);\n-\t} else {\n-\t\ti40iw_ieq_handle_exception(ieq, qp, buf);\n-\t}\n-\t/*\n-\t * ieq->rx_wqe_idx is used by i40iw_puda_replenish_rq()\n-\t * on which wqe_idx to start replenish rq\n-\t */\n-\tif (!ieq->rxq_invalid_cnt)\n-\t\tieq->rx_wqe_idx = wqe_idx;\n-\tieq->rxq_invalid_cnt++;\n-}\n-\n-/**\n- * i40iw_ieq_tx_compl - put back after sending completed exception buffer\n- * @vsi: pointer to the vsi structure\n- * @sqwrid: pointer to puda buffer\n- */\n-static void i40iw_ieq_tx_compl(struct i40iw_sc_vsi *vsi, void *sqwrid)\n-{\n-\tstruct i40iw_puda_rsrc *ieq = vsi->ieq;\n-\tstruct i40iw_puda_buf *buf = (struct 
i40iw_puda_buf *)sqwrid;\n-\n-\ti40iw_puda_ret_bufpool(ieq, buf);\n-}\n-\n-/**\n- * i40iw_ieq_cleanup_qp - qp is being destroyed\n- * @ieq: ieq resource\n- * @qp: all pending fpdu buffers\n- */\n-void i40iw_ieq_cleanup_qp(struct i40iw_puda_rsrc *ieq, struct i40iw_sc_qp *qp)\n-{\n-\tstruct i40iw_puda_buf *buf;\n-\tstruct i40iw_pfpdu *pfpdu = &qp->pfpdu;\n-\tstruct list_head *rxlist = &pfpdu->rxlist;\n-\n-\tif (!pfpdu->mode)\n-\t\treturn;\n-\twhile (!list_empty(rxlist)) {\n-\t\tbuf = i40iw_puda_get_listbuf(rxlist);\n-\t\ti40iw_puda_ret_bufpool(ieq, buf);\n-\t}\n-}\ndiff --git a/drivers/infiniband/hw/i40iw/i40iw_puda.h b/drivers/infiniband/hw/i40iw/i40iw_puda.h\ndeleted file mode 100644\nindex 53a7d58c..0000000\n--- a/drivers/infiniband/hw/i40iw/i40iw_puda.h\n+++ /dev/null\n@@ -1,188 +0,0 @@\n-/*******************************************************************************\n-*\n-* Copyright (c) 2015-2016 Intel Corporation. All rights reserved.\n-*\n-* This software is available to you under a choice of one of two\n-* licenses. You may choose to be licensed under the terms of the GNU\n-* General Public License (GPL) Version 2, available from the file\n-* COPYING in the main directory of this source tree, or the\n-* OpenFabrics.org BSD license below:\n-*\n-* Redistribution and use in source and binary forms, with or\n-* without modification, are permitted provided that the following\n-* conditions are met:\n-*\n-* - Redistributions of source code must retain the above\n-*\tcopyright notice, this list of conditions and the following\n-*\tdisclaimer.\n-*\n-* - Redistributions in binary form must reproduce the above\n-*\tcopyright notice, this list of conditions and the following\n-*\tdisclaimer in the documentation and/or other materials\n-*\tprovided with the distribution.\n-*\n-* THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,\n-* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\n-* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\n-* NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS\n-* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN\n-* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\n-* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n-* SOFTWARE.\n-*\n-*******************************************************************************/\n-\n-#ifndef I40IW_PUDA_H\n-#define I40IW_PUDA_H\n-\n-#define I40IW_IEQ_MPA_FRAMING 6\n-\n-struct i40iw_sc_dev;\n-struct i40iw_sc_qp;\n-struct i40iw_sc_cq;\n-\n-enum puda_resource_type {\n-\tI40IW_PUDA_RSRC_TYPE_ILQ = 1,\n-\tI40IW_PUDA_RSRC_TYPE_IEQ\n-};\n-\n-enum puda_rsrc_complete {\n-\tPUDA_CQ_CREATED = 1,\n-\tPUDA_QP_CREATED,\n-\tPUDA_TX_COMPLETE,\n-\tPUDA_RX_COMPLETE,\n-\tPUDA_HASH_CRC_COMPLETE\n-};\n-\n-struct i40iw_puda_completion_info {\n-\tstruct i40iw_qp_uk *qp;\n-\tu8 q_type;\n-\tu8 vlan_valid;\n-\tu8 l3proto;\n-\tu8 l4proto;\n-\tu16 payload_len;\n-\tu32 compl_error;\t/* No_err=0, else major and minor err code */\n-\tu32 qp_id;\n-\tu32 wqe_idx;\n-};\n-\n-struct i40iw_puda_send_info {\n-\tu64 paddr;\t\t/* Physical address */\n-\tu32 len;\n-\tu8 tcplen;\n-\tu8 maclen;\n-\tbool ipv4;\n-\tbool doloopback;\n-\tvoid *scratch;\n-};\n-\n-struct i40iw_puda_buf {\n-\tstruct list_head list;\t/* MUST be first entry */\n-\tstruct i40iw_dma_mem mem;\t/* DMA memory for the buffer */\n-\tstruct i40iw_puda_buf *next;\t/* for alloclist in rsrc struct */\n-\tstruct i40iw_virt_mem buf_mem;\t/* Buffer memory for this buffer */\n-\tvoid *scratch;\n-\tu8 *iph;\n-\tu8 *tcph;\n-\tu8 *data;\n-\tu16 datalen;\n-\tu16 vlan_id;\n-\tu8 tcphlen;\t\t/* tcp length in bytes */\n-\tu8 maclen;\t\t/* mac length in bytes */\n-\tu32 totallen;\t\t/* machlen+iphlen+tcphlen+datalen */\n-\tatomic_t refcount;\n-\tu8 hdrlen;\n-\tbool ipv4;\n-\tu32 seqnum;\n-};\n-\n-struct i40iw_puda_rsrc_info {\n-\tenum puda_resource_type type;\t/* ILQ or IEQ */\n-\tu32 count;\n-\tu16 pd_id;\n-\tu32 cq_id;\n-\tu32 qp_id;\n-\tu32 sq_size;\n-\tu32 rq_size;\n-\tu16 buf_size;\n-\tu16 mss;\n-\tu32 tx_buf_cnt;\t\t/* total bufs allocated will be rq_size + tx_buf_cnt */\n-\tvoid (*receive)(struct i40iw_sc_vsi *, struct i40iw_puda_buf *);\n-\tvoid (*xmit_complete)(struct i40iw_sc_vsi *, void *);\n-};\n-\n-struct i40iw_puda_rsrc {\n-\tstruct i40iw_sc_cq cq;\n-\tstruct i40iw_sc_qp qp;\n-\tstruct i40iw_sc_pd sc_pd;\n-\tstruct i40iw_sc_dev *dev;\n-\tstruct i40iw_sc_vsi *vsi;\n-\tstruct i40iw_dma_mem cqmem;\n-\tstruct i40iw_dma_mem qpmem;\n-\tstruct i40iw_virt_mem ilq_mem;\n-\tenum puda_rsrc_complete completion;\n-\tenum puda_resource_type type;\n-\tu16 buf_size;\t\t/*buffer must be max datalen + tcpip hdr + mac */\n-\tu16 mss;\n-\tu32 cq_id;\n-\tu32 qp_id;\n-\tu32 sq_size;\n-\tu32 rq_size;\n-\tu32 cq_size;\n-\tstruct i40iw_sq_uk_wr_trk_info *sq_wrtrk_array;\n-\tu64 *rq_wrid_array;\n-\tu32 compl_rxwqe_idx;\n-\tu32 rx_wqe_idx;\n-\tu32 rxq_invalid_cnt;\n-\tu32 tx_wqe_avail_cnt;\n-\tbool check_crc;\n-\tstruct shash_desc *hash_desc;\n-\tstruct list_head txpend;\n-\tstruct list_head bufpool;\t/* free buffers pool list for recv and xmit */\n-\tu32 alloc_buf_count;\n-\tu32 avail_buf_count;\t\t/* snapshot of currently available buffers */\n-\tspinlock_t bufpool_lock;\n-\tstruct i40iw_puda_buf *alloclist;\n-\tvoid (*receive)(struct i40iw_sc_vsi *, struct i40iw_puda_buf *);\n-\tvoid (*xmit_complete)(struct i40iw_sc_vsi *, void *);\n-\t/* puda stats */\n-\tu64 stats_buf_alloc_fail;\n-\tu64 stats_pkt_rcvd;\n-\tu64 stats_pkt_sent;\n-\tu64 stats_rcvd_pkt_err;\n-\tu64 stats_sent_pkt_q;\n-\tu64 
stats_bad_qp_id;\n-};\n-\n-struct i40iw_puda_buf *i40iw_puda_get_bufpool(struct i40iw_puda_rsrc *rsrc);\n-void i40iw_puda_ret_bufpool(struct i40iw_puda_rsrc *rsrc,\n-\t\t\t struct i40iw_puda_buf *buf);\n-void i40iw_puda_send_buf(struct i40iw_puda_rsrc *rsrc,\n-\t\t\t struct i40iw_puda_buf *buf);\n-enum i40iw_status_code i40iw_puda_send(struct i40iw_sc_qp *qp,\n-\t\t\t\t struct i40iw_puda_send_info *info);\n-enum i40iw_status_code i40iw_puda_create_rsrc(struct i40iw_sc_vsi *vsi,\n-\t\t\t\t\t struct i40iw_puda_rsrc_info *info);\n-void i40iw_puda_dele_resources(struct i40iw_sc_vsi *vsi,\n-\t\t\t enum puda_resource_type type,\n-\t\t\t bool reset);\n-enum i40iw_status_code i40iw_puda_poll_completion(struct i40iw_sc_dev *dev,\n-\t\t\t\t\t\t struct i40iw_sc_cq *cq, u32 *compl_err);\n-\n-struct i40iw_sc_qp *i40iw_ieq_get_qp(struct i40iw_sc_dev *dev,\n-\t\t\t\t struct i40iw_puda_buf *buf);\n-enum i40iw_status_code i40iw_puda_get_tcpip_info(struct i40iw_puda_completion_info *info,\n-\t\t\t\t\t\t struct i40iw_puda_buf *buf);\n-enum i40iw_status_code i40iw_ieq_check_mpacrc(struct shash_desc *desc,\n-\t\t\t\t\t void *addr, u32 length, u32 value);\n-enum i40iw_status_code i40iw_init_hash_desc(struct shash_desc **desc);\n-void i40iw_ieq_mpa_crc_ae(struct i40iw_sc_dev *dev, struct i40iw_sc_qp *qp);\n-void i40iw_free_hash_desc(struct shash_desc *desc);\n-void i40iw_ieq_update_tcpip_info(struct i40iw_puda_buf *buf, u16 length,\n-\t\t\t\t u32 seqnum);\n-enum i40iw_status_code i40iw_cqp_qp_create_cmd(struct i40iw_sc_dev *dev, struct i40iw_sc_qp *qp);\n-enum i40iw_status_code i40iw_cqp_cq_create_cmd(struct i40iw_sc_dev *dev, struct i40iw_sc_cq *cq);\n-void i40iw_cqp_qp_destroy_cmd(struct i40iw_sc_dev *dev, struct i40iw_sc_qp *qp);\n-void i40iw_cqp_cq_destroy_cmd(struct i40iw_sc_dev *dev, struct i40iw_sc_cq *cq);\n-void i40iw_ieq_cleanup_qp(struct i40iw_puda_rsrc *ieq, struct i40iw_sc_qp *qp);\n-#endif\ndiff --git a/drivers/infiniband/hw/i40iw/i40iw_register.h b/drivers/infiniband/hw/i40iw/i40iw_register.h\ndeleted file mode 100644\nindex 5776818..0000000\n--- a/drivers/infiniband/hw/i40iw/i40iw_register.h\n+++ /dev/null\n@@ -1,1030 +0,0 @@\n-/*******************************************************************************\n-*\n-* Copyright (c) 2015-2016 Intel Corporation. All rights reserved.\n-*\n-* This software is available to you under a choice of one of two\n-* licenses. You may choose to be licensed under the terms of the GNU\n-* General Public License (GPL) Version 2, available from the file\n-* COPYING in the main directory of this source tree, or the\n-* OpenFabrics.org BSD license below:\n-*\n-* Redistribution and use in source and binary forms, with or\n-* without modification, are permitted provided that the following\n-* conditions are met:\n-*\n-* - Redistributions of source code must retain the above\n-*\tcopyright notice, this list of conditions and the following\n-*\tdisclaimer.\n-*\n-* - Redistributions in binary form must reproduce the above\n-*\tcopyright notice, this list of conditions and the following\n-*\tdisclaimer in the documentation and/or other materials\n-*\tprovided with the distribution.\n-*\n-* THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,\n-* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\n-* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\n-* NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS\n-* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN\n-* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\n-* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n-* SOFTWARE.\n-*\n-*******************************************************************************/\n-\n-#ifndef I40IW_REGISTER_H\n-#define I40IW_REGISTER_H\n-\n-#define I40E_GLGEN_STAT 0x000B612C /* Reset: POR */\n-\n-#define I40E_PFHMC_PDINV 0x000C0300 /* Reset: PFR */\n-#define I40E_PFHMC_PDINV_PMSDIDX_SHIFT 0\n-#define I40E_PFHMC_PDINV_PMSDIDX_MASK (0xFFF << I40E_PFHMC_PDINV_PMSDIDX_SHIFT)\n-#define I40E_PFHMC_PDINV_PMPDIDX_SHIFT 16\n-#define I40E_PFHMC_PDINV_PMPDIDX_MASK (0x1FF << I40E_PFHMC_PDINV_PMPDIDX_SHIFT)\n-#define I40E_PFHMC_SDCMD_PMSDWR_SHIFT 31\n-#define I40E_PFHMC_SDCMD_PMSDWR_MASK (0x1 << I40E_PFHMC_SDCMD_PMSDWR_SHIFT)\n-#define I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT 0\n-#define I40E_PFHMC_SDDATALOW_PMSDVALID_MASK (0x1 << I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT)\n-#define I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT 1\n-#define I40E_PFHMC_SDDATALOW_PMSDTYPE_MASK (0x1 << I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT)\n-#define I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT 2\n-#define I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_MASK (0x3FF << I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT)\n-\n-#define I40E_PFINT_DYN_CTLN(_INTPF) (0x00034800 + ((_INTPF) * 4)) /* _i=0...511 */\t/* Reset: PFR */\n-#define I40E_PFINT_DYN_CTLN_INTENA_SHIFT 0\n-#define I40E_PFINT_DYN_CTLN_INTENA_MASK (0x1 << I40E_PFINT_DYN_CTLN_INTENA_SHIFT)\n-#define I40E_PFINT_DYN_CTLN_CLEARPBA_SHIFT 1\n-#define I40E_PFINT_DYN_CTLN_CLEARPBA_MASK (0x1 << I40E_PFINT_DYN_CTLN_CLEARPBA_SHIFT)\n-#define I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT 3\n-#define I40E_PFINT_DYN_CTLN_ITR_INDX_MASK (0x3 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT)\n-\n-#define I40E_VFINT_DYN_CTLN1(_INTVF) (0x00003800 + ((_INTVF) * 4)) /* _i=0...15 */ /* Reset: VFR */\n-#define I40E_GLHMC_VFPDINV(_i) (0x000C8300 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */\n-\n-#define I40E_PFHMC_PDINV_PMSDPARTSEL_SHIFT 15\n-#define I40E_PFHMC_PDINV_PMSDPARTSEL_MASK (0x1 << I40E_PFHMC_PDINV_PMSDPARTSEL_SHIFT)\n-#define I40E_GLPCI_LBARCTRL 0x000BE484 /* Reset: POR */\n-#define I40E_GLPCI_LBARCTRL_PE_DB_SIZE_SHIFT 4\n-#define I40E_GLPCI_LBARCTRL_PE_DB_SIZE_MASK (0x3 << I40E_GLPCI_LBARCTRL_PE_DB_SIZE_SHIFT)\n-#define I40E_GLPCI_DREVID\t\t\t0x0009C480 /* Reset: PCIR */\n-#define I40E_GLPCI_DREVID_DEFAULT_REVID_SHIFT 0\n-#define I40E_GLPCI_DREVID_DEFAULT_REVID_MASK 0xFF\n-\n-#define I40E_PFPE_AEQALLOC 0x00131180 /* Reset: PFR */\n-#define I40E_PFPE_AEQALLOC_AECOUNT_SHIFT 0\n-#define I40E_PFPE_AEQALLOC_AECOUNT_MASK (0xFFFFFFFF << I40E_PFPE_AEQALLOC_AECOUNT_SHIFT)\n-#define I40E_PFPE_CCQPHIGH 0x00008200 /* Reset: PFR */\n-#define I40E_PFPE_CCQPHIGH_PECCQPHIGH_SHIFT 0\n-#define I40E_PFPE_CCQPHIGH_PECCQPHIGH_MASK (0xFFFFFFFF << I40E_PFPE_CCQPHIGH_PECCQPHIGH_SHIFT)\n-#define I40E_PFPE_CCQPLOW 0x00008180 /* Reset: PFR */\n-#define I40E_PFPE_CCQPLOW_PECCQPLOW_SHIFT 0\n-#define I40E_PFPE_CCQPLOW_PECCQPLOW_MASK (0xFFFFFFFF << I40E_PFPE_CCQPLOW_PECCQPLOW_SHIFT)\n-#define I40E_PFPE_CCQPSTATUS 0x00008100 /* Reset: PFR */\n-#define I40E_PFPE_CCQPSTATUS_CCQP_DONE_SHIFT 0\n-#define I40E_PFPE_CCQPSTATUS_CCQP_DONE_MASK (0x1 << I40E_PFPE_CCQPSTATUS_CCQP_DONE_SHIFT)\n-#define I40E_PFPE_CCQPSTATUS_HMC_PROFILE_SHIFT 4\n-#define I40E_PFPE_CCQPSTATUS_HMC_PROFILE_MASK (0x7 << I40E_PFPE_CCQPSTATUS_HMC_PROFILE_SHIFT)\n-#define I40E_PFPE_CCQPSTATUS_RDMA_EN_VFS_SHIFT 16\n-#define 
I40E_PFPE_CCQPSTATUS_RDMA_EN_VFS_MASK (0x3F << I40E_PFPE_CCQPSTATUS_RDMA_EN_VFS_SHIFT)\n-#define I40E_PFPE_CCQPSTATUS_CCQP_ERR_SHIFT 31\n-#define I40E_PFPE_CCQPSTATUS_CCQP_ERR_MASK (0x1 << I40E_PFPE_CCQPSTATUS_CCQP_ERR_SHIFT)\n-#define I40E_PFPE_CQACK 0x00131100 /* Reset: PFR */\n-#define I40E_PFPE_CQACK_PECQID_SHIFT 0\n-#define I40E_PFPE_CQACK_PECQID_MASK (0x1FFFF << I40E_PFPE_CQACK_PECQID_SHIFT)\n-#define I40E_PFPE_CQARM 0x00131080 /* Reset: PFR */\n-#define I40E_PFPE_CQARM_PECQID_SHIFT 0\n-#define I40E_PFPE_CQARM_PECQID_MASK (0x1FFFF << I40E_PFPE_CQARM_PECQID_SHIFT)\n-#define I40E_PFPE_CQPDB 0x00008000 /* Reset: PFR */\n-#define I40E_PFPE_CQPDB_WQHEAD_SHIFT 0\n-#define I40E_PFPE_CQPDB_WQHEAD_MASK (0x7FF << I40E_PFPE_CQPDB_WQHEAD_SHIFT)\n-#define I40E_PFPE_CQPERRCODES 0x00008880 /* Reset: PFR */\n-#define I40E_PFPE_CQPERRCODES_CQP_MINOR_CODE_SHIFT 0\n-#define I40E_PFPE_CQPERRCODES_CQP_MINOR_CODE_MASK (0xFFFF << I40E_PFPE_CQPERRCODES_CQP_MINOR_CODE_SHIFT)\n-#define I40E_PFPE_CQPERRCODES_CQP_MAJOR_CODE_SHIFT 16\n-#define I40E_PFPE_CQPERRCODES_CQP_MAJOR_CODE_MASK (0xFFFF << I40E_PFPE_CQPERRCODES_CQP_MAJOR_CODE_SHIFT)\n-#define I40E_PFPE_CQPTAIL 0x00008080 /* Reset: PFR */\n-#define I40E_PFPE_CQPTAIL_WQTAIL_SHIFT 0\n-#define I40E_PFPE_CQPTAIL_WQTAIL_MASK (0x7FF << I40E_PFPE_CQPTAIL_WQTAIL_SHIFT)\n-#define I40E_PFPE_CQPTAIL_CQP_OP_ERR_SHIFT 31\n-#define I40E_PFPE_CQPTAIL_CQP_OP_ERR_MASK (0x1 << I40E_PFPE_CQPTAIL_CQP_OP_ERR_SHIFT)\n-#define I40E_PFPE_FLMQ1ALLOCERR 0x00008980 /* Reset: PFR */\n-#define I40E_PFPE_FLMQ1ALLOCERR_ERROR_COUNT_SHIFT 0\n-#define I40E_PFPE_FLMQ1ALLOCERR_ERROR_COUNT_MASK (0xFFFF << I40E_PFPE_FLMQ1ALLOCERR_ERROR_COUNT_SHIFT)\n-#define I40E_PFPE_FLMXMITALLOCERR 0x00008900 /* Reset: PFR */\n-#define I40E_PFPE_FLMXMITALLOCERR_ERROR_COUNT_SHIFT 0\n-#define I40E_PFPE_FLMXMITALLOCERR_ERROR_COUNT_MASK (0xFFFF << I40E_PFPE_FLMXMITALLOCERR_ERROR_COUNT_SHIFT)\n-#define I40E_PFPE_IPCONFIG0 0x00008280 /* Reset: PFR */\n-#define I40E_PFPE_IPCONFIG0_PEIPID_SHIFT 0\n-#define I40E_PFPE_IPCONFIG0_PEIPID_MASK (0xFFFF << I40E_PFPE_IPCONFIG0_PEIPID_SHIFT)\n-#define I40E_PFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT 16\n-#define I40E_PFPE_IPCONFIG0_USEENTIREIDRANGE_MASK (0x1 << I40E_PFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT)\n-#define I40E_PFPE_MRTEIDXMASK 0x00008600 /* Reset: PFR */\n-#define I40E_PFPE_MRTEIDXMASK_MRTEIDXMASKBITS_SHIFT 0\n-#define I40E_PFPE_MRTEIDXMASK_MRTEIDXMASKBITS_MASK (0x1F << I40E_PFPE_MRTEIDXMASK_MRTEIDXMASKBITS_SHIFT)\n-#define I40E_PFPE_RCVUNEXPECTEDERROR 0x00008680 /* Reset: PFR */\n-#define I40E_PFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_SHIFT 0\n-#define I40E_PFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_MASK (0xFFFFFF << I40E_PFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_SHIFT)\n-#define I40E_PFPE_TCPNOWTIMER 0x00008580 /* Reset: PFR */\n-#define I40E_PFPE_TCPNOWTIMER_TCP_NOW_SHIFT 0\n-#define I40E_PFPE_TCPNOWTIMER_TCP_NOW_MASK (0xFFFFFFFF << I40E_PFPE_TCPNOWTIMER_TCP_NOW_SHIFT)\n-\n-#define I40E_PFPE_WQEALLOC 0x00138C00 /* Reset: PFR */\n-#define I40E_PFPE_WQEALLOC_PEQPID_SHIFT 0\n-#define I40E_PFPE_WQEALLOC_PEQPID_MASK (0x3FFFF << I40E_PFPE_WQEALLOC_PEQPID_SHIFT)\n-#define I40E_PFPE_WQEALLOC_WQE_DESC_INDEX_SHIFT 20\n-#define I40E_PFPE_WQEALLOC_WQE_DESC_INDEX_MASK (0xFFF << I40E_PFPE_WQEALLOC_WQE_DESC_INDEX_SHIFT)\n-\n-#define I40E_VFPE_AEQALLOC(_VF) (0x00130C00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */\n-#define I40E_VFPE_AEQALLOC_MAX_INDEX 127\n-#define I40E_VFPE_AEQALLOC_AECOUNT_SHIFT 0\n-#define I40E_VFPE_AEQALLOC_AECOUNT_MASK (0xFFFFFFFF << 
I40E_VFPE_AEQALLOC_AECOUNT_SHIFT)\n-#define I40E_VFPE_CCQPHIGH(_VF) (0x00001000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */\n-#define I40E_VFPE_CCQPHIGH_MAX_INDEX 127\n-#define I40E_VFPE_CCQPHIGH_PECCQPHIGH_SHIFT 0\n-#define I40E_VFPE_CCQPHIGH_PECCQPHIGH_MASK (0xFFFFFFFF << I40E_VFPE_CCQPHIGH_PECCQPHIGH_SHIFT)\n-#define I40E_VFPE_CCQPLOW(_VF) (0x00000C00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */\n-#define I40E_VFPE_CCQPLOW_MAX_INDEX 127\n-#define I40E_VFPE_CCQPLOW_PECCQPLOW_SHIFT 0\n-#define I40E_VFPE_CCQPLOW_PECCQPLOW_MASK (0xFFFFFFFF << I40E_VFPE_CCQPLOW_PECCQPLOW_SHIFT)\n-#define I40E_VFPE_CCQPSTATUS(_VF) (0x00000800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */\n-#define I40E_VFPE_CCQPSTATUS_MAX_INDEX 127\n-#define I40E_VFPE_CCQPSTATUS_CCQP_DONE_SHIFT 0\n-#define I40E_VFPE_CCQPSTATUS_CCQP_DONE_MASK (0x1 << I40E_VFPE_CCQPSTATUS_CCQP_DONE_SHIFT)\n-#define I40E_VFPE_CCQPSTATUS_HMC_PROFILE_SHIFT 4\n-#define I40E_VFPE_CCQPSTATUS_HMC_PROFILE_MASK (0x7 << I40E_VFPE_CCQPSTATUS_HMC_PROFILE_SHIFT)\n-#define I40E_VFPE_CCQPSTATUS_RDMA_EN_VFS_SHIFT 16\n-#define I40E_VFPE_CCQPSTATUS_RDMA_EN_VFS_MASK (0x3F << I40E_VFPE_CCQPSTATUS_RDMA_EN_VFS_SHIFT)\n-#define I40E_VFPE_CCQPSTATUS_CCQP_ERR_SHIFT 31\n-#define I40E_VFPE_CCQPSTATUS_CCQP_ERR_MASK (0x1 << I40E_VFPE_CCQPSTATUS_CCQP_ERR_SHIFT)\n-#define I40E_VFPE_CQACK(_VF) (0x00130800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */\n-#define I40E_VFPE_CQACK_MAX_INDEX 127\n-#define I40E_VFPE_CQACK_PECQID_SHIFT 0\n-#define I40E_VFPE_CQACK_PECQID_MASK (0x1FFFF << I40E_VFPE_CQACK_PECQID_SHIFT)\n-#define I40E_VFPE_CQARM(_VF) (0x00130400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */\n-#define I40E_VFPE_CQARM_MAX_INDEX 127\n-#define I40E_VFPE_CQARM_PECQID_SHIFT 0\n-#define I40E_VFPE_CQARM_PECQID_MASK (0x1FFFF << I40E_VFPE_CQARM_PECQID_SHIFT)\n-#define I40E_VFPE_CQPDB(_VF) (0x00000000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */\n-#define I40E_VFPE_CQPDB_MAX_INDEX 127\n-#define I40E_VFPE_CQPDB_WQHEAD_SHIFT 0\n-#define I40E_VFPE_CQPDB_WQHEAD_MASK (0x7FF << I40E_VFPE_CQPDB_WQHEAD_SHIFT)\n-#define I40E_VFPE_CQPERRCODES(_VF) (0x00001800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */\n-#define I40E_VFPE_CQPERRCODES_MAX_INDEX 127\n-#define I40E_VFPE_CQPERRCODES_CQP_MINOR_CODE_SHIFT 0\n-#define I40E_VFPE_CQPERRCODES_CQP_MINOR_CODE_MASK (0xFFFF << I40E_VFPE_CQPERRCODES_CQP_MINOR_CODE_SHIFT)\n-#define I40E_VFPE_CQPERRCODES_CQP_MAJOR_CODE_SHIFT 16\n-#define I40E_VFPE_CQPERRCODES_CQP_MAJOR_CODE_MASK (0xFFFF << I40E_VFPE_CQPERRCODES_CQP_MAJOR_CODE_SHIFT)\n-#define I40E_VFPE_CQPTAIL(_VF) (0x00000400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */\n-#define I40E_VFPE_CQPTAIL_MAX_INDEX 127\n-#define I40E_VFPE_CQPTAIL_WQTAIL_SHIFT 0\n-#define I40E_VFPE_CQPTAIL_WQTAIL_MASK (0x7FF << I40E_VFPE_CQPTAIL_WQTAIL_SHIFT)\n-#define I40E_VFPE_CQPTAIL_CQP_OP_ERR_SHIFT 31\n-#define I40E_VFPE_CQPTAIL_CQP_OP_ERR_MASK (0x1 << I40E_VFPE_CQPTAIL_CQP_OP_ERR_SHIFT)\n-#define I40E_VFPE_IPCONFIG0(_VF) (0x00001400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */\n-#define I40E_VFPE_IPCONFIG0_MAX_INDEX 127\n-#define I40E_VFPE_IPCONFIG0_PEIPID_SHIFT 0\n-#define I40E_VFPE_IPCONFIG0_PEIPID_MASK (0xFFFF << I40E_VFPE_IPCONFIG0_PEIPID_SHIFT)\n-#define I40E_VFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT 16\n-#define I40E_VFPE_IPCONFIG0_USEENTIREIDRANGE_MASK (0x1 << I40E_VFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT)\n-#define I40E_VFPE_MRTEIDXMASK(_VF) (0x00003000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */\n-#define I40E_VFPE_MRTEIDXMASK_MAX_INDEX 127\n-#define 
I40E_VFPE_MRTEIDXMASK_MRTEIDXMASKBITS_SHIFT 0\n-#define I40E_VFPE_MRTEIDXMASK_MRTEIDXMASKBITS_MASK (0x1F << I40E_VFPE_MRTEIDXMASK_MRTEIDXMASKBITS_SHIFT)\n-#define I40E_VFPE_RCVUNEXPECTEDERROR(_VF) (0x00003400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */\n-#define I40E_VFPE_RCVUNEXPECTEDERROR_MAX_INDEX 127\n-#define I40E_VFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_SHIFT 0\n-#define I40E_VFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_MASK (0xFFFFFF << I40E_VFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_SHIFT)\n-#define I40E_VFPE_TCPNOWTIMER(_VF) (0x00002C00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */\n-#define I40E_VFPE_TCPNOWTIMER_MAX_INDEX 127\n-#define I40E_VFPE_TCPNOWTIMER_TCP_NOW_SHIFT 0\n-#define I40E_VFPE_TCPNOWTIMER_TCP_NOW_MASK (0xFFFFFFFF << I40E_VFPE_TCPNOWTIMER_TCP_NOW_SHIFT)\n-#define I40E_VFPE_WQEALLOC(_VF) (0x00138000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */\n-#define I40E_VFPE_WQEALLOC_MAX_INDEX 127\n-#define I40E_VFPE_WQEALLOC_PEQPID_SHIFT 0\n-#define I40E_VFPE_WQEALLOC_PEQPID_MASK (0x3FFFF << I40E_VFPE_WQEALLOC_PEQPID_SHIFT)\n-#define I40E_VFPE_WQEALLOC_WQE_DESC_INDEX_SHIFT 20\n-#define I40E_VFPE_WQEALLOC_WQE_DESC_INDEX_MASK (0xFFF << I40E_VFPE_WQEALLOC_WQE_DESC_INDEX_SHIFT)\n-\n-#define I40E_GLPE_CPUSTATUS0 0x0000D040 /* Reset: PE_CORER */\n-#define I40E_GLPE_CPUSTATUS0_PECPUSTATUS0_SHIFT 0\n-#define I40E_GLPE_CPUSTATUS0_PECPUSTATUS0_MASK (0xFFFFFFFF << I40E_GLPE_CPUSTATUS0_PECPUSTATUS0_SHIFT)\n-#define I40E_GLPE_CPUSTATUS1 0x0000D044 /* Reset: PE_CORER */\n-#define I40E_GLPE_CPUSTATUS1_PECPUSTATUS1_SHIFT 0\n-#define I40E_GLPE_CPUSTATUS1_PECPUSTATUS1_MASK (0xFFFFFFFF << I40E_GLPE_CPUSTATUS1_PECPUSTATUS1_SHIFT)\n-#define I40E_GLPE_CPUSTATUS2 0x0000D048 /* Reset: PE_CORER */\n-#define I40E_GLPE_CPUSTATUS2_PECPUSTATUS2_SHIFT 0\n-#define I40E_GLPE_CPUSTATUS2_PECPUSTATUS2_MASK (0xFFFFFFFF << I40E_GLPE_CPUSTATUS2_PECPUSTATUS2_SHIFT)\n-#define I40E_GLPE_CPUTRIG0 0x0000D060 /* Reset: PE_CORER */\n-#define I40E_GLPE_CPUTRIG0_PECPUTRIG0_SHIFT 0\n-#define I40E_GLPE_CPUTRIG0_PECPUTRIG0_MASK (0xFFFF << I40E_GLPE_CPUTRIG0_PECPUTRIG0_SHIFT)\n-#define I40E_GLPE_CPUTRIG0_TEPREQUEST0_SHIFT 17\n-#define I40E_GLPE_CPUTRIG0_TEPREQUEST0_MASK (0x1 << I40E_GLPE_CPUTRIG0_TEPREQUEST0_SHIFT)\n-#define I40E_GLPE_CPUTRIG0_OOPREQUEST0_SHIFT 18\n-#define I40E_GLPE_CPUTRIG0_OOPREQUEST0_MASK (0x1 << I40E_GLPE_CPUTRIG0_OOPREQUEST0_SHIFT)\n-#define I40E_GLPE_DUAL40_RUPM 0x0000DA04 /* Reset: PE_CORER */\n-#define I40E_GLPE_DUAL40_RUPM_DUAL_40G_MODE_SHIFT 0\n-#define I40E_GLPE_DUAL40_RUPM_DUAL_40G_MODE_MASK (0x1 << I40E_GLPE_DUAL40_RUPM_DUAL_40G_MODE_SHIFT)\n-#define I40E_GLPE_PFAEQEDROPCNT(_i) (0x00131440 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */\n-#define I40E_GLPE_PFAEQEDROPCNT_MAX_INDEX 15\n-#define I40E_GLPE_PFAEQEDROPCNT_AEQEDROPCNT_SHIFT 0\n-#define I40E_GLPE_PFAEQEDROPCNT_AEQEDROPCNT_MASK (0xFFFF << I40E_GLPE_PFAEQEDROPCNT_AEQEDROPCNT_SHIFT)\n-#define I40E_GLPE_PFCEQEDROPCNT(_i) (0x001313C0 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */\n-#define I40E_GLPE_PFCEQEDROPCNT_MAX_INDEX 15\n-#define I40E_GLPE_PFCEQEDROPCNT_CEQEDROPCNT_SHIFT 0\n-#define I40E_GLPE_PFCEQEDROPCNT_CEQEDROPCNT_MASK (0xFFFF << I40E_GLPE_PFCEQEDROPCNT_CEQEDROPCNT_SHIFT)\n-#define I40E_GLPE_PFCQEDROPCNT(_i) (0x00131340 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */\n-#define I40E_GLPE_PFCQEDROPCNT_MAX_INDEX 15\n-#define I40E_GLPE_PFCQEDROPCNT_CQEDROPCNT_SHIFT 0\n-#define I40E_GLPE_PFCQEDROPCNT_CQEDROPCNT_MASK (0xFFFF << I40E_GLPE_PFCQEDROPCNT_CQEDROPCNT_SHIFT)\n-#define I40E_GLPE_RUPM_CQPPOOL 0x0000DACC /* Reset: 
PE_CORER */\n-#define I40E_GLPE_RUPM_CQPPOOL_CQPSPADS_SHIFT 0\n-#define I40E_GLPE_RUPM_CQPPOOL_CQPSPADS_MASK (0xFF << I40E_GLPE_RUPM_CQPPOOL_CQPSPADS_SHIFT)\n-#define I40E_GLPE_RUPM_FLRPOOL 0x0000DAC4 /* Reset: PE_CORER */\n-#define I40E_GLPE_RUPM_FLRPOOL_FLRSPADS_SHIFT 0\n-#define I40E_GLPE_RUPM_FLRPOOL_FLRSPADS_MASK (0xFF << I40E_GLPE_RUPM_FLRPOOL_FLRSPADS_SHIFT)\n-#define I40E_GLPE_RUPM_GCTL 0x0000DA00 /* Reset: PE_CORER */\n-#define I40E_GLPE_RUPM_GCTL_ALLOFFTH_SHIFT 0\n-#define I40E_GLPE_RUPM_GCTL_ALLOFFTH_MASK (0xFF << I40E_GLPE_RUPM_GCTL_ALLOFFTH_SHIFT)\n-#define I40E_GLPE_RUPM_GCTL_RUPM_P0_DIS_SHIFT 26\n-#define I40E_GLPE_RUPM_GCTL_RUPM_P0_DIS_MASK (0x1 << I40E_GLPE_RUPM_GCTL_RUPM_P0_DIS_SHIFT)\n-#define I40E_GLPE_RUPM_GCTL_RUPM_P1_DIS_SHIFT 27\n-#define I40E_GLPE_RUPM_GCTL_RUPM_P1_DIS_MASK (0x1 << I40E_GLPE_RUPM_GCTL_RUPM_P1_DIS_SHIFT)\n-#define I40E_GLPE_RUPM_GCTL_RUPM_P2_DIS_SHIFT 28\n-#define I40E_GLPE_RUPM_GCTL_RUPM_P2_DIS_MASK (0x1 << I40E_GLPE_RUPM_GCTL_RUPM_P2_DIS_SHIFT)\n-#define I40E_GLPE_RUPM_GCTL_RUPM_P3_DIS_SHIFT 29\n-#define I40E_GLPE_RUPM_GCTL_RUPM_P3_DIS_MASK (0x1 << I40E_GLPE_RUPM_GCTL_RUPM_P3_DIS_SHIFT)\n-#define I40E_GLPE_RUPM_GCTL_RUPM_DIS_SHIFT 30\n-#define I40E_GLPE_RUPM_GCTL_RUPM_DIS_MASK (0x1 << I40E_GLPE_RUPM_GCTL_RUPM_DIS_SHIFT)\n-#define I40E_GLPE_RUPM_GCTL_SWLB_MODE_SHIFT 31\n-#define I40E_GLPE_RUPM_GCTL_SWLB_MODE_MASK (0x1 << I40E_GLPE_RUPM_GCTL_SWLB_MODE_SHIFT)\n-#define I40E_GLPE_RUPM_PTXPOOL 0x0000DAC8 /* Reset: PE_CORER */\n-#define I40E_GLPE_RUPM_PTXPOOL_PTXSPADS_SHIFT 0\n-#define I40E_GLPE_RUPM_PTXPOOL_PTXSPADS_MASK (0xFF << I40E_GLPE_RUPM_PTXPOOL_PTXSPADS_SHIFT)\n-#define I40E_GLPE_RUPM_PUSHPOOL 0x0000DAC0 /* Reset: PE_CORER */\n-#define I40E_GLPE_RUPM_PUSHPOOL_PUSHSPADS_SHIFT 0\n-#define I40E_GLPE_RUPM_PUSHPOOL_PUSHSPADS_MASK (0xFF << I40E_GLPE_RUPM_PUSHPOOL_PUSHSPADS_SHIFT)\n-#define I40E_GLPE_RUPM_TXHOST_EN 0x0000DA08 /* Reset: PE_CORER */\n-#define I40E_GLPE_RUPM_TXHOST_EN_TXHOST_EN_SHIFT 0\n-#define I40E_GLPE_RUPM_TXHOST_EN_TXHOST_EN_MASK (0x1 << I40E_GLPE_RUPM_TXHOST_EN_TXHOST_EN_SHIFT)\n-#define I40E_GLPE_VFAEQEDROPCNT(_i) (0x00132540 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */\n-#define I40E_GLPE_VFAEQEDROPCNT_MAX_INDEX 31\n-#define I40E_GLPE_VFAEQEDROPCNT_AEQEDROPCNT_SHIFT 0\n-#define I40E_GLPE_VFAEQEDROPCNT_AEQEDROPCNT_MASK (0xFFFF << I40E_GLPE_VFAEQEDROPCNT_AEQEDROPCNT_SHIFT)\n-#define I40E_GLPE_VFCEQEDROPCNT(_i) (0x00132440 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */\n-#define I40E_GLPE_VFCEQEDROPCNT_MAX_INDEX 31\n-#define I40E_GLPE_VFCEQEDROPCNT_CEQEDROPCNT_SHIFT 0\n-#define I40E_GLPE_VFCEQEDROPCNT_CEQEDROPCNT_MASK (0xFFFF << I40E_GLPE_VFCEQEDROPCNT_CEQEDROPCNT_SHIFT)\n-#define I40E_GLPE_VFCQEDROPCNT(_i) (0x00132340 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */\n-#define I40E_GLPE_VFCQEDROPCNT_MAX_INDEX 31\n-#define I40E_GLPE_VFCQEDROPCNT_CQEDROPCNT_SHIFT 0\n-#define I40E_GLPE_VFCQEDROPCNT_CQEDROPCNT_MASK (0xFFFF << I40E_GLPE_VFCQEDROPCNT_CQEDROPCNT_SHIFT)\n-#define I40E_GLPE_VFFLMOBJCTRL(_i) (0x0000D400 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */\n-#define I40E_GLPE_VFFLMOBJCTRL_MAX_INDEX 31\n-#define I40E_GLPE_VFFLMOBJCTRL_XMIT_BLOCKSIZE_SHIFT 0\n-#define I40E_GLPE_VFFLMOBJCTRL_XMIT_BLOCKSIZE_MASK (0x7 << I40E_GLPE_VFFLMOBJCTRL_XMIT_BLOCKSIZE_SHIFT)\n-#define I40E_GLPE_VFFLMOBJCTRL_Q1_BLOCKSIZE_SHIFT 8\n-#define I40E_GLPE_VFFLMOBJCTRL_Q1_BLOCKSIZE_MASK (0x7 << I40E_GLPE_VFFLMOBJCTRL_Q1_BLOCKSIZE_SHIFT)\n-#define I40E_GLPE_VFFLMQ1ALLOCERR(_i) (0x0000C700 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER 
*/\n-#define I40E_GLPE_VFFLMQ1ALLOCERR_MAX_INDEX 31\n-#define I40E_GLPE_VFFLMQ1ALLOCERR_ERROR_COUNT_SHIFT 0\n-#define I40E_GLPE_VFFLMQ1ALLOCERR_ERROR_COUNT_MASK (0xFFFF << I40E_GLPE_VFFLMQ1ALLOCERR_ERROR_COUNT_SHIFT)\n-#define I40E_GLPE_VFFLMXMITALLOCERR(_i) (0x0000C600 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */\n-#define I40E_GLPE_VFFLMXMITALLOCERR_MAX_INDEX 31\n-#define I40E_GLPE_VFFLMXMITALLOCERR_ERROR_COUNT_SHIFT 0\n-#define I40E_GLPE_VFFLMXMITALLOCERR_ERROR_COUNT_MASK (0xFFFF << I40E_GLPE_VFFLMXMITALLOCERR_ERROR_COUNT_SHIFT)\n-#define I40E_GLPE_VFUDACTRL(_i) (0x0000C000 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */\n-#define I40E_GLPE_VFUDACTRL_MAX_INDEX 31\n-#define I40E_GLPE_VFUDACTRL_IPV4MCFRAGRESBP_SHIFT 0\n-#define I40E_GLPE_VFUDACTRL_IPV4MCFRAGRESBP_MASK (0x1 << I40E_GLPE_VFUDACTRL_IPV4MCFRAGRESBP_SHIFT)\n-#define I40E_GLPE_VFUDACTRL_IPV4UCFRAGRESBP_SHIFT 1\n-#define I40E_GLPE_VFUDACTRL_IPV4UCFRAGRESBP_MASK (0x1 << I40E_GLPE_VFUDACTRL_IPV4UCFRAGRESBP_SHIFT)\n-#define I40E_GLPE_VFUDACTRL_IPV6MCFRAGRESBP_SHIFT 2\n-#define I40E_GLPE_VFUDACTRL_IPV6MCFRAGRESBP_MASK (0x1 << I40E_GLPE_VFUDACTRL_IPV6MCFRAGRESBP_SHIFT)\n-#define I40E_GLPE_VFUDACTRL_IPV6UCFRAGRESBP_SHIFT 3\n-#define I40E_GLPE_VFUDACTRL_IPV6UCFRAGRESBP_MASK (0x1 << I40E_GLPE_VFUDACTRL_IPV6UCFRAGRESBP_SHIFT)\n-#define I40E_GLPE_VFUDACTRL_UDPMCFRAGRESFAIL_SHIFT 4\n-#define I40E_GLPE_VFUDACTRL_UDPMCFRAGRESFAIL_MASK (0x1 << I40E_GLPE_VFUDACTRL_UDPMCFRAGRESFAIL_SHIFT)\n-#define I40E_GLPE_VFUDAUCFBQPN(_i) (0x0000C100 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */\n-#define I40E_GLPE_VFUDAUCFBQPN_MAX_INDEX 31\n-#define I40E_GLPE_VFUDAUCFBQPN_QPN_SHIFT 0\n-#define I40E_GLPE_VFUDAUCFBQPN_QPN_MASK (0x3FFFF << I40E_GLPE_VFUDAUCFBQPN_QPN_SHIFT)\n-#define I40E_GLPE_VFUDAUCFBQPN_VALID_SHIFT 31\n-#define I40E_GLPE_VFUDAUCFBQPN_VALID_MASK (0x1 << I40E_GLPE_VFUDAUCFBQPN_VALID_SHIFT)\n-\n-#define I40E_GLPES_PFIP4RXDISCARD(_i) (0x00010600 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */\n-#define I40E_GLPES_PFIP4RXDISCARD_MAX_INDEX 15\n-#define I40E_GLPES_PFIP4RXDISCARD_IP4RXDISCARD_SHIFT 0\n-#define I40E_GLPES_PFIP4RXDISCARD_IP4RXDISCARD_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4RXDISCARD_IP4RXDISCARD_SHIFT)\n-#define I40E_GLPES_PFIP4RXFRAGSHI(_i) (0x00010804 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */\n-#define I40E_GLPES_PFIP4RXFRAGSHI_MAX_INDEX 15\n-#define I40E_GLPES_PFIP4RXFRAGSHI_IP4RXFRAGSHI_SHIFT 0\n-#define I40E_GLPES_PFIP4RXFRAGSHI_IP4RXFRAGSHI_MASK (0xFFFF << I40E_GLPES_PFIP4RXFRAGSHI_IP4RXFRAGSHI_SHIFT)\n-#define I40E_GLPES_PFIP4RXFRAGSLO(_i) (0x00010800 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */\n-#define I40E_GLPES_PFIP4RXFRAGSLO_MAX_INDEX 15\n-#define I40E_GLPES_PFIP4RXFRAGSLO_IP4RXFRAGSLO_SHIFT 0\n-#define I40E_GLPES_PFIP4RXFRAGSLO_IP4RXFRAGSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4RXFRAGSLO_IP4RXFRAGSLO_SHIFT)\n-#define I40E_GLPES_PFIP4RXMCOCTSHI(_i) (0x00010A04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */\n-#define I40E_GLPES_PFIP4RXMCOCTSHI_MAX_INDEX 15\n-#define I40E_GLPES_PFIP4RXMCOCTSHI_IP4RXMCOCTSHI_SHIFT 0\n-#define I40E_GLPES_PFIP4RXMCOCTSHI_IP4RXMCOCTSHI_MASK (0xFFFF << I40E_GLPES_PFIP4RXMCOCTSHI_IP4RXMCOCTSHI_SHIFT)\n-#define I40E_GLPES_PFIP4RXMCOCTSLO(_i) (0x00010A00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */\n-#define I40E_GLPES_PFIP4RXMCOCTSLO_MAX_INDEX 15\n-#define I40E_GLPES_PFIP4RXMCOCTSLO_IP4RXMCOCTSLO_SHIFT 0\n-#define I40E_GLPES_PFIP4RXMCOCTSLO_IP4RXMCOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4RXMCOCTSLO_IP4RXMCOCTSLO_SHIFT)\n-#define 
I40E_GLPES_PFIP4RXMCPKTSHI(_i) (0x00010C04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */\n-#define I40E_GLPES_PFIP4RXMCPKTSHI_MAX_INDEX 15\n-#define I40E_GLPES_PFIP4RXMCPKTSHI_IP4RXMCPKTSHI_SHIFT 0\n-#define I40E_GLPES_PFIP4RXMCPKTSHI_IP4RXMCPKTSHI_MASK (0xFFFF << I40E_GLPES_PFIP4RXMCPKTSHI_IP4RXMCPKTSHI_SHIFT)\n-#define I40E_GLPES_PFIP4RXMCPKTSLO(_i) (0x00010C00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */\n-#define I40E_GLPES_PFIP4RXMCPKTSLO_MAX_INDEX 15\n-#define I40E_GLPES_PFIP4RXMCPKTSLO_IP4RXMCPKTSLO_SHIFT 0\n-#define I40E_GLPES_PFIP4RXMCPKTSLO_IP4RXMCPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4RXMCPKTSLO_IP4RXMCPKTSLO_SHIFT)\n-#define I40E_GLPES_PFIP4RXOCTSHI(_i) (0x00010204 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */\n-#define I40E_GLPES_PFIP4RXOCTSHI_MAX_INDEX 15\n-#define I40E_GLPES_PFIP4RXOCTSHI_IP4RXOCTSHI_SHIFT 0\n-#define I40E_GLPES_PFIP4RXOCTSHI_IP4RXOCTSHI_MASK (0xFFFF << I40E_GLPES_PFIP4RXOCTSHI_IP4RXOCTSHI_SHIFT)\n-#define I40E_GLPES_PFIP4RXOCTSLO(_i) (0x00010200 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */\n-#define I40E_GLPES_PFIP4RXOCTSLO_MAX_INDEX 15\n-#define I40E_GLPES_PFIP4RXOCTSLO_IP4RXOCTSLO_SHIFT 0\n-#define I40E_GLPES_PFIP4RXOCTSLO_IP4RXOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4RXOCTSLO_IP4RXOCTSLO_SHIFT)\n-#define I40E_GLPES_PFIP4RXPKTSHI(_i) (0x00010404 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */\n-#define I40E_GLPES_PFIP4RXPKTSHI_MAX_INDEX 15\n-#define I40E_GLPES_PFIP4RXPKTSHI_IP4RXPKTSHI_SHIFT 0\n-#define I40E_GLPES_PFIP4RXPKTSHI_IP4RXPKTSHI_MASK (0xFFFF << I40E_GLPES_PFIP4RXPKTSHI_IP4RXPKTSHI_SHIFT)\n-#define I40E_GLPES_PFIP4RXPKTSLO(_i) (0x00010400 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */\n-#define I40E_GLPES_PFIP4RXPKTSLO_MAX_INDEX 15\n-#define I40E_GLPES_PFIP4RXPKTSLO_IP4RXPKTSLO_SHIFT 0\n-#define I40E_GLPES_PFIP4RXPKTSLO_IP4RXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4RXPKTSLO_IP4RXPKTSLO_SHIFT)\n-#define I40E_GLPES_PFIP4RXTRUNC(_i) (0x00010700 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */\n-#define I40E_GLPES_PFIP4RXTRUNC_MAX_INDEX 15\n-#define I40E_GLPES_PFIP4RXTRUNC_IP4RXTRUNC_SHIFT 0\n-#define I40E_GLPES_PFIP4RXTRUNC_IP4RXTRUNC_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4RXTRUNC_IP4RXTRUNC_SHIFT)\n-#define I40E_GLPES_PFIP4TXFRAGSHI(_i) (0x00011E04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */\n-#define I40E_GLPES_PFIP4TXFRAGSHI_MAX_INDEX 15\n-#define I40E_GLPES_PFIP4TXFRAGSHI_IP4TXFRAGSHI_SHIFT 0\n-#define I40E_GLPES_PFIP4TXFRAGSHI_IP4TXFRAGSHI_MASK (0xFFFF << I40E_GLPES_PFIP4TXFRAGSHI_IP4TXFRAGSHI_SHIFT)\n-#define I40E_GLPES_PFIP4TXFRAGSLO(_i) (0x00011E00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */\n-#define I40E_GLPES_PFIP4TXFRAGSLO_MAX_INDEX 15\n-#define I40E_GLPES_PFIP4TXFRAGSLO_IP4TXFRAGSLO_SHIFT 0\n-#define I40E_GLPES_PFIP4TXFRAGSLO_IP4TXFRAGSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4TXFRAGSLO_IP4TXFRAGSLO_SHIFT)\n-#define I40E_GLPES_PFIP4TXMCOCTSHI(_i) (0x00012004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */\n-#define I40E_GLPES_PFIP4TXMCOCTSHI_MAX_INDEX 15\n-#define I40E_GLPES_PFIP4TXMCOCTSHI_IP4TXMCOCTSHI_SHIFT 0\n-#define I40E_GLPES_PFIP4TXMCOCTSHI_IP4TXMCOCTSHI_MASK (0xFFFF << I40E_GLPES_PFIP4TXMCOCTSHI_IP4TXMCOCTSHI_SHIFT)\n-#define I40E_GLPES_PFIP4TXMCOCTSLO(_i) (0x00012000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */\n-#define I40E_GLPES_PFIP4TXMCOCTSLO_MAX_INDEX 15\n-#define I40E_GLPES_PFIP4TXMCOCTSLO_IP4TXMCOCTSLO_SHIFT 0\n-#define I40E_GLPES_PFIP4TXMCOCTSLO_IP4TXMCOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4TXMCOCTSLO_IP4TXMCOCTSLO_SHIFT)\n-#define 
I40E_GLPES_PFIP4TXMCPKTSHI(_i) (0x00012204 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */\n-#define I40E_GLPES_PFIP4TXMCPKTSHI_MAX_INDEX 15\n-#define I40E_GLPES_PFIP4TXMCPKTSHI_IP4TXMCPKTSHI_SHIFT 0\n-#define I40E_GLPES_PFIP4TXMCPKTSHI_IP4TXMCPKTSHI_MASK (0xFFFF << I40E_GLPES_PFIP4TXMCPKTSHI_IP4TXMCPKTSHI_SHIFT)\n-#define I40E_GLPES_PFIP4TXMCPKTSLO(_i) (0x00012200 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */\n-#define I40E_GLPES_PFIP4TXMCPKTSLO_MAX_INDEX 15\n-#define I40E_GLPES_PFIP4TXMCPKTSLO_IP4TXMCPKTSLO_SHIFT 0\n-#define I40E_GLPES_PFIP4TXMCPKTSLO_IP4TXMCPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4TXMCPKTSLO_IP4TXMCPKTSLO_SHIFT)\n-#define I40E_GLPES_PFIP4TXNOROUTE(_i) (0x00012E00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */\n-#define I40E_GLPES_PFIP4TXNOROUTE_MAX_INDEX 15\n-#define I40E_GLPES_PFIP4TXNOROUTE_IP4TXNOROUTE_SHIFT 0\n-#define I40E_GLPES_PFIP4TXNOROUTE_IP4TXNOROUTE_MASK (0xFFFFFF << I40E_GLPES_PFIP4TXNOROUTE_IP4TXNOROUTE_SHIFT)\n-#define I40E_GLPES_PFIP4TXOCTSHI(_i) (0x00011A04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */\n-#define I40E_GLPES_PFIP4TXOCTSHI_MAX_INDEX 15\n-#define I40E_GLPES_PFIP4TXOCTSHI_IP4TXOCTSHI_SHIFT 0\n-#define I40E_GLPES_PFIP4TXOCTSHI_IP4TXOCTSHI_MASK (0xFFFF << I40E_GLPES_PFIP4TXOCTSHI_IP4TXOCTSHI_SHIFT)\n-#define I40E_GLPES_PFIP4TXOCTSLO(_i) (0x00011A00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */\n-#define I40E_GLPES_PFIP4TXOCTSLO_MAX_INDEX 15\n-#define I40E_GLPES_PFIP4TXOCTSLO_IP4TXOCTSLO_SHIFT 0\n-#define I40E_GLPES_PFIP4TXOCTSLO_IP4TXOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4TXOCTSLO_IP4TXOCTSLO_SHIFT)\n-#define I40E_GLPES_PFIP4TXPKTSHI(_i) (0x00011C04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */\n-#define I40E_GLPES_PFIP4TXPKTSHI_MAX_INDEX 15\n-#define I40E_GLPES_PFIP4TXPKTSHI_IP4TXPKTSHI_SHIFT 0\n-#define I40E_GLPES_PFIP4TXPKTSHI_IP4TXPKTSHI_MASK (0xFFFF << I40E_GLPES_PFIP4TXPKTSHI_IP4TXPKTSHI_SHIFT)\n-#define I40E_GLPES_PFIP4TXPKTSLO(_i) (0x00011C00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */\n-#define I40E_GLPES_PFIP4TXPKTSLO_MAX_INDEX 15\n-#define I40E_GLPES_PFIP4TXPKTSLO_IP4TXPKTSLO_SHIFT 0\n-#define I40E_GLPES_PFIP4TXPKTSLO_IP4TXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4TXPKTSLO_IP4TXPKTSLO_SHIFT)\n-#define I40E_GLPES_PFIP6RXDISCARD(_i) (0x00011200 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */\n-#define I40E_GLPES_PFIP6RXDISCARD_MAX_INDEX 15\n-#define I40E_GLPES_PFIP6RXDISCARD_IP6RXDISCARD_SHIFT 0\n-#define I40E_GLPES_PFIP6RXDISCARD_IP6RXDISCARD_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6RXDISCARD_IP6RXDISCARD_SHIFT)\n-#define I40E_GLPES_PFIP6RXFRAGSHI(_i) (0x00011404 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */\n-#define I40E_GLPES_PFIP6RXFRAGSHI_MAX_INDEX 15\n-#define I40E_GLPES_PFIP6RXFRAGSHI_IP6RXFRAGSHI_SHIFT 0\n-#define I40E_GLPES_PFIP6RXFRAGSHI_IP6RXFRAGSHI_MASK (0xFFFF << I40E_GLPES_PFIP6RXFRAGSHI_IP6RXFRAGSHI_SHIFT)\n-#define I40E_GLPES_PFIP6RXFRAGSLO(_i) (0x00011400 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */\n-#define I40E_GLPES_PFIP6RXFRAGSLO_MAX_INDEX 15\n-#define I40E_GLPES_PFIP6RXFRAGSLO_IP6RXFRAGSLO_SHIFT 0\n-#define I40E_GLPES_PFIP6RXFRAGSLO_IP6RXFRAGSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6RXFRAGSLO_IP6RXFRAGSLO_SHIFT)\n-#define I40E_GLPES_PFIP6RXMCOCTSHI(_i) (0x00011604 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */\n-#define I40E_GLPES_PFIP6RXMCOCTSHI_MAX_INDEX 15\n-#define I40E_GLPES_PFIP6RXMCOCTSHI_IP6RXMCOCTSHI_SHIFT 0\n-#define I40E_GLPES_PFIP6RXMCOCTSHI_IP6RXMCOCTSHI_MASK (0xFFFF << I40E_GLPES_PFIP6RXMCOCTSHI_IP6RXMCOCTSHI_SHIFT)\n-#define 
I40E_GLPES_PFIP6RXMCOCTSLO(_i) (0x00011600 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */\n-#define I40E_GLPES_PFIP6RXMCOCTSLO_MAX_INDEX 15\n-#define I40E_GLPES_PFIP6RXMCOCTSLO_IP6RXMCOCTSLO_SHIFT 0\n-#define I40E_GLPES_PFIP6RXMCOCTSLO_IP6RXMCOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6RXMCOCTSLO_IP6RXMCOCTSLO_SHIFT)\n-#define I40E_GLPES_PFIP6RXMCPKTSHI(_i) (0x00011804 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */\n-#define I40E_GLPES_PFIP6RXMCPKTSHI_MAX_INDEX 15\n-#define I40E_GLPES_PFIP6RXMCPKTSHI_IP6RXMCPKTSHI_SHIFT 0\n-#define I40E_GLPES_PFIP6RXMCPKTSHI_IP6RXMCPKTSHI_MASK (0xFFFF << I40E_GLPES_PFIP6RXMCPKTSHI_IP6RXMCPKTSHI_SHIFT)\n-#define I40E_GLPES_PFIP6RXMCPKTSLO(_i) (0x00011800 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */\n-#define I40E_GLPES_PFIP6RXMCPKTSLO_MAX_INDEX 15\n-#define I40E_GLPES_PFIP6RXMCPKTSLO_IP6RXMCPKTSLO_SHIFT 0\n-#define I40E_GLPES_PFIP6RXMCPKTSLO_IP6RXMCPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6RXMCPKTSLO_IP6RXMCPKTSLO_SHIFT)\n-#define I40E_GLPES_PFIP6RXOCTSHI(_i) (0x00010E04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */\n-#define I40E_GLPES_PFIP6RXOCTSHI_MAX_INDEX 15\n-#define I40E_GLPES_PFIP6RXOCTSHI_IP6RXOCTSHI_SHIFT 0\n-#define I40E_GLPES_PFIP6RXOCTSHI_IP6RXOCTSHI_MASK (0xFFFF << I40E_GLPES_PFIP6RXOCTSHI_IP6RXOCTSHI_SHIFT)\n-#define I40E_GLPES_PFIP6RXOCTSLO(_i) (0x00010E00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */\n-#define I40E_GLPES_PFIP6RXOCTSLO_MAX_INDEX 15\n-#define I40E_GLPES_PFIP6RXOCTSLO_IP6RXOCTSLO_SHIFT 0\n-#define I40E_GLPES_PFIP6RXOCTSLO_IP6RXOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6RXOCTSLO_IP6RXOCTSLO_SHIFT)\n-#define I40E_GLPES_PFIP6RXPKTSHI(_i) (0x00011004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */\n-#define I40E_GLPES_PFIP6RXPKTSHI_MAX_INDEX 15\n-#define I40E_GLPES_PFIP6RXPKTSHI_IP6RXPKTSHI_SHIFT 0\n-#define I40E_GLPES_PFIP6RXPKTSHI_IP6RXPKTSHI_MASK (0xFFFF << I40E_GLPES_PFIP6RXPKTSHI_IP6RXPKTSHI_SHIFT)\n-#define I40E_GLPES_PFIP6RXPKTSLO(_i) (0x00011000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */\n-#define I40E_GLPES_PFIP6RXPKTSLO_MAX_INDEX 15\n-#define I40E_GLPES_PFIP6RXPKTSLO_IP6RXPKTSLO_SHIFT 0\n-#define I40E_GLPES_PFIP6RXPKTSLO_IP6RXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6RXPKTSLO_IP6RXPKTSLO_SHIFT)\n-#define I40E_GLPES_PFIP6RXTRUNC(_i) (0x00011300 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */\n-#define I40E_GLPES_PFIP6RXTRUNC_MAX_INDEX 15\n-#define I40E_GLPES_PFIP6RXTRUNC_IP6RXTRUNC_SHIFT 0\n-#define I40E_GLPES_PFIP6RXTRUNC_IP6RXTRUNC_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6RXTRUNC_IP6RXTRUNC_SHIFT)\n-#define I40E_GLPES_PFIP6TXFRAGSHI(_i) (0x00012804 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */\n-#define I40E_GLPES_PFIP6TXFRAGSHI_MAX_INDEX 15\n-#define I40E_GLPES_PFIP6TXFRAGSHI_IP6TXFRAGSHI_SHIFT 0\n-#define I40E_GLPES_PFIP6TXFRAGSHI_IP6TXFRAGSHI_MASK (0xFFFF << I40E_GLPES_PFIP6TXFRAGSHI_IP6TXFRAGSHI_SHIFT)\n-#define I40E_GLPES_PFIP6TXFRAGSLO(_i) (0x00012800 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */\n-#define I40E_GLPES_PFIP6TXFRAGSLO_MAX_INDEX 15\n-#define I40E_GLPES_PFIP6TXFRAGSLO_IP6TXFRAGSLO_SHIFT 0\n-#define I40E_GLPES_PFIP6TXFRAGSLO_IP6TXFRAGSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6TXFRAGSLO_IP6TXFRAGSLO_SHIFT)\n-#define I40E_GLPES_PFIP6TXMCOCTSHI(_i) (0x00012A04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */\n-#define I40E_GLPES_PFIP6TXMCOCTSHI_MAX_INDEX 15\n-#define I40E_GLPES_PFIP6TXMCOCTSHI_IP6TXMCOCTSHI_SHIFT 0\n-#define I40E_GLPES_PFIP6TXMCOCTSHI_IP6TXMCOCTSHI_MASK (0xFFFF << I40E_GLPES_PFIP6TXMCOCTSHI_IP6TXMCOCTSHI_SHIFT)\n-#define 
I40E_GLPES_PFIP6TXMCOCTSLO(_i) (0x00012A00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */\n-#define I40E_GLPES_PFIP6TXMCOCTSLO_MAX_INDEX 15\n-#define I40E_GLPES_PFIP6TXMCOCTSLO_IP6TXMCOCTSLO_SHIFT 0\n-#define I40E_GLPES_PFIP6TXMCOCTSLO_IP6TXMCOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6TXMCOCTSLO_IP6TXMCOCTSLO_SHIFT)\n-#define I40E_GLPES_PFIP6TXMCPKTSHI(_i) (0x00012C04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */\n-#define I40E_GLPES_PFIP6TXMCPKTSHI_MAX_INDEX 15\n-#define I40E_GLPES_PFIP6TXMCPKTSHI_IP6TXMCPKTSHI_SHIFT 0\n-#define I40E_GLPES_PFIP6TXMCPKTSHI_IP6TXMCPKTSHI_MASK (0xFFFF << I40E_GLPES_PFIP6TXMCPKTSHI_IP6TXMCPKTSHI_SHIFT)\n-#define I40E_GLPES_PFIP6TXMCPKTSLO(_i) (0x00012C00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */\n-#define I40E_GLPES_PFIP6TXMCPKTSLO_MAX_INDEX 15\n-#define I40E_GLPES_PFIP6TXMCPKTSLO_IP6TXMCPKTSLO_SHIFT 0\n-#define I40E_GLPES_PFIP6TXMCPKTSLO_IP6TXMCPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6TXMCPKTSLO_IP6TXMCPKTSLO_SHIFT)\n-#define I40E_GLPES_PFIP6TXNOROUTE(_i) (0x00012F00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */\n-#define I40E_GLPES_PFIP6TXNOROUTE_MAX_INDEX 15\n-#define I40E_GLPES_PFIP6TXNOROUTE_IP6TXNOROUTE_SHIFT 0\n-#define I40E_GLPES_PFIP6TXNOROUTE_IP6TXNOROUTE_MASK (0xFFFFFF << I40E_GLPES_PFIP6TXNOROUTE_IP6TXNOROUTE_SHIFT)\n-#define I40E_GLPES_PFIP6TXOCTSHI(_i) (0x00012404 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */\n-#define I40E_GLPES_PFIP6TXOCTSHI_MAX_INDEX 15\n-#define I40E_GLPES_PFIP6TXOCTSHI_IP6TXOCTSHI_SHIFT 0\n-#define I40E_GLPES_PFIP6TXOCTSHI_IP6TXOCTSHI_MASK (0xFFFF << I40E_GLPES_PFIP6TXOCTSHI_IP6TXOCTSHI_SHIFT)\n-#define I40E_GLPES_PFIP6TXOCTSLO(_i) (0x00012400 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */\n-#define I40E_GLPES_PFIP6TXOCTSLO_MAX_INDEX 15\n-#define I40E_GLPES_PFIP6TXOCTSLO_IP6TXOCTSLO_SHIFT 0\n-#define I40E_GLPES_PFIP6TXOCTSLO_IP6TXOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6TXOCTSLO_IP6TXOCTSLO_SHIFT)\n-#define I40E_GLPES_PFIP6TXPKTSHI(_i) (0x00012604 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */\n-#define I40E_GLPES_PFIP6TXPKTSHI_MAX_INDEX 15\n-#define I40E_GLPES_PFIP6TXPKTSHI_IP6TXPKTSHI_SHIFT 0\n-#define I40E_GLPES_PFIP6TXPKTSHI_IP6TXPKTSHI_MASK (0xFFFF << I40E_GLPES_PFIP6TXPKTSHI_IP6TXPKTSHI_SHIFT)\n-#define I40E_GLPES_PFIP6TXPKTSLO(_i) (0x00012600 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */\n-#define I40E_GLPES_PFIP6TXPKTSLO_MAX_INDEX 15\n-#define I40E_GLPES_PFIP6TXPKTSLO_IP6TXPKTSLO_SHIFT 0\n-#define I40E_GLPES_PFIP6TXPKTSLO_IP6TXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6TXPKTSLO_IP6TXPKTSLO_SHIFT)\n-#define I40E_GLPES_PFRDMARXRDSHI(_i) (0x00013E04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */\n-#define I40E_GLPES_PFRDMARXRDSHI_MAX_INDEX 15\n-#define I40E_GLPES_PFRDMARXRDSHI_RDMARXRDSHI_SHIFT 0\n-#define I40E_GLPES_PFRDMARXRDSHI_RDMARXRDSHI_MASK (0xFFFF << I40E_GLPES_PFRDMARXRDSHI_RDMARXRDSHI_SHIFT)\n-#define I40E_GLPES_PFRDMARXRDSLO(_i) (0x00013E00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */\n-#define I40E_GLPES_PFRDMARXRDSLO_MAX_INDEX 15\n-#define I40E_GLPES_PFRDMARXRDSLO_RDMARXRDSLO_SHIFT 0\n-#define I40E_GLPES_PFRDMARXRDSLO_RDMARXRDSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFRDMARXRDSLO_RDMARXRDSLO_SHIFT)\n-#define I40E_GLPES_PFRDMARXSNDSHI(_i) (0x00014004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */\n-#define I40E_GLPES_PFRDMARXSNDSHI_MAX_INDEX 15\n-#define I40E_GLPES_PFRDMARXSNDSHI_RDMARXSNDSHI_SHIFT 0\n-#define I40E_GLPES_PFRDMARXSNDSHI_RDMARXSNDSHI_MASK (0xFFFF << I40E_GLPES_PFRDMARXSNDSHI_RDMARXSNDSHI_SHIFT)\n-#define 
I40E_GLPES_PFRDMARXSNDSLO(_i) (0x00014000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */\n-#define I40E_GLPES_PFRDMARXSNDSLO_MAX_INDEX 15\n-#define I40E_GLPES_PFRDMARXSNDSLO_RDMARXSNDSLO_SHIFT 0\n-#define I40E_GLPES_PFRDMARXSNDSLO_RDMARXSNDSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFRDMARXSNDSLO_RDMARXSNDSLO_SHIFT)\n-#define I40E_GLPES_PFRDMARXWRSHI(_i) (0x00013C04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */\n-#define I40E_GLPES_PFRDMARXWRSHI_MAX_INDEX 15\n-#define I40E_GLPES_PFRDMARXWRSHI_RDMARXWRSHI_SHIFT 0\n-#define I40E_GLPES_PFRDMARXWRSHI_RDMARXWRSHI_MASK (0xFFFF << I40E_GLPES_PFRDMARXWRSHI_RDMARXWRSHI_SHIFT)\n-#define I40E_GLPES_PFRDMARXWRSLO(_i) (0x00013C00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */\n-#define I40E_GLPES_PFRDMARXWRSLO_MAX_INDEX 15\n-#define I40E_GLPES_PFRDMARXWRSLO_RDMARXWRSLO_SHIFT 0\n-#define I40E_GLPES_PFRDMARXWRSLO_RDMARXWRSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFRDMARXWRSLO_RDMARXWRSLO_SHIFT)\n-#define I40E_GLPES_PFRDMATXRDSHI(_i) (0x00014404 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */\n-#define I40E_GLPES_PFRDMATXRDSHI_MAX_INDEX 15\n-#define I40E_GLPES_PFRDMATXRDSHI_RDMARXRDSHI_SHIFT 0\n-#define I40E_GLPES_PFRDMATXRDSHI_RDMARXRDSHI_MASK (0xFFFF << I40E_GLPES_PFRDMATXRDSHI_RDMARXRDSHI_SHIFT)\n-#define I40E_GLPES_PFRDMATXRDSLO(_i) (0x00014400 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */\n-#define I40E_GLPES_PFRDMATXRDSLO_MAX_INDEX 15\n-#define I40E_GLPES_PFRDMATXRDSLO_RDMARXRDSLO_SHIFT 0\n-#define I40E_GLPES_PFRDMATXRDSLO_RDMARXRDSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFRDMATXRDSLO_RDMARXRDSLO_SHIFT)\n-#define I40E_GLPES_PFRDMATXSNDSHI(_i) (0x00014604 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */\n-#define I40E_GLPES_PFRDMATXSNDSHI_MAX_INDEX 15\n-#define I40E_GLPES_PFRDMATXSNDSHI_RDMARXSNDSHI_SHIFT 0\n-#define I40E_GLPES_PFRDMATXSNDSHI_RDMARXSNDSHI_MASK (0xFFFF << I40E_GLPES_PFRDMATXSNDSHI_RDMARXSNDSHI_SHIFT)\n-#define I40E_GLPES_PFRDMATXSNDSLO(_i) (0x00014600 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */\n-#define I40E_GLPES_PFRDMATXSNDSLO_MAX_INDEX 15\n-#define I40E_GLPES_PFRDMATXSNDSLO_RDMARXSNDSLO_SHIFT 0\n-#define I40E_GLPES_PFRDMATXSNDSLO_RDMARXSNDSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFRDMATXSNDSLO_RDMARXSNDSLO_SHIFT)\n-#define I40E_GLPES_PFRDMATXWRSHI(_i) (0x00014204 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */\n-#define I40E_GLPES_PFRDMATXWRSHI_MAX_INDEX 15\n-#define I40E_GLPES_PFRDMATXWRSHI_RDMARXWRSHI_SHIFT 0\n-#define I40E_GLPES_PFRDMATXWRSHI_RDMARXWRSHI_MASK (0xFFFF << I40E_GLPES_PFRDMATXWRSHI_RDMARXWRSHI_SHIFT)\n-#define I40E_GLPES_PFRDMATXWRSLO(_i) (0x00014200 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */\n-#define I40E_GLPES_PFRDMATXWRSLO_MAX_INDEX 15\n-#define I40E_GLPES_PFRDMATXWRSLO_RDMARXWRSLO_SHIFT 0\n-#define I40E_GLPES_PFRDMATXWRSLO_RDMARXWRSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFRDMATXWRSLO_RDMARXWRSLO_SHIFT)\n-#define I40E_GLPES_PFRDMAVBNDHI(_i) (0x00014804 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */\n-#define I40E_GLPES_PFRDMAVBNDHI_MAX_INDEX 15\n-#define I40E_GLPES_PFRDMAVBNDHI_RDMAVBNDHI_SHIFT 0\n-#define I40E_GLPES_PFRDMAVBNDHI_RDMAVBNDHI_MASK (0xFFFFFFFF << I40E_GLPES_PFRDMAVBNDHI_RDMAVBNDHI_SHIFT)\n-#define I40E_GLPES_PFRDMAVBNDLO(_i) (0x00014800 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */\n-#define I40E_GLPES_PFRDMAVBNDLO_MAX_INDEX 15\n-#define I40E_GLPES_PFRDMAVBNDLO_RDMAVBNDLO_SHIFT 0\n-#define I40E_GLPES_PFRDMAVBNDLO_RDMAVBNDLO_MASK (0xFFFFFFFF << I40E_GLPES_PFRDMAVBNDLO_RDMAVBNDLO_SHIFT)\n-#define I40E_GLPES_PFRDMAVINVHI(_i) (0x00014A04 + ((_i) * 8)) /* _i=0...15 */ /* 
Reset: PE_CORER */\n-#define I40E_GLPES_PFRDMAVINVHI_MAX_INDEX 15\n-#define I40E_GLPES_PFRDMAVINVHI_RDMAVINVHI_SHIFT 0\n-#define I40E_GLPES_PFRDMAVINVHI_RDMAVINVHI_MASK (0xFFFFFFFF << I40E_GLPES_PFRDMAVINVHI_RDMAVINVHI_SHIFT)\n-#define I40E_GLPES_PFRDMAVINVLO(_i) (0x00014A00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */\n-#define I40E_GLPES_PFRDMAVINVLO_MAX_INDEX 15\n-#define I40E_GLPES_PFRDMAVINVLO_RDMAVINVLO_SHIFT 0\n-#define I40E_GLPES_PFRDMAVINVLO_RDMAVINVLO_MASK (0xFFFFFFFF << I40E_GLPES_PFRDMAVINVLO_RDMAVINVLO_SHIFT)\n-#define I40E_GLPES_PFRXVLANERR(_i) (0x00010000 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */\n-#define I40E_GLPES_PFRXVLANERR_MAX_INDEX 15\n-#define I40E_GLPES_PFRXVLANERR_RXVLANERR_SHIFT 0\n-#define I40E_GLPES_PFRXVLANERR_RXVLANERR_MASK (0xFFFFFF << I40E_GLPES_PFRXVLANERR_RXVLANERR_SHIFT)\n-#define I40E_GLPES_PFTCPRTXSEG(_i) (0x00013600 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */\n-#define I40E_GLPES_PFTCPRTXSEG_MAX_INDEX 15\n-#define I40E_GLPES_PFTCPRTXSEG_TCPRTXSEG_SHIFT 0\n-#define I40E_GLPES_PFTCPRTXSEG_TCPRTXSEG_MASK (0xFFFFFFFF << I40E_GLPES_PFTCPRTXSEG_TCPRTXSEG_SHIFT)\n-#define I40E_GLPES_PFTCPRXOPTERR(_i) (0x00013200 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */\n-#define I40E_GLPES_PFTCPRXOPTERR_MAX_INDEX 15\n-#define I40E_GLPES_PFTCPRXOPTERR_TCPRXOPTERR_SHIFT 0\n-#define I40E_GLPES_PFTCPRXOPTERR_TCPRXOPTERR_MASK (0xFFFFFF << I40E_GLPES_PFTCPRXOPTERR_TCPRXOPTERR_SHIFT)\n-#define I40E_GLPES_PFTCPRXPROTOERR(_i) (0x00013300 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */\n-#define I40E_GLPES_PFTCPRXPROTOERR_MAX_INDEX 15\n-#define I40E_GLPES_PFTCPRXPROTOERR_TCPRXPROTOERR_SHIFT 0\n-#define I40E_GLPES_PFTCPRXPROTOERR_TCPRXPROTOERR_MASK (0xFFFFFF << I40E_GLPES_PFTCPRXPROTOERR_TCPRXPROTOERR_SHIFT)\n-#define I40E_GLPES_PFTCPRXSEGSHI(_i) (0x00013004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */\n-#define I40E_GLPES_PFTCPRXSEGSHI_MAX_INDEX 15\n-#define I40E_GLPES_PFTCPRXSEGSHI_TCPRXSEGSHI_SHIFT 0\n-#define I40E_GLPES_PFTCPRXSEGSHI_TCPRXSEGSHI_MASK (0xFFFF << I40E_GLPES_PFTCPRXSEGSHI_TCPRXSEGSHI_SHIFT)\n-#define I40E_GLPES_PFTCPRXSEGSLO(_i) (0x00013000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */\n-#define I40E_GLPES_PFTCPRXSEGSLO_MAX_INDEX 15\n-#define I40E_GLPES_PFTCPRXSEGSLO_TCPRXSEGSLO_SHIFT 0\n-#define I40E_GLPES_PFTCPRXSEGSLO_TCPRXSEGSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFTCPRXSEGSLO_TCPRXSEGSLO_SHIFT)\n-#define I40E_GLPES_PFTCPTXSEGHI(_i) (0x00013404 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */\n-#define I40E_GLPES_PFTCPTXSEGHI_MAX_INDEX 15\n-#define I40E_GLPES_PFTCPTXSEGHI_TCPTXSEGHI_SHIFT 0\n-#define I40E_GLPES_PFTCPTXSEGHI_TCPTXSEGHI_MASK (0xFFFF << I40E_GLPES_PFTCPTXSEGHI_TCPTXSEGHI_SHIFT)\n-#define I40E_GLPES_PFTCPTXSEGLO(_i) (0x00013400 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */\n-#define I40E_GLPES_PFTCPTXSEGLO_MAX_INDEX 15\n-#define I40E_GLPES_PFTCPTXSEGLO_TCPTXSEGLO_SHIFT 0\n-#define I40E_GLPES_PFTCPTXSEGLO_TCPTXSEGLO_MASK (0xFFFFFFFF << I40E_GLPES_PFTCPTXSEGLO_TCPTXSEGLO_SHIFT)\n-#define I40E_GLPES_PFUDPRXPKTSHI(_i) (0x00013804 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */\n-#define I40E_GLPES_PFUDPRXPKTSHI_MAX_INDEX 15\n-#define I40E_GLPES_PFUDPRXPKTSHI_UDPRXPKTSHI_SHIFT 0\n-#define I40E_GLPES_PFUDPRXPKTSHI_UDPRXPKTSHI_MASK (0xFFFF << I40E_GLPES_PFUDPRXPKTSHI_UDPRXPKTSHI_SHIFT)\n-#define I40E_GLPES_PFUDPRXPKTSLO(_i) (0x00013800 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */\n-#define I40E_GLPES_PFUDPRXPKTSLO_MAX_INDEX 15\n-#define I40E_GLPES_PFUDPRXPKTSLO_UDPRXPKTSLO_SHIFT 0\n-#define 
I40E_GLPES_PFUDPRXPKTSLO_UDPRXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFUDPRXPKTSLO_UDPRXPKTSLO_SHIFT)\n-#define I40E_GLPES_PFUDPTXPKTSHI(_i) (0x00013A04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */\n-#define I40E_GLPES_PFUDPTXPKTSHI_MAX_INDEX 15\n-#define I40E_GLPES_PFUDPTXPKTSHI_UDPTXPKTSHI_SHIFT 0\n-#define I40E_GLPES_PFUDPTXPKTSHI_UDPTXPKTSHI_MASK (0xFFFF << I40E_GLPES_PFUDPTXPKTSHI_UDPTXPKTSHI_SHIFT)\n-#define I40E_GLPES_PFUDPTXPKTSLO(_i) (0x00013A00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */\n-#define I40E_GLPES_PFUDPTXPKTSLO_MAX_INDEX 15\n-#define I40E_GLPES_PFUDPTXPKTSLO_UDPTXPKTSLO_SHIFT 0\n-#define I40E_GLPES_PFUDPTXPKTSLO_UDPTXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFUDPTXPKTSLO_UDPTXPKTSLO_SHIFT)\n-#define I40E_GLPES_RDMARXMULTFPDUSHI 0x0001E014 /* Reset: PE_CORER */\n-#define I40E_GLPES_RDMARXMULTFPDUSHI_RDMARXMULTFPDUSHI_SHIFT 0\n-#define I40E_GLPES_RDMARXMULTFPDUSHI_RDMARXMULTFPDUSHI_MASK (0xFFFFFF << I40E_GLPES_RDMARXMULTFPDUSHI_RDMARXMULTFPDUSHI_SHIFT)\n-#define I40E_GLPES_RDMARXMULTFPDUSLO 0x0001E010 /* Reset: PE_CORER */\n-#define I40E_GLPES_RDMARXMULTFPDUSLO_RDMARXMULTFPDUSLO_SHIFT 0\n-#define I40E_GLPES_RDMARXMULTFPDUSLO_RDMARXMULTFPDUSLO_MASK (0xFFFFFFFF << I40E_GLPES_RDMARXMULTFPDUSLO_RDMARXMULTFPDUSLO_SHIFT)\n-#define I40E_GLPES_RDMARXOOODDPHI 0x0001E01C /* Reset: PE_CORER */\n-#define I40E_GLPES_RDMARXOOODDPHI_RDMARXOOODDPHI_SHIFT 0\n-#define I40E_GLPES_RDMARXOOODDPHI_RDMARXOOODDPHI_MASK (0xFFFFFF << I40E_GLPES_RDMARXOOODDPHI_RDMARXOOODDPHI_SHIFT)\n-#define I40E_GLPES_RDMARXOOODDPLO 0x0001E018 /* Reset: PE_CORER */\n-#define I40E_GLPES_RDMARXOOODDPLO_RDMARXOOODDPLO_SHIFT 0\n-#define I40E_GLPES_RDMARXOOODDPLO_RDMARXOOODDPLO_MASK (0xFFFFFFFF << I40E_GLPES_RDMARXOOODDPLO_RDMARXOOODDPLO_SHIFT)\n-#define I40E_GLPES_RDMARXOOONOMARK 0x0001E004 /* Reset: PE_CORER */\n-#define I40E_GLPES_RDMARXOOONOMARK_RDMAOOONOMARK_SHIFT 0\n-#define I40E_GLPES_RDMARXOOONOMARK_RDMAOOONOMARK_MASK (0xFFFFFFFF << I40E_GLPES_RDMARXOOONOMARK_RDMAOOONOMARK_SHIFT)\n-#define I40E_GLPES_RDMARXUNALIGN 0x0001E000 /* Reset: PE_CORER */\n-#define I40E_GLPES_RDMARXUNALIGN_RDMRXAUNALIGN_SHIFT 0\n-#define I40E_GLPES_RDMARXUNALIGN_RDMRXAUNALIGN_MASK (0xFFFFFFFF << I40E_GLPES_RDMARXUNALIGN_RDMRXAUNALIGN_SHIFT)\n-#define I40E_GLPES_TCPRXFOURHOLEHI 0x0001E044 /* Reset: PE_CORER */\n-#define I40E_GLPES_TCPRXFOURHOLEHI_TCPRXFOURHOLEHI_SHIFT 0\n-#define I40E_GLPES_TCPRXFOURHOLEHI_TCPRXFOURHOLEHI_MASK (0xFFFFFF << I40E_GLPES_TCPRXFOURHOLEHI_TCPRXFOURHOLEHI_SHIFT)\n-#define I40E_GLPES_TCPRXFOURHOLELO 0x0001E040 /* Reset: PE_CORER */\n-#define I40E_GLPES_TCPRXFOURHOLELO_TCPRXFOURHOLELO_SHIFT 0\n-#define I40E_GLPES_TCPRXFOURHOLELO_TCPRXFOURHOLELO_MASK (0xFFFFFFFF << I40E_GLPES_TCPRXFOURHOLELO_TCPRXFOURHOLELO_SHIFT)\n-#define I40E_GLPES_TCPRXONEHOLEHI 0x0001E02C /* Reset: PE_CORER */\n-#define I40E_GLPES_TCPRXONEHOLEHI_TCPRXONEHOLEHI_SHIFT 0\n-#define I40E_GLPES_TCPRXONEHOLEHI_TCPRXONEHOLEHI_MASK (0xFFFFFF << I40E_GLPES_TCPRXONEHOLEHI_TCPRXONEHOLEHI_SHIFT)\n-#define I40E_GLPES_TCPRXONEHOLELO 0x0001E028 /* Reset: PE_CORER */\n-#define I40E_GLPES_TCPRXONEHOLELO_TCPRXONEHOLELO_SHIFT 0\n-#define I40E_GLPES_TCPRXONEHOLELO_TCPRXONEHOLELO_MASK (0xFFFFFFFF << I40E_GLPES_TCPRXONEHOLELO_TCPRXONEHOLELO_SHIFT)\n-#define I40E_GLPES_TCPRXPUREACKHI 0x0001E024 /* Reset: PE_CORER */\n-#define I40E_GLPES_TCPRXPUREACKHI_TCPRXPUREACKSHI_SHIFT 0\n-#define I40E_GLPES_TCPRXPUREACKHI_TCPRXPUREACKSHI_MASK (0xFFFFFF << I40E_GLPES_TCPRXPUREACKHI_TCPRXPUREACKSHI_SHIFT)\n-#define I40E_GLPES_TCPRXPUREACKSLO 0x0001E020 /* Reset: 
PE_CORER */\n-#define I40E_GLPES_TCPRXPUREACKSLO_TCPRXPUREACKLO_SHIFT 0\n-#define I40E_GLPES_TCPRXPUREACKSLO_TCPRXPUREACKLO_MASK (0xFFFFFFFF << I40E_GLPES_TCPRXPUREACKSLO_TCPRXPUREACKLO_SHIFT)\n-#define I40E_GLPES_TCPRXTHREEHOLEHI 0x0001E03C /* Reset: PE_CORER */\n-#define I40E_GLPES_TCPRXTHREEHOLEHI_TCPRXTHREEHOLEHI_SHIFT 0\n-#define I40E_GLPES_TCPRXTHREEHOLEHI_TCPRXTHREEHOLEHI_MASK (0xFFFFFF << I40E_GLPES_TCPRXTHREEHOLEHI_TCPRXTHREEHOLEHI_SHIFT)\n-#define I40E_GLPES_TCPRXTHREEHOLELO 0x0001E038 /* Reset: PE_CORER */\n-#define I40E_GLPES_TCPRXTHREEHOLELO_TCPRXTHREEHOLELO_SHIFT 0\n-#define I40E_GLPES_TCPRXTHREEHOLELO_TCPRXTHREEHOLELO_MASK (0xFFFFFFFF << I40E_GLPES_TCPRXTHREEHOLELO_TCPRXTHREEHOLELO_SHIFT)\n-#define I40E_GLPES_TCPRXTWOHOLEHI 0x0001E034 /* Reset: PE_CORER */\n-#define I40E_GLPES_TCPRXTWOHOLEHI_TCPRXTWOHOLEHI_SHIFT 0\n-#define I40E_GLPES_TCPRXTWOHOLEHI_TCPRXTWOHOLEHI_MASK (0xFFFFFF << I40E_GLPES_TCPRXTWOHOLEHI_TCPRXTWOHOLEHI_SHIFT)\n-#define I40E_GLPES_TCPRXTWOHOLELO 0x0001E030 /* Reset: PE_CORER */\n-#define I40E_GLPES_TCPRXTWOHOLELO_TCPRXTWOHOLELO_SHIFT 0\n-#define I40E_GLPES_TCPRXTWOHOLELO_TCPRXTWOHOLELO_MASK (0xFFFFFFFF << I40E_GLPES_TCPRXTWOHOLELO_TCPRXTWOHOLELO_SHIFT)\n-#define I40E_GLPES_TCPTXRETRANSFASTHI 0x0001E04C /* Reset: PE_CORER */\n-#define I40E_GLPES_TCPTXRETRANSFASTHI_TCPTXRETRANSFASTHI_SHIFT 0\n-#define I40E_GLPES_TCPTXRETRANSFASTHI_TCPTXRETRANSFASTHI_MASK (0xFFFFFF << I40E_GLPES_TCPTXRETRANSFASTHI_TCPTXRETRANSFASTHI_SHIFT)\n-#define I40E_GLPES_TCPTXRETRANSFASTLO 0x0001E048 /* Reset: PE_CORER */\n-#define I40E_GLPES_TCPTXRETRANSFASTLO_TCPTXRETRANSFASTLO_SHIFT 0\n-#define I40E_GLPES_TCPTXRETRANSFASTLO_TCPTXRETRANSFASTLO_MASK (0xFFFFFFFF << I40E_GLPES_TCPTXRETRANSFASTLO_TCPTXRETRANSFASTLO_SHIFT)\n-#define I40E_GLPES_TCPTXTOUTSFASTHI 0x0001E054 /* Reset: PE_CORER */\n-#define I40E_GLPES_TCPTXTOUTSFASTHI_TCPTXTOUTSFASTHI_SHIFT 0\n-#define I40E_GLPES_TCPTXTOUTSFASTHI_TCPTXTOUTSFASTHI_MASK (0xFFFFFF << I40E_GLPES_TCPTXTOUTSFASTHI_TCPTXTOUTSFASTHI_SHIFT)\n-#define I40E_GLPES_TCPTXTOUTSFASTLO 0x0001E050 /* Reset: PE_CORER */\n-#define I40E_GLPES_TCPTXTOUTSFASTLO_TCPTXTOUTSFASTLO_SHIFT 0\n-#define I40E_GLPES_TCPTXTOUTSFASTLO_TCPTXTOUTSFASTLO_MASK (0xFFFFFFFF << I40E_GLPES_TCPTXTOUTSFASTLO_TCPTXTOUTSFASTLO_SHIFT)\n-#define I40E_GLPES_TCPTXTOUTSHI 0x0001E05C /* Reset: PE_CORER */\n-#define I40E_GLPES_TCPTXTOUTSHI_TCPTXTOUTSHI_SHIFT 0\n-#define I40E_GLPES_TCPTXTOUTSHI_TCPTXTOUTSHI_MASK (0xFFFFFF << I40E_GLPES_TCPTXTOUTSHI_TCPTXTOUTSHI_SHIFT)\n-#define I40E_GLPES_TCPTXTOUTSLO 0x0001E058 /* Reset: PE_CORER */\n-#define I40E_GLPES_TCPTXTOUTSLO_TCPTXTOUTSLO_SHIFT 0\n-#define I40E_GLPES_TCPTXTOUTSLO_TCPTXTOUTSLO_MASK (0xFFFFFFFF << I40E_GLPES_TCPTXTOUTSLO_TCPTXTOUTSLO_SHIFT)\n-#define I40E_GLPES_VFIP4RXDISCARD(_i) (0x00018600 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */\n-#define I40E_GLPES_VFIP4RXDISCARD_MAX_INDEX 31\n-#define I40E_GLPES_VFIP4RXDISCARD_IP4RXDISCARD_SHIFT 0\n-#define I40E_GLPES_VFIP4RXDISCARD_IP4RXDISCARD_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4RXDISCARD_IP4RXDISCARD_SHIFT)\n-#define I40E_GLPES_VFIP4RXFRAGSHI(_i) (0x00018804 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */\n-#define I40E_GLPES_VFIP4RXFRAGSHI_MAX_INDEX 31\n-#define I40E_GLPES_VFIP4RXFRAGSHI_IP4RXFRAGSHI_SHIFT 0\n-#define I40E_GLPES_VFIP4RXFRAGSHI_IP4RXFRAGSHI_MASK (0xFFFF << I40E_GLPES_VFIP4RXFRAGSHI_IP4RXFRAGSHI_SHIFT)\n-#define I40E_GLPES_VFIP4RXFRAGSLO(_i) (0x00018800 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */\n-#define I40E_GLPES_VFIP4RXFRAGSLO_MAX_INDEX 31\n-#define 
I40E_GLPES_VFIP4RXFRAGSLO_IP4RXFRAGSLO_SHIFT 0\n-#define I40E_GLPES_VFIP4RXFRAGSLO_IP4RXFRAGSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4RXFRAGSLO_IP4RXFRAGSLO_SHIFT)\n-#define I40E_GLPES_VFIP4RXMCOCTSHI(_i) (0x00018A04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */\n-#define I40E_GLPES_VFIP4RXMCOCTSHI_MAX_INDEX 31\n-#define I40E_GLPES_VFIP4RXMCOCTSHI_IP4RXMCOCTSHI_SHIFT 0\n-#define I40E_GLPES_VFIP4RXMCOCTSHI_IP4RXMCOCTSHI_MASK (0xFFFF << I40E_GLPES_VFIP4RXMCOCTSHI_IP4RXMCOCTSHI_SHIFT)\n-#define I40E_GLPES_VFIP4RXMCOCTSLO(_i) (0x00018A00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */\n-#define I40E_GLPES_VFIP4RXMCOCTSLO_MAX_INDEX 31\n-#define I40E_GLPES_VFIP4RXMCOCTSLO_IP4RXMCOCTSLO_SHIFT 0\n-#define I40E_GLPES_VFIP4RXMCOCTSLO_IP4RXMCOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4RXMCOCTSLO_IP4RXMCOCTSLO_SHIFT)\n-#define I40E_GLPES_VFIP4RXMCPKTSHI(_i) (0x00018C04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */\n-#define I40E_GLPES_VFIP4RXMCPKTSHI_MAX_INDEX 31\n-#define I40E_GLPES_VFIP4RXMCPKTSHI_IP4RXMCPKTSHI_SHIFT 0\n-#define I40E_GLPES_VFIP4RXMCPKTSHI_IP4RXMCPKTSHI_MASK (0xFFFF << I40E_GLPES_VFIP4RXMCPKTSHI_IP4RXMCPKTSHI_SHIFT)\n-#define I40E_GLPES_VFIP4RXMCPKTSLO(_i) (0x00018C00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */\n-#define I40E_GLPES_VFIP4RXMCPKTSLO_MAX_INDEX 31\n-#define I40E_GLPES_VFIP4RXMCPKTSLO_IP4RXMCPKTSLO_SHIFT 0\n-#define I40E_GLPES_VFIP4RXMCPKTSLO_IP4RXMCPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4RXMCPKTSLO_IP4RXMCPKTSLO_SHIFT)\n-#define I40E_GLPES_VFIP4RXOCTSHI(_i) (0x00018204 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */\n-#define I40E_GLPES_VFIP4RXOCTSHI_MAX_INDEX 31\n-#define I40E_GLPES_VFIP4RXOCTSHI_IP4RXOCTSHI_SHIFT 0\n-#define I40E_GLPES_VFIP4RXOCTSHI_IP4RXOCTSHI_MASK (0xFFFF << I40E_GLPES_VFIP4RXOCTSHI_IP4RXOCTSHI_SHIFT)\n-#define I40E_GLPES_VFIP4RXOCTSLO(_i) (0x00018200 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */\n-#define I40E_GLPES_VFIP4RXOCTSLO_MAX_INDEX 31\n-#define I40E_GLPES_VFIP4RXOCTSLO_IP4RXOCTSLO_SHIFT 0\n-#define I40E_GLPES_VFIP4RXOCTSLO_IP4RXOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4RXOCTSLO_IP4RXOCTSLO_SHIFT)\n-#define I40E_GLPES_VFIP4RXPKTSHI(_i) (0x00018404 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */\n-#define I40E_GLPES_VFIP4RXPKTSHI_MAX_INDEX 31\n-#define I40E_GLPES_VFIP4RXPKTSHI_IP4RXPKTSHI_SHIFT 0\n-#define I40E_GLPES_VFIP4RXPKTSHI_IP4RXPKTSHI_MASK (0xFFFF << I40E_GLPES_VFIP4RXPKTSHI_IP4RXPKTSHI_SHIFT)\n-#define I40E_GLPES_VFIP4RXPKTSLO(_i) (0x00018400 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */\n-#define I40E_GLPES_VFIP4RXPKTSLO_MAX_INDEX 31\n-#define I40E_GLPES_VFIP4RXPKTSLO_IP4RXPKTSLO_SHIFT 0\n-#define I40E_GLPES_VFIP4RXPKTSLO_IP4RXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4RXPKTSLO_IP4RXPKTSLO_SHIFT)\n-#define I40E_GLPES_VFIP4RXTRUNC(_i) (0x00018700 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */\n-#define I40E_GLPES_VFIP4RXTRUNC_MAX_INDEX 31\n-#define I40E_GLPES_VFIP4RXTRUNC_IP4RXTRUNC_SHIFT 0\n-#define I40E_GLPES_VFIP4RXTRUNC_IP4RXTRUNC_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4RXTRUNC_IP4RXTRUNC_SHIFT)\n-#define I40E_GLPES_VFIP4TXFRAGSHI(_i) (0x00019E04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */\n-#define I40E_GLPES_VFIP4TXFRAGSHI_MAX_INDEX 31\n-#define I40E_GLPES_VFIP4TXFRAGSHI_IP4TXFRAGSHI_SHIFT 0\n-#define I40E_GLPES_VFIP4TXFRAGSHI_IP4TXFRAGSHI_MASK (0xFFFF << I40E_GLPES_VFIP4TXFRAGSHI_IP4TXFRAGSHI_SHIFT)\n-#define I40E_GLPES_VFIP4TXFRAGSLO(_i) (0x00019E00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */\n-#define I40E_GLPES_VFIP4TXFRAGSLO_MAX_INDEX 31\n-#define 
I40E_GLPES_VFIP4TXFRAGSLO_IP4TXFRAGSLO_SHIFT 0\n-#define I40E_GLPES_VFIP4TXFRAGSLO_IP4TXFRAGSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4TXFRAGSLO_IP4TXFRAGSLO_SHIFT)\n-#define I40E_GLPES_VFIP4TXMCOCTSHI(_i) (0x0001A004 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */\n-#define I40E_GLPES_VFIP4TXMCOCTSHI_MAX_INDEX 31\n-#define I40E_GLPES_VFIP4TXMCOCTSHI_IP4TXMCOCTSHI_SHIFT 0\n-#define I40E_GLPES_VFIP4TXMCOCTSHI_IP4TXMCOCTSHI_MASK (0xFFFF << I40E_GLPES_VFIP4TXMCOCTSHI_IP4TXMCOCTSHI_SHIFT)\n-#define I40E_GLPES_VFIP4TXMCOCTSLO(_i) (0x0001A000 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */\n-#define I40E_GLPES_VFIP4TXMCOCTSLO_MAX_INDEX 31\n-#define I40E_GLPES_VFIP4TXMCOCTSLO_IP4TXMCOCTSLO_SHIFT 0\n-#define I40E_GLPES_VFIP4TXMCOCTSLO_IP4TXMCOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4TXMCOCTSLO_IP4TXMCOCTSLO_SHIFT)\n-#define I40E_GLPES_VFIP4TXMCPKTSHI(_i) (0x0001A204 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */\n-#define I40E_GLPES_VFIP4TXMCPKTSHI_MAX_INDEX 31\n-#define I40E_GLPES_VFIP4TXMCPKTSHI_IP4TXMCPKTSHI_SHIFT 0\n-#define I40E_GLPES_VFIP4TXMCPKTSHI_IP4TXMCPKTSHI_MASK (0xFFFF << I40E_GLPES_VFIP4TXMCPKTSHI_IP4TXMCPKTSHI_SHIFT)\n-#define I40E_GLPES_VFIP4TXMCPKTSLO(_i) (0x0001A200 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */\n-#define I40E_GLPES_VFIP4TXMCPKTSLO_MAX_INDEX 31\n-#define I40E_GLPES_VFIP4TXMCPKTSLO_IP4TXMCPKTSLO_SHIFT 0\n-#define I40E_GLPES_VFIP4TXMCPKTSLO_IP4TXMCPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4TXMCPKTSLO_IP4TXMCPKTSLO_SHIFT)\n-#define I40E_GLPES_VFIP4TXNOROUTE(_i) (0x0001AE00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */\n-#define I40E_GLPES_VFIP4TXNOROUTE_MAX_INDEX 31\n-#define I40E_GLPES_VFIP4TXNOROUTE_IP4TXNOROUTE_SHIFT 0\n-#define I40E_GLPES_VFIP4TXNOROUTE_IP4TXNOROUTE_MASK (0xFFFFFF << I40E_GLPES_VFIP4TXNOROUTE_IP4TXNOROUTE_SHIFT)\n-#define I40E_GLPES_VFIP4TXOCTSHI(_i) (0x00019A04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */\n-#define I40E_GLPES_VFIP4TXOCTSHI_MAX_INDEX 31\n-#define I40E_GLPES_VFIP4TXOCTSHI_IP4TXOCTSHI_SHIFT 0\n-#define I40E_GLPES_VFIP4TXOCTSHI_IP4TXOCTSHI_MASK (0xFFFF << I40E_GLPES_VFIP4TXOCTSHI_IP4TXOCTSHI_SHIFT)\n-#define I40E_GLPES_VFIP4TXOCTSLO(_i) (0x00019A00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */\n-#define I40E_GLPES_VFIP4TXOCTSLO_MAX_INDEX 31\n-#define I40E_GLPES_VFIP4TXOCTSLO_IP4TXOCTSLO_SHIFT 0\n-#define I40E_GLPES_VFIP4TXOCTSLO_IP4TXOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4TXOCTSLO_IP4TXOCTSLO_SHIFT)\n-#define I40E_GLPES_VFIP4TXPKTSHI(_i) (0x00019C04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */\n-#define I40E_GLPES_VFIP4TXPKTSHI_MAX_INDEX 31\n-#define I40E_GLPES_VFIP4TXPKTSHI_IP4TXPKTSHI_SHIFT 0\n-#define I40E_GLPES_VFIP4TXPKTSHI_IP4TXPKTSHI_MASK (0xFFFF << I40E_GLPES_VFIP4TXPKTSHI_IP4TXPKTSHI_SHIFT)\n-#define I40E_GLPES_VFIP4TXPKTSLO(_i) (0x00019C00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */\n-#define I40E_GLPES_VFIP4TXPKTSLO_MAX_INDEX 31\n-#define I40E_GLPES_VFIP4TXPKTSLO_IP4TXPKTSLO_SHIFT 0\n-#define I40E_GLPES_VFIP4TXPKTSLO_IP4TXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4TXPKTSLO_IP4TXPKTSLO_SHIFT)\n-#define I40E_GLPES_VFIP6RXDISCARD(_i) (0x00019200 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */\n-#define I40E_GLPES_VFIP6RXDISCARD_MAX_INDEX 31\n-#define I40E_GLPES_VFIP6RXDISCARD_IP6RXDISCARD_SHIFT 0\n-#define I40E_GLPES_VFIP6RXDISCARD_IP6RXDISCARD_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6RXDISCARD_IP6RXDISCARD_SHIFT)\n-#define I40E_GLPES_VFIP6RXFRAGSHI(_i) (0x00019404 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */\n-#define I40E_GLPES_VFIP6RXFRAGSHI_MAX_INDEX 
31\n-#define I40E_GLPES_VFIP6RXFRAGSHI_IP6RXFRAGSHI_SHIFT 0\n-#define I40E_GLPES_VFIP6RXFRAGSHI_IP6RXFRAGSHI_MASK (0xFFFF << I40E_GLPES_VFIP6RXFRAGSHI_IP6RXFRAGSHI_SHIFT)\n-#define I40E_GLPES_VFIP6RXFRAGSLO(_i) (0x00019400 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */\n-#define I40E_GLPES_VFIP6RXFRAGSLO_MAX_INDEX 31\n-#define I40E_GLPES_VFIP6RXFRAGSLO_IP6RXFRAGSLO_SHIFT 0\n-#define I40E_GLPES_VFIP6RXFRAGSLO_IP6RXFRAGSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6RXFRAGSLO_IP6RXFRAGSLO_SHIFT)\n-#define I40E_GLPES_VFIP6RXMCOCTSHI(_i) (0x00019604 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */\n-#define I40E_GLPES_VFIP6RXMCOCTSHI_MAX_INDEX 31\n-#define I40E_GLPES_VFIP6RXMCOCTSHI_IP6RXMCOCTSHI_SHIFT 0\n-#define I40E_GLPES_VFIP6RXMCOCTSHI_IP6RXMCOCTSHI_MASK (0xFFFF << I40E_GLPES_VFIP6RXMCOCTSHI_IP6RXMCOCTSHI_SHIFT)\n-#define I40E_GLPES_VFIP6RXMCOCTSLO(_i) (0x00019600 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */\n-#define I40E_GLPES_VFIP6RXMCOCTSLO_MAX_INDEX 31\n-#define I40E_GLPES_VFIP6RXMCOCTSLO_IP6RXMCOCTSLO_SHIFT 0\n-#define I40E_GLPES_VFIP6RXMCOCTSLO_IP6RXMCOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6RXMCOCTSLO_IP6RXMCOCTSLO_SHIFT)\n-#define I40E_GLPES_VFIP6RXMCPKTSHI(_i) (0x00019804 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */\n-#define I40E_GLPES_VFIP6RXMCPKTSHI_MAX_INDEX 31\n-#define I40E_GLPES_VFIP6RXMCPKTSHI_IP6RXMCPKTSHI_SHIFT 0\n-#define I40E_GLPES_VFIP6RXMCPKTSHI_IP6RXMCPKTSHI_MASK (0xFFFF << I40E_GLPES_VFIP6RXMCPKTSHI_IP6RXMCPKTSHI_SHIFT)\n-#define I40E_GLPES_VFIP6RXMCPKTSLO(_i) (0x00019800 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */\n-#define I40E_GLPES_VFIP6RXMCPKTSLO_MAX_INDEX 31\n-#define I40E_GLPES_VFIP6RXMCPKTSLO_IP6RXMCPKTSLO_SHIFT 0\n-#define I40E_GLPES_VFIP6RXMCPKTSLO_IP6RXMCPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6RXMCPKTSLO_IP6RXMCPKTSLO_SHIFT)\n-#define I40E_GLPES_VFIP6RXOCTSHI(_i) (0x00018E04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */\n-#define I40E_GLPES_VFIP6RXOCTSHI_MAX_INDEX 31\n-#define I40E_GLPES_VFIP6RXOCTSHI_IP6RXOCTSHI_SHIFT 0\n-#define I40E_GLPES_VFIP6RXOCTSHI_IP6RXOCTSHI_MASK (0xFFFF << I40E_GLPES_VFIP6RXOCTSHI_IP6RXOCTSHI_SHIFT)\n-#define I40E_GLPES_VFIP6RXOCTSLO(_i) (0x00018E00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */\n-#define I40E_GLPES_VFIP6RXOCTSLO_MAX_INDEX 31\n-#define I40E_GLPES_VFIP6RXOCTSLO_IP6RXOCTSLO_SHIFT 0\n-#define I40E_GLPES_VFIP6RXOCTSLO_IP6RXOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6RXOCTSLO_IP6RXOCTSLO_SHIFT)\n-#define I40E_GLPES_VFIP6RXPKTSHI(_i) (0x00019004 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */\n-#define I40E_GLPES_VFIP6RXPKTSHI_MAX_INDEX 31\n-#define I40E_GLPES_VFIP6RXPKTSHI_IP6RXPKTSHI_SHIFT 0\n-#define I40E_GLPES_VFIP6RXPKTSHI_IP6RXPKTSHI_MASK (0xFFFF << I40E_GLPES_VFIP6RXPKTSHI_IP6RXPKTSHI_SHIFT)\n-#define I40E_GLPES_VFIP6RXPKTSLO(_i) (0x00019000 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */\n-#define I40E_GLPES_VFIP6RXPKTSLO_MAX_INDEX 31\n-#define I40E_GLPES_VFIP6RXPKTSLO_IP6RXPKTSLO_SHIFT 0\n-#define I40E_GLPES_VFIP6RXPKTSLO_IP6RXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6RXPKTSLO_IP6RXPKTSLO_SHIFT)\n-#define I40E_GLPES_VFIP6RXTRUNC(_i) (0x00019300 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */\n-#define I40E_GLPES_VFIP6RXTRUNC_MAX_INDEX 31\n-#define I40E_GLPES_VFIP6RXTRUNC_IP6RXTRUNC_SHIFT 0\n-#define I40E_GLPES_VFIP6RXTRUNC_IP6RXTRUNC_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6RXTRUNC_IP6RXTRUNC_SHIFT)\n-#define I40E_GLPES_VFIP6TXFRAGSHI(_i) (0x0001A804 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */\n-#define I40E_GLPES_VFIP6TXFRAGSHI_MAX_INDEX 31\n-#define 
I40E_GLPES_VFIP6TXFRAGSHI_IP6TXFRAGSHI_SHIFT 0\n-#define I40E_GLPES_VFIP6TXFRAGSHI_IP6TXFRAGSHI_MASK (0xFFFF << I40E_GLPES_VFIP6TXFRAGSHI_IP6TXFRAGSHI_SHIFT)\n-#define I40E_GLPES_VFIP6TXFRAGSLO(_i) (0x0001A800 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */\n-#define I40E_GLPES_VFIP6TXFRAGSLO_MAX_INDEX 31\n-#define I40E_GLPES_VFIP6TXFRAGSLO_IP6TXFRAGSLO_SHIFT 0\n-#define I40E_GLPES_VFIP6TXFRAGSLO_IP6TXFRAGSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6TXFRAGSLO_IP6TXFRAGSLO_SHIFT)\n-#define I40E_GLPES_VFIP6TXMCOCTSHI(_i) (0x0001AA04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */\n-#define I40E_GLPES_VFIP6TXMCOCTSHI_MAX_INDEX 31\n-#define I40E_GLPES_VFIP6TXMCOCTSHI_IP6TXMCOCTSHI_SHIFT 0\n-#define I40E_GLPES_VFIP6TXMCOCTSHI_IP6TXMCOCTSHI_MASK (0xFFFF << I40E_GLPES_VFIP6TXMCOCTSHI_IP6TXMCOCTSHI_SHIFT)\n-#define I40E_GLPES_VFIP6TXMCOCTSLO(_i) (0x0001AA00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */\n-#define I40E_GLPES_VFIP6TXMCOCTSLO_MAX_INDEX 31\n-#define I40E_GLPES_VFIP6TXMCOCTSLO_IP6TXMCOCTSLO_SHIFT 0\n-#define I40E_GLPES_VFIP6TXMCOCTSLO_IP6TXMCOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6TXMCOCTSLO_IP6TXMCOCTSLO_SHIFT)\n-#define I40E_GLPES_VFIP6TXMCPKTSHI(_i) (0x0001AC04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */\n-#define I40E_GLPES_VFIP6TXMCPKTSHI_MAX_INDEX 31\n-#define I40E_GLPES_VFIP6TXMCPKTSHI_IP6TXMCPKTSHI_SHIFT 0\n-#define I40E_GLPES_VFIP6TXMCPKTSHI_IP6TXMCPKTSHI_MASK (0xFFFF << I40E_GLPES_VFIP6TXMCPKTSHI_IP6TXMCPKTSHI_SHIFT)\n-#define I40E_GLPES_VFIP6TXMCPKTSLO(_i) (0x0001AC00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */\n-#define I40E_GLPES_VFIP6TXMCPKTSLO_MAX_INDEX 31\n-#define I40E_GLPES_VFIP6TXMCPKTSLO_IP6TXMCPKTSLO_SHIFT 0\n-#define I40E_GLPES_VFIP6TXMCPKTSLO_IP6TXMCPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6TXMCPKTSLO_IP6TXMCPKTSLO_SHIFT)\n-#define I40E_GLPES_VFIP6TXNOROUTE(_i) (0x0001AF00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */\n-#define I40E_GLPES_VFIP6TXNOROUTE_MAX_INDEX 31\n-#define I40E_GLPES_VFIP6TXNOROUTE_IP6TXNOROUTE_SHIFT 0\n-#define I40E_GLPES_VFIP6TXNOROUTE_IP6TXNOROUTE_MASK (0xFFFFFF << I40E_GLPES_VFIP6TXNOROUTE_IP6TXNOROUTE_SHIFT)\n-#define I40E_GLPES_VFIP6TXOCTSHI(_i) (0x0001A404 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */\n-#define I40E_GLPES_VFIP6TXOCTSHI_MAX_INDEX 31\n-#define I40E_GLPES_VFIP6TXOCTSHI_IP6TXOCTSHI_SHIFT 0\n-#define I40E_GLPES_VFIP6TXOCTSHI_IP6TXOCTSHI_MASK (0xFFFF << I40E_GLPES_VFIP6TXOCTSHI_IP6TXOCTSHI_SHIFT)\n-#define I40E_GLPES_VFIP6TXOCTSLO(_i) (0x0001A400 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */\n-#define I40E_GLPES_VFIP6TXOCTSLO_MAX_INDEX 31\n-#define I40E_GLPES_VFIP6TXOCTSLO_IP6TXOCTSLO_SHIFT 0\n-#define I40E_GLPES_VFIP6TXOCTSLO_IP6TXOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6TXOCTSLO_IP6TXOCTSLO_SHIFT)\n-#define I40E_GLPES_VFIP6TXPKTSHI(_i) (0x0001A604 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */\n-#define I40E_GLPES_VFIP6TXPKTSHI_MAX_INDEX 31\n-#define I40E_GLPES_VFIP6TXPKTSHI_IP6TXPKTSHI_SHIFT 0\n-#define I40E_GLPES_VFIP6TXPKTSHI_IP6TXPKTSHI_MASK (0xFFFF << I40E_GLPES_VFIP6TXPKTSHI_IP6TXPKTSHI_SHIFT)\n-#define I40E_GLPES_VFIP6TXPKTSLO(_i) (0x0001A600 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */\n-#define I40E_GLPES_VFIP6TXPKTSLO_MAX_INDEX 31\n-#define I40E_GLPES_VFIP6TXPKTSLO_IP6TXPKTSLO_SHIFT 0\n-#define I40E_GLPES_VFIP6TXPKTSLO_IP6TXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6TXPKTSLO_IP6TXPKTSLO_SHIFT)\n-#define I40E_GLPES_VFRDMARXRDSHI(_i) (0x0001BE04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */\n-#define I40E_GLPES_VFRDMARXRDSHI_MAX_INDEX 31\n-#define 
I40E_GLPES_VFRDMARXRDSHI_RDMARXRDSHI_SHIFT 0\n-#define I40E_GLPES_VFRDMARXRDSHI_RDMARXRDSHI_MASK (0xFFFF << I40E_GLPES_VFRDMARXRDSHI_RDMARXRDSHI_SHIFT)\n-#define I40E_GLPES_VFRDMARXRDSLO(_i) (0x0001BE00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */\n-#define I40E_GLPES_VFRDMARXRDSLO_MAX_INDEX 31\n-#define I40E_GLPES_VFRDMARXRDSLO_RDMARXRDSLO_SHIFT 0\n-#define I40E_GLPES_VFRDMARXRDSLO_RDMARXRDSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFRDMARXRDSLO_RDMARXRDSLO_SHIFT)\n-#define I40E_GLPES_VFRDMARXSNDSHI(_i) (0x0001C004 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */\n-#define I40E_GLPES_VFRDMARXSNDSHI_MAX_INDEX 31\n-#define I40E_GLPES_VFRDMARXSNDSHI_RDMARXSNDSHI_SHIFT 0\n-#define I40E_GLPES_VFRDMARXSNDSHI_RDMARXSNDSHI_MASK (0xFFFF << I40E_GLPES_VFRDMARXSNDSHI_RDMARXSNDSHI_SHIFT)\n-#define I40E_GLPES_VFRDMARXSNDSLO(_i) (0x0001C000 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */\n-#define I40E_GLPES_VFRDMARXSNDSLO_MAX_INDEX 31\n-#define I40E_GLPES_VFRDMARXSNDSLO_RDMARXSNDSLO_SHIFT 0\n-#define I40E_GLPES_VFRDMARXSNDSLO_RDMARXSNDSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFRDMARXSNDSLO_RDMARXSNDSLO_SHIFT)\n-#define I40E_GLPES_VFRDMARXWRSHI(_i) (0x0001BC04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */\n-#define I40E_GLPES_VFRDMARXWRSHI_MAX_INDEX 31\n-#define I40E_GLPES_VFRDMARXWRSHI_RDMARXWRSHI_SHIFT 0\n-#define I40E_GLPES_VFRDMARXWRSHI_RDMARXWRSHI_MASK (0xFFFF << I40E_GLPES_VFRDMARXWRSHI_RDMARXWRSHI_SHIFT)\n-#define I40E_GLPES_VFRDMARXWRSLO(_i) (0x0001BC00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */\n-#define I40E_GLPES_VFRDMARXWRSLO_MAX_INDEX 31\n-#define I40E_GLPES_VFRDMARXWRSLO_RDMARXWRSLO_SHIFT 0\n-#define I40E_GLPES_VFRDMARXWRSLO_RDMARXWRSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFRDMARXWRSLO_RDMARXWRSLO_SHIFT)\n-#define I40E_GLPES_VFRDMATXRDSHI(_i) (0x0001C404 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */\n-#define I40E_GLPES_VFRDMATXRDSHI_MAX_INDEX 31\n-#define I40E_GLPES_VFRDMATXRDSHI_RDMARXRDSHI_SHIFT 0\n-#define I40E_GLPES_VFRDMATXRDSHI_RDMARXRDSHI_MASK (0xFFFF << I40E_GLPES_VFRDMATXRDSHI_RDMARXRDSHI_SHIFT)\n-#define I40E_GLPES_VFRDMATXRDSLO(_i) (0x0001C400 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */\n-#define I40E_GLPES_VFRDMATXRDSLO_MAX_INDEX 31\n-#define I40E_GLPES_VFRDMATXRDSLO_RDMARXRDSLO_SHIFT 0\n-#define I40E_GLPES_VFRDMATXRDSLO_RDMARXRDSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFRDMATXRDSLO_RDMARXRDSLO_SHIFT)\n-#define I40E_GLPES_VFRDMATXSNDSHI(_i) (0x0001C604 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */\n-#define I40E_GLPES_VFRDMATXSNDSHI_MAX_INDEX 31\n-#define I40E_GLPES_VFRDMATXSNDSHI_RDMARXSNDSHI_SHIFT 0\n-#define I40E_GLPES_VFRDMATXSNDSHI_RDMARXSNDSHI_MASK (0xFFFF << I40E_GLPES_VFRDMATXSNDSHI_RDMARXSNDSHI_SHIFT)\n-#define I40E_GLPES_VFRDMATXSNDSLO(_i) (0x0001C600 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */\n-#define I40E_GLPES_VFRDMATXSNDSLO_MAX_INDEX 31\n-#define I40E_GLPES_VFRDMATXSNDSLO_RDMARXSNDSLO_SHIFT 0\n-#define I40E_GLPES_VFRDMATXSNDSLO_RDMARXSNDSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFRDMATXSNDSLO_RDMARXSNDSLO_SHIFT)\n-#define I40E_GLPES_VFRDMATXWRSHI(_i) (0x0001C204 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */\n-#define I40E_GLPES_VFRDMATXWRSHI_MAX_INDEX 31\n-#define I40E_GLPES_VFRDMATXWRSHI_RDMARXWRSHI_SHIFT 0\n-#define I40E_GLPES_VFRDMATXWRSHI_RDMARXWRSHI_MASK (0xFFFF << I40E_GLPES_VFRDMATXWRSHI_RDMARXWRSHI_SHIFT)\n-#define I40E_GLPES_VFRDMATXWRSLO(_i) (0x0001C200 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */\n-#define I40E_GLPES_VFRDMATXWRSLO_MAX_INDEX 31\n-#define I40E_GLPES_VFRDMATXWRSLO_RDMARXWRSLO_SHIFT 0\n-#define 
I40E_GLPES_VFRDMATXWRSLO_RDMARXWRSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFRDMATXWRSLO_RDMARXWRSLO_SHIFT)\n-#define I40E_GLPES_VFRDMAVBNDHI(_i) (0x0001C804 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */\n-#define I40E_GLPES_VFRDMAVBNDHI_MAX_INDEX 31\n-#define I40E_GLPES_VFRDMAVBNDHI_RDMAVBNDHI_SHIFT 0\n-#define I40E_GLPES_VFRDMAVBNDHI_RDMAVBNDHI_MASK (0xFFFFFFFF << I40E_GLPES_VFRDMAVBNDHI_RDMAVBNDHI_SHIFT)\n-#define I40E_GLPES_VFRDMAVBNDLO(_i) (0x0001C800 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */\n-#define I40E_GLPES_VFRDMAVBNDLO_MAX_INDEX 31\n-#define I40E_GLPES_VFRDMAVBNDLO_RDMAVBNDLO_SHIFT 0\n-#define I40E_GLPES_VFRDMAVBNDLO_RDMAVBNDLO_MASK (0xFFFFFFFF << I40E_GLPES_VFRDMAVBNDLO_RDMAVBNDLO_SHIFT)\n-#define I40E_GLPES_VFRDMAVINVHI(_i) (0x0001CA04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */\n-#define I40E_GLPES_VFRDMAVINVHI_MAX_INDEX 31\n-#define I40E_GLPES_VFRDMAVINVHI_RDMAVINVHI_SHIFT 0\n-#define I40E_GLPES_VFRDMAVINVHI_RDMAVINVHI_MASK (0xFFFFFFFF << I40E_GLPES_VFRDMAVINVHI_RDMAVINVHI_SHIFT)\n-#define I40E_GLPES_VFRDMAVINVLO(_i) (0x0001CA00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */\n-#define I40E_GLPES_VFRDMAVINVLO_MAX_INDEX 31\n-#define I40E_GLPES_VFRDMAVINVLO_RDMAVINVLO_SHIFT 0\n-#define I40E_GLPES_VFRDMAVINVLO_RDMAVINVLO_MASK (0xFFFFFFFF << I40E_GLPES_VFRDMAVINVLO_RDMAVINVLO_SHIFT)\n-#define I40E_GLPES_VFRXVLANERR(_i) (0x00018000 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */\n-#define I40E_GLPES_VFRXVLANERR_MAX_INDEX 31\n-#define I40E_GLPES_VFRXVLANERR_RXVLANERR_SHIFT 0\n-#define I40E_GLPES_VFRXVLANERR_RXVLANERR_MASK (0xFFFFFF << I40E_GLPES_VFRXVLANERR_RXVLANERR_SHIFT)\n-#define I40E_GLPES_VFTCPRTXSEG(_i) (0x0001B600 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */\n-#define I40E_GLPES_VFTCPRTXSEG_MAX_INDEX 31\n-#define I40E_GLPES_VFTCPRTXSEG_TCPRTXSEG_SHIFT 0\n-#define I40E_GLPES_VFTCPRTXSEG_TCPRTXSEG_MASK (0xFFFFFFFF << I40E_GLPES_VFTCPRTXSEG_TCPRTXSEG_SHIFT)\n-#define I40E_GLPES_VFTCPRXOPTERR(_i) (0x0001B200 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */\n-#define I40E_GLPES_VFTCPRXOPTERR_MAX_INDEX 31\n-#define I40E_GLPES_VFTCPRXOPTERR_TCPRXOPTERR_SHIFT 0\n-#define I40E_GLPES_VFTCPRXOPTERR_TCPRXOPTERR_MASK (0xFFFFFF << I40E_GLPES_VFTCPRXOPTERR_TCPRXOPTERR_SHIFT)\n-#define I40E_GLPES_VFTCPRXPROTOERR(_i) (0x0001B300 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */\n-#define I40E_GLPES_VFTCPRXPROTOERR_MAX_INDEX 31\n-#define I40E_GLPES_VFTCPRXPROTOERR_TCPRXPROTOERR_SHIFT 0\n-#define I40E_GLPES_VFTCPRXPROTOERR_TCPRXPROTOERR_MASK (0xFFFFFF << I40E_GLPES_VFTCPRXPROTOERR_TCPRXPROTOERR_SHIFT)\n-#define I40E_GLPES_VFTCPRXSEGSHI(_i) (0x0001B004 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */\n-#define I40E_GLPES_VFTCPRXSEGSHI_MAX_INDEX 31\n-#define I40E_GLPES_VFTCPRXSEGSHI_TCPRXSEGSHI_SHIFT 0\n-#define I40E_GLPES_VFTCPRXSEGSHI_TCPRXSEGSHI_MASK (0xFFFF << I40E_GLPES_VFTCPRXSEGSHI_TCPRXSEGSHI_SHIFT)\n-#define I40E_GLPES_VFTCPRXSEGSLO(_i) (0x0001B000 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */\n-#define I40E_GLPES_VFTCPRXSEGSLO_MAX_INDEX 31\n-#define I40E_GLPES_VFTCPRXSEGSLO_TCPRXSEGSLO_SHIFT 0\n-#define I40E_GLPES_VFTCPRXSEGSLO_TCPRXSEGSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFTCPRXSEGSLO_TCPRXSEGSLO_SHIFT)\n-#define I40E_GLPES_VFTCPTXSEGHI(_i) (0x0001B404 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */\n-#define I40E_GLPES_VFTCPTXSEGHI_MAX_INDEX 31\n-#define I40E_GLPES_VFTCPTXSEGHI_TCPTXSEGHI_SHIFT 0\n-#define I40E_GLPES_VFTCPTXSEGHI_TCPTXSEGHI_MASK (0xFFFF << I40E_GLPES_VFTCPTXSEGHI_TCPTXSEGHI_SHIFT)\n-#define 
I40E_GLPES_VFTCPTXSEGLO(_i) (0x0001B400 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */\n-#define I40E_GLPES_VFTCPTXSEGLO_MAX_INDEX 31\n-#define I40E_GLPES_VFTCPTXSEGLO_TCPTXSEGLO_SHIFT 0\n-#define I40E_GLPES_VFTCPTXSEGLO_TCPTXSEGLO_MASK (0xFFFFFFFF << I40E_GLPES_VFTCPTXSEGLO_TCPTXSEGLO_SHIFT)\n-#define I40E_GLPES_VFUDPRXPKTSHI(_i) (0x0001B804 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */\n-#define I40E_GLPES_VFUDPRXPKTSHI_MAX_INDEX 31\n-#define I40E_GLPES_VFUDPRXPKTSHI_UDPRXPKTSHI_SHIFT 0\n-#define I40E_GLPES_VFUDPRXPKTSHI_UDPRXPKTSHI_MASK (0xFFFF << I40E_GLPES_VFUDPRXPKTSHI_UDPRXPKTSHI_SHIFT)\n-#define I40E_GLPES_VFUDPRXPKTSLO(_i) (0x0001B800 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */\n-#define I40E_GLPES_VFUDPRXPKTSLO_MAX_INDEX 31\n-#define I40E_GLPES_VFUDPRXPKTSLO_UDPRXPKTSLO_SHIFT 0\n-#define I40E_GLPES_VFUDPRXPKTSLO_UDPRXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFUDPRXPKTSLO_UDPRXPKTSLO_SHIFT)\n-#define I40E_GLPES_VFUDPTXPKTSHI(_i) (0x0001BA04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */\n-#define I40E_GLPES_VFUDPTXPKTSHI_MAX_INDEX 31\n-#define I40E_GLPES_VFUDPTXPKTSHI_UDPTXPKTSHI_SHIFT 0\n-#define I40E_GLPES_VFUDPTXPKTSHI_UDPTXPKTSHI_MASK (0xFFFF << I40E_GLPES_VFUDPTXPKTSHI_UDPTXPKTSHI_SHIFT)\n-#define I40E_GLPES_VFUDPTXPKTSLO(_i) (0x0001BA00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */\n-#define I40E_GLPES_VFUDPTXPKTSLO_MAX_INDEX 31\n-#define I40E_GLPES_VFUDPTXPKTSLO_UDPTXPKTSLO_SHIFT 0\n-#define I40E_GLPES_VFUDPTXPKTSLO_UDPTXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFUDPTXPKTSLO_UDPTXPKTSLO_SHIFT)\n-\n-#define I40E_VFPE_AEQALLOC1 0x0000A400 /* Reset: VFR */\n-#define I40E_VFPE_AEQALLOC1_AECOUNT_SHIFT 0\n-#define I40E_VFPE_AEQALLOC1_AECOUNT_MASK (0xFFFFFFFF << I40E_VFPE_AEQALLOC1_AECOUNT_SHIFT)\n-#define I40E_VFPE_CCQPHIGH1 0x00009800 /* Reset: VFR */\n-#define I40E_VFPE_CCQPHIGH1_PECCQPHIGH_SHIFT 0\n-#define I40E_VFPE_CCQPHIGH1_PECCQPHIGH_MASK (0xFFFFFFFF << I40E_VFPE_CCQPHIGH1_PECCQPHIGH_SHIFT)\n-#define I40E_VFPE_CCQPLOW1 0x0000AC00 /* Reset: VFR */\n-#define I40E_VFPE_CCQPLOW1_PECCQPLOW_SHIFT 0\n-#define I40E_VFPE_CCQPLOW1_PECCQPLOW_MASK (0xFFFFFFFF << I40E_VFPE_CCQPLOW1_PECCQPLOW_SHIFT)\n-#define I40E_VFPE_CCQPSTATUS1 0x0000B800 /* Reset: VFR */\n-#define I40E_VFPE_CCQPSTATUS1_CCQP_DONE_SHIFT 0\n-#define I40E_VFPE_CCQPSTATUS1_CCQP_DONE_MASK (0x1 << I40E_VFPE_CCQPSTATUS1_CCQP_DONE_SHIFT)\n-#define I40E_VFPE_CCQPSTATUS1_HMC_PROFILE_SHIFT 4\n-#define I40E_VFPE_CCQPSTATUS1_HMC_PROFILE_MASK (0x7 << I40E_VFPE_CCQPSTATUS1_HMC_PROFILE_SHIFT)\n-#define I40E_VFPE_CCQPSTATUS1_RDMA_EN_VFS_SHIFT 16\n-#define I40E_VFPE_CCQPSTATUS1_RDMA_EN_VFS_MASK (0x3F << I40E_VFPE_CCQPSTATUS1_RDMA_EN_VFS_SHIFT)\n-#define I40E_VFPE_CCQPSTATUS1_CCQP_ERR_SHIFT 31\n-#define I40E_VFPE_CCQPSTATUS1_CCQP_ERR_MASK (0x1 << I40E_VFPE_CCQPSTATUS1_CCQP_ERR_SHIFT)\n-#define I40E_VFPE_CQACK1 0x0000B000 /* Reset: VFR */\n-#define I40E_VFPE_CQACK1_PECQID_SHIFT 0\n-#define I40E_VFPE_CQACK1_PECQID_MASK (0x1FFFF << I40E_VFPE_CQACK1_PECQID_SHIFT)\n-#define I40E_VFPE_CQARM1 0x0000B400 /* Reset: VFR */\n-#define I40E_VFPE_CQARM1_PECQID_SHIFT 0\n-#define I40E_VFPE_CQARM1_PECQID_MASK (0x1FFFF << I40E_VFPE_CQARM1_PECQID_SHIFT)\n-#define I40E_VFPE_CQPDB1 0x0000BC00 /* Reset: VFR */\n-#define I40E_VFPE_CQPDB1_WQHEAD_SHIFT 0\n-#define I40E_VFPE_CQPDB1_WQHEAD_MASK (0x7FF << I40E_VFPE_CQPDB1_WQHEAD_SHIFT)\n-#define I40E_VFPE_CQPERRCODES1 0x00009C00 /* Reset: VFR */\n-#define I40E_VFPE_CQPERRCODES1_CQP_MINOR_CODE_SHIFT 0\n-#define I40E_VFPE_CQPERRCODES1_CQP_MINOR_CODE_MASK (0xFFFF << 
I40E_VFPE_CQPERRCODES1_CQP_MINOR_CODE_SHIFT)\n-#define I40E_VFPE_CQPERRCODES1_CQP_MAJOR_CODE_SHIFT 16\n-#define I40E_VFPE_CQPERRCODES1_CQP_MAJOR_CODE_MASK (0xFFFF << I40E_VFPE_CQPERRCODES1_CQP_MAJOR_CODE_SHIFT)\n-#define I40E_VFPE_CQPTAIL1 0x0000A000 /* Reset: VFR */\n-#define I40E_VFPE_CQPTAIL1_WQTAIL_SHIFT 0\n-#define I40E_VFPE_CQPTAIL1_WQTAIL_MASK (0x7FF << I40E_VFPE_CQPTAIL1_WQTAIL_SHIFT)\n-#define I40E_VFPE_CQPTAIL1_CQP_OP_ERR_SHIFT 31\n-#define I40E_VFPE_CQPTAIL1_CQP_OP_ERR_MASK (0x1 << I40E_VFPE_CQPTAIL1_CQP_OP_ERR_SHIFT)\n-#define I40E_VFPE_IPCONFIG01 0x00008C00 /* Reset: VFR */\n-#define I40E_VFPE_IPCONFIG01_PEIPID_SHIFT 0\n-#define I40E_VFPE_IPCONFIG01_PEIPID_MASK (0xFFFF << I40E_VFPE_IPCONFIG01_PEIPID_SHIFT)\n-#define I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_SHIFT 16\n-#define I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_MASK (0x1 << I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_SHIFT)\n-#define I40E_VFPE_MRTEIDXMASK1 0x00009000 /* Reset: VFR */\n-#define I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_SHIFT 0\n-#define I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_MASK (0x1F << I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_SHIFT)\n-#define I40E_VFPE_RCVUNEXPECTEDERROR1 0x00009400 /* Reset: VFR */\n-#define I40E_VFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_SHIFT 0\n-#define I40E_VFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_MASK (0xFFFFFF << I40E_VFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_SHIFT)\n-#define I40E_VFPE_TCPNOWTIMER1 0x0000A800 /* Reset: VFR */\n-#define I40E_VFPE_TCPNOWTIMER1_TCP_NOW_SHIFT 0\n-#define I40E_VFPE_TCPNOWTIMER1_TCP_NOW_MASK (0xFFFFFFFF << I40E_VFPE_TCPNOWTIMER1_TCP_NOW_SHIFT)\n-#define I40E_VFPE_WQEALLOC1 0x0000C000 /* Reset: VFR */\n-#define I40E_VFPE_WQEALLOC1_PEQPID_SHIFT 0\n-#define I40E_VFPE_WQEALLOC1_PEQPID_MASK (0x3FFFF << I40E_VFPE_WQEALLOC1_PEQPID_SHIFT)\n-#define I40E_VFPE_WQEALLOC1_WQE_DESC_INDEX_SHIFT 20\n-#define I40E_VFPE_WQEALLOC1_WQE_DESC_INDEX_MASK (0xFFF << I40E_VFPE_WQEALLOC1_WQE_DESC_INDEX_SHIFT)\n-#endif /* I40IW_REGISTER_H */\ndiff --git a/drivers/infiniband/hw/i40iw/i40iw_status.h b/drivers/infiniband/hw/i40iw/i40iw_status.h\ndeleted file mode 100644\nindex f7013f1..0000000\n--- a/drivers/infiniband/hw/i40iw/i40iw_status.h\n+++ /dev/null\n@@ -1,101 +0,0 @@\n-/*******************************************************************************\n-*\n-* Copyright (c) 2015-2016 Intel Corporation. All rights reserved.\n-*\n-* This software is available to you under a choice of one of two\n-* licenses. You may choose to be licensed under the terms of the GNU\n-* General Public License (GPL) Version 2, available from the file\n-* COPYING in the main directory of this source tree, or the\n-* OpenFabrics.org BSD license below:\n-*\n-* Redistribution and use in source and binary forms, with or\n-* without modification, are permitted provided that the following\n-* conditions are met:\n-*\n-* - Redistributions of source code must retain the above\n-*\tcopyright notice, this list of conditions and the following\n-*\tdisclaimer.\n-*\n-* - Redistributions in binary form must reproduce the above\n-*\tcopyright notice, this list of conditions and the following\n-*\tdisclaimer in the documentation and/or other materials\n-*\tprovided with the distribution.\n-*\n-* THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,\n-* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\n-* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\n-* NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS\n-* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN\n-* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\n-* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n-* SOFTWARE.\n-*\n-*******************************************************************************/\n-\n-#ifndef I40IW_STATUS_H\n-#define I40IW_STATUS_H\n-\n-/* Error Codes */\n-enum i40iw_status_code {\n-\tI40IW_SUCCESS = 0,\n-\tI40IW_ERR_NVM = -1,\n-\tI40IW_ERR_NVM_CHECKSUM = -2,\n-\tI40IW_ERR_CONFIG = -4,\n-\tI40IW_ERR_PARAM = -5,\n-\tI40IW_ERR_DEVICE_NOT_SUPPORTED = -6,\n-\tI40IW_ERR_RESET_FAILED = -7,\n-\tI40IW_ERR_SWFW_SYNC = -8,\n-\tI40IW_ERR_NO_MEMORY = -9,\n-\tI40IW_ERR_BAD_PTR = -10,\n-\tI40IW_ERR_INVALID_PD_ID = -11,\n-\tI40IW_ERR_INVALID_QP_ID = -12,\n-\tI40IW_ERR_INVALID_CQ_ID = -13,\n-\tI40IW_ERR_INVALID_CEQ_ID = -14,\n-\tI40IW_ERR_INVALID_AEQ_ID = -15,\n-\tI40IW_ERR_INVALID_SIZE = -16,\n-\tI40IW_ERR_INVALID_ARP_INDEX = -17,\n-\tI40IW_ERR_INVALID_FPM_FUNC_ID = -18,\n-\tI40IW_ERR_QP_INVALID_MSG_SIZE = -19,\n-\tI40IW_ERR_QP_TOOMANY_WRS_POSTED = -20,\n-\tI40IW_ERR_INVALID_FRAG_COUNT = -21,\n-\tI40IW_ERR_QUEUE_EMPTY = -22,\n-\tI40IW_ERR_INVALID_ALIGNMENT = -23,\n-\tI40IW_ERR_FLUSHED_QUEUE = -24,\n-\tI40IW_ERR_INVALID_PUSH_PAGE_INDEX = -25,\n-\tI40IW_ERR_INVALID_INLINE_DATA_SIZE = -26,\n-\tI40IW_ERR_TIMEOUT = -27,\n-\tI40IW_ERR_OPCODE_MISMATCH = -28,\n-\tI40IW_ERR_CQP_COMPL_ERROR = -29,\n-\tI40IW_ERR_INVALID_VF_ID = -30,\n-\tI40IW_ERR_INVALID_HMCFN_ID = -31,\n-\tI40IW_ERR_BACKING_PAGE_ERROR = -32,\n-\tI40IW_ERR_NO_PBLCHUNKS_AVAILABLE = -33,\n-\tI40IW_ERR_INVALID_PBLE_INDEX = -34,\n-\tI40IW_ERR_INVALID_SD_INDEX = -35,\n-\tI40IW_ERR_INVALID_PAGE_DESC_INDEX = -36,\n-\tI40IW_ERR_INVALID_SD_TYPE = -37,\n-\tI40IW_ERR_MEMCPY_FAILED = -38,\n-\tI40IW_ERR_INVALID_HMC_OBJ_INDEX = -39,\n-\tI40IW_ERR_INVALID_HMC_OBJ_COUNT = -40,\n-\tI40IW_ERR_INVALID_SRQ_ARM_LIMIT = -41,\n-\tI40IW_ERR_SRQ_ENABLED = -42,\n-\tI40IW_ERR_BUF_TOO_SHORT = -43,\n-\tI40IW_ERR_BAD_IWARP_CQE = -44,\n-\tI40IW_ERR_NVM_BLANK_MODE = -45,\n-\tI40IW_ERR_NOT_IMPLEMENTED = -46,\n-\tI40IW_ERR_PE_DOORBELL_NOT_ENABLED = -47,\n-\tI40IW_ERR_NOT_READY = -48,\n-\tI40IW_NOT_SUPPORTED = -49,\n-\tI40IW_ERR_FIRMWARE_API_VERSION = -50,\n-\tI40IW_ERR_RING_FULL = -51,\n-\tI40IW_ERR_MPA_CRC = -61,\n-\tI40IW_ERR_NO_TXBUFS = -62,\n-\tI40IW_ERR_SEQ_NUM = -63,\n-\tI40IW_ERR_list_empty = -64,\n-\tI40IW_ERR_INVALID_MAC_ADDR = -65,\n-\tI40IW_ERR_BAD_STAG = -66,\n-\tI40IW_ERR_CQ_COMPL_ERROR = -67,\n-\tI40IW_ERR_QUEUE_DESTROYED = -68\n-\n-};\n-#endif\ndiff --git a/drivers/infiniband/hw/i40iw/i40iw_type.h b/drivers/infiniband/hw/i40iw/i40iw_type.h\ndeleted file mode 100644\nindex adc8d2e..0000000\n--- a/drivers/infiniband/hw/i40iw/i40iw_type.h\n+++ /dev/null\n@@ -1,1363 +0,0 @@\n-/*******************************************************************************\n-*\n-* Copyright (c) 2015-2016 Intel Corporation. All rights reserved.\n-*\n-* This software is available to you under a choice of one of two\n-* licenses. 
You may choose to be licensed under the terms of the GNU\n-* General Public License (GPL) Version 2, available from the file\n-* COPYING in the main directory of this source tree, or the\n-* OpenFabrics.org BSD license below:\n-*\n-* Redistribution and use in source and binary forms, with or\n-* without modification, are permitted provided that the following\n-* conditions are met:\n-*\n-* - Redistributions of source code must retain the above\n-*\tcopyright notice, this list of conditions and the following\n-*\tdisclaimer.\n-*\n-* - Redistributions in binary form must reproduce the above\n-*\tcopyright notice, this list of conditions and the following\n-*\tdisclaimer in the documentation and/or other materials\n-*\tprovided with the distribution.\n-*\n-* THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,\n-* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\n-* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\n-* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS\n-* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN\n-* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\n-* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n-* SOFTWARE.\n-*\n-*******************************************************************************/\n-\n-#ifndef I40IW_TYPE_H\n-#define I40IW_TYPE_H\n-#include \"i40iw_user.h\"\n-#include \"i40iw_hmc.h\"\n-#include \"i40iw_vf.h\"\n-#include \"i40iw_virtchnl.h\"\n-\n-struct i40iw_cqp_sq_wqe {\n-\tu64 buf[I40IW_CQP_WQE_SIZE];\n-};\n-\n-struct i40iw_sc_aeqe {\n-\tu64 buf[I40IW_AEQE_SIZE];\n-};\n-\n-struct i40iw_ceqe {\n-\tu64 buf[I40IW_CEQE_SIZE];\n-};\n-\n-struct i40iw_cqp_ctx {\n-\tu64 buf[I40IW_CQP_CTX_SIZE];\n-};\n-\n-struct i40iw_cq_shadow_area {\n-\tu64 buf[I40IW_SHADOW_AREA_SIZE];\n-};\n-\n-struct i40iw_sc_dev;\n-struct i40iw_hmc_info;\n-struct i40iw_vsi_pestat;\n-\n-struct i40iw_cqp_ops;\n-struct i40iw_ccq_ops;\n-struct i40iw_ceq_ops;\n-struct i40iw_aeq_ops;\n-struct i40iw_mr_ops;\n-struct i40iw_cqp_misc_ops;\n-struct i40iw_pd_ops;\n-struct i40iw_priv_qp_ops;\n-struct i40iw_priv_cq_ops;\n-struct i40iw_hmc_ops;\n-\n-enum i40iw_page_size {\n-\tI40IW_PAGE_SIZE_4K,\n-\tI40IW_PAGE_SIZE_2M\n-};\n-\n-enum i40iw_resource_indicator_type {\n-\tI40IW_RSRC_INDICATOR_TYPE_ADAPTER = 0,\n-\tI40IW_RSRC_INDICATOR_TYPE_CQ,\n-\tI40IW_RSRC_INDICATOR_TYPE_QP,\n-\tI40IW_RSRC_INDICATOR_TYPE_SRQ\n-};\n-\n-enum i40iw_hdrct_flags {\n-\tDDP_LEN_FLAG = 0x80,\n-\tDDP_HDR_FLAG = 0x40,\n-\tRDMA_HDR_FLAG = 0x20\n-};\n-\n-enum i40iw_term_layers {\n-\tLAYER_RDMA = 0,\n-\tLAYER_DDP = 1,\n-\tLAYER_MPA = 2\n-};\n-\n-enum i40iw_term_error_types {\n-\tRDMAP_REMOTE_PROT = 1,\n-\tRDMAP_REMOTE_OP = 2,\n-\tDDP_CATASTROPHIC = 0,\n-\tDDP_TAGGED_BUFFER = 1,\n-\tDDP_UNTAGGED_BUFFER = 2,\n-\tDDP_LLP = 3\n-};\n-\n-enum i40iw_term_rdma_errors {\n-\tRDMAP_INV_STAG = 0x00,\n-\tRDMAP_INV_BOUNDS = 0x01,\n-\tRDMAP_ACCESS = 0x02,\n-\tRDMAP_UNASSOC_STAG = 0x03,\n-\tRDMAP_TO_WRAP = 0x04,\n-\tRDMAP_INV_RDMAP_VER = 0x05,\n-\tRDMAP_UNEXPECTED_OP = 0x06,\n-\tRDMAP_CATASTROPHIC_LOCAL = 0x07,\n-\tRDMAP_CATASTROPHIC_GLOBAL = 0x08,\n-\tRDMAP_CANT_INV_STAG = 0x09,\n-\tRDMAP_UNSPECIFIED = 0xff\n-};\n-\n-enum i40iw_term_ddp_errors {\n-\tDDP_CATASTROPHIC_LOCAL = 0x00,\n-\tDDP_TAGGED_INV_STAG = 0x00,\n-\tDDP_TAGGED_BOUNDS = 0x01,\n-\tDDP_TAGGED_UNASSOC_STAG = 0x02,\n-\tDDP_TAGGED_TO_WRAP = 0x03,\n-\tDDP_TAGGED_INV_DDP_VER = 0x04,\n-\tDDP_UNTAGGED_INV_QN = 0x01,\n-\tDDP_UNTAGGED_INV_MSN_NO_BUF = 0x02,\n-\tDDP_UNTAGGED_INV_MSN_RANGE = 
0x03,\n-\tDDP_UNTAGGED_INV_MO = 0x04,\n-\tDDP_UNTAGGED_INV_TOO_LONG = 0x05,\n-\tDDP_UNTAGGED_INV_DDP_VER = 0x06\n-};\n-\n-enum i40iw_term_mpa_errors {\n-\tMPA_CLOSED = 0x01,\n-\tMPA_CRC = 0x02,\n-\tMPA_MARKER = 0x03,\n-\tMPA_REQ_RSP = 0x04,\n-};\n-\n-enum i40iw_flush_opcode {\n-\tFLUSH_INVALID = 0,\n-\tFLUSH_PROT_ERR,\n-\tFLUSH_REM_ACCESS_ERR,\n-\tFLUSH_LOC_QP_OP_ERR,\n-\tFLUSH_REM_OP_ERR,\n-\tFLUSH_LOC_LEN_ERR,\n-\tFLUSH_GENERAL_ERR,\n-\tFLUSH_FATAL_ERR\n-};\n-\n-enum i40iw_term_eventtypes {\n-\tTERM_EVENT_QP_FATAL,\n-\tTERM_EVENT_QP_ACCESS_ERR\n-};\n-\n-struct i40iw_terminate_hdr {\n-\tu8 layer_etype;\n-\tu8 error_code;\n-\tu8 hdrct;\n-\tu8 rsvd;\n-};\n-\n-enum i40iw_debug_flag {\n-\tI40IW_DEBUG_NONE\t= 0x00000000,\n-\tI40IW_DEBUG_ERR\t\t= 0x00000001,\n-\tI40IW_DEBUG_INIT\t= 0x00000002,\n-\tI40IW_DEBUG_DEV\t\t= 0x00000004,\n-\tI40IW_DEBUG_CM\t\t= 0x00000008,\n-\tI40IW_DEBUG_VERBS\t= 0x00000010,\n-\tI40IW_DEBUG_PUDA\t= 0x00000020,\n-\tI40IW_DEBUG_ILQ\t\t= 0x00000040,\n-\tI40IW_DEBUG_IEQ\t\t= 0x00000080,\n-\tI40IW_DEBUG_QP\t\t= 0x00000100,\n-\tI40IW_DEBUG_CQ\t\t= 0x00000200,\n-\tI40IW_DEBUG_MR\t\t= 0x00000400,\n-\tI40IW_DEBUG_PBLE\t= 0x00000800,\n-\tI40IW_DEBUG_WQE\t\t= 0x00001000,\n-\tI40IW_DEBUG_AEQ\t\t= 0x00002000,\n-\tI40IW_DEBUG_CQP\t\t= 0x00004000,\n-\tI40IW_DEBUG_HMC\t\t= 0x00008000,\n-\tI40IW_DEBUG_USER\t= 0x00010000,\n-\tI40IW_DEBUG_VIRT\t= 0x00020000,\n-\tI40IW_DEBUG_DCB\t\t= 0x00040000,\n-\tI40IW_DEBUG_CQE\t\t= 0x00800000,\n-\tI40IW_DEBUG_ALL\t\t= 0xFFFFFFFF\n-};\n-\n-enum i40iw_hw_stats_index_32b {\n-\tI40IW_HW_STAT_INDEX_IP4RXDISCARD = 0,\n-\tI40IW_HW_STAT_INDEX_IP4RXTRUNC,\n-\tI40IW_HW_STAT_INDEX_IP4TXNOROUTE,\n-\tI40IW_HW_STAT_INDEX_IP6RXDISCARD,\n-\tI40IW_HW_STAT_INDEX_IP6RXTRUNC,\n-\tI40IW_HW_STAT_INDEX_IP6TXNOROUTE,\n-\tI40IW_HW_STAT_INDEX_TCPRTXSEG,\n-\tI40IW_HW_STAT_INDEX_TCPRXOPTERR,\n-\tI40IW_HW_STAT_INDEX_TCPRXPROTOERR,\n-\tI40IW_HW_STAT_INDEX_MAX_32\n-};\n-\n-enum i40iw_hw_stats_index_64b {\n-\tI40IW_HW_STAT_INDEX_IP4RXOCTS = 0,\n-\tI40IW_HW_STAT_INDEX_IP4RXPKTS,\n-\tI40IW_HW_STAT_INDEX_IP4RXFRAGS,\n-\tI40IW_HW_STAT_INDEX_IP4RXMCPKTS,\n-\tI40IW_HW_STAT_INDEX_IP4TXOCTS,\n-\tI40IW_HW_STAT_INDEX_IP4TXPKTS,\n-\tI40IW_HW_STAT_INDEX_IP4TXFRAGS,\n-\tI40IW_HW_STAT_INDEX_IP4TXMCPKTS,\n-\tI40IW_HW_STAT_INDEX_IP6RXOCTS,\n-\tI40IW_HW_STAT_INDEX_IP6RXPKTS,\n-\tI40IW_HW_STAT_INDEX_IP6RXFRAGS,\n-\tI40IW_HW_STAT_INDEX_IP6RXMCPKTS,\n-\tI40IW_HW_STAT_INDEX_IP6TXOCTS,\n-\tI40IW_HW_STAT_INDEX_IP6TXPKTS,\n-\tI40IW_HW_STAT_INDEX_IP6TXFRAGS,\n-\tI40IW_HW_STAT_INDEX_IP6TXMCPKTS,\n-\tI40IW_HW_STAT_INDEX_TCPRXSEGS,\n-\tI40IW_HW_STAT_INDEX_TCPTXSEG,\n-\tI40IW_HW_STAT_INDEX_RDMARXRDS,\n-\tI40IW_HW_STAT_INDEX_RDMARXSNDS,\n-\tI40IW_HW_STAT_INDEX_RDMARXWRS,\n-\tI40IW_HW_STAT_INDEX_RDMATXRDS,\n-\tI40IW_HW_STAT_INDEX_RDMATXSNDS,\n-\tI40IW_HW_STAT_INDEX_RDMATXWRS,\n-\tI40IW_HW_STAT_INDEX_RDMAVBND,\n-\tI40IW_HW_STAT_INDEX_RDMAVINV,\n-\tI40IW_HW_STAT_INDEX_MAX_64\n-};\n-\n-struct i40iw_dev_hw_stats_offsets {\n-\tu32 stats_offset_32[I40IW_HW_STAT_INDEX_MAX_32];\n-\tu32 stats_offset_64[I40IW_HW_STAT_INDEX_MAX_64];\n-};\n-\n-struct i40iw_dev_hw_stats {\n-\tu64 stats_value_32[I40IW_HW_STAT_INDEX_MAX_32];\n-\tu64 stats_value_64[I40IW_HW_STAT_INDEX_MAX_64];\n-};\n-\n-struct i40iw_vsi_pestat {\n-\tstruct i40iw_hw *hw;\n-\tstruct i40iw_dev_hw_stats hw_stats;\n-\tstruct i40iw_dev_hw_stats last_read_hw_stats;\n-\tstruct i40iw_dev_hw_stats_offsets hw_stats_offsets;\n-\tstruct timer_list stats_timer;\n-\tstruct i40iw_sc_vsi *vsi;\n-\tspinlock_t lock; /* rdma stats lock */\n-};\n-\n-struct i40iw_hw {\n-\tu8 
__iomem *hw_addr;\n-\tvoid *dev_context;\n-\tstruct i40iw_hmc_info hmc;\n-};\n-\n-struct i40iw_pfpdu {\n-\tstruct list_head rxlist;\n-\tu32 rcv_nxt;\n-\tu32 fps;\n-\tu32 max_fpdu_data;\n-\tbool mode;\n-\tbool mpa_crc_err;\n-\tu64 total_ieq_bufs;\n-\tu64 fpdu_processed;\n-\tu64 bad_seq_num;\n-\tu64 crc_err;\n-\tu64 no_tx_bufs;\n-\tu64 tx_err;\n-\tu64 out_of_order;\n-\tu64 pmode_count;\n-};\n-\n-struct i40iw_sc_pd {\n-\tu32 size;\n-\tstruct i40iw_sc_dev *dev;\n-\tu16 pd_id;\n-\tint abi_ver;\n-};\n-\n-struct i40iw_cqp_quanta {\n-\tu64 elem[I40IW_CQP_WQE_SIZE];\n-};\n-\n-struct i40iw_sc_cqp {\n-\tu32 size;\n-\tu64 sq_pa;\n-\tu64 host_ctx_pa;\n-\tvoid *back_cqp;\n-\tstruct i40iw_sc_dev *dev;\n-\tenum i40iw_status_code (*process_cqp_sds)(struct i40iw_sc_dev *,\n-\t\t\t\t\t\t struct i40iw_update_sds_info *);\n-\tstruct i40iw_dma_mem sdbuf;\n-\tstruct i40iw_ring sq_ring;\n-\tstruct i40iw_cqp_quanta *sq_base;\n-\tu64 *host_ctx;\n-\tu64 *scratch_array;\n-\tu32 cqp_id;\n-\tu32 sq_size;\n-\tu32 hw_sq_size;\n-\tu8 struct_ver;\n-\tu8 polarity;\n-\tbool en_datacenter_tcp;\n-\tu8 hmc_profile;\n-\tu8 enabled_vf_count;\n-\tu8 timeout_count;\n-};\n-\n-struct i40iw_sc_aeq {\n-\tu32 size;\n-\tu64 aeq_elem_pa;\n-\tstruct i40iw_sc_dev *dev;\n-\tstruct i40iw_sc_aeqe *aeqe_base;\n-\tvoid *pbl_list;\n-\tu32 elem_cnt;\n-\tstruct i40iw_ring aeq_ring;\n-\tbool virtual_map;\n-\tu8 pbl_chunk_size;\n-\tu32 first_pm_pbl_idx;\n-\tu8 polarity;\n-};\n-\n-struct i40iw_sc_ceq {\n-\tu32 size;\n-\tu64 ceq_elem_pa;\n-\tstruct i40iw_sc_dev *dev;\n-\tstruct i40iw_ceqe *ceqe_base;\n-\tvoid *pbl_list;\n-\tu32 ceq_id;\n-\tu32 elem_cnt;\n-\tstruct i40iw_ring ceq_ring;\n-\tbool virtual_map;\n-\tu8 pbl_chunk_size;\n-\tbool tph_en;\n-\tu8 tph_val;\n-\tu32 first_pm_pbl_idx;\n-\tu8 polarity;\n-};\n-\n-struct i40iw_sc_cq {\n-\tstruct i40iw_cq_uk cq_uk;\n-\tu64 cq_pa;\n-\tu64 shadow_area_pa;\n-\tstruct i40iw_sc_dev *dev;\n-\tstruct i40iw_sc_vsi *vsi;\n-\tvoid *pbl_list;\n-\tvoid *back_cq;\n-\tu32 ceq_id;\n-\tu32 shadow_read_threshold;\n-\tbool ceqe_mask;\n-\tbool virtual_map;\n-\tu8 pbl_chunk_size;\n-\tu8 cq_type;\n-\tbool ceq_id_valid;\n-\tbool tph_en;\n-\tu8 tph_val;\n-\tu32 first_pm_pbl_idx;\n-\tbool check_overflow;\n-};\n-\n-struct i40iw_sc_qp {\n-\tstruct i40iw_qp_uk qp_uk;\n-\tu64 sq_pa;\n-\tu64 rq_pa;\n-\tu64 hw_host_ctx_pa;\n-\tu64 shadow_area_pa;\n-\tu64 q2_pa;\n-\tstruct i40iw_sc_dev *dev;\n-\tstruct i40iw_sc_vsi *vsi;\n-\tstruct i40iw_sc_pd *pd;\n-\tu64 *hw_host_ctx;\n-\tvoid *llp_stream_handle;\n-\tvoid *back_qp;\n-\tstruct i40iw_pfpdu pfpdu;\n-\tu8 *q2_buf;\n-\tu64 qp_compl_ctx;\n-\tu16 qs_handle;\n-\tu16 push_idx;\n-\tu8 sq_tph_val;\n-\tu8 rq_tph_val;\n-\tu8 qp_state;\n-\tu8 qp_type;\n-\tu8 hw_sq_size;\n-\tu8 hw_rq_size;\n-\tu8 src_mac_addr_idx;\n-\tbool sq_tph_en;\n-\tbool rq_tph_en;\n-\tbool rcv_tph_en;\n-\tbool xmit_tph_en;\n-\tbool virtual_map;\n-\tbool flush_sq;\n-\tbool flush_rq;\n-\tu8 user_pri;\n-\tstruct list_head list;\n-\tbool on_qoslist;\n-\tbool sq_flush;\n-\tenum i40iw_flush_opcode flush_code;\n-\tenum i40iw_term_eventtypes eventtype;\n-\tu8 term_flags;\n-};\n-\n-struct i40iw_hmc_fpm_misc {\n-\tu32 max_ceqs;\n-\tu32 max_sds;\n-\tu32 xf_block_size;\n-\tu32 q1_block_size;\n-\tu32 ht_multiplier;\n-\tu32 timer_bucket;\n-};\n-\n-struct i40iw_vchnl_if {\n-\tenum i40iw_status_code (*vchnl_recv)(struct i40iw_sc_dev *, u32, u8 *, u16);\n-\tenum i40iw_status_code (*vchnl_send)(struct i40iw_sc_dev *dev, u32, u8 *, u16);\n-};\n-\n-#define I40IW_VCHNL_MAX_VF_MSG_SIZE 512\n-\n-struct i40iw_vchnl_vf_msg_buffer {\n-\tstruct 
i40iw_virtchnl_op_buf vchnl_msg;\n-\tchar parm_buffer[I40IW_VCHNL_MAX_VF_MSG_SIZE - 1];\n-};\n-\n-struct i40iw_qos {\n-\tstruct list_head qplist;\n-\tspinlock_t lock;\t/* qos list */\n-\tu16 qs_handle;\n-};\n-\n-struct i40iw_vfdev {\n-\tstruct i40iw_sc_dev *pf_dev;\n-\tu8 *hmc_info_mem;\n-\tstruct i40iw_vsi_pestat pestat;\n-\tstruct i40iw_hmc_pble_info *pble_info;\n-\tstruct i40iw_hmc_info hmc_info;\n-\tstruct i40iw_vchnl_vf_msg_buffer vf_msg_buffer;\n-\tu64 fpm_query_buf_pa;\n-\tu64 *fpm_query_buf;\n-\tu32 vf_id;\n-\tu32 msg_count;\n-\tbool pf_hmc_initialized;\n-\tu16 pmf_index;\n-\tu16 iw_vf_idx;\t\t/* VF Device table index */\n-\tbool stats_initialized;\n-};\n-\n-#define I40IW_INVALID_FCN_ID 0xff\n-struct i40iw_sc_vsi {\n-\tstruct i40iw_sc_dev *dev;\n-\tvoid *back_vsi; /* Owned by OS */\n-\tu32 ilq_count;\n-\tstruct i40iw_virt_mem ilq_mem;\n-\tstruct i40iw_puda_rsrc *ilq;\n-\tu32 ieq_count;\n-\tstruct i40iw_virt_mem ieq_mem;\n-\tstruct i40iw_puda_rsrc *ieq;\n-\tu16 exception_lan_queue;\n-\tu16 mtu;\n-\tu8 fcn_id;\n-\tbool stats_fcn_id_alloc;\n-\tstruct i40iw_qos qos[I40IW_MAX_USER_PRIORITY];\n-\tstruct i40iw_vsi_pestat *pestat;\n-};\n-\n-struct i40iw_sc_dev {\n-\tstruct list_head cqp_cmd_head;\t/* head of the CQP command list */\n-\tspinlock_t cqp_lock; /* cqp list sync */\n-\tstruct i40iw_dev_uk dev_uk;\n-\tbool fcn_id_array[I40IW_MAX_STATS_COUNT];\n-\tstruct i40iw_dma_mem vf_fpm_query_buf[I40IW_MAX_PE_ENABLED_VF_COUNT];\n-\tu64 fpm_query_buf_pa;\n-\tu64 fpm_commit_buf_pa;\n-\tu64 *fpm_query_buf;\n-\tu64 *fpm_commit_buf;\n-\tvoid *back_dev;\n-\tstruct i40iw_hw *hw;\n-\tu8 __iomem *db_addr;\n-\tstruct i40iw_hmc_info *hmc_info;\n-\tstruct i40iw_hmc_pble_info *pble_info;\n-\tstruct i40iw_vfdev *vf_dev[I40IW_MAX_PE_ENABLED_VF_COUNT];\n-\tstruct i40iw_sc_cqp *cqp;\n-\tstruct i40iw_sc_aeq *aeq;\n-\tstruct i40iw_sc_ceq *ceq[I40IW_CEQ_MAX_COUNT];\n-\tstruct i40iw_sc_cq *ccq;\n-\tstruct i40iw_cqp_ops *cqp_ops;\n-\tstruct i40iw_ccq_ops *ccq_ops;\n-\tstruct i40iw_ceq_ops *ceq_ops;\n-\tstruct i40iw_aeq_ops *aeq_ops;\n-\tstruct i40iw_pd_ops *iw_pd_ops;\n-\tstruct i40iw_priv_qp_ops *iw_priv_qp_ops;\n-\tstruct i40iw_priv_cq_ops *iw_priv_cq_ops;\n-\tstruct i40iw_mr_ops *mr_ops;\n-\tstruct i40iw_cqp_misc_ops *cqp_misc_ops;\n-\tstruct i40iw_hmc_ops *hmc_ops;\n-\tstruct i40iw_vchnl_if vchnl_if;\n-\tconst struct i40iw_vf_cqp_ops *iw_vf_cqp_ops;\n-\n-\tstruct i40iw_hmc_fpm_misc hmc_fpm_misc;\n-\tu32 debug_mask;\n-\tu8 hmc_fn_id;\n-\tbool is_pf;\n-\tbool vchnl_up;\n-\tbool ceq_valid;\n-\tu8 vf_id;\n-\twait_queue_head_t vf_reqs;\n-\tu64 cqp_cmd_stats[OP_SIZE_CQP_STAT_ARRAY];\n-\tstruct i40iw_vchnl_vf_msg_buffer vchnl_vf_msg_buf;\n-\tu8 hw_rev;\n-};\n-\n-struct i40iw_modify_cq_info {\n-\tu64 cq_pa;\n-\tstruct i40iw_cqe *cq_base;\n-\tvoid *pbl_list;\n-\tu32 ceq_id;\n-\tu32 cq_size;\n-\tu32 shadow_read_threshold;\n-\tbool virtual_map;\n-\tu8 pbl_chunk_size;\n-\tbool check_overflow;\n-\tbool cq_resize;\n-\tbool ceq_change;\n-\tbool check_overflow_change;\n-\tu32 first_pm_pbl_idx;\n-\tbool ceq_valid;\n-};\n-\n-struct i40iw_create_qp_info {\n-\tu8 next_iwarp_state;\n-\tbool ord_valid;\n-\tbool tcp_ctx_valid;\n-\tbool cq_num_valid;\n-\tbool arp_cache_idx_valid;\n-};\n-\n-struct i40iw_modify_qp_info {\n-\tu64 rx_win0;\n-\tu64 rx_win1;\n-\tu8 next_iwarp_state;\n-\tu8 termlen;\n-\tbool ord_valid;\n-\tbool tcp_ctx_valid;\n-\tbool cq_num_valid;\n-\tbool arp_cache_idx_valid;\n-\tbool reset_tcp_conn;\n-\tbool remove_hash_idx;\n-\tbool dont_send_term;\n-\tbool dont_send_fin;\n-\tbool cached_var_valid;\n-\tbool 
force_loopback;\n-};\n-\n-struct i40iw_ccq_cqe_info {\n-\tstruct i40iw_sc_cqp *cqp;\n-\tu64 scratch;\n-\tu32 op_ret_val;\n-\tu16 maj_err_code;\n-\tu16 min_err_code;\n-\tu8 op_code;\n-\tbool error;\n-};\n-\n-struct i40iw_l2params {\n-\tu16 qs_handle_list[I40IW_MAX_USER_PRIORITY];\n-\tu16 mtu;\n-};\n-\n-struct i40iw_vsi_init_info {\n-\tstruct i40iw_sc_dev *dev;\n-\tvoid *back_vsi;\n-\tstruct i40iw_l2params *params;\n-\tu16 exception_lan_queue;\n-};\n-\n-struct i40iw_vsi_stats_info {\n-\tstruct i40iw_vsi_pestat *pestat;\n-\tu8 fcn_id;\n-\tbool alloc_fcn_id;\n-\tbool stats_initialize;\n-};\n-\n-struct i40iw_device_init_info {\n-\tu64 fpm_query_buf_pa;\n-\tu64 fpm_commit_buf_pa;\n-\tu64 *fpm_query_buf;\n-\tu64 *fpm_commit_buf;\n-\tstruct i40iw_hw *hw;\n-\tvoid __iomem *bar0;\n-\tenum i40iw_status_code (*vchnl_send)(struct i40iw_sc_dev *, u32, u8 *, u16);\n-\tu8 hmc_fn_id;\n-\tbool is_pf;\n-\tu32 debug_mask;\n-};\n-\n-enum i40iw_cqp_hmc_profile {\n-\tI40IW_HMC_PROFILE_DEFAULT = 1,\n-\tI40IW_HMC_PROFILE_FAVOR_VF = 2,\n-\tI40IW_HMC_PROFILE_EQUAL = 3,\n-};\n-\n-struct i40iw_cqp_init_info {\n-\tu64 cqp_compl_ctx;\n-\tu64 host_ctx_pa;\n-\tu64 sq_pa;\n-\tstruct i40iw_sc_dev *dev;\n-\tstruct i40iw_cqp_quanta *sq;\n-\tu64 *host_ctx;\n-\tu64 *scratch_array;\n-\tu32 sq_size;\n-\tu8 struct_ver;\n-\tbool en_datacenter_tcp;\n-\tu8 hmc_profile;\n-\tu8 enabled_vf_count;\n-};\n-\n-struct i40iw_ceq_init_info {\n-\tu64 ceqe_pa;\n-\tstruct i40iw_sc_dev *dev;\n-\tu64 *ceqe_base;\n-\tvoid *pbl_list;\n-\tu32 elem_cnt;\n-\tu32 ceq_id;\n-\tbool virtual_map;\n-\tu8 pbl_chunk_size;\n-\tbool tph_en;\n-\tu8 tph_val;\n-\tu32 first_pm_pbl_idx;\n-};\n-\n-struct i40iw_aeq_init_info {\n-\tu64 aeq_elem_pa;\n-\tstruct i40iw_sc_dev *dev;\n-\tu32 *aeqe_base;\n-\tvoid *pbl_list;\n-\tu32 elem_cnt;\n-\tbool virtual_map;\n-\tu8 pbl_chunk_size;\n-\tu32 first_pm_pbl_idx;\n-};\n-\n-struct i40iw_ccq_init_info {\n-\tu64 cq_pa;\n-\tu64 shadow_area_pa;\n-\tstruct i40iw_sc_dev *dev;\n-\tstruct i40iw_cqe *cq_base;\n-\tu64 *shadow_area;\n-\tvoid *pbl_list;\n-\tu32 num_elem;\n-\tu32 ceq_id;\n-\tu32 shadow_read_threshold;\n-\tbool ceqe_mask;\n-\tbool ceq_id_valid;\n-\tbool tph_en;\n-\tu8 tph_val;\n-\tbool avoid_mem_cflct;\n-\tbool virtual_map;\n-\tu8 pbl_chunk_size;\n-\tu32 first_pm_pbl_idx;\n-};\n-\n-struct i40iwarp_offload_info {\n-\tu16 rcv_mark_offset;\n-\tu16 snd_mark_offset;\n-\tu16 pd_id;\n-\tu8 ddp_ver;\n-\tu8 rdmap_ver;\n-\tu8 ord_size;\n-\tu8 ird_size;\n-\tbool wr_rdresp_en;\n-\tbool rd_enable;\n-\tbool snd_mark_en;\n-\tbool rcv_mark_en;\n-\tbool bind_en;\n-\tbool fast_reg_en;\n-\tbool priv_mode_en;\n-\tbool lsmm_present;\n-\tu8 iwarp_mode;\n-\tbool align_hdrs;\n-\tbool rcv_no_mpa_crc;\n-\n-\tu8 last_byte_sent;\n-};\n-\n-struct i40iw_tcp_offload_info {\n-\tbool ipv4;\n-\tbool no_nagle;\n-\tbool insert_vlan_tag;\n-\tbool time_stamp;\n-\tu8 cwnd_inc_limit;\n-\tbool drop_ooo_seg;\n-\tu8 dup_ack_thresh;\n-\tu8 ttl;\n-\tu8 src_mac_addr_idx;\n-\tbool avoid_stretch_ack;\n-\tu8 tos;\n-\tu16 src_port;\n-\tu16 dst_port;\n-\tu32 dest_ip_addr0;\n-\tu32 dest_ip_addr1;\n-\tu32 dest_ip_addr2;\n-\tu32 dest_ip_addr3;\n-\tu32 snd_mss;\n-\tu16 vlan_tag;\n-\tu16 arp_idx;\n-\tu32 flow_label;\n-\tbool wscale;\n-\tu8 tcp_state;\n-\tu8 snd_wscale;\n-\tu8 rcv_wscale;\n-\tu32 time_stamp_recent;\n-\tu32 time_stamp_age;\n-\tu32 snd_nxt;\n-\tu32 snd_wnd;\n-\tu32 rcv_nxt;\n-\tu32 rcv_wnd;\n-\tu32 snd_max;\n-\tu32 snd_una;\n-\tu32 srtt;\n-\tu32 rtt_var;\n-\tu32 ss_thresh;\n-\tu32 cwnd;\n-\tu32 snd_wl1;\n-\tu32 snd_wl2;\n-\tu32 max_snd_window;\n-\tu8 
rexmit_thresh;\n-\tu32 local_ipaddr0;\n-\tu32 local_ipaddr1;\n-\tu32 local_ipaddr2;\n-\tu32 local_ipaddr3;\n-\tbool ignore_tcp_opt;\n-\tbool ignore_tcp_uns_opt;\n-};\n-\n-struct i40iw_qp_host_ctx_info {\n-\tu64 qp_compl_ctx;\n-\tstruct i40iw_tcp_offload_info *tcp_info;\n-\tstruct i40iwarp_offload_info *iwarp_info;\n-\tu32 send_cq_num;\n-\tu32 rcv_cq_num;\n-\tu16 push_idx;\n-\tbool push_mode_en;\n-\tbool tcp_info_valid;\n-\tbool iwarp_info_valid;\n-\tbool err_rq_idx_valid;\n-\tu16 err_rq_idx;\n-\tbool add_to_qoslist;\n-\tu8 user_pri;\n-};\n-\n-struct i40iw_aeqe_info {\n-\tu64 compl_ctx;\n-\tu32 qp_cq_id;\n-\tu16 ae_id;\n-\tu16 wqe_idx;\n-\tu8 tcp_state;\n-\tu8 iwarp_state;\n-\tbool qp;\n-\tbool cq;\n-\tbool sq;\n-\tbool in_rdrsp_wr;\n-\tbool out_rdrsp;\n-\tu8 q2_data_written;\n-\tbool aeqe_overflow;\n-};\n-\n-struct i40iw_allocate_stag_info {\n-\tu64 total_len;\n-\tu32 chunk_size;\n-\tu32 stag_idx;\n-\tu32 page_size;\n-\tu16 pd_id;\n-\tu16 access_rights;\n-\tbool remote_access;\n-\tbool use_hmc_fcn_index;\n-\tu8 hmc_fcn_index;\n-\tbool use_pf_rid;\n-};\n-\n-struct i40iw_reg_ns_stag_info {\n-\tu64 reg_addr_pa;\n-\tu64 fbo;\n-\tvoid *va;\n-\tu64 total_len;\n-\tu32 page_size;\n-\tu32 chunk_size;\n-\tu32 first_pm_pbl_index;\n-\tenum i40iw_addressing_type addr_type;\n-\ti40iw_stag_index stag_idx;\n-\tu16 access_rights;\n-\tu16 pd_id;\n-\ti40iw_stag_key stag_key;\n-\tbool use_hmc_fcn_index;\n-\tu8 hmc_fcn_index;\n-\tbool use_pf_rid;\n-};\n-\n-struct i40iw_fast_reg_stag_info {\n-\tu64 wr_id;\n-\tu64 reg_addr_pa;\n-\tu64 fbo;\n-\tvoid *va;\n-\tu64 total_len;\n-\tu32 page_size;\n-\tu32 chunk_size;\n-\tu32 first_pm_pbl_index;\n-\tenum i40iw_addressing_type addr_type;\n-\ti40iw_stag_index stag_idx;\n-\tu16 access_rights;\n-\tu16 pd_id;\n-\ti40iw_stag_key stag_key;\n-\tbool local_fence;\n-\tbool read_fence;\n-\tbool signaled;\n-\tbool use_hmc_fcn_index;\n-\tu8 hmc_fcn_index;\n-\tbool use_pf_rid;\n-\tbool defer_flag;\n-};\n-\n-struct i40iw_dealloc_stag_info {\n-\tu32 stag_idx;\n-\tu16 pd_id;\n-\tbool mr;\n-\tbool dealloc_pbl;\n-};\n-\n-struct i40iw_register_shared_stag {\n-\tvoid *va;\n-\tenum i40iw_addressing_type addr_type;\n-\ti40iw_stag_index new_stag_idx;\n-\ti40iw_stag_index parent_stag_idx;\n-\tu32 access_rights;\n-\tu16 pd_id;\n-\ti40iw_stag_key new_stag_key;\n-};\n-\n-struct i40iw_qp_init_info {\n-\tstruct i40iw_qp_uk_init_info qp_uk_init_info;\n-\tstruct i40iw_sc_pd *pd;\n-\tstruct i40iw_sc_vsi *vsi;\n-\tu64 *host_ctx;\n-\tu8 *q2;\n-\tu64 sq_pa;\n-\tu64 rq_pa;\n-\tu64 host_ctx_pa;\n-\tu64 q2_pa;\n-\tu64 shadow_area_pa;\n-\tint abi_ver;\n-\tu8 sq_tph_val;\n-\tu8 rq_tph_val;\n-\tu8 type;\n-\tbool sq_tph_en;\n-\tbool rq_tph_en;\n-\tbool rcv_tph_en;\n-\tbool xmit_tph_en;\n-\tbool virtual_map;\n-};\n-\n-struct i40iw_cq_init_info {\n-\tstruct i40iw_sc_dev *dev;\n-\tu64 cq_base_pa;\n-\tu64 shadow_area_pa;\n-\tu32 ceq_id;\n-\tu32 shadow_read_threshold;\n-\tbool virtual_map;\n-\tbool ceqe_mask;\n-\tu8 pbl_chunk_size;\n-\tu32 first_pm_pbl_idx;\n-\tbool ceq_id_valid;\n-\tbool tph_en;\n-\tu8 tph_val;\n-\tu8 type;\n-\tstruct i40iw_cq_uk_init_info cq_uk_init_info;\n-};\n-\n-struct i40iw_upload_context_info {\n-\tu64 buf_pa;\n-\tbool freeze_qp;\n-\tbool raw_format;\n-\tu32 qp_id;\n-\tu8 qp_type;\n-};\n-\n-struct i40iw_add_arp_cache_entry_info {\n-\tu8 mac_addr[6];\n-\tu32 reach_max;\n-\tu16 arp_index;\n-\tbool permanent;\n-};\n-\n-struct i40iw_apbvt_info {\n-\tu16 port;\n-\tbool add;\n-};\n-\n-enum i40iw_quad_entry_type {\n-\tI40IW_QHASH_TYPE_TCP_ESTABLISHED = 1,\n-\tI40IW_QHASH_TYPE_TCP_SYN,\n-};\n-\n-enum 
i40iw_quad_hash_manage_type {\n-\tI40IW_QHASH_MANAGE_TYPE_DELETE = 0,\n-\tI40IW_QHASH_MANAGE_TYPE_ADD,\n-\tI40IW_QHASH_MANAGE_TYPE_MODIFY\n-};\n-\n-struct i40iw_qhash_table_info {\n-\tstruct i40iw_sc_vsi *vsi;\n-\tenum i40iw_quad_hash_manage_type manage;\n-\tenum i40iw_quad_entry_type entry_type;\n-\tbool vlan_valid;\n-\tbool ipv4_valid;\n-\tu8 mac_addr[6];\n-\tu16 vlan_id;\n-\tu8 user_pri;\n-\tu32 qp_num;\n-\tu32 dest_ip[4];\n-\tu32 src_ip[4];\n-\tu16 dest_port;\n-\tu16 src_port;\n-};\n-\n-struct i40iw_local_mac_ipaddr_entry_info {\n-\tu8 mac_addr[6];\n-\tu8 entry_idx;\n-};\n-\n-struct i40iw_cqp_manage_push_page_info {\n-\tu32 push_idx;\n-\tu16 qs_handle;\n-\tu8 free_page;\n-};\n-\n-struct i40iw_qp_flush_info {\n-\tu16 sq_minor_code;\n-\tu16 sq_major_code;\n-\tu16 rq_minor_code;\n-\tu16 rq_major_code;\n-\tu16 ae_code;\n-\tu8 ae_source;\n-\tbool sq;\n-\tbool rq;\n-\tbool userflushcode;\n-\tbool generate_ae;\n-};\n-\n-struct i40iw_cqp_commit_fpm_values {\n-\tu64 qp_base;\n-\tu64 cq_base;\n-\tu32 hte_base;\n-\tu32 arp_base;\n-\tu32 apbvt_inuse_base;\n-\tu32 mr_base;\n-\tu32 xf_base;\n-\tu32 xffl_base;\n-\tu32 q1_base;\n-\tu32 q1fl_base;\n-\tu32 fsimc_base;\n-\tu32 fsiav_base;\n-\tu32 pbl_base;\n-\n-\tu32 qp_cnt;\n-\tu32 cq_cnt;\n-\tu32 hte_cnt;\n-\tu32 arp_cnt;\n-\tu32 mr_cnt;\n-\tu32 xf_cnt;\n-\tu32 xffl_cnt;\n-\tu32 q1_cnt;\n-\tu32 q1fl_cnt;\n-\tu32 fsimc_cnt;\n-\tu32 fsiav_cnt;\n-\tu32 pbl_cnt;\n-};\n-\n-struct i40iw_cqp_query_fpm_values {\n-\tu16 first_pe_sd_index;\n-\tu32 qp_objsize;\n-\tu32 cq_objsize;\n-\tu32 hte_objsize;\n-\tu32 arp_objsize;\n-\tu32 mr_objsize;\n-\tu32 xf_objsize;\n-\tu32 q1_objsize;\n-\tu32 fsimc_objsize;\n-\tu32 fsiav_objsize;\n-\n-\tu32 qp_max;\n-\tu32 cq_max;\n-\tu32 hte_max;\n-\tu32 arp_max;\n-\tu32 mr_max;\n-\tu32 xf_max;\n-\tu32 xffl_max;\n-\tu32 q1_max;\n-\tu32 q1fl_max;\n-\tu32 fsimc_max;\n-\tu32 fsiav_max;\n-\tu32 pbl_max;\n-};\n-\n-struct i40iw_gen_ae_info {\n-\tu16 ae_code;\n-\tu8 ae_source;\n-};\n-\n-struct i40iw_cqp_ops {\n-\tenum i40iw_status_code (*cqp_init)(struct i40iw_sc_cqp *,\n-\t\t\t\t\t struct i40iw_cqp_init_info *);\n-\tenum i40iw_status_code (*cqp_create)(struct i40iw_sc_cqp *, u16 *, u16 *);\n-\tvoid (*cqp_post_sq)(struct i40iw_sc_cqp *);\n-\tu64 *(*cqp_get_next_send_wqe)(struct i40iw_sc_cqp *, u64 scratch);\n-\tenum i40iw_status_code (*cqp_destroy)(struct i40iw_sc_cqp *);\n-\tenum i40iw_status_code (*poll_for_cqp_op_done)(struct i40iw_sc_cqp *, u8,\n-\t\t\t\t\t\t struct i40iw_ccq_cqe_info *);\n-};\n-\n-struct i40iw_ccq_ops {\n-\tenum i40iw_status_code (*ccq_init)(struct i40iw_sc_cq *,\n-\t\t\t\t\t struct i40iw_ccq_init_info *);\n-\tenum i40iw_status_code (*ccq_create)(struct i40iw_sc_cq *, u64, bool, bool);\n-\tenum i40iw_status_code (*ccq_destroy)(struct i40iw_sc_cq *, u64, bool);\n-\tenum i40iw_status_code (*ccq_create_done)(struct i40iw_sc_cq *);\n-\tenum i40iw_status_code (*ccq_get_cqe_info)(struct i40iw_sc_cq *,\n-\t\t\t\t\t\t struct i40iw_ccq_cqe_info *);\n-\tvoid (*ccq_arm)(struct i40iw_sc_cq *);\n-};\n-\n-struct i40iw_ceq_ops {\n-\tenum i40iw_status_code (*ceq_init)(struct i40iw_sc_ceq *,\n-\t\t\t\t\t struct i40iw_ceq_init_info *);\n-\tenum i40iw_status_code (*ceq_create)(struct i40iw_sc_ceq *, u64, bool);\n-\tenum i40iw_status_code (*cceq_create_done)(struct i40iw_sc_ceq *);\n-\tenum i40iw_status_code (*cceq_destroy_done)(struct i40iw_sc_ceq *);\n-\tenum i40iw_status_code (*cceq_create)(struct i40iw_sc_ceq *, u64);\n-\tenum i40iw_status_code (*ceq_destroy)(struct i40iw_sc_ceq *, u64, bool);\n-\tvoid *(*process_ceq)(struct 
i40iw_sc_dev *, struct i40iw_sc_ceq *);\n-};\n-\n-struct i40iw_aeq_ops {\n-\tenum i40iw_status_code (*aeq_init)(struct i40iw_sc_aeq *,\n-\t\t\t\t\t struct i40iw_aeq_init_info *);\n-\tenum i40iw_status_code (*aeq_create)(struct i40iw_sc_aeq *, u64, bool);\n-\tenum i40iw_status_code (*aeq_destroy)(struct i40iw_sc_aeq *, u64, bool);\n-\tenum i40iw_status_code (*get_next_aeqe)(struct i40iw_sc_aeq *,\n-\t\t\t\t\t\tstruct i40iw_aeqe_info *);\n-\tenum i40iw_status_code (*repost_aeq_entries)(struct i40iw_sc_dev *, u32);\n-\tenum i40iw_status_code (*aeq_create_done)(struct i40iw_sc_aeq *);\n-\tenum i40iw_status_code (*aeq_destroy_done)(struct i40iw_sc_aeq *);\n-};\n-\n-struct i40iw_pd_ops {\n-\tvoid (*pd_init)(struct i40iw_sc_dev *, struct i40iw_sc_pd *, u16, int);\n-};\n-\n-struct i40iw_priv_qp_ops {\n-\tenum i40iw_status_code (*qp_init)(struct i40iw_sc_qp *, struct i40iw_qp_init_info *);\n-\tenum i40iw_status_code (*qp_create)(struct i40iw_sc_qp *,\n-\t\t\t\t\t struct i40iw_create_qp_info *, u64, bool);\n-\tenum i40iw_status_code (*qp_modify)(struct i40iw_sc_qp *,\n-\t\t\t\t\t struct i40iw_modify_qp_info *, u64, bool);\n-\tenum i40iw_status_code (*qp_destroy)(struct i40iw_sc_qp *, u64, bool, bool, bool);\n-\tenum i40iw_status_code (*qp_flush_wqes)(struct i40iw_sc_qp *,\n-\t\t\t\t\t\tstruct i40iw_qp_flush_info *, u64, bool);\n-\tenum i40iw_status_code (*qp_upload_context)(struct i40iw_sc_dev *,\n-\t\t\t\t\t\t struct i40iw_upload_context_info *,\n-\t\t\t\t\t\t u64, bool);\n-\tenum i40iw_status_code (*qp_setctx)(struct i40iw_sc_qp *, u64 *,\n-\t\t\t\t\t struct i40iw_qp_host_ctx_info *);\n-\n-\tvoid (*qp_send_lsmm)(struct i40iw_sc_qp *, void *, u32, i40iw_stag);\n-\tvoid (*qp_send_lsmm_nostag)(struct i40iw_sc_qp *, void *, u32);\n-\tvoid (*qp_send_rtt)(struct i40iw_sc_qp *, bool);\n-\tenum i40iw_status_code (*qp_post_wqe0)(struct i40iw_sc_qp *, u8);\n-\tenum i40iw_status_code (*iw_mr_fast_register)(struct i40iw_sc_qp *,\n-\t\t\t\t\t\t struct i40iw_fast_reg_stag_info *,\n-\t\t\t\t\t\t bool);\n-};\n-\n-struct i40iw_priv_cq_ops {\n-\tenum i40iw_status_code (*cq_init)(struct i40iw_sc_cq *, struct i40iw_cq_init_info *);\n-\tenum i40iw_status_code (*cq_create)(struct i40iw_sc_cq *, u64, bool, bool);\n-\tenum i40iw_status_code (*cq_destroy)(struct i40iw_sc_cq *, u64, bool);\n-\tenum i40iw_status_code (*cq_modify)(struct i40iw_sc_cq *,\n-\t\t\t\t\t struct i40iw_modify_cq_info *, u64, bool);\n-};\n-\n-struct i40iw_mr_ops {\n-\tenum i40iw_status_code (*alloc_stag)(struct i40iw_sc_dev *,\n-\t\t\t\t\t struct i40iw_allocate_stag_info *, u64, bool);\n-\tenum i40iw_status_code (*mr_reg_non_shared)(struct i40iw_sc_dev *,\n-\t\t\t\t\t\t struct i40iw_reg_ns_stag_info *,\n-\t\t\t\t\t\t u64, bool);\n-\tenum i40iw_status_code (*mr_reg_shared)(struct i40iw_sc_dev *,\n-\t\t\t\t\t\tstruct i40iw_register_shared_stag *,\n-\t\t\t\t\t\tu64, bool);\n-\tenum i40iw_status_code (*dealloc_stag)(struct i40iw_sc_dev *,\n-\t\t\t\t\t struct i40iw_dealloc_stag_info *,\n-\t\t\t\t\t u64, bool);\n-\tenum i40iw_status_code (*query_stag)(struct i40iw_sc_dev *, u64, u32, bool);\n-\tenum i40iw_status_code (*mw_alloc)(struct i40iw_sc_dev *, u64, u32, u16, bool);\n-};\n-\n-struct i40iw_cqp_misc_ops {\n-\tenum i40iw_status_code (*manage_push_page)(struct i40iw_sc_cqp *,\n-\t\t\t\t\t\t struct i40iw_cqp_manage_push_page_info *,\n-\t\t\t\t\t\t u64, bool);\n-\tenum i40iw_status_code (*manage_hmc_pm_func_table)(struct i40iw_sc_cqp *,\n-\t\t\t\t\t\t\t u64, u8, bool, bool);\n-\tenum i40iw_status_code (*set_hmc_resource_profile)(struct i40iw_sc_cqp 
*,\n-\t\t\t\t\t\t\t u64, u8, u8, bool, bool);\n-\tenum i40iw_status_code (*commit_fpm_values)(struct i40iw_sc_cqp *, u64, u8,\n-\t\t\t\t\t\t struct i40iw_dma_mem *, bool, u8);\n-\tenum i40iw_status_code (*query_fpm_values)(struct i40iw_sc_cqp *, u64, u8,\n-\t\t\t\t\t\t struct i40iw_dma_mem *, bool, u8);\n-\tenum i40iw_status_code (*static_hmc_pages_allocated)(struct i40iw_sc_cqp *,\n-\t\t\t\t\t\t\t u64, u8, bool, bool);\n-\tenum i40iw_status_code (*add_arp_cache_entry)(struct i40iw_sc_cqp *,\n-\t\t\t\t\t\t struct i40iw_add_arp_cache_entry_info *,\n-\t\t\t\t\t\t u64, bool);\n-\tenum i40iw_status_code (*del_arp_cache_entry)(struct i40iw_sc_cqp *, u64, u16, bool);\n-\tenum i40iw_status_code (*query_arp_cache_entry)(struct i40iw_sc_cqp *, u64, u16, bool);\n-\tenum i40iw_status_code (*manage_apbvt_entry)(struct i40iw_sc_cqp *,\n-\t\t\t\t\t\t struct i40iw_apbvt_info *, u64, bool);\n-\tenum i40iw_status_code (*manage_qhash_table_entry)(struct i40iw_sc_cqp *,\n-\t\t\t\t\t\t\t struct i40iw_qhash_table_info *, u64, bool);\n-\tenum i40iw_status_code (*alloc_local_mac_ipaddr_table_entry)(struct i40iw_sc_cqp *, u64, bool);\n-\tenum i40iw_status_code (*add_local_mac_ipaddr_entry)(struct i40iw_sc_cqp *,\n-\t\t\t\t\t\t\t struct i40iw_local_mac_ipaddr_entry_info *,\n-\t\t\t\t\t\t\t u64, bool);\n-\tenum i40iw_status_code (*del_local_mac_ipaddr_entry)(struct i40iw_sc_cqp *, u64, u8, u8, bool);\n-\tenum i40iw_status_code (*cqp_nop)(struct i40iw_sc_cqp *, u64, bool);\n-\tenum i40iw_status_code (*commit_fpm_values_done)(struct i40iw_sc_cqp\n-\t\t\t\t\t\t\t *);\n-\tenum i40iw_status_code (*query_fpm_values_done)(struct i40iw_sc_cqp *);\n-\tenum i40iw_status_code (*manage_hmc_pm_func_table_done)(struct i40iw_sc_cqp *);\n-\tenum i40iw_status_code (*update_suspend_qp)(struct i40iw_sc_cqp *, struct i40iw_sc_qp *, u64);\n-\tenum i40iw_status_code (*update_resume_qp)(struct i40iw_sc_cqp *, struct i40iw_sc_qp *, u64);\n-};\n-\n-struct i40iw_hmc_ops {\n-\tenum i40iw_status_code (*init_iw_hmc)(struct i40iw_sc_dev *, u8);\n-\tenum i40iw_status_code (*parse_fpm_query_buf)(u64 *, struct i40iw_hmc_info *,\n-\t\t\t\t\t\t struct i40iw_hmc_fpm_misc *);\n-\tenum i40iw_status_code (*configure_iw_fpm)(struct i40iw_sc_dev *, u8);\n-\tenum i40iw_status_code (*parse_fpm_commit_buf)(u64 *, struct i40iw_hmc_obj_info *, u32 *sd);\n-\tenum i40iw_status_code (*create_hmc_object)(struct i40iw_sc_dev *dev,\n-\t\t\t\t\t\t struct i40iw_hmc_create_obj_info *);\n-\tenum i40iw_status_code (*del_hmc_object)(struct i40iw_sc_dev *dev,\n-\t\t\t\t\t\t struct i40iw_hmc_del_obj_info *,\n-\t\t\t\t\t\t bool reset);\n-\tenum i40iw_status_code (*pf_init_vfhmc)(struct i40iw_sc_dev *, u8, u32 *);\n-\tenum i40iw_status_code (*vf_configure_vffpm)(struct i40iw_sc_dev *, u32 *);\n-};\n-\n-struct cqp_info {\n-\tunion {\n-\t\tstruct {\n-\t\t\tstruct i40iw_sc_qp *qp;\n-\t\t\tstruct i40iw_create_qp_info info;\n-\t\t\tu64 scratch;\n-\t\t} qp_create;\n-\n-\t\tstruct {\n-\t\t\tstruct i40iw_sc_qp *qp;\n-\t\t\tstruct i40iw_modify_qp_info info;\n-\t\t\tu64 scratch;\n-\t\t} qp_modify;\n-\n-\t\tstruct {\n-\t\t\tstruct i40iw_sc_qp *qp;\n-\t\t\tu64 scratch;\n-\t\t\tbool remove_hash_idx;\n-\t\t\tbool ignore_mw_bnd;\n-\t\t} qp_destroy;\n-\n-\t\tstruct {\n-\t\t\tstruct i40iw_sc_cq *cq;\n-\t\t\tu64 scratch;\n-\t\t\tbool check_overflow;\n-\t\t} cq_create;\n-\n-\t\tstruct {\n-\t\t\tstruct i40iw_sc_cq *cq;\n-\t\t\tu64 scratch;\n-\t\t} cq_destroy;\n-\n-\t\tstruct {\n-\t\t\tstruct i40iw_sc_dev *dev;\n-\t\t\tstruct i40iw_allocate_stag_info info;\n-\t\t\tu64 scratch;\n-\t\t} 
alloc_stag;\n-\n-\t\tstruct {\n-\t\t\tstruct i40iw_sc_dev *dev;\n-\t\t\tu64 scratch;\n-\t\t\tu32 mw_stag_index;\n-\t\t\tu16 pd_id;\n-\t\t} mw_alloc;\n-\n-\t\tstruct {\n-\t\t\tstruct i40iw_sc_dev *dev;\n-\t\t\tstruct i40iw_reg_ns_stag_info info;\n-\t\t\tu64 scratch;\n-\t\t} mr_reg_non_shared;\n-\n-\t\tstruct {\n-\t\t\tstruct i40iw_sc_dev *dev;\n-\t\t\tstruct i40iw_dealloc_stag_info info;\n-\t\t\tu64 scratch;\n-\t\t} dealloc_stag;\n-\n-\t\tstruct {\n-\t\t\tstruct i40iw_sc_cqp *cqp;\n-\t\t\tstruct i40iw_local_mac_ipaddr_entry_info info;\n-\t\t\tu64 scratch;\n-\t\t} add_local_mac_ipaddr_entry;\n-\n-\t\tstruct {\n-\t\t\tstruct i40iw_sc_cqp *cqp;\n-\t\t\tstruct i40iw_add_arp_cache_entry_info info;\n-\t\t\tu64 scratch;\n-\t\t} add_arp_cache_entry;\n-\n-\t\tstruct {\n-\t\t\tstruct i40iw_sc_cqp *cqp;\n-\t\t\tu64 scratch;\n-\t\t\tu8 entry_idx;\n-\t\t\tu8 ignore_ref_count;\n-\t\t} del_local_mac_ipaddr_entry;\n-\n-\t\tstruct {\n-\t\t\tstruct i40iw_sc_cqp *cqp;\n-\t\t\tu64 scratch;\n-\t\t\tu16 arp_index;\n-\t\t} del_arp_cache_entry;\n-\n-\t\tstruct {\n-\t\t\tstruct i40iw_sc_cqp *cqp;\n-\t\t\tstruct i40iw_manage_vf_pble_info info;\n-\t\t\tu64 scratch;\n-\t\t} manage_vf_pble_bp;\n-\n-\t\tstruct {\n-\t\t\tstruct i40iw_sc_cqp *cqp;\n-\t\t\tstruct i40iw_cqp_manage_push_page_info info;\n-\t\t\tu64 scratch;\n-\t\t} manage_push_page;\n-\n-\t\tstruct {\n-\t\t\tstruct i40iw_sc_dev *dev;\n-\t\t\tstruct i40iw_upload_context_info info;\n-\t\t\tu64 scratch;\n-\t\t} qp_upload_context;\n-\n-\t\tstruct {\n-\t\t\tstruct i40iw_sc_cqp *cqp;\n-\t\t\tu64 scratch;\n-\t\t} alloc_local_mac_ipaddr_entry;\n-\n-\t\tstruct {\n-\t\t\tstruct i40iw_sc_dev *dev;\n-\t\t\tstruct i40iw_hmc_fcn_info info;\n-\t\t\tu64 scratch;\n-\t\t} manage_hmc_pm;\n-\n-\t\tstruct {\n-\t\t\tstruct i40iw_sc_ceq *ceq;\n-\t\t\tu64 scratch;\n-\t\t} ceq_create;\n-\n-\t\tstruct {\n-\t\t\tstruct i40iw_sc_ceq *ceq;\n-\t\t\tu64 scratch;\n-\t\t} ceq_destroy;\n-\n-\t\tstruct {\n-\t\t\tstruct i40iw_sc_aeq *aeq;\n-\t\t\tu64 scratch;\n-\t\t} aeq_create;\n-\n-\t\tstruct {\n-\t\t\tstruct i40iw_sc_aeq *aeq;\n-\t\t\tu64 scratch;\n-\t\t} aeq_destroy;\n-\n-\t\tstruct {\n-\t\t\tstruct i40iw_sc_qp *qp;\n-\t\t\tstruct i40iw_qp_flush_info info;\n-\t\t\tu64 scratch;\n-\t\t} qp_flush_wqes;\n-\n-\t\tstruct {\n-\t\t\tstruct i40iw_sc_qp *qp;\n-\t\t\tstruct i40iw_gen_ae_info info;\n-\t\t\tu64 scratch;\n-\t\t} gen_ae;\n-\n-\t\tstruct {\n-\t\t\tstruct i40iw_sc_cqp *cqp;\n-\t\t\tvoid *fpm_values_va;\n-\t\t\tu64 fpm_values_pa;\n-\t\t\tu8 hmc_fn_id;\n-\t\t\tu64 scratch;\n-\t\t} query_fpm_values;\n-\n-\t\tstruct {\n-\t\t\tstruct i40iw_sc_cqp *cqp;\n-\t\t\tvoid *fpm_values_va;\n-\t\t\tu64 fpm_values_pa;\n-\t\t\tu8 hmc_fn_id;\n-\t\t\tu64 scratch;\n-\t\t} commit_fpm_values;\n-\n-\t\tstruct {\n-\t\t\tstruct i40iw_sc_cqp *cqp;\n-\t\t\tstruct i40iw_apbvt_info info;\n-\t\t\tu64 scratch;\n-\t\t} manage_apbvt_entry;\n-\n-\t\tstruct {\n-\t\t\tstruct i40iw_sc_cqp *cqp;\n-\t\t\tstruct i40iw_qhash_table_info info;\n-\t\t\tu64 scratch;\n-\t\t} manage_qhash_table_entry;\n-\n-\t\tstruct {\n-\t\t\tstruct i40iw_sc_dev *dev;\n-\t\t\tstruct i40iw_update_sds_info info;\n-\t\t\tu64 scratch;\n-\t\t} update_pe_sds;\n-\n-\t\tstruct {\n-\t\t\tstruct i40iw_sc_cqp *cqp;\n-\t\t\tstruct i40iw_sc_qp *qp;\n-\t\t\tu64 scratch;\n-\t\t} suspend_resume;\n-\t} u;\n-};\n-\n-struct cqp_commands_info {\n-\tstruct list_head cqp_cmd_entry;\n-\tu8 cqp_cmd;\n-\tu8 post_sq;\n-\tstruct cqp_info in;\n-};\n-\n-struct i40iw_virtchnl_work_info {\n-\tvoid (*callback_fcn)(void *vf_dev);\n-\tvoid *worker_vf_dev;\n-};\n-\n-struct 
i40iw_cqp_timeout {\n-\tu64 compl_cqp_cmds;\n-\tu8 count;\n-};\n-\n-#endif\ndiff --git a/drivers/infiniband/hw/i40iw/i40iw_uk.c b/drivers/infiniband/hw/i40iw/i40iw_uk.c\ndeleted file mode 100644\nindex 8afa5a6..0000000\n--- a/drivers/infiniband/hw/i40iw/i40iw_uk.c\n+++ /dev/null\n@@ -1,1232 +0,0 @@\n-/*******************************************************************************\n-*\n-* Copyright (c) 2015-2016 Intel Corporation. All rights reserved.\n-*\n-* This software is available to you under a choice of one of two\n-* licenses. You may choose to be licensed under the terms of the GNU\n-* General Public License (GPL) Version 2, available from the file\n-* COPYING in the main directory of this source tree, or the\n-* OpenFabrics.org BSD license below:\n-*\n-* Redistribution and use in source and binary forms, with or\n-* without modification, are permitted provided that the following\n-* conditions are met:\n-*\n-* - Redistributions of source code must retain the above\n-*\tcopyright notice, this list of conditions and the following\n-*\tdisclaimer.\n-*\n-* - Redistributions in binary form must reproduce the above\n-*\tcopyright notice, this list of conditions and the following\n-*\tdisclaimer in the documentation and/or other materials\n-*\tprovided with the distribution.\n-*\n-* THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,\n-* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\n-* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\n-* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS\n-* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN\n-* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\n-* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n-* SOFTWARE.\n-*\n-*******************************************************************************/\n-\n-#include \"i40iw_osdep.h\"\n-#include \"i40iw_status.h\"\n-#include \"i40iw_d.h\"\n-#include \"i40iw_user.h\"\n-#include \"i40iw_register.h\"\n-\n-static u32 nop_signature = 0x55550000;\n-\n-/**\n- * i40iw_nop_1 - insert a nop wqe and move head. 
no post work\n- * @qp: hw qp ptr\n- */\n-static enum i40iw_status_code i40iw_nop_1(struct i40iw_qp_uk *qp)\n-{\n-\tu64 header, *wqe;\n-\tu64 *wqe_0 = NULL;\n-\tu32 wqe_idx, peek_head;\n-\tbool signaled = false;\n-\n-\tif (!qp->sq_ring.head)\n-\t\treturn I40IW_ERR_PARAM;\n-\n-\twqe_idx = I40IW_RING_GETCURRENT_HEAD(qp->sq_ring);\n-\twqe = qp->sq_base[wqe_idx].elem;\n-\n-\tqp->sq_wrtrk_array[wqe_idx].wqe_size = I40IW_QP_WQE_MIN_SIZE;\n-\n-\tpeek_head = (qp->sq_ring.head + 1) % qp->sq_ring.size;\n-\twqe_0 = qp->sq_base[peek_head].elem;\n-\tif (peek_head)\n-\t\twqe_0[3] = LS_64(!qp->swqe_polarity, I40IWQPSQ_VALID);\n-\telse\n-\t\twqe_0[3] = LS_64(qp->swqe_polarity, I40IWQPSQ_VALID);\n-\n-\tset_64bit_val(wqe, 0, 0);\n-\tset_64bit_val(wqe, 8, 0);\n-\tset_64bit_val(wqe, 16, 0);\n-\n-\theader = LS_64(I40IWQP_OP_NOP, I40IWQPSQ_OPCODE) |\n-\t LS_64(signaled, I40IWQPSQ_SIGCOMPL) |\n-\t LS_64(qp->swqe_polarity, I40IWQPSQ_VALID) | nop_signature++;\n-\n-\twmb();\t/* Memory barrier to ensure data is written before valid bit is set */\n-\n-\tset_64bit_val(wqe, 24, header);\n-\treturn 0;\n-}\n-\n-/**\n- * i40iw_qp_post_wr - post wr to hrdware\n- * @qp: hw qp ptr\n- */\n-void i40iw_qp_post_wr(struct i40iw_qp_uk *qp)\n-{\n-\tu64 temp;\n-\tu32 hw_sq_tail;\n-\tu32 sw_sq_head;\n-\n-\tmb(); /* valid bit is written and loads completed before reading shadow */\n-\n-\t/* read the doorbell shadow area */\n-\tget_64bit_val(qp->shadow_area, 0, &temp);\n-\n-\thw_sq_tail = (u32)RS_64(temp, I40IW_QP_DBSA_HW_SQ_TAIL);\n-\tsw_sq_head = I40IW_RING_GETCURRENT_HEAD(qp->sq_ring);\n-\tif (sw_sq_head != hw_sq_tail) {\n-\t\tif (sw_sq_head > qp->initial_ring.head) {\n-\t\t\tif ((hw_sq_tail >= qp->initial_ring.head) &&\n-\t\t\t (hw_sq_tail < sw_sq_head)) {\n-\t\t\t\twritel(qp->qp_id, qp->wqe_alloc_reg);\n-\t\t\t}\n-\t\t} else if (sw_sq_head != qp->initial_ring.head) {\n-\t\t\tif ((hw_sq_tail >= qp->initial_ring.head) ||\n-\t\t\t (hw_sq_tail < sw_sq_head)) {\n-\t\t\t\twritel(qp->qp_id, qp->wqe_alloc_reg);\n-\t\t\t}\n-\t\t}\n-\t}\n-\n-\tqp->initial_ring.head = qp->sq_ring.head;\n-}\n-\n-/**\n- * i40iw_qp_ring_push_db - ring qp doorbell\n- * @qp: hw qp ptr\n- * @wqe_idx: wqe index\n- */\n-static void i40iw_qp_ring_push_db(struct i40iw_qp_uk *qp, u32 wqe_idx)\n-{\n-\tset_32bit_val(qp->push_db, 0, LS_32((wqe_idx >> 2), I40E_PFPE_WQEALLOC_WQE_DESC_INDEX) | qp->qp_id);\n-\tqp->initial_ring.head = I40IW_RING_GETCURRENT_HEAD(qp->sq_ring);\n-}\n-\n-/**\n- * i40iw_qp_get_next_send_wqe - return next wqe ptr\n- * @qp: hw qp ptr\n- * @wqe_idx: return wqe index\n- * @wqe_size: size of sq wqe\n- */\n-u64 *i40iw_qp_get_next_send_wqe(struct i40iw_qp_uk *qp,\n-\t\t\t\tu32 *wqe_idx,\n-\t\t\t\tu8 wqe_size,\n-\t\t\t\tu32 total_size,\n-\t\t\t\tu64 wr_id\n-\t\t\t\t)\n-{\n-\tu64 *wqe = NULL;\n-\tu64 wqe_ptr;\n-\tu32 peek_head = 0;\n-\tu16 offset;\n-\tenum i40iw_status_code ret_code = 0;\n-\tu8 nop_wqe_cnt = 0, i;\n-\tu64 *wqe_0 = NULL;\n-\n-\t*wqe_idx = I40IW_RING_GETCURRENT_HEAD(qp->sq_ring);\n-\n-\tif (!*wqe_idx)\n-\t\tqp->swqe_polarity = !qp->swqe_polarity;\n-\twqe_ptr = (uintptr_t)qp->sq_base[*wqe_idx].elem;\n-\toffset = (u16)(wqe_ptr) & 0x7F;\n-\tif ((offset + wqe_size) > I40IW_QP_WQE_MAX_SIZE) {\n-\t\tnop_wqe_cnt = (u8)(I40IW_QP_WQE_MAX_SIZE - offset) / I40IW_QP_WQE_MIN_SIZE;\n-\t\tfor (i = 0; i < nop_wqe_cnt; i++) {\n-\t\t\ti40iw_nop_1(qp);\n-\t\t\tI40IW_RING_MOVE_HEAD(qp->sq_ring, ret_code);\n-\t\t\tif (ret_code)\n-\t\t\t\treturn NULL;\n-\t\t}\n-\n-\t\t*wqe_idx = I40IW_RING_GETCURRENT_HEAD(qp->sq_ring);\n-\t\tif (!*wqe_idx)\n-\t\t\tqp->swqe_polarity 
= !qp->swqe_polarity;\n-\t}\n-\n-\tif (((*wqe_idx & 3) == 1) && (wqe_size == I40IW_WQE_SIZE_64)) {\n-\t\ti40iw_nop_1(qp);\n-\t\tI40IW_RING_MOVE_HEAD(qp->sq_ring, ret_code);\n-\t\tif (ret_code)\n-\t\t\treturn NULL;\n-\t\t*wqe_idx = I40IW_RING_GETCURRENT_HEAD(qp->sq_ring);\n-\t\tif (!*wqe_idx)\n-\t\t\tqp->swqe_polarity = !qp->swqe_polarity;\n-\t}\n-\tI40IW_RING_MOVE_HEAD_BY_COUNT(qp->sq_ring,\n-\t\t\t\t wqe_size / I40IW_QP_WQE_MIN_SIZE, ret_code);\n-\tif (ret_code)\n-\t\treturn NULL;\n-\n-\twqe = qp->sq_base[*wqe_idx].elem;\n-\n-\tpeek_head = I40IW_RING_GETCURRENT_HEAD(qp->sq_ring);\n-\twqe_0 = qp->sq_base[peek_head].elem;\n-\n-\tif (((peek_head & 3) == 1) || ((peek_head & 3) == 3)) {\n-\t\tif (RS_64(wqe_0[3], I40IWQPSQ_VALID) != !qp->swqe_polarity)\n-\t\t\twqe_0[3] = LS_64(!qp->swqe_polarity, I40IWQPSQ_VALID);\n-\t}\n-\n-\tqp->sq_wrtrk_array[*wqe_idx].wrid = wr_id;\n-\tqp->sq_wrtrk_array[*wqe_idx].wr_len = total_size;\n-\tqp->sq_wrtrk_array[*wqe_idx].wqe_size = wqe_size;\n-\treturn wqe;\n-}\n-\n-/**\n- * i40iw_set_fragment - set fragment in wqe\n- * @wqe: wqe for setting fragment\n- * @offset: offset value\n- * @sge: sge length and stag\n- */\n-static void i40iw_set_fragment(u64 *wqe, u32 offset, struct i40iw_sge *sge)\n-{\n-\tif (sge) {\n-\t\tset_64bit_val(wqe, offset, LS_64(sge->tag_off, I40IWQPSQ_FRAG_TO));\n-\t\tset_64bit_val(wqe, (offset + 8),\n-\t\t\t (LS_64(sge->len, I40IWQPSQ_FRAG_LEN) |\n-\t\t\t LS_64(sge->stag, I40IWQPSQ_FRAG_STAG)));\n-\t}\n-}\n-\n-/**\n- * i40iw_qp_get_next_recv_wqe - get next qp's rcv wqe\n- * @qp: hw qp ptr\n- * @wqe_idx: return wqe index\n- */\n-u64 *i40iw_qp_get_next_recv_wqe(struct i40iw_qp_uk *qp, u32 *wqe_idx)\n-{\n-\tu64 *wqe = NULL;\n-\tenum i40iw_status_code ret_code;\n-\n-\tif (I40IW_RING_FULL_ERR(qp->rq_ring))\n-\t\treturn NULL;\n-\n-\tI40IW_ATOMIC_RING_MOVE_HEAD(qp->rq_ring, *wqe_idx, ret_code);\n-\tif (ret_code)\n-\t\treturn NULL;\n-\tif (!*wqe_idx)\n-\t\tqp->rwqe_polarity = !qp->rwqe_polarity;\n-\t/* rq_wqe_size_multiplier is no of qwords in one rq wqe */\n-\twqe = qp->rq_base[*wqe_idx * (qp->rq_wqe_size_multiplier >> 2)].elem;\n-\n-\treturn wqe;\n-}\n-\n-/**\n- * i40iw_rdma_write - rdma write operation\n- * @qp: hw qp ptr\n- * @info: post sq information\n- * @post_sq: flag to post sq\n- */\n-static enum i40iw_status_code i40iw_rdma_write(struct i40iw_qp_uk *qp,\n-\t\t\t\t\t struct i40iw_post_sq_info *info,\n-\t\t\t\t\t bool post_sq)\n-{\n-\tu64 header;\n-\tu64 *wqe;\n-\tstruct i40iw_rdma_write *op_info;\n-\tu32 i, wqe_idx;\n-\tu32 total_size = 0, byte_off;\n-\tenum i40iw_status_code ret_code;\n-\tbool read_fence = false;\n-\tu8 wqe_size;\n-\n-\top_info = &info->op.rdma_write;\n-\tif (op_info->num_lo_sges > qp->max_sq_frag_cnt)\n-\t\treturn I40IW_ERR_INVALID_FRAG_COUNT;\n-\n-\tfor (i = 0; i < op_info->num_lo_sges; i++)\n-\t\ttotal_size += op_info->lo_sg_list[i].len;\n-\n-\tif (total_size > I40IW_MAX_OUTBOUND_MESSAGE_SIZE)\n-\t\treturn I40IW_ERR_QP_INVALID_MSG_SIZE;\n-\n-\tread_fence |= info->read_fence;\n-\n-\tret_code = i40iw_fragcnt_to_wqesize_sq(op_info->num_lo_sges, &wqe_size);\n-\tif (ret_code)\n-\t\treturn ret_code;\n-\n-\twqe = i40iw_qp_get_next_send_wqe(qp, &wqe_idx, wqe_size, total_size, info->wr_id);\n-\tif (!wqe)\n-\t\treturn I40IW_ERR_QP_TOOMANY_WRS_POSTED;\n-\tset_64bit_val(wqe, 16,\n-\t\t LS_64(op_info->rem_addr.tag_off, I40IWQPSQ_FRAG_TO));\n-\tif (!op_info->rem_addr.stag)\n-\t\treturn I40IW_ERR_BAD_STAG;\n-\n-\theader = LS_64(op_info->rem_addr.stag, I40IWQPSQ_REMSTAG) |\n-\t\t LS_64(I40IWQP_OP_RDMA_WRITE, I40IWQPSQ_OPCODE) |\n-\t\t 
LS_64((op_info->num_lo_sges > 1 ? (op_info->num_lo_sges - 1) : 0), I40IWQPSQ_ADDFRAGCNT) |\n-\t\t LS_64(read_fence, I40IWQPSQ_READFENCE) |\n-\t\t LS_64(info->local_fence, I40IWQPSQ_LOCALFENCE) |\n-\t\t LS_64(info->signaled, I40IWQPSQ_SIGCOMPL) |\n-\t\t LS_64(qp->swqe_polarity, I40IWQPSQ_VALID);\n-\n-\ti40iw_set_fragment(wqe, 0, op_info->lo_sg_list);\n-\n-\tfor (i = 1, byte_off = 32; i < op_info->num_lo_sges; i++) {\n-\t\ti40iw_set_fragment(wqe, byte_off, &op_info->lo_sg_list[i]);\n-\t\tbyte_off += 16;\n-\t}\n-\n-\twmb(); /* make sure WQE is populated before valid bit is set */\n-\n-\tset_64bit_val(wqe, 24, header);\n-\n-\tif (post_sq)\n-\t\ti40iw_qp_post_wr(qp);\n-\n-\treturn 0;\n-}\n-\n-/**\n- * i40iw_rdma_read - rdma read command\n- * @qp: hw qp ptr\n- * @info: post sq information\n- * @inv_stag: flag for inv_stag\n- * @post_sq: flag to post sq\n- */\n-static enum i40iw_status_code i40iw_rdma_read(struct i40iw_qp_uk *qp,\n-\t\t\t\t\t struct i40iw_post_sq_info *info,\n-\t\t\t\t\t bool inv_stag,\n-\t\t\t\t\t bool post_sq)\n-{\n-\tu64 *wqe;\n-\tstruct i40iw_rdma_read *op_info;\n-\tu64 header;\n-\tu32 wqe_idx;\n-\tenum i40iw_status_code ret_code;\n-\tu8 wqe_size;\n-\tbool local_fence = false;\n-\n-\top_info = &info->op.rdma_read;\n-\tret_code = i40iw_fragcnt_to_wqesize_sq(1, &wqe_size);\n-\tif (ret_code)\n-\t\treturn ret_code;\n-\twqe = i40iw_qp_get_next_send_wqe(qp, &wqe_idx, wqe_size, op_info->lo_addr.len, info->wr_id);\n-\tif (!wqe)\n-\t\treturn I40IW_ERR_QP_TOOMANY_WRS_POSTED;\n-\tlocal_fence |= info->local_fence;\n-\n-\tset_64bit_val(wqe, 16, LS_64(op_info->rem_addr.tag_off, I40IWQPSQ_FRAG_TO));\n-\theader = LS_64(op_info->rem_addr.stag, I40IWQPSQ_REMSTAG) |\n-\t\t LS_64((inv_stag ? I40IWQP_OP_RDMA_READ_LOC_INV : I40IWQP_OP_RDMA_READ), I40IWQPSQ_OPCODE) |\n-\t\t LS_64(info->read_fence, I40IWQPSQ_READFENCE) |\n-\t\t LS_64(local_fence, I40IWQPSQ_LOCALFENCE) |\n-\t\t LS_64(info->signaled, I40IWQPSQ_SIGCOMPL) |\n-\t\t LS_64(qp->swqe_polarity, I40IWQPSQ_VALID);\n-\n-\ti40iw_set_fragment(wqe, 0, &op_info->lo_addr);\n-\n-\twmb(); /* make sure WQE is populated before valid bit is set */\n-\n-\tset_64bit_val(wqe, 24, header);\n-\tif (post_sq)\n-\t\ti40iw_qp_post_wr(qp);\n-\n-\treturn 0;\n-}\n-\n-/**\n- * i40iw_send - rdma send command\n- * @qp: hw qp ptr\n- * @info: post sq information\n- * @stag_to_inv: stag_to_inv value\n- * @post_sq: flag to post sq\n- */\n-static enum i40iw_status_code i40iw_send(struct i40iw_qp_uk *qp,\n-\t\t\t\t\t struct i40iw_post_sq_info *info,\n-\t\t\t\t\t u32 stag_to_inv,\n-\t\t\t\t\t bool post_sq)\n-{\n-\tu64 *wqe;\n-\tstruct i40iw_post_send *op_info;\n-\tu64 header;\n-\tu32 i, wqe_idx, total_size = 0, byte_off;\n-\tenum i40iw_status_code ret_code;\n-\tbool read_fence = false;\n-\tu8 wqe_size;\n-\n-\top_info = &info->op.send;\n-\tif (qp->max_sq_frag_cnt < op_info->num_sges)\n-\t\treturn I40IW_ERR_INVALID_FRAG_COUNT;\n-\n-\tfor (i = 0; i < op_info->num_sges; i++)\n-\t\ttotal_size += op_info->sg_list[i].len;\n-\tret_code = i40iw_fragcnt_to_wqesize_sq(op_info->num_sges, &wqe_size);\n-\tif (ret_code)\n-\t\treturn ret_code;\n-\n-\twqe = i40iw_qp_get_next_send_wqe(qp, &wqe_idx, wqe_size, total_size, info->wr_id);\n-\tif (!wqe)\n-\t\treturn I40IW_ERR_QP_TOOMANY_WRS_POSTED;\n-\n-\tread_fence |= info->read_fence;\n-\tset_64bit_val(wqe, 16, 0);\n-\theader = LS_64(stag_to_inv, I40IWQPSQ_REMSTAG) |\n-\t\t LS_64(info->op_type, I40IWQPSQ_OPCODE) |\n-\t\t LS_64((op_info->num_sges > 1 ? 
(op_info->num_sges - 1) : 0),\n-\t\t I40IWQPSQ_ADDFRAGCNT) |\n-\t\t LS_64(read_fence, I40IWQPSQ_READFENCE) |\n-\t\t LS_64(info->local_fence, I40IWQPSQ_LOCALFENCE) |\n-\t\t LS_64(info->signaled, I40IWQPSQ_SIGCOMPL) |\n-\t\t LS_64(qp->swqe_polarity, I40IWQPSQ_VALID);\n-\n-\ti40iw_set_fragment(wqe, 0, op_info->sg_list);\n-\n-\tfor (i = 1, byte_off = 32; i < op_info->num_sges; i++) {\n-\t\ti40iw_set_fragment(wqe, byte_off, &op_info->sg_list[i]);\n-\t\tbyte_off += 16;\n-\t}\n-\n-\twmb(); /* make sure WQE is populated before valid bit is set */\n-\n-\tset_64bit_val(wqe, 24, header);\n-\tif (post_sq)\n-\t\ti40iw_qp_post_wr(qp);\n-\n-\treturn 0;\n-}\n-\n-/**\n- * i40iw_inline_rdma_write - inline rdma write operation\n- * @qp: hw qp ptr\n- * @info: post sq information\n- * @post_sq: flag to post sq\n- */\n-static enum i40iw_status_code i40iw_inline_rdma_write(struct i40iw_qp_uk *qp,\n-\t\t\t\t\t\t struct i40iw_post_sq_info *info,\n-\t\t\t\t\t\t bool post_sq)\n-{\n-\tu64 *wqe;\n-\tu8 *dest, *src;\n-\tstruct i40iw_inline_rdma_write *op_info;\n-\tu64 *push;\n-\tu64 header = 0;\n-\tu32 wqe_idx;\n-\tenum i40iw_status_code ret_code;\n-\tbool read_fence = false;\n-\tu8 wqe_size;\n-\n-\top_info = &info->op.inline_rdma_write;\n-\tif (op_info->len > I40IW_MAX_INLINE_DATA_SIZE)\n-\t\treturn I40IW_ERR_INVALID_INLINE_DATA_SIZE;\n-\n-\tret_code = i40iw_inline_data_size_to_wqesize(op_info->len, &wqe_size);\n-\tif (ret_code)\n-\t\treturn ret_code;\n-\n-\twqe = i40iw_qp_get_next_send_wqe(qp, &wqe_idx, wqe_size, op_info->len, info->wr_id);\n-\tif (!wqe)\n-\t\treturn I40IW_ERR_QP_TOOMANY_WRS_POSTED;\n-\n-\tread_fence |= info->read_fence;\n-\tset_64bit_val(wqe, 16,\n-\t\t LS_64(op_info->rem_addr.tag_off, I40IWQPSQ_FRAG_TO));\n-\n-\theader = LS_64(op_info->rem_addr.stag, I40IWQPSQ_REMSTAG) |\n-\t\t LS_64(I40IWQP_OP_RDMA_WRITE, I40IWQPSQ_OPCODE) |\n-\t\t LS_64(op_info->len, I40IWQPSQ_INLINEDATALEN) |\n-\t\t LS_64(1, I40IWQPSQ_INLINEDATAFLAG) |\n-\t\t LS_64((qp->push_db ? 1 : 0), I40IWQPSQ_PUSHWQE) |\n-\t\t LS_64(read_fence, I40IWQPSQ_READFENCE) |\n-\t\t LS_64(info->local_fence, I40IWQPSQ_LOCALFENCE) |\n-\t\t LS_64(info->signaled, I40IWQPSQ_SIGCOMPL) |\n-\t\t LS_64(qp->swqe_polarity, I40IWQPSQ_VALID);\n-\n-\tdest = (u8 *)wqe;\n-\tsrc = (u8 *)(op_info->data);\n-\n-\tif (op_info->len <= 16) {\n-\t\tmemcpy(dest, src, op_info->len);\n-\t} else {\n-\t\tmemcpy(dest, src, 16);\n-\t\tsrc += 16;\n-\t\tdest = (u8 *)wqe + 32;\n-\t\tmemcpy(dest, src, op_info->len - 16);\n-\t}\n-\n-\twmb(); /* make sure WQE is populated before valid bit is set */\n-\n-\tset_64bit_val(wqe, 24, header);\n-\n-\tif (qp->push_db) {\n-\t\tpush = (u64 *)((uintptr_t)qp->push_wqe + (wqe_idx & 0x3) * 0x20);\n-\t\tmemcpy(push, wqe, (op_info->len > 16) ? 
op_info->len + 16 : 32);\n-\t\ti40iw_qp_ring_push_db(qp, wqe_idx);\n-\t} else {\n-\t\tif (post_sq)\n-\t\t\ti40iw_qp_post_wr(qp);\n-\t}\n-\n-\treturn 0;\n-}\n-\n-/**\n- * i40iw_inline_send - inline send operation\n- * @qp: hw qp ptr\n- * @info: post sq information\n- * @stag_to_inv: remote stag\n- * @post_sq: flag to post sq\n- */\n-static enum i40iw_status_code i40iw_inline_send(struct i40iw_qp_uk *qp,\n-\t\t\t\t\t\tstruct i40iw_post_sq_info *info,\n-\t\t\t\t\t\tu32 stag_to_inv,\n-\t\t\t\t\t\tbool post_sq)\n-{\n-\tu64 *wqe;\n-\tu8 *dest, *src;\n-\tstruct i40iw_post_inline_send *op_info;\n-\tu64 header;\n-\tu32 wqe_idx;\n-\tenum i40iw_status_code ret_code;\n-\tbool read_fence = false;\n-\tu8 wqe_size;\n-\tu64 *push;\n-\n-\top_info = &info->op.inline_send;\n-\tif (op_info->len > I40IW_MAX_INLINE_DATA_SIZE)\n-\t\treturn I40IW_ERR_INVALID_INLINE_DATA_SIZE;\n-\n-\tret_code = i40iw_inline_data_size_to_wqesize(op_info->len, &wqe_size);\n-\tif (ret_code)\n-\t\treturn ret_code;\n-\n-\twqe = i40iw_qp_get_next_send_wqe(qp, &wqe_idx, wqe_size, op_info->len, info->wr_id);\n-\tif (!wqe)\n-\t\treturn I40IW_ERR_QP_TOOMANY_WRS_POSTED;\n-\n-\tread_fence |= info->read_fence;\n-\theader = LS_64(stag_to_inv, I40IWQPSQ_REMSTAG) |\n-\t LS_64(info->op_type, I40IWQPSQ_OPCODE) |\n-\t LS_64(op_info->len, I40IWQPSQ_INLINEDATALEN) |\n-\t LS_64(1, I40IWQPSQ_INLINEDATAFLAG) |\n-\t LS_64((qp->push_db ? 1 : 0), I40IWQPSQ_PUSHWQE) |\n-\t LS_64(read_fence, I40IWQPSQ_READFENCE) |\n-\t LS_64(info->local_fence, I40IWQPSQ_LOCALFENCE) |\n-\t LS_64(info->signaled, I40IWQPSQ_SIGCOMPL) |\n-\t LS_64(qp->swqe_polarity, I40IWQPSQ_VALID);\n-\n-\tdest = (u8 *)wqe;\n-\tsrc = (u8 *)(op_info->data);\n-\n-\tif (op_info->len <= 16) {\n-\t\tmemcpy(dest, src, op_info->len);\n-\t} else {\n-\t\tmemcpy(dest, src, 16);\n-\t\tsrc += 16;\n-\t\tdest = (u8 *)wqe + 32;\n-\t\tmemcpy(dest, src, op_info->len - 16);\n-\t}\n-\n-\twmb(); /* make sure WQE is populated before valid bit is set */\n-\n-\tset_64bit_val(wqe, 24, header);\n-\n-\tif (qp->push_db) {\n-\t\tpush = (u64 *)((uintptr_t)qp->push_wqe + (wqe_idx & 0x3) * 0x20);\n-\t\tmemcpy(push, wqe, (op_info->len > 16) ? 
op_info->len + 16 : 32);\n-\t\ti40iw_qp_ring_push_db(qp, wqe_idx);\n-\t} else {\n-\t\tif (post_sq)\n-\t\t\ti40iw_qp_post_wr(qp);\n-\t}\n-\n-\treturn 0;\n-}\n-\n-/**\n- * i40iw_stag_local_invalidate - stag invalidate operation\n- * @qp: hw qp ptr\n- * @info: post sq information\n- * @post_sq: flag to post sq\n- */\n-static enum i40iw_status_code i40iw_stag_local_invalidate(struct i40iw_qp_uk *qp,\n-\t\t\t\t\t\t\t struct i40iw_post_sq_info *info,\n-\t\t\t\t\t\t\t bool post_sq)\n-{\n-\tu64 *wqe;\n-\tstruct i40iw_inv_local_stag *op_info;\n-\tu64 header;\n-\tu32 wqe_idx;\n-\tbool local_fence = false;\n-\n-\top_info = &info->op.inv_local_stag;\n-\tlocal_fence = info->local_fence;\n-\n-\twqe = i40iw_qp_get_next_send_wqe(qp, &wqe_idx, I40IW_QP_WQE_MIN_SIZE, 0, info->wr_id);\n-\tif (!wqe)\n-\t\treturn I40IW_ERR_QP_TOOMANY_WRS_POSTED;\n-\tset_64bit_val(wqe, 0, 0);\n-\tset_64bit_val(wqe, 8,\n-\t\t LS_64(op_info->target_stag, I40IWQPSQ_LOCSTAG));\n-\tset_64bit_val(wqe, 16, 0);\n-\theader = LS_64(I40IW_OP_TYPE_INV_STAG, I40IWQPSQ_OPCODE) |\n-\t LS_64(info->read_fence, I40IWQPSQ_READFENCE) |\n-\t LS_64(local_fence, I40IWQPSQ_LOCALFENCE) |\n-\t LS_64(info->signaled, I40IWQPSQ_SIGCOMPL) |\n-\t LS_64(qp->swqe_polarity, I40IWQPSQ_VALID);\n-\n-\twmb(); /* make sure WQE is populated before valid bit is set */\n-\n-\tset_64bit_val(wqe, 24, header);\n-\n-\tif (post_sq)\n-\t\ti40iw_qp_post_wr(qp);\n-\n-\treturn 0;\n-}\n-\n-/**\n- * i40iw_mw_bind - Memory Window bind operation\n- * @qp: hw qp ptr\n- * @info: post sq information\n- * @post_sq: flag to post sq\n- */\n-static enum i40iw_status_code i40iw_mw_bind(struct i40iw_qp_uk *qp,\n-\t\t\t\t\t struct i40iw_post_sq_info *info,\n-\t\t\t\t\t bool post_sq)\n-{\n-\tu64 *wqe;\n-\tstruct i40iw_bind_window *op_info;\n-\tu64 header;\n-\tu32 wqe_idx;\n-\tbool local_fence = false;\n-\n-\top_info = &info->op.bind_window;\n-\n-\tlocal_fence |= info->local_fence;\n-\twqe = i40iw_qp_get_next_send_wqe(qp, &wqe_idx, I40IW_QP_WQE_MIN_SIZE, 0, info->wr_id);\n-\tif (!wqe)\n-\t\treturn I40IW_ERR_QP_TOOMANY_WRS_POSTED;\n-\tset_64bit_val(wqe, 0, (uintptr_t)op_info->va);\n-\tset_64bit_val(wqe, 8,\n-\t\t LS_64(op_info->mr_stag, I40IWQPSQ_PARENTMRSTAG) |\n-\t\t LS_64(op_info->mw_stag, I40IWQPSQ_MWSTAG));\n-\tset_64bit_val(wqe, 16, op_info->bind_length);\n-\theader = LS_64(I40IW_OP_TYPE_BIND_MW, I40IWQPSQ_OPCODE) |\n-\t LS_64(((op_info->enable_reads << 2) |\n-\t\t (op_info->enable_writes << 3)),\n-\t\t I40IWQPSQ_STAGRIGHTS) |\n-\t LS_64((op_info->addressing_type == I40IW_ADDR_TYPE_VA_BASED ? 
1 : 0),\n-\t\t I40IWQPSQ_VABASEDTO) |\n-\t LS_64(info->read_fence, I40IWQPSQ_READFENCE) |\n-\t LS_64(local_fence, I40IWQPSQ_LOCALFENCE) |\n-\t LS_64(info->signaled, I40IWQPSQ_SIGCOMPL) |\n-\t LS_64(qp->swqe_polarity, I40IWQPSQ_VALID);\n-\n-\twmb(); /* make sure WQE is populated before valid bit is set */\n-\n-\tset_64bit_val(wqe, 24, header);\n-\n-\tif (post_sq)\n-\t\ti40iw_qp_post_wr(qp);\n-\n-\treturn 0;\n-}\n-\n-/**\n- * i40iw_post_receive - post receive wqe\n- * @qp: hw qp ptr\n- * @info: post rq information\n- */\n-static enum i40iw_status_code i40iw_post_receive(struct i40iw_qp_uk *qp,\n-\t\t\t\t\t\t struct i40iw_post_rq_info *info)\n-{\n-\tu64 *wqe;\n-\tu64 header;\n-\tu32 total_size = 0, wqe_idx, i, byte_off;\n-\n-\tif (qp->max_rq_frag_cnt < info->num_sges)\n-\t\treturn I40IW_ERR_INVALID_FRAG_COUNT;\n-\tfor (i = 0; i < info->num_sges; i++)\n-\t\ttotal_size += info->sg_list[i].len;\n-\twqe = i40iw_qp_get_next_recv_wqe(qp, &wqe_idx);\n-\tif (!wqe)\n-\t\treturn I40IW_ERR_QP_TOOMANY_WRS_POSTED;\n-\n-\tqp->rq_wrid_array[wqe_idx] = info->wr_id;\n-\tset_64bit_val(wqe, 16, 0);\n-\n-\theader = LS_64((info->num_sges > 1 ? (info->num_sges - 1) : 0),\n-\t\t I40IWQPSQ_ADDFRAGCNT) |\n-\t LS_64(qp->rwqe_polarity, I40IWQPSQ_VALID);\n-\n-\ti40iw_set_fragment(wqe, 0, info->sg_list);\n-\n-\tfor (i = 1, byte_off = 32; i < info->num_sges; i++) {\n-\t\ti40iw_set_fragment(wqe, byte_off, &info->sg_list[i]);\n-\t\tbyte_off += 16;\n-\t}\n-\n-\twmb(); /* make sure WQE is populated before valid bit is set */\n-\n-\tset_64bit_val(wqe, 24, header);\n-\n-\treturn 0;\n-}\n-\n-/**\n- * i40iw_cq_request_notification - cq notification request (door bell)\n- * @cq: hw cq\n- * @cq_notify: notification type\n- */\n-static void i40iw_cq_request_notification(struct i40iw_cq_uk *cq,\n-\t\t\t\t\t enum i40iw_completion_notify cq_notify)\n-{\n-\tu64 temp_val;\n-\tu16 sw_cq_sel;\n-\tu8 arm_next_se = 0;\n-\tu8 arm_next = 0;\n-\tu8 arm_seq_num;\n-\n-\tget_64bit_val(cq->shadow_area, 32, &temp_val);\n-\tarm_seq_num = (u8)RS_64(temp_val, I40IW_CQ_DBSA_ARM_SEQ_NUM);\n-\tarm_seq_num++;\n-\n-\tsw_cq_sel = (u16)RS_64(temp_val, I40IW_CQ_DBSA_SW_CQ_SELECT);\n-\tarm_next_se = (u8)RS_64(temp_val, I40IW_CQ_DBSA_ARM_NEXT_SE);\n-\tarm_next_se |= 1;\n-\tif (cq_notify == IW_CQ_COMPL_EVENT)\n-\t\tarm_next = 1;\n-\ttemp_val = LS_64(arm_seq_num, I40IW_CQ_DBSA_ARM_SEQ_NUM) |\n-\t LS_64(sw_cq_sel, I40IW_CQ_DBSA_SW_CQ_SELECT) |\n-\t LS_64(arm_next_se, I40IW_CQ_DBSA_ARM_NEXT_SE) |\n-\t LS_64(arm_next, I40IW_CQ_DBSA_ARM_NEXT);\n-\n-\tset_64bit_val(cq->shadow_area, 32, temp_val);\n-\n-\twmb(); /* make sure WQE is populated before valid bit is set */\n-\n-\twritel(cq->cq_id, cq->cqe_alloc_reg);\n-}\n-\n-/**\n- * i40iw_cq_post_entries - update tail in shadow memory\n- * @cq: hw cq\n- * @count: # of entries processed\n- */\n-static enum i40iw_status_code i40iw_cq_post_entries(struct i40iw_cq_uk *cq,\n-\t\t\t\t\t\t u8 count)\n-{\n-\tI40IW_RING_MOVE_TAIL_BY_COUNT(cq->cq_ring, count);\n-\tset_64bit_val(cq->shadow_area, 0,\n-\t\t I40IW_RING_GETCURRENT_HEAD(cq->cq_ring));\n-\treturn 0;\n-}\n-\n-/**\n- * i40iw_cq_poll_completion - get cq completion info\n- * @cq: hw cq\n- * @info: cq poll information returned\n- * @post_cq: update cq tail\n- */\n-static enum i40iw_status_code i40iw_cq_poll_completion(struct i40iw_cq_uk *cq,\n-\t\t\t\t\t\t struct i40iw_cq_poll_info *info)\n-{\n-\tu64 comp_ctx, qword0, qword2, qword3, wqe_qword;\n-\tu64 *cqe, *sw_wqe;\n-\tstruct i40iw_qp_uk *qp;\n-\tstruct i40iw_ring *pring = NULL;\n-\tu32 wqe_idx, q_type, array_idx = 0;\n-\tenum 
i40iw_status_code ret_code = 0;\n-\tbool move_cq_head = true;\n-\tu8 polarity;\n-\tu8 addl_wqes = 0;\n-\n-\tif (cq->avoid_mem_cflct)\n-\t\tcqe = (u64 *)I40IW_GET_CURRENT_EXTENDED_CQ_ELEMENT(cq);\n-\telse\n-\t\tcqe = (u64 *)I40IW_GET_CURRENT_CQ_ELEMENT(cq);\n-\n-\tget_64bit_val(cqe, 24, &qword3);\n-\tpolarity = (u8)RS_64(qword3, I40IW_CQ_VALID);\n-\n-\tif (polarity != cq->polarity)\n-\t\treturn I40IW_ERR_QUEUE_EMPTY;\n-\n-\tq_type = (u8)RS_64(qword3, I40IW_CQ_SQ);\n-\tinfo->error = (bool)RS_64(qword3, I40IW_CQ_ERROR);\n-\tinfo->push_dropped = (bool)RS_64(qword3, I40IWCQ_PSHDROP);\n-\tif (info->error) {\n-\t\tinfo->comp_status = I40IW_COMPL_STATUS_FLUSHED;\n-\t\tinfo->major_err = (bool)RS_64(qword3, I40IW_CQ_MAJERR);\n-\t\tinfo->minor_err = (bool)RS_64(qword3, I40IW_CQ_MINERR);\n-\t} else {\n-\t\tinfo->comp_status = I40IW_COMPL_STATUS_SUCCESS;\n-\t}\n-\n-\tget_64bit_val(cqe, 0, &qword0);\n-\tget_64bit_val(cqe, 16, &qword2);\n-\n-\tinfo->tcp_seq_num = (u32)RS_64(qword0, I40IWCQ_TCPSEQNUM);\n-\n-\tinfo->qp_id = (u32)RS_64(qword2, I40IWCQ_QPID);\n-\n-\tget_64bit_val(cqe, 8, &comp_ctx);\n-\n-\tinfo->solicited_event = (bool)RS_64(qword3, I40IWCQ_SOEVENT);\n-\tinfo->is_srq = (bool)RS_64(qword3, I40IWCQ_SRQ);\n-\n-\tqp = (struct i40iw_qp_uk *)(unsigned long)comp_ctx;\n-\tif (!qp) {\n-\t\tret_code = I40IW_ERR_QUEUE_DESTROYED;\n-\t\tgoto exit;\n-\t}\n-\twqe_idx = (u32)RS_64(qword3, I40IW_CQ_WQEIDX);\n-\tinfo->qp_handle = (i40iw_qp_handle)(unsigned long)qp;\n-\n-\tif (q_type == I40IW_CQE_QTYPE_RQ) {\n-\t\tarray_idx = (wqe_idx * 4) / qp->rq_wqe_size_multiplier;\n-\t\tif (info->comp_status == I40IW_COMPL_STATUS_FLUSHED) {\n-\t\t\tinfo->wr_id = qp->rq_wrid_array[qp->rq_ring.tail];\n-\t\t\tarray_idx = qp->rq_ring.tail;\n-\t\t} else {\n-\t\t\tinfo->wr_id = qp->rq_wrid_array[array_idx];\n-\t\t}\n-\n-\t\tinfo->op_type = I40IW_OP_TYPE_REC;\n-\t\tif (qword3 & I40IWCQ_STAG_MASK) {\n-\t\t\tinfo->stag_invalid_set = true;\n-\t\t\tinfo->inv_stag = (u32)RS_64(qword2, I40IWCQ_INVSTAG);\n-\t\t} else {\n-\t\t\tinfo->stag_invalid_set = false;\n-\t\t}\n-\t\tinfo->bytes_xfered = (u32)RS_64(qword0, I40IWCQ_PAYLDLEN);\n-\t\tI40IW_RING_SET_TAIL(qp->rq_ring, array_idx + 1);\n-\t\tpring = &qp->rq_ring;\n-\t} else {\n-\t\tif (qp->first_sq_wq) {\n-\t\t\tqp->first_sq_wq = false;\n-\t\t\tif (!wqe_idx && (qp->sq_ring.head == qp->sq_ring.tail)) {\n-\t\t\t\tI40IW_RING_MOVE_HEAD_NOCHECK(cq->cq_ring);\n-\t\t\t\tI40IW_RING_MOVE_TAIL(cq->cq_ring);\n-\t\t\t\tset_64bit_val(cq->shadow_area, 0,\n-\t\t\t\t\t I40IW_RING_GETCURRENT_HEAD(cq->cq_ring));\n-\t\t\t\tmemset(info, 0, sizeof(struct i40iw_cq_poll_info));\n-\t\t\t\treturn i40iw_cq_poll_completion(cq, info);\n-\t\t\t}\n-\t\t}\n-\n-\t\tif (info->comp_status != I40IW_COMPL_STATUS_FLUSHED) {\n-\t\t\tinfo->wr_id = qp->sq_wrtrk_array[wqe_idx].wrid;\n-\t\t\tinfo->bytes_xfered = qp->sq_wrtrk_array[wqe_idx].wr_len;\n-\n-\t\t\tinfo->op_type = (u8)RS_64(qword3, I40IWCQ_OP);\n-\t\t\tsw_wqe = qp->sq_base[wqe_idx].elem;\n-\t\t\tget_64bit_val(sw_wqe, 24, &wqe_qword);\n-\n-\t\t\taddl_wqes = qp->sq_wrtrk_array[wqe_idx].wqe_size / I40IW_QP_WQE_MIN_SIZE;\n-\t\t\tI40IW_RING_SET_TAIL(qp->sq_ring, (wqe_idx + addl_wqes));\n-\t\t} else {\n-\t\t\tdo {\n-\t\t\t\tu8 op_type;\n-\t\t\t\tu32 tail;\n-\n-\t\t\t\ttail = qp->sq_ring.tail;\n-\t\t\t\tsw_wqe = qp->sq_base[tail].elem;\n-\t\t\t\tget_64bit_val(sw_wqe, 24, &wqe_qword);\n-\t\t\t\top_type = (u8)RS_64(wqe_qword, I40IWQPSQ_OPCODE);\n-\t\t\t\tinfo->op_type = op_type;\n-\t\t\t\taddl_wqes = qp->sq_wrtrk_array[tail].wqe_size / 
I40IW_QP_WQE_MIN_SIZE;\n-\t\t\t\tI40IW_RING_SET_TAIL(qp->sq_ring, (tail + addl_wqes));\n-\t\t\t\tif (op_type != I40IWQP_OP_NOP) {\n-\t\t\t\t\tinfo->wr_id = qp->sq_wrtrk_array[tail].wrid;\n-\t\t\t\t\tinfo->bytes_xfered = qp->sq_wrtrk_array[tail].wr_len;\n-\t\t\t\t\tbreak;\n-\t\t\t\t}\n-\t\t\t} while (1);\n-\t\t}\n-\t\tpring = &qp->sq_ring;\n-\t}\n-\n-\tret_code = 0;\n-\n-exit:\n-\tif (!ret_code &&\n-\t (info->comp_status == I40IW_COMPL_STATUS_FLUSHED))\n-\t\tif (pring && (I40IW_RING_MORE_WORK(*pring)))\n-\t\t\tmove_cq_head = false;\n-\n-\tif (move_cq_head) {\n-\t\tI40IW_RING_MOVE_HEAD_NOCHECK(cq->cq_ring);\n-\n-\t\tif (I40IW_RING_GETCURRENT_HEAD(cq->cq_ring) == 0)\n-\t\t\tcq->polarity ^= 1;\n-\n-\t\tI40IW_RING_MOVE_TAIL(cq->cq_ring);\n-\t\tset_64bit_val(cq->shadow_area, 0,\n-\t\t\t I40IW_RING_GETCURRENT_HEAD(cq->cq_ring));\n-\t} else {\n-\t\tif (info->is_srq)\n-\t\t\treturn ret_code;\n-\t\tqword3 &= ~I40IW_CQ_WQEIDX_MASK;\n-\t\tqword3 |= LS_64(pring->tail, I40IW_CQ_WQEIDX);\n-\t\tset_64bit_val(cqe, 24, qword3);\n-\t}\n-\n-\treturn ret_code;\n-}\n-\n-/**\n- * i40iw_get_wqe_shift - get shift count for maximum wqe size\n- * @sge: Maximum Scatter Gather Elements wqe\n- * @inline_data: Maximum inline data size\n- * @shift: Returns the shift needed based on sge\n- *\n- * Shift can be used to left shift the wqe size based on number of SGEs and inlind data size.\n- * For 1 SGE or inline data <= 16, shift = 0 (wqe size of 32 bytes).\n- * For 2 or 3 SGEs or inline data <= 48, shift = 1 (wqe size of 64 bytes).\n- * Shift of 2 otherwise (wqe size of 128 bytes).\n- */\n-void i40iw_get_wqe_shift(u32 sge, u32 inline_data, u8 *shift)\n-{\n-\t*shift = 0;\n-\tif (sge > 1 || inline_data > 16)\n-\t\t*shift = (sge < 4 && inline_data <= 48) ? 1 : 2;\n-}\n-\n-/*\n- * i40iw_get_sqdepth - get SQ depth (quantas)\n- * @sq_size: SQ size\n- * @shift: shift which determines size of WQE\n- * @sqdepth: depth of SQ\n- *\n- */\n-enum i40iw_status_code i40iw_get_sqdepth(u32 sq_size, u8 shift, u32 *sqdepth)\n-{\n-\t*sqdepth = roundup_pow_of_two((sq_size << shift) + I40IW_SQ_RSVD);\n-\n-\tif (*sqdepth < (I40IW_QP_SW_MIN_WQSIZE << shift))\n-\t\t*sqdepth = I40IW_QP_SW_MIN_WQSIZE << shift;\n-\telse if (*sqdepth > I40IW_QP_SW_MAX_SQ_QUANTAS)\n-\t\treturn I40IW_ERR_INVALID_SIZE;\n-\n-\treturn 0;\n-}\n-\n-/*\n- * i40iw_get_rq_depth - get RQ depth (quantas)\n- * @rq_size: RQ size\n- * @shift: shift which determines size of WQE\n- * @rqdepth: depth of RQ\n- *\n- */\n-enum i40iw_status_code i40iw_get_rqdepth(u32 rq_size, u8 shift, u32 *rqdepth)\n-{\n-\t*rqdepth = roundup_pow_of_two((rq_size << shift) + I40IW_RQ_RSVD);\n-\n-\tif (*rqdepth < (I40IW_QP_SW_MIN_WQSIZE << shift))\n-\t\t*rqdepth = I40IW_QP_SW_MIN_WQSIZE << shift;\n-\telse if (*rqdepth > I40IW_QP_SW_MAX_RQ_QUANTAS)\n-\t\treturn I40IW_ERR_INVALID_SIZE;\n-\n-\treturn 0;\n-}\n-\n-static const struct i40iw_qp_uk_ops iw_qp_uk_ops = {\n-\t.iw_qp_post_wr = i40iw_qp_post_wr,\n-\t.iw_qp_ring_push_db = i40iw_qp_ring_push_db,\n-\t.iw_rdma_write = i40iw_rdma_write,\n-\t.iw_rdma_read = i40iw_rdma_read,\n-\t.iw_send = i40iw_send,\n-\t.iw_inline_rdma_write = i40iw_inline_rdma_write,\n-\t.iw_inline_send = i40iw_inline_send,\n-\t.iw_stag_local_invalidate = i40iw_stag_local_invalidate,\n-\t.iw_mw_bind = i40iw_mw_bind,\n-\t.iw_post_receive = i40iw_post_receive,\n-\t.iw_post_nop = i40iw_nop\n-};\n-\n-static const struct i40iw_cq_ops iw_cq_ops = {\n-\t.iw_cq_request_notification = i40iw_cq_request_notification,\n-\t.iw_cq_poll_completion = i40iw_cq_poll_completion,\n-\t.iw_cq_post_entries = 
i40iw_cq_post_entries,\n-\t.iw_cq_clean = i40iw_clean_cq\n-};\n-\n-static const struct i40iw_device_uk_ops iw_device_uk_ops = {\n-\t.iwarp_cq_uk_init = i40iw_cq_uk_init,\n-\t.iwarp_qp_uk_init = i40iw_qp_uk_init,\n-};\n-\n-/**\n- * i40iw_qp_uk_init - initialize shared qp\n- * @qp: hw qp (user and kernel)\n- * @info: qp initialization info\n- *\n- * initializes the vars used in both user and kernel mode.\n- * size of the wqe depends on numbers of max. fragements\n- * allowed. Then size of wqe * the number of wqes should be the\n- * amount of memory allocated for sq and rq. If srq is used,\n- * then rq_base will point to one rq wqe only (not the whole\n- * array of wqes)\n- */\n-enum i40iw_status_code i40iw_qp_uk_init(struct i40iw_qp_uk *qp,\n-\t\t\t\t\tstruct i40iw_qp_uk_init_info *info)\n-{\n-\tenum i40iw_status_code ret_code = 0;\n-\tu32 sq_ring_size;\n-\tu8 sqshift, rqshift;\n-\n-\tif (info->max_sq_frag_cnt > I40IW_MAX_WQ_FRAGMENT_COUNT)\n-\t\treturn I40IW_ERR_INVALID_FRAG_COUNT;\n-\n-\tif (info->max_rq_frag_cnt > I40IW_MAX_WQ_FRAGMENT_COUNT)\n-\t\treturn I40IW_ERR_INVALID_FRAG_COUNT;\n-\ti40iw_get_wqe_shift(info->max_sq_frag_cnt, info->max_inline_data, &sqshift);\n-\n-\tqp->sq_base = info->sq;\n-\tqp->rq_base = info->rq;\n-\tqp->shadow_area = info->shadow_area;\n-\tqp->sq_wrtrk_array = info->sq_wrtrk_array;\n-\tqp->rq_wrid_array = info->rq_wrid_array;\n-\n-\tqp->wqe_alloc_reg = info->wqe_alloc_reg;\n-\tqp->qp_id = info->qp_id;\n-\n-\tqp->sq_size = info->sq_size;\n-\tqp->push_db = info->push_db;\n-\tqp->push_wqe = info->push_wqe;\n-\n-\tqp->max_sq_frag_cnt = info->max_sq_frag_cnt;\n-\tsq_ring_size = qp->sq_size << sqshift;\n-\n-\tI40IW_RING_INIT(qp->sq_ring, sq_ring_size);\n-\tI40IW_RING_INIT(qp->initial_ring, sq_ring_size);\n-\tI40IW_RING_MOVE_HEAD(qp->sq_ring, ret_code);\n-\tI40IW_RING_MOVE_TAIL(qp->sq_ring);\n-\tI40IW_RING_MOVE_HEAD(qp->initial_ring, ret_code);\n-\tqp->swqe_polarity = 1;\n-\tqp->first_sq_wq = true;\n-\tqp->swqe_polarity_deferred = 1;\n-\tqp->rwqe_polarity = 0;\n-\n-\tif (!qp->use_srq) {\n-\t\tqp->rq_size = info->rq_size;\n-\t\tqp->max_rq_frag_cnt = info->max_rq_frag_cnt;\n-\t\tI40IW_RING_INIT(qp->rq_ring, qp->rq_size);\n-\t\tswitch (info->abi_ver) {\n-\t\tcase 4:\n-\t\t\ti40iw_get_wqe_shift(info->max_rq_frag_cnt, 0, &rqshift);\n-\t\t\tbreak;\n-\t\tcase 5: /* fallthrough until next ABI version */\n-\t\tdefault:\n-\t\t\trqshift = I40IW_MAX_RQ_WQE_SHIFT;\n-\t\t\tbreak;\n-\t\t}\n-\t\tqp->rq_wqe_size = rqshift;\n-\t\tqp->rq_wqe_size_multiplier = 4 << rqshift;\n-\t}\n-\tqp->ops = iw_qp_uk_ops;\n-\n-\treturn ret_code;\n-}\n-\n-/**\n- * i40iw_cq_uk_init - initialize shared cq (user and kernel)\n- * @cq: hw cq\n- * @info: hw cq initialization info\n- */\n-enum i40iw_status_code i40iw_cq_uk_init(struct i40iw_cq_uk *cq,\n-\t\t\t\t\tstruct i40iw_cq_uk_init_info *info)\n-{\n-\tif ((info->cq_size < I40IW_MIN_CQ_SIZE) ||\n-\t (info->cq_size > I40IW_MAX_CQ_SIZE))\n-\t\treturn I40IW_ERR_INVALID_SIZE;\n-\tcq->cq_base = (struct i40iw_cqe *)info->cq_base;\n-\tcq->cq_id = info->cq_id;\n-\tcq->cq_size = info->cq_size;\n-\tcq->cqe_alloc_reg = info->cqe_alloc_reg;\n-\tcq->shadow_area = info->shadow_area;\n-\tcq->avoid_mem_cflct = info->avoid_mem_cflct;\n-\n-\tI40IW_RING_INIT(cq->cq_ring, cq->cq_size);\n-\tcq->polarity = 1;\n-\tcq->ops = iw_cq_ops;\n-\n-\treturn 0;\n-}\n-\n-/**\n- * i40iw_device_init_uk - setup routines for iwarp shared device\n- * @dev: iwarp shared (user and kernel)\n- */\n-void i40iw_device_init_uk(struct i40iw_dev_uk *dev)\n-{\n-\tdev->ops_uk = 
iw_device_uk_ops;\n-}\n-\n-/**\n- * i40iw_clean_cq - clean cq entries\n- * @ queue completion context\n- * @cq: cq to clean\n- */\n-void i40iw_clean_cq(void *queue, struct i40iw_cq_uk *cq)\n-{\n-\tu64 *cqe;\n-\tu64 qword3, comp_ctx;\n-\tu32 cq_head;\n-\tu8 polarity, temp;\n-\n-\tcq_head = cq->cq_ring.head;\n-\ttemp = cq->polarity;\n-\tdo {\n-\t\tif (cq->avoid_mem_cflct)\n-\t\t\tcqe = (u64 *)&(((struct i40iw_extended_cqe *)cq->cq_base)[cq_head]);\n-\t\telse\n-\t\t\tcqe = (u64 *)&cq->cq_base[cq_head];\n-\t\tget_64bit_val(cqe, 24, &qword3);\n-\t\tpolarity = (u8)RS_64(qword3, I40IW_CQ_VALID);\n-\n-\t\tif (polarity != temp)\n-\t\t\tbreak;\n-\n-\t\tget_64bit_val(cqe, 8, &comp_ctx);\n-\t\tif ((void *)(unsigned long)comp_ctx == queue)\n-\t\t\tset_64bit_val(cqe, 8, 0);\n-\n-\t\tcq_head = (cq_head + 1) % cq->cq_ring.size;\n-\t\tif (!cq_head)\n-\t\t\ttemp ^= 1;\n-\t} while (true);\n-}\n-\n-/**\n- * i40iw_nop - send a nop\n- * @qp: hw qp ptr\n- * @wr_id: work request id\n- * @signaled: flag if signaled for completion\n- * @post_sq: flag to post sq\n- */\n-enum i40iw_status_code i40iw_nop(struct i40iw_qp_uk *qp,\n-\t\t\t\t u64 wr_id,\n-\t\t\t\t bool signaled,\n-\t\t\t\t bool post_sq)\n-{\n-\tu64 header, *wqe;\n-\tu32 wqe_idx;\n-\n-\twqe = i40iw_qp_get_next_send_wqe(qp, &wqe_idx, I40IW_QP_WQE_MIN_SIZE, 0, wr_id);\n-\tif (!wqe)\n-\t\treturn I40IW_ERR_QP_TOOMANY_WRS_POSTED;\n-\tset_64bit_val(wqe, 0, 0);\n-\tset_64bit_val(wqe, 8, 0);\n-\tset_64bit_val(wqe, 16, 0);\n-\n-\theader = LS_64(I40IWQP_OP_NOP, I40IWQPSQ_OPCODE) |\n-\t LS_64(signaled, I40IWQPSQ_SIGCOMPL) |\n-\t LS_64(qp->swqe_polarity, I40IWQPSQ_VALID);\n-\n-\twmb(); /* make sure WQE is populated before valid bit is set */\n-\n-\tset_64bit_val(wqe, 24, header);\n-\tif (post_sq)\n-\t\ti40iw_qp_post_wr(qp);\n-\n-\treturn 0;\n-}\n-\n-/**\n- * i40iw_fragcnt_to_wqesize_sq - calculate wqe size based on fragment count for SQ\n- * @frag_cnt: number of fragments\n- * @wqe_size: size of sq wqe returned\n- */\n-enum i40iw_status_code i40iw_fragcnt_to_wqesize_sq(u32 frag_cnt, u8 *wqe_size)\n-{\n-\tswitch (frag_cnt) {\n-\tcase 0:\n-\tcase 1:\n-\t\t*wqe_size = I40IW_QP_WQE_MIN_SIZE;\n-\t\tbreak;\n-\tcase 2:\n-\tcase 3:\n-\t\t*wqe_size = 64;\n-\t\tbreak;\n-\tcase 4:\n-\tcase 5:\n-\t\t*wqe_size = 96;\n-\t\tbreak;\n-\tcase 6:\n-\tcase 7:\n-\t\t*wqe_size = 128;\n-\t\tbreak;\n-\tdefault:\n-\t\treturn I40IW_ERR_INVALID_FRAG_COUNT;\n-\t}\n-\n-\treturn 0;\n-}\n-\n-/**\n- * i40iw_fragcnt_to_wqesize_rq - calculate wqe size based on fragment count for RQ\n- * @frag_cnt: number of fragments\n- * @wqe_size: size of rq wqe returned\n- */\n-enum i40iw_status_code i40iw_fragcnt_to_wqesize_rq(u32 frag_cnt, u8 *wqe_size)\n-{\n-\tswitch (frag_cnt) {\n-\tcase 0:\n-\tcase 1:\n-\t\t*wqe_size = 32;\n-\t\tbreak;\n-\tcase 2:\n-\tcase 3:\n-\t\t*wqe_size = 64;\n-\t\tbreak;\n-\tcase 4:\n-\tcase 5:\n-\tcase 6:\n-\tcase 7:\n-\t\t*wqe_size = 128;\n-\t\tbreak;\n-\tdefault:\n-\t\treturn I40IW_ERR_INVALID_FRAG_COUNT;\n-\t}\n-\n-\treturn 0;\n-}\n-\n-/**\n- * i40iw_inline_data_size_to_wqesize - based on inline data, wqe size\n- * @data_size: data size for inline\n- * @wqe_size: size of sq wqe returned\n- */\n-enum i40iw_status_code i40iw_inline_data_size_to_wqesize(u32 data_size,\n-\t\t\t\t\t\t\t u8 *wqe_size)\n-{\n-\tif (data_size > I40IW_MAX_INLINE_DATA_SIZE)\n-\t\treturn I40IW_ERR_INVALID_INLINE_DATA_SIZE;\n-\n-\tif (data_size <= 16)\n-\t\t*wqe_size = I40IW_QP_WQE_MIN_SIZE;\n-\telse\n-\t\t*wqe_size = 64;\n-\n-\treturn 0;\n-}\ndiff --git a/drivers/infiniband/hw/i40iw/i40iw_user.h 
b/drivers/infiniband/hw/i40iw/i40iw_user.h\ndeleted file mode 100644\nindex b125925..0000000\n--- a/drivers/infiniband/hw/i40iw/i40iw_user.h\n+++ /dev/null\n@@ -1,430 +0,0 @@\n-/*******************************************************************************\n-*\n-* Copyright (c) 2015-2016 Intel Corporation. All rights reserved.\n-*\n-* This software is available to you under a choice of one of two\n-* licenses. You may choose to be licensed under the terms of the GNU\n-* General Public License (GPL) Version 2, available from the file\n-* COPYING in the main directory of this source tree, or the\n-* OpenFabrics.org BSD license below:\n-*\n-* Redistribution and use in source and binary forms, with or\n-* without modification, are permitted provided that the following\n-* conditions are met:\n-*\n-* - Redistributions of source code must retain the above\n-*\tcopyright notice, this list of conditions and the following\n-*\tdisclaimer.\n-*\n-* - Redistributions in binary form must reproduce the above\n-*\tcopyright notice, this list of conditions and the following\n-*\tdisclaimer in the documentation and/or other materials\n-*\tprovided with the distribution.\n-*\n-* THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,\n-* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\n-* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\n-* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS\n-* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN\n-* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\n-* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n-* SOFTWARE.\n-*\n-*******************************************************************************/\n-\n-#ifndef I40IW_USER_H\n-#define I40IW_USER_H\n-\n-enum i40iw_device_capabilities_const {\n-\tI40IW_WQE_SIZE =\t\t\t4,\n-\tI40IW_CQP_WQE_SIZE =\t\t\t8,\n-\tI40IW_CQE_SIZE =\t\t\t4,\n-\tI40IW_EXTENDED_CQE_SIZE =\t\t8,\n-\tI40IW_AEQE_SIZE =\t\t\t2,\n-\tI40IW_CEQE_SIZE =\t\t\t1,\n-\tI40IW_CQP_CTX_SIZE =\t\t\t8,\n-\tI40IW_SHADOW_AREA_SIZE =\t\t8,\n-\tI40IW_CEQ_MAX_COUNT =\t\t\t256,\n-\tI40IW_QUERY_FPM_BUF_SIZE =\t\t128,\n-\tI40IW_COMMIT_FPM_BUF_SIZE =\t\t128,\n-\tI40IW_MIN_IW_QP_ID =\t\t\t1,\n-\tI40IW_MAX_IW_QP_ID =\t\t\t262143,\n-\tI40IW_MIN_CEQID =\t\t\t0,\n-\tI40IW_MAX_CEQID =\t\t\t256,\n-\tI40IW_MIN_CQID =\t\t\t0,\n-\tI40IW_MAX_CQID =\t\t\t131071,\n-\tI40IW_MIN_AEQ_ENTRIES =\t\t\t1,\n-\tI40IW_MAX_AEQ_ENTRIES =\t\t\t524287,\n-\tI40IW_MIN_CEQ_ENTRIES =\t\t\t1,\n-\tI40IW_MAX_CEQ_ENTRIES =\t\t\t131071,\n-\tI40IW_MIN_CQ_SIZE =\t\t\t1,\n-\tI40IW_MAX_CQ_SIZE =\t\t\t1048575,\n-\tI40IW_DB_ID_ZERO =\t\t\t0,\n-\tI40IW_MAX_WQ_FRAGMENT_COUNT =\t\t3,\n-\tI40IW_MAX_SGE_RD =\t\t\t1,\n-\tI40IW_MAX_OUTBOUND_MESSAGE_SIZE =\t2147483647,\n-\tI40IW_MAX_INBOUND_MESSAGE_SIZE =\t2147483647,\n-\tI40IW_MAX_PUSH_PAGE_COUNT =\t\t4096,\n-\tI40IW_MAX_PE_ENABLED_VF_COUNT =\t\t32,\n-\tI40IW_MAX_VF_FPM_ID =\t\t\t47,\n-\tI40IW_MAX_VF_PER_PF =\t\t\t127,\n-\tI40IW_MAX_SQ_PAYLOAD_SIZE =\t\t2145386496,\n-\tI40IW_MAX_INLINE_DATA_SIZE =\t\t48,\n-\tI40IW_MAX_PUSHMODE_INLINE_DATA_SIZE =\t48,\n-\tI40IW_MAX_IRD_SIZE =\t\t\t64,\n-\tI40IW_MAX_ORD_SIZE =\t\t\t127,\n-\tI40IW_MAX_WQ_ENTRIES =\t\t\t2048,\n-\tI40IW_Q2_BUFFER_SIZE =\t\t\t(248 + 100),\n-\tI40IW_MAX_WQE_SIZE_RQ =\t\t\t128,\n-\tI40IW_QP_CTX_SIZE =\t\t\t248,\n-\tI40IW_MAX_PDS = \t\t\t32768\n-};\n-\n-#define i40iw_handle void *\n-#define i40iw_adapter_handle i40iw_handle\n-#define i40iw_qp_handle i40iw_handle\n-#define i40iw_cq_handle 
i40iw_handle\n-#define i40iw_srq_handle i40iw_handle\n-#define i40iw_pd_id i40iw_handle\n-#define i40iw_stag_handle i40iw_handle\n-#define i40iw_stag_index u32\n-#define i40iw_stag u32\n-#define i40iw_stag_key u8\n-\n-#define i40iw_tagged_offset u64\n-#define i40iw_access_privileges u32\n-#define i40iw_physical_fragment u64\n-#define i40iw_address_list u64 *\n-\n-#define\tI40IW_MAX_MR_SIZE\t0x10000000000L\n-#define\tI40IW_MAX_RQ_WQE_SHIFT\t2\n-\n-struct i40iw_qp_uk;\n-struct i40iw_cq_uk;\n-struct i40iw_srq_uk;\n-struct i40iw_qp_uk_init_info;\n-struct i40iw_cq_uk_init_info;\n-struct i40iw_srq_uk_init_info;\n-\n-struct i40iw_sge {\n-\ti40iw_tagged_offset tag_off;\n-\tu32 len;\n-\ti40iw_stag stag;\n-};\n-\n-#define i40iw_sgl struct i40iw_sge *\n-\n-struct i40iw_ring {\n-\tu32 head;\n-\tu32 tail;\n-\tu32 size;\n-};\n-\n-struct i40iw_cqe {\n-\tu64 buf[I40IW_CQE_SIZE];\n-};\n-\n-struct i40iw_extended_cqe {\n-\tu64 buf[I40IW_EXTENDED_CQE_SIZE];\n-};\n-\n-struct i40iw_wqe {\n-\tu64 buf[I40IW_WQE_SIZE];\n-};\n-\n-struct i40iw_qp_uk_ops;\n-\n-enum i40iw_addressing_type {\n-\tI40IW_ADDR_TYPE_ZERO_BASED = 0,\n-\tI40IW_ADDR_TYPE_VA_BASED = 1,\n-};\n-\n-#define I40IW_ACCESS_FLAGS_LOCALREAD\t\t0x01\n-#define I40IW_ACCESS_FLAGS_LOCALWRITE\t\t0x02\n-#define I40IW_ACCESS_FLAGS_REMOTEREAD_ONLY\t0x04\n-#define I40IW_ACCESS_FLAGS_REMOTEREAD\t\t0x05\n-#define I40IW_ACCESS_FLAGS_REMOTEWRITE_ONLY\t0x08\n-#define I40IW_ACCESS_FLAGS_REMOTEWRITE\t\t0x0a\n-#define I40IW_ACCESS_FLAGS_BIND_WINDOW\t\t0x10\n-#define I40IW_ACCESS_FLAGS_ALL\t\t\t0x1F\n-\n-#define I40IW_OP_TYPE_RDMA_WRITE\t0\n-#define I40IW_OP_TYPE_RDMA_READ\t\t1\n-#define I40IW_OP_TYPE_SEND\t\t3\n-#define I40IW_OP_TYPE_SEND_INV\t\t4\n-#define I40IW_OP_TYPE_SEND_SOL\t\t5\n-#define I40IW_OP_TYPE_SEND_SOL_INV\t6\n-#define I40IW_OP_TYPE_REC\t\t7\n-#define I40IW_OP_TYPE_BIND_MW\t\t8\n-#define I40IW_OP_TYPE_FAST_REG_NSMR\t9\n-#define I40IW_OP_TYPE_INV_STAG\t\t10\n-#define I40IW_OP_TYPE_RDMA_READ_INV_STAG 11\n-#define I40IW_OP_TYPE_NOP\t\t12\n-\n-enum i40iw_completion_status {\n-\tI40IW_COMPL_STATUS_SUCCESS = 0,\n-\tI40IW_COMPL_STATUS_FLUSHED,\n-\tI40IW_COMPL_STATUS_INVALID_WQE,\n-\tI40IW_COMPL_STATUS_QP_CATASTROPHIC,\n-\tI40IW_COMPL_STATUS_REMOTE_TERMINATION,\n-\tI40IW_COMPL_STATUS_INVALID_STAG,\n-\tI40IW_COMPL_STATUS_BASE_BOUND_VIOLATION,\n-\tI40IW_COMPL_STATUS_ACCESS_VIOLATION,\n-\tI40IW_COMPL_STATUS_INVALID_PD_ID,\n-\tI40IW_COMPL_STATUS_WRAP_ERROR,\n-\tI40IW_COMPL_STATUS_STAG_INVALID_PDID,\n-\tI40IW_COMPL_STATUS_RDMA_READ_ZERO_ORD,\n-\tI40IW_COMPL_STATUS_QP_NOT_PRIVLEDGED,\n-\tI40IW_COMPL_STATUS_STAG_NOT_INVALID,\n-\tI40IW_COMPL_STATUS_INVALID_PHYS_BUFFER_SIZE,\n-\tI40IW_COMPL_STATUS_INVALID_PHYS_BUFFER_ENTRY,\n-\tI40IW_COMPL_STATUS_INVALID_FBO,\n-\tI40IW_COMPL_STATUS_INVALID_LENGTH,\n-\tI40IW_COMPL_STATUS_INVALID_ACCESS,\n-\tI40IW_COMPL_STATUS_PHYS_BUFFER_LIST_TOO_LONG,\n-\tI40IW_COMPL_STATUS_INVALID_VIRT_ADDRESS,\n-\tI40IW_COMPL_STATUS_INVALID_REGION,\n-\tI40IW_COMPL_STATUS_INVALID_WINDOW,\n-\tI40IW_COMPL_STATUS_INVALID_TOTAL_LENGTH\n-};\n-\n-enum i40iw_completion_notify {\n-\tIW_CQ_COMPL_EVENT = 0,\n-\tIW_CQ_COMPL_SOLICITED = 1\n-};\n-\n-struct i40iw_post_send {\n-\ti40iw_sgl sg_list;\n-\tu32 num_sges;\n-};\n-\n-struct i40iw_post_inline_send {\n-\tvoid *data;\n-\tu32 len;\n-};\n-\n-struct i40iw_rdma_write {\n-\ti40iw_sgl lo_sg_list;\n-\tu32 num_lo_sges;\n-\tstruct i40iw_sge rem_addr;\n-};\n-\n-struct i40iw_inline_rdma_write {\n-\tvoid *data;\n-\tu32 len;\n-\tstruct i40iw_sge rem_addr;\n-};\n-\n-struct i40iw_rdma_read {\n-\tstruct i40iw_sge 
lo_addr;\n-\tstruct i40iw_sge rem_addr;\n-};\n-\n-struct i40iw_bind_window {\n-\ti40iw_stag mr_stag;\n-\tu64 bind_length;\n-\tvoid *va;\n-\tenum i40iw_addressing_type addressing_type;\n-\tbool enable_reads;\n-\tbool enable_writes;\n-\ti40iw_stag mw_stag;\n-};\n-\n-struct i40iw_inv_local_stag {\n-\ti40iw_stag target_stag;\n-};\n-\n-struct i40iw_post_sq_info {\n-\tu64 wr_id;\n-\tu8 op_type;\n-\tbool signaled;\n-\tbool read_fence;\n-\tbool local_fence;\n-\tbool inline_data;\n-\tbool defer_flag;\n-\tunion {\n-\t\tstruct i40iw_post_send send;\n-\t\tstruct i40iw_rdma_write rdma_write;\n-\t\tstruct i40iw_rdma_read rdma_read;\n-\t\tstruct i40iw_rdma_read rdma_read_inv;\n-\t\tstruct i40iw_bind_window bind_window;\n-\t\tstruct i40iw_inv_local_stag inv_local_stag;\n-\t\tstruct i40iw_inline_rdma_write inline_rdma_write;\n-\t\tstruct i40iw_post_inline_send inline_send;\n-\t} op;\n-};\n-\n-struct i40iw_post_rq_info {\n-\tu64 wr_id;\n-\ti40iw_sgl sg_list;\n-\tu32 num_sges;\n-};\n-\n-struct i40iw_cq_poll_info {\n-\tu64 wr_id;\n-\ti40iw_qp_handle qp_handle;\n-\tu32 bytes_xfered;\n-\tu32 tcp_seq_num;\n-\tu32 qp_id;\n-\ti40iw_stag inv_stag;\n-\tenum i40iw_completion_status comp_status;\n-\tu16 major_err;\n-\tu16 minor_err;\n-\tu8 op_type;\n-\tbool stag_invalid_set;\n-\tbool push_dropped;\n-\tbool error;\n-\tbool is_srq;\n-\tbool solicited_event;\n-};\n-\n-struct i40iw_qp_uk_ops {\n-\tvoid (*iw_qp_post_wr)(struct i40iw_qp_uk *);\n-\tvoid (*iw_qp_ring_push_db)(struct i40iw_qp_uk *, u32);\n-\tenum i40iw_status_code (*iw_rdma_write)(struct i40iw_qp_uk *,\n-\t\t\t\t\t\tstruct i40iw_post_sq_info *, bool);\n-\tenum i40iw_status_code (*iw_rdma_read)(struct i40iw_qp_uk *,\n-\t\t\t\t\t struct i40iw_post_sq_info *, bool, bool);\n-\tenum i40iw_status_code (*iw_send)(struct i40iw_qp_uk *,\n-\t\t\t\t\t struct i40iw_post_sq_info *, u32, bool);\n-\tenum i40iw_status_code (*iw_inline_rdma_write)(struct i40iw_qp_uk *,\n-\t\t\t\t\t\t struct i40iw_post_sq_info *, bool);\n-\tenum i40iw_status_code (*iw_inline_send)(struct i40iw_qp_uk *,\n-\t\t\t\t\t\t struct i40iw_post_sq_info *, u32, bool);\n-\tenum i40iw_status_code (*iw_stag_local_invalidate)(struct i40iw_qp_uk *,\n-\t\t\t\t\t\t\t struct i40iw_post_sq_info *, bool);\n-\tenum i40iw_status_code (*iw_mw_bind)(struct i40iw_qp_uk *,\n-\t\t\t\t\t struct i40iw_post_sq_info *, bool);\n-\tenum i40iw_status_code (*iw_post_receive)(struct i40iw_qp_uk *,\n-\t\t\t\t\t\t struct i40iw_post_rq_info *);\n-\tenum i40iw_status_code (*iw_post_nop)(struct i40iw_qp_uk *, u64, bool, bool);\n-};\n-\n-struct i40iw_cq_ops {\n-\tvoid (*iw_cq_request_notification)(struct i40iw_cq_uk *,\n-\t\t\t\t\t enum i40iw_completion_notify);\n-\tenum i40iw_status_code (*iw_cq_poll_completion)(struct i40iw_cq_uk *,\n-\t\t\t\t\t\t\tstruct i40iw_cq_poll_info *);\n-\tenum i40iw_status_code (*iw_cq_post_entries)(struct i40iw_cq_uk *, u8 count);\n-\tvoid (*iw_cq_clean)(void *, struct i40iw_cq_uk *);\n-};\n-\n-struct i40iw_dev_uk;\n-\n-struct i40iw_device_uk_ops {\n-\tenum i40iw_status_code (*iwarp_cq_uk_init)(struct i40iw_cq_uk *,\n-\t\t\t\t\t\t struct i40iw_cq_uk_init_info *);\n-\tenum i40iw_status_code (*iwarp_qp_uk_init)(struct i40iw_qp_uk *,\n-\t\t\t\t\t\t struct i40iw_qp_uk_init_info *);\n-};\n-\n-struct i40iw_dev_uk {\n-\tstruct i40iw_device_uk_ops ops_uk;\n-};\n-\n-struct i40iw_sq_uk_wr_trk_info {\n-\tu64 wrid;\n-\tu32 wr_len;\n-\tu8 wqe_size;\n-\tu8 reserved[3];\n-};\n-\n-struct i40iw_qp_quanta {\n-\tu64 elem[I40IW_WQE_SIZE];\n-};\n-\n-struct i40iw_qp_uk {\n-\tstruct i40iw_qp_quanta *sq_base;\n-\tstruct 
i40iw_qp_quanta *rq_base;\n-\tu32 __iomem *wqe_alloc_reg;\n-\tstruct i40iw_sq_uk_wr_trk_info *sq_wrtrk_array;\n-\tu64 *rq_wrid_array;\n-\tu64 *shadow_area;\n-\tu32 *push_db;\n-\tu64 *push_wqe;\n-\tstruct i40iw_ring sq_ring;\n-\tstruct i40iw_ring rq_ring;\n-\tstruct i40iw_ring initial_ring;\n-\tu32 qp_id;\n-\tu32 sq_size;\n-\tu32 rq_size;\n-\tu32 max_sq_frag_cnt;\n-\tu32 max_rq_frag_cnt;\n-\tstruct i40iw_qp_uk_ops ops;\n-\tbool use_srq;\n-\tu8 swqe_polarity;\n-\tu8 swqe_polarity_deferred;\n-\tu8 rwqe_polarity;\n-\tu8 rq_wqe_size;\n-\tu8 rq_wqe_size_multiplier;\n-\tbool first_sq_wq;\n-\tbool deferred_flag;\n-};\n-\n-struct i40iw_cq_uk {\n-\tstruct i40iw_cqe *cq_base;\n-\tu32 __iomem *cqe_alloc_reg;\n-\tu64 *shadow_area;\n-\tu32 cq_id;\n-\tu32 cq_size;\n-\tstruct i40iw_ring cq_ring;\n-\tu8 polarity;\n-\tbool avoid_mem_cflct;\n-\n-\tstruct i40iw_cq_ops ops;\n-};\n-\n-struct i40iw_qp_uk_init_info {\n-\tstruct i40iw_qp_quanta *sq;\n-\tstruct i40iw_qp_quanta *rq;\n-\tu32 __iomem *wqe_alloc_reg;\n-\tu64 *shadow_area;\n-\tstruct i40iw_sq_uk_wr_trk_info *sq_wrtrk_array;\n-\tu64 *rq_wrid_array;\n-\tu32 *push_db;\n-\tu64 *push_wqe;\n-\tu32 qp_id;\n-\tu32 sq_size;\n-\tu32 rq_size;\n-\tu32 max_sq_frag_cnt;\n-\tu32 max_rq_frag_cnt;\n-\tu32 max_inline_data;\n-\tint abi_ver;\n-};\n-\n-struct i40iw_cq_uk_init_info {\n-\tu32 __iomem *cqe_alloc_reg;\n-\tstruct i40iw_cqe *cq_base;\n-\tu64 *shadow_area;\n-\tu32 cq_size;\n-\tu32 cq_id;\n-\tbool avoid_mem_cflct;\n-};\n-\n-void i40iw_device_init_uk(struct i40iw_dev_uk *dev);\n-\n-void i40iw_qp_post_wr(struct i40iw_qp_uk *qp);\n-u64 *i40iw_qp_get_next_send_wqe(struct i40iw_qp_uk *qp, u32 *wqe_idx,\n-\t\t\t\tu8 wqe_size,\n-\t\t\t\tu32 total_size,\n-\t\t\t\tu64 wr_id\n-\t\t\t\t);\n-u64 *i40iw_qp_get_next_recv_wqe(struct i40iw_qp_uk *qp, u32 *wqe_idx);\n-u64 *i40iw_qp_get_next_srq_wqe(struct i40iw_srq_uk *srq, u32 *wqe_idx);\n-\n-enum i40iw_status_code i40iw_cq_uk_init(struct i40iw_cq_uk *cq,\n-\t\t\t\t\tstruct i40iw_cq_uk_init_info *info);\n-enum i40iw_status_code i40iw_qp_uk_init(struct i40iw_qp_uk *qp,\n-\t\t\t\t\tstruct i40iw_qp_uk_init_info *info);\n-\n-void i40iw_clean_cq(void *queue, struct i40iw_cq_uk *cq);\n-enum i40iw_status_code i40iw_nop(struct i40iw_qp_uk *qp, u64 wr_id,\n-\t\t\t\t bool signaled, bool post_sq);\n-enum i40iw_status_code i40iw_fragcnt_to_wqesize_sq(u32 frag_cnt, u8 *wqe_size);\n-enum i40iw_status_code i40iw_fragcnt_to_wqesize_rq(u32 frag_cnt, u8 *wqe_size);\n-enum i40iw_status_code i40iw_inline_data_size_to_wqesize(u32 data_size,\n-\t\t\t\t\t\t\t u8 *wqe_size);\n-void i40iw_get_wqe_shift(u32 sge, u32 inline_data, u8 *shift);\n-enum i40iw_status_code i40iw_get_sqdepth(u32 sq_size, u8 shift, u32 *sqdepth);\n-enum i40iw_status_code i40iw_get_rqdepth(u32 rq_size, u8 shift, u32 *rqdepth);\n-#endif\ndiff --git a/drivers/infiniband/hw/i40iw/i40iw_utils.c b/drivers/infiniband/hw/i40iw/i40iw_utils.c\ndeleted file mode 100644\nindex 0165246..0000000\n--- a/drivers/infiniband/hw/i40iw/i40iw_utils.c\n+++ /dev/null\n@@ -1,1557 +0,0 @@\n-/*******************************************************************************\n-*\n-* Copyright (c) 2015-2016 Intel Corporation. All rights reserved.\n-*\n-* This software is available to you under a choice of one of two\n-* licenses. 
You may choose to be licensed under the terms of the GNU\n-* General Public License (GPL) Version 2, available from the file\n-* COPYING in the main directory of this source tree, or the\n-* OpenFabrics.org BSD license below:\n-*\n-* Redistribution and use in source and binary forms, with or\n-* without modification, are permitted provided that the following\n-* conditions are met:\n-*\n-* - Redistributions of source code must retain the above\n-*\tcopyright notice, this list of conditions and the following\n-*\tdisclaimer.\n-*\n-* - Redistributions in binary form must reproduce the above\n-*\tcopyright notice, this list of conditions and the following\n-*\tdisclaimer in the documentation and/or other materials\n-*\tprovided with the distribution.\n-*\n-* THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,\n-* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\n-* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\n-* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS\n-* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN\n-* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\n-* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n-* SOFTWARE.\n-*\n-*******************************************************************************/\n-\n-#include <linux/module.h>\n-#include <linux/moduleparam.h>\n-#include <linux/netdevice.h>\n-#include <linux/etherdevice.h>\n-#include <linux/ethtool.h>\n-#include <linux/mii.h>\n-#include <linux/if_vlan.h>\n-#include <linux/crc32.h>\n-#include <linux/in.h>\n-#include <linux/ip.h>\n-#include <linux/tcp.h>\n-#include <linux/init.h>\n-#include <linux/io.h>\n-#include <asm/irq.h>\n-#include <asm/byteorder.h>\n-#include <net/netevent.h>\n-#include <net/neighbour.h>\n-#include \"i40iw.h\"\n-\n-/**\n- * i40iw_arp_table - manage arp table\n- * @iwdev: iwarp device\n- * @ip_addr: ip address for device\n- * @mac_addr: mac address ptr\n- * @action: modify, delete or add\n- */\n-int i40iw_arp_table(struct i40iw_device *iwdev,\n-\t\t u32 *ip_addr,\n-\t\t bool ipv4,\n-\t\t u8 *mac_addr,\n-\t\t u32 action)\n-{\n-\tint arp_index;\n-\tint err;\n-\tu32 ip[4];\n-\n-\tif (ipv4) {\n-\t\tmemset(ip, 0, sizeof(ip));\n-\t\tip[0] = *ip_addr;\n-\t} else {\n-\t\tmemcpy(ip, ip_addr, sizeof(ip));\n-\t}\n-\n-\tfor (arp_index = 0; (u32)arp_index < iwdev->arp_table_size; arp_index++)\n-\t\tif (memcmp(iwdev->arp_table[arp_index].ip_addr, ip, sizeof(ip)) == 0)\n-\t\t\tbreak;\n-\tswitch (action) {\n-\tcase I40IW_ARP_ADD:\n-\t\tif (arp_index != iwdev->arp_table_size)\n-\t\t\treturn -1;\n-\n-\t\tarp_index = 0;\n-\t\terr = i40iw_alloc_resource(iwdev, iwdev->allocated_arps,\n-\t\t\t\t\t iwdev->arp_table_size,\n-\t\t\t\t\t (u32 *)&arp_index,\n-\t\t\t\t\t &iwdev->next_arp_index);\n-\n-\t\tif (err)\n-\t\t\treturn err;\n-\n-\t\tmemcpy(iwdev->arp_table[arp_index].ip_addr, ip, sizeof(ip));\n-\t\tether_addr_copy(iwdev->arp_table[arp_index].mac_addr, mac_addr);\n-\t\tbreak;\n-\tcase I40IW_ARP_RESOLVE:\n-\t\tif (arp_index == iwdev->arp_table_size)\n-\t\t\treturn -1;\n-\t\tbreak;\n-\tcase I40IW_ARP_DELETE:\n-\t\tif (arp_index == iwdev->arp_table_size)\n-\t\t\treturn -1;\n-\t\tmemset(iwdev->arp_table[arp_index].ip_addr, 0,\n-\t\t sizeof(iwdev->arp_table[arp_index].ip_addr));\n-\t\teth_zero_addr(iwdev->arp_table[arp_index].mac_addr);\n-\t\ti40iw_free_resource(iwdev, iwdev->allocated_arps, arp_index);\n-\t\tbreak;\n-\tdefault:\n-\t\treturn -1;\n-\t}\n-\treturn arp_index;\n-}\n-\n-/**\n- * i40iw_wr32 - write 32 bits to 
hw register\n- * @hw: hardware information including registers\n- * @reg: register offset\n- * @value: vvalue to write to register\n- */\n-inline void i40iw_wr32(struct i40iw_hw *hw, u32 reg, u32 value)\n-{\n-\twritel(value, hw->hw_addr + reg);\n-}\n-\n-/**\n- * i40iw_rd32 - read a 32 bit hw register\n- * @hw: hardware information including registers\n- * @reg: register offset\n- *\n- * Return value of register content\n- */\n-inline u32 i40iw_rd32(struct i40iw_hw *hw, u32 reg)\n-{\n-\treturn readl(hw->hw_addr + reg);\n-}\n-\n-/**\n- * i40iw_inetaddr_event - system notifier for ipv4 addr events\n- * @notfier: not used\n- * @event: event for notifier\n- * @ptr: if address\n- */\n-int i40iw_inetaddr_event(struct notifier_block *notifier,\n-\t\t\t unsigned long event,\n-\t\t\t void *ptr)\n-{\n-\tstruct in_ifaddr *ifa = ptr;\n-\tstruct net_device *event_netdev = ifa->ifa_dev->dev;\n-\tstruct net_device *netdev;\n-\tstruct net_device *upper_dev;\n-\tstruct i40iw_device *iwdev;\n-\tstruct i40iw_handler *hdl;\n-\tu32 local_ipaddr;\n-\tu32 action = I40IW_ARP_ADD;\n-\n-\thdl = i40iw_find_netdev(event_netdev);\n-\tif (!hdl)\n-\t\treturn NOTIFY_DONE;\n-\n-\tiwdev = &hdl->device;\n-\tif (iwdev->init_state < IP_ADDR_REGISTERED || iwdev->closing)\n-\t\treturn NOTIFY_DONE;\n-\n-\tnetdev = iwdev->ldev->netdev;\n-\tupper_dev = netdev_master_upper_dev_get(netdev);\n-\tif (netdev != event_netdev)\n-\t\treturn NOTIFY_DONE;\n-\n-\tif (upper_dev) {\n-\t\tstruct in_device *in;\n-\n-\t\trcu_read_lock();\n-\t\tin = __in_dev_get_rcu(upper_dev);\n-\n-\t\tlocal_ipaddr = 0;\n-\t\tif (in) {\n-\t\t\tstruct in_ifaddr *ifa;\n-\n-\t\t\tifa = rcu_dereference(in->ifa_list);\n-\t\t\tif (ifa)\n-\t\t\t\tlocal_ipaddr = ntohl(ifa->ifa_address);\n-\t\t}\n-\n-\t\trcu_read_unlock();\n-\t} else {\n-\t\tlocal_ipaddr = ntohl(ifa->ifa_address);\n-\t}\n-\tswitch (event) {\n-\tcase NETDEV_DOWN:\n-\t\taction = I40IW_ARP_DELETE;\n-\t\t/* Fall through */\n-\tcase NETDEV_UP:\n-\t\t/* Fall through */\n-\tcase NETDEV_CHANGEADDR:\n-\n-\t\t/* Just skip if no need to handle ARP cache */\n-\t\tif (!local_ipaddr)\n-\t\t\tbreak;\n-\n-\t\ti40iw_manage_arp_cache(iwdev,\n-\t\t\t\t netdev->dev_addr,\n-\t\t\t\t &local_ipaddr,\n-\t\t\t\t true,\n-\t\t\t\t action);\n-\t\ti40iw_if_notify(iwdev, netdev, &local_ipaddr, true,\n-\t\t\t\t(action == I40IW_ARP_ADD) ? 
true : false);\n-\t\tbreak;\n-\tdefault:\n-\t\tbreak;\n-\t}\n-\treturn NOTIFY_DONE;\n-}\n-\n-/**\n- * i40iw_inet6addr_event - system notifier for ipv6 addr events\n- * @notfier: not used\n- * @event: event for notifier\n- * @ptr: if address\n- */\n-int i40iw_inet6addr_event(struct notifier_block *notifier,\n-\t\t\t unsigned long event,\n-\t\t\t void *ptr)\n-{\n-\tstruct inet6_ifaddr *ifa = (struct inet6_ifaddr *)ptr;\n-\tstruct net_device *event_netdev = ifa->idev->dev;\n-\tstruct net_device *netdev;\n-\tstruct i40iw_device *iwdev;\n-\tstruct i40iw_handler *hdl;\n-\tu32 local_ipaddr6[4];\n-\tu32 action = I40IW_ARP_ADD;\n-\n-\thdl = i40iw_find_netdev(event_netdev);\n-\tif (!hdl)\n-\t\treturn NOTIFY_DONE;\n-\n-\tiwdev = &hdl->device;\n-\tif (iwdev->init_state < IP_ADDR_REGISTERED || iwdev->closing)\n-\t\treturn NOTIFY_DONE;\n-\n-\tnetdev = iwdev->ldev->netdev;\n-\tif (netdev != event_netdev)\n-\t\treturn NOTIFY_DONE;\n-\n-\ti40iw_copy_ip_ntohl(local_ipaddr6, ifa->addr.in6_u.u6_addr32);\n-\tswitch (event) {\n-\tcase NETDEV_DOWN:\n-\t\taction = I40IW_ARP_DELETE;\n-\t\t/* Fall through */\n-\tcase NETDEV_UP:\n-\t\t/* Fall through */\n-\tcase NETDEV_CHANGEADDR:\n-\t\ti40iw_manage_arp_cache(iwdev,\n-\t\t\t\t netdev->dev_addr,\n-\t\t\t\t local_ipaddr6,\n-\t\t\t\t false,\n-\t\t\t\t action);\n-\t\ti40iw_if_notify(iwdev, netdev, local_ipaddr6, false,\n-\t\t\t\t(action == I40IW_ARP_ADD) ? true : false);\n-\t\tbreak;\n-\tdefault:\n-\t\tbreak;\n-\t}\n-\treturn NOTIFY_DONE;\n-}\n-\n-/**\n- * i40iw_net_event - system notifier for netevents\n- * @notfier: not used\n- * @event: event for notifier\n- * @ptr: neighbor\n- */\n-int i40iw_net_event(struct notifier_block *notifier, unsigned long event, void *ptr)\n-{\n-\tstruct neighbour *neigh = ptr;\n-\tstruct i40iw_device *iwdev;\n-\tstruct i40iw_handler *iwhdl;\n-\t__be32 *p;\n-\tu32 local_ipaddr[4];\n-\n-\tswitch (event) {\n-\tcase NETEVENT_NEIGH_UPDATE:\n-\t\tiwhdl = i40iw_find_netdev((struct net_device *)neigh->dev);\n-\t\tif (!iwhdl)\n-\t\t\treturn NOTIFY_DONE;\n-\t\tiwdev = &iwhdl->device;\n-\t\tif (iwdev->init_state < IP_ADDR_REGISTERED || iwdev->closing)\n-\t\t\treturn NOTIFY_DONE;\n-\t\tp = (__be32 *)neigh->primary_key;\n-\t\ti40iw_copy_ip_ntohl(local_ipaddr, p);\n-\t\tif (neigh->nud_state & NUD_VALID) {\n-\t\t\ti40iw_manage_arp_cache(iwdev,\n-\t\t\t\t\t neigh->ha,\n-\t\t\t\t\t local_ipaddr,\n-\t\t\t\t\t false,\n-\t\t\t\t\t I40IW_ARP_ADD);\n-\n-\t\t} else {\n-\t\t\ti40iw_manage_arp_cache(iwdev,\n-\t\t\t\t\t neigh->ha,\n-\t\t\t\t\t local_ipaddr,\n-\t\t\t\t\t false,\n-\t\t\t\t\t I40IW_ARP_DELETE);\n-\t\t}\n-\t\tbreak;\n-\tdefault:\n-\t\tbreak;\n-\t}\n-\treturn NOTIFY_DONE;\n-}\n-\n-/**\n- * i40iw_netdevice_event - system notifier for netdev events\n- * @notfier: not used\n- * @event: event for notifier\n- * @ptr: netdev\n- */\n-int i40iw_netdevice_event(struct notifier_block *notifier,\n-\t\t\t unsigned long event,\n-\t\t\t void *ptr)\n-{\n-\tstruct net_device *event_netdev;\n-\tstruct net_device *netdev;\n-\tstruct i40iw_device *iwdev;\n-\tstruct i40iw_handler *hdl;\n-\n-\tevent_netdev = netdev_notifier_info_to_dev(ptr);\n-\n-\thdl = i40iw_find_netdev(event_netdev);\n-\tif (!hdl)\n-\t\treturn NOTIFY_DONE;\n-\n-\tiwdev = &hdl->device;\n-\tif (iwdev->init_state < RDMA_DEV_REGISTERED || iwdev->closing)\n-\t\treturn NOTIFY_DONE;\n-\n-\tnetdev = iwdev->ldev->netdev;\n-\tif (netdev != event_netdev)\n-\t\treturn NOTIFY_DONE;\n-\n-\tiwdev->iw_status = 1;\n-\n-\tswitch (event) {\n-\tcase NETDEV_DOWN:\n-\t\tiwdev->iw_status = 0;\n-\t\t/* Fall through */\n-\tcase 
NETDEV_UP:\n-\t\ti40iw_port_ibevent(iwdev);\n-\t\tbreak;\n-\tdefault:\n-\t\tbreak;\n-\t}\n-\treturn NOTIFY_DONE;\n-}\n-\n-/**\n- * i40iw_get_cqp_request - get cqp struct\n- * @cqp: device cqp ptr\n- * @wait: cqp to be used in wait mode\n- */\n-struct i40iw_cqp_request *i40iw_get_cqp_request(struct i40iw_cqp *cqp, bool wait)\n-{\n-\tstruct i40iw_cqp_request *cqp_request = NULL;\n-\tunsigned long flags;\n-\n-\tspin_lock_irqsave(&cqp->req_lock, flags);\n-\tif (!list_empty(&cqp->cqp_avail_reqs)) {\n-\t\tcqp_request = list_entry(cqp->cqp_avail_reqs.next,\n-\t\t\t\t\t struct i40iw_cqp_request, list);\n-\t\tlist_del_init(&cqp_request->list);\n-\t}\n-\tspin_unlock_irqrestore(&cqp->req_lock, flags);\n-\tif (!cqp_request) {\n-\t\tcqp_request = kzalloc(sizeof(*cqp_request), GFP_ATOMIC);\n-\t\tif (cqp_request) {\n-\t\t\tcqp_request->dynamic = true;\n-\t\t\tINIT_LIST_HEAD(&cqp_request->list);\n-\t\t\tinit_waitqueue_head(&cqp_request->waitq);\n-\t\t}\n-\t}\n-\tif (!cqp_request) {\n-\t\ti40iw_pr_err(\"CQP Request Fail: No Memory\");\n-\t\treturn NULL;\n-\t}\n-\n-\tif (wait) {\n-\t\tatomic_set(&cqp_request->refcount, 2);\n-\t\tcqp_request->waiting = true;\n-\t} else {\n-\t\tatomic_set(&cqp_request->refcount, 1);\n-\t}\n-\treturn cqp_request;\n-}\n-\n-/**\n- * i40iw_free_cqp_request - free cqp request\n- * @cqp: cqp ptr\n- * @cqp_request: to be put back in cqp list\n- */\n-void i40iw_free_cqp_request(struct i40iw_cqp *cqp, struct i40iw_cqp_request *cqp_request)\n-{\n-\tstruct i40iw_device *iwdev = container_of(cqp, struct i40iw_device, cqp);\n-\tunsigned long flags;\n-\n-\tif (cqp_request->dynamic) {\n-\t\tkfree(cqp_request);\n-\t} else {\n-\t\tcqp_request->request_done = false;\n-\t\tcqp_request->callback_fcn = NULL;\n-\t\tcqp_request->waiting = false;\n-\n-\t\tspin_lock_irqsave(&cqp->req_lock, flags);\n-\t\tlist_add_tail(&cqp_request->list, &cqp->cqp_avail_reqs);\n-\t\tspin_unlock_irqrestore(&cqp->req_lock, flags);\n-\t}\n-\twake_up(&iwdev->close_wq);\n-}\n-\n-/**\n- * i40iw_put_cqp_request - dec ref count and free if 0\n- * @cqp: cqp ptr\n- * @cqp_request: to be put back in cqp list\n- */\n-void i40iw_put_cqp_request(struct i40iw_cqp *cqp,\n-\t\t\t struct i40iw_cqp_request *cqp_request)\n-{\n-\tif (atomic_dec_and_test(&cqp_request->refcount))\n-\t\ti40iw_free_cqp_request(cqp, cqp_request);\n-}\n-\n-/**\n- * i40iw_free_pending_cqp_request -free pending cqp request objs\n- * @cqp: cqp ptr\n- * @cqp_request: to be put back in cqp list\n- */\n-static void i40iw_free_pending_cqp_request(struct i40iw_cqp *cqp,\n-\t\t\t\t\t struct i40iw_cqp_request *cqp_request)\n-{\n-\tstruct i40iw_device *iwdev = container_of(cqp, struct i40iw_device, cqp);\n-\n-\tif (cqp_request->waiting) {\n-\t\tcqp_request->compl_info.error = true;\n-\t\tcqp_request->request_done = true;\n-\t\twake_up(&cqp_request->waitq);\n-\t}\n-\ti40iw_put_cqp_request(cqp, cqp_request);\n-\twait_event_timeout(iwdev->close_wq,\n-\t\t\t !atomic_read(&cqp_request->refcount),\n-\t\t\t 1000);\n-}\n-\n-/**\n- * i40iw_cleanup_pending_cqp_op - clean-up cqp with no completions\n- * @iwdev: iwarp device\n- */\n-void i40iw_cleanup_pending_cqp_op(struct i40iw_device *iwdev)\n-{\n-\tstruct i40iw_sc_dev *dev = &iwdev->sc_dev;\n-\tstruct i40iw_cqp *cqp = &iwdev->cqp;\n-\tstruct i40iw_cqp_request *cqp_request = NULL;\n-\tstruct cqp_commands_info *pcmdinfo = NULL;\n-\tu32 i, pending_work, wqe_idx;\n-\n-\tpending_work = I40IW_RING_WORK_AVAILABLE(cqp->sc_cqp.sq_ring);\n-\twqe_idx = I40IW_RING_GETCURRENT_TAIL(cqp->sc_cqp.sq_ring);\n-\tfor (i = 0; i < pending_work; i++) 
{\n-\t\tcqp_request = (struct i40iw_cqp_request *)(unsigned long)cqp->scratch_array[wqe_idx];\n-\t\tif (cqp_request)\n-\t\t\ti40iw_free_pending_cqp_request(cqp, cqp_request);\n-\t\twqe_idx = (wqe_idx + 1) % I40IW_RING_GETSIZE(cqp->sc_cqp.sq_ring);\n-\t}\n-\n-\twhile (!list_empty(&dev->cqp_cmd_head)) {\n-\t\tpcmdinfo = (struct cqp_commands_info *)i40iw_remove_head(&dev->cqp_cmd_head);\n-\t\tcqp_request = container_of(pcmdinfo, struct i40iw_cqp_request, info);\n-\t\tif (cqp_request)\n-\t\t\ti40iw_free_pending_cqp_request(cqp, cqp_request);\n-\t}\n-}\n-\n-/**\n- * i40iw_free_qp - callback after destroy cqp completes\n- * @cqp_request: cqp request for destroy qp\n- * @num: not used\n- */\n-static void i40iw_free_qp(struct i40iw_cqp_request *cqp_request, u32 num)\n-{\n-\tstruct i40iw_sc_qp *qp = (struct i40iw_sc_qp *)cqp_request->param;\n-\tstruct i40iw_qp *iwqp = (struct i40iw_qp *)qp->back_qp;\n-\tstruct i40iw_device *iwdev;\n-\tu32 qp_num = iwqp->ibqp.qp_num;\n-\n-\tiwdev = iwqp->iwdev;\n-\n-\ti40iw_rem_pdusecount(iwqp->iwpd, iwdev);\n-\ti40iw_free_qp_resources(iwdev, iwqp, qp_num);\n-\ti40iw_rem_devusecount(iwdev);\n-}\n-\n-/**\n- * i40iw_wait_event - wait for completion\n- * @iwdev: iwarp device\n- * @cqp_request: cqp request to wait\n- */\n-static int i40iw_wait_event(struct i40iw_device *iwdev,\n-\t\t\t struct i40iw_cqp_request *cqp_request)\n-{\n-\tstruct cqp_commands_info *info = &cqp_request->info;\n-\tstruct i40iw_cqp *iwcqp = &iwdev->cqp;\n-\tstruct i40iw_cqp_timeout cqp_timeout;\n-\tbool cqp_error = false;\n-\tint err_code = 0;\n-\tmemset(&cqp_timeout, 0, sizeof(cqp_timeout));\n-\tcqp_timeout.compl_cqp_cmds = iwdev->sc_dev.cqp_cmd_stats[OP_COMPLETED_COMMANDS];\n-\tdo {\n-\t\tif (wait_event_timeout(cqp_request->waitq,\n-\t\t\t\t cqp_request->request_done, CQP_COMPL_WAIT_TIME))\n-\t\t\tbreak;\n-\n-\t\ti40iw_check_cqp_progress(&cqp_timeout, &iwdev->sc_dev);\n-\n-\t\tif (cqp_timeout.count < CQP_TIMEOUT_THRESHOLD)\n-\t\t\tcontinue;\n-\n-\t\ti40iw_pr_err(\"error cqp command 0x%x timed out\", info->cqp_cmd);\n-\t\terr_code = -ETIME;\n-\t\tif (!iwdev->reset) {\n-\t\t\tiwdev->reset = true;\n-\t\t\ti40iw_request_reset(iwdev);\n-\t\t}\n-\t\tgoto done;\n-\t} while (1);\n-\tcqp_error = cqp_request->compl_info.error;\n-\tif (cqp_error) {\n-\t\ti40iw_pr_err(\"error cqp command 0x%x completion maj = 0x%x min=0x%x\\n\",\n-\t\t\t info->cqp_cmd, cqp_request->compl_info.maj_err_code,\n-\t\t\t cqp_request->compl_info.min_err_code);\n-\t\terr_code = -EPROTO;\n-\t\tgoto done;\n-\t}\n-done:\n-\ti40iw_put_cqp_request(iwcqp, cqp_request);\n-\treturn err_code;\n-}\n-\n-/**\n- * i40iw_handle_cqp_op - process cqp command\n- * @iwdev: iwarp device\n- * @cqp_request: cqp request to process\n- */\n-enum i40iw_status_code i40iw_handle_cqp_op(struct i40iw_device *iwdev,\n-\t\t\t\t\t struct i40iw_cqp_request\n-\t\t\t\t\t *cqp_request)\n-{\n-\tstruct i40iw_sc_dev *dev = &iwdev->sc_dev;\n-\tenum i40iw_status_code status;\n-\tstruct cqp_commands_info *info = &cqp_request->info;\n-\tint err_code = 0;\n-\n-\tif (iwdev->reset) {\n-\t\ti40iw_free_cqp_request(&iwdev->cqp, cqp_request);\n-\t\treturn I40IW_ERR_CQP_COMPL_ERROR;\n-\t}\n-\n-\tstatus = i40iw_process_cqp_cmd(dev, info);\n-\tif (status) {\n-\t\ti40iw_pr_err(\"error cqp command 0x%x failed\\n\", info->cqp_cmd);\n-\t\ti40iw_free_cqp_request(&iwdev->cqp, cqp_request);\n-\t\treturn status;\n-\t}\n-\tif (cqp_request->waiting)\n-\t\terr_code = i40iw_wait_event(iwdev, cqp_request);\n-\tif (err_code)\n-\t\tstatus = I40IW_ERR_CQP_COMPL_ERROR;\n-\treturn 
status;\n-}\n-\n-/**\n- * i40iw_add_devusecount - add dev refcount\n- * @iwdev: dev for refcount\n- */\n-void i40iw_add_devusecount(struct i40iw_device *iwdev)\n-{\n-\tatomic64_inc(&iwdev->use_count);\n-}\n-\n-/**\n- * i40iw_rem_devusecount - decrement refcount for dev\n- * @iwdev: device\n- */\n-void i40iw_rem_devusecount(struct i40iw_device *iwdev)\n-{\n-\tif (!atomic64_dec_and_test(&iwdev->use_count))\n-\t\treturn;\n-\twake_up(&iwdev->close_wq);\n-}\n-\n-/**\n- * i40iw_add_pdusecount - add pd refcount\n- * @iwpd: pd for refcount\n- */\n-void i40iw_add_pdusecount(struct i40iw_pd *iwpd)\n-{\n-\tatomic_inc(&iwpd->usecount);\n-}\n-\n-/**\n- * i40iw_rem_pdusecount - decrement refcount for pd and free if 0\n- * @iwpd: pd for refcount\n- * @iwdev: iwarp device\n- */\n-void i40iw_rem_pdusecount(struct i40iw_pd *iwpd, struct i40iw_device *iwdev)\n-{\n-\tif (!atomic_dec_and_test(&iwpd->usecount))\n-\t\treturn;\n-\ti40iw_free_resource(iwdev, iwdev->allocated_pds, iwpd->sc_pd.pd_id);\n-}\n-\n-/**\n- * i40iw_add_ref - add refcount for qp\n- * @ibqp: iqarp qp\n- */\n-void i40iw_add_ref(struct ib_qp *ibqp)\n-{\n-\tstruct i40iw_qp *iwqp = (struct i40iw_qp *)ibqp;\n-\n-\tatomic_inc(&iwqp->refcount);\n-}\n-\n-/**\n- * i40iw_rem_ref - rem refcount for qp and free if 0\n- * @ibqp: iqarp qp\n- */\n-void i40iw_rem_ref(struct ib_qp *ibqp)\n-{\n-\tstruct i40iw_qp *iwqp;\n-\tenum i40iw_status_code status;\n-\tstruct i40iw_cqp_request *cqp_request;\n-\tstruct cqp_commands_info *cqp_info;\n-\tstruct i40iw_device *iwdev;\n-\tu32 qp_num;\n-\tunsigned long flags;\n-\n-\tiwqp = to_iwqp(ibqp);\n-\tiwdev = iwqp->iwdev;\n-\tspin_lock_irqsave(&iwdev->qptable_lock, flags);\n-\tif (!atomic_dec_and_test(&iwqp->refcount)) {\n-\t\tspin_unlock_irqrestore(&iwdev->qptable_lock, flags);\n-\t\treturn;\n-\t}\n-\n-\tqp_num = iwqp->ibqp.qp_num;\n-\tiwdev->qp_table[qp_num] = NULL;\n-\tspin_unlock_irqrestore(&iwdev->qptable_lock, flags);\n-\tcqp_request = i40iw_get_cqp_request(&iwdev->cqp, false);\n-\tif (!cqp_request)\n-\t\treturn;\n-\n-\tcqp_request->callback_fcn = i40iw_free_qp;\n-\tcqp_request->param = (void *)&iwqp->sc_qp;\n-\tcqp_info = &cqp_request->info;\n-\tcqp_info->cqp_cmd = OP_QP_DESTROY;\n-\tcqp_info->post_sq = 1;\n-\tcqp_info->in.u.qp_destroy.qp = &iwqp->sc_qp;\n-\tcqp_info->in.u.qp_destroy.scratch = (uintptr_t)cqp_request;\n-\tcqp_info->in.u.qp_destroy.remove_hash_idx = true;\n-\tstatus = i40iw_handle_cqp_op(iwdev, cqp_request);\n-\tif (!status)\n-\t\treturn;\n-\n-\ti40iw_rem_pdusecount(iwqp->iwpd, iwdev);\n-\ti40iw_free_qp_resources(iwdev, iwqp, qp_num);\n-\ti40iw_rem_devusecount(iwdev);\n-}\n-\n-/**\n- * i40iw_get_qp - get qp address\n- * @device: iwarp device\n- * @qpn: qp number\n- */\n-struct ib_qp *i40iw_get_qp(struct ib_device *device, int qpn)\n-{\n-\tstruct i40iw_device *iwdev = to_iwdev(device);\n-\n-\tif ((qpn < IW_FIRST_QPN) || (qpn >= iwdev->max_qp))\n-\t\treturn NULL;\n-\n-\treturn &iwdev->qp_table[qpn]->ibqp;\n-}\n-\n-/**\n- * i40iw_debug_buf - print debug msg and buffer is mask set\n- * @dev: hardware control device structure\n- * @mask: mask to compare if to print debug buffer\n- * @buf: points buffer addr\n- * @size: saize of buffer to print\n- */\n-void i40iw_debug_buf(struct i40iw_sc_dev *dev,\n-\t\t enum i40iw_debug_flag mask,\n-\t\t char *desc,\n-\t\t u64 *buf,\n-\t\t u32 size)\n-{\n-\tu32 i;\n-\n-\tif (!(dev->debug_mask & mask))\n-\t\treturn;\n-\ti40iw_debug(dev, mask, \"%s\\n\", desc);\n-\ti40iw_debug(dev, mask, \"starting address virt=%p phy=%llxh\\n\", buf,\n-\t\t (unsigned long 
long)virt_to_phys(buf));\n-\n-\tfor (i = 0; i < size; i += 8)\n-\t\ti40iw_debug(dev, mask, \"index %03d val: %016llx\\n\", i, buf[i / 8]);\n-}\n-\n-/**\n- * i40iw_get_hw_addr - return hw addr\n- * @par: points to shared dev\n- */\n-u8 __iomem *i40iw_get_hw_addr(void *par)\n-{\n-\tstruct i40iw_sc_dev *dev = (struct i40iw_sc_dev *)par;\n-\n-\treturn dev->hw->hw_addr;\n-}\n-\n-/**\n- * i40iw_remove_head - return head entry and remove from list\n- * @list: list for entry\n- */\n-void *i40iw_remove_head(struct list_head *list)\n-{\n-\tstruct list_head *entry;\n-\n-\tif (list_empty(list))\n-\t\treturn NULL;\n-\n-\tentry = (void *)list->next;\n-\tlist_del(entry);\n-\treturn (void *)entry;\n-}\n-\n-/**\n- * i40iw_allocate_dma_mem - Memory alloc helper fn\n- * @hw: pointer to the HW structure\n- * @mem: ptr to mem struct to fill out\n- * @size: size of memory requested\n- * @alignment: what to align the allocation to\n- */\n-enum i40iw_status_code i40iw_allocate_dma_mem(struct i40iw_hw *hw,\n-\t\t\t\t\t struct i40iw_dma_mem *mem,\n-\t\t\t\t\t u64 size,\n-\t\t\t\t\t u32 alignment)\n-{\n-\tstruct pci_dev *pcidev = (struct pci_dev *)hw->dev_context;\n-\n-\tif (!mem)\n-\t\treturn I40IW_ERR_PARAM;\n-\tmem->size = ALIGN(size, alignment);\n-\tmem->va = dma_alloc_coherent(&pcidev->dev, mem->size,\n-\t\t\t\t (dma_addr_t *)&mem->pa, GFP_KERNEL);\n-\tif (!mem->va)\n-\t\treturn I40IW_ERR_NO_MEMORY;\n-\treturn 0;\n-}\n-\n-/**\n- * i40iw_free_dma_mem - Memory free helper fn\n- * @hw: pointer to the HW structure\n- * @mem: ptr to mem struct to free\n- */\n-void i40iw_free_dma_mem(struct i40iw_hw *hw, struct i40iw_dma_mem *mem)\n-{\n-\tstruct pci_dev *pcidev = (struct pci_dev *)hw->dev_context;\n-\n-\tif (!mem || !mem->va)\n-\t\treturn;\n-\n-\tdma_free_coherent(&pcidev->dev, mem->size,\n-\t\t\t mem->va, (dma_addr_t)mem->pa);\n-\tmem->va = NULL;\n-}\n-\n-/**\n- * i40iw_allocate_virt_mem - virtual memory alloc helper fn\n- * @hw: pointer to the HW structure\n- * @mem: ptr to mem struct to fill out\n- * @size: size of memory requested\n- */\n-enum i40iw_status_code i40iw_allocate_virt_mem(struct i40iw_hw *hw,\n-\t\t\t\t\t struct i40iw_virt_mem *mem,\n-\t\t\t\t\t u32 size)\n-{\n-\tif (!mem)\n-\t\treturn I40IW_ERR_PARAM;\n-\n-\tmem->size = size;\n-\tmem->va = kzalloc(size, GFP_KERNEL);\n-\n-\tif (mem->va)\n-\t\treturn 0;\n-\telse\n-\t\treturn I40IW_ERR_NO_MEMORY;\n-}\n-\n-/**\n- * i40iw_free_virt_mem - virtual memory free helper fn\n- * @hw: pointer to the HW structure\n- * @mem: ptr to mem struct to free\n- */\n-enum i40iw_status_code i40iw_free_virt_mem(struct i40iw_hw *hw,\n-\t\t\t\t\t struct i40iw_virt_mem *mem)\n-{\n-\tif (!mem)\n-\t\treturn I40IW_ERR_PARAM;\n-\t/*\n-\t * mem->va points to the parent of mem, so both mem and mem->va\n-\t * can not be touched once mem->va is freed\n-\t */\n-\tkfree(mem->va);\n-\treturn 0;\n-}\n-\n-/**\n- * i40iw_cqp_sds_cmd - create cqp command for sd\n- * @dev: hardware control device structure\n- * @sd_info: information for sd cqp\n- *\n- */\n-enum i40iw_status_code i40iw_cqp_sds_cmd(struct i40iw_sc_dev *dev,\n-\t\t\t\t\t struct i40iw_update_sds_info *sdinfo)\n-{\n-\tenum i40iw_status_code status;\n-\tstruct i40iw_cqp_request *cqp_request;\n-\tstruct cqp_commands_info *cqp_info;\n-\tstruct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev;\n-\n-\tcqp_request = i40iw_get_cqp_request(&iwdev->cqp, true);\n-\tif (!cqp_request)\n-\t\treturn I40IW_ERR_NO_MEMORY;\n-\tcqp_info = &cqp_request->info;\n-\tmemcpy(&cqp_info->in.u.update_pe_sds.info, sdinfo,\n-\t 
sizeof(cqp_info->in.u.update_pe_sds.info));\n-\tcqp_info->cqp_cmd = OP_UPDATE_PE_SDS;\n-\tcqp_info->post_sq = 1;\n-\tcqp_info->in.u.update_pe_sds.dev = dev;\n-\tcqp_info->in.u.update_pe_sds.scratch = (uintptr_t)cqp_request;\n-\tstatus = i40iw_handle_cqp_op(iwdev, cqp_request);\n-\tif (status)\n-\t\ti40iw_pr_err(\"CQP-OP Update SD's fail\");\n-\treturn status;\n-}\n-\n-/**\n- * i40iw_qp_suspend_resume - cqp command for suspend/resume\n- * @dev: hardware control device structure\n- * @qp: hardware control qp\n- * @suspend: flag if suspend or resume\n- */\n-void i40iw_qp_suspend_resume(struct i40iw_sc_dev *dev, struct i40iw_sc_qp *qp, bool suspend)\n-{\n-\tstruct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev;\n-\tstruct i40iw_cqp_request *cqp_request;\n-\tstruct i40iw_sc_cqp *cqp = dev->cqp;\n-\tstruct cqp_commands_info *cqp_info;\n-\tenum i40iw_status_code status;\n-\n-\tcqp_request = i40iw_get_cqp_request(&iwdev->cqp, false);\n-\tif (!cqp_request)\n-\t\treturn;\n-\n-\tcqp_info = &cqp_request->info;\n-\tcqp_info->cqp_cmd = (suspend) ? OP_SUSPEND : OP_RESUME;\n-\tcqp_info->in.u.suspend_resume.cqp = cqp;\n-\tcqp_info->in.u.suspend_resume.qp = qp;\n-\tcqp_info->in.u.suspend_resume.scratch = (uintptr_t)cqp_request;\n-\tstatus = i40iw_handle_cqp_op(iwdev, cqp_request);\n-\tif (status)\n-\t\ti40iw_pr_err(\"CQP-OP QP Suspend/Resume fail\");\n-}\n-\n-/**\n- * i40iw_term_modify_qp - modify qp for term message\n- * @qp: hardware control qp\n- * @next_state: qp's next state\n- * @term: terminate code\n- * @term_len: length\n- */\n-void i40iw_term_modify_qp(struct i40iw_sc_qp *qp, u8 next_state, u8 term, u8 term_len)\n-{\n-\tstruct i40iw_qp *iwqp;\n-\n-\tiwqp = (struct i40iw_qp *)qp->back_qp;\n-\ti40iw_next_iw_state(iwqp, next_state, 0, term, term_len);\n-};\n-\n-/**\n- * i40iw_terminate_done - after terminate is completed\n- * @qp: hardware control qp\n- * @timeout_occurred: indicates if terminate timer expired\n- */\n-void i40iw_terminate_done(struct i40iw_sc_qp *qp, int timeout_occurred)\n-{\n-\tstruct i40iw_qp *iwqp;\n-\tu32 next_iwarp_state = I40IW_QP_STATE_ERROR;\n-\tu8 hte = 0;\n-\tbool first_time;\n-\tunsigned long flags;\n-\n-\tiwqp = (struct i40iw_qp *)qp->back_qp;\n-\tspin_lock_irqsave(&iwqp->lock, flags);\n-\tif (iwqp->hte_added) {\n-\t\tiwqp->hte_added = 0;\n-\t\thte = 1;\n-\t}\n-\tfirst_time = !(qp->term_flags & I40IW_TERM_DONE);\n-\tqp->term_flags |= I40IW_TERM_DONE;\n-\tspin_unlock_irqrestore(&iwqp->lock, flags);\n-\tif (first_time) {\n-\t\tif (!timeout_occurred)\n-\t\t\ti40iw_terminate_del_timer(qp);\n-\t\telse\n-\t\t\tnext_iwarp_state = I40IW_QP_STATE_CLOSING;\n-\n-\t\ti40iw_next_iw_state(iwqp, next_iwarp_state, hte, 0, 0);\n-\t\ti40iw_cm_disconn(iwqp);\n-\t}\n-}\n-\n-/**\n- * i40iw_terminate_imeout - timeout happened\n- * @context: points to iwarp qp\n- */\n-static void i40iw_terminate_timeout(struct timer_list *t)\n-{\n-\tstruct i40iw_qp *iwqp = from_timer(iwqp, t, terminate_timer);\n-\tstruct i40iw_sc_qp *qp = (struct i40iw_sc_qp *)&iwqp->sc_qp;\n-\n-\ti40iw_terminate_done(qp, 1);\n-\ti40iw_rem_ref(&iwqp->ibqp);\n-}\n-\n-/**\n- * i40iw_terminate_start_timer - start terminate timeout\n- * @qp: hardware control qp\n- */\n-void i40iw_terminate_start_timer(struct i40iw_sc_qp *qp)\n-{\n-\tstruct i40iw_qp *iwqp;\n-\n-\tiwqp = (struct i40iw_qp *)qp->back_qp;\n-\ti40iw_add_ref(&iwqp->ibqp);\n-\ttimer_setup(&iwqp->terminate_timer, i40iw_terminate_timeout, 0);\n-\tiwqp->terminate_timer.expires = jiffies + HZ;\n-\tadd_timer(&iwqp->terminate_timer);\n-}\n-\n-/**\n- * 
i40iw_terminate_del_timer - delete terminate timeout\n- * @qp: hardware control qp\n- */\n-void i40iw_terminate_del_timer(struct i40iw_sc_qp *qp)\n-{\n-\tstruct i40iw_qp *iwqp;\n-\n-\tiwqp = (struct i40iw_qp *)qp->back_qp;\n-\tif (del_timer(&iwqp->terminate_timer))\n-\t\ti40iw_rem_ref(&iwqp->ibqp);\n-}\n-\n-/**\n- * i40iw_cqp_generic_worker - generic worker for cqp\n- * @work: work pointer\n- */\n-static void i40iw_cqp_generic_worker(struct work_struct *work)\n-{\n-\tstruct i40iw_virtchnl_work_info *work_info =\n-\t &((struct virtchnl_work *)work)->work_info;\n-\n-\tif (work_info->worker_vf_dev)\n-\t\twork_info->callback_fcn(work_info->worker_vf_dev);\n-}\n-\n-/**\n- * i40iw_cqp_spawn_worker - spawn worket thread\n- * @iwdev: device struct pointer\n- * @work_info: work request info\n- * @iw_vf_idx: virtual function index\n- */\n-void i40iw_cqp_spawn_worker(struct i40iw_sc_dev *dev,\n-\t\t\t struct i40iw_virtchnl_work_info *work_info,\n-\t\t\t u32 iw_vf_idx)\n-{\n-\tstruct virtchnl_work *work;\n-\tstruct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev;\n-\n-\twork = &iwdev->virtchnl_w[iw_vf_idx];\n-\tmemcpy(&work->work_info, work_info, sizeof(*work_info));\n-\tINIT_WORK(&work->work, i40iw_cqp_generic_worker);\n-\tqueue_work(iwdev->virtchnl_wq, &work->work);\n-}\n-\n-/**\n- * i40iw_cqp_manage_hmc_fcn_worker -\n- * @work: work pointer for hmc info\n- */\n-static void i40iw_cqp_manage_hmc_fcn_worker(struct work_struct *work)\n-{\n-\tstruct i40iw_cqp_request *cqp_request =\n-\t ((struct virtchnl_work *)work)->cqp_request;\n-\tstruct i40iw_ccq_cqe_info ccq_cqe_info;\n-\tstruct i40iw_hmc_fcn_info *hmcfcninfo =\n-\t\t\t&cqp_request->info.in.u.manage_hmc_pm.info;\n-\tstruct i40iw_device *iwdev =\n-\t (struct i40iw_device *)cqp_request->info.in.u.manage_hmc_pm.dev->back_dev;\n-\n-\tccq_cqe_info.cqp = NULL;\n-\tccq_cqe_info.maj_err_code = cqp_request->compl_info.maj_err_code;\n-\tccq_cqe_info.min_err_code = cqp_request->compl_info.min_err_code;\n-\tccq_cqe_info.op_code = cqp_request->compl_info.op_code;\n-\tccq_cqe_info.op_ret_val = cqp_request->compl_info.op_ret_val;\n-\tccq_cqe_info.scratch = 0;\n-\tccq_cqe_info.error = cqp_request->compl_info.error;\n-\thmcfcninfo->callback_fcn(cqp_request->info.in.u.manage_hmc_pm.dev,\n-\t\t\t\t hmcfcninfo->cqp_callback_param, &ccq_cqe_info);\n-\ti40iw_put_cqp_request(&iwdev->cqp, cqp_request);\n-}\n-\n-/**\n- * i40iw_cqp_manage_hmc_fcn_callback - called function after cqp completion\n- * @cqp_request: cqp request info struct for hmc fun\n- * @unused: unused param of callback\n- */\n-static void i40iw_cqp_manage_hmc_fcn_callback(struct i40iw_cqp_request *cqp_request,\n-\t\t\t\t\t u32 unused)\n-{\n-\tstruct virtchnl_work *work;\n-\tstruct i40iw_hmc_fcn_info *hmcfcninfo =\n-\t &cqp_request->info.in.u.manage_hmc_pm.info;\n-\tstruct i40iw_device *iwdev =\n-\t (struct i40iw_device *)cqp_request->info.in.u.manage_hmc_pm.dev->\n-\t back_dev;\n-\n-\tif (hmcfcninfo && hmcfcninfo->callback_fcn) {\n-\t\ti40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_HMC, \"%s1\\n\", __func__);\n-\t\tatomic_inc(&cqp_request->refcount);\n-\t\twork = &iwdev->virtchnl_w[hmcfcninfo->iw_vf_idx];\n-\t\twork->cqp_request = cqp_request;\n-\t\tINIT_WORK(&work->work, i40iw_cqp_manage_hmc_fcn_worker);\n-\t\tqueue_work(iwdev->virtchnl_wq, &work->work);\n-\t\ti40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_HMC, \"%s2\\n\", __func__);\n-\t} else {\n-\t\ti40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_HMC, \"%s: Something wrong\\n\", __func__);\n-\t}\n-}\n-\n-/**\n- * i40iw_cqp_manage_hmc_fcn_cmd - issue cqp command 
to manage hmc\n- * @dev: hardware control device structure\n- * @hmcfcninfo: info for hmc\n- */\n-enum i40iw_status_code i40iw_cqp_manage_hmc_fcn_cmd(struct i40iw_sc_dev *dev,\n-\t\t\t\t\t\t struct i40iw_hmc_fcn_info *hmcfcninfo)\n-{\n-\tenum i40iw_status_code status;\n-\tstruct i40iw_cqp_request *cqp_request;\n-\tstruct cqp_commands_info *cqp_info;\n-\tstruct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev;\n-\n-\ti40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_HMC, \"%s\\n\", __func__);\n-\tcqp_request = i40iw_get_cqp_request(&iwdev->cqp, false);\n-\tif (!cqp_request)\n-\t\treturn I40IW_ERR_NO_MEMORY;\n-\tcqp_info = &cqp_request->info;\n-\tcqp_request->callback_fcn = i40iw_cqp_manage_hmc_fcn_callback;\n-\tcqp_request->param = hmcfcninfo;\n-\tmemcpy(&cqp_info->in.u.manage_hmc_pm.info, hmcfcninfo,\n-\t sizeof(*hmcfcninfo));\n-\tcqp_info->in.u.manage_hmc_pm.dev = dev;\n-\tcqp_info->cqp_cmd = OP_MANAGE_HMC_PM_FUNC_TABLE;\n-\tcqp_info->post_sq = 1;\n-\tcqp_info->in.u.manage_hmc_pm.scratch = (uintptr_t)cqp_request;\n-\tstatus = i40iw_handle_cqp_op(iwdev, cqp_request);\n-\tif (status)\n-\t\ti40iw_pr_err(\"CQP-OP Manage HMC fail\");\n-\treturn status;\n-}\n-\n-/**\n- * i40iw_cqp_query_fpm_values_cmd - send cqp command for fpm\n- * @iwdev: function device struct\n- * @values_mem: buffer for fpm\n- * @hmc_fn_id: function id for fpm\n- */\n-enum i40iw_status_code i40iw_cqp_query_fpm_values_cmd(struct i40iw_sc_dev *dev,\n-\t\t\t\t\t\t struct i40iw_dma_mem *values_mem,\n-\t\t\t\t\t\t u8 hmc_fn_id)\n-{\n-\tenum i40iw_status_code status;\n-\tstruct i40iw_cqp_request *cqp_request;\n-\tstruct cqp_commands_info *cqp_info;\n-\tstruct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev;\n-\n-\tcqp_request = i40iw_get_cqp_request(&iwdev->cqp, true);\n-\tif (!cqp_request)\n-\t\treturn I40IW_ERR_NO_MEMORY;\n-\tcqp_info = &cqp_request->info;\n-\tcqp_request->param = NULL;\n-\tcqp_info->in.u.query_fpm_values.cqp = dev->cqp;\n-\tcqp_info->in.u.query_fpm_values.fpm_values_pa = values_mem->pa;\n-\tcqp_info->in.u.query_fpm_values.fpm_values_va = values_mem->va;\n-\tcqp_info->in.u.query_fpm_values.hmc_fn_id = hmc_fn_id;\n-\tcqp_info->cqp_cmd = OP_QUERY_FPM_VALUES;\n-\tcqp_info->post_sq = 1;\n-\tcqp_info->in.u.query_fpm_values.scratch = (uintptr_t)cqp_request;\n-\tstatus = i40iw_handle_cqp_op(iwdev, cqp_request);\n-\tif (status)\n-\t\ti40iw_pr_err(\"CQP-OP Query FPM fail\");\n-\treturn status;\n-}\n-\n-/**\n- * i40iw_cqp_commit_fpm_values_cmd - commit fpm values in hw\n- * @dev: hardware control device structure\n- * @values_mem: buffer with fpm values\n- * @hmc_fn_id: function id for fpm\n- */\n-enum i40iw_status_code i40iw_cqp_commit_fpm_values_cmd(struct i40iw_sc_dev *dev,\n-\t\t\t\t\t\t struct i40iw_dma_mem *values_mem,\n-\t\t\t\t\t\t u8 hmc_fn_id)\n-{\n-\tenum i40iw_status_code status;\n-\tstruct i40iw_cqp_request *cqp_request;\n-\tstruct cqp_commands_info *cqp_info;\n-\tstruct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev;\n-\n-\tcqp_request = i40iw_get_cqp_request(&iwdev->cqp, true);\n-\tif (!cqp_request)\n-\t\treturn I40IW_ERR_NO_MEMORY;\n-\tcqp_info = &cqp_request->info;\n-\tcqp_request->param = NULL;\n-\tcqp_info->in.u.commit_fpm_values.cqp = dev->cqp;\n-\tcqp_info->in.u.commit_fpm_values.fpm_values_pa = values_mem->pa;\n-\tcqp_info->in.u.commit_fpm_values.fpm_values_va = values_mem->va;\n-\tcqp_info->in.u.commit_fpm_values.hmc_fn_id = hmc_fn_id;\n-\tcqp_info->cqp_cmd = OP_COMMIT_FPM_VALUES;\n-\tcqp_info->post_sq = 1;\n-\tcqp_info->in.u.commit_fpm_values.scratch = 
(uintptr_t)cqp_request;\n-\tstatus = i40iw_handle_cqp_op(iwdev, cqp_request);\n-\tif (status)\n-\t\ti40iw_pr_err(\"CQP-OP Commit FPM fail\");\n-\treturn status;\n-}\n-\n-/**\n- * i40iw_vf_wait_vchnl_resp - wait for channel msg\n- * @iwdev: function's device struct\n- */\n-enum i40iw_status_code i40iw_vf_wait_vchnl_resp(struct i40iw_sc_dev *dev)\n-{\n-\tstruct i40iw_device *iwdev = dev->back_dev;\n-\tint timeout_ret;\n-\n-\ti40iw_debug(dev, I40IW_DEBUG_VIRT, \"%s[%u] dev %p, iwdev %p\\n\",\n-\t\t __func__, __LINE__, dev, iwdev);\n-\n-\tatomic_set(&iwdev->vchnl_msgs, 2);\n-\ttimeout_ret = wait_event_timeout(iwdev->vchnl_waitq,\n-\t\t\t\t\t (atomic_read(&iwdev->vchnl_msgs) == 1),\n-\t\t\t\t\t I40IW_VCHNL_EVENT_TIMEOUT);\n-\tatomic_dec(&iwdev->vchnl_msgs);\n-\tif (!timeout_ret) {\n-\t\ti40iw_pr_err(\"virt channel completion timeout = 0x%x\\n\", timeout_ret);\n-\t\tatomic_set(&iwdev->vchnl_msgs, 0);\n-\t\tdev->vchnl_up = false;\n-\t\treturn I40IW_ERR_TIMEOUT;\n-\t}\n-\twake_up(&dev->vf_reqs);\n-\treturn 0;\n-}\n-\n-/**\n- * i40iw_cqp_cq_create_cmd - create a cq for the cqp\n- * @dev: device pointer\n- * @cq: pointer to created cq\n- */\n-enum i40iw_status_code i40iw_cqp_cq_create_cmd(struct i40iw_sc_dev *dev,\n-\t\t\t\t\t struct i40iw_sc_cq *cq)\n-{\n-\tstruct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev;\n-\tstruct i40iw_cqp *iwcqp = &iwdev->cqp;\n-\tstruct i40iw_cqp_request *cqp_request;\n-\tstruct cqp_commands_info *cqp_info;\n-\tenum i40iw_status_code status;\n-\n-\tcqp_request = i40iw_get_cqp_request(iwcqp, true);\n-\tif (!cqp_request)\n-\t\treturn I40IW_ERR_NO_MEMORY;\n-\n-\tcqp_info = &cqp_request->info;\n-\tcqp_info->cqp_cmd = OP_CQ_CREATE;\n-\tcqp_info->post_sq = 1;\n-\tcqp_info->in.u.cq_create.cq = cq;\n-\tcqp_info->in.u.cq_create.scratch = (uintptr_t)cqp_request;\n-\tstatus = i40iw_handle_cqp_op(iwdev, cqp_request);\n-\tif (status)\n-\t\ti40iw_pr_err(\"CQP-OP Create QP fail\");\n-\n-\treturn status;\n-}\n-\n-/**\n- * i40iw_cqp_qp_create_cmd - create a qp for the cqp\n- * @dev: device pointer\n- * @qp: pointer to created qp\n- */\n-enum i40iw_status_code i40iw_cqp_qp_create_cmd(struct i40iw_sc_dev *dev,\n-\t\t\t\t\t struct i40iw_sc_qp *qp)\n-{\n-\tstruct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev;\n-\tstruct i40iw_cqp *iwcqp = &iwdev->cqp;\n-\tstruct i40iw_cqp_request *cqp_request;\n-\tstruct cqp_commands_info *cqp_info;\n-\tstruct i40iw_create_qp_info *qp_info;\n-\tenum i40iw_status_code status;\n-\n-\tcqp_request = i40iw_get_cqp_request(iwcqp, true);\n-\tif (!cqp_request)\n-\t\treturn I40IW_ERR_NO_MEMORY;\n-\n-\tcqp_info = &cqp_request->info;\n-\tqp_info = &cqp_request->info.in.u.qp_create.info;\n-\n-\tmemset(qp_info, 0, sizeof(*qp_info));\n-\n-\tqp_info->cq_num_valid = true;\n-\tqp_info->next_iwarp_state = I40IW_QP_STATE_RTS;\n-\n-\tcqp_info->cqp_cmd = OP_QP_CREATE;\n-\tcqp_info->post_sq = 1;\n-\tcqp_info->in.u.qp_create.qp = qp;\n-\tcqp_info->in.u.qp_create.scratch = (uintptr_t)cqp_request;\n-\tstatus = i40iw_handle_cqp_op(iwdev, cqp_request);\n-\tif (status)\n-\t\ti40iw_pr_err(\"CQP-OP QP create fail\");\n-\treturn status;\n-}\n-\n-/**\n- * i40iw_cqp_cq_destroy_cmd - destroy the cqp cq\n- * @dev: device pointer\n- * @cq: pointer to cq\n- */\n-void i40iw_cqp_cq_destroy_cmd(struct i40iw_sc_dev *dev, struct i40iw_sc_cq *cq)\n-{\n-\tstruct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev;\n-\n-\ti40iw_cq_wq_destroy(iwdev, cq);\n-}\n-\n-/**\n- * i40iw_cqp_qp_destroy_cmd - destroy the cqp\n- * @dev: device pointer\n- * @qp: pointer to qp\n- 
*/\n-void i40iw_cqp_qp_destroy_cmd(struct i40iw_sc_dev *dev, struct i40iw_sc_qp *qp)\n-{\n-\tstruct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev;\n-\tstruct i40iw_cqp *iwcqp = &iwdev->cqp;\n-\tstruct i40iw_cqp_request *cqp_request;\n-\tstruct cqp_commands_info *cqp_info;\n-\tenum i40iw_status_code status;\n-\n-\tcqp_request = i40iw_get_cqp_request(iwcqp, true);\n-\tif (!cqp_request)\n-\t\treturn;\n-\n-\tcqp_info = &cqp_request->info;\n-\tmemset(cqp_info, 0, sizeof(*cqp_info));\n-\n-\tcqp_info->cqp_cmd = OP_QP_DESTROY;\n-\tcqp_info->post_sq = 1;\n-\tcqp_info->in.u.qp_destroy.qp = qp;\n-\tcqp_info->in.u.qp_destroy.scratch = (uintptr_t)cqp_request;\n-\tcqp_info->in.u.qp_destroy.remove_hash_idx = true;\n-\tstatus = i40iw_handle_cqp_op(iwdev, cqp_request);\n-\tif (status)\n-\t\ti40iw_pr_err(\"CQP QP_DESTROY fail\");\n-}\n-\n-\n-/**\n- * i40iw_ieq_mpa_crc_ae - generate AE for crc error\n- * @dev: hardware control device structure\n- * @qp: hardware control qp\n- */\n-void i40iw_ieq_mpa_crc_ae(struct i40iw_sc_dev *dev, struct i40iw_sc_qp *qp)\n-{\n-\tstruct i40iw_gen_ae_info info;\n-\tstruct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev;\n-\n-\ti40iw_debug(dev, I40IW_DEBUG_AEQ, \"%s entered\\n\", __func__);\n-\tinfo.ae_code = I40IW_AE_LLP_RECEIVED_MPA_CRC_ERROR;\n-\tinfo.ae_source = I40IW_AE_SOURCE_RQ;\n-\ti40iw_gen_ae(iwdev, qp, &info, false);\n-}\n-\n-/**\n- * i40iw_init_hash_desc - initialize hash for crc calculation\n- * @desc: cryption type\n- */\n-enum i40iw_status_code i40iw_init_hash_desc(struct shash_desc **desc)\n-{\n-\tstruct crypto_shash *tfm;\n-\tstruct shash_desc *tdesc;\n-\n-\ttfm = crypto_alloc_shash(\"crc32c\", 0, 0);\n-\tif (IS_ERR(tfm))\n-\t\treturn I40IW_ERR_MPA_CRC;\n-\n-\ttdesc = kzalloc(sizeof(*tdesc) + crypto_shash_descsize(tfm),\n-\t\t\tGFP_KERNEL);\n-\tif (!tdesc) {\n-\t\tcrypto_free_shash(tfm);\n-\t\treturn I40IW_ERR_MPA_CRC;\n-\t}\n-\ttdesc->tfm = tfm;\n-\t*desc = tdesc;\n-\n-\treturn 0;\n-}\n-\n-/**\n- * i40iw_free_hash_desc - free hash desc\n- * @desc: to be freed\n- */\n-void i40iw_free_hash_desc(struct shash_desc *desc)\n-{\n-\tif (desc) {\n-\t\tcrypto_free_shash(desc->tfm);\n-\t\tkfree(desc);\n-\t}\n-}\n-\n-/**\n- * i40iw_alloc_query_fpm_buf - allocate buffer for fpm\n- * @dev: hardware control device structure\n- * @mem: buffer ptr for fpm to be allocated\n- * @return: memory allocation status\n- */\n-enum i40iw_status_code i40iw_alloc_query_fpm_buf(struct i40iw_sc_dev *dev,\n-\t\t\t\t\t\t struct i40iw_dma_mem *mem)\n-{\n-\tenum i40iw_status_code status;\n-\tstruct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev;\n-\n-\tstatus = i40iw_obj_aligned_mem(iwdev, mem, I40IW_QUERY_FPM_BUF_SIZE,\n-\t\t\t\t I40IW_FPM_QUERY_BUF_ALIGNMENT_MASK);\n-\treturn status;\n-}\n-\n-/**\n- * i40iw_ieq_check_mpacrc - check if mpa crc is OK\n- * @desc: desc for hash\n- * @addr: address of buffer for crc\n- * @length: length of buffer\n- * @value: value to be compared\n- */\n-enum i40iw_status_code i40iw_ieq_check_mpacrc(struct shash_desc *desc,\n-\t\t\t\t\t void *addr,\n-\t\t\t\t\t u32 length,\n-\t\t\t\t\t u32 value)\n-{\n-\tu32 crc = 0;\n-\tint ret;\n-\tenum i40iw_status_code ret_code = 0;\n-\n-\tcrypto_shash_init(desc);\n-\tret = crypto_shash_update(desc, addr, length);\n-\tif (!ret)\n-\t\tcrypto_shash_final(desc, (u8 *)&crc);\n-\tif (crc != value) {\n-\t\ti40iw_pr_err(\"mpa crc check fail\\n\");\n-\t\tret_code = I40IW_ERR_MPA_CRC;\n-\t}\n-\treturn ret_code;\n-}\n-\n-/**\n- * i40iw_ieq_get_qp - get qp based on quad in puda buffer\n- * @dev: 
hardware control device structure\n- * @buf: receive puda buffer on exception q\n- */\n-struct i40iw_sc_qp *i40iw_ieq_get_qp(struct i40iw_sc_dev *dev,\n-\t\t\t\t struct i40iw_puda_buf *buf)\n-{\n-\tstruct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev;\n-\tstruct i40iw_qp *iwqp;\n-\tstruct i40iw_cm_node *cm_node;\n-\tu32 loc_addr[4], rem_addr[4];\n-\tu16 loc_port, rem_port;\n-\tstruct ipv6hdr *ip6h;\n-\tstruct iphdr *iph = (struct iphdr *)buf->iph;\n-\tstruct tcphdr *tcph = (struct tcphdr *)buf->tcph;\n-\n-\tif (iph->version == 4) {\n-\t\tmemset(loc_addr, 0, sizeof(loc_addr));\n-\t\tloc_addr[0] = ntohl(iph->daddr);\n-\t\tmemset(rem_addr, 0, sizeof(rem_addr));\n-\t\trem_addr[0] = ntohl(iph->saddr);\n-\t} else {\n-\t\tip6h = (struct ipv6hdr *)buf->iph;\n-\t\ti40iw_copy_ip_ntohl(loc_addr, ip6h->daddr.in6_u.u6_addr32);\n-\t\ti40iw_copy_ip_ntohl(rem_addr, ip6h->saddr.in6_u.u6_addr32);\n-\t}\n-\tloc_port = ntohs(tcph->dest);\n-\trem_port = ntohs(tcph->source);\n-\n-\tcm_node = i40iw_find_node(&iwdev->cm_core, rem_port, rem_addr, loc_port,\n-\t\t\t\t loc_addr, false, true);\n-\tif (!cm_node)\n-\t\treturn NULL;\n-\tiwqp = cm_node->iwqp;\n-\treturn &iwqp->sc_qp;\n-}\n-\n-/**\n- * i40iw_ieq_update_tcpip_info - update tcpip in the buffer\n- * @buf: puda to update\n- * @length: length of buffer\n- * @seqnum: seq number for tcp\n- */\n-void i40iw_ieq_update_tcpip_info(struct i40iw_puda_buf *buf, u16 length, u32 seqnum)\n-{\n-\tstruct tcphdr *tcph;\n-\tstruct iphdr *iph;\n-\tu16 iphlen;\n-\tu16 packetsize;\n-\tu8 *addr = (u8 *)buf->mem.va;\n-\n-\tiphlen = (buf->ipv4) ? 20 : 40;\n-\tiph = (struct iphdr *)(addr + buf->maclen);\n-\ttcph = (struct tcphdr *)(addr + buf->maclen + iphlen);\n-\tpacketsize = length + buf->tcphlen + iphlen;\n-\n-\tiph->tot_len = htons(packetsize);\n-\ttcph->seq = htonl(seqnum);\n-}\n-\n-/**\n- * i40iw_puda_get_tcpip_info - get tcpip info from puda buffer\n- * @info: to get information\n- * @buf: puda buffer\n- */\n-enum i40iw_status_code i40iw_puda_get_tcpip_info(struct i40iw_puda_completion_info *info,\n-\t\t\t\t\t\t struct i40iw_puda_buf *buf)\n-{\n-\tstruct iphdr *iph;\n-\tstruct ipv6hdr *ip6h;\n-\tstruct tcphdr *tcph;\n-\tu16 iphlen;\n-\tu16 pkt_len;\n-\tu8 *mem = (u8 *)buf->mem.va;\n-\tstruct ethhdr *ethh = (struct ethhdr *)buf->mem.va;\n-\n-\tif (ethh->h_proto == htons(0x8100)) {\n-\t\tinfo->vlan_valid = true;\n-\t\tbuf->vlan_id = ntohs(((struct vlan_ethhdr *)ethh)->h_vlan_TCI) & VLAN_VID_MASK;\n-\t}\n-\tbuf->maclen = (info->vlan_valid) ? 18 : 14;\n-\tiphlen = (info->l3proto) ? 40 : 20;\n-\tbuf->ipv4 = (info->l3proto) ? false : true;\n-\tbuf->iph = mem + buf->maclen;\n-\tiph = (struct iphdr *)buf->iph;\n-\n-\tbuf->tcph = buf->iph + iphlen;\n-\ttcph = (struct tcphdr *)buf->tcph;\n-\n-\tif (buf->ipv4) {\n-\t\tpkt_len = ntohs(iph->tot_len);\n-\t} else {\n-\t\tip6h = (struct ipv6hdr *)buf->iph;\n-\t\tpkt_len = ntohs(ip6h->payload_len) + iphlen;\n-\t}\n-\n-\tbuf->totallen = pkt_len + buf->maclen;\n-\n-\tif (info->payload_len < buf->totallen) {\n-\t\ti40iw_pr_err(\"payload_len = 0x%x totallen expected0x%x\\n\",\n-\t\t\t info->payload_len, buf->totallen);\n-\t\treturn I40IW_ERR_INVALID_SIZE;\n-\t}\n-\n-\tbuf->tcphlen = (tcph->doff) << 2;\n-\tbuf->datalen = pkt_len - iphlen - buf->tcphlen;\n-\tbuf->data = (buf->datalen) ? 
buf->tcph + buf->tcphlen : NULL;\n-\tbuf->hdrlen = buf->maclen + iphlen + buf->tcphlen;\n-\tbuf->seqnum = ntohl(tcph->seq);\n-\treturn 0;\n-}\n-\n-/**\n- * i40iw_hw_stats_timeout - Stats timer-handler which updates all HW stats\n- * @vsi: pointer to the vsi structure\n- */\n-static void i40iw_hw_stats_timeout(struct timer_list *t)\n-{\n-\tstruct i40iw_vsi_pestat *pf_devstat = from_timer(pf_devstat, t,\n-\t\t\t\t\t\t stats_timer);\n-\tstruct i40iw_sc_vsi *sc_vsi = pf_devstat->vsi;\n-\tstruct i40iw_sc_dev *pf_dev = sc_vsi->dev;\n-\tstruct i40iw_vsi_pestat *vf_devstat = NULL;\n-\tu16 iw_vf_idx;\n-\tunsigned long flags;\n-\n-\t/*PF*/\n-\ti40iw_hw_stats_read_all(pf_devstat, &pf_devstat->hw_stats);\n-\n-\tfor (iw_vf_idx = 0; iw_vf_idx < I40IW_MAX_PE_ENABLED_VF_COUNT; iw_vf_idx++) {\n-\t\tspin_lock_irqsave(&pf_devstat->lock, flags);\n-\t\tif (pf_dev->vf_dev[iw_vf_idx]) {\n-\t\t\tif (pf_dev->vf_dev[iw_vf_idx]->stats_initialized) {\n-\t\t\t\tvf_devstat = &pf_dev->vf_dev[iw_vf_idx]->pestat;\n-\t\t\t\ti40iw_hw_stats_read_all(vf_devstat, &vf_devstat->hw_stats);\n-\t\t\t}\n-\t\t}\n-\t\tspin_unlock_irqrestore(&pf_devstat->lock, flags);\n-\t}\n-\n-\tmod_timer(&pf_devstat->stats_timer,\n-\t\t jiffies + msecs_to_jiffies(STATS_TIMER_DELAY));\n-}\n-\n-/**\n- * i40iw_hw_stats_start_timer - Start periodic stats timer\n- * @vsi: pointer to the vsi structure\n- */\n-void i40iw_hw_stats_start_timer(struct i40iw_sc_vsi *vsi)\n-{\n-\tstruct i40iw_vsi_pestat *devstat = vsi->pestat;\n-\n-\ttimer_setup(&devstat->stats_timer, i40iw_hw_stats_timeout, 0);\n-\tmod_timer(&devstat->stats_timer,\n-\t\t jiffies + msecs_to_jiffies(STATS_TIMER_DELAY));\n-}\n-\n-/**\n- * i40iw_hw_stats_stop_timer - Delete periodic stats timer\n- * @vsi: pointer to the vsi structure\n- */\n-void i40iw_hw_stats_stop_timer(struct i40iw_sc_vsi *vsi)\n-{\n-\tstruct i40iw_vsi_pestat *devstat = vsi->pestat;\n-\n-\tdel_timer_sync(&devstat->stats_timer);\n-}\ndiff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.c b/drivers/infiniband/hw/i40iw/i40iw_verbs.c\ndeleted file mode 100644\nindex 8056930..0000000\n--- a/drivers/infiniband/hw/i40iw/i40iw_verbs.c\n+++ /dev/null\n@@ -1,2784 +0,0 @@\n-/*******************************************************************************\n-*\n-* Copyright (c) 2015-2016 Intel Corporation. All rights reserved.\n-*\n-* This software is available to you under a choice of one of two\n-* licenses. You may choose to be licensed under the terms of the GNU\n-* General Public License (GPL) Version 2, available from the file\n-* COPYING in the main directory of this source tree, or the\n-* OpenFabrics.org BSD license below:\n-*\n-* Redistribution and use in source and binary forms, with or\n-* without modification, are permitted provided that the following\n-* conditions are met:\n-*\n-* - Redistributions of source code must retain the above\n-*\tcopyright notice, this list of conditions and the following\n-*\tdisclaimer.\n-*\n-* - Redistributions in binary form must reproduce the above\n-*\tcopyright notice, this list of conditions and the following\n-*\tdisclaimer in the documentation and/or other materials\n-*\tprovided with the distribution.\n-*\n-* THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,\n-* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\n-* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\n-* NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS\n-* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN\n-* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\n-* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n-* SOFTWARE.\n-*\n-*******************************************************************************/\n-\n-#include <linux/module.h>\n-#include <linux/moduleparam.h>\n-#include <linux/random.h>\n-#include <linux/highmem.h>\n-#include <linux/time.h>\n-#include <linux/hugetlb.h>\n-#include <linux/irq.h>\n-#include <asm/byteorder.h>\n-#include <net/ip.h>\n-#include <rdma/ib_verbs.h>\n-#include <rdma/iw_cm.h>\n-#include <rdma/ib_user_verbs.h>\n-#include <rdma/ib_umem.h>\n-#include <rdma/uverbs_ioctl.h>\n-#include \"i40iw.h\"\n-\n-/**\n- * i40iw_query_device - get device attributes\n- * @ibdev: device pointer from stack\n- * @props: returning device attributes\n- * @udata: user data\n- */\n-static int i40iw_query_device(struct ib_device *ibdev,\n-\t\t\t struct ib_device_attr *props,\n-\t\t\t struct ib_udata *udata)\n-{\n-\tstruct i40iw_device *iwdev = to_iwdev(ibdev);\n-\n-\tif (udata->inlen || udata->outlen)\n-\t\treturn -EINVAL;\n-\tmemset(props, 0, sizeof(*props));\n-\tether_addr_copy((u8 *)&props->sys_image_guid, iwdev->netdev->dev_addr);\n-\tprops->fw_ver = I40IW_FW_VERSION;\n-\tprops->device_cap_flags = iwdev->device_cap_flags;\n-\tprops->vendor_id = iwdev->ldev->pcidev->vendor;\n-\tprops->vendor_part_id = iwdev->ldev->pcidev->device;\n-\tprops->hw_ver = (u32)iwdev->sc_dev.hw_rev;\n-\tprops->max_mr_size = I40IW_MAX_OUTBOUND_MESSAGE_SIZE;\n-\tprops->max_qp = iwdev->max_qp - iwdev->used_qps;\n-\tprops->max_qp_wr = I40IW_MAX_QP_WRS;\n-\tprops->max_send_sge = I40IW_MAX_WQ_FRAGMENT_COUNT;\n-\tprops->max_recv_sge = I40IW_MAX_WQ_FRAGMENT_COUNT;\n-\tprops->max_cq = iwdev->max_cq - iwdev->used_cqs;\n-\tprops->max_cqe = iwdev->max_cqe;\n-\tprops->max_mr = iwdev->max_mr - iwdev->used_mrs;\n-\tprops->max_pd = iwdev->max_pd - iwdev->used_pds;\n-\tprops->max_sge_rd = I40IW_MAX_SGE_RD;\n-\tprops->max_qp_rd_atom = I40IW_MAX_IRD_SIZE;\n-\tprops->max_qp_init_rd_atom = props->max_qp_rd_atom;\n-\tprops->atomic_cap = IB_ATOMIC_NONE;\n-\tprops->max_map_per_fmr = 1;\n-\tprops->max_fast_reg_page_list_len = I40IW_MAX_PAGES_PER_FMR;\n-\treturn 0;\n-}\n-\n-/**\n- * i40iw_query_port - get port attrubutes\n- * @ibdev: device pointer from stack\n- * @port: port number for query\n- * @props: returning device attributes\n- */\n-static int i40iw_query_port(struct ib_device *ibdev,\n-\t\t\t u8 port,\n-\t\t\t struct ib_port_attr *props)\n-{\n-\tprops->lid = 1;\n-\tprops->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_REINIT_SUP |\n-\t\tIB_PORT_VENDOR_CLASS_SUP | IB_PORT_BOOT_MGMT_SUP;\n-\tprops->gid_tbl_len = 1;\n-\tprops->pkey_tbl_len = 1;\n-\tprops->active_width = IB_WIDTH_4X;\n-\tprops->active_speed = 1;\n-\tprops->max_msg_sz = I40IW_MAX_OUTBOUND_MESSAGE_SIZE;\n-\treturn 0;\n-}\n-\n-/**\n- * i40iw_alloc_ucontext - Allocate the user context data structure\n- * @uctx: Uverbs context pointer from stack\n- * @udata: user data\n- *\n- * This keeps track of all objects associated with a particular\n- * user-mode client.\n- */\n-static int i40iw_alloc_ucontext(struct ib_ucontext *uctx,\n-\t\t\t\tstruct ib_udata *udata)\n-{\n-\tstruct ib_device *ibdev = uctx->device;\n-\tstruct i40iw_device *iwdev = to_iwdev(ibdev);\n-\tstruct i40iw_alloc_ucontext_req req;\n-\tstruct i40iw_alloc_ucontext_resp uresp = {};\n-\tstruct i40iw_ucontext *ucontext = to_ucontext(uctx);\n-\n-\tif 
(ib_copy_from_udata(&req, udata, sizeof(req)))\n-\t\treturn -EINVAL;\n-\n-\tif (req.userspace_ver < 4 || req.userspace_ver > I40IW_ABI_VER) {\n-\t\ti40iw_pr_err(\"Unsupported provider library version %u.\\n\", req.userspace_ver);\n-\t\treturn -EINVAL;\n-\t}\n-\n-\turesp.max_qps = iwdev->max_qp;\n-\turesp.max_pds = iwdev->max_pd;\n-\turesp.wq_size = iwdev->max_qp_wr * 2;\n-\turesp.kernel_ver = req.userspace_ver;\n-\n-\tucontext->iwdev = iwdev;\n-\tucontext->abi_ver = req.userspace_ver;\n-\n-\tif (ib_copy_to_udata(udata, &uresp, sizeof(uresp)))\n-\t\treturn -EFAULT;\n-\n-\tINIT_LIST_HEAD(&ucontext->cq_reg_mem_list);\n-\tspin_lock_init(&ucontext->cq_reg_mem_list_lock);\n-\tINIT_LIST_HEAD(&ucontext->qp_reg_mem_list);\n-\tspin_lock_init(&ucontext->qp_reg_mem_list_lock);\n-\n-\treturn 0;\n-}\n-\n-/**\n- * i40iw_dealloc_ucontext - deallocate the user context data structure\n- * @context: user context created during alloc\n- */\n-static void i40iw_dealloc_ucontext(struct ib_ucontext *context)\n-{\n-\treturn;\n-}\n-\n-/**\n- * i40iw_mmap - user memory map\n- * @context: context created during alloc\n- * @vma: kernel info for user memory map\n- */\n-static int i40iw_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)\n-{\n-\tstruct i40iw_ucontext *ucontext;\n-\tu64 db_addr_offset;\n-\tu64 push_offset;\n-\n-\tucontext = to_ucontext(context);\n-\tif (ucontext->iwdev->sc_dev.is_pf) {\n-\t\tdb_addr_offset = I40IW_DB_ADDR_OFFSET;\n-\t\tpush_offset = I40IW_PUSH_OFFSET;\n-\t\tif (vma->vm_pgoff)\n-\t\t\tvma->vm_pgoff += I40IW_PF_FIRST_PUSH_PAGE_INDEX - 1;\n-\t} else {\n-\t\tdb_addr_offset = I40IW_VF_DB_ADDR_OFFSET;\n-\t\tpush_offset = I40IW_VF_PUSH_OFFSET;\n-\t\tif (vma->vm_pgoff)\n-\t\t\tvma->vm_pgoff += I40IW_VF_FIRST_PUSH_PAGE_INDEX - 1;\n-\t}\n-\n-\tvma->vm_pgoff += db_addr_offset >> PAGE_SHIFT;\n-\n-\tif (vma->vm_pgoff == (db_addr_offset >> PAGE_SHIFT)) {\n-\t\tvma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);\n-\t\tvma->vm_private_data = ucontext;\n-\t} else {\n-\t\tif ((vma->vm_pgoff - (push_offset >> PAGE_SHIFT)) % 2)\n-\t\t\tvma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);\n-\t\telse\n-\t\t\tvma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);\n-\t}\n-\n-\tif (io_remap_pfn_range(vma, vma->vm_start,\n-\t\t\t vma->vm_pgoff + (pci_resource_start(ucontext->iwdev->ldev->pcidev, 0) >> PAGE_SHIFT),\n-\t\t\t PAGE_SIZE, vma->vm_page_prot))\n-\t\treturn -EAGAIN;\n-\n-\treturn 0;\n-}\n-\n-/**\n- * i40iw_alloc_push_page - allocate a push page for qp\n- * @iwdev: iwarp device\n- * @qp: hardware control qp\n- */\n-static void i40iw_alloc_push_page(struct i40iw_device *iwdev, struct i40iw_sc_qp *qp)\n-{\n-\tstruct i40iw_cqp_request *cqp_request;\n-\tstruct cqp_commands_info *cqp_info;\n-\tenum i40iw_status_code status;\n-\n-\tif (qp->push_idx != I40IW_INVALID_PUSH_PAGE_INDEX)\n-\t\treturn;\n-\n-\tcqp_request = i40iw_get_cqp_request(&iwdev->cqp, true);\n-\tif (!cqp_request)\n-\t\treturn;\n-\n-\tatomic_inc(&cqp_request->refcount);\n-\n-\tcqp_info = &cqp_request->info;\n-\tcqp_info->cqp_cmd = OP_MANAGE_PUSH_PAGE;\n-\tcqp_info->post_sq = 1;\n-\n-\tcqp_info->in.u.manage_push_page.info.qs_handle = qp->qs_handle;\n-\tcqp_info->in.u.manage_push_page.info.free_page = 0;\n-\tcqp_info->in.u.manage_push_page.cqp = &iwdev->cqp.sc_cqp;\n-\tcqp_info->in.u.manage_push_page.scratch = (uintptr_t)cqp_request;\n-\n-\tstatus = i40iw_handle_cqp_op(iwdev, cqp_request);\n-\tif (!status)\n-\t\tqp->push_idx = cqp_request->compl_info.op_ret_val;\n-\telse\n-\t\ti40iw_pr_err(\"CQP-OP Push page 
fail\");\n-\ti40iw_put_cqp_request(&iwdev->cqp, cqp_request);\n-}\n-\n-/**\n- * i40iw_dealloc_push_page - free a push page for qp\n- * @iwdev: iwarp device\n- * @qp: hardware control qp\n- */\n-static void i40iw_dealloc_push_page(struct i40iw_device *iwdev, struct i40iw_sc_qp *qp)\n-{\n-\tstruct i40iw_cqp_request *cqp_request;\n-\tstruct cqp_commands_info *cqp_info;\n-\tenum i40iw_status_code status;\n-\n-\tif (qp->push_idx == I40IW_INVALID_PUSH_PAGE_INDEX)\n-\t\treturn;\n-\n-\tcqp_request = i40iw_get_cqp_request(&iwdev->cqp, false);\n-\tif (!cqp_request)\n-\t\treturn;\n-\n-\tcqp_info = &cqp_request->info;\n-\tcqp_info->cqp_cmd = OP_MANAGE_PUSH_PAGE;\n-\tcqp_info->post_sq = 1;\n-\n-\tcqp_info->in.u.manage_push_page.info.push_idx = qp->push_idx;\n-\tcqp_info->in.u.manage_push_page.info.qs_handle = qp->qs_handle;\n-\tcqp_info->in.u.manage_push_page.info.free_page = 1;\n-\tcqp_info->in.u.manage_push_page.cqp = &iwdev->cqp.sc_cqp;\n-\tcqp_info->in.u.manage_push_page.scratch = (uintptr_t)cqp_request;\n-\n-\tstatus = i40iw_handle_cqp_op(iwdev, cqp_request);\n-\tif (!status)\n-\t\tqp->push_idx = I40IW_INVALID_PUSH_PAGE_INDEX;\n-\telse\n-\t\ti40iw_pr_err(\"CQP-OP Push page fail\");\n-}\n-\n-/**\n- * i40iw_alloc_pd - allocate protection domain\n- * @pd: PD pointer\n- * @udata: user data\n- */\n-static int i40iw_alloc_pd(struct ib_pd *pd, struct ib_udata *udata)\n-{\n-\tstruct i40iw_pd *iwpd = to_iwpd(pd);\n-\tstruct i40iw_device *iwdev = to_iwdev(pd->device);\n-\tstruct i40iw_sc_dev *dev = &iwdev->sc_dev;\n-\tstruct i40iw_alloc_pd_resp uresp;\n-\tstruct i40iw_sc_pd *sc_pd;\n-\tu32 pd_id = 0;\n-\tint err;\n-\n-\tif (iwdev->closing)\n-\t\treturn -ENODEV;\n-\n-\terr = i40iw_alloc_resource(iwdev, iwdev->allocated_pds,\n-\t\t\t\t iwdev->max_pd, &pd_id, &iwdev->next_pd);\n-\tif (err) {\n-\t\ti40iw_pr_err(\"alloc resource failed\\n\");\n-\t\treturn err;\n-\t}\n-\n-\tsc_pd = &iwpd->sc_pd;\n-\n-\tif (udata) {\n-\t\tstruct i40iw_ucontext *ucontext = rdma_udata_to_drv_context(\n-\t\t\tudata, struct i40iw_ucontext, ibucontext);\n-\t\tdev->iw_pd_ops->pd_init(dev, sc_pd, pd_id, ucontext->abi_ver);\n-\t\tmemset(&uresp, 0, sizeof(uresp));\n-\t\turesp.pd_id = pd_id;\n-\t\tif (ib_copy_to_udata(udata, &uresp, sizeof(uresp))) {\n-\t\t\terr = -EFAULT;\n-\t\t\tgoto error;\n-\t\t}\n-\t} else {\n-\t\tdev->iw_pd_ops->pd_init(dev, sc_pd, pd_id, -1);\n-\t}\n-\n-\ti40iw_add_pdusecount(iwpd);\n-\treturn 0;\n-\n-error:\n-\ti40iw_free_resource(iwdev, iwdev->allocated_pds, pd_id);\n-\treturn err;\n-}\n-\n-/**\n- * i40iw_dealloc_pd - deallocate pd\n- * @ibpd: ptr of pd to be deallocated\n- * @udata: user data or null for kernel object\n- */\n-static void i40iw_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)\n-{\n-\tstruct i40iw_pd *iwpd = to_iwpd(ibpd);\n-\tstruct i40iw_device *iwdev = to_iwdev(ibpd->device);\n-\n-\ti40iw_rem_pdusecount(iwpd, iwdev);\n-}\n-\n-/**\n- * i40iw_get_pbl - Retrieve pbl from a list given a virtual\n- * address\n- * @va: user virtual address\n- * @pbl_list: pbl list to search in (QP's or CQ's)\n- */\n-static struct i40iw_pbl *i40iw_get_pbl(unsigned long va,\n-\t\t\t\t struct list_head *pbl_list)\n-{\n-\tstruct i40iw_pbl *iwpbl;\n-\n-\tlist_for_each_entry(iwpbl, pbl_list, list) {\n-\t\tif (iwpbl->user_base == va) {\n-\t\t\tiwpbl->on_list = false;\n-\t\t\tlist_del(&iwpbl->list);\n-\t\t\treturn iwpbl;\n-\t\t}\n-\t}\n-\treturn NULL;\n-}\n-\n-/**\n- * i40iw_free_qp_resources - free up memory resources for qp\n- * @iwdev: iwarp device\n- * @iwqp: qp ptr (user or kernel)\n- * @qp_num: qp number 
assigned\n- */\n-void i40iw_free_qp_resources(struct i40iw_device *iwdev,\n-\t\t\t struct i40iw_qp *iwqp,\n-\t\t\t u32 qp_num)\n-{\n-\tstruct i40iw_pbl *iwpbl = &iwqp->iwpbl;\n-\n-\ti40iw_ieq_cleanup_qp(iwdev->vsi.ieq, &iwqp->sc_qp);\n-\ti40iw_dealloc_push_page(iwdev, &iwqp->sc_qp);\n-\tif (qp_num)\n-\t\ti40iw_free_resource(iwdev, iwdev->allocated_qps, qp_num);\n-\tif (iwpbl->pbl_allocated)\n-\t\ti40iw_free_pble(iwdev->pble_rsrc, &iwpbl->pble_alloc);\n-\ti40iw_free_dma_mem(iwdev->sc_dev.hw, &iwqp->q2_ctx_mem);\n-\ti40iw_free_dma_mem(iwdev->sc_dev.hw, &iwqp->kqp.dma_mem);\n-\tkfree(iwqp->kqp.wrid_mem);\n-\tiwqp->kqp.wrid_mem = NULL;\n-\tkfree(iwqp->allocated_buffer);\n-}\n-\n-/**\n- * i40iw_clean_cqes - clean cq entries for qp\n- * @iwqp: qp ptr (user or kernel)\n- * @iwcq: cq ptr\n- */\n-static void i40iw_clean_cqes(struct i40iw_qp *iwqp, struct i40iw_cq *iwcq)\n-{\n-\tstruct i40iw_cq_uk *ukcq = &iwcq->sc_cq.cq_uk;\n-\n-\tukcq->ops.iw_cq_clean(&iwqp->sc_qp.qp_uk, ukcq);\n-}\n-\n-/**\n- * i40iw_destroy_qp - destroy qp\n- * @ibqp: qp's ib pointer also to get to device's qp address\n- */\n-static int i40iw_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)\n-{\n-\tstruct i40iw_qp *iwqp = to_iwqp(ibqp);\n-\n-\tiwqp->destroyed = 1;\n-\n-\tif (iwqp->ibqp_state >= IB_QPS_INIT && iwqp->ibqp_state < IB_QPS_RTS)\n-\t\ti40iw_next_iw_state(iwqp, I40IW_QP_STATE_ERROR, 0, 0, 0);\n-\n-\tif (!iwqp->user_mode) {\n-\t\tif (iwqp->iwscq) {\n-\t\t\ti40iw_clean_cqes(iwqp, iwqp->iwscq);\n-\t\t\tif (iwqp->iwrcq != iwqp->iwscq)\n-\t\t\t\ti40iw_clean_cqes(iwqp, iwqp->iwrcq);\n-\t\t}\n-\t}\n-\n-\ti40iw_rem_ref(&iwqp->ibqp);\n-\treturn 0;\n-}\n-\n-/**\n- * i40iw_setup_virt_qp - setup for allocation of virtual qp\n- * @dev: iwarp device\n- * @qp: qp ptr\n- * @init_info: initialize info to return\n- */\n-static int i40iw_setup_virt_qp(struct i40iw_device *iwdev,\n-\t\t\t struct i40iw_qp *iwqp,\n-\t\t\t struct i40iw_qp_init_info *init_info)\n-{\n-\tstruct i40iw_pbl *iwpbl = &iwqp->iwpbl;\n-\tstruct i40iw_qp_mr *qpmr = &iwpbl->qp_mr;\n-\n-\tiwqp->page = qpmr->sq_page;\n-\tinit_info->shadow_area_pa = cpu_to_le64(qpmr->shadow);\n-\tif (iwpbl->pbl_allocated) {\n-\t\tinit_info->virtual_map = true;\n-\t\tinit_info->sq_pa = qpmr->sq_pbl.idx;\n-\t\tinit_info->rq_pa = qpmr->rq_pbl.idx;\n-\t} else {\n-\t\tinit_info->sq_pa = qpmr->sq_pbl.addr;\n-\t\tinit_info->rq_pa = qpmr->rq_pbl.addr;\n-\t}\n-\treturn 0;\n-}\n-\n-/**\n- * i40iw_setup_kmode_qp - setup initialization for kernel mode qp\n- * @iwdev: iwarp device\n- * @iwqp: qp ptr (user or kernel)\n- * @info: initialize info to return\n- */\n-static int i40iw_setup_kmode_qp(struct i40iw_device *iwdev,\n-\t\t\t\tstruct i40iw_qp *iwqp,\n-\t\t\t\tstruct i40iw_qp_init_info *info)\n-{\n-\tstruct i40iw_dma_mem *mem = &iwqp->kqp.dma_mem;\n-\tu32 sqdepth, rqdepth;\n-\tu8 sqshift;\n-\tu32 size;\n-\tenum i40iw_status_code status;\n-\tstruct i40iw_qp_uk_init_info *ukinfo = &info->qp_uk_init_info;\n-\n-\ti40iw_get_wqe_shift(ukinfo->max_sq_frag_cnt, ukinfo->max_inline_data, &sqshift);\n-\tstatus = i40iw_get_sqdepth(ukinfo->sq_size, sqshift, &sqdepth);\n-\tif (status)\n-\t\treturn -ENOMEM;\n-\n-\tstatus = i40iw_get_rqdepth(ukinfo->rq_size, I40IW_MAX_RQ_WQE_SHIFT, &rqdepth);\n-\tif (status)\n-\t\treturn -ENOMEM;\n-\n-\tsize = sqdepth * sizeof(struct i40iw_sq_uk_wr_trk_info) + (rqdepth << 3);\n-\tiwqp->kqp.wrid_mem = kzalloc(size, GFP_KERNEL);\n-\n-\tukinfo->sq_wrtrk_array = (struct i40iw_sq_uk_wr_trk_info *)iwqp->kqp.wrid_mem;\n-\tif (!ukinfo->sq_wrtrk_array)\n-\t\treturn 
-ENOMEM;\n-\n-\tukinfo->rq_wrid_array = (u64 *)&ukinfo->sq_wrtrk_array[sqdepth];\n-\n-\tsize = (sqdepth + rqdepth) * I40IW_QP_WQE_MIN_SIZE;\n-\tsize += (I40IW_SHADOW_AREA_SIZE << 3);\n-\n-\tstatus = i40iw_allocate_dma_mem(iwdev->sc_dev.hw, mem, size, 256);\n-\tif (status) {\n-\t\tkfree(ukinfo->sq_wrtrk_array);\n-\t\tukinfo->sq_wrtrk_array = NULL;\n-\t\treturn -ENOMEM;\n-\t}\n-\n-\tukinfo->sq = mem->va;\n-\tinfo->sq_pa = mem->pa;\n-\n-\tukinfo->rq = &ukinfo->sq[sqdepth];\n-\tinfo->rq_pa = info->sq_pa + (sqdepth * I40IW_QP_WQE_MIN_SIZE);\n-\n-\tukinfo->shadow_area = ukinfo->rq[rqdepth].elem;\n-\tinfo->shadow_area_pa = info->rq_pa + (rqdepth * I40IW_QP_WQE_MIN_SIZE);\n-\n-\tukinfo->sq_size = sqdepth >> sqshift;\n-\tukinfo->rq_size = rqdepth >> I40IW_MAX_RQ_WQE_SHIFT;\n-\tukinfo->qp_id = iwqp->ibqp.qp_num;\n-\treturn 0;\n-}\n-\n-/**\n- * i40iw_create_qp - create qp\n- * @ibpd: ptr of pd\n- * @init_attr: attributes for qp\n- * @udata: user data for create qp\n- */\n-static struct ib_qp *i40iw_create_qp(struct ib_pd *ibpd,\n-\t\t\t\t struct ib_qp_init_attr *init_attr,\n-\t\t\t\t struct ib_udata *udata)\n-{\n-\tstruct i40iw_pd *iwpd = to_iwpd(ibpd);\n-\tstruct i40iw_device *iwdev = to_iwdev(ibpd->device);\n-\tstruct i40iw_cqp *iwcqp = &iwdev->cqp;\n-\tstruct i40iw_qp *iwqp;\n-\tstruct i40iw_ucontext *ucontext = rdma_udata_to_drv_context(\n-\t\tudata, struct i40iw_ucontext, ibucontext);\n-\tstruct i40iw_create_qp_req req;\n-\tstruct i40iw_create_qp_resp uresp;\n-\tu32 qp_num = 0;\n-\tvoid *mem;\n-\tenum i40iw_status_code ret;\n-\tint err_code;\n-\tint sq_size;\n-\tint rq_size;\n-\tstruct i40iw_sc_qp *qp;\n-\tstruct i40iw_sc_dev *dev = &iwdev->sc_dev;\n-\tstruct i40iw_qp_init_info init_info;\n-\tstruct i40iw_create_qp_info *qp_info;\n-\tstruct i40iw_cqp_request *cqp_request;\n-\tstruct cqp_commands_info *cqp_info;\n-\n-\tstruct i40iw_qp_host_ctx_info *ctx_info;\n-\tstruct i40iwarp_offload_info *iwarp_info;\n-\tunsigned long flags;\n-\n-\tif (iwdev->closing)\n-\t\treturn ERR_PTR(-ENODEV);\n-\n-\tif (init_attr->create_flags)\n-\t\treturn ERR_PTR(-EINVAL);\n-\tif (init_attr->cap.max_inline_data > I40IW_MAX_INLINE_DATA_SIZE)\n-\t\tinit_attr->cap.max_inline_data = I40IW_MAX_INLINE_DATA_SIZE;\n-\n-\tif (init_attr->cap.max_send_sge > I40IW_MAX_WQ_FRAGMENT_COUNT)\n-\t\tinit_attr->cap.max_send_sge = I40IW_MAX_WQ_FRAGMENT_COUNT;\n-\n-\tif (init_attr->cap.max_recv_sge > I40IW_MAX_WQ_FRAGMENT_COUNT)\n-\t\tinit_attr->cap.max_recv_sge = I40IW_MAX_WQ_FRAGMENT_COUNT;\n-\n-\tmemset(&init_info, 0, sizeof(init_info));\n-\n-\tsq_size = init_attr->cap.max_send_wr;\n-\trq_size = init_attr->cap.max_recv_wr;\n-\n-\tinit_info.vsi = &iwdev->vsi;\n-\tinit_info.qp_uk_init_info.sq_size = sq_size;\n-\tinit_info.qp_uk_init_info.rq_size = rq_size;\n-\tinit_info.qp_uk_init_info.max_sq_frag_cnt = init_attr->cap.max_send_sge;\n-\tinit_info.qp_uk_init_info.max_rq_frag_cnt = init_attr->cap.max_recv_sge;\n-\tinit_info.qp_uk_init_info.max_inline_data = init_attr->cap.max_inline_data;\n-\n-\tmem = kzalloc(sizeof(*iwqp), GFP_KERNEL);\n-\tif (!mem)\n-\t\treturn ERR_PTR(-ENOMEM);\n-\n-\tiwqp = (struct i40iw_qp *)mem;\n-\tiwqp->allocated_buffer = mem;\n-\tqp = &iwqp->sc_qp;\n-\tqp->back_qp = (void *)iwqp;\n-\tqp->push_idx = I40IW_INVALID_PUSH_PAGE_INDEX;\n-\n-\tiwqp->ctx_info.iwarp_info = &iwqp->iwarp_info;\n-\n-\tif (i40iw_allocate_dma_mem(dev->hw,\n-\t\t\t\t &iwqp->q2_ctx_mem,\n-\t\t\t\t I40IW_Q2_BUFFER_SIZE + I40IW_QP_CTX_SIZE,\n-\t\t\t\t 256)) {\n-\t\ti40iw_pr_err(\"dma_mem failed\\n\");\n-\t\terr_code = -ENOMEM;\n-\t\tgoto 
error;\n-\t}\n-\n-\tinit_info.q2 = iwqp->q2_ctx_mem.va;\n-\tinit_info.q2_pa = iwqp->q2_ctx_mem.pa;\n-\n-\tinit_info.host_ctx = (void *)init_info.q2 + I40IW_Q2_BUFFER_SIZE;\n-\tinit_info.host_ctx_pa = init_info.q2_pa + I40IW_Q2_BUFFER_SIZE;\n-\n-\terr_code = i40iw_alloc_resource(iwdev, iwdev->allocated_qps, iwdev->max_qp,\n-\t\t\t\t\t&qp_num, &iwdev->next_qp);\n-\tif (err_code) {\n-\t\ti40iw_pr_err(\"qp resource\\n\");\n-\t\tgoto error;\n-\t}\n-\n-\tiwqp->iwdev = iwdev;\n-\tiwqp->iwpd = iwpd;\n-\tiwqp->ibqp.qp_num = qp_num;\n-\tqp = &iwqp->sc_qp;\n-\tiwqp->iwscq = to_iwcq(init_attr->send_cq);\n-\tiwqp->iwrcq = to_iwcq(init_attr->recv_cq);\n-\n-\tiwqp->host_ctx.va = init_info.host_ctx;\n-\tiwqp->host_ctx.pa = init_info.host_ctx_pa;\n-\tiwqp->host_ctx.size = I40IW_QP_CTX_SIZE;\n-\n-\tinit_info.pd = &iwpd->sc_pd;\n-\tinit_info.qp_uk_init_info.qp_id = iwqp->ibqp.qp_num;\n-\tiwqp->ctx_info.qp_compl_ctx = (uintptr_t)qp;\n-\n-\tif (init_attr->qp_type != IB_QPT_RC) {\n-\t\terr_code = -EINVAL;\n-\t\tgoto error;\n-\t}\n-\tif (iwdev->push_mode)\n-\t\ti40iw_alloc_push_page(iwdev, qp);\n-\tif (udata) {\n-\t\terr_code = ib_copy_from_udata(&req, udata, sizeof(req));\n-\t\tif (err_code) {\n-\t\t\ti40iw_pr_err(\"ib_copy_from_data\\n\");\n-\t\t\tgoto error;\n-\t\t}\n-\t\tiwqp->ctx_info.qp_compl_ctx = req.user_compl_ctx;\n-\t\tiwqp->user_mode = 1;\n-\n-\t\tif (req.user_wqe_buffers) {\n-\t\t\tstruct i40iw_pbl *iwpbl;\n-\n-\t\t\tspin_lock_irqsave(\n-\t\t\t &ucontext->qp_reg_mem_list_lock, flags);\n-\t\t\tiwpbl = i40iw_get_pbl(\n-\t\t\t (unsigned long)req.user_wqe_buffers,\n-\t\t\t &ucontext->qp_reg_mem_list);\n-\t\t\tspin_unlock_irqrestore(\n-\t\t\t &ucontext->qp_reg_mem_list_lock, flags);\n-\n-\t\t\tif (!iwpbl) {\n-\t\t\t\terr_code = -ENODATA;\n-\t\t\t\ti40iw_pr_err(\"no pbl info\\n\");\n-\t\t\t\tgoto error;\n-\t\t\t}\n-\t\t\tmemcpy(&iwqp->iwpbl, iwpbl, sizeof(iwqp->iwpbl));\n-\t\t}\n-\t\terr_code = i40iw_setup_virt_qp(iwdev, iwqp, &init_info);\n-\t} else {\n-\t\terr_code = i40iw_setup_kmode_qp(iwdev, iwqp, &init_info);\n-\t}\n-\n-\tif (err_code) {\n-\t\ti40iw_pr_err(\"setup qp failed\\n\");\n-\t\tgoto error;\n-\t}\n-\n-\tinit_info.type = I40IW_QP_TYPE_IWARP;\n-\tret = dev->iw_priv_qp_ops->qp_init(qp, &init_info);\n-\tif (ret) {\n-\t\terr_code = -EPROTO;\n-\t\ti40iw_pr_err(\"qp_init fail\\n\");\n-\t\tgoto error;\n-\t}\n-\tctx_info = &iwqp->ctx_info;\n-\tiwarp_info = &iwqp->iwarp_info;\n-\tiwarp_info->rd_enable = true;\n-\tiwarp_info->wr_rdresp_en = true;\n-\tif (!iwqp->user_mode) {\n-\t\tiwarp_info->fast_reg_en = true;\n-\t\tiwarp_info->priv_mode_en = true;\n-\t}\n-\tiwarp_info->ddp_ver = 1;\n-\tiwarp_info->rdmap_ver = 1;\n-\n-\tctx_info->iwarp_info_valid = true;\n-\tctx_info->send_cq_num = iwqp->iwscq->sc_cq.cq_uk.cq_id;\n-\tctx_info->rcv_cq_num = iwqp->iwrcq->sc_cq.cq_uk.cq_id;\n-\tif (qp->push_idx == I40IW_INVALID_PUSH_PAGE_INDEX) {\n-\t\tctx_info->push_mode_en = false;\n-\t} else {\n-\t\tctx_info->push_mode_en = true;\n-\t\tctx_info->push_idx = qp->push_idx;\n-\t}\n-\n-\tret = dev->iw_priv_qp_ops->qp_setctx(&iwqp->sc_qp,\n-\t\t\t\t\t (u64 *)iwqp->host_ctx.va,\n-\t\t\t\t\t ctx_info);\n-\tctx_info->iwarp_info_valid = false;\n-\tcqp_request = i40iw_get_cqp_request(iwcqp, true);\n-\tif (!cqp_request) {\n-\t\terr_code = -ENOMEM;\n-\t\tgoto error;\n-\t}\n-\tcqp_info = &cqp_request->info;\n-\tqp_info = &cqp_request->info.in.u.qp_create.info;\n-\n-\tmemset(qp_info, 0, sizeof(*qp_info));\n-\n-\tqp_info->cq_num_valid = true;\n-\tqp_info->next_iwarp_state = I40IW_QP_STATE_IDLE;\n-\n-\tcqp_info->cqp_cmd = 
OP_QP_CREATE;\n-\tcqp_info->post_sq = 1;\n-\tcqp_info->in.u.qp_create.qp = qp;\n-\tcqp_info->in.u.qp_create.scratch = (uintptr_t)cqp_request;\n-\tret = i40iw_handle_cqp_op(iwdev, cqp_request);\n-\tif (ret) {\n-\t\ti40iw_pr_err(\"CQP-OP QP create fail\");\n-\t\terr_code = -EACCES;\n-\t\tgoto error;\n-\t}\n-\n-\ti40iw_add_ref(&iwqp->ibqp);\n-\tspin_lock_init(&iwqp->lock);\n-\tiwqp->sig_all = (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR) ? 1 : 0;\n-\tiwdev->qp_table[qp_num] = iwqp;\n-\ti40iw_add_pdusecount(iwqp->iwpd);\n-\ti40iw_add_devusecount(iwdev);\n-\tif (udata) {\n-\t\tmemset(&uresp, 0, sizeof(uresp));\n-\t\turesp.actual_sq_size = sq_size;\n-\t\turesp.actual_rq_size = rq_size;\n-\t\turesp.qp_id = qp_num;\n-\t\turesp.push_idx = qp->push_idx;\n-\t\terr_code = ib_copy_to_udata(udata, &uresp, sizeof(uresp));\n-\t\tif (err_code) {\n-\t\t\ti40iw_pr_err(\"copy_to_udata failed\\n\");\n-\t\t\ti40iw_destroy_qp(&iwqp->ibqp, udata);\n-\t\t\t/* let the completion of the qp destroy free the qp */\n-\t\t\treturn ERR_PTR(err_code);\n-\t\t}\n-\t}\n-\tinit_completion(&iwqp->sq_drained);\n-\tinit_completion(&iwqp->rq_drained);\n-\n-\treturn &iwqp->ibqp;\n-error:\n-\ti40iw_free_qp_resources(iwdev, iwqp, qp_num);\n-\treturn ERR_PTR(err_code);\n-}\n-\n-/**\n- * i40iw_query - query qp attributes\n- * @ibqp: qp pointer\n- * @attr: attributes pointer\n- * @attr_mask: Not used\n- * @init_attr: qp attributes to return\n- */\n-static int i40iw_query_qp(struct ib_qp *ibqp,\n-\t\t\t struct ib_qp_attr *attr,\n-\t\t\t int attr_mask,\n-\t\t\t struct ib_qp_init_attr *init_attr)\n-{\n-\tstruct i40iw_qp *iwqp = to_iwqp(ibqp);\n-\tstruct i40iw_sc_qp *qp = &iwqp->sc_qp;\n-\n-\tattr->qp_state = iwqp->ibqp_state;\n-\tattr->cur_qp_state = attr->qp_state;\n-\tattr->qp_access_flags = 0;\n-\tattr->cap.max_send_wr = qp->qp_uk.sq_size;\n-\tattr->cap.max_recv_wr = qp->qp_uk.rq_size;\n-\tattr->cap.max_inline_data = I40IW_MAX_INLINE_DATA_SIZE;\n-\tattr->cap.max_send_sge = I40IW_MAX_WQ_FRAGMENT_COUNT;\n-\tattr->cap.max_recv_sge = I40IW_MAX_WQ_FRAGMENT_COUNT;\n-\tattr->port_num = 1;\n-\tinit_attr->event_handler = iwqp->ibqp.event_handler;\n-\tinit_attr->qp_context = iwqp->ibqp.qp_context;\n-\tinit_attr->send_cq = iwqp->ibqp.send_cq;\n-\tinit_attr->recv_cq = iwqp->ibqp.recv_cq;\n-\tinit_attr->srq = iwqp->ibqp.srq;\n-\tinit_attr->cap = attr->cap;\n-\tinit_attr->port_num = 1;\n-\treturn 0;\n-}\n-\n-/**\n- * i40iw_hw_modify_qp - setup cqp for modify qp\n- * @iwdev: iwarp device\n- * @iwqp: qp ptr (user or kernel)\n- * @info: info for modify qp\n- * @wait: flag to wait or not for modify qp completion\n- */\n-void i40iw_hw_modify_qp(struct i40iw_device *iwdev, struct i40iw_qp *iwqp,\n-\t\t\tstruct i40iw_modify_qp_info *info, bool wait)\n-{\n-\tstruct i40iw_cqp_request *cqp_request;\n-\tstruct cqp_commands_info *cqp_info;\n-\tstruct i40iw_modify_qp_info *m_info;\n-\tstruct i40iw_gen_ae_info ae_info;\n-\n-\tcqp_request = i40iw_get_cqp_request(&iwdev->cqp, wait);\n-\tif (!cqp_request)\n-\t\treturn;\n-\n-\tcqp_info = &cqp_request->info;\n-\tm_info = &cqp_info->in.u.qp_modify.info;\n-\tmemcpy(m_info, info, sizeof(*m_info));\n-\tcqp_info->cqp_cmd = OP_QP_MODIFY;\n-\tcqp_info->post_sq = 1;\n-\tcqp_info->in.u.qp_modify.qp = &iwqp->sc_qp;\n-\tcqp_info->in.u.qp_modify.scratch = (uintptr_t)cqp_request;\n-\tif (!i40iw_handle_cqp_op(iwdev, cqp_request))\n-\t\treturn;\n-\n-\tswitch (m_info->next_iwarp_state) {\n-\tcase I40IW_QP_STATE_RTS:\n-\t\tif (iwqp->iwarp_state == I40IW_QP_STATE_IDLE)\n-\t\t\ti40iw_send_reset(iwqp->cm_node);\n-\t\t/* fall through 
*/\n-\tcase I40IW_QP_STATE_IDLE:\n-\tcase I40IW_QP_STATE_TERMINATE:\n-\tcase I40IW_QP_STATE_CLOSING:\n-\t\tae_info.ae_code = I40IW_AE_BAD_CLOSE;\n-\t\tae_info.ae_source = 0;\n-\t\ti40iw_gen_ae(iwdev, &iwqp->sc_qp, &ae_info, false);\n-\t\tbreak;\n-\tcase I40IW_QP_STATE_ERROR:\n-\tdefault:\n-\t\tbreak;\n-\t}\n-}\n-\n-/**\n- * i40iw_modify_qp - modify qp request\n- * @ibqp: qp's pointer for modify\n- * @attr: access attributes\n- * @attr_mask: state mask\n- * @udata: user data\n- */\n-int i40iw_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,\n-\t\t int attr_mask, struct ib_udata *udata)\n-{\n-\tstruct i40iw_qp *iwqp = to_iwqp(ibqp);\n-\tstruct i40iw_device *iwdev = iwqp->iwdev;\n-\tstruct i40iw_qp_host_ctx_info *ctx_info;\n-\tstruct i40iwarp_offload_info *iwarp_info;\n-\tstruct i40iw_modify_qp_info info;\n-\tu8 issue_modify_qp = 0;\n-\tu8 dont_wait = 0;\n-\tu32 err;\n-\tunsigned long flags;\n-\n-\tmemset(&info, 0, sizeof(info));\n-\tctx_info = &iwqp->ctx_info;\n-\tiwarp_info = &iwqp->iwarp_info;\n-\n-\tspin_lock_irqsave(&iwqp->lock, flags);\n-\n-\tif (attr_mask & IB_QP_STATE) {\n-\t\tif (iwdev->closing && attr->qp_state != IB_QPS_ERR) {\n-\t\t\terr = -EINVAL;\n-\t\t\tgoto exit;\n-\t\t}\n-\n-\t\tswitch (attr->qp_state) {\n-\t\tcase IB_QPS_INIT:\n-\t\tcase IB_QPS_RTR:\n-\t\t\tif (iwqp->iwarp_state > (u32)I40IW_QP_STATE_IDLE) {\n-\t\t\t\terr = -EINVAL;\n-\t\t\t\tgoto exit;\n-\t\t\t}\n-\t\t\tif (iwqp->iwarp_state == I40IW_QP_STATE_INVALID) {\n-\t\t\t\tinfo.next_iwarp_state = I40IW_QP_STATE_IDLE;\n-\t\t\t\tissue_modify_qp = 1;\n-\t\t\t}\n-\t\t\tbreak;\n-\t\tcase IB_QPS_RTS:\n-\t\t\tif ((iwqp->iwarp_state > (u32)I40IW_QP_STATE_RTS) ||\n-\t\t\t (!iwqp->cm_id)) {\n-\t\t\t\terr = -EINVAL;\n-\t\t\t\tgoto exit;\n-\t\t\t}\n-\n-\t\t\tissue_modify_qp = 1;\n-\t\t\tiwqp->hw_tcp_state = I40IW_TCP_STATE_ESTABLISHED;\n-\t\t\tiwqp->hte_added = 1;\n-\t\t\tinfo.next_iwarp_state = I40IW_QP_STATE_RTS;\n-\t\t\tinfo.tcp_ctx_valid = true;\n-\t\t\tinfo.ord_valid = true;\n-\t\t\tinfo.arp_cache_idx_valid = true;\n-\t\t\tinfo.cq_num_valid = true;\n-\t\t\tbreak;\n-\t\tcase IB_QPS_SQD:\n-\t\t\tif (iwqp->hw_iwarp_state > (u32)I40IW_QP_STATE_RTS) {\n-\t\t\t\terr = 0;\n-\t\t\t\tgoto exit;\n-\t\t\t}\n-\t\t\tif ((iwqp->iwarp_state == (u32)I40IW_QP_STATE_CLOSING) ||\n-\t\t\t (iwqp->iwarp_state < (u32)I40IW_QP_STATE_RTS)) {\n-\t\t\t\terr = 0;\n-\t\t\t\tgoto exit;\n-\t\t\t}\n-\t\t\tif (iwqp->iwarp_state > (u32)I40IW_QP_STATE_CLOSING) {\n-\t\t\t\terr = -EINVAL;\n-\t\t\t\tgoto exit;\n-\t\t\t}\n-\t\t\tinfo.next_iwarp_state = I40IW_QP_STATE_CLOSING;\n-\t\t\tissue_modify_qp = 1;\n-\t\t\tbreak;\n-\t\tcase IB_QPS_SQE:\n-\t\t\tif (iwqp->iwarp_state >= (u32)I40IW_QP_STATE_TERMINATE) {\n-\t\t\t\terr = -EINVAL;\n-\t\t\t\tgoto exit;\n-\t\t\t}\n-\t\t\tinfo.next_iwarp_state = I40IW_QP_STATE_TERMINATE;\n-\t\t\tissue_modify_qp = 1;\n-\t\t\tbreak;\n-\t\tcase IB_QPS_ERR:\n-\t\tcase IB_QPS_RESET:\n-\t\t\tif (iwqp->iwarp_state == (u32)I40IW_QP_STATE_ERROR) {\n-\t\t\t\terr = -EINVAL;\n-\t\t\t\tgoto exit;\n-\t\t\t}\n-\t\t\tif (iwqp->sc_qp.term_flags)\n-\t\t\t\ti40iw_terminate_del_timer(&iwqp->sc_qp);\n-\t\t\tinfo.next_iwarp_state = I40IW_QP_STATE_ERROR;\n-\t\t\tif ((iwqp->hw_tcp_state > I40IW_TCP_STATE_CLOSED) &&\n-\t\t\t iwdev->iw_status &&\n-\t\t\t (iwqp->hw_tcp_state != I40IW_TCP_STATE_TIME_WAIT))\n-\t\t\t\tinfo.reset_tcp_conn = true;\n-\t\t\telse\n-\t\t\t\tdont_wait = 1;\n-\t\t\tissue_modify_qp = 1;\n-\t\t\tinfo.next_iwarp_state = I40IW_QP_STATE_ERROR;\n-\t\t\tbreak;\n-\t\tdefault:\n-\t\t\terr = -EINVAL;\n-\t\t\tgoto 
exit;\n-\t\t}\n-\n-\t\tiwqp->ibqp_state = attr->qp_state;\n-\n-\t}\n-\tif (attr_mask & IB_QP_ACCESS_FLAGS) {\n-\t\tctx_info->iwarp_info_valid = true;\n-\t\tif (attr->qp_access_flags & IB_ACCESS_LOCAL_WRITE)\n-\t\t\tiwarp_info->wr_rdresp_en = true;\n-\t\tif (attr->qp_access_flags & IB_ACCESS_REMOTE_WRITE)\n-\t\t\tiwarp_info->wr_rdresp_en = true;\n-\t\tif (attr->qp_access_flags & IB_ACCESS_REMOTE_READ)\n-\t\t\tiwarp_info->rd_enable = true;\n-\t\tif (attr->qp_access_flags & IB_ACCESS_MW_BIND)\n-\t\t\tiwarp_info->bind_en = true;\n-\n-\t\tif (iwqp->user_mode) {\n-\t\t\tiwarp_info->rd_enable = true;\n-\t\t\tiwarp_info->wr_rdresp_en = true;\n-\t\t\tiwarp_info->priv_mode_en = false;\n-\t\t}\n-\t}\n-\n-\tif (ctx_info->iwarp_info_valid) {\n-\t\tstruct i40iw_sc_dev *dev = &iwdev->sc_dev;\n-\t\tint ret;\n-\n-\t\tctx_info->send_cq_num = iwqp->iwscq->sc_cq.cq_uk.cq_id;\n-\t\tctx_info->rcv_cq_num = iwqp->iwrcq->sc_cq.cq_uk.cq_id;\n-\t\tret = dev->iw_priv_qp_ops->qp_setctx(&iwqp->sc_qp,\n-\t\t\t\t\t\t (u64 *)iwqp->host_ctx.va,\n-\t\t\t\t\t\t ctx_info);\n-\t\tif (ret) {\n-\t\t\ti40iw_pr_err(\"setting QP context\\n\");\n-\t\t\terr = -EINVAL;\n-\t\t\tgoto exit;\n-\t\t}\n-\t}\n-\n-\tspin_unlock_irqrestore(&iwqp->lock, flags);\n-\n-\tif (issue_modify_qp) {\n-\t\ti40iw_hw_modify_qp(iwdev, iwqp, &info, true);\n-\n-\t\tspin_lock_irqsave(&iwqp->lock, flags);\n-\t\tiwqp->iwarp_state = info.next_iwarp_state;\n-\t\tspin_unlock_irqrestore(&iwqp->lock, flags);\n-\t}\n-\n-\tif (issue_modify_qp && (iwqp->ibqp_state > IB_QPS_RTS)) {\n-\t\tif (dont_wait) {\n-\t\t\tif (iwqp->cm_id && iwqp->hw_tcp_state) {\n-\t\t\t\tspin_lock_irqsave(&iwqp->lock, flags);\n-\t\t\t\tiwqp->hw_tcp_state = I40IW_TCP_STATE_CLOSED;\n-\t\t\t\tiwqp->last_aeq = I40IW_AE_RESET_SENT;\n-\t\t\t\tspin_unlock_irqrestore(&iwqp->lock, flags);\n-\t\t\t\ti40iw_cm_disconn(iwqp);\n-\t\t\t}\n-\t\t} else {\n-\t\t\tspin_lock_irqsave(&iwqp->lock, flags);\n-\t\t\tif (iwqp->cm_id) {\n-\t\t\t\tif (atomic_inc_return(&iwqp->close_timer_started) == 1) {\n-\t\t\t\t\tiwqp->cm_id->add_ref(iwqp->cm_id);\n-\t\t\t\t\ti40iw_schedule_cm_timer(iwqp->cm_node,\n-\t\t\t\t\t\t\t\t(struct i40iw_puda_buf *)iwqp,\n-\t\t\t\t\t\t\t\t I40IW_TIMER_TYPE_CLOSE, 1, 0);\n-\t\t\t\t}\n-\t\t\t}\n-\t\t\tspin_unlock_irqrestore(&iwqp->lock, flags);\n-\t\t}\n-\t}\n-\treturn 0;\n-exit:\n-\tspin_unlock_irqrestore(&iwqp->lock, flags);\n-\treturn err;\n-}\n-\n-/**\n- * cq_free_resources - free up recources for cq\n- * @iwdev: iwarp device\n- * @iwcq: cq ptr\n- */\n-static void cq_free_resources(struct i40iw_device *iwdev, struct i40iw_cq *iwcq)\n-{\n-\tstruct i40iw_sc_cq *cq = &iwcq->sc_cq;\n-\n-\tif (!iwcq->user_mode)\n-\t\ti40iw_free_dma_mem(iwdev->sc_dev.hw, &iwcq->kmem);\n-\ti40iw_free_resource(iwdev, iwdev->allocated_cqs, cq->cq_uk.cq_id);\n-}\n-\n-/**\n- * i40iw_cq_wq_destroy - send cq destroy cqp\n- * @iwdev: iwarp device\n- * @cq: hardware control cq\n- */\n-void i40iw_cq_wq_destroy(struct i40iw_device *iwdev, struct i40iw_sc_cq *cq)\n-{\n-\tenum i40iw_status_code status;\n-\tstruct i40iw_cqp_request *cqp_request;\n-\tstruct cqp_commands_info *cqp_info;\n-\n-\tcqp_request = i40iw_get_cqp_request(&iwdev->cqp, true);\n-\tif (!cqp_request)\n-\t\treturn;\n-\n-\tcqp_info = &cqp_request->info;\n-\n-\tcqp_info->cqp_cmd = OP_CQ_DESTROY;\n-\tcqp_info->post_sq = 1;\n-\tcqp_info->in.u.cq_destroy.cq = cq;\n-\tcqp_info->in.u.cq_destroy.scratch = (uintptr_t)cqp_request;\n-\tstatus = i40iw_handle_cqp_op(iwdev, cqp_request);\n-\tif (status)\n-\t\ti40iw_pr_err(\"CQP-OP Destroy QP fail\");\n-}\n-\n-/**\n- * 
i40iw_destroy_cq - destroy cq\n- * @ib_cq: cq pointer\n- * @udata: user data or NULL for kernel object\n- */\n-static void i40iw_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)\n-{\n-\tstruct i40iw_cq *iwcq;\n-\tstruct i40iw_device *iwdev;\n-\tstruct i40iw_sc_cq *cq;\n-\n-\tiwcq = to_iwcq(ib_cq);\n-\tiwdev = to_iwdev(ib_cq->device);\n-\tcq = &iwcq->sc_cq;\n-\ti40iw_cq_wq_destroy(iwdev, cq);\n-\tcq_free_resources(iwdev, iwcq);\n-\ti40iw_rem_devusecount(iwdev);\n-}\n-\n-/**\n- * i40iw_create_cq - create cq\n- * @ibcq: CQ allocated\n- * @attr: attributes for cq\n- * @udata: user data\n- */\n-static int i40iw_create_cq(struct ib_cq *ibcq,\n-\t\t\t const struct ib_cq_init_attr *attr,\n-\t\t\t struct ib_udata *udata)\n-{\n-\tstruct ib_device *ibdev = ibcq->device;\n-\tstruct i40iw_device *iwdev = to_iwdev(ibdev);\n-\tstruct i40iw_cq *iwcq = to_iwcq(ibcq);\n-\tstruct i40iw_pbl *iwpbl;\n-\tu32 cq_num = 0;\n-\tstruct i40iw_sc_cq *cq;\n-\tstruct i40iw_sc_dev *dev = &iwdev->sc_dev;\n-\tstruct i40iw_cq_init_info info = {};\n-\tenum i40iw_status_code status;\n-\tstruct i40iw_cqp_request *cqp_request;\n-\tstruct cqp_commands_info *cqp_info;\n-\tstruct i40iw_cq_uk_init_info *ukinfo = &info.cq_uk_init_info;\n-\tunsigned long flags;\n-\tint err_code;\n-\tint entries = attr->cqe;\n-\n-\tif (iwdev->closing)\n-\t\treturn -ENODEV;\n-\n-\tif (entries > iwdev->max_cqe)\n-\t\treturn -EINVAL;\n-\n-\terr_code = i40iw_alloc_resource(iwdev, iwdev->allocated_cqs,\n-\t\t\t\t\tiwdev->max_cq, &cq_num,\n-\t\t\t\t\t&iwdev->next_cq);\n-\tif (err_code)\n-\t\treturn err_code;\n-\n-\tcq = &iwcq->sc_cq;\n-\tcq->back_cq = (void *)iwcq;\n-\tspin_lock_init(&iwcq->lock);\n-\n-\tinfo.dev = dev;\n-\tukinfo->cq_size = max(entries, 4);\n-\tukinfo->cq_id = cq_num;\n-\tiwcq->ibcq.cqe = info.cq_uk_init_info.cq_size;\n-\tinfo.ceqe_mask = 0;\n-\tif (attr->comp_vector < iwdev->ceqs_count)\n-\t\tinfo.ceq_id = attr->comp_vector;\n-\tinfo.ceq_id_valid = true;\n-\tinfo.ceqe_mask = 1;\n-\tinfo.type = I40IW_CQ_TYPE_IWARP;\n-\tif (udata) {\n-\t\tstruct i40iw_ucontext *ucontext = rdma_udata_to_drv_context(\n-\t\t\tudata, struct i40iw_ucontext, ibucontext);\n-\t\tstruct i40iw_create_cq_req req;\n-\t\tstruct i40iw_cq_mr *cqmr;\n-\n-\t\tmemset(&req, 0, sizeof(req));\n-\t\tiwcq->user_mode = true;\n-\t\tif (ib_copy_from_udata(&req, udata, sizeof(struct i40iw_create_cq_req))) {\n-\t\t\terr_code = -EFAULT;\n-\t\t\tgoto cq_free_resources;\n-\t\t}\n-\n-\t\tspin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);\n-\t\tiwpbl = i40iw_get_pbl((unsigned long)req.user_cq_buffer,\n-\t\t\t\t &ucontext->cq_reg_mem_list);\n-\t\tspin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);\n-\t\tif (!iwpbl) {\n-\t\t\terr_code = -EPROTO;\n-\t\t\tgoto cq_free_resources;\n-\t\t}\n-\n-\t\tiwcq->iwpbl = iwpbl;\n-\t\tiwcq->cq_mem_size = 0;\n-\t\tcqmr = &iwpbl->cq_mr;\n-\t\tinfo.shadow_area_pa = cpu_to_le64(cqmr->shadow);\n-\t\tif (iwpbl->pbl_allocated) {\n-\t\t\tinfo.virtual_map = true;\n-\t\t\tinfo.pbl_chunk_size = 1;\n-\t\t\tinfo.first_pm_pbl_idx = cqmr->cq_pbl.idx;\n-\t\t} else {\n-\t\t\tinfo.cq_base_pa = cqmr->cq_pbl.addr;\n-\t\t}\n-\t} else {\n-\t\t/* Kmode allocations */\n-\t\tint rsize;\n-\t\tint shadow;\n-\n-\t\trsize = info.cq_uk_init_info.cq_size * sizeof(struct i40iw_cqe);\n-\t\trsize = round_up(rsize, 256);\n-\t\tshadow = I40IW_SHADOW_AREA_SIZE << 3;\n-\t\tstatus = i40iw_allocate_dma_mem(dev->hw, &iwcq->kmem,\n-\t\t\t\t\t\trsize + shadow, 256);\n-\t\tif (status) {\n-\t\t\terr_code = -ENOMEM;\n-\t\t\tgoto 
cq_free_resources;\n-\t\t}\n-\t\tukinfo->cq_base = iwcq->kmem.va;\n-\t\tinfo.cq_base_pa = iwcq->kmem.pa;\n-\t\tinfo.shadow_area_pa = info.cq_base_pa + rsize;\n-\t\tukinfo->shadow_area = iwcq->kmem.va + rsize;\n-\t}\n-\n-\tif (dev->iw_priv_cq_ops->cq_init(cq, &info)) {\n-\t\ti40iw_pr_err(\"init cq fail\\n\");\n-\t\terr_code = -EPROTO;\n-\t\tgoto cq_free_resources;\n-\t}\n-\n-\tcqp_request = i40iw_get_cqp_request(&iwdev->cqp, true);\n-\tif (!cqp_request) {\n-\t\terr_code = -ENOMEM;\n-\t\tgoto cq_free_resources;\n-\t}\n-\n-\tcqp_info = &cqp_request->info;\n-\tcqp_info->cqp_cmd = OP_CQ_CREATE;\n-\tcqp_info->post_sq = 1;\n-\tcqp_info->in.u.cq_create.cq = cq;\n-\tcqp_info->in.u.cq_create.scratch = (uintptr_t)cqp_request;\n-\tstatus = i40iw_handle_cqp_op(iwdev, cqp_request);\n-\tif (status) {\n-\t\ti40iw_pr_err(\"CQP-OP Create QP fail\");\n-\t\terr_code = -EPROTO;\n-\t\tgoto cq_free_resources;\n-\t}\n-\n-\tif (udata) {\n-\t\tstruct i40iw_create_cq_resp resp;\n-\n-\t\tmemset(&resp, 0, sizeof(resp));\n-\t\tresp.cq_id = info.cq_uk_init_info.cq_id;\n-\t\tresp.cq_size = info.cq_uk_init_info.cq_size;\n-\t\tif (ib_copy_to_udata(udata, &resp, sizeof(resp))) {\n-\t\t\ti40iw_pr_err(\"copy to user data\\n\");\n-\t\t\terr_code = -EPROTO;\n-\t\t\tgoto cq_destroy;\n-\t\t}\n-\t}\n-\n-\ti40iw_add_devusecount(iwdev);\n-\treturn 0;\n-\n-cq_destroy:\n-\ti40iw_cq_wq_destroy(iwdev, cq);\n-cq_free_resources:\n-\tcq_free_resources(iwdev, iwcq);\n-\treturn err_code;\n-}\n-\n-/**\n- * i40iw_get_user_access - get hw access from IB access\n- * @acc: IB access to return hw access\n- */\n-static inline u16 i40iw_get_user_access(int acc)\n-{\n-\tu16 access = 0;\n-\n-\taccess |= (acc & IB_ACCESS_LOCAL_WRITE) ? I40IW_ACCESS_FLAGS_LOCALWRITE : 0;\n-\taccess |= (acc & IB_ACCESS_REMOTE_WRITE) ? I40IW_ACCESS_FLAGS_REMOTEWRITE : 0;\n-\taccess |= (acc & IB_ACCESS_REMOTE_READ) ? I40IW_ACCESS_FLAGS_REMOTEREAD : 0;\n-\taccess |= (acc & IB_ACCESS_MW_BIND) ? 
I40IW_ACCESS_FLAGS_BIND_WINDOW : 0;\n-\treturn access;\n-}\n-\n-/**\n- * i40iw_free_stag - free stag resource\n- * @iwdev: iwarp device\n- * @stag: stag to free\n- */\n-static void i40iw_free_stag(struct i40iw_device *iwdev, u32 stag)\n-{\n-\tu32 stag_idx;\n-\n-\tstag_idx = (stag & iwdev->mr_stagmask) >> I40IW_CQPSQ_STAG_IDX_SHIFT;\n-\ti40iw_free_resource(iwdev, iwdev->allocated_mrs, stag_idx);\n-\ti40iw_rem_devusecount(iwdev);\n-}\n-\n-/**\n- * i40iw_create_stag - create random stag\n- * @iwdev: iwarp device\n- */\n-static u32 i40iw_create_stag(struct i40iw_device *iwdev)\n-{\n-\tu32 stag = 0;\n-\tu32 stag_index = 0;\n-\tu32 next_stag_index;\n-\tu32 driver_key;\n-\tu32 random;\n-\tu8 consumer_key;\n-\tint ret;\n-\n-\tget_random_bytes(&random, sizeof(random));\n-\tconsumer_key = (u8)random;\n-\n-\tdriver_key = random & ~iwdev->mr_stagmask;\n-\tnext_stag_index = (random & iwdev->mr_stagmask) >> 8;\n-\tnext_stag_index %= iwdev->max_mr;\n-\n-\tret = i40iw_alloc_resource(iwdev,\n-\t\t\t\t iwdev->allocated_mrs, iwdev->max_mr,\n-\t\t\t\t &stag_index, &next_stag_index);\n-\tif (!ret) {\n-\t\tstag = stag_index << I40IW_CQPSQ_STAG_IDX_SHIFT;\n-\t\tstag |= driver_key;\n-\t\tstag += (u32)consumer_key;\n-\t\ti40iw_add_devusecount(iwdev);\n-\t}\n-\treturn stag;\n-}\n-\n-/**\n- * i40iw_next_pbl_addr - Get next pbl address\n- * @pbl: pointer to a pble\n- * @pinfo: info pointer\n- * @idx: index\n- */\n-static inline u64 *i40iw_next_pbl_addr(u64 *pbl,\n-\t\t\t\t struct i40iw_pble_info **pinfo,\n-\t\t\t\t u32 *idx)\n-{\n-\t*idx += 1;\n-\tif ((!(*pinfo)) || (*idx != (*pinfo)->cnt))\n-\t\treturn ++pbl;\n-\t*idx = 0;\n-\t(*pinfo)++;\n-\treturn (u64 *)(*pinfo)->addr;\n-}\n-\n-/**\n- * i40iw_copy_user_pgaddrs - copy user page address to pble's os locally\n- * @iwmr: iwmr for IB's user page addresses\n- * @pbl: ple pointer to save 1 level or 0 level pble\n- * @level: indicated level 0, 1 or 2\n- */\n-static void i40iw_copy_user_pgaddrs(struct i40iw_mr *iwmr,\n-\t\t\t\t u64 *pbl,\n-\t\t\t\t enum i40iw_pble_level level)\n-{\n-\tstruct ib_umem *region = iwmr->region;\n-\tstruct i40iw_pbl *iwpbl = &iwmr->iwpbl;\n-\tstruct i40iw_pble_alloc *palloc = &iwpbl->pble_alloc;\n-\tstruct i40iw_pble_info *pinfo;\n-\tstruct ib_block_iter biter;\n-\tu32 idx = 0;\n-\n-\tpinfo = (level == I40IW_LEVEL_1) ? 
NULL : palloc->level2.leaf;\n-\n-\tif (iwmr->type == IW_MEMREG_TYPE_QP)\n-\t\tiwpbl->qp_mr.sq_page = sg_page(region->sg_head.sgl);\n-\n-\trdma_for_each_block(region->sg_head.sgl, &biter, region->nmap,\n-\t\t\t iwmr->page_size) {\n-\t\t*pbl = rdma_block_iter_dma_address(&biter);\n-\t\tpbl = i40iw_next_pbl_addr(pbl, &pinfo, &idx);\n-\t}\n-}\n-\n-/**\n- * i40iw_check_mem_contiguous - check if pbls stored in arr are contiguous\n- * @arr: lvl1 pbl array\n- * @npages: page count\n- * pg_size: page size\n- *\n- */\n-static bool i40iw_check_mem_contiguous(u64 *arr, u32 npages, u32 pg_size)\n-{\n-\tu32 pg_idx;\n-\n-\tfor (pg_idx = 0; pg_idx < npages; pg_idx++) {\n-\t\tif ((*arr + (pg_size * pg_idx)) != arr[pg_idx])\n-\t\t\treturn false;\n-\t}\n-\treturn true;\n-}\n-\n-/**\n- * i40iw_check_mr_contiguous - check if MR is physically contiguous\n- * @palloc: pbl allocation struct\n- * pg_size: page size\n- */\n-static bool i40iw_check_mr_contiguous(struct i40iw_pble_alloc *palloc, u32 pg_size)\n-{\n-\tstruct i40iw_pble_level2 *lvl2 = &palloc->level2;\n-\tstruct i40iw_pble_info *leaf = lvl2->leaf;\n-\tu64 *arr = NULL;\n-\tu64 *start_addr = NULL;\n-\tint i;\n-\tbool ret;\n-\n-\tif (palloc->level == I40IW_LEVEL_1) {\n-\t\tarr = (u64 *)palloc->level1.addr;\n-\t\tret = i40iw_check_mem_contiguous(arr, palloc->total_cnt, pg_size);\n-\t\treturn ret;\n-\t}\n-\n-\tstart_addr = (u64 *)leaf->addr;\n-\n-\tfor (i = 0; i < lvl2->leaf_cnt; i++, leaf++) {\n-\t\tarr = (u64 *)leaf->addr;\n-\t\tif ((*start_addr + (i * pg_size * PBLE_PER_PAGE)) != *arr)\n-\t\t\treturn false;\n-\t\tret = i40iw_check_mem_contiguous(arr, leaf->cnt, pg_size);\n-\t\tif (!ret)\n-\t\t\treturn false;\n-\t}\n-\n-\treturn true;\n-}\n-\n-/**\n- * i40iw_setup_pbles - copy user pg address to pble's\n- * @iwdev: iwarp device\n- * @iwmr: mr pointer for this memory registration\n- * @use_pbles: flag if to use pble's\n- */\n-static int i40iw_setup_pbles(struct i40iw_device *iwdev,\n-\t\t\t struct i40iw_mr *iwmr,\n-\t\t\t bool use_pbles)\n-{\n-\tstruct i40iw_pbl *iwpbl = &iwmr->iwpbl;\n-\tstruct i40iw_pble_alloc *palloc = &iwpbl->pble_alloc;\n-\tstruct i40iw_pble_info *pinfo;\n-\tu64 *pbl;\n-\tenum i40iw_status_code status;\n-\tenum i40iw_pble_level level = I40IW_LEVEL_1;\n-\n-\tif (use_pbles) {\n-\t\tmutex_lock(&iwdev->pbl_mutex);\n-\t\tstatus = i40iw_get_pble(&iwdev->sc_dev, iwdev->pble_rsrc, palloc, iwmr->page_cnt);\n-\t\tmutex_unlock(&iwdev->pbl_mutex);\n-\t\tif (status)\n-\t\t\treturn -ENOMEM;\n-\n-\t\tiwpbl->pbl_allocated = true;\n-\t\tlevel = palloc->level;\n-\t\tpinfo = (level == I40IW_LEVEL_1) ? 
&palloc->level1 : palloc->level2.leaf;\n-\t\tpbl = (u64 *)pinfo->addr;\n-\t} else {\n-\t\tpbl = iwmr->pgaddrmem;\n-\t}\n-\n-\ti40iw_copy_user_pgaddrs(iwmr, pbl, level);\n-\n-\tif (use_pbles)\n-\t\tiwmr->pgaddrmem[0] = *pbl;\n-\n-\treturn 0;\n-}\n-\n-/**\n- * i40iw_handle_q_mem - handle memory for qp and cq\n- * @iwdev: iwarp device\n- * @req: information for q memory management\n- * @iwpbl: pble struct\n- * @use_pbles: flag to use pble\n- */\n-static int i40iw_handle_q_mem(struct i40iw_device *iwdev,\n-\t\t\t struct i40iw_mem_reg_req *req,\n-\t\t\t struct i40iw_pbl *iwpbl,\n-\t\t\t bool use_pbles)\n-{\n-\tstruct i40iw_pble_alloc *palloc = &iwpbl->pble_alloc;\n-\tstruct i40iw_mr *iwmr = iwpbl->iwmr;\n-\tstruct i40iw_qp_mr *qpmr = &iwpbl->qp_mr;\n-\tstruct i40iw_cq_mr *cqmr = &iwpbl->cq_mr;\n-\tstruct i40iw_hmc_pble *hmc_p;\n-\tu64 *arr = iwmr->pgaddrmem;\n-\tu32 pg_size;\n-\tint err;\n-\tint total;\n-\tbool ret = true;\n-\n-\ttotal = req->sq_pages + req->rq_pages + req->cq_pages;\n-\tpg_size = iwmr->page_size;\n-\n-\terr = i40iw_setup_pbles(iwdev, iwmr, use_pbles);\n-\tif (err)\n-\t\treturn err;\n-\n-\tif (use_pbles && (palloc->level != I40IW_LEVEL_1)) {\n-\t\ti40iw_free_pble(iwdev->pble_rsrc, palloc);\n-\t\tiwpbl->pbl_allocated = false;\n-\t\treturn -ENOMEM;\n-\t}\n-\n-\tif (use_pbles)\n-\t\tarr = (u64 *)palloc->level1.addr;\n-\n-\tif (iwmr->type == IW_MEMREG_TYPE_QP) {\n-\t\thmc_p = &qpmr->sq_pbl;\n-\t\tqpmr->shadow = (dma_addr_t)arr[total];\n-\n-\t\tif (use_pbles) {\n-\t\t\tret = i40iw_check_mem_contiguous(arr, req->sq_pages, pg_size);\n-\t\t\tif (ret)\n-\t\t\t\tret = i40iw_check_mem_contiguous(&arr[req->sq_pages], req->rq_pages, pg_size);\n-\t\t}\n-\n-\t\tif (!ret) {\n-\t\t\thmc_p->idx = palloc->level1.idx;\n-\t\t\thmc_p = &qpmr->rq_pbl;\n-\t\t\thmc_p->idx = palloc->level1.idx + req->sq_pages;\n-\t\t} else {\n-\t\t\thmc_p->addr = arr[0];\n-\t\t\thmc_p = &qpmr->rq_pbl;\n-\t\t\thmc_p->addr = arr[req->sq_pages];\n-\t\t}\n-\t} else {\t\t/* CQ */\n-\t\thmc_p = &cqmr->cq_pbl;\n-\t\tcqmr->shadow = (dma_addr_t)arr[total];\n-\n-\t\tif (use_pbles)\n-\t\t\tret = i40iw_check_mem_contiguous(arr, req->cq_pages, pg_size);\n-\n-\t\tif (!ret)\n-\t\t\thmc_p->idx = palloc->level1.idx;\n-\t\telse\n-\t\t\thmc_p->addr = arr[0];\n-\t}\n-\n-\tif (use_pbles && ret) {\n-\t\ti40iw_free_pble(iwdev->pble_rsrc, palloc);\n-\t\tiwpbl->pbl_allocated = false;\n-\t}\n-\n-\treturn err;\n-}\n-\n-/**\n- * i40iw_hw_alloc_stag - cqp command to allocate stag\n- * @iwdev: iwarp device\n- * @iwmr: iwarp mr pointer\n- */\n-static int i40iw_hw_alloc_stag(struct i40iw_device *iwdev, struct i40iw_mr *iwmr)\n-{\n-\tstruct i40iw_allocate_stag_info *info;\n-\tstruct i40iw_pd *iwpd = to_iwpd(iwmr->ibmr.pd);\n-\tenum i40iw_status_code status;\n-\tint err = 0;\n-\tstruct i40iw_cqp_request *cqp_request;\n-\tstruct cqp_commands_info *cqp_info;\n-\n-\tcqp_request = i40iw_get_cqp_request(&iwdev->cqp, true);\n-\tif (!cqp_request)\n-\t\treturn -ENOMEM;\n-\n-\tcqp_info = &cqp_request->info;\n-\tinfo = &cqp_info->in.u.alloc_stag.info;\n-\tmemset(info, 0, sizeof(*info));\n-\tinfo->page_size = PAGE_SIZE;\n-\tinfo->stag_idx = iwmr->stag >> I40IW_CQPSQ_STAG_IDX_SHIFT;\n-\tinfo->pd_id = iwpd->sc_pd.pd_id;\n-\tinfo->total_len = iwmr->length;\n-\tinfo->remote_access = true;\n-\tcqp_info->cqp_cmd = OP_ALLOC_STAG;\n-\tcqp_info->post_sq = 1;\n-\tcqp_info->in.u.alloc_stag.dev = &iwdev->sc_dev;\n-\tcqp_info->in.u.alloc_stag.scratch = (uintptr_t)cqp_request;\n-\n-\tstatus = i40iw_handle_cqp_op(iwdev, cqp_request);\n-\tif (status) {\n-\t\terr = 
-ENOMEM;\n-\t\ti40iw_pr_err(\"CQP-OP MR Reg fail\");\n-\t}\n-\treturn err;\n-}\n-\n-/**\n- * i40iw_alloc_mr - register stag for fast memory registration\n- * @pd: ibpd pointer\n- * @mr_type: memory for stag registrion\n- * @max_num_sg: man number of pages\n- * @udata: user data or NULL for kernel objects\n- */\n-static struct ib_mr *i40iw_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,\n-\t\t\t\t u32 max_num_sg, struct ib_udata *udata)\n-{\n-\tstruct i40iw_pd *iwpd = to_iwpd(pd);\n-\tstruct i40iw_device *iwdev = to_iwdev(pd->device);\n-\tstruct i40iw_pble_alloc *palloc;\n-\tstruct i40iw_pbl *iwpbl;\n-\tstruct i40iw_mr *iwmr;\n-\tenum i40iw_status_code status;\n-\tu32 stag;\n-\tint err_code = -ENOMEM;\n-\n-\tiwmr = kzalloc(sizeof(*iwmr), GFP_KERNEL);\n-\tif (!iwmr)\n-\t\treturn ERR_PTR(-ENOMEM);\n-\n-\tstag = i40iw_create_stag(iwdev);\n-\tif (!stag) {\n-\t\terr_code = -EOVERFLOW;\n-\t\tgoto err;\n-\t}\n-\tstag &= ~I40IW_CQPSQ_STAG_KEY_MASK;\n-\tiwmr->stag = stag;\n-\tiwmr->ibmr.rkey = stag;\n-\tiwmr->ibmr.lkey = stag;\n-\tiwmr->ibmr.pd = pd;\n-\tiwmr->ibmr.device = pd->device;\n-\tiwpbl = &iwmr->iwpbl;\n-\tiwpbl->iwmr = iwmr;\n-\tiwmr->type = IW_MEMREG_TYPE_MEM;\n-\tpalloc = &iwpbl->pble_alloc;\n-\tiwmr->page_cnt = max_num_sg;\n-\tmutex_lock(&iwdev->pbl_mutex);\n-\tstatus = i40iw_get_pble(&iwdev->sc_dev, iwdev->pble_rsrc, palloc, iwmr->page_cnt);\n-\tmutex_unlock(&iwdev->pbl_mutex);\n-\tif (status)\n-\t\tgoto err1;\n-\n-\tif (palloc->level != I40IW_LEVEL_1)\n-\t\tgoto err2;\n-\terr_code = i40iw_hw_alloc_stag(iwdev, iwmr);\n-\tif (err_code)\n-\t\tgoto err2;\n-\tiwpbl->pbl_allocated = true;\n-\ti40iw_add_pdusecount(iwpd);\n-\treturn &iwmr->ibmr;\n-err2:\n-\ti40iw_free_pble(iwdev->pble_rsrc, palloc);\n-err1:\n-\ti40iw_free_stag(iwdev, stag);\n-err:\n-\tkfree(iwmr);\n-\treturn ERR_PTR(err_code);\n-}\n-\n-/**\n- * i40iw_set_page - populate pbl list for fmr\n- * @ibmr: ib mem to access iwarp mr pointer\n- * @addr: page dma address fro pbl list\n- */\n-static int i40iw_set_page(struct ib_mr *ibmr, u64 addr)\n-{\n-\tstruct i40iw_mr *iwmr = to_iwmr(ibmr);\n-\tstruct i40iw_pbl *iwpbl = &iwmr->iwpbl;\n-\tstruct i40iw_pble_alloc *palloc = &iwpbl->pble_alloc;\n-\tu64 *pbl;\n-\n-\tif (unlikely(iwmr->npages == iwmr->page_cnt))\n-\t\treturn -ENOMEM;\n-\n-\tpbl = (u64 *)palloc->level1.addr;\n-\tpbl[iwmr->npages++] = cpu_to_le64(addr);\n-\treturn 0;\n-}\n-\n-/**\n- * i40iw_map_mr_sg - map of sg list for fmr\n- * @ibmr: ib mem to access iwarp mr pointer\n- * @sg: scatter gather list for fmr\n- * @sg_nents: number of sg pages\n- */\n-static int i40iw_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,\n-\t\t\t int sg_nents, unsigned int *sg_offset)\n-{\n-\tstruct i40iw_mr *iwmr = to_iwmr(ibmr);\n-\n-\tiwmr->npages = 0;\n-\treturn ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, i40iw_set_page);\n-}\n-\n-/**\n- * i40iw_drain_sq - drain the send queue\n- * @ibqp: ib qp pointer\n- */\n-static void i40iw_drain_sq(struct ib_qp *ibqp)\n-{\n-\tstruct i40iw_qp *iwqp = to_iwqp(ibqp);\n-\tstruct i40iw_sc_qp *qp = &iwqp->sc_qp;\n-\n-\tif (I40IW_RING_MORE_WORK(qp->qp_uk.sq_ring))\n-\t\twait_for_completion(&iwqp->sq_drained);\n-}\n-\n-/**\n- * i40iw_drain_rq - drain the receive queue\n- * @ibqp: ib qp pointer\n- */\n-static void i40iw_drain_rq(struct ib_qp *ibqp)\n-{\n-\tstruct i40iw_qp *iwqp = to_iwqp(ibqp);\n-\tstruct i40iw_sc_qp *qp = &iwqp->sc_qp;\n-\n-\tif (I40IW_RING_MORE_WORK(qp->qp_uk.rq_ring))\n-\t\twait_for_completion(&iwqp->rq_drained);\n-}\n-\n-/**\n- * i40iw_hwreg_mr - send cqp command for memory 
registration\n- * @iwdev: iwarp device\n- * @iwmr: iwarp mr pointer\n- * @access: access for MR\n- */\n-static int i40iw_hwreg_mr(struct i40iw_device *iwdev,\n-\t\t\t struct i40iw_mr *iwmr,\n-\t\t\t u16 access)\n-{\n-\tstruct i40iw_pbl *iwpbl = &iwmr->iwpbl;\n-\tstruct i40iw_reg_ns_stag_info *stag_info;\n-\tstruct i40iw_pd *iwpd = to_iwpd(iwmr->ibmr.pd);\n-\tstruct i40iw_pble_alloc *palloc = &iwpbl->pble_alloc;\n-\tenum i40iw_status_code status;\n-\tint err = 0;\n-\tstruct i40iw_cqp_request *cqp_request;\n-\tstruct cqp_commands_info *cqp_info;\n-\n-\tcqp_request = i40iw_get_cqp_request(&iwdev->cqp, true);\n-\tif (!cqp_request)\n-\t\treturn -ENOMEM;\n-\n-\tcqp_info = &cqp_request->info;\n-\tstag_info = &cqp_info->in.u.mr_reg_non_shared.info;\n-\tmemset(stag_info, 0, sizeof(*stag_info));\n-\tstag_info->va = (void *)(unsigned long)iwpbl->user_base;\n-\tstag_info->stag_idx = iwmr->stag >> I40IW_CQPSQ_STAG_IDX_SHIFT;\n-\tstag_info->stag_key = (u8)iwmr->stag;\n-\tstag_info->total_len = iwmr->length;\n-\tstag_info->access_rights = access;\n-\tstag_info->pd_id = iwpd->sc_pd.pd_id;\n-\tstag_info->addr_type = I40IW_ADDR_TYPE_VA_BASED;\n-\tstag_info->page_size = iwmr->page_size;\n-\n-\tif (iwpbl->pbl_allocated) {\n-\t\tif (palloc->level == I40IW_LEVEL_1) {\n-\t\t\tstag_info->first_pm_pbl_index = palloc->level1.idx;\n-\t\t\tstag_info->chunk_size = 1;\n-\t\t} else {\n-\t\t\tstag_info->first_pm_pbl_index = palloc->level2.root.idx;\n-\t\t\tstag_info->chunk_size = 3;\n-\t\t}\n-\t} else {\n-\t\tstag_info->reg_addr_pa = iwmr->pgaddrmem[0];\n-\t}\n-\n-\tcqp_info->cqp_cmd = OP_MR_REG_NON_SHARED;\n-\tcqp_info->post_sq = 1;\n-\tcqp_info->in.u.mr_reg_non_shared.dev = &iwdev->sc_dev;\n-\tcqp_info->in.u.mr_reg_non_shared.scratch = (uintptr_t)cqp_request;\n-\n-\tstatus = i40iw_handle_cqp_op(iwdev, cqp_request);\n-\tif (status) {\n-\t\terr = -ENOMEM;\n-\t\ti40iw_pr_err(\"CQP-OP MR Reg fail\");\n-\t}\n-\treturn err;\n-}\n-\n-/**\n- * i40iw_reg_user_mr - Register a user memory region\n- * @pd: ptr of pd\n- * @start: virtual start address\n- * @length: length of mr\n- * @virt: virtual address\n- * @acc: access of mr\n- * @udata: user data\n- */\n-static struct ib_mr *i40iw_reg_user_mr(struct ib_pd *pd,\n-\t\t\t\t u64 start,\n-\t\t\t\t u64 length,\n-\t\t\t\t u64 virt,\n-\t\t\t\t int acc,\n-\t\t\t\t struct ib_udata *udata)\n-{\n-\tstruct i40iw_pd *iwpd = to_iwpd(pd);\n-\tstruct i40iw_device *iwdev = to_iwdev(pd->device);\n-\tstruct i40iw_ucontext *ucontext = rdma_udata_to_drv_context(\n-\t\tudata, struct i40iw_ucontext, ibucontext);\n-\tstruct i40iw_pble_alloc *palloc;\n-\tstruct i40iw_pbl *iwpbl;\n-\tstruct i40iw_mr *iwmr;\n-\tstruct ib_umem *region;\n-\tstruct i40iw_mem_reg_req req;\n-\tu64 pbl_depth = 0;\n-\tu32 stag = 0;\n-\tu16 access;\n-\tu64 region_length;\n-\tbool use_pbles = false;\n-\tunsigned long flags;\n-\tint err = -ENOSYS;\n-\tint ret;\n-\tint pg_shift;\n-\n-\tif (iwdev->closing)\n-\t\treturn ERR_PTR(-ENODEV);\n-\n-\tif (length > I40IW_MAX_MR_SIZE)\n-\t\treturn ERR_PTR(-EINVAL);\n-\tregion = ib_umem_get(udata, start, length, acc, 0);\n-\tif (IS_ERR(region))\n-\t\treturn (struct ib_mr *)region;\n-\n-\tif (ib_copy_from_udata(&req, udata, sizeof(req))) {\n-\t\tib_umem_release(region);\n-\t\treturn ERR_PTR(-EFAULT);\n-\t}\n-\n-\tiwmr = kzalloc(sizeof(*iwmr), GFP_KERNEL);\n-\tif (!iwmr) {\n-\t\tib_umem_release(region);\n-\t\treturn ERR_PTR(-ENOMEM);\n-\t}\n-\n-\tiwpbl = &iwmr->iwpbl;\n-\tiwpbl->iwmr = iwmr;\n-\tiwmr->region = region;\n-\tiwmr->ibmr.pd = pd;\n-\tiwmr->ibmr.device = 
pd->device;\n-\n-\tiwmr->page_size = PAGE_SIZE;\n-\tif (req.reg_type == IW_MEMREG_TYPE_MEM)\n-\t\tiwmr->page_size = ib_umem_find_best_pgsz(region, SZ_4K | SZ_2M,\n-\t\t\t\t\t\t\t virt);\n-\n-\tregion_length = region->length + (start & (iwmr->page_size - 1));\n-\tpg_shift = ffs(iwmr->page_size) - 1;\n-\tpbl_depth = region_length >> pg_shift;\n-\tpbl_depth += (region_length & (iwmr->page_size - 1)) ? 1 : 0;\n-\tiwmr->length = region->length;\n-\n-\tiwpbl->user_base = virt;\n-\tpalloc = &iwpbl->pble_alloc;\n-\n-\tiwmr->type = req.reg_type;\n-\tiwmr->page_cnt = (u32)pbl_depth;\n-\n-\tswitch (req.reg_type) {\n-\tcase IW_MEMREG_TYPE_QP:\n-\t\tuse_pbles = ((req.sq_pages + req.rq_pages) > 2);\n-\t\terr = i40iw_handle_q_mem(iwdev, &req, iwpbl, use_pbles);\n-\t\tif (err)\n-\t\t\tgoto error;\n-\t\tspin_lock_irqsave(&ucontext->qp_reg_mem_list_lock, flags);\n-\t\tlist_add_tail(&iwpbl->list, &ucontext->qp_reg_mem_list);\n-\t\tiwpbl->on_list = true;\n-\t\tspin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, flags);\n-\t\tbreak;\n-\tcase IW_MEMREG_TYPE_CQ:\n-\t\tuse_pbles = (req.cq_pages > 1);\n-\t\terr = i40iw_handle_q_mem(iwdev, &req, iwpbl, use_pbles);\n-\t\tif (err)\n-\t\t\tgoto error;\n-\n-\t\tspin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);\n-\t\tlist_add_tail(&iwpbl->list, &ucontext->cq_reg_mem_list);\n-\t\tiwpbl->on_list = true;\n-\t\tspin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);\n-\t\tbreak;\n-\tcase IW_MEMREG_TYPE_MEM:\n-\t\tuse_pbles = (iwmr->page_cnt != 1);\n-\t\taccess = I40IW_ACCESS_FLAGS_LOCALREAD;\n-\n-\t\terr = i40iw_setup_pbles(iwdev, iwmr, use_pbles);\n-\t\tif (err)\n-\t\t\tgoto error;\n-\n-\t\tif (use_pbles) {\n-\t\t\tret = i40iw_check_mr_contiguous(palloc, iwmr->page_size);\n-\t\t\tif (ret) {\n-\t\t\t\ti40iw_free_pble(iwdev->pble_rsrc, palloc);\n-\t\t\t\tiwpbl->pbl_allocated = false;\n-\t\t\t}\n-\t\t}\n-\n-\t\taccess |= i40iw_get_user_access(acc);\n-\t\tstag = i40iw_create_stag(iwdev);\n-\t\tif (!stag) {\n-\t\t\terr = -ENOMEM;\n-\t\t\tgoto error;\n-\t\t}\n-\n-\t\tiwmr->stag = stag;\n-\t\tiwmr->ibmr.rkey = stag;\n-\t\tiwmr->ibmr.lkey = stag;\n-\n-\t\terr = i40iw_hwreg_mr(iwdev, iwmr, access);\n-\t\tif (err) {\n-\t\t\ti40iw_free_stag(iwdev, stag);\n-\t\t\tgoto error;\n-\t\t}\n-\n-\t\tbreak;\n-\tdefault:\n-\t\tgoto error;\n-\t}\n-\n-\tiwmr->type = req.reg_type;\n-\tif (req.reg_type == IW_MEMREG_TYPE_MEM)\n-\t\ti40iw_add_pdusecount(iwpd);\n-\treturn &iwmr->ibmr;\n-\n-error:\n-\tif (palloc->level != I40IW_LEVEL_0 && iwpbl->pbl_allocated)\n-\t\ti40iw_free_pble(iwdev->pble_rsrc, palloc);\n-\tib_umem_release(region);\n-\tkfree(iwmr);\n-\treturn ERR_PTR(err);\n-}\n-\n-/**\n- * i40iw_reg_phys_mr - register kernel physical memory\n- * @pd: ibpd pointer\n- * @addr: physical address of memory to register\n- * @size: size of memory to register\n- * @acc: Access rights\n- * @iova_start: start of virtual address for physical buffers\n- */\n-struct ib_mr *i40iw_reg_phys_mr(struct ib_pd *pd,\n-\t\t\t\tu64 addr,\n-\t\t\t\tu64 size,\n-\t\t\t\tint acc,\n-\t\t\t\tu64 *iova_start)\n-{\n-\tstruct i40iw_pd *iwpd = to_iwpd(pd);\n-\tstruct i40iw_device *iwdev = to_iwdev(pd->device);\n-\tstruct i40iw_pbl *iwpbl;\n-\tstruct i40iw_mr *iwmr;\n-\tenum i40iw_status_code status;\n-\tu32 stag;\n-\tu16 access = I40IW_ACCESS_FLAGS_LOCALREAD;\n-\tint ret;\n-\n-\tiwmr = kzalloc(sizeof(*iwmr), GFP_KERNEL);\n-\tif (!iwmr)\n-\t\treturn ERR_PTR(-ENOMEM);\n-\tiwmr->ibmr.pd = pd;\n-\tiwmr->ibmr.device = pd->device;\n-\tiwpbl = &iwmr->iwpbl;\n-\tiwpbl->iwmr = iwmr;\n-\tiwmr->type = 
IW_MEMREG_TYPE_MEM;\n-\tiwpbl->user_base = *iova_start;\n-\tstag = i40iw_create_stag(iwdev);\n-\tif (!stag) {\n-\t\tret = -EOVERFLOW;\n-\t\tgoto err;\n-\t}\n-\taccess |= i40iw_get_user_access(acc);\n-\tiwmr->stag = stag;\n-\tiwmr->ibmr.rkey = stag;\n-\tiwmr->ibmr.lkey = stag;\n-\tiwmr->page_cnt = 1;\n-\tiwmr->pgaddrmem[0] = addr;\n-\tiwmr->length = size;\n-\tstatus = i40iw_hwreg_mr(iwdev, iwmr, access);\n-\tif (status) {\n-\t\ti40iw_free_stag(iwdev, stag);\n-\t\tret = -ENOMEM;\n-\t\tgoto err;\n-\t}\n-\n-\ti40iw_add_pdusecount(iwpd);\n-\treturn &iwmr->ibmr;\n- err:\n-\tkfree(iwmr);\n-\treturn ERR_PTR(ret);\n-}\n-\n-/**\n- * i40iw_get_dma_mr - register physical mem\n- * @pd: ptr of pd\n- * @acc: access for memory\n- */\n-static struct ib_mr *i40iw_get_dma_mr(struct ib_pd *pd, int acc)\n-{\n-\tu64 kva = 0;\n-\n-\treturn i40iw_reg_phys_mr(pd, 0, 0, acc, &kva);\n-}\n-\n-/**\n- * i40iw_del_mem_list - Deleting pbl list entries for CQ/QP\n- * @iwmr: iwmr for IB's user page addresses\n- * @ucontext: ptr to user context\n- */\n-static void i40iw_del_memlist(struct i40iw_mr *iwmr,\n-\t\t\t struct i40iw_ucontext *ucontext)\n-{\n-\tstruct i40iw_pbl *iwpbl = &iwmr->iwpbl;\n-\tunsigned long flags;\n-\n-\tswitch (iwmr->type) {\n-\tcase IW_MEMREG_TYPE_CQ:\n-\t\tspin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);\n-\t\tif (iwpbl->on_list) {\n-\t\t\tiwpbl->on_list = false;\n-\t\t\tlist_del(&iwpbl->list);\n-\t\t}\n-\t\tspin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);\n-\t\tbreak;\n-\tcase IW_MEMREG_TYPE_QP:\n-\t\tspin_lock_irqsave(&ucontext->qp_reg_mem_list_lock, flags);\n-\t\tif (iwpbl->on_list) {\n-\t\t\tiwpbl->on_list = false;\n-\t\t\tlist_del(&iwpbl->list);\n-\t\t}\n-\t\tspin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, flags);\n-\t\tbreak;\n-\tdefault:\n-\t\tbreak;\n-\t}\n-}\n-\n-/**\n- * i40iw_dereg_mr - deregister mr\n- * @ib_mr: mr ptr for dereg\n- */\n-static int i40iw_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata)\n-{\n-\tstruct ib_pd *ibpd = ib_mr->pd;\n-\tstruct i40iw_pd *iwpd = to_iwpd(ibpd);\n-\tstruct i40iw_mr *iwmr = to_iwmr(ib_mr);\n-\tstruct i40iw_device *iwdev = to_iwdev(ib_mr->device);\n-\tenum i40iw_status_code status;\n-\tstruct i40iw_dealloc_stag_info *info;\n-\tstruct i40iw_pbl *iwpbl = &iwmr->iwpbl;\n-\tstruct i40iw_pble_alloc *palloc = &iwpbl->pble_alloc;\n-\tstruct i40iw_cqp_request *cqp_request;\n-\tstruct cqp_commands_info *cqp_info;\n-\tu32 stag_idx;\n-\n-\tib_umem_release(iwmr->region);\n-\n-\tif (iwmr->type != IW_MEMREG_TYPE_MEM) {\n-\t\t/* region is released. only test for userness. 
*/\n-\t\tif (iwmr->region) {\n-\t\t\tstruct i40iw_ucontext *ucontext =\n-\t\t\t\trdma_udata_to_drv_context(\n-\t\t\t\t\tudata,\n-\t\t\t\t\tstruct i40iw_ucontext,\n-\t\t\t\t\tibucontext);\n-\n-\t\t\ti40iw_del_memlist(iwmr, ucontext);\n-\t\t}\n-\t\tif (iwpbl->pbl_allocated && iwmr->type != IW_MEMREG_TYPE_QP)\n-\t\t\ti40iw_free_pble(iwdev->pble_rsrc, palloc);\n-\t\tkfree(iwmr);\n-\t\treturn 0;\n-\t}\n-\n-\tcqp_request = i40iw_get_cqp_request(&iwdev->cqp, true);\n-\tif (!cqp_request)\n-\t\treturn -ENOMEM;\n-\n-\tcqp_info = &cqp_request->info;\n-\tinfo = &cqp_info->in.u.dealloc_stag.info;\n-\tmemset(info, 0, sizeof(*info));\n-\n-\tinfo->pd_id = cpu_to_le32(iwpd->sc_pd.pd_id & 0x00007fff);\n-\tinfo->stag_idx = RS_64_1(ib_mr->rkey, I40IW_CQPSQ_STAG_IDX_SHIFT);\n-\tstag_idx = info->stag_idx;\n-\tinfo->mr = true;\n-\tif (iwpbl->pbl_allocated)\n-\t\tinfo->dealloc_pbl = true;\n-\n-\tcqp_info->cqp_cmd = OP_DEALLOC_STAG;\n-\tcqp_info->post_sq = 1;\n-\tcqp_info->in.u.dealloc_stag.dev = &iwdev->sc_dev;\n-\tcqp_info->in.u.dealloc_stag.scratch = (uintptr_t)cqp_request;\n-\tstatus = i40iw_handle_cqp_op(iwdev, cqp_request);\n-\tif (status)\n-\t\ti40iw_pr_err(\"CQP-OP dealloc failed for stag_idx = 0x%x\\n\", stag_idx);\n-\ti40iw_rem_pdusecount(iwpd, iwdev);\n-\ti40iw_free_stag(iwdev, iwmr->stag);\n-\tif (iwpbl->pbl_allocated)\n-\t\ti40iw_free_pble(iwdev->pble_rsrc, palloc);\n-\tkfree(iwmr);\n-\treturn 0;\n-}\n-\n-/**\n- * hw_rev_show\n- */\n-static ssize_t hw_rev_show(struct device *dev,\n-\t\t\t struct device_attribute *attr, char *buf)\n-{\n-\tstruct i40iw_ib_device *iwibdev =\n-\t\trdma_device_to_drv_device(dev, struct i40iw_ib_device, ibdev);\n-\tu32 hw_rev = iwibdev->iwdev->sc_dev.hw_rev;\n-\n-\treturn sprintf(buf, \"%x\\n\", hw_rev);\n-}\n-static DEVICE_ATTR_RO(hw_rev);\n-\n-/**\n- * hca_type_show\n- */\n-static ssize_t hca_type_show(struct device *dev,\n-\t\t\t struct device_attribute *attr, char *buf)\n-{\n-\treturn sprintf(buf, \"I40IW\\n\");\n-}\n-static DEVICE_ATTR_RO(hca_type);\n-\n-/**\n- * board_id_show\n- */\n-static ssize_t board_id_show(struct device *dev,\n-\t\t\t struct device_attribute *attr, char *buf)\n-{\n-\treturn sprintf(buf, \"%.*s\\n\", 32, \"I40IW Board ID\");\n-}\n-static DEVICE_ATTR_RO(board_id);\n-\n-static struct attribute *i40iw_dev_attributes[] = {\n-\t&dev_attr_hw_rev.attr,\n-\t&dev_attr_hca_type.attr,\n-\t&dev_attr_board_id.attr,\n-\tNULL\n-};\n-\n-static const struct attribute_group i40iw_attr_group = {\n-\t.attrs = i40iw_dev_attributes,\n-};\n-\n-/**\n- * i40iw_copy_sg_list - copy sg list for qp\n- * @sg_list: copied into sg_list\n- * @sgl: copy from sgl\n- * @num_sges: count of sg entries\n- */\n-static void i40iw_copy_sg_list(struct i40iw_sge *sg_list, struct ib_sge *sgl, int num_sges)\n-{\n-\tunsigned int i;\n-\n-\tfor (i = 0; (i < num_sges) && (i < I40IW_MAX_WQ_FRAGMENT_COUNT); i++) {\n-\t\tsg_list[i].tag_off = sgl[i].addr;\n-\t\tsg_list[i].len = sgl[i].length;\n-\t\tsg_list[i].stag = sgl[i].lkey;\n-\t}\n-}\n-\n-/**\n- * i40iw_post_send - kernel application wr\n- * @ibqp: qp ptr for wr\n- * @ib_wr: work request ptr\n- * @bad_wr: return of bad wr if err\n- */\n-static int i40iw_post_send(struct ib_qp *ibqp,\n-\t\t\t const struct ib_send_wr *ib_wr,\n-\t\t\t const struct ib_send_wr **bad_wr)\n-{\n-\tstruct i40iw_qp *iwqp;\n-\tstruct i40iw_qp_uk *ukqp;\n-\tstruct i40iw_post_sq_info info;\n-\tenum i40iw_status_code ret;\n-\tint err = 0;\n-\tunsigned long flags;\n-\tbool inv_stag;\n-\n-\tiwqp = (struct i40iw_qp *)ibqp;\n-\tukqp = 
&iwqp->sc_qp.qp_uk;\n-\n-\tspin_lock_irqsave(&iwqp->lock, flags);\n-\n-\tif (iwqp->flush_issued) {\n-\t\terr = -EINVAL;\n-\t\tgoto out;\n-\t}\n-\n-\twhile (ib_wr) {\n-\t\tinv_stag = false;\n-\t\tmemset(&info, 0, sizeof(info));\n-\t\tinfo.wr_id = (u64)(ib_wr->wr_id);\n-\t\tif ((ib_wr->send_flags & IB_SEND_SIGNALED) || iwqp->sig_all)\n-\t\t\tinfo.signaled = true;\n-\t\tif (ib_wr->send_flags & IB_SEND_FENCE)\n-\t\t\tinfo.read_fence = true;\n-\n-\t\tswitch (ib_wr->opcode) {\n-\t\tcase IB_WR_SEND:\n-\t\t\t/* fall-through */\n-\t\tcase IB_WR_SEND_WITH_INV:\n-\t\t\tif (ib_wr->opcode == IB_WR_SEND) {\n-\t\t\t\tif (ib_wr->send_flags & IB_SEND_SOLICITED)\n-\t\t\t\t\tinfo.op_type = I40IW_OP_TYPE_SEND_SOL;\n-\t\t\t\telse\n-\t\t\t\t\tinfo.op_type = I40IW_OP_TYPE_SEND;\n-\t\t\t} else {\n-\t\t\t\tif (ib_wr->send_flags & IB_SEND_SOLICITED)\n-\t\t\t\t\tinfo.op_type = I40IW_OP_TYPE_SEND_SOL_INV;\n-\t\t\t\telse\n-\t\t\t\t\tinfo.op_type = I40IW_OP_TYPE_SEND_INV;\n-\t\t\t}\n-\n-\t\t\tif (ib_wr->send_flags & IB_SEND_INLINE) {\n-\t\t\t\tinfo.op.inline_send.data = (void *)(unsigned long)ib_wr->sg_list[0].addr;\n-\t\t\t\tinfo.op.inline_send.len = ib_wr->sg_list[0].length;\n-\t\t\t\tret = ukqp->ops.iw_inline_send(ukqp, &info, ib_wr->ex.invalidate_rkey, false);\n-\t\t\t} else {\n-\t\t\t\tinfo.op.send.num_sges = ib_wr->num_sge;\n-\t\t\t\tinfo.op.send.sg_list = (struct i40iw_sge *)ib_wr->sg_list;\n-\t\t\t\tret = ukqp->ops.iw_send(ukqp, &info, ib_wr->ex.invalidate_rkey, false);\n-\t\t\t}\n-\n-\t\t\tif (ret) {\n-\t\t\t\tif (ret == I40IW_ERR_QP_TOOMANY_WRS_POSTED)\n-\t\t\t\t\terr = -ENOMEM;\n-\t\t\t\telse\n-\t\t\t\t\terr = -EINVAL;\n-\t\t\t}\n-\t\t\tbreak;\n-\t\tcase IB_WR_RDMA_WRITE:\n-\t\t\tinfo.op_type = I40IW_OP_TYPE_RDMA_WRITE;\n-\n-\t\t\tif (ib_wr->send_flags & IB_SEND_INLINE) {\n-\t\t\t\tinfo.op.inline_rdma_write.data = (void *)(unsigned long)ib_wr->sg_list[0].addr;\n-\t\t\t\tinfo.op.inline_rdma_write.len = ib_wr->sg_list[0].length;\n-\t\t\t\tinfo.op.inline_rdma_write.rem_addr.tag_off = rdma_wr(ib_wr)->remote_addr;\n-\t\t\t\tinfo.op.inline_rdma_write.rem_addr.stag = rdma_wr(ib_wr)->rkey;\n-\t\t\t\tret = ukqp->ops.iw_inline_rdma_write(ukqp, &info, false);\n-\t\t\t} else {\n-\t\t\t\tinfo.op.rdma_write.lo_sg_list = (void *)ib_wr->sg_list;\n-\t\t\t\tinfo.op.rdma_write.num_lo_sges = ib_wr->num_sge;\n-\t\t\t\tinfo.op.rdma_write.rem_addr.tag_off = rdma_wr(ib_wr)->remote_addr;\n-\t\t\t\tinfo.op.rdma_write.rem_addr.stag = rdma_wr(ib_wr)->rkey;\n-\t\t\t\tret = ukqp->ops.iw_rdma_write(ukqp, &info, false);\n-\t\t\t}\n-\n-\t\t\tif (ret) {\n-\t\t\t\tif (ret == I40IW_ERR_QP_TOOMANY_WRS_POSTED)\n-\t\t\t\t\terr = -ENOMEM;\n-\t\t\t\telse\n-\t\t\t\t\terr = -EINVAL;\n-\t\t\t}\n-\t\t\tbreak;\n-\t\tcase IB_WR_RDMA_READ_WITH_INV:\n-\t\t\tinv_stag = true;\n-\t\t\t/* fall-through*/\n-\t\tcase IB_WR_RDMA_READ:\n-\t\t\tif (ib_wr->num_sge > I40IW_MAX_SGE_RD) {\n-\t\t\t\terr = -EINVAL;\n-\t\t\t\tbreak;\n-\t\t\t}\n-\t\t\tinfo.op_type = I40IW_OP_TYPE_RDMA_READ;\n-\t\t\tinfo.op.rdma_read.rem_addr.tag_off = rdma_wr(ib_wr)->remote_addr;\n-\t\t\tinfo.op.rdma_read.rem_addr.stag = rdma_wr(ib_wr)->rkey;\n-\t\t\tinfo.op.rdma_read.lo_addr.tag_off = ib_wr->sg_list->addr;\n-\t\t\tinfo.op.rdma_read.lo_addr.stag = ib_wr->sg_list->lkey;\n-\t\t\tinfo.op.rdma_read.lo_addr.len = ib_wr->sg_list->length;\n-\t\t\tret = ukqp->ops.iw_rdma_read(ukqp, &info, inv_stag, false);\n-\t\t\tif (ret) {\n-\t\t\t\tif (ret == I40IW_ERR_QP_TOOMANY_WRS_POSTED)\n-\t\t\t\t\terr = -ENOMEM;\n-\t\t\t\telse\n-\t\t\t\t\terr = -EINVAL;\n-\t\t\t}\n-\t\t\tbreak;\n-\t\tcase 
IB_WR_LOCAL_INV:\n-\t\t\tinfo.op_type = I40IW_OP_TYPE_INV_STAG;\n-\t\t\tinfo.op.inv_local_stag.target_stag = ib_wr->ex.invalidate_rkey;\n-\t\t\tret = ukqp->ops.iw_stag_local_invalidate(ukqp, &info, true);\n-\t\t\tif (ret)\n-\t\t\t\terr = -ENOMEM;\n-\t\t\tbreak;\n-\t\tcase IB_WR_REG_MR:\n-\t\t{\n-\t\t\tstruct i40iw_mr *iwmr = to_iwmr(reg_wr(ib_wr)->mr);\n-\t\t\tint flags = reg_wr(ib_wr)->access;\n-\t\t\tstruct i40iw_pble_alloc *palloc = &iwmr->iwpbl.pble_alloc;\n-\t\t\tstruct i40iw_sc_dev *dev = &iwqp->iwdev->sc_dev;\n-\t\t\tstruct i40iw_fast_reg_stag_info info;\n-\n-\t\t\tmemset(&info, 0, sizeof(info));\n-\t\t\tinfo.access_rights = I40IW_ACCESS_FLAGS_LOCALREAD;\n-\t\t\tinfo.access_rights |= i40iw_get_user_access(flags);\n-\t\t\tinfo.stag_key = reg_wr(ib_wr)->key & 0xff;\n-\t\t\tinfo.stag_idx = reg_wr(ib_wr)->key >> 8;\n-\t\t\tinfo.page_size = reg_wr(ib_wr)->mr->page_size;\n-\t\t\tinfo.wr_id = ib_wr->wr_id;\n-\n-\t\t\tinfo.addr_type = I40IW_ADDR_TYPE_VA_BASED;\n-\t\t\tinfo.va = (void *)(uintptr_t)iwmr->ibmr.iova;\n-\t\t\tinfo.total_len = iwmr->ibmr.length;\n-\t\t\tinfo.reg_addr_pa = *(u64 *)palloc->level1.addr;\n-\t\t\tinfo.first_pm_pbl_index = palloc->level1.idx;\n-\t\t\tinfo.local_fence = ib_wr->send_flags & IB_SEND_FENCE;\n-\t\t\tinfo.signaled = ib_wr->send_flags & IB_SEND_SIGNALED;\n-\n-\t\t\tif (iwmr->npages > I40IW_MIN_PAGES_PER_FMR)\n-\t\t\t\tinfo.chunk_size = 1;\n-\n-\t\t\tret = dev->iw_priv_qp_ops->iw_mr_fast_register(&iwqp->sc_qp, &info, true);\n-\t\t\tif (ret)\n-\t\t\t\terr = -ENOMEM;\n-\t\t\tbreak;\n-\t\t}\n-\t\tdefault:\n-\t\t\terr = -EINVAL;\n-\t\t\ti40iw_pr_err(\" upost_send bad opcode = 0x%x\\n\",\n-\t\t\t\t ib_wr->opcode);\n-\t\t\tbreak;\n-\t\t}\n-\n-\t\tif (err)\n-\t\t\tbreak;\n-\t\tib_wr = ib_wr->next;\n-\t}\n-\n-out:\n-\tif (err)\n-\t\t*bad_wr = ib_wr;\n-\telse\n-\t\tukqp->ops.iw_qp_post_wr(ukqp);\n-\tspin_unlock_irqrestore(&iwqp->lock, flags);\n-\n-\treturn err;\n-}\n-\n-/**\n- * i40iw_post_recv - post receive wr for kernel application\n- * @ibqp: ib qp pointer\n- * @ib_wr: work request for receive\n- * @bad_wr: bad wr caused an error\n- */\n-static int i40iw_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *ib_wr,\n-\t\t\t const struct ib_recv_wr **bad_wr)\n-{\n-\tstruct i40iw_qp *iwqp;\n-\tstruct i40iw_qp_uk *ukqp;\n-\tstruct i40iw_post_rq_info post_recv;\n-\tstruct i40iw_sge sg_list[I40IW_MAX_WQ_FRAGMENT_COUNT];\n-\tenum i40iw_status_code ret = 0;\n-\tunsigned long flags;\n-\tint err = 0;\n-\n-\tiwqp = (struct i40iw_qp *)ibqp;\n-\tukqp = &iwqp->sc_qp.qp_uk;\n-\n-\tmemset(&post_recv, 0, sizeof(post_recv));\n-\tspin_lock_irqsave(&iwqp->lock, flags);\n-\n-\tif (iwqp->flush_issued) {\n-\t\terr = -EINVAL;\n-\t\tgoto out;\n-\t}\n-\n-\twhile (ib_wr) {\n-\t\tpost_recv.num_sges = ib_wr->num_sge;\n-\t\tpost_recv.wr_id = ib_wr->wr_id;\n-\t\ti40iw_copy_sg_list(sg_list, ib_wr->sg_list, ib_wr->num_sge);\n-\t\tpost_recv.sg_list = sg_list;\n-\t\tret = ukqp->ops.iw_post_receive(ukqp, &post_recv);\n-\t\tif (ret) {\n-\t\t\ti40iw_pr_err(\" post_recv err %d\\n\", ret);\n-\t\t\tif (ret == I40IW_ERR_QP_TOOMANY_WRS_POSTED)\n-\t\t\t\terr = -ENOMEM;\n-\t\t\telse\n-\t\t\t\terr = -EINVAL;\n-\t\t\t*bad_wr = ib_wr;\n-\t\t\tgoto out;\n-\t\t}\n-\t\tib_wr = ib_wr->next;\n-\t}\n- out:\n-\tspin_unlock_irqrestore(&iwqp->lock, flags);\n-\treturn err;\n-}\n-\n-/**\n- * i40iw_poll_cq - poll cq for completion (kernel apps)\n- * @ibcq: cq to poll\n- * @num_entries: number of entries to poll\n- * @entry: wr of entry completed\n- */\n-static int i40iw_poll_cq(struct ib_cq *ibcq,\n-\t\t\t int 
num_entries,\n-\t\t\t struct ib_wc *entry)\n-{\n-\tstruct i40iw_cq *iwcq;\n-\tint cqe_count = 0;\n-\tstruct i40iw_cq_poll_info cq_poll_info;\n-\tenum i40iw_status_code ret;\n-\tstruct i40iw_cq_uk *ukcq;\n-\tstruct i40iw_sc_qp *qp;\n-\tstruct i40iw_qp *iwqp;\n-\tunsigned long flags;\n-\n-\tiwcq = (struct i40iw_cq *)ibcq;\n-\tukcq = &iwcq->sc_cq.cq_uk;\n-\n-\tspin_lock_irqsave(&iwcq->lock, flags);\n-\twhile (cqe_count < num_entries) {\n-\t\tret = ukcq->ops.iw_cq_poll_completion(ukcq, &cq_poll_info);\n-\t\tif (ret == I40IW_ERR_QUEUE_EMPTY) {\n-\t\t\tbreak;\n-\t\t} else if (ret == I40IW_ERR_QUEUE_DESTROYED) {\n-\t\t\tcontinue;\n-\t\t} else if (ret) {\n-\t\t\tif (!cqe_count)\n-\t\t\t\tcqe_count = -1;\n-\t\t\tbreak;\n-\t\t}\n-\t\tentry->wc_flags = 0;\n-\t\tentry->wr_id = cq_poll_info.wr_id;\n-\t\tif (cq_poll_info.error) {\n-\t\t\tentry->status = IB_WC_WR_FLUSH_ERR;\n-\t\t\tentry->vendor_err = cq_poll_info.major_err << 16 | cq_poll_info.minor_err;\n-\t\t} else {\n-\t\t\tentry->status = IB_WC_SUCCESS;\n-\t\t}\n-\n-\t\tswitch (cq_poll_info.op_type) {\n-\t\tcase I40IW_OP_TYPE_RDMA_WRITE:\n-\t\t\tentry->opcode = IB_WC_RDMA_WRITE;\n-\t\t\tbreak;\n-\t\tcase I40IW_OP_TYPE_RDMA_READ_INV_STAG:\n-\t\tcase I40IW_OP_TYPE_RDMA_READ:\n-\t\t\tentry->opcode = IB_WC_RDMA_READ;\n-\t\t\tbreak;\n-\t\tcase I40IW_OP_TYPE_SEND_SOL:\n-\t\tcase I40IW_OP_TYPE_SEND_SOL_INV:\n-\t\tcase I40IW_OP_TYPE_SEND_INV:\n-\t\tcase I40IW_OP_TYPE_SEND:\n-\t\t\tentry->opcode = IB_WC_SEND;\n-\t\t\tbreak;\n-\t\tcase I40IW_OP_TYPE_REC:\n-\t\t\tentry->opcode = IB_WC_RECV;\n-\t\t\tbreak;\n-\t\tdefault:\n-\t\t\tentry->opcode = IB_WC_RECV;\n-\t\t\tbreak;\n-\t\t}\n-\n-\t\tentry->ex.imm_data = 0;\n-\t\tqp = (struct i40iw_sc_qp *)cq_poll_info.qp_handle;\n-\t\tentry->qp = (struct ib_qp *)qp->back_qp;\n-\t\tentry->src_qp = cq_poll_info.qp_id;\n-\t\tiwqp = (struct i40iw_qp *)qp->back_qp;\n-\t\tif (iwqp->iwarp_state > I40IW_QP_STATE_RTS) {\n-\t\t\tif (!I40IW_RING_MORE_WORK(qp->qp_uk.sq_ring))\n-\t\t\t\tcomplete(&iwqp->sq_drained);\n-\t\t\tif (!I40IW_RING_MORE_WORK(qp->qp_uk.rq_ring))\n-\t\t\t\tcomplete(&iwqp->rq_drained);\n-\t\t}\n-\t\tentry->byte_len = cq_poll_info.bytes_xfered;\n-\t\tentry++;\n-\t\tcqe_count++;\n-\t}\n-\tspin_unlock_irqrestore(&iwcq->lock, flags);\n-\treturn cqe_count;\n-}\n-\n-/**\n- * i40iw_req_notify_cq - arm cq kernel application\n- * @ibcq: cq to arm\n- * @notify_flags: notofication flags\n- */\n-static int i40iw_req_notify_cq(struct ib_cq *ibcq,\n-\t\t\t enum ib_cq_notify_flags notify_flags)\n-{\n-\tstruct i40iw_cq *iwcq;\n-\tstruct i40iw_cq_uk *ukcq;\n-\tunsigned long flags;\n-\tenum i40iw_completion_notify cq_notify = IW_CQ_COMPL_EVENT;\n-\n-\tiwcq = (struct i40iw_cq *)ibcq;\n-\tukcq = &iwcq->sc_cq.cq_uk;\n-\tif (notify_flags == IB_CQ_SOLICITED)\n-\t\tcq_notify = IW_CQ_COMPL_SOLICITED;\n-\tspin_lock_irqsave(&iwcq->lock, flags);\n-\tukcq->ops.iw_cq_request_notification(ukcq, cq_notify);\n-\tspin_unlock_irqrestore(&iwcq->lock, flags);\n-\treturn 0;\n-}\n-\n-/**\n- * i40iw_port_immutable - return port's immutable data\n- * @ibdev: ib dev struct\n- * @port_num: port number\n- * @immutable: immutable data for the port return\n- */\n-static int i40iw_port_immutable(struct ib_device *ibdev, u8 port_num,\n-\t\t\t\tstruct ib_port_immutable *immutable)\n-{\n-\tstruct ib_port_attr attr;\n-\tint err;\n-\n-\timmutable->core_cap_flags = RDMA_CORE_PORT_IWARP;\n-\n-\terr = ib_query_port(ibdev, port_num, &attr);\n-\n-\tif (err)\n-\t\treturn err;\n-\n-\timmutable->pkey_tbl_len = attr.pkey_tbl_len;\n-\timmutable->gid_tbl_len = 
attr.gid_tbl_len;\n-\n-\treturn 0;\n-}\n-\n-static const char * const i40iw_hw_stat_names[] = {\n-\t// 32bit names\n-\t[I40IW_HW_STAT_INDEX_IP4RXDISCARD] = \"ip4InDiscards\",\n-\t[I40IW_HW_STAT_INDEX_IP4RXTRUNC] = \"ip4InTruncatedPkts\",\n-\t[I40IW_HW_STAT_INDEX_IP4TXNOROUTE] = \"ip4OutNoRoutes\",\n-\t[I40IW_HW_STAT_INDEX_IP6RXDISCARD] = \"ip6InDiscards\",\n-\t[I40IW_HW_STAT_INDEX_IP6RXTRUNC] = \"ip6InTruncatedPkts\",\n-\t[I40IW_HW_STAT_INDEX_IP6TXNOROUTE] = \"ip6OutNoRoutes\",\n-\t[I40IW_HW_STAT_INDEX_TCPRTXSEG] = \"tcpRetransSegs\",\n-\t[I40IW_HW_STAT_INDEX_TCPRXOPTERR] = \"tcpInOptErrors\",\n-\t[I40IW_HW_STAT_INDEX_TCPRXPROTOERR] = \"tcpInProtoErrors\",\n-\t// 64bit names\n-\t[I40IW_HW_STAT_INDEX_IP4RXOCTS + I40IW_HW_STAT_INDEX_MAX_32] =\n-\t\t\"ip4InOctets\",\n-\t[I40IW_HW_STAT_INDEX_IP4RXPKTS + I40IW_HW_STAT_INDEX_MAX_32] =\n-\t\t\"ip4InPkts\",\n-\t[I40IW_HW_STAT_INDEX_IP4RXFRAGS + I40IW_HW_STAT_INDEX_MAX_32] =\n-\t\t\"ip4InReasmRqd\",\n-\t[I40IW_HW_STAT_INDEX_IP4RXMCPKTS + I40IW_HW_STAT_INDEX_MAX_32] =\n-\t\t\"ip4InMcastPkts\",\n-\t[I40IW_HW_STAT_INDEX_IP4TXOCTS + I40IW_HW_STAT_INDEX_MAX_32] =\n-\t\t\"ip4OutOctets\",\n-\t[I40IW_HW_STAT_INDEX_IP4TXPKTS + I40IW_HW_STAT_INDEX_MAX_32] =\n-\t\t\"ip4OutPkts\",\n-\t[I40IW_HW_STAT_INDEX_IP4TXFRAGS + I40IW_HW_STAT_INDEX_MAX_32] =\n-\t\t\"ip4OutSegRqd\",\n-\t[I40IW_HW_STAT_INDEX_IP4TXMCPKTS + I40IW_HW_STAT_INDEX_MAX_32] =\n-\t\t\"ip4OutMcastPkts\",\n-\t[I40IW_HW_STAT_INDEX_IP6RXOCTS + I40IW_HW_STAT_INDEX_MAX_32] =\n-\t\t\"ip6InOctets\",\n-\t[I40IW_HW_STAT_INDEX_IP6RXPKTS + I40IW_HW_STAT_INDEX_MAX_32] =\n-\t\t\"ip6InPkts\",\n-\t[I40IW_HW_STAT_INDEX_IP6RXFRAGS + I40IW_HW_STAT_INDEX_MAX_32] =\n-\t\t\"ip6InReasmRqd\",\n-\t[I40IW_HW_STAT_INDEX_IP6RXMCPKTS + I40IW_HW_STAT_INDEX_MAX_32] =\n-\t\t\"ip6InMcastPkts\",\n-\t[I40IW_HW_STAT_INDEX_IP6TXOCTS + I40IW_HW_STAT_INDEX_MAX_32] =\n-\t\t\"ip6OutOctets\",\n-\t[I40IW_HW_STAT_INDEX_IP6TXPKTS + I40IW_HW_STAT_INDEX_MAX_32] =\n-\t\t\"ip6OutPkts\",\n-\t[I40IW_HW_STAT_INDEX_IP6TXFRAGS + I40IW_HW_STAT_INDEX_MAX_32] =\n-\t\t\"ip6OutSegRqd\",\n-\t[I40IW_HW_STAT_INDEX_IP6TXMCPKTS + I40IW_HW_STAT_INDEX_MAX_32] =\n-\t\t\"ip6OutMcastPkts\",\n-\t[I40IW_HW_STAT_INDEX_TCPRXSEGS + I40IW_HW_STAT_INDEX_MAX_32] =\n-\t\t\"tcpInSegs\",\n-\t[I40IW_HW_STAT_INDEX_TCPTXSEG + I40IW_HW_STAT_INDEX_MAX_32] =\n-\t\t\"tcpOutSegs\",\n-\t[I40IW_HW_STAT_INDEX_RDMARXRDS + I40IW_HW_STAT_INDEX_MAX_32] =\n-\t\t\"iwInRdmaReads\",\n-\t[I40IW_HW_STAT_INDEX_RDMARXSNDS + I40IW_HW_STAT_INDEX_MAX_32] =\n-\t\t\"iwInRdmaSends\",\n-\t[I40IW_HW_STAT_INDEX_RDMARXWRS + I40IW_HW_STAT_INDEX_MAX_32] =\n-\t\t\"iwInRdmaWrites\",\n-\t[I40IW_HW_STAT_INDEX_RDMATXRDS + I40IW_HW_STAT_INDEX_MAX_32] =\n-\t\t\"iwOutRdmaReads\",\n-\t[I40IW_HW_STAT_INDEX_RDMATXSNDS + I40IW_HW_STAT_INDEX_MAX_32] =\n-\t\t\"iwOutRdmaSends\",\n-\t[I40IW_HW_STAT_INDEX_RDMATXWRS + I40IW_HW_STAT_INDEX_MAX_32] =\n-\t\t\"iwOutRdmaWrites\",\n-\t[I40IW_HW_STAT_INDEX_RDMAVBND + I40IW_HW_STAT_INDEX_MAX_32] =\n-\t\t\"iwRdmaBnd\",\n-\t[I40IW_HW_STAT_INDEX_RDMAVINV + I40IW_HW_STAT_INDEX_MAX_32] =\n-\t\t\"iwRdmaInv\"\n-};\n-\n-static void i40iw_get_dev_fw_str(struct ib_device *dev, char *str)\n-{\n-\tu32 firmware_version = I40IW_FW_VERSION;\n-\n-\tsnprintf(str, IB_FW_VERSION_NAME_MAX, \"%u.%u\", firmware_version,\n-\t\t (firmware_version & 0x000000ff));\n-}\n-\n-/**\n- * i40iw_alloc_hw_stats - Allocate a hw stats structure\n- * @ibdev: device pointer from stack\n- * @port_num: port number\n- */\n-static struct rdma_hw_stats *i40iw_alloc_hw_stats(struct ib_device *ibdev,\n-\t\t\t\t\t\t u8 
port_num)\n-{\n-\tstruct i40iw_device *iwdev = to_iwdev(ibdev);\n-\tstruct i40iw_sc_dev *dev = &iwdev->sc_dev;\n-\tint num_counters = I40IW_HW_STAT_INDEX_MAX_32 +\n-\t\tI40IW_HW_STAT_INDEX_MAX_64;\n-\tunsigned long lifespan = RDMA_HW_STATS_DEFAULT_LIFESPAN;\n-\n-\tBUILD_BUG_ON(ARRAY_SIZE(i40iw_hw_stat_names) !=\n-\t\t (I40IW_HW_STAT_INDEX_MAX_32 +\n-\t\t I40IW_HW_STAT_INDEX_MAX_64));\n-\n-\t/*\n-\t * PFs get the default update lifespan, but VFs only update once\n-\t * per second\n-\t */\n-\tif (!dev->is_pf)\n-\t\tlifespan = 1000;\n-\treturn rdma_alloc_hw_stats_struct(i40iw_hw_stat_names, num_counters,\n-\t\t\t\t\t lifespan);\n-}\n-\n-/**\n- * i40iw_get_hw_stats - Populates the rdma_hw_stats structure\n- * @ibdev: device pointer from stack\n- * @stats: stats pointer from stack\n- * @port_num: port number\n- * @index: which hw counter the stack is requesting we update\n- */\n-static int i40iw_get_hw_stats(struct ib_device *ibdev,\n-\t\t\t struct rdma_hw_stats *stats,\n-\t\t\t u8 port_num, int index)\n-{\n-\tstruct i40iw_device *iwdev = to_iwdev(ibdev);\n-\tstruct i40iw_sc_dev *dev = &iwdev->sc_dev;\n-\tstruct i40iw_vsi_pestat *devstat = iwdev->vsi.pestat;\n-\tstruct i40iw_dev_hw_stats *hw_stats = &devstat->hw_stats;\n-\n-\tif (dev->is_pf) {\n-\t\ti40iw_hw_stats_read_all(devstat, &devstat->hw_stats);\n-\t} else {\n-\t\tif (i40iw_vchnl_vf_get_pe_stats(dev, &devstat->hw_stats))\n-\t\t\treturn -ENOSYS;\n-\t}\n-\n-\tmemcpy(&stats->value[0], hw_stats, sizeof(*hw_stats));\n-\n-\treturn stats->num_counters;\n-}\n-\n-/**\n- * i40iw_query_gid - Query port GID\n- * @ibdev: device pointer from stack\n- * @port: port number\n- * @index: Entry index\n- * @gid: Global ID\n- */\n-static int i40iw_query_gid(struct ib_device *ibdev,\n-\t\t\t u8 port,\n-\t\t\t int index,\n-\t\t\t union ib_gid *gid)\n-{\n-\tstruct i40iw_device *iwdev = to_iwdev(ibdev);\n-\n-\tmemset(gid->raw, 0, sizeof(gid->raw));\n-\tether_addr_copy(gid->raw, iwdev->netdev->dev_addr);\n-\treturn 0;\n-}\n-\n-/**\n- * i40iw_query_pkey - Query partition key\n- * @ibdev: device pointer from stack\n- * @port: port number\n- * @index: index of pkey\n- * @pkey: pointer to store the pkey\n- */\n-static int i40iw_query_pkey(struct ib_device *ibdev,\n-\t\t\t u8 port,\n-\t\t\t u16 index,\n-\t\t\t u16 *pkey)\n-{\n-\t*pkey = 0;\n-\treturn 0;\n-}\n-\n-static const struct ib_device_ops i40iw_dev_ops = {\n-\t.owner = THIS_MODULE,\n-\t.driver_id = RDMA_DRIVER_I40IW,\n-\t/* NOTE: Older kernels wrongly use 0 for the uverbs_abi_ver */\n-\t.uverbs_abi_ver = I40IW_ABI_VER,\n-\n-\t.alloc_hw_stats = i40iw_alloc_hw_stats,\n-\t.alloc_mr = i40iw_alloc_mr,\n-\t.alloc_pd = i40iw_alloc_pd,\n-\t.alloc_ucontext = i40iw_alloc_ucontext,\n-\t.create_cq = i40iw_create_cq,\n-\t.create_qp = i40iw_create_qp,\n-\t.dealloc_pd = i40iw_dealloc_pd,\n-\t.dealloc_ucontext = i40iw_dealloc_ucontext,\n-\t.dereg_mr = i40iw_dereg_mr,\n-\t.destroy_cq = i40iw_destroy_cq,\n-\t.destroy_qp = i40iw_destroy_qp,\n-\t.drain_rq = i40iw_drain_rq,\n-\t.drain_sq = i40iw_drain_sq,\n-\t.get_dev_fw_str = i40iw_get_dev_fw_str,\n-\t.get_dma_mr = i40iw_get_dma_mr,\n-\t.get_hw_stats = i40iw_get_hw_stats,\n-\t.get_port_immutable = i40iw_port_immutable,\n-\t.iw_accept = i40iw_accept,\n-\t.iw_add_ref = i40iw_add_ref,\n-\t.iw_connect = i40iw_connect,\n-\t.iw_create_listen = i40iw_create_listen,\n-\t.iw_destroy_listen = i40iw_destroy_listen,\n-\t.iw_get_qp = i40iw_get_qp,\n-\t.iw_reject = i40iw_reject,\n-\t.iw_rem_ref = i40iw_rem_ref,\n-\t.map_mr_sg = i40iw_map_mr_sg,\n-\t.mmap = i40iw_mmap,\n-\t.modify_qp = 
i40iw_modify_qp,\n-\t.poll_cq = i40iw_poll_cq,\n-\t.post_recv = i40iw_post_recv,\n-\t.post_send = i40iw_post_send,\n-\t.query_device = i40iw_query_device,\n-\t.query_gid = i40iw_query_gid,\n-\t.query_pkey = i40iw_query_pkey,\n-\t.query_port = i40iw_query_port,\n-\t.query_qp = i40iw_query_qp,\n-\t.reg_user_mr = i40iw_reg_user_mr,\n-\t.req_notify_cq = i40iw_req_notify_cq,\n-\tINIT_RDMA_OBJ_SIZE(ib_pd, i40iw_pd, ibpd),\n-\tINIT_RDMA_OBJ_SIZE(ib_cq, i40iw_cq, ibcq),\n-\tINIT_RDMA_OBJ_SIZE(ib_ucontext, i40iw_ucontext, ibucontext),\n-};\n-\n-/**\n- * i40iw_init_rdma_device - initialization of iwarp device\n- * @iwdev: iwarp device\n- */\n-static struct i40iw_ib_device *i40iw_init_rdma_device(struct i40iw_device *iwdev)\n-{\n-\tstruct i40iw_ib_device *iwibdev;\n-\tstruct net_device *netdev = iwdev->netdev;\n-\tstruct pci_dev *pcidev = (struct pci_dev *)iwdev->hw.dev_context;\n-\n-\tiwibdev = ib_alloc_device(i40iw_ib_device, ibdev);\n-\tif (!iwibdev) {\n-\t\ti40iw_pr_err(\"iwdev == NULL\\n\");\n-\t\treturn NULL;\n-\t}\n-\tiwdev->iwibdev = iwibdev;\n-\tiwibdev->iwdev = iwdev;\n-\n-\tiwibdev->ibdev.node_type = RDMA_NODE_RNIC;\n-\tether_addr_copy((u8 *)&iwibdev->ibdev.node_guid, netdev->dev_addr);\n-\n-\tiwibdev->ibdev.uverbs_cmd_mask =\n-\t (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |\n-\t (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |\n-\t (1ull << IB_USER_VERBS_CMD_QUERY_PORT) |\n-\t (1ull << IB_USER_VERBS_CMD_ALLOC_PD) |\n-\t (1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |\n-\t (1ull << IB_USER_VERBS_CMD_REG_MR) |\n-\t (1ull << IB_USER_VERBS_CMD_DEREG_MR) |\n-\t (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |\n-\t (1ull << IB_USER_VERBS_CMD_CREATE_CQ) |\n-\t (1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |\n-\t (1ull << IB_USER_VERBS_CMD_REQ_NOTIFY_CQ) |\n-\t (1ull << IB_USER_VERBS_CMD_CREATE_QP) |\n-\t (1ull << IB_USER_VERBS_CMD_MODIFY_QP) |\n-\t (1ull << IB_USER_VERBS_CMD_QUERY_QP) |\n-\t (1ull << IB_USER_VERBS_CMD_POLL_CQ) |\n-\t (1ull << IB_USER_VERBS_CMD_CREATE_AH) |\n-\t (1ull << IB_USER_VERBS_CMD_DESTROY_AH) |\n-\t (1ull << IB_USER_VERBS_CMD_DESTROY_QP) |\n-\t (1ull << IB_USER_VERBS_CMD_POST_RECV) |\n-\t (1ull << IB_USER_VERBS_CMD_POST_SEND);\n-\tiwibdev->ibdev.phys_port_cnt = 1;\n-\tiwibdev->ibdev.num_comp_vectors = iwdev->ceqs_count;\n-\tiwibdev->ibdev.dev.parent = &pcidev->dev;\n-\tmemcpy(iwibdev->ibdev.iw_ifname, netdev->name,\n-\t sizeof(iwibdev->ibdev.iw_ifname));\n-\tib_set_device_ops(&iwibdev->ibdev, &i40iw_dev_ops);\n-\n-\treturn iwibdev;\n-}\n-\n-/**\n- * i40iw_port_ibevent - indicate port event\n- * @iwdev: iwarp device\n- */\n-void i40iw_port_ibevent(struct i40iw_device *iwdev)\n-{\n-\tstruct i40iw_ib_device *iwibdev = iwdev->iwibdev;\n-\tstruct ib_event event;\n-\n-\tevent.device = &iwibdev->ibdev;\n-\tevent.element.port_num = 1;\n-\tevent.event = iwdev->iw_status ? 
IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR;\n-\tib_dispatch_event(&event);\n-}\n-\n-/**\n- * i40iw_destroy_rdma_device - destroy rdma device and free resources\n- * @iwibdev: IB device ptr\n- */\n-void i40iw_destroy_rdma_device(struct i40iw_ib_device *iwibdev)\n-{\n-\tib_unregister_device(&iwibdev->ibdev);\n-\twait_event_timeout(iwibdev->iwdev->close_wq,\n-\t\t\t !atomic64_read(&iwibdev->iwdev->use_count),\n-\t\t\t I40IW_EVENT_TIMEOUT);\n-\tib_dealloc_device(&iwibdev->ibdev);\n-}\n-\n-/**\n- * i40iw_register_rdma_device - register iwarp device to IB\n- * @iwdev: iwarp device\n- */\n-int i40iw_register_rdma_device(struct i40iw_device *iwdev)\n-{\n-\tint ret;\n-\tstruct i40iw_ib_device *iwibdev;\n-\n-\tiwdev->iwibdev = i40iw_init_rdma_device(iwdev);\n-\tif (!iwdev->iwibdev)\n-\t\treturn -ENOMEM;\n-\tiwibdev = iwdev->iwibdev;\n-\trdma_set_device_sysfs_group(&iwibdev->ibdev, &i40iw_attr_group);\n-\tret = ib_register_device(&iwibdev->ibdev, \"i40iw%d\");\n-\tif (ret)\n-\t\tgoto error;\n-\n-\treturn 0;\n-error:\n-\tib_dealloc_device(&iwdev->iwibdev->ibdev);\n-\treturn ret;\n-}\ndiff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.h b/drivers/infiniband/hw/i40iw/i40iw_verbs.h\ndeleted file mode 100644\nindex 3a41375..0000000\n--- a/drivers/infiniband/hw/i40iw/i40iw_verbs.h\n+++ /dev/null\n@@ -1,179 +0,0 @@\n-/*******************************************************************************\n-*\n-* Copyright (c) 2015-2016 Intel Corporation. All rights reserved.\n-*\n-* This software is available to you under a choice of one of two\n-* licenses. You may choose to be licensed under the terms of the GNU\n-* General Public License (GPL) Version 2, available from the file\n-* COPYING in the main directory of this source tree, or the\n-* OpenFabrics.org BSD license below:\n-*\n-* Redistribution and use in source and binary forms, with or\n-* without modification, are permitted provided that the following\n-* conditions are met:\n-*\n-* - Redistributions of source code must retain the above\n-*\tcopyright notice, this list of conditions and the following\n-*\tdisclaimer.\n-*\n-* - Redistributions in binary form must reproduce the above\n-*\tcopyright notice, this list of conditions and the following\n-*\tdisclaimer in the documentation and/or other materials\n-*\tprovided with the distribution.\n-*\n-* THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,\n-* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\n-* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\n-* NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS\n-* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN\n-* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\n-* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n-* SOFTWARE.\n-*\n-*******************************************************************************/\n-\n-#ifndef I40IW_VERBS_H\n-#define I40IW_VERBS_H\n-\n-struct i40iw_ucontext {\n-\tstruct ib_ucontext ibucontext;\n-\tstruct i40iw_device *iwdev;\n-\tstruct list_head cq_reg_mem_list;\n-\tspinlock_t cq_reg_mem_list_lock; /* memory list for cq's */\n-\tstruct list_head qp_reg_mem_list;\n-\tspinlock_t qp_reg_mem_list_lock; /* memory list for qp's */\n-\tint abi_ver;\n-};\n-\n-struct i40iw_pd {\n-\tstruct ib_pd ibpd;\n-\tstruct i40iw_sc_pd sc_pd;\n-\tatomic_t usecount;\n-};\n-\n-struct i40iw_hmc_pble {\n-\tunion {\n-\t\tu32 idx;\n-\t\tdma_addr_t addr;\n-\t};\n-};\n-\n-struct i40iw_cq_mr {\n-\tstruct i40iw_hmc_pble cq_pbl;\n-\tdma_addr_t shadow;\n-};\n-\n-struct i40iw_qp_mr {\n-\tstruct i40iw_hmc_pble sq_pbl;\n-\tstruct i40iw_hmc_pble rq_pbl;\n-\tdma_addr_t shadow;\n-\tstruct page *sq_page;\n-};\n-\n-struct i40iw_pbl {\n-\tstruct list_head list;\n-\tunion {\n-\t\tstruct i40iw_qp_mr qp_mr;\n-\t\tstruct i40iw_cq_mr cq_mr;\n-\t};\n-\n-\tbool pbl_allocated;\n-\tbool on_list;\n-\tu64 user_base;\n-\tstruct i40iw_pble_alloc pble_alloc;\n-\tstruct i40iw_mr *iwmr;\n-};\n-\n-#define MAX_SAVE_PAGE_ADDRS 4\n-struct i40iw_mr {\n-\tunion {\n-\t\tstruct ib_mr ibmr;\n-\t\tstruct ib_mw ibmw;\n-\t\tstruct ib_fmr ibfmr;\n-\t};\n-\tstruct ib_umem *region;\n-\tu16 type;\n-\tu32 page_cnt;\n-\tu64 page_size;\n-\tu32 npages;\n-\tu32 stag;\n-\tu64 length;\n-\tu64 pgaddrmem[MAX_SAVE_PAGE_ADDRS];\n-\tstruct i40iw_pbl iwpbl;\n-};\n-\n-struct i40iw_cq {\n-\tstruct ib_cq ibcq;\n-\tstruct i40iw_sc_cq sc_cq;\n-\tu16 cq_head;\n-\tu16 cq_size;\n-\tu16 cq_number;\n-\tbool user_mode;\n-\tu32 polled_completions;\n-\tu32 cq_mem_size;\n-\tstruct i40iw_dma_mem kmem;\n-\tspinlock_t lock; /* for poll cq */\n-\tstruct i40iw_pbl *iwpbl;\n-};\n-\n-struct disconn_work {\n-\tstruct work_struct work;\n-\tstruct i40iw_qp *iwqp;\n-};\n-\n-struct iw_cm_id;\n-struct ietf_mpa_frame;\n-struct i40iw_ud_file;\n-\n-struct i40iw_qp_kmode {\n-\tstruct i40iw_dma_mem dma_mem;\n-\tu64 *wrid_mem;\n-};\n-\n-struct i40iw_qp {\n-\tstruct ib_qp ibqp;\n-\tstruct i40iw_sc_qp sc_qp;\n-\tstruct i40iw_device *iwdev;\n-\tstruct i40iw_cq *iwscq;\n-\tstruct i40iw_cq *iwrcq;\n-\tstruct i40iw_pd *iwpd;\n-\tstruct i40iw_qp_host_ctx_info ctx_info;\n-\tstruct i40iwarp_offload_info iwarp_info;\n-\tvoid *allocated_buffer;\n-\tatomic_t refcount;\n-\tstruct iw_cm_id *cm_id;\n-\tvoid *cm_node;\n-\tstruct ib_mr *lsmm_mr;\n-\tstruct work_struct work;\n-\tenum ib_qp_state ibqp_state;\n-\tu32 iwarp_state;\n-\tu32 qp_mem_size;\n-\tu32 last_aeq;\n-\tatomic_t close_timer_started;\n-\tspinlock_t lock; /* for post work requests */\n-\tstruct i40iw_qp_context *iwqp_context;\n-\tvoid *pbl_vbase;\n-\tdma_addr_t pbl_pbase;\n-\tstruct page *page;\n-\tu8 active_conn:1;\n-\tu8 user_mode:1;\n-\tu8 hte_added:1;\n-\tu8 flush_issued:1;\n-\tu8 destroyed:1;\n-\tu8 sig_all:1;\n-\tu8 pau_mode:1;\n-\tu8 rsvd:1;\n-\tu16 term_sq_flush_code;\n-\tu16 term_rq_flush_code;\n-\tu8 hw_iwarp_state;\n-\tu8 hw_tcp_state;\n-\tstruct i40iw_qp_kmode kqp;\n-\tstruct i40iw_dma_mem host_ctx;\n-\tstruct timer_list terminate_timer;\n-\tstruct i40iw_pbl iwpbl;\n-\tstruct i40iw_dma_mem q2_ctx_mem;\n-\tstruct i40iw_dma_mem ietf_mem;\n-\tstruct completion 
sq_drained;\n-\tstruct completion rq_drained;\n-};\n-#endif\ndiff --git a/drivers/infiniband/hw/i40iw/i40iw_vf.c b/drivers/infiniband/hw/i40iw/i40iw_vf.c\ndeleted file mode 100644\nindex e33d481..0000000\n--- a/drivers/infiniband/hw/i40iw/i40iw_vf.c\n+++ /dev/null\n@@ -1,85 +0,0 @@\n-/*******************************************************************************\n-*\n-* Copyright (c) 2015-2016 Intel Corporation. All rights reserved.\n-*\n-* This software is available to you under a choice of one of two\n-* licenses. You may choose to be licensed under the terms of the GNU\n-* General Public License (GPL) Version 2, available from the file\n-* COPYING in the main directory of this source tree, or the\n-* OpenFabrics.org BSD license below:\n-*\n-* Redistribution and use in source and binary forms, with or\n-* without modification, are permitted provided that the following\n-* conditions are met:\n-*\n-* - Redistributions of source code must retain the above\n-*\tcopyright notice, this list of conditions and the following\n-*\tdisclaimer.\n-*\n-* - Redistributions in binary form must reproduce the above\n-*\tcopyright notice, this list of conditions and the following\n-*\tdisclaimer in the documentation and/or other materials\n-*\tprovided with the distribution.\n-*\n-* THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,\n-* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\n-* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\n-* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS\n-* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN\n-* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\n-* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n-* SOFTWARE.\n-*\n-*******************************************************************************/\n-\n-#include \"i40iw_osdep.h\"\n-#include \"i40iw_register.h\"\n-#include \"i40iw_status.h\"\n-#include \"i40iw_hmc.h\"\n-#include \"i40iw_d.h\"\n-#include \"i40iw_type.h\"\n-#include \"i40iw_p.h\"\n-#include \"i40iw_vf.h\"\n-\n-/**\n- * i40iw_manage_vf_pble_bp - manage vf pble\n- * @cqp: cqp for cqp' sq wqe\n- * @info: pble info\n- * @scratch: pointer for completion\n- * @post_sq: to post and ring\n- */\n-enum i40iw_status_code i40iw_manage_vf_pble_bp(struct i40iw_sc_cqp *cqp,\n-\t\t\t\t\t struct i40iw_manage_vf_pble_info *info,\n-\t\t\t\t\t u64 scratch,\n-\t\t\t\t\t bool post_sq)\n-{\n-\tu64 *wqe;\n-\tu64 temp, header, pd_pl_pba = 0;\n-\n-\twqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);\n-\tif (!wqe)\n-\t\treturn I40IW_ERR_RING_FULL;\n-\n-\ttemp = LS_64(info->pd_entry_cnt, I40IW_CQPSQ_MVPBP_PD_ENTRY_CNT) |\n-\t LS_64(info->first_pd_index, I40IW_CQPSQ_MVPBP_FIRST_PD_INX) |\n-\t LS_64(info->sd_index, I40IW_CQPSQ_MVPBP_SD_INX);\n-\tset_64bit_val(wqe, 16, temp);\n-\n-\theader = LS_64((info->inv_pd_ent ? 
1 : 0), I40IW_CQPSQ_MVPBP_INV_PD_ENT) |\n-\t LS_64(I40IW_CQP_OP_MANAGE_VF_PBLE_BP, I40IW_CQPSQ_OPCODE) |\n-\t LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);\n-\tset_64bit_val(wqe, 24, header);\n-\n-\tpd_pl_pba = LS_64(info->pd_pl_pba >> 3, I40IW_CQPSQ_MVPBP_PD_PLPBA);\n-\tset_64bit_val(wqe, 32, pd_pl_pba);\n-\n-\ti40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, \"MANAGE VF_PBLE_BP WQE\", wqe, I40IW_CQP_WQE_SIZE * 8);\n-\n-\tif (post_sq)\n-\t\ti40iw_sc_cqp_post_sq(cqp);\n-\treturn 0;\n-}\n-\n-const struct i40iw_vf_cqp_ops iw_vf_cqp_ops = {\n-\ti40iw_manage_vf_pble_bp\n-};\ndiff --git a/drivers/infiniband/hw/i40iw/i40iw_vf.h b/drivers/infiniband/hw/i40iw/i40iw_vf.h\ndeleted file mode 100644\nindex 4359559..0000000\n--- a/drivers/infiniband/hw/i40iw/i40iw_vf.h\n+++ /dev/null\n@@ -1,62 +0,0 @@\n-/*******************************************************************************\n-*\n-* Copyright (c) 2015-2016 Intel Corporation. All rights reserved.\n-*\n-* This software is available to you under a choice of one of two\n-* licenses. You may choose to be licensed under the terms of the GNU\n-* General Public License (GPL) Version 2, available from the file\n-* COPYING in the main directory of this source tree, or the\n-* OpenFabrics.org BSD license below:\n-*\n-* Redistribution and use in source and binary forms, with or\n-* without modification, are permitted provided that the following\n-* conditions are met:\n-*\n-* - Redistributions of source code must retain the above\n-*\tcopyright notice, this list of conditions and the following\n-*\tdisclaimer.\n-*\n-* - Redistributions in binary form must reproduce the above\n-*\tcopyright notice, this list of conditions and the following\n-*\tdisclaimer in the documentation and/or other materials\n-*\tprovided with the distribution.\n-*\n-* THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,\n-* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\n-* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\n-* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS\n-* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN\n-* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\n-* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n-* SOFTWARE.\n-*\n-*******************************************************************************/\n-\n-#ifndef I40IW_VF_H\n-#define I40IW_VF_H\n-\n-struct i40iw_sc_cqp;\n-\n-struct i40iw_manage_vf_pble_info {\n-\tu32 sd_index;\n-\tu16 first_pd_index;\n-\tu16 pd_entry_cnt;\n-\tu8 inv_pd_ent;\n-\tu64 pd_pl_pba;\n-};\n-\n-struct i40iw_vf_cqp_ops {\n-\tenum i40iw_status_code (*manage_vf_pble_bp)(struct i40iw_sc_cqp *,\n-\t\t\t\t\t\t struct i40iw_manage_vf_pble_info *,\n-\t\t\t\t\t\t u64,\n-\t\t\t\t\t\t bool);\n-};\n-\n-enum i40iw_status_code i40iw_manage_vf_pble_bp(struct i40iw_sc_cqp *cqp,\n-\t\t\t\t\t struct i40iw_manage_vf_pble_info *info,\n-\t\t\t\t\t u64 scratch,\n-\t\t\t\t\t bool post_sq);\n-\n-extern const struct i40iw_vf_cqp_ops iw_vf_cqp_ops;\n-\n-#endif\ndiff --git a/drivers/infiniband/hw/i40iw/i40iw_virtchnl.c b/drivers/infiniband/hw/i40iw/i40iw_virtchnl.c\ndeleted file mode 100644\nindex 48fd327..0000000\n--- a/drivers/infiniband/hw/i40iw/i40iw_virtchnl.c\n+++ /dev/null\n@@ -1,756 +0,0 @@\n-/*******************************************************************************\n-*\n-* Copyright (c) 2015-2016 Intel Corporation. All rights reserved.\n-*\n-* This software is available to you under a choice of one of two\n-* licenses. 
You may choose to be licensed under the terms of the GNU\n-* General Public License (GPL) Version 2, available from the file\n-* COPYING in the main directory of this source tree, or the\n-* OpenFabrics.org BSD license below:\n-*\n-* Redistribution and use in source and binary forms, with or\n-* without modification, are permitted provided that the following\n-* conditions are met:\n-*\n-* - Redistributions of source code must retain the above\n-*\tcopyright notice, this list of conditions and the following\n-*\tdisclaimer.\n-*\n-* - Redistributions in binary form must reproduce the above\n-*\tcopyright notice, this list of conditions and the following\n-*\tdisclaimer in the documentation and/or other materials\n-*\tprovided with the distribution.\n-*\n-* THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,\n-* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\n-* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\n-* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS\n-* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN\n-* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\n-* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n-* SOFTWARE.\n-*\n-*******************************************************************************/\n-\n-#include \"i40iw_osdep.h\"\n-#include \"i40iw_register.h\"\n-#include \"i40iw_status.h\"\n-#include \"i40iw_hmc.h\"\n-#include \"i40iw_d.h\"\n-#include \"i40iw_type.h\"\n-#include \"i40iw_p.h\"\n-#include \"i40iw_virtchnl.h\"\n-\n-/**\n- * vchnl_vf_send_get_ver_req - Request Channel version\n- * @dev: IWARP device pointer\n- * @vchnl_req: Virtual channel message request pointer\n- */\n-static enum i40iw_status_code vchnl_vf_send_get_ver_req(struct i40iw_sc_dev *dev,\n-\t\t\t\t\t\t\tstruct i40iw_virtchnl_req *vchnl_req)\n-{\n-\tenum i40iw_status_code ret_code = I40IW_ERR_NOT_READY;\n-\tstruct i40iw_virtchnl_op_buf *vchnl_msg = vchnl_req->vchnl_msg;\n-\n-\tif (!dev->vchnl_up)\n-\t\treturn ret_code;\n-\n-\tmemset(vchnl_msg, 0, sizeof(*vchnl_msg));\n-\tvchnl_msg->iw_chnl_op_ctx = (uintptr_t)vchnl_req;\n-\tvchnl_msg->iw_chnl_buf_len = sizeof(*vchnl_msg);\n-\tvchnl_msg->iw_op_code = I40IW_VCHNL_OP_GET_VER;\n-\tvchnl_msg->iw_op_ver = I40IW_VCHNL_OP_GET_VER_V0;\n-\tret_code = dev->vchnl_if.vchnl_send(dev, 0, (u8 *)vchnl_msg, vchnl_msg->iw_chnl_buf_len);\n-\tif (ret_code)\n-\t\ti40iw_debug(dev, I40IW_DEBUG_VIRT,\n-\t\t\t \"%s: virt channel send failed 0x%x\\n\", __func__, ret_code);\n-\treturn ret_code;\n-}\n-\n-/**\n- * vchnl_vf_send_get_hmc_fcn_req - Request HMC Function from VF\n- * @dev: IWARP device pointer\n- * @vchnl_req: Virtual channel message request pointer\n- */\n-static enum i40iw_status_code vchnl_vf_send_get_hmc_fcn_req(struct i40iw_sc_dev *dev,\n-\t\t\t\t\t\t\t struct i40iw_virtchnl_req *vchnl_req)\n-{\n-\tenum i40iw_status_code ret_code = I40IW_ERR_NOT_READY;\n-\tstruct i40iw_virtchnl_op_buf *vchnl_msg = vchnl_req->vchnl_msg;\n-\n-\tif (!dev->vchnl_up)\n-\t\treturn ret_code;\n-\n-\tmemset(vchnl_msg, 0, sizeof(*vchnl_msg));\n-\tvchnl_msg->iw_chnl_op_ctx = (uintptr_t)vchnl_req;\n-\tvchnl_msg->iw_chnl_buf_len = sizeof(*vchnl_msg);\n-\tvchnl_msg->iw_op_code = I40IW_VCHNL_OP_GET_HMC_FCN;\n-\tvchnl_msg->iw_op_ver = I40IW_VCHNL_OP_GET_HMC_FCN_V0;\n-\tret_code = dev->vchnl_if.vchnl_send(dev, 0, (u8 *)vchnl_msg, vchnl_msg->iw_chnl_buf_len);\n-\tif (ret_code)\n-\t\ti40iw_debug(dev, I40IW_DEBUG_VIRT,\n-\t\t\t \"%s: virt channel send failed 0x%x\\n\", __func__, 
ret_code);\n-\treturn ret_code;\n-}\n-\n-/**\n- * vchnl_vf_send_get_pe_stats_req - Request PE stats from VF\n- * @dev: IWARP device pointer\n- * @vchnl_req: Virtual channel message request pointer\n- */\n-static enum i40iw_status_code vchnl_vf_send_get_pe_stats_req(struct i40iw_sc_dev *dev,\n-\t\t\t\t\t\t\t struct i40iw_virtchnl_req *vchnl_req)\n-{\n-\tenum i40iw_status_code ret_code = I40IW_ERR_NOT_READY;\n-\tstruct i40iw_virtchnl_op_buf *vchnl_msg = vchnl_req->vchnl_msg;\n-\n-\tif (!dev->vchnl_up)\n-\t\treturn ret_code;\n-\n-\tmemset(vchnl_msg, 0, sizeof(*vchnl_msg));\n-\tvchnl_msg->iw_chnl_op_ctx = (uintptr_t)vchnl_req;\n-\tvchnl_msg->iw_chnl_buf_len = sizeof(*vchnl_msg) + sizeof(struct i40iw_dev_hw_stats) - 1;\n-\tvchnl_msg->iw_op_code = I40IW_VCHNL_OP_GET_STATS;\n-\tvchnl_msg->iw_op_ver = I40IW_VCHNL_OP_GET_STATS_V0;\n-\tret_code = dev->vchnl_if.vchnl_send(dev, 0, (u8 *)vchnl_msg, vchnl_msg->iw_chnl_buf_len);\n-\tif (ret_code)\n-\t\ti40iw_debug(dev, I40IW_DEBUG_VIRT,\n-\t\t\t \"%s: virt channel send failed 0x%x\\n\", __func__, ret_code);\n-\treturn ret_code;\n-}\n-\n-/**\n- * vchnl_vf_send_add_hmc_objs_req - Add HMC objects\n- * @dev: IWARP device pointer\n- * @vchnl_req: Virtual channel message request pointer\n- */\n-static enum i40iw_status_code vchnl_vf_send_add_hmc_objs_req(struct i40iw_sc_dev *dev,\n-\t\t\t\t\t\t\t struct i40iw_virtchnl_req *vchnl_req,\n-\t\t\t\t\t\t\t enum i40iw_hmc_rsrc_type rsrc_type,\n-\t\t\t\t\t\t\t u32 start_index,\n-\t\t\t\t\t\t\t u32 rsrc_count)\n-{\n-\tenum i40iw_status_code ret_code = I40IW_ERR_NOT_READY;\n-\tstruct i40iw_virtchnl_op_buf *vchnl_msg = vchnl_req->vchnl_msg;\n-\tstruct i40iw_virtchnl_hmc_obj_range *add_hmc_obj;\n-\n-\tif (!dev->vchnl_up)\n-\t\treturn ret_code;\n-\n-\tadd_hmc_obj = (struct i40iw_virtchnl_hmc_obj_range *)vchnl_msg->iw_chnl_buf;\n-\tmemset(vchnl_msg, 0, sizeof(*vchnl_msg));\n-\tmemset(add_hmc_obj, 0, sizeof(*add_hmc_obj));\n-\tvchnl_msg->iw_chnl_op_ctx = (uintptr_t)vchnl_req;\n-\tvchnl_msg->iw_chnl_buf_len = sizeof(*vchnl_msg) + sizeof(struct i40iw_virtchnl_hmc_obj_range) - 1;\n-\tvchnl_msg->iw_op_code = I40IW_VCHNL_OP_ADD_HMC_OBJ_RANGE;\n-\tvchnl_msg->iw_op_ver = I40IW_VCHNL_OP_ADD_HMC_OBJ_RANGE_V0;\n-\tadd_hmc_obj->obj_type = (u16)rsrc_type;\n-\tadd_hmc_obj->start_index = start_index;\n-\tadd_hmc_obj->obj_count = rsrc_count;\n-\tret_code = dev->vchnl_if.vchnl_send(dev, 0, (u8 *)vchnl_msg, vchnl_msg->iw_chnl_buf_len);\n-\tif (ret_code)\n-\t\ti40iw_debug(dev, I40IW_DEBUG_VIRT,\n-\t\t\t \"%s: virt channel send failed 0x%x\\n\", __func__, ret_code);\n-\treturn ret_code;\n-}\n-\n-/**\n- * vchnl_vf_send_del_hmc_objs_req - del HMC objects\n- * @dev: IWARP device pointer\n- * @vchnl_req: Virtual channel message request pointer\n- * @ rsrc_type - resource type to delete\n- * @ start_index - starting index for resource\n- * @ rsrc_count - number of resource type to delete\n- */\n-static enum i40iw_status_code vchnl_vf_send_del_hmc_objs_req(struct i40iw_sc_dev *dev,\n-\t\t\t\t\t\t\t struct i40iw_virtchnl_req *vchnl_req,\n-\t\t\t\t\t\t\t enum i40iw_hmc_rsrc_type rsrc_type,\n-\t\t\t\t\t\t\t u32 start_index,\n-\t\t\t\t\t\t\t u32 rsrc_count)\n-{\n-\tenum i40iw_status_code ret_code = I40IW_ERR_NOT_READY;\n-\tstruct i40iw_virtchnl_op_buf *vchnl_msg = vchnl_req->vchnl_msg;\n-\tstruct i40iw_virtchnl_hmc_obj_range *add_hmc_obj;\n-\n-\tif (!dev->vchnl_up)\n-\t\treturn ret_code;\n-\n-\tadd_hmc_obj = (struct i40iw_virtchnl_hmc_obj_range *)vchnl_msg->iw_chnl_buf;\n-\tmemset(vchnl_msg, 0, sizeof(*vchnl_msg));\n-\tmemset(add_hmc_obj, 0, 
sizeof(*add_hmc_obj));\n-\tvchnl_msg->iw_chnl_op_ctx = (uintptr_t)vchnl_req;\n-\tvchnl_msg->iw_chnl_buf_len = sizeof(*vchnl_msg) + sizeof(struct i40iw_virtchnl_hmc_obj_range) - 1;\n-\tvchnl_msg->iw_op_code = I40IW_VCHNL_OP_DEL_HMC_OBJ_RANGE;\n-\tvchnl_msg->iw_op_ver = I40IW_VCHNL_OP_DEL_HMC_OBJ_RANGE_V0;\n-\tadd_hmc_obj->obj_type = (u16)rsrc_type;\n-\tadd_hmc_obj->start_index = start_index;\n-\tadd_hmc_obj->obj_count = rsrc_count;\n-\tret_code = dev->vchnl_if.vchnl_send(dev, 0, (u8 *)vchnl_msg, vchnl_msg->iw_chnl_buf_len);\n-\tif (ret_code)\n-\t\ti40iw_debug(dev, I40IW_DEBUG_VIRT,\n-\t\t\t \"%s: virt channel send failed 0x%x\\n\", __func__, ret_code);\n-\treturn ret_code;\n-}\n-\n-/**\n- * vchnl_pf_send_get_ver_resp - Send channel version to VF\n- * @dev: IWARP device pointer\n- * @vf_id: Virtual function ID associated with the message\n- * @vchnl_msg: Virtual channel message buffer pointer\n- */\n-static void vchnl_pf_send_get_ver_resp(struct i40iw_sc_dev *dev,\n-\t\t\t\t u32 vf_id,\n-\t\t\t\t struct i40iw_virtchnl_op_buf *vchnl_msg)\n-{\n-\tenum i40iw_status_code ret_code;\n-\tu8 resp_buffer[sizeof(struct i40iw_virtchnl_resp_buf) + sizeof(u32) - 1];\n-\tstruct i40iw_virtchnl_resp_buf *vchnl_msg_resp = (struct i40iw_virtchnl_resp_buf *)resp_buffer;\n-\n-\tmemset(resp_buffer, 0, sizeof(*resp_buffer));\n-\tvchnl_msg_resp->iw_chnl_op_ctx = vchnl_msg->iw_chnl_op_ctx;\n-\tvchnl_msg_resp->iw_chnl_buf_len = sizeof(resp_buffer);\n-\tvchnl_msg_resp->iw_op_ret_code = I40IW_SUCCESS;\n-\t*((u32 *)vchnl_msg_resp->iw_chnl_buf) = I40IW_VCHNL_CHNL_VER_V0;\n-\tret_code = dev->vchnl_if.vchnl_send(dev, vf_id, resp_buffer, sizeof(resp_buffer));\n-\tif (ret_code)\n-\t\ti40iw_debug(dev, I40IW_DEBUG_VIRT,\n-\t\t\t \"%s: virt channel send failed 0x%x\\n\", __func__, ret_code);\n-}\n-\n-/**\n- * vchnl_pf_send_get_hmc_fcn_resp - Send HMC Function to VF\n- * @dev: IWARP device pointer\n- * @vf_id: Virtual function ID associated with the message\n- * @vchnl_msg: Virtual channel message buffer pointer\n- */\n-static void vchnl_pf_send_get_hmc_fcn_resp(struct i40iw_sc_dev *dev,\n-\t\t\t\t\t u32 vf_id,\n-\t\t\t\t\t struct i40iw_virtchnl_op_buf *vchnl_msg,\n-\t\t\t\t\t u16 hmc_fcn)\n-{\n-\tenum i40iw_status_code ret_code;\n-\tu8 resp_buffer[sizeof(struct i40iw_virtchnl_resp_buf) + sizeof(u16) - 1];\n-\tstruct i40iw_virtchnl_resp_buf *vchnl_msg_resp = (struct i40iw_virtchnl_resp_buf *)resp_buffer;\n-\n-\tmemset(resp_buffer, 0, sizeof(*resp_buffer));\n-\tvchnl_msg_resp->iw_chnl_op_ctx = vchnl_msg->iw_chnl_op_ctx;\n-\tvchnl_msg_resp->iw_chnl_buf_len = sizeof(resp_buffer);\n-\tvchnl_msg_resp->iw_op_ret_code = I40IW_SUCCESS;\n-\t*((u16 *)vchnl_msg_resp->iw_chnl_buf) = hmc_fcn;\n-\tret_code = dev->vchnl_if.vchnl_send(dev, vf_id, resp_buffer, sizeof(resp_buffer));\n-\tif (ret_code)\n-\t\ti40iw_debug(dev, I40IW_DEBUG_VIRT,\n-\t\t\t \"%s: virt channel send failed 0x%x\\n\", __func__, ret_code);\n-}\n-\n-/**\n- * vchnl_pf_send_get_pe_stats_resp - Send PE Stats to VF\n- * @dev: IWARP device pointer\n- * @vf_id: Virtual function ID associated with the message\n- * @vchnl_msg: Virtual channel message buffer pointer\n- * @hw_stats: HW Stats struct\n- */\n-\n-static void vchnl_pf_send_get_pe_stats_resp(struct i40iw_sc_dev *dev,\n-\t\t\t\t\t u32 vf_id,\n-\t\t\t\t\t struct i40iw_virtchnl_op_buf *vchnl_msg,\n-\t\t\t\t\t struct i40iw_dev_hw_stats *hw_stats)\n-{\n-\tenum i40iw_status_code ret_code;\n-\tu8 resp_buffer[sizeof(struct i40iw_virtchnl_resp_buf) + sizeof(struct i40iw_dev_hw_stats) - 1];\n-\tstruct i40iw_virtchnl_resp_buf 
*vchnl_msg_resp = (struct i40iw_virtchnl_resp_buf *)resp_buffer;\n-\n-\tmemset(resp_buffer, 0, sizeof(*resp_buffer));\n-\tvchnl_msg_resp->iw_chnl_op_ctx = vchnl_msg->iw_chnl_op_ctx;\n-\tvchnl_msg_resp->iw_chnl_buf_len = sizeof(resp_buffer);\n-\tvchnl_msg_resp->iw_op_ret_code = I40IW_SUCCESS;\n-\t*((struct i40iw_dev_hw_stats *)vchnl_msg_resp->iw_chnl_buf) = *hw_stats;\n-\tret_code = dev->vchnl_if.vchnl_send(dev, vf_id, resp_buffer, sizeof(resp_buffer));\n-\tif (ret_code)\n-\t\ti40iw_debug(dev, I40IW_DEBUG_VIRT,\n-\t\t\t \"%s: virt channel send failed 0x%x\\n\", __func__, ret_code);\n-}\n-\n-/**\n- * vchnl_pf_send_error_resp - Send an error response to VF\n- * @dev: IWARP device pointer\n- * @vf_id: Virtual function ID associated with the message\n- * @vchnl_msg: Virtual channel message buffer pointer\n- */\n-static void vchnl_pf_send_error_resp(struct i40iw_sc_dev *dev, u32 vf_id,\n-\t\t\t\t struct i40iw_virtchnl_op_buf *vchnl_msg,\n-\t\t\t\t u16 op_ret_code)\n-{\n-\tenum i40iw_status_code ret_code;\n-\tu8 resp_buffer[sizeof(struct i40iw_virtchnl_resp_buf)];\n-\tstruct i40iw_virtchnl_resp_buf *vchnl_msg_resp = (struct i40iw_virtchnl_resp_buf *)resp_buffer;\n-\n-\tmemset(resp_buffer, 0, sizeof(resp_buffer));\n-\tvchnl_msg_resp->iw_chnl_op_ctx = vchnl_msg->iw_chnl_op_ctx;\n-\tvchnl_msg_resp->iw_chnl_buf_len = sizeof(resp_buffer);\n-\tvchnl_msg_resp->iw_op_ret_code = (u16)op_ret_code;\n-\tret_code = dev->vchnl_if.vchnl_send(dev, vf_id, resp_buffer, sizeof(resp_buffer));\n-\tif (ret_code)\n-\t\ti40iw_debug(dev, I40IW_DEBUG_VIRT,\n-\t\t\t \"%s: virt channel send failed 0x%x\\n\", __func__, ret_code);\n-}\n-\n-/**\n- * pf_cqp_get_hmc_fcn_callback - Callback for Get HMC Fcn\n- * @cqp_req_param: CQP Request param value\n- * @not_used: unused CQP callback parameter\n- */\n-static void pf_cqp_get_hmc_fcn_callback(struct i40iw_sc_dev *dev, void *callback_param,\n-\t\t\t\t\tstruct i40iw_ccq_cqe_info *cqe_info)\n-{\n-\tstruct i40iw_vfdev *vf_dev = callback_param;\n-\tstruct i40iw_virt_mem vf_dev_mem;\n-\n-\tif (cqe_info->error) {\n-\t\ti40iw_debug(dev, I40IW_DEBUG_VIRT,\n-\t\t\t \"CQP Completion Error on Get HMC Function. 
Maj = 0x%04x, Minor = 0x%04x\\n\",\n-\t\t\t cqe_info->maj_err_code, cqe_info->min_err_code);\n-\t\tdev->vf_dev[vf_dev->iw_vf_idx] = NULL;\n-\t\tvchnl_pf_send_error_resp(dev, vf_dev->vf_id, &vf_dev->vf_msg_buffer.vchnl_msg,\n-\t\t\t\t\t (u16)I40IW_ERR_CQP_COMPL_ERROR);\n-\t\tvf_dev_mem.va = vf_dev;\n-\t\tvf_dev_mem.size = sizeof(*vf_dev);\n-\t\ti40iw_free_virt_mem(dev->hw, &vf_dev_mem);\n-\t} else {\n-\t\ti40iw_debug(dev, I40IW_DEBUG_VIRT,\n-\t\t\t \"CQP Completion Operation Return information = 0x%08x\\n\",\n-\t\t\t cqe_info->op_ret_val);\n-\t\tvf_dev->pmf_index = (u16)cqe_info->op_ret_val;\n-\t\tvf_dev->msg_count--;\n-\t\tvchnl_pf_send_get_hmc_fcn_resp(dev,\n-\t\t\t\t\t vf_dev->vf_id,\n-\t\t\t\t\t &vf_dev->vf_msg_buffer.vchnl_msg,\n-\t\t\t\t\t vf_dev->pmf_index);\n-\t}\n-}\n-\n-/**\n- * pf_add_hmc_obj - Callback for Add HMC Object\n- * @vf_dev: pointer to the VF Device\n- */\n-static void pf_add_hmc_obj_callback(void *work_vf_dev)\n-{\n-\tstruct i40iw_vfdev *vf_dev = (struct i40iw_vfdev *)work_vf_dev;\n-\tstruct i40iw_hmc_info *hmc_info = &vf_dev->hmc_info;\n-\tstruct i40iw_virtchnl_op_buf *vchnl_msg = &vf_dev->vf_msg_buffer.vchnl_msg;\n-\tstruct i40iw_hmc_create_obj_info info;\n-\tstruct i40iw_virtchnl_hmc_obj_range *add_hmc_obj;\n-\tenum i40iw_status_code ret_code;\n-\n-\tif (!vf_dev->pf_hmc_initialized) {\n-\t\tret_code = i40iw_pf_init_vfhmc(vf_dev->pf_dev, (u8)vf_dev->pmf_index, NULL);\n-\t\tif (ret_code)\n-\t\t\tgoto add_out;\n-\t\tvf_dev->pf_hmc_initialized = true;\n-\t}\n-\n-\tadd_hmc_obj = (struct i40iw_virtchnl_hmc_obj_range *)vchnl_msg->iw_chnl_buf;\n-\n-\tmemset(&info, 0, sizeof(info));\n-\tinfo.hmc_info = hmc_info;\n-\tinfo.is_pf = false;\n-\tinfo.rsrc_type = (u32)add_hmc_obj->obj_type;\n-\tinfo.entry_type = (info.rsrc_type == I40IW_HMC_IW_PBLE) ? I40IW_SD_TYPE_PAGED : I40IW_SD_TYPE_DIRECT;\n-\tinfo.start_idx = add_hmc_obj->start_index;\n-\tinfo.count = add_hmc_obj->obj_count;\n-\ti40iw_debug(vf_dev->pf_dev, I40IW_DEBUG_VIRT,\n-\t\t \"I40IW_VCHNL_OP_ADD_HMC_OBJ_RANGE. Add %u type %u objects\\n\",\n-\t\t info.count, info.rsrc_type);\n-\tret_code = i40iw_sc_create_hmc_obj(vf_dev->pf_dev, &info);\n-\tif (!ret_code)\n-\t\tvf_dev->hmc_info.hmc_obj[add_hmc_obj->obj_type].cnt = add_hmc_obj->obj_count;\n-add_out:\n-\tvf_dev->msg_count--;\n-\tvchnl_pf_send_error_resp(vf_dev->pf_dev, vf_dev->vf_id, vchnl_msg, (u16)ret_code);\n-}\n-\n-/**\n- * pf_del_hmc_obj_callback - Callback for delete HMC Object\n- * @work_vf_dev: pointer to the VF Device\n- */\n-static void pf_del_hmc_obj_callback(void *work_vf_dev)\n-{\n-\tstruct i40iw_vfdev *vf_dev = (struct i40iw_vfdev *)work_vf_dev;\n-\tstruct i40iw_hmc_info *hmc_info = &vf_dev->hmc_info;\n-\tstruct i40iw_virtchnl_op_buf *vchnl_msg = &vf_dev->vf_msg_buffer.vchnl_msg;\n-\tstruct i40iw_hmc_del_obj_info info;\n-\tstruct i40iw_virtchnl_hmc_obj_range *del_hmc_obj;\n-\tenum i40iw_status_code ret_code = I40IW_SUCCESS;\n-\n-\tif (!vf_dev->pf_hmc_initialized)\n-\t\tgoto del_out;\n-\n-\tdel_hmc_obj = (struct i40iw_virtchnl_hmc_obj_range *)vchnl_msg->iw_chnl_buf;\n-\n-\tmemset(&info, 0, sizeof(info));\n-\tinfo.hmc_info = hmc_info;\n-\tinfo.is_pf = false;\n-\tinfo.rsrc_type = (u32)del_hmc_obj->obj_type;\n-\tinfo.start_idx = del_hmc_obj->start_index;\n-\tinfo.count = del_hmc_obj->obj_count;\n-\ti40iw_debug(vf_dev->pf_dev, I40IW_DEBUG_VIRT,\n-\t\t \"I40IW_VCHNL_OP_DEL_HMC_OBJ_RANGE. 
Delete %u type %u objects\\n\",\n-\t\t info.count, info.rsrc_type);\n-\tret_code = i40iw_sc_del_hmc_obj(vf_dev->pf_dev, &info, false);\n-del_out:\n-\tvf_dev->msg_count--;\n-\tvchnl_pf_send_error_resp(vf_dev->pf_dev, vf_dev->vf_id, vchnl_msg, (u16)ret_code);\n-}\n-\n-/**\n- * i40iw_vf_init_pestat - Initialize stats for VF\n- * @devL pointer to the VF Device\n- * @stats: Statistics structure pointer\n- * @index: Stats index\n- */\n-static void i40iw_vf_init_pestat(struct i40iw_sc_dev *dev, struct i40iw_vsi_pestat *stats, u16 index)\n-{\n-\tstats->hw = dev->hw;\n-\ti40iw_hw_stats_init(stats, (u8)index, false);\n-\tspin_lock_init(&stats->lock);\n-}\n-\n-/**\n- * i40iw_vchnl_recv_pf - Receive PF virtual channel messages\n- * @dev: IWARP device pointer\n- * @vf_id: Virtual function ID associated with the message\n- * @msg: Virtual channel message buffer pointer\n- * @len: Length of the virtual channels message\n- */\n-enum i40iw_status_code i40iw_vchnl_recv_pf(struct i40iw_sc_dev *dev,\n-\t\t\t\t\t u32 vf_id,\n-\t\t\t\t\t u8 *msg,\n-\t\t\t\t\t u16 len)\n-{\n-\tstruct i40iw_virtchnl_op_buf *vchnl_msg = (struct i40iw_virtchnl_op_buf *)msg;\n-\tstruct i40iw_vfdev *vf_dev = NULL;\n-\tstruct i40iw_hmc_fcn_info hmc_fcn_info;\n-\tu16 iw_vf_idx;\n-\tu16 first_avail_iw_vf = I40IW_MAX_PE_ENABLED_VF_COUNT;\n-\tstruct i40iw_virt_mem vf_dev_mem;\n-\tstruct i40iw_virtchnl_work_info work_info;\n-\tstruct i40iw_vsi_pestat *stats;\n-\tenum i40iw_status_code ret_code;\n-\n-\tif (!dev || !msg || !len)\n-\t\treturn I40IW_ERR_PARAM;\n-\n-\tif (!dev->vchnl_up)\n-\t\treturn I40IW_ERR_NOT_READY;\n-\tif (vchnl_msg->iw_op_code == I40IW_VCHNL_OP_GET_VER) {\n-\t\tvchnl_pf_send_get_ver_resp(dev, vf_id, vchnl_msg);\n-\t\treturn I40IW_SUCCESS;\n-\t}\n-\tfor (iw_vf_idx = 0; iw_vf_idx < I40IW_MAX_PE_ENABLED_VF_COUNT; iw_vf_idx++) {\n-\t\tif (!dev->vf_dev[iw_vf_idx]) {\n-\t\t\tif (first_avail_iw_vf == I40IW_MAX_PE_ENABLED_VF_COUNT)\n-\t\t\t\tfirst_avail_iw_vf = iw_vf_idx;\n-\t\t\tcontinue;\n-\t\t}\n-\t\tif (dev->vf_dev[iw_vf_idx]->vf_id == vf_id) {\n-\t\t\tvf_dev = dev->vf_dev[iw_vf_idx];\n-\t\t\tbreak;\n-\t\t}\n-\t}\n-\tif (vf_dev) {\n-\t\tif (!vf_dev->msg_count) {\n-\t\t\tvf_dev->msg_count++;\n-\t\t} else {\n-\t\t\ti40iw_debug(dev, I40IW_DEBUG_VIRT,\n-\t\t\t\t \"VF%u already has a channel message in progress.\\n\",\n-\t\t\t\t vf_id);\n-\t\t\treturn I40IW_SUCCESS;\n-\t\t}\n-\t}\n-\tswitch (vchnl_msg->iw_op_code) {\n-\tcase I40IW_VCHNL_OP_GET_HMC_FCN:\n-\t\tif (!vf_dev &&\n-\t\t (first_avail_iw_vf != I40IW_MAX_PE_ENABLED_VF_COUNT)) {\n-\t\t\tret_code = i40iw_allocate_virt_mem(dev->hw, &vf_dev_mem, sizeof(struct i40iw_vfdev) +\n-\t\t\t\t\t\t\t (sizeof(struct i40iw_hmc_obj_info) * I40IW_HMC_IW_MAX));\n-\t\t\tif (!ret_code) {\n-\t\t\t\tvf_dev = vf_dev_mem.va;\n-\t\t\t\tvf_dev->stats_initialized = false;\n-\t\t\t\tvf_dev->pf_dev = dev;\n-\t\t\t\tvf_dev->msg_count = 1;\n-\t\t\t\tvf_dev->vf_id = vf_id;\n-\t\t\t\tvf_dev->iw_vf_idx = first_avail_iw_vf;\n-\t\t\t\tvf_dev->pf_hmc_initialized = false;\n-\t\t\t\tvf_dev->hmc_info.hmc_obj = (struct i40iw_hmc_obj_info *)(&vf_dev[1]);\n-\t\t\t\ti40iw_debug(dev, I40IW_DEBUG_VIRT,\n-\t\t\t\t\t \"vf_dev %p, hmc_info %p, hmc_obj %p\\n\",\n-\t\t\t\t\t vf_dev, &vf_dev->hmc_info, vf_dev->hmc_info.hmc_obj);\n-\t\t\t\tdev->vf_dev[first_avail_iw_vf] = vf_dev;\n-\t\t\t\tiw_vf_idx = first_avail_iw_vf;\n-\t\t\t} else {\n-\t\t\t\ti40iw_debug(dev, I40IW_DEBUG_VIRT,\n-\t\t\t\t\t \"VF%u Unable to allocate a VF device structure.\\n\",\n-\t\t\t\t\t vf_id);\n-\t\t\t\tvchnl_pf_send_error_resp(dev, vf_id, vchnl_msg, 
(u16)I40IW_ERR_NO_MEMORY);\n-\t\t\t\treturn I40IW_SUCCESS;\n-\t\t\t}\n-\t\t\tmemcpy(&vf_dev->vf_msg_buffer.vchnl_msg, vchnl_msg, len);\n-\t\t\thmc_fcn_info.callback_fcn = pf_cqp_get_hmc_fcn_callback;\n-\t\t\thmc_fcn_info.vf_id = vf_id;\n-\t\t\thmc_fcn_info.iw_vf_idx = vf_dev->iw_vf_idx;\n-\t\t\thmc_fcn_info.cqp_callback_param = vf_dev;\n-\t\t\thmc_fcn_info.free_fcn = false;\n-\t\t\tret_code = i40iw_cqp_manage_hmc_fcn_cmd(dev, &hmc_fcn_info);\n-\t\t\tif (ret_code)\n-\t\t\t\ti40iw_debug(dev, I40IW_DEBUG_VIRT,\n-\t\t\t\t\t \"VF%u error CQP HMC Function operation.\\n\",\n-\t\t\t\t\t vf_id);\n-\t\t\ti40iw_vf_init_pestat(dev, &vf_dev->pestat, vf_dev->pmf_index);\n-\t\t\tvf_dev->stats_initialized = true;\n-\t\t} else {\n-\t\t\tif (vf_dev) {\n-\t\t\t\tvf_dev->msg_count--;\n-\t\t\t\tvchnl_pf_send_get_hmc_fcn_resp(dev, vf_id, vchnl_msg, vf_dev->pmf_index);\n-\t\t\t} else {\n-\t\t\t\tvchnl_pf_send_error_resp(dev, vf_id, vchnl_msg,\n-\t\t\t\t\t\t\t (u16)I40IW_ERR_NO_MEMORY);\n-\t\t\t}\n-\t\t}\n-\t\tbreak;\n-\tcase I40IW_VCHNL_OP_ADD_HMC_OBJ_RANGE:\n-\t\tif (!vf_dev)\n-\t\t\treturn I40IW_ERR_BAD_PTR;\n-\t\twork_info.worker_vf_dev = vf_dev;\n-\t\twork_info.callback_fcn = pf_add_hmc_obj_callback;\n-\t\tmemcpy(&vf_dev->vf_msg_buffer.vchnl_msg, vchnl_msg, len);\n-\t\ti40iw_cqp_spawn_worker(dev, &work_info, vf_dev->iw_vf_idx);\n-\t\tbreak;\n-\tcase I40IW_VCHNL_OP_DEL_HMC_OBJ_RANGE:\n-\t\tif (!vf_dev)\n-\t\t\treturn I40IW_ERR_BAD_PTR;\n-\t\twork_info.worker_vf_dev = vf_dev;\n-\t\twork_info.callback_fcn = pf_del_hmc_obj_callback;\n-\t\tmemcpy(&vf_dev->vf_msg_buffer.vchnl_msg, vchnl_msg, len);\n-\t\ti40iw_cqp_spawn_worker(dev, &work_info, vf_dev->iw_vf_idx);\n-\t\tbreak;\n-\tcase I40IW_VCHNL_OP_GET_STATS:\n-\t\tif (!vf_dev)\n-\t\t\treturn I40IW_ERR_BAD_PTR;\n-\t\tstats = &vf_dev->pestat;\n-\t\ti40iw_hw_stats_read_all(stats, &stats->hw_stats);\n-\t\tvf_dev->msg_count--;\n-\t\tvchnl_pf_send_get_pe_stats_resp(dev, vf_id, vchnl_msg, &stats->hw_stats);\n-\t\tbreak;\n-\tdefault:\n-\t\ti40iw_debug(dev, I40IW_DEBUG_VIRT,\n-\t\t\t \"40iw_vchnl_recv_pf: Invalid OpCode 0x%x\\n\",\n-\t\t\t vchnl_msg->iw_op_code);\n-\t\tvchnl_pf_send_error_resp(dev, vf_id,\n-\t\t\t\t\t vchnl_msg, (u16)I40IW_ERR_NOT_IMPLEMENTED);\n-\t}\n-\treturn I40IW_SUCCESS;\n-}\n-\n-/**\n- * i40iw_vchnl_recv_vf - Receive VF virtual channel messages\n- * @dev: IWARP device pointer\n- * @vf_id: Virtual function ID associated with the message\n- * @msg: Virtual channel message buffer pointer\n- * @len: Length of the virtual channels message\n- */\n-enum i40iw_status_code i40iw_vchnl_recv_vf(struct i40iw_sc_dev *dev,\n-\t\t\t\t\t u32 vf_id,\n-\t\t\t\t\t u8 *msg,\n-\t\t\t\t\t u16 len)\n-{\n-\tstruct i40iw_virtchnl_resp_buf *vchnl_msg_resp = (struct i40iw_virtchnl_resp_buf *)msg;\n-\tstruct i40iw_virtchnl_req *vchnl_req;\n-\n-\tvchnl_req = (struct i40iw_virtchnl_req *)(uintptr_t)vchnl_msg_resp->iw_chnl_op_ctx;\n-\tvchnl_req->ret_code = (enum i40iw_status_code)vchnl_msg_resp->iw_op_ret_code;\n-\tif (len == (sizeof(*vchnl_msg_resp) + vchnl_req->parm_len - 1)) {\n-\t\tif (vchnl_req->parm_len && vchnl_req->parm)\n-\t\t\tmemcpy(vchnl_req->parm, vchnl_msg_resp->iw_chnl_buf, vchnl_req->parm_len);\n-\t\ti40iw_debug(dev, I40IW_DEBUG_VIRT,\n-\t\t\t \"%s: Got response, data size %u\\n\", __func__,\n-\t\t\t vchnl_req->parm_len);\n-\t} else {\n-\t\ti40iw_debug(dev, I40IW_DEBUG_VIRT,\n-\t\t\t \"%s: error length on response, Got %u, expected %u\\n\", __func__,\n-\t\t\t len, (u32)(sizeof(*vchnl_msg_resp) + vchnl_req->parm_len - 1));\n-\t}\n-\n-\treturn 
I40IW_SUCCESS;\n-}\n-\n-/**\n- * i40iw_vchnl_vf_get_ver - Request Channel version\n- * @dev: IWARP device pointer\n- * @vchnl_ver: Virtual channel message version pointer\n- */\n-enum i40iw_status_code i40iw_vchnl_vf_get_ver(struct i40iw_sc_dev *dev,\n-\t\t\t\t\t u32 *vchnl_ver)\n-{\n-\tstruct i40iw_virtchnl_req vchnl_req;\n-\tenum i40iw_status_code ret_code;\n-\n-\tif (!i40iw_vf_clear_to_send(dev))\n-\t\treturn I40IW_ERR_TIMEOUT;\n-\tmemset(&vchnl_req, 0, sizeof(vchnl_req));\n-\tvchnl_req.dev = dev;\n-\tvchnl_req.parm = vchnl_ver;\n-\tvchnl_req.parm_len = sizeof(*vchnl_ver);\n-\tvchnl_req.vchnl_msg = &dev->vchnl_vf_msg_buf.vchnl_msg;\n-\n-\tret_code = vchnl_vf_send_get_ver_req(dev, &vchnl_req);\n-\tif (ret_code) {\n-\t\ti40iw_debug(dev, I40IW_DEBUG_VIRT,\n-\t\t\t \"%s Send message failed 0x%0x\\n\", __func__, ret_code);\n-\t\treturn ret_code;\n-\t}\n-\tret_code = i40iw_vf_wait_vchnl_resp(dev);\n-\tif (ret_code)\n-\t\treturn ret_code;\n-\telse\n-\t\treturn vchnl_req.ret_code;\n-}\n-\n-/**\n- * i40iw_vchnl_vf_get_hmc_fcn - Request HMC Function\n- * @dev: IWARP device pointer\n- * @hmc_fcn: HMC function index pointer\n- */\n-enum i40iw_status_code i40iw_vchnl_vf_get_hmc_fcn(struct i40iw_sc_dev *dev,\n-\t\t\t\t\t\t u16 *hmc_fcn)\n-{\n-\tstruct i40iw_virtchnl_req vchnl_req;\n-\tenum i40iw_status_code ret_code;\n-\n-\tif (!i40iw_vf_clear_to_send(dev))\n-\t\treturn I40IW_ERR_TIMEOUT;\n-\tmemset(&vchnl_req, 0, sizeof(vchnl_req));\n-\tvchnl_req.dev = dev;\n-\tvchnl_req.parm = hmc_fcn;\n-\tvchnl_req.parm_len = sizeof(*hmc_fcn);\n-\tvchnl_req.vchnl_msg = &dev->vchnl_vf_msg_buf.vchnl_msg;\n-\n-\tret_code = vchnl_vf_send_get_hmc_fcn_req(dev, &vchnl_req);\n-\tif (ret_code) {\n-\t\ti40iw_debug(dev, I40IW_DEBUG_VIRT,\n-\t\t\t \"%s Send message failed 0x%0x\\n\", __func__, ret_code);\n-\t\treturn ret_code;\n-\t}\n-\tret_code = i40iw_vf_wait_vchnl_resp(dev);\n-\tif (ret_code)\n-\t\treturn ret_code;\n-\telse\n-\t\treturn vchnl_req.ret_code;\n-}\n-\n-/**\n- * i40iw_vchnl_vf_add_hmc_objs - Add HMC Object\n- * @dev: IWARP device pointer\n- * @rsrc_type: HMC Resource type\n- * @start_index: Starting index of the objects to be added\n- * @rsrc_count: Number of resources to be added\n- */\n-enum i40iw_status_code i40iw_vchnl_vf_add_hmc_objs(struct i40iw_sc_dev *dev,\n-\t\t\t\t\t\t enum i40iw_hmc_rsrc_type rsrc_type,\n-\t\t\t\t\t\t u32 start_index,\n-\t\t\t\t\t\t u32 rsrc_count)\n-{\n-\tstruct i40iw_virtchnl_req vchnl_req;\n-\tenum i40iw_status_code ret_code;\n-\n-\tif (!i40iw_vf_clear_to_send(dev))\n-\t\treturn I40IW_ERR_TIMEOUT;\n-\tmemset(&vchnl_req, 0, sizeof(vchnl_req));\n-\tvchnl_req.dev = dev;\n-\tvchnl_req.vchnl_msg = &dev->vchnl_vf_msg_buf.vchnl_msg;\n-\n-\tret_code = vchnl_vf_send_add_hmc_objs_req(dev,\n-\t\t\t\t\t\t &vchnl_req,\n-\t\t\t\t\t\t rsrc_type,\n-\t\t\t\t\t\t start_index,\n-\t\t\t\t\t\t rsrc_count);\n-\tif (ret_code) {\n-\t\ti40iw_debug(dev, I40IW_DEBUG_VIRT,\n-\t\t\t \"%s Send message failed 0x%0x\\n\", __func__, ret_code);\n-\t\treturn ret_code;\n-\t}\n-\tret_code = i40iw_vf_wait_vchnl_resp(dev);\n-\tif (ret_code)\n-\t\treturn ret_code;\n-\telse\n-\t\treturn vchnl_req.ret_code;\n-}\n-\n-/**\n- * i40iw_vchnl_vf_del_hmc_obj - del HMC obj\n- * @dev: IWARP device pointer\n- * @rsrc_type: HMC Resource type\n- * @start_index: Starting index of the object to delete\n- * @rsrc_count: Number of resources to be delete\n- */\n-enum i40iw_status_code i40iw_vchnl_vf_del_hmc_obj(struct i40iw_sc_dev *dev,\n-\t\t\t\t\t\t enum i40iw_hmc_rsrc_type rsrc_type,\n-\t\t\t\t\t\t u32 start_index,\n-\t\t\t\t\t\t u32 
rsrc_count)\n-{\n-\tstruct i40iw_virtchnl_req vchnl_req;\n-\tenum i40iw_status_code ret_code;\n-\n-\tif (!i40iw_vf_clear_to_send(dev))\n-\t\treturn I40IW_ERR_TIMEOUT;\n-\tmemset(&vchnl_req, 0, sizeof(vchnl_req));\n-\tvchnl_req.dev = dev;\n-\tvchnl_req.vchnl_msg = &dev->vchnl_vf_msg_buf.vchnl_msg;\n-\n-\tret_code = vchnl_vf_send_del_hmc_objs_req(dev,\n-\t\t\t\t\t\t &vchnl_req,\n-\t\t\t\t\t\t rsrc_type,\n-\t\t\t\t\t\t start_index,\n-\t\t\t\t\t\t rsrc_count);\n-\tif (ret_code) {\n-\t\ti40iw_debug(dev, I40IW_DEBUG_VIRT,\n-\t\t\t \"%s Send message failed 0x%0x\\n\", __func__, ret_code);\n-\t\treturn ret_code;\n-\t}\n-\tret_code = i40iw_vf_wait_vchnl_resp(dev);\n-\tif (ret_code)\n-\t\treturn ret_code;\n-\telse\n-\t\treturn vchnl_req.ret_code;\n-}\n-\n-/**\n- * i40iw_vchnl_vf_get_pe_stats - Get PE stats\n- * @dev: IWARP device pointer\n- * @hw_stats: HW stats struct\n- */\n-enum i40iw_status_code i40iw_vchnl_vf_get_pe_stats(struct i40iw_sc_dev *dev,\n-\t\t\t\t\t\t struct i40iw_dev_hw_stats *hw_stats)\n-{\n-\tstruct i40iw_virtchnl_req vchnl_req;\n-\tenum i40iw_status_code ret_code;\n-\n-\tif (!i40iw_vf_clear_to_send(dev))\n-\t\treturn I40IW_ERR_TIMEOUT;\n-\tmemset(&vchnl_req, 0, sizeof(vchnl_req));\n-\tvchnl_req.dev = dev;\n-\tvchnl_req.parm = hw_stats;\n-\tvchnl_req.parm_len = sizeof(*hw_stats);\n-\tvchnl_req.vchnl_msg = &dev->vchnl_vf_msg_buf.vchnl_msg;\n-\n-\tret_code = vchnl_vf_send_get_pe_stats_req(dev, &vchnl_req);\n-\tif (ret_code) {\n-\t\ti40iw_debug(dev, I40IW_DEBUG_VIRT,\n-\t\t\t \"%s Send message failed 0x%0x\\n\", __func__, ret_code);\n-\t\treturn ret_code;\n-\t}\n-\tret_code = i40iw_vf_wait_vchnl_resp(dev);\n-\tif (ret_code)\n-\t\treturn ret_code;\n-\telse\n-\t\treturn vchnl_req.ret_code;\n-}\ndiff --git a/drivers/infiniband/hw/i40iw/i40iw_virtchnl.h b/drivers/infiniband/hw/i40iw/i40iw_virtchnl.h\ndeleted file mode 100644\nindex 24886ef..0000000\n--- a/drivers/infiniband/hw/i40iw/i40iw_virtchnl.h\n+++ /dev/null\n@@ -1,124 +0,0 @@\n-/*******************************************************************************\n-*\n-* Copyright (c) 2015-2016 Intel Corporation. All rights reserved.\n-*\n-* This software is available to you under a choice of one of two\n-* licenses. You may choose to be licensed under the terms of the GNU\n-* General Public License (GPL) Version 2, available from the file\n-* COPYING in the main directory of this source tree, or the\n-* OpenFabrics.org BSD license below:\n-*\n-* Redistribution and use in source and binary forms, with or\n-* without modification, are permitted provided that the following\n-* conditions are met:\n-*\n-* - Redistributions of source code must retain the above\n-*\tcopyright notice, this list of conditions and the following\n-*\tdisclaimer.\n-*\n-* - Redistributions in binary form must reproduce the above\n-*\tcopyright notice, this list of conditions and the following\n-*\tdisclaimer in the documentation and/or other materials\n-*\tprovided with the distribution.\n-*\n-* THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,\n-* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\n-* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\n-* NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS\n-* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN\n-* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\n-* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n-* SOFTWARE.\n-*\n-*******************************************************************************/\n-\n-#ifndef I40IW_VIRTCHNL_H\n-#define I40IW_VIRTCHNL_H\n-\n-#include \"i40iw_hmc.h\"\n-\n-#pragma pack(push, 1)\n-\n-struct i40iw_virtchnl_op_buf {\n-\tu16 iw_op_code;\n-\tu16 iw_op_ver;\n-\tu16 iw_chnl_buf_len;\n-\tu16 rsvd;\n-\tu64 iw_chnl_op_ctx;\n-\t/* Member alignment MUST be maintained above this location */\n-\tu8 iw_chnl_buf[1];\n-};\n-\n-struct i40iw_virtchnl_resp_buf {\n-\tu64 iw_chnl_op_ctx;\n-\tu16 iw_chnl_buf_len;\n-\ts16 iw_op_ret_code;\n-\t/* Member alignment MUST be maintained above this location */\n-\tu16 rsvd[2];\n-\tu8 iw_chnl_buf[1];\n-};\n-\n-enum i40iw_virtchnl_ops {\n-\tI40IW_VCHNL_OP_GET_VER = 0,\n-\tI40IW_VCHNL_OP_GET_HMC_FCN,\n-\tI40IW_VCHNL_OP_ADD_HMC_OBJ_RANGE,\n-\tI40IW_VCHNL_OP_DEL_HMC_OBJ_RANGE,\n-\tI40IW_VCHNL_OP_GET_STATS\n-};\n-\n-#define I40IW_VCHNL_OP_GET_VER_V0 0\n-#define I40IW_VCHNL_OP_GET_HMC_FCN_V0 0\n-#define I40IW_VCHNL_OP_ADD_HMC_OBJ_RANGE_V0 0\n-#define I40IW_VCHNL_OP_DEL_HMC_OBJ_RANGE_V0 0\n-#define I40IW_VCHNL_OP_GET_STATS_V0 0\n-#define I40IW_VCHNL_CHNL_VER_V0 0\n-\n-struct i40iw_dev_hw_stats;\n-\n-struct i40iw_virtchnl_hmc_obj_range {\n-\tu16 obj_type;\n-\tu16 rsvd;\n-\tu32 start_index;\n-\tu32 obj_count;\n-};\n-\n-enum i40iw_status_code i40iw_vchnl_recv_pf(struct i40iw_sc_dev *dev,\n-\t\t\t\t\t u32 vf_id,\n-\t\t\t\t\t u8 *msg,\n-\t\t\t\t\t u16 len);\n-\n-enum i40iw_status_code i40iw_vchnl_recv_vf(struct i40iw_sc_dev *dev,\n-\t\t\t\t\t u32 vf_id,\n-\t\t\t\t\t u8 *msg,\n-\t\t\t\t\t u16 len);\n-\n-struct i40iw_virtchnl_req {\n-\tstruct i40iw_sc_dev *dev;\n-\tstruct i40iw_virtchnl_op_buf *vchnl_msg;\n-\tvoid *parm;\n-\tu32 vf_id;\n-\tu16 parm_len;\n-\ts16 ret_code;\n-};\n-\n-#pragma pack(pop)\n-\n-enum i40iw_status_code i40iw_vchnl_vf_get_ver(struct i40iw_sc_dev *dev,\n-\t\t\t\t\t u32 *vchnl_ver);\n-\n-enum i40iw_status_code i40iw_vchnl_vf_get_hmc_fcn(struct i40iw_sc_dev *dev,\n-\t\t\t\t\t\t u16 *hmc_fcn);\n-\n-enum i40iw_status_code i40iw_vchnl_vf_add_hmc_objs(struct i40iw_sc_dev *dev,\n-\t\t\t\t\t\t enum i40iw_hmc_rsrc_type rsrc_type,\n-\t\t\t\t\t\t u32 start_index,\n-\t\t\t\t\t\t u32 rsrc_count);\n-\n-enum i40iw_status_code i40iw_vchnl_vf_del_hmc_obj(struct i40iw_sc_dev *dev,\n-\t\t\t\t\t\t enum i40iw_hmc_rsrc_type rsrc_type,\n-\t\t\t\t\t\t u32 start_index,\n-\t\t\t\t\t\t u32 rsrc_count);\n-\n-enum i40iw_status_code i40iw_vchnl_vf_get_pe_stats(struct i40iw_sc_dev *dev,\n-\t\t\t\t\t\t struct i40iw_dev_hw_stats *hw_stats);\n-#endif\ndiff --git a/drivers/infiniband/hw/irdma/Kconfig b/drivers/infiniband/hw/irdma/Kconfig\nnew file mode 100644\nindex 0000000..edbbffa\n--- /dev/null\n+++ b/drivers/infiniband/hw/irdma/Kconfig\n@@ -0,0 +1,11 @@\n+config INFINIBAND_IRDMA\n+ tristate \"Intel(R) Ethernet Connection RDMA Driver\"\n+ depends on INET && (I40E || ICE)\n+ depends on IPV6 || !IPV6\n+ depends on PCI\n+ select GENERIC_ALLOCATOR\n+ help\n+ This is an Ethernet RDMA driver that supports E810 (iWARP/RoCE)\n+ and X722 (iWARP) network devices.\n+ To compile this driver as a module, choose M here. 
The module\n+ will be called irdma.\ndiff --git a/drivers/infiniband/hw/irdma/Makefile b/drivers/infiniband/hw/irdma/Makefile\nnew file mode 100644\nindex 0000000..160fafe\n--- /dev/null\n+++ b/drivers/infiniband/hw/irdma/Makefile\n@@ -0,0 +1,28 @@\n+# SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB\n+# Copyright (c) 2019, Intel Corporation.\n+\n+#\n+# Makefile for the Intel(R) Ethernet Connection RDMA Linux Driver\n+#\n+\n+obj-$(CONFIG_INFINIBAND_IRDMA) += irdma.o\n+\n+irdma-objs := cm.o \\\n+ ctrl.o \\\n+ hmc.o \\\n+ hw.o \\\n+ i40iw_hw.o \\\n+ i40iw_if.o \\\n+ icrdma_hw.o \\\n+ irdma_if.o \\\n+ main.o \\\n+ pble.o \\\n+ puda.o \\\n+ trace.o \\\n+ uda.o \\\n+ uk.o \\\n+ utils.o \\\n+ verbs.o \\\n+ ws.o \\\n+\n+CFLAGS_trace.o = -I$(src)\ndiff --git a/include/uapi/rdma/i40iw-abi.h b/include/uapi/rdma/i40iw-abi.h\ndeleted file mode 100644\nindex 79890ba..0000000\n--- a/include/uapi/rdma/i40iw-abi.h\n+++ /dev/null\n@@ -1,107 +0,0 @@\n-/*\n- * Copyright (c) 2006 - 2016 Intel Corporation. All rights reserved.\n- * Copyright (c) 2005 Topspin Communications. All rights reserved.\n- * Copyright (c) 2005 Cisco Systems. All rights reserved.\n- * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.\n- *\n- * This software is available to you under a choice of one of two\n- * licenses. You may choose to be licensed under the terms of the GNU\n- * General Public License (GPL) Version 2, available from the file\n- * COPYING in the main directory of this source tree, or the\n- * OpenIB.org BSD license below:\n- *\n- * Redistribution and use in source and binary forms, with or\n- * without modification, are permitted provided that the following\n- * conditions are met:\n- *\n- * - Redistributions of source code must retain the above\n- * copyright notice, this list of conditions and the following\n- * disclaimer.\n- *\n- * - Redistributions in binary form must reproduce the above\n- * copyright notice, this list of conditions and the following\n- * disclaimer in the documentation and/or other materials\n- * provided with the distribution.\n- *\n- * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,\n- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\n- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\n- * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS\n- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN\n- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\n- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n- * SOFTWARE.\n- *\n- */\n-\n-#ifndef I40IW_ABI_H\n-#define I40IW_ABI_H\n-\n-#include <linux/types.h>\n-\n-#define I40IW_ABI_VER 5\n-\n-struct i40iw_alloc_ucontext_req {\n-\t__u32 reserved32;\n-\t__u8 userspace_ver;\n-\t__u8 reserved8[3];\n-};\n-\n-struct i40iw_alloc_ucontext_resp {\n-\t__u32 max_pds;\t\t/* maximum pds allowed for this user process */\n-\t__u32 max_qps;\t\t/* maximum qps allowed for this user process */\n-\t__u32 wq_size;\t\t/* size of the WQs (sq+rq) allocated to the mmaped area */\n-\t__u8 kernel_ver;\n-\t__u8 reserved[3];\n-};\n-\n-struct i40iw_alloc_pd_resp {\n-\t__u32 pd_id;\n-\t__u8 reserved[4];\n-};\n-\n-struct i40iw_create_cq_req {\n-\t__aligned_u64 user_cq_buffer;\n-\t__aligned_u64 user_shadow_area;\n-};\n-\n-struct i40iw_create_qp_req {\n-\t__aligned_u64 user_wqe_buffers;\n-\t__aligned_u64 user_compl_ctx;\n-\n-\t/* UDA QP PHB */\n-\t__aligned_u64 user_sq_phb;\t/* place for VA of the sq phb buff */\n-\t__aligned_u64 user_rq_phb;\t/* place for VA of the rq phb buff */\n-};\n-\n-enum i40iw_memreg_type {\n-\tIW_MEMREG_TYPE_MEM = 0x0000,\n-\tIW_MEMREG_TYPE_QP = 0x0001,\n-\tIW_MEMREG_TYPE_CQ = 0x0002,\n-};\n-\n-struct i40iw_mem_reg_req {\n-\t__u16 reg_type;\t\t/* Memory, QP or CQ */\n-\t__u16 cq_pages;\n-\t__u16 rq_pages;\n-\t__u16 sq_pages;\n-};\n-\n-struct i40iw_create_cq_resp {\n-\t__u32 cq_id;\n-\t__u32 cq_size;\n-\t__u32 mmap_db_index;\n-\t__u32 reserved;\n-};\n-\n-struct i40iw_create_qp_resp {\n-\t__u32 qp_id;\n-\t__u32 actual_sq_size;\n-\t__u32 actual_rq_size;\n-\t__u32 i40iw_drv_opt;\n-\t__u16 push_idx;\n-\t__u8 lsmm;\n-\t__u8 rsvd2;\n-};\n-\n-#endif\n", "prefixes": [ "rdma-nxt", "15/16" ] }