get:
Show a patch.

patch:
Update a patch.

put:
Update a patch.
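
For illustration, a minimal sketch of driving these endpoints from Python with the third-party "requests" package (an assumption, not part of this capture). GET is anonymous; PUT and PATCH require a maintainer account, represented here by a hypothetical API token in Patchwork's "Authorization: Token ..." header:

import requests

BASE = "http://patchwork.ozlabs.org/api"

# get: show the patch as JSON (the same object captured below)
patch = requests.get(f"{BASE}/patches/1124837/", timeout=30).json()
print(patch["name"], "->", patch["state"])

# patch: update only the fields supplied; "accepted" is a state slug
# like the "rejected" value below (the token value is hypothetical)
resp = requests.patch(
    f"{BASE}/patches/1124837/",
    headers={"Authorization": "Token 0123456789abcdef"},
    json={"state": "accepted"},
    timeout=30,
)
resp.raise_for_status()

PATCH updates only the fields it is given, while PUT replaces every writable field, which is why both carry the same one-line description above.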

GET /api/patches/1124837/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 1124837,
    "url": "http://patchwork.ozlabs.org/api/patches/1124837/?format=api",
    "web_url": "http://patchwork.ozlabs.org/project/intel-wired-lan/patch/20190629185405.1601-2-shiraz.saleem@intel.com/",
    "project": {
        "id": 46,
        "url": "http://patchwork.ozlabs.org/api/projects/46/?format=api",
        "name": "Intel Wired Ethernet development",
        "link_name": "intel-wired-lan",
        "list_id": "intel-wired-lan.osuosl.org",
        "list_email": "intel-wired-lan@osuosl.org",
        "web_url": "",
        "scm_url": "",
        "webscm_url": "",
        "list_archive_url": "",
        "list_archive_url_format": "",
        "commit_url_format": ""
    },
    "msgid": "<20190629185405.1601-2-shiraz.saleem@intel.com>",
    "list_archive_url": null,
    "date": "2019-06-29T18:53:49",
    "name": "[rdma-next,01/17] RDMA/irdma: Add driver framework definitions",
    "commit_ref": null,
    "pull_url": null,
    "state": "rejected",
    "archived": false,
    "hash": "3a086718e7da014e60876466c41c9fd7349f1732",
    "submitter": {
        "id": 69500,
        "url": "http://patchwork.ozlabs.org/api/people/69500/?format=api",
        "name": "Saleem, Shiraz",
        "email": "shiraz.saleem@intel.com"
    },
    "delegate": {
        "id": 68,
        "url": "http://patchwork.ozlabs.org/api/users/68/?format=api",
        "username": "jtkirshe",
        "first_name": "Jeff",
        "last_name": "Kirsher",
        "email": "jeffrey.t.kirsher@intel.com"
    },
    "mbox": "http://patchwork.ozlabs.org/project/intel-wired-lan/patch/20190629185405.1601-2-shiraz.saleem@intel.com/mbox/",
    "series": [
        {
            "id": 116886,
            "url": "http://patchwork.ozlabs.org/api/series/116886/?format=api",
            "web_url": "http://patchwork.ozlabs.org/project/intel-wired-lan/list/?series=116886",
            "date": "2019-06-29T18:53:48",
            "name": "Add unified Intel Ethernet RDMA driver (irdma)",
            "version": 1,
            "mbox": "http://patchwork.ozlabs.org/series/116886/mbox/"
        }
    ],
    "comments": "http://patchwork.ozlabs.org/api/patches/1124837/comments/",
    "check": "pending",
    "checks": "http://patchwork.ozlabs.org/api/patches/1124837/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<intel-wired-lan-bounces@osuosl.org>",
        "X-Original-To": [
            "incoming@patchwork.ozlabs.org",
            "intel-wired-lan@lists.osuosl.org"
        ],
        "Delivered-To": [
            "patchwork-incoming@bilbo.ozlabs.org",
            "intel-wired-lan@lists.osuosl.org"
        ],
        "Authentication-Results": [
            "ozlabs.org;\n\tspf=pass (mailfrom) smtp.mailfrom=osuosl.org\n\t(client-ip=140.211.166.136; helo=silver.osuosl.org;\n\tenvelope-from=intel-wired-lan-bounces@osuosl.org;\n\treceiver=<UNKNOWN>)",
            "ozlabs.org;\n\tdmarc=fail (p=none dis=none) header.from=intel.com"
        ],
        "Received": [
            "from silver.osuosl.org (smtp3.osuosl.org [140.211.166.136])\n\t(using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256\n\tbits)) (No client certificate requested)\n\tby ozlabs.org (Postfix) with ESMTPS id 45bjVV5q25z9s3Z\n\tfor <incoming@patchwork.ozlabs.org>;\n\tSun, 30 Jun 2019 04:54:42 +1000 (AEST)",
            "from localhost (localhost [127.0.0.1])\n\tby silver.osuosl.org (Postfix) with ESMTP id EEEE3204DE;\n\tSat, 29 Jun 2019 18:54:40 +0000 (UTC)",
            "from silver.osuosl.org ([127.0.0.1])\n\tby localhost (.osuosl.org [127.0.0.1]) (amavisd-new, port 10024)\n\twith ESMTP id V7Rggztu932S; Sat, 29 Jun 2019 18:54:25 +0000 (UTC)",
            "from ash.osuosl.org (ash.osuosl.org [140.211.166.34])\n\tby silver.osuosl.org (Postfix) with ESMTP id 13464204BF;\n\tSat, 29 Jun 2019 18:54:25 +0000 (UTC)",
            "from whitealder.osuosl.org (smtp1.osuosl.org [140.211.166.138])\n\tby ash.osuosl.org (Postfix) with ESMTP id E96631BF3AD\n\tfor <intel-wired-lan@lists.osuosl.org>;\n\tSat, 29 Jun 2019 18:54:22 +0000 (UTC)",
            "from localhost (localhost [127.0.0.1])\n\tby whitealder.osuosl.org (Postfix) with ESMTP id D441686D05\n\tfor <intel-wired-lan@lists.osuosl.org>;\n\tSat, 29 Jun 2019 18:54:22 +0000 (UTC)",
            "from whitealder.osuosl.org ([127.0.0.1])\n\tby localhost (.osuosl.org [127.0.0.1]) (amavisd-new, port 10024)\n\twith ESMTP id EOLf3g0ZedM2 for <intel-wired-lan@lists.osuosl.org>;\n\tSat, 29 Jun 2019 18:54:15 +0000 (UTC)",
            "from mga14.intel.com (mga14.intel.com [192.55.52.115])\n\tby whitealder.osuosl.org (Postfix) with ESMTPS id 6737686B05\n\tfor <intel-wired-lan@lists.osuosl.org>;\n\tSat, 29 Jun 2019 18:54:15 +0000 (UTC)",
            "from fmsmga004.fm.intel.com ([10.253.24.48])\n\tby fmsmga103.fm.intel.com with ESMTP/TLS/DHE-RSA-AES256-GCM-SHA384;\n\t29 Jun 2019 11:54:15 -0700",
            "from ssaleem-mobl.amr.corp.intel.com ([10.254.177.95])\n\tby fmsmga004.fm.intel.com with ESMTP; 29 Jun 2019 11:54:14 -0700"
        ],
        "X-Virus-Scanned": [
            "amavisd-new at osuosl.org",
            "amavisd-new at osuosl.org"
        ],
        "X-Greylist": "domain auto-whitelisted by SQLgrey-1.7.6",
        "X-Amp-Result": "SKIPPED(no attachment in message)",
        "X-Amp-File-Uploaded": "False",
        "X-ExtLoop1": "1",
        "X-IronPort-AV": "E=Sophos;i=\"5.63,432,1557212400\"; d=\"scan'208\";a=\"185972852\"",
        "From": "Shiraz Saleem <shiraz.saleem@intel.com>",
        "To": "intel-wired-lan@lists.osuosl.org",
        "Date": "Sat, 29 Jun 2019 13:53:49 -0500",
        "Message-Id": "<20190629185405.1601-2-shiraz.saleem@intel.com>",
        "X-Mailer": "git-send-email 2.21.0",
        "In-Reply-To": "<20190629185405.1601-1-shiraz.saleem@intel.com>",
        "References": "<20190629185405.1601-1-shiraz.saleem@intel.com>",
        "MIME-Version": "1.0",
        "Subject": "[Intel-wired-lan] [PATCH rdma-next 01/17] RDMA/irdma: Add driver\n\tframework definitions",
        "X-BeenThere": "intel-wired-lan@osuosl.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "Intel Wired Ethernet Linux Kernel Driver Development\n\t<intel-wired-lan.osuosl.org>",
        "List-Unsubscribe": "<https://lists.osuosl.org/mailman/options/intel-wired-lan>, \n\t<mailto:intel-wired-lan-request@osuosl.org?subject=unsubscribe>",
        "List-Archive": "<http://lists.osuosl.org/pipermail/intel-wired-lan/>",
        "List-Post": "<mailto:intel-wired-lan@osuosl.org>",
        "List-Help": "<mailto:intel-wired-lan-request@osuosl.org?subject=help>",
        "List-Subscribe": "<https://lists.osuosl.org/mailman/listinfo/intel-wired-lan>, \n\t<mailto:intel-wired-lan-request@osuosl.org?subject=subscribe>",
        "Cc": "Mustafa Ismail <mustafa.ismail@intel.com>,\n\tShiraz Saleem <shiraz.saleem@intel.com>",
        "Content-Type": "text/plain; charset=\"us-ascii\"",
        "Content-Transfer-Encoding": "7bit",
        "Errors-To": "intel-wired-lan-bounces@osuosl.org",
        "Sender": "\"Intel-wired-lan\" <intel-wired-lan-bounces@osuosl.org>"
    },
    "content": "From: Mustafa Ismail <mustafa.ismail@intel.com>\n\nRegister irdma as a platform driver capable of supporting platform\ndevices from multi-generation RDMA capable Intel HW. Establish the\ninterface with all supported netdev peer devices and initialize HW.\n\nSigned-off-by: Mustafa Ismail <mustafa.ismail@intel.com>\nSigned-off-by: Shiraz Saleem <shiraz.saleem@intel.com>\n---\n drivers/infiniband/hw/irdma/i40iw_if.c | 256 +++++++++++++\n drivers/infiniband/hw/irdma/irdma_if.c | 426 ++++++++++++++++++++++\n drivers/infiniband/hw/irdma/main.c     | 531 +++++++++++++++++++++++++++\n drivers/infiniband/hw/irdma/main.h     | 639 +++++++++++++++++++++++++++++++++\n 4 files changed, 1852 insertions(+)\n create mode 100644 drivers/infiniband/hw/irdma/i40iw_if.c\n create mode 100644 drivers/infiniband/hw/irdma/irdma_if.c\n create mode 100644 drivers/infiniband/hw/irdma/main.c\n create mode 100644 drivers/infiniband/hw/irdma/main.h",
    "diff": "diff --git a/drivers/infiniband/hw/irdma/i40iw_if.c b/drivers/infiniband/hw/irdma/i40iw_if.c\nnew file mode 100644\nindex 0000000..9067495\n--- /dev/null\n+++ b/drivers/infiniband/hw/irdma/i40iw_if.c\n@@ -0,0 +1,256 @@\n+// SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB\n+/* Copyright (c) 2019, Intel Corporation. */\n+\n+#include <linux/module.h>\n+#include <linux/moduleparam.h>\n+#include <linux/netdevice.h>\n+#include <linux/etherdevice.h>\n+#include <net/addrconf.h>\n+#include \"main.h\"\n+#include \"i40iw_hw.h\"\n+#include \"i40e_client.h\"\n+\n+/**\n+ * i40iw_request_reset - Request a reset\n+ * @rf: RDMA PCI function\n+ *\n+ */\n+void i40iw_request_reset(struct irdma_pci_f *rf)\n+{\n+\tstruct i40e_info *ldev = (struct i40e_info *)rf->ldev.if_ldev;\n+\n+\tldev->ops->request_reset(ldev, rf->ldev.if_client, 1);\n+}\n+\n+/**\n+ * i40iw_open - client interface operation open for iwarp/uda device\n+ * @ldev: lan device information\n+ * @client: iwarp client information, provided during registration\n+ *\n+ * Called by the lan driver during the processing of client register\n+ * Create device resources, set up queues, pble and hmc objects and\n+ * register the device with the ib verbs interface\n+ * Return 0 if successful, otherwise return error\n+ */\n+static int i40iw_open(struct i40e_info *ldev, struct i40e_client *client)\n+{\n+\tstruct irdma_device *iwdev = NULL;\n+\tstruct irdma_handler *hdl = NULL;\n+\tstruct irdma_priv_ldev *pldev;\n+\tstruct irdma_sc_dev *dev;\n+\tstruct irdma_pci_f *rf;\n+\tstruct irdma_l2params l2params = {};\n+\tint err_code = -EIO;\n+\tint i;\n+\tu16 qset;\n+\tu16 last_qset = IRDMA_NO_QSET;\n+\n+\thdl = irdma_find_handler(ldev->pcidev);\n+\tif (hdl)\n+\t\treturn 0;\n+\n+\thdl = kzalloc((sizeof(*hdl) + sizeof(*iwdev)), GFP_KERNEL);\n+\tif (!hdl)\n+\t\treturn -ENOMEM;\n+\n+\tiwdev = (struct irdma_device *)((u8 *)hdl + sizeof(*hdl));\n+\n+\tiwdev->param_wq = alloc_ordered_workqueue(\"l2params\", WQ_MEM_RECLAIM);\n+\tif (!iwdev->param_wq)\n+\t\tgoto error;\n+\n+\trf = &hdl->rf;\n+\trf->hdl = hdl;\n+\tdev = &rf->sc_dev;\n+\tdev->back_dev = rf;\n+\trf->rdma_ver = IRDMA_GEN_1;\n+\thdl->platform_dev = &ldev->platform_dev;\n+\tirdma_init_rf_config_params(rf);\n+\trf->init_hw = i40iw_init_hw;\n+\trf->hw.hw_addr = ldev->hw_addr;\n+\trf->pdev = ldev->pcidev;\n+\trf->netdev = ldev->netdev;\n+\tdev->pci_rev = rf->pdev->revision;\n+\tiwdev->rf = rf;\n+\tiwdev->hdl = hdl;\n+\tiwdev->ldev = &rf->ldev;\n+\tiwdev->init_state = INITIAL_STATE;\n+\tiwdev->rcv_wnd = IRDMA_CM_DEFAULT_RCV_WND_SCALED;\n+\tiwdev->rcv_wscale = IRDMA_CM_DEFAULT_RCV_WND_SCALE;\n+\tiwdev->netdev = ldev->netdev;\n+\tiwdev->create_ilq = true;\n+\tiwdev->vsi_num = 0;\n+\n+\tpldev = &rf->ldev;\n+\thdl->ldev = pldev;\n+\tpldev->if_client = client;\n+\tpldev->if_ldev = ldev;\n+\tpldev->fn_num = ldev->fid;\n+\tpldev->ftype = ldev->ftype;\n+\tpldev->pf_vsi_num = 0;\n+\tpldev->msix_count = ldev->msix_count;\n+\tpldev->msix_entries = ldev->msix_entries;\n+\n+\tif (irdma_ctrl_init_hw(rf))\n+\t\tgoto error;\n+\n+\tl2params.mtu =\n+\t\t(ldev->params.mtu) ? 
ldev->params.mtu : IRDMA_DEFAULT_MTU;\n+\tfor (i = 0; i < I40E_CLIENT_MAX_USER_PRIORITY; i++) {\n+\t\tqset = ldev->params.qos.prio_qos[i].qs_handle;\n+\t\tl2params.up2tc[i] = ldev->params.qos.prio_qos[i].tc;\n+\t\tl2params.qs_handle_list[i] = qset;\n+\t\tif (last_qset == IRDMA_NO_QSET)\n+\t\t\tlast_qset = qset;\n+\t\telse if ((qset != last_qset) && (qset != IRDMA_NO_QSET))\n+\t\t\tiwdev->dcb = true;\n+\t}\n+\n+\tif (irdma_rt_init_hw(rf, iwdev, &l2params)) {\n+\t\tirdma_deinit_ctrl_hw(rf);\n+\t\tgoto error;\n+\t}\n+\n+\tirdma_add_handler(hdl);\n+\treturn 0;\n+error:\n+\tkfree(hdl);\n+\treturn err_code;\n+}\n+\n+/**\n+ * i40iw_l2params_worker - worker for l2 params change\n+ * @work: work pointer for l2 params\n+ */\n+static void i40iw_l2params_worker(struct work_struct *work)\n+{\n+\tstruct l2params_work *dwork =\n+\t\tcontainer_of(work, struct l2params_work, work);\n+\tstruct irdma_device *iwdev = dwork->iwdev;\n+\n+\tirdma_change_l2params(&iwdev->vsi, &dwork->l2params);\n+\tatomic_dec(&iwdev->params_busy);\n+\tkfree(work);\n+}\n+\n+/**\n+ * i40iw_l2param_change - handle qs handles for qos and mss change\n+ * @ldev: lan device information\n+ * @client: client for parameter change\n+ * @params: new parameters from L2\n+ */\n+static void i40iw_l2param_change(struct i40e_info *ldev,\n+\t\t\t\t struct i40e_client *client,\n+\t\t\t\t struct i40e_params *params)\n+{\n+\tstruct irdma_handler *hdl;\n+\tstruct irdma_l2params *l2params;\n+\tstruct l2params_work *work;\n+\tstruct irdma_device *iwdev;\n+\tint i;\n+\n+\thdl = irdma_find_handler(ldev->pcidev);\n+\tif (!hdl)\n+\t\treturn;\n+\n+\tiwdev = (struct irdma_device *)((u8 *)hdl + sizeof(*hdl));\n+\n+\tif (atomic_read(&iwdev->params_busy))\n+\t\treturn;\n+\twork = kzalloc(sizeof(*work), GFP_KERNEL);\n+\tif (!work)\n+\t\treturn;\n+\n+\tatomic_inc(&iwdev->params_busy);\n+\twork->iwdev = iwdev;\n+\tl2params = &work->l2params;\n+\tfor (i = 0; i < I40E_CLIENT_MAX_USER_PRIORITY; i++)\n+\t\tl2params->qs_handle_list[i] = params->qos.prio_qos[i].qs_handle;\n+\n+\tl2params->mtu = (params->mtu) ? 
params->mtu : iwdev->vsi.mtu;\n+\n+\tINIT_WORK(&work->work, i40iw_l2params_worker);\n+\tqueue_work(iwdev->param_wq, &work->work);\n+}\n+\n+/**\n+ * i40iw_close - client interface operation close for iwarp/uda device\n+ * @ldev: lan device information\n+ * @client: client to close\n+ * @reset: flag to indicate close on reset\n+ *\n+ * Called by the lan driver during the processing of client unregister\n+ * Destroy and clean up the driver resources\n+ */\n+static void i40iw_close(struct i40e_info *ldev, struct i40e_client *client,\n+\t\t\tbool reset)\n+{\n+\tstruct irdma_handler *hdl;\n+\tstruct irdma_pci_f *rf;\n+\tstruct irdma_device *iwdev;\n+\n+\thdl = irdma_find_handler(ldev->pcidev);\n+\tif (!hdl)\n+\t\treturn;\n+\trf = &hdl->rf;\n+\tiwdev = (struct irdma_device *)((u8 *)hdl + sizeof(*hdl));\n+\n+\tif (iwdev->param_wq)\n+\t\tdestroy_workqueue(iwdev->param_wq);\n+\n+\tif (reset)\n+\t\tiwdev->reset = true;\n+\n+\tirdma_deinit_rt_device(iwdev);\n+\tirdma_deinit_ctrl_hw(rf);\n+\tirdma_del_handler(irdma_find_handler(ldev->pcidev));\n+\tkfree(hdl);\n+\tpr_info(\"IRDMA hardware deinitialization complete\\n\");\n+}\n+\n+/* client interface functions */\n+static const struct i40e_client_ops i40e_ops = {\n+\t.open = i40iw_open,\n+\t.close = i40iw_close,\n+\t.l2_param_change = i40iw_l2param_change\n+};\n+\n+static struct i40e_client i40iw_client = {\n+\t.name = \"irdma\",\n+\t.ops = &i40e_ops,\n+\t.version.major = I40E_CLIENT_VERSION_MAJOR,\n+\t.version.minor = I40E_CLIENT_VERSION_MINOR,\n+\t.version.build = I40E_CLIENT_VERSION_BUILD,\n+\t.type = I40E_CLIENT_IWARP,\n+};\n+\n+int i40iw_probe(struct platform_device *pdev)\n+{\n+\tstruct i40e_info *ldev = container_of(pdev, struct i40e_info,\n+\t\t\t\t\t      platform_dev);\n+\tif (ldev->version.major != I40E_CLIENT_VERSION_MAJOR ||\n+\t    ldev->version.minor != I40E_CLIENT_VERSION_MINOR) {\n+\t\tpr_err(\"version mismatch:\\n\");\n+\t\tpr_err(\"expected major ver %d, caller specified major ver %d\\n\",\n+\t\t       I40E_CLIENT_VERSION_MAJOR, ldev->version.major);\n+\t\tpr_err(\"expected minor ver %d, caller specified minor ver %d\\n\",\n+\t\t       I40E_CLIENT_VERSION_MINOR, ldev->version.minor);\n+\t\treturn -EINVAL;\n+\t}\n+\n+\tif (!ldev->ops->client_device_register)\n+\t\treturn -EINVAL;\n+\n+\tldev->client = &i40iw_client;\n+\n+\treturn ldev->ops->client_device_register(ldev);\n+}\n+\n+void i40iw_remove(struct platform_device *pdev)\n+{\n+\tstruct i40e_info *ldev = container_of(pdev, struct i40e_info,\n+\t\t\t\t\t      platform_dev);\n+\n+\tif (ldev->ops->client_device_unregister)\n+\t\tldev->ops->client_device_unregister(ldev);\n+}\ndiff --git a/drivers/infiniband/hw/irdma/irdma_if.c b/drivers/infiniband/hw/irdma/irdma_if.c\nnew file mode 100644\nindex 0000000..317b06a\n--- /dev/null\n+++ b/drivers/infiniband/hw/irdma/irdma_if.c\n@@ -0,0 +1,426 @@\n+// SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB\n+/* Copyright (c) 2019, Intel Corporation. 
*/\n+\n+#include <linux/module.h>\n+#include <linux/moduleparam.h>\n+#include <ice_idc.h>\n+#include \"main.h\"\n+#include \"ws.h\"\n+#include \"icrdma_hw.h\"\n+\n+/**\n+ * irdma_lan_register_qset - Register qset with LAN driver\n+ * @vsi: vsi structure\n+ * @tc_node: Traffic class node\n+ */\n+enum irdma_status_code irdma_lan_register_qset(struct irdma_sc_vsi *vsi,\n+\t\t\t\t\t       struct irdma_ws_node *tc_node)\n+{\n+\tstruct irdma_device *iwdev = vsi->back_vsi;\n+\tstruct ice_peer_dev *ldev = (struct ice_peer_dev *)iwdev->ldev->if_ldev;\n+\tstruct ice_res rdma_qset_res = {};\n+\tint ret;\n+\n+\tif (ldev->ops->alloc_res) {\n+\t\trdma_qset_res.cnt_req = 1;\n+\t\trdma_qset_res.res_type = ICE_RDMA_QSETS_TXSCHED;\n+\t\trdma_qset_res.res[0].res.qsets.qs_handle = tc_node->qs_handle;\n+\t\trdma_qset_res.res[0].res.qsets.tc = tc_node->traffic_class;\n+\t\trdma_qset_res.res[0].res.qsets.vsi_id = vsi->vsi_idx;\n+\t\tret = ldev->ops->alloc_res(ldev, &rdma_qset_res, 0);\n+\t\tif (ret) {\n+\t\t\tdev_dbg(rfdev_to_dev(vsi->dev),\n+\t\t\t\t\"WS: LAN alloc_res for rdma qset failed.\\n\");\n+\t\t\treturn IRDMA_ERR_NO_MEMORY;\n+\t\t}\n+\n+\t\ttc_node->l2_sched_node_id = rdma_qset_res.res[0].res.qsets.teid;\n+\t\tvsi->qos[tc_node->user_pri].l2_sched_node_id =\n+\t\t\trdma_qset_res.res[0].res.qsets.teid;\n+\t}\n+\n+\treturn 0;\n+}\n+\n+/**\n+ * irdma_lan_unregister_qset - Unregister qset with LAN driver\n+ * @vsi: vsi structure\n+ * @tc_node: Traffic class node\n+ */\n+void irdma_lan_unregister_qset(struct irdma_sc_vsi *vsi,\n+\t\t\t       struct irdma_ws_node *tc_node)\n+{\n+\tstruct irdma_device *iwdev = vsi->back_vsi;\n+\tstruct ice_peer_dev *ldev = (struct ice_peer_dev *)iwdev->ldev->if_ldev;\n+\tstruct ice_res rdma_qset_res = {};\n+\n+\tif (ldev->ops->free_res) {\n+\t\trdma_qset_res.res_allocated = 1;\n+\t\trdma_qset_res.res_type = ICE_RDMA_QSETS_TXSCHED;\n+\t\trdma_qset_res.res[0].res.qsets.vsi_id = vsi->vsi_idx;\n+\t\trdma_qset_res.res[0].res.qsets.teid = tc_node->l2_sched_node_id;\n+\t\trdma_qset_res.res[0].res.qsets.qs_handle = tc_node->qs_handle;\n+\n+\t\tif (ldev->ops->free_res(ldev, &rdma_qset_res))\n+\t\t\tdev_dbg(rfdev_to_dev(vsi->dev),\n+\t\t\t\t\"WS: LAN free_res for rdma qset failed.\\n\");\n+\t}\n+}\n+\n+/**\n+ * irdma_log_invalid_mtu: log warning on invalid mtu\n+ * @mtu: maximum tranmission unit\n+ */\n+static void irdma_log_invalid_mtu(u16 mtu)\n+{\n+\tif (mtu < IRDMA_MIN_MTU_IPV4)\n+\t\tpr_warn(\"Current MTU setting of %d is too low for RDMA traffic. 
Minimum MTU is 576 for IPv4 and 1280 for IPv6\\n\",\n+\t\t\tmtu);\n+\telse if (mtu < IRDMA_MIN_MTU_IPV6)\n+\t\tpr_warn(\"Current MTU setting of %d is too low for IPv6 RDMA traffic, the minimum is 1280\\n\",\n+\t\t\tmtu);\n+}\n+\n+/**\n+ * irdma_prep_tc_change - Prepare for TC changes\n+ * @ldev: Peer device structure\n+ */\n+static void irdma_prep_tc_change(struct ice_peer_dev *ldev)\n+{\n+\tstruct irdma_device *iwdev;\n+\n+\tiwdev = irdma_get_device(ldev->netdev);\n+\tif (!iwdev)\n+\t\treturn;\n+\n+\tif (iwdev->vsi.tc_change_pending)\n+\t\tgoto done;\n+\n+\tiwdev->vsi.tc_change_pending = true;\n+\tirdma_suspend_qps(&iwdev->vsi);\n+\n+\t/* Wait for all qp's to suspend */\n+\twait_event_timeout(iwdev->suspend_wq,\n+\t\t\t   !atomic_read(&iwdev->vsi.qp_suspend_reqs),\n+\t\t\t   IRDMA_EVENT_TIMEOUT);\n+\tirdma_ws_reset(&iwdev->vsi);\n+done:\n+\tirdma_put_device(iwdev);\n+}\n+\n+/**\n+ * irdma_event_handler - Called by LAN driver to notify events\n+ * @ldev: Peer device structure\n+ * @event: event from LAN driver\n+ */\n+static void irdma_event_handler(struct ice_peer_dev *ldev,\n+\t\t\t\tstruct ice_event *event)\n+{\n+\tstruct irdma_l2params l2params = {};\n+\tstruct irdma_device *iwdev;\n+\tint i;\n+\n+\tiwdev = irdma_get_device(ldev->netdev);\n+\tif (!iwdev)\n+\t\treturn;\n+\n+\tif (test_bit(ICE_EVENT_LINK_CHANGE, event->type)) {\n+\t\tdev_dbg(rfdev_to_dev(&iwdev->rf->sc_dev),\n+\t\t\t\"CLNT: LINK_CHANGE event\\n\");\n+\t} else if (test_bit(ICE_EVENT_MTU_CHANGE, event->type)) {\n+\t\tdev_dbg(rfdev_to_dev(&iwdev->rf->sc_dev),\n+\t\t\t\"CLNT: new MTU = %d\\n\", event->info.mtu);\n+\t\tif (iwdev->vsi.mtu != event->info.mtu) {\n+\t\t\tl2params.mtu = event->info.mtu;\n+\t\t\tl2params.mtu_changed = true;\n+\t\t\tirdma_log_invalid_mtu(l2params.mtu);\n+\t\t\tirdma_change_l2params(&iwdev->vsi, &l2params);\n+\t\t}\n+\t} else if (test_bit(ICE_EVENT_TC_CHANGE, event->type)) {\n+\t\tif (!iwdev->vsi.tc_change_pending)\n+\t\t\tgoto done;\n+\n+\t\tl2params.tc_changed = true;\n+\t\tdev_dbg(rfdev_to_dev(&iwdev->rf->sc_dev), \"CLNT: TC Change\\n\");\n+\t\tiwdev->dcb = event->info.port_qos.num_tc > 1 ? 
true : false;\n+\n+\t\tfor (i = 0; i < ICE_IDC_MAX_USER_PRIORITY; ++i)\n+\t\t\tl2params.up2tc[i] = event->info.port_qos.up2tc[i];\n+\t\tirdma_change_l2params(&iwdev->vsi, &l2params);\n+\t} else if (test_bit(ICE_EVENT_API_CHANGE, event->type)) {\n+\t\tdev_dbg(rfdev_to_dev(&iwdev->rf->sc_dev),\n+\t\t\t\"CLNT: API_CHANGE\\n\");\n+\t}\n+\n+done:\n+\tirdma_put_device(iwdev);\n+}\n+\n+/**\n+ * irdma_open - client interface operation open for RDMA device\n+ * @ldev: lan device information\n+ *\n+ * Called by the lan driver during the processing of client\n+ * register.\n+ */\n+static int irdma_open(struct ice_peer_dev *ldev)\n+{\n+\tstruct irdma_handler *hdl;\n+\tstruct irdma_device *iwdev;\n+\tstruct irdma_sc_dev *dev;\n+\tenum irdma_status_code status;\n+\tstruct ice_event events = {};\n+\tstruct irdma_pci_f *rf;\n+\tstruct irdma_priv_ldev *pldev;\n+\tstruct irdma_l2params l2params = {};\n+\tint i;\n+\n+\thdl = irdma_find_handler(ldev->pdev);\n+\tif (!hdl)\n+\t\treturn -ENODEV;\n+\n+\trf = &hdl->rf;\n+\tif (rf->init_state != CEQ0_CREATED)\n+\t\treturn -EINVAL;\n+\n+\tiwdev = kzalloc(sizeof(*iwdev), GFP_KERNEL);\n+\tif (!iwdev)\n+\t\treturn -ENOMEM;\n+\n+\tiwdev->hdl = hdl;\n+\tiwdev->rf = rf;\n+\tiwdev->ldev = &rf->ldev;\n+\tpldev = &rf->ldev;\n+\tpldev->pf_vsi_num = ldev->pf_vsi_num;\n+\n+\t/* Set configfs default values */\n+\tiwdev->push_mode = 0;\n+\tiwdev->rcv_wnd = IRDMA_CM_DEFAULT_RCV_WND_SCALED;\n+\tiwdev->rcv_wscale = IRDMA_CM_DEFAULT_RCV_WND_SCALE;\n+\n+\tdev = &hdl->rf.sc_dev;\n+\tiwdev->netdev = ldev->netdev;\n+\tiwdev->create_ilq = true;\n+\tif (rf->protocol_used == IRDMA_ROCE_PROTOCOL_ONLY) {\n+\t\tiwdev->roce_mode = true;\n+\t\tiwdev->create_ilq = false;\n+\t}\n+\tl2params.mtu = ldev->initial_mtu;\n+\n+\tl2params.num_tc = ldev->initial_qos_info.num_tc;\n+\tl2params.num_apps = ldev->initial_qos_info.num_apps;\n+\tl2params.vsi_prio_type = ldev->initial_qos_info.vsi_priority_type;\n+\tl2params.vsi_rel_bw = ldev->initial_qos_info.vsi_relative_bw;\n+\tfor (i = 0; i < l2params.num_tc; i++) {\n+\t\tl2params.tc_info[i].egress_virt_up =\n+\t\t\tldev->initial_qos_info.tc_info[i].egress_virt_up;\n+\t\tl2params.tc_info[i].ingress_virt_up =\n+\t\t\tldev->initial_qos_info.tc_info[i].ingress_virt_up;\n+\t\tl2params.tc_info[i].prio_type =\n+\t\t\tldev->initial_qos_info.tc_info[i].prio_type;\n+\t\tl2params.tc_info[i].rel_bw =\n+\t\t\tldev->initial_qos_info.tc_info[i].rel_bw;\n+\t\tl2params.tc_info[i].tc_ctx =\n+\t\t\tldev->initial_qos_info.tc_info[i].tc_ctx;\n+\t}\n+\tfor (i = 0; i < ICE_IDC_MAX_USER_PRIORITY; i++)\n+\t\tl2params.up2tc[i] = ldev->initial_qos_info.up2tc[i];\n+\n+\tiwdev->vsi_num = ldev->pf_vsi_num;\n+\tldev->ops->update_vsi_filter(ldev, ICE_RDMA_FILTER_BOTH, true);\n+\n+\tstatus = irdma_rt_init_hw(rf, iwdev, &l2params);\n+\tif (status) {\n+\t\tkfree(iwdev);\n+\t\treturn -EIO;\n+\t}\n+\n+\tevents.reporter = ldev;\n+\tset_bit(ICE_EVENT_LINK_CHANGE, events.type);\n+\tset_bit(ICE_EVENT_MTU_CHANGE, events.type);\n+\tset_bit(ICE_EVENT_TC_CHANGE, events.type);\n+\tset_bit(ICE_EVENT_API_CHANGE, events.type);\n+\n+\tif (ldev->ops->reg_for_notification)\n+\t\tldev->ops->reg_for_notification(ldev, &events);\n+\tdev_info(rfdev_to_dev(dev), \"IRDMA VSI Open Successful\");\n+\tinit_waitqueue_head(&iwdev->suspend_wq);\n+\n+\treturn 0;\n+}\n+\n+/**\n+ * irdma_close - client interface operation close for iwarp/uda device\n+ * @ldev: lan device information\n+ * @reason: reason for closing\n+ *\n+ * Called by the lan driver during the processing of client unregister\n+ * Destroy and clean up the 
driver resources\n+ */\n+static void irdma_close(struct ice_peer_dev *ldev, enum ice_close_reason reason)\n+{\n+\tstruct irdma_device *iwdev;\n+\n+\tiwdev = irdma_get_device(ldev->netdev);\n+\tif (!iwdev)\n+\t\treturn;\n+\n+\tirdma_put_device(iwdev);\n+\tif (reason == ICE_REASON_HW_RESET_PENDING) {\n+\t\tiwdev->reset = true;\n+\t\tiwdev->rf->reset = true;\n+\t}\n+\n+\tif (iwdev->init_state >= CEQ0_CREATED)\n+\t\tirdma_deinit_rt_device(iwdev);\n+\n+\tkfree(iwdev);\n+\tldev->ops->update_vsi_filter(ldev, ICE_RDMA_FILTER_BOTH, false);\n+\tpr_info(\"IRDMA VSI close complete\\n\");\n+}\n+\n+/**\n+ * irdma_deinit_pf - Unrolls PF initializations done during irdma_probe()\n+ * @rf: RDMA PCI function\n+ */\n+static void irdma_deinit_pf(struct irdma_pci_f *rf)\n+{\n+\tif (rf->free_qp_wq)\n+\t\tdestroy_workqueue(rf->free_qp_wq);\n+\tif (rf->free_cqbuf_wq)\n+\t\tdestroy_workqueue(rf->free_cqbuf_wq);\n+\tirdma_deinit_ctrl_hw(rf);\n+\tirdma_del_handler(rf->hdl);\n+\tkfree(rf->hdl);\n+}\n+\n+/**\n+ * irdma_remove - GEN_2 device remove()\n+ * @pdev: platform device\n+ *\n+ * Called on module unload.\n+ */\n+int irdma_remove(struct platform_device *pdev)\n+{\n+\tstruct ice_peer_dev *ldev = container_of(pdev, struct ice_peer_dev,\n+\t\t\t\t\t\t platform_dev);\n+\tstruct irdma_handler *hdl;\n+\n+\thdl = irdma_find_handler(ldev->pdev);\n+\tif (!hdl)\n+\t\treturn 0;\n+\n+\tif (ldev->ops->peer_unregister)\n+\t\tldev->ops->peer_unregister(ldev);\n+\n+\tirdma_deinit_pf(&hdl->rf);\n+\tpr_info(\"IRDMA hardware deinitialization complete\\n\");\n+\n+\treturn 0;\n+}\n+\n+static const struct ice_peer_ops irdma_peer_ops = {\n+\t.close = irdma_close,\n+\t.event_handler = irdma_event_handler,\n+\t.open = irdma_open,\n+\t.prep_tc_change = irdma_prep_tc_change,\n+};\n+\n+static struct ice_peer_drv irdma_peer_drv = {\n+\t.driver_id = ICE_PEER_RDMA_DRIVER,\n+\t.name = KBUILD_MODNAME,\n+\t.ver.major = ICE_PEER_MAJOR_VER,\n+\t.ver.minor = ICE_PEER_MINOR_VER,\n+};\n+\n+/**\n+ * irdma_probe - GEN_2 device probe()\n+ * @pdev: platform device\n+ *\n+ * Create device resources, set up queues, pble and hmc objects.\n+ * Return 0 if successful, otherwise return error\n+ */\n+int irdma_probe(struct platform_device *pdev)\n+{\n+\tstruct ice_peer_dev *ldev = container_of(pdev, struct ice_peer_dev,\n+\t\t\t\t\t\t platform_dev);\n+\tstruct irdma_handler *hdl;\n+\tstruct irdma_pci_f *rf;\n+\tstruct irdma_sc_dev *dev;\n+\tstruct irdma_priv_ldev *pldev;\n+\tint ret;\n+\n+\tpr_info(\"probe: ldev=%p, ldev->dev.pdev.bus->number=%d, ldev->netdev=%p\\n\",\n+\t\tldev, ldev->pdev->bus->number, ldev->netdev);\n+\n+\tif (ldev->ver.major != ICE_PEER_MAJOR_VER ||\n+\t    ldev->ver.minor != ICE_PEER_MINOR_VER) {\n+\t\tpr_err(\"version mismatch:\\n\");\n+\t\tpr_err(\"expected major ver %d, caller specified major ver %d\\n\",\n+\t\t       ICE_PEER_MAJOR_VER, ldev->ver.major);\n+\t\tpr_err(\"expected minor ver %d, caller specified minor ver %d\\n\",\n+\t\t       ICE_PEER_MINOR_VER, ldev->ver.minor);\n+\t\treturn -EINVAL;\n+\t}\n+\n+\thdl = irdma_find_handler(ldev->pdev);\n+\tif (hdl)\n+\t\treturn -EBUSY;\n+\n+\tif (!ldev->ops->peer_register)\n+\t\treturn -EINVAL;\n+\n+\thdl = kzalloc(sizeof(*hdl), GFP_KERNEL);\n+\tif (!hdl)\n+\t\treturn -ENOMEM;\n+\n+\trf = &hdl->rf;\n+\tpldev = &rf->ldev;\n+\thdl->ldev = pldev;\n+\thdl->platform_dev = pdev;\n+\trf->hdl = hdl;\n+\tdev = &rf->sc_dev;\n+\tdev->back_dev = rf;\n+\trf->init_hw = icrdma_init_hw;\n+\tpldev->if_ldev = ldev;\n+\trf->rdma_ver = IRDMA_GEN_2;\n+\tirdma_init_rf_config_params(rf);\n+\tdev->pci_rev = 
ldev->pdev->revision;\n+\trf->default_vsi.vsi_idx = ldev->pf_vsi_num;\n+\t/* save information from ldev to priv_ldev*/\n+\tpldev->fn_num = ldev->fn_num;\n+\trf->hw.hw_addr = ldev->hw_addr;\n+\trf->pdev = ldev->pdev;\n+\trf->netdev = ldev->netdev;\n+\tpldev->ftype = ldev->ftype;\n+\tpldev->msix_count = ldev->msix_count;\n+\tpldev->msix_entries = ldev->msix_entries;\n+\tirdma_add_handler(hdl);\n+\tif (irdma_ctrl_init_hw(rf)) {\n+\t\tirdma_del_handler(hdl);\n+\t\tkfree(hdl);\n+\t\treturn -EIO;\n+\t}\n+\tldev->peer_ops = &irdma_peer_ops;\n+\tldev->peer_drv = &irdma_peer_drv;\n+\tret = ldev->ops->peer_register(ldev);\n+\tif (ret) {\n+\t\tirdma_deinit_pf(rf);\n+\t\treturn ret;\n+\t}\n+\n+\treturn 0;\n+}\n+\n+/**\n+ * icrdma_request_reset - Request a reset\n+ * @rf: RDMA PCI function\n+ *\n+ */\n+void icrdma_request_reset(struct irdma_pci_f *rf)\n+{\n+\tstruct ice_peer_dev *ldev = (struct ice_peer_dev *)rf->ldev.if_ldev;\n+\n+\tif (ldev && ldev->ops && ldev->ops->request_reset)\n+\t\tldev->ops->request_reset(ldev, ICE_PEER_PFR);\n+}\ndiff --git a/drivers/infiniband/hw/irdma/main.c b/drivers/infiniband/hw/irdma/main.c\nnew file mode 100644\nindex 0000000..b0c1a28\n--- /dev/null\n+++ b/drivers/infiniband/hw/irdma/main.c\n@@ -0,0 +1,531 @@\n+// SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB\n+/* Copyright (c) 2019, Intel Corporation. */\n+\n+#include \"main.h\"\n+\n+/* Legacy i40iw module parameters */\n+static int resource_profile;\n+module_param(resource_profile, int, 0644);\n+MODULE_PARM_DESC(resource_profile, \"Resource Profile: 0=PF only, 1=Weighted VF, 2=Even Distribution\");\n+\n+static int max_rdma_vfs = 32;\n+module_param(max_rdma_vfs, int, 0644);\n+MODULE_PARM_DESC(max_rdma_vfs, \"Maximum VF count: 0-32 32=default\");\n+\n+static int mpa_version = 2;\n+module_param(mpa_version, int, 0644);\n+MODULE_PARM_DESC(mpa_version, \"MPA version: deprecated parameter\");\n+\n+static int push_mode;\n+module_param(push_mode, int, 0644);\n+MODULE_PARM_DESC(push_mode, \"Low latency mode: deprecated parameter\");\n+\n+static int debug;\n+module_param(debug, int, 0644);\n+MODULE_PARM_DESC(debug, \"debug flags: deprecated parameter\");\n+\n+MODULE_ALIAS(\"i40iw\");\n+MODULE_AUTHOR(\"Intel Corporation, <e1000-rdma@lists.sourceforge.net>\");\n+MODULE_DESCRIPTION(\"Intel(R) Ethernet Connection RDMA Driver\");\n+MODULE_LICENSE(\"Dual BSD/GPL\");\n+\n+LIST_HEAD(irdma_handlers);\n+DEFINE_SPINLOCK(irdma_handler_lock);\n+\n+static struct notifier_block irdma_inetaddr_notifier = {\n+\t.notifier_call = irdma_inetaddr_event\n+};\n+\n+static struct notifier_block irdma_inetaddr6_notifier = {\n+\t.notifier_call = irdma_inet6addr_event\n+};\n+\n+static struct notifier_block irdma_net_notifier = {\n+\t.notifier_call = irdma_net_event\n+};\n+\n+static struct notifier_block irdma_netdevice_notifier = {\n+\t.notifier_call = irdma_netdevice_event\n+};\n+\n+void irdma_init_rf_config_params(struct irdma_pci_f *rf)\n+{\n+\tstruct irdma_dl_priv *dl_priv;\n+\n+\trf->rsrc_profile = (resource_profile < IRDMA_HMC_PROFILE_EQUAL) ?\n+\t\t\t    (u8)resource_profile + IRDMA_HMC_PROFILE_DEFAULT :\n+\t\t\t    IRDMA_HMC_PROFILE_DEFAULT;\n+\trf->max_rdma_vfs = (rf->rsrc_profile != IRDMA_HMC_PROFILE_DEFAULT) ?\n+\t\t\t    max_rdma_vfs : 0;\n+\trf->max_ena_vfs = rf->max_rdma_vfs;\n+\tdl_priv = platform_get_drvdata(rf->hdl->platform_dev);\n+\trf->limits_sel = dl_priv->limits_sel;\n+\trf->protocol_used = dl_priv->roce_ena ? 
IRDMA_ROCE_PROTOCOL_ONLY :\n+\t\t\t\t\t\tIRDMA_IWARP_PROTOCOL_ONLY;\n+}\n+\n+/**\n+ * irdma_get_device - find a iwdev given a netdev\n+ * @netdev: pointer to net_device\n+ *\n+ * This function takes a reference on ibdev and prevents ib\n+ * device deregistration. The caller must call a matching\n+ * irdma_put_device.\n+ */\n+struct irdma_device *irdma_get_device(struct net_device *netdev)\n+{\n+\tstruct ib_device *ibdev = ib_device_get_by_netdev(netdev,\n+\t\t\t\t\t\t\t  RDMA_DRIVER_I40IW);\n+\n+\tif (!ibdev)\n+\t\treturn NULL;\n+\n+\treturn to_iwdev(ibdev);\n+}\n+\n+/**\n+ * irdma_put_device - release ibdev refcnt\n+ * @iwdev: device\n+ *\n+ * release refcnt on ibdev taken with irdma_get_device.\n+ */\n+void irdma_put_device(struct irdma_device *iwdev)\n+{\n+\tstruct ib_device *ibdev = &iwdev->iwibdev->ibdev;\n+\n+\tib_device_put(ibdev);\n+}\n+\n+/**\n+ * irdma_find_ice_handler - find a handler given a client info\n+ * @pdev: pointer to pci dev info\n+ */\n+struct irdma_handler *irdma_find_handler(struct pci_dev *pdev)\n+{\n+\tstruct irdma_handler *hdl;\n+\tunsigned long flags;\n+\n+\tspin_lock_irqsave(&irdma_handler_lock, flags);\n+\tlist_for_each_entry (hdl, &irdma_handlers, list) {\n+\t\tif (hdl->rf.pdev->devfn == pdev->devfn &&\n+\t\t    hdl->rf.pdev->bus->number == pdev->bus->number) {\n+\t\t\tspin_unlock_irqrestore(&irdma_handler_lock, flags);\n+\t\t\treturn hdl;\n+\t\t}\n+\t}\n+\tspin_unlock_irqrestore(&irdma_handler_lock, flags);\n+\n+\treturn NULL;\n+}\n+\n+/**\n+ * irdma_add_handler - add a handler to the list\n+ * @hdl: handler to be added to the handler list\n+ */\n+void irdma_add_handler(struct irdma_handler *hdl)\n+{\n+\tunsigned long flags;\n+\n+\tspin_lock_irqsave(&irdma_handler_lock, flags);\n+\tlist_add(&hdl->list, &irdma_handlers);\n+\tspin_unlock_irqrestore(&irdma_handler_lock, flags);\n+}\n+\n+/**\n+ * irdma_del_handler - delete a handler from the list\n+ * @hdl: handler to be deleted from the handler list\n+ */\n+void irdma_del_handler(struct irdma_handler *hdl)\n+{\n+\tunsigned long flags;\n+\n+\tspin_lock_irqsave(&irdma_handler_lock, flags);\n+\tlist_del(&hdl->list);\n+\tspin_unlock_irqrestore(&irdma_handler_lock, flags);\n+}\n+\n+/**\n+ * irdma_register_notifiers - register tcp ip notifiers\n+ */\n+void irdma_register_notifiers(void)\n+{\n+\tregister_inetaddr_notifier(&irdma_inetaddr_notifier);\n+\tregister_inet6addr_notifier(&irdma_inetaddr6_notifier);\n+\tregister_netevent_notifier(&irdma_net_notifier);\n+\tregister_netdevice_notifier(&irdma_netdevice_notifier);\n+}\n+\n+void irdma_unregister_notifiers(void)\n+{\n+\tunregister_netevent_notifier(&irdma_net_notifier);\n+\tunregister_inetaddr_notifier(&irdma_inetaddr_notifier);\n+\tunregister_inet6addr_notifier(&irdma_inetaddr6_notifier);\n+\tunregister_netdevice_notifier(&irdma_netdevice_notifier);\n+}\n+\n+/**\n+ * irdma_add_ipv6_addr - add ipv6 address to the hw arp table\n+ * @iwdev: iwarp device\n+ */\n+static void irdma_add_ipv6_addr(struct irdma_device *iwdev)\n+{\n+\tstruct net_device *ip_dev;\n+\tstruct inet6_dev *idev;\n+\tstruct inet6_ifaddr *ifp, *tmp;\n+\tu32 local_ipaddr6[4];\n+\n+\trcu_read_lock();\n+\tfor_each_netdev_rcu (&init_net, ip_dev) {\n+\t\tif (((rdma_vlan_dev_vlan_id(ip_dev) < 0xFFFF &&\n+\t\t      rdma_vlan_dev_real_dev(ip_dev) == iwdev->netdev) ||\n+\t\t      ip_dev == iwdev->netdev) && ip_dev->flags & IFF_UP) {\n+\t\t\tidev = __in6_dev_get(ip_dev);\n+\t\t\tif (!idev) {\n+\t\t\t\tdev_err(rfdev_to_dev(&iwdev->rf->sc_dev),\n+\t\t\t\t\t\"ipv6 inet device not 
found\\n\");\n+\t\t\t\tbreak;\n+\t\t\t}\n+\t\t\tlist_for_each_entry_safe (ifp, tmp, &idev->addr_list,\n+\t\t\t\t\t\t  if_list) {\n+\t\t\t\tdev_info(rfdev_to_dev(&iwdev->rf->sc_dev),\n+\t\t\t\t\t \"IP=%pI6, vlan_id=%d, MAC=%pM\\n\",\n+\t\t\t\t\t &ifp->addr,\n+\t\t\t\t\t rdma_vlan_dev_vlan_id(ip_dev),\n+\t\t\t\t\t ip_dev->dev_addr);\n+\n+\t\t\t\tirdma_copy_ip_ntohl(local_ipaddr6,\n+\t\t\t\t\t\t    ifp->addr.in6_u.u6_addr32);\n+\t\t\t\tirdma_manage_arp_cache(iwdev->rf,\n+\t\t\t\t\t\t       ip_dev->dev_addr,\n+\t\t\t\t\t\t       local_ipaddr6, false,\n+\t\t\t\t\t\t       IRDMA_ARP_ADD);\n+\t\t\t}\n+\t\t}\n+\t}\n+\trcu_read_unlock();\n+}\n+\n+/**\n+ * irdma_add_ipv4_addr - add ipv4 address to the hw arp table\n+ * @iwdev: iwarp device\n+ */\n+static void irdma_add_ipv4_addr(struct irdma_device *iwdev)\n+{\n+\tstruct net_device *dev;\n+\tstruct in_device *idev;\n+\tbool got_lock = true;\n+\tu32 ip_addr;\n+\n+\tif (!rtnl_trylock())\n+\t\tgot_lock = false;\n+\n+\tfor_each_netdev (&init_net, dev) {\n+\t\tif (((rdma_vlan_dev_vlan_id(dev) < 0xFFFF &&\n+\t\t      rdma_vlan_dev_real_dev(dev) == iwdev->netdev) ||\n+\t\t      dev == iwdev->netdev) && dev->flags & IFF_UP) {\n+\t\t\tidev = in_dev_get(dev);\n+\t\t\tfor_ifa(idev)\n+\t\t\t{\n+\t\t\t\tdev_dbg(rfdev_to_dev(&iwdev->rf->sc_dev),\n+\t\t\t\t\t\"CM: IP=%pI4, vlan_id=%d, MAC=%pM\\n\",\n+\t\t\t\t\t&ifa->ifa_address,\n+\t\t\t\t\trdma_vlan_dev_vlan_id(dev),\n+\t\t\t\t\tdev->dev_addr);\n+\n+\t\t\t\tip_addr = ntohl(ifa->ifa_address);\n+\t\t\t\tirdma_manage_arp_cache(iwdev->rf, dev->dev_addr,\n+\t\t\t\t\t\t       &ip_addr, true,\n+\t\t\t\t\t\t       IRDMA_ARP_ADD);\n+\t\t\t}\n+\t\t\tendfor_ifa(idev);\n+\t\t\tin_dev_put(idev);\n+\t\t}\n+\t}\n+\tif (got_lock)\n+\t\trtnl_unlock();\n+}\n+\n+/**\n+ * irdma_add_ip - add ip addresses\n+ * @iwdev: iwarp device\n+ *\n+ * Add ipv4/ipv6 addresses to the arp cache\n+ */\n+void irdma_add_ip(struct irdma_device *iwdev)\n+{\n+\tirdma_add_ipv4_addr(iwdev);\n+\tirdma_add_ipv6_addr(iwdev);\n+}\n+\n+/**\n+ * irdma_request_reset - Request a reset\n+ * @rf: RDMA PCI function\n+ *\n+ */\n+void irdma_request_reset(struct irdma_pci_f *rf)\n+{\n+\tdev_warn(rfdev_to_dev(&rf->sc_dev),\n+\t\t \"Requesting a a reset from LAN driver\\n\");\n+\tif (rf->rdma_ver == IRDMA_GEN_1)\n+\t\ti40iw_request_reset(rf);\n+\telse\n+\t\ticrdma_request_reset(rf);\n+}\n+\n+static int irdma_devlink_rsrc_limits_validate(struct devlink *dl, u32 id,\n+\t\t\t\t\t      union devlink_param_value val,\n+\t\t\t\t\t      struct netlink_ext_ack *extack)\n+{\n+\tu8 value = val.vu8;\n+\n+\tif (value > 5) {\n+\t\tNL_SET_ERR_MSG_MOD(extack, \"resource limits selector range is (0-5)\");\n+\t\treturn -ERANGE;\n+\t}\n+\n+\treturn 0;\n+}\n+\n+enum irdma_dl_param_id {\n+\tIRDMA_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX,\n+\tIRDMA_DEVLINK_PARAM_ID_LIMITS_SELECTOR,\n+\tIRDMA_DEVLINK_PARAM_ID_ROCE_ENABLE,\n+};\n+\n+static const struct devlink_param irdma_devlink_params[] = {\n+\t/* Common */\n+\tDEVLINK_PARAM_DRIVER(IRDMA_DEVLINK_PARAM_ID_LIMITS_SELECTOR,\n+\t\t\t     \"resource_limits_selector\", DEVLINK_PARAM_TYPE_U8,\n+\t\t\t      BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),\n+\t\t\t      NULL, NULL, irdma_devlink_rsrc_limits_validate),\n+#define IRDMA_DL_COMMON_PARAMS_ARRAY_SZ 1\n+\t/* GEN_2 only */\n+\tDEVLINK_PARAM_DRIVER(IRDMA_DEVLINK_PARAM_ID_ROCE_ENABLE,\n+\t\t\t     \"roce_enable\", DEVLINK_PARAM_TYPE_BOOL,\n+\t\t\t      BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),\n+\t\t\t      NULL, NULL, NULL),\n+};\n+\n+static int irdma_devlink_reload(struct devlink 
*devlink,\n+\t\t\t\tstruct netlink_ext_ack *extack)\n+{\n+\tstruct irdma_dl_priv *priv = devlink_priv(devlink);\n+\tunion devlink_param_value saved_value;\n+\tconst struct platform_device_id *id = platform_get_device_id(priv->pdev);\n+\tu8 gen_ver = id->driver_data;\n+\tint ret;\n+\n+\tswitch (gen_ver) {\n+\tcase IRDMA_GEN_2:\n+\t\tirdma_remove(priv->pdev);\n+\t\tdevlink_param_driverinit_value_get(devlink,\n+\t\t\t\t\tIRDMA_DEVLINK_PARAM_ID_ROCE_ENABLE,\n+\t\t\t\t\t&saved_value);\n+\t\tpriv->roce_ena = saved_value.vbool;\n+\t\tdevlink_param_driverinit_value_get(devlink,\n+\t\t\t\t\tIRDMA_DEVLINK_PARAM_ID_LIMITS_SELECTOR,\n+\t\t\t\t\t&saved_value);\n+\t\tpriv->limits_sel = saved_value.vu8;\n+\t\tret = irdma_probe(priv->pdev);\n+\t\tbreak;\n+\tcase IRDMA_GEN_1:\n+\t\ti40iw_remove(priv->pdev);\n+\t\tdevlink_param_driverinit_value_get(devlink,\n+\t\t\t\t\tIRDMA_DEVLINK_PARAM_ID_LIMITS_SELECTOR,\n+\t\t\t\t\t&saved_value);\n+\t\tpriv->limits_sel = saved_value.vu8;\n+\t\tret = i40iw_probe(priv->pdev);\n+\t\tbreak;\n+\tdefault:\n+\t\tret = -ENODEV;\n+\t\tbreak;\n+\t}\n+\n+\treturn ret;\n+}\n+\n+static const struct devlink_ops irdma_devlink_ops = {\n+\t.reload = irdma_devlink_reload,\n+};\n+\n+static void irdma_devlink_unregister(struct platform_device *pdev)\n+{\n+\tconst struct platform_device_id *id = platform_get_device_id(pdev);\n+\tstruct irdma_dl_priv *priv = platform_get_drvdata(pdev);\n+\tstruct devlink *devlink = priv_to_devlink(priv);\n+\tu8 gen_ver = id->driver_data;\n+\n+\tif (gen_ver == IRDMA_GEN_2)\n+\t\tdevlink_params_unregister(devlink, irdma_devlink_params,\n+\t\t\t\t\t  ARRAY_SIZE(irdma_devlink_params));\n+\telse if (gen_ver == IRDMA_GEN_1)\n+\t\tdevlink_params_unregister(devlink, irdma_devlink_params,\n+\t\t\t\t\t  IRDMA_DL_COMMON_PARAMS_ARRAY_SZ);\n+\n+\tdevlink_unregister(devlink);\n+\tdevlink_free(devlink);\n+}\n+\n+static int irdma_devlink_register(struct platform_device *pdev)\n+{\n+\tconst struct platform_device_id *id = platform_get_device_id(pdev);\n+\tu8 gen_ver = id->driver_data;\n+\tstruct devlink *devlink;\n+\tstruct irdma_dl_priv *priv;\n+\tunion devlink_param_value value;\n+\tint ret;\n+\n+\tdevlink = devlink_alloc(&irdma_devlink_ops, sizeof(struct irdma_dl_priv));\n+\tif (!devlink)\n+\t\treturn -ENOMEM;\n+\n+\tpriv = devlink_priv(devlink);\n+\tpriv->pdev = pdev;\n+\tpriv->roce_ena = 0;\n+\tplatform_set_drvdata(pdev, priv);\n+\n+\tret = devlink_register(devlink, &pdev->dev);\n+\tif (ret)\n+\t\tgoto err_dl_free;\n+\n+\tswitch (gen_ver) {\n+\tcase IRDMA_GEN_2:\n+\t\tpriv->limits_sel = 0;\n+\t\tret = devlink_params_register(devlink, irdma_devlink_params,\n+\t\t\t\t\t      ARRAY_SIZE(irdma_devlink_params));\n+\t\tif (!ret) {\n+\t\t\tvalue.vbool = priv->roce_ena;\n+\t\t\tdevlink_param_driverinit_value_set(devlink,\n+\t\t\t\t\t   IRDMA_DEVLINK_PARAM_ID_ROCE_ENABLE,\n+\t\t\t\t\t   value);\n+\t\t}\n+\t\tbreak;\n+\tcase IRDMA_GEN_1:\n+\t\tpriv->limits_sel = 2;\n+\t\tret = devlink_params_register(devlink, irdma_devlink_params,\n+\t\t\t\t\t      IRDMA_DL_COMMON_PARAMS_ARRAY_SZ);\n+\t\tbreak;\n+\tdefault:\n+\t\tret = -ENODEV;\n+\t\tbreak;\n+\t}\n+\n+\tif (ret)\n+\t\tgoto err_dl_unreg;\n+\n+\tvalue.vu8 = priv->limits_sel;\n+\tdevlink_param_driverinit_value_set(devlink,\n+\t\t\t\t\t   IRDMA_DEVLINK_PARAM_ID_LIMITS_SELECTOR,\n+\t\t\t\t\t   value);\n+\tdevlink_params_publish(devlink);\n+\n+\treturn 0;\n+\n+err_dl_unreg:\n+\tdevlink_unregister(devlink);\n+err_dl_free:\n+\tdevlink_free(devlink);\n+\n+\treturn ret;\n+}\n+\n+static int irdma_bus_probe(struct platform_device 
*pdev)\n+{\n+\tconst struct platform_device_id *id = platform_get_device_id(pdev);\n+\tu8 gen_ver = id->driver_data;\n+\tint ret = -ENODEV;\n+\n+\tret = irdma_devlink_register(pdev);\n+\tif (ret)\n+\t\treturn ret;\n+\n+\tswitch (gen_ver) {\n+\tcase IRDMA_GEN_2:\n+\t\tret = irdma_probe(pdev);\n+\t\tbreak;\n+\tcase IRDMA_GEN_1:\n+\t\tret = i40iw_probe(pdev);\n+\t\tbreak;\n+\tdefault:\n+\t\tbreak;\n+\t}\n+\n+\tif (ret)\n+\t\tirdma_devlink_unregister(pdev);\n+\n+\treturn ret;\n+}\n+\n+static int irdma_bus_remove(struct platform_device *pdev)\n+{\n+\tconst struct platform_device_id *id = platform_get_device_id(pdev);\n+\tu8 gen_ver = id->driver_data;\n+\n+\tswitch (gen_ver) {\n+\tcase IRDMA_GEN_2:\n+\t\tirdma_remove(pdev);\n+\t\tbreak;\n+\tcase IRDMA_GEN_1:\n+\t\ti40iw_remove(pdev);\n+\t\tbreak;\n+\tdefault:\n+\t\tbreak;\n+\t}\n+\n+\tirdma_devlink_unregister(pdev);\n+\n+\treturn 0;\n+}\n+\n+static const struct platform_device_id irdma_platform_id_table[] = {\n+\t{\"ice_rdma\", IRDMA_GEN_2},\n+\t{\"i40e_rdma\", IRDMA_GEN_1},\n+\t{},\n+};\n+\n+MODULE_DEVICE_TABLE(platform, irdma_platform_id_table);\n+\n+static struct platform_driver irdma_pdriver = {\n+\t.probe = irdma_bus_probe,\n+\t.remove = irdma_bus_remove,\n+\t.id_table = irdma_platform_id_table,\n+\t.driver = {\n+\t\t   .name = \"irdma\",\n+\t\t   .owner = THIS_MODULE,\n+\t\t  },\n+};\n+\n+/**\n+ * irdma_init_module - driver initialization function\n+ *\n+ * First function to call when the driver is loaded\n+ * Register the driver as ice client and port mapper client\n+ */\n+static int __init irdma_init_module(void)\n+{\n+\tint ret;\n+\n+\tret = platform_driver_register(&irdma_pdriver);\n+\tif (ret) {\n+\t\tpr_err(\"Failed irdma platform_driver_register()\\n\");\n+\t\treturn ret;\n+\t}\n+\tirdma_register_notifiers();\n+\n+\treturn 0;\n+}\n+\n+/**\n+ * irdma_exit_module - driver exit clean up function\n+ *\n+ * The function is called just before the driver is unloaded\n+ * Unregister the driver as ice client and port mapper client\n+ */\n+static void __exit irdma_exit_module(void)\n+{\n+\tirdma_unregister_notifiers();\n+\tplatform_driver_unregister(&irdma_pdriver);\n+}\n+\n+module_init(irdma_init_module);\n+module_exit(irdma_exit_module);\ndiff --git a/drivers/infiniband/hw/irdma/main.h b/drivers/infiniband/hw/irdma/main.h\nnew file mode 100644\nindex 0000000..8f3cbcd\n--- /dev/null\n+++ b/drivers/infiniband/hw/irdma/main.h\n@@ -0,0 +1,639 @@\n+/* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB */\n+/* Copyright (c) 2019, Intel Corporation. 
*/\n+\n+#ifndef IRDMA_MAIN_H\n+#define IRDMA_MAIN_H\n+\n+#include <linux/ip.h>\n+#include <linux/tcp.h>\n+#include <linux/if_vlan.h>\n+#include <net/addrconf.h>\n+#include <net/netevent.h>\n+#include <net/devlink.h>\n+#include <linux/netdevice.h>\n+#include <linux/inetdevice.h>\n+#include <linux/spinlock.h>\n+#include <linux/kernel.h>\n+#include <linux/delay.h>\n+#include <linux/pci.h>\n+#include <linux/dma-mapping.h>\n+#include <linux/workqueue.h>\n+#include <linux/slab.h>\n+#include <linux/io.h>\n+#include <linux/crc32c.h>\n+#include <linux/kthread.h>\n+#include <linux/platform_device.h>\n+#include <rdma/ib_smi.h>\n+#include <rdma/ib_verbs.h>\n+#include <rdma/ib_pack.h>\n+#include <rdma/rdma_cm.h>\n+#include <rdma/iw_cm.h>\n+#include <crypto/hash.h>\n+#include \"status.h\"\n+#include \"osdep.h\"\n+#include \"defs.h\"\n+#include \"hmc.h\"\n+#include \"type.h\"\n+#include \"protos.h\"\n+#include \"pble.h\"\n+#include \"verbs.h\"\n+#include \"cm.h\"\n+#include \"user.h\"\n+#include \"puda.h\"\n+#include <rdma/irdma-abi.h>\n+\n+extern struct list_head irdma_handlers;\n+extern spinlock_t irdma_handler_lock;\n+\n+#define IRDMA_FW_VER_DEFAULT\t2\n+#define IRDMA_HW_VER\t        2\n+\n+#define IRDMA_ARP_ADD\t\t1\n+#define IRDMA_ARP_DELETE\t2\n+#define IRDMA_ARP_RESOLVE\t3\n+\n+#define IRDMA_MACIP_ADD\t\t1\n+#define IRDMA_MACIP_DELETE\t2\n+\n+#define IW_CCQ_SIZE\t(IRDMA_CQP_SW_SQSIZE_2048 + 1)\n+#define IW_CEQ_SIZE\t2048\n+#define IW_AEQ_SIZE\t2048\n+\n+#define RX_BUF_SIZE\t(1536 + 8)\n+#define IW_REG0_SIZE\t(4 * 1024)\n+#define IW_TX_TIMEOUT\t(6 * HZ)\n+#define IW_FIRST_QPN\t1\n+\n+#define IW_SW_CONTEXT_ALIGN\t1024\n+\n+#define MAX_DPC_ITERATIONS\t128\n+\n+#define IRDMA_EVENT_TIMEOUT\t\t100000\n+#define IRDMA_VCHNL_EVENT_TIMEOUT\t100000\n+\n+#define\tIRDMA_NO_QSET\t0xffff\n+\n+#define IW_CFG_FPM_QP_COUNT\t\t32768\n+#define IRDMA_MAX_PAGES_PER_FMR\t\t512\n+#define IRDMA_MIN_PAGES_PER_FMR\t\t1\n+#define IRDMA_CQP_COMPL_RQ_WQE_FLUSHED\t2\n+#define IRDMA_CQP_COMPL_SQ_WQE_FLUSHED\t3\n+\n+#define IRDMA_Q_TYPE_PE_AEQ\t0x80\n+#define IRDMA_Q_INVALID_IDX\t0xffff\n+#define IRDMA_REM_ENDPOINT_TRK_QPID\t3\n+\n+#define IRDMA_DRV_OPT_ENA_MPA_VER_0\t\t0x00000001\n+#define IRDMA_DRV_OPT_DISABLE_MPA_CRC\t\t0x00000002\n+#define IRDMA_DRV_OPT_DISABLE_FIRST_WRITE\t0x00000004\n+#define IRDMA_DRV_OPT_DISABLE_INTF\t\t0x00000008\n+#define IRDMA_DRV_OPT_ENA_MSI\t\t\t0x00000010\n+#define IRDMA_DRV_OPT_DUAL_LOGICAL_PORT\t\t0x00000020\n+#define IRDMA_DRV_OPT_NO_INLINE_DATA\t\t0x00000080\n+#define IRDMA_DRV_OPT_DISABLE_INT_MOD\t\t0x00000100\n+#define IRDMA_DRV_OPT_DISABLE_VIRT_WQ\t\t0x00000200\n+#define IRDMA_DRV_OPT_ENA_PAU\t\t\t0x00000400\n+#define IRDMA_DRV_OPT_MCAST_LOGPORT_MAP\t\t0x00000800\n+\n+#define IW_HMC_OBJ_TYPE_NUM\tARRAY_SIZE(iw_hmc_obj_types)\n+\n+enum init_completion_state {\n+\tINVALID_STATE = 0,\n+\tINITIAL_STATE,\n+\tCQP_CREATED,\n+\tHMC_OBJS_CREATED,\n+\tCCQ_CREATED,\n+\tAEQ_CREATED,\n+\tCEQ0_CREATED, /* Last state of probe */\n+\tCEQS_CREATED,\n+\tILQ_CREATED,\n+\tIEQ_CREATED,\n+\tPBLE_CHUNK_MEM,\n+\tIP_ADDR_REGISTERED,\n+\tRDMA_DEV_REGISTERED, /* Last state of open */\n+};\n+\n+struct irdma_rsrc_limits {\n+\tu32 qplimit;\n+\tu32 mrlimit;\n+\tu32 cqlimit;\n+};\n+\n+struct irdma_cqp_compl_info {\n+\tu32 op_ret_val;\n+\tu16 maj_err_code;\n+\tu16 min_err_code;\n+\tbool error;\n+\tu8 op_code;\n+};\n+\n+struct irdma_cqp_request {\n+\tstruct cqp_cmds_info info;\n+\twait_queue_head_t waitq;\n+\tstruct list_head list;\n+\tatomic_t refcount;\n+\tvoid (*callback_fcn)(struct irdma_cqp_request 
*cqp_request);\n+\tvoid *param;\n+\tstruct irdma_cqp_compl_info compl_info;\n+\tbool waiting;\n+\tbool request_done;\n+\tbool dynamic;\n+};\n+\n+struct irdma_cqp {\n+\tstruct irdma_sc_cqp sc_cqp;\n+\tspinlock_t req_lock; /* protect CQP request list */\n+\tspinlock_t compl_lock; /* protect CQP completion processing */\n+\twait_queue_head_t waitq;\n+\twait_queue_head_t remove_wq;\n+\tstruct irdma_dma_mem sq;\n+\tstruct irdma_dma_mem host_ctx;\n+\tu64 *scratch_array;\n+\tstruct irdma_cqp_request *cqp_requests;\n+\tstruct list_head cqp_avail_reqs;\n+\tstruct list_head cqp_pending_reqs;\n+\tstruct task_struct *cqp_compl_thread;\n+\tstruct semaphore cqp_compl_sem;\n+};\n+\n+struct irdma_ccq {\n+\tstruct irdma_sc_cq sc_cq;\n+\tstruct irdma_dma_mem mem_cq;\n+\tstruct irdma_dma_mem shadow_area;\n+};\n+\n+struct irdma_ceq {\n+\tstruct irdma_sc_ceq sc_ceq;\n+\tstruct irdma_dma_mem mem;\n+\tu32 irq;\n+\tu32 msix_idx;\n+\tstruct irdma_pci_f *rf;\n+\tstruct tasklet_struct dpc_tasklet;\n+};\n+\n+struct irdma_aeq {\n+\tstruct irdma_sc_aeq sc_aeq;\n+\tstruct irdma_dma_mem mem;\n+};\n+\n+struct irdma_arp_entry {\n+\tu32 ip_addr[4];\n+\tu8 mac_addr[ETH_ALEN];\n+};\n+\n+struct irdma_msix_vector {\n+\tu32 idx;\n+\tu32 irq;\n+\tu32 cpu_affinity;\n+\tu32 ceq_id;\n+\tcpumask_t mask;\n+};\n+\n+struct l2params_work {\n+\tstruct work_struct work;\n+\tstruct irdma_device *iwdev;\n+\tstruct irdma_l2params l2params;\n+};\n+\n+struct virtchnl_work {\n+\tstruct work_struct work;\n+\tunion {\n+\t\tstruct irdma_cqp_request *cqp_request;\n+\t\tstruct irdma_virtchnl_work_info work_info;\n+\t};\n+};\n+\n+struct irdma_mc_table_info {\n+\tbool ipv4_valid;\n+\tu32 mgn;\n+\tu32 dest_ip[4];\n+\tbool lan_fwd;\n+};\n+\n+struct mc_table_list {\n+\tstruct list_head list;\n+\tstruct irdma_mc_table_info mc_info;\n+\tstruct irdma_mcast_grp_info mc_grp_ctx;\n+};\n+\n+struct irdma_qv_info {\n+\tu32 v_idx; /* msix_vector */\n+\tu16 ceq_idx;\n+\tu16 aeq_idx;\n+\tu8 itr_idx;\n+};\n+\n+struct irdma_qvlist_info {\n+\tu32 num_vectors;\n+\tstruct irdma_qv_info qv_info[1];\n+};\n+\n+struct irdma_priv_ldev {\n+\tunsigned int fn_num;\n+\tbool ftype;\n+\tu16 pf_vsi_num;\n+\tu16 msix_count;\n+\tstruct msix_entry *msix_entries;\n+\tvoid *if_client;\n+\tvoid *if_ldev;\n+};\n+\n+struct irdma_dl_priv {\n+\tstruct platform_device *pdev;\n+\tbool roce_ena;\n+\tu8 limits_sel;\n+};\n+\n+struct irdma_pci_f {\n+\tbool ooo;\n+\tbool reset;\n+\tbool rsrc_created;\n+\tbool stop_cqp_thread;\n+\tbool msix_shared;\n+\tu8 rsrc_profile;\n+\tu8 max_rdma_vfs;\n+\tu8 max_ena_vfs;\n+\tu8 *hmc_info_mem;\n+\tu8 *mem_rsrc;\n+\tu8 rdma_ver;\n+\tenum irdma_protocol_used protocol_used;\n+\tu32 sd_type;\n+\tu32 msix_count;\n+\tu32 max_mr;\n+\tu32 max_qp;\n+\tu32 max_cq;\n+\tu32 max_ah;\n+\tu32 next_ah;\n+\tu32 max_mcg;\n+\tu32 next_mcg;\n+\tu32 max_pd;\n+\tu32 next_qp;\n+\tu32 next_cq;\n+\tu32 next_pd;\n+\tu32 max_mr_size;\n+\tu32 max_cqe;\n+\tu32 mr_stagmask;\n+\tu32 used_pds;\n+\tu32 used_cqs;\n+\tu32 used_mrs;\n+\tu32 used_qps;\n+\tu32 arp_table_size;\n+\tu32 next_arp_index;\n+\tu32 ceqs_count;\n+\tu32 next_ws_node_id;\n+\tu32 max_ws_node_id;\n+\tu32 limits_sel;\n+\tunsigned long *allocated_ws_nodes;\n+\tunsigned long *allocated_qps;\n+\tunsigned long *allocated_cqs;\n+\tunsigned long *allocated_mrs;\n+\tunsigned long *allocated_pds;\n+\tunsigned long *allocated_mcgs;\n+\tunsigned long *allocated_ahs;\n+\tunsigned long *allocated_arps;\n+\tenum init_completion_state init_state;\n+\tstruct irdma_sc_dev sc_dev;\n+\tstruct irdma_priv_ldev ldev;\n+\tstruct irdma_handler 
*hdl;\n+\tstruct pci_dev *pdev;\n+\tstruct net_device *netdev;\n+\tstruct irdma_hw hw;\n+\tstruct irdma_cqp cqp;\n+\tstruct irdma_ccq ccq;\n+\tstruct irdma_aeq aeq;\n+\tstruct irdma_ceq *ceqlist;\n+\tstruct irdma_hmc_pble_rsrc *pble_rsrc;\n+\tstruct irdma_arp_entry *arp_table;\n+\tspinlock_t arp_lock; /*protect ARP table access*/\n+\tspinlock_t rsrc_lock; /* protect HW resource array access */\n+\tspinlock_t qptable_lock; /*protect QP table access*/\n+\tstruct irdma_qp **qp_table;\n+\tspinlock_t qh_list_lock; /* protect mc_qht_list */\n+\tstruct mc_table_list mc_qht_list;\n+\tstruct irdma_msix_vector *iw_msixtbl;\n+\tstruct irdma_qvlist_info *iw_qvlist;\n+\tstruct tasklet_struct dpc_tasklet;\n+\tstruct irdma_dma_mem obj_mem;\n+\tstruct irdma_dma_mem obj_next;\n+\tatomic_t vchnl_msgs;\n+\twait_queue_head_t vchnl_waitq;\n+\tstruct workqueue_struct *free_qp_wq;\n+\tstruct workqueue_struct *free_cqbuf_wq;\n+\tstruct virtchnl_work virtchnl_w[IRDMA_MAX_PE_ENA_VF_COUNT];\n+\tstruct irdma_sc_vsi default_vsi;\n+\tvoid *back_fcn;\n+\tvoid (*init_hw)(struct irdma_sc_dev *dev);\n+};\n+\n+struct irdma_device {\n+\tstruct irdma_ib_device *iwibdev;\n+\tstruct irdma_pci_f *rf;\n+\tstruct irdma_priv_ldev *ldev;\n+\tstruct net_device *netdev;\n+\tstruct irdma_handler *hdl;\n+\tstruct irdma_sc_vsi vsi;\n+\tstruct irdma_cm_core cm_core;\n+\tbool roce_mode;\n+\tu32 vendor_id;\n+\tu32 vendor_part_id;\n+\tu32 device_cap_flags;\n+\tu32 push_mode;\n+\tu32 rcv_wnd;\n+\tu16 mac_ip_table_idx;\n+\tu8 rcv_wscale;\n+\tu16 vsi_num;\n+\tbool create_ilq;\n+\tu8 iw_status;\n+\tstruct tasklet_struct dpc_tasklet;\n+\tenum init_completion_state init_state;\n+\tbool dcb;\n+\tbool reset;\n+\twait_queue_head_t suspend_wq;\n+\tstruct workqueue_struct *param_wq;\n+\tatomic_t params_busy;\n+};\n+\n+struct irdma_ib_device {\n+\tstruct ib_device ibdev;\n+\tstruct irdma_device *iwdev;\n+};\n+\n+struct irdma_handler {\n+\tstruct list_head list;\n+\tstruct irdma_pci_f rf;\n+\tstruct irdma_priv_ldev *ldev;\n+\tstruct platform_device *platform_dev;\n+\tbool shared_res_created;\n+};\n+\n+/***********************************************************/\n+/**\n+ * to_iwdev - get device\n+ * @ibdev: ib device\n+ **/\n+static inline struct irdma_device *to_iwdev(struct ib_device *ibdev)\n+{\n+\treturn container_of(ibdev, struct irdma_ib_device, ibdev)->iwdev;\n+}\n+\n+/**\n+ * to_ucontext - get user context\n+ * @ibucontext: ib user context\n+ **/\n+static inline struct irdma_ucontext *to_ucontext(struct ib_ucontext *ibucontext)\n+{\n+\treturn container_of(ibucontext, struct irdma_ucontext, ibucontext);\n+}\n+\n+/**\n+ * to_iwpd - get protection domain\n+ * @ibpd: ib pd\n+ **/\n+static inline struct irdma_pd *to_iwpd(struct ib_pd *ibpd)\n+{\n+\treturn container_of(ibpd, struct irdma_pd, ibpd);\n+}\n+\n+/**\n+ * to_iwah - get device ah\n+ * @ibdev: ib ah\n+ **/\n+static inline struct irdma_ah *to_iwah(struct ib_ah *ibah)\n+{\n+\treturn container_of(ibah, struct irdma_ah, ibah);\n+}\n+\n+/**\n+ * to_iwmr - get device memory region\n+ * @ibdev: ib memory region\n+ **/\n+static inline struct irdma_mr *to_iwmr(struct ib_mr *ibmr)\n+{\n+\treturn container_of(ibmr, struct irdma_mr, ibmr);\n+}\n+\n+/**\n+ * to_iwmr_from_ibfmr - get device memory region\n+ * @ibfmr: ib fmr\n+ **/\n+static inline struct irdma_mr *to_iwmr_from_ibfmr(struct ib_fmr *ibfmr)\n+{\n+\treturn container_of(ibfmr, struct irdma_mr, ibfmr);\n+}\n+\n+/**\n+ * to_iwmw - get device memory window\n+ * @ibmw: ib memory window\n+ **/\n+static inline struct irdma_mr *to_iwmw(struct ib_mw 
*ibmw)\n+{\n+\treturn container_of(ibmw, struct irdma_mr, ibmw);\n+}\n+\n+/**\n+ * to_iwcq - get completion queue\n+ * @ibcq: ib cqdevice\n+ **/\n+static inline struct irdma_cq *to_iwcq(struct ib_cq *ibcq)\n+{\n+\treturn container_of(ibcq, struct irdma_cq, ibcq);\n+}\n+\n+/**\n+ * to_iwqp - get device qp\n+ * @ibqp: ib qp\n+ **/\n+static inline struct irdma_qp *to_iwqp(struct ib_qp *ibqp)\n+{\n+\treturn container_of(ibqp, struct irdma_qp, ibqp);\n+}\n+\n+/**\n+ * irdma_alloc_resource - allocate a resource\n+ * @iwdev: device pointer\n+ * @resource_array: resource bit array:\n+ * @max_resources: maximum resource number\n+ * @req_resources_num: Allocated resource number\n+ * @next: next free id\n+ **/\n+static inline int irdma_alloc_rsrc(struct irdma_pci_f *rf,\n+\t\t\t\t   unsigned long *rsrc_array, u32 max_rsrc,\n+\t\t\t\t   u32 *req_rsrc_num, u32 *next)\n+{\n+\tu32 rsrc_num;\n+\tunsigned long flags;\n+\n+\tspin_lock_irqsave(&rf->rsrc_lock, flags);\n+\trsrc_num = find_next_zero_bit(rsrc_array, max_rsrc, *next);\n+\tif (rsrc_num >= max_rsrc) {\n+\t\trsrc_num = find_first_zero_bit(rsrc_array, max_rsrc);\n+\t\tif (rsrc_num >= max_rsrc) {\n+\t\t\tspin_unlock_irqrestore(&rf->rsrc_lock, flags);\n+\t\t\tdev_dbg(rfdev_to_dev(&rf->sc_dev),\n+\t\t\t\t\"ERR: resource [%d] allocation failed\\n\",\n+\t\t\t\trsrc_num);\n+\t\t\treturn -EOVERFLOW;\n+\t\t}\n+\t}\n+\tset_bit(rsrc_num, rsrc_array);\n+\t*next = rsrc_num + 1;\n+\tif (*next == max_rsrc)\n+\t\t*next = 0;\n+\t*req_rsrc_num = rsrc_num;\n+\tspin_unlock_irqrestore(&rf->rsrc_lock, flags);\n+\n+\treturn 0;\n+}\n+\n+/**\n+ * irdma_is_resource_allocated - detrmine if resource is\n+ * allocated\n+ * @iwdev: device pointer\n+ * @resource_array: resource array for the resource_num\n+ * @resource_num: resource number to check\n+ **/\n+static inline bool irdma_is_rsrc_allocated(struct irdma_pci_f *rf,\n+\t\t\t\t\t   unsigned long *rsrc_array,\n+\t\t\t\t\t   u32 rsrc_num)\n+{\n+\tbool bit_is_set;\n+\tunsigned long flags;\n+\n+\tspin_lock_irqsave(&rf->rsrc_lock, flags);\n+\n+\tbit_is_set = test_bit(rsrc_num, rsrc_array);\n+\tspin_unlock_irqrestore(&rf->rsrc_lock, flags);\n+\n+\treturn bit_is_set;\n+}\n+\n+/**\n+ * irdma_free_resource - free a resource\n+ * @iwdev: device pointer\n+ * @resource_array: resource array for the resource_num\n+ * @resource_num: resource number to free\n+ **/\n+static inline void irdma_free_rsrc(struct irdma_pci_f *rf,\n+\t\t\t\t   unsigned long *rsrc_array, u32 rsrc_num)\n+{\n+\tunsigned long flags;\n+\n+\tspin_lock_irqsave(&rf->rsrc_lock, flags);\n+\tclear_bit(rsrc_num, rsrc_array);\n+\tspin_unlock_irqrestore(&rf->rsrc_lock, flags);\n+}\n+\n+void irdma_init_rf_config_params(struct irdma_pci_f *rf);\n+enum irdma_status_code irdma_ctrl_init_hw(struct irdma_pci_f *rf);\n+void irdma_deinit_ctrl_hw(struct irdma_pci_f *rf);\n+enum irdma_status_code irdma_rt_init_hw(struct irdma_pci_f *rf,\n+\t\t\t\t\tstruct irdma_device *iwdev,\n+\t\t\t\t\tstruct irdma_l2params *l2params);\n+void irdma_deinit_rt_device(struct irdma_device *iwdev);\n+void irdma_add_ref(struct ib_qp *ibqp);\n+void irdma_rem_ref(struct ib_qp *ibqp);\n+struct ib_qp *irdma_get_qp(struct ib_device *ibdev, int qpn);\n+void irdma_flush_wqes(struct irdma_pci_f *rf, struct irdma_qp *qp);\n+void irdma_manage_arp_cache(struct irdma_pci_f *rf, unsigned char *mac_addr,\n+\t\t\t    u32 *ip_addr, bool ipv4, u32 action);\n+int irdma_manage_apbvt(struct irdma_device *iwdev, u16 accel_local_port,\n+\t\t       bool add_port);\n+struct irdma_cqp_request *irdma_get_cqp_request(struct 
irdma_cqp *cqp,\n+\t\t\t\t\t\tbool wait);\n+void irdma_free_cqp_request(struct irdma_cqp *cqp,\n+\t\t\t    struct irdma_cqp_request *cqp_request);\n+void irdma_put_cqp_request(struct irdma_cqp *cqp,\n+\t\t\t   struct irdma_cqp_request *cqp_request);\n+struct irdma_device *irdma_get_device(struct net_device *netdev);\n+void irdma_put_device(struct irdma_device *iwdev);\n+struct irdma_handler *irdma_find_handler(struct pci_dev *pdev);\n+struct irdma_device *irdma_find_iwdev(const char *name);\n+void irdma_add_handler(struct irdma_handler *hdl);\n+void irdma_del_handler(struct irdma_handler *hdl);\n+void irdma_add_ip(struct irdma_device *iwdev);\n+int irdma_alloc_local_mac_entry(struct irdma_pci_f *rf, u16 *mac_tbl_idx);\n+int irdma_add_local_mac_entry(struct irdma_pci_f *rf, u8 *mac_addr, u16 idx);\n+void irdma_del_local_mac_entry(struct irdma_pci_f *rf, u16 idx);\n+\n+u32 irdma_initialize_hw_rsrc(struct irdma_pci_f *rf);\n+int irdma_register_rdma_device(struct irdma_device *iwdev);\n+void irdma_port_ibevent(struct irdma_device *iwdev);\n+void irdma_cm_disconn(struct irdma_qp *qp);\n+\n+enum irdma_status_code\n+irdma_handle_cqp_op(struct irdma_pci_f *rf,\n+\t\t    struct irdma_cqp_request *cqp_request);\n+\n+int irdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask,\n+\t\t    struct ib_udata *udata);\n+int irdma_modify_qp_roce(struct ib_qp *ibqp, struct ib_qp_attr *attr,\n+\t\t\t int attr_mask, struct ib_udata *udata);\n+void irdma_cq_wq_destroy(struct irdma_pci_f *rf, struct irdma_sc_cq *cq);\n+\n+void irdma_cleanup_pending_cqp_op(struct irdma_pci_f *rf);\n+/* TODO: remove once VMWare implements or if not needed */\n+enum irdma_status_code irdma_hw_modify_qp(struct irdma_device *iwdev,\n+\t\t\t\t\t  struct irdma_qp *iwqp,\n+\t\t\t\t\t  struct irdma_modify_qp_info *info,\n+\t\t\t\t\t  bool wait);\n+enum irdma_status_code irdma_qp_suspend_resume(struct irdma_sc_qp *qp,\n+\t\t\t\t\t       bool suspend);\n+enum irdma_status_code\n+irdma_manage_qhash(struct irdma_device *iwdev, struct irdma_cm_info *cminfo,\n+\t\t   enum irdma_quad_entry_type etype,\n+\t\t   enum irdma_quad_hash_manage_type mtype, void *cmnode,\n+\t\t   bool wait);\n+void irdma_receive_ilq(struct irdma_sc_vsi *vsi, struct irdma_puda_buf *rbuf);\n+void irdma_free_sqbuf(struct irdma_sc_vsi *vsi, void *bufp);\n+void irdma_free_qp_rsrc(struct irdma_device *iwdev, struct irdma_qp *iwqp,\n+\t\t\tu32 qp_num);\n+void irdma_request_reset(struct irdma_pci_f *rf);\n+void irdma_destroy_rdma_device(struct irdma_ib_device *iwibdev);\n+enum irdma_status_code irdma_setup_cm_core(struct irdma_device *iwdev, u8 ver);\n+void irdma_cleanup_cm_core(struct irdma_cm_core *cm_core);\n+void irdma_next_iw_state(struct irdma_qp *iwqp, u8 state, u8 del_hash, u8 term,\n+\t\t\t u8 term_len);\n+int irdma_send_syn(struct irdma_cm_node *cm_node, u32 sendack);\n+int irdma_send_reset(struct irdma_cm_node *cm_node);\n+struct irdma_cm_node *irdma_find_node(struct irdma_cm_core *cm_core,\n+\t\t\t\t      u16 rem_port, u32 *rem_addr, u16 loc_port,\n+\t\t\t\t      u32 *loc_addr, bool add_refcnt,\n+\t\t\t\t      bool accelerated_list);\n+enum irdma_status_code irdma_hw_flush_wqes(struct irdma_pci_f *rf,\n+\t\t\t\t\t   struct irdma_sc_qp *qp,\n+\t\t\t\t\t   struct irdma_qp_flush_info *info,\n+\t\t\t\t\t   bool wait);\n+void irdma_gen_ae(struct irdma_pci_f *rf, struct irdma_sc_qp *qp,\n+\t\t  struct irdma_gen_ae_info *info, bool wait);\n+void irdma_copy_ip_ntohl(u32 *dst, __be32 *src);\n+void irdma_copy_ip_htonl(__be32 *dst, u32 *src);\n+u16 
irdma_get_vlan_ipv4(u32 *addr);\n+struct net_device *irdma_netdev_vlan_ipv6(u32 *addr, u16 *vlan_id, u8 *mac);\n+struct ib_mr *irdma_reg_phys_mr(struct ib_pd *ib_pd, u64 addr, u64 size,\n+\t\t\t\tint acc, u64 *iova_start);\n+int cqp_compl_thread(void *context);\n+int irdma_inetaddr_event(struct notifier_block *notifier, unsigned long event,\n+\t\t\t void *ptr);\n+int irdma_inet6addr_event(struct notifier_block *notifier, unsigned long event,\n+\t\t\t  void *ptr);\n+int irdma_net_event(struct notifier_block *notifier, unsigned long event,\n+\t\t    void *ptr);\n+int irdma_netdevice_event(struct notifier_block *notifier, unsigned long event,\n+\t\t\t  void *ptr);\n+int irdma_probe(struct platform_device *pdev);\n+int irdma_remove(struct platform_device *pdev);\n+int i40iw_probe(struct platform_device *pdev);\n+void i40iw_remove(struct platform_device *pdev);\n+void i40iw_request_reset(struct irdma_pci_f *rf);\n+void icrdma_request_reset(struct irdma_pci_f *rf);\n+void irdma_register_notifiers(void);\n+void irdma_unregister_notifiers(void);\n+void irdma_cqp_ce_handler(struct irdma_pci_f *rf, struct irdma_sc_cq *cq);\n+int irdma_ah_cqp_op(struct irdma_pci_f *rf, struct irdma_sc_ah *sc_ah, u8 cmd,\n+\t\t    bool wait,\n+\t\t    void (*callback_fcn)(struct irdma_cqp_request *cqp_request),\n+\t\t    void *cb_param);\n+void irdma_gsi_ud_qp_ah_cb(struct irdma_cqp_request *cqp_request);\n+int irdma_configfs_init(void);\n+void irdma_configfs_exit(void);\n+#endif /* IRDMA_MAIN_H */\n",
    "prefixes": [
        "rdma-next",
        "01/17"
    ]
}
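
A follow-on sketch under the same assumptions, showing how the fields above are typically consumed: "mbox" serves the raw mail, suitable for piping into "git am"; "series" records the grouping; "check" aggregates the per-patch CI results listed at the "checks" URL.

import requests

patch = requests.get(
    "http://patchwork.ozlabs.org/api/patches/1124837/", timeout=30
).json()

mbox = requests.get(patch["mbox"], timeout=30).text  # raw patch mail
for series in patch["series"]:
    print(f"series {series['id']}: {series['name']} (v{series['version']})")
print("state:", patch["state"], "| aggregate check:", patch["check"])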