Patch Detail
GET: Show a patch.
PATCH: Update a patch (partial update; only the fields supplied are changed).
PUT: Update a patch (full update).
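The same endpoint serves reads and writes. Below is a minimal sketch (not from this page, and the token and target state are illustrative placeholders): fetching a patch needs no authentication, while updating one requires an API token for a user with maintainer rights on the project, sent as a DRF-style Token header. The ?format=api suffix in the request below selects the browsable HTML rendering; requests made from a script without it receive plain JSON.

import requests

BASE = "http://patchwork.ozlabs.org/api"

# Read: fetch the patch shown below as JSON (no authentication required).
resp = requests.get(f"{BASE}/patches/1124847/")
resp.raise_for_status()
patch = resp.json()
print(patch["name"], "->", patch["state"])  # e.g. "... verb APIs" -> "rejected"

# Write: partially update the patch via PATCH. Requires a maintainer's
# API token; "REPLACE-WITH-TOKEN" and the new state are placeholders.
headers = {"Authorization": "Token REPLACE-WITH-TOKEN"}
resp = requests.patch(f"{BASE}/patches/1124847/",
                      headers=headers,
                      json={"state": "accepted"})
resp.raise_for_status()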
GET /api/patches/1124847/?format=api
{ "id": 1124847, "url": "http://patchwork.ozlabs.org/api/patches/1124847/?format=api", "web_url": "http://patchwork.ozlabs.org/project/intel-wired-lan/patch/20190629185405.1601-10-shiraz.saleem@intel.com/", "project": { "id": 46, "url": "http://patchwork.ozlabs.org/api/projects/46/?format=api", "name": "Intel Wired Ethernet development", "link_name": "intel-wired-lan", "list_id": "intel-wired-lan.osuosl.org", "list_email": "intel-wired-lan@osuosl.org", "web_url": "", "scm_url": "", "webscm_url": "", "list_archive_url": "", "list_archive_url_format": "", "commit_url_format": "" }, "msgid": "<20190629185405.1601-10-shiraz.saleem@intel.com>", "list_archive_url": null, "date": "2019-06-29T18:53:57", "name": "[rdma-next,09/17] RDMA/irdma: Implement device supported verb APIs", "commit_ref": null, "pull_url": null, "state": "rejected", "archived": false, "hash": "be30655df91c889eb8c8189066f4b7a888594bb2", "submitter": { "id": 69500, "url": "http://patchwork.ozlabs.org/api/people/69500/?format=api", "name": "Saleem, Shiraz", "email": "shiraz.saleem@intel.com" }, "delegate": { "id": 68, "url": "http://patchwork.ozlabs.org/api/users/68/?format=api", "username": "jtkirshe", "first_name": "Jeff", "last_name": "Kirsher", "email": "jeffrey.t.kirsher@intel.com" }, "mbox": "http://patchwork.ozlabs.org/project/intel-wired-lan/patch/20190629185405.1601-10-shiraz.saleem@intel.com/mbox/", "series": [ { "id": 116886, "url": "http://patchwork.ozlabs.org/api/series/116886/?format=api", "web_url": "http://patchwork.ozlabs.org/project/intel-wired-lan/list/?series=116886", "date": "2019-06-29T18:53:48", "name": "Add unified Intel Ethernet RDMA driver (irdma)", "version": 1, "mbox": "http://patchwork.ozlabs.org/series/116886/mbox/" } ], "comments": "http://patchwork.ozlabs.org/api/patches/1124847/comments/", "check": "pending", "checks": "http://patchwork.ozlabs.org/api/patches/1124847/checks/", "tags": {}, "related": [], "headers": { "Return-Path": "<intel-wired-lan-bounces@osuosl.org>", "X-Original-To": [ "incoming@patchwork.ozlabs.org", "intel-wired-lan@lists.osuosl.org" ], "Delivered-To": [ "patchwork-incoming@bilbo.ozlabs.org", "intel-wired-lan@lists.osuosl.org" ], "Authentication-Results": [ "ozlabs.org;\n\tspf=pass (mailfrom) smtp.mailfrom=osuosl.org\n\t(client-ip=140.211.166.138; helo=whitealder.osuosl.org;\n\tenvelope-from=intel-wired-lan-bounces@osuosl.org;\n\treceiver=<UNKNOWN>)", "ozlabs.org;\n\tdmarc=fail (p=none dis=none) header.from=intel.com" ], "Received": [ "from whitealder.osuosl.org (smtp1.osuosl.org [140.211.166.138])\n\t(using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256\n\tbits)) (No client certificate requested)\n\tby ozlabs.org (Postfix) with ESMTPS id 45bqHw5t8Hz9s3l\n\tfor <incoming@patchwork.ozlabs.org>;\n\tSun, 30 Jun 2019 09:15:56 +1000 (AEST)", "from localhost (localhost [127.0.0.1])\n\tby whitealder.osuosl.org (Postfix) with ESMTP id 80A7486B19;\n\tSat, 29 Jun 2019 23:15:54 +0000 (UTC)", "from whitealder.osuosl.org ([127.0.0.1])\n\tby localhost (.osuosl.org [127.0.0.1]) (amavisd-new, port 10024)\n\twith ESMTP id IkCwzI8UNE9Y; Sat, 29 Jun 2019 23:15:35 +0000 (UTC)", "from ash.osuosl.org (ash.osuosl.org [140.211.166.34])\n\tby whitealder.osuosl.org (Postfix) with ESMTP id 1AA9B868B4;\n\tSat, 29 Jun 2019 23:15:35 +0000 (UTC)", "from hemlock.osuosl.org (smtp2.osuosl.org [140.211.166.133])\n\tby ash.osuosl.org (Postfix) with ESMTP id 376131BF3AD\n\tfor <intel-wired-lan@lists.osuosl.org>;\n\tSat, 29 Jun 2019 18:54:28 +0000 (UTC)", "from localhost (localhost [127.0.0.1])\n\tby 
hemlock.osuosl.org (Postfix) with ESMTP id 28ED0878F1\n\tfor <intel-wired-lan@lists.osuosl.org>;\n\tSat, 29 Jun 2019 18:54:28 +0000 (UTC)", "from hemlock.osuosl.org ([127.0.0.1])\n\tby localhost (.osuosl.org [127.0.0.1]) (amavisd-new, port 10024)\n\twith ESMTP id ELaA0AD7CpC6 for <intel-wired-lan@lists.osuosl.org>;\n\tSat, 29 Jun 2019 18:54:21 +0000 (UTC)", "from mga14.intel.com (mga14.intel.com [192.55.52.115])\n\tby hemlock.osuosl.org (Postfix) with ESMTPS id 13B2187D8C\n\tfor <intel-wired-lan@lists.osuosl.org>;\n\tSat, 29 Jun 2019 18:54:21 +0000 (UTC)", "from fmsmga004.fm.intel.com ([10.253.24.48])\n\tby fmsmga103.fm.intel.com with ESMTP/TLS/DHE-RSA-AES256-GCM-SHA384;\n\t29 Jun 2019 11:54:20 -0700", "from ssaleem-mobl.amr.corp.intel.com ([10.254.177.95])\n\tby fmsmga004.fm.intel.com with ESMTP; 29 Jun 2019 11:54:20 -0700" ], "X-Virus-Scanned": [ "amavisd-new at osuosl.org", "amavisd-new at osuosl.org" ], "X-Greylist": "domain auto-whitelisted by SQLgrey-1.7.6", "X-Amp-Result": "SKIPPED(no attachment in message)", "X-Amp-File-Uploaded": "False", "X-ExtLoop1": "1", "X-IronPort-AV": "E=Sophos;i=\"5.63,432,1557212400\"; d=\"scan'208\";a=\"185972878\"", "From": "Shiraz Saleem <shiraz.saleem@intel.com>", "To": "intel-wired-lan@lists.osuosl.org", "Date": "Sat, 29 Jun 2019 13:53:57 -0500", "Message-Id": "<20190629185405.1601-10-shiraz.saleem@intel.com>", "X-Mailer": "git-send-email 2.21.0", "In-Reply-To": "<20190629185405.1601-1-shiraz.saleem@intel.com>", "References": "<20190629185405.1601-1-shiraz.saleem@intel.com>", "MIME-Version": "1.0", "X-Mailman-Approved-At": "Sat, 29 Jun 2019 23:15:33 +0000", "Subject": "[Intel-wired-lan] [PATCH rdma-next 09/17] RDMA/irdma: Implement\n\tdevice supported verb APIs", "X-BeenThere": "intel-wired-lan@osuosl.org", "X-Mailman-Version": "2.1.29", "Precedence": "list", "List-Id": "Intel Wired Ethernet Linux Kernel Driver Development\n\t<intel-wired-lan.osuosl.org>", "List-Unsubscribe": "<https://lists.osuosl.org/mailman/options/intel-wired-lan>, \n\t<mailto:intel-wired-lan-request@osuosl.org?subject=unsubscribe>", "List-Archive": "<http://lists.osuosl.org/pipermail/intel-wired-lan/>", "List-Post": "<mailto:intel-wired-lan@osuosl.org>", "List-Help": "<mailto:intel-wired-lan-request@osuosl.org?subject=help>", "List-Subscribe": "<https://lists.osuosl.org/mailman/listinfo/intel-wired-lan>, \n\t<mailto:intel-wired-lan-request@osuosl.org?subject=subscribe>", "Cc": "Mustafa Ismail <mustafa.ismail@intel.com>,\n\tShiraz Saleem <shiraz.saleem@intel.com>", "Content-Type": "text/plain; charset=\"us-ascii\"", "Content-Transfer-Encoding": "7bit", "Errors-To": "intel-wired-lan-bounces@osuosl.org", "Sender": "\"Intel-wired-lan\" <intel-wired-lan-bounces@osuosl.org>" }, "content": "From: Mustafa Ismail <mustafa.ismail@intel.com>\n\nImplement device supported verb APIs. The supported APIs\nvary based on the underlying transport the ibdev is\nregistered as (i.e. 
iWARP or RoCEv2).\n\nSigned-off-by: Mustafa Ismail <mustafa.ismail@intel.com>\nSigned-off-by: Shiraz Saleem <shiraz.saleem@intel.com>\n---\n drivers/infiniband/hw/irdma/verbs.c | 4347 ++++++++++++++++++++++++++++++\n drivers/infiniband/hw/irdma/verbs.h | 199 ++\n include/uapi/rdma/rdma_user_ioctl_cmds.h | 1 +\n 3 files changed, 4547 insertions(+)\n create mode 100644 drivers/infiniband/hw/irdma/verbs.c\n create mode 100644 drivers/infiniband/hw/irdma/verbs.h", "diff": "diff --git a/drivers/infiniband/hw/irdma/verbs.c b/drivers/infiniband/hw/irdma/verbs.c\nnew file mode 100644\nindex 0000000..4c1dcb3\n--- /dev/null\n+++ b/drivers/infiniband/hw/irdma/verbs.c\n@@ -0,0 +1,4347 @@\n+// SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB\n+/* Copyright (c) 2019, Intel Corporation. */\n+\n+#include <linux/random.h>\n+#include <linux/highmem.h>\n+#include <linux/time.h>\n+#include <linux/irq.h>\n+#include <asm/byteorder.h>\n+#include <net/ip.h>\n+#include <rdma/ib_verbs.h>\n+#include <rdma/iw_cm.h>\n+#include <rdma/ib_user_verbs.h>\n+#include <rdma/ib_umem.h>\n+#include <rdma/uverbs_ioctl.h>\n+#include <rdma/ib_cache.h>\n+#include \"main.h\"\n+\n+/**\n+ * irdma_query_device - get device attributes\n+ * @ibdev: device pointer from stack\n+ * @props: returning device attributes\n+ * @udata: user data\n+ */\n+static int irdma_query_device(struct ib_device *ibdev,\n+\t\t\t struct ib_device_attr *props,\n+\t\t\t struct ib_udata *udata)\n+{\n+\tstruct irdma_device *iwdev = to_iwdev(ibdev);\n+\tstruct irdma_pci_f *rf = iwdev->rf;\n+\tstruct pci_dev *pdev = iwdev->rf->pdev;\n+\tstruct irdma_hw_attrs *hw_attrs = &rf->sc_dev.hw_attrs;\n+\n+\tif (udata->inlen || udata->outlen)\n+\t\treturn -EINVAL;\n+\n+\tmemset(props, 0, sizeof(*props));\n+\tether_addr_copy((u8 *)&props->sys_image_guid, iwdev->netdev->dev_addr);\n+\tprops->fw_ver = (u64)FW_MAJOR_VER(&rf->sc_dev) << 32 |\n+\t\t\tFW_MINOR_VER(&rf->sc_dev) << 16;\n+\tprops->device_cap_flags = iwdev->device_cap_flags;\n+\tprops->vendor_id = pdev->vendor;\n+\tprops->vendor_part_id = pdev->device;\n+\tprops->hw_ver = (u32)rf->sc_dev.pci_rev;\n+\tprops->max_mr_size = hw_attrs->max_mr_size;\n+\tprops->max_qp = rf->max_qp - rf->used_qps;\n+\tprops->max_qp_wr = hw_attrs->max_qp_wr;\n+\tprops->max_send_sge = hw_attrs->uk_attrs.max_hw_wq_frags;\n+\tprops->max_recv_sge = hw_attrs->uk_attrs.max_hw_wq_frags;\n+\tprops->max_cq = rf->max_cq - rf->used_cqs;\n+\tprops->max_cqe = rf->max_cqe;\n+\tprops->max_mr = rf->max_mr - rf->used_mrs;\n+\tprops->max_mw = props->max_mr;\n+\tprops->max_pd = rf->max_pd - rf->used_pds;\n+\tprops->max_sge_rd = hw_attrs->uk_attrs.max_hw_read_sges;\n+\tprops->max_qp_rd_atom = hw_attrs->max_hw_ird;\n+\tprops->max_qp_init_rd_atom = props->max_qp_rd_atom;\n+\tprops->atomic_cap = IB_ATOMIC_NONE;\n+\tprops->max_map_per_fmr = 1;\n+\tprops->max_ah = rf->max_ah;\n+\tprops->max_mcast_grp = rf->max_mcg;\n+\tprops->max_mcast_qp_attach = IRDMA_MAX_MGS_PER_CTX;\n+\tprops->max_total_mcast_qp_attach = rf->max_qp * IRDMA_MAX_MGS_PER_CTX;\n+\tprops->max_fast_reg_page_list_len = IRDMA_MAX_PAGES_PER_FMR;\n+\n+\treturn 0;\n+}\n+\n+/**\n+ * irdma_get_eth_speed_and_width - Get IB port speed and width from netdev speed\n+ * @link_speed: netdev phy link speed\n+ * @active_speed: IB port speed\n+ * @active_width: IB port width\n+ */\n+static void irdma_get_eth_speed_and_width(u32 link_speed, u8 *active_speed,\n+\t\t\t\t\t u8 *active_width)\n+{\n+\tif (link_speed <= SPEED_1000) {\n+\t\t*active_width = IB_WIDTH_1X;\n+\t\t*active_speed = IB_SPEED_SDR;\n+\t} else if (link_speed 
<= SPEED_10000) {\n+\t\t*active_width = IB_WIDTH_1X;\n+\t\t*active_speed = IB_SPEED_FDR10;\n+\t} else if (link_speed <= SPEED_20000) {\n+\t\t*active_width = IB_WIDTH_4X;\n+\t\t*active_speed = IB_SPEED_DDR;\n+\t} else if (link_speed <= SPEED_25000) {\n+\t\t*active_width = IB_WIDTH_1X;\n+\t\t*active_speed = IB_SPEED_EDR;\n+\t} else if (link_speed <= SPEED_40000) {\n+\t\t*active_width = IB_WIDTH_4X;\n+\t\t*active_speed = IB_SPEED_FDR10;\n+\t} else {\n+\t\t*active_width = IB_WIDTH_4X;\n+\t\t*active_speed = IB_SPEED_EDR;\n+\t}\n+}\n+\n+/**\n+ * irdma_query_port - get port attributes\n+ * @ibdev: device pointer from stack\n+ * @port: port number for query\n+ * @props: returning device attributes\n+ */\n+static int irdma_query_port(struct ib_device *ibdev, u8 port,\n+\t\t\t struct ib_port_attr *props)\n+{\n+\tstruct irdma_device *iwdev = to_iwdev(ibdev);\n+\tstruct net_device *netdev = iwdev->netdev;\n+\n+\t/* no need to zero out pros here. done by caller */\n+\tprops->max_mtu = IB_MTU_4096;\n+\tprops->active_mtu = ib_mtu_int_to_enum(netdev->mtu);\n+\n+\tprops->lid = 1;\n+\tprops->lmc = 0;\n+\tprops->sm_lid = 0;\n+\tprops->sm_sl = 0;\n+\tif (netif_carrier_ok(netdev) && netif_running(netdev)) {\n+\t\tprops->state = IB_PORT_ACTIVE;\n+\t\tprops->phys_state = 5;\n+\t} else {\n+\t\tprops->state = IB_PORT_DOWN;\n+\t\tprops->phys_state = 3;\n+\t}\n+\tirdma_get_eth_speed_and_width(SPEED_100000, &props->active_speed,\n+\t\t\t\t &props->active_width);\n+\n+\tif (rdma_protocol_roce(ibdev, 1)) {\n+\t\tprops->gid_tbl_len = 32;\n+\t\tprops->ip_gids = true;\n+\t} else {\n+\t\tprops->gid_tbl_len = 1;\n+\t}\n+\tprops->pkey_tbl_len = IRDMA_PKEY_TBL_SZ;\n+\tprops->qkey_viol_cntr = 0;\n+\tprops->port_cap_flags |= IB_PORT_CM_SUP | IB_PORT_REINIT_SUP;\n+\tprops->max_msg_sz = iwdev->rf->sc_dev.hw_attrs.max_hw_outbound_msg_size;\n+\n+\treturn 0;\n+}\n+\n+/**\n+ * irdma_disassociate_ucontext - Disassociate user context\n+ * @context: ib user context\n+ */\n+static void irdma_disassociate_ucontext(struct ib_ucontext *context)\n+{\n+}\n+\n+/**\n+ * irdma_mmap - user memory map\n+ * @context: context created during alloc\n+ * @vma: kernel info for user memory map\n+ */\n+static int irdma_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)\n+{\n+\tstruct irdma_ucontext *ucontext;\n+\tu64 db_addr_offset;\n+\tu64 push_offset;\n+\n+\tucontext = to_ucontext(context);\n+\tdb_addr_offset = ucontext->iwdev->rf->sc_dev.hw_regs[IRDMA_DB_ADDR_OFFSET];\n+\tif (ucontext->iwdev->rf->sc_dev.is_pf) {\n+\t\tpush_offset = IRDMA_PUSH_OFFSET;\n+\t\tif (vma->vm_pgoff)\n+\t\t\tvma->vm_pgoff += IRDMA_PF_FIRST_PUSH_PAGE_INDEX - 1;\n+\t} else {\n+\t\tpush_offset = IRDMA_VF_PUSH_OFFSET;\n+\t\tif (vma->vm_pgoff)\n+\t\t\tvma->vm_pgoff += IRDMA_VF_FIRST_PUSH_PAGE_INDEX - 1;\n+\t}\n+\n+\tvma->vm_pgoff += db_addr_offset >> PAGE_SHIFT;\n+\tif (vma->vm_pgoff == (db_addr_offset >> PAGE_SHIFT)) {\n+\t\tvma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);\n+\t\tvma->vm_private_data = ucontext;\n+\t} else {\n+\t\tif ((vma->vm_pgoff - (push_offset >> PAGE_SHIFT)) % 2)\n+\t\t\tvma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);\n+\t\telse\n+\t\t\tvma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);\n+\t}\n+\n+\treturn rdma_user_mmap_io(context, vma,\n+\t\t\t\t vma->vm_pgoff + (pci_resource_start(ucontext->iwdev->rf->pdev, 0)\n+\t\t\t\t\t\t >> PAGE_SHIFT),\n+\t\t\t\t PAGE_SIZE, vma->vm_page_prot);\n+}\n+\n+/**\n+ * irdma_alloc_push_page - allocate a push page for qp\n+ * @iwqp: qp pointer\n+ */\n+static void irdma_alloc_push_page(struct 
irdma_qp *iwqp)\n+{\n+\tstruct irdma_cqp_request *cqp_request;\n+\tstruct cqp_cmds_info *cqp_info;\n+\tstruct irdma_device *iwdev = iwqp->iwdev;\n+\tstruct irdma_sc_qp *qp = &iwqp->sc_qp;\n+\tenum irdma_status_code status;\n+\n+\tif (qp->push_idx != IRDMA_INVALID_PUSH_PAGE_INDEX)\n+\t\treturn;\n+\n+\tcqp_request = irdma_get_cqp_request(&iwdev->rf->cqp, true);\n+\tif (!cqp_request)\n+\t\treturn;\n+\n+\tatomic_inc(&cqp_request->refcount);\n+\tcqp_info = &cqp_request->info;\n+\tcqp_info->cqp_cmd = IRDMA_OP_MANAGE_PUSH_PAGE;\n+\tcqp_info->post_sq = 1;\n+\tcqp_info->in.u.manage_push_page.info.push_idx = 0;\n+\tcqp_info->in.u.manage_push_page.info.qs_handle =\n+\t\tqp->vsi->qos[qp->user_pri].qs_handle;\n+\tcqp_info->in.u.manage_push_page.info.free_page = 0;\n+\tcqp_info->in.u.manage_push_page.info.push_page_type = 0;\n+\tcqp_info->in.u.manage_push_page.cqp = &iwdev->rf->cqp.sc_cqp;\n+\tcqp_info->in.u.manage_push_page.scratch = (uintptr_t)cqp_request;\n+\n+\tstatus = irdma_handle_cqp_op(iwdev->rf, cqp_request);\n+\tif (!status) {\n+\t\tqp->push_idx = cqp_request->compl_info.op_ret_val;\n+\t\tqp->push_offset = 0;\n+\t} else {\n+\t\tibdev_dbg(to_ibdev(iwdev), \"VERBS: CQP-OP Push page fail\");\n+\t}\n+\n+\tirdma_put_cqp_request(&iwdev->rf->cqp, cqp_request);\n+}\n+\n+/**\n+ * irdma_alloc_ucontext - Allocate the user context data structure\n+ * @uctx: uverbs context pointer\n+ * @udata: user data\n+ *\n+ * This keeps track of all objects associated with a particular\n+ * user-mode client.\n+ */\n+static int irdma_alloc_ucontext(struct ib_ucontext *uctx,\n+\t\t\t\tstruct ib_udata *udata)\n+{\n+\tstruct ib_device *ibdev = uctx->device;\n+\tstruct irdma_device *iwdev = to_iwdev(ibdev);\n+\tstruct irdma_alloc_ucontext_req req;\n+\tstruct irdma_alloc_ucontext_resp uresp = {};\n+\tstruct i40iw_alloc_ucontext_resp uresp_gen1 = {};\n+\tstruct irdma_ucontext *ucontext = to_ucontext(uctx);\n+\tstruct irdma_uk_attrs *uk_attrs;\n+\n+\tif (ib_copy_from_udata(&req, udata, min(sizeof(req), udata->inlen)))\n+\t\treturn -EINVAL;\n+\n+\tif (req.userspace_ver > IRDMA_ABI_VER)\n+\t\tgoto ver_error;\n+\n+\tucontext->iwdev = iwdev;\n+\tucontext->abi_ver = req.userspace_ver;\n+\n+\tuk_attrs = &iwdev->rf->sc_dev.hw_attrs.uk_attrs;\n+\t/* GEN_1 legacy support with libi40iw */\n+\tif (req.userspace_ver <= 5) {\n+\t\tif (uk_attrs->hw_rev != IRDMA_GEN_1)\n+\t\t\tgoto ver_error;\n+\n+\t\turesp_gen1.max_qps = iwdev->rf->max_qp;\n+\t\turesp_gen1.max_pds = iwdev->rf->sc_dev.hw_attrs.max_hw_pds;\n+\t\turesp_gen1.wq_size = iwdev->rf->sc_dev.hw_attrs.max_qp_wr * 2;\n+\t\turesp_gen1.kernel_ver = req.userspace_ver;\n+\t\tif (ib_copy_to_udata(udata, &uresp_gen1,\n+\t\t\t\t min(sizeof(uresp_gen1), udata->outlen)))\n+\t\t\treturn -EFAULT;\n+\t} else {\n+\t\turesp.kernel_ver = req.userspace_ver;\n+\t\turesp.feature_flags = uk_attrs->feature_flags;\n+\t\turesp.max_hw_wq_frags = uk_attrs->max_hw_wq_frags;\n+\t\turesp.max_hw_read_sges = uk_attrs->max_hw_read_sges;\n+\t\turesp.max_hw_inline = uk_attrs->max_hw_inline;\n+\t\turesp.max_hw_rq_quanta = uk_attrs->max_hw_rq_quanta;\n+\t\turesp.max_hw_wq_quanta = uk_attrs->max_hw_wq_quanta;\n+\t\turesp.max_hw_sq_chunk = uk_attrs->max_hw_sq_chunk;\n+\t\turesp.max_hw_cq_size = uk_attrs->max_hw_cq_size;\n+\t\turesp.min_hw_cq_size = uk_attrs->min_hw_cq_size;\n+\t\turesp.hw_rev = uk_attrs->hw_rev;\n+\t\tif (ib_copy_to_udata(udata, &uresp,\n+\t\t\t\t min(sizeof(uresp), udata->outlen)))\n+\t\t\treturn 
-EFAULT;\n+\t}\n+\n+\tINIT_LIST_HEAD(&ucontext->cq_reg_mem_list);\n+\tspin_lock_init(&ucontext->cq_reg_mem_list_lock);\n+\tINIT_LIST_HEAD(&ucontext->qp_reg_mem_list);\n+\tspin_lock_init(&ucontext->qp_reg_mem_list_lock);\n+\n+\treturn 0;\n+\n+ver_error:\n+\tdev_err(rfdev_to_dev(&iwdev->rf->sc_dev),\n+\t\t\"Invalid userspace driver version detected. Detected version %d, should be %d\\n\",\n+\t\treq.userspace_ver, IRDMA_ABI_VER);\n+\turesp.kernel_ver = IRDMA_ABI_VER;\n+\treturn -EINVAL;\n+}\n+\n+/**\n+ * irdma_dealloc_ucontext - deallocate the user context data structure\n+ * @context: user context created during alloc\n+ */\n+static void irdma_dealloc_ucontext(struct ib_ucontext *context)\n+{\n+}\n+\n+/**\n+ * irdma_alloc_pd - allocate protection domain\n+ * @pd: PD pointer\n+ * @udata: user data\n+ */\n+static int irdma_alloc_pd(struct ib_pd *pd, struct ib_udata *udata)\n+{\n+\tstruct irdma_pd *iwpd = to_iwpd(pd);\n+\tstruct irdma_device *iwdev = to_iwdev(pd->device);\n+\tstruct irdma_sc_dev *dev = &iwdev->rf->sc_dev;\n+\tstruct irdma_pci_f *rf = iwdev->rf;\n+\tstruct irdma_alloc_pd_resp uresp = {};\n+\tstruct irdma_sc_pd *sc_pd;\n+\tu32 pd_id = 0;\n+\tint err;\n+\n+\terr = irdma_alloc_rsrc(rf, rf->allocated_pds, rf->max_pd, &pd_id,\n+\t\t\t &rf->next_pd);\n+\tif (err)\n+\t\treturn err;\n+\n+\tsc_pd = &iwpd->sc_pd;\n+\tif (udata) {\n+\t\tstruct irdma_ucontext *ucontext =\n+\t\t\trdma_udata_to_drv_context(udata, struct irdma_ucontext,\n+\t\t\t\t\t\t ibucontext);\n+\t\tdev->iw_pd_ops->pd_init(dev, sc_pd, pd_id, ucontext->abi_ver);\n+\t\turesp.pd_id = pd_id;\n+\t\tif (ib_copy_to_udata(udata, &uresp,\n+\t\t\t\t min(sizeof(uresp), udata->outlen))) {\n+\t\t\terr = -EFAULT;\n+\t\t\tgoto error;\n+\t\t}\n+\t} else {\n+\t\tdev->iw_pd_ops->pd_init(dev, sc_pd, pd_id, IRDMA_ABI_VER);\n+\t}\n+\n+\treturn 0;\n+error:\n+\tirdma_free_rsrc(rf, rf->allocated_pds, pd_id);\n+\n+\treturn err;\n+}\n+\n+/**\n+ * irdma_dealloc_pd - deallocate pd\n+ * @ibpd: ptr of pd to be deallocated\n+ * @udata: user data\n+ */\n+static void irdma_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)\n+{\n+\tstruct irdma_pd *iwpd = to_iwpd(ibpd);\n+\tstruct irdma_device *iwdev = to_iwdev(ibpd->device);\n+\n+\tirdma_free_rsrc(iwdev->rf, iwdev->rf->allocated_pds, iwpd->sc_pd.pd_id);\n+}\n+\n+/**\n+ * irdma_get_pbl - Retrieve pbl from a list given a virtual\n+ * address\n+ * @va: user virtual address\n+ * @pbl_list: pbl list to search in (QP's or CQ's)\n+ */\n+static struct irdma_pbl *irdma_get_pbl(unsigned long va,\n+\t\t\t\t struct list_head *pbl_list)\n+{\n+\tstruct irdma_pbl *iwpbl;\n+\n+\tlist_for_each_entry (iwpbl, pbl_list, list) {\n+\t\tif (iwpbl->user_base == va) {\n+\t\t\tlist_del(&iwpbl->list);\n+\t\t\tiwpbl->on_list = false;\n+\t\t\treturn iwpbl;\n+\t\t}\n+\t}\n+\n+\treturn NULL;\n+}\n+\n+/**\n+ * irdma_clean_cqes - clean cq entries for qp\n+ * @iwqp: qp ptr (user or kernel)\n+ * @iwcq: cq ptr\n+ */\n+static void irdma_clean_cqes(struct irdma_qp *iwqp, struct irdma_cq *iwcq)\n+{\n+\tstruct irdma_cq_uk *ukcq = &iwcq->sc_cq.cq_uk;\n+\tunsigned long flags;\n+\n+\tspin_lock_irqsave(&iwcq->lock, flags);\n+\tukcq->ops.iw_cq_clean(&iwqp->sc_qp.qp_uk, ukcq);\n+\tspin_unlock_irqrestore(&iwcq->lock, flags);\n+}\n+\n+/**\n+ * irdma_destroy_qp - destroy qp\n+ * @ibqp: qp's ib pointer also to get to device's qp address\n+ * @udata: user data\n+ */\n+static int irdma_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)\n+{\n+\tstruct irdma_qp *iwqp = to_iwqp(ibqp);\n+\n+\tiwqp->destroyed = 1;\n+\tif (iwqp->ibqp_state >= IB_QPS_INIT 
&& iwqp->ibqp_state < IB_QPS_RTS)\n+\t\tirdma_next_iw_state(iwqp, IRDMA_QP_STATE_ERROR, 0, 0, 0);\n+\n+\tif (!iwqp->user_mode) {\n+\t\tif (iwqp->iwscq) {\n+\t\t\tirdma_clean_cqes(iwqp, iwqp->iwscq);\n+\t\t\tif (iwqp->iwrcq != iwqp->iwscq)\n+\t\t\t\tirdma_clean_cqes(iwqp, iwqp->iwrcq);\n+\t\t}\n+\t}\n+\n+\tirdma_rem_ref(&iwqp->ibqp);\n+\n+\treturn 0;\n+}\n+\n+/**\n+ * irdma_setup_virt_qp - setup for allocation of virtual qp\n+ * @iwdev: iwarp device\n+ * @iwqp: qp ptr\n+ * @init_info: initialize info to return\n+ */\n+static int irdma_setup_virt_qp(struct irdma_device *iwdev,\n+\t\t\t struct irdma_qp *iwqp,\n+\t\t\t struct irdma_qp_init_info *init_info)\n+{\n+\tstruct irdma_pbl *iwpbl = iwqp->iwpbl;\n+\tstruct irdma_qp_mr *qpmr = &iwpbl->qp_mr;\n+\n+\tiwqp->page = qpmr->sq_page;\n+\tinit_info->shadow_area_pa = qpmr->shadow;\n+\tif (iwpbl->pbl_allocated) {\n+\t\tinit_info->virtual_map = true;\n+\t\tinit_info->sq_pa = qpmr->sq_pbl.idx;\n+\t\tinit_info->rq_pa = qpmr->rq_pbl.idx;\n+\t} else {\n+\t\tinit_info->sq_pa = qpmr->sq_pbl.addr;\n+\t\tinit_info->rq_pa = qpmr->rq_pbl.addr;\n+\t}\n+\n+\treturn 0;\n+}\n+\n+/**\n+ * irdma_setup_kmode_qp - setup initialization for kernel mode qp\n+ * @iwdev: iwarp device\n+ * @iwqp: qp ptr (user or kernel)\n+ * @info: initialize info to return\n+ */\n+static int irdma_setup_kmode_qp(struct irdma_device *iwdev,\n+\t\t\t\tstruct irdma_qp *iwqp,\n+\t\t\t\tstruct irdma_qp_init_info *info)\n+{\n+\tstruct irdma_dma_mem *mem = &iwqp->kqp.dma_mem;\n+\tu32 sqdepth, rqdepth;\n+\tu8 sqshift, rqshift;\n+\tu32 size;\n+\tenum irdma_status_code status;\n+\tstruct irdma_qp_uk_init_info *ukinfo = &info->qp_uk_init_info;\n+\tstruct irdma_uk_attrs *uk_attrs = &iwdev->rf->sc_dev.hw_attrs.uk_attrs;\n+\n+\tirdma_get_wqe_shift(uk_attrs,\n+\t\tuk_attrs->hw_rev > IRDMA_GEN_1 ? 
ukinfo->max_sq_frag_cnt + 1 :\n+\t\t\t\t\t\t ukinfo->max_sq_frag_cnt,\n+\t\tukinfo->max_inline_data, &sqshift);\n+\tstatus = irdma_get_sqdepth(uk_attrs, ukinfo->sq_size, sqshift,\n+\t\t\t\t &sqdepth);\n+\tif (status)\n+\t\treturn -ENOMEM;\n+\n+\tif (uk_attrs->hw_rev == IRDMA_GEN_1)\n+\t\trqshift = IRDMA_MAX_RQ_WQE_SHIFT_GEN1;\n+\telse\n+\t\tirdma_get_wqe_shift(uk_attrs, ukinfo->max_rq_frag_cnt, 0,\n+\t\t\t\t &rqshift);\n+\n+\tstatus = irdma_get_rqdepth(uk_attrs, ukinfo->rq_size, rqshift,\n+\t\t\t\t &rqdepth);\n+\tif (status)\n+\t\treturn -ENOMEM;\n+\n+\tsize = sqdepth * sizeof(struct irdma_sq_uk_wr_trk_info) +\n+\t (rqdepth << 3);\n+\tiwqp->kqp.wrid_mem = kzalloc(size, GFP_KERNEL);\n+\tif (!iwqp->kqp.wrid_mem)\n+\t\treturn -ENOMEM;\n+\n+\tukinfo->sq_wrtrk_array = (struct irdma_sq_uk_wr_trk_info *)\n+\t\t\t\t iwqp->kqp.wrid_mem;\n+\tif (!ukinfo->sq_wrtrk_array)\n+\t\treturn -ENOMEM;\n+\n+\tukinfo->rq_wrid_array = (u64 *)&ukinfo->sq_wrtrk_array[sqdepth];\n+\tsize = (sqdepth + rqdepth) * IRDMA_QP_WQE_MIN_SIZE;\n+\tsize += (IRDMA_SHADOW_AREA_SIZE << 3);\n+\n+\tmem->size = ALIGN(size, 256);\n+\tmem->va = dma_alloc_coherent(hw_to_dev(iwdev->rf->sc_dev.hw),\n+\t\t\t\t mem->size, &mem->pa, GFP_KERNEL);\n+\tif (!mem->va) {\n+\t\tkfree(ukinfo->sq_wrtrk_array);\n+\t\tukinfo->sq_wrtrk_array = NULL;\n+\t\treturn -ENOMEM;\n+\t}\n+\n+\tukinfo->sq = mem->va;\n+\tinfo->sq_pa = mem->pa;\n+\tukinfo->rq = &ukinfo->sq[sqdepth];\n+\tinfo->rq_pa = info->sq_pa + (sqdepth * IRDMA_QP_WQE_MIN_SIZE);\n+\tukinfo->shadow_area = ukinfo->rq[rqdepth].elem;\n+\tinfo->shadow_area_pa = info->rq_pa + (rqdepth * IRDMA_QP_WQE_MIN_SIZE);\n+\tukinfo->sq_size = sqdepth >> sqshift;\n+\tukinfo->rq_size = rqdepth >> rqshift;\n+\tukinfo->qp_id = iwqp->ibqp.qp_num;\n+\n+\treturn 0;\n+}\n+\n+/**\n+ * irdma_roce_mtu - set MTU to supported path MTU values\n+ * @mtu: MTU\n+ */\n+static u32 irdma_roce_mtu(u32 mtu)\n+{\n+\tif (mtu > 4096)\n+\t\treturn 4096;\n+\telse if (mtu > 2048)\n+\t\treturn 2048;\n+\telse if (mtu > 1024)\n+\t\treturn 1024;\n+\telse if (mtu > 512)\n+\t\treturn 512;\n+\telse\n+\t\treturn 256;\n+}\n+\n+/**\n+ * irdma_create_qp - create qp\n+ * @ibpd: ptr of pd\n+ * @init_attr: attributes for qp\n+ * @udata: user data for create qp\n+ */\n+static struct ib_qp *irdma_create_qp(struct ib_pd *ibpd,\n+\t\t\t\t struct ib_qp_init_attr *init_attr,\n+\t\t\t\t struct ib_udata *udata)\n+{\n+\tstruct irdma_pd *iwpd = to_iwpd(ibpd);\n+\tstruct irdma_device *iwdev = to_iwdev(ibpd->device);\n+\tstruct irdma_pci_f *rf = iwdev->rf;\n+\tstruct irdma_cqp *iwcqp = &rf->cqp;\n+\tstruct irdma_qp *iwqp;\n+\tstruct irdma_create_qp_req req;\n+\tstruct irdma_create_qp_resp uresp = {};\n+\tstruct i40iw_create_qp_resp uresp_gen1 = {};\n+\tu32 qp_num = 0;\n+\tvoid *mem;\n+\tenum irdma_status_code ret;\n+\tint err_code = 0;\n+\tint sq_size;\n+\tint rq_size;\n+\tstruct irdma_sc_qp *qp;\n+\tstruct irdma_sc_dev *dev = &rf->sc_dev;\n+\tstruct irdma_uk_attrs *uk_attrs = &dev->hw_attrs.uk_attrs;\n+\tstruct irdma_qp_init_info init_info = {};\n+\tstruct irdma_create_qp_info *qp_info;\n+\tstruct irdma_cqp_request *cqp_request;\n+\tstruct cqp_cmds_info *cqp_info;\n+\tstruct irdma_qp_host_ctx_info *ctx_info;\n+\tstruct irdma_iwarp_offload_info *iwarp_info;\n+\tstruct irdma_roce_offload_info *roce_info;\n+\tstruct irdma_udp_offload_info *udp_info;\n+\tunsigned long flags;\n+\n+\tif (init_attr->create_flags ||\n+\t init_attr->cap.max_inline_data > uk_attrs->max_hw_inline ||\n+\t init_attr->cap.max_send_sge > uk_attrs->max_hw_wq_frags ||\n+\t 
init_attr->cap.max_recv_sge > uk_attrs->max_hw_wq_frags)\n+\t\treturn ERR_PTR(-EINVAL);\n+\n+\tsq_size = init_attr->cap.max_send_wr;\n+\trq_size = init_attr->cap.max_recv_wr;\n+\n+\tinit_info.vsi = &iwdev->vsi;\n+\tinit_info.qp_uk_init_info.uk_attrs = uk_attrs;\n+\tinit_info.qp_uk_init_info.sq_size = sq_size;\n+\tinit_info.qp_uk_init_info.rq_size = rq_size;\n+\tinit_info.qp_uk_init_info.max_sq_frag_cnt = init_attr->cap.max_send_sge;\n+\tinit_info.qp_uk_init_info.max_rq_frag_cnt = init_attr->cap.max_recv_sge;\n+\tinit_info.qp_uk_init_info.max_inline_data = init_attr->cap.max_inline_data;\n+\n+\tmem = kzalloc(sizeof(*iwqp), GFP_KERNEL);\n+\tif (!mem)\n+\t\treturn ERR_PTR(-ENOMEM);\n+\n+\tiwqp = mem;\n+\tqp = &iwqp->sc_qp;\n+\tqp->qp_uk.back_qp = (void *)iwqp;\n+\tqp->push_idx = IRDMA_INVALID_PUSH_PAGE_INDEX;\n+\n+\tiwqp->q2_ctx_mem.size = ALIGN(IRDMA_Q2_BUF_SIZE + IRDMA_QP_CTX_SIZE,\n+\t\t\t\t 256);\n+\tiwqp->q2_ctx_mem.va = dma_alloc_coherent(hw_to_dev(dev->hw),\n+\t\t\t\t\t\t iwqp->q2_ctx_mem.size,\n+\t\t\t\t\t\t &iwqp->q2_ctx_mem.pa,\n+\t\t\t\t\t\t GFP_KERNEL);\n+\tif (!iwqp->q2_ctx_mem.va) {\n+\t\terr_code = -ENOMEM;\n+\t\tgoto error;\n+\t}\n+\n+\tinit_info.q2 = iwqp->q2_ctx_mem.va;\n+\tinit_info.q2_pa = iwqp->q2_ctx_mem.pa;\n+\tinit_info.host_ctx = (void *)init_info.q2 + IRDMA_Q2_BUF_SIZE;\n+\tinit_info.host_ctx_pa = init_info.q2_pa + IRDMA_Q2_BUF_SIZE;\n+\n+\tif (init_attr->qp_type == IB_QPT_GSI && rf->sc_dev.is_pf)\n+\t\tqp_num = 1;\n+\telse\n+\t\terr_code = irdma_alloc_rsrc(rf, rf->allocated_qps, rf->max_qp,\n+\t\t\t\t\t &qp_num, &rf->next_qp);\n+\tif (err_code)\n+\t\tgoto error;\n+\n+\tiwqp->iwdev = iwdev;\n+\tiwqp->iwpd = iwpd;\n+\tif (init_attr->qp_type == IB_QPT_GSI && !rf->sc_dev.is_pf)\n+\t\tiwqp->ibqp.qp_num = 1;\n+\telse\n+\t\tiwqp->ibqp.qp_num = qp_num;\n+\n+\tqp = &iwqp->sc_qp;\n+\tiwqp->iwscq = to_iwcq(init_attr->send_cq);\n+\tiwqp->iwrcq = to_iwcq(init_attr->recv_cq);\n+\tiwqp->host_ctx.va = init_info.host_ctx;\n+\tiwqp->host_ctx.pa = init_info.host_ctx_pa;\n+\tiwqp->host_ctx.size = IRDMA_QP_CTX_SIZE;\n+\n+\tinit_info.pd = &iwpd->sc_pd;\n+\tinit_info.qp_uk_init_info.qp_id = iwqp->ibqp.qp_num;\n+\tif (!rdma_protocol_roce(&iwdev->iwibdev->ibdev, 1))\n+\t\tinit_info.qp_uk_init_info.first_sq_wq = 1;\n+\tiwqp->ctx_info.qp_compl_ctx = (uintptr_t)qp;\n+\tinit_waitqueue_head(&iwqp->waitq);\n+\tinit_waitqueue_head(&iwqp->mod_qp_waitq);\n+\n+\tif (rdma_protocol_roce(&iwdev->iwibdev->ibdev, 1)) {\n+\t\tif (init_attr->qp_type != IB_QPT_RC &&\n+\t\t init_attr->qp_type != IB_QPT_UD &&\n+\t\t init_attr->qp_type != IB_QPT_GSI) {\n+\t\t\terr_code = -EINVAL;\n+\t\t\tgoto error;\n+\t\t}\n+\t} else {\n+\t\tif (init_attr->qp_type != IB_QPT_RC) {\n+\t\t\terr_code = -EINVAL;\n+\t\t\tgoto error;\n+\t\t}\n+\t}\n+\tif (udata) {\n+\t\terr_code = ib_copy_from_udata(&req, udata,\n+\t\t\t\t\t min(sizeof(req), udata->inlen));\n+\t\tif (err_code) {\n+\t\t\tibdev_dbg(to_ibdev(iwdev),\n+\t\t\t\t \"VERBS: ib_copy_from_data fail\\n\");\n+\t\t\tgoto error;\n+\t\t}\n+\n+\t\tiwqp->ctx_info.qp_compl_ctx = req.user_compl_ctx;\n+\t\tiwqp->user_mode = 1;\n+\t\tif (req.user_wqe_bufs) {\n+\t\t\tstruct irdma_ucontext *ucontext =\n+\t\t\t\trdma_udata_to_drv_context(udata,\n+\t\t\t\t\t\t\t struct irdma_ucontext,\n+\t\t\t\t\t\t\t ibucontext);\n+\t\t\tspin_lock_irqsave(&ucontext->qp_reg_mem_list_lock, flags);\n+\t\t\tiwqp->iwpbl = irdma_get_pbl((unsigned long)req.user_wqe_bufs,\n+\t\t\t\t\t\t &ucontext->qp_reg_mem_list);\n+\t\t\tspin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, flags);\n+\n+\t\t\tif (!iwqp->iwpbl) 
{\n+\t\t\t\terr_code = -ENODATA;\n+\t\t\t\tibdev_dbg(to_ibdev(iwdev),\n+\t\t\t\t\t \"VERBS: no pbl info\\n\");\n+\t\t\t\tgoto error;\n+\t\t\t}\n+\t\t}\n+\t\tinit_info.qp_uk_init_info.abi_ver = iwpd->sc_pd.abi_ver;\n+\t\terr_code = irdma_setup_virt_qp(iwdev, iwqp, &init_info);\n+\t} else {\n+\t\tinit_info.qp_uk_init_info.abi_ver = IRDMA_ABI_VER;\n+\t\terr_code = irdma_setup_kmode_qp(iwdev, iwqp, &init_info);\n+\t}\n+\n+\tif (err_code) {\n+\t\tibdev_dbg(to_ibdev(iwdev), \"VERBS: setup qp failed\\n\");\n+\t\tgoto error;\n+\t}\n+\n+\tif (rdma_protocol_roce(&iwdev->iwibdev->ibdev, 1)) {\n+\t\tif (init_attr->qp_type == IB_QPT_RC) {\n+\t\t\tinit_info.type = IRDMA_QP_TYPE_ROCE_RC;\n+\t\t\tinit_info.qp_uk_init_info.qp_caps = IRDMA_SEND_WITH_IMM |\n+\t\t\t\t\t\t\t IRDMA_WRITE_WITH_IMM |\n+\t\t\t\t\t\t\t IRDMA_ROCE;\n+\t\t} else {\n+\t\t\tinit_info.type = IRDMA_QP_TYPE_ROCE_UD;\n+\t\t\tinit_info.qp_uk_init_info.qp_caps = IRDMA_SEND_WITH_IMM |\n+\t\t\t\t\t\t\t IRDMA_ROCE;\n+\t\t}\n+\t} else {\n+\t\tinit_info.type = IRDMA_QP_TYPE_IWARP;\n+\t\tinit_info.qp_uk_init_info.qp_caps = IRDMA_WRITE_WITH_IMM;\n+\t}\n+\n+\tret = dev->iw_priv_qp_ops->qp_init(qp, &init_info);\n+\tif (ret) {\n+\t\terr_code = -EPROTO;\n+\t\tibdev_dbg(to_ibdev(iwdev), \"VERBS: qp_init fail\\n\");\n+\t\tgoto error;\n+\t}\n+\n+\tctx_info = &iwqp->ctx_info;\n+\tif (rdma_protocol_roce(&iwdev->iwibdev->ibdev, 1)) {\n+\t\tiwqp->ctx_info.roce_info = &iwqp->roce_info;\n+\t\tiwqp->ctx_info.udp_info = &iwqp->udp_info;\n+\t\tudp_info = &iwqp->udp_info;\n+\t\tudp_info->snd_mss = irdma_roce_mtu(iwdev->vsi.mtu);\n+\t\tudp_info->cwnd = 0x400;\n+\t\tudp_info->src_port = 0xc000;\n+\t\tudp_info->dst_port = ROCE_V2_UDP_DPORT;\n+\t\troce_info = &iwqp->roce_info;\n+\t\tether_addr_copy(roce_info->mac_addr, iwdev->netdev->dev_addr);\n+\n+\t\tif (init_attr->qp_type == IB_QPT_GSI && !rf->sc_dev.is_pf)\n+\t\t\troce_info->is_qp1 = true;\n+\t\troce_info->rd_en = true;\n+\t\troce_info->wr_rdresp_en = true;\n+\n+\t\troce_info->ack_credits = 0x1E;\n+\t\troce_info->ird_size = IRDMA_MAX_ENCODED_IRD_SIZE;\n+\t\troce_info->ord_size = dev->hw_attrs.max_hw_ord;\n+\n+\t\tif (!iwqp->user_mode) {\n+\t\t\troce_info->priv_mode_en = true;\n+\t\t\troce_info->fast_reg_en = true;\n+\t\t\troce_info->udprivcq_en = true;\n+\t\t}\n+\t\troce_info->roce_tver = 0;\n+\t} else {\n+\t\tiwqp->ctx_info.iwarp_info = &iwqp->iwarp_info;\n+\t\tiwarp_info = &iwqp->iwarp_info;\n+\t\tether_addr_copy(iwarp_info->mac_addr, iwdev->netdev->dev_addr);\n+\t\tiwarp_info->rd_en = true;\n+\t\tiwarp_info->wr_rdresp_en = true;\n+\n+\t\tif (dev->hw_attrs.uk_attrs.hw_rev > IRDMA_GEN_1)\n+\t\t\tiwarp_info->ib_rd_en = true;\n+\t\tif (!iwqp->user_mode) {\n+\t\t\tiwarp_info->priv_mode_en = true;\n+\t\t\tiwarp_info->fast_reg_en = true;\n+\t\t}\n+\t\tiwarp_info->ddp_ver = 1;\n+\t\tiwarp_info->rdmap_ver = 1;\n+\t\tctx_info->iwarp_info_valid = true;\n+\t}\n+\tctx_info->send_cq_num = iwqp->iwscq->sc_cq.cq_uk.cq_id;\n+\tctx_info->rcv_cq_num = iwqp->iwrcq->sc_cq.cq_uk.cq_id;\n+\tif (rdma_protocol_roce(&iwdev->iwibdev->ibdev, 1)) {\n+\t\tret = dev->iw_priv_qp_ops->qp_setctx_roce(&iwqp->sc_qp,\n+\t\t\t\t\t\t\t iwqp->host_ctx.va,\n+\t\t\t\t\t\t\t ctx_info);\n+\t} else {\n+\t\tret = dev->iw_priv_qp_ops->qp_setctx(&iwqp->sc_qp,\n+\t\t\t\t\t\t iwqp->host_ctx.va,\n+\t\t\t\t\t\t ctx_info);\n+\t\tctx_info->iwarp_info_valid = false;\n+\t}\n+\n+\tcqp_request = irdma_get_cqp_request(iwcqp, true);\n+\tif (!cqp_request) {\n+\t\terr_code = -ENOMEM;\n+\t\tgoto error;\n+\t}\n+\n+\tcqp_info = &cqp_request->info;\n+\tqp_info = 
&cqp_request->info.in.u.qp_create.info;\n+\tmemset(qp_info, 0, sizeof(*qp_info));\n+\tqp_info->mac_valid = true;\n+\tqp_info->cq_num_valid = true;\n+\tqp_info->next_iwarp_state = IRDMA_QP_STATE_IDLE;\n+\n+\tcqp_info->cqp_cmd = IRDMA_OP_QP_CREATE;\n+\tcqp_info->post_sq = 1;\n+\tcqp_info->in.u.qp_create.qp = qp;\n+\tcqp_info->in.u.qp_create.scratch = (uintptr_t)cqp_request;\n+\tret = irdma_handle_cqp_op(rf, cqp_request);\n+\tif (ret) {\n+\t\tibdev_dbg(to_ibdev(iwdev), \"VERBS: CQP-OP QP create fail\");\n+\t\terr_code = -ENOMEM;\n+\t\tgoto error;\n+\t}\n+\n+\tirdma_add_ref(&iwqp->ibqp);\n+\tspin_lock_init(&iwqp->lock);\n+\tspin_lock_init(&iwqp->sc_qp.pfpdu.lock);\n+\tiwqp->sig_all = (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR) ? 1 : 0;\n+\trf->qp_table[qp_num] = iwqp;\n+\tiwqp->max_send_wr = sq_size;\n+\tiwqp->max_recv_wr = rq_size;\n+\tif (udata) {\n+\t\t/* GEN_1 legacy support with libi40iw */\n+\t\tif (iwpd->sc_pd.abi_ver <= 5) {\n+\t\t\turesp_gen1.lsmm = 1;\n+\t\t\turesp_gen1.actual_sq_size = sq_size;\n+\t\t\turesp_gen1.actual_rq_size = rq_size;\n+\t\t\turesp_gen1.qp_id = qp_num;\n+\t\t\turesp_gen1.push_idx = IRDMA_INVALID_PUSH_PAGE_INDEX;\n+\t\t\turesp_gen1.lsmm = 1;\n+\t\t\terr_code = ib_copy_to_udata(udata, &uresp_gen1,\n+\t\t\t\t\t\t min(sizeof(uresp_gen1), udata->outlen));\n+\t\t} else {\n+\t\t\tif (rdma_protocol_iwarp(&iwdev->iwibdev->ibdev, 1))\n+\t\t\t\turesp.lsmm = 1;\n+\t\t\turesp.actual_sq_size = sq_size;\n+\t\t\turesp.actual_rq_size = rq_size;\n+\t\t\turesp.qp_id = qp_num;\n+\t\t\turesp.qp_caps = qp->qp_uk.qp_caps;\n+\n+\t\t\terr_code = ib_copy_to_udata(udata, &uresp,\n+\t\t\t\t\t\t min(sizeof(uresp), udata->outlen));\n+\t\t}\n+\t\tif (err_code) {\n+\t\t\tibdev_dbg(to_ibdev(iwdev),\n+\t\t\t\t \"VERBS: copy_to_udata failed\\n\");\n+\t\t\tirdma_destroy_qp(&iwqp->ibqp, udata);\n+\t\t\treturn ERR_PTR(err_code);\n+\t\t}\n+\t}\n+\tinit_completion(&iwqp->sq_drained);\n+\tinit_completion(&iwqp->rq_drained);\n+\treturn &iwqp->ibqp;\n+\n+error:\n+\tirdma_free_qp_rsrc(iwdev, iwqp, qp_num);\n+\n+\treturn ERR_PTR(err_code);\n+}\n+\n+/**\n+ * irdma_query - query qp attributes\n+ * @ibqp: qp pointer\n+ * @attr: attributes pointer\n+ * @attr_mask: Not used\n+ * @init_attr: qp attributes to return\n+ */\n+static int irdma_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,\n+\t\t\t int attr_mask, struct ib_qp_init_attr *init_attr)\n+{\n+\tstruct irdma_qp *iwqp = to_iwqp(ibqp);\n+\tstruct irdma_sc_qp *qp = &iwqp->sc_qp;\n+\n+\tattr->qp_state = iwqp->ibqp_state;\n+\tattr->cur_qp_state = iwqp->ibqp_state;\n+\tattr->qp_access_flags = 0;\n+\tattr->cap.max_send_wr = iwqp->max_send_wr;\n+\tattr->cap.max_recv_wr = iwqp->max_recv_wr;\n+\tattr->cap.max_inline_data = qp->qp_uk.max_inline_data;\n+\tattr->cap.max_send_sge = qp->qp_uk.max_sq_frag_cnt;\n+\tattr->cap.max_recv_sge = qp->qp_uk.max_rq_frag_cnt;\n+\tattr->qkey = iwqp->roce_info.qkey;\n+\n+\tinit_attr->event_handler = iwqp->ibqp.event_handler;\n+\tinit_attr->qp_context = iwqp->ibqp.qp_context;\n+\tinit_attr->send_cq = iwqp->ibqp.send_cq;\n+\tinit_attr->recv_cq = iwqp->ibqp.recv_cq;\n+\tinit_attr->srq = iwqp->ibqp.srq;\n+\tinit_attr->cap = attr->cap;\n+\n+\treturn 0;\n+}\n+\n+/**\n+ * irdma_query_pkey - Query partition key\n+ * @ibdev: device pointer from stack\n+ * @port: port number\n+ * @index: index of pkey\n+ * @pkey: pointer to store the pkey\n+ */\n+static int irdma_query_pkey(struct ib_device *ibdev, u8 port, u16 index,\n+\t\t\t u16 *pkey)\n+{\n+\tstruct irdma_device *iwdev = to_iwdev(ibdev);\n+\n+\tif (index >= 
IRDMA_PKEY_TBL_SZ)\n+\t\treturn -EINVAL;\n+\n+\tif (rdma_protocol_roce(&iwdev->iwibdev->ibdev, 1))\n+\t\t*pkey = IRDMA_DEFAULT_PKEY;\n+\telse\n+\t\t*pkey = 0;\n+\n+\treturn 0;\n+}\n+\n+/**\n+ * irdma_modify_qp_roce - modify qp request\n+ * @ibqp: qp's pointer for modify\n+ * @attr: access attributes\n+ * @attr_mask: state mask\n+ * @udata: user data\n+ */\n+int irdma_modify_qp_roce(struct ib_qp *ibqp, struct ib_qp_attr *attr,\n+\t\t\t int attr_mask, struct ib_udata *udata)\n+{\n+\tstruct irdma_pd *iwpd = to_iwpd(ibqp->pd);\n+\tstruct irdma_qp *iwqp = to_iwqp(ibqp);\n+\tstruct irdma_device *iwdev = iwqp->iwdev;\n+\tstruct irdma_sc_dev *dev = &iwdev->rf->sc_dev;\n+\tstruct irdma_qp_host_ctx_info *ctx_info;\n+\tstruct irdma_roce_offload_info *roce_info;\n+\tstruct irdma_udp_offload_info *udp_info;\n+\tstruct irdma_modify_qp_info info = {};\n+\tstruct irdma_modify_qp_resp uresp = {};\n+\tunsigned long flags;\n+\tu8 issue_modify_qp = 0;\n+\tint ret = 0;\n+\n+\tctx_info = &iwqp->ctx_info;\n+\troce_info = &iwqp->roce_info;\n+\tudp_info = &iwqp->udp_info;\n+\n+\tif (attr_mask & IB_QP_DEST_QPN)\n+\t\troce_info->dest_qp = attr->dest_qp_num;\n+\n+\tif (attr_mask & IB_QP_PKEY_INDEX) {\n+\t\tret = irdma_query_pkey(ibqp->device, 0, attr->pkey_index,\n+\t\t\t\t &roce_info->p_key);\n+\t\tif (ret)\n+\t\t\treturn ret;\n+\t}\n+\n+\tif (attr_mask & IB_QP_QKEY)\n+\t\troce_info->qkey = attr->qkey;\n+\n+\tif (attr_mask & IB_QP_PORT)\n+\t\tiwqp->roce_ah.av.attrs.port_num = attr->ah_attr.port_num;\n+\n+\tif (attr_mask & IB_QP_PATH_MTU) {\n+\t\tconst u16 path_mtu[] = {-1, 256, 512, 1024, 2048, 4096};\n+\n+\t\tif (attr->path_mtu < IB_MTU_256 ||\n+\t\t attr->path_mtu > IB_MTU_4096 ||\n+\t\t iwdev->vsi.mtu <= path_mtu[attr->path_mtu]) {\n+\t\t\tdev_warn(rfdev_to_dev(dev), \"Invalid MTU %d\\n\",\n+\t\t\t\t attr->path_mtu);\n+\t\t\treturn -EINVAL;\n+\t\t}\n+\n+\t\tudp_info->snd_mss = path_mtu[attr->path_mtu];\n+\t}\n+\n+\tif (attr_mask & IB_QP_SQ_PSN) {\n+\t\tudp_info->psn_nxt = attr->sq_psn;\n+\t\tudp_info->lsn = 0xffff;\n+\t\tudp_info->psn_una = attr->sq_psn;\n+\t\tudp_info->psn_max = attr->sq_psn;\n+\t}\n+\n+\tif (attr_mask & IB_QP_RQ_PSN)\n+\t\tudp_info->epsn = attr->rq_psn;\n+\n+\tif (attr_mask & IB_QP_RNR_RETRY)\n+\t\tudp_info->rnr_nak_thresh = attr->rnr_retry;\n+\n+\tif (attr_mask & IB_QP_RETRY_CNT)\n+\t\tudp_info->rexmit_thresh = attr->retry_cnt;\n+\n+\tctx_info->roce_info->pd_id = iwpd->sc_pd.pd_id;\n+\n+\tif (attr_mask & IB_QP_AV) {\n+\t\tstruct irdma_av *av = &iwqp->roce_ah.av;\n+\t\tconst struct ib_gid_attr *sgid_attr;\n+\t\tu16 vlan_id = VLAN_N_VID;\n+\t\tu32 local_ip[4];\n+\n+\t\tmemset(&iwqp->roce_ah, 0, sizeof(iwqp->roce_ah));\n+\t\tif (attr->ah_attr.ah_flags & IB_AH_GRH) {\n+\t\t\tudp_info->ttl = attr->ah_attr.grh.hop_limit;\n+\t\t\tudp_info->flow_label = attr->ah_attr.grh.flow_label;\n+\t\t\tudp_info->tos = attr->ah_attr.grh.traffic_class;\n+\t\t\tdev->ws_remove(iwqp->sc_qp.vsi, ctx_info->user_pri);\n+\t\t\tctx_info->user_pri = rt_tos2priority(udp_info->tos);\n+\t\t\tiwqp->sc_qp.user_pri = ctx_info->user_pri;\n+\t\t\tif (dev->ws_add(iwqp->sc_qp.vsi, ctx_info->user_pri))\n+\t\t\t\treturn -ENOMEM;\n+\t\t\tirdma_qp_add_qos(&iwqp->sc_qp);\n+\t\t}\n+\t\tsgid_attr = attr->ah_attr.grh.sgid_attr;\n+\t\tret = rdma_read_gid_l2_fields(sgid_attr, &vlan_id,\n+\t\t\t\t\t ctx_info->roce_info->mac_addr);\n+\t\tif (ret)\n+\t\t\treturn ret;\n+\n+\t\tif (vlan_id >= VLAN_N_VID && iwdev->dcb)\n+\t\t\tvlan_id = 0;\n+\t\tif (vlan_id < VLAN_N_VID) {\n+\t\t\tudp_info->insert_vlan_tag = true;\n+\t\t\tudp_info->vlan_tag = 
vlan_id |\n+\t\t\t\tctx_info->user_pri << VLAN_PRIO_SHIFT;\n+\t\t} else {\n+\t\t\tudp_info->insert_vlan_tag = false;\n+\t\t}\n+\n+\t\tav->attrs = attr->ah_attr;\n+\t\tav->attrs.port_num = attr->ah_attr.port_num;\n+\t\trdma_gid2ip(&av->sgid_addr.saddr, &sgid_attr->gid);\n+\t\trdma_gid2ip(&av->dgid_addr.saddr, &attr->ah_attr.grh.dgid);\n+\t\troce_info->local_qp = ibqp->qp_num;\n+\t\tif (av->sgid_addr.saddr.sa_family == AF_INET6) {\n+\t\t\t__be32 *daddr =\n+\t\t\t\tav->dgid_addr.saddr_in6.sin6_addr.in6_u.u6_addr32;\n+\t\t\t__be32 *saddr =\n+\t\t\t\tav->sgid_addr.saddr_in6.sin6_addr.in6_u.u6_addr32;\n+\n+\t\t\tirdma_copy_ip_ntohl(&udp_info->dest_ip_addr0, daddr);\n+\t\t\tirdma_copy_ip_ntohl(&udp_info->local_ipaddr0, saddr);\n+\n+\t\t\tudp_info->ipv4 = false;\n+\t\t\tirdma_copy_ip_ntohl(local_ip, daddr);\n+\n+\t\t\tudp_info->arp_idx = irdma_arp_table(iwdev->rf,\n+\t\t\t\t\t\t\t &local_ip[0],\n+\t\t\t\t\t\t\t false, NULL,\n+\t\t\t\t\t\t\t IRDMA_ARP_RESOLVE);\n+\t\t} else {\n+\t\t\t__be32 saddr = av->sgid_addr.saddr_in.sin_addr.s_addr;\n+\t\t\t__be32 daddr = av->dgid_addr.saddr_in.sin_addr.s_addr;\n+\n+\t\t\tlocal_ip[0] = ntohl(daddr);\n+\n+\t\t\tudp_info->ipv4 = true;\n+\t\t\tudp_info->dest_ip_addr0 = 0;\n+\t\t\tudp_info->dest_ip_addr1 = 0;\n+\t\t\tudp_info->dest_ip_addr2 = 0;\n+\t\t\tudp_info->dest_ip_addr3 = local_ip[0];\n+\n+\t\t\tudp_info->local_ipaddr0 = 0;\n+\t\t\tudp_info->local_ipaddr1 = 0;\n+\t\t\tudp_info->local_ipaddr2 = 0;\n+\t\t\tudp_info->local_ipaddr3 = ntohl(saddr);\n+\t\t}\n+\t\tudp_info->arp_idx =\n+\t\t\tirdma_add_arp(iwdev->rf, local_ip, udp_info->ipv4,\n+\t\t\t\t attr->ah_attr.roce.dmac);\n+\t}\n+\n+\tif (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {\n+\t\tif (attr->max_rd_atomic > dev->hw_attrs.max_hw_ord) {\n+\t\t\tdev_err(rfdev_to_dev(dev),\n+\t\t\t\t\"rd_atomic = %d, above max_hw_ord=%d\\n\",\n+\t\t\t\tattr->max_rd_atomic, dev->hw_attrs.max_hw_ord);\n+\t\t\treturn -EINVAL;\n+\t\t}\n+\t\tif (attr->max_rd_atomic)\n+\t\t\troce_info->ord_size = attr->max_rd_atomic;\n+\t\tinfo.ord_valid = true;\n+\t}\n+\n+\tif (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {\n+\t\tif (attr->max_dest_rd_atomic > dev->hw_attrs.max_hw_ird) {\n+\t\t\tdev_err(rfdev_to_dev(dev),\n+\t\t\t\t\"rd_atomic = %d, above max_hw_ird=%d\\n\",\n+\t\t\t\tattr->max_rd_atomic, dev->hw_attrs.max_hw_ird);\n+\t\t\treturn -EINVAL;\n+\t\t}\n+\t\tif (attr->max_dest_rd_atomic)\n+\t\t\troce_info->ird_size = irdma_derive_hw_ird_setting(attr->max_dest_rd_atomic);\n+\t}\n+\n+\tif (attr_mask & IB_QP_ACCESS_FLAGS) {\n+\t\tif (attr->qp_access_flags & IB_ACCESS_LOCAL_WRITE)\n+\t\t\troce_info->wr_rdresp_en = true;\n+\t\tif (attr->qp_access_flags & IB_ACCESS_REMOTE_WRITE)\n+\t\t\troce_info->wr_rdresp_en = true;\n+\t\tif (attr->qp_access_flags & IB_ACCESS_REMOTE_READ)\n+\t\t\troce_info->rd_en = true;\n+\t\tif (attr->qp_access_flags & IB_ACCESS_MW_BIND)\n+\t\t\troce_info->bind_en = true;\n+\n+\t\tif (iwqp->user_mode) {\n+\t\t\troce_info->rd_en = true;\n+\t\t\troce_info->wr_rdresp_en = true;\n+\t\t\troce_info->priv_mode_en = false;\n+\t\t}\n+\t}\n+\n+\twait_event(iwqp->mod_qp_waitq, !atomic_read(&iwqp->hw_mod_qp_pend));\n+\n+\tspin_lock_irqsave(&iwqp->lock, flags);\n+\tif (attr_mask & IB_QP_STATE) {\n+\t\tif (!ib_modify_qp_is_ok(iwqp->ibqp_state, attr->qp_state,\n+\t\t\t\t\tiwqp->ibqp.qp_type, attr_mask)) {\n+\t\t\tdev_warn(rfdev_to_dev(dev),\n+\t\t\t\t \"modify_qp invalid for qp_id=%d, old_state=0x%x, new_state=0x%x\\n\",\n+\t\t\t\t iwqp->ibqp.qp_num, iwqp->ibqp_state,\n+\t\t\t\t attr->qp_state);\n+\t\t\tret = -EINVAL;\n+\t\t\tgoto 
exit;\n+\t\t}\n+\t\tinfo.curr_iwarp_state = iwqp->iwarp_state;\n+\n+\t\tswitch (attr->qp_state) {\n+\t\tcase IB_QPS_INIT:\n+\t\t\tif (iwqp->iwarp_state > IRDMA_QP_STATE_IDLE) {\n+\t\t\t\tret = -EINVAL;\n+\t\t\t\tgoto exit;\n+\t\t\t}\n+\n+\t\t\tif (iwqp->iwarp_state == IRDMA_QP_STATE_INVALID) {\n+\t\t\t\tinfo.next_iwarp_state = IRDMA_QP_STATE_IDLE;\n+\t\t\t\tissue_modify_qp = 1;\n+\t\t\t}\n+\t\t\tbreak;\n+\t\tcase IB_QPS_RTR:\n+\t\t\tif (iwqp->iwarp_state > IRDMA_QP_STATE_IDLE) {\n+\t\t\t\tret = -EINVAL;\n+\t\t\t\tgoto exit;\n+\t\t\t}\n+\t\t\tinfo.arp_cache_idx_valid = true;\n+\t\t\tinfo.cq_num_valid = true;\n+\t\t\tinfo.next_iwarp_state = IRDMA_QP_STATE_RTR;\n+\t\t\tissue_modify_qp = 1;\n+\t\t\tbreak;\n+\t\tcase IB_QPS_RTS:\n+\t\t\tif (iwqp->ibqp_state < IB_QPS_RTR ||\n+\t\t\t iwqp->ibqp_state == IB_QPS_ERR) {\n+\t\t\t\tret = -EINVAL;\n+\t\t\t\tgoto exit;\n+\t\t\t}\n+\n+\t\t\tinfo.arp_cache_idx_valid = true;\n+\t\t\tinfo.cq_num_valid = true;\n+\t\t\tinfo.next_iwarp_state = IRDMA_QP_STATE_RTS;\n+\t\t\tissue_modify_qp = 1;\n+\t\t\tif (iwdev->push_mode && udata &&\n+\t\t\t dev->hw_attrs.uk_attrs.hw_rev > IRDMA_GEN_1)\n+\t\t\t\tirdma_alloc_push_page(iwqp);\n+\t\t\tbreak;\n+\t\tcase IB_QPS_SQD:\n+\t\t\tif (iwqp->hw_iwarp_state > IRDMA_QP_STATE_RTS)\n+\t\t\t\tgoto exit;\n+\n+\t\t\tif (iwqp->iwarp_state == IRDMA_QP_STATE_CLOSING ||\n+\t\t\t iwqp->iwarp_state < IRDMA_QP_STATE_RTS)\n+\t\t\t\tgoto exit;\n+\n+\t\t\tif (iwqp->iwarp_state > IRDMA_QP_STATE_CLOSING) {\n+\t\t\t\tret = -EINVAL;\n+\t\t\t\tgoto exit;\n+\t\t\t}\n+\n+\t\t\tinfo.next_iwarp_state = IRDMA_QP_STATE_ERROR;\n+\t\t\tissue_modify_qp = 1;\n+\t\t\tbreak;\n+\t\tcase IB_QPS_SQE:\n+\t\tcase IB_QPS_ERR:\n+\t\tcase IB_QPS_RESET:\n+\t\t\tif (iwqp->ibqp_state == IB_QPS_SQD)\n+\t\t\t\tbreak;\n+\n+\t\t\tif (iwqp->iwarp_state == IRDMA_QP_STATE_ERROR) {\n+\t\t\t\tret = -EINVAL;\n+\t\t\t\tgoto exit;\n+\t\t\t}\n+\n+\t\t\tinfo.next_iwarp_state = IRDMA_QP_STATE_ERROR;\n+\t\t\tissue_modify_qp = 1;\n+\t\t\tbreak;\n+\t\tdefault:\n+\t\t\tret = -EINVAL;\n+\t\t\tgoto exit;\n+\t\t}\n+\n+\t\tiwqp->ibqp_state = attr->qp_state;\n+\t}\n+\n+\tctx_info->send_cq_num = iwqp->iwscq->sc_cq.cq_uk.cq_id;\n+\tctx_info->rcv_cq_num = iwqp->iwrcq->sc_cq.cq_uk.cq_id;\n+\tret = dev->iw_priv_qp_ops->qp_setctx_roce(&iwqp->sc_qp,\n+\t\t\t\t\t\t iwqp->host_ctx.va, ctx_info);\n+\tspin_unlock_irqrestore(&iwqp->lock, flags);\n+\n+\tif (ret) {\n+\t\tibdev_dbg(to_ibdev(iwdev), \"VERBS: setctx_roce\\n\");\n+\t\treturn -EINVAL;\n+\t}\n+\n+\tif (attr_mask & IB_QP_STATE) {\n+\t\tif (issue_modify_qp) {\n+\t\t\tctx_info->rem_endpoint_idx = udp_info->arp_idx;\n+\t\t\tif (irdma_hw_modify_qp(iwdev, iwqp, &info, true))\n+\t\t\t\treturn -EINVAL;\n+\t\t\tspin_lock_irqsave(&iwqp->lock, flags);\n+\t\t\tif (iwqp->iwarp_state == info.curr_iwarp_state) {\n+\t\t\t\tiwqp->iwarp_state = info.next_iwarp_state;\n+\t\t\t\tiwqp->ibqp_state = attr->qp_state;\n+\t\t\t}\n+\t\t\tif (iwqp->ibqp_state > IB_QPS_RTS &&\n+\t\t\t !iwqp->flush_issued) {\n+\t\t\t\tiwqp->flush_issued = 1;\n+\t\t\t\tspin_unlock_irqrestore(&iwqp->lock, flags);\n+\t\t\t\tirdma_flush_wqes(iwdev->rf, iwqp);\n+\t\t\t} else {\n+\t\t\t\tspin_unlock_irqrestore(&iwqp->lock, flags);\n+\t\t\t}\n+\t\t} else {\n+\t\t\tiwqp->ibqp_state = attr->qp_state;\n+\t\t}\n+\t\tif (udata && dev->hw_attrs.uk_attrs.hw_rev > IRDMA_GEN_1) {\n+\t\t\turesp.push_idx = iwqp->sc_qp.push_idx;\n+\t\t\turesp.push_offset = iwqp->sc_qp.push_offset;\n+\t\t\tret = ib_copy_to_udata(udata, &uresp, min(sizeof(uresp), udata->outlen));\n+\t\t\tif (ret) 
{\n+\t\t\t\tibdev_dbg(to_ibdev(iwdev),\n+\t\t\t\t\t \"VERBS: copy_to_udata failed\\n\");\n+\t\t\t\treturn ret;\n+\t\t\t}\n+\t\t}\n+\t}\n+\n+\treturn 0;\n+exit:\n+\tspin_unlock_irqrestore(&iwqp->lock, flags);\n+\n+\treturn ret;\n+}\n+\n+/**\n+ * irdma_modify_qp - modify qp request\n+ * @ibqp: qp's pointer for modify\n+ * @attr: access attributes\n+ * @attr_mask: state mask\n+ * @udata: user data\n+ */\n+int irdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask,\n+\t\t struct ib_udata *udata)\n+{\n+\tstruct irdma_qp *iwqp = to_iwqp(ibqp);\n+\tstruct irdma_device *iwdev = iwqp->iwdev;\n+\tstruct irdma_sc_dev *dev = &iwdev->rf->sc_dev;\n+\tstruct irdma_qp_host_ctx_info *ctx_info;\n+\tstruct irdma_tcp_offload_info *tcp_info;\n+\tstruct irdma_iwarp_offload_info *offload_info;\n+\tstruct irdma_modify_qp_info info = {};\n+\tstruct irdma_modify_qp_resp uresp = {};\n+\tu8 issue_modify_qp = 0;\n+\tu8 dont_wait = 0;\n+\tint err;\n+\tunsigned long flags;\n+\n+\tctx_info = &iwqp->ctx_info;\n+\toffload_info = &iwqp->iwarp_info;\n+\ttcp_info = &iwqp->tcp_info;\n+\n+\twait_event(iwqp->mod_qp_waitq, !atomic_read(&iwqp->hw_mod_qp_pend));\n+\n+\tspin_lock_irqsave(&iwqp->lock, flags);\n+\tif (attr_mask & IB_QP_STATE) {\n+\t\tinfo.curr_iwarp_state = iwqp->iwarp_state;\n+\t\tswitch (attr->qp_state) {\n+\t\tcase IB_QPS_INIT:\n+\t\tcase IB_QPS_RTR:\n+\t\t\tif (iwqp->iwarp_state > IRDMA_QP_STATE_IDLE) {\n+\t\t\t\terr = -EINVAL;\n+\t\t\t\tgoto exit;\n+\t\t\t}\n+\n+\t\t\tif (iwqp->iwarp_state == IRDMA_QP_STATE_INVALID) {\n+\t\t\t\tinfo.next_iwarp_state = IRDMA_QP_STATE_IDLE;\n+\t\t\t\tissue_modify_qp = 1;\n+\t\t\t}\n+\t\t\tbreak;\n+\t\tcase IB_QPS_RTS:\n+\t\t\tif (iwqp->iwarp_state > IRDMA_QP_STATE_RTS ||\n+\t\t\t !iwqp->cm_id) {\n+\t\t\t\terr = -EINVAL;\n+\t\t\t\tgoto exit;\n+\t\t\t}\n+\n+\t\t\tissue_modify_qp = 1;\n+\t\t\tiwqp->hw_tcp_state = IRDMA_TCP_STATE_ESTABLISHED;\n+\t\t\tiwqp->hte_added = 1;\n+\t\t\tinfo.next_iwarp_state = IRDMA_QP_STATE_RTS;\n+\t\t\tinfo.tcp_ctx_valid = true;\n+\t\t\tinfo.ord_valid = true;\n+\t\t\tinfo.arp_cache_idx_valid = true;\n+\t\t\tinfo.cq_num_valid = true;\n+\t\t\tif (iwdev->push_mode && udata &&\n+\t\t\t dev->hw_attrs.uk_attrs.hw_rev > IRDMA_GEN_1)\n+\t\t\t\tirdma_alloc_push_page(iwqp);\n+\t\t\tbreak;\n+\t\tcase IB_QPS_SQD:\n+\t\t\tif (iwqp->hw_iwarp_state > IRDMA_QP_STATE_RTS) {\n+\t\t\t\terr = 0;\n+\t\t\t\tgoto exit;\n+\t\t\t}\n+\n+\t\t\tif (iwqp->iwarp_state == IRDMA_QP_STATE_CLOSING ||\n+\t\t\t iwqp->iwarp_state < IRDMA_QP_STATE_RTS) {\n+\t\t\t\terr = 0;\n+\t\t\t\tgoto exit;\n+\t\t\t}\n+\n+\t\t\tif (iwqp->iwarp_state > IRDMA_QP_STATE_CLOSING) {\n+\t\t\t\terr = -EINVAL;\n+\t\t\t\tgoto exit;\n+\t\t\t}\n+\n+\t\t\tinfo.next_iwarp_state = IRDMA_QP_STATE_CLOSING;\n+\t\t\tissue_modify_qp = 1;\n+\t\t\tbreak;\n+\t\tcase IB_QPS_SQE:\n+\t\t\tif (iwqp->iwarp_state >= IRDMA_QP_STATE_TERMINATE) {\n+\t\t\t\terr = -EINVAL;\n+\t\t\t\tgoto exit;\n+\t\t\t}\n+\n+\t\t\t/* fall-through */\n+\t\tcase IB_QPS_ERR:\n+\t\tcase IB_QPS_RESET:\n+\t\t\tif (iwqp->iwarp_state == IRDMA_QP_STATE_ERROR) {\n+\t\t\t\terr = -EINVAL;\n+\t\t\t\tgoto exit;\n+\t\t\t}\n+\n+\t\t\tif (iwqp->sc_qp.term_flags) {\n+\t\t\t\tspin_unlock_irqrestore(&iwqp->lock, flags);\n+\t\t\t\tirdma_terminate_del_timer(&iwqp->sc_qp);\n+\t\t\t\tspin_lock_irqsave(&iwqp->lock, flags);\n+\t\t\t}\n+\t\t\tinfo.next_iwarp_state = IRDMA_QP_STATE_ERROR;\n+\t\t\tif (iwqp->hw_tcp_state > IRDMA_TCP_STATE_CLOSED &&\n+\t\t\t iwdev->iw_status &&\n+\t\t\t iwqp->hw_tcp_state != IRDMA_TCP_STATE_TIME_WAIT)\n+\t\t\t\tinfo.reset_tcp_conn = 
true;\n+\t\t\telse\n+\t\t\t\tdont_wait = 1;\n+\n+\t\t\tissue_modify_qp = 1;\n+\t\t\tinfo.next_iwarp_state = IRDMA_QP_STATE_ERROR;\n+\t\t\tbreak;\n+\t\tdefault:\n+\t\t\terr = -EINVAL;\n+\t\t\tgoto exit;\n+\t\t}\n+\n+\t\tiwqp->ibqp_state = attr->qp_state;\n+\t}\n+\tif (attr_mask & IB_QP_ACCESS_FLAGS) {\n+\t\tctx_info->iwarp_info_valid = true;\n+\t\tif (attr->qp_access_flags & IB_ACCESS_LOCAL_WRITE)\n+\t\t\toffload_info->wr_rdresp_en = true;\n+\t\tif (attr->qp_access_flags & IB_ACCESS_REMOTE_WRITE)\n+\t\t\toffload_info->wr_rdresp_en = true;\n+\t\tif (attr->qp_access_flags & IB_ACCESS_REMOTE_READ)\n+\t\t\toffload_info->rd_en = true;\n+\t\tif (attr->qp_access_flags & IB_ACCESS_MW_BIND)\n+\t\t\toffload_info->bind_en = true;\n+\n+\t\tif (iwqp->user_mode) {\n+\t\t\toffload_info->rd_en = true;\n+\t\t\toffload_info->wr_rdresp_en = true;\n+\t\t\toffload_info->priv_mode_en = false;\n+\t\t}\n+\t}\n+\n+\tif (ctx_info->iwarp_info_valid) {\n+\t\tstruct irdma_sc_dev *dev = &iwdev->rf->sc_dev;\n+\t\tint ret;\n+\n+\t\tctx_info->send_cq_num = iwqp->iwscq->sc_cq.cq_uk.cq_id;\n+\t\tctx_info->rcv_cq_num = iwqp->iwrcq->sc_cq.cq_uk.cq_id;\n+\t\tret = dev->iw_priv_qp_ops->qp_setctx(&iwqp->sc_qp,\n+\t\t\t\t\t\t iwqp->host_ctx.va,\n+\t\t\t\t\t\t ctx_info);\n+\t\tif (ret) {\n+\t\t\tibdev_dbg(to_ibdev(iwdev),\n+\t\t\t\t \"VERBS: setting QP context\\n\");\n+\t\t\terr = -EINVAL;\n+\t\t\tgoto exit;\n+\t\t}\n+\t}\n+\tspin_unlock_irqrestore(&iwqp->lock, flags);\n+\n+\tif (attr_mask & IB_QP_STATE) {\n+\t\tif (issue_modify_qp) {\n+\t\t\tctx_info->rem_endpoint_idx = tcp_info->arp_idx;\n+\t\t\tif (irdma_hw_modify_qp(iwdev, iwqp, &info, true))\n+\t\t\t\treturn -EINVAL;\n+\t\t}\n+\n+\t\tspin_lock_irqsave(&iwqp->lock, flags);\n+\t\tif (iwqp->iwarp_state == info.curr_iwarp_state) {\n+\t\t\tiwqp->iwarp_state = info.next_iwarp_state;\n+\t\t\tiwqp->ibqp_state = attr->qp_state;\n+\t\t}\n+\t\tspin_unlock_irqrestore(&iwqp->lock, flags);\n+\t}\n+\n+\tif (issue_modify_qp && iwqp->ibqp_state > IB_QPS_RTS) {\n+\t\tif (dont_wait) {\n+\t\t\tif (iwqp->cm_id && iwqp->hw_tcp_state) {\n+\t\t\t\tspin_lock_irqsave(&iwqp->lock, flags);\n+\t\t\t\tiwqp->hw_tcp_state = IRDMA_TCP_STATE_CLOSED;\n+\t\t\t\tiwqp->last_aeq = IRDMA_AE_RESET_SENT;\n+\t\t\t\tspin_unlock_irqrestore(&iwqp->lock, flags);\n+\t\t\t\tirdma_cm_disconn(iwqp);\n+\t\t\t}\n+\t\t} else {\n+\t\t\tint close_timer_started;\n+\n+\t\t\tspin_lock_irqsave(&iwqp->lock, flags);\n+\t\t\tclose_timer_started = atomic_inc_return(&iwqp->close_timer_started);\n+\t\t\tif (iwqp->cm_id && close_timer_started == 1) {\n+\t\t\t\tiwqp->cm_id->add_ref(iwqp->cm_id);\n+\t\t\t\tspin_unlock_irqrestore(&iwqp->lock, flags);\n+\t\t\t\tirdma_schedule_cm_timer(iwqp->cm_node,\n+\t\t\t\t\t\t\t(struct irdma_puda_buf *)iwqp,\n+\t\t\t\t\t\t\tIRDMA_TIMER_TYPE_CLOSE,\n+\t\t\t\t\t\t\t1,\n+\t\t\t\t\t\t\t0);\n+\t\t\t} else {\n+\t\t\t\tspin_unlock_irqrestore(&iwqp->lock, flags);\n+\t\t\t}\n+\t\t}\n+\t}\n+\tif (attr_mask & IB_QP_STATE && udata &&\n+\t dev->hw_attrs.uk_attrs.hw_rev > IRDMA_GEN_1) {\n+\t\turesp.push_idx = iwqp->sc_qp.push_idx;\n+\t\turesp.push_offset = iwqp->sc_qp.push_offset;\n+\t\terr = ib_copy_to_udata(udata, &uresp, min(sizeof(uresp), udata->outlen));\n+\t\tif (err) {\n+\t\t\tibdev_dbg(to_ibdev(iwdev),\n+\t\t\t\t \"VERBS: copy_to_udata failed\\n\");\n+\t\t\treturn err;\n+\t\t}\n+\t}\n+\n+\treturn 0;\n+exit:\n+\tspin_unlock_irqrestore(&iwqp->lock, flags);\n+\n+\treturn err;\n+}\n+\n+/**\n+ * irdma_cq_free_rsrc - free up resources for cq\n+ * @rf: RDMA PCI function\n+ * @iwcq: cq ptr\n+ */\n+static void 
irdma_cq_free_rsrc(struct irdma_pci_f *rf, struct irdma_cq *iwcq)\n+{\n+\tstruct irdma_sc_cq *cq = &iwcq->sc_cq;\n+\n+\tif (!iwcq->user_mode) {\n+\t\tdma_free_coherent(hw_to_dev(rf->sc_dev.hw), iwcq->kmem.size,\n+\t\t\t\t iwcq->kmem.va, iwcq->kmem.pa);\n+\t\tiwcq->kmem.va = NULL;\n+\t\tdma_free_coherent(hw_to_dev(rf->sc_dev.hw),\n+\t\t\t\t iwcq->kmem_shadow.size,\n+\t\t\t\t iwcq->kmem_shadow.va, iwcq->kmem_shadow.pa);\n+\t\tiwcq->kmem_shadow.va = NULL;\n+\t}\n+\n+\tirdma_free_rsrc(rf, rf->allocated_cqs, cq->cq_uk.cq_id);\n+}\n+\n+/**\n+ * irdma_free_cqbuf - free a cq buffer\n+ * @work: provides access to the cq buffer to free\n+ */\n+static void irdma_free_cqbuf(struct work_struct *work)\n+{\n+\tstruct irdma_cq_buf *cq_buf = container_of(work, struct irdma_cq_buf, work);\n+\n+\tdma_free_coherent(hw_to_dev(cq_buf->hw), cq_buf->kmem_buf.size,\n+\t\t\t cq_buf->kmem_buf.va, cq_buf->kmem_buf.pa);\n+\tcq_buf->kmem_buf.va = NULL;\n+\tkfree(cq_buf);\n+}\n+\n+/**\n+ * irdma_process_resize_list - remove resized cq buffers from the resize_list\n+ * @iwcq: cq which owns the resize_list\n+ * @iwdev: irdma device\n+ * @lcqe_buf: the buffer where the last cqe is received\n+ */\n+static int irdma_process_resize_list(struct irdma_cq *iwcq,\n+\t\t\t\t struct irdma_device *iwdev,\n+\t\t\t\t struct irdma_cq_buf *lcqe_buf)\n+{\n+\tstruct list_head *tmp_node, *list_node;\n+\tstruct irdma_cq_buf *cq_buf;\n+\tint cnt = 0;\n+\n+\tlist_for_each_safe(list_node, tmp_node, &iwcq->resize_list) {\n+\t\tcq_buf = list_entry(list_node, struct irdma_cq_buf, list);\n+\t\tif (cq_buf == lcqe_buf)\n+\t\t\treturn cnt;\n+\n+\t\tlist_del(&cq_buf->list);\n+\t\tqueue_work(iwdev->rf->free_cqbuf_wq, &cq_buf->work);\n+\t\tcnt++;\n+\t}\n+\n+\treturn cnt;\n+}\n+\n+/**\n+ * irdma_destroy_cq - destroy cq\n+ * @ib_cq: cq pointer\n+ * @udata: user data\n+ */\n+static void irdma_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)\n+{\n+\tstruct irdma_cq *iwcq;\n+\tstruct irdma_device *iwdev;\n+\tstruct irdma_sc_cq *cq;\n+\tunsigned long flags;\n+\n+\tiwcq = to_iwcq(ib_cq);\n+\tiwdev = to_iwdev(ib_cq->device);\n+\n+\tif (!list_empty(&iwcq->resize_list)) {\n+\t\tspin_lock_irqsave(&iwcq->lock, flags);\n+\t\tirdma_process_resize_list(iwcq, iwdev, NULL);\n+\t\tspin_unlock_irqrestore(&iwcq->lock, flags);\n+\t}\n+\tcq = &iwcq->sc_cq;\n+\tirdma_cq_wq_destroy(iwdev->rf, cq);\n+\tirdma_cq_free_rsrc(iwdev->rf, iwcq);\n+}\n+\n+/**\n+ * irdma_resize_cq - resize cq\n+ * @ibcq: cq to be resized\n+ * @entries: desired cq size\n+ * @udata: user data\n+ */\n+static int irdma_resize_cq(struct ib_cq *ibcq, int entries,\n+\t\t\t struct ib_udata *udata)\n+{\n+\tstruct irdma_cq *iwcq = to_iwcq(ibcq);\n+\tstruct irdma_sc_dev *dev = iwcq->sc_cq.dev;\n+\tstruct irdma_cqp_request *cqp_request;\n+\tstruct cqp_cmds_info *cqp_info;\n+\tstruct irdma_modify_cq_info *m_info;\n+\tstruct irdma_modify_cq_info info = {};\n+\tstruct irdma_dma_mem kmem_buf;\n+\tstruct irdma_cq_mr *cqmr_buf;\n+\tstruct irdma_pbl *iwpbl_buf;\n+\tstruct irdma_device *iwdev;\n+\tstruct irdma_pci_f *rf;\n+\tstruct irdma_cq_buf *cq_buf = NULL;\n+\tenum irdma_status_code status = 0;\n+\tunsigned long flags;\n+\n+\tiwdev = to_iwdev(ibcq->device);\n+\trf = iwdev->rf;\n+\n+\tif (!(rf->sc_dev.hw_attrs.uk_attrs.feature_flags &\n+\t IRDMA_FEATURE_CQ_RESIZE))\n+\t\treturn -ENOTSUPP;\n+\n+\tif (entries > rf->max_cqe)\n+\t\treturn -EINVAL;\n+\n+\tif (!iwcq->user_mode) {\n+\t\tentries++;\n+\t\tif (rf->sc_dev.hw_attrs.uk_attrs.hw_rev > IRDMA_GEN_1)\n+\t\t\tentries *= 2;\n+\t}\n+\n+\tinfo.cq_size = max(entries, 
4);\n+\n+\tif (info.cq_size == iwcq->sc_cq.cq_uk.cq_size - 1)\n+\t\treturn 0;\n+\n+\tif (udata) {\n+\t\tstruct irdma_resize_cq_req req = {};\n+\t\tstruct irdma_ucontext *ucontext =\n+\t\t\trdma_udata_to_drv_context(udata, struct irdma_ucontext,\n+\t\t\t\t\t\t ibucontext);\n+\n+\t\t/* CQ resize not supported with legacy GEN_1 libi40iw */\n+\t\tif (ucontext->abi_ver <= 5)\n+\t\t\treturn -EOPNOTSUPP;\n+\n+\t\tif (ib_copy_from_udata(&req, udata,\n+\t\t\t\t min(sizeof(req), udata->inlen)))\n+\t\t\treturn -EINVAL;\n+\n+\t\tspin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);\n+\t\tiwpbl_buf = irdma_get_pbl((unsigned long)req.user_cq_buffer,\n+\t\t\t\t\t &ucontext->cq_reg_mem_list);\n+\t\tspin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);\n+\n+\t\tif (!iwpbl_buf)\n+\t\t\treturn -ENOMEM;\n+\n+\t\tcqmr_buf = &iwpbl_buf->cq_mr;\n+\t\tif (iwpbl_buf->pbl_allocated) {\n+\t\t\tinfo.virtual_map = true;\n+\t\t\tinfo.pbl_chunk_size = 1;\n+\t\t\tinfo.first_pm_pbl_idx = cqmr_buf->cq_pbl.idx;\n+\t\t} else {\n+\t\t\tinfo.cq_pa = cqmr_buf->cq_pbl.addr;\n+\t\t}\n+\t} else {\n+\t\t/* Kmode CQ resize */\n+\t\tint rsize;\n+\n+\t\trsize = info.cq_size * sizeof(struct irdma_cqe);\n+\t\tkmem_buf.size = ALIGN(round_up(rsize, 256), 256);\n+\t\tkmem_buf.va = dma_alloc_coherent(hw_to_dev(dev->hw),\n+\t\t\t\t\t\t kmem_buf.size, &kmem_buf.pa,\n+\t\t\t\t\t\t GFP_KERNEL);\n+\t\tif (!kmem_buf.va)\n+\t\t\treturn -ENOMEM;\n+\n+\t\tinfo.cq_base = kmem_buf.va;\n+\t\tinfo.cq_pa = kmem_buf.pa;\n+\t\tcq_buf = kzalloc(sizeof(*cq_buf), GFP_KERNEL);\n+\t\tif (!cq_buf) {\n+\t\t\tdma_free_coherent(hw_to_dev(dev->hw), kmem_buf.size,\n+\t\t\t\t\t kmem_buf.va, kmem_buf.pa);\n+\t\t\treturn -ENOMEM;\n+\t\t}\n+\t}\n+\n+\tcqp_request = irdma_get_cqp_request(&rf->cqp, true);\n+\tif (!cqp_request) {\n+\t\tif (cq_buf)\n+\t\t\tdma_free_coherent(hw_to_dev(dev->hw), kmem_buf.size,\n+\t\t\t\t\t kmem_buf.va, kmem_buf.pa);\n+\t\tkfree(cq_buf);\n+\t\treturn -ENOMEM;\n+\t}\n+\n+\tinfo.shadow_read_threshold = iwcq->sc_cq.shadow_read_threshold;\n+\tinfo.ceq_valid = false;\n+\tinfo.cq_resize = true;\n+\n+\tcqp_info = &cqp_request->info;\n+\tm_info = &cqp_info->in.u.cq_modify.info;\n+\tmemcpy(m_info, &info, sizeof(*m_info));\n+\n+\tcqp_info->cqp_cmd = IRDMA_OP_CQ_MODIFY;\n+\tcqp_info->in.u.cq_modify.cq = &iwcq->sc_cq;\n+\tcqp_info->in.u.cq_modify.scratch = (uintptr_t)cqp_request;\n+\tcqp_info->post_sq = 1;\n+\tstatus = irdma_handle_cqp_op(rf, cqp_request);\n+\tif (status) {\n+\t\tif (cq_buf)\n+\t\t\tdma_free_coherent(hw_to_dev(dev->hw), kmem_buf.size,\n+\t\t\t\t\t kmem_buf.va, kmem_buf.pa);\n+\t\tkfree(cq_buf);\n+\t\tibdev_dbg(to_ibdev(iwdev), \"VERBS: CQP-OP Resize CQ fail\");\n+\t\treturn -EPROTO;\n+\t}\n+\n+\tspin_lock_irqsave(&iwcq->lock, flags);\n+\tif (cq_buf) {\n+\t\tcq_buf->kmem_buf = iwcq->kmem;\n+\t\tcq_buf->hw = dev->hw;\n+\t\tmemcpy(&cq_buf->cq_uk, &iwcq->sc_cq.cq_uk, sizeof(cq_buf->cq_uk));\n+\t\tINIT_WORK(&cq_buf->work, irdma_free_cqbuf);\n+\t\tlist_add_tail(&cq_buf->list, &iwcq->resize_list);\n+\t\tiwcq->kmem = kmem_buf;\n+\t}\n+\n+\tdev->iw_priv_cq_ops->cq_resize(&iwcq->sc_cq, &info);\n+\tibcq->cqe = info.cq_size - 1;\n+\tspin_unlock_irqrestore(&iwcq->lock, flags);\n+\n+\treturn 0;\n+}\n+\n+/**\n+ * irdma_create_cq - create cq\n+ * @ibcq: CQ allocated\n+ * @attr: attributes for cq\n+ * @udata: user data\n+ */\n+static int irdma_create_cq(struct ib_cq *ibcq,\n+\t\t\t const struct ib_cq_init_attr *attr,\n+\t\t\t struct ib_udata *udata)\n+{\n+\tstruct ib_device *ibdev = ibcq->device;\n+\tstruct irdma_device *iwdev = to_iwdev(ibdev);\n+\tstruct irdma_pci_f *rf = iwdev->rf;\n+\tstruct irdma_cq *iwcq = to_iwcq(ibcq);\n+\tu32 cq_num = 0;\n+\tstruct irdma_sc_cq *cq;\n+\tstruct irdma_sc_dev *dev = &rf->sc_dev;\n+\tstruct irdma_cq_init_info info = {};\n+\tenum irdma_status_code status;\n+\tstruct irdma_cqp_request *cqp_request;\n+\tstruct cqp_cmds_info *cqp_info;\n+\tstruct 
irdma_cq_uk_init_info *ukinfo = &info.cq_uk_init_info;\n+\tunsigned long flags;\n+\tint err_code;\n+\tint entries = attr->cqe;\n+\n+\terr_code = irdma_alloc_rsrc(rf, rf->allocated_cqs, rf->max_cq, &cq_num,\n+\t\t\t\t &rf->next_cq);\n+\tif (err_code)\n+\t\treturn err_code;\n+\n+\tcq = &iwcq->sc_cq;\n+\tcq->back_cq = (void *)iwcq;\n+\tspin_lock_init(&iwcq->lock);\n+\tINIT_LIST_HEAD(&iwcq->resize_list);\n+\tinfo.dev = dev;\n+\tukinfo->cq_size = max(entries, 4);\n+\tukinfo->cq_id = cq_num;\n+\tiwcq->ibcq.cqe = info.cq_uk_init_info.cq_size;\n+\tif (attr->comp_vector < rf->ceqs_count)\n+\t\tinfo.ceq_id = attr->comp_vector;\n+\tinfo.ceq_id_valid = true;\n+\tinfo.ceqe_mask = 1;\n+\tinfo.type = IRDMA_CQ_TYPE_IWARP;\n+\tinfo.vsi = &iwdev->vsi;\n+\n+\tif (udata) {\n+\t\tstruct irdma_ucontext *ucontext;\n+\t\tstruct irdma_create_cq_req req = {};\n+\t\tstruct irdma_cq_mr *cqmr;\n+\t\tstruct irdma_pbl *iwpbl;\n+\t\tstruct irdma_pbl *iwpbl_shadow;\n+\t\tstruct irdma_cq_mr *cqmr_shadow;\n+\n+\t\tiwcq->user_mode = true;\n+\t\tucontext =\n+\t\t\trdma_udata_to_drv_context(udata, struct irdma_ucontext,\n+\t\t\t\t\t\t ibucontext);\n+\t\tif (ib_copy_from_udata(&req, udata,\n+\t\t\t\t min(sizeof(req), udata->inlen))) {\n+\t\t\terr_code = -EFAULT;\n+\t\t\tgoto cq_free_rsrc;\n+\t\t}\n+\n+\t\tspin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);\n+\t\tiwpbl = irdma_get_pbl((unsigned long)req.user_cq_buf,\n+\t\t\t\t &ucontext->cq_reg_mem_list);\n+\t\tspin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);\n+\t\tif (!iwpbl) {\n+\t\t\terr_code = -EPROTO;\n+\t\t\tgoto cq_free_rsrc;\n+\t\t}\n+\n+\t\tiwcq->iwpbl = iwpbl;\n+\t\tiwcq->cq_mem_size = 0;\n+\t\tcqmr = &iwpbl->cq_mr;\n+\n+\t\tif (rf->sc_dev.hw_attrs.uk_attrs.feature_flags &\n+\t\t IRDMA_FEATURE_CQ_RESIZE &&\n+\t\t ucontext->abi_ver > 5) {\n+\t\t\tspin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);\n+\t\t\tiwpbl_shadow = irdma_get_pbl(\n+\t\t\t\t\t(unsigned long)req.user_shadow_area,\n+\t\t\t\t\t&ucontext->cq_reg_mem_list);\n+\t\t\tspin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);\n+\n+\t\t\tif (!iwpbl_shadow) {\n+\t\t\t\terr_code = -EPROTO;\n+\t\t\t\tgoto cq_free_rsrc;\n+\t\t\t}\n+\t\t\tiwcq->iwpbl_shadow = iwpbl_shadow;\n+\t\t\tcqmr_shadow = &iwpbl_shadow->cq_mr;\n+\t\t\tinfo.shadow_area_pa = cqmr_shadow->cq_pbl.addr;\n+\t\t\tcqmr->split = true;\n+\t\t} else {\n+\t\t\tinfo.shadow_area_pa = cqmr->shadow;\n+\t\t}\n+\t\tif (iwpbl->pbl_allocated) {\n+\t\t\tinfo.virtual_map = true;\n+\t\t\tinfo.pbl_chunk_size = 1;\n+\t\t\tinfo.first_pm_pbl_idx = cqmr->cq_pbl.idx;\n+\t\t} else {\n+\t\t\tinfo.cq_base_pa = cqmr->cq_pbl.addr;\n+\t\t}\n+\t} else {\n+\t\t/* Kmode allocations */\n+\t\tint rsize;\n+\n+\t\tif (entries > rf->max_cqe) {\n+\t\t\terr_code = -EINVAL;\n+\t\t\tgoto cq_free_rsrc;\n+\t\t}\n+\n+\t\tentries++;\n+\t\tif (dev->hw_attrs.uk_attrs.hw_rev > IRDMA_GEN_1)\n+\t\t\tentries *= 2;\n+\t\tukinfo->cq_size = entries;\n+\n+\t\trsize = info.cq_uk_init_info.cq_size * sizeof(struct irdma_cqe);\n+\t\tiwcq->kmem.size = ALIGN(round_up(rsize, 256), 256);\n+\t\tiwcq->kmem.va = dma_alloc_coherent(hw_to_dev(dev->hw),\n+\t\t\t\t\t\t iwcq->kmem.size,\n+\t\t\t\t\t\t &iwcq->kmem.pa, GFP_KERNEL);\n+\t\tif (!iwcq->kmem.va) {\n+\t\t\terr_code = -ENOMEM;\n+\t\t\tgoto cq_free_rsrc;\n+\t\t}\n+\n+\t\tiwcq->kmem_shadow.size = ALIGN(IRDMA_SHADOW_AREA_SIZE << 3,\n+\t\t\t\t\t 64);\n+\t\tiwcq->kmem_shadow.va = dma_alloc_coherent(hw_to_dev(dev->hw),\n+\t\t\t\t\t\t\t iwcq->kmem_shadow.size,\n+\t\t\t\t\t\t\t &iwcq->kmem_shadow.pa,\n+\t\t\t\t\t\t\t 
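/* may sleep: CQ creation runs in process context */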
GFP_KERNEL);\n+\t\tif (!iwcq->kmem_shadow.va) {\n+\t\t\tdma_free_coherent(hw_to_dev(rf->sc_dev.hw),\n+\t\t\t\t\t iwcq->kmem.size, iwcq->kmem.va,\n+\t\t\t\t\t iwcq->kmem.pa);\n+\t\t\tiwcq->kmem.va = NULL;\n+\t\t\terr_code = -ENOMEM;\n+\t\t\tgoto cq_free_rsrc;\n+\t\t}\n+\t\tinfo.shadow_area_pa = iwcq->kmem_shadow.pa;\n+\t\tukinfo->shadow_area = iwcq->kmem_shadow.va;\n+\t\tukinfo->cq_base = iwcq->kmem.va;\n+\t\tinfo.cq_base_pa = iwcq->kmem.pa;\n+\t}\n+\n+\tif (dev->hw_attrs.uk_attrs.hw_rev > IRDMA_GEN_1)\n+\t\tinfo.shadow_read_threshold = min(info.cq_uk_init_info.cq_size / 2,\n+\t\t\t\t\t\t (u32)IRDMA_MAX_CQ_READ_THRESH);\n+\tif (dev->iw_priv_cq_ops->cq_init(cq, &info)) {\n+\t\tibdev_dbg(to_ibdev(iwdev), \"VERBS: init cq fail\\n\");\n+\t\terr_code = -EPROTO;\n+\t\tgoto cq_free_rsrc;\n+\t}\n+\n+\tcqp_request = irdma_get_cqp_request(&rf->cqp, true);\n+\tif (!cqp_request) {\n+\t\terr_code = -ENOMEM;\n+\t\tgoto cq_free_rsrc;\n+\t}\n+\n+\tcqp_info = &cqp_request->info;\n+\tcqp_info->cqp_cmd = IRDMA_OP_CQ_CREATE;\n+\tcqp_info->post_sq = 1;\n+\tcqp_info->in.u.cq_create.cq = cq;\n+\tcqp_info->in.u.cq_create.check_overflow = true;\n+\tcqp_info->in.u.cq_create.scratch = (uintptr_t)cqp_request;\n+\tstatus = irdma_handle_cqp_op(rf, cqp_request);\n+\tif (status) {\n+\t\tibdev_dbg(to_ibdev(iwdev), \"VERBS: CQP-OP Create CQ fail\");\n+\t\terr_code = -ENOMEM;\n+\t\tgoto cq_free_rsrc;\n+\t}\n+\n+\tif (udata) {\n+\t\tstruct irdma_create_cq_resp resp = {};\n+\n+\t\tresp.cq_id = info.cq_uk_init_info.cq_id;\n+\t\tresp.cq_size = info.cq_uk_init_info.cq_size;\n+\t\tif (ib_copy_to_udata(udata, &resp,\n+\t\t\t\t min(sizeof(resp), udata->outlen))) {\n+\t\t\tibdev_dbg(to_ibdev(iwdev),\n+\t\t\t\t \"VERBS: copy to user data\\n\");\n+\t\t\terr_code = -EPROTO;\n+\t\t\tgoto cq_destroy;\n+\t\t}\n+\t}\n+\treturn 0;\n+\n+cq_destroy:\n+\tirdma_cq_wq_destroy(rf, cq);\n+cq_free_rsrc:\n+\tirdma_cq_free_rsrc(rf, iwcq);\n+\n+\treturn err_code;\n+}\n+\n+/**\n+ * irdma_get_user_access - get hw access from IB access\n+ * @acc: IB access to return hw access\n+ */\n+static inline u16 irdma_get_user_access(int acc)\n+{\n+\tu16 access = 0;\n+\n+\taccess |= (acc & IB_ACCESS_LOCAL_WRITE) ?\n+\t\t IRDMA_ACCESS_FLAGS_LOCALWRITE : 0;\n+\taccess |= (acc & IB_ACCESS_REMOTE_WRITE) ?\n+\t\t IRDMA_ACCESS_FLAGS_REMOTEWRITE : 0;\n+\taccess |= (acc & IB_ACCESS_REMOTE_READ) ?\n+\t\t IRDMA_ACCESS_FLAGS_REMOTEREAD : 0;\n+\taccess |= (acc & IB_ACCESS_MW_BIND) ?\n+\t\t IRDMA_ACCESS_FLAGS_BIND_WINDOW : 0;\n+\n+\treturn access;\n+}\n+\n+/**\n+ * irdma_free_stag - free stag resource\n+ * @iwdev: iwarp device\n+ * @stag: stag to free\n+ */\n+static void irdma_free_stag(struct irdma_device *iwdev, u32 stag)\n+{\n+\tu32 stag_idx;\n+\n+\tstag_idx = (stag & iwdev->rf->mr_stagmask) >> IRDMA_CQPSQ_STAG_IDX_S;\n+\tirdma_free_rsrc(iwdev->rf, iwdev->rf->allocated_mrs, stag_idx);\n+}\n+\n+/**\n+ * irdma_create_stag - create random stag\n+ * @iwdev: iwarp device\n+ */\n+static u32 irdma_create_stag(struct irdma_device *iwdev)\n+{\n+\tu32 stag = 0;\n+\tu32 stag_index = 0;\n+\tu32 next_stag_index;\n+\tu32 driver_key;\n+\tu32 random;\n+\tu8 consumer_key;\n+\tint ret;\n+\n+\tget_random_bytes(&random, sizeof(random));\n+\tconsumer_key = (u8)random;\n+\n+\tdriver_key = random & ~iwdev->rf->mr_stagmask;\n+\tnext_stag_index = (random & iwdev->rf->mr_stagmask) >> 8;\n+\tnext_stag_index %= iwdev->rf->max_mr;\n+\n+\tret = irdma_alloc_rsrc(iwdev->rf, iwdev->rf->allocated_mrs,\n+\t\t\t iwdev->rf->max_mr, &stag_index,\n+\t\t\t &next_stag_index);\n+\tif (ret)\n+\t\treturn stag;\n+\tstag 
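/* compose the stag from the resource index, driver key and consumer key */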
= stag_index << IRDMA_CQPSQ_STAG_IDX_S;\n+\tstag |= driver_key;\n+\tstag += (u32)consumer_key;\n+\n+\treturn stag;\n+}\n+\n+/**\n+ * irdma_next_pbl_addr - Get next pbl address\n+ * @pbl: pointer to a pble\n+ * @pinfo: pointer to the current pble info\n+ * @idx: index into the current pble info\n+ */\n+static inline u64 *irdma_next_pbl_addr(u64 *pbl, struct irdma_pble_info **pinfo,\n+\t\t\t\t u32 *idx)\n+{\n+\t*idx += 1;\n+\tif (!(*pinfo) || *idx != (*pinfo)->cnt)\n+\t\treturn ++pbl;\n+\t*idx = 0;\n+\t(*pinfo)++;\n+\n+\treturn (u64 *)(uintptr_t)(*pinfo)->addr;\n+}\n+\n+/**\n+ * irdma_copy_user_pgaddrs - copy user page addresses into the local pble list\n+ * @iwmr: iwmr for IB's user page addresses\n+ * @pbl: pble pointer to save 1 level or 0 level pble\n+ * @level: indicates level 0, 1 or 2\n+ */\n+static void irdma_copy_user_pgaddrs(struct irdma_mr *iwmr, u64 *pbl,\n+\t\t\t\t enum irdma_pble_level level)\n+{\n+\tstruct ib_umem *region = iwmr->region;\n+\tstruct irdma_pbl *iwpbl = &iwmr->iwpbl;\n+\tstruct irdma_pble_alloc *palloc = &iwpbl->pble_alloc;\n+\tstruct irdma_pble_info *pinfo;\n+\tstruct ib_block_iter biter;\n+\tu32 idx = 0;\n+\tu32 pbl_cnt = 0;\n+\n+\tpinfo = (level == PBLE_LEVEL_1) ? NULL : palloc->level2.leaf;\n+\n+\tif (iwmr->type == IW_MEMREG_TYPE_QP)\n+\t\tiwpbl->qp_mr.sq_page = sg_page(region->sg_head.sgl);\n+\n+\trdma_for_each_block(region->sg_head.sgl, &biter, region->nmap,\n+\t\t\t iwmr->page_size) {\n+\t\t*pbl = rdma_block_iter_dma_address(&biter);\n+\t\tif (++pbl_cnt == palloc->total_cnt)\n+\t\t\tbreak;\n+\t\tpbl = irdma_next_pbl_addr(pbl, &pinfo, &idx);\n+\t}\n+}\n+\n+/**\n+ * irdma_check_mem_contiguous - check if pbls stored in arr are contiguous\n+ * @arr: lvl1 pbl array\n+ * @npages: page count\n+ * @pg_size: page size\n+ */\n+static bool irdma_check_mem_contiguous(u64 *arr, u32 npages, u32 pg_size)\n+{\n+\tu32 pg_idx;\n+\n+\tfor (pg_idx = 0; pg_idx < npages; pg_idx++) {\n+\t\tif ((*arr + (pg_size * pg_idx)) != arr[pg_idx])\n+\t\t\treturn false;\n+\t}\n+\n+\treturn true;\n+}\n+\n+/**\n+ * irdma_check_mr_contiguous - check if MR is physically contiguous\n+ * @palloc: pbl allocation struct\n+ * @pg_size: page size\n+ */\n+static bool irdma_check_mr_contiguous(struct irdma_pble_alloc *palloc,\n+\t\t\t\t u32 pg_size)\n+{\n+\tstruct irdma_pble_level2 *lvl2 = &palloc->level2;\n+\tstruct irdma_pble_info *leaf = lvl2->leaf;\n+\tu64 *arr = NULL;\n+\tu64 *start_addr = NULL;\n+\tint i;\n+\tbool ret;\n+\n+\tif (palloc->level == PBLE_LEVEL_1) {\n+\t\tarr = (u64 *)(uintptr_t)palloc->level1.addr;\n+\t\tret = irdma_check_mem_contiguous(arr, palloc->total_cnt,\n+\t\t\t\t\t\t pg_size);\n+\t\treturn ret;\n+\t}\n+\n+\tstart_addr = (u64 *)(uintptr_t)leaf->addr;\n+\n+\tfor (i = 0; i < lvl2->leaf_cnt; i++, leaf++) {\n+\t\tarr = (u64 *)(uintptr_t)leaf->addr;\n+\t\tif ((*start_addr + (i * pg_size * PBLE_PER_PAGE)) != *arr)\n+\t\t\treturn false;\n+\t\tret = irdma_check_mem_contiguous(arr, leaf->cnt, pg_size);\n+\t\tif (!ret)\n+\t\t\treturn false;\n+\t}\n+\n+\treturn true;\n+}\n+\n+/**\n+ * irdma_setup_pbles - copy user page addresses to pbles\n+ * @rf: RDMA PCI function\n+ * @iwmr: mr pointer for this memory registration\n+ * @use_pbles: flag to use pbles\n+ */\n+static int irdma_setup_pbles(struct irdma_pci_f *rf, struct irdma_mr *iwmr,\n+\t\t\t bool use_pbles)\n+{\n+\tstruct irdma_pbl *iwpbl = &iwmr->iwpbl;\n+\tstruct irdma_pble_alloc *palloc = &iwpbl->pble_alloc;\n+\tstruct irdma_pble_info *pinfo;\n+\tu64 *pbl;\n+\tenum irdma_status_code status;\n+\tenum irdma_pble_level level = PBLE_LEVEL_1;\n+\n+\tif (use_pbles) {\n+\t\tstatus = 
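/* reserve pbles from the device pble pool */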
irdma_get_pble(rf->pble_rsrc, palloc, iwmr->page_cnt,\n+\t\t\t\t\tfalse);\n+\t\tif (status)\n+\t\t\treturn -ENOMEM;\n+\n+\t\tiwpbl->pbl_allocated = true;\n+\t\tlevel = palloc->level;\n+\t\tpinfo = (level == PBLE_LEVEL_1) ? &palloc->level1 :\n+\t\t\t\t\t\t palloc->level2.leaf;\n+\t\tpbl = (u64 *)(uintptr_t)pinfo->addr;\n+\t} else {\n+\t\tpbl = iwmr->pgaddrmem;\n+\t}\n+\n+\tirdma_copy_user_pgaddrs(iwmr, pbl, level);\n+\n+\tif (use_pbles)\n+\t\tiwmr->pgaddrmem[0] = *pbl;\n+\n+\treturn 0;\n+}\n+\n+/**\n+ * irdma_handle_q_mem - handle memory for qp and cq\n+ * @iwdev: iwarp device\n+ * @req: information for q memory management\n+ * @iwpbl: pble struct\n+ * @use_pbles: flag to use pble\n+ */\n+static int irdma_handle_q_mem(struct irdma_device *iwdev,\n+\t\t\t struct irdma_mem_reg_req *req,\n+\t\t\t struct irdma_pbl *iwpbl, bool use_pbles)\n+{\n+\tstruct irdma_pble_alloc *palloc = &iwpbl->pble_alloc;\n+\tstruct irdma_mr *iwmr = iwpbl->iwmr;\n+\tstruct irdma_qp_mr *qpmr = &iwpbl->qp_mr;\n+\tstruct irdma_cq_mr *cqmr = &iwpbl->cq_mr;\n+\tstruct irdma_hmc_pble *hmc_p;\n+\tu64 *arr = iwmr->pgaddrmem;\n+\tu32 pg_size;\n+\tint err;\n+\tint total;\n+\tbool ret = true;\n+\n+\ttotal = req->sq_pages + req->rq_pages + req->cq_pages;\n+\tpg_size = iwmr->page_size;\n+\terr = irdma_setup_pbles(iwdev->rf, iwmr, use_pbles);\n+\tif (err)\n+\t\treturn err;\n+\n+\tif (use_pbles && palloc->level != PBLE_LEVEL_1) {\n+\t\tirdma_free_pble(iwdev->rf->pble_rsrc, palloc);\n+\t\tiwpbl->pbl_allocated = false;\n+\t\treturn -ENOMEM;\n+\t}\n+\n+\tif (use_pbles)\n+\t\tarr = (u64 *)(uintptr_t)palloc->level1.addr;\n+\n+\tif (iwmr->type == IW_MEMREG_TYPE_QP) {\n+\t\thmc_p = &qpmr->sq_pbl;\n+\t\tqpmr->shadow = (dma_addr_t)arr[total];\n+\n+\t\tif (use_pbles) {\n+\t\t\tret = irdma_check_mem_contiguous(arr, req->sq_pages,\n+\t\t\t\t\t\t\t pg_size);\n+\t\t\tif (ret)\n+\t\t\t\tret = irdma_check_mem_contiguous(&arr[req->sq_pages],\n+\t\t\t\t\t\t\t\t req->rq_pages,\n+\t\t\t\t\t\t\t\t pg_size);\n+\t\t}\n+\n+\t\tif (!ret) {\n+\t\t\thmc_p->idx = palloc->level1.idx;\n+\t\t\thmc_p = &qpmr->rq_pbl;\n+\t\t\thmc_p->idx = palloc->level1.idx + req->sq_pages;\n+\t\t} else {\n+\t\t\thmc_p->addr = arr[0];\n+\t\t\thmc_p = &qpmr->rq_pbl;\n+\t\t\thmc_p->addr = arr[req->sq_pages];\n+\t\t}\n+\t} else { /* CQ */\n+\t\thmc_p = &cqmr->cq_pbl;\n+\n+\t\tif (!cqmr->split)\n+\t\t\tcqmr->shadow = (dma_addr_t)arr[total];\n+\n+\t\tif (use_pbles)\n+\t\t\tret = irdma_check_mem_contiguous(arr, req->cq_pages,\n+\t\t\t\t\t\t\t pg_size);\n+\n+\t\tif (!ret)\n+\t\t\thmc_p->idx = palloc->level1.idx;\n+\t\telse\n+\t\t\thmc_p->addr = arr[0];\n+\t}\n+\n+\tif (use_pbles && ret) {\n+\t\tirdma_free_pble(iwdev->rf->pble_rsrc, palloc);\n+\t\tiwpbl->pbl_allocated = false;\n+\t}\n+\n+\treturn err;\n+}\n+\n+/**\n+ * irdma_hw_alloc_mw - create the hw memory window\n+ * @iwdev: iwarp device\n+ * @iwmr: pointer to memory window info\n+ */\n+static int irdma_hw_alloc_mw(struct irdma_device *iwdev, struct irdma_mr *iwmr)\n+{\n+\tstruct irdma_mw_alloc_info *info;\n+\tstruct irdma_pd *iwpd = to_iwpd(iwmr->ibmr.pd);\n+\tstruct irdma_cqp_request *cqp_request;\n+\tstruct cqp_cmds_info *cqp_info;\n+\n+\tcqp_request = irdma_get_cqp_request(&iwdev->rf->cqp, true);\n+\tif (!cqp_request)\n+\t\treturn -ENOMEM;\n+\n+\tcqp_info = &cqp_request->info;\n+\tinfo = &cqp_info->in.u.mw_alloc.info;\n+\tmemset(info, 0, sizeof(*info));\n+\tif (iwmr->ibmw.type == IB_MW_TYPE_1)\n+\t\tinfo->mw_wide = true;\n+\n+\tinfo->page_size = PAGE_SIZE;\n+\tinfo->mw_stag_index = iwmr->stag >> 
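/* shift out the key bits to recover the stag index */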
IRDMA_CQPSQ_STAG_IDX_S;\n+\tinfo->pd_id = iwpd->sc_pd.pd_id;\n+\tinfo->remote_access = true;\n+\tcqp_info->cqp_cmd = IRDMA_OP_MW_ALLOC;\n+\tcqp_info->post_sq = 1;\n+\tcqp_info->in.u.mw_alloc.dev = &iwdev->rf->sc_dev;\n+\tcqp_info->in.u.mw_alloc.scratch = (uintptr_t)cqp_request;\n+\tif (irdma_handle_cqp_op(iwdev->rf, cqp_request)) {\n+\t\tibdev_dbg(to_ibdev(iwdev), \"VERBS: CQP-OP alloc MW failed\\n\");\n+\t\treturn -ENOMEM;\n+\t}\n+\n+\treturn 0;\n+}\n+\n+/**\n+ * irdma_alloc_mw - allocate memory window\n+ * @pd: Protection domain\n+ * @type: Window type\n+ * @udata: user data pointer\n+ */\n+static struct ib_mw *irdma_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,\n+\t\t\t\t struct ib_udata *udata)\n+{\n+\tstruct irdma_device *iwdev = to_iwdev(pd->device);\n+\tstruct irdma_mr *iwmr;\n+\tint err_code;\n+\tu32 stag;\n+\n+\tiwmr = kzalloc(sizeof(*iwmr), GFP_KERNEL);\n+\tif (!iwmr)\n+\t\treturn ERR_PTR(-ENOMEM);\n+\n+\tstag = irdma_create_stag(iwdev);\n+\tif (!stag) {\n+\t\terr_code = -ENOMEM;\n+\t\tgoto err;\n+\t}\n+\n+\tiwmr->stag = stag;\n+\tiwmr->ibmw.rkey = stag;\n+\tiwmr->ibmw.pd = pd;\n+\tiwmr->ibmw.type = type;\n+\tiwmr->ibmw.device = pd->device;\n+\tiwmr->type = IW_MEMREG_TYPE_MW;\n+\n+\terr_code = irdma_hw_alloc_mw(iwdev, iwmr);\n+\tif (err_code)\n+\t\tgoto err1;\n+\n+\treturn &iwmr->ibmw;\n+\n+err1:\n+\tirdma_free_stag(iwdev, stag);\n+err:\n+\tkfree(iwmr);\n+\n+\treturn ERR_PTR(err_code);\n+}\n+\n+/**\n+ * irdma_dealloc_mw - deallocate memory window\n+ * @ibmw: memory window structure.\n+ */\n+static int irdma_dealloc_mw(struct ib_mw *ibmw)\n+{\n+\tstruct ib_pd *ibpd = ibmw->pd;\n+\tstruct irdma_pd *iwpd = to_iwpd(ibpd);\n+\tstruct irdma_mr *iwmr = to_iwmr((struct ib_mr *)ibmw);\n+\tstruct irdma_device *iwdev = to_iwdev(ibmw->device);\n+\tstruct irdma_cqp_request *cqp_request;\n+\tstruct cqp_cmds_info *cqp_info;\n+\tstruct irdma_dealloc_stag_info *info;\n+\n+\tcqp_request = irdma_get_cqp_request(&iwdev->rf->cqp, true);\n+\tif (!cqp_request)\n+\t\treturn -ENOMEM;\n+\n+\tcqp_info = &cqp_request->info;\n+\tinfo = &cqp_info->in.u.dealloc_stag.info;\n+\tmemset(info, 0, sizeof(*info));\n+\tinfo->pd_id = iwpd->sc_pd.pd_id & 0x00007fff;\n+\tinfo->stag_idx = RS_64_1(ibmw->rkey, IRDMA_CQPSQ_STAG_IDX_S);\n+\tinfo->mr = false;\n+\tcqp_info->cqp_cmd = IRDMA_OP_DEALLOC_STAG;\n+\tcqp_info->post_sq = 1;\n+\tcqp_info->in.u.dealloc_stag.dev = &iwdev->rf->sc_dev;\n+\tcqp_info->in.u.dealloc_stag.scratch = (uintptr_t)cqp_request;\n+\tif (irdma_handle_cqp_op(iwdev->rf, cqp_request))\n+\t\tibdev_dbg(to_ibdev(iwdev),\n+\t\t\t \"VERBS: CQP-OP dealloc MW failed for stag_idx = 0x%x\\n\",\n+\t\t\t info->stag_idx);\n+\tirdma_free_stag(iwdev, iwmr->stag);\n+\tkfree(iwmr);\n+\n+\treturn 0;\n+}\n+\n+/**\n+ * irdma_hw_alloc_stag - cqp command to allocate stag\n+ * @iwdev: iwarp device\n+ * @iwmr: iwarp mr pointer\n+ */\n+static int irdma_hw_alloc_stag(struct irdma_device *iwdev,\n+\t\t\t struct irdma_mr *iwmr)\n+{\n+\tstruct irdma_allocate_stag_info *info;\n+\tstruct irdma_pd *iwpd = to_iwpd(iwmr->ibmr.pd);\n+\tenum irdma_status_code status;\n+\tint err = 0;\n+\tstruct irdma_cqp_request *cqp_request;\n+\tstruct cqp_cmds_info *cqp_info;\n+\n+\tcqp_request = irdma_get_cqp_request(&iwdev->rf->cqp, true);\n+\tif (!cqp_request)\n+\t\treturn -ENOMEM;\n+\n+\tcqp_info = &cqp_request->info;\n+\tinfo = &cqp_info->in.u.alloc_stag.info;\n+\tmemset(info, 0, sizeof(*info));\n+\tinfo->page_size = PAGE_SIZE;\n+\tinfo->stag_idx = iwmr->stag >> IRDMA_CQPSQ_STAG_IDX_S;\n+\tinfo->pd_id = iwpd->sc_pd.pd_id;\n+\tinfo->total_len = iwmr->len;\n+\tinfo->remote_access = 
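/* allocated stags are enabled for remote access */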
true;\n+\tcqp_info->cqp_cmd = IRDMA_OP_ALLOC_STAG;\n+\tcqp_info->post_sq = 1;\n+\tcqp_info->in.u.alloc_stag.dev = &iwdev->rf->sc_dev;\n+\tcqp_info->in.u.alloc_stag.scratch = (uintptr_t)cqp_request;\n+\tstatus = irdma_handle_cqp_op(iwdev->rf, cqp_request);\n+\tif (status) {\n+\t\terr = -ENOMEM;\n+\t\tibdev_dbg(to_ibdev(iwdev), \"VERBS: CQP-OP MR alloc stag fail\");\n+\t}\n+\n+\treturn err;\n+}\n+\n+/**\n+ * irdma_alloc_mr - register stag for fast memory registration\n+ * @pd: ibpd pointer\n+ * @mr_type: memory for stag registration\n+ * @max_num_sg: max number of pages\n+ * @udata: user data\n+ */\n+static struct ib_mr *irdma_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,\n+\t\t\t\t u32 max_num_sg, struct ib_udata *udata)\n+{\n+\tstruct irdma_device *iwdev = to_iwdev(pd->device);\n+\tstruct irdma_pble_alloc *palloc;\n+\tstruct irdma_pbl *iwpbl;\n+\tstruct irdma_mr *iwmr;\n+\tenum irdma_status_code status;\n+\tu32 stag;\n+\tint err_code = -ENOMEM;\n+\n+\tiwmr = kzalloc(sizeof(*iwmr), GFP_KERNEL);\n+\tif (!iwmr)\n+\t\treturn ERR_PTR(-ENOMEM);\n+\n+\tstag = irdma_create_stag(iwdev);\n+\tif (!stag) {\n+\t\terr_code = -ENOMEM;\n+\t\tgoto err;\n+\t}\n+\n+\tiwmr->stag = stag;\n+\tiwmr->ibmr.rkey = stag;\n+\tiwmr->ibmr.lkey = stag;\n+\tiwmr->ibmr.pd = pd;\n+\tiwmr->ibmr.device = pd->device;\n+\tiwpbl = &iwmr->iwpbl;\n+\tiwpbl->iwmr = iwmr;\n+\tiwmr->type = IW_MEMREG_TYPE_MEM;\n+\tpalloc = &iwpbl->pble_alloc;\n+\tiwmr->page_cnt = max_num_sg;\n+\tstatus = irdma_get_pble(iwdev->rf->pble_rsrc, palloc, iwmr->page_cnt,\n+\t\t\t\ttrue);\n+\tif (status)\n+\t\tgoto err1;\n+\n+\terr_code = irdma_hw_alloc_stag(iwdev, iwmr);\n+\tif (err_code)\n+\t\tgoto err2;\n+\n+\tiwpbl->pbl_allocated = true;\n+\n+\treturn &iwmr->ibmr;\n+err2:\n+\tirdma_free_pble(iwdev->rf->pble_rsrc, palloc);\n+err1:\n+\tirdma_free_stag(iwdev, stag);\n+err:\n+\tkfree(iwmr);\n+\n+\treturn ERR_PTR(err_code);\n+}\n+\n+/**\n+ * irdma_set_page - populate pbl list for fmr\n+ * @ibmr: ib mem to access iwarp mr pointer\n+ * @addr: page dma address for pbl list\n+ */\n+static int irdma_set_page(struct ib_mr *ibmr, u64 addr)\n+{\n+\tstruct irdma_mr *iwmr = to_iwmr(ibmr);\n+\tstruct irdma_pbl *iwpbl = &iwmr->iwpbl;\n+\tstruct irdma_pble_alloc *palloc = &iwpbl->pble_alloc;\n+\tu64 *pbl;\n+\n+\tif (unlikely(iwmr->npages == iwmr->page_cnt))\n+\t\treturn -ENOMEM;\n+\n+\tpbl = (u64 *)(uintptr_t)palloc->level1.addr;\n+\tpbl[iwmr->npages++] = addr;\n+\n+\treturn 0;\n+}\n+\n+/**\n+ * irdma_map_mr_sg - map sg list for fmr\n+ * @ibmr: ib mem to access iwarp mr pointer\n+ * @sg: scatter gather list\n+ * @sg_nents: number of sg entries\n+ * @sg_offset: scatter gather offset\n+ */\n+static int irdma_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,\n+\t\t\t int sg_nents, unsigned int *sg_offset)\n+{\n+\tstruct irdma_mr *iwmr = to_iwmr(ibmr);\n+\n+\tiwmr->npages = 0;\n+\n+\treturn ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, irdma_set_page);\n+}\n+\n+/**\n+ * irdma_drain_sq - drain the send queue\n+ * @ibqp: ib qp pointer\n+ */\n+static void irdma_drain_sq(struct ib_qp *ibqp)\n+{\n+\tstruct irdma_qp *iwqp = to_iwqp(ibqp);\n+\tstruct irdma_sc_qp *qp = &iwqp->sc_qp;\n+\n+\tif (IRDMA_RING_MORE_WORK(qp->qp_uk.sq_ring))\n+\t\twait_for_completion(&iwqp->sq_drained);\n+}\n+\n+/**\n+ * irdma_drain_rq - drain the receive queue\n+ * @ibqp: ib qp pointer\n+ */\n+static void irdma_drain_rq(struct ib_qp *ibqp)\n+{\n+\tstruct irdma_qp *iwqp = to_iwqp(ibqp);\n+\tstruct irdma_sc_qp *qp = &iwqp->sc_qp;\n+\n+\tif 
(IRDMA_RING_MORE_WORK(qp->qp_uk.rq_ring))\n+\t\twait_for_completion(&iwqp->rq_drained);\n+}\n+\n+/**\n+ * irdma_hwreg_mr - send cqp command for memory registration\n+ * @iwdev: iwarp device\n+ * @iwmr: iwarp mr pointer\n+ * @access: access for MR\n+ */\n+static int irdma_hwreg_mr(struct irdma_device *iwdev, struct irdma_mr *iwmr,\n+\t\t\t u16 access)\n+{\n+\tstruct irdma_pbl *iwpbl = &iwmr->iwpbl;\n+\tstruct irdma_reg_ns_stag_info *stag_info;\n+\tstruct irdma_pd *iwpd = to_iwpd(iwmr->ibmr.pd);\n+\tstruct irdma_pble_alloc *palloc = &iwpbl->pble_alloc;\n+\tenum irdma_status_code status;\n+\tint err = 0;\n+\tstruct irdma_cqp_request *cqp_request;\n+\tstruct cqp_cmds_info *cqp_info;\n+\n+\tcqp_request = irdma_get_cqp_request(&iwdev->rf->cqp, true);\n+\tif (!cqp_request)\n+\t\treturn -ENOMEM;\n+\n+\tcqp_info = &cqp_request->info;\n+\tstag_info = &cqp_info->in.u.mr_reg_non_shared.info;\n+\tmemset(stag_info, 0, sizeof(*stag_info));\n+\tstag_info->va = (void *)(unsigned long)iwpbl->user_base;\n+\tstag_info->stag_idx = iwmr->stag >> IRDMA_CQPSQ_STAG_IDX_S;\n+\tstag_info->stag_key = (u8)iwmr->stag;\n+\tstag_info->total_len = iwmr->len;\n+\tstag_info->access_rights = access;\n+\tstag_info->pd_id = iwpd->sc_pd.pd_id;\n+\tstag_info->addr_type = IRDMA_ADDR_TYPE_VA_BASED;\n+\tstag_info->page_size = iwmr->page_size;\n+\n+\tif (iwpbl->pbl_allocated) {\n+\t\tif (palloc->level == PBLE_LEVEL_1) {\n+\t\t\tstag_info->first_pm_pbl_index = palloc->level1.idx;\n+\t\t\tstag_info->chunk_size = 1;\n+\t\t} else {\n+\t\t\tstag_info->first_pm_pbl_index = palloc->level2.root.idx;\n+\t\t\tstag_info->chunk_size = 3;\n+\t\t}\n+\t} else {\n+\t\tstag_info->reg_addr_pa = iwmr->pgaddrmem[0];\n+\t}\n+\n+\tcqp_info->cqp_cmd = IRDMA_OP_MR_REG_NON_SHARED;\n+\tcqp_info->post_sq = 1;\n+\tcqp_info->in.u.mr_reg_non_shared.dev = &iwdev->rf->sc_dev;\n+\tcqp_info->in.u.mr_reg_non_shared.scratch = (uintptr_t)cqp_request;\n+\tstatus = irdma_handle_cqp_op(iwdev->rf, cqp_request);\n+\tif (status) {\n+\t\terr = -ENOMEM;\n+\t\tibdev_dbg(to_ibdev(iwdev), \"VERBS: CQP-OP MR Reg fail\");\n+\t}\n+\n+\treturn err;\n+}\n+\n+/**\n+ * irdma_reg_user_mr - Register a user memory region\n+ * @pd: ptr of pd\n+ * @start: virtual start address\n+ * @len: length of mr\n+ * @virt: virtual address\n+ * @acc: access of mr\n+ * @udata: user data\n+ */\n+static struct ib_mr *irdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 len,\n+\t\t\t\t u64 virt, int acc,\n+\t\t\t\t struct ib_udata *udata)\n+{\n+\tstruct irdma_device *iwdev = to_iwdev(pd->device);\n+\tstruct irdma_ucontext *ucontext =\n+\t\trdma_udata_to_drv_context(udata, struct irdma_ucontext,\n+\t\t\t\t\t ibucontext);\n+\tstruct irdma_pble_alloc *palloc;\n+\tstruct irdma_pbl *iwpbl;\n+\tstruct irdma_mr *iwmr;\n+\tstruct ib_umem *region;\n+\tstruct irdma_mem_reg_req req;\n+\tu64 pbl_depth = 0;\n+\tu32 stag = 0;\n+\tu16 access;\n+\tu64 region_len;\n+\tbool use_pbles = false;\n+\tunsigned long flags;\n+\tint err = -ENOSYS;\n+\tint ret, pg_shift;\n+\n+\tif (len > iwdev->rf->sc_dev.hw_attrs.max_mr_size)\n+\t\treturn ERR_PTR(-EINVAL);\n+\n+\tregion = ib_umem_get(udata, start, len, acc, 0);\n+\tif (IS_ERR(region))\n+\t\treturn (struct ib_mr *)region;\n+\n+\tif (ib_copy_from_udata(&req, udata, min(sizeof(req), udata->inlen))) {\n+\t\tib_umem_release(region);\n+\t\treturn ERR_PTR(-EFAULT);\n+\t}\n+\n+\tiwmr = kzalloc(sizeof(*iwmr), GFP_KERNEL);\n+\tif (!iwmr) {\n+\t\tib_umem_release(region);\n+\t\treturn ERR_PTR(-ENOMEM);\n+\t}\n+\n+\tiwpbl = &iwmr->iwpbl;\n+\tiwpbl->iwmr = iwmr;\n+\tiwmr->region = 
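/* the umem stays pinned until dereg or error unwind */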
region;\n+\tiwmr->ibmr.pd = pd;\n+\tiwmr->ibmr.device = pd->device;\n+\tiwmr->page_size = PAGE_SIZE;\n+\n+\tif (req.reg_type == IW_MEMREG_TYPE_MEM)\n+\t\tiwmr->page_size = ib_umem_find_best_pgsz(region,\n+\t\t\t\t\t\t\t SZ_4K | SZ_2M | SZ_1G,\n+\t\t\t\t\t\t\t virt);\n+\tregion_len = region->length + (start & (iwmr->page_size - 1));\n+\tpg_shift = ffs(iwmr->page_size) - 1;\n+\tpbl_depth = region_len >> pg_shift;\n+\tpbl_depth += (region_len & (iwmr->page_size - 1)) ? 1 : 0;\n+\tiwmr->len = region->length;\n+\tiwpbl->user_base = virt;\n+\tpalloc = &iwpbl->pble_alloc;\n+\tiwmr->type = req.reg_type;\n+\tiwmr->page_cnt = (u32)pbl_depth;\n+\n+\tswitch (req.reg_type) {\n+\tcase IW_MEMREG_TYPE_QP:\n+\t\tuse_pbles = ((req.sq_pages + req.rq_pages) > 2);\n+\t\terr = irdma_handle_q_mem(iwdev, &req, iwpbl, use_pbles);\n+\t\tif (err)\n+\t\t\tgoto error;\n+\n+\t\tspin_lock_irqsave(&ucontext->qp_reg_mem_list_lock, flags);\n+\t\tlist_add_tail(&iwpbl->list, &ucontext->qp_reg_mem_list);\n+\t\tiwpbl->on_list = true;\n+\t\tspin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, flags);\n+\t\tbreak;\n+\tcase IW_MEMREG_TYPE_CQ:\n+\t\tuse_pbles = (req.cq_pages > 1);\n+\t\terr = irdma_handle_q_mem(iwdev, &req, iwpbl, use_pbles);\n+\t\tif (err)\n+\t\t\tgoto error;\n+\n+\t\tspin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);\n+\t\tlist_add_tail(&iwpbl->list, &ucontext->cq_reg_mem_list);\n+\t\tiwpbl->on_list = true;\n+\t\tspin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);\n+\t\tbreak;\n+\tcase IW_MEMREG_TYPE_MEM:\n+\t\tuse_pbles = (iwmr->page_cnt != 1);\n+\t\taccess = IRDMA_ACCESS_FLAGS_LOCALREAD;\n+\n+\t\terr = irdma_setup_pbles(iwdev->rf, iwmr, use_pbles);\n+\t\tif (err)\n+\t\t\tgoto error;\n+\n+\t\tif (use_pbles) {\n+\t\t\tret = irdma_check_mr_contiguous(palloc,\n+\t\t\t\t\t\t\tiwmr->page_size);\n+\t\t\tif (ret) {\n+\t\t\t\tirdma_free_pble(iwdev->rf->pble_rsrc, palloc);\n+\t\t\t\tiwpbl->pbl_allocated = false;\n+\t\t\t}\n+\t\t}\n+\n+\t\taccess |= irdma_get_user_access(acc);\n+\t\tstag = irdma_create_stag(iwdev);\n+\t\tif (!stag) {\n+\t\t\terr = -ENOMEM;\n+\t\t\tgoto error;\n+\t\t}\n+\n+\t\tiwmr->stag = stag;\n+\t\tiwmr->ibmr.rkey = stag;\n+\t\tiwmr->ibmr.lkey = stag;\n+\t\terr = irdma_hwreg_mr(iwdev, iwmr, access);\n+\t\tif (err) {\n+\t\t\tirdma_free_stag(iwdev, stag);\n+\t\t\tgoto error;\n+\t\t}\n+\n+\t\tbreak;\n+\tdefault:\n+\t\tgoto error;\n+\t}\n+\n+\tiwmr->type = req.reg_type;\n+\n+\treturn &iwmr->ibmr;\n+\n+error:\n+\tif (palloc->level != PBLE_LEVEL_0 && iwpbl->pbl_allocated)\n+\t\tirdma_free_pble(iwdev->rf->pble_rsrc, palloc);\n+\tib_umem_release(region);\n+\tkfree(iwmr);\n+\n+\treturn ERR_PTR(err);\n+}\n+\n+/**\n+ * irdma_reg_phys_mr - register kernel physical memory\n+ * @pd: ibpd pointer\n+ * @addr: physical address of memory to register\n+ * @size: size of memory to register\n+ * @acc: Access rights\n+ * @iova_start: start of virtual address for physical buffers\n+ */\n+struct ib_mr *irdma_reg_phys_mr(struct ib_pd *pd, u64 addr, u64 size, int acc,\n+\t\t\t\tu64 *iova_start)\n+{\n+\tstruct irdma_device *iwdev = to_iwdev(pd->device);\n+\tstruct irdma_pbl *iwpbl;\n+\tstruct irdma_mr *iwmr;\n+\tenum irdma_status_code status;\n+\tu32 stag;\n+\tu16 access = IRDMA_ACCESS_FLAGS_LOCALREAD;\n+\tint ret;\n+\n+\tiwmr = kzalloc(sizeof(*iwmr), GFP_KERNEL);\n+\tif (!iwmr)\n+\t\treturn ERR_PTR(-ENOMEM);\n+\n+\tiwmr->ibmr.pd = pd;\n+\tiwmr->ibmr.device = pd->device;\n+\tiwpbl = &iwmr->iwpbl;\n+\tiwpbl->iwmr = iwmr;\n+\tiwmr->type = IW_MEMREG_TYPE_MEM;\n+\tiwpbl->user_base = *iova_start;\n+\tstag = 
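/* returns 0 when stag allocation fails */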
irdma_create_stag(iwdev);\n+\tif (!stag) {\n+\t\tret = -ENOMEM;\n+\t\tgoto err;\n+\t}\n+\n+\taccess |= irdma_get_user_access(acc);\n+\tiwmr->stag = stag;\n+\tiwmr->ibmr.rkey = stag;\n+\tiwmr->ibmr.lkey = stag;\n+\tiwmr->page_cnt = 1;\n+\tiwmr->pgaddrmem[0] = addr;\n+\tiwmr->len = size;\n+\tstatus = irdma_hwreg_mr(iwdev, iwmr, access);\n+\tif (status) {\n+\t\tirdma_free_stag(iwdev, stag);\n+\t\tret = -ENOMEM;\n+\t\tgoto err;\n+\t}\n+\n+\treturn &iwmr->ibmr;\n+\n+err:\n+\tkfree(iwmr);\n+\n+\treturn ERR_PTR(ret);\n+}\n+\n+/**\n+ * irdma_get_dma_mr - register physical mem\n+ * @pd: ptr of pd\n+ * @acc: access for memory\n+ */\n+static struct ib_mr *irdma_get_dma_mr(struct ib_pd *pd, int acc)\n+{\n+\tu64 kva = 0;\n+\n+\treturn irdma_reg_phys_mr(pd, 0, 0, acc, &kva);\n+}\n+\n+/**\n+ * irdma_del_memlist - Deleting pbl list entries for CQ/QP\n+ * @iwmr: iwmr for IB's user page addresses\n+ * @ucontext: ptr to user context\n+ */\n+static void irdma_del_memlist(struct irdma_mr *iwmr,\n+\t\t\t struct irdma_ucontext *ucontext)\n+{\n+\tstruct irdma_pbl *iwpbl = &iwmr->iwpbl;\n+\tunsigned long flags;\n+\n+\tswitch (iwmr->type) {\n+\tcase IW_MEMREG_TYPE_CQ:\n+\t\tspin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);\n+\t\tif (iwpbl->on_list) {\n+\t\t\tiwpbl->on_list = false;\n+\t\t\tlist_del(&iwpbl->list);\n+\t\t}\n+\t\tspin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);\n+\t\tbreak;\n+\tcase IW_MEMREG_TYPE_QP:\n+\t\tspin_lock_irqsave(&ucontext->qp_reg_mem_list_lock, flags);\n+\t\tif (iwpbl->on_list) {\n+\t\t\tiwpbl->on_list = false;\n+\t\t\tlist_del(&iwpbl->list);\n+\t\t}\n+\t\tspin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, flags);\n+\t\tbreak;\n+\tdefault:\n+\t\tbreak;\n+\t}\n+}\n+\n+/**\n+ * irdma_dereg_mr - deregister mr\n+ * @ib_mr: mr ptr for dereg\n+ * @udata: user data\n+ */\n+static int irdma_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata)\n+{\n+\tstruct ib_pd *ibpd = ib_mr->pd;\n+\tstruct irdma_pd *iwpd = to_iwpd(ibpd);\n+\tstruct irdma_mr *iwmr = to_iwmr(ib_mr);\n+\tstruct irdma_device *iwdev = to_iwdev(ib_mr->device);\n+\tenum irdma_status_code status;\n+\tstruct irdma_dealloc_stag_info *info;\n+\tstruct irdma_pbl *iwpbl = &iwmr->iwpbl;\n+\tstruct irdma_pble_alloc *palloc = &iwpbl->pble_alloc;\n+\tstruct irdma_cqp_request *cqp_request;\n+\tstruct cqp_cmds_info *cqp_info;\n+\tu32 stag_idx;\n+\n+\tif (iwmr->type != IW_MEMREG_TYPE_MEM) {\n+\t\tif (iwmr->region) {\n+\t\t\tstruct irdma_ucontext *ucontext;\n+\n+\t\t\tucontext = rdma_udata_to_drv_context(udata,\n+\t\t\t\t\t\tstruct irdma_ucontext,\n+\t\t\t\t\t\tibucontext);\n+\t\t\tirdma_del_memlist(iwmr, ucontext);\n+\t\t}\n+\t\tgoto done;\n+\t}\n+\n+\tcqp_request = irdma_get_cqp_request(&iwdev->rf->cqp, true);\n+\tif (!cqp_request)\n+\t\treturn -ENOMEM;\n+\n+\tcqp_info = &cqp_request->info;\n+\tinfo = &cqp_info->in.u.dealloc_stag.info;\n+\tmemset(info, 0, sizeof(*info));\n+\tinfo->pd_id = iwpd->sc_pd.pd_id & 0x00007fff;\n+\tinfo->stag_idx = RS_64_1(ib_mr->rkey, IRDMA_CQPSQ_STAG_IDX_S);\n+\tstag_idx = info->stag_idx;\n+\tinfo->mr = true;\n+\tif (iwpbl->pbl_allocated)\n+\t\tinfo->dealloc_pbl = true;\n+\n+\tcqp_info->cqp_cmd = IRDMA_OP_DEALLOC_STAG;\n+\tcqp_info->post_sq = 1;\n+\tcqp_info->in.u.dealloc_stag.dev = &iwdev->rf->sc_dev;\n+\tcqp_info->in.u.dealloc_stag.scratch = (uintptr_t)cqp_request;\n+\tstatus = irdma_handle_cqp_op(iwdev->rf, cqp_request);\n+\tif (status)\n+\t\tibdev_dbg(to_ibdev(iwdev),\n+\t\t\t \"VERBS: CQP-OP dealloc failed for stag_idx = 0x%x\\n\",\n+\t\t\t stag_idx);\n+\tirdma_free_stag(iwdev, 
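/* return the stag index to the resource bitmap */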
iwmr->stag);\n+done:\n+\tif (iwpbl->pbl_allocated)\n+\t\tirdma_free_pble(iwdev->rf->pble_rsrc, palloc);\n+\tib_umem_release(iwmr->region);\n+\tkfree(iwmr);\n+\n+\treturn 0;\n+}\n+\n+static ssize_t hw_rev_show(struct device *dev, struct device_attribute *attr,\n+\t\t\t char *buf)\n+{\n+\tstruct irdma_ib_device *iwibdev =\n+\t\trdma_device_to_drv_device(dev, struct irdma_ib_device, ibdev);\n+\tu32 hw_rev = iwibdev->iwdev->rf->sc_dev.pci_rev;\n+\n+\treturn sprintf(buf, \"%x\\n\", hw_rev);\n+}\n+\n+static ssize_t hca_type_show(struct device *dev, struct device_attribute *attr,\n+\t\t\t char *buf)\n+{\n+\treturn sprintf(buf, \"IRDMA\\n\");\n+}\n+\n+static DEVICE_ATTR_RO(hw_rev);\n+static DEVICE_ATTR_RO(hca_type);\n+\n+static struct attribute *irdma_dev_attributes[] = { &dev_attr_hw_rev.attr,\n+\t\t\t\t\t\t &dev_attr_hca_type.attr,\n+\t\t\t\t\t\t NULL };\n+\n+static const struct attribute_group irdma_attr_group = {\n+\t.attrs = irdma_dev_attributes,\n+};\n+\n+/**\n+ * irdma_copy_sg_list - copy sg list for qp\n+ * @sg_list: copied into sg_list\n+ * @sgl: copy from sgl\n+ * @num_sges: count of sg entries\n+ */\n+static void irdma_copy_sg_list(struct irdma_sge *sg_list, struct ib_sge *sgl,\n+\t\t\t int num_sges)\n+{\n+\tunsigned int i;\n+\n+\tfor (i = 0; (i < num_sges) && (i < IRDMA_MAX_WQ_FRAGMENT_COUNT); i++) {\n+\t\tsg_list[i].tag_off = sgl[i].addr;\n+\t\tsg_list[i].len = sgl[i].length;\n+\t\tsg_list[i].stag = sgl[i].lkey;\n+\t}\n+}\n+\n+/**\n+ * irdma_post_send - kernel application wr\n+ * @ibqp: qp ptr for wr\n+ * @ib_wr: work request ptr\n+ * @bad_wr: return of bad wr if err\n+ */\n+static int irdma_post_send(struct ib_qp *ibqp,\n+\t\t\t const struct ib_send_wr *ib_wr,\n+\t\t\t const struct ib_send_wr **bad_wr)\n+{\n+\tstruct irdma_qp *iwqp;\n+\tstruct irdma_qp_uk *ukqp;\n+\tstruct irdma_sc_dev *dev;\n+\tstruct irdma_post_sq_info info;\n+\tenum irdma_status_code ret;\n+\tint err = 0;\n+\tunsigned long flags;\n+\tbool inv_stag;\n+\tstruct irdma_ah *ah;\n+\tbool reflush = false;\n+\n+\tiwqp = to_iwqp(ibqp);\n+\tukqp = &iwqp->sc_qp.qp_uk;\n+\tdev = &iwqp->iwdev->rf->sc_dev;\n+\n+\tspin_lock_irqsave(&iwqp->lock, flags);\n+\tif (iwqp->flush_issued && ukqp->sq_flush_complete)\n+\t\treflush = true;\n+\n+\twhile (ib_wr) {\n+\t\tmemset(&info, 0, sizeof(info));\n+\t\tinv_stag = false;\n+\t\tinfo.wr_id = (ib_wr->wr_id);\n+\t\tif ((ib_wr->send_flags & IB_SEND_SIGNALED) || iwqp->sig_all)\n+\t\t\tinfo.signaled = true;\n+\t\tif (ib_wr->send_flags & IB_SEND_FENCE)\n+\t\t\tinfo.read_fence = true;\n+\t\tswitch (ib_wr->opcode) {\n+\t\tcase IB_WR_SEND_WITH_IMM:\n+\t\t\tif (ukqp->qp_caps & IRDMA_SEND_WITH_IMM) {\n+\t\t\t\tinfo.imm_data_valid = true;\n+\t\t\t\tinfo.imm_data = ntohl(ib_wr->ex.imm_data);\n+\t\t\t} else {\n+\t\t\t\terr = -EINVAL;\n+\t\t\t\tbreak;\n+\t\t\t}\n+\t\t\t/* fall-through */\n+\t\tcase IB_WR_SEND:\n+\t\t\t/* fall-through */\n+\t\tcase IB_WR_SEND_WITH_INV:\n+\t\t\tif (ib_wr->opcode == IB_WR_SEND ||\n+\t\t\t ib_wr->opcode == IB_WR_SEND_WITH_IMM) {\n+\t\t\t\tif (ib_wr->send_flags & IB_SEND_SOLICITED)\n+\t\t\t\t\tinfo.op_type = IRDMA_OP_TYPE_SEND_SOL;\n+\t\t\t\telse\n+\t\t\t\t\tinfo.op_type = IRDMA_OP_TYPE_SEND;\n+\t\t\t} else {\n+\t\t\t\tif (ib_wr->send_flags & IB_SEND_SOLICITED)\n+\t\t\t\t\tinfo.op_type = IRDMA_OP_TYPE_SEND_SOL_INV;\n+\t\t\t\telse\n+\t\t\t\t\tinfo.op_type = IRDMA_OP_TYPE_SEND_INV;\n+\t\t\t\tinfo.stag_to_inv = ib_wr->ex.invalidate_rkey;\n+\t\t\t}\n+\n+\t\t\tif (ib_wr->send_flags & IB_SEND_INLINE) {\n+\t\t\t\tinfo.op.inline_send.data = (void *)(unsigned long)\n+\t\t\t\t\t\t\t 
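/* inline data is copied from the VA in sg_list[0] */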
ib_wr->sg_list[0].addr;\n+\t\t\t\tinfo.op.inline_send.len = ib_wr->sg_list[0].length;\n+\t\t\t\tif (iwqp->ibqp.qp_type == IB_QPT_UD ||\n+\t\t\t\t iwqp->ibqp.qp_type == IB_QPT_GSI) {\n+\t\t\t\t\tah = to_iwah(ud_wr(ib_wr)->ah);\n+\t\t\t\t\tinfo.op.inline_send.ah_id = ah->sc_ah.ah_info.ah_idx;\n+\t\t\t\t\tinfo.op.inline_send.qkey = ud_wr(ib_wr)->remote_qkey;\n+\t\t\t\t\tinfo.op.inline_send.dest_qp = ud_wr(ib_wr)->remote_qpn;\n+\t\t\t\t}\n+\t\t\t\tret = ukqp->qp_ops.iw_inline_send(ukqp, &info,\n+\t\t\t\t\t\t\t\t false);\n+\t\t\t} else {\n+\t\t\t\tinfo.op.send.num_sges = ib_wr->num_sge;\n+\t\t\t\tinfo.op.send.sg_list = (struct irdma_sge *)\n+\t\t\t\t\t\t ib_wr->sg_list;\n+\t\t\t\tif (iwqp->ibqp.qp_type == IB_QPT_UD ||\n+\t\t\t\t iwqp->ibqp.qp_type == IB_QPT_GSI) {\n+\t\t\t\t\tah = to_iwah(ud_wr(ib_wr)->ah);\n+\t\t\t\t\tinfo.op.send.ah_id = ah->sc_ah.ah_info.ah_idx;\n+\t\t\t\t\tinfo.op.send.qkey = ud_wr(ib_wr)->remote_qkey;\n+\t\t\t\t\tinfo.op.send.dest_qp = ud_wr(ib_wr)->remote_qpn;\n+\t\t\t\t}\n+\t\t\t\tret = ukqp->qp_ops.iw_send(ukqp, &info, false);\n+\t\t\t}\n+\n+\t\t\tif (ret) {\n+\t\t\t\tif (ret == IRDMA_ERR_QP_TOOMANY_WRS_POSTED)\n+\t\t\t\t\terr = -ENOMEM;\n+\t\t\t\telse\n+\t\t\t\t\terr = -EINVAL;\n+\t\t\t}\n+\t\t\tbreak;\n+\t\tcase IB_WR_RDMA_WRITE_WITH_IMM:\n+\t\t\tif (ukqp->qp_caps & IRDMA_WRITE_WITH_IMM) {\n+\t\t\t\tinfo.imm_data_valid = true;\n+\t\t\t\tinfo.imm_data = ntohl(ib_wr->ex.imm_data);\n+\t\t\t} else {\n+\t\t\t\terr = -EINVAL;\n+\t\t\t\tbreak;\n+\t\t\t}\n+\t\t\t/* fall-through */\n+\t\tcase IB_WR_RDMA_WRITE:\n+\t\t\tif (ib_wr->send_flags & IB_SEND_SOLICITED)\n+\t\t\t\tinfo.op_type = IRDMA_OP_TYPE_RDMA_WRITE_SOL;\n+\t\t\telse\n+\t\t\t\tinfo.op_type = IRDMA_OP_TYPE_RDMA_WRITE;\n+\n+\t\t\tif (ib_wr->send_flags & IB_SEND_INLINE) {\n+\t\t\t\tinfo.op.inline_rdma_write.data = (void *)(uintptr_t)ib_wr->sg_list[0].addr;\n+\t\t\t\tinfo.op.inline_rdma_write.len = ib_wr->sg_list[0].length;\n+\t\t\t\tinfo.op.inline_rdma_write.rem_addr.tag_off = rdma_wr(ib_wr)->remote_addr;\n+\t\t\t\tinfo.op.inline_rdma_write.rem_addr.stag = rdma_wr(ib_wr)->rkey;\n+\t\t\t\tret = ukqp->qp_ops.iw_inline_rdma_write(ukqp, &info, false);\n+\t\t\t} else {\n+\t\t\t\tinfo.op.rdma_write.lo_sg_list = (void *)ib_wr->sg_list;\n+\t\t\t\tinfo.op.rdma_write.num_lo_sges = ib_wr->num_sge;\n+\t\t\t\tinfo.op.rdma_write.rem_addr.tag_off = rdma_wr(ib_wr)->remote_addr;\n+\t\t\t\tinfo.op.rdma_write.rem_addr.stag = rdma_wr(ib_wr)->rkey;\n+\t\t\t\tret = ukqp->qp_ops.iw_rdma_write(ukqp, &info, false);\n+\t\t\t}\n+\n+\t\t\tif (ret) {\n+\t\t\t\tif (ret == IRDMA_ERR_QP_TOOMANY_WRS_POSTED)\n+\t\t\t\t\terr = -ENOMEM;\n+\t\t\t\telse\n+\t\t\t\t\terr = -EINVAL;\n+\t\t\t}\n+\t\t\tbreak;\n+\t\tcase IB_WR_RDMA_READ_WITH_INV:\n+\t\t\tinv_stag = true;\n+\t\t\t/* fall-through*/\n+\t\tcase IB_WR_RDMA_READ:\n+\t\t\tif (ib_wr->num_sge >\n+\t\t\t dev->hw_attrs.uk_attrs.max_hw_read_sges) {\n+\t\t\t\terr = -EINVAL;\n+\t\t\t\tbreak;\n+\t\t\t}\n+\t\t\tinfo.op_type = IRDMA_OP_TYPE_RDMA_READ;\n+\t\t\tinfo.op.rdma_read.rem_addr.tag_off = rdma_wr(ib_wr)->remote_addr;\n+\t\t\tinfo.op.rdma_read.rem_addr.stag = rdma_wr(ib_wr)->rkey;\n+\t\t\tinfo.op.rdma_read.lo_sg_list = (void *)ib_wr->sg_list;\n+\t\t\tinfo.op.rdma_read.num_lo_sges = ib_wr->num_sge;\n+\n+\t\t\tret = ukqp->qp_ops.iw_rdma_read(ukqp, &info, inv_stag,\n+\t\t\t\t\t\t\tfalse);\n+\t\t\tif (ret) {\n+\t\t\t\tif (ret == IRDMA_ERR_QP_TOOMANY_WRS_POSTED)\n+\t\t\t\t\terr = -ENOMEM;\n+\t\t\t\telse\n+\t\t\t\t\terr = -EINVAL;\n+\t\t\t}\n+\t\t\tbreak;\n+\t\tcase IB_WR_LOCAL_INV:\n+\t\t\tinfo.op_type = 
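/* invalidate a local stag; no data is transferred */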
IRDMA_OP_TYPE_INV_STAG;\n+\t\t\tinfo.op.inv_local_stag.target_stag = ib_wr->ex.invalidate_rkey;\n+\t\t\tret = ukqp->qp_ops.iw_stag_local_invalidate(ukqp, &info, true);\n+\t\t\tif (ret)\n+\t\t\t\terr = -ENOMEM;\n+\t\t\tbreak;\n+\t\tcase IB_WR_REG_MR: {\n+\t\t\tstruct irdma_mr *iwmr = to_iwmr(reg_wr(ib_wr)->mr);\n+\t\t\tint flags = reg_wr(ib_wr)->access;\n+\t\t\tstruct irdma_pble_alloc *palloc = &iwmr->iwpbl.pble_alloc;\n+\t\t\tstruct irdma_fast_reg_stag_info info = {};\n+\n+\t\t\tinfo.access_rights = IRDMA_ACCESS_FLAGS_LOCALREAD;\n+\t\t\tinfo.access_rights |= irdma_get_user_access(flags);\n+\t\t\tinfo.stag_key = reg_wr(ib_wr)->key & 0xff;\n+\t\t\tinfo.stag_idx = reg_wr(ib_wr)->key >> 8;\n+\t\t\tinfo.page_size = reg_wr(ib_wr)->mr->page_size;\n+\t\t\tinfo.wr_id = ib_wr->wr_id;\n+\t\t\tinfo.addr_type = IRDMA_ADDR_TYPE_VA_BASED;\n+\t\t\tinfo.va = (void *)(uintptr_t)iwmr->ibmr.iova;\n+\t\t\tinfo.total_len = iwmr->ibmr.length;\n+\t\t\tinfo.reg_addr_pa = *((u64 *)(uintptr_t)palloc->level1.addr);\n+\t\t\tinfo.first_pm_pbl_index = palloc->level1.idx;\n+\t\t\tinfo.local_fence = ib_wr->send_flags & IB_SEND_FENCE;\n+\t\t\tinfo.signaled = ib_wr->send_flags & IB_SEND_SIGNALED;\n+\t\t\tif (iwmr->npages > IRDMA_MIN_PAGES_PER_FMR)\n+\t\t\t\tinfo.chunk_size = 1;\n+\t\t\tret = dev->iw_priv_qp_ops->iw_mr_fast_register(&iwqp->sc_qp,\n+\t\t\t\t\t\t\t\t &info,\n+\t\t\t\t\t\t\t\t true);\n+\t\t\tif (ret)\n+\t\t\t\terr = -ENOMEM;\n+\t\t\tbreak;\n+\t\t}\n+\t\tdefault:\n+\t\t\terr = -EINVAL;\n+\t\t\tibdev_dbg(to_ibdev(iwqp->iwdev),\n+\t\t\t\t \"VERBS: upost_send bad opcode = 0x%x\\n\",\n+\t\t\t\t ib_wr->opcode);\n+\t\t\tbreak;\n+\t\t}\n+\n+\t\tif (err)\n+\t\t\tbreak;\n+\t\tib_wr = ib_wr->next;\n+\t}\n+\n+\tif (!iwqp->flush_issued && iwqp->hw_iwarp_state <= IRDMA_QP_STATE_RTS) {\n+\t\tukqp->qp_ops.iw_qp_post_wr(ukqp);\n+\t\tspin_unlock_irqrestore(&iwqp->lock, flags);\n+\t} else if (reflush) {\n+\t\tstruct irdma_qp_flush_info flush_info = {};\n+\t\tstruct irdma_pci_f *rf = iwqp->iwdev->rf;\n+\n+\t\tiwqp->sc_qp.flush_sq = false;\n+\t\tiwqp->sc_qp.term_flags = 0;\n+\t\tspin_unlock_irqrestore(&iwqp->lock, flags);\n+\t\tukqp->sq_flush_complete = false;\n+\t\tflush_info.sq = true;\n+\t\tflush_info.sq_major_code = IRDMA_FLUSH_MAJOR_ERR;\n+\t\tflush_info.sq_minor_code = IRDMA_FLUSH_MAJOR_ERR;\n+\t\tirdma_hw_flush_wqes(rf, &iwqp->sc_qp, &flush_info, false);\n+\t} else {\n+\t\tspin_unlock_irqrestore(&iwqp->lock, flags);\n+\t}\n+\tif (err)\n+\t\t*bad_wr = ib_wr;\n+\n+\treturn err;\n+}\n+\n+/**\n+ * irdma_post_recv - post receive wr for kernel application\n+ * @ibqp: ib qp pointer\n+ * @ib_wr: work request for receive\n+ * @bad_wr: bad wr caused an error\n+ */\n+static int irdma_post_recv(struct ib_qp *ibqp,\n+\t\t\t const struct ib_recv_wr *ib_wr,\n+\t\t\t const struct ib_recv_wr **bad_wr)\n+{\n+\tstruct irdma_qp *iwqp;\n+\tstruct irdma_qp_uk *ukqp;\n+\tstruct irdma_post_rq_info post_recv = {};\n+\tstruct irdma_sge sg_list[IRDMA_MAX_WQ_FRAGMENT_COUNT];\n+\tenum irdma_status_code ret = 0;\n+\tunsigned long flags;\n+\tint err = 0;\n+\tbool reflush = false;\n+\n+\tiwqp = to_iwqp(ibqp);\n+\tukqp = &iwqp->sc_qp.qp_uk;\n+\n+\tspin_lock_irqsave(&iwqp->lock, flags);\n+\tif (iwqp->flush_issued && ukqp->rq_flush_complete)\n+\t\treflush = true;\n+\n+\twhile (ib_wr) {\n+\t\tpost_recv.num_sges = ib_wr->num_sge;\n+\t\tpost_recv.wr_id = ib_wr->wr_id;\n+\t\tirdma_copy_sg_list(sg_list, ib_wr->sg_list, ib_wr->num_sge);\n+\t\tpost_recv.sg_list = sg_list;\n+\t\tret = ukqp->qp_ops.iw_post_receive(ukqp, &post_recv);\n+\t\tif (ret) 
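/* stop at the first failed WR; it is handed back via bad_wr */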
{\n+\t\t\tibdev_dbg(to_ibdev(iwqp->iwdev),\n+\t\t\t\t \"VERBS: post_recv err %d\\n\", ret);\n+\t\t\tif (ret == IRDMA_ERR_QP_TOOMANY_WRS_POSTED)\n+\t\t\t\terr = -ENOMEM;\n+\t\t\telse\n+\t\t\t\terr = -EINVAL;\n+\t\t\tgoto out;\n+\t\t}\n+\n+\t\tib_wr = ib_wr->next;\n+\t}\n+\n+out:\n+\tif (reflush) {\n+\t\tstruct irdma_qp_flush_info flush_info = {};\n+\t\tstruct irdma_pci_f *rf = iwqp->iwdev->rf;\n+\n+\t\tiwqp->sc_qp.flush_rq = false;\n+\t\tspin_unlock_irqrestore(&iwqp->lock, flags);\n+\t\tukqp->rq_flush_complete = false;\n+\t\tflush_info.rq = true;\n+\t\tirdma_hw_flush_wqes(rf, &iwqp->sc_qp, &flush_info, false);\n+\t} else {\n+\t\tspin_unlock_irqrestore(&iwqp->lock, flags);\n+\t}\n+\n+\tif (err)\n+\t\t*bad_wr = ib_wr;\n+\n+\treturn err;\n+}\n+\n+/**\n+ * irdma_process_cqe - process cqe info\n+ * @entry: processed cqe\n+ * @cq_poll_info: cqe info\n+ */\n+static void irdma_process_cqe(struct ib_wc *entry,\n+\t\t\t struct irdma_cq_poll_info *cq_poll_info)\n+{\n+\tstruct irdma_qp *iwqp;\n+\tstruct irdma_sc_qp *qp;\n+\n+\tentry->wc_flags = 0;\n+\tentry->pkey_index = 0;\n+\tentry->wr_id = cq_poll_info->wr_id;\n+\n+\tif (cq_poll_info->error) {\n+\t\tif (cq_poll_info->comp_status ==\n+\t\t IRDMA_COMPL_STATUS_FLUSHED)\n+\t\t\tentry->status = IB_WC_WR_FLUSH_ERR;\n+\t\telse if (cq_poll_info->comp_status ==\n+\t\t\t IRDMA_COMPL_STATUS_INVALID_LEN)\n+\t\t\tentry->status = IB_WC_LOC_LEN_ERR;\n+\t\telse\n+\t\t\tentry->status = IB_WC_GENERAL_ERR;\n+\t\tentry->vendor_err = cq_poll_info->major_err << 16 |\n+\t\t\t\t cq_poll_info->minor_err;\n+\t} else {\n+\t\tentry->status = IB_WC_SUCCESS;\n+\t\tif (cq_poll_info->imm_valid) {\n+\t\t\tentry->ex.imm_data = htonl(cq_poll_info->imm_data);\n+\t\t\tentry->wc_flags |= IB_WC_WITH_IMM;\n+\t\t}\n+\t\tif (cq_poll_info->ud_smac_valid) {\n+\t\t\tether_addr_copy(entry->smac, cq_poll_info->ud_smac);\n+\t\t\tentry->wc_flags |= IB_WC_WITH_SMAC;\n+\t\t}\n+\n+\t\tif (cq_poll_info->ud_vlan_valid) {\n+\t\t\tentry->vlan_id = cq_poll_info->ud_vlan & VLAN_VID_MASK;\n+\t\t\tentry->wc_flags |= IB_WC_WITH_VLAN;\n+\t\t\tentry->sl = cq_poll_info->ud_vlan >> VLAN_PRIO_SHIFT;\n+\t\t}\n+\t}\n+\n+\tswitch (cq_poll_info->op_type) {\n+\tcase IRDMA_OP_TYPE_RDMA_WRITE:\n+\t\tentry->opcode = IB_WC_RDMA_WRITE;\n+\t\tbreak;\n+\tcase IRDMA_OP_TYPE_RDMA_READ_INV_STAG:\n+\tcase IRDMA_OP_TYPE_RDMA_READ:\n+\t\tentry->opcode = IB_WC_RDMA_READ;\n+\t\tbreak;\n+\tcase IRDMA_OP_TYPE_SEND_INV:\n+\tcase IRDMA_OP_TYPE_SEND_SOL:\n+\tcase IRDMA_OP_TYPE_SEND_SOL_INV:\n+\tcase IRDMA_OP_TYPE_SEND:\n+\t\tentry->opcode = IB_WC_SEND;\n+\t\tif (cq_poll_info->stag_invalid_set)\n+\t\t\tentry->ex.invalidate_rkey = cq_poll_info->inv_stag;\n+\t\tbreak;\n+\tcase IRDMA_OP_TYPE_REC:\n+\t\tentry->opcode = IB_WC_RECV;\n+\t\tbreak;\n+\tcase IRDMA_OP_TYPE_REC_IMM:\n+\t\tentry->opcode = IB_WC_RECV_RDMA_WITH_IMM;\n+\t\tbreak;\n+\tdefault:\n+\t\tentry->opcode = IB_WC_RECV;\n+\t\tbreak;\n+\t}\n+\n+\tqp = cq_poll_info->qp_handle;\n+\tentry->qp = qp->qp_uk.back_qp;\n+\n+\tif (qp->qp_type == IRDMA_QP_TYPE_ROCE_UD) {\n+\t\tentry->src_qp = cq_poll_info->ud_src_qpn;\n+\t\tentry->wc_flags |=\n+\t\t\t(IB_WC_GRH | IB_WC_WITH_NETWORK_HDR_TYPE);\n+\t\tentry->network_hdr_type = cq_poll_info->ipv4 ?\n+\t\t\t\t\t\t RDMA_NETWORK_IPV4 :\n+\t\t\t\t\t\t RDMA_NETWORK_IPV6;\n+\t} else {\n+\t\tentry->src_qp = cq_poll_info->qp_id;\n+\t}\n+\tiwqp = qp->qp_uk.back_qp;\n+\tif (iwqp->iwarp_state > IRDMA_QP_STATE_RTS) {\n+\t\tif (!IRDMA_RING_MORE_WORK(qp->qp_uk.sq_ring))\n+\t\t\tcomplete(&iwqp->sq_drained);\n+\t\tif 
(!IRDMA_RING_MORE_WORK(qp->qp_uk.rq_ring))\n+\t\t\tcomplete(&iwqp->rq_drained);\n+\t}\n+\tentry->byte_len = cq_poll_info->bytes_xfered;\n+}\n+\n+/**\n+ * irdma_get_cqes - get cq entries\n+ * @ukcq: cq to get completion entries from\n+ * @num_entries: requested number of entries\n+ * @cqe_count: received number of entries\n+ * @new_cqe: true, if at least one completion\n+ * @entry: wr of a completed entry\n+ */\n+static int irdma_get_cqes(struct irdma_cq_uk *ukcq,\n+\t\t\t int num_entries,\n+\t\t\t int *cqe_count,\n+\t\t\t bool *new_cqe,\n+\t\t\t struct ib_wc **entry)\n+{\n+\tstruct irdma_cq_poll_info cq_poll_info;\n+\tint ret = 0;\n+\n+\twhile (*cqe_count < num_entries) {\n+\t\tret = ukcq->ops.iw_cq_poll_cmpl(ukcq, &cq_poll_info);\n+\t\tif (ret == IRDMA_ERR_Q_EMPTY) {\n+\t\t\tbreak;\n+\t\t} else if (ret == IRDMA_ERR_Q_DESTROYED) {\n+\t\t\t*new_cqe = true;\n+\t\t\tcontinue;\n+\t\t} else if (ret) {\n+\t\t\tif (!*cqe_count)\n+\t\t\t\t*cqe_count = -1;\n+\t\t\treturn -EINVAL;\n+\t\t}\n+\t\t*new_cqe = true;\n+\t\tirdma_process_cqe(*entry, &cq_poll_info);\n+\t\t(*cqe_count)++;\n+\t\t(*entry)++;\n+\t}\n+\n+\treturn 0;\n+}\n+\n+/**\n+ * irdma_poll_cq - poll cq for completion (kernel apps)\n+ * @ibcq: cq to poll\n+ * @num_entries: number of entries to poll\n+ * @entry: wr of a completed entry\n+ */\n+static int irdma_poll_cq(struct ib_cq *ibcq, int num_entries,\n+\t\t\t struct ib_wc *entry)\n+{\n+\tstruct list_head *tmp_node, *list_node;\n+\tstruct irdma_cq_buf *last_buf = NULL;\n+\tstruct irdma_cq_buf *cq_buf;\n+\tenum irdma_status_code ret;\n+\tstruct irdma_device *iwdev;\n+\tstruct irdma_cq_uk *ukcq;\n+\tstruct irdma_cq *iwcq;\n+\tbool new_cqe = false;\n+\tint resized_bufs = 0;\n+\tunsigned long flags;\n+\tint cqe_count = 0;\n+\n+\tiwcq = to_iwcq(ibcq);\n+\tiwdev = to_iwdev(ibcq->device);\n+\tukcq = &iwcq->sc_cq.cq_uk;\n+\n+\tspin_lock_irqsave(&iwcq->lock, flags);\n+\t/* go through the list of previously resized CQ buffers */\n+\tlist_for_each_safe(list_node, tmp_node, &iwcq->resize_list) {\n+\t\tbool last_cqe = false;\n+\n+\t\tcq_buf = container_of(list_node, struct irdma_cq_buf, list);\n+\t\tret = irdma_get_cqes(&cq_buf->cq_uk, num_entries, &cqe_count,\n+\t\t\t\t &last_cqe, &entry);\n+\t\tif (ret)\n+\t\t\tgoto exit;\n+\n+\t\t/* save the resized CQ buffer which has received the last cqe */\n+\t\tif (last_cqe)\n+\t\t\tlast_buf = cq_buf;\n+\t}\n+\n+\t/* check the current CQ buffer for new cqes */\n+\tret = irdma_get_cqes(ukcq, num_entries, &cqe_count, &new_cqe, &entry);\n+\tif (ret)\n+\t\tgoto exit;\n+\n+\tif (new_cqe)\n+\t\t/* all previous CQ resizes are complete */\n+\t\tresized_bufs = irdma_process_resize_list(iwcq, iwdev, NULL);\n+\telse if (last_buf)\n+\t\t/* only CQ resizes up to the last_buf are complete */\n+\t\tresized_bufs = irdma_process_resize_list(iwcq, iwdev, last_buf);\n+\tif (resized_bufs)\n+\t\t/* report to the HW the number of complete CQ resizes */\n+\t\tukcq->ops.iw_cq_set_resized_cnt(ukcq, resized_bufs);\n+\n+exit:\n+\tspin_unlock_irqrestore(&iwcq->lock, flags);\n+\n+\treturn cqe_count;\n+}\n+\n+/**\n+ * irdma_req_notify_cq - arm cq kernel application\n+ * @ibcq: cq to arm\n+ * @notify_flags: notification flags\n+ */\n+static int irdma_req_notify_cq(struct ib_cq *ibcq,\n+\t\t\t enum ib_cq_notify_flags notify_flags)\n+{\n+\tstruct irdma_cq *iwcq;\n+\tstruct irdma_cq_uk *ukcq;\n+\tunsigned long flags;\n+\tenum irdma_cmpl_notify cq_notify = IRDMA_CQ_COMPL_EVENT;\n+\n+\tiwcq = to_iwcq(ibcq);\n+\tukcq = &iwcq->sc_cq.cq_uk;\n+\tif (notify_flags == IB_CQ_SOLICITED)\n+\t\tcq_notify 
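/* override the IRDMA_CQ_COMPL_EVENT default */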
= IRDMA_CQ_COMPL_SOLICITED;\n+\tspin_lock_irqsave(&iwcq->lock, flags);\n+\tukcq->ops.iw_cq_request_notification(ukcq, cq_notify);\n+\tspin_unlock_irqrestore(&iwcq->lock, flags);\n+\n+\treturn 0;\n+}\n+\n+/**\n+ * irdma_port_immutable - return port's immutable data\n+ * @ibdev: ib dev struct\n+ * @port_num: port number\n+ * @immutable: immutable data for the port return\n+ */\n+static int irdma_port_immutable(struct ib_device *ibdev, u8 port_num,\n+\t\t\t\tstruct ib_port_immutable *immutable)\n+{\n+\tstruct ib_port_attr attr;\n+\tint err;\n+\tstruct irdma_device *iwdev = to_iwdev(ibdev);\n+\n+\tif (iwdev->roce_mode) {\n+\t\timmutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;\n+\t\timmutable->max_mad_size = IB_MGMT_MAD_SIZE;\n+\t} else {\n+\t\timmutable->core_cap_flags = RDMA_CORE_PORT_IWARP;\n+\t}\n+\terr = ib_query_port(ibdev, port_num, &attr);\n+\tif (err)\n+\t\treturn err;\n+\n+\timmutable->pkey_tbl_len = attr.pkey_tbl_len;\n+\timmutable->gid_tbl_len = attr.gid_tbl_len;\n+\n+\treturn 0;\n+}\n+\n+static const char *const irdma_hw_stat_names[] = {\n+\t/* 32bit names */\n+\t[IRDMA_HW_STAT_INDEX_RXVLANERR] = \"rxVlanErrors\",\n+\t[IRDMA_HW_STAT_INDEX_IP4RXDISCARD] = \"ip4InDiscards\",\n+\t[IRDMA_HW_STAT_INDEX_IP4RXTRUNC] = \"ip4InTruncatedPkts\",\n+\t[IRDMA_HW_STAT_INDEX_IP4TXNOROUTE] = \"ip4OutNoRoutes\",\n+\t[IRDMA_HW_STAT_INDEX_IP6RXDISCARD] = \"ip6InDiscards\",\n+\t[IRDMA_HW_STAT_INDEX_IP6RXTRUNC] = \"ip6InTruncatedPkts\",\n+\t[IRDMA_HW_STAT_INDEX_IP6TXNOROUTE] = \"ip6OutNoRoutes\",\n+\t[IRDMA_HW_STAT_INDEX_TCPRTXSEG] = \"tcpRetransSegs\",\n+\t[IRDMA_HW_STAT_INDEX_TCPRXOPTERR] = \"tcpInOptErrors\",\n+\t[IRDMA_HW_STAT_INDEX_TCPRXPROTOERR] = \"tcpInProtoErrors\",\n+\t[IRDMA_HW_STAT_INDEX_RXRPCNPHANDLED] = \"cnpHandled\",\n+\t[IRDMA_HW_STAT_INDEX_RXRPCNPIGNORED] = \"cnpIgnored\",\n+\t[IRDMA_HW_STAT_INDEX_TXNPCNPSENT] = \"cnpSent\",\n+\n+\t/* 64bit names */\n+\t[IRDMA_HW_STAT_INDEX_IP4RXOCTS + IRDMA_HW_STAT_INDEX_MAX_32] =\n+\t\t\"ip4InOctets\",\n+\t[IRDMA_HW_STAT_INDEX_IP4RXPKTS + IRDMA_HW_STAT_INDEX_MAX_32] =\n+\t\t\"ip4InPkts\",\n+\t[IRDMA_HW_STAT_INDEX_IP4RXFRAGS + IRDMA_HW_STAT_INDEX_MAX_32] =\n+\t\t\"ip4InReasmRqd\",\n+\t[IRDMA_HW_STAT_INDEX_IP4RXMCOCTS + IRDMA_HW_STAT_INDEX_MAX_32] =\n+\t\t\"ip4InMcastOctets\",\n+\t[IRDMA_HW_STAT_INDEX_IP4RXMCPKTS + IRDMA_HW_STAT_INDEX_MAX_32] =\n+\t\t\"ip4InMcastPkts\",\n+\t[IRDMA_HW_STAT_INDEX_IP4TXOCTS + IRDMA_HW_STAT_INDEX_MAX_32] =\n+\t\t\"ip4OutOctets\",\n+\t[IRDMA_HW_STAT_INDEX_IP4TXPKTS + IRDMA_HW_STAT_INDEX_MAX_32] =\n+\t\t\"ip4OutPkts\",\n+\t[IRDMA_HW_STAT_INDEX_IP4TXFRAGS + IRDMA_HW_STAT_INDEX_MAX_32] =\n+\t\t\"ip4OutSegRqd\",\n+\t[IRDMA_HW_STAT_INDEX_IP4TXMCOCTS + IRDMA_HW_STAT_INDEX_MAX_32] =\n+\t\t\"ip4OutMcastOctets\",\n+\t[IRDMA_HW_STAT_INDEX_IP4TXMCPKTS + IRDMA_HW_STAT_INDEX_MAX_32] =\n+\t\t\"ip4OutMcastPkts\",\n+\t[IRDMA_HW_STAT_INDEX_IP6RXOCTS + IRDMA_HW_STAT_INDEX_MAX_32] =\n+\t\t\"ip6InOctets\",\n+\t[IRDMA_HW_STAT_INDEX_IP6RXPKTS + IRDMA_HW_STAT_INDEX_MAX_32] =\n+\t\t\"ip6InPkts\",\n+\t[IRDMA_HW_STAT_INDEX_IP6RXFRAGS + IRDMA_HW_STAT_INDEX_MAX_32] =\n+\t\t\"ip6InReasmRqd\",\n+\t[IRDMA_HW_STAT_INDEX_IP6RXMCOCTS + IRDMA_HW_STAT_INDEX_MAX_32] =\n+\t\t\"ip6InMcastOctets\",\n+\t[IRDMA_HW_STAT_INDEX_IP6RXMCPKTS + IRDMA_HW_STAT_INDEX_MAX_32] =\n+\t\t\"ip6InMcastPkts\",\n+\t[IRDMA_HW_STAT_INDEX_IP6TXOCTS + IRDMA_HW_STAT_INDEX_MAX_32] =\n+\t\t\"ip6OutOctets\",\n+\t[IRDMA_HW_STAT_INDEX_IP6TXPKTS + IRDMA_HW_STAT_INDEX_MAX_32] =\n+\t\t\"ip6OutPkts\",\n+\t[IRDMA_HW_STAT_INDEX_IP6TXFRAGS + IRDMA_HW_STAT_INDEX_MAX_32] 
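/* 64-bit counters are indexed after the 32-bit block */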
=\n+\t\t\"ip6OutSegRqd\",\n+\t[IRDMA_HW_STAT_INDEX_IP6TXMCOCTS + IRDMA_HW_STAT_INDEX_MAX_32] =\n+\t\t\"ip6OutMcastOctets\",\n+\t[IRDMA_HW_STAT_INDEX_IP6TXMCPKTS + IRDMA_HW_STAT_INDEX_MAX_32] =\n+\t\t\"ip6OutMcastPkts\",\n+\t[IRDMA_HW_STAT_INDEX_TCPRXSEGS + IRDMA_HW_STAT_INDEX_MAX_32] =\n+\t\t\"tcpInSegs\",\n+\t[IRDMA_HW_STAT_INDEX_TCPTXSEG + IRDMA_HW_STAT_INDEX_MAX_32] =\n+\t\t\"tcpOutSegs\",\n+\t[IRDMA_HW_STAT_INDEX_RDMARXRDS + IRDMA_HW_STAT_INDEX_MAX_32] =\n+\t\t\"iwInRdmaReads\",\n+\t[IRDMA_HW_STAT_INDEX_RDMARXSNDS + IRDMA_HW_STAT_INDEX_MAX_32] =\n+\t\t\"iwInRdmaSends\",\n+\t[IRDMA_HW_STAT_INDEX_RDMARXWRS + IRDMA_HW_STAT_INDEX_MAX_32] =\n+\t\t\"iwInRdmaWrites\",\n+\t[IRDMA_HW_STAT_INDEX_RDMATXRDS + IRDMA_HW_STAT_INDEX_MAX_32] =\n+\t\t\"iwOutRdmaReads\",\n+\t[IRDMA_HW_STAT_INDEX_RDMATXSNDS + IRDMA_HW_STAT_INDEX_MAX_32] =\n+\t\t\"iwOutRdmaSends\",\n+\t[IRDMA_HW_STAT_INDEX_RDMATXWRS + IRDMA_HW_STAT_INDEX_MAX_32] =\n+\t\t\"iwOutRdmaWrites\",\n+\t[IRDMA_HW_STAT_INDEX_RDMAVBND + IRDMA_HW_STAT_INDEX_MAX_32] =\n+\t\t\"iwRdmaBnd\",\n+\t[IRDMA_HW_STAT_INDEX_RDMAVINV + IRDMA_HW_STAT_INDEX_MAX_32] =\n+\t\t\"iwRdmaInv\",\n+\t[IRDMA_HW_STAT_INDEX_UDPRXPKTS + IRDMA_HW_STAT_INDEX_MAX_32] =\n+\t\t\"RxUDP\",\n+\t[IRDMA_HW_STAT_INDEX_UDPTXPKTS + IRDMA_HW_STAT_INDEX_MAX_32] =\n+\t\t\"TxUDP\",\n+\t[IRDMA_HW_STAT_INDEX_RXNPECNMARKEDPKTS + IRDMA_HW_STAT_INDEX_MAX_32] =\n+\t\t\"RxECNMrkd\",\n+};\n+\n+static void irdma_get_dev_fw_str(struct ib_device *dev, char *str)\n+{\n+\tstruct irdma_device *iwdev = to_iwdev(dev);\n+\n+\tsnprintf(str, IB_FW_VERSION_NAME_MAX, \"%u.%u\",\n+\t\t FW_MAJOR_VER(&iwdev->rf->sc_dev),\n+\t\t FW_MINOR_VER(&iwdev->rf->sc_dev));\n+}\n+\n+/**\n+ * irdma_alloc_hw_stats - Allocate a hw stats structure\n+ * @ibdev: device pointer from stack\n+ * @port_num: port number\n+ */\n+static struct rdma_hw_stats *irdma_alloc_hw_stats(struct ib_device *ibdev,\n+\t\t\t\t\t\t u8 port_num)\n+{\n+\tstruct irdma_device *iwdev = to_iwdev(ibdev);\n+\tstruct irdma_sc_dev *dev = &iwdev->rf->sc_dev;\n+\tint num_counters = IRDMA_HW_STAT_INDEX_MAX_32 +\n+\t\t\t IRDMA_HW_STAT_INDEX_MAX_64;\n+\tunsigned long lifespan = RDMA_HW_STATS_DEFAULT_LIFESPAN;\n+\n+\tBUILD_BUG_ON(ARRAY_SIZE(irdma_hw_stat_names) !=\n+\t\t (IRDMA_HW_STAT_INDEX_MAX_32 + IRDMA_HW_STAT_INDEX_MAX_64));\n+\n+\t/*\n+\t * PFs get the default update lifespan, but VFs only update once\n+\t * per second\n+\t */\n+\tif (!dev->is_pf)\n+\t\tlifespan = 1000;\n+\n+\treturn rdma_alloc_hw_stats_struct(irdma_hw_stat_names, num_counters,\n+\t\t\t\t\t lifespan);\n+}\n+\n+/**\n+ * irdma_get_hw_stats - Populates the rdma_hw_stats structure\n+ * @ibdev: device pointer from stack\n+ * @stats: stats pointer from stack\n+ * @port_num: port number\n+ * @index: which hw counter the stack is requesting we update\n+ */\n+static int irdma_get_hw_stats(struct ib_device *ibdev,\n+\t\t\t struct rdma_hw_stats *stats, u8 port_num,\n+\t\t\t int index)\n+{\n+\tstruct irdma_device *iwdev = to_iwdev(ibdev);\n+\tstruct irdma_dev_hw_stats *hw_stats = &iwdev->vsi.pestat->hw_stats;\n+\n+\tif (iwdev->rf->rdma_ver > IRDMA_GEN_1)\n+\t\tirdma_cqp_gather_stats_cmd(&iwdev->rf->sc_dev, iwdev->vsi.pestat, true);\n+\n+\tmemcpy(&stats->value[0], hw_stats, sizeof(*hw_stats));\n+\n+\treturn stats->num_counters;\n+}\n+\n+/**\n+ * irdma_query_gid - Query port GID\n+ * @ibdev: device pointer from stack\n+ * @port: port number\n+ * @index: Entry index\n+ * @gid: Global ID\n+ */\n+static int irdma_query_gid(struct ib_device *ibdev, u8 port, int index,\n+\t\t\t union ib_gid *gid)\n+{\n+\tstruct 
irdma_device *iwdev = to_iwdev(ibdev);\n+\n+\tmemset(gid->raw, 0, sizeof(gid->raw));\n+\tether_addr_copy(gid->raw, iwdev->netdev->dev_addr);\n+\n+\treturn 0;\n+}\n+\n+/**\n+ * mcast_list_add - Add a new mcast item to list\n+ * @rf: RDMA PCI function\n+ * @new_elem: pointer to element to add\n+ */\n+static void mcast_list_add(struct irdma_pci_f *rf,\n+\t\t\t struct mc_table_list *new_elem)\n+{\n+\tlist_add(&new_elem->list, &rf->mc_qht_list.list);\n+}\n+\n+/**\n+ * mcast_list_del - Remove an mcast item from list\n+ * @mc_qht_elem: pointer to mcast table list element\n+ */\n+static void mcast_list_del(struct mc_table_list *mc_qht_elem)\n+{\n+\tif (mc_qht_elem)\n+\t\tlist_del(&mc_qht_elem->list);\n+}\n+\n+/**\n+ * mcast_list_lookup_ip - Search mcast list for address\n+ * @rf: RDMA PCI function\n+ * @ip_mcast: pointer to mcast IP address\n+ */\n+static struct mc_table_list *mcast_list_lookup_ip(struct irdma_pci_f *rf,\n+\t\t\t\t\t\t u32 *ip_mcast)\n+{\n+\tstruct mc_table_list *mc_qht_el;\n+\tstruct list_head *pos, *q;\n+\n+\tlist_for_each_safe(pos, q, &rf->mc_qht_list.list) {\n+\t\tmc_qht_el = list_entry(pos, struct mc_table_list, list);\n+\t\tif (!memcmp(mc_qht_el->mc_info.dest_ip, ip_mcast,\n+\t\t\t sizeof(mc_qht_el->mc_info.dest_ip)))\n+\t\t\treturn mc_qht_el;\n+\t}\n+\n+\treturn NULL;\n+}\n+\n+/**\n+ * irdma_mcast_cqp_op - perform a mcast cqp operation\n+ * @iwdev: device\n+ * @mc_grp_ctx: mcast group info\n+ * @op: operation\n+ *\n+ * returns error status\n+ */\n+static int irdma_mcast_cqp_op(struct irdma_device *iwdev,\n+\t\t\t struct irdma_mcast_grp_info *mc_grp_ctx, u8 op)\n+{\n+\tstruct cqp_cmds_info *cqp_info;\n+\tstruct irdma_cqp_request *cqp_request;\n+\tenum irdma_status_code status;\n+\n+\tcqp_request = irdma_get_cqp_request(&iwdev->rf->cqp, true);\n+\tif (!cqp_request)\n+\t\treturn -ENOMEM;\n+\n+\tcqp_request->info.in.u.mc_create.info = *mc_grp_ctx;\n+\tcqp_info = &cqp_request->info;\n+\tcqp_info->cqp_cmd = op;\n+\tcqp_info->post_sq = 1;\n+\tcqp_info->in.u.mc_create.scratch = (uintptr_t)cqp_request;\n+\tcqp_info->in.u.mc_create.cqp = &iwdev->rf->cqp.sc_cqp;\n+\tstatus = irdma_handle_cqp_op(iwdev->rf, cqp_request);\n+\tif (status) {\n+\t\tibdev_dbg(to_ibdev(iwdev), \"VERBS: CQP-OP_%s failed\\n\",\n+\t\t\t (op == IRDMA_OP_MC_MODIFY) ? 
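mcast_list_lookup_ip() keys each group entry by the full 4 x u32 destination address, so a single memcmp() covers both IPv4 (word 0 used, remaining words zero) and IPv6 groups. A self-contained sketch of that lookup pattern, with stand-in demo_* types rather than the driver's list machinery:

#include <stdio.h>
#include <string.h>

struct demo_mc_entry {
	unsigned int dest_ip[4];	/* host-order address words */
	struct demo_mc_entry *next;
};

static struct demo_mc_entry *demo_lookup(struct demo_mc_entry *head,
					 const unsigned int *ip)
{
	/* one comparison over the whole key handles v4 and v6 alike */
	for (struct demo_mc_entry *e = head; e; e = e->next)
		if (!memcmp(e->dest_ip, ip, sizeof(e->dest_ip)))
			return e;
	return NULL;
}

int main(void)
{
	struct demo_mc_entry a = { .dest_ip = { 0xe0000001 } };	/* 224.0.0.1 */
	struct demo_mc_entry b = { .dest_ip = { 0xff020000, 0, 0, 1 },
				   .next = &a };		/* ff02::1 */

	printf("v4 group found: %s\n",
	       demo_lookup(&b, (unsigned int [4]){ 0xe0000001 }) ? "yes" : "no");
	return 0;
}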
\"MODIFY\" : \"CREATE\");\n+\t\treturn -ENOMEM;\n+\t}\n+\n+\treturn 0;\n+}\n+\n+/**\n+ * irdma_mcast_mac - Get the multicast MAC for an IP address\n+ * @ip_addr: IPv4 or IPv6 address\n+ * @mac: pointer to result MAC address\n+ * @ipv4: flag indicating IPv4 or IPv6\n+ *\n+ */\n+void irdma_mcast_mac(u32 *ip_addr, u8 *mac, bool ipv4)\n+{\n+\tu8 *ip = (u8 *)ip_addr;\n+\n+\tif (ipv4) {\n+\t\tunsigned char mac4[ETH_ALEN] = {0x01, 0x00, 0x5E, 0x00,\n+\t\t\t\t\t\t0x00, 0x00};\n+\n+\t\tmac4[3] = ip[2] & 0x7F;\n+\t\tmac4[4] = ip[1];\n+\t\tmac4[5] = ip[0];\n+\t\tether_addr_copy(mac, mac4);\n+\t} else {\n+\t\tunsigned char mac6[ETH_ALEN] = {0x33, 0x33, 0x00, 0x00,\n+\t\t\t\t\t\t0x00, 0x00};\n+\n+\t\tmac6[2] = ip[3];\n+\t\tmac6[3] = ip[2];\n+\t\tmac6[4] = ip[1];\n+\t\tmac6[5] = ip[0];\n+\t\tether_addr_copy(mac, mac6);\n+\t}\n+}\n+\n+/**\n+ * irdma_attach_mcast - attach a qp to a multicast group\n+ * @ibqp: ptr to qp\n+ * @ibgid: pointer to global ID\n+ * @lid: local ID\n+ *\n+ * returns error status\n+ */\n+static int irdma_attach_mcast(struct ib_qp *ibqp, union ib_gid *ibgid, u16 lid)\n+{\n+\tstruct irdma_qp *iwqp = to_iwqp(ibqp);\n+\tstruct irdma_device *iwdev = iwqp->iwdev;\n+\tstruct irdma_pci_f *rf = iwdev->rf;\n+\tstruct mc_table_list *mc_qht_elem;\n+\tstruct irdma_mcast_grp_ctx_entry_info mcg_info = {};\n+\tunsigned long flags;\n+\tu32 ip_addr[4] = {};\n+\tu32 mgn;\n+\tu32 no_mgs;\n+\tint ret = 0;\n+\tbool ipv4;\n+\tu16 vlan_id;\n+\tunion {\n+\t\tstruct sockaddr saddr;\n+\t\tstruct sockaddr_in saddr_in;\n+\t\tstruct sockaddr_in6 saddr_in6;\n+\t} sgid_addr;\n+\tunsigned char dmac[ETH_ALEN];\n+\n+\trdma_gid2ip(&sgid_addr.saddr, ibgid);\n+\tif (rdma_gid_attr_network_type(ibqp->av_sgid_attr) ==\n+\t RDMA_NETWORK_IPV6) {\n+\t\tirdma_copy_ip_ntohl(ip_addr,\n+\t\t\t\t sgid_addr.saddr_in6.sin6_addr.in6_u.u6_addr32);\n+\t\tirdma_netdev_vlan_ipv6(ip_addr, &vlan_id, NULL);\n+\t\tipv4 = false;\n+\t\tdev_info(rfdev_to_dev(&rf->sc_dev),\n+\t\t\t \"qp_id=%d, IP6address=%pI6\\n\", ibqp->qp_num, ip_addr);\n+\t\tirdma_mcast_mac(ip_addr, dmac, false);\n+\t} else {\n+\t\tip_addr[0] = ntohl(sgid_addr.saddr_in.sin_addr.s_addr);\n+\t\tipv4 = true;\n+\t\tvlan_id = irdma_get_vlan_ipv4(ip_addr);\n+\t\tirdma_mcast_mac(ip_addr, dmac, true);\n+\t\tdev_info(rfdev_to_dev(&rf->sc_dev),\n+\t\t\t \"qp_id=%d, IP4address=%pI4, MAC=%pM\\n\", ibqp->qp_num,\n+\t\t\t ip_addr, dmac);\n+\t}\n+\n+\tspin_lock_irqsave(&rf->qh_list_lock, flags);\n+\tmc_qht_elem = mcast_list_lookup_ip(rf, ip_addr);\n+\tif (!mc_qht_elem) {\n+\t\tstruct irdma_dma_mem *dma_mem_mc;\n+\n+\t\tspin_unlock_irqrestore(&rf->qh_list_lock, flags);\n+\t\tmc_qht_elem = kzalloc(sizeof(*mc_qht_elem), GFP_KERNEL);\n+\t\tif (!mc_qht_elem)\n+\t\t\treturn -ENOMEM;\n+\n+\t\tmc_qht_elem->mc_info.ipv4_valid = ipv4;\n+\t\tmemcpy(mc_qht_elem->mc_info.dest_ip, ip_addr,\n+\t\t sizeof(mc_qht_elem->mc_info.dest_ip));\n+\t\tret = irdma_alloc_rsrc(rf, rf->allocated_mcgs, rf->max_mcg,\n+\t\t\t\t &mgn, &rf->next_mcg);\n+\t\tif (ret) {\n+\t\t\tkfree(mc_qht_elem);\n+\t\t\treturn -ENOMEM;\n+\t\t}\n+\n+\t\tmc_qht_elem->mc_info.mgn = mgn;\n+\t\tdma_mem_mc = &mc_qht_elem->mc_grp_ctx.dma_mem_mc;\n+\t\tdma_mem_mc->size = ALIGN(sizeof(u64) * IRDMA_MAX_MGS_PER_CTX,\n+\t\t\t\t\t IRDMA_HW_PAGE_SIZE);\n+\t\tdma_mem_mc->va = dma_alloc_coherent(hw_to_dev(&rf->hw),\n+\t\t\t\t\t\t dma_mem_mc->size,\n+\t\t\t\t\t\t &dma_mem_mc->pa,\n+\t\t\t\t\t\t GFP_KERNEL);\n+\t\tif (!dma_mem_mc->va) {\n+\t\t\tirdma_free_rsrc(rf, rf->allocated_mcgs, mgn);\n+\t\t\tkfree(mc_qht_elem);\n+\t\t\treturn 
-ENOMEM;\n+\t\t}\n+\n+\t\tmc_qht_elem->mc_grp_ctx.mg_id = (u16)mgn;\n+\t\tmemcpy(mc_qht_elem->mc_grp_ctx.dest_ip_addr, ip_addr,\n+\t\t sizeof(mc_qht_elem->mc_grp_ctx.dest_ip_addr));\n+\t\tmc_qht_elem->mc_grp_ctx.ipv4_valid = ipv4;\n+\t\tmc_qht_elem->mc_grp_ctx.vlan_id = vlan_id;\n+\t\tif (vlan_id < VLAN_N_VID)\n+\t\t\tmc_qht_elem->mc_grp_ctx.vlan_valid = true;\n+\t\tmc_qht_elem->mc_grp_ctx.hmc_fcn_id = iwdev->vsi.fcn_id;\n+\t\tether_addr_copy(mc_qht_elem->mc_grp_ctx.dest_mac_addr, dmac);\n+\n+\t\tspin_lock_irqsave(&rf->qh_list_lock, flags);\n+\t\tmcast_list_add(rf, mc_qht_elem);\n+\t} else {\n+\t\tif (mc_qht_elem->mc_grp_ctx.no_of_mgs ==\n+\t\t IRDMA_MAX_MGS_PER_CTX) {\n+\t\t\tspin_unlock_irqrestore(&rf->qh_list_lock, flags);\n+\t\t\treturn -ENOMEM;\n+\t\t}\n+\t}\n+\n+\tmcg_info.qp_id = iwqp->ibqp.qp_num;\n+\tno_mgs = mc_qht_elem->mc_grp_ctx.no_of_mgs;\n+\trf->sc_dev.iw_uda_ops->mcast_grp_add(&mc_qht_elem->mc_grp_ctx,\n+\t\t\t\t\t &mcg_info);\n+\tspin_unlock_irqrestore(&rf->qh_list_lock, flags);\n+\n+\t/* Only if there is a change do we need to modify or create */\n+\tif (!no_mgs) {\n+\t\tret = irdma_mcast_cqp_op(iwdev, &mc_qht_elem->mc_grp_ctx,\n+\t\t\t\t\t IRDMA_OP_MC_CREATE);\n+\t} else if (no_mgs != mc_qht_elem->mc_grp_ctx.no_of_mgs) {\n+\t\tret = irdma_mcast_cqp_op(iwdev, &mc_qht_elem->mc_grp_ctx,\n+\t\t\t\t\t IRDMA_OP_MC_MODIFY);\n+\t} else {\n+\t\treturn 0;\n+\t}\n+\n+\tif (ret)\n+\t\tgoto error;\n+\n+\treturn 0;\n+\n+error:\n+\trf->sc_dev.iw_uda_ops->mcast_grp_del(&mc_qht_elem->mc_grp_ctx,\n+\t\t\t\t\t &mcg_info);\n+\tif (!mc_qht_elem->mc_grp_ctx.no_of_mgs) {\n+\t\tmcast_list_del(mc_qht_elem);\n+\t\tdma_free_coherent(hw_to_dev(&rf->hw),\n+\t\t\t\t mc_qht_elem->mc_grp_ctx.dma_mem_mc.size,\n+\t\t\t\t mc_qht_elem->mc_grp_ctx.dma_mem_mc.va,\n+\t\t\t\t mc_qht_elem->mc_grp_ctx.dma_mem_mc.pa);\n+\t\tmc_qht_elem->mc_grp_ctx.dma_mem_mc.va = NULL;\n+\t\tirdma_free_rsrc(rf, rf->allocated_mcgs,\n+\t\t\t\tmc_qht_elem->mc_grp_ctx.mg_id);\n+\t\tkfree(mc_qht_elem);\n+\t}\n+\n+\treturn ret;\n+}\n+\n+/**\n+ * irdma_detach_mcast - detach a qp from a multicast group\n+ * @ibqp: ptr to qp\n+ * @ibgid: pointer to global ID\n+ * @lid: local ID\n+ *\n+ * returns error status\n+ */\n+static int irdma_detach_mcast(struct ib_qp *ibqp, union ib_gid *ibgid, u16 lid)\n+{\n+\tstruct irdma_qp *iwqp = to_iwqp(ibqp);\n+\tstruct irdma_device *iwdev = iwqp->iwdev;\n+\tstruct irdma_pci_f *rf = iwdev->rf;\n+\tu32 ip_addr[4] = {};\n+\tstruct mc_table_list *mc_qht_elem;\n+\tstruct irdma_mcast_grp_ctx_entry_info mcg_info = {};\n+\tint ret;\n+\tunsigned long flags;\n+\tunion {\n+\t\tstruct sockaddr saddr;\n+\t\tstruct sockaddr_in saddr_in;\n+\t\tstruct sockaddr_in6 saddr_in6;\n+\t} sgid_addr;\n+\n+\trdma_gid2ip(&sgid_addr.saddr, ibgid);\n+\tif (rdma_gid_attr_network_type(ibqp->av_sgid_attr) ==\n+\t RDMA_NETWORK_IPV6) {\n+\t\tirdma_copy_ip_ntohl(ip_addr,\n+\t\t\t\t sgid_addr.saddr_in6.sin6_addr.in6_u.u6_addr32);\n+\t\tdev_info(rfdev_to_dev(&rf->sc_dev),\n+\t\t\t \"qp_id=%d, IP6address=%pI6\\n\", ibqp->qp_num, ip_addr);\n+\t} else {\n+\t\tip_addr[0] = ntohl(sgid_addr.saddr_in.sin_addr.s_addr);\n+\t\tdev_info(rfdev_to_dev(&rf->sc_dev),\n+\t\t\t \"qp_id=%d, IP4address=%pI4\\n\", ibqp->qp_num, ip_addr);\n+\t}\n+\n+\tspin_lock_irqsave(&rf->qh_list_lock, flags);\n+\tmc_qht_elem = mcast_list_lookup_ip(rf, ip_addr);\n+\tif (!mc_qht_elem) {\n+\t\tspin_unlock_irqrestore(&rf->qh_list_lock, flags);\n+\t\tpr_info(\"address not found MCG\\n\");\n+\t\treturn 0; /* OK to remove group already removed */\n+\t}\n+\n+\tmcg_info.qp_id = 
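Note how the attach path snapshots the member count (no_mgs) before calling mcast_grp_add(), then issues MC_CREATE only for the first member and MC_MODIFY only when the count actually changed. That decision in isolation, as a compilable sketch:

#include <stdio.h>

enum demo_op { DEMO_NONE, DEMO_CREATE, DEMO_MODIFY };

static enum demo_op demo_attach_op(unsigned int before, unsigned int after)
{
	if (!before)
		return DEMO_CREATE;	/* first QP: program a new HW group */
	if (before != after)
		return DEMO_MODIFY;	/* membership changed: update the group */
	return DEMO_NONE;		/* duplicate attach: nothing to post */
}

int main(void)
{
	/* prints "1 2 0": create, then modify, then no-op */
	printf("%d %d %d\n", demo_attach_op(0, 1), demo_attach_op(1, 2),
	       demo_attach_op(2, 2));
	return 0;
}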
iwqp->ibqp.qp_num;\n+\trf->sc_dev.iw_uda_ops->mcast_grp_del(&mc_qht_elem->mc_grp_ctx,\n+\t\t\t\t\t &mcg_info);\n+\tif (!mc_qht_elem->mc_grp_ctx.no_of_mgs) {\n+\t\tmcast_list_del(mc_qht_elem);\n+\t\tspin_unlock_irqrestore(&rf->qh_list_lock, flags);\n+\t\tret = irdma_mcast_cqp_op(iwdev, &mc_qht_elem->mc_grp_ctx,\n+\t\t\t\t\t IRDMA_OP_MC_DESTROY);\n+\t\tif (ret) {\n+\t\t\tibdev_dbg(to_ibdev(iwdev),\n+\t\t\t\t \"VERBS: failed MC_DESTROY MCG\\n\");\n+\t\t\tspin_lock_irqsave(&rf->qh_list_lock, flags);\n+\t\t\tmcast_list_add(rf, mc_qht_elem);\n+\t\t\tspin_unlock_irqrestore(&rf->qh_list_lock, flags);\n+\t\t\treturn -EAGAIN;\n+\t\t}\n+\n+\t\tdma_free_coherent(hw_to_dev(&rf->hw),\n+\t\t\t\t mc_qht_elem->mc_grp_ctx.dma_mem_mc.size,\n+\t\t\t\t mc_qht_elem->mc_grp_ctx.dma_mem_mc.va,\n+\t\t\t\t mc_qht_elem->mc_grp_ctx.dma_mem_mc.pa);\n+\t\tmc_qht_elem->mc_grp_ctx.dma_mem_mc.va = NULL;\n+\t\tirdma_free_rsrc(rf, rf->allocated_mcgs,\n+\t\t\t\tmc_qht_elem->mc_grp_ctx.mg_id);\n+\t\tkfree(mc_qht_elem);\n+\t} else {\n+\t\tspin_unlock_irqrestore(&rf->qh_list_lock, flags);\n+\t\tret = irdma_mcast_cqp_op(iwdev, &mc_qht_elem->mc_grp_ctx,\n+\t\t\t\t\t IRDMA_OP_MC_MODIFY);\n+\t\tif (ret) {\n+\t\t\tibdev_dbg(to_ibdev(iwdev),\n+\t\t\t\t \"VERBS: failed Modify MCG\\n\");\n+\t\t\treturn ret;\n+\t\t}\n+\t}\n+\n+\treturn 0;\n+}\n+\n+/**\n+ * irdma_create_ah - create address handle\n+ * @ib_ah: address handle\n+ * @attr: address handle attributes\n+ * @flags: flags for sleepable\n+ * @udata: User data\n+ *\n+ * returns a pointer to an address handle\n+ */\n+static int irdma_create_ah(struct ib_ah *ib_ah,\n+\t\t\t struct rdma_ah_attr *attr, u32 flags,\n+\t\t\t struct ib_udata *udata)\n+{\n+\tstruct irdma_pd *pd = to_iwpd(ib_ah->pd);\n+\tstruct irdma_ah *ah = container_of(ib_ah, struct irdma_ah, ibah);\n+\tconst struct ib_gid_attr *sgid_attr;\n+\tstruct irdma_device *iwdev = to_iwdev(ib_ah->pd->device);\n+\tstruct irdma_pci_f *rf = iwdev->rf;\n+\tstruct irdma_sc_ah *sc_ah;\n+\tu32 ah_id = 0;\n+\tstruct irdma_ah_info *ah_info;\n+\tstruct irdma_create_ah_resp uresp;\n+\tunion {\n+\t\tstruct sockaddr saddr;\n+\t\tstruct sockaddr_in saddr_in;\n+\t\tstruct sockaddr_in6 saddr_in6;\n+\t} sgid_addr, dgid_addr;\n+\tint err;\n+\tu8 dmac[ETH_ALEN];\n+\n+\terr = irdma_alloc_rsrc(rf, rf->allocated_ahs, rf->max_ah, &ah_id,\n+\t\t\t &rf->next_ah);\n+\tif (err)\n+\t\treturn err;\n+\n+\tah->pd = pd;\n+\tsc_ah = &ah->sc_ah;\n+\tsc_ah->ah_info.ah_idx = ah_id;\n+\tsc_ah->ah_info.vsi = &iwdev->vsi;\n+\tiwdev->rf->sc_dev.iw_uda_ops->init_ah(&rf->sc_dev, sc_ah);\n+\tah->sgid_index = attr->grh.sgid_index;\n+\tsgid_attr = attr->grh.sgid_attr;\n+\tmemcpy(&ah->dgid, &attr->grh.dgid, sizeof(ah->dgid));\n+\trdma_gid2ip(&sgid_addr.saddr, &sgid_attr->gid);\n+\trdma_gid2ip(&dgid_addr.saddr, &attr->grh.dgid);\n+\tah->av.attrs = *attr;\n+\tah->av.net_type = rdma_gid_attr_network_type(sgid_attr);\n+\tah->av.sgid_addr.saddr = sgid_addr.saddr;\n+\tah->av.dgid_addr.saddr = dgid_addr.saddr;\n+\tah_info = &sc_ah->ah_info;\n+\tah_info->ah = sc_ah;\n+\tah_info->ah_idx = ah_id;\n+\tah_info->pd_idx = pd->sc_pd.pd_id;\n+\tif (attr->ah_flags & IB_AH_GRH) {\n+\t\tah_info->flow_label = attr->grh.flow_label;\n+\t\tah_info->hop_ttl = attr->grh.hop_limit;\n+\t\tah_info->tc_tos = attr->grh.traffic_class;\n+\t}\n+\n+\tether_addr_copy(dmac, attr->roce.dmac);\n+\tif (rdma_gid_attr_network_type(sgid_attr) == RDMA_NETWORK_IPV4) {\n+\t\tah_info->ipv4_valid = true;\n+\t\tah_info->dest_ip_addr[0] =\n+\t\t\tntohl(dgid_addr.saddr_in.sin_addr.s_addr);\n+\t\tah_info->src_ip_addr[0] 
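The sockaddr union plus rdma_gid2ip() used in the multicast and AH paths exists because a 16-byte GID carries IPv4 as a v4-mapped IPv6 address (::ffff:a.b.c.d). A userspace sketch of detecting and extracting the IPv4 case (the helper name is invented):

#include <stdio.h>
#include <string.h>

static int demo_gid_is_ipv4(const unsigned char gid[16])
{
	/* v4-mapped prefix: 80 zero bits followed by 0xffff */
	static const unsigned char prefix[12] = {
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff
	};

	return !memcmp(gid, prefix, sizeof(prefix));
}

int main(void)
{
	/* ::ffff:198.51.100.1 */
	unsigned char gid[16] = { [10] = 0xff, [11] = 0xff,
				  [12] = 198, [13] = 51, [14] = 100, [15] = 1 };

	if (demo_gid_is_ipv4(gid))
		printf("IPv4 GID: %u.%u.%u.%u\n",
		       gid[12], gid[13], gid[14], gid[15]);
	return 0;
}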
=\n+\t\t\tntohl(sgid_addr.saddr_in.sin_addr.s_addr);\n+\t\tah_info->do_lpbk = irdma_ipv4_is_lpb(ah_info->src_ip_addr[0],\n+\t\t\t\t\t\t ah_info->dest_ip_addr[0]);\n+\t\tif (ipv4_is_multicast(dgid_addr.saddr_in.sin_addr.s_addr))\n+\t\t\tirdma_mcast_mac(ah_info->dest_ip_addr, dmac, true);\n+\t} else {\n+\t\tirdma_copy_ip_ntohl(ah_info->dest_ip_addr,\n+\t\t\t\t dgid_addr.saddr_in6.sin6_addr.in6_u.u6_addr32);\n+\t\tirdma_copy_ip_ntohl(ah_info->src_ip_addr,\n+\t\t\t\t sgid_addr.saddr_in6.sin6_addr.in6_u.u6_addr32);\n+\t\tah_info->do_lpbk = irdma_ipv6_is_lpb(ah_info->src_ip_addr,\n+\t\t\t\t\t\t ah_info->dest_ip_addr);\n+\t\tif (rdma_is_multicast_addr(&dgid_addr.saddr_in6.sin6_addr))\n+\t\t\tirdma_mcast_mac(ah_info->dest_ip_addr, dmac, false);\n+\t}\n+\n+\terr = rdma_read_gid_l2_fields(sgid_attr, &ah_info->vlan_tag,\n+\t\t\t\t ah_info->mac_addr);\n+\tif (err)\n+\t\tgoto error;\n+\n+\tah_info->dst_arpindex = irdma_add_arp(iwdev->rf, ah_info->dest_ip_addr,\n+\t\t\t\t\t ah_info->ipv4_valid, dmac);\n+\n+\tif (ah_info->dst_arpindex == -1) {\n+\t\terr = -EINVAL;\n+\t\tgoto error;\n+\t}\n+\n+\tif (ah_info->vlan_tag >= VLAN_N_VID && iwdev->dcb)\n+\t\tah_info->vlan_tag = 0;\n+\n+\tif (ah_info->vlan_tag < VLAN_N_VID) {\n+\t\tah_info->insert_vlan_tag = true;\n+\t\tah_info->vlan_tag |=\n+\t\t\trt_tos2priority(ah_info->tc_tos) << VLAN_PRIO_SHIFT;\n+\t}\n+\n+\terr = irdma_ah_cqp_op(iwdev->rf, sc_ah, IRDMA_OP_AH_CREATE,\n+\t\t\t flags & RDMA_CREATE_AH_SLEEPABLE,\n+\t\t\t irdma_gsi_ud_qp_ah_cb, sc_ah);\n+\tif (err) {\n+\t\tibdev_dbg(to_ibdev(iwdev), \"VERBS: CQP-OP Create AH fail\");\n+\t\tgoto error;\n+\t}\n+\n+\tif (!(flags & RDMA_CREATE_AH_SLEEPABLE)) {\n+\t\tint cnt = CQP_COMPL_WAIT_TIME_MS * CQP_TIMEOUT_THRESHOLD;\n+\n+\t\tdo {\n+\t\t\tirdma_cqp_ce_handler(rf, &rf->ccq.sc_cq);\n+\t\t\tmdelay(1);\n+\t\t} while (!sc_ah->ah_info.ah_valid && --cnt);\n+\n+\t\tif (!cnt) {\n+\t\t\tibdev_dbg(to_ibdev(iwdev),\n+\t\t\t\t \"VERBS: CQP create AH timed out\");\n+\t\t\terr = -ETIMEDOUT;\n+\t\t\tgoto error;\n+\t\t}\n+\t}\n+\n+\tif (udata) {\n+\t\turesp.ah_id = ah->sc_ah.ah_info.ah_idx;\n+\t\terr = ib_copy_to_udata(udata, &uresp,\n+\t\t\t\t min(sizeof(uresp), udata->outlen));\n+\t\tif (err)\n+\t\t\tgoto error;\n+\t}\n+\n+\treturn 0;\n+\n+error:\n+\tirdma_free_rsrc(iwdev->rf, iwdev->rf->allocated_ahs, ah_id);\n+\n+\treturn err;\n+}\n+\n+/**\n+ * irdma_destroy_ah - Destroy address handle\n+ * @ibah: pointer to address handle\n+ * @flags: flags for sleepable\n+ */\n+static void irdma_destroy_ah(struct ib_ah *ibah, u32 flags)\n+{\n+\tstruct irdma_device *iwdev = to_iwdev(ibah->device);\n+\tstruct irdma_ah *ah = to_iwah(ibah);\n+\n+\tirdma_ah_cqp_op(iwdev->rf, &ah->sc_ah, IRDMA_OP_AH_DESTROY,\n+\t\t\tfalse, NULL, ah);\n+\n+\tirdma_free_rsrc(iwdev->rf, iwdev->rf->allocated_ahs,\n+\t\t\tah->sc_ah.ah_info.ah_idx);\n+}\n+\n+/**\n+ * irdma_query_ah - Query address handle\n+ * @ibah: pointer to address handle\n+ * @ah_attr: address handle attributes\n+ */\n+static int irdma_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr)\n+{\n+\tstruct irdma_ah *ah = to_iwah(ibah);\n+\n+\tmemset(ah_attr, 0, sizeof(*ah_attr));\n+\tif (ah->av.attrs.ah_flags & IB_AH_GRH) {\n+\t\tah_attr->ah_flags = IB_AH_GRH;\n+\t\tah_attr->grh.flow_label = ah->sc_ah.ah_info.flow_label;\n+\t\tah_attr->grh.traffic_class = ah->sc_ah.ah_info.tc_tos;\n+\t\tah_attr->grh.hop_limit = ah->sc_ah.ah_info.hop_ttl;\n+\t\tah_attr->grh.sgid_index = ah->sgid_index;\n+\t\tmemcpy(&ah_attr->grh.dgid, &ah->dgid,\n+\t\t sizeof(ah_attr->grh.dgid));\n+\t}\n+\n+\treturn 
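For a non-sleepable create_ah() the driver cannot block on the CQP completion, so it polls the CCQ itself with a bounded retry budget and fails with -ETIMEDOUT when the budget runs out. The same bounded-poll shape in a generic, runnable form (the completion source is faked):

#include <stdio.h>

static int demo_poll_once(int *done, int *ticks)
{
	if (++(*ticks) >= 3)	/* pretend the HW finishes on the third poll */
		*done = 1;
	return *done;
}

static int demo_wait_bounded(int budget)
{
	int done = 0, ticks = 0;

	do {
		if (demo_poll_once(&done, &ticks))
			return 0;
		/* the driver busy-waits ~1ms per iteration here (mdelay) */
	} while (--budget);

	return -1;	/* timed out, like the -ETIMEDOUT path above */
}

int main(void)
{
	printf("result: %d\n", demo_wait_bounded(10));
	return 0;
}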
0;\n+}\n+\n+static enum rdma_link_layer irdma_get_link_layer(struct ib_device *ibdev,\n+\t\t\t\t\t\t u8 port_num)\n+{\n+\treturn IB_LINK_LAYER_ETHERNET;\n+}\n+\n+static __be64 irdma_mac_to_guid(struct net_device *ndev)\n+{\n+\tunsigned char *mac = ndev->dev_addr;\n+\t__be64 guid;\n+\tunsigned char *dst = (unsigned char *)&guid;\n+\n+\tdst[0] = mac[0] ^ 2;\n+\tdst[1] = mac[1];\n+\tdst[2] = mac[2];\n+\tdst[3] = 0xff;\n+\tdst[4] = 0xfe;\n+\tdst[5] = mac[3];\n+\tdst[6] = mac[4];\n+\tdst[7] = mac[5];\n+\n+\treturn guid;\n+}\n+\n+static const struct ib_device_ops irdma_roce_dev_ops = {\n+\t.attach_mcast = irdma_attach_mcast,\n+\t.detach_mcast = irdma_detach_mcast,\n+\t.get_link_layer = irdma_get_link_layer,\n+\t.modify_qp = irdma_modify_qp_roce,\n+\t.query_ah = irdma_query_ah,\n+};\n+\n+static const struct ib_device_ops irdma_iw_dev_ops = {\n+\t.modify_qp = irdma_modify_qp,\n+\t.query_gid = irdma_query_gid,\n+};\n+\n+static const struct ib_device_ops irdma_dev_ops = {\n+\t.owner = THIS_MODULE,\n+\t.driver_id = RDMA_DRIVER_IRDMA,\n+\t.uverbs_abi_ver = IRDMA_ABI_VER,\n+\n+\t.alloc_hw_stats = irdma_alloc_hw_stats,\n+\t.alloc_mr = irdma_alloc_mr,\n+\t.alloc_mw = irdma_alloc_mw,\n+\t.alloc_pd = irdma_alloc_pd,\n+\t.alloc_ucontext = irdma_alloc_ucontext,\n+\t.create_ah = irdma_create_ah,\n+\t.create_cq = irdma_create_cq,\n+\t.create_qp = irdma_create_qp,\n+\t.dealloc_mw = irdma_dealloc_mw,\n+\t.dealloc_pd = irdma_dealloc_pd,\n+\t.dealloc_ucontext = irdma_dealloc_ucontext,\n+\t.dereg_mr = irdma_dereg_mr,\n+\t.destroy_ah = irdma_destroy_ah,\n+\t.destroy_cq = irdma_destroy_cq,\n+\t.destroy_qp = irdma_destroy_qp,\n+\t.disassociate_ucontext = irdma_disassociate_ucontext,\n+\t.drain_rq = irdma_drain_rq,\n+\t.drain_sq = irdma_drain_sq,\n+\t.get_dev_fw_str = irdma_get_dev_fw_str,\n+\t.get_dma_mr = irdma_get_dma_mr,\n+\t.get_hw_stats = irdma_get_hw_stats,\n+\t.get_port_immutable = irdma_port_immutable,\n+\t.map_mr_sg = irdma_map_mr_sg,\n+\t.mmap = irdma_mmap,\n+\t.poll_cq = irdma_poll_cq,\n+\t.post_recv = irdma_post_recv,\n+\t.post_send = irdma_post_send,\n+\t.query_device = irdma_query_device,\n+\t.query_pkey = irdma_query_pkey,\n+\t.query_port = irdma_query_port,\n+\t.query_qp = irdma_query_qp,\n+\t.reg_user_mr = irdma_reg_user_mr,\n+\t.req_notify_cq = irdma_req_notify_cq,\n+\t.resize_cq = irdma_resize_cq,\n+\tINIT_RDMA_OBJ_SIZE(ib_pd, irdma_pd, ibpd),\n+\tINIT_RDMA_OBJ_SIZE(ib_ucontext, irdma_ucontext, ibucontext),\n+\tINIT_RDMA_OBJ_SIZE(ib_ah, irdma_ah, ibah),\n+\tINIT_RDMA_OBJ_SIZE(ib_cq, irdma_cq, ibcq),\n+};\n+\n+/**\n+ * irdma_init_roce_device - initialization of roce rdma device\n+ * @iwibdev: irdma ib device\n+ */\n+static void irdma_init_roce_device(struct irdma_ib_device *iwibdev)\n+{\n+\tiwibdev->ibdev.uverbs_cmd_mask |=\n+\t\t(1ull << IB_USER_VERBS_CMD_ATTACH_MCAST) |\n+\t\t(1ull << IB_USER_VERBS_CMD_DETACH_MCAST);\n+\n+\tiwibdev->ibdev.node_type = RDMA_NODE_IB_CA;\n+\tiwibdev->ibdev.node_guid = irdma_mac_to_guid(iwibdev->iwdev->netdev);\n+\tib_set_device_ops(&iwibdev->ibdev, &irdma_roce_dev_ops);\n+}\n+\n+/**\n+ * irdma_init_iw_device - initialization of iwarp rdma device\n+ * @iwibdev: irdma ib device\n+ */\n+static int irdma_init_iw_device(struct irdma_ib_device *iwibdev)\n+{\n+\tstruct net_device *netdev = iwibdev->iwdev->netdev;\n+\n+\tiwibdev->ibdev.node_type = RDMA_NODE_RNIC;\n+\tether_addr_copy((u8 *)&iwibdev->ibdev.node_guid, netdev->dev_addr);\n+\tiwibdev->ibdev.ops.iw_add_ref = irdma_add_ref;\n+\tiwibdev->ibdev.ops.iw_rem_ref = irdma_rem_ref;\n+\tiwibdev->ibdev.ops.iw_get_qp = 
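irdma_mac_to_guid() is the modified EUI-64 construction of RFC 4291 Appendix A: flip the universal/local bit in the first octet of the MAC and splice 0xff, 0xfe into the middle. A standalone equivalent (demo_* names are invented):

#include <stdio.h>

static void demo_mac_to_eui64(const unsigned char mac[6], unsigned char guid[8])
{
	guid[0] = mac[0] ^ 2;	/* toggle the universal/local bit */
	guid[1] = mac[1];
	guid[2] = mac[2];
	guid[3] = 0xff;		/* fixed filler octets */
	guid[4] = 0xfe;
	guid[5] = mac[3];
	guid[6] = mac[4];
	guid[7] = mac[5];
}

int main(void)
{
	const unsigned char mac[6] = { 0x00, 0x1b, 0x21, 0x0a, 0x0b, 0x0c };
	unsigned char guid[8];

	demo_mac_to_eui64(mac, guid);
	for (int i = 0; i < 8; i++)
		printf("%02x%s", guid[i], i < 7 ? ":" : "\n");
	return 0;
}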
irdma_get_qp;\n+\tiwibdev->ibdev.ops.iw_connect = irdma_connect;\n+\tiwibdev->ibdev.ops.iw_accept = irdma_accept;\n+\tiwibdev->ibdev.ops.iw_reject = irdma_reject;\n+\tiwibdev->ibdev.ops.iw_create_listen = irdma_create_listen;\n+\tiwibdev->ibdev.ops.iw_destroy_listen = irdma_destroy_listen;\n+\tmemcpy(iwibdev->ibdev.iw_ifname, netdev->name,\n+\t sizeof(iwibdev->ibdev.iw_ifname));\n+\tib_set_device_ops(&iwibdev->ibdev, &irdma_iw_dev_ops);\n+\n+\treturn 0;\n+}\n+\n+/**\n+ * irdma_init_rdma_device - initialization of rdma device\n+ * @iwdev: irdma device\n+ */\n+static int irdma_init_rdma_device(struct irdma_device *iwdev)\n+{\n+\tstruct irdma_ib_device *iwibdev;\n+\tstruct pci_dev *pcidev = iwdev->rf->hw.pdev;\n+\tint ret;\n+\n+\tiwibdev = ib_alloc_device(irdma_ib_device, ibdev);\n+\tif (!iwibdev)\n+\t\treturn -ENOMEM;\n+\n+\tiwdev->iwibdev = iwibdev;\n+\tiwibdev->iwdev = iwdev;\n+\n+\tiwibdev->ibdev.uverbs_cmd_mask =\n+\t\t(1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |\n+\t\t(1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |\n+\t\t(1ull << IB_USER_VERBS_CMD_QUERY_PORT) |\n+\t\t(1ull << IB_USER_VERBS_CMD_ALLOC_PD) |\n+\t\t(1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |\n+\t\t(1ull << IB_USER_VERBS_CMD_REG_MR) |\n+\t\t(1ull << IB_USER_VERBS_CMD_DEREG_MR) |\n+\t\t(1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |\n+\t\t(1ull << IB_USER_VERBS_CMD_CREATE_CQ) |\n+\t\t(1ull << IB_USER_VERBS_CMD_RESIZE_CQ) |\n+\t\t(1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |\n+\t\t(1ull << IB_USER_VERBS_CMD_REQ_NOTIFY_CQ) |\n+\t\t(1ull << IB_USER_VERBS_CMD_CREATE_QP) |\n+\t\t(1ull << IB_USER_VERBS_CMD_MODIFY_QP) |\n+\t\t(1ull << IB_USER_VERBS_CMD_QUERY_QP) |\n+\t\t(1ull << IB_USER_VERBS_CMD_POLL_CQ) |\n+\t\t(1ull << IB_USER_VERBS_CMD_CREATE_AH) |\n+\t\t(1ull << IB_USER_VERBS_CMD_DESTROY_AH) |\n+\t\t(1ull << IB_USER_VERBS_CMD_DESTROY_QP) |\n+\t\t(1ull << IB_USER_VERBS_CMD_ALLOC_MW) |\n+\t\t(1ull << IB_USER_VERBS_CMD_BIND_MW) |\n+\t\t(1ull << IB_USER_VERBS_CMD_DEALLOC_MW) |\n+\t\t(1ull << IB_USER_VERBS_CMD_POST_RECV) |\n+\t\t(1ull << IB_USER_VERBS_CMD_POST_SEND);\n+\tiwibdev->ibdev.uverbs_ex_cmd_mask =\n+\t\t(1ull << IB_USER_VERBS_EX_CMD_MODIFY_QP);\n+\n+\tif (iwdev->roce_mode) {\n+\t\tirdma_init_roce_device(iwibdev);\n+\t} else {\n+\t\tret = irdma_init_iw_device(iwibdev);\n+\t\tif (ret) {\n+\t\t\tib_dealloc_device(&iwibdev->ibdev);\n+\t\t\treturn ret;\n+\t\t}\n+\t}\n+\tiwibdev->ibdev.phys_port_cnt = 1;\n+\tiwibdev->ibdev.num_comp_vectors = iwdev->rf->ceqs_count;\n+\tiwibdev->ibdev.dev.parent = &pcidev->dev;\n+\tib_set_device_ops(&iwibdev->ibdev, &irdma_dev_ops);\n+\n+\treturn 0;\n+}\n+\n+/**\n+ * irdma_port_ibevent - indicate port event\n+ * @iwdev: iwarp device\n+ */\n+void irdma_port_ibevent(struct irdma_device *iwdev)\n+{\n+\tstruct irdma_ib_device *iwibdev = iwdev->iwibdev;\n+\tstruct ib_event event;\n+\n+\tevent.device = &iwibdev->ibdev;\n+\tevent.element.port_num = 1;\n+\tevent.event =\n+\t\tiwdev->iw_status ? 
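uverbs_cmd_mask is a plain 64-bit bitmap: bit N set means userspace verb command N is implemented, which is why each entry above is built with 1ull << cmd. A tiny illustration with invented command numbers:

#include <stdio.h>

enum demo_cmd { DEMO_CMD_ALLOC_PD = 3, DEMO_CMD_CREATE_QP = 10 };

int main(void)
{
	unsigned long long mask = 0;

	/* advertise two commands */
	mask |= 1ull << DEMO_CMD_ALLOC_PD;
	mask |= 1ull << DEMO_CMD_CREATE_QP;

	printf("alloc_pd supported: %s\n",
	       (mask & (1ull << DEMO_CMD_ALLOC_PD)) ? "yes" : "no");
	printf("cmd 24 supported: %s\n",
	       (mask & (1ull << 24)) ? "yes" : "no");
	return 0;
}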
IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR;\n+\tib_dispatch_event(&event);\n+}\n+\n+/**\n+ * irdma_destroy_rdma_device - destroy rdma device and free resources\n+ * @iwibdev: IB device ptr\n+ */\n+void irdma_destroy_rdma_device(struct irdma_ib_device *iwibdev)\n+{\n+\tib_unregister_device(&iwibdev->ibdev);\n+\tib_dealloc_device(&iwibdev->ibdev);\n+}\n+\n+/**\n+ * irdma_register_rdma_device - register iwarp device to IB\n+ * @iwdev: iwarp device\n+ */\n+int irdma_register_rdma_device(struct irdma_device *iwdev)\n+{\n+\tint ret;\n+\tstruct irdma_ib_device *iwibdev;\n+\n+\tret = irdma_init_rdma_device(iwdev);\n+\tif (ret)\n+\t\treturn ret;\n+\n+\tiwibdev = iwdev->iwibdev;\n+\trdma_set_device_sysfs_group(&iwibdev->ibdev, &irdma_attr_group);\n+\tret = ib_device_set_netdev(&iwibdev->ibdev, iwdev->netdev, 1);\n+\tif (ret)\n+\t\tgoto error;\n+\tret = ib_register_device(&iwibdev->ibdev, \"irdma%d\");\n+\tif (ret)\n+\t\tgoto error;\n+\n+\treturn 0;\n+\n+error:\n+\tib_dealloc_device(&iwdev->iwibdev->ibdev);\n+\tif (ret)\n+\t\tdev_dbg(rfdev_to_dev(&iwdev->rf->sc_dev),\n+\t\t\t\"VERBS: Register RDMA device fail\\n\");\n+\n+\treturn ret;\n+}\ndiff --git a/drivers/infiniband/hw/irdma/verbs.h b/drivers/infiniband/hw/irdma/verbs.h\nnew file mode 100644\nindex 0000000..8e8f4a7\n--- /dev/null\n+++ b/drivers/infiniband/hw/irdma/verbs.h\n@@ -0,0 +1,199 @@\n+/* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB */\n+/* Copyright (c) 2019, Intel Corporation. */\n+\n+#ifndef IRDMA_VERBS_H\n+#define IRDMA_VERBS_H\n+\n+#define IRDMA_MAX_SAVED_PHY_PGADDR\t4\n+\n+#define IRDMA_PKEY_TBL_SZ\t\t1\n+#define IRDMA_DEFAULT_PKEY\t\t0xFFFF\n+\n+struct irdma_ucontext {\n+\tstruct ib_ucontext ibucontext;\n+\tstruct irdma_device *iwdev;\n+\tstruct list_head cq_reg_mem_list;\n+\tspinlock_t cq_reg_mem_list_lock; /* protect CQ memory list */\n+\tstruct list_head qp_reg_mem_list;\n+\tspinlock_t qp_reg_mem_list_lock; /* protect QP memory list */\n+\tint abi_ver;\n+};\n+\n+struct irdma_pd {\n+\tstruct ib_pd ibpd;\n+\tstruct irdma_sc_pd sc_pd;\n+};\n+\n+struct irdma_av {\n+\tu8 macaddr[16];\n+\tstruct rdma_ah_attr attrs;\n+\tunion {\n+\t\tstruct sockaddr saddr;\n+\t\tstruct sockaddr_in saddr_in;\n+\t\tstruct sockaddr_in6 saddr_in6;\n+\t} sgid_addr, dgid_addr;\n+\tu8 net_type;\n+};\n+\n+struct irdma_ah {\n+\tstruct ib_ah ibah;\n+\tstruct irdma_sc_ah sc_ah;\n+\tstruct irdma_pd *pd;\n+\tstruct irdma_av av;\n+\tu8 sgid_index;\n+\tunion ib_gid dgid;\n+};\n+\n+struct irdma_hmc_pble {\n+\tunion {\n+\t\tu32 idx;\n+\t\tdma_addr_t addr;\n+\t};\n+};\n+\n+struct irdma_cq_mr {\n+\tstruct irdma_hmc_pble cq_pbl;\n+\tdma_addr_t shadow;\n+\tbool split;\n+};\n+\n+struct irdma_qp_mr {\n+\tstruct irdma_hmc_pble sq_pbl;\n+\tstruct irdma_hmc_pble rq_pbl;\n+\tdma_addr_t shadow;\n+\tstruct page *sq_page;\n+};\n+\n+struct irdma_cq_buf {\n+\tstruct irdma_dma_mem kmem_buf;\n+\tstruct irdma_cq_uk cq_uk;\n+\tstruct irdma_hw *hw;\n+\tstruct list_head list;\n+\tstruct work_struct work;\n+};\n+\n+struct irdma_pbl {\n+\tstruct list_head list;\n+\tunion {\n+\t\tstruct irdma_qp_mr qp_mr;\n+\t\tstruct irdma_cq_mr cq_mr;\n+\t};\n+\n+\tbool pbl_allocated;\n+\tbool on_list;\n+\tu64 user_base;\n+\tstruct irdma_pble_alloc pble_alloc;\n+\tstruct irdma_mr *iwmr;\n+};\n+\n+struct irdma_mr {\n+\tunion {\n+\t\tstruct ib_mr ibmr;\n+\t\tstruct ib_mw ibmw;\n+\t\tstruct ib_fmr ibfmr;\n+\t};\n+\tstruct ib_umem *region;\n+\tu16 type;\n+\tu32 page_cnt;\n+\tu64 page_size;\n+\tu32 npages;\n+\tu32 stag;\n+\tu64 len;\n+\tu64 pgaddrmem[IRDMA_MAX_SAVED_PHY_PGADDR];\n+\tstruct irdma_pbl 
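The verbs.h structures here all embed the core ib_* object (ib_ucontext, ib_pd, ib_ah, ib_cq, ib_qp) inside the driver object; INIT_RDMA_OBJ_SIZE in irdma_dev_ops tells the core how much to allocate, and helpers such as to_iwpd() recover the driver object via container_of(). A userspace sketch of that idiom with stand-in types:

#include <stddef.h>
#include <stdio.h>

#define demo_container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct demo_ib_pd { int handle; };

struct demo_irdma_pd {
	struct demo_ib_pd ibpd;	/* core object embedded first */
	int pd_id;		/* driver-private state follows */
};

static struct demo_irdma_pd *demo_to_iwpd(struct demo_ib_pd *ibpd)
{
	return demo_container_of(ibpd, struct demo_irdma_pd, ibpd);
}

int main(void)
{
	struct demo_irdma_pd pd = { .ibpd = { .handle = 7 }, .pd_id = 42 };
	struct demo_ib_pd *core = &pd.ibpd;	/* what the core hands back */

	printf("pd_id via container_of: %d\n", demo_to_iwpd(core)->pd_id);
	return 0;
}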
iwpbl;\n+};\n+\n+struct irdma_cq {\n+\tstruct ib_cq ibcq;\n+\tstruct irdma_sc_cq sc_cq;\n+\tu16 cq_head;\n+\tu16 cq_size;\n+\tu16 cq_num;\n+\tbool user_mode;\n+\tu32 polled_cmpls;\n+\tu32 cq_mem_size;\n+\tstruct irdma_dma_mem kmem;\n+\tstruct irdma_dma_mem kmem_shadow;\n+\tspinlock_t lock; /* for poll cq */\n+\tstruct irdma_pbl *iwpbl;\n+\tstruct irdma_pbl *iwpbl_shadow;\n+\tstruct list_head resize_list;\n+};\n+\n+struct disconn_work {\n+\tstruct work_struct work;\n+\tstruct irdma_qp *iwqp;\n+};\n+\n+struct iw_cm_id;\n+\n+struct irdma_qp_kmode {\n+\tstruct irdma_dma_mem dma_mem;\n+\tu64 *wrid_mem;\n+};\n+\n+struct irdma_qp {\n+\tstruct ib_qp ibqp;\n+\tstruct irdma_sc_qp sc_qp;\n+\tstruct irdma_device *iwdev;\n+\tstruct irdma_cq *iwscq;\n+\tstruct irdma_cq *iwrcq;\n+\tstruct irdma_pd *iwpd;\n+\tstruct irdma_qp_host_ctx_info ctx_info;\n+\tunion {\n+\t\tstruct irdma_iwarp_offload_info iwarp_info;\n+\t\tstruct irdma_roce_offload_info roce_info;\n+\t};\n+\n+\tunion {\n+\t\tstruct irdma_tcp_offload_info tcp_info;\n+\t\tstruct irdma_udp_offload_info udp_info;\n+\t};\n+\n+\tstruct irdma_ah roce_ah;\n+\tstruct list_head teardown_entry;\n+\tatomic_t refcount;\n+\tstruct iw_cm_id *cm_id;\n+\tvoid *cm_node;\n+\tstruct ib_mr *lsmm_mr;\n+\tstruct work_struct work;\n+\tatomic_t hw_mod_qp_pend;\n+\tenum ib_qp_state ibqp_state;\n+\tu32 qp_mem_size;\n+\tu32 last_aeq;\n+\tint max_send_wr;\n+\tint max_recv_wr;\n+\tatomic_t close_timer_started;\n+\tspinlock_t lock; /* serialize posting WRs to SQ/RQ */\n+\tstruct irdma_qp_context *iwqp_context;\n+\tvoid *pbl_vbase;\n+\tdma_addr_t pbl_pbase;\n+\tstruct page *page;\n+\tu8 active_conn : 1;\n+\tu8 user_mode : 1;\n+\tu8 hte_added : 1;\n+\tu8 flush_issued : 1;\n+\tu8 destroyed : 1;\n+\tu8 sig_all : 1;\n+\tu8 pau_mode : 1;\n+\tu8 rsvd : 1;\n+\tu8 iwarp_state;\n+\tu16 term_sq_flush_code;\n+\tu16 term_rq_flush_code;\n+\tu8 hw_iwarp_state;\n+\tu8 hw_tcp_state;\n+\tstruct irdma_qp_kmode kqp;\n+\tstruct irdma_dma_mem host_ctx;\n+\tstruct timer_list terminate_timer;\n+\tstruct irdma_pbl *iwpbl;\n+\tstruct irdma_dma_mem q2_ctx_mem;\n+\tstruct irdma_dma_mem ietf_mem;\n+\tstruct completion sq_drained;\n+\tstruct completion rq_drained;\n+\twait_queue_head_t waitq;\n+\twait_queue_head_t mod_qp_waitq;\n+\tu8 rts_ae_rcvd;\n+};\n+\n+void irdma_mcast_mac(u32 *ip_addr, u8 *mac, bool ipv4);\n+#endif /* IRDMA_VERBS_H */\ndiff --git a/include/uapi/rdma/rdma_user_ioctl_cmds.h b/include/uapi/rdma/rdma_user_ioctl_cmds.h\nindex 26213f4..036f1c3 100644\n--- a/include/uapi/rdma/rdma_user_ioctl_cmds.h\n+++ b/include/uapi/rdma/rdma_user_ioctl_cmds.h\n@@ -95,6 +95,7 @@ enum rdma_driver_id {\n \tRDMA_DRIVER_OCRDMA,\n \tRDMA_DRIVER_NES,\n \tRDMA_DRIVER_I40IW,\n+\tRDMA_DRIVER_IRDMA = RDMA_DRIVER_I40IW,\n \tRDMA_DRIVER_VMW_PVRDMA,\n \tRDMA_DRIVER_QEDR,\n \tRDMA_DRIVER_HNS,\n", "prefixes": [ "rdma-next", "09/17" ] }
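The enum change in the final hunk aliases RDMA_DRIVER_IRDMA to RDMA_DRIVER_I40IW instead of appending a new value: an alias enumerator reuses the existing value, and later enumerators continue from it, so no previously assigned driver ID shifts. A compile-time demonstration of that property (demo_* names are invented):

#include <stdio.h>

enum demo_driver_id {
	DEMO_DRIVER_NES,
	DEMO_DRIVER_I40IW,
	DEMO_DRIVER_IRDMA = DEMO_DRIVER_I40IW,	/* alias, not a new slot */
	DEMO_DRIVER_VMW_PVRDMA,
};

_Static_assert(DEMO_DRIVER_IRDMA == DEMO_DRIVER_I40IW,
	       "alias shares the old value");
_Static_assert(DEMO_DRIVER_VMW_PVRDMA == DEMO_DRIVER_I40IW + 1,
	       "values after the alias do not shift");

int main(void)
{
	printf("i40iw=%d irdma=%d pvrdma=%d\n", DEMO_DRIVER_I40IW,
	       DEMO_DRIVER_IRDMA, DEMO_DRIVER_VMW_PVRDMA);
	return 0;
}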