Patch Detail

get:
Show a patch.

patch:
Update a patch (partial update: only the supplied fields are changed).

put:
Update a patch (full update: the complete resource is replaced).
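For readers who want to use these endpoints programmatically, a minimal sketch follows, assuming Python with the third-party requests library installed. The base URL and patch ID are taken from the sample request below; omitting ?format=api returns the plain JSON representation shown in the response. The token value is a placeholder, not a real credential — the write operations (patch/put) require an API token with maintainer rights on the project.

import requests

BASE = "http://patchwork.ozlabs.org/api"
PATCH_ID = 1182425  # the patch shown on this page

# get: fetch the patch as JSON (read-only, no authentication needed)
resp = requests.get(f"{BASE}/patches/{PATCH_ID}/")
resp.raise_for_status()
patch = resp.json()
print(patch["name"], patch["state"])  # e.g. "[rdma-nxt,02/16] ..." "changes-requested"

# patch: partial update of selected fields; YOUR_API_TOKEN is a placeholder
headers = {"Authorization": "Token YOUR_API_TOKEN"}
resp = requests.patch(f"{BASE}/patches/{PATCH_ID}/",
                      headers=headers,
                      json={"state": "changes-requested"})
resp.raise_for_status()

The sample request and full JSON response recorded for this patch follow.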
GET /api/patches/1182425/?format=api
{ "id": 1182425, "url": "http://patchwork.ozlabs.org/api/patches/1182425/?format=api", "web_url": "http://patchwork.ozlabs.org/project/intel-wired-lan/patch/20191023182253.1115-3-shiraz.saleem@intel.com/", "project": { "id": 46, "url": "http://patchwork.ozlabs.org/api/projects/46/?format=api", "name": "Intel Wired Ethernet development", "link_name": "intel-wired-lan", "list_id": "intel-wired-lan.osuosl.org", "list_email": "intel-wired-lan@osuosl.org", "web_url": "", "scm_url": "", "webscm_url": "", "list_archive_url": "", "list_archive_url_format": "", "commit_url_format": "" }, "msgid": "<20191023182253.1115-3-shiraz.saleem@intel.com>", "list_archive_url": null, "date": "2019-10-23T18:22:38", "name": "[rdma-nxt,02/16] RDMA/irdma: Implement device initialization definitions", "commit_ref": null, "pull_url": null, "state": "changes-requested", "archived": false, "hash": "120b333c6b76c84d5b275f828304a8d0acf06072", "submitter": { "id": 69500, "url": "http://patchwork.ozlabs.org/api/people/69500/?format=api", "name": "Saleem, Shiraz", "email": "shiraz.saleem@intel.com" }, "delegate": { "id": 68, "url": "http://patchwork.ozlabs.org/api/users/68/?format=api", "username": "jtkirshe", "first_name": "Jeff", "last_name": "Kirsher", "email": "jeffrey.t.kirsher@intel.com" }, "mbox": "http://patchwork.ozlabs.org/project/intel-wired-lan/patch/20191023182253.1115-3-shiraz.saleem@intel.com/mbox/", "series": [ { "id": 138160, "url": "http://patchwork.ozlabs.org/api/series/138160/?format=api", "web_url": "http://patchwork.ozlabs.org/project/intel-wired-lan/list/?series=138160", "date": "2019-10-23T18:22:36", "name": "Add unified Intel Ethernet RDMA driver (irdma)", "version": 1, "mbox": "http://patchwork.ozlabs.org/series/138160/mbox/" } ], "comments": "http://patchwork.ozlabs.org/api/patches/1182425/comments/", "check": "pending", "checks": "http://patchwork.ozlabs.org/api/patches/1182425/checks/", "tags": {}, "related": [], "headers": { "Return-Path": "<intel-wired-lan-bounces@osuosl.org>", "X-Original-To": [ "incoming@patchwork.ozlabs.org", "intel-wired-lan@lists.osuosl.org" ], "Delivered-To": [ "patchwork-incoming@bilbo.ozlabs.org", "intel-wired-lan@lists.osuosl.org" ], "Authentication-Results": [ "ozlabs.org; spf=pass (sender SPF authorized)\n\tsmtp.mailfrom=osuosl.org (client-ip=140.211.166.137;\n\thelo=fraxinus.osuosl.org;\n\tenvelope-from=intel-wired-lan-bounces@osuosl.org;\n\treceiver=<UNKNOWN>)", "ozlabs.org;\n\tdmarc=fail (p=none dis=none) header.from=intel.com" ], "Received": [ "from fraxinus.osuosl.org (smtp4.osuosl.org [140.211.166.137])\n\t(using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256\n\tbits)) (No client certificate requested)\n\tby ozlabs.org (Postfix) with ESMTPS id 46z0323RRKz9sPL\n\tfor <incoming@patchwork.ozlabs.org>;\n\tThu, 24 Oct 2019 05:56:30 +1100 (AEDT)", "from localhost (localhost [127.0.0.1])\n\tby fraxinus.osuosl.org (Postfix) with ESMTP id 5289286594;\n\tWed, 23 Oct 2019 18:56:28 +0000 (UTC)", "from fraxinus.osuosl.org ([127.0.0.1])\n\tby localhost (.osuosl.org [127.0.0.1]) (amavisd-new, port 10024)\n\twith ESMTP id 8KIaAHZVV3y5; Wed, 23 Oct 2019 18:56:20 +0000 (UTC)", "from ash.osuosl.org (ash.osuosl.org [140.211.166.34])\n\tby fraxinus.osuosl.org (Postfix) with ESMTP id 801648655C;\n\tWed, 23 Oct 2019 18:56:20 +0000 (UTC)", "from silver.osuosl.org (smtp3.osuosl.org [140.211.166.136])\n\tby ash.osuosl.org (Postfix) with ESMTP id 135D91BF48D\n\tfor <intel-wired-lan@lists.osuosl.org>;\n\tWed, 23 Oct 2019 18:38:10 +0000 (UTC)", "from localhost (localhost 
[127.0.0.1])\n\tby silver.osuosl.org (Postfix) with ESMTP id DDF37228EF\n\tfor <intel-wired-lan@lists.osuosl.org>;\n\tWed, 23 Oct 2019 18:38:09 +0000 (UTC)", "from silver.osuosl.org ([127.0.0.1])\n\tby localhost (.osuosl.org [127.0.0.1]) (amavisd-new, port 10024)\n\twith ESMTP id E7r1fPcyBh76 for <intel-wired-lan@lists.osuosl.org>;\n\tWed, 23 Oct 2019 18:37:51 +0000 (UTC)", "from mga14.intel.com (mga14.intel.com [192.55.52.115])\n\tby silver.osuosl.org (Postfix) with ESMTPS id 1C284228D1\n\tfor <intel-wired-lan@lists.osuosl.org>;\n\tWed, 23 Oct 2019 18:37:51 +0000 (UTC)", "from fmsmga002.fm.intel.com ([10.253.24.26])\n\tby fmsmga103.fm.intel.com with ESMTP/TLS/DHE-RSA-AES256-GCM-SHA384;\n\t23 Oct 2019 11:37:50 -0700", "from ssaleem-mobl.amr.corp.intel.com ([10.122.128.45])\n\tby fmsmga002.fm.intel.com with ESMTP; 23 Oct 2019 11:37:50 -0700" ], "X-Virus-Scanned": [ "amavisd-new at osuosl.org", "amavisd-new at osuosl.org" ], "X-Greylist": "domain auto-whitelisted by SQLgrey-1.7.6", "X-Amp-Result": "SKIPPED(no attachment in message)", "X-Amp-File-Uploaded": "False", "X-ExtLoop1": "1", "X-IronPort-AV": "E=Sophos;i=\"5.68,221,1569308400\"; d=\"scan'208\";a=\"228225046\"", "From": "Shiraz Saleem <shiraz.saleem@intel.com>", "To": "intel-wired-lan@lists.osuosl.org", "Date": "Wed, 23 Oct 2019 13:22:38 -0500", "Message-Id": "<20191023182253.1115-3-shiraz.saleem@intel.com>", "X-Mailer": "git-send-email 2.21.0", "In-Reply-To": "<20191023182253.1115-1-shiraz.saleem@intel.com>", "References": "<20191023182253.1115-1-shiraz.saleem@intel.com>", "MIME-Version": "1.0", "X-Mailman-Approved-At": "Wed, 23 Oct 2019 18:56:19 +0000", "Subject": "[Intel-wired-lan] [PATCH rdma-nxt 02/16] RDMA/irdma: Implement\n\tdevice initialization definitions", "X-BeenThere": "intel-wired-lan@osuosl.org", "X-Mailman-Version": "2.1.29", "Precedence": "list", "List-Id": "Intel Wired Ethernet Linux Kernel Driver Development\n\t<intel-wired-lan.osuosl.org>", "List-Unsubscribe": "<https://lists.osuosl.org/mailman/options/intel-wired-lan>, \n\t<mailto:intel-wired-lan-request@osuosl.org?subject=unsubscribe>", "List-Archive": "<http://lists.osuosl.org/pipermail/intel-wired-lan/>", "List-Post": "<mailto:intel-wired-lan@osuosl.org>", "List-Help": "<mailto:intel-wired-lan-request@osuosl.org?subject=help>", "List-Subscribe": "<https://lists.osuosl.org/mailman/listinfo/intel-wired-lan>, \n\t<mailto:intel-wired-lan-request@osuosl.org?subject=subscribe>", "Cc": "Mustafa Ismail <mustafa.ismail@intel.com>,\n\tShiraz Saleem <shiraz.saleem@intel.com>", "Content-Type": "text/plain; charset=\"us-ascii\"", "Content-Transfer-Encoding": "7bit", "Errors-To": "intel-wired-lan-bounces@osuosl.org", "Sender": "\"Intel-wired-lan\" <intel-wired-lan-bounces@osuosl.org>" }, "content": "From: Mustafa Ismail <mustafa.ismail@intel.com>\n\nImplement device initialization routines, interrupt set-up,\nand allocate object bit-map tracking structures.\nAlso, add device specific attributes and register definitions.\n\nSigned-off-by: Mustafa Ismail <mustafa.ismail@intel.com>\nSigned-off-by: Shiraz Saleem <shiraz.saleem@intel.com>\n---\n drivers/infiniband/hw/irdma/hw.c | 2564 +++++++++++++++++++++++++++++++\n drivers/infiniband/hw/irdma/i40iw_hw.c | 210 +++\n drivers/infiniband/hw/irdma/i40iw_hw.h | 163 ++\n drivers/infiniband/hw/irdma/icrdma_hw.c | 75 +\n drivers/infiniband/hw/irdma/icrdma_hw.h | 63 +\n 5 files changed, 3075 insertions(+)\n create mode 100644 drivers/infiniband/hw/irdma/hw.c\n create mode 100644 drivers/infiniband/hw/irdma/i40iw_hw.c\n create mode 
100644 drivers/infiniband/hw/irdma/i40iw_hw.h\n create mode 100644 drivers/infiniband/hw/irdma/icrdma_hw.c\n create mode 100644 drivers/infiniband/hw/irdma/icrdma_hw.h", "diff": "diff --git a/drivers/infiniband/hw/irdma/hw.c b/drivers/infiniband/hw/irdma/hw.c\nnew file mode 100644\nindex 0000000..5c3dda4\n--- /dev/null\n+++ b/drivers/infiniband/hw/irdma/hw.c\n@@ -0,0 +1,2564 @@\n+// SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB\n+/* Copyright (c) 2019, Intel Corporation. */\n+\n+#include \"main.h\"\n+\n+static struct irdma_rsrc_limits rsrc_limits_table[] = {\n+\t[0] = {\n+\t\t.qplimit = 4096,\n+\t},\n+\t[1] = {\n+\t\t.qplimit = 128,\n+\t},\n+\t[2] = {\n+\t\t.qplimit = 1024,\n+\t},\n+\t[3] = {\n+\t\t.qplimit = 2048,\n+\t},\n+\t[4] = {\n+\t\t.qplimit = 16384,\n+\t},\n+\t[5] = {\n+\t\t.qplimit = 65536,\n+\t},\n+};\n+\n+/* types of hmc objects */\n+static enum irdma_hmc_rsrc_type iw_hmc_obj_types[] = {\n+\tIRDMA_HMC_IW_QP,\n+\tIRDMA_HMC_IW_CQ,\n+\tIRDMA_HMC_IW_HTE,\n+\tIRDMA_HMC_IW_ARP,\n+\tIRDMA_HMC_IW_APBVT_ENTRY,\n+\tIRDMA_HMC_IW_MR,\n+\tIRDMA_HMC_IW_XF,\n+\tIRDMA_HMC_IW_XFFL,\n+\tIRDMA_HMC_IW_Q1,\n+\tIRDMA_HMC_IW_Q1FL,\n+\tIRDMA_HMC_IW_TIMER,\n+\tIRDMA_HMC_IW_FSIMC,\n+\tIRDMA_HMC_IW_FSIAV,\n+\tIRDMA_HMC_IW_RRF,\n+\tIRDMA_HMC_IW_RRFFL,\n+\tIRDMA_HMC_IW_HDR,\n+\tIRDMA_HMC_IW_MD,\n+\tIRDMA_HMC_IW_OOISC,\n+\tIRDMA_HMC_IW_OOISCFFL,\n+};\n+\n+/**\n+ * irdma_iwarp_ce_handler - handle iwarp completions\n+ * @iwcq: iwarp cq receiving event\n+ */\n+static void irdma_iwarp_ce_handler(struct irdma_sc_cq *iwcq)\n+{\n+\tstruct irdma_cq *cq = iwcq->back_cq;\n+\n+\tif (cq->ibcq.comp_handler)\n+\t\tcq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);\n+}\n+\n+/**\n+ * irdma_puda_ce_handler - handle puda completion events\n+ * @rf: RDMA PCI function\n+ * @cq: puda completion q for event\n+ */\n+static void irdma_puda_ce_handler(struct irdma_pci_f *rf,\n+\t\t\t\t struct irdma_sc_cq *cq)\n+{\n+\tstruct irdma_sc_dev *dev = (struct irdma_sc_dev *)&rf->sc_dev;\n+\tenum irdma_status_code status;\n+\tu32 compl_error;\n+\n+\tdo {\n+\t\tstatus = irdma_puda_poll_cmpl(dev, cq, &compl_error);\n+\t\tif (status == IRDMA_ERR_Q_EMPTY)\n+\t\t\tbreak;\n+\t\tif (status) {\n+\t\t\tdev_dbg(rfdev_to_dev(dev), \"ERR: puda status = %d\\n\",\n+\t\t\t\tstatus);\n+\t\t\tbreak;\n+\t\t}\n+\t\tif (compl_error) {\n+\t\t\tdev_dbg(rfdev_to_dev(dev),\n+\t\t\t\t\"ERR: puda compl_err =0x%x\\n\", compl_error);\n+\t\t\tbreak;\n+\t\t}\n+\t} while (1);\n+\n+\tdev->ccq_ops->ccq_arm(cq);\n+}\n+\n+/**\n+ * irdma_process_ceq - handle ceq for completions\n+ * @rf: RDMA PCI function\n+ * @ceq: ceq having cq for completion\n+ */\n+static void irdma_process_ceq(struct irdma_pci_f *rf, struct irdma_ceq *ceq)\n+{\n+\tstruct irdma_sc_dev *dev = &rf->sc_dev;\n+\tstruct irdma_sc_ceq *sc_ceq;\n+\tstruct irdma_sc_cq *cq;\n+\n+\tsc_ceq = &ceq->sc_ceq;\n+\tdo {\n+\t\tcq = dev->ceq_ops->process_ceq(dev, sc_ceq);\n+\t\tif (!cq)\n+\t\t\tbreak;\n+\n+\t\tif (cq->cq_type == IRDMA_CQ_TYPE_CQP)\n+\t\t\tup(&rf->cqp.cqp_compl_sem);\n+\t\telse if (cq->cq_type == IRDMA_CQ_TYPE_IWARP)\n+\t\t\tirdma_iwarp_ce_handler(cq);\n+\t\telse if (cq->cq_type == IRDMA_CQ_TYPE_ILQ ||\n+\t\t\t cq->cq_type == IRDMA_CQ_TYPE_IEQ)\n+\t\t\tirdma_puda_ce_handler(rf, cq);\n+\t} while (1);\n+}\n+\n+/**\n+ * irdma_process_aeq - handle aeq events\n+ * @rf: RDMA PCI function\n+ */\n+static void irdma_process_aeq(struct irdma_pci_f *rf)\n+{\n+\tstruct irdma_sc_dev *dev = &rf->sc_dev;\n+\tstruct irdma_aeq *aeq = &rf->aeq;\n+\tstruct irdma_sc_aeq *sc_aeq = &aeq->sc_aeq;\n+\tstruct irdma_aeqe_info 
aeinfo;\n+\tstruct irdma_aeqe_info *info = &aeinfo;\n+\tint ret;\n+\tstruct irdma_qp *iwqp = NULL;\n+\tstruct irdma_sc_cq *cq = NULL;\n+\tstruct irdma_cq *iwcq = NULL;\n+\tstruct irdma_sc_qp *qp = NULL;\n+\tstruct irdma_qp_host_ctx_info *ctx_info = NULL;\n+\tunsigned long flags;\n+\n+\tu32 aeqcnt = 0;\n+\n+\tif (!sc_aeq->size)\n+\t\treturn;\n+\n+\tdo {\n+\t\tmemset(info, 0, sizeof(*info));\n+\t\tret = dev->aeq_ops->get_next_aeqe(sc_aeq, info);\n+\t\tif (ret)\n+\t\t\tbreak;\n+\n+\t\taeqcnt++;\n+\t\tdev_dbg(rfdev_to_dev(dev),\n+\t\t\t\"AEQ: ae_id = 0x%x bool qp=%d qp_id = %d\\n\",\n+\t\t\tinfo->ae_id, info->qp, info->qp_cq_id);\n+\t\tif (info->qp) {\n+\t\t\tspin_lock_irqsave(&rf->qptable_lock, flags);\n+\t\t\tiwqp = rf->qp_table[info->qp_cq_id];\n+\t\t\tif (!iwqp) {\n+\t\t\t\tspin_unlock_irqrestore(&rf->qptable_lock,\n+\t\t\t\t\t\t flags);\n+\t\t\t\tdev_dbg(rfdev_to_dev(dev),\n+\t\t\t\t\t\"AEQ: qp_id %d is already freed\\n\",\n+\t\t\t\t\tinfo->qp_cq_id);\n+\t\t\t\tcontinue;\n+\t\t\t}\n+\t\t\tirdma_add_ref(&iwqp->ibqp);\n+\t\t\tspin_unlock_irqrestore(&rf->qptable_lock, flags);\n+\t\t\tqp = &iwqp->sc_qp;\n+\t\t\tspin_lock_irqsave(&iwqp->lock, flags);\n+\t\t\tiwqp->hw_tcp_state = info->tcp_state;\n+\t\t\tiwqp->hw_iwarp_state = info->iwarp_state;\n+\t\t\tiwqp->last_aeq = info->ae_id;\n+\t\t\tspin_unlock_irqrestore(&iwqp->lock, flags);\n+\t\t\tctx_info = &iwqp->ctx_info;\n+\t\t\tif (rdma_protocol_roce(&iwqp->iwdev->iwibdev->ibdev, 1))\n+\t\t\t\tctx_info->roce_info->err_rq_idx_valid = true;\n+\t\t\telse\n+\t\t\t\tctx_info->iwarp_info->err_rq_idx_valid = true;\n+\t\t} else {\n+\t\t\tif (info->ae_id != IRDMA_AE_CQ_OPERATION_ERROR)\n+\t\t\t\tcontinue;\n+\t\t}\n+\n+\t\tswitch (info->ae_id) {\n+\t\t\tstruct irdma_cm_node *cm_node;\n+\t\tcase IRDMA_AE_LLP_CONNECTION_ESTABLISHED:\n+\t\t\tcm_node = iwqp->cm_node;\n+\t\t\tif (cm_node->accept_pend) {\n+\t\t\t\tatomic_dec(&cm_node->listener->pend_accepts_cnt);\n+\t\t\t\tcm_node->accept_pend = 0;\n+\t\t\t}\n+\t\t\tiwqp->rts_ae_rcvd = 1;\n+\t\t\twake_up_interruptible(&iwqp->waitq);\n+\t\t\tbreak;\n+\t\tcase IRDMA_AE_LLP_FIN_RECEIVED:\n+\t\tcase IRDMA_AE_RDMAP_ROE_BAD_LLP_CLOSE:\n+\t\t\tif (qp->term_flags)\n+\t\t\t\tbreak;\n+\t\t\tif (atomic_inc_return(&iwqp->close_timer_started) == 1) {\n+\t\t\t\tiwqp->hw_tcp_state = IRDMA_TCP_STATE_CLOSE_WAIT;\n+\t\t\t\tif (iwqp->hw_tcp_state == IRDMA_TCP_STATE_CLOSE_WAIT &&\n+\t\t\t\t iwqp->ibqp_state == IB_QPS_RTS) {\n+\t\t\t\t\tirdma_next_iw_state(iwqp,\n+\t\t\t\t\t\t\t IRDMA_QP_STATE_CLOSING,\n+\t\t\t\t\t\t\t 0, 0, 0);\n+\t\t\t\t\tirdma_cm_disconn(iwqp);\n+\t\t\t\t}\n+\t\t\t\tiwqp->cm_id->add_ref(iwqp->cm_id);\n+\t\t\t\tirdma_schedule_cm_timer(iwqp->cm_node,\n+\t\t\t\t\t\t\t(struct irdma_puda_buf *)iwqp,\n+\t\t\t\t\t\t\tIRDMA_TIMER_TYPE_CLOSE,\n+\t\t\t\t\t\t\t1, 0);\n+\t\t\t}\n+\t\t\tbreak;\n+\t\tcase IRDMA_AE_LLP_CLOSE_COMPLETE:\n+\t\t\tif (qp->term_flags)\n+\t\t\t\tirdma_terminate_done(qp, 0);\n+\t\t\telse\n+\t\t\t\tirdma_cm_disconn(iwqp);\n+\t\t\tbreak;\n+\t\tcase IRDMA_AE_BAD_CLOSE:\n+\t\t\t/* fall through */\n+\t\tcase IRDMA_AE_RESET_SENT:\n+\t\t\tirdma_next_iw_state(iwqp, IRDMA_QP_STATE_ERROR, 1, 0,\n+\t\t\t\t\t 0);\n+\t\t\tirdma_cm_disconn(iwqp);\n+\t\t\tbreak;\n+\t\tcase IRDMA_AE_LLP_CONNECTION_RESET:\n+\t\t\tif (atomic_read(&iwqp->close_timer_started))\n+\t\t\t\tbreak;\n+\t\t\tirdma_cm_disconn(iwqp);\n+\t\t\tbreak;\n+\t\tcase IRDMA_AE_QP_SUSPEND_COMPLETE:\n+\t\t\tatomic_dec(&iwqp->sc_qp.vsi->qp_suspend_reqs);\n+\t\t\twake_up(&iwqp->iwdev->suspend_wq);\n+\t\t\tbreak;\n+\t\tcase 
IRDMA_AE_TERMINATE_SENT:\n+\t\t\tirdma_terminate_send_fin(qp);\n+\t\t\tbreak;\n+\t\tcase IRDMA_AE_LLP_TERMINATE_RECEIVED:\n+\t\t\tirdma_terminate_received(qp, info);\n+\t\t\tbreak;\n+\t\tcase IRDMA_AE_CQ_OPERATION_ERROR:\n+\t\t\tdev_err(rfdev_to_dev(dev),\n+\t\t\t\t\"Processing an iWARP related AE for CQ misc = 0x%04X\\n\",\n+\t\t\t\tinfo->ae_id);\n+\t\t\tcq = (struct irdma_sc_cq *)(unsigned long)\n+\t\t\t info->compl_ctx;\n+\n+\t\t\tiwcq = (struct irdma_cq *)cq->back_cq;\n+\n+\t\t\tif (iwcq->ibcq.event_handler) {\n+\t\t\t\tstruct ib_event ibevent;\n+\n+\t\t\t\tibevent.device = iwcq->ibcq.device;\n+\t\t\t\tibevent.event = IB_EVENT_CQ_ERR;\n+\t\t\t\tibevent.element.cq = &iwcq->ibcq;\n+\t\t\t\tiwcq->ibcq.event_handler(&ibevent,\n+\t\t\t\t\t\t\t iwcq->ibcq.cq_context);\n+\t\t\t}\n+\t\t\tbreak;\n+\t\tcase IRDMA_AE_LLP_DOUBT_REACHABILITY:\n+\t\tcase IRDMA_AE_RESOURCE_EXHAUSTION:\n+\t\t\tbreak;\n+\t\tcase IRDMA_AE_PRIV_OPERATION_DENIED:\n+\t\tcase IRDMA_AE_STAG_ZERO_INVALID:\n+\t\tcase IRDMA_AE_IB_RREQ_AND_Q1_FULL:\n+\t\tcase IRDMA_AE_DDP_UBE_INVALID_DDP_VERSION:\n+\t\tcase IRDMA_AE_DDP_UBE_INVALID_MO:\n+\t\tcase IRDMA_AE_DDP_UBE_INVALID_QN:\n+\t\tcase IRDMA_AE_DDP_NO_L_BIT:\n+\t\tcase IRDMA_AE_RDMAP_ROE_INVALID_RDMAP_VERSION:\n+\t\tcase IRDMA_AE_RDMAP_ROE_UNEXPECTED_OPCODE:\n+\t\tcase IRDMA_AE_ROE_INVALID_RDMA_READ_REQUEST:\n+\t\tcase IRDMA_AE_ROE_INVALID_RDMA_WRITE_OR_READ_RESP:\n+\t\tcase IRDMA_AE_INVALID_ARP_ENTRY:\n+\t\tcase IRDMA_AE_INVALID_TCP_OPTION_RCVD:\n+\t\tcase IRDMA_AE_STALE_ARP_ENTRY:\n+\t\tcase IRDMA_AE_LLP_RECEIVED_MPA_CRC_ERROR:\n+\t\tcase IRDMA_AE_LLP_SEGMENT_TOO_SMALL:\n+\t\tcase IRDMA_AE_LLP_SYN_RECEIVED:\n+\t\tcase IRDMA_AE_LLP_TOO_MANY_RETRIES:\n+\t\tcase IRDMA_AE_LCE_QP_CATASTROPHIC:\n+\t\tcase IRDMA_AE_LCE_FUNCTION_CATASTROPHIC:\n+\t\tcase IRDMA_AE_LCE_CQ_CATASTROPHIC:\n+\t\tcase IRDMA_AE_UDA_XMIT_DGRAM_TOO_LONG:\n+\t\t\tif (rdma_protocol_roce(&iwqp->iwdev->iwibdev->ibdev, 1))\n+\t\t\t\tctx_info->roce_info->err_rq_idx_valid = false;\n+\t\t\telse\n+\t\t\t\tctx_info->iwarp_info->err_rq_idx_valid = false;\n+\t\t\t/* fall through */\n+\t\tdefault:\n+\t\t\tdev_err(rfdev_to_dev(dev),\n+\t\t\t\t\"abnormal ae_id = 0x%x bool qp=%d qp_id = %d\\n\",\n+\t\t\t\tinfo->ae_id, info->qp, info->qp_cq_id);\n+\t\t\tif (rdma_protocol_roce(&iwqp->iwdev->iwibdev->ibdev, 1)) {\n+\t\t\t\tif (!info->sq && ctx_info->roce_info->err_rq_idx_valid) {\n+\t\t\t\t\tctx_info->roce_info->err_rq_idx = info->wqe_idx;\n+\t\t\t\t\tret = dev->iw_priv_qp_ops->qp_setctx_roce(&iwqp->sc_qp,\n+\t\t\t\t\t\t\t\t\t\t iwqp->host_ctx.va,\n+\t\t\t\t\t\t\t\t\t\t ctx_info);\n+\t\t\t\t}\n+\t\t\t\tirdma_cm_disconn(iwqp);\n+\t\t\t\tbreak;\n+\t\t\t}\n+\t\t\tif (!info->sq && ctx_info->iwarp_info->err_rq_idx_valid) {\n+\t\t\t\tctx_info->iwarp_info->err_rq_idx = info->wqe_idx;\n+\t\t\t\tctx_info->tcp_info_valid = false;\n+\t\t\t\tctx_info->iwarp_info_valid = false;\n+\t\t\t\tret = dev->iw_priv_qp_ops->qp_setctx(&iwqp->sc_qp,\n+\t\t\t\t\t\t\t\t iwqp->host_ctx.va,\n+\t\t\t\t\t\t\t\t ctx_info);\n+\t\t\t}\n+\t\t\tif (iwqp->hw_iwarp_state != IRDMA_QP_STATE_RTS &&\n+\t\t\t iwqp->hw_iwarp_state != IRDMA_QP_STATE_TERMINATE) {\n+\t\t\t\tirdma_next_iw_state(iwqp, IRDMA_QP_STATE_ERROR, 1, 0, 0);\n+\t\t\t\tirdma_cm_disconn(iwqp);\n+\t\t\t} else {\n+\t\t\t\tiwqp->sc_qp.term_flags = 1;\n+\t\t\t\tirdma_next_iw_state(iwqp, IRDMA_QP_STATE_ERROR, 1, 0,\n+\t\t\t\t\t\t 0);\n+\t\t\t\tirdma_cm_disconn(iwqp);\n+\t\t\t}\n+\t\t\tbreak;\n+\t\t}\n+\t\tif (info->qp)\n+\t\t\tirdma_rem_ref(&iwqp->ibqp);\n+\t} while (1);\n+\n+\tif 
(aeqcnt)\n+\t\tdev->aeq_ops->repost_aeq_entries(dev, aeqcnt);\n+}\n+\n+/**\n+ * irdma_enable_intr - set up device interrupts\n+ * @dev: hardware control device structure\n+ * @msix_id: id of the interrupt to be enabled\n+ */\n+static void irdma_ena_intr(struct irdma_sc_dev *dev, u32 msix_id)\n+{\n+\tdev->irq_ops->irdma_en_irq(dev, msix_id);\n+}\n+\n+/**\n+ * irdma_dpc - tasklet for aeq and ceq 0\n+ * @data: RDMA PCI function\n+ */\n+static void irdma_dpc(unsigned long data)\n+{\n+\tstruct irdma_pci_f *rf = (struct irdma_pci_f *)data;\n+\n+\tif (rf->msix_shared)\n+\t\tirdma_process_ceq(rf, rf->ceqlist);\n+\tirdma_process_aeq(rf);\n+\tirdma_ena_intr(&rf->sc_dev, rf->iw_msixtbl[0].idx);\n+}\n+\n+/**\n+ * irdma_ceq_dpc - dpc handler for CEQ\n+ * @data: data points to CEQ\n+ */\n+static void irdma_ceq_dpc(unsigned long data)\n+{\n+\tstruct irdma_ceq *iwceq = (struct irdma_ceq *)data;\n+\tstruct irdma_pci_f *rf = iwceq->rf;\n+\n+\tirdma_process_ceq(rf, iwceq);\n+\tirdma_ena_intr(&rf->sc_dev, iwceq->msix_idx);\n+}\n+\n+/**\n+ * irdma_save_msix_info - copy msix vector information to iwarp device\n+ * @rf: RDMA PCI function\n+ *\n+ * Allocate iwdev msix table and copy the ldev msix info to the table\n+ * Return 0 if successful, otherwise return error\n+ */\n+static enum irdma_status_code irdma_save_msix_info(struct irdma_pci_f *rf)\n+{\n+\tstruct irdma_priv_ldev *ldev = &rf->ldev;\n+\tstruct irdma_qvlist_info *iw_qvlist;\n+\tstruct irdma_qv_info *iw_qvinfo;\n+\tstruct msix_entry *pmsix;\n+\tu32 ceq_idx;\n+\tu32 i;\n+\tu32 size;\n+\n+\tif (!ldev->msix_count) {\n+\t\tpr_err(\"No MSI-X vectors for RDMA\\n\");\n+\t\treturn IRDMA_ERR_CFG;\n+\t}\n+\n+\trf->msix_count = ldev->msix_count;\n+\tsize = sizeof(struct irdma_msix_vector) * rf->msix_count;\n+\tsize += sizeof(struct irdma_qvlist_info);\n+\tsize += sizeof(struct irdma_qv_info) * rf->msix_count - 1;\n+\trf->iw_msixtbl = kzalloc(size, GFP_KERNEL);\n+\tif (!rf->iw_msixtbl)\n+\t\treturn IRDMA_ERR_NO_MEMORY;\n+\n+\trf->iw_qvlist = (struct irdma_qvlist_info *)\n+\t\t\t(&rf->iw_msixtbl[rf->msix_count]);\n+\tiw_qvlist = rf->iw_qvlist;\n+\tiw_qvinfo = iw_qvlist->qv_info;\n+\tiw_qvlist->num_vectors = rf->msix_count;\n+\tif (rf->msix_count <= num_online_cpus())\n+\t\trf->msix_shared = true;\n+\n+\tfor (i = 0, ceq_idx = 0, pmsix = ldev->msix_entries; i < rf->msix_count;\n+\t i++, iw_qvinfo++, pmsix++) {\n+\t\trf->iw_msixtbl[i].idx = pmsix->entry;\n+\t\trf->iw_msixtbl[i].irq = pmsix->vector;\n+\t\trf->iw_msixtbl[i].cpu_affinity = ceq_idx;\n+\t\tif (!i) {\n+\t\t\tiw_qvinfo->aeq_idx = 0;\n+\t\t\tif (rf->msix_shared)\n+\t\t\t\tiw_qvinfo->ceq_idx = ceq_idx++;\n+\t\t\telse\n+\t\t\t\tiw_qvinfo->ceq_idx = IRDMA_Q_INVALID_IDX;\n+\t\t} else {\n+\t\t\tiw_qvinfo->aeq_idx = IRDMA_Q_INVALID_IDX;\n+\t\t\tiw_qvinfo->ceq_idx = ceq_idx++;\n+\t\t}\n+\t\tiw_qvinfo->itr_idx = 3;\n+\t\tiw_qvinfo->v_idx = rf->iw_msixtbl[i].idx;\n+\t}\n+\n+\treturn 0;\n+}\n+\n+/**\n+ * irdma_irq_handler - interrupt handler for aeq and ceq0\n+ * @irq: Interrupt request number\n+ * @data: RDMA PCI function\n+ */\n+static irqreturn_t irdma_irq_handler(int irq, void *data)\n+{\n+\tstruct irdma_pci_f *rf = data;\n+\n+\ttasklet_schedule(&rf->dpc_tasklet);\n+\n+\treturn IRQ_HANDLED;\n+}\n+\n+/**\n+ * irdma_ceq_handler - interrupt handler for ceq\n+ * @irq: interrupt request number\n+ * @data: ceq pointer\n+ */\n+static irqreturn_t irdma_ceq_handler(int irq, void *data)\n+{\n+\tstruct irdma_ceq *iwceq = data;\n+\n+\tif (iwceq->irq != irq)\n+\t\tdev_err(rfdev_to_dev(&iwceq->rf->sc_dev),\n+\t\t\t\"expected irq 
= %d received irq = %d\\n\", iwceq->irq,\n+\t\t\tirq);\n+\ttasklet_schedule(&iwceq->dpc_tasklet);\n+\n+\treturn IRQ_HANDLED;\n+}\n+\n+/**\n+ * irdma_destroy_irq - destroy device interrupts\n+ * @rf: RDMA PCI function\n+ * @msix_vec: msix vector to disable irq\n+ * @dev_id: parameter to pass to free_irq (used during irq setup)\n+ *\n+ * The function is called when destroying aeq/ceq\n+ */\n+static void irdma_destroy_irq(struct irdma_pci_f *rf,\n+\t\t\t struct irdma_msix_vector *msix_vec, void *dev_id)\n+{\n+\tstruct irdma_sc_dev *dev = &rf->sc_dev;\n+\n+\tdev->irq_ops->irdma_dis_irq(dev, msix_vec->idx);\n+\tirq_set_affinity_hint(msix_vec->irq, NULL);\n+\tfree_irq(msix_vec->irq, dev_id);\n+}\n+\n+/**\n+ * irdma_destroy_cqp - destroy control qp\n+ * @rf: RDMA PCI function\n+ * @free_hwcqp: 1 if hw cqp should be freed\n+ *\n+ * Issue destroy cqp request and\n+ * free the resources associated with the cqp\n+ */\n+static void irdma_destroy_cqp(struct irdma_pci_f *rf, bool free_hwcqp)\n+{\n+\tenum irdma_status_code status = 0;\n+\tstruct irdma_sc_dev *dev = &rf->sc_dev;\n+\tstruct irdma_cqp *cqp = &rf->cqp;\n+\n+\tif (free_hwcqp && dev->cqp_ops->cqp_destroy)\n+\t\tstatus = dev->cqp_ops->cqp_destroy(dev->cqp);\n+\tif (status)\n+\t\tdev_dbg(rfdev_to_dev(dev), \"ERR: Destroy CQP failed %d\\n\",\n+\t\t\tstatus);\n+\n+\tirdma_cleanup_pending_cqp_op(rf);\n+\tdma_free_coherent(hw_to_dev(dev->hw), cqp->sq.size, cqp->sq.va,\n+\t\t\t cqp->sq.pa);\n+\tcqp->sq.va = NULL;\n+\tkfree(cqp->scratch_array);\n+\tcqp->scratch_array = NULL;\n+\tkfree(cqp->cqp_requests);\n+\tcqp->cqp_requests = NULL;\n+}\n+\n+/**\n+ * irdma_destroy_aeq - destroy aeq\n+ * @rf: RDMA PCI function\n+ *\n+ * Issue a destroy aeq request and\n+ * free the resources associated with the aeq\n+ * The function is called during driver unload\n+ */\n+static void irdma_destroy_aeq(struct irdma_pci_f *rf)\n+{\n+\tenum irdma_status_code status = IRDMA_ERR_NOT_READY;\n+\tstruct irdma_sc_dev *dev = &rf->sc_dev;\n+\tstruct irdma_aeq *aeq = &rf->aeq;\n+\n+\tif (!rf->msix_shared)\n+\t\tirdma_destroy_irq(rf, rf->iw_msixtbl, (void *)rf);\n+\tif (rf->reset)\n+\t\tgoto exit;\n+\n+\tif (!dev->aeq_ops->aeq_destroy(&aeq->sc_aeq, 0, 1))\n+\t\tstatus = dev->aeq_ops->aeq_destroy_done(&aeq->sc_aeq);\n+\tif (status)\n+\t\tdev_dbg(rfdev_to_dev(dev), \"ERR: Destroy AEQ failed %d\\n\",\n+\t\t\tstatus);\n+\n+exit:\n+\tdma_free_coherent(hw_to_dev(dev->hw), aeq->mem.size, aeq->mem.va,\n+\t\t\t aeq->mem.pa);\n+\taeq->mem.va = NULL;\n+}\n+\n+/**\n+ * irdma_destroy_ceq - destroy ceq\n+ * @rf: RDMA PCI function\n+ * @iwceq: ceq to be destroyed\n+ *\n+ * Issue a destroy ceq request and\n+ * free the resources associated with the ceq\n+ */\n+static void irdma_destroy_ceq(struct irdma_pci_f *rf, struct irdma_ceq *iwceq)\n+{\n+\tenum irdma_status_code status;\n+\tstruct irdma_sc_dev *dev = &rf->sc_dev;\n+\n+\tif (rf->reset)\n+\t\tgoto exit;\n+\n+\tstatus = dev->ceq_ops->ceq_destroy(&iwceq->sc_ceq, 0, 1);\n+\tif (status) {\n+\t\tdev_dbg(rfdev_to_dev(dev),\n+\t\t\t\"ERR: CEQ destroy command failed %d\\n\", status);\n+\t\tgoto exit;\n+\t}\n+\n+\tstatus = dev->ceq_ops->cceq_destroy_done(&iwceq->sc_ceq);\n+\tif (status)\n+\t\tdev_dbg(rfdev_to_dev(dev),\n+\t\t\t\"ERR: CEQ destroy completion failed %d\\n\", status);\n+exit:\n+\tdma_free_coherent(hw_to_dev(dev->hw), iwceq->mem.size, iwceq->mem.va,\n+\t\t\t iwceq->mem.pa);\n+\tiwceq->mem.va = NULL;\n+}\n+\n+/**\n+ * irdma_del_ceq_0 - destroy ceq 0\n+ * @rf: RDMA PCI function\n+ *\n+ * Disable the ceq 0 interrupt and destroy the ceq 0\n+ 
*/\n+static void irdma_del_ceq_0(struct irdma_pci_f *rf)\n+{\n+\tstruct irdma_ceq *iwceq = rf->ceqlist;\n+\tstruct irdma_msix_vector *msix_vec;\n+\n+\tif (rf->msix_shared) {\n+\t\tmsix_vec = &rf->iw_msixtbl[0];\n+\t\tirdma_destroy_irq(rf, msix_vec, (void *)rf);\n+\t} else {\n+\t\tmsix_vec = &rf->iw_msixtbl[1];\n+\t\tirdma_destroy_irq(rf, msix_vec, (void *)iwceq);\n+\t}\n+\tirdma_destroy_ceq(rf, iwceq);\n+\trf->sc_dev.ceq_valid = false;\n+\trf->ceqs_count = 0;\n+}\n+\n+/**\n+ * irdma_del_ceqs - destroy all ceq's except CEQ 0 // RT mode FSL\n+ * @rf: RDMA PCI function\n+ *\n+ * Go through all of the device ceq's, except 0, and for each\n+ * ceq disable the ceq interrupt and destroy the ceq\n+ */\n+static void irdma_del_ceqs(struct irdma_pci_f *rf)\n+{\n+\tstruct irdma_ceq *iwceq = &rf->ceqlist[1];\n+\tstruct irdma_msix_vector *msix_vec;\n+\tu32 i = 0;\n+\n+\tif (rf->msix_shared)\n+\t\tmsix_vec = &rf->iw_msixtbl[1];\n+\telse\n+\t\tmsix_vec = &rf->iw_msixtbl[2];\n+\n+\tfor (i = 1; i < rf->ceqs_count; i++, msix_vec++, iwceq++) {\n+\t\tirdma_destroy_irq(rf, msix_vec, (void *)iwceq);\n+\t\tirdma_cqp_ceq_cmd(&rf->sc_dev, &iwceq->sc_ceq,\n+\t\t\t\t IRDMA_OP_CEQ_DESTROY);\n+\t\tdma_free_coherent(hw_to_dev(rf->sc_dev.hw), iwceq->mem.size,\n+\t\t\t\t iwceq->mem.va, iwceq->mem.pa);\n+\t\tiwceq->mem.va = NULL;\n+\t}\n+\trf->ceqs_count = 1;\n+}\n+\n+/**\n+ * irdma_destroy_ccq - destroy control cq\n+ * @rf: RDMA PCI function\n+ *\n+ * Issue destroy ccq request and\n+ * free the resources associated with the ccq\n+ */\n+static void irdma_destroy_ccq(struct irdma_pci_f *rf)\n+{\n+\tstruct irdma_sc_dev *dev = &rf->sc_dev;\n+\tstruct irdma_ccq *ccq = &rf->ccq;\n+\tenum irdma_status_code status = 0;\n+\n+\tif (!rf->reset)\n+\t\tstatus = dev->ccq_ops->ccq_destroy(dev->ccq, 0, true);\n+\tif (status)\n+\t\tdev_dbg(rfdev_to_dev(dev), \"ERR: CCQ destroy failed %d\\n\",\n+\t\t\tstatus);\n+\tdma_free_coherent(hw_to_dev(dev->hw), ccq->mem_cq.size,\n+\t\t\t ccq->mem_cq.va, ccq->mem_cq.pa);\n+\tccq->mem_cq.va = NULL;\n+}\n+\n+/**\n+ * irdma_close_hmc_objects_type - delete hmc objects of a given type\n+ * @dev: iwarp device\n+ * @obj_type: the hmc object type to be deleted\n+ * @hmc_info: host memory info struct\n+ * @is_pf: true if the function is PF otherwise false\n+ * @reset: true if called before reset\n+ */\n+static void irdma_close_hmc_objects_type(struct irdma_sc_dev *dev,\n+\t\t\t\t\t enum irdma_hmc_rsrc_type obj_type,\n+\t\t\t\t\t struct irdma_hmc_info *hmc_info,\n+\t\t\t\t\t bool is_pf, bool reset)\n+{\n+\tstruct irdma_hmc_del_obj_info info = {};\n+\n+\tinfo.hmc_info = hmc_info;\n+\tinfo.rsrc_type = obj_type;\n+\tinfo.count = hmc_info->hmc_obj[obj_type].cnt;\n+\tinfo.is_pf = is_pf;\n+\tif (dev->hmc_ops->del_hmc_object(dev, &info, reset))\n+\t\tdev_dbg(rfdev_to_dev(dev),\n+\t\t\t\"ERR: del HMC obj of type %d failed\\n\", obj_type);\n+}\n+\n+/**\n+ * irdma_del_hmc_objects - remove all device hmc objects\n+ * @dev: iwarp device\n+ * @hmc_info: hmc_info to free\n+ * @is_pf: true if hmc_info belongs to PF, not vf nor allocated\n+ *\t by PF on behalf of VF\n+ * @reset: true if called before reset\n+ * @vers: hardware version\n+ */\n+static void irdma_del_hmc_objects(struct irdma_sc_dev *dev,\n+\t\t\t\t struct irdma_hmc_info *hmc_info, bool is_pf,\n+\t\t\t\t bool reset, enum irdma_vers vers)\n+{\n+\tunsigned int i;\n+\n+\tfor (i = 0; i < IW_HMC_OBJ_TYPE_NUM; i++) {\n+\t\tif (dev->hmc_info->hmc_obj[iw_hmc_obj_types[i]].cnt)\n+\t\t\tirdma_close_hmc_objects_type(dev, iw_hmc_obj_types[i],\n+\t\t\t\t\t\t hmc_info, is_pf, 
reset);\n+\t\tif (vers == IRDMA_GEN_1 && i == IRDMA_HMC_IW_TIMER)\n+\t\t\tbreak;\n+\t}\n+}\n+\n+/**\n+ * irdma_create_hmc_obj_type - create hmc object of a given type\n+ * @dev: hardware control device structure\n+ * @info: information for the hmc object to create\n+ */\n+static enum irdma_status_code\n+irdma_create_hmc_obj_type(struct irdma_sc_dev *dev,\n+\t\t\t struct irdma_hmc_create_obj_info *info)\n+{\n+\treturn dev->hmc_ops->create_hmc_object(dev, info);\n+}\n+\n+/**\n+ * irdma_create_hmc_objs - create all hmc objects for the device\n+ * @rf: RDMA PCI function\n+ * @is_pf: true if the function is PF otherwise false\n+ * @vers: HW version\n+ *\n+ * Create the device hmc objects and allocate hmc pages\n+ * Return 0 if successful, otherwise clean up and return error\n+ */\n+static enum irdma_status_code\n+irdma_create_hmc_objs(struct irdma_pci_f *rf, bool is_pf, enum irdma_vers vers)\n+{\n+\tstruct irdma_sc_dev *dev = &rf->sc_dev;\n+\tstruct irdma_hmc_create_obj_info info = {};\n+\tenum irdma_status_code status = 0;\n+\tint i;\n+\n+\tinfo.hmc_info = dev->hmc_info;\n+\tinfo.is_pf = is_pf;\n+\tinfo.entry_type = rf->sd_type;\n+\n+\tfor (i = 0; i < IW_HMC_OBJ_TYPE_NUM; i++) {\n+\t\tif (dev->hmc_info->hmc_obj[iw_hmc_obj_types[i]].cnt) {\n+\t\t\tinfo.rsrc_type = iw_hmc_obj_types[i];\n+\t\t\tinfo.count = dev->hmc_info->hmc_obj[info.rsrc_type].cnt;\n+\t\t\tinfo.add_sd_cnt = 0;\n+\t\t\tstatus = irdma_create_hmc_obj_type(dev, &info);\n+\t\t\tif (status) {\n+\t\t\t\tdev_dbg(rfdev_to_dev(dev),\n+\t\t\t\t\t\"ERR: create obj type %d status = %d\\n\",\n+\t\t\t\t\tiw_hmc_obj_types[i], status);\n+\t\t\t\tbreak;\n+\t\t\t}\n+\t\t}\n+\t\tif (vers == IRDMA_GEN_1 && i == IRDMA_HMC_IW_TIMER)\n+\t\t\tbreak;\n+\t}\n+\n+\tif (!status)\n+\t\treturn dev->hmc_ops->static_hmc_pages_allocated(dev->cqp, 0,\n+\t\t\t\t\t\t\t\tdev->hmc_fn_id,\n+\t\t\t\t\t\t\t\ttrue, true);\n+\n+\twhile (i) {\n+\t\ti--;\n+\t\t/* destroy the hmc objects of a given type */\n+\t\tirdma_close_hmc_objects_type(dev, iw_hmc_obj_types[i],\n+\t\t\t\t\t dev->hmc_info, is_pf, false);\n+\t}\n+\n+\treturn status;\n+}\n+\n+/**\n+ * irdma_obj_aligned_mem - get aligned memory from device allocated memory\n+ * @rf: RDMA PCI function\n+ * @memptr: points to the memory addresses\n+ * @size: size of memory needed\n+ * @mask: mask for the aligned memory\n+ *\n+ * Get aligned memory of the requested size and\n+ * update the memptr to point to the new aligned memory\n+ * Return 0 if successful, otherwise return no memory error\n+ */\n+static enum irdma_status_code\n+irdma_obj_aligned_mem(struct irdma_pci_f *rf, struct irdma_dma_mem *memptr,\n+\t\t u32 size, u32 mask)\n+{\n+\tunsigned long va, newva;\n+\tunsigned long extra;\n+\n+\tva = (unsigned long)rf->obj_next.va;\n+\tnewva = va;\n+\tif (mask)\n+\t\tnewva = ALIGN(va, (unsigned long)mask + 1ULL);\n+\textra = newva - va;\n+\tmemptr->va = (u8 *)va + extra;\n+\tmemptr->pa = rf->obj_next.pa + extra;\n+\tmemptr->size = size;\n+\tif ((memptr->va + size) > (rf->obj_mem.va + rf->obj_mem.size))\n+\t\treturn IRDMA_ERR_NO_MEMORY;\n+\n+\trf->obj_next.va = memptr->va + size;\n+\trf->obj_next.pa = memptr->pa + size;\n+\n+\treturn 0;\n+}\n+\n+/**\n+ * irdma_create_cqp - create control qp\n+ * @rf: RDMA PCI function\n+ *\n+ * Return 0, if the cqp and all the resources associated with it\n+ * are successfully created, otherwise return error\n+ */\n+static enum irdma_status_code irdma_create_cqp(struct irdma_pci_f *rf)\n+{\n+\tenum irdma_status_code status;\n+\tu32 sqsize = IRDMA_CQP_SW_SQSIZE_2048;\n+\tstruct irdma_dma_mem 
mem;\n+\tstruct irdma_sc_dev *dev = &rf->sc_dev;\n+\tstruct irdma_cqp_init_info cqp_init_info = {};\n+\tstruct irdma_cqp *cqp = &rf->cqp;\n+\tu16 maj_err, min_err;\n+\tint i;\n+\n+\tcqp->cqp_requests = kcalloc(sqsize, sizeof(*cqp->cqp_requests), GFP_KERNEL);\n+\tif (!cqp->cqp_requests)\n+\t\treturn IRDMA_ERR_NO_MEMORY;\n+\n+\tcqp->scratch_array = kcalloc(sqsize, sizeof(*cqp->scratch_array), GFP_KERNEL);\n+\tif (!cqp->scratch_array) {\n+\t\tkfree(cqp->cqp_requests);\n+\t\treturn IRDMA_ERR_NO_MEMORY;\n+\t}\n+\n+\tdev->cqp = &cqp->sc_cqp;\n+\tdev->cqp->dev = dev;\n+\tcqp->sq.size = ALIGN(sizeof(struct irdma_cqp_sq_wqe) * sqsize,\n+\t\t\t IRDMA_CQP_ALIGNMENT);\n+\tcqp->sq.va = dma_alloc_coherent(hw_to_dev(dev->hw), cqp->sq.size,\n+\t\t\t\t\t&cqp->sq.pa, GFP_KERNEL);\n+\tif (!cqp->sq.va) {\n+\t\tkfree(cqp->scratch_array);\n+\t\tkfree(cqp->cqp_requests);\n+\t\treturn IRDMA_ERR_NO_MEMORY;\n+\t}\n+\n+\tstatus = irdma_obj_aligned_mem(rf, &mem, sizeof(struct irdma_cqp_ctx),\n+\t\t\t\t IRDMA_HOST_CTX_ALIGNMENT_M);\n+\tif (status)\n+\t\tgoto exit;\n+\n+\tdev->cqp->host_ctx_pa = mem.pa;\n+\tdev->cqp->host_ctx = mem.va;\n+\t/* populate the cqp init info */\n+\tcqp_init_info.dev = dev;\n+\tcqp_init_info.sq_size = sqsize;\n+\tcqp_init_info.sq = cqp->sq.va;\n+\tcqp_init_info.sq_pa = cqp->sq.pa;\n+\tcqp_init_info.host_ctx_pa = mem.pa;\n+\tcqp_init_info.host_ctx = mem.va;\n+\tcqp_init_info.hmc_profile = rf->rsrc_profile;\n+\tcqp_init_info.ena_vf_count = rf->max_rdma_vfs;\n+\tcqp_init_info.scratch_array = cqp->scratch_array;\n+\tcqp_init_info.disable_packed = true;\n+\tcqp_init_info.protocol_used = rf->protocol_used;\n+\tstatus = dev->cqp_ops->cqp_init(dev->cqp, &cqp_init_info);\n+\tif (status) {\n+\t\tdev_dbg(rfdev_to_dev(dev), \"ERR: cqp init status %d\\n\",\n+\t\t\tstatus);\n+\t\tgoto exit;\n+\t}\n+\n+\tstatus = dev->cqp_ops->cqp_create(dev->cqp, &maj_err, &min_err);\n+\tif (status) {\n+\t\tdev_dbg(rfdev_to_dev(dev),\n+\t\t\t\"ERR: cqp create failed - status %d maj_err %d min_err %d\\n\",\n+\t\t\tstatus, maj_err, min_err);\n+\t\tgoto exit;\n+\t}\n+\n+\tspin_lock_init(&cqp->req_lock);\n+\tspin_lock_init(&cqp->compl_lock);\n+\tINIT_LIST_HEAD(&cqp->cqp_avail_reqs);\n+\tINIT_LIST_HEAD(&cqp->cqp_pending_reqs);\n+\tsema_init(&cqp->cqp_compl_sem, 0);\n+\n+\t/* init the waitqueue of the cqp_requests and add them to the list */\n+\tfor (i = 0; i < sqsize; i++) {\n+\t\tinit_waitqueue_head(&cqp->cqp_requests[i].waitq);\n+\t\tlist_add_tail(&cqp->cqp_requests[i].list, &cqp->cqp_avail_reqs);\n+\t}\n+\tinit_waitqueue_head(&cqp->remove_wq);\n+\treturn 0;\n+\n+exit:\n+\tirdma_destroy_cqp(rf, false);\n+\n+\treturn status;\n+}\n+\n+/**\n+ * irdma_create_ccq - create control cq\n+ * @rf: RDMA PCI function\n+ *\n+ * Return 0, if the ccq and the resources associated with it\n+ * are successfully created, otherwise return error\n+ */\n+static enum irdma_status_code irdma_create_ccq(struct irdma_pci_f *rf)\n+{\n+\tstruct irdma_sc_dev *dev = &rf->sc_dev;\n+\tenum irdma_status_code status;\n+\tstruct irdma_ccq_init_info info = {};\n+\tstruct irdma_ccq *ccq = &rf->ccq;\n+\n+\tdev->ccq = &ccq->sc_cq;\n+\tdev->ccq->dev = dev;\n+\tinfo.dev = dev;\n+\tccq->shadow_area.size = sizeof(struct irdma_cq_shadow_area);\n+\tccq->mem_cq.size = ALIGN(sizeof(struct irdma_cqe) * IW_CCQ_SIZE,\n+\t\t\t\t IRDMA_CQ0_ALIGNMENT);\n+\tccq->mem_cq.va = dma_alloc_coherent(hw_to_dev(dev->hw),\n+\t\t\t\t\t ccq->mem_cq.size, &ccq->mem_cq.pa,\n+\t\t\t\t\t GFP_KERNEL);\n+\tif (!ccq->mem_cq.va)\n+\t\treturn IRDMA_ERR_NO_MEMORY;\n+\n+\tstatus = 
irdma_obj_aligned_mem(rf, &ccq->shadow_area,\n+\t\t\t\t ccq->shadow_area.size,\n+\t\t\t\t IRDMA_SHADOWAREA_M);\n+\tif (status)\n+\t\tgoto exit;\n+\n+\tccq->sc_cq.back_cq = (void *)ccq;\n+\t/* populate the ccq init info */\n+\tinfo.cq_base = ccq->mem_cq.va;\n+\tinfo.cq_pa = ccq->mem_cq.pa;\n+\tinfo.num_elem = IW_CCQ_SIZE;\n+\tinfo.shadow_area = ccq->shadow_area.va;\n+\tinfo.shadow_area_pa = ccq->shadow_area.pa;\n+\tinfo.ceqe_mask = false;\n+\tinfo.ceq_id_valid = true;\n+\tinfo.shadow_read_threshold = 16;\n+\tinfo.vsi = &rf->default_vsi;\n+\tstatus = dev->ccq_ops->ccq_init(dev->ccq, &info);\n+\tif (!status)\n+\t\tstatus = dev->ccq_ops->ccq_create(dev->ccq, 0, true, true);\n+exit:\n+\tif (status) {\n+\t\tdma_free_coherent(hw_to_dev(dev->hw), ccq->mem_cq.size,\n+\t\t\t\t ccq->mem_cq.va, ccq->mem_cq.pa);\n+\t\tccq->mem_cq.va = NULL;\n+\t}\n+\n+\treturn status;\n+}\n+\n+/**\n+ * irdma_alloc_set_mac - set up a mac address table entry\n+ * @iwdev: device\n+ *\n+ * Allocate a mac ip entry and add it to the hw table Return 0\n+ * if successful, otherwise return error\n+ */\n+static enum irdma_status_code irdma_alloc_set_mac(struct irdma_device *iwdev)\n+{\n+\tenum irdma_status_code status;\n+\n+\tstatus = irdma_alloc_local_mac_entry(iwdev->rf,\n+\t\t\t\t\t &iwdev->mac_ip_table_idx);\n+\tif (!status) {\n+\t\tstatus = irdma_add_local_mac_entry(iwdev->rf,\n+\t\t\t\t\t\t (u8 *)iwdev->netdev->dev_addr,\n+\t\t\t\t\t\t (u8)iwdev->mac_ip_table_idx);\n+\t\tif (status)\n+\t\t\tirdma_del_local_mac_entry(iwdev->rf,\n+\t\t\t\t\t\t (u8)iwdev->mac_ip_table_idx);\n+\t}\n+\treturn status;\n+}\n+\n+/**\n+ * irdma_configure_ceq_vector - set up the msix interrupt vector for ceq\n+ * @rf: RDMA PCI function\n+ * @iwceq: ceq associated with the vector\n+ * @ceq_id: the id number of the iwceq\n+ * @msix_vec: interrupt vector information\n+ *\n+ * Allocate interrupt resources and enable irq handling\n+ * Return 0 if successful, otherwise return error\n+ */\n+static enum irdma_status_code\n+irdma_cfg_ceq_vector(struct irdma_pci_f *rf, struct irdma_ceq *iwceq,\n+\t\t u32 ceq_id, struct irdma_msix_vector *msix_vec)\n+{\n+\tint status;\n+\n+\tif (rf->msix_shared && !ceq_id) {\n+\t\ttasklet_init(&rf->dpc_tasklet, irdma_dpc, (unsigned long)rf);\n+\t\tstatus = request_irq(msix_vec->irq, irdma_irq_handler, 0,\n+\t\t\t\t \"AEQCEQ\", rf);\n+\t} else {\n+\t\ttasklet_init(&iwceq->dpc_tasklet, irdma_ceq_dpc,\n+\t\t\t (unsigned long)iwceq);\n+\n+\t\tstatus = request_irq(msix_vec->irq, irdma_ceq_handler, 0, \"CEQ\",\n+\t\t\t\t iwceq);\n+\t}\n+\n+\tcpumask_clear(&msix_vec->mask);\n+\tcpumask_set_cpu(msix_vec->cpu_affinity, &msix_vec->mask);\n+\tirq_set_affinity_hint(msix_vec->irq, &msix_vec->mask);\n+\tif (status) {\n+\t\tdev_dbg(rfdev_to_dev(&rf->sc_dev),\n+\t\t\t\"ERR: ceq irq config fail\\n\");\n+\t\treturn IRDMA_ERR_CFG;\n+\t}\n+\n+\tmsix_vec->ceq_id = ceq_id;\n+\trf->sc_dev.irq_ops->irdma_cfg_ceq(&rf->sc_dev, ceq_id, msix_vec->idx);\n+\n+\treturn 0;\n+}\n+\n+/**\n+ * irdma_configure_aeq_vector - set up the msix vector for aeq\n+ * @rf: RDMA PCI function\n+ *\n+ * Allocate interrupt resources and enable irq handling\n+ * Return 0 if successful, otherwise return error\n+ */\n+static enum irdma_status_code irdma_cfg_aeq_vector(struct irdma_pci_f *rf)\n+{\n+\tstruct irdma_msix_vector *msix_vec = rf->iw_msixtbl;\n+\tu32 ret = 0;\n+\n+\tif (!rf->msix_shared) {\n+\t\ttasklet_init(&rf->dpc_tasklet, irdma_dpc, (unsigned long)rf);\n+\t\tret = request_irq(msix_vec->irq, irdma_irq_handler, 0, \"irdma\",\n+\t\t\t\t rf);\n+\t}\n+\tif (ret) 
{\n+\t\tdev_dbg(rfdev_to_dev(&rf->sc_dev),\n+\t\t\t\"ERR: aeq irq config fail\\n\");\n+\t\treturn IRDMA_ERR_CFG;\n+\t}\n+\n+\trf->sc_dev.irq_ops->irdma_cfg_aeq(&rf->sc_dev, msix_vec->idx);\n+\n+\treturn 0;\n+}\n+\n+/**\n+ * irdma_create_ceq - create completion event queue\n+ * @rf: RDMA PCI function\n+ * @iwceq: pointer to the ceq resources to be created\n+ * @ceq_id: the id number of the iwceq\n+ * @vsi: SC vsi struct\n+ *\n+ * Return 0, if the ceq and the resources associated with it\n+ * are successfully created, otherwise return error\n+ */\n+static enum irdma_status_code irdma_create_ceq(struct irdma_pci_f *rf,\n+\t\t\t\t\t struct irdma_ceq *iwceq,\n+\t\t\t\t\t u32 ceq_id,\n+\t\t\t\t\t struct irdma_sc_vsi *vsi)\n+{\n+\tenum irdma_status_code status;\n+\tstruct irdma_ceq_init_info info = {};\n+\tstruct irdma_sc_dev *dev = &rf->sc_dev;\n+\tu64 scratch;\n+\n+\tinfo.ceq_id = ceq_id;\n+\tiwceq->rf = rf;\n+\tiwceq->mem.size = ALIGN(sizeof(struct irdma_ceqe) * rf->sc_dev.hmc_info->hmc_obj[IRDMA_HMC_IW_CQ].cnt,\n+\t\t\t\tIRDMA_CEQ_ALIGNMENT);\n+\tiwceq->mem.va = dma_alloc_coherent(hw_to_dev(dev->hw),\n+\t\t\t\t\t iwceq->mem.size, &iwceq->mem.pa,\n+\t\t\t\t\t GFP_KERNEL);\n+\tif (!iwceq->mem.va)\n+\t\treturn IRDMA_ERR_NO_MEMORY;\n+\n+\tinfo.ceq_id = ceq_id;\n+\tinfo.ceqe_base = iwceq->mem.va;\n+\tinfo.ceqe_pa = iwceq->mem.pa;\n+\tinfo.elem_cnt = rf->sc_dev.hmc_info->hmc_obj[IRDMA_HMC_IW_CQ].cnt;\n+\tiwceq->sc_ceq.ceq_id = ceq_id;\n+\tinfo.dev = dev;\n+\tinfo.vsi = vsi;\n+\tscratch = (uintptr_t)&rf->cqp.sc_cqp;\n+\tstatus = dev->ceq_ops->ceq_init(&iwceq->sc_ceq, &info);\n+\tif (!status) {\n+\t\tif (dev->ceq_valid)\n+\t\t\tstatus = irdma_cqp_ceq_cmd(&rf->sc_dev, &iwceq->sc_ceq,\n+\t\t\t\t\t\t IRDMA_OP_CEQ_CREATE);\n+\t\telse\n+\t\t\tstatus = dev->ceq_ops->cceq_create(&iwceq->sc_ceq,\n+\t\t\t\t\t\t\t scratch);\n+\t}\n+\n+\tif (status) {\n+\t\tdma_free_coherent(hw_to_dev(dev->hw), iwceq->mem.size,\n+\t\t\t\t iwceq->mem.va, iwceq->mem.pa);\n+\t\tiwceq->mem.va = NULL;\n+\t}\n+\n+\treturn status;\n+}\n+\n+/**\n+ * irdma_setup_ceq_0 - create CEQ 0 and it's interrupt resource\n+ * @rf: RDMA PCI function\n+ *\n+ * Allocate a list for all device completion event queues\n+ * Create the ceq 0 and configure it's msix interrupt vector\n+ * Return 0, if successfully set up, otherwise return error\n+ */\n+static enum irdma_status_code irdma_setup_ceq_0(struct irdma_pci_f *rf)\n+{\n+\tu32 i;\n+\tstruct irdma_ceq *iwceq;\n+\tstruct irdma_msix_vector *msix_vec;\n+\tenum irdma_status_code status = 0;\n+\tu32 num_ceqs;\n+\n+\tnum_ceqs = min(rf->msix_count, rf->sc_dev.hmc_fpm_misc.max_ceqs);\n+\trf->ceqlist = kcalloc(num_ceqs, sizeof(*rf->ceqlist), GFP_KERNEL);\n+\tif (!rf->ceqlist) {\n+\t\tstatus = IRDMA_ERR_NO_MEMORY;\n+\t\tgoto exit;\n+\t}\n+\n+\ti = rf->msix_shared ? 
0 : 1;\n+\tiwceq = &rf->ceqlist[0];\n+\tstatus = irdma_create_ceq(rf, iwceq, 0, &rf->default_vsi);\n+\tif (status) {\n+\t\tdev_dbg(rfdev_to_dev(&rf->sc_dev),\n+\t\t\t\"ERR: create ceq status = %d\\n\", status);\n+\t\tgoto exit;\n+\t}\n+\n+\tmsix_vec = &rf->iw_msixtbl[i];\n+\tiwceq->irq = msix_vec->irq;\n+\tiwceq->msix_idx = msix_vec->idx;\n+\tstatus = irdma_cfg_ceq_vector(rf, iwceq, 0, msix_vec);\n+\tif (status) {\n+\t\tirdma_destroy_ceq(rf, iwceq);\n+\t\tgoto exit;\n+\t}\n+\n+\tirdma_ena_intr(&rf->sc_dev, msix_vec->idx);\n+\trf->ceqs_count++;\n+\n+exit:\n+\tif (status && !rf->ceqs_count) {\n+\t\tkfree(rf->ceqlist);\n+\t\trf->ceqlist = NULL;\n+\t\treturn status;\n+\t}\n+\trf->sc_dev.ceq_valid = true;\n+\n+\treturn 0;\n+}\n+\n+/**\n+ * irdma_setup_ceqs - manage the device ceq's and their interrupt resources\n+ * @rf: RDMA PCI function\n+ * @vsi: VSI structure for this CEQ\n+ *\n+ * Allocate a list for all device completion event queues\n+ * Create the ceq's and configure their msix interrupt vectors\n+ * Return 0, if at least one ceq is successfully set up, otherwise return error\n+ */\n+static enum irdma_status_code irdma_setup_ceqs(struct irdma_pci_f *rf,\n+\t\t\t\t\t struct irdma_sc_vsi *vsi)\n+{\n+\tu32 i;\n+\tu32 ceq_id;\n+\tstruct irdma_ceq *iwceq;\n+\tstruct irdma_msix_vector *msix_vec;\n+\tenum irdma_status_code status = 0;\n+\tu32 num_ceqs;\n+\n+\tnum_ceqs = min(rf->msix_count, rf->sc_dev.hmc_fpm_misc.max_ceqs);\n+\ti = (rf->msix_shared) ? 1 : 2;\n+\tfor (ceq_id = 1; i < num_ceqs; i++, ceq_id++) {\n+\t\tiwceq = &rf->ceqlist[ceq_id];\n+\t\tstatus = irdma_create_ceq(rf, iwceq, ceq_id, vsi);\n+\t\tif (status) {\n+\t\t\tdev_dbg(rfdev_to_dev(&rf->sc_dev),\n+\t\t\t\t\"ERR: create ceq status = %d\\n\", status);\n+\t\t\tbreak;\n+\t\t}\n+\t\tmsix_vec = &rf->iw_msixtbl[i];\n+\t\tiwceq->irq = msix_vec->irq;\n+\t\tiwceq->msix_idx = msix_vec->idx;\n+\t\tstatus = irdma_cfg_ceq_vector(rf, iwceq, ceq_id, msix_vec);\n+\t\tif (status) {\n+\t\t\tirdma_destroy_ceq(rf, iwceq);\n+\t\t\tbreak;\n+\t\t}\n+\t\tirdma_ena_intr(&rf->sc_dev, msix_vec->idx);\n+\t\trf->ceqs_count++;\n+\t}\n+\n+\treturn status;\n+}\n+\n+/**\n+ * irdma_create_aeq - create async event queue\n+ * @rf: RDMA PCI function\n+ *\n+ * Return 0, if the aeq and the resources associated with it\n+ * are successfully created, otherwise return error\n+ */\n+static enum irdma_status_code irdma_create_aeq(struct irdma_pci_f *rf)\n+{\n+\tenum irdma_status_code status;\n+\tstruct irdma_aeq_init_info info = {};\n+\tstruct irdma_sc_dev *dev = &rf->sc_dev;\n+\tstruct irdma_aeq *aeq = &rf->aeq;\n+\tstruct irdma_hmc_info *hmc_info = rf->sc_dev.hmc_info;\n+\tu64 scratch = 0;\n+\tu32 aeq_size;\n+\n+\taeq_size = 2 * hmc_info->hmc_obj[IRDMA_HMC_IW_QP].cnt +\n+\t\t hmc_info->hmc_obj[IRDMA_HMC_IW_CQ].cnt;\n+\taeq->mem.size = ALIGN(sizeof(struct irdma_sc_aeqe) * aeq_size,\n+\t\t\t IRDMA_AEQ_ALIGNMENT);\n+\taeq->mem.va = dma_alloc_coherent(hw_to_dev(dev->hw), aeq->mem.size,\n+\t\t\t\t\t &aeq->mem.pa, GFP_KERNEL);\n+\tif (!aeq->mem.va)\n+\t\treturn IRDMA_ERR_NO_MEMORY;\n+\n+\tinfo.aeqe_base = aeq->mem.va;\n+\tinfo.aeq_elem_pa = aeq->mem.pa;\n+\tinfo.elem_cnt = aeq_size;\n+\tinfo.dev = dev;\n+\tstatus = dev->aeq_ops->aeq_init(&aeq->sc_aeq, &info);\n+\tif (status)\n+\t\tgoto exit;\n+\n+\tstatus = dev->aeq_ops->aeq_create(&aeq->sc_aeq, scratch, 1);\n+\tif (!status)\n+\t\tstatus = dev->aeq_ops->aeq_create_done(&aeq->sc_aeq);\n+exit:\n+\tif (status) {\n+\t\tdma_free_coherent(hw_to_dev(dev->hw), aeq->mem.size,\n+\t\t\t\t aeq->mem.va, aeq->mem.pa);\n+\t\taeq->mem.va = 
NULL;\n+\t}\n+\n+\treturn status;\n+}\n+\n+/**\n+ * irdma_setup_aeq - set up the device aeq\n+ * @rf: RDMA PCI function\n+ *\n+ * Create the aeq and configure its msix interrupt vector\n+ * Return 0 if successful, otherwise return error\n+ */\n+static enum irdma_status_code irdma_setup_aeq(struct irdma_pci_f *rf)\n+{\n+\tstruct irdma_sc_dev *dev = &rf->sc_dev;\n+\tenum irdma_status_code status;\n+\n+\tstatus = irdma_create_aeq(rf);\n+\tif (status)\n+\t\treturn status;\n+\n+\tstatus = irdma_cfg_aeq_vector(rf);\n+\tif (status) {\n+\t\tirdma_destroy_aeq(rf);\n+\t\treturn status;\n+\t}\n+\n+\tif (!rf->msix_shared)\n+\t\tirdma_ena_intr(dev, rf->iw_msixtbl[0].idx);\n+\n+\treturn 0;\n+}\n+\n+/**\n+ * irdma_initialize_ilq - create iwarp local queue for cm\n+ * @iwdev: iwarp device\n+ *\n+ * Return 0 if successful, otherwise return error\n+ */\n+static enum irdma_status_code irdma_initialize_ilq(struct irdma_device *iwdev)\n+{\n+\tstruct irdma_puda_rsrc_info info = {};\n+\tenum irdma_status_code status;\n+\n+\tinfo.type = IRDMA_PUDA_RSRC_TYPE_ILQ;\n+\tinfo.cq_id = 1;\n+\tinfo.qp_id = 1;\n+\tinfo.count = 1;\n+\tinfo.pd_id = 1;\n+\tinfo.sq_size = min(iwdev->rf->max_qp / 2, (u32)32768);\n+\tinfo.rq_size = info.sq_size;\n+\tinfo.buf_size = 1024;\n+\tinfo.tx_buf_cnt = 2 * info.sq_size;\n+\tinfo.receive = irdma_receive_ilq;\n+\tinfo.xmit_complete = irdma_free_sqbuf;\n+\tstatus = irdma_puda_create_rsrc(&iwdev->vsi, &info);\n+\tif (status)\n+\t\tdev_dbg(rfdev_to_dev(&iwdev->rf->sc_dev),\n+\t\t\t\"ERR: ilq create fail\\n\");\n+\n+\treturn status;\n+}\n+\n+/**\n+ * irdma_initialize_ieq - create iwarp exception queue\n+ * @iwdev: iwarp device\n+ *\n+ * Return 0 if successful, otherwise return error\n+ */\n+static enum irdma_status_code irdma_initialize_ieq(struct irdma_device *iwdev)\n+{\n+\tstruct irdma_puda_rsrc_info info = {};\n+\tenum irdma_status_code status;\n+\n+\tinfo.type = IRDMA_PUDA_RSRC_TYPE_IEQ;\n+\tinfo.cq_id = 2;\n+\tinfo.qp_id = iwdev->vsi.exception_lan_q;\n+\tinfo.count = 1;\n+\tinfo.pd_id = 2;\n+\tinfo.sq_size = min(iwdev->rf->max_qp / 2, (u32)32768);\n+\tinfo.rq_size = info.sq_size;\n+\tinfo.buf_size = iwdev->vsi.mtu + IRDMA_IPV4_PAD;\n+\tinfo.tx_buf_cnt = 4096;\n+\tstatus = irdma_puda_create_rsrc(&iwdev->vsi, &info);\n+\tif (status)\n+\t\tdev_dbg(rfdev_to_dev(&iwdev->rf->sc_dev),\n+\t\t\t\"ERR: ieq create fail\\n\");\n+\n+\treturn status;\n+}\n+\n+/**\n+ * irdma_reinitialize_ieq - destroy and re-create ieq\n+ * @vsi: VSI structure\n+ */\n+void irdma_reinitialize_ieq(struct irdma_sc_vsi *vsi)\n+{\n+\tstruct irdma_device *iwdev = vsi->back_vsi;\n+\n+\tirdma_puda_dele_rsrc(vsi, IRDMA_PUDA_RSRC_TYPE_IEQ, false);\n+\tif (irdma_initialize_ieq(iwdev)) {\n+\t\tiwdev->reset = true;\n+\t\tirdma_request_reset(iwdev->rf);\n+\t}\n+}\n+\n+/**\n+ * irdma_hmc_setup - create hmc objects for the device\n+ * @rf: RDMA PCI function\n+ *\n+ * Set up the device private memory space for the number and size of\n+ * the hmc objects and create the objects\n+ * Return 0 if successful, otherwise return error\n+ */\n+static enum irdma_status_code irdma_hmc_setup(struct irdma_pci_f *rf)\n+{\n+\tenum irdma_status_code status;\n+\tu32 qpcnt;\n+\n+\tif (rf->rdma_ver == IRDMA_GEN_1)\n+\t\tqpcnt = rsrc_limits_table[rf->limits_sel].qplimit * 2;\n+\telse\n+\t\tqpcnt = rsrc_limits_table[rf->limits_sel].qplimit;\n+\n+\trf->sd_type = IRDMA_SD_TYPE_DIRECT;\n+\tstatus = irdma_cfg_fpm_val(&rf->sc_dev, qpcnt);\n+\tif (status)\n+\t\treturn status;\n+\n+\tstatus = irdma_create_hmc_objs(rf, true, rf->rdma_ver);\n+\n+\treturn 
status;\n+}\n+\n+/**\n+ * irdma_del_init_mem - deallocate memory resources\n+ * @rf: RDMA PCI function\n+ */\n+static void irdma_del_init_mem(struct irdma_pci_f *rf)\n+{\n+\tstruct irdma_sc_dev *dev = &rf->sc_dev;\n+\n+\tkfree(dev->hmc_info->sd_table.sd_entry);\n+\tdev->hmc_info->sd_table.sd_entry = NULL;\n+\tkfree(rf->mem_rsrc);\n+\trf->mem_rsrc = NULL;\n+\tdma_free_coherent(hw_to_dev(&rf->hw), rf->obj_mem.size,\n+\t\t\t rf->obj_mem.va, rf->obj_mem.pa);\n+\trf->obj_mem.va = NULL;\n+\tif (rf->rdma_ver != IRDMA_GEN_1) {\n+\t\tkfree(rf->allocated_ws_nodes);\n+\t\trf->allocated_ws_nodes = NULL;\n+\t}\n+\tkfree(rf->ceqlist);\n+\trf->ceqlist = NULL;\n+\tkfree(rf->iw_msixtbl);\n+\trf->iw_msixtbl = NULL;\n+\tkfree(rf->hmc_info_mem);\n+\trf->hmc_info_mem = NULL;\n+}\n+\n+/**\n+ * irdma_initialize_dev - initialize device\n+ * @rf: RDMA PCI function\n+ * @ldev: lan device information\n+ *\n+ * Allocate memory for the hmc objects and initialize iwdev\n+ * Return 0 if successful, otherwise clean up the resources\n+ * and return error\n+ */\n+static enum irdma_status_code irdma_initialize_dev(struct irdma_pci_f *rf,\n+\t\t\t\t\t\t struct irdma_priv_ldev *ldev)\n+{\n+\tenum irdma_status_code status;\n+\tstruct irdma_sc_dev *dev = &rf->sc_dev;\n+\tstruct irdma_device_init_info info = {};\n+\tstruct irdma_dma_mem mem;\n+\tu32 size;\n+\n+\tsize = sizeof(struct irdma_hmc_pble_rsrc) +\n+\t sizeof(struct irdma_hmc_info) +\n+\t (sizeof(struct irdma_hmc_obj_info) * IRDMA_HMC_IW_MAX);\n+\n+\trf->hmc_info_mem = kzalloc(size, GFP_KERNEL);\n+\tif (!rf->hmc_info_mem)\n+\t\treturn IRDMA_ERR_NO_MEMORY;\n+\n+\trf->pble_rsrc = (struct irdma_hmc_pble_rsrc *)rf->hmc_info_mem;\n+\tdev->hmc_info = &rf->hw.hmc;\n+\tdev->hmc_info->hmc_obj = (struct irdma_hmc_obj_info *)\n+\t\t\t\t (rf->pble_rsrc + 1);\n+\n+\tstatus = irdma_obj_aligned_mem(rf, &mem, IRDMA_QUERY_FPM_BUF_SIZE,\n+\t\t\t\t IRDMA_FPM_QUERY_BUF_ALIGNMENT_M);\n+\tif (status)\n+\t\tgoto error;\n+\n+\tinfo.fpm_query_buf_pa = mem.pa;\n+\tinfo.fpm_query_buf = mem.va;\n+\tinfo.init_hw = rf->init_hw;\n+\n+\tstatus = irdma_obj_aligned_mem(rf, &mem, IRDMA_COMMIT_FPM_BUF_SIZE,\n+\t\t\t\t IRDMA_FPM_COMMIT_BUF_ALIGNMENT_M);\n+\tif (status)\n+\t\tgoto error;\n+\n+\tinfo.fpm_commit_buf_pa = mem.pa;\n+\tinfo.fpm_commit_buf = mem.va;\n+\n+\tinfo.bar0 = rf->hw.hw_addr;\n+\tinfo.hmc_fn_id = (u8)ldev->fn_num;\n+\tinfo.is_pf = !ldev->ftype;\n+\tinfo.hw = &rf->hw;\n+\tinfo.vchnl_send = NULL;\n+\tstatus = irdma_sc_ctrl_init(rf->rdma_ver, &rf->sc_dev, &info);\n+\tif (status)\n+\t\tgoto error;\n+\n+\treturn status;\n+error:\n+\tkfree(rf->hmc_info_mem);\n+\trf->hmc_info_mem = NULL;\n+\n+\treturn status;\n+}\n+\n+/**\n+ * irdma_deinit_rt_device - clean up the device resources\n+ * @iwdev: iwarp device\n+ *\n+ * Destroy the ib device interface, remove the mac ip entry and\n+ * ipv4/ipv6 addresses, destroy the device queues and free the\n+ * pble and the hmc objects\n+ */\n+void irdma_deinit_rt_device(struct irdma_device *iwdev)\n+{\n+\tdev_info(rfdev_to_dev(&iwdev->rf->sc_dev), \"state = %d\\n\",\n+\t\t iwdev->init_state);\n+\n+\tswitch (iwdev->init_state) {\n+\tcase RDMA_DEV_REGISTERED:\n+\t\tiwdev->iw_status = 0;\n+\t\tirdma_port_ibevent(iwdev);\n+\t\tirdma_destroy_rdma_device(iwdev->iwibdev);\n+\t\t/* fallthrough */\n+\tcase IP_ADDR_REGISTERED:\n+\t\tif (iwdev->rf->sc_dev.hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1)\n+\t\t\tirdma_del_local_mac_entry(iwdev->rf,\n+\t\t\t\t\t\t (u8)iwdev->mac_ip_table_idx);\n+\t\t/* fallthrough */\n+\tcase PBLE_CHUNK_MEM:\n+\t\t/* fallthrough */\n+\tcase 
CEQS_CREATED:\n+\t\t/* fallthrough */\n+\tcase IEQ_CREATED:\n+\t\tirdma_puda_dele_rsrc(&iwdev->vsi, IRDMA_PUDA_RSRC_TYPE_IEQ,\n+\t\t\t\t iwdev->reset);\n+\t\t/* fallthrough */\n+\tcase ILQ_CREATED:\n+\t\tif (iwdev->create_ilq)\n+\t\t\tirdma_puda_dele_rsrc(&iwdev->vsi,\n+\t\t\t\t\t IRDMA_PUDA_RSRC_TYPE_ILQ,\n+\t\t\t\t\t iwdev->reset);\n+\t\tbreak;\n+\tdefault:\n+\t\tdev_warn(rfdev_to_dev(&iwdev->rf->sc_dev),\n+\t\t\t \"bad init_state = %d\\n\", iwdev->init_state);\n+\t\tbreak;\n+\t}\n+\n+\tirdma_cleanup_cm_core(&iwdev->cm_core);\n+\tif (iwdev->vsi.pestat) {\n+\t\tirdma_vsi_stats_free(&iwdev->vsi);\n+\t\tkfree(iwdev->vsi.pestat);\n+\t}\n+}\n+\n+/**\n+ * irdma_setup_init_state - set up the initial device struct\n+ * @rf: RDMA PCI function\n+ *\n+ * Initialize the iwarp device and its hdl information\n+ * using the ldev and client information\n+ * Return 0 if successful, otherwise return error\n+ */\n+static enum irdma_status_code irdma_setup_init_state(struct irdma_pci_f *rf)\n+{\n+\tstruct irdma_priv_ldev *ldev = &rf->ldev;\n+\tenum irdma_status_code status;\n+\n+\tstatus = irdma_save_msix_info(rf);\n+\tif (status)\n+\t\treturn status;\n+\n+\trf->hw.pdev = rf->pdev;\n+\trf->obj_mem.size = ALIGN(8192, IRDMA_HW_PAGE_SIZE);\n+\trf->obj_mem.va = dma_alloc_coherent(hw_to_dev(&rf->hw),\n+\t\t\t\t\t rf->obj_mem.size, &rf->obj_mem.pa,\n+\t\t\t\t\t GFP_KERNEL);\n+\tif (!rf->obj_mem.va) {\n+\t\tkfree(rf->iw_msixtbl);\n+\t\trf->iw_msixtbl = NULL;\n+\t\treturn IRDMA_ERR_NO_MEMORY;\n+\t}\n+\n+\trf->obj_next = rf->obj_mem;\n+\trf->ooo = false;\n+\tinit_waitqueue_head(&rf->vchnl_waitq);\n+\n+\tstatus = irdma_initialize_dev(rf, ldev);\n+\tif (status) {\n+\t\tkfree(rf->iw_msixtbl);\n+\t\tdma_free_coherent(hw_to_dev(&rf->hw), rf->obj_mem.size,\n+\t\t\t\t rf->obj_mem.va, rf->obj_mem.pa);\n+\t\trf->obj_mem.va = NULL;\n+\t\trf->iw_msixtbl = NULL;\n+\t}\n+\n+\treturn status;\n+}\n+\n+/**\n+ * irdma_get_used_rsrc - determine resources used internally\n+ * @iwdev: iwarp device\n+ *\n+ * Called at the end of open to get all internal allocations\n+ */\n+static void irdma_get_used_rsrc(struct irdma_device *iwdev)\n+{\n+\tiwdev->rf->used_pds = find_next_zero_bit(iwdev->rf->allocated_pds,\n+\t\t\t\t\t\t iwdev->rf->max_pd, 0);\n+\tiwdev->rf->used_qps = find_next_zero_bit(iwdev->rf->allocated_qps,\n+\t\t\t\t\t\t iwdev->rf->max_qp, 0);\n+\tiwdev->rf->used_cqs = find_next_zero_bit(iwdev->rf->allocated_cqs,\n+\t\t\t\t\t\t iwdev->rf->max_cq, 0);\n+\tiwdev->rf->used_mrs = find_next_zero_bit(iwdev->rf->allocated_mrs,\n+\t\t\t\t\t\t iwdev->rf->max_mr, 0);\n+}\n+\n+/**\n+ * irdma_deinit_hw - De-initializes RDMA HW\n+ * @rf: RDMA device information\n+ *\n+ */\n+void irdma_deinit_ctrl_hw(struct irdma_pci_f *rf)\n+{\n+\tenum init_completion_state state = rf->init_state;\n+\n+\trf->init_state = INVALID_STATE;\n+\tif (rf->rsrc_created) {\n+\t\tirdma_destroy_pble_prm(rf->pble_rsrc);\n+\t\tirdma_del_ceqs(rf);\n+\t\trf->rsrc_created = false;\n+\t}\n+\tswitch (state) {\n+\tcase CEQ0_CREATED:\n+\t\tirdma_del_ceq_0(rf);\n+\t\t/* fallthrough */\n+\tcase AEQ_CREATED:\n+\t\tirdma_destroy_aeq(rf);\n+\t\t/* fallthrough */\n+\tcase CCQ_CREATED:\n+\t\tirdma_destroy_ccq(rf);\n+\t\t/* fallthrough */\n+\tcase HMC_OBJS_CREATED:\n+\t\tirdma_del_hmc_objects(&rf->sc_dev, rf->sc_dev.hmc_info, true,\n+\t\t\t\t rf->reset, rf->rdma_ver);\n+\t\t/* fallthrough */\n+\tcase CQP_CREATED:\n+\t\tif (rf->cqp.cqp_compl_thread) {\n+\t\t\trf->stop_cqp_thread = 
true;\n+\t\t\tup(&rf->cqp.cqp_compl_sem);\n+\t\t\tkthread_stop(rf->cqp.cqp_compl_thread);\n+\t\t}\n+\t\tirdma_destroy_cqp(rf, true);\n+\t\t/* fallthrough */\n+\tcase INITIAL_STATE:\n+\t\tirdma_del_init_mem(rf);\n+\t\tbreak;\n+\tcase INVALID_STATE:\n+\t\t/* fallthrough */\n+\tdefault:\n+\t\tpr_warn(\"bad init_state = %d\\n\", rf->init_state);\n+\t\tbreak;\n+\t}\n+}\n+\n+enum irdma_status_code irdma_rt_init_hw(struct irdma_pci_f *rf,\n+\t\t\t\t\tstruct irdma_device *iwdev,\n+\t\t\t\t\tstruct irdma_l2params *l2params)\n+{\n+\tstruct irdma_sc_dev *dev = &rf->sc_dev;\n+\tenum irdma_status_code status;\n+\tstruct irdma_vsi_init_info vsi_info = {};\n+\tstruct irdma_vsi_stats_info stats_info = {};\n+\n+\tirdma_sc_rt_init(dev);\n+\tvsi_info.vm_vf_type = dev->is_pf ? IRDMA_PF_TYPE : IRDMA_VF_TYPE;\n+\tvsi_info.dev = dev;\n+\tvsi_info.back_vsi = (void *)iwdev;\n+\tvsi_info.params = l2params;\n+\tvsi_info.pf_data_vsi_num = iwdev->vsi_num;\n+\tvsi_info.exception_lan_q = 2;\n+\tirdma_sc_vsi_init(&iwdev->vsi, &vsi_info);\n+\n+\tstatus = irdma_setup_cm_core(iwdev, rf->rdma_ver);\n+\tif (status)\n+\t\treturn status;\n+\n+\tstats_info.pestat = kzalloc(sizeof(*stats_info.pestat), GFP_KERNEL);\n+\tif (!stats_info.pestat)\n+\t\treturn IRDMA_ERR_NO_MEMORY;\n+\n+\tstats_info.fcn_id = dev->hmc_fn_id;\n+\tstatus = irdma_vsi_stats_init(&iwdev->vsi, &stats_info);\n+\tif (status) {\n+\t\tkfree(stats_info.pestat);\n+\t\treturn status;\n+\t}\n+\n+\tdo {\n+\t\tif (iwdev->create_ilq) {\n+\t\t\tstatus = irdma_initialize_ilq(iwdev);\n+\t\t\tif (status)\n+\t\t\t\tbreak;\n+\t\t\tiwdev->init_state = ILQ_CREATED;\n+\t\t}\n+\t\tstatus = irdma_initialize_ieq(iwdev);\n+\t\tif (status)\n+\t\t\tbreak;\n+\t\tiwdev->init_state = IEQ_CREATED;\n+\t\tif (!rf->rsrc_created) {\n+\t\t\tstatus = irdma_setup_ceqs(rf, &iwdev->vsi);\n+\t\t\tif (status)\n+\t\t\t\tbreak;\n+\t\t\tiwdev->init_state = CEQS_CREATED;\n+\n+\t\t\tstatus = irdma_hmc_init_pble(&rf->sc_dev,\n+\t\t\t\t\t\t rf->pble_rsrc);\n+\t\t\tif (status) {\n+\t\t\t\tirdma_del_ceqs(rf);\n+\t\t\t\tbreak;\n+\t\t\t}\n+\t\t\tspin_lock_init(&rf->pble_rsrc->pble_lock);\n+\t\t\tiwdev->init_state = PBLE_CHUNK_MEM;\n+\t\t\trf->rsrc_created = true;\n+\t\t}\n+\n+\t\tiwdev->device_cap_flags = IB_DEVICE_LOCAL_DMA_LKEY |\n+\t\t\t\t\t IB_DEVICE_MEM_WINDOW |\n+\t\t\t\t\t IB_DEVICE_MEM_MGT_EXTENSIONS;\n+\n+\t\tif (iwdev->rf->sc_dev.hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1)\n+\t\t\tirdma_alloc_set_mac(iwdev);\n+\t\tirdma_add_ip(iwdev);\n+\t\tiwdev->init_state = IP_ADDR_REGISTERED;\n+\t\tstatus = irdma_register_rdma_device(iwdev);\n+\t\tif (status)\n+\t\t\tbreak;\n+\t\tiwdev->init_state = RDMA_DEV_REGISTERED;\n+\t\tirdma_port_ibevent(iwdev);\n+\t\tiwdev->iw_status = 1;\n+\t\tirdma_get_used_rsrc(iwdev);\n+\t\tinit_waitqueue_head(&iwdev->suspend_wq);\n+\n+\t\treturn 0;\n+\t} while (0);\n+\n+\tdev_err(rfdev_to_dev(dev), \"VSI open FAIL status = %d last cmpl = %d\\n\",\n+\t\tstatus, iwdev->init_state);\n+\tirdma_deinit_rt_device(iwdev);\n+\n+\treturn status;\n+}\n+\n+/**\n+ * irdma_ctrl_init_hw - Initializes RDMA HW\n+ * @rf: RDMA PCI function\n+ *\n+ */\n+enum irdma_status_code irdma_ctrl_init_hw(struct irdma_pci_f *rf)\n+{\n+\tstruct irdma_sc_dev *dev = &rf->sc_dev;\n+\tenum irdma_status_code status;\n+\n+\tdo {\n+\t\tstatus = irdma_setup_init_state(rf);\n+\t\tif (status)\n+\t\t\tbreak;\n+\t\trf->init_state = INITIAL_STATE;\n+\n+\t\tstatus = irdma_create_cqp(rf);\n+\t\tif (status)\n+\t\t\tbreak;\n+\t\trf->init_state = CQP_CREATED;\n+\n+\t\tstatus = irdma_hmc_setup(rf);\n+\t\tif 
(status)\n+\t\t\tbreak;\n+\t\trf->init_state = HMC_OBJS_CREATED;\n+\n+\t\tstatus = irdma_initialize_hw_rsrc(rf);\n+\t\tif (status)\n+\t\t\tbreak;\n+\n+\t\tstatus = irdma_create_ccq(rf);\n+\t\tif (status)\n+\t\t\tbreak;\n+\t\trf->init_state = CCQ_CREATED;\n+\n+\t\tstatus = irdma_setup_aeq(rf);\n+\t\tif (status)\n+\t\t\tbreak;\n+\t\trf->init_state = AEQ_CREATED;\n+\t\trf->sc_dev.feature_info[IRDMA_FEATURE_FW_INFO] = IRDMA_FW_VER_DEFAULT;\n+\n+\t\tif (rf->rdma_ver != IRDMA_GEN_1)\n+\t\t\tstatus = irdma_get_rdma_features(&rf->sc_dev);\n+\t\tif (!status) {\n+\t\t\tu32 fw_ver = dev->feature_info[IRDMA_FEATURE_FW_INFO];\n+\t\t\tu8 hw_rev = dev->hw_attrs.uk_attrs.hw_rev;\n+\n+\t\t\tif ((hw_rev == IRDMA_GEN_1 && fw_ver >= IRDMA_FW_VER_0x30010) ||\n+\t\t\t (hw_rev != IRDMA_GEN_1 && fw_ver >= IRDMA_FW_VER_0x1000D))\n+\t\t\t\tdev->hw_attrs.uk_attrs.feature_flags |= IRDMA_FEATURE_RTS_AE |\n+\t\t\t\t\t\t\t\t\tIRDMA_FEATURE_CQ_RESIZE;\n+\t\t}\n+\t\trf->cqp.cqp_compl_thread =\n+\t\t\tkthread_run(cqp_compl_thread, rf, \"cqp_compl_thread\");\n+\n+\t\tstatus = irdma_setup_ceq_0(rf);\n+\t\tif (status)\n+\t\t\tbreak;\n+\t\trf->init_state = CEQ0_CREATED;\n+\n+\t\trf->free_qp_wq =\n+\t\t\talloc_ordered_workqueue(\"free_qp_wq\", WQ_MEM_RECLAIM);\n+\t\tif (!rf->free_qp_wq) {\n+\t\t\tstatus = IRDMA_ERR_NO_MEMORY;\n+\t\t\tbreak;\n+\t\t}\n+\n+\t\trf->free_cqbuf_wq =\n+\t\t\talloc_ordered_workqueue(\"free_cqbuf_wq\", WQ_MEM_RECLAIM);\n+\t\tif (!rf->free_cqbuf_wq) {\n+\t\t\tstatus = IRDMA_ERR_NO_MEMORY;\n+\t\t\tbreak;\n+\t\t}\n+\t\tdev->ccq_ops->ccq_arm(dev->ccq);\n+\t\tdev_info(rfdev_to_dev(dev), \"IRDMA hardware initialization successful\\n\");\n+\t\treturn 0;\n+\t} while (0);\n+\n+\tpr_err(\"IRDMA hardware initialization FAILED init_state=%d status=%d\\n\",\n+\t rf->init_state, status);\n+\tirdma_deinit_ctrl_hw(rf);\n+\treturn status;\n+}\n+\n+/**\n+ * irdma_initialize_hw_rsrc - initialize hw resources during open\n+ * @rf: RDMA PCI function\n+ */\n+u32 irdma_initialize_hw_rsrc(struct irdma_pci_f *rf)\n+{\n+\tunsigned long num_pds;\n+\tu32 rsrc_size;\n+\tu32 max_mr;\n+\tu32 max_qp;\n+\tu32 max_cq;\n+\tu32 arp_table_size;\n+\tu32 mrdrvbits;\n+\tvoid *rsrc_ptr;\n+\tu32 num_ahs;\n+\tu32 num_mcg;\n+\n+\tif (rf->rdma_ver != IRDMA_GEN_1) {\n+\t\trf->allocated_ws_nodes =\n+\t\t\tkcalloc(BITS_TO_LONGS(IRDMA_MAX_WS_NODES),\n+\t\t\t\tsizeof(unsigned long), GFP_KERNEL);\n+\t\tif (!rf->allocated_ws_nodes)\n+\t\t\treturn -ENOMEM;\n+\n+\t\tset_bit(0, rf->allocated_ws_nodes);\n+\t\trf->max_ws_node_id = IRDMA_MAX_WS_NODES;\n+\t}\n+\tmax_qp = rf->sc_dev.hmc_info->hmc_obj[IRDMA_HMC_IW_QP].cnt;\n+\tmax_cq = rf->sc_dev.hmc_info->hmc_obj[IRDMA_HMC_IW_CQ].cnt;\n+\tmax_mr = rf->sc_dev.hmc_info->hmc_obj[IRDMA_HMC_IW_MR].cnt;\n+\tarp_table_size = rf->sc_dev.hmc_info->hmc_obj[IRDMA_HMC_IW_ARP].cnt;\n+\trf->max_cqe = rf->sc_dev.hw_attrs.uk_attrs.max_hw_cq_size;\n+\tnum_pds = rf->sc_dev.hw_attrs.max_hw_pds;\n+\trsrc_size = sizeof(struct irdma_arp_entry) * arp_table_size;\n+\trsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(max_qp);\n+\trsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(max_mr);\n+\trsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(max_cq);\n+\trsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(num_pds);\n+\trsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(arp_table_size);\n+\tnum_ahs = max_qp * 4;\n+\trsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(num_ahs);\n+\tnum_mcg = max_qp;\n+\trsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(num_mcg);\n+\trsrc_size += sizeof(struct irdma_qp **) * 
max_qp;\n+\n+\trf->mem_rsrc = kzalloc(rsrc_size, GFP_KERNEL);\n+\tif (!rf->mem_rsrc) {\n+\t\tkfree(rf->allocated_ws_nodes);\n+\t\trf->allocated_ws_nodes = NULL;\n+\t\treturn -ENOMEM;\n+\t}\n+\n+\trf->max_qp = max_qp;\n+\trf->max_mr = max_mr;\n+\trf->max_cq = max_cq;\n+\trf->max_pd = num_pds;\n+\trf->arp_table_size = arp_table_size;\n+\trf->arp_table = (struct irdma_arp_entry *)rf->mem_rsrc;\n+\trsrc_ptr = rf->mem_rsrc +\n+\t\t (sizeof(struct irdma_arp_entry) * arp_table_size);\n+\trf->max_ah = num_ahs;\n+\trf->max_mcg = num_mcg;\n+\trf->allocated_qps = rsrc_ptr;\n+\trf->allocated_cqs = &rf->allocated_qps[BITS_TO_LONGS(max_qp)];\n+\trf->allocated_mrs = &rf->allocated_cqs[BITS_TO_LONGS(max_cq)];\n+\trf->allocated_pds = &rf->allocated_mrs[BITS_TO_LONGS(max_mr)];\n+\trf->allocated_ahs = &rf->allocated_pds[BITS_TO_LONGS(num_pds)];\n+\trf->allocated_mcgs = &rf->allocated_ahs[BITS_TO_LONGS(num_ahs)];\n+\trf->allocated_arps = &rf->allocated_mcgs[BITS_TO_LONGS(num_mcg)];\n+\trf->qp_table = (struct irdma_qp **)\n+\t\t (&rf->allocated_arps[BITS_TO_LONGS(arp_table_size)]);\n+\n+\tset_bit(0, rf->allocated_mrs);\n+\tset_bit(0, rf->allocated_qps);\n+\tset_bit(0, rf->allocated_cqs);\n+\tset_bit(0, rf->allocated_pds);\n+\tset_bit(0, rf->allocated_arps);\n+\tset_bit(0, rf->allocated_ahs);\n+\tset_bit(0, rf->allocated_mcgs);\n+\tset_bit(2, rf->allocated_qps); /* qp 2 IEQ */\n+\tset_bit(1, rf->allocated_qps); /* qp 1 ILQ */\n+\tset_bit(1, rf->allocated_cqs);\n+\tset_bit(1, rf->allocated_pds);\n+\tset_bit(2, rf->allocated_cqs);\n+\tset_bit(2, rf->allocated_pds);\n+\n+\tspin_lock_init(&rf->rsrc_lock);\n+\tspin_lock_init(&rf->arp_lock);\n+\tspin_lock_init(&rf->qptable_lock);\n+\tspin_lock_init(&rf->qh_list_lock);\n+\n+\tINIT_LIST_HEAD(&rf->mc_qht_list.list);\n+\t/* stag index mask has a minimum of 14 bits */\n+\tmrdrvbits = 24 - max(get_count_order(rf->max_mr), 14);\n+\trf->mr_stagmask = ~(((1 << mrdrvbits) - 1) << (32 - mrdrvbits));\n+\n+\treturn 0;\n+}\n+\n+/**\n+ * irdma_cqp_ce_handler - handle cqp completions\n+ * @rf: RDMA PCI function\n+ * @cq: cq for cqp completions\n+ */\n+void irdma_cqp_ce_handler(struct irdma_pci_f *rf, struct irdma_sc_cq *cq)\n+{\n+\tstruct irdma_cqp_request *cqp_request;\n+\tstruct irdma_sc_dev *dev = &rf->sc_dev;\n+\tu32 cqe_count = 0;\n+\tstruct irdma_ccq_cqe_info info;\n+\tunsigned long flags;\n+\tint ret;\n+\n+\tdo {\n+\t\tmemset(&info, 0, sizeof(info));\n+\t\tspin_lock_irqsave(&rf->cqp.compl_lock, flags);\n+\t\tret = dev->ccq_ops->ccq_get_cqe_info(cq, &info);\n+\t\tspin_unlock_irqrestore(&rf->cqp.compl_lock, flags);\n+\t\tif (ret)\n+\t\t\tbreak;\n+\n+\t\tcqp_request = (struct irdma_cqp_request *)\n+\t\t\t (unsigned long)info.scratch;\n+\t\tif (info.error)\n+\t\t\tdev_dbg(rfdev_to_dev(dev),\n+\t\t\t\t\"ERR: opcode = 0x%x maj_err_code = 0x%x min_err_code = 0x%x\\n\",\n+\t\t\t\tinfo.op_code, info.maj_err_code,\n+\t\t\t\tinfo.min_err_code);\n+\t\tif (cqp_request) {\n+\t\t\tcqp_request->compl_info.maj_err_code = info.maj_err_code;\n+\t\t\tcqp_request->compl_info.min_err_code = info.min_err_code;\n+\t\t\tcqp_request->compl_info.op_ret_val = info.op_ret_val;\n+\t\t\tcqp_request->compl_info.error = info.error;\n+\n+\t\t\tif (cqp_request->waiting) {\n+\t\t\t\tcqp_request->request_done = true;\n+\t\t\t\twake_up(&cqp_request->waitq);\n+\t\t\t\tirdma_put_cqp_request(&rf->cqp, cqp_request);\n+\t\t\t} else {\n+\t\t\t\tif (cqp_request->callback_fcn)\n+\t\t\t\t\tcqp_request->callback_fcn(cqp_request);\n+\t\t\t\tirdma_put_cqp_request(&rf->cqp, 
cqp_request);\n+\t\t\t}\n+\t\t}\n+\n+\t\tcqe_count++;\n+\t} while (1);\n+\n+\tif (cqe_count) {\n+\t\tirdma_process_bh(dev);\n+\t\tdev->ccq_ops->ccq_arm(cq);\n+\t}\n+}\n+\n+/**\n+ * cqp_compl_thread - Handle cqp completions\n+ * @context: Pointer to RDMA PCI Function\n+ */\n+int cqp_compl_thread(void *context)\n+{\n+\tstruct irdma_pci_f *rf = context;\n+\tstruct irdma_sc_cq *cq = &rf->ccq.sc_cq;\n+\n+\tdo {\n+\t\tif (down_interruptible(&rf->cqp.cqp_compl_sem) ||\n+\t\t rf->stop_cqp_thread)\n+\t\t\treturn 0;\n+\n+\t\tirdma_cqp_ce_handler(rf, cq);\n+\t} while (!kthread_should_stop());\n+\n+\treturn 0;\n+}\n+\n+/**\n+ * irdma_next_iw_state - modify qp state\n+ * @iwqp: iwarp qp to modify\n+ * @state: next state for qp\n+ * @del_hash: del hash\n+ * @term: term message\n+ * @termlen: length of term message\n+ */\n+void irdma_next_iw_state(struct irdma_qp *iwqp, u8 state, u8 del_hash, u8 term,\n+\t\t\t u8 termlen)\n+{\n+\tstruct irdma_modify_qp_info info = {};\n+\n+\tinfo.next_iwarp_state = state;\n+\tinfo.remove_hash_idx = del_hash;\n+\tinfo.cq_num_valid = true;\n+\tinfo.arp_cache_idx_valid = true;\n+\tinfo.dont_send_term = true;\n+\tinfo.dont_send_fin = true;\n+\tinfo.termlen = termlen;\n+\n+\tif (term & IRDMAQP_TERM_SEND_TERM_ONLY)\n+\t\tinfo.dont_send_term = false;\n+\tif (term & IRDMAQP_TERM_SEND_FIN_ONLY)\n+\t\tinfo.dont_send_fin = false;\n+\tif (iwqp->sc_qp.term_flags && state == IRDMA_QP_STATE_ERROR)\n+\t\tinfo.reset_tcp_conn = true;\n+\tiwqp->hw_iwarp_state = state;\n+\tirdma_hw_modify_qp(iwqp->iwdev, iwqp, &info, 0);\n+\tiwqp->iwarp_state = info.next_iwarp_state;\n+}\n+\n+/**\n+ * irdma_del_local_mac_entry - remove a mac entry from the hw table\n+ * @rf: RDMA PCI function\n+ * @idx: the index of the mac ip address to delete\n+ */\n+void irdma_del_local_mac_entry(struct irdma_pci_f *rf, u16 idx)\n+{\n+\tstruct irdma_cqp *iwcqp = &rf->cqp;\n+\tstruct irdma_cqp_request *cqp_request;\n+\tstruct cqp_cmds_info *cqp_info;\n+\tenum irdma_status_code status = 0;\n+\n+\tcqp_request = irdma_get_cqp_request(iwcqp, true);\n+\tif (!cqp_request) {\n+\t\tpr_err(\"cqp_request memory failed\\n\");\n+\t\treturn;\n+\t}\n+\n+\tcqp_info = &cqp_request->info;\n+\tcqp_info->cqp_cmd = IRDMA_OP_DELETE_LOCAL_MAC_ENTRY;\n+\tcqp_info->post_sq = 1;\n+\tcqp_info->in.u.del_local_mac_entry.cqp = &iwcqp->sc_cqp;\n+\tcqp_info->in.u.del_local_mac_entry.scratch = (uintptr_t)cqp_request;\n+\tcqp_info->in.u.del_local_mac_entry.entry_idx = idx;\n+\tcqp_info->in.u.del_local_mac_entry.ignore_ref_count = 0;\n+\tstatus = irdma_handle_cqp_op(rf, cqp_request);\n+\tif (status)\n+\t\tpr_err(\"CQP-OP Del MAC entry fail\\n\");\n+}\n+\n+/**\n+ * irdma_add_local_mac_entry - add a mac ip address entry to the hw table\n+ * @rf: RDMA PCI function\n+ * @mac_addr: pointer to mac address\n+ * @idx: the index of the mac ip address to add\n+ */\n+int irdma_add_local_mac_entry(struct irdma_pci_f *rf, u8 *mac_addr, u16 idx)\n+{\n+\tstruct irdma_local_mac_entry_info *info;\n+\tstruct irdma_cqp *iwcqp = &rf->cqp;\n+\tstruct irdma_cqp_request *cqp_request;\n+\tstruct cqp_cmds_info *cqp_info;\n+\tenum irdma_status_code status = 0;\n+\n+\tcqp_request = irdma_get_cqp_request(iwcqp, true);\n+\tif (!cqp_request) {\n+\t\tpr_err(\"cqp_request memory failed\\n\");\n+\t\treturn IRDMA_ERR_NO_MEMORY;\n+\t}\n+\n+\tcqp_info = &cqp_request->info;\n+\tcqp_info->post_sq = 1;\n+\tinfo = &cqp_info->in.u.add_local_mac_entry.info;\n+\tether_addr_copy(info->mac_addr, mac_addr);\n+\tinfo->entry_idx = idx;\n+\tcqp_info->in.u.add_local_mac_entry.scratch = 
(uintptr_t)cqp_request;\n+\tcqp_info->cqp_cmd = IRDMA_OP_ADD_LOCAL_MAC_ENTRY;\n+\tcqp_info->in.u.add_local_mac_entry.cqp = &iwcqp->sc_cqp;\n+\tstatus = irdma_handle_cqp_op(rf, cqp_request);\n+\tif (status)\n+\t\tpr_err(\"CQP-OP Add MAC entry fail\\n\");\n+\n+\treturn status;\n+}\n+\n+/**\n+ * irdma_alloc_local_mac_entry - allocate a mac entry\n+ * @rf: RDMA PCI function\n+ * @mac_tbl_idx: the index of the new mac address\n+ *\n+ * Allocate a mac address entry and update the mac_tbl_idx\n+ * to hold the index of the newly created mac address\n+ * Return 0 if successful, otherwise return error\n+ */\n+int irdma_alloc_local_mac_entry(struct irdma_pci_f *rf, u16 *mac_tbl_idx)\n+{\n+\tstruct irdma_cqp *iwcqp = &rf->cqp;\n+\tstruct irdma_cqp_request *cqp_request;\n+\tstruct cqp_cmds_info *cqp_info;\n+\tenum irdma_status_code status = 0;\n+\n+\tcqp_request = irdma_get_cqp_request(iwcqp, true);\n+\tif (!cqp_request) {\n+\t\tpr_err(\"cqp_request memory failed\\n\");\n+\t\treturn IRDMA_ERR_NO_MEMORY;\n+\t}\n+\n+\t/* increment refcount, because we need the cqp request ret value */\n+\tatomic_inc(&cqp_request->refcount);\n+\tcqp_info = &cqp_request->info;\n+\tcqp_info->cqp_cmd = IRDMA_OP_ALLOC_LOCAL_MAC_ENTRY;\n+\tcqp_info->post_sq = 1;\n+\tcqp_info->in.u.alloc_local_mac_entry.cqp = &iwcqp->sc_cqp;\n+\tcqp_info->in.u.alloc_local_mac_entry.scratch = (uintptr_t)cqp_request;\n+\tstatus = irdma_handle_cqp_op(rf, cqp_request);\n+\tif (!status)\n+\t\t*mac_tbl_idx = (u16)cqp_request->compl_info.op_ret_val;\n+\telse\n+\t\tpr_err(\"CQP-OP Alloc MAC entry fail\\n\");\n+\t/* decrement refcount and free the cqp request, if no longer used */\n+\tirdma_put_cqp_request(iwcqp, cqp_request);\n+\n+\treturn status;\n+}\n+\n+/**\n+ * irdma_cqp_manage_apbvt_cmd - send cqp command manage apbvt\n+ * @iwdev: iwarp device\n+ * @accel_local_port: port for apbvt\n+ * @add_port: add or delete port\n+ */\n+static enum irdma_status_code\n+irdma_cqp_manage_apbvt_cmd(struct irdma_device *iwdev, u16 accel_local_port,\n+\t\t\t bool add_port)\n+{\n+\tstruct irdma_apbvt_info *info;\n+\tstruct irdma_cqp_request *cqp_request;\n+\tstruct cqp_cmds_info *cqp_info;\n+\tenum irdma_status_code status;\n+\n+\tcqp_request = irdma_get_cqp_request(&iwdev->rf->cqp, add_port);\n+\tif (!cqp_request)\n+\t\treturn IRDMA_ERR_NO_MEMORY;\n+\n+\tcqp_info = &cqp_request->info;\n+\tinfo = &cqp_info->in.u.manage_apbvt_entry.info;\n+\tmemset(info, 0, sizeof(*info));\n+\tinfo->add = add_port;\n+\tinfo->port = accel_local_port;\n+\tcqp_info->cqp_cmd = IRDMA_OP_MANAGE_APBVT_ENTRY;\n+\tcqp_info->post_sq = 1;\n+\tcqp_info->in.u.manage_apbvt_entry.cqp = &iwdev->rf->cqp.sc_cqp;\n+\tcqp_info->in.u.manage_apbvt_entry.scratch = (uintptr_t)cqp_request;\n+\tstatus = irdma_handle_cqp_op(iwdev->rf, cqp_request);\n+\tif (status)\n+\t\tdev_dbg(rfdev_to_dev(&iwdev->rf->sc_dev),\n+\t\t\t\"ERR: CQP-OP Manage APBVT entry fail\\n\");\n+\n+\treturn status;\n+}\n+\n+/**\n+ * irdma_manage_apbvt - add or delete tcp port\n+ * @iwdev: iwarp device\n+ * @accel_local_port: port for apbvt\n+ * @add_port: add or delete port\n+ */\n+enum irdma_status_code irdma_manage_apbvt(struct irdma_device *iwdev,\n+\t\t\t\t\t u16 accel_local_port, bool add_port)\n+{\n+\tstruct irdma_cm_core *cm_core = &iwdev->cm_core;\n+\tenum irdma_status_code status = 0;\n+\tunsigned long flags;\n+\tbool in_use;\n+\n+\t/* apbvt_lock is held across CQP delete APBVT OP (non-waiting) to\n+\t * protect against race where add APBVT CQP can race ahead of the 
delete\n+\t * APBVT for same port.\n+\t */\n+\tif (add_port) {\n+\t\tspin_lock_irqsave(&cm_core->apbvt_lock, flags);\n+\t\tin_use = __test_and_set_bit(accel_local_port,\n+\t\t\t\t\t cm_core->ports_in_use);\n+\t\tspin_unlock_irqrestore(&cm_core->apbvt_lock, flags);\n+\t\tif (in_use)\n+\t\t\treturn 0;\n+\t\treturn irdma_cqp_manage_apbvt_cmd(iwdev, accel_local_port,\n+\t\t\t\t\t\t true);\n+\t} else {\n+\t\tspin_lock_irqsave(&cm_core->apbvt_lock, flags);\n+\t\tin_use = irdma_port_in_use(cm_core, accel_local_port);\n+\t\tif (in_use) {\n+\t\t\tspin_unlock_irqrestore(&cm_core->apbvt_lock, flags);\n+\t\t\treturn 0;\n+\t\t}\n+\t\t__clear_bit(accel_local_port, cm_core->ports_in_use);\n+\t\tstatus = irdma_cqp_manage_apbvt_cmd(iwdev, accel_local_port,\n+\t\t\t\t\t\t false);\n+\t\tspin_unlock_irqrestore(&cm_core->apbvt_lock, flags);\n+\t\treturn status;\n+\t}\n+}\n+\n+/**\n+ * irdma_manage_arp_cache - manage hw arp cache\n+ * @rf: RDMA PCI function\n+ * @mac_addr: mac address ptr\n+ * @ip_addr: ip addr for arp cache\n+ * @ipv4: flag indicating IPv4\n+ * @action: add, delete or modify\n+ */\n+void irdma_manage_arp_cache(struct irdma_pci_f *rf, unsigned char *mac_addr,\n+\t\t\t u32 *ip_addr, bool ipv4, u32 action)\n+{\n+\tstruct irdma_add_arp_cache_entry_info *info;\n+\tstruct irdma_cqp_request *cqp_request;\n+\tstruct cqp_cmds_info *cqp_info;\n+\tint arp_index;\n+\n+\tarp_index = irdma_arp_table(rf, ip_addr, ipv4, mac_addr, action);\n+\tif (arp_index == -1)\n+\t\treturn;\n+\n+\tcqp_request = irdma_get_cqp_request(&rf->cqp, false);\n+\tif (!cqp_request)\n+\t\treturn;\n+\n+\tcqp_info = &cqp_request->info;\n+\tif (action == IRDMA_ARP_ADD) {\n+\t\tcqp_info->cqp_cmd = IRDMA_OP_ADD_ARP_CACHE_ENTRY;\n+\t\tinfo = &cqp_info->in.u.add_arp_cache_entry.info;\n+\t\tmemset(info, 0, sizeof(*info));\n+\t\tinfo->arp_index = (u16)arp_index;\n+\t\tinfo->permanent = true;\n+\t\tether_addr_copy(info->mac_addr, mac_addr);\n+\t\tcqp_info->in.u.add_arp_cache_entry.scratch =\n+\t\t\t(uintptr_t)cqp_request;\n+\t\tcqp_info->in.u.add_arp_cache_entry.cqp = &rf->cqp.sc_cqp;\n+\t} else {\n+\t\tcqp_info->cqp_cmd = IRDMA_OP_DELETE_ARP_CACHE_ENTRY;\n+\t\tcqp_info->in.u.del_arp_cache_entry.scratch =\n+\t\t\t(uintptr_t)cqp_request;\n+\t\tcqp_info->in.u.del_arp_cache_entry.cqp = &rf->cqp.sc_cqp;\n+\t\tcqp_info->in.u.del_arp_cache_entry.arp_index = arp_index;\n+\t}\n+\n+\tcqp_info->post_sq = 1;\n+\tif (irdma_handle_cqp_op(rf, cqp_request))\n+\t\tdev_dbg(rfdev_to_dev(&rf->sc_dev),\n+\t\t\t\"ERR: CQP-OP Add/Del Arp Cache entry fail\\n\");\n+}\n+\n+/**\n+ * irdma_send_syn_cqp_callback - do syn/ack after qhash\n+ * @cqp_request: qhash cqp completion\n+ */\n+static void irdma_send_syn_cqp_callback(struct irdma_cqp_request *cqp_request)\n+{\n+\tirdma_send_syn(cqp_request->param, 1);\n+}\n+\n+/**\n+ * irdma_manage_qhash - add or modify qhash\n+ * @iwdev: iwarp device\n+ * @cminfo: cm info for qhash\n+ * @etype: type (syn or quad)\n+ * @mtype: type of qhash\n+ * @cmnode: cmnode associated with connection\n+ * @wait: wait for completion\n+ */\n+enum irdma_status_code\n+irdma_manage_qhash(struct irdma_device *iwdev, struct irdma_cm_info *cminfo,\n+\t\t enum irdma_quad_entry_type etype,\n+\t\t enum irdma_quad_hash_manage_type mtype, void *cmnode,\n+\t\t bool wait)\n+{\n+\tstruct irdma_qhash_table_info *info;\n+\tstruct irdma_sc_dev *dev = &iwdev->rf->sc_dev;\n+\tenum irdma_status_code status;\n+\tstruct irdma_cqp *iwcqp = 
&iwdev->rf->cqp;\n+\tstruct irdma_cqp_request *cqp_request;\n+\tstruct cqp_cmds_info *cqp_info;\n+\n+\tcqp_request = irdma_get_cqp_request(iwcqp, wait);\n+\tif (!cqp_request)\n+\t\treturn IRDMA_ERR_NO_MEMORY;\n+\n+\tcqp_info = &cqp_request->info;\n+\tinfo = &cqp_info->in.u.manage_qhash_table_entry.info;\n+\tmemset(info, 0, sizeof(*info));\n+\tinfo->vsi = &iwdev->vsi;\n+\tinfo->manage = mtype;\n+\tinfo->entry_type = etype;\n+\tif (cminfo->vlan_id < VLAN_N_VID) {\n+\t\tinfo->vlan_valid = true;\n+\t\tinfo->vlan_id = cminfo->vlan_id;\n+\t} else {\n+\t\tinfo->vlan_valid = false;\n+\t}\n+\tinfo->ipv4_valid = cminfo->ipv4;\n+\tinfo->user_pri = cminfo->user_pri;\n+\tether_addr_copy(info->mac_addr, iwdev->netdev->dev_addr);\n+\tinfo->qp_num = cminfo->qh_qpid;\n+\tinfo->dest_port = cminfo->loc_port;\n+\tinfo->dest_ip[0] = cminfo->loc_addr[0];\n+\tinfo->dest_ip[1] = cminfo->loc_addr[1];\n+\tinfo->dest_ip[2] = cminfo->loc_addr[2];\n+\tinfo->dest_ip[3] = cminfo->loc_addr[3];\n+\tif (etype == IRDMA_QHASH_TYPE_TCP_ESTABLISHED ||\n+\t etype == IRDMA_QHASH_TYPE_UDP_UNICAST ||\n+\t etype == IRDMA_QHASH_TYPE_UDP_MCAST ||\n+\t etype == IRDMA_QHASH_TYPE_ROCE_MCAST ||\n+\t etype == IRDMA_QHASH_TYPE_ROCEV2_HW) {\n+\t\tinfo->src_port = cminfo->rem_port;\n+\t\tinfo->src_ip[0] = cminfo->rem_addr[0];\n+\t\tinfo->src_ip[1] = cminfo->rem_addr[1];\n+\t\tinfo->src_ip[2] = cminfo->rem_addr[2];\n+\t\tinfo->src_ip[3] = cminfo->rem_addr[3];\n+\t}\n+\tif (cmnode) {\n+\t\tcqp_request->callback_fcn = irdma_send_syn_cqp_callback;\n+\t\tcqp_request->param = cmnode;\n+\t}\n+\tif (info->ipv4_valid)\n+\t\tdev_dbg(rfdev_to_dev(dev),\n+\t\t\t\"CM: %s IP=%pI4, port=%d, mac=%pM, vlan_id=%d\\n\",\n+\t\t\t!mtype ? \"DELETE\" : \"ADD\", info->dest_ip,\n+\t\t\tinfo->dest_port, info->mac_addr, cminfo->vlan_id);\n+\telse\n+\t\tdev_dbg(rfdev_to_dev(dev),\n+\t\t\t\"CM: %s IP=%pI6, port=%d, mac=%pM, vlan_id=%d\\n\",\n+\t\t\t!mtype ? 
\"DELETE\" : \"ADD\", info->dest_ip,\n+\t\t\tinfo->dest_port, info->mac_addr, cminfo->vlan_id);\n+\tcqp_info->in.u.manage_qhash_table_entry.cqp = &iwdev->rf->cqp.sc_cqp;\n+\tcqp_info->in.u.manage_qhash_table_entry.scratch = (uintptr_t)cqp_request;\n+\tcqp_info->cqp_cmd = IRDMA_OP_MANAGE_QHASH_TABLE_ENTRY;\n+\tcqp_info->post_sq = 1;\n+\tstatus = irdma_handle_cqp_op(iwdev->rf, cqp_request);\n+\tif (status)\n+\t\tdev_dbg(rfdev_to_dev(dev),\n+\t\t\t\"ERR: CQP-OP Manage Qhash Entry fail\");\n+\n+\treturn status;\n+}\n+\n+/**\n+ * irdma_hw_flush_wqes_callback - Check return code after flush\n+ * @cqp_request: qhash cqp completion\n+ */\n+static void irdma_hw_flush_wqes_callback(struct irdma_cqp_request *cqp_request)\n+{\n+\tstruct irdma_qp_flush_info *hw_info;\n+\tstruct irdma_sc_qp *qp;\n+\tstruct irdma_qp *iwqp;\n+\tstruct cqp_cmds_info *cqp_info;\n+\n+\tcqp_info = &cqp_request->info;\n+\thw_info = &cqp_request->info.in.u.qp_flush_wqes.info;\n+\tqp = cqp_info->in.u.qp_flush_wqes.qp;\n+\tiwqp = qp->qp_uk.back_qp;\n+\n+\tif (cqp_request->compl_info.maj_err_code)\n+\t\treturn;\n+\tif (hw_info->rq &&\n+\t (cqp_request->compl_info.min_err_code == IRDMA_CQP_COMPL_SQ_WQE_FLUSHED ||\n+\t cqp_request->compl_info.min_err_code == 0)) {\n+\t\t/* RQ WQE flush was requested but did not happen */\n+\t\tqp->qp_uk.rq_flush_complete = true;\n+\t\tcomplete(&iwqp->rq_drained);\n+\t}\n+\tif (hw_info->sq &&\n+\t (cqp_request->compl_info.min_err_code == IRDMA_CQP_COMPL_RQ_WQE_FLUSHED ||\n+\t cqp_request->compl_info.min_err_code == 0)) {\n+\t\tqp->qp_uk.sq_flush_complete = true;\n+\t\tcomplete(&iwqp->sq_drained);\n+\t}\n+}\n+\n+/**\n+ * irdma_hw_flush_wqes - flush qp's wqe\n+ * @rf: RDMA PCI function\n+ * @qp: hardware control qp\n+ * @info: info for flush\n+ * @wait: flag wait for completion\n+ */\n+enum irdma_status_code irdma_hw_flush_wqes(struct irdma_pci_f *rf,\n+\t\t\t\t\t struct irdma_sc_qp *qp,\n+\t\t\t\t\t struct irdma_qp_flush_info *info,\n+\t\t\t\t\t bool wait)\n+{\n+\tenum irdma_status_code status;\n+\tstruct irdma_qp_flush_info *hw_info;\n+\tstruct irdma_cqp_request *cqp_request;\n+\tstruct cqp_cmds_info *cqp_info;\n+\tstruct irdma_qp *iwqp = qp->qp_uk.back_qp;\n+\tunsigned long flags = 0;\n+\n+\tcqp_request = irdma_get_cqp_request(&rf->cqp, wait);\n+\tif (!cqp_request)\n+\t\treturn IRDMA_ERR_NO_MEMORY;\n+\n+\tcqp_info = &cqp_request->info;\n+\tif (!wait)\n+\t\tcqp_request->callback_fcn = irdma_hw_flush_wqes_callback;\n+\thw_info = &cqp_request->info.in.u.qp_flush_wqes.info;\n+\tmemcpy(hw_info, info, sizeof(*hw_info));\n+\tcqp_info->cqp_cmd = IRDMA_OP_QP_FLUSH_WQES;\n+\tcqp_info->post_sq = 1;\n+\tcqp_info->in.u.qp_flush_wqes.qp = qp;\n+\tcqp_info->in.u.qp_flush_wqes.scratch = (uintptr_t)cqp_request;\n+\tstatus = irdma_handle_cqp_op(rf, cqp_request);\n+\tif (status) {\n+\t\tdev_dbg(rfdev_to_dev(&rf->sc_dev),\n+\t\t\t\"ERR: CQP-OP Flush WQE's fail\");\n+\t\tcomplete(&iwqp->sq_drained);\n+\t\tcomplete(&iwqp->rq_drained);\n+\t\tqp->qp_uk.sq_flush_complete = true;\n+\t\tqp->qp_uk.rq_flush_complete = true;\n+\t\treturn status;\n+\t}\n+\n+\tif (!wait || cqp_request->compl_info.maj_err_code)\n+\t\treturn 0;\n+\n+\tif (info->rq) {\n+\t\tif (cqp_request->compl_info.min_err_code == IRDMA_CQP_COMPL_SQ_WQE_FLUSHED ||\n+\t\t cqp_request->compl_info.min_err_code == 0) {\n+\t\t\t/* RQ WQE flush was requested but did not happen */\n+\t\t\tqp->qp_uk.rq_flush_complete = true;\n+\t\t\tcomplete(&iwqp->rq_drained);\n+\t\t}\n+\t}\n+\tif (info->sq) {\n+\t\tif (cqp_request->compl_info.min_err_code == 
IRDMA_CQP_COMPL_RQ_WQE_FLUSHED ||\n+\t\t cqp_request->compl_info.min_err_code == 0) {\n+\t\t\tspin_lock_irqsave(&iwqp->lock, flags);\n+\t\t\t/*\n+\t\t\t * Handling case where WQE is posted to empty SQ when\n+\t\t\t * flush has not completed\n+\t\t\t */\n+\t\t\tif (IRDMA_RING_MORE_WORK(qp->qp_uk.sq_ring)) {\n+\t\t\t\tstruct irdma_cqp_request *new_req;\n+\n+\t\t\t\tqp->flush_sq = false;\n+\t\t\t\tspin_unlock_irqrestore(&iwqp->lock, flags);\n+\t\t\t\tinfo->rq = false;\n+\t\t\t\tnew_req = irdma_get_cqp_request(&rf->cqp, true);\n+\t\t\t\tif (!new_req)\n+\t\t\t\t\treturn IRDMA_ERR_NO_MEMORY;\n+\t\t\t\tcqp_info = &new_req->info;\n+\t\t\t\thw_info = &new_req->info.in.u.qp_flush_wqes.info;\n+\t\t\t\tmemcpy(hw_info, info, sizeof(*hw_info));\n+\t\t\t\tcqp_info->cqp_cmd = IRDMA_OP_QP_FLUSH_WQES;\n+\t\t\t\tcqp_info->post_sq = 1;\n+\t\t\t\tcqp_info->in.u.qp_flush_wqes.qp = qp;\n+\t\t\t\tcqp_info->in.u.qp_flush_wqes.scratch = (uintptr_t)new_req;\n+\n+\t\t\t\tstatus = irdma_handle_cqp_op(rf, new_req);\n+\t\t\t\tif (new_req->compl_info.maj_err_code ||\n+\t\t\t\t new_req->compl_info.min_err_code != IRDMA_CQP_COMPL_SQ_WQE_FLUSHED ||\n+\t\t\t\t status) {\n+\t\t\t\t\tpr_warn(\"SQ in error but not flushed\\n\");\n+\t\t\t\t\tqp->qp_uk.sq_flush_complete = true;\n+\t\t\t\t}\n+\t\t\t} else {\n+\t\t\t\t/* SQ WQE flush was requested but did not happen */\n+\t\t\t\tqp->qp_uk.sq_flush_complete = true;\n+\t\t\t\tspin_unlock_irqrestore(&iwqp->lock, flags);\n+\t\t\t\tcomplete(&iwqp->sq_drained);\n+\t\t\t}\n+\t\t} else {\n+\t\t\tspin_lock_irqsave(&iwqp->lock, flags);\n+\t\t\tif (!IRDMA_RING_MORE_WORK(qp->qp_uk.sq_ring)) {\n+\t\t\t\tqp->qp_uk.sq_flush_complete = true;\n+\t\t\t\tspin_unlock_irqrestore(&iwqp->lock, flags);\n+\t\t\t\tcomplete(&iwqp->sq_drained);\n+\t\t\t} else {\n+\t\t\t\tspin_unlock_irqrestore(&iwqp->lock, flags);\n+\t\t\t}\n+\t\t}\n+\t}\n+\n+\treturn 0;\n+}\n+\n+/**\n+ * irdma_gen_ae - generate AE\n+ * @rf: RDMA PCI function\n+ * @qp: qp associated with AE\n+ * @info: info for ae\n+ * @wait: wait for completion\n+ */\n+void irdma_gen_ae(struct irdma_pci_f *rf, struct irdma_sc_qp *qp,\n+\t\t struct irdma_gen_ae_info *info, bool wait)\n+{\n+\tstruct irdma_gen_ae_info *ae_info;\n+\tstruct irdma_cqp_request *cqp_request;\n+\tstruct cqp_cmds_info *cqp_info;\n+\n+\tcqp_request = irdma_get_cqp_request(&rf->cqp, wait);\n+\tif (!cqp_request)\n+\t\treturn;\n+\n+\tcqp_info = &cqp_request->info;\n+\tae_info = &cqp_request->info.in.u.gen_ae.info;\n+\tmemcpy(ae_info, info, sizeof(*ae_info));\n+\tcqp_info->cqp_cmd = IRDMA_OP_GEN_AE;\n+\tcqp_info->post_sq = 1;\n+\tcqp_info->in.u.gen_ae.qp = qp;\n+\tcqp_info->in.u.gen_ae.scratch = (uintptr_t)cqp_request;\n+\tif (irdma_handle_cqp_op(rf, cqp_request))\n+\t\tdev_dbg(rfdev_to_dev(&rf->sc_dev),\n+\t\t\t\"ERR: CQP OP failed attempting to generate ae_code=0x%x\\n\",\n+\t\t\tinfo->ae_code);\n+}\n+\n+/**\n+ * irdma_get_ib_wc - convert an iwarp flush code to an IB wc status\n+ * @opcode: iwarp flush code\n+ */\n+static enum ib_wc_status irdma_get_ib_wc(enum irdma_flush_opcode opcode)\n+{\n+\tswitch (opcode) {\n+\tcase FLUSH_PROT_ERR:\n+\t\treturn IB_WC_LOC_PROT_ERR;\n+\tcase FLUSH_REM_ACCESS_ERR:\n+\t\treturn IB_WC_REM_ACCESS_ERR;\n+\tcase FLUSH_LOC_QP_OP_ERR:\n+\t\treturn IB_WC_LOC_QP_OP_ERR;\n+\tcase FLUSH_REM_OP_ERR:\n+\t\treturn IB_WC_REM_OP_ERR;\n+\tcase FLUSH_LOC_LEN_ERR:\n+\t\treturn IB_WC_LOC_LEN_ERR;\n+\tcase FLUSH_GENERAL_ERR:\n+\t\treturn IB_WC_GENERAL_ERR;\n+\tcase FLUSH_FATAL_ERR:\n+\tdefault:\n+\t\treturn IB_WC_FATAL_ERR;\n+\t}\n+}\n+\n+/**\n+ * irdma_set_flush_info - set flush info\n+ * 
@pinfo: flush info to update\n+ * @min: minor err\n+ * @maj: major err\n+ * @opcode: flush error code\n+ */\n+static void irdma_set_flush_info(struct irdma_qp_flush_info *pinfo, u16 *min,\n+\t\t\t\t u16 *maj, enum irdma_flush_opcode opcode)\n+{\n+\t*min = (u16)irdma_get_ib_wc(opcode);\n+\t*maj = CQE_MAJOR_DRV;\n+\tpinfo->userflushcode = true;\n+}\n+\n+/**\n+ * irdma_flush_wqes - flush wqes for qp\n+ * @rf: RDMA PCI function\n+ * @iwqp: qp to flush wqes\n+ */\n+void irdma_flush_wqes(struct irdma_pci_f *rf, struct irdma_qp *iwqp)\n+{\n+\tstruct irdma_qp_flush_info info = {};\n+\tstruct irdma_sc_qp *qp = &iwqp->sc_qp;\n+\n+\tinfo.sq = true;\n+\tinfo.rq = true;\n+\tif (qp->term_flags) {\n+\t\tirdma_set_flush_info(&info, &info.sq_minor_code,\n+\t\t\t\t &info.sq_major_code, qp->flush_code);\n+\t\tirdma_set_flush_info(&info, &info.rq_minor_code,\n+\t\t\t\t &info.rq_major_code, qp->flush_code);\n+\t}\n+\t(void)irdma_hw_flush_wqes(rf, &iwqp->sc_qp, &info, true);\n+}\ndiff --git a/drivers/infiniband/hw/irdma/i40iw_hw.c b/drivers/infiniband/hw/irdma/i40iw_hw.c\nnew file mode 100644\nindex 0000000..8a6333c\n--- /dev/null\n+++ b/drivers/infiniband/hw/irdma/i40iw_hw.c\n@@ -0,0 +1,210 @@\n+// SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB\n+/* Copyright (c) 2019, Intel Corporation. */\n+\n+#include \"osdep.h\"\n+#include \"type.h\"\n+#include \"i40iw_hw.h\"\n+#include \"status.h\"\n+#include \"protos.h\"\n+\n+#define I40E_CQPSQ_CQ_CQID_SHIFT 0\n+#define I40E_CQPSQ_CQ_CQID_MASK \\\n+\t(0xffffULL << I40E_CQPSQ_CQ_CQID_SHIFT)\n+\n+static u32 i40iw_regs[IRDMA_MAX_REGS] = {\n+\tI40E_PFPE_CQPTAIL,\n+\tI40E_PFPE_CQPDB,\n+\tI40E_PFPE_CCQPSTATUS,\n+\tI40E_PFPE_CCQPHIGH,\n+\tI40E_PFPE_CCQPLOW,\n+\tI40E_PFPE_CQARM,\n+\tI40E_PFPE_CQACK,\n+\tI40E_PFPE_AEQALLOC,\n+\tI40E_PFPE_CQPERRCODES,\n+\tI40E_PFPE_WQEALLOC,\n+\tI40E_PFINT_DYN_CTLN(0),\n+\tI40IW_DB_ADDR_OFFSET,\n+\n+\tI40E_GLPCI_LBARCTRL,\n+\tI40E_GLPE_CPUSTATUS0,\n+\tI40E_GLPE_CPUSTATUS1,\n+\tI40E_GLPE_CPUSTATUS2,\n+\tI40E_PFINT_AEQCTL,\n+\tI40E_PFINT_CEQCTL(0),\n+\tI40E_VSIQF_CTL(0),\n+\tI40E_PFHMC_PDINV,\n+\tI40E_GLHMC_VFPDINV(0)\n+};\n+\n+static u32 i40iw_stat_offsets_32[IRDMA_HW_STAT_INDEX_MAX_32] = {\n+\tI40E_GLPES_PFIP4RXDISCARD(0),\n+\tI40E_GLPES_PFIP4RXTRUNC(0),\n+\tI40E_GLPES_PFIP4TXNOROUTE(0),\n+\tI40E_GLPES_PFIP6RXDISCARD(0),\n+\tI40E_GLPES_PFIP6RXTRUNC(0),\n+\tI40E_GLPES_PFIP6TXNOROUTE(0),\n+\tI40E_GLPES_PFTCPRTXSEG(0),\n+\tI40E_GLPES_PFTCPRXOPTERR(0),\n+\tI40E_GLPES_PFTCPRXPROTOERR(0),\n+\tI40E_GLPES_PFRXVLANERR(0)\n+};\n+\n+static u32 i40iw_stat_offsets_64[IRDMA_HW_STAT_INDEX_MAX_64] = 
{\n+\tI40E_GLPES_PFIP4RXOCTSLO(0),\n+\tI40E_GLPES_PFIP4RXPKTSLO(0),\n+\tI40E_GLPES_PFIP4RXFRAGSLO(0),\n+\tI40E_GLPES_PFIP4RXMCPKTSLO(0),\n+\tI40E_GLPES_PFIP4TXOCTSLO(0),\n+\tI40E_GLPES_PFIP4TXPKTSLO(0),\n+\tI40E_GLPES_PFIP4TXFRAGSLO(0),\n+\tI40E_GLPES_PFIP4TXMCPKTSLO(0),\n+\tI40E_GLPES_PFIP6RXOCTSLO(0),\n+\tI40E_GLPES_PFIP6RXPKTSLO(0),\n+\tI40E_GLPES_PFIP6RXFRAGSLO(0),\n+\tI40E_GLPES_PFIP6RXMCPKTSLO(0),\n+\tI40E_GLPES_PFIP6TXOCTSLO(0),\n+\tI40E_GLPES_PFIP6TXPKTSLO(0),\n+\tI40E_GLPES_PFIP6TXFRAGSLO(0),\n+\tI40E_GLPES_PFIP6TXMCPKTSLO(0),\n+\tI40E_GLPES_PFTCPRXSEGSLO(0),\n+\tI40E_GLPES_PFTCPTXSEGLO(0),\n+\tI40E_GLPES_PFRDMARXRDSLO(0),\n+\tI40E_GLPES_PFRDMARXSNDSLO(0),\n+\tI40E_GLPES_PFRDMARXWRSLO(0),\n+\tI40E_GLPES_PFRDMATXRDSLO(0),\n+\tI40E_GLPES_PFRDMATXSNDSLO(0),\n+\tI40E_GLPES_PFRDMATXWRSLO(0),\n+\tI40E_GLPES_PFRDMAVBNDLO(0),\n+\tI40E_GLPES_PFRDMAVINVLO(0),\n+\tI40E_GLPES_PFIP4RXMCOCTSLO(0),\n+\tI40E_GLPES_PFIP4TXMCOCTSLO(0),\n+\tI40E_GLPES_PFIP6RXMCOCTSLO(0),\n+\tI40E_GLPES_PFIP6TXMCOCTSLO(0),\n+\tI40E_GLPES_PFUDPRXPKTSLO(0),\n+\tI40E_GLPES_PFUDPTXPKTSLO(0)\n+};\n+\n+static u64 i40iw_masks[IRDMA_MAX_MASKS] = {\n+\tI40E_PFPE_CCQPSTATUS_CCQP_DONE_MASK,\n+\tI40E_PFPE_CCQPSTATUS_CCQP_ERR_MASK,\n+\tI40E_CQPSQ_STAG_PDID_MASK,\n+\tI40E_CQPSQ_CQ_CEQID_MASK,\n+\tI40E_CQPSQ_CQ_CQID_MASK,\n+};\n+\n+static u64 i40iw_shifts[IRDMA_MAX_SHIFTS] = {\n+\tI40E_PFPE_CCQPSTATUS_CCQP_DONE_SHIFT,\n+\tI40E_PFPE_CCQPSTATUS_CCQP_ERR_SHIFT,\n+\tI40E_CQPSQ_STAG_PDID_SHIFT,\n+\tI40E_CQPSQ_CQ_CEQID_SHIFT,\n+\tI40E_CQPSQ_CQ_CQID_SHIFT,\n+};\n+\n+static struct irdma_irq_ops i40iw_irq_ops;\n+\n+/**\n+ * i40iw_config_ceq - Configure CEQ interrupt\n+ * @dev: pointer to the device structure\n+ * @ceq_id: Completion Event Queue ID\n+ * @idx: vector index\n+ */\n+static void i40iw_config_ceq(struct irdma_sc_dev *dev, u32 ceq_id, u32 idx)\n+{\n+\tu32 reg_val;\n+\n+\treg_val = (ceq_id << I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT);\n+\treg_val |= (QUEUE_TYPE_CEQ << I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT);\n+\twr32(dev->hw, I40E_PFINT_LNKLSTN(idx - 1), reg_val);\n+\n+\treg_val = (0x3 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);\n+\treg_val |= I40E_PFINT_DYN_CTLN_INTENA_MASK;\n+\twr32(dev->hw, I40E_PFINT_DYN_CTLN(idx - 1), reg_val);\n+\n+\treg_val = (IRDMA_GLINT_CEQCTL_CAUSE_ENA_M |\n+\t\t (idx << IRDMA_GLINT_CEQCTL_MSIX_INDX_S) |\n+\t\t IRDMA_GLINT_CEQCTL_ITR_INDX_M);\n+\treg_val |= (NULL_QUEUE_INDEX << I40E_PFINT_CEQCTL_NEXTQ_INDX_SHIFT);\n+\n+\twr32(dev->hw, dev->hw_regs[IRDMA_GLINT_CEQCTL] + 4 * ceq_id, reg_val);\n+}\n+\n+/**\n+ * i40iw_ena_irq - Enable interrupt\n+ * @dev: pointer to the device structure\n+ * @idx: vector index\n+ */\n+static void i40iw_ena_irq(struct irdma_sc_dev *dev, u32 idx)\n+{\n+\tu32 val;\n+\n+\tval = IRDMA_GLINT_DYN_CTL_INTENA_M | IRDMA_GLINT_DYN_CTL_CLEARPBA_M |\n+\t IRDMA_GLINT_DYN_CTL_ITR_INDX_M;\n+\twr32(dev->hw, dev->hw_regs[IRDMA_GLINT_DYN_CTL] + 4 * (idx - 1), val);\n+}\n+\n+/**\n+ * i40iw_disable_irq - Disable interrupt\n+ * @dev: pointer to the device structure\n+ * @idx: vector index\n+ */\n+static void i40iw_disable_irq(struct irdma_sc_dev *dev, u32 idx)\n+{\n+\twr32(dev->hw, dev->hw_regs[IRDMA_GLINT_DYN_CTL] + 4 * (idx - 1), 0);\n+}\n+\n+/**\n+ * i40iw_init_hw - set up device registers, masks and attributes for i40iw\n+ * @dev: pointer to the device structure\n+ */\n+void i40iw_init_hw(struct irdma_sc_dev *dev)\n+{\n+\tint i;\n+\n+\tfor (i = 0; i < IRDMA_MAX_REGS; ++i)\n+\t\tdev->hw_regs[i] = i40iw_regs[i];\n+\n+\tfor (i = 0; i < IRDMA_HW_STAT_INDEX_MAX_32; ++i)\n+\t\tdev->hw_stats_regs_32[i] = i40iw_stat_offsets_32[i];\n+\n+\tfor (i = 0; i < IRDMA_HW_STAT_INDEX_MAX_64; ++i)\n+\t\tdev->hw_stats_regs_64[i] = 
i40iw_stat_offsets_64[i];\n+\n+\tfor (i = 0; i < IRDMA_MAX_SHIFTS; ++i)\n+\t\tdev->hw_shifts[i] = i40iw_shifts[i];\n+\n+\tfor (i = 0; i < IRDMA_MAX_MASKS; ++i)\n+\t\tdev->hw_masks[i] = i40iw_masks[i];\n+\n+\tdev->wqe_alloc_db = (u32 __iomem *)(irdma_get_hw_addr(dev) +\n+\t\t\t\t\t dev->hw_regs[IRDMA_WQEALLOC]);\n+\tdev->cq_arm_db = (u32 __iomem *)(irdma_get_hw_addr(dev) +\n+\t\t\t\t dev->hw_regs[IRDMA_CQARM]);\n+\tdev->aeq_alloc_db = (u32 __iomem *)(irdma_get_hw_addr(dev) +\n+\t\t\t\t\t dev->hw_regs[IRDMA_AEQALLOC]);\n+\tdev->cqp_db = (u32 __iomem *)(irdma_get_hw_addr(dev) +\n+\t\t\t\t dev->hw_regs[IRDMA_CQPDB]);\n+\tdev->cq_ack_db = (u32 __iomem *)(irdma_get_hw_addr(dev) +\n+\t\t\t\t dev->hw_regs[IRDMA_CQACK]);\n+\tdev->ceq_itr_mask_db = NULL;\n+\tdev->aeq_itr_mask_db = NULL;\n+\n+\tmemcpy(&i40iw_irq_ops, dev->irq_ops, sizeof(i40iw_irq_ops));\n+\ti40iw_irq_ops.irdma_en_irq = i40iw_ena_irq;\n+\ti40iw_irq_ops.irdma_dis_irq = i40iw_disable_irq;\n+\ti40iw_irq_ops.irdma_cfg_ceq = i40iw_config_ceq;\n+\tdev->irq_ops = &i40iw_irq_ops;\n+\n+\t/* Setup the hardware limits, hmc may limit further */\n+\tdev->hw_attrs.uk_attrs.max_hw_wq_frags = I40IW_MAX_WQ_FRAGMENT_COUNT;\n+\tdev->hw_attrs.uk_attrs.max_hw_read_sges = I40IW_MAX_SGE_RD;\n+\tdev->hw_attrs.max_hw_device_pages = I40IW_MAX_PUSH_PAGE_COUNT;\n+\tdev->hw_attrs.first_hw_vf_fpm_id = I40IW_FIRST_VF_FPM_ID;\n+\tdev->hw_attrs.uk_attrs.max_hw_inline = I40IW_MAX_INLINE_DATA_SIZE;\n+\tdev->hw_attrs.max_hw_ird = I40IW_MAX_IRD_SIZE;\n+\tdev->hw_attrs.max_hw_ord = I40IW_MAX_ORD_SIZE;\n+\tdev->hw_attrs.max_hw_wqes = I40IW_MAX_WQ_ENTRIES;\n+\tdev->hw_attrs.uk_attrs.max_hw_rq_quanta = I40IW_QP_SW_MAX_RQ_QUANTA;\n+\tdev->hw_attrs.uk_attrs.max_hw_wq_quanta = I40IW_QP_SW_MAX_WQ_QUANTA;\n+\tdev->hw_attrs.uk_attrs.max_hw_sq_chunk = I40IW_MAX_QUANTA_PER_WR;\n+\tdev->hw_attrs.max_hw_pds = I40IW_MAX_PDS;\n+\tdev->hw_attrs.max_stat_inst = I40IW_MAX_STATS_COUNT;\n+\tdev->hw_attrs.max_hw_outbound_msg_size = I40IW_MAX_OUTBOUND_MSG_SIZE;\n+\tdev->hw_attrs.max_hw_inbound_msg_size = I40IW_MAX_INBOUND_MSG_SIZE;\n+\tdev->hw_attrs.max_qp_wr = I40IW_MAX_QP_WRS;\n+}\ndiff --git a/drivers/infiniband/hw/irdma/i40iw_hw.h b/drivers/infiniband/hw/irdma/i40iw_hw.h\nnew file mode 100644\nindex 0000000..0994a72\n--- /dev/null\n+++ b/drivers/infiniband/hw/irdma/i40iw_hw.h\n@@ -0,0 +1,163 @@\n+/* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB */\n+/* Copyright (c) 2019, Intel Corporation. 
*/\n+\n+#ifndef I40IW_HW_H\n+#define I40IW_HW_H\n+#define I40E_VFPE_CQPTAIL1 0x0000A000 /* Reset: VFR */\n+#define I40E_VFPE_CQPDB1 0x0000BC00 /* Reset: VFR */\n+#define I40E_VFPE_CCQPSTATUS1 0x0000B800 /* Reset: VFR */\n+#define I40E_VFPE_CCQPHIGH1 0x00009800 /* Reset: VFR */\n+#define I40E_VFPE_CCQPLOW1 0x0000AC00 /* Reset: VFR */\n+#define I40E_VFPE_CQARM1 0x0000B400 /* Reset: VFR */\n+#define I40E_VFPE_CQACK1 0x0000B000 /* Reset: VFR */\n+#define I40E_VFPE_AEQALLOC1 0x0000A400 /* Reset: VFR */\n+#define I40E_VFPE_CQPERRCODES1 0x00009C00 /* Reset: VFR */\n+#define I40E_VFPE_WQEALLOC1 0x0000C000 /* Reset: VFR */\n+#define I40E_VFINT_DYN_CTLN(_INTVF) (0x00024800 + ((_INTVF) * 4)) /* _i=0...511 */ /* Reset: VFR */\n+\n+#define I40E_PFPE_CQPTAIL 0x00008080 /* Reset: PFR */\n+\n+#define I40E_PFPE_CQPDB 0x00008000 /* Reset: PFR */\n+#define I40E_PFPE_CCQPSTATUS 0x00008100 /* Reset: PFR */\n+#define I40E_PFPE_CCQPHIGH 0x00008200 /* Reset: PFR */\n+#define I40E_PFPE_CCQPLOW 0x00008180 /* Reset: PFR */\n+#define I40E_PFPE_CQARM 0x00131080 /* Reset: PFR */\n+#define I40E_PFPE_CQACK 0x00131100 /* Reset: PFR */\n+#define I40E_PFPE_AEQALLOC 0x00131180 /* Reset: PFR */\n+#define I40E_PFPE_CQPERRCODES 0x00008880 /* Reset: PFR */\n+#define I40E_PFPE_WQEALLOC 0x00138C00 /* Reset: PFR */\n+#define I40E_GLPCI_LBARCTRL 0x000BE484 /* Reset: POR */\n+#define I40E_GLPE_CPUSTATUS0 0x0000D040 /* Reset: PE_CORER */\n+#define I40E_GLPE_CPUSTATUS1 0x0000D044 /* Reset: PE_CORER */\n+#define I40E_GLPE_CPUSTATUS2 0x0000D048 /* Reset: PE_CORER */\n+#define I40E_PFHMC_PDINV 0x000C0300 /* Reset: PFR */\n+#define I40E_GLHMC_VFPDINV(_i) (0x000C8300 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */\n+#define I40E_PFINT_DYN_CTLN(_INTPF) (0x00034800 + ((_INTPF) * 4)) /* _i=0...511 */\t/* Reset: PFR */\n+#define I40E_PFINT_AEQCTL 0x00038700 /* Reset: CORER */\n+\n+#define I40E_GLPES_PFIP4RXDISCARD(_i) (0x00010600 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */\n+#define I40E_GLPES_PFIP4RXTRUNC(_i) (0x00010700 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */\n+#define I40E_GLPES_PFIP4TXNOROUTE(_i) (0x00012E00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */\n+#define I40E_GLPES_PFIP6RXDISCARD(_i) (0x00011200 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */\n+#define I40E_GLPES_PFIP6RXTRUNC(_i) (0x00011300 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */\n+\n+#define I40E_GLPES_PFRDMAVBNDLO(_i) (0x00014800 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */\n+#define I40E_GLPES_PFIP4TXMCOCTSLO(_i) (0x00012000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */\n+#define I40E_GLPES_PFIP6RXMCOCTSLO(_i) (0x00011600 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */\n+#define I40E_GLPES_PFIP6TXMCOCTSLO(_i) (0x00012A00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */\n+#define I40E_GLPES_PFUDPRXPKTSLO(_i) (0x00013800 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */\n+#define I40E_GLPES_PFUDPTXPKTSLO(_i) (0x00013A00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */\n+\n+#define I40E_GLPES_PFIP6TXNOROUTE(_i) (0x00012F00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */\n+#define I40E_GLPES_PFTCPRTXSEG(_i) (0x00013600 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */\n+#define I40E_GLPES_PFTCPRXOPTERR(_i) (0x00013200 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */\n+#define I40E_GLPES_PFTCPRXPROTOERR(_i) (0x00013300 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */\n+#define I40E_GLPES_PFRXVLANERR(_i) (0x00010000 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */\n+#define I40E_GLPES_PFIP4RXOCTSLO(_i) 
(0x00010200 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */\n+#define I40E_GLPES_PFIP4RXPKTSLO(_i) (0x00010400 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */\n+#define I40E_GLPES_PFIP4RXFRAGSLO(_i) (0x00010800 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */\n+#define I40E_GLPES_PFIP4RXMCPKTSLO(_i) (0x00010C00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */\n+#define I40E_GLPES_PFIP4TXOCTSLO(_i) (0x00011A00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */\n+#define I40E_GLPES_PFIP4TXPKTSLO(_i) (0x00011C00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */\n+#define I40E_GLPES_PFIP4TXFRAGSLO(_i) (0x00011E00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */\n+#define I40E_GLPES_PFIP4TXMCPKTSLO(_i) (0x00012200 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */\n+#define I40E_GLPES_PFIP6RXOCTSLO(_i) (0x00010E00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */\n+#define I40E_GLPES_PFIP6RXPKTSLO(_i) (0x00011000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */\n+#define I40E_GLPES_PFIP6RXFRAGSLO(_i) (0x00011400 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */\n+#define I40E_GLPES_PFIP6TXOCTSLO(_i) (0x00012400 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */\n+#define I40E_GLPES_PFIP6TXPKTSLO(_i) (0x00012600 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */\n+#define I40E_GLPES_PFIP6TXFRAGSLO(_i) (0x00012800 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */\n+#define I40E_GLPES_PFIP6TXMCPKTSLO(_i) (0x00012C00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */\n+#define I40E_GLPES_PFTCPTXSEGLO(_i) (0x00013400 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */\n+#define I40E_GLPES_PFRDMARXRDSLO(_i) (0x00013E00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */\n+#define I40E_GLPES_PFRDMARXSNDSLO(_i) (0x00014000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */\n+#define I40E_GLPES_PFRDMARXWRSLO(_i) (0x00013C00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */\n+#define I40E_GLPES_PFRDMATXRDSLO(_i) (0x00014400 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */\n+#define I40E_GLPES_PFRDMATXSNDSLO(_i) (0x00014600 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */\n+#define I40E_GLPES_PFRDMATXWRSLO(_i) (0x00014200 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */\n+#define I40E_GLPES_PFIP4RXMCOCTSLO(_i) (0x00010A00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */\n+#define I40E_GLPES_PFIP6RXMCPKTSLO(_i) (0x00011800 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */\n+#define I40E_GLPES_PFTCPRXSEGSLO(_i) (0x00013000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */\n+#define I40E_GLPES_PFRDMAVINVLO(_i) (0x00014A00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */\n+\n+#define I40IW_DB_ADDR_OFFSET (4 * 1024 * 1024 - 64 * 1024)\n+\n+#define I40IW_VF_DB_ADDR_OFFSET (64 * 1024)\n+\n+#define I40E_PFINT_LNKLSTN(_INTPF) (0x00035000 + ((_INTPF) * 4)) /* _i=0...511 */ /* Reset: PFR */\n+#define I40E_PFINT_LNKLSTN_MAX_INDEX 511\n+#define I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT 0\n+#define I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK (0x7FF << I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT)\n+#define I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT 11\n+#define I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_MASK (0x3 << I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT)\n+\n+#define I40E_PFINT_CEQCTL(_INTPF) (0x00036800 + ((_INTPF) * 4)) /* _i=0...511 */ /* Reset: CORER */\n+#define I40E_PFINT_CEQCTL_MAX_INDEX 511\n+#define I40E_PFINT_CEQCTL_MSIX_INDX_SHIFT 0\n+#define I40E_PFINT_CEQCTL_MSIX_INDX_MASK (0xFF << I40E_PFINT_CEQCTL_MSIX_INDX_SHIFT)\n+#define I40E_PFINT_CEQCTL_ITR_INDX_SHIFT 11\n+#define 
I40E_PFINT_CEQCTL_ITR_INDX_MASK (0x3 << I40E_PFINT_CEQCTL_ITR_INDX_SHIFT)\n+#define I40E_PFINT_CEQCTL_MSIX0_INDX_SHIFT 13\n+#define I40E_PFINT_CEQCTL_MSIX0_INDX_MASK (0x7 << I40E_PFINT_CEQCTL_MSIX0_INDX_SHIFT)\n+#define I40E_PFINT_CEQCTL_NEXTQ_INDX_SHIFT 16\n+#define I40E_PFINT_CEQCTL_NEXTQ_INDX_MASK (0x7FF << I40E_PFINT_CEQCTL_NEXTQ_INDX_SHIFT)\n+#define I40E_PFINT_CEQCTL_NEXTQ_TYPE_SHIFT 27\n+#define I40E_PFINT_CEQCTL_NEXTQ_TYPE_MASK (0x3 << I40E_PFINT_CEQCTL_NEXTQ_TYPE_SHIFT)\n+#define I40E_PFINT_CEQCTL_CAUSE_ENA_SHIFT 30\n+#define I40E_PFINT_CEQCTL_CAUSE_ENA_MASK (0x1 << I40E_PFINT_CEQCTL_CAUSE_ENA_SHIFT)\n+#define I40E_PFINT_CEQCTL_INTEVENT_SHIFT 31\n+#define I40E_PFINT_CEQCTL_INTEVENT_MASK (0x1 << I40E_PFINT_CEQCTL_INTEVENT_SHIFT)\n+\n+#define I40E_CQPSQ_STAG_PDID_SHIFT 48\n+#define I40E_CQPSQ_STAG_PDID_MASK (0x7FFFULL << I40E_CQPSQ_STAG_PDID_SHIFT)\n+\n+#define I40E_PFPE_CCQPSTATUS_CCQP_DONE_SHIFT 0\n+#define I40E_PFPE_CCQPSTATUS_CCQP_DONE_MASK (0x1ULL << I40E_PFPE_CCQPSTATUS_CCQP_DONE_SHIFT)\n+\n+#define I40E_PFPE_CCQPSTATUS_CCQP_ERR_SHIFT 31\n+#define I40E_PFPE_CCQPSTATUS_CCQP_ERR_MASK (0x1ULL << I40E_PFPE_CCQPSTATUS_CCQP_ERR_SHIFT)\n+\n+#define I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT 3\n+#define I40E_PFINT_DYN_CTLN_ITR_INDX_MASK (0x3 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT)\n+\n+#define I40E_PFINT_DYN_CTLN_INTENA_SHIFT 0\n+#define I40E_PFINT_DYN_CTLN_INTENA_MASK (0x1 << I40E_PFINT_DYN_CTLN_INTENA_SHIFT)\n+\n+#define I40E_CQPSQ_CQ_CEQID_SHIFT 24\n+#define I40E_CQPSQ_CQ_CEQID_MASK (0x7fUL << I40E_CQPSQ_CQ_CEQID_SHIFT)\n+\n+#define I40E_VSIQF_CTL(_VSI) (0x0020D800 + ((_VSI) * 4))\n+\n+enum i40iw_device_caps_const {\n+\tI40IW_MAX_WQ_FRAGMENT_COUNT\t\t= 3,\n+\tI40IW_MAX_SGE_RD\t\t\t= 1,\n+\tI40IW_MAX_PUSH_PAGE_COUNT\t\t= 0,\n+\tI40IW_MAX_INLINE_DATA_SIZE\t\t= 48,\n+\tI40IW_MAX_IRD_SIZE\t\t\t= 63,\n+\tI40IW_MAX_ORD_SIZE\t\t\t= 127,\n+\tI40IW_MAX_WQ_ENTRIES\t\t\t= 2048,\n+\tI40IW_MAX_WQE_SIZE_RQ\t\t\t= 128,\n+\tI40IW_MAX_PDS\t\t\t\t= 32768,\n+\tI40IW_MAX_STATS_COUNT\t\t\t= 16,\n+\tI40IW_MAX_CQ_SIZE\t\t\t= 1048575,\n+\tI40IW_MAX_OUTBOUND_MSG_SIZE\t\t= 2147483647,\n+\tI40IW_MAX_INBOUND_MSG_SIZE\t\t= 2147483647,\n+};\n+\n+#define I40IW_QP_WQE_MIN_SIZE\t32\n+#define I40IW_QP_WQE_MAX_SIZE\t128\n+#define I40IW_QP_SW_MIN_WQSIZE\t4\n+\n+#define\tI40IW_MAX_RQ_WQE_SHIFT\t2\n+#define I40IW_MAX_QUANTA_PER_WR 2\n+\n+#define I40IW_QP_SW_MAX_SQ_QUANTA 2048\n+#define I40IW_QP_SW_MAX_RQ_QUANTA 16384\n+#define I40IW_QP_SW_MAX_WQ_QUANTA 2048\n+#define I40IW_MAX_QP_WRS ((I40IW_QP_SW_MAX_SQ_QUANTA - IRDMA_SQ_RSVD) / I40IW_MAX_QUANTA_PER_WR)\n+#define I40IW_FIRST_VF_FPM_ID\t16\n+#define QUEUE_TYPE_CEQ\t\t2\n+#define NULL_QUEUE_INDEX\t0x7FF\n+\n+void i40iw_init_hw(struct irdma_sc_dev *dev);\n+#endif /* I40IW_HW_H */\ndiff --git a/drivers/infiniband/hw/irdma/icrdma_hw.c b/drivers/infiniband/hw/irdma/icrdma_hw.c\nnew file mode 100644\nindex 0000000..18f5e00\n--- /dev/null\n+++ b/drivers/infiniband/hw/irdma/icrdma_hw.c\n@@ -0,0 +1,75 @@\n+// SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB\n+/* Copyright (c) 2019, Intel Corporation. 
*/\n+\n+#include \"osdep.h\"\n+#include \"type.h\"\n+#include \"icrdma_hw.h\"\n+\n+static u32 icrdma_regs[IRDMA_MAX_REGS] = {\n+\tPFPE_CQPTAIL,\n+\tPFPE_CQPDB,\n+\tPFPE_CCQPSTATUS,\n+\tPFPE_CCQPHIGH,\n+\tPFPE_CCQPLOW,\n+\tPFPE_CQARM,\n+\tPFPE_CQACK,\n+\tPFPE_AEQALLOC,\n+\tPFPE_CQPERRCODES,\n+\tPFPE_WQEALLOC,\n+\tGLINT_DYN_CTL(0),\n+\tICRDMA_DB_ADDR_OFFSET,\n+\n+\tGLPCI_LBARCTRL,\n+\tGLPE_CPUSTATUS0,\n+\tGLPE_CPUSTATUS1,\n+\tGLPE_CPUSTATUS2,\n+\tPFINT_AEQCTL,\n+\tGLINT_CEQCTL(0),\n+\tVSIQF_PE_CTL1(0),\n+\tPFHMC_PDINV,\n+\tGLHMC_VFPDINV(0)\n+};\n+\n+static u64 icrdma_masks[IRDMA_MAX_MASKS] = {\n+\tICRDMA_CCQPSTATUS_CCQP_DONE_M,\n+\tICRDMA_CCQPSTATUS_CCQP_ERR_M,\n+\tICRDMA_CQPSQ_STAG_PDID_M,\n+\tICRDMA_CQPSQ_CQ_CEQID_M,\n+\tICRDMA_CQPSQ_CQ_CQID_M,\n+};\n+\n+static u64 icrdma_shifts[IRDMA_MAX_SHIFTS] = {\n+\tICRDMA_CCQPSTATUS_CCQP_DONE_S,\n+\tICRDMA_CCQPSTATUS_CCQP_ERR_S,\n+\tICRDMA_CQPSQ_STAG_PDID_S,\n+\tICRDMA_CQPSQ_CQ_CEQID_S,\n+\tICRDMA_CQPSQ_CQ_CQID_S,\n+};\n+\n+/**\n+ * icrdma_init_hw - set up device registers, masks and attributes for icrdma\n+ * @dev: pointer to the device structure\n+ */\n+void icrdma_init_hw(struct irdma_sc_dev *dev)\n+{\n+\tint i;\n+\n+\tfor (i = 0; i < IRDMA_MAX_REGS; ++i)\n+\t\tdev->hw_regs[i] = icrdma_regs[i];\n+\n+\tfor (i = 0; i < IRDMA_MAX_SHIFTS; ++i)\n+\t\tdev->hw_shifts[i] = icrdma_shifts[i];\n+\n+\tfor (i = 0; i < IRDMA_MAX_MASKS; ++i)\n+\t\tdev->hw_masks[i] = icrdma_masks[i];\n+\n+\tdev->wqe_alloc_db = (u32 __iomem *)(irdma_get_hw_addr(dev) +\n+\t\t\t\t\t dev->hw_regs[IRDMA_WQEALLOC]);\n+\tdev->cq_arm_db = (u32 __iomem *)(irdma_get_hw_addr(dev) +\n+\t\t\t\t dev->hw_regs[IRDMA_CQARM]);\n+\tdev->aeq_alloc_db = (u32 __iomem *)(irdma_get_hw_addr(dev) +\n+\t\t\t\t\t dev->hw_regs[IRDMA_AEQALLOC]);\n+\tdev->cqp_db = (u32 __iomem *)(irdma_get_hw_addr(dev) +\n+\t\t\t\t dev->hw_regs[IRDMA_CQPDB]);\n+\tdev->cq_ack_db = (u32 __iomem *)(irdma_get_hw_addr(dev) +\n+\t\t\t\t dev->hw_regs[IRDMA_CQACK]);\n+\tdev->hw_attrs.max_stat_inst = ICRDMA_MAX_STATS_COUNT;\n+\n+\tdev->hw_attrs.uk_attrs.max_hw_sq_chunk = IRDMA_MAX_QUANTA_PER_WR;\n+}\ndiff --git a/drivers/infiniband/hw/irdma/icrdma_hw.h b/drivers/infiniband/hw/irdma/icrdma_hw.h\nnew file mode 100644\nindex 0000000..75ee433\n--- /dev/null\n+++ b/drivers/infiniband/hw/irdma/icrdma_hw.h\n@@ -0,0 +1,63 @@\n+/* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB */\n+/* Copyright (c) 2019, Intel Corporation. 
*/\n+\n+#ifndef ICRDMA_HW_H\n+#define ICRDMA_HW_H\n+\n+#define VFPE_CQPTAIL1\t\t0x0000a000\n+#define VFPE_CQPDB1\t\t0x0000bc00\n+#define VFPE_CCQPSTATUS1\t0x0000b800\n+#define VFPE_CCQPHIGH1\t\t0x00009800\n+#define VFPE_CCQPLOW1\t\t0x0000ac00\n+#define VFPE_CQARM1\t\t0x0000b400\n+#define VFPE_CQACK1\t\t0x0000b000\n+#define VFPE_AEQALLOC1\t\t0x0000a400\n+#define VFPE_CQPERRCODES1\t0x00009c00\n+#define VFPE_WQEALLOC1\t\t0x0000c000\n+#define VFINT_DYN_CTLN(_i)\t(0x00003800 + ((_i) * 4)) /* _i=0...63 */\n+\n+#define PFPE_CQPTAIL\t\t0x00500880\n+#define PFPE_CQPDB\t\t0x00500800\n+#define PFPE_CCQPSTATUS\t\t0x0050a000\n+#define PFPE_CCQPHIGH\t\t0x0050a100\n+#define PFPE_CCQPLOW\t\t0x0050a080\n+#define PFPE_CQARM\t\t0x00502c00\n+#define PFPE_CQACK\t\t0x00502c80\n+#define PFPE_AEQALLOC\t\t0x00502d00\n+#define GLINT_DYN_CTL(_INT)\t(0x00160000 + ((_INT) * 4)) /* _i=0...2047 */\n+#define GLPCI_LBARCTRL\t\t0x0009de74\n+#define GLPE_CPUSTATUS0\t\t0x0050ba5c\n+#define GLPE_CPUSTATUS1\t\t0x0050ba60\n+#define GLPE_CPUSTATUS2\t\t0x0050ba64\n+#define PFINT_AEQCTL\t\t0x0016cb00\n+#define PFPE_CQPERRCODES\t0x0050a200\n+#define PFPE_WQEALLOC\t\t0x00504400\n+#define GLINT_CEQCTL(_INT)\t(0x0015c000 + ((_INT) * 4)) /* _i=0...2047 */\n+#define VSIQF_PE_CTL1(_VSI)\t(0x00414000 + ((_VSI) * 4)) /* _i=0...767 */\n+#define PFHMC_PDINV\t\t0x00520300\n+#define GLHMC_VFPDINV(_i)\t(0x00528300 + ((_i) * 4)) /* _i=0...31 */\n+\n+#define ICRDMA_DB_ADDR_OFFSET\t\t(8 * 1024 * 1024 - 64 * 1024)\n+\n+#define ICRDMA_VF_DB_ADDR_OFFSET\t(64 * 1024)\n+\n+/* CCQSTATUS */\n+#define ICRDMA_CCQPSTATUS_CCQP_DONE_S\t0\n+#define ICRDMA_CCQPSTATUS_CCQP_DONE_M\t(0x1ULL << ICRDMA_CCQPSTATUS_CCQP_DONE_S)\n+#define ICRDMA_CCQPSTATUS_CCQP_ERR_S\t31\n+#define ICRDMA_CCQPSTATUS_CCQP_ERR_M\t(0x1ULL << ICRDMA_CCQPSTATUS_CCQP_ERR_S)\n+#define ICRDMA_CQPSQ_STAG_PDID_S\t46\n+#define ICRDMA_CQPSQ_STAG_PDID_M\t(0x3ffffULL << ICRDMA_CQPSQ_STAG_PDID_S)\n+#define ICRDMA_CQPSQ_CQ_CEQID_S\t\t22\n+#define ICRDMA_CQPSQ_CQ_CEQID_M\t\t(0x3ffULL << ICRDMA_CQPSQ_CQ_CEQID_S)\n+#define ICRDMA_CQPSQ_CQ_CQID_S 0\n+#define ICRDMA_CQPSQ_CQ_CQID_M \\\n+\t(0x7ffffULL << ICRDMA_CQPSQ_CQ_CQID_S)\n+\n+enum icrdma_device_caps_const {\n+\tICRDMA_MAX_STATS_COUNT = 128,\n+};\n+\n+void icrdma_init_hw(struct irdma_sc_dev *dev);\n+#endif /* ICRDMA_HW_H */\n", "prefixes": [ "rdma-nxt", "02/16" ] }