Patch Detail
get: Show a patch.
patch: Update a patch.
put: Update a patch.
GET /api/patches/1124838/?format=api
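The same detail record can be fetched programmatically. The following is a minimal sketch using only the Python standard library; the URL and field names are taken from the JSON response shown below, and it assumes the public Patchwork instance serves JSON to an unauthenticated plain GET request.

    # Sketch: fetch this patch's detail from the Patchwork REST API.
    import json
    import urllib.request

    URL = "http://patchwork.ozlabs.org/api/patches/1124838/"

    with urllib.request.urlopen(URL) as resp:
        patch = json.load(resp)

    # A few of the fields present in the response body below.
    print(patch["name"])                  # "[rdma-next,05/17] RDMA/irdma: Add privileged UDA queue implementation"
    print(patch["state"])                 # "rejected"
    print(patch["submitter"]["email"])    # "shiraz.saleem@intel.com"
    print(patch["mbox"])                  # URL of the raw patch in mbox format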
{ "id": 1124838, "url": "http://patchwork.ozlabs.org/api/patches/1124838/?format=api", "web_url": "http://patchwork.ozlabs.org/project/intel-wired-lan/patch/20190629185405.1601-6-shiraz.saleem@intel.com/", "project": { "id": 46, "url": "http://patchwork.ozlabs.org/api/projects/46/?format=api", "name": "Intel Wired Ethernet development", "link_name": "intel-wired-lan", "list_id": "intel-wired-lan.osuosl.org", "list_email": "intel-wired-lan@osuosl.org", "web_url": "", "scm_url": "", "webscm_url": "", "list_archive_url": "", "list_archive_url_format": "", "commit_url_format": "" }, "msgid": "<20190629185405.1601-6-shiraz.saleem@intel.com>", "list_archive_url": null, "date": "2019-06-29T18:53:53", "name": "[rdma-next,05/17] RDMA/irdma: Add privileged UDA queue implementation", "commit_ref": null, "pull_url": null, "state": "rejected", "archived": false, "hash": "99e25b78bfa4406025289b1d31f6c2b228ece4a4", "submitter": { "id": 69500, "url": "http://patchwork.ozlabs.org/api/people/69500/?format=api", "name": "Saleem, Shiraz", "email": "shiraz.saleem@intel.com" }, "delegate": { "id": 68, "url": "http://patchwork.ozlabs.org/api/users/68/?format=api", "username": "jtkirshe", "first_name": "Jeff", "last_name": "Kirsher", "email": "jeffrey.t.kirsher@intel.com" }, "mbox": "http://patchwork.ozlabs.org/project/intel-wired-lan/patch/20190629185405.1601-6-shiraz.saleem@intel.com/mbox/", "series": [ { "id": 116886, "url": "http://patchwork.ozlabs.org/api/series/116886/?format=api", "web_url": "http://patchwork.ozlabs.org/project/intel-wired-lan/list/?series=116886", "date": "2019-06-29T18:53:48", "name": "Add unified Intel Ethernet RDMA driver (irdma)", "version": 1, "mbox": "http://patchwork.ozlabs.org/series/116886/mbox/" } ], "comments": "http://patchwork.ozlabs.org/api/patches/1124838/comments/", "check": "pending", "checks": "http://patchwork.ozlabs.org/api/patches/1124838/checks/", "tags": {}, "related": [], "headers": { "Return-Path": "<intel-wired-lan-bounces@osuosl.org>", "X-Original-To": [ "incoming@patchwork.ozlabs.org", "intel-wired-lan@lists.osuosl.org" ], "Delivered-To": [ "patchwork-incoming@bilbo.ozlabs.org", "intel-wired-lan@lists.osuosl.org" ], "Authentication-Results": [ "ozlabs.org;\n\tspf=pass (mailfrom) smtp.mailfrom=osuosl.org\n\t(client-ip=140.211.166.138; helo=whitealder.osuosl.org;\n\tenvelope-from=intel-wired-lan-bounces@osuosl.org;\n\treceiver=<UNKNOWN>)", "ozlabs.org;\n\tdmarc=fail (p=none dis=none) header.from=intel.com" ], "Received": [ "from whitealder.osuosl.org (smtp1.osuosl.org [140.211.166.138])\n\t(using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256\n\tbits)) (No client certificate requested)\n\tby ozlabs.org (Postfix) with ESMTPS id 45bjVX6DPbz9s3Z\n\tfor <incoming@patchwork.ozlabs.org>;\n\tSun, 30 Jun 2019 04:54:44 +1000 (AEST)", "from localhost (localhost [127.0.0.1])\n\tby whitealder.osuosl.org (Postfix) with ESMTP id 346FA86C4A;\n\tSat, 29 Jun 2019 18:54:43 +0000 (UTC)", "from whitealder.osuosl.org ([127.0.0.1])\n\tby localhost (.osuosl.org [127.0.0.1]) (amavisd-new, port 10024)\n\twith ESMTP id s-eW3dgtQYs9; Sat, 29 Jun 2019 18:54:22 +0000 (UTC)", "from ash.osuosl.org (ash.osuosl.org [140.211.166.34])\n\tby whitealder.osuosl.org (Postfix) with ESMTP id 62F1286CC5;\n\tSat, 29 Jun 2019 18:54:22 +0000 (UTC)", "from hemlock.osuosl.org (smtp2.osuosl.org [140.211.166.133])\n\tby ash.osuosl.org (Postfix) with ESMTP id 729411BF3AD\n\tfor <intel-wired-lan@lists.osuosl.org>;\n\tSat, 29 Jun 2019 18:54:21 +0000 (UTC)", "from localhost (localhost [127.0.0.1])\n\tby 
hemlock.osuosl.org (Postfix) with ESMTP id 641BC87E58\n\tfor <intel-wired-lan@lists.osuosl.org>;\n\tSat, 29 Jun 2019 18:54:21 +0000 (UTC)", "from hemlock.osuosl.org ([127.0.0.1])\n\tby localhost (.osuosl.org [127.0.0.1]) (amavisd-new, port 10024)\n\twith ESMTP id uQSrCP0CO5UH for <intel-wired-lan@lists.osuosl.org>;\n\tSat, 29 Jun 2019 18:54:18 +0000 (UTC)", "from mga14.intel.com (mga14.intel.com [192.55.52.115])\n\tby hemlock.osuosl.org (Postfix) with ESMTPS id 84BBA84B57\n\tfor <intel-wired-lan@lists.osuosl.org>;\n\tSat, 29 Jun 2019 18:54:18 +0000 (UTC)", "from fmsmga004.fm.intel.com ([10.253.24.48])\n\tby fmsmga103.fm.intel.com with ESMTP/TLS/DHE-RSA-AES256-GCM-SHA384;\n\t29 Jun 2019 11:54:18 -0700", "from ssaleem-mobl.amr.corp.intel.com ([10.254.177.95])\n\tby fmsmga004.fm.intel.com with ESMTP; 29 Jun 2019 11:54:17 -0700" ], "X-Virus-Scanned": [ "amavisd-new at osuosl.org", "amavisd-new at osuosl.org" ], "X-Greylist": "domain auto-whitelisted by SQLgrey-1.7.6", "X-Amp-Result": "SKIPPED(no attachment in message)", "X-Amp-File-Uploaded": "False", "X-ExtLoop1": "1", "X-IronPort-AV": "E=Sophos;i=\"5.63,432,1557212400\"; d=\"scan'208\";a=\"185972864\"", "From": "Shiraz Saleem <shiraz.saleem@intel.com>", "To": "intel-wired-lan@lists.osuosl.org", "Date": "Sat, 29 Jun 2019 13:53:53 -0500", "Message-Id": "<20190629185405.1601-6-shiraz.saleem@intel.com>", "X-Mailer": "git-send-email 2.21.0", "In-Reply-To": "<20190629185405.1601-1-shiraz.saleem@intel.com>", "References": "<20190629185405.1601-1-shiraz.saleem@intel.com>", "MIME-Version": "1.0", "Subject": "[Intel-wired-lan] [PATCH rdma-next 05/17] RDMA/irdma: Add\n\tprivileged UDA queue implementation", "X-BeenThere": "intel-wired-lan@osuosl.org", "X-Mailman-Version": "2.1.29", "Precedence": "list", "List-Id": "Intel Wired Ethernet Linux Kernel Driver Development\n\t<intel-wired-lan.osuosl.org>", "List-Unsubscribe": "<https://lists.osuosl.org/mailman/options/intel-wired-lan>, \n\t<mailto:intel-wired-lan-request@osuosl.org?subject=unsubscribe>", "List-Archive": "<http://lists.osuosl.org/pipermail/intel-wired-lan/>", "List-Post": "<mailto:intel-wired-lan@osuosl.org>", "List-Help": "<mailto:intel-wired-lan-request@osuosl.org?subject=help>", "List-Subscribe": "<https://lists.osuosl.org/mailman/listinfo/intel-wired-lan>, \n\t<mailto:intel-wired-lan-request@osuosl.org?subject=subscribe>", "Cc": "Mustafa Ismail <mustafa.ismail@intel.com>,\n\tShiraz Saleem <shiraz.saleem@intel.com>", "Content-Type": "text/plain; charset=\"us-ascii\"", "Content-Transfer-Encoding": "7bit", "Errors-To": "intel-wired-lan-bounces@osuosl.org", "Sender": "\"Intel-wired-lan\" <intel-wired-lan-bounces@osuosl.org>" }, "content": "From: Mustafa Ismail <mustafa.ismail@intel.com>\n\nImplement privileged UDA queues to handle iWARP connection\npackets and receive exceptions.\n\nSigned-off-by: Mustafa Ismail <mustafa.ismail@intel.com>\nSigned-off-by: Shiraz Saleem <shiraz.saleem@intel.com>\n---\n drivers/infiniband/hw/irdma/puda.c | 1693 ++++++++++++++++++++++++++++++++++++\n drivers/infiniband/hw/irdma/puda.h | 187 ++++\n 2 files changed, 1880 insertions(+)\n create mode 100644 drivers/infiniband/hw/irdma/puda.c\n create mode 100644 drivers/infiniband/hw/irdma/puda.h", "diff": "diff --git a/drivers/infiniband/hw/irdma/puda.c b/drivers/infiniband/hw/irdma/puda.c\nnew file mode 100644\nindex 0000000..3249f3f\n--- /dev/null\n+++ b/drivers/infiniband/hw/irdma/puda.c\n@@ -0,0 +1,1693 @@\n+// SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB\n+/* Copyright (c) 2019, Intel Corporation. 
*/\n+\n+#include \"osdep.h\"\n+#include \"status.h\"\n+#include \"hmc.h\"\n+#include \"defs.h\"\n+#include \"type.h\"\n+#include \"protos.h\"\n+#include \"puda.h\"\n+#include \"ws.h\"\n+\n+static void irdma_ieq_receive(struct irdma_sc_vsi *vsi,\n+\t\t\t struct irdma_puda_buf *buf);\n+static void irdma_ieq_tx_compl(struct irdma_sc_vsi *vsi, void *sqwrid);\n+static void irdma_ilq_putback_rcvbuf(struct irdma_sc_qp *qp, u32 wqe_idx);\n+/**\n+ * irdma_puda_get_listbuf - get buffer from puda list\n+ * @list: list to use for buffers (ILQ or IEQ)\n+ */\n+static struct irdma_puda_buf *irdma_puda_get_listbuf(struct list_head *list)\n+{\n+\tstruct irdma_puda_buf *buf = NULL;\n+\n+\tif (!list_empty(list)) {\n+\t\tbuf = (struct irdma_puda_buf *)list->next;\n+\t\tlist_del((struct list_head *)&buf->list);\n+\t}\n+\n+\treturn buf;\n+}\n+\n+/**\n+ * irdma_puda_get_bufpool - return buffer from resource\n+ * @rsrc: resource to use for buffer\n+ */\n+struct irdma_puda_buf *irdma_puda_get_bufpool(struct irdma_puda_rsrc *rsrc)\n+{\n+\tstruct irdma_puda_buf *buf = NULL;\n+\tstruct list_head *list = &rsrc->bufpool;\n+\tunsigned long flags;\n+\n+\tspin_lock_irqsave(&rsrc->bufpool_lock, flags);\n+\tbuf = irdma_puda_get_listbuf(list);\n+\tif (buf) {\n+\t\trsrc->avail_buf_count--;\n+\t\tbuf->vsi = rsrc->vsi;\n+\t} else {\n+\t\trsrc->stats_buf_alloc_fail++;\n+\t}\n+\tspin_unlock_irqrestore(&rsrc->bufpool_lock, flags);\n+\n+\treturn buf;\n+}\n+\n+/**\n+ * irdma_puda_ret_bufpool - return buffer to rsrc list\n+ * @rsrc: resource to use for buffer\n+ * @buf: buffer to return to resource\n+ */\n+void irdma_puda_ret_bufpool(struct irdma_puda_rsrc *rsrc,\n+\t\t\t struct irdma_puda_buf *buf)\n+{\n+\tunsigned long flags;\n+\n+\tbuf->do_lpb = false;\n+\tspin_lock_irqsave(&rsrc->bufpool_lock, flags);\n+\tlist_add(&buf->list, &rsrc->bufpool);\n+\tspin_unlock_irqrestore(&rsrc->bufpool_lock, flags);\n+\trsrc->avail_buf_count++;\n+}\n+\n+/**\n+ * irdma_puda_post_recvbuf - set wqe for rcv buffer\n+ * @rsrc: resource ptr\n+ * @wqe_idx: wqe index to use\n+ * @buf: puda buffer for rcv q\n+ * @initial: flag if during init time\n+ */\n+static void irdma_puda_post_recvbuf(struct irdma_puda_rsrc *rsrc, u32 wqe_idx,\n+\t\t\t\t struct irdma_puda_buf *buf, bool initial)\n+{\n+\t__le64 *wqe;\n+\tstruct irdma_sc_qp *qp = &rsrc->qp;\n+\tu64 offset24 = 0;\n+\n+\tqp->qp_uk.rq_wrid_array[wqe_idx] = (uintptr_t)buf;\n+\twqe = qp->qp_uk.rq_base[wqe_idx].elem;\n+\tif (!initial)\n+\t\tget_64bit_val(wqe, 24, &offset24);\n+\n+\toffset24 = (offset24) ? 
0 : LS_64(1, IRDMAQPSQ_VALID);\n+\n+\tset_64bit_val(wqe, 16, 0);\n+\tset_64bit_val(wqe, 0, buf->mem.pa);\n+\tif (qp->qp_uk.uk_attrs->hw_rev == IRDMA_GEN_1) {\n+\t\tset_64bit_val(wqe, 8,\n+\t\t\t LS_64(buf->mem.size, IRDMAQPSQ_GEN1_FRAG_LEN));\n+\t} else {\n+\t\tset_64bit_val(wqe, 8,\n+\t\t\t LS_64(buf->mem.size,\n+\t\t\t\t IRDMAQPSQ_FRAG_LEN) | (offset24 & IRDMAQPSQ_VALID_M));\n+\t}\n+\tdma_wmb(); /* make sure WQE is written before valid bit is set */\n+\n+\tset_64bit_val(wqe, 24, offset24);\n+}\n+\n+/**\n+ * irdma_puda_replenish_rq - post rcv buffers\n+ * @rsrc: resource to use for buffer\n+ * @initial: flag if during init time\n+ */\n+static enum irdma_status_code\n+irdma_puda_replenish_rq(struct irdma_puda_rsrc *rsrc, bool initial)\n+{\n+\tu32 i;\n+\tu32 invalid_cnt = rsrc->rxq_invalid_cnt;\n+\tstruct irdma_puda_buf *buf = NULL;\n+\n+\tfor (i = 0; i < invalid_cnt; i++) {\n+\t\tbuf = irdma_puda_get_bufpool(rsrc);\n+\t\tif (!buf)\n+\t\t\treturn IRDMA_ERR_list_empty;\n+\t\tirdma_puda_post_recvbuf(rsrc, rsrc->rx_wqe_idx, buf, initial);\n+\t\trsrc->rx_wqe_idx = ((rsrc->rx_wqe_idx + 1) % rsrc->rq_size);\n+\t\trsrc->rxq_invalid_cnt--;\n+\t}\n+\n+\treturn 0;\n+}\n+\n+/**\n+ * irdma_puda_alloc_buf - allocate mem for buffer\n+ * @dev: iwarp device\n+ * @len: length of buffer\n+ */\n+static struct irdma_puda_buf *irdma_puda_alloc_buf(struct irdma_sc_dev *dev,\n+\t\t\t\t\t\t u32 len)\n+{\n+\tstruct irdma_puda_buf *buf = NULL;\n+\tstruct irdma_virt_mem buf_mem;\n+\n+\tbuf_mem.size = sizeof(struct irdma_puda_buf);\n+\tbuf_mem.va = kzalloc(buf_mem.size, GFP_ATOMIC);\n+\tif (!buf_mem.va) {\n+\t\tdev_dbg(rfdev_to_dev(dev), \"PUDA: error virt_mem for buf\\n\");\n+\t\treturn NULL;\n+\t}\n+\n+\tbuf = buf_mem.va;\n+\tbuf->mem.size = ALIGN(len, 1);\n+\tbuf->mem.va = dma_alloc_coherent(hw_to_dev(dev->hw), buf->mem.size,\n+\t\t\t\t\t &buf->mem.pa, GFP_KERNEL);\n+\tif (!buf->mem.va) {\n+\t\tdev_dbg(rfdev_to_dev(dev), \"PUDA: error dma_mem for buf\\n\");\n+\t\tkfree(buf_mem.va);\n+\t\treturn NULL;\n+\t}\n+\n+\tbuf->buf_mem.va = buf_mem.va;\n+\tbuf->buf_mem.size = buf_mem.size;\n+\n+\treturn buf;\n+}\n+\n+/**\n+ * irdma_puda_dele_buf - delete buffer back to system\n+ * @dev: iwarp device\n+ * @buf: buffer to free\n+ */\n+static void irdma_puda_dele_buf(struct irdma_sc_dev *dev,\n+\t\t\t\tstruct irdma_puda_buf *buf)\n+{\n+\tdma_free_coherent(hw_to_dev(dev->hw), buf->mem.size, buf->mem.va,\n+\t\t\t buf->mem.pa);\n+\tbuf->mem.va = NULL;\n+\tkfree(buf->buf_mem.va);\n+}\n+\n+/**\n+ * irdma_puda_get_next_send_wqe - return next wqe for processing\n+ * @qp: puda qp for wqe\n+ * @wqe_idx: wqe index for caller\n+ */\n+static __le64 *irdma_puda_get_next_send_wqe(struct irdma_qp_uk *qp,\n+\t\t\t\t\t u32 *wqe_idx)\n+{\n+\t__le64 *wqe = NULL;\n+\tenum irdma_status_code ret_code = 0;\n+\n+\t*wqe_idx = IRDMA_RING_CURRENT_HEAD(qp->sq_ring);\n+\tif (!*wqe_idx)\n+\t\tqp->swqe_polarity = !qp->swqe_polarity;\n+\tIRDMA_RING_MOVE_HEAD(qp->sq_ring, ret_code);\n+\tif (ret_code)\n+\t\treturn wqe;\n+\n+\twqe = qp->sq_base[*wqe_idx].elem;\n+\n+\treturn wqe;\n+}\n+\n+/**\n+ * irdma_puda_poll_info - poll cq for completion\n+ * @cq: cq for poll\n+ * @info: info return for successful completion\n+ */\n+static enum irdma_status_code\n+irdma_puda_poll_info(struct irdma_sc_cq *cq, struct irdma_puda_cmpl_info *info)\n+{\n+\tstruct irdma_cq_uk *cq_uk = &cq->cq_uk;\n+\tu64 qword0, qword2, qword3, qword6;\n+\t__le64 *cqe;\n+\t__le64 *ext_cqe = NULL;\n+\tu64 qword7 = 0;\n+\tu64 comp_ctx;\n+\tbool valid_bit;\n+\tbool ext_valid = 0;\n+\tu32 major_err, 
minor_err;\n+\tu32 peek_head;\n+\tbool error;\n+\tu8 polarity;\n+\n+\tcqe = IRDMA_GET_CURRENT_CQ_ELEM(&cq->cq_uk);\n+\tget_64bit_val(cqe, 24, &qword3);\n+\tvalid_bit = (bool)RS_64(qword3, IRDMA_CQ_VALID);\n+\tif (valid_bit != cq_uk->polarity)\n+\t\treturn IRDMA_ERR_Q_EMPTY;\n+\n+\tif (cq->dev->hw_attrs.uk_attrs.hw_rev > IRDMA_GEN_1)\n+\t\text_valid = (bool)RS_64(qword3, IRDMA_CQ_EXTCQE);\n+\n+\tif (ext_valid) {\n+\t\tpeek_head = (cq_uk->cq_ring.head + 1) % cq_uk->cq_ring.size;\n+\t\text_cqe = cq_uk->cq_base[peek_head].buf;\n+\t\tget_64bit_val(ext_cqe, 24, &qword7);\n+\t\tpolarity = (u8)RS_64(qword7, IRDMA_CQ_VALID);\n+\t\tif (!peek_head)\n+\t\t\tpolarity ^= 1;\n+\t\tif (polarity != cq_uk->polarity)\n+\t\t\treturn IRDMA_ERR_Q_EMPTY;\n+\n+\t\tIRDMA_RING_MOVE_HEAD_NOCHECK(cq_uk->cq_ring);\n+\t\tif (!IRDMA_RING_CURRENT_HEAD(cq_uk->cq_ring))\n+\t\t\tcq_uk->polarity = !cq_uk->polarity;\n+\t\t/* update cq tail in cq shadow memory also */\n+\t\tIRDMA_RING_MOVE_TAIL(cq_uk->cq_ring);\n+\t}\n+\n+\tirdma_debug_buf(cq->dev, IRDMA_DEBUG_PUDA, \"PUDA CQE\", cqe, 32);\n+\tif (ext_valid)\n+\t\tirdma_debug_buf(cq->dev, IRDMA_DEBUG_PUDA, \"PUDA EXT-CQE\",\n+\t\t\t\text_cqe, 32);\n+\n+\terror = (bool)RS_64(qword3, IRDMA_CQ_ERROR);\n+\tif (error) {\n+\t\tdev_dbg(rfdev_to_dev(cq->dev), \"PUDA: receive error\\n\");\n+\t\tmajor_err = (u32)(RS_64(qword3, IRDMA_CQ_MAJERR));\n+\t\tminor_err = (u32)(RS_64(qword3, IRDMA_CQ_MINERR));\n+\t\tinfo->compl_error = major_err << 16 | minor_err;\n+\t\treturn IRDMA_ERR_CQ_COMPL_ERROR;\n+\t}\n+\n+\tget_64bit_val(cqe, 0, &qword0);\n+\tget_64bit_val(cqe, 16, &qword2);\n+\n+\tinfo->q_type = (u8)RS_64(qword3, IRDMA_CQ_SQ);\n+\tinfo->qp_id = (u32)RS_64(qword2, IRDMACQ_QPID);\n+\tif (cq->dev->hw_attrs.uk_attrs.hw_rev > IRDMA_GEN_1)\n+\t\tinfo->ipv4 = (bool)RS_64(qword3, IRDMACQ_IPV4);\n+\n+\tget_64bit_val(cqe, 8, &comp_ctx);\n+\tinfo->qp = (struct irdma_qp_uk *)(unsigned long)comp_ctx;\n+\tinfo->wqe_idx = (u32)RS_64(qword3, IRDMA_CQ_WQEIDX);\n+\n+\tif (info->q_type == IRDMA_CQE_QTYPE_RQ) {\n+\t\tif (ext_valid) {\n+\t\t\tinfo->vlan_valid = (bool)RS_64(qword7, IRDMA_CQ_UDVLANVALID);\n+\t\t\tif (info->vlan_valid) {\n+\t\t\t\tget_64bit_val(ext_cqe, 16, &qword6);\n+\t\t\t\tinfo->vlan = (u16)RS_64(qword6, IRDMA_CQ_UDVLAN);\n+\t\t\t}\n+\t\t\tinfo->smac_valid = (bool)RS_64(qword7, IRDMA_CQ_UDSMACVALID);\n+\t\t\tif (info->smac_valid) {\n+\t\t\t\tget_64bit_val(ext_cqe, 16, &qword6);\n+\t\t\t\tinfo->smac[0] = (u8)((qword6 >> 40) & 0xFF);\n+\t\t\t\tinfo->smac[1] = (u8)((qword6 >> 32) & 0xFF);\n+\t\t\t\tinfo->smac[2] = (u8)((qword6 >> 24) & 0xFF);\n+\t\t\t\tinfo->smac[3] = (u8)((qword6 >> 16) & 0xFF);\n+\t\t\t\tinfo->smac[4] = (u8)((qword6 >> 8) & 0xFF);\n+\t\t\t\tinfo->smac[5] = (u8)(qword6 & 0xFF);\n+\t\t\t}\n+\t\t}\n+\n+\t\tif (cq->dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1) {\n+\t\t\tinfo->vlan_valid = (bool)RS_64(qword3, IRDMA_VLAN_TAG_VALID);\n+\t\t\tinfo->l4proto = (u8)RS_64(qword2, IRDMA_UDA_L4PROTO);\n+\t\t\tinfo->l3proto = (u8)RS_64(qword2, IRDMA_UDA_L3PROTO);\n+\t\t}\n+\n+\t\tinfo->payload_len = (u32)RS_64(qword0, IRDMACQ_PAYLDLEN);\n+\t}\n+\n+\treturn 0;\n+}\n+\n+/**\n+ * irdma_puda_poll_completion - processes completion for cq\n+ * @dev: iwarp device\n+ * @cq: cq getting interrupt\n+ * @compl_err: return any completion err\n+ */\n+enum irdma_status_code irdma_puda_poll_cmpl(struct irdma_sc_dev *dev,\n+\t\t\t\t\t struct irdma_sc_cq *cq,\n+\t\t\t\t\t u32 *compl_err)\n+{\n+\tstruct irdma_qp_uk *qp;\n+\tstruct irdma_cq_uk *cq_uk = &cq->cq_uk;\n+\tstruct irdma_puda_cmpl_info info = 
{};\n+\tenum irdma_status_code ret = 0;\n+\tstruct irdma_puda_buf *buf;\n+\tstruct irdma_puda_rsrc *rsrc;\n+\tvoid *sqwrid;\n+\tu8 cq_type = cq->cq_type;\n+\tunsigned long flags;\n+\n+\tif (cq_type == IRDMA_CQ_TYPE_ILQ || cq_type == IRDMA_CQ_TYPE_IEQ) {\n+\t\trsrc = (cq_type == IRDMA_CQ_TYPE_ILQ) ? cq->vsi->ilq :\n+\t\t\t\t\t\t\tcq->vsi->ieq;\n+\t} else {\n+\t\tdev_dbg(rfdev_to_dev(dev), \"PUDA: qp_type error\\n\");\n+\t\treturn IRDMA_ERR_BAD_PTR;\n+\t}\n+\n+\tret = irdma_puda_poll_info(cq, &info);\n+\t*compl_err = info.compl_error;\n+\tif (ret == IRDMA_ERR_Q_EMPTY)\n+\t\treturn ret;\n+\tif (ret)\n+\t\tgoto done;\n+\n+\tqp = info.qp;\n+\tif (!qp || !rsrc) {\n+\t\tret = IRDMA_ERR_BAD_PTR;\n+\t\tgoto done;\n+\t}\n+\n+\tif (qp->qp_id != rsrc->qp_id) {\n+\t\tret = IRDMA_ERR_BAD_PTR;\n+\t\tgoto done;\n+\t}\n+\n+\tif (info.q_type == IRDMA_CQE_QTYPE_RQ) {\n+\t\tbuf = (struct irdma_puda_buf *)(uintptr_t)\n+\t\t\t qp->rq_wrid_array[info.wqe_idx];\n+\t\t/* Get all the tcpip information in the buf header */\n+\t\tret = irdma_puda_get_tcpip_info(&info, buf);\n+\t\tif (ret) {\n+\t\t\trsrc->stats_rcvd_pkt_err++;\n+\t\t\tif (cq_type == IRDMA_CQ_TYPE_ILQ) {\n+\t\t\t\tirdma_ilq_putback_rcvbuf(&rsrc->qp,\n+\t\t\t\t\t\t\t info.wqe_idx);\n+\t\t\t} else {\n+\t\t\t\tirdma_puda_ret_bufpool(rsrc, buf);\n+\t\t\t\tirdma_puda_replenish_rq(rsrc, false);\n+\t\t\t}\n+\t\t\tgoto done;\n+\t\t}\n+\n+\t\trsrc->stats_pkt_rcvd++;\n+\t\trsrc->compl_rxwqe_idx = info.wqe_idx;\n+\t\tdev_dbg(rfdev_to_dev(dev), \"PUDA: RQ completion\\n\");\n+\t\trsrc->receive(rsrc->vsi, buf);\n+\t\tif (cq_type == IRDMA_CQ_TYPE_ILQ)\n+\t\t\tirdma_ilq_putback_rcvbuf(&rsrc->qp, info.wqe_idx);\n+\t\telse\n+\t\t\tirdma_puda_replenish_rq(rsrc, false);\n+\n+\t} else {\n+\t\tdev_dbg(rfdev_to_dev(dev), \"PUDA: SQ completion\\n\");\n+\t\tsqwrid = (void *)(uintptr_t)qp->sq_wrtrk_array[info.wqe_idx].wrid;\n+\t\tIRDMA_RING_SET_TAIL(qp->sq_ring, info.wqe_idx);\n+\t\trsrc->xmit_complete(rsrc->vsi, sqwrid);\n+\t\tspin_lock_irqsave(&rsrc->bufpool_lock, flags);\n+\t\trsrc->tx_wqe_avail_cnt++;\n+\t\tspin_unlock_irqrestore(&rsrc->bufpool_lock, flags);\n+\t\tif (!list_empty(&rsrc->txpend))\n+\t\t\tirdma_puda_send_buf(rsrc, NULL);\n+\t}\n+\n+done:\n+\tIRDMA_RING_MOVE_HEAD_NOCHECK(cq_uk->cq_ring);\n+\tif (!IRDMA_RING_CURRENT_HEAD(cq_uk->cq_ring))\n+\t\tcq_uk->polarity = !cq_uk->polarity;\n+\t/* update cq tail in cq shadow memory also */\n+\tIRDMA_RING_MOVE_TAIL(cq_uk->cq_ring);\n+\tset_64bit_val(cq_uk->shadow_area, 0,\n+\t\t IRDMA_RING_CURRENT_HEAD(cq_uk->cq_ring));\n+\n+\treturn ret;\n+}\n+\n+/**\n+ * irdma_puda_send - complete send wqe for transmit\n+ * @qp: puda qp for send\n+ * @info: buffer information for transmit\n+ */\n+enum irdma_status_code irdma_puda_send(struct irdma_sc_qp *qp,\n+\t\t\t\t struct irdma_puda_send_info *info)\n+{\n+\t__le64 *wqe;\n+\tu32 iplen, l4len;\n+\tu64 hdr[2];\n+\tu32 wqe_idx;\n+\tu8 iipt;\n+\n+\t/* number of 32 bits DWORDS in header */\n+\tl4len = info->tcplen >> 2;\n+\tif (info->ipv4) {\n+\t\tiipt = 3;\n+\t\tiplen = 5;\n+\t} else {\n+\t\tiipt = 1;\n+\t\tiplen = 10;\n+\t}\n+\n+\twqe = irdma_puda_get_next_send_wqe(&qp->qp_uk, &wqe_idx);\n+\tif (!wqe)\n+\t\treturn IRDMA_ERR_QP_TOOMANY_WRS_POSTED;\n+\n+\tqp->qp_uk.sq_wrtrk_array[wqe_idx].wrid = (uintptr_t)info->scratch;\n+\t/* Third line of WQE descriptor */\n+\t/* maclen is in words */\n+\n+\tif (qp->dev->hw_attrs.uk_attrs.hw_rev > IRDMA_GEN_1) {\n+\t\thdr[0] = 0; /* Dest_QPN and Dest_QKey only for UD */\n+\t\thdr[1] = LS_64(IRDMA_OP_TYPE_SEND, IRDMA_UDA_QPSQ_OPCODE) |\n+\t\t\t 
LS_64(l4len, IRDMA_UDA_QPSQ_L4LEN) |\n+\t\t\t LS_64(info->ah_id, IRDMAQPSQ_AHID) |\n+\t\t\t LS_64(1, IRDMA_UDA_QPSQ_SIGCOMPL) |\n+\t\t\t LS_64(qp->qp_uk.swqe_polarity, IRDMA_UDA_QPSQ_VALID);\n+\n+\t\t/* Forth line of WQE descriptor */\n+\n+\t\tset_64bit_val(wqe, 0, info->paddr);\n+\t\tset_64bit_val(wqe, 8,\n+\t\t\t LS_64(info->len, IRDMAQPSQ_FRAG_LEN) |\n+\t\t\t LS_64(qp->qp_uk.swqe_polarity, IRDMA_UDA_QPSQ_VALID));\n+\t} else {\n+\t\thdr[0] = LS_64((info->maclen >> 1), IRDMA_UDA_QPSQ_MACLEN) |\n+\t\t\t LS_64(iplen, IRDMA_UDA_QPSQ_IPLEN) |\n+\t\t\t LS_64(1, IRDMA_UDA_QPSQ_L4T) |\n+\t\t\t LS_64(iipt, IRDMA_UDA_QPSQ_IIPT) |\n+\t\t\t LS_64(l4len, IRDMA_GEN1_UDA_QPSQ_L4LEN);\n+\n+\t\thdr[1] = LS_64(IRDMA_OP_TYPE_SEND, IRDMA_UDA_QPSQ_OPCODE) |\n+\t\t\t LS_64(1, IRDMA_UDA_QPSQ_SIGCOMPL) |\n+\t\t\t LS_64(info->do_lpb, IRDMA_UDA_QPSQ_DOLOOPBACK) |\n+\t\t\t LS_64(qp->qp_uk.swqe_polarity, IRDMA_UDA_QPSQ_VALID);\n+\n+\t\t/* Forth line of WQE descriptor */\n+\n+\t\tset_64bit_val(wqe, 0, info->paddr);\n+\t\tset_64bit_val(wqe, 8,\n+\t\t\t LS_64(info->len, IRDMAQPSQ_GEN1_FRAG_LEN));\n+\t}\n+\n+\tset_64bit_val(wqe, 16, hdr[0]);\n+\tdma_wmb(); /* make sure WQE is written before valid bit is set */\n+\n+\tset_64bit_val(wqe, 24, hdr[1]);\n+\n+\tirdma_debug_buf(qp->dev, IRDMA_DEBUG_PUDA, \"PUDA SEND WQE\", wqe, 32);\n+\tirdma_qp_post_wr(&qp->qp_uk);\n+\treturn 0;\n+}\n+\n+/**\n+ * irdma_puda_send_buf - transmit puda buffer\n+ * @rsrc: resource to use for buffer\n+ * @buf: puda buffer to transmit\n+ */\n+void irdma_puda_send_buf(struct irdma_puda_rsrc *rsrc,\n+\t\t\t struct irdma_puda_buf *buf)\n+{\n+\tstruct irdma_puda_send_info info;\n+\tenum irdma_status_code ret = 0;\n+\tunsigned long flags;\n+\n+\tspin_lock_irqsave(&rsrc->bufpool_lock, flags);\n+\t/* if no wqe available or not from a completion and we have\n+\t * pending buffers, we must queue new buffer\n+\t */\n+\tif (!rsrc->tx_wqe_avail_cnt || (buf && !list_empty(&rsrc->txpend))) {\n+\t\tlist_add_tail(&buf->list, &rsrc->txpend);\n+\t\tspin_unlock_irqrestore(&rsrc->bufpool_lock, flags);\n+\t\trsrc->stats_sent_pkt_q++;\n+\t\tif (rsrc->type == IRDMA_PUDA_RSRC_TYPE_ILQ)\n+\t\t\tdev_dbg(rfdev_to_dev(rsrc->dev),\n+\t\t\t\t\"PUDA: adding to txpend\\n\");\n+\t\treturn;\n+\t}\n+\trsrc->tx_wqe_avail_cnt--;\n+\t/* if we are coming from a completion and have pending buffers\n+\t * then Get one from pending list\n+\t */\n+\tif (!buf) {\n+\t\tbuf = irdma_puda_get_listbuf(&rsrc->txpend);\n+\t\tif (!buf)\n+\t\t\tgoto done;\n+\t}\n+\n+\tinfo.scratch = (void *)buf;\n+\tinfo.paddr = buf->mem.pa;\n+\tinfo.len = buf->totallen;\n+\tinfo.tcplen = buf->tcphlen;\n+\tinfo.ipv4 = buf->ipv4;\n+\n+\tif (rsrc->dev->hw_attrs.uk_attrs.hw_rev > IRDMA_GEN_1) {\n+\t\tinfo.ah_id = buf->ah_id;\n+\t} else {\n+\t\tinfo.maclen = buf->maclen;\n+\t\tinfo.do_lpb = buf->do_lpb;\n+\t}\n+\n+\tret = irdma_puda_send(&rsrc->qp, &info);\n+\tif (ret) {\n+\t\trsrc->tx_wqe_avail_cnt++;\n+\t\trsrc->stats_sent_pkt_q++;\n+\t\tlist_add(&buf->list, &rsrc->txpend);\n+\t\tif (rsrc->type == IRDMA_PUDA_RSRC_TYPE_ILQ)\n+\t\t\tdev_dbg(rfdev_to_dev(rsrc->dev),\n+\t\t\t\t\"PUDA: adding to puda_send\\n\");\n+\t} else {\n+\t\trsrc->stats_pkt_sent++;\n+\t}\n+done:\n+\tspin_unlock_irqrestore(&rsrc->bufpool_lock, flags);\n+}\n+\n+/**\n+ * irdma_puda_qp_setctx - during init, set qp's context\n+ * @rsrc: qp's resource\n+ */\n+static void irdma_puda_qp_setctx(struct irdma_puda_rsrc *rsrc)\n+{\n+\tstruct irdma_sc_qp *qp = &rsrc->qp;\n+\t__le64 *qp_ctx = qp->hw_host_ctx;\n+\n+\tset_64bit_val(qp_ctx, 8, 
qp->sq_pa);\n+\tset_64bit_val(qp_ctx, 16, qp->rq_pa);\n+\tset_64bit_val(qp_ctx, 24,\n+\t\t LS_64(qp->hw_rq_size, IRDMAQPC_RQSIZE) |\n+\t\t LS_64(qp->hw_sq_size, IRDMAQPC_SQSIZE));\n+\tset_64bit_val(qp_ctx, 48,\n+\t\t LS_64(rsrc->buf_size, IRDMAQPC_SNDMSS));\n+\tset_64bit_val(qp_ctx, 56, 0);\n+\tif (qp->dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1)\n+\t\tset_64bit_val(qp_ctx, 64, 1);\n+\tset_64bit_val(qp_ctx, 136,\n+\t\t LS_64(rsrc->cq_id, IRDMAQPC_TXCQNUM) |\n+\t\t LS_64(rsrc->cq_id, IRDMAQPC_RXCQNUM));\n+\tset_64bit_val(qp_ctx, 144,\n+\t\t LS_64(rsrc->stats_idx, IRDMAQPC_STAT_INDEX));\n+\tset_64bit_val(qp_ctx, 160,\n+\t\t LS_64(1, IRDMAQPC_PRIVEN) |\n+\t\t LS_64(rsrc->stats_idx_valid, IRDMAQPC_USESTATSINSTANCE));\n+\tset_64bit_val(qp_ctx, 168,\n+\t\t LS_64((uintptr_t)qp, IRDMAQPC_QPCOMPCTX));\n+\tset_64bit_val(qp_ctx, 176,\n+\t\t LS_64(qp->sq_tph_val, IRDMAQPC_SQTPHVAL) |\n+\t\t LS_64(qp->rq_tph_val, IRDMAQPC_RQTPHVAL) |\n+\t\t LS_64(qp->qs_handle, IRDMAQPC_QSHANDLE));\n+\n+\tirdma_debug_buf(rsrc->dev, IRDMA_DEBUG_PUDA, \"PUDA QP CONTEXT\", qp_ctx,\n+\t\t\tIRDMA_QP_CTX_SIZE);\n+}\n+\n+/**\n+ * irdma_puda_qp_wqe - setup wqe for qp create\n+ * @dev: Device\n+ * @qp: Resource qp\n+ */\n+static enum irdma_status_code irdma_puda_qp_wqe(struct irdma_sc_dev *dev,\n+\t\t\t\t\t\tstruct irdma_sc_qp *qp)\n+{\n+\tstruct irdma_sc_cqp *cqp;\n+\t__le64 *wqe;\n+\tu64 hdr;\n+\tstruct irdma_ccq_cqe_info compl_info;\n+\tenum irdma_status_code status = 0;\n+\n+\tcqp = dev->cqp;\n+\twqe = irdma_sc_cqp_get_next_send_wqe(cqp, 0);\n+\tif (!wqe)\n+\t\treturn IRDMA_ERR_RING_FULL;\n+\n+\tset_64bit_val(wqe, 16, qp->hw_host_ctx_pa);\n+\tset_64bit_val(wqe, 40, qp->shadow_area_pa);\n+\n+\thdr = qp->qp_uk.qp_id |\n+\t LS_64(IRDMA_CQP_OP_CREATE_QP, IRDMA_CQPSQ_OPCODE) |\n+\t LS_64(IRDMA_QP_TYPE_UDA, IRDMA_CQPSQ_QP_QPTYPE) |\n+\t LS_64(1, IRDMA_CQPSQ_QP_CQNUMVALID) |\n+\t LS_64(2, IRDMA_CQPSQ_QP_NEXTIWSTATE) |\n+\t LS_64(cqp->polarity, IRDMA_CQPSQ_WQEVALID);\n+\tdma_wmb(); /* make sure WQE is written before valid bit is set */\n+\n+\tset_64bit_val(wqe, 24, hdr);\n+\n+\tirdma_debug_buf(cqp->dev, IRDMA_DEBUG_PUDA, \"PUDA QP CREATE\", wqe, 40);\n+\tirdma_sc_cqp_post_sq(cqp);\n+\tstatus = dev->cqp_ops->poll_for_cqp_op_done(dev->cqp,\n+\t\t\t\t\t\t IRDMA_CQP_OP_CREATE_QP,\n+\t\t\t\t\t\t &compl_info);\n+\n+\treturn status;\n+}\n+\n+/**\n+ * irdma_puda_qp_create - create qp for resource\n+ * @rsrc: resource to use for buffer\n+ */\n+static enum irdma_status_code irdma_puda_qp_create(struct irdma_puda_rsrc *rsrc)\n+{\n+\tstruct irdma_sc_qp *qp = &rsrc->qp;\n+\tstruct irdma_qp_uk *ukqp = &qp->qp_uk;\n+\tenum irdma_status_code ret = 0;\n+\tu32 sq_size, rq_size;\n+\tstruct irdma_dma_mem *mem;\n+\n+\tsq_size = rsrc->sq_size * IRDMA_QP_WQE_MIN_SIZE;\n+\trq_size = rsrc->rq_size * IRDMA_QP_WQE_MIN_SIZE;\n+\trsrc->qpmem.size = ALIGN((sq_size + rq_size + (IRDMA_SHADOW_AREA_SIZE << 3) + IRDMA_QP_CTX_SIZE),\n+\t\t\t\t IRDMA_HW_PAGE_SIZE);\n+\trsrc->qpmem.va = dma_alloc_coherent(hw_to_dev(rsrc->dev->hw),\n+\t\t\t\t\t rsrc->qpmem.size, &rsrc->qpmem.pa,\n+\t\t\t\t\t GFP_KERNEL);\n+\tif (!rsrc->qpmem.va)\n+\t\treturn IRDMA_ERR_NO_MEMORY;\n+\n+\tmem = &rsrc->qpmem;\n+\tmemset(mem->va, 0, rsrc->qpmem.size);\n+\tqp->hw_sq_size = irdma_get_encoded_wqe_size(rsrc->sq_size, false);\n+\tqp->hw_rq_size = irdma_get_encoded_wqe_size(rsrc->rq_size, false);\n+\tqp->pd = &rsrc->sc_pd;\n+\tqp->qp_type = IRDMA_QP_TYPE_UDA;\n+\tqp->dev = rsrc->dev;\n+\tqp->qp_uk.back_qp = (void *)rsrc;\n+\tqp->sq_pa = mem->pa;\n+\tqp->rq_pa = qp->sq_pa + sq_size;\n+\tqp->vsi = 
rsrc->vsi;\n+\tukqp->sq_base = mem->va;\n+\tukqp->rq_base = &ukqp->sq_base[rsrc->sq_size];\n+\tukqp->shadow_area = ukqp->rq_base[rsrc->rq_size].elem;\n+\tukqp->uk_attrs = &qp->dev->hw_attrs.uk_attrs;\n+\tqp->shadow_area_pa = qp->rq_pa + rq_size;\n+\tqp->hw_host_ctx = ukqp->shadow_area + IRDMA_SHADOW_AREA_SIZE;\n+\tqp->hw_host_ctx_pa = qp->shadow_area_pa + (IRDMA_SHADOW_AREA_SIZE << 3);\n+\tukqp->qp_id = rsrc->qp_id;\n+\tukqp->sq_wrtrk_array = rsrc->sq_wrtrk_array;\n+\tukqp->rq_wrid_array = rsrc->rq_wrid_array;\n+\tukqp->sq_size = rsrc->sq_size;\n+\tukqp->rq_size = rsrc->rq_size;\n+\n+\tIRDMA_RING_INIT(ukqp->sq_ring, ukqp->sq_size);\n+\tIRDMA_RING_INIT(ukqp->initial_ring, ukqp->sq_size);\n+\tIRDMA_RING_INIT(ukqp->rq_ring, ukqp->rq_size);\n+\tukqp->wqe_alloc_db = qp->pd->dev->wqe_alloc_db;\n+\n+\tret = rsrc->dev->ws_add(qp->vsi, qp->user_pri);\n+\tif (ret) {\n+\t\tdma_free_coherent(hw_to_dev(rsrc->dev->hw), rsrc->qpmem.size,\n+\t\t\t\t rsrc->qpmem.va, rsrc->qpmem.pa);\n+\t\trsrc->qpmem.va = NULL;\n+\t\treturn ret;\n+\t}\n+\n+\tirdma_qp_add_qos(qp);\n+\tirdma_puda_qp_setctx(rsrc);\n+\n+\tif (rsrc->dev->ceq_valid)\n+\t\tret = irdma_cqp_qp_create_cmd(rsrc->dev, qp);\n+\telse\n+\t\tret = irdma_puda_qp_wqe(rsrc->dev, qp);\n+\tif (ret) {\n+\t\tirdma_qp_rem_qos(qp);\n+\t\trsrc->dev->ws_remove(qp->vsi, qp->user_pri);\n+\t\tdma_free_coherent(hw_to_dev(rsrc->dev->hw), rsrc->qpmem.size,\n+\t\t\t\t rsrc->qpmem.va, rsrc->qpmem.pa);\n+\t\trsrc->qpmem.va = NULL;\n+\t}\n+\n+\treturn ret;\n+}\n+\n+/**\n+ * irdma_puda_cq_wqe - setup wqe for CQ create\n+ * @dev: Device\n+ * @cq: resource for cq\n+ */\n+static enum irdma_status_code irdma_puda_cq_wqe(struct irdma_sc_dev *dev,\n+\t\t\t\t\t\tstruct irdma_sc_cq *cq)\n+{\n+\t__le64 *wqe;\n+\tstruct irdma_sc_cqp *cqp;\n+\tu64 hdr;\n+\tstruct irdma_ccq_cqe_info compl_info;\n+\tenum irdma_status_code status = 0;\n+\n+\tcqp = dev->cqp;\n+\twqe = irdma_sc_cqp_get_next_send_wqe(cqp, 0);\n+\tif (!wqe)\n+\t\treturn IRDMA_ERR_RING_FULL;\n+\n+\tset_64bit_val(wqe, 0, cq->cq_uk.cq_size);\n+\tset_64bit_val(wqe, 8, RS_64_1(cq, 1));\n+\tset_64bit_val(wqe, 16,\n+\t\t LS_64(cq->shadow_read_threshold,\n+\t\t\t IRDMA_CQPSQ_CQ_SHADOW_READ_THRESHOLD));\n+\tset_64bit_val(wqe, 32, cq->cq_pa);\n+\tset_64bit_val(wqe, 40, cq->shadow_area_pa);\n+\tset_64bit_val(wqe, 56,\n+\t\t LS_64(cq->tph_val, IRDMA_CQPSQ_TPHVAL) |\n+\t\t LS_64(cq->vsi->vsi_idx, IRDMA_CQPSQ_VSIIDX));\n+\n+\thdr = cq->cq_uk.cq_id |\n+\t LS_64(IRDMA_CQP_OP_CREATE_CQ, IRDMA_CQPSQ_OPCODE) |\n+\t LS_64(1, IRDMA_CQPSQ_CQ_CHKOVERFLOW) |\n+\t LS_64(1, IRDMA_CQPSQ_CQ_ENCEQEMASK) |\n+\t LS_64(1, IRDMA_CQPSQ_CQ_CEQIDVALID) |\n+\t LS_64(cqp->polarity, IRDMA_CQPSQ_WQEVALID);\n+\tdma_wmb(); /* make sure WQE is written before valid bit is set */\n+\n+\tset_64bit_val(wqe, 24, hdr);\n+\n+\tirdma_debug_buf(dev, IRDMA_DEBUG_PUDA, \"PUDA CREATE CQ\", wqe,\n+\t\t\tIRDMA_CQP_WQE_SIZE * 8);\n+\tirdma_sc_cqp_post_sq(dev->cqp);\n+\tstatus = dev->cqp_ops->poll_for_cqp_op_done(dev->cqp,\n+\t\t\t\t\t\t IRDMA_CQP_OP_CREATE_CQ,\n+\t\t\t\t\t\t &compl_info);\n+\n+\treturn status;\n+}\n+\n+/**\n+ * irdma_puda_cq_create - create cq for resource\n+ * @rsrc: resource for which cq to create\n+ */\n+static enum irdma_status_code irdma_puda_cq_create(struct irdma_puda_rsrc *rsrc)\n+{\n+\tstruct irdma_sc_dev *dev = rsrc->dev;\n+\tstruct irdma_sc_cq *cq = &rsrc->cq;\n+\tenum irdma_status_code ret = 0;\n+\tu32 cqsize;\n+\tstruct irdma_dma_mem *mem;\n+\tstruct irdma_cq_init_info info = {};\n+\tstruct irdma_cq_uk_init_info *init_info = 
&info.cq_uk_init_info;\n+\n+\tcq->vsi = rsrc->vsi;\n+\tcqsize = rsrc->cq_size * (sizeof(struct irdma_cqe));\n+\trsrc->cqmem.size = ALIGN(cqsize + sizeof(struct irdma_cq_shadow_area),\n+\t\t\t\t IRDMA_CQ0_ALIGNMENT);\n+\trsrc->cqmem.va = dma_alloc_coherent(hw_to_dev(dev->hw),\n+\t\t\t\t\t rsrc->cqmem.size, &rsrc->cqmem.pa,\n+\t\t\t\t\t GFP_KERNEL);\n+\tif (!rsrc->cqmem.va)\n+\t\treturn IRDMA_ERR_NO_MEMORY;\n+\n+\tmem = &rsrc->cqmem;\n+\tinfo.dev = dev;\n+\tinfo.type = (rsrc->type == IRDMA_PUDA_RSRC_TYPE_ILQ) ?\n+\t\t IRDMA_CQ_TYPE_ILQ : IRDMA_CQ_TYPE_IEQ;\n+\tinfo.shadow_read_threshold = rsrc->cq_size >> 2;\n+\tinfo.cq_base_pa = mem->pa;\n+\tinfo.shadow_area_pa = mem->pa + cqsize;\n+\tinit_info->cq_base = mem->va;\n+\tinit_info->shadow_area = (__le64 *)((u8 *)mem->va + cqsize);\n+\tinit_info->cq_size = rsrc->cq_size;\n+\tinit_info->cq_id = rsrc->cq_id;\n+\tinfo.ceqe_mask = true;\n+\tinfo.ceq_id_valid = true;\n+\tinfo.vsi = rsrc->vsi;\n+\n+\tret = dev->iw_priv_cq_ops->cq_init(cq, &info);\n+\tif (ret)\n+\t\tgoto error;\n+\n+\tif (rsrc->dev->ceq_valid)\n+\t\tret = irdma_cqp_cq_create_cmd(dev, cq);\n+\telse\n+\t\tret = irdma_puda_cq_wqe(dev, cq);\n+error:\n+\tif (ret) {\n+\t\tdma_free_coherent(hw_to_dev(dev->hw), rsrc->cqmem.size,\n+\t\t\t\t rsrc->cqmem.va, rsrc->cqmem.pa);\n+\t\trsrc->cqmem.va = NULL;\n+\t}\n+\n+\treturn ret;\n+}\n+\n+/**\n+ * irdma_puda_free_qp - free qp for resource\n+ * @rsrc: resource for which qp to free\n+ */\n+static void irdma_puda_free_qp(struct irdma_puda_rsrc *rsrc)\n+{\n+\tenum irdma_status_code ret;\n+\tstruct irdma_ccq_cqe_info compl_info;\n+\tstruct irdma_sc_dev *dev = rsrc->dev;\n+\n+\tif (rsrc->dev->ceq_valid) {\n+\t\tirdma_cqp_qp_destroy_cmd(dev, &rsrc->qp);\n+\t\trsrc->dev->ws_remove(rsrc->qp.vsi, rsrc->qp.user_pri);\n+\t\treturn;\n+\t}\n+\n+\tret = dev->iw_priv_qp_ops->qp_destroy(&rsrc->qp, 0, false, true, true);\n+\tif (ret)\n+\t\tdev_dbg(rfdev_to_dev(dev),\n+\t\t\t\"PUDA: error puda qp destroy wqe, status = %d\\n\", ret);\n+\tif (!ret) {\n+\t\tret = dev->cqp_ops->poll_for_cqp_op_done(dev->cqp,\n+\t\t\t\t\t\t\t IRDMA_CQP_OP_DESTROY_QP,\n+\t\t\t\t\t\t\t &compl_info);\n+\t\tif (ret)\n+\t\t\tdev_dbg(rfdev_to_dev(dev),\n+\t\t\t\t\"PUDA: error puda qp destroy failed, status = %d\\n\",\n+\t\t\t\tret);\n+\t}\n+\n+\trsrc->dev->ws_remove(rsrc->qp.vsi, rsrc->qp.user_pri);\n+}\n+\n+/**\n+ * irdma_puda_free_cq - free cq for resource\n+ * @rsrc: resource for which cq to free\n+ */\n+static void irdma_puda_free_cq(struct irdma_puda_rsrc *rsrc)\n+{\n+\tenum irdma_status_code ret;\n+\tstruct irdma_ccq_cqe_info compl_info;\n+\tstruct irdma_sc_dev *dev = rsrc->dev;\n+\n+\tif (rsrc->dev->ceq_valid) {\n+\t\tirdma_cqp_cq_destroy_cmd(dev, &rsrc->cq);\n+\t\treturn;\n+\t}\n+\n+\tret = dev->iw_priv_cq_ops->cq_destroy(&rsrc->cq, 0, true);\n+\tif (ret)\n+\t\tdev_dbg(rfdev_to_dev(dev), \"PUDA: error ieq cq destroy\\n\");\n+\tif (!ret) {\n+\t\tret = dev->cqp_ops->poll_for_cqp_op_done(dev->cqp,\n+\t\t\t\t\t\t\t IRDMA_CQP_OP_DESTROY_CQ,\n+\t\t\t\t\t\t\t &compl_info);\n+\t\tif (ret)\n+\t\t\tdev_dbg(rfdev_to_dev(dev),\n+\t\t\t\t\"PUDA: error ieq qp destroy done\\n\");\n+\t}\n+}\n+\n+/**\n+ * irdma_puda_dele_rsrc - delete all resources during close\n+ * @vsi: VSI structure of device\n+ * @type: type of resource to dele\n+ * @reset: true if reset chip\n+ */\n+void irdma_puda_dele_rsrc(struct irdma_sc_vsi *vsi, enum puda_rsrc_type type,\n+\t\t\t bool reset)\n+{\n+\tstruct irdma_sc_dev *dev = vsi->dev;\n+\tstruct irdma_puda_rsrc *rsrc;\n+\tstruct irdma_puda_buf *buf = NULL;\n+\tstruct 
irdma_puda_buf *nextbuf = NULL;\n+\tstruct irdma_virt_mem *vmem;\n+\tstruct irdma_sc_ceq *ceq;\n+\n+\tceq = vsi->dev->ceq[0];\n+\tswitch (type) {\n+\tcase IRDMA_PUDA_RSRC_TYPE_ILQ:\n+\t\trsrc = vsi->ilq;\n+\t\tvmem = &vsi->ilq_mem;\n+\t\tvsi->ilq = NULL;\n+\t\tif (ceq && ceq->reg_cq)\n+\t\t\tirdma_sc_remove_cq_ctx(ceq, &rsrc->cq);\n+\t\tbreak;\n+\tcase IRDMA_PUDA_RSRC_TYPE_IEQ:\n+\t\trsrc = vsi->ieq;\n+\t\tvmem = &vsi->ieq_mem;\n+\t\tvsi->ieq = NULL;\n+\t\tif (ceq && ceq->reg_cq)\n+\t\t\tirdma_sc_remove_cq_ctx(ceq, &rsrc->cq);\n+\t\tbreak;\n+\tdefault:\n+\t\tdev_dbg(rfdev_to_dev(dev),\n+\t\t\t\"PUDA: error resource type = 0x%x\\n\", type);\n+\t\treturn;\n+\t}\n+\n+\tswitch (rsrc->cmpl) {\n+\tcase PUDA_HASH_CRC_COMPLETE:\n+\t\tirdma_free_hash_desc(rsrc->hash_desc);\n+\t\t/* fallthrough */\n+\tcase PUDA_QP_CREATED:\n+\t\tif (!reset)\n+\t\t\tirdma_puda_free_qp(rsrc);\n+\t\telse\n+\t\t\tirdma_qp_rem_qos(&rsrc->qp);\n+\n+\t\tdma_free_coherent(hw_to_dev(dev->hw), rsrc->qpmem.size,\n+\t\t\t\t rsrc->qpmem.va, rsrc->qpmem.pa);\n+\t\trsrc->qpmem.va = NULL;\n+\t\t/* fallthrough */\n+\tcase PUDA_CQ_CREATED:\n+\t\tif (!reset)\n+\t\t\tirdma_puda_free_cq(rsrc);\n+\n+\t\tdma_free_coherent(hw_to_dev(dev->hw), rsrc->cqmem.size,\n+\t\t\t\t rsrc->cqmem.va, rsrc->cqmem.pa);\n+\t\trsrc->cqmem.va = NULL;\n+\t\tbreak;\n+\tdefault:\n+\t\tdev_dbg(rfdev_to_dev(rsrc->dev), \"PUDA: error no resources\\n\");\n+\t\tbreak;\n+\t}\n+\t/* Free all allocated puda buffers for both tx and rx */\n+\tbuf = rsrc->alloclist;\n+\twhile (buf) {\n+\t\tnextbuf = buf->next;\n+\t\tirdma_puda_dele_buf(dev, buf);\n+\t\tbuf = nextbuf;\n+\t\trsrc->alloc_buf_count--;\n+\t}\n+\n+\tkfree(vmem->va);\n+}\n+\n+/**\n+ * irdma_puda_allocbufs - allocate buffers for resource\n+ * @rsrc: resource for buffer allocation\n+ * @count: number of buffers to create\n+ */\n+static enum irdma_status_code irdma_puda_allocbufs(struct irdma_puda_rsrc *rsrc,\n+\t\t\t\t\t\t u32 count)\n+{\n+\tu32 i;\n+\tstruct irdma_puda_buf *buf;\n+\tstruct irdma_puda_buf *nextbuf;\n+\n+\tfor (i = 0; i < count; i++) {\n+\t\tbuf = irdma_puda_alloc_buf(rsrc->dev, rsrc->buf_size);\n+\t\tif (!buf) {\n+\t\t\trsrc->stats_buf_alloc_fail++;\n+\t\t\treturn IRDMA_ERR_NO_MEMORY;\n+\t\t}\n+\t\tirdma_puda_ret_bufpool(rsrc, buf);\n+\t\trsrc->alloc_buf_count++;\n+\t\tif (!rsrc->alloclist) {\n+\t\t\trsrc->alloclist = buf;\n+\t\t} else {\n+\t\t\tnextbuf = rsrc->alloclist;\n+\t\t\trsrc->alloclist = buf;\n+\t\t\tbuf->next = nextbuf;\n+\t\t}\n+\t}\n+\n+\trsrc->avail_buf_count = rsrc->alloc_buf_count;\n+\n+\treturn 0;\n+}\n+\n+/**\n+ * irdma_puda_create_rsrc - create resource (ilq or ieq)\n+ * @vsi: sc VSI struct\n+ * @info: resource information\n+ */\n+enum irdma_status_code irdma_puda_create_rsrc(struct irdma_sc_vsi *vsi,\n+\t\t\t\t\t struct irdma_puda_rsrc_info *info)\n+{\n+\tstruct irdma_sc_dev *dev = vsi->dev;\n+\tenum irdma_status_code ret = 0;\n+\tstruct irdma_puda_rsrc *rsrc;\n+\tu32 pudasize;\n+\tu32 sqwridsize, rqwridsize;\n+\tstruct irdma_virt_mem *vmem;\n+\tstruct irdma_sc_ceq *ceq;\n+\n+\tinfo->count = 1;\n+\tpudasize = sizeof(struct irdma_puda_rsrc);\n+\tsqwridsize = info->sq_size * sizeof(struct irdma_sq_uk_wr_trk_info);\n+\trqwridsize = info->rq_size * 8;\n+\tswitch (info->type) {\n+\tcase IRDMA_PUDA_RSRC_TYPE_ILQ:\n+\t\tvmem = &vsi->ilq_mem;\n+\t\tbreak;\n+\tcase IRDMA_PUDA_RSRC_TYPE_IEQ:\n+\t\tvmem = &vsi->ieq_mem;\n+\t\tbreak;\n+\tdefault:\n+\t\treturn IRDMA_NOT_SUPPORTED;\n+\t}\n+\tvmem->size = pudasize + sqwridsize + rqwridsize;\n+\tvmem->va = kzalloc(vmem->size, 
GFP_ATOMIC);\n+\tif (!vmem->va)\n+\t\treturn IRDMA_ERR_NO_MEMORY;\n+\n+\trsrc = vmem->va;\n+\tspin_lock_init(&rsrc->bufpool_lock);\n+\tswitch (info->type) {\n+\tcase IRDMA_PUDA_RSRC_TYPE_ILQ:\n+\t\tvsi->ilq = vmem->va;\n+\t\tvsi->ilq_count = info->count;\n+\t\trsrc->receive = info->receive;\n+\t\trsrc->xmit_complete = info->xmit_complete;\n+\t\tbreak;\n+\tcase IRDMA_PUDA_RSRC_TYPE_IEQ:\n+\t\tvsi->ieq_count = info->count;\n+\t\tvsi->ieq = vmem->va;\n+\t\trsrc->receive = irdma_ieq_receive;\n+\t\trsrc->xmit_complete = irdma_ieq_tx_compl;\n+\t\tbreak;\n+\tdefault:\n+\t\treturn IRDMA_NOT_SUPPORTED;\n+\t}\n+\n+\trsrc->type = info->type;\n+\trsrc->sq_wrtrk_array = (struct irdma_sq_uk_wr_trk_info *)\n+\t\t\t ((u8 *)vmem->va + pudasize);\n+\trsrc->rq_wrid_array = (u64 *)((u8 *)vmem->va + pudasize + sqwridsize);\n+\t/* Initialize all ieq lists */\n+\tINIT_LIST_HEAD(&rsrc->bufpool);\n+\tINIT_LIST_HEAD(&rsrc->txpend);\n+\n+\trsrc->tx_wqe_avail_cnt = info->sq_size - 1;\n+\tdev->iw_pd_ops->pd_init(dev, &rsrc->sc_pd, info->pd_id, -1);\n+\trsrc->qp_id = info->qp_id;\n+\trsrc->cq_id = info->cq_id;\n+\trsrc->sq_size = info->sq_size;\n+\trsrc->rq_size = info->rq_size;\n+\trsrc->cq_size = info->rq_size + info->sq_size;\n+\tif (dev->hw_attrs.uk_attrs.hw_rev > IRDMA_GEN_1) {\n+\t\tif (rsrc->type == IRDMA_PUDA_RSRC_TYPE_ILQ)\n+\t\t\trsrc->cq_size += info->rq_size;\n+\t}\n+\trsrc->buf_size = info->buf_size;\n+\trsrc->dev = dev;\n+\trsrc->vsi = vsi;\n+\trsrc->stats_idx = info->stats_idx;\n+\trsrc->stats_idx_valid = info->stats_idx_valid;\n+\n+\tret = irdma_puda_cq_create(rsrc);\n+\tif (!ret) {\n+\t\trsrc->cmpl = PUDA_CQ_CREATED;\n+\t\tret = irdma_puda_qp_create(rsrc);\n+\t}\n+\tif (ret) {\n+\t\tdev_dbg(rfdev_to_dev(dev),\n+\t\t\t\"PUDA: error qp_create type=%d, status=%d\\n\",\n+\t\t\trsrc->type, ret);\n+\t\tgoto error;\n+\t}\n+\trsrc->cmpl = PUDA_QP_CREATED;\n+\n+\tceq = vsi->dev->ceq[0];\n+\tif (ceq->reg_cq)\n+\t\tret = irdma_sc_add_cq_ctx(ceq, &rsrc->cq);\n+\n+\tif (ret) {\n+\t\tdev_dbg(rfdev_to_dev(dev),\n+\t\t\t\"PUDA: error unable to add to cq_ctx\\n\");\n+\t\tgoto error;\n+\t}\n+\n+\tret = irdma_puda_allocbufs(rsrc, info->tx_buf_cnt + info->rq_size);\n+\tif (ret) {\n+\t\tdev_dbg(rfdev_to_dev(dev), \"PUDA: error allloc_buf\\n\");\n+\t\tgoto error;\n+\t}\n+\n+\trsrc->rxq_invalid_cnt = info->rq_size;\n+\tret = irdma_puda_replenish_rq(rsrc, true);\n+\tif (ret)\n+\t\tgoto error;\n+\n+\tif (info->type == IRDMA_PUDA_RSRC_TYPE_IEQ) {\n+\t\tif (!irdma_init_hash_desc(&rsrc->hash_desc)) {\n+\t\t\trsrc->check_crc = true;\n+\t\t\trsrc->cmpl = PUDA_HASH_CRC_COMPLETE;\n+\t\t\tret = 0;\n+\t\t}\n+\t}\n+\n+\tdev->ccq_ops->ccq_arm(&rsrc->cq);\n+\treturn ret;\n+\n+error:\n+\tirdma_puda_dele_rsrc(vsi, info->type, false);\n+\n+\treturn ret;\n+}\n+\n+/**\n+ * irdma_ilq_putback_rcvbuf - ilq buffer to put back on rq\n+ * @qp: ilq's qp resource\n+ * @wqe_idx: wqe index of completed rcvbuf\n+ */\n+static void irdma_ilq_putback_rcvbuf(struct irdma_sc_qp *qp, u32 wqe_idx)\n+{\n+\t__le64 *wqe;\n+\tu64 offset8, offset24;\n+\n+\twqe = qp->qp_uk.rq_base[wqe_idx].elem;\n+\tget_64bit_val(wqe, 24, &offset24);\n+\tif (qp->dev->hw_attrs.uk_attrs.hw_rev > IRDMA_GEN_1) {\n+\t\tget_64bit_val(wqe, 8, &offset8);\n+\t\tif (offset24)\n+\t\t\toffset8 &= ~LS_64(1, IRDMAQPSQ_VALID);\n+\t\telse\n+\t\t\toffset8 |= LS_64(1, IRDMAQPSQ_VALID);\n+\t\tset_64bit_val(wqe, 8, offset8);\n+\t\tdma_wmb(); /* make sure WQE is written before valid bit is set */\n+\t}\n+\tif (offset24)\n+\t\toffset24 = 0;\n+\telse\n+\t\toffset24 = LS_64(1, 
IRDMAQPSQ_VALID);\n+\n+\tset_64bit_val(wqe, 24, offset24);\n+}\n+\n+/**\n+ * irdma_ieq_get_fpdu_len - given length return fpdu length\n+ * @len: length of fpdu\n+ */\n+static u16 irdma_ieq_get_fpdu_len(u16 len)\n+{\n+\tu16 fpdu_len;\n+\n+\tfpdu_len = len + IRDMA_IEQ_MPA_FRAMING;\n+\tfpdu_len = (fpdu_len + 3) & 0xfffffffc;\n+\n+\treturn fpdu_len;\n+}\n+\n+/**\n+ * irdma_ieq_copy_to_txbuf - copydata from rcv buf to tx buf\n+ * @buf: rcv buffer with partial\n+ * @txbuf: tx buffer for sending back\n+ * @buf_offset: rcv buffer offset to copy from\n+ * @txbuf_offset: at offset in tx buf to copy\n+ * @len: length of data to copy\n+ */\n+static void irdma_ieq_copy_to_txbuf(struct irdma_puda_buf *buf,\n+\t\t\t\t struct irdma_puda_buf *txbuf,\n+\t\t\t\t u16 buf_offset, u32 txbuf_offset, u32 len)\n+{\n+\tvoid *mem1 = (u8 *)buf->mem.va + buf_offset;\n+\tvoid *mem2 = (u8 *)txbuf->mem.va + txbuf_offset;\n+\n+\tmemcpy(mem2, mem1, len);\n+}\n+\n+/**\n+ * irdma_ieq_setup_tx_buf - setup tx buffer for partial handling\n+ * @buf: reeive buffer with partial\n+ * @txbuf: buffer to prepare\n+ */\n+static void irdma_ieq_setup_tx_buf(struct irdma_puda_buf *buf,\n+\t\t\t\t struct irdma_puda_buf *txbuf)\n+{\n+\ttxbuf->tcphlen = buf->tcphlen;\n+\ttxbuf->ipv4 = buf->ipv4;\n+\n+\tif (buf->vsi->dev->hw_attrs.uk_attrs.hw_rev > IRDMA_GEN_1) {\n+\t\ttxbuf->hdrlen = txbuf->tcphlen;\n+\t\tirdma_ieq_copy_to_txbuf(buf, txbuf, IRDMA_TCP_OFFSET, 0,\n+\t\t\t\t\ttxbuf->hdrlen);\n+\t} else {\n+\t\ttxbuf->maclen = buf->maclen;\n+\t\ttxbuf->hdrlen = buf->hdrlen;\n+\t\tirdma_ieq_copy_to_txbuf(buf, txbuf, 0, 0, buf->hdrlen);\n+\t}\n+}\n+\n+/**\n+ * irdma_ieq_check_first_buf - check if rcv buffer's seq is in range\n+ * @buf: receive exception buffer\n+ * @fps: first partial sequence number\n+ */\n+static void irdma_ieq_check_first_buf(struct irdma_puda_buf *buf, u32 fps)\n+{\n+\tu32 offset;\n+\n+\tif (buf->seqnum < fps) {\n+\t\toffset = fps - buf->seqnum;\n+\t\tif (offset > buf->datalen)\n+\t\t\treturn;\n+\t\tbuf->data += offset;\n+\t\tbuf->datalen -= (u16)offset;\n+\t\tbuf->seqnum = fps;\n+\t}\n+}\n+\n+/**\n+ * irdma_ieq_compl_pfpdu - write txbuf with full fpdu\n+ * @ieq: ieq resource\n+ * @rxlist: ieq's received buffer list\n+ * @pbufl: temporary list for buffers for fpddu\n+ * @txbuf: tx buffer for fpdu\n+ * @fpdu_len: total length of fpdu\n+ */\n+static void irdma_ieq_compl_pfpdu(struct irdma_puda_rsrc *ieq,\n+\t\t\t\t struct list_head *rxlist,\n+\t\t\t\t struct list_head *pbufl,\n+\t\t\t\t struct irdma_puda_buf *txbuf, u16 fpdu_len)\n+{\n+\tstruct irdma_puda_buf *buf;\n+\tu32 nextseqnum;\n+\tu16 txoffset, bufoffset;\n+\n+\tbuf = irdma_puda_get_listbuf(pbufl);\n+\tif (!buf)\n+\t\treturn;\n+\n+\tnextseqnum = buf->seqnum + fpdu_len;\n+\tirdma_ieq_setup_tx_buf(buf, txbuf);\n+\tif (buf->vsi->dev->hw_attrs.uk_attrs.hw_rev > IRDMA_GEN_1) {\n+\t\ttxoffset = txbuf->hdrlen;\n+\t\ttxbuf->totallen = txbuf->hdrlen + fpdu_len;\n+\t\ttxbuf->data = (u8 *)txbuf->mem.va + txoffset;\n+\t} else {\n+\t\ttxoffset = buf->hdrlen;\n+\t\ttxbuf->totallen = buf->hdrlen + fpdu_len;\n+\t\ttxbuf->data = (u8 *)txbuf->mem.va + buf->hdrlen;\n+\t}\n+\tbufoffset = (u16)(buf->data - (u8 *)buf->mem.va);\n+\n+\tdo {\n+\t\tif (buf->datalen >= fpdu_len) {\n+\t\t\t/* copied full fpdu */\n+\t\t\tirdma_ieq_copy_to_txbuf(buf, txbuf, bufoffset, txoffset,\n+\t\t\t\t\t\tfpdu_len);\n+\t\t\tbuf->datalen -= fpdu_len;\n+\t\t\tbuf->data += fpdu_len;\n+\t\t\tbuf->seqnum = nextseqnum;\n+\t\t\tbreak;\n+\t\t}\n+\t\t/* copy partial fpdu */\n+\t\tirdma_ieq_copy_to_txbuf(buf, txbuf, 
bufoffset, txoffset,\n+\t\t\t\t\tbuf->datalen);\n+\t\ttxoffset += buf->datalen;\n+\t\tfpdu_len -= buf->datalen;\n+\t\tirdma_puda_ret_bufpool(ieq, buf);\n+\t\tbuf = irdma_puda_get_listbuf(pbufl);\n+\t\tif (!buf)\n+\t\t\treturn;\n+\n+\t\tbufoffset = (u16)(buf->data - (u8 *)buf->mem.va);\n+\t} while (1);\n+\n+\t/* last buffer on the list*/\n+\tif (buf->datalen)\n+\t\tlist_add(&buf->list, rxlist);\n+\telse\n+\t\tirdma_puda_ret_bufpool(ieq, buf);\n+}\n+\n+/**\n+ * irdma_ieq_create_pbufl - create buffer list for single fpdu\n+ * @pfpdu: pointer to fpdu\n+ * @rxlist: resource list for receive ieq buffes\n+ * @pbufl: temp. list for buffers for fpddu\n+ * @buf: first receive buffer\n+ * @fpdu_len: total length of fpdu\n+ */\n+static enum irdma_status_code\n+irdma_ieq_create_pbufl(struct irdma_pfpdu *pfpdu, struct list_head *rxlist,\n+\t\t struct list_head *pbufl, struct irdma_puda_buf *buf,\n+\t\t u16 fpdu_len)\n+{\n+\tenum irdma_status_code status = 0;\n+\tstruct irdma_puda_buf *nextbuf;\n+\tu32 nextseqnum;\n+\tu16 plen = fpdu_len - buf->datalen;\n+\tbool done = false;\n+\n+\tnextseqnum = buf->seqnum + buf->datalen;\n+\tdo {\n+\t\tnextbuf = irdma_puda_get_listbuf(rxlist);\n+\t\tif (!nextbuf) {\n+\t\t\tstatus = IRDMA_ERR_list_empty;\n+\t\t\tbreak;\n+\t\t}\n+\t\tlist_add_tail(&nextbuf->list, pbufl);\n+\t\tif (nextbuf->seqnum != nextseqnum) {\n+\t\t\tpfpdu->bad_seq_num++;\n+\t\t\tstatus = IRDMA_ERR_SEQ_NUM;\n+\t\t\tbreak;\n+\t\t}\n+\t\tif (nextbuf->datalen >= plen) {\n+\t\t\tdone = true;\n+\t\t} else {\n+\t\t\tplen -= nextbuf->datalen;\n+\t\t\tnextseqnum = nextbuf->seqnum + nextbuf->datalen;\n+\t\t}\n+\n+\t} while (!done);\n+\n+\treturn status;\n+}\n+\n+/**\n+ * irdma_ieq_handle_partial - process partial fpdu buffer\n+ * @ieq: ieq resource\n+ * @pfpdu: partial management per user qp\n+ * @buf: receive buffer\n+ * @fpdu_len: fpdu len in the buffer\n+ */\n+static enum irdma_status_code\n+irdma_ieq_handle_partial(struct irdma_puda_rsrc *ieq, struct irdma_pfpdu *pfpdu,\n+\t\t\t struct irdma_puda_buf *buf, u16 fpdu_len)\n+{\n+\tenum irdma_status_code status = 0;\n+\tu8 *crcptr;\n+\tu32 mpacrc;\n+\tu32 seqnum = buf->seqnum;\n+\tstruct list_head pbufl; /* partial buffer list */\n+\tstruct irdma_puda_buf *txbuf = NULL;\n+\tstruct list_head *rxlist = &pfpdu->rxlist;\n+\n+\tINIT_LIST_HEAD(&pbufl);\n+\tlist_add(&buf->list, &pbufl);\n+\n+\tstatus = irdma_ieq_create_pbufl(pfpdu, rxlist, &pbufl, buf, fpdu_len);\n+\tif (status)\n+\t\tgoto error;\n+\n+\ttxbuf = irdma_puda_get_bufpool(ieq);\n+\tif (!txbuf) {\n+\t\tpfpdu->no_tx_bufs++;\n+\t\tstatus = IRDMA_ERR_NO_TXBUFS;\n+\t\tgoto error;\n+\t}\n+\n+\tirdma_ieq_compl_pfpdu(ieq, rxlist, &pbufl, txbuf, fpdu_len);\n+\tirdma_ieq_update_tcpip_info(txbuf, fpdu_len, seqnum);\n+\n+\tcrcptr = txbuf->data + fpdu_len - 4;\n+\tmpacrc = *(u32 *)crcptr;\n+\tif (ieq->check_crc) {\n+\t\tstatus = irdma_ieq_check_mpacrc(ieq->hash_desc, txbuf->data,\n+\t\t\t\t\t\t(fpdu_len - 4), mpacrc);\n+\t\tif (status) {\n+\t\t\tdev_dbg(rfdev_to_dev(ieq->dev),\n+\t\t\t\t\"IEQ: error bad crc\\n\");\n+\t\t\tgoto error;\n+\t\t}\n+\t}\n+\n+\tirdma_debug_buf(ieq->dev, IRDMA_DEBUG_IEQ, \"IEQ TX BUFFER\",\n+\t\t\ttxbuf->mem.va, txbuf->totallen);\n+\tif (ieq->dev->hw_attrs.uk_attrs.hw_rev > IRDMA_GEN_1)\n+\t\ttxbuf->ah_id = pfpdu->ah->ah_info.ah_idx;\n+\ttxbuf->do_lpb = true;\n+\tirdma_puda_send_buf(ieq, txbuf);\n+\tpfpdu->rcv_nxt = seqnum + fpdu_len;\n+\treturn status;\n+\n+error:\n+\twhile (!list_empty(&pbufl)) {\n+\t\tbuf = (struct irdma_puda_buf 
*)(pbufl.prev);\n+\t\tlist_del(&buf->list);\n+\t\tlist_add(&buf->list, rxlist);\n+\t}\n+\tif (txbuf)\n+\t\tirdma_puda_ret_bufpool(ieq, txbuf);\n+\n+\treturn status;\n+}\n+\n+/**\n+ * irdma_ieq_process_buf - process buffer rcvd for ieq\n+ * @ieq: ieq resource\n+ * @pfpdu: partial management per user qp\n+ * @buf: receive buffer\n+ */\n+static enum irdma_status_code irdma_ieq_process_buf(struct irdma_puda_rsrc *ieq,\n+\t\t\t\t\t\t struct irdma_pfpdu *pfpdu,\n+\t\t\t\t\t\t struct irdma_puda_buf *buf)\n+{\n+\tu16 fpdu_len = 0;\n+\tu16 datalen = buf->datalen;\n+\tu8 *datap = buf->data;\n+\tu8 *crcptr;\n+\tu16 ioffset = 0;\n+\tu32 mpacrc;\n+\tu32 seqnum = buf->seqnum;\n+\tu16 len = 0;\n+\tu16 full = 0;\n+\tbool partial = false;\n+\tstruct irdma_puda_buf *txbuf;\n+\tstruct list_head *rxlist = &pfpdu->rxlist;\n+\tenum irdma_status_code ret = 0;\n+\n+\tioffset = (u16)(buf->data - (u8 *)buf->mem.va);\n+\twhile (datalen) {\n+\t\tfpdu_len = irdma_ieq_get_fpdu_len(ntohs(*(__be16 *)datap));\n+\t\tif (fpdu_len > pfpdu->max_fpdu_data) {\n+\t\t\tdev_dbg(rfdev_to_dev(ieq->dev),\n+\t\t\t\t\"IEQ: error bad fpdu_len\\n\");\n+\t\t\tlist_add(&buf->list, rxlist);\n+\t\t\treturn IRDMA_ERR_MPA_CRC;\n+\t\t}\n+\n+\t\tif (datalen < fpdu_len) {\n+\t\t\tpartial = true;\n+\t\t\tbreak;\n+\t\t}\n+\t\tcrcptr = datap + fpdu_len - 4;\n+\t\tmpacrc = *(u32 *)crcptr;\n+\t\tif (ieq->check_crc)\n+\t\t\tret = irdma_ieq_check_mpacrc(ieq->hash_desc, datap,\n+\t\t\t\t\t\t fpdu_len - 4, mpacrc);\n+\t\tif (ret) {\n+\t\t\tlist_add(&buf->list, rxlist);\n+\t\t\tdev_dbg(rfdev_to_dev(ieq->dev),\n+\t\t\t\t\"ERR: IRDMA_ERR_MPA_CRC\\n\");\n+\t\t\treturn IRDMA_ERR_MPA_CRC;\n+\t\t}\n+\t\tfull++;\n+\t\tpfpdu->fpdu_processed++;\n+\t\tdatap += fpdu_len;\n+\t\tlen += fpdu_len;\n+\t\tdatalen -= fpdu_len;\n+\t}\n+\tif (full) {\n+\t\t/* copy full pdu's in the txbuf and send them out */\n+\t\ttxbuf = irdma_puda_get_bufpool(ieq);\n+\t\tif (!txbuf) {\n+\t\t\tpfpdu->no_tx_bufs++;\n+\t\t\tlist_add(&buf->list, rxlist);\n+\t\t\treturn IRDMA_ERR_NO_TXBUFS;\n+\t\t}\n+\t\t/* modify txbuf's buffer header */\n+\t\tirdma_ieq_setup_tx_buf(buf, txbuf);\n+\t\t/* copy full fpdu's to new buffer */\n+\t\tif (ieq->dev->hw_attrs.uk_attrs.hw_rev > IRDMA_GEN_1) {\n+\t\t\tirdma_ieq_copy_to_txbuf(buf, txbuf, ioffset,\n+\t\t\t\t\t\ttxbuf->hdrlen, len);\n+\t\t\ttxbuf->totallen = txbuf->hdrlen + len;\n+\t\t\ttxbuf->ah_id = pfpdu->ah->ah_info.ah_idx;\n+\t\t} else {\n+\t\t\tirdma_ieq_copy_to_txbuf(buf, txbuf, ioffset,\n+\t\t\t\t\t\tbuf->hdrlen, len);\n+\t\t\ttxbuf->totallen = buf->hdrlen + len;\n+\t\t}\n+\t\tirdma_ieq_update_tcpip_info(txbuf, len, buf->seqnum);\n+\t\tirdma_debug_buf(ieq->dev, IRDMA_DEBUG_IEQ, \"IEQ TX BUFFER\",\n+\t\t\t\ttxbuf->mem.va, txbuf->totallen);\n+\t\ttxbuf->do_lpb = true;\n+\t\tirdma_puda_send_buf(ieq, txbuf);\n+\n+\t\tif (!datalen) {\n+\t\t\tpfpdu->rcv_nxt = buf->seqnum + len;\n+\t\t\tirdma_puda_ret_bufpool(ieq, buf);\n+\t\t\treturn 0;\n+\t\t}\n+\t\tbuf->data = datap;\n+\t\tbuf->seqnum = seqnum + len;\n+\t\tbuf->datalen = datalen;\n+\t\tpfpdu->rcv_nxt = buf->seqnum;\n+\t}\n+\tif (partial)\n+\t\treturn irdma_ieq_handle_partial(ieq, pfpdu, buf, fpdu_len);\n+\n+\treturn 0;\n+}\n+\n+/**\n+ * irdma_ieq_process_fpdus - process fpdu's buffers on its list\n+ * @qp: qp for which partial fpdus\n+ * @ieq: ieq resource\n+ */\n+void irdma_ieq_process_fpdus(struct irdma_sc_qp *qp,\n+\t\t\t struct irdma_puda_rsrc *ieq)\n+{\n+\tstruct irdma_pfpdu *pfpdu = &qp->pfpdu;\n+\tstruct list_head *rxlist = &pfpdu->rxlist;\n+\tstruct irdma_puda_buf *buf;\n+\tenum irdma_status_code 
status;\n+\n+\tdo {\n+\t\tif (list_empty(rxlist))\n+\t\t\tbreak;\n+\t\tbuf = irdma_puda_get_listbuf(rxlist);\n+\t\tif (!buf) {\n+\t\t\tdev_dbg(rfdev_to_dev(ieq->dev), \"IEQ: error no buf\\n\");\n+\t\t\tbreak;\n+\t\t}\n+\t\tif (buf->seqnum != pfpdu->rcv_nxt) {\n+\t\t\t/* This could be out of order or missing packet */\n+\t\t\tpfpdu->out_of_order++;\n+\t\t\tlist_add(&buf->list, rxlist);\n+\t\t\tbreak;\n+\t\t}\n+\t\t/* keep processing buffers from the head of the list */\n+\t\tstatus = irdma_ieq_process_buf(ieq, pfpdu, buf);\n+\t\tif (status == IRDMA_ERR_MPA_CRC) {\n+\t\t\tpfpdu->mpa_crc_err = true;\n+\t\t\twhile (!list_empty(rxlist)) {\n+\t\t\t\tbuf = irdma_puda_get_listbuf(rxlist);\n+\t\t\t\tirdma_puda_ret_bufpool(ieq, buf);\n+\t\t\t\tpfpdu->crc_err++;\n+\t\t\t}\n+\t\t\t/* create CQP for AE */\n+\t\t\tirdma_ieq_mpa_crc_ae(ieq->dev, qp);\n+\t\t}\n+\t} while (!status);\n+}\n+\n+/**\n+ * irdma_ieq_create_ah - create an address handle for IEQ\n+ * @qp: qp pointer\n+ * @buf: buf received on IEQ used to create AH\n+ */\n+static enum irdma_status_code irdma_ieq_create_ah(struct irdma_sc_qp *qp,\n+\t\t\t\t\t\t struct irdma_puda_buf *buf)\n+{\n+\tstruct irdma_ah_info ah_info = {};\n+\n+\tqp->pfpdu.ah_buf = buf;\n+\tirdma_puda_ieq_get_ah_info(qp, &ah_info);\n+\treturn irdma_puda_create_ah(qp->vsi->dev, &ah_info, false,\n+\t\t\t\t IRDMA_PUDA_RSRC_TYPE_IEQ, qp,\n+\t\t\t\t &qp->pfpdu.ah);\n+}\n+\n+/**\n+ * irdma_ieq_handle_exception - handle qp's exception\n+ * @ieq: ieq resource\n+ * @qp: qp receiving excpetion\n+ * @buf: receive buffer\n+ */\n+static void irdma_ieq_handle_exception(struct irdma_puda_rsrc *ieq,\n+\t\t\t\t struct irdma_sc_qp *qp,\n+\t\t\t\t struct irdma_puda_buf *buf)\n+{\n+\tstruct irdma_pfpdu *pfpdu = &qp->pfpdu;\n+\tu32 *hw_host_ctx = (u32 *)qp->hw_host_ctx;\n+\tu32 rcv_wnd = hw_host_ctx[23];\n+\t/* first partial seq # in q2 */\n+\tu32 fps = *(u32 *)(qp->q2_buf + Q2_FPSN_OFFSET);\n+\tstruct list_head *rxlist = &pfpdu->rxlist;\n+\tunsigned long flags = 0;\n+\tu8 hw_rev = qp->dev->hw_attrs.uk_attrs.hw_rev;\n+\n+\tirdma_debug_buf(ieq->dev, IRDMA_DEBUG_IEQ, \"IEQ RX BUFFER\", buf->mem.va,\n+\t\t\tbuf->totallen);\n+\n+\tspin_lock_irqsave(&pfpdu->lock, flags);\n+\tpfpdu->total_ieq_bufs++;\n+\tif (pfpdu->mpa_crc_err) {\n+\t\tpfpdu->crc_err++;\n+\t\tgoto error;\n+\t}\n+\tif (pfpdu->mode && fps != pfpdu->fps) {\n+\t\t/* clean up qp as it is new partial sequence */\n+\t\tirdma_ieq_cleanup_qp(ieq, qp);\n+\t\tdev_dbg(rfdev_to_dev(ieq->dev),\n+\t\t\t\"IEQ: restarting new partial\\n\");\n+\t\tpfpdu->mode = false;\n+\t}\n+\n+\tif (!pfpdu->mode) {\n+\t\tirdma_debug_buf(ieq->dev, IRDMA_DEBUG_IEQ, \"Q2 BUFFER\",\n+\t\t\t\t(u64 *)qp->q2_buf, 128);\n+\t\t/* First_Partial_Sequence_Number check */\n+\t\tpfpdu->rcv_nxt = fps;\n+\t\tpfpdu->fps = fps;\n+\t\tpfpdu->mode = true;\n+\t\tpfpdu->max_fpdu_data = (buf->ipv4) ?\n+\t\t\t\t (ieq->vsi->mtu - IRDMA_MTU_TO_MSS_IPV4) :\n+\t\t\t\t (ieq->vsi->mtu - IRDMA_MTU_TO_MSS_IPV6);\n+\t\tpfpdu->pmode_count++;\n+\t\tINIT_LIST_HEAD(rxlist);\n+\t\tirdma_ieq_check_first_buf(buf, fps);\n+\t}\n+\n+\tif (!(rcv_wnd >= (buf->seqnum - pfpdu->rcv_nxt))) {\n+\t\tpfpdu->bad_seq_num++;\n+\t\tgoto error;\n+\t}\n+\n+\tif (!list_empty(rxlist)) {\n+\t\tif (buf->seqnum != pfpdu->nextseqnum) {\n+\t\t\tirdma_send_ieq_ack(qp);\n+\t\t\t/* throw away out-of-order, duplicates*/\n+\t\t\tgoto error;\n+\t\t}\n+\t}\n+\t/* Insert buf before head */\n+\tlist_add_tail(&buf->list, rxlist);\n+\tpfpdu->nextseqnum = buf->seqnum + buf->datalen;\n+\tpfpdu->lastrcv_buf = buf;\n+\tif (hw_rev > IRDMA_GEN_1 
&& !pfpdu->ah) {\n+\t\tirdma_ieq_create_ah(qp, buf);\n+\t\tif (!pfpdu->ah)\n+\t\t\tgoto error;\n+\t\tgoto exit;\n+\t}\n+\tif (hw_rev == IRDMA_GEN_1)\n+\t\tirdma_ieq_process_fpdus(qp, ieq);\n+\telse if (pfpdu->ah && pfpdu->ah->ah_info.ah_valid)\n+\t\tirdma_ieq_process_fpdus(qp, ieq);\n+exit:\n+\tspin_unlock_irqrestore(&pfpdu->lock, flags);\n+\treturn;\n+\n+error:\n+\tirdma_puda_ret_bufpool(ieq, buf);\n+\tspin_unlock_irqrestore(&pfpdu->lock, flags);\n+}\n+\n+/**\n+ * irdma_ieq_receive - received exception buffer\n+ * @vsi: VSI of device\n+ * @buf: exception buffer received\n+ */\n+static void irdma_ieq_receive(struct irdma_sc_vsi *vsi,\n+\t\t\t struct irdma_puda_buf *buf)\n+{\n+\tstruct irdma_puda_rsrc *ieq = vsi->ieq;\n+\tstruct irdma_sc_qp *qp = NULL;\n+\tu32 wqe_idx = ieq->compl_rxwqe_idx;\n+\n+\tqp = irdma_ieq_get_qp(vsi->dev, buf);\n+\tif (!qp) {\n+\t\tieq->stats_bad_qp_id++;\n+\t\tirdma_puda_ret_bufpool(ieq, buf);\n+\t} else {\n+\t\tirdma_ieq_handle_exception(ieq, qp, buf);\n+\t}\n+\t/*\n+\t * ieq->rx_wqe_idx is used by irdma_puda_replenish_rq()\n+\t * on which wqe_idx to start replenish rq\n+\t */\n+\tif (!ieq->rxq_invalid_cnt)\n+\t\tieq->rx_wqe_idx = wqe_idx;\n+\tieq->rxq_invalid_cnt++;\n+}\n+\n+/**\n+ * irdma_ieq_tx_compl - put back after sending completed exception buffer\n+ * @vsi: sc VSI struct\n+ * @sqwrid: pointer to puda buffer\n+ */\n+static void irdma_ieq_tx_compl(struct irdma_sc_vsi *vsi, void *sqwrid)\n+{\n+\tstruct irdma_puda_rsrc *ieq = vsi->ieq;\n+\tstruct irdma_puda_buf *buf = sqwrid;\n+\n+\tirdma_puda_ret_bufpool(ieq, buf);\n+}\n+\n+/**\n+ * irdma_ieq_cleanup_qp - qp is being destroyed\n+ * @ieq: ieq resource\n+ * @qp: all pending fpdu buffers\n+ */\n+void irdma_ieq_cleanup_qp(struct irdma_puda_rsrc *ieq, struct irdma_sc_qp *qp)\n+{\n+\tstruct irdma_puda_buf *buf;\n+\tstruct irdma_pfpdu *pfpdu = &qp->pfpdu;\n+\tstruct list_head *rxlist = &pfpdu->rxlist;\n+\n+\tif (!pfpdu->mode)\n+\t\treturn;\n+\n+\twhile (!list_empty(rxlist)) {\n+\t\tbuf = irdma_puda_get_listbuf(rxlist);\n+\t\tirdma_puda_ret_bufpool(ieq, buf);\n+\t}\n+\n+\tif (qp->pfpdu.ah) {\n+\t\tirdma_puda_free_ah(ieq->dev, qp->pfpdu.ah);\n+\t\tqp->pfpdu.ah = NULL;\n+\t\tqp->pfpdu.ah_buf = NULL;\n+\t}\n+}\ndiff --git a/drivers/infiniband/hw/irdma/puda.h b/drivers/infiniband/hw/irdma/puda.h\nnew file mode 100644\nindex 0000000..1fed17a\n--- /dev/null\n+++ b/drivers/infiniband/hw/irdma/puda.h\n@@ -0,0 +1,187 @@\n+/* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB */\n+/* Copyright (c) 2019, Intel Corporation. 
*/\n+\n+#ifndef IRDMA_PUDA_H\n+#define IRDMA_PUDA_H\n+\n+#define IRDMA_IEQ_MPA_FRAMING\t6\n+#define IRDMA_TCP_OFFSET\t40\n+#define IRDMA_IPV4_PAD\t\t20\n+\n+enum puda_rsrc_type {\n+\tIRDMA_PUDA_RSRC_TYPE_ILQ = 1,\n+\tIRDMA_PUDA_RSRC_TYPE_IEQ,\n+\tIRDMA_PUDA_RSRC_TYPE_MAX, /* Must be last entry */\n+};\n+\n+enum puda_rsrc_complete {\n+\tPUDA_CQ_CREATED = 1,\n+\tPUDA_QP_CREATED,\n+\tPUDA_TX_COMPLETE,\n+\tPUDA_RX_COMPLETE,\n+\tPUDA_HASH_CRC_COMPLETE,\n+};\n+\n+struct irdma_sc_dev;\n+struct irdma_sc_qp;\n+struct irdma_sc_cq;\n+\n+struct irdma_puda_cmpl_info {\n+\tstruct irdma_qp_uk *qp;\n+\tu8 q_type;\n+\tbool vlan_valid;\n+\tu8 l3proto;\n+\tu8 l4proto;\n+\tu16 vlan;\n+\tu32 payload_len;\n+\tu32 compl_error; /* No_err=0, else major and minor err code */\n+\tu32 qp_id;\n+\tu32 wqe_idx;\n+\tbool ipv4;\n+\tbool smac_valid;\n+\tu8 smac[ETH_ALEN];\n+};\n+\n+struct irdma_puda_send_info {\n+\tu64 paddr; /* Physical address */\n+\tu32 len;\n+\tu32 ah_id;\n+\tu8 tcplen;\n+\tu8 maclen;\n+\tbool ipv4;\n+\tbool do_lpb;\n+\tvoid *scratch;\n+};\n+\n+struct irdma_puda_buf {\n+\tstruct list_head list; /* MUST be first entry */\n+\tstruct irdma_dma_mem mem; /* DMA memory for the buffer */\n+\tstruct irdma_puda_buf *next; /* for alloclist in rsrc struct */\n+\tstruct irdma_virt_mem buf_mem; /* Buffer memory for this buffer */\n+\tvoid *scratch;\n+\tu8 *iph;\n+\tu8 *tcph;\n+\tu8 *data;\n+\tu16 datalen;\n+\tu16 vlan_id;\n+\tu8 tcphlen; /* tcp length in bytes */\n+\tu8 maclen; /* mac length in bytes */\n+\tu32 totallen; /* maclen+iphlen+tcphlen+datalen */\n+\tatomic_t refcount;\n+\tu8 hdrlen;\n+\tbool ipv4;\n+\tbool vlan_valid;\n+\tbool do_lpb; /* Loopback buffer */\n+\tu32 seqnum;\n+\tu32 ah_id;\n+\tbool smac_valid;\n+\tu8 smac[ETH_ALEN];\n+\tstruct irdma_sc_vsi *vsi;\n+};\n+\n+struct irdma_puda_rsrc_info {\n+\tvoid (*receive)(struct irdma_sc_vsi *vsi, struct irdma_puda_buf *buf);\n+\tvoid (*xmit_complete)(struct irdma_sc_vsi *vsi, void *sqwrid);\n+\tenum puda_rsrc_type type; /* ILQ or IEQ */\n+\tu32 count;\n+\tu32 pd_id;\n+\tu32 cq_id;\n+\tu32 qp_id;\n+\tu32 sq_size;\n+\tu32 rq_size;\n+\tu32 tx_buf_cnt; /* total bufs allocated will be rq_size + tx_buf_cnt */\n+\tu16 buf_size;\n+\tu8 stats_idx;\n+\tbool stats_idx_valid;\n+};\n+\n+struct irdma_puda_rsrc {\n+\tstruct irdma_sc_cq cq;\n+\tstruct irdma_sc_qp qp;\n+\tstruct irdma_sc_pd sc_pd;\n+\tstruct irdma_sc_dev *dev;\n+\tstruct irdma_sc_vsi *vsi;\n+\tstruct irdma_dma_mem cqmem;\n+\tstruct irdma_dma_mem qpmem;\n+\tstruct irdma_virt_mem ilq_mem;\n+\tenum puda_rsrc_complete cmpl;\n+\tenum puda_rsrc_type type;\n+\tu16 buf_size; /* buf must be max datalen + tcpip hdr + mac */\n+\tu32 cq_id;\n+\tu32 qp_id;\n+\tu32 sq_size;\n+\tu32 rq_size;\n+\tu32 cq_size;\n+\tstruct irdma_sq_uk_wr_trk_info *sq_wrtrk_array;\n+\tu64 *rq_wrid_array;\n+\tu32 compl_rxwqe_idx;\n+\tu32 rx_wqe_idx;\n+\tu32 rxq_invalid_cnt;\n+\tu32 tx_wqe_avail_cnt;\n+\tbool check_crc;\n+\tstruct shash_desc *hash_desc;\n+\tstruct list_head txpend;\n+\tstruct list_head bufpool; /* free buffers pool list for recv and xmit */\n+\tu32 alloc_buf_count;\n+\tu32 avail_buf_count; /* snapshot of currently available buffers */\n+\tspinlock_t bufpool_lock;\n+\tstruct irdma_puda_buf *alloclist;\n+\tvoid (*receive)(struct irdma_sc_vsi *vsi, struct irdma_puda_buf *buf);\n+\tvoid (*xmit_complete)(struct irdma_sc_vsi *vsi, void *sqwrid);\n+\t/* puda stats */\n+\tu64 stats_buf_alloc_fail;\n+\tu64 stats_pkt_rcvd;\n+\tu64 stats_pkt_sent;\n+\tu64 stats_rcvd_pkt_err;\n+\tu64 stats_sent_pkt_q;\n+\tu64 stats_bad_qp_id;\n+\tu8 
stats_idx;\n+\tbool stats_idx_valid;\n+};\n+\n+struct irdma_puda_buf *irdma_puda_get_bufpool(struct irdma_puda_rsrc *rsrc);\n+void irdma_puda_ret_bufpool(struct irdma_puda_rsrc *rsrc,\n+\t\t\t struct irdma_puda_buf *buf);\n+void irdma_puda_send_buf(struct irdma_puda_rsrc *rsrc,\n+\t\t\t struct irdma_puda_buf *buf);\n+enum irdma_status_code irdma_puda_send(struct irdma_sc_qp *qp,\n+\t\t\t\t struct irdma_puda_send_info *info);\n+enum irdma_status_code\n+irdma_puda_create_rsrc(struct irdma_sc_vsi *vsi,\n+\t\t struct irdma_puda_rsrc_info *info);\n+void irdma_puda_dele_rsrc(struct irdma_sc_vsi *vsi, enum puda_rsrc_type type,\n+\t\t\t bool reset);\n+enum irdma_status_code irdma_puda_poll_cmpl(struct irdma_sc_dev *dev,\n+\t\t\t\t\t struct irdma_sc_cq *cq,\n+\t\t\t\t\t u32 *compl_err);\n+\n+struct irdma_sc_qp *irdma_ieq_get_qp(struct irdma_sc_dev *dev,\n+\t\t\t\t struct irdma_puda_buf *buf);\n+enum irdma_status_code\n+irdma_puda_get_tcpip_info(struct irdma_puda_cmpl_info *info,\n+\t\t\t struct irdma_puda_buf *buf);\n+enum irdma_status_code irdma_ieq_check_mpacrc(struct shash_desc *desc,\n+\t\t\t\t\t void *addr, u32 len, u32 val);\n+enum irdma_status_code irdma_init_hash_desc(struct shash_desc **desc);\n+void irdma_ieq_mpa_crc_ae(struct irdma_sc_dev *dev, struct irdma_sc_qp *qp);\n+void irdma_free_hash_desc(struct shash_desc *desc);\n+void irdma_ieq_update_tcpip_info(struct irdma_puda_buf *buf, u16 len,\n+\t\t\t\t u32 seqnum);\n+enum irdma_status_code irdma_cqp_qp_create_cmd(struct irdma_sc_dev *dev,\n+\t\t\t\t\t struct irdma_sc_qp *qp);\n+enum irdma_status_code irdma_cqp_cq_create_cmd(struct irdma_sc_dev *dev,\n+\t\t\t\t\t struct irdma_sc_cq *cq);\n+void irdma_cqp_qp_destroy_cmd(struct irdma_sc_dev *dev, struct irdma_sc_qp *qp);\n+void irdma_cqp_cq_destroy_cmd(struct irdma_sc_dev *dev, struct irdma_sc_cq *cq);\n+void irdma_puda_ieq_get_ah_info(struct irdma_sc_qp *qp,\n+\t\t\t\tstruct irdma_ah_info *ah_info);\n+enum irdma_status_code irdma_puda_create_ah(struct irdma_sc_dev *dev,\n+\t\t\t\t\t struct irdma_ah_info *ah_info,\n+\t\t\t\t\t bool wait, enum puda_rsrc_type type,\n+\t\t\t\t\t void *cb_param,\n+\t\t\t\t\t struct irdma_sc_ah **ah);\n+void irdma_puda_free_ah(struct irdma_sc_dev *dev, struct irdma_sc_ah *ah);\n+void irdma_ieq_process_fpdus(struct irdma_sc_qp *qp,\n+\t\t\t struct irdma_puda_rsrc *ieq);\n+void irdma_ieq_cleanup_qp(struct irdma_puda_rsrc *ieq, struct irdma_sc_qp *qp);\n+#endif /* IRDMA_PUDA_H */\n", "prefixes": [ "rdma-next", "05/17" ] }