get:
Show a patch.

patch:
Update a patch (partial update; only the fields supplied are changed).

put:
Update a patch (full update).
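
A minimal sketch of driving this endpoint from a script, assuming the
third-party Python "requests" package; the patch ID and the read-only GET
mirror the example exchange below, while updates (PUT/PATCH) additionally
require a Patchwork API token for an account with maintainer rights on the
project (the token value shown is a placeholder):

    import requests

    BASE = "http://patchwork.ozlabs.org/api"

    # Fetch one patch as JSON (the same data as the example response below).
    resp = requests.get(f"{BASE}/patches/1182398/")
    resp.raise_for_status()
    patch = resp.json()
    print(patch["name"], patch["state"])

    # Partially update the patch via PATCH, e.g. to change its state.
    # Uncomment and supply a real token to run against a live instance.
    # resp = requests.patch(
    #     f"{BASE}/patches/1182398/",
    #     headers={"Authorization": "Token <your-api-token>"},
    #     json={"state": "accepted"},
    # )
    # resp.raise_for_status()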

GET /api/patches/1182398/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 1182398,
    "url": "http://patchwork.ozlabs.org/api/patches/1182398/?format=api",
    "web_url": "http://patchwork.ozlabs.org/project/intel-wired-lan/patch/20191023182253.1115-12-shiraz.saleem@intel.com/",
    "project": {
        "id": 46,
        "url": "http://patchwork.ozlabs.org/api/projects/46/?format=api",
        "name": "Intel Wired Ethernet development",
        "link_name": "intel-wired-lan",
        "list_id": "intel-wired-lan.osuosl.org",
        "list_email": "intel-wired-lan@osuosl.org",
        "web_url": "",
        "scm_url": "",
        "webscm_url": "",
        "list_archive_url": "",
        "list_archive_url_format": "",
        "commit_url_format": ""
    },
    "msgid": "<20191023182253.1115-12-shiraz.saleem@intel.com>",
    "list_archive_url": null,
    "date": "2019-10-23T18:22:47",
    "name": "[rdma-nxt,11/16] RDMA/irdma: Add user/kernel shared libraries",
    "commit_ref": null,
    "pull_url": null,
    "state": "changes-requested",
    "archived": false,
    "hash": "655aa3d13c5d1048c1dbb984b32fe9e1dbd19ce5",
    "submitter": {
        "id": 69500,
        "url": "http://patchwork.ozlabs.org/api/people/69500/?format=api",
        "name": "Saleem, Shiraz",
        "email": "shiraz.saleem@intel.com"
    },
    "delegate": {
        "id": 68,
        "url": "http://patchwork.ozlabs.org/api/users/68/?format=api",
        "username": "jtkirshe",
        "first_name": "Jeff",
        "last_name": "Kirsher",
        "email": "jeffrey.t.kirsher@intel.com"
    },
    "mbox": "http://patchwork.ozlabs.org/project/intel-wired-lan/patch/20191023182253.1115-12-shiraz.saleem@intel.com/mbox/",
    "series": [
        {
            "id": 138160,
            "url": "http://patchwork.ozlabs.org/api/series/138160/?format=api",
            "web_url": "http://patchwork.ozlabs.org/project/intel-wired-lan/list/?series=138160",
            "date": "2019-10-23T18:22:36",
            "name": "Add unified Intel Ethernet RDMA driver (irdma)",
            "version": 1,
            "mbox": "http://patchwork.ozlabs.org/series/138160/mbox/"
        }
    ],
    "comments": "http://patchwork.ozlabs.org/api/patches/1182398/comments/",
    "check": "pending",
    "checks": "http://patchwork.ozlabs.org/api/patches/1182398/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<intel-wired-lan-bounces@osuosl.org>",
        "X-Original-To": [
            "incoming@patchwork.ozlabs.org",
            "intel-wired-lan@lists.osuosl.org"
        ],
        "Delivered-To": [
            "patchwork-incoming@bilbo.ozlabs.org",
            "intel-wired-lan@lists.osuosl.org"
        ],
        "Authentication-Results": [
            "ozlabs.org; spf=pass (sender SPF authorized)\n\tsmtp.mailfrom=osuosl.org (client-ip=140.211.166.133;\n\thelo=hemlock.osuosl.org;\n\tenvelope-from=intel-wired-lan-bounces@osuosl.org;\n\treceiver=<UNKNOWN>)",
            "ozlabs.org;\n\tdmarc=fail (p=none dis=none) header.from=intel.com"
        ],
        "Received": [
            "from hemlock.osuosl.org (smtp2.osuosl.org [140.211.166.133])\n\t(using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256\n\tbits)) (No client certificate requested)\n\tby ozlabs.org (Postfix) with ESMTPS id 46yzfG3fDXz9sPF\n\tfor <incoming@patchwork.ozlabs.org>;\n\tThu, 24 Oct 2019 05:38:30 +1100 (AEDT)",
            "from localhost (localhost [127.0.0.1])\n\tby hemlock.osuosl.org (Postfix) with ESMTP id 023C7881A8;\n\tWed, 23 Oct 2019 18:38:29 +0000 (UTC)",
            "from hemlock.osuosl.org ([127.0.0.1])\n\tby localhost (.osuosl.org [127.0.0.1]) (amavisd-new, port 10024)\n\twith ESMTP id kYajJyC8Zaym; Wed, 23 Oct 2019 18:38:24 +0000 (UTC)",
            "from ash.osuosl.org (ash.osuosl.org [140.211.166.34])\n\tby hemlock.osuosl.org (Postfix) with ESMTP id 6DE0588193;\n\tWed, 23 Oct 2019 18:38:24 +0000 (UTC)",
            "from silver.osuosl.org (smtp3.osuosl.org [140.211.166.136])\n\tby ash.osuosl.org (Postfix) with ESMTP id DD4D11BF48D\n\tfor <intel-wired-lan@lists.osuosl.org>;\n\tWed, 23 Oct 2019 18:38:21 +0000 (UTC)",
            "from localhost (localhost [127.0.0.1])\n\tby silver.osuosl.org (Postfix) with ESMTP id A1F6922CC6\n\tfor <intel-wired-lan@lists.osuosl.org>;\n\tWed, 23 Oct 2019 18:38:21 +0000 (UTC)",
            "from silver.osuosl.org ([127.0.0.1])\n\tby localhost (.osuosl.org [127.0.0.1]) (amavisd-new, port 10024)\n\twith ESMTP id CLHzqWgYTY3C for <intel-wired-lan@lists.osuosl.org>;\n\tWed, 23 Oct 2019 18:37:59 +0000 (UTC)",
            "from mga14.intel.com (mga14.intel.com [192.55.52.115])\n\tby silver.osuosl.org (Postfix) with ESMTPS id 86DED22920\n\tfor <intel-wired-lan@lists.osuosl.org>;\n\tWed, 23 Oct 2019 18:37:57 +0000 (UTC)",
            "from fmsmga002.fm.intel.com ([10.253.24.26])\n\tby fmsmga103.fm.intel.com with ESMTP/TLS/DHE-RSA-AES256-GCM-SHA384;\n\t23 Oct 2019 11:37:57 -0700",
            "from ssaleem-mobl.amr.corp.intel.com ([10.122.128.45])\n\tby fmsmga002.fm.intel.com with ESMTP; 23 Oct 2019 11:37:56 -0700"
        ],
        "X-Virus-Scanned": [
            "amavisd-new at osuosl.org",
            "amavisd-new at osuosl.org"
        ],
        "X-Greylist": "domain auto-whitelisted by SQLgrey-1.7.6",
        "X-Amp-Result": "SKIPPED(no attachment in message)",
        "X-Amp-File-Uploaded": "False",
        "X-ExtLoop1": "1",
        "X-IronPort-AV": "E=Sophos;i=\"5.68,221,1569308400\"; d=\"scan'208\";a=\"228225080\"",
        "From": "Shiraz Saleem <shiraz.saleem@intel.com>",
        "To": "intel-wired-lan@lists.osuosl.org",
        "Date": "Wed, 23 Oct 2019 13:22:47 -0500",
        "Message-Id": "<20191023182253.1115-12-shiraz.saleem@intel.com>",
        "X-Mailer": "git-send-email 2.21.0",
        "In-Reply-To": "<20191023182253.1115-1-shiraz.saleem@intel.com>",
        "References": "<20191023182253.1115-1-shiraz.saleem@intel.com>",
        "MIME-Version": "1.0",
        "Subject": "[Intel-wired-lan] [PATCH rdma-nxt 11/16] RDMA/irdma: Add\n\tuser/kernel shared libraries",
        "X-BeenThere": "intel-wired-lan@osuosl.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "Intel Wired Ethernet Linux Kernel Driver Development\n\t<intel-wired-lan.osuosl.org>",
        "List-Unsubscribe": "<https://lists.osuosl.org/mailman/options/intel-wired-lan>, \n\t<mailto:intel-wired-lan-request@osuosl.org?subject=unsubscribe>",
        "List-Archive": "<http://lists.osuosl.org/pipermail/intel-wired-lan/>",
        "List-Post": "<mailto:intel-wired-lan@osuosl.org>",
        "List-Help": "<mailto:intel-wired-lan-request@osuosl.org?subject=help>",
        "List-Subscribe": "<https://lists.osuosl.org/mailman/listinfo/intel-wired-lan>, \n\t<mailto:intel-wired-lan-request@osuosl.org?subject=subscribe>",
        "Cc": "Mustafa Ismail <mustafa.ismail@intel.com>,\n\tShiraz Saleem <shiraz.saleem@intel.com>",
        "Content-Type": "text/plain; charset=\"us-ascii\"",
        "Content-Transfer-Encoding": "7bit",
        "Errors-To": "intel-wired-lan-bounces@osuosl.org",
        "Sender": "\"Intel-wired-lan\" <intel-wired-lan-bounces@osuosl.org>"
    },
    "content": "From: Mustafa Ismail <mustafa.ismail@intel.com>\n\nBuilding the WQE descriptors for different verb\noperations are similar in kernel and user-space.\nAdd these shared libraries.\n\nSigned-off-by: Mustafa Ismail <mustafa.ismail@intel.com>\nSigned-off-by: Shiraz Saleem <shiraz.saleem@intel.com>\n---\n drivers/infiniband/hw/irdma/uk.c   | 1739 ++++++++++++++++++++++++++++++++++++\n drivers/infiniband/hw/irdma/user.h |  449 ++++++++++\n 2 files changed, 2188 insertions(+)\n create mode 100644 drivers/infiniband/hw/irdma/uk.c\n create mode 100644 drivers/infiniband/hw/irdma/user.h",
    "diff": "diff --git a/drivers/infiniband/hw/irdma/uk.c b/drivers/infiniband/hw/irdma/uk.c\nnew file mode 100644\nindex 0000000..f513773\n--- /dev/null\n+++ b/drivers/infiniband/hw/irdma/uk.c\n@@ -0,0 +1,1739 @@\n+// SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB\n+/* Copyright (c) 2019, Intel Corporation. */\n+\n+#include \"osdep.h\"\n+#include \"status.h\"\n+#include \"defs.h\"\n+#include \"user.h\"\n+#include \"irdma.h\"\n+\n+/**\n+ * irdma_set_fragment - set fragment in wqe\n+ * @wqe: wqe for setting fragment\n+ * @offset: offset value\n+ * @sge: sge length and stag\n+ * @valid: The wqe valid\n+ */\n+static void irdma_set_fragment(__le64 *wqe, u32 offset, struct irdma_sge *sge,\n+\t\t\t       u8 valid)\n+{\n+\tif (sge) {\n+\t\tset_64bit_val(wqe, offset,\n+\t\t\t      LS_64(sge->tag_off, IRDMAQPSQ_FRAG_TO));\n+\t\tset_64bit_val(wqe, offset + 8,\n+\t\t\t      LS_64(valid, IRDMAQPSQ_VALID) |\n+\t\t\t      LS_64(sge->len, IRDMAQPSQ_FRAG_LEN) |\n+\t\t\t      LS_64(sge->stag, IRDMAQPSQ_FRAG_STAG));\n+\t} else {\n+\t\tset_64bit_val(wqe, offset, 0);\n+\t\tset_64bit_val(wqe, offset + 8,\n+\t\t\t      LS_64(valid, IRDMAQPSQ_VALID));\n+\t}\n+}\n+\n+/**\n+ * irdma_set_fragment_gen_1 - set fragment in wqe\n+ * @wqe: wqe for setting fragment\n+ * @offset: offset value\n+ * @sge: sge length and stag\n+ * @valid: wqe valid flag\n+ */\n+static void irdma_set_fragment_gen_1(__le64 *wqe, u32 offset,\n+\t\t\t\t     struct irdma_sge *sge, u8 valid)\n+{\n+\tif (sge) {\n+\t\tset_64bit_val(wqe, offset,\n+\t\t\t      LS_64(sge->tag_off, IRDMAQPSQ_FRAG_TO));\n+\t\tset_64bit_val(wqe, offset + 8,\n+\t\t\t      LS_64(sge->len, IRDMAQPSQ_GEN1_FRAG_LEN) |\n+\t\t\t      LS_64(sge->stag, IRDMAQPSQ_GEN1_FRAG_STAG));\n+\t} else {\n+\t\tset_64bit_val(wqe, offset, 0);\n+\t\tset_64bit_val(wqe, offset + 8, 0);\n+\t}\n+}\n+\n+/**\n+ * irdma_nop_1 - insert a NOP wqe\n+ * @qp: hw qp ptr\n+ */\n+static enum irdma_status_code irdma_nop_1(struct irdma_qp_uk *qp)\n+{\n+\tu64 hdr;\n+\t__le64 *wqe;\n+\tu32 wqe_idx;\n+\tbool signaled = false;\n+\n+\tif (!qp->sq_ring.head)\n+\t\treturn IRDMA_ERR_PARAM;\n+\n+\twqe_idx = IRDMA_RING_CURRENT_HEAD(qp->sq_ring);\n+\twqe = qp->sq_base[wqe_idx].elem;\n+\n+\tqp->sq_wrtrk_array[wqe_idx].quanta = IRDMA_QP_WQE_MIN_QUANTA;\n+\n+\tset_64bit_val(wqe, 0, 0);\n+\tset_64bit_val(wqe, 8, 0);\n+\tset_64bit_val(wqe, 16, 0);\n+\n+\thdr = LS_64(IRDMAQP_OP_NOP, IRDMAQPSQ_OPCODE) |\n+\t      LS_64(signaled, IRDMAQPSQ_SIGCOMPL) |\n+\t      LS_64(qp->swqe_polarity, IRDMAQPSQ_VALID);\n+\n+\t/* make sure WQE is written before valid bit is set */\n+\tdma_wmb();\n+\n+\tset_64bit_val(wqe, 24, hdr);\n+\n+\treturn 0;\n+}\n+\n+/**\n+ * irdma_clear_wqes - clear next 128 sq entries\n+ * @qp: hw qp ptr\n+ * @qp_wqe_idx: wqe_idx\n+ */\n+void irdma_clr_wqes(struct irdma_qp_uk *qp, u32 qp_wqe_idx)\n+{\n+\tu64 wqe_addr;\n+\tu32 wqe_idx;\n+\n+\tif (!(qp_wqe_idx & 0x7F)) {\n+\t\twqe_idx = (qp_wqe_idx + 128) % qp->sq_ring.size;\n+\t\twqe_addr = (u64)qp->sq_base->elem + IRDMA_WQE_SIZE_32 * wqe_idx;\n+\n+\t\tif (wqe_idx)\n+\t\t\tmemset((void *)wqe_addr, qp->swqe_polarity ? 0 : 0xFF, 0x1000);\n+\t\telse\n+\t\t\tmemset((void *)wqe_addr, qp->swqe_polarity ? 
0xFF : 0, 0x1000);\n+\t}\n+}\n+\n+/**\n+ * irdma_qp_post_wr - ring doorbell\n+ * @qp: hw qp ptr\n+ */\n+void irdma_qp_post_wr(struct irdma_qp_uk *qp)\n+{\n+\tu64 temp;\n+\tu32 hw_sq_tail;\n+\tu32 sw_sq_head;\n+\n+\t/* valid bit is written and loads completed before reading shadow */\n+\tmb();\n+\n+\t/* read the doorbell shadow area */\n+\tget_64bit_val(qp->shadow_area, 0, &temp);\n+\n+\thw_sq_tail = (u32)RS_64(temp, IRDMA_QP_DBSA_HW_SQ_TAIL);\n+\tsw_sq_head = IRDMA_RING_CURRENT_HEAD(qp->sq_ring);\n+\tif (sw_sq_head != qp->initial_ring.head) {\n+\t\tif (qp->push_mode) {\n+\t\t\twritel(qp->qp_id, qp->wqe_alloc_db);\n+\t\t\tqp->push_mode = false;\n+\t\t} else if (sw_sq_head != hw_sq_tail) {\n+\t\t\tif (sw_sq_head > qp->initial_ring.head) {\n+\t\t\t\tif (hw_sq_tail >= qp->initial_ring.head &&\n+\t\t\t\t    hw_sq_tail < sw_sq_head)\n+\t\t\t\t\twritel(qp->qp_id, qp->wqe_alloc_db);\n+\t\t\t} else {\n+\t\t\t\tif (hw_sq_tail >= qp->initial_ring.head ||\n+\t\t\t\t    hw_sq_tail < sw_sq_head)\n+\t\t\t\t\twritel(qp->qp_id, qp->wqe_alloc_db);\n+\t\t\t}\n+\t\t}\n+\t}\n+\n+\tqp->initial_ring.head = qp->sq_ring.head;\n+}\n+\n+/**\n+ * irdma_qp_ring_push_db -  ring qp doorbell\n+ * @qp: hw qp ptr\n+ * @wqe_idx: wqe index\n+ */\n+static void irdma_qp_ring_push_db(struct irdma_qp_uk *qp, u32 wqe_idx)\n+{\n+\tset_32bit_val(qp->push_db, 0,\n+\t\t      LS_32(wqe_idx >> 3, IRDMA_WQEALLOC_WQE_DESC_INDEX) | qp->qp_id);\n+\tqp->initial_ring.head = qp->sq_ring.head;\n+\tqp->push_mode = true;\n+}\n+\n+void irdma_qp_push_wqe(struct irdma_qp_uk *qp, __le64 *wqe, u16 quanta,\n+\t\t       u32 wqe_idx, bool post_sq)\n+{\n+\t__le64 *push;\n+\n+\tif (IRDMA_RING_CURRENT_HEAD(qp->initial_ring) !=\n+\t\t    IRDMA_RING_CURRENT_TAIL(qp->sq_ring) &&\n+\t    !(qp->push_mode)) {\n+\t\tif (post_sq)\n+\t\t\tirdma_qp_post_wr(qp);\n+\t} else {\n+\t\tpush = (__le64 *)((uintptr_t)qp->push_wqe +\n+\t\t\t\t  (wqe_idx & 0x7) * 0x20);\n+\t\tmemcpy(push, wqe, quanta * IRDMA_QP_WQE_MIN_SIZE);\n+\t\t\tirdma_qp_ring_push_db(qp, wqe_idx);\n+\t}\n+}\n+\n+/**\n+ * irdma_qp_get_next_send_wqe - pad with NOP if needed, return where next WR should go\n+ * @qp: hw qp ptr\n+ * @wqe_idx: return wqe index\n+ * @quanta: size of WR in quanta\n+ * @total_size: size of WR in bytes\n+ * @info: info on WR\n+ */\n+__le64 *irdma_qp_get_next_send_wqe(struct irdma_qp_uk *qp, u32 *wqe_idx,\n+\t\t\t\t   u16 quanta, u32 total_size,\n+\t\t\t\t   struct irdma_post_sq_info *info)\n+{\n+\t__le64 *wqe;\n+\t__le64 *wqe_0 = NULL;\n+\tu16 nop_cnt;\n+\tu16 i;\n+\n+\tnop_cnt = IRDMA_RING_CURRENT_HEAD(qp->sq_ring) %\n+\t\t  qp->uk_attrs->max_hw_sq_chunk;\n+\tif (nop_cnt)\n+\t\tnop_cnt = qp->uk_attrs->max_hw_sq_chunk - nop_cnt;\n+\n+\tif (quanta > nop_cnt) {\n+\t\t/* Need to pad with NOP */\n+\t\t/* Make sure SQ has room for nop_cnt + quanta */\n+\t\tif ((u32)(quanta + nop_cnt) >\n+\t\t\tIRDMA_SQ_RING_FREE_QUANTA(qp->sq_ring))\n+\t\t\treturn NULL;\n+\n+\t\t/* pad with NOP */\n+\t\tfor (i = 0; i < nop_cnt; i++) {\n+\t\t\tirdma_nop_1(qp);\n+\t\t\tIRDMA_RING_MOVE_HEAD_NOCHECK(qp->sq_ring);\n+\t\t}\n+\t\tinfo->push_wqe = false;\n+\t} else {\n+\t\t/* no need to pad with NOP */\n+\t\tif (quanta > IRDMA_SQ_RING_FREE_QUANTA(qp->sq_ring))\n+\t\t\treturn NULL;\n+\t}\n+\n+\t*wqe_idx = IRDMA_RING_CURRENT_HEAD(qp->sq_ring);\n+\tif (!*wqe_idx)\n+\t\tqp->swqe_polarity = !qp->swqe_polarity;\n+\n+\tIRDMA_RING_MOVE_HEAD_BY_COUNT_NOCHECK(qp->sq_ring, quanta);\n+\n+\twqe = qp->sq_base[*wqe_idx].elem;\n+\tif (qp->uk_attrs->hw_rev == IRDMA_GEN_1 && quanta == 1 &&\n+\t    
(IRDMA_RING_CURRENT_HEAD(qp->sq_ring) & 1)) {\n+\t\twqe_0 = qp->sq_base[IRDMA_RING_CURRENT_HEAD(qp->sq_ring)].elem;\n+\t\twqe_0[3] = cpu_to_le64(LS_64(!qp->swqe_polarity, IRDMAQPSQ_VALID));\n+\t}\n+\tqp->sq_wrtrk_array[*wqe_idx].wrid = info->wr_id;\n+\tqp->sq_wrtrk_array[*wqe_idx].wr_len = total_size;\n+\tqp->sq_wrtrk_array[*wqe_idx].quanta = quanta;\n+\n+\treturn wqe;\n+}\n+\n+/**\n+ * irdma_qp_get_next_recv_wqe - get next qp's rcv wqe\n+ * @qp: hw qp ptr\n+ * @wqe_idx: return wqe index\n+ */\n+__le64 *irdma_qp_get_next_recv_wqe(struct irdma_qp_uk *qp, u32 *wqe_idx)\n+{\n+\t__le64 *wqe;\n+\tenum irdma_status_code ret_code;\n+\n+\tif (IRDMA_RING_FULL_ERR(qp->rq_ring))\n+\t\treturn NULL;\n+\n+\tIRDMA_ATOMIC_RING_MOVE_HEAD(qp->rq_ring, *wqe_idx, ret_code);\n+\tif (ret_code)\n+\t\treturn NULL;\n+\n+\tif (!*wqe_idx)\n+\t\tqp->rwqe_polarity = !qp->rwqe_polarity;\n+\t/* rq_wqe_size_multiplier is no of 32 byte quanta in in one rq wqe */\n+\twqe = qp->rq_base[*wqe_idx * qp->rq_wqe_size_multiplier].elem;\n+\n+\treturn wqe;\n+}\n+\n+/**\n+ * irdma_rdma_write - rdma write operation\n+ * @qp: hw qp ptr\n+ * @info: post sq information\n+ * @post_sq: flag to post sq\n+ */\n+static enum irdma_status_code irdma_rdma_write(struct irdma_qp_uk *qp,\n+\t\t\t\t\t       struct irdma_post_sq_info *info,\n+\t\t\t\t\t       bool post_sq)\n+{\n+\tu64 hdr;\n+\t__le64 *wqe;\n+\tstruct irdma_rdma_write *op_info;\n+\tu32 i, wqe_idx;\n+\tu32 total_size = 0, byte_off;\n+\tenum irdma_status_code ret_code;\n+\tu32 frag_cnt, addl_frag_cnt;\n+\tbool read_fence = false;\n+\tu16 quanta;\n+\n+\tinfo->push_wqe = qp->push_db ? true : false;\n+\n+\top_info = &info->op.rdma_write;\n+\tif (op_info->num_lo_sges > qp->max_sq_frag_cnt)\n+\t\treturn IRDMA_ERR_INVALID_FRAG_COUNT;\n+\n+\tfor (i = 0; i < op_info->num_lo_sges; i++)\n+\t\ttotal_size += op_info->lo_sg_list[i].len;\n+\n+\tread_fence |= info->read_fence;\n+\n+\tif (info->imm_data_valid)\n+\t\tfrag_cnt = op_info->num_lo_sges + 1;\n+\telse\n+\t\tfrag_cnt = op_info->num_lo_sges;\n+\taddl_frag_cnt = frag_cnt > 1 ? (frag_cnt - 1) : 0;\n+\tret_code = irdma_fragcnt_to_quanta_sq(frag_cnt, &quanta);\n+\tif (ret_code)\n+\t\treturn ret_code;\n+\n+\twqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, quanta, total_size,\n+\t\t\t\t\t info);\n+\tif (!wqe)\n+\t\treturn IRDMA_ERR_QP_TOOMANY_WRS_POSTED;\n+\n+\tirdma_clr_wqes(qp, wqe_idx);\n+\n+\tset_64bit_val(wqe, 16,\n+\t\t      LS_64(op_info->rem_addr.tag_off, IRDMAQPSQ_FRAG_TO));\n+\n+\tif (info->imm_data_valid) {\n+\t\tset_64bit_val(wqe, 0,\n+\t\t\t      LS_64(info->imm_data, IRDMAQPSQ_IMMDATA));\n+\t\ti = 0;\n+\t} else {\n+\t\tqp->wqe_ops.iw_set_fragment(wqe, 0,\n+\t\t\t\t\t    op_info->lo_sg_list,\n+\t\t\t\t\t    qp->swqe_polarity);\n+\t\ti = 1;\n+\t}\n+\n+\tfor (byte_off = 32; i < op_info->num_lo_sges; i++) {\n+\t\tqp->wqe_ops.iw_set_fragment(wqe, byte_off,\n+\t\t\t\t\t    &op_info->lo_sg_list[i],\n+\t\t\t\t\t    qp->swqe_polarity);\n+\t\tbyte_off += 16;\n+\t}\n+\n+\t/* if not an odd number set valid bit in next fragment */\n+\tif (qp->uk_attrs->hw_rev > IRDMA_GEN_1 && !(frag_cnt & 0x01) &&\n+\t    frag_cnt) {\n+\t\tqp->wqe_ops.iw_set_fragment(wqe, byte_off, NULL,\n+\t\t\t\t\t    qp->swqe_polarity);\n+\t\tif (qp->uk_attrs->hw_rev == IRDMA_GEN_2)\n+\t\t\t++addl_frag_cnt;\n+\t}\n+\n+\thdr = LS_64(op_info->rem_addr.stag, IRDMAQPSQ_REMSTAG) |\n+\t      LS_64(info->op_type, IRDMAQPSQ_OPCODE) |\n+\t      LS_64((info->imm_data_valid ? 1 : 0), IRDMAQPSQ_IMMDATAFLAG) |\n+\t      LS_64((info->report_rtt ? 
1 : 0), IRDMAQPSQ_REPORTRTT) |\n+\t      LS_64(addl_frag_cnt, IRDMAQPSQ_ADDFRAGCNT) |\n+\t      LS_64((info->push_wqe ? 1 : 0), IRDMAQPSQ_PUSHWQE) |\n+\t      LS_64(read_fence, IRDMAQPSQ_READFENCE) |\n+\t      LS_64(info->local_fence, IRDMAQPSQ_LOCALFENCE) |\n+\t      LS_64(info->signaled, IRDMAQPSQ_SIGCOMPL) |\n+\t      LS_64(qp->swqe_polarity, IRDMAQPSQ_VALID);\n+\n+\tdma_wmb(); /* make sure WQE is populated before valid bit is set */\n+\n+\tset_64bit_val(wqe, 24, hdr);\n+\tif (info->push_wqe) {\n+\t\tirdma_qp_push_wqe(qp, wqe, quanta, wqe_idx, post_sq);\n+\t} else {\n+\t\tif (post_sq)\n+\t\t\tirdma_qp_post_wr(qp);\n+\t}\n+\n+\treturn 0;\n+}\n+\n+/**\n+ * irdma_rdma_read - rdma read command\n+ * @qp: hw qp ptr\n+ * @info: post sq information\n+ * @inv_stag: flag for inv_stag\n+ * @post_sq: flag to post sq\n+ */\n+static enum irdma_status_code irdma_rdma_read(struct irdma_qp_uk *qp,\n+\t\t\t\t\t      struct irdma_post_sq_info *info,\n+\t\t\t\t\t      bool inv_stag, bool post_sq)\n+{\n+\tstruct irdma_rdma_read *op_info;\n+\tenum irdma_status_code ret_code;\n+\tu32 i, byte_off, total_size = 0;\n+\tbool local_fence = false;\n+\tu32 addl_frag_cnt;\n+\t__le64 *wqe;\n+\tu32 wqe_idx;\n+\tu16 quanta;\n+\tu64 hdr;\n+\n+\tinfo->push_wqe = qp->push_db ? true : false;\n+\n+\top_info = &info->op.rdma_read;\n+\tif (qp->max_sq_frag_cnt < op_info->num_lo_sges)\n+\t\treturn IRDMA_ERR_INVALID_FRAG_COUNT;\n+\n+\tfor (i = 0; i < op_info->num_lo_sges; i++)\n+\t\ttotal_size += op_info->lo_sg_list[i].len;\n+\n+\tret_code = irdma_fragcnt_to_quanta_sq(op_info->num_lo_sges, &quanta);\n+\tif (ret_code)\n+\t\treturn ret_code;\n+\n+\twqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, quanta, total_size,\n+\t\t\t\t\t info);\n+\tif (!wqe)\n+\t\treturn IRDMA_ERR_QP_TOOMANY_WRS_POSTED;\n+\n+\tirdma_clr_wqes(qp, wqe_idx);\n+\n+\taddl_frag_cnt = op_info->num_lo_sges > 1 ?\n+\t\t\t(op_info->num_lo_sges - 1) : 0;\n+\tlocal_fence |= info->local_fence;\n+\n+\tqp->wqe_ops.iw_set_fragment(wqe, 0, op_info->lo_sg_list,\n+\t\t\t\t    qp->swqe_polarity);\n+\tfor (i = 1, byte_off = 32; i < op_info->num_lo_sges; ++i) {\n+\t\tqp->wqe_ops.iw_set_fragment(wqe, byte_off,\n+\t\t\t\t\t    &op_info->lo_sg_list[i],\n+\t\t\t\t\t    qp->swqe_polarity);\n+\t\tbyte_off += 16;\n+\t}\n+\n+\t/* if not an odd number set valid bit in next fragment */\n+\tif (qp->uk_attrs->hw_rev > IRDMA_GEN_1 &&\n+\t    !(op_info->num_lo_sges & 0x01) && op_info->num_lo_sges) {\n+\t\tqp->wqe_ops.iw_set_fragment(wqe, byte_off, NULL,\n+\t\t\t\t\t    qp->swqe_polarity);\n+\t\tif (qp->uk_attrs->hw_rev == IRDMA_GEN_2)\n+\t\t\t++addl_frag_cnt;\n+\t}\n+\tset_64bit_val(wqe, 16,\n+\t\t      LS_64(op_info->rem_addr.tag_off, IRDMAQPSQ_FRAG_TO));\n+\thdr = LS_64(op_info->rem_addr.stag, IRDMAQPSQ_REMSTAG) |\n+\t      LS_64((info->report_rtt ? 1 : 0), IRDMAQPSQ_REPORTRTT) |\n+\t      LS_64(addl_frag_cnt, IRDMAQPSQ_ADDFRAGCNT) |\n+\t      LS_64((inv_stag ? IRDMAQP_OP_RDMA_READ_LOC_INV : IRDMAQP_OP_RDMA_READ),\n+\t\t    IRDMAQPSQ_OPCODE) |\n+\t      LS_64((info->push_wqe ? 1 : 0), IRDMAQPSQ_PUSHWQE) |\n+\t      LS_64(info->read_fence || qp->force_fence ? 
1 : 0,\n+\t\t    IRDMAQPSQ_READFENCE) |\n+\t      LS_64(local_fence, IRDMAQPSQ_LOCALFENCE) |\n+\t      LS_64(info->signaled, IRDMAQPSQ_SIGCOMPL) |\n+\t      LS_64(qp->swqe_polarity, IRDMAQPSQ_VALID);\n+\n+\tdma_wmb(); /* make sure WQE is populated before valid bit is set */\n+\n+\tset_64bit_val(wqe, 24, hdr);\n+\tif (info->push_wqe) {\n+\t\tirdma_qp_push_wqe(qp, wqe, quanta, wqe_idx, post_sq);\n+\t} else {\n+\t\tif (post_sq)\n+\t\t\tirdma_qp_post_wr(qp);\n+\t}\n+\n+\treturn 0;\n+}\n+\n+/**\n+ * irdma_send - rdma send command\n+ * @qp: hw qp ptr\n+ * @info: post sq information\n+ * @post_sq: flag to post sq\n+ */\n+static enum irdma_status_code irdma_send(struct irdma_qp_uk *qp,\n+\t\t\t\t\t struct irdma_post_sq_info *info,\n+\t\t\t\t\t bool post_sq)\n+{\n+\t__le64 *wqe;\n+\tstruct irdma_post_send *op_info;\n+\tu64 hdr;\n+\tu32 i, wqe_idx, total_size = 0, byte_off;\n+\tenum irdma_status_code ret_code;\n+\tu32 frag_cnt, addl_frag_cnt;\n+\tbool read_fence = false;\n+\tu16 quanta;\n+\n+\tinfo->push_wqe = qp->push_db ? true : false;\n+\n+\top_info = &info->op.send;\n+\tif (qp->max_sq_frag_cnt < op_info->num_sges)\n+\t\treturn IRDMA_ERR_INVALID_FRAG_COUNT;\n+\n+\tfor (i = 0; i < op_info->num_sges; i++)\n+\t\ttotal_size += op_info->sg_list[i].len;\n+\n+\tif (info->imm_data_valid)\n+\t\tfrag_cnt = op_info->num_sges + 1;\n+\telse\n+\t\tfrag_cnt = op_info->num_sges;\n+\tret_code = irdma_fragcnt_to_quanta_sq(frag_cnt, &quanta);\n+\tif (ret_code)\n+\t\treturn ret_code;\n+\n+\twqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, quanta, total_size,\n+\t\t\t\t\t info);\n+\tif (!wqe)\n+\t\treturn IRDMA_ERR_QP_TOOMANY_WRS_POSTED;\n+\n+\tirdma_clr_wqes(qp, wqe_idx);\n+\n+\tread_fence |= info->read_fence;\n+\taddl_frag_cnt = frag_cnt > 1 ? (frag_cnt - 1) : 0;\n+\tif (info->imm_data_valid) {\n+\t\tset_64bit_val(wqe, 0,\n+\t\t\t      LS_64(info->imm_data, IRDMAQPSQ_IMMDATA));\n+\t\ti = 0;\n+\t} else {\n+\t\tqp->wqe_ops.iw_set_fragment(wqe, 0, op_info->sg_list,\n+\t\t\t\t\t    qp->swqe_polarity);\n+\t\ti = 1;\n+\t}\n+\n+\tfor (byte_off = 32; i < op_info->num_sges; i++) {\n+\t\tqp->wqe_ops.iw_set_fragment(wqe, byte_off, &op_info->sg_list[i],\n+\t\t\t\t\t    qp->swqe_polarity);\n+\t\tbyte_off += 16;\n+\t}\n+\n+\t/* if not an odd number set valid bit in next fragment */\n+\tif (qp->uk_attrs->hw_rev > IRDMA_GEN_1 && !(frag_cnt & 0x01) &&\n+\t    frag_cnt) {\n+\t\tqp->wqe_ops.iw_set_fragment(wqe, byte_off, NULL,\n+\t\t\t\t\t    qp->swqe_polarity);\n+\t\tif (qp->uk_attrs->hw_rev == IRDMA_GEN_2)\n+\t\t\t++addl_frag_cnt;\n+\t}\n+\n+\tset_64bit_val(wqe, 16,\n+\t\t      LS_64(op_info->qkey, IRDMAQPSQ_DESTQKEY) |\n+\t\t      LS_64(op_info->dest_qp, IRDMAQPSQ_DESTQPN));\n+\thdr = LS_64(info->stag_to_inv, IRDMAQPSQ_REMSTAG) |\n+\t      LS_64(op_info->ah_id, IRDMAQPSQ_AHID) |\n+\t      LS_64((info->imm_data_valid ? 1 : 0), IRDMAQPSQ_IMMDATAFLAG) |\n+\t      LS_64((info->report_rtt ? 1 : 0), IRDMAQPSQ_REPORTRTT) |\n+\t      LS_64(info->op_type, IRDMAQPSQ_OPCODE) |\n+\t      LS_64(addl_frag_cnt, IRDMAQPSQ_ADDFRAGCNT) |\n+\t      LS_64((info->push_wqe ? 
1 : 0), IRDMAQPSQ_PUSHWQE) |\n+\t      LS_64(read_fence, IRDMAQPSQ_READFENCE) |\n+\t      LS_64(info->local_fence, IRDMAQPSQ_LOCALFENCE) |\n+\t      LS_64(info->signaled, IRDMAQPSQ_SIGCOMPL) |\n+\t      LS_64(info->udp_hdr, IRDMAQPSQ_UDPHEADER) |\n+\t      LS_64(info->l4len, IRDMAQPSQ_L4LEN) |\n+\t      LS_64(qp->swqe_polarity, IRDMAQPSQ_VALID);\n+\n+\tdma_wmb(); /* make sure WQE is populated before valid bit is set */\n+\n+\tset_64bit_val(wqe, 24, hdr);\n+\tif (info->push_wqe) {\n+\t\tirdma_qp_push_wqe(qp, wqe, quanta, wqe_idx, post_sq);\n+\t} else {\n+\t\tif (post_sq)\n+\t\t\tirdma_qp_post_wr(qp);\n+\t}\n+\n+\treturn 0;\n+}\n+\n+/**\n+ * irdma_set_mw_bind_wqe_gen_1 - set mw bind wqe\n+ * @wqe: wqe for setting fragment\n+ * @op_info: info for setting bind wqe values\n+ */\n+static void irdma_set_mw_bind_wqe_gen_1(__le64 *wqe,\n+\t\t\t\t\tstruct irdma_bind_window *op_info)\n+{\n+\tset_64bit_val(wqe, 0, (uintptr_t)op_info->va);\n+\tset_64bit_val(wqe, 8,\n+\t\t      LS_64(op_info->mw_stag, IRDMAQPSQ_PARENTMRSTAG) |\n+\t\t      LS_64(op_info->mr_stag, IRDMAQPSQ_MWSTAG));\n+\tset_64bit_val(wqe, 16, op_info->bind_len);\n+}\n+\n+/**\n+ * irdma_copy_inline_data_gen_1 - Copy inline data to wqe\n+ * @dest: pointer to wqe\n+ * @src: pointer to inline data\n+ * @len: length of inline data to copy\n+ * @polarity: compatibility parameter\n+ */\n+static void irdma_copy_inline_data_gen_1(u8 *dest, u8 *src, u32 len,\n+\t\t\t\t\t u8 polarity)\n+{\n+\tif (len <= 16) {\n+\t\tmemcpy(dest, src, len);\n+\t} else {\n+\t\tmemcpy(dest, src, 16);\n+\t\tsrc += 16;\n+\t\tdest = dest + 32;\n+\t\tmemcpy(dest, src, len - 16);\n+\t}\n+}\n+\n+/**\n+ * irdma_inline_data_size_to_quanta_gen_1 - based on inline data, quanta\n+ * @data_size: data size for inline\n+ * @quanta: size of sq wqe returned\n+ * @max_size: maximum allowed inline size\n+ *\n+ * Gets the quanta based on inline and immediate data.\n+ */\n+static enum irdma_status_code\n+irdma_inline_data_size_to_quanta_gen_1(u32 data_size, u16 *quanta, u32 max_size)\n+{\n+\tif (data_size > max_size)\n+\t\treturn IRDMA_ERR_INVALID_INLINE_DATA_SIZE;\n+\n+\tif (data_size <= 16)\n+\t\t*quanta = IRDMA_QP_WQE_MIN_QUANTA;\n+\telse\n+\t\t*quanta = 2;\n+\n+\treturn 0;\n+}\n+\n+/**\n+ * irdma_set_mw_bind_wqe - set mw bind in wqe\n+ * @wqe: wqe for setting mw bind\n+ * @op_info: info for setting wqe values\n+ */\n+static void irdma_set_mw_bind_wqe(__le64 *wqe,\n+\t\t\t\t  struct irdma_bind_window *op_info)\n+{\n+\tset_64bit_val(wqe, 0, (uintptr_t)op_info->va);\n+\tset_64bit_val(wqe, 8,\n+\t\t      LS_64(op_info->mr_stag, IRDMAQPSQ_PARENTMRSTAG) |\n+\t\t      LS_64(op_info->mw_stag, IRDMAQPSQ_MWSTAG));\n+\tset_64bit_val(wqe, 16, op_info->bind_len);\n+}\n+\n+/**\n+ * irdma_copy_inline_data - Copy inline data to wqe\n+ * @dest: pointer to wqe\n+ * @src: pointer to inline data\n+ * @len: length of inline data to copy\n+ * @polarity: polarity of wqe valid bit\n+ */\n+static void irdma_copy_inline_data(u8 *dest, u8 *src, u32 len, u8 polarity)\n+{\n+\tu8 inline_valid = polarity << IRDMA_INLINE_VALID_S;\n+\tu32 copy_size;\n+\n+\tdest += 8;\n+\tif (len <= 8) {\n+\t\tmemcpy(dest, src, len);\n+\t\treturn;\n+\t}\n+\n+\t*((u64 *)dest) = *((u64 *)src);\n+\tlen -= 8;\n+\tsrc += 8;\n+\tdest += 24; /* point to additional 32 byte quanta */\n+\n+\twhile (len) {\n+\t\tcopy_size = len < 31 ? 
len : 31;\n+\t\tmemcpy(dest, src, copy_size);\n+\t\t*(dest + 31) = inline_valid;\n+\t\tlen -= copy_size;\n+\t\tdest += 32;\n+\t\tsrc += copy_size;\n+\t}\n+}\n+\n+/**\n+ * irdma_inline_data_size_to_quanta - based on inline data, quanta\n+ * @data_size: data size for inline\n+ * @quanta: size of sq wqe returned\n+ * @max_size: maximum allowed inline size\n+ *\n+ * Gets the quanta based on inline and immediate data.\n+ */\n+static enum irdma_status_code\n+irdma_inline_data_size_to_quanta(u32 data_size, u16 *quanta, u32 max_size)\n+{\n+\tif (data_size > max_size)\n+\t\treturn IRDMA_ERR_INVALID_INLINE_DATA_SIZE;\n+\n+\tif (data_size <= 8)\n+\t\t*quanta = IRDMA_QP_WQE_MIN_QUANTA;\n+\telse if (data_size <= 39)\n+\t\t*quanta = 2;\n+\telse if (data_size <= 70)\n+\t\t*quanta = 3;\n+\telse if (data_size <= 101)\n+\t\t*quanta = 4;\n+\telse if (data_size <= 132)\n+\t\t*quanta = 5;\n+\telse if (data_size <= 163)\n+\t\t*quanta = 6;\n+\telse if (data_size <= 194)\n+\t\t*quanta = 7;\n+\telse\n+\t\t*quanta = 8;\n+\n+\treturn 0;\n+}\n+\n+/**\n+ * irdma_inline_rdma_write - inline rdma write operation\n+ * @qp: hw qp ptr\n+ * @info: post sq information\n+ * @post_sq: flag to post sq\n+ */\n+static enum irdma_status_code\n+irdma_inline_rdma_write(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info,\n+\t\t\tbool post_sq)\n+{\n+\t__le64 *wqe;\n+\tstruct irdma_inline_rdma_write *op_info;\n+\tu64 hdr = 0;\n+\tu32 wqe_idx;\n+\tenum irdma_status_code ret_code;\n+\tbool read_fence = false;\n+\tu16 quanta;\n+\n+\tinfo->push_wqe = qp->push_db ? true : false;\n+\top_info = &info->op.inline_rdma_write;\n+\tret_code = qp->wqe_ops.iw_inline_data_size_to_quanta(op_info->len, &quanta,\n+\t\t\t\t\t\t\t     qp->uk_attrs->max_hw_inline);\n+\tif (ret_code)\n+\t\treturn ret_code;\n+\n+\twqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, quanta, op_info->len,\n+\t\t\t\t\t info);\n+\tif (!wqe)\n+\t\treturn IRDMA_ERR_QP_TOOMANY_WRS_POSTED;\n+\n+\tirdma_clr_wqes(qp, wqe_idx);\n+\n+\tread_fence |= info->read_fence;\n+\tset_64bit_val(wqe, 16,\n+\t\t      LS_64(op_info->rem_addr.tag_off, IRDMAQPSQ_FRAG_TO));\n+\n+\thdr = LS_64(op_info->rem_addr.stag, IRDMAQPSQ_REMSTAG) |\n+\t      LS_64(info->op_type, IRDMAQPSQ_OPCODE) |\n+\t      LS_64(op_info->len, IRDMAQPSQ_INLINEDATALEN) |\n+\t      LS_64(info->report_rtt ? 1 : 0, IRDMAQPSQ_REPORTRTT) |\n+\t      LS_64(1, IRDMAQPSQ_INLINEDATAFLAG) |\n+\t      LS_64(info->imm_data_valid ? 1 : 0, IRDMAQPSQ_IMMDATAFLAG) |\n+\t      LS_64(info->push_wqe ? 
1 : 0, IRDMAQPSQ_PUSHWQE) |\n+\t      LS_64(read_fence, IRDMAQPSQ_READFENCE) |\n+\t      LS_64(info->local_fence, IRDMAQPSQ_LOCALFENCE) |\n+\t      LS_64(info->signaled, IRDMAQPSQ_SIGCOMPL) |\n+\t      LS_64(qp->swqe_polarity, IRDMAQPSQ_VALID);\n+\n+\tif (info->imm_data_valid)\n+\t\tset_64bit_val(wqe, 0,\n+\t\t\t      LS_64(info->imm_data, IRDMAQPSQ_IMMDATA));\n+\n+\tqp->wqe_ops.iw_copy_inline_data((u8 *)wqe, op_info->data, op_info->len,\n+\t\t\t\t\tqp->swqe_polarity);\n+\tdma_wmb(); /* make sure WQE is populated before valid bit is set */\n+\n+\tset_64bit_val(wqe, 24, hdr);\n+\n+\tif (info->push_wqe) {\n+\t\tirdma_qp_push_wqe(qp, wqe, quanta, wqe_idx, post_sq);\n+\t} else {\n+\t\tif (post_sq)\n+\t\t\tirdma_qp_post_wr(qp);\n+\t}\n+\n+\treturn 0;\n+}\n+\n+/**\n+ * irdma_inline_send - inline send operation\n+ * @qp: hw qp ptr\n+ * @info: post sq information\n+ * @post_sq: flag to post sq\n+ */\n+static enum irdma_status_code irdma_inline_send(struct irdma_qp_uk *qp,\n+\t\t\t\t\t\tstruct irdma_post_sq_info *info,\n+\t\t\t\t\t\tbool post_sq)\n+{\n+\t__le64 *wqe;\n+\tstruct irdma_post_inline_send *op_info;\n+\tu64 hdr;\n+\tu32 wqe_idx;\n+\tenum irdma_status_code ret_code;\n+\tbool read_fence = false;\n+\tu16 quanta;\n+\n+\tinfo->push_wqe = qp->push_db ? true : false;\n+\top_info = &info->op.inline_send;\n+\n+\tret_code = qp->wqe_ops.iw_inline_data_size_to_quanta(op_info->len,\n+\t\t\t\t\t\t\t     &quanta,\n+\t\t\t\t\t\t\t     qp->uk_attrs->max_hw_inline);\n+\tif (ret_code)\n+\t\treturn ret_code;\n+\n+\twqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, quanta, op_info->len,\n+\t\t\t\t\t info);\n+\tif (!wqe)\n+\t\treturn IRDMA_ERR_QP_TOOMANY_WRS_POSTED;\n+\n+\tirdma_clr_wqes(qp, wqe_idx);\n+\n+\tset_64bit_val(wqe, 16,\n+\t\t      LS_64(op_info->qkey, IRDMAQPSQ_DESTQKEY) |\n+\t\t      LS_64(op_info->dest_qp, IRDMAQPSQ_DESTQPN));\n+\n+\tread_fence |= info->read_fence;\n+\thdr = LS_64(info->stag_to_inv, IRDMAQPSQ_REMSTAG) |\n+\t      LS_64(op_info->ah_id, IRDMAQPSQ_AHID) |\n+\t      LS_64(info->op_type, IRDMAQPSQ_OPCODE) |\n+\t      LS_64(op_info->len, IRDMAQPSQ_INLINEDATALEN) |\n+\t      LS_64((info->imm_data_valid ? 1 : 0), IRDMAQPSQ_IMMDATAFLAG) |\n+\t      LS_64((info->report_rtt ? 1 : 0), IRDMAQPSQ_REPORTRTT) |\n+\t      LS_64(1, IRDMAQPSQ_INLINEDATAFLAG) |\n+\t      LS_64((info->push_wqe ? 
1 : 0), IRDMAQPSQ_PUSHWQE) |\n+\t      LS_64(read_fence, IRDMAQPSQ_READFENCE) |\n+\t      LS_64(info->local_fence, IRDMAQPSQ_LOCALFENCE) |\n+\t      LS_64(info->signaled, IRDMAQPSQ_SIGCOMPL) |\n+\t      LS_64(info->udp_hdr, IRDMAQPSQ_UDPHEADER) |\n+\t      LS_64(info->l4len, IRDMAQPSQ_L4LEN) |\n+\t      LS_64(qp->swqe_polarity, IRDMAQPSQ_VALID);\n+\n+\tif (info->imm_data_valid)\n+\t\tset_64bit_val(wqe, 0,\n+\t\t\t      LS_64(info->imm_data, IRDMAQPSQ_IMMDATA));\n+\tqp->wqe_ops.iw_copy_inline_data((u8 *)wqe, op_info->data, op_info->len,\n+\t\t\t\t\tqp->swqe_polarity);\n+\n+\tdma_wmb(); /* make sure WQE is populated before valid bit is set */\n+\n+\tset_64bit_val(wqe, 24, hdr);\n+\n+\tif (info->push_wqe) {\n+\t\tirdma_qp_push_wqe(qp, wqe, quanta, wqe_idx, post_sq);\n+\t} else {\n+\t\tif (post_sq)\n+\t\t\tirdma_qp_post_wr(qp);\n+\t}\n+\n+\treturn 0;\n+}\n+\n+/**\n+ * irdma_stag_local_invalidate - stag invalidate operation\n+ * @qp: hw qp ptr\n+ * @info: post sq information\n+ * @post_sq: flag to post sq\n+ */\n+static enum irdma_status_code\n+irdma_stag_local_invalidate(struct irdma_qp_uk *qp,\n+\t\t\t    struct irdma_post_sq_info *info, bool post_sq)\n+{\n+\t__le64 *wqe;\n+\tstruct irdma_inv_local_stag *op_info;\n+\tu64 hdr;\n+\tu32 wqe_idx;\n+\tbool local_fence = false;\n+\tstruct irdma_sge sge = {};\n+\n+\tinfo->push_wqe = qp->push_db ? true : false;\n+\top_info = &info->op.inv_local_stag;\n+\tlocal_fence = info->local_fence;\n+\n+\twqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, IRDMA_QP_WQE_MIN_QUANTA,\n+\t\t\t\t\t 0, info);\n+\tif (!wqe)\n+\t\treturn IRDMA_ERR_QP_TOOMANY_WRS_POSTED;\n+\n+\tirdma_clr_wqes(qp, wqe_idx);\n+\n+\tsge.stag = op_info->target_stag;\n+\tqp->wqe_ops.iw_set_fragment(wqe, 0, &sge, 0);\n+\n+\tset_64bit_val(wqe, 16, 0);\n+\n+\thdr = LS_64(IRDMA_OP_TYPE_INV_STAG, IRDMAQPSQ_OPCODE) |\n+\t      LS_64((info->push_wqe ? 1 : 0), IRDMAQPSQ_PUSHWQE) |\n+\t      LS_64(info->read_fence, IRDMAQPSQ_READFENCE) |\n+\t      LS_64(local_fence, IRDMAQPSQ_LOCALFENCE) |\n+\t      LS_64(info->signaled, IRDMAQPSQ_SIGCOMPL) |\n+\t      LS_64(qp->swqe_polarity, IRDMAQPSQ_VALID);\n+\n+\tdma_wmb(); /* make sure WQE is populated before valid bit is set */\n+\n+\tset_64bit_val(wqe, 24, hdr);\n+\n+\tif (info->push_wqe) {\n+\t\tirdma_qp_push_wqe(qp, wqe, IRDMA_QP_WQE_MIN_QUANTA, wqe_idx,\n+\t\t\t\t  post_sq);\n+\t} else {\n+\t\tif (post_sq)\n+\t\t\tirdma_qp_post_wr(qp);\n+\t}\n+\n+\treturn 0;\n+}\n+\n+/**\n+ * irdma_mw_bind - bind Memory Window\n+ * @qp: hw qp ptr\n+ * @info: post sq information\n+ * @post_sq: flag to post sq\n+ */\n+static enum irdma_status_code irdma_mw_bind(struct irdma_qp_uk *qp,\n+\t\t\t\t\t    struct irdma_post_sq_info *info,\n+\t\t\t\t\t    bool post_sq)\n+{\n+\t__le64 *wqe;\n+\tstruct irdma_bind_window *op_info;\n+\tu64 hdr;\n+\tu32 wqe_idx;\n+\tbool local_fence = false;\n+\n+\tinfo->push_wqe = qp->push_db ? true : false;\n+\top_info = &info->op.bind_window;\n+\tlocal_fence |= info->local_fence;\n+\n+\twqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, IRDMA_QP_WQE_MIN_QUANTA,\n+\t\t\t\t\t 0, info);\n+\tif (!wqe)\n+\t\treturn IRDMA_ERR_QP_TOOMANY_WRS_POSTED;\n+\n+\tirdma_clr_wqes(qp, wqe_idx);\n+\n+\tqp->wqe_ops.iw_set_mw_bind_wqe(wqe, op_info);\n+\n+\thdr = LS_64(IRDMA_OP_TYPE_BIND_MW, IRDMAQPSQ_OPCODE) |\n+\t      LS_64(((op_info->ena_reads << 2) | (op_info->ena_writes << 3)),\n+\t\t    IRDMAQPSQ_STAGRIGHTS) |\n+\t      LS_64((op_info->addressing_type == IRDMA_ADDR_TYPE_VA_BASED ?  1 : 0),\n+\t\t    IRDMAQPSQ_VABASEDTO) |\n+\t      LS_64((op_info->mem_window_type_1 ? 
1 : 0),\n+\t\t    IRDMAQPSQ_MEMWINDOWTYPE) |\n+\t      LS_64((info->push_wqe ? 1 : 0), IRDMAQPSQ_PUSHWQE) |\n+\t      LS_64(info->read_fence, IRDMAQPSQ_READFENCE) |\n+\t      LS_64(local_fence, IRDMAQPSQ_LOCALFENCE) |\n+\t      LS_64(info->signaled, IRDMAQPSQ_SIGCOMPL) |\n+\t      LS_64(qp->swqe_polarity, IRDMAQPSQ_VALID);\n+\n+\tdma_wmb(); /* make sure WQE is populated before valid bit is set */\n+\n+\tset_64bit_val(wqe, 24, hdr);\n+\n+\tif (info->push_wqe) {\n+\t\tirdma_qp_push_wqe(qp, wqe, IRDMA_QP_WQE_MIN_QUANTA, wqe_idx,\n+\t\t\t\t  post_sq);\n+\t} else {\n+\t\tif (post_sq)\n+\t\t\tirdma_qp_post_wr(qp);\n+\t}\n+\n+\treturn 0;\n+}\n+\n+/**\n+ * irdma_post_receive - post receive wqe\n+ * @qp: hw qp ptr\n+ * @info: post rq information\n+ */\n+static enum irdma_status_code\n+irdma_post_receive(struct irdma_qp_uk *qp, struct irdma_post_rq_info *info)\n+{\n+\tu32 total_size = 0, wqe_idx, i, byte_off;\n+\tu32 addl_frag_cnt;\n+\t__le64 *wqe;\n+\tu64 hdr;\n+\n+\tif (qp->max_rq_frag_cnt < info->num_sges)\n+\t\treturn IRDMA_ERR_INVALID_FRAG_COUNT;\n+\n+\tfor (i = 0; i < info->num_sges; i++)\n+\t\ttotal_size += info->sg_list[i].len;\n+\n+\twqe = irdma_qp_get_next_recv_wqe(qp, &wqe_idx);\n+\tif (!wqe)\n+\t\treturn IRDMA_ERR_QP_TOOMANY_WRS_POSTED;\n+\n+\tqp->rq_wrid_array[wqe_idx] = info->wr_id;\n+\taddl_frag_cnt = info->num_sges > 1 ? (info->num_sges - 1) : 0;\n+\tqp->wqe_ops.iw_set_fragment(wqe, 0, info->sg_list,\n+\t\t\t\t    qp->rwqe_polarity);\n+\n+\tfor (i = 1, byte_off = 32; i < info->num_sges; i++) {\n+\t\tqp->wqe_ops.iw_set_fragment(wqe, byte_off, &info->sg_list[i],\n+\t\t\t\t\t    qp->rwqe_polarity);\n+\t\tbyte_off += 16;\n+\t}\n+\n+\t/* if not an odd number set valid bit in next fragment */\n+\tif (qp->uk_attrs->hw_rev > IRDMA_GEN_1 && !(info->num_sges & 0x01) &&\n+\t    info->num_sges) {\n+\t\tqp->wqe_ops.iw_set_fragment(wqe, byte_off, NULL,\n+\t\t\t\t\t    qp->rwqe_polarity);\n+\t\tif (qp->uk_attrs->hw_rev == IRDMA_GEN_2)\n+\t\t\t++addl_frag_cnt;\n+\t}\n+\n+\tset_64bit_val(wqe, 16, 0);\n+\thdr = LS_64(addl_frag_cnt, IRDMAQPSQ_ADDFRAGCNT) |\n+\t      LS_64(qp->rwqe_polarity, IRDMAQPSQ_VALID);\n+\n+\tdma_wmb(); /* make sure WQE is populated before valid bit is set */\n+\n+\tset_64bit_val(wqe, 24, hdr);\n+\n+\treturn 0;\n+}\n+\n+/**\n+ * irdma_cq_resize - reset the cq buffer info\n+ * @cq: cq to resize\n+ * @cq_base: new cq buffer addr\n+ * @cq_size: number of cqes\n+ */\n+static void irdma_cq_resize(struct irdma_cq_uk *cq, void *cq_base, int cq_size)\n+{\n+\tcq->cq_base = cq_base;\n+\tcq->cq_size = cq_size;\n+\tIRDMA_RING_INIT(cq->cq_ring, cq->cq_size);\n+\tcq->polarity = 1;\n+}\n+\n+/**\n+ * irdma_cq_set_resized_cnt - record the count of the resized buffers\n+ * @cq: cq to resize\n+ * @cq_cnt: the count of the resized cq buffers\n+ */\n+static void irdma_cq_set_resized_cnt(struct irdma_cq_uk *cq, u16 cq_cnt)\n+{\n+\tu64 temp_val;\n+\tu16 sw_cq_sel;\n+\tu8 arm_next_se;\n+\tu8 arm_next;\n+\tu8 arm_seq_num;\n+\n+\tget_64bit_val(cq->shadow_area, 32, &temp_val);\n+\n+\tsw_cq_sel = (u16)RS_64(temp_val, IRDMA_CQ_DBSA_SW_CQ_SELECT);\n+\tsw_cq_sel += cq_cnt;\n+\n+\tarm_seq_num = (u8)RS_64(temp_val, IRDMA_CQ_DBSA_ARM_SEQ_NUM);\n+\tarm_next_se = (u8)RS_64(temp_val, IRDMA_CQ_DBSA_ARM_NEXT_SE);\n+\tarm_next = (u8)RS_64(temp_val, IRDMA_CQ_DBSA_ARM_NEXT);\n+\n+\ttemp_val = LS_64(arm_seq_num, IRDMA_CQ_DBSA_ARM_SEQ_NUM) |\n+\t\t   LS_64(sw_cq_sel, IRDMA_CQ_DBSA_SW_CQ_SELECT) |\n+\t\t   LS_64(arm_next_se, IRDMA_CQ_DBSA_ARM_NEXT_SE) |\n+\t\t   LS_64(arm_next, 
IRDMA_CQ_DBSA_ARM_NEXT);\n+\n+\tset_64bit_val(cq->shadow_area, 32, temp_val);\n+}\n+\n+/**\n+ * irdma_cq_request_notification - cq notification request (door bell)\n+ * @cq: hw cq\n+ * @cq_notify: notification type\n+ */\n+static void irdma_cq_request_notification(struct irdma_cq_uk *cq,\n+\t\t\t\t\t  enum irdma_cmpl_notify cq_notify)\n+{\n+\tu64 temp_val;\n+\tu16 sw_cq_sel;\n+\tu8 arm_next_se = 0;\n+\tu8 arm_next = 0;\n+\tu8 arm_seq_num;\n+\n+\tget_64bit_val(cq->shadow_area, 32, &temp_val);\n+\tarm_seq_num = (u8)RS_64(temp_val, IRDMA_CQ_DBSA_ARM_SEQ_NUM);\n+\tarm_seq_num++;\n+\tsw_cq_sel = (u16)RS_64(temp_val, IRDMA_CQ_DBSA_SW_CQ_SELECT);\n+\tarm_next_se = (u8)RS_64(temp_val, IRDMA_CQ_DBSA_ARM_NEXT_SE);\n+\tarm_next_se |= 1;\n+\tif (cq_notify == IRDMA_CQ_COMPL_EVENT)\n+\t\tarm_next = 1;\n+\ttemp_val = LS_64(arm_seq_num, IRDMA_CQ_DBSA_ARM_SEQ_NUM) |\n+\t\t   LS_64(sw_cq_sel, IRDMA_CQ_DBSA_SW_CQ_SELECT) |\n+\t\t   LS_64(arm_next_se, IRDMA_CQ_DBSA_ARM_NEXT_SE) |\n+\t\t   LS_64(arm_next, IRDMA_CQ_DBSA_ARM_NEXT);\n+\n+\tset_64bit_val(cq->shadow_area, 32, temp_val);\n+\n+\tdma_wmb(); /* make sure WQE is populated before valid bit is set */\n+\n+\twritel(cq->cq_id, cq->cqe_alloc_db);\n+}\n+\n+/**\n+ * irdma_cq_post_entries - update tail in shadow memory\n+ * @cq: hw cq\n+ * @count: # of entries processed\n+ */\n+static enum irdma_status_code irdma_cq_post_entries(struct irdma_cq_uk *cq,\n+\t\t\t\t\t\t    u8 count)\n+{\n+\tIRDMA_RING_MOVE_TAIL_BY_COUNT(cq->cq_ring, count);\n+\tset_64bit_val(cq->shadow_area, 0,\n+\t\t      IRDMA_RING_CURRENT_HEAD(cq->cq_ring));\n+\n+\treturn 0;\n+}\n+\n+/**\n+ * irdma_cq_poll_cmpl - get cq completion info\n+ * @cq: hw cq\n+ * @info: cq poll information returned\n+ */\n+static enum irdma_status_code\n+irdma_cq_poll_cmpl(struct irdma_cq_uk *cq, struct irdma_cq_poll_info *info)\n+{\n+\tu64 comp_ctx, qword0, qword2, qword3, qword4, qword6, qword7, wqe_qword;\n+\t__le64 *cqe, *sw_wqe;\n+\tstruct irdma_qp_uk *qp;\n+\tstruct irdma_ring *pring = NULL;\n+\tu32 wqe_idx, q_type, array_idx = 0;\n+\tenum irdma_status_code ret_code = 0;\n+\tbool move_cq_head = true;\n+\tu8 polarity;\n+\tbool ext_valid;\n+\t__le64 *ext_cqe;\n+\tu32 peek_head;\n+\n+\tif (cq->avoid_mem_cflct)\n+\t\tcqe = IRDMA_GET_CURRENT_EXTENDED_CQ_ELEM(cq);\n+\telse\n+\t\tcqe = IRDMA_GET_CURRENT_CQ_ELEM(cq);\n+\n+\tget_64bit_val(cqe, 24, &qword3);\n+\tpolarity = (u8)RS_64(qword3, IRDMA_CQ_VALID);\n+\tif (polarity != cq->polarity)\n+\t\treturn IRDMA_ERR_Q_EMPTY;\n+\n+\t/* Ensure CQE contents are read after valid bit is checked */\n+\tdma_rmb();\n+\n+\text_valid = (bool)RS_64(qword3, IRDMA_CQ_EXTCQE);\n+\tif (ext_valid) {\n+\t\tif (cq->avoid_mem_cflct) {\n+\t\t\text_cqe = (__le64 *)((u8 *)cqe + 32);\n+\t\t\tget_64bit_val(ext_cqe, 24, &qword7);\n+\t\t\tpolarity = (u8)RS_64(qword7, IRDMA_CQ_VALID);\n+\t\t} else {\n+\t\t\tpeek_head = (cq->cq_ring.head + 1) % cq->cq_ring.size;\n+\t\t\text_cqe = cq->cq_base[peek_head].buf;\n+\t\t\tget_64bit_val(ext_cqe, 24, &qword7);\n+\t\t\tpolarity = (u8)RS_64(qword7, IRDMA_CQ_VALID);\n+\t\t\tif (!peek_head)\n+\t\t\t\tpolarity ^= 1;\n+\t\t}\n+\t\tif (polarity != cq->polarity)\n+\t\t\treturn IRDMA_ERR_Q_EMPTY;\n+\n+\t\t/* Ensure ext CQE contents are read after ext valid bit is checked */\n+\t\tdma_rmb();\n+\n+\t\tinfo->imm_valid = (bool)RS_64(qword7, IRDMA_CQ_IMMVALID);\n+\t\tif (info->imm_valid) {\n+\t\t\tget_64bit_val(ext_cqe, 0, &qword4);\n+\t\t\tinfo->imm_data = (u32)RS_64(qword4, IRDMA_CQ_IMMDATALOW32);\n+\t\t}\n+\t\tinfo->ud_smac_valid = (bool)RS_64(qword7, 
IRDMA_CQ_UDSMACVALID);\n+\t\tinfo->ud_vlan_valid = (bool)RS_64(qword7, IRDMA_CQ_UDVLANVALID);\n+\t\tif (info->ud_smac_valid || info->ud_vlan_valid) {\n+\t\t\tget_64bit_val(ext_cqe, 16, &qword6);\n+\t\t\tif (info->ud_vlan_valid)\n+\t\t\t\tinfo->ud_vlan = (u16)RS_64(qword6, IRDMA_CQ_UDVLAN);\n+\t\t\tif (info->ud_smac_valid) {\n+\t\t\t\tinfo->ud_smac[5] = qword6 & 0xFF;\n+\t\t\t\tinfo->ud_smac[4] = (qword6 >> 8) & 0xFF;\n+\t\t\t\tinfo->ud_smac[3] = (qword6 >> 16) & 0xFF;\n+\t\t\t\tinfo->ud_smac[2] = (qword6 >> 24) & 0xFF;\n+\t\t\t\tinfo->ud_smac[1] = (qword6 >> 32) & 0xFF;\n+\t\t\t\tinfo->ud_smac[0] = (qword6 >> 40) & 0xFF;\n+\t\t\t}\n+\t\t}\n+\t} else {\n+\t\tinfo->imm_valid = false;\n+\t\tinfo->ud_smac_valid = false;\n+\t\tinfo->ud_vlan_valid = false;\n+\t}\n+\n+\tq_type = (u8)RS_64(qword3, IRDMA_CQ_SQ);\n+\tinfo->error = (bool)RS_64(qword3, IRDMA_CQ_ERROR);\n+\tinfo->push_dropped = (bool)RS_64(qword3, IRDMACQ_PSHDROP);\n+\tinfo->ipv4 = (bool)RS_64(qword3, IRDMACQ_IPV4);\n+\tif (info->error) {\n+\t\tinfo->major_err = RS_64(qword3, IRDMA_CQ_MAJERR);\n+\t\tinfo->minor_err = RS_64(qword3, IRDMA_CQ_MINERR);\n+\t\tif (info->major_err == IRDMA_FLUSH_MAJOR_ERR)\n+\t\t\tinfo->comp_status = IRDMA_COMPL_STATUS_FLUSHED;\n+\t\telse if (info->major_err == IRDMA_LEN_MAJOR_ERR)\n+\t\t\tinfo->comp_status = IRDMA_COMPL_STATUS_INVALID_LEN;\n+\t\telse\n+\t\t\tinfo->comp_status = IRDMA_COMPL_STATUS_UNKNOWN;\n+\t} else {\n+\t\tinfo->comp_status = IRDMA_COMPL_STATUS_SUCCESS;\n+\t}\n+\n+\tget_64bit_val(cqe, 0, &qword0);\n+\tget_64bit_val(cqe, 16, &qword2);\n+\n+\tinfo->tcp_seq_num_rtt = (u32)RS_64(qword0, IRDMACQ_TCPSEQNUMRTT);\n+\tinfo->qp_id = (u32)RS_64(qword2, IRDMACQ_QPID);\n+\tinfo->ud_src_qpn = (u32)RS_64(qword2, IRDMACQ_UDSRCQPN);\n+\n+\tget_64bit_val(cqe, 8, &comp_ctx);\n+\n+\tinfo->solicited_event = (bool)RS_64(qword3, IRDMACQ_SOEVENT);\n+\n+\tqp = (struct irdma_qp_uk *)(unsigned long)comp_ctx;\n+\tif (!qp) {\n+\t\tret_code = IRDMA_ERR_Q_DESTROYED;\n+\t\tgoto exit;\n+\t}\n+\twqe_idx = (u32)RS_64(qword3, IRDMA_CQ_WQEIDX);\n+\tinfo->qp_handle = (irdma_qp_handle)(unsigned long)qp;\n+\n+\tif (q_type == IRDMA_CQE_QTYPE_RQ) {\n+\t\tarray_idx = wqe_idx / qp->rq_wqe_size_multiplier;\n+\t\tif (info->comp_status == IRDMA_COMPL_STATUS_FLUSHED ||\n+\t\t    info->comp_status == IRDMA_COMPL_STATUS_INVALID_LEN) {\n+\t\t\tif (!IRDMA_RING_MORE_WORK(qp->rq_ring)) {\n+\t\t\t\tret_code = IRDMA_ERR_Q_EMPTY;\n+\t\t\t\tgoto exit;\n+\t\t\t}\n+\n+\t\t\tinfo->wr_id = qp->rq_wrid_array[qp->rq_ring.tail];\n+\t\t\tarray_idx = qp->rq_ring.tail;\n+\t\t} else {\n+\t\t\tinfo->wr_id = qp->rq_wrid_array[array_idx];\n+\t\t}\n+\n+\t\tif (info->imm_valid)\n+\t\t\tinfo->op_type = IRDMA_OP_TYPE_REC_IMM;\n+\t\telse\n+\t\t\tinfo->op_type = IRDMA_OP_TYPE_REC;\n+\t\tif (qword3 & IRDMACQ_STAG_M) {\n+\t\t\tinfo->stag_invalid_set = true;\n+\t\t\tinfo->inv_stag = (u32)RS_64(qword2, IRDMACQ_INVSTAG);\n+\t\t} else {\n+\t\t\tinfo->stag_invalid_set = false;\n+\t\t}\n+\t\tinfo->bytes_xfered = (u32)RS_64(qword0, IRDMACQ_PAYLDLEN);\n+\t\tIRDMA_RING_SET_TAIL(qp->rq_ring, array_idx + 1);\n+\t\tif (!IRDMA_RING_MORE_WORK(qp->rq_ring) &&\n+\t\t    info->comp_status == IRDMA_COMPL_STATUS_FLUSHED)\n+\t\t\tqp->rq_flush_complete = true;\n+\t\tpring = &qp->rq_ring;\n+\t} else { /* q_type is IRDMA_CQE_QTYPE_SQ */\n+\t\tif (qp->first_sq_wq) {\n+\t\t\tqp->first_sq_wq = false;\n+\t\t\tif (!wqe_idx && qp->sq_ring.head == qp->sq_ring.tail) 
{\n+\t\t\t\tIRDMA_RING_MOVE_HEAD_NOCHECK(cq->cq_ring);\n+\t\t\t\tIRDMA_RING_MOVE_TAIL(cq->cq_ring);\n+\t\t\t\tset_64bit_val(cq->shadow_area, 0,\n+\t\t\t\t\t      IRDMA_RING_CURRENT_HEAD(cq->cq_ring));\n+\t\t\t\tmemset(info, 0,\n+\t\t\t\t       sizeof(struct irdma_cq_poll_info));\n+\t\t\t\treturn irdma_cq_poll_cmpl(cq, info);\n+\t\t\t}\n+\t\t}\n+\t\t/*cease posting push mode on push drop*/\n+\t\tif (info->push_dropped)\n+\t\t\tqp->push_mode = false;\n+\n+\t\tif (info->comp_status != IRDMA_COMPL_STATUS_FLUSHED) {\n+\t\t\tinfo->wr_id = qp->sq_wrtrk_array[wqe_idx].wrid;\n+\t\t\tif (!info->comp_status)\n+\t\t\t\tinfo->bytes_xfered = qp->sq_wrtrk_array[wqe_idx].wr_len;\n+\t\t\tinfo->op_type = (u8)RS_64(qword3, IRDMACQ_OP);\n+\t\t\tsw_wqe = qp->sq_base[wqe_idx].elem;\n+\t\t\tget_64bit_val(sw_wqe, 24, &wqe_qword);\n+\t\t\tIRDMA_RING_SET_TAIL(qp->sq_ring,\n+\t\t\t\t\t    wqe_idx + qp->sq_wrtrk_array[wqe_idx].quanta);\n+\t\t} else {\n+\t\t\tif (!IRDMA_RING_MORE_WORK(qp->sq_ring)) {\n+\t\t\t\tret_code = IRDMA_ERR_Q_EMPTY;\n+\t\t\t\tgoto exit;\n+\t\t\t}\n+\n+\t\t\tdo {\n+\t\t\t\tu8 op_type;\n+\t\t\t\tu32 tail;\n+\n+\t\t\t\ttail = qp->sq_ring.tail;\n+\t\t\t\tsw_wqe = qp->sq_base[tail].elem;\n+\t\t\t\tget_64bit_val(sw_wqe, 24,\n+\t\t\t\t\t      &wqe_qword);\n+\t\t\t\top_type = (u8)RS_64(wqe_qword, IRDMAQPSQ_OPCODE);\n+\t\t\t\tinfo->op_type = op_type;\n+\t\t\t\tIRDMA_RING_SET_TAIL(qp->sq_ring,\n+\t\t\t\t\t\t    tail + qp->sq_wrtrk_array[tail].quanta);\n+\t\t\t\tif (op_type != IRDMAQP_OP_NOP) {\n+\t\t\t\t\tinfo->wr_id = qp->sq_wrtrk_array[tail].wrid;\n+\t\t\t\t\tinfo->bytes_xfered = qp->sq_wrtrk_array[tail].wr_len;\n+\t\t\t\t\tbreak;\n+\t\t\t\t}\n+\t\t\t} while (1);\n+\t\t\tif (!IRDMA_RING_MORE_WORK(qp->sq_ring))\n+\t\t\t\tqp->sq_flush_complete = true;\n+\t\t}\n+\t\tpring = &qp->sq_ring;\n+\t}\n+\n+\tret_code = 0;\n+\n+exit:\n+\tif (!ret_code && info->comp_status == IRDMA_COMPL_STATUS_FLUSHED)\n+\t\tif (pring && IRDMA_RING_MORE_WORK(*pring))\n+\t\t\tmove_cq_head = false;\n+\n+\tif (move_cq_head) {\n+\t\tIRDMA_RING_MOVE_HEAD_NOCHECK(cq->cq_ring);\n+\t\tif (!IRDMA_RING_CURRENT_HEAD(cq->cq_ring))\n+\t\t\tcq->polarity ^= 1;\n+\n+\t\tif (ext_valid && !cq->avoid_mem_cflct) {\n+\t\t\tIRDMA_RING_MOVE_HEAD_NOCHECK(cq->cq_ring);\n+\t\t\tif (!IRDMA_RING_CURRENT_HEAD(cq->cq_ring))\n+\t\t\t\tcq->polarity ^= 1;\n+\t\t}\n+\n+\t\tIRDMA_RING_MOVE_TAIL(cq->cq_ring);\n+\t\tif (!cq->avoid_mem_cflct && ext_valid)\n+\t\t\tIRDMA_RING_MOVE_TAIL(cq->cq_ring);\n+\t\tset_64bit_val(cq->shadow_area, 0,\n+\t\t\t      IRDMA_RING_CURRENT_HEAD(cq->cq_ring));\n+\t} else {\n+\t\tqword3 &= ~IRDMA_CQ_WQEIDX_M;\n+\t\tqword3 |= LS_64(pring->tail, IRDMA_CQ_WQEIDX);\n+\t\tset_64bit_val(cqe, 24, qword3);\n+\t}\n+\n+\treturn ret_code;\n+}\n+\n+/**\n+ * irdma_qp_roundup - return round up qp wq depth\n+ * @wqdepth: wq depth in quanta to round up\n+ */\n+static int irdma_qp_round_up(u32 wqdepth)\n+{\n+\tint scount = 1;\n+\n+\tfor (wqdepth--; scount <= 16; scount *= 2)\n+\t\twqdepth |= wqdepth >> scount;\n+\n+\treturn ++wqdepth;\n+}\n+\n+/**\n+ * irdma_get_wqe_shift - get shift count for maximum wqe size\n+ * @uk_attrs: qp HW attributes\n+ * @sge: Maximum Scatter Gather Elements wqe\n+ * @inline_data: Maximum inline data size\n+ * @shift: Returns the shift needed based on sge\n+ *\n+ * Shift can be used to left shift the wqe size based on number of SGEs and inlind data size.\n+ * For 1 SGE or inline data <= 8, shift = 0 (wqe size of 32\n+ * bytes). 
For 2 or 3 SGEs or inline data <= 39, shift = 1 (wqe\n+ * size of 64 bytes).\n+ * For 4-7 SGE's and inline <= 101 Shift of 2 otherwise (wqe\n+ * size of 256 bytes).\n+ */\n+void irdma_get_wqe_shift(struct irdma_uk_attrs *uk_attrs, u32 sge,\n+\t\t\t u32 inline_data, u8 *shift)\n+{\n+\t*shift = 0;\n+\tif (uk_attrs->hw_rev > IRDMA_GEN_1) {\n+\t\tif (sge > 1 || inline_data > 8) {\n+\t\t\tif (sge < 4 && inline_data <= 39)\n+\t\t\t\t*shift = 1;\n+\t\t\telse if (sge < 8 && inline_data <= 101)\n+\t\t\t\t*shift = 2;\n+\t\t\telse\n+\t\t\t\t*shift = 3;\n+\t\t}\n+\t} else if (sge > 1 || inline_data > 16) {\n+\t\t*shift = (sge < 4 && inline_data <= 48) ? 1 : 2;\n+\t}\n+}\n+\n+/*\n+ * irdma_get_sqdepth - get SQ depth (quanta)\n+ * @uk_attrs: qp HW attributes\n+ * @sq_size: SQ size\n+ * @shift: shift which determines size of WQE\n+ * @sqdepth: depth of SQ\n+ *\n+ */\n+enum irdma_status_code irdma_get_sqdepth(struct irdma_uk_attrs *uk_attrs,\n+\t\t\t\t\t u32 sq_size, u8 shift, u32 *sqdepth)\n+{\n+\t*sqdepth = irdma_qp_round_up((sq_size << shift) + IRDMA_SQ_RSVD);\n+\n+\tif (*sqdepth < (IRDMA_QP_SW_MIN_WQSIZE << shift))\n+\t\t*sqdepth = IRDMA_QP_SW_MIN_WQSIZE << shift;\n+\telse if (*sqdepth > uk_attrs->max_hw_wq_quanta)\n+\t\treturn IRDMA_ERR_INVALID_SIZE;\n+\n+\treturn 0;\n+}\n+\n+/*\n+ * irdma_get_rqdepth - get RQ depth (quanta)\n+ * @uk_attrs: qp HW attributes\n+ * @rq_size: RQ size\n+ * @shift: shift which determines size of WQE\n+ * @rqdepth: depth of RQ\n+ */\n+enum irdma_status_code irdma_get_rqdepth(struct irdma_uk_attrs *uk_attrs,\n+\t\t\t\t\t u32 rq_size, u8 shift, u32 *rqdepth)\n+{\n+\t*rqdepth = irdma_qp_round_up((rq_size << shift) + IRDMA_RQ_RSVD);\n+\n+\tif (*rqdepth < (IRDMA_QP_SW_MIN_WQSIZE << shift))\n+\t\t*rqdepth = IRDMA_QP_SW_MIN_WQSIZE << shift;\n+\telse if (*rqdepth > uk_attrs->max_hw_rq_quanta)\n+\t\treturn IRDMA_ERR_INVALID_SIZE;\n+\n+\treturn 0;\n+}\n+\n+static struct irdma_qp_uk_ops iw_qp_uk_ops = {\n+\t.iw_inline_rdma_write = irdma_inline_rdma_write,\n+\t.iw_inline_send = irdma_inline_send,\n+\t.iw_mw_bind = irdma_mw_bind,\n+\t.iw_post_nop = irdma_nop,\n+\t.iw_post_receive = irdma_post_receive,\n+\t.iw_qp_post_wr = irdma_qp_post_wr,\n+\t.iw_qp_ring_push_db = irdma_qp_ring_push_db,\n+\t.iw_rdma_read = irdma_rdma_read,\n+\t.iw_rdma_write = irdma_rdma_write,\n+\t.iw_send = irdma_send,\n+\t.iw_stag_local_invalidate = irdma_stag_local_invalidate,\n+};\n+\n+static struct irdma_wqe_uk_ops iw_wqe_uk_ops = {\n+\t.iw_copy_inline_data = irdma_copy_inline_data,\n+\t.iw_inline_data_size_to_quanta = irdma_inline_data_size_to_quanta,\n+\t.iw_set_fragment = irdma_set_fragment,\n+\t.iw_set_mw_bind_wqe = irdma_set_mw_bind_wqe,\n+};\n+\n+static struct irdma_wqe_uk_ops iw_wqe_uk_ops_gen_1 = {\n+\t.iw_copy_inline_data = irdma_copy_inline_data_gen_1,\n+\t.iw_inline_data_size_to_quanta = irdma_inline_data_size_to_quanta_gen_1,\n+\t.iw_set_fragment = irdma_set_fragment_gen_1,\n+\t.iw_set_mw_bind_wqe = irdma_set_mw_bind_wqe_gen_1,\n+};\n+\n+static struct irdma_cq_ops iw_cq_ops = {\n+\t.iw_cq_clean = irdma_clean_cq,\n+\t.iw_cq_poll_cmpl = irdma_cq_poll_cmpl,\n+\t.iw_cq_post_entries = irdma_cq_post_entries,\n+\t.iw_cq_request_notification = irdma_cq_request_notification,\n+\t.iw_cq_resize = irdma_cq_resize,\n+\t.iw_cq_set_resized_cnt = irdma_cq_set_resized_cnt,\n+};\n+\n+static struct irdma_device_uk_ops iw_device_uk_ops = {\n+\t.iw_cq_uk_init = irdma_cq_uk_init,\n+\t.iw_qp_uk_init = irdma_qp_uk_init,\n+};\n+\n+/**\n+ * irdma_setup_connection_wqes - setup WQEs necessary to complete\n+ * connection.\n+ * 
@qp: hw qp (user and kernel)\n+ * @info: qp initialization info\n+ */\n+static void irdma_setup_connection_wqes(struct irdma_qp_uk *qp,\n+\t\t\t\t\tstruct irdma_qp_uk_init_info *info)\n+{\n+\tu16 move_cnt = 1;\n+\n+\tif (info->abi_ver > 5 &&\n+\t    (qp->uk_attrs->feature_flags & IRDMA_FEATURE_RTS_AE))\n+\t\tmove_cnt = 3;\n+\n+\tIRDMA_RING_MOVE_HEAD_BY_COUNT_NOCHECK(qp->sq_ring, move_cnt);\n+\tIRDMA_RING_MOVE_TAIL_BY_COUNT(qp->sq_ring, move_cnt);\n+\tIRDMA_RING_MOVE_HEAD_BY_COUNT_NOCHECK(qp->initial_ring, move_cnt);\n+}\n+\n+/**\n+ * irdma_qp_uk_init - initialize shared qp\n+ * @qp: hw qp (user and kernel)\n+ * @info: qp initialization info\n+ *\n+ * initializes the vars used in both user and kernel mode.\n+ * size of the wqe depends on numbers of max. fragements\n+ * allowed. Then size of wqe * the number of wqes should be the\n+ * amount of memory allocated for sq and rq.\n+ */\n+enum irdma_status_code irdma_qp_uk_init(struct irdma_qp_uk *qp,\n+\t\t\t\t\tstruct irdma_qp_uk_init_info *info)\n+{\n+\tenum irdma_status_code ret_code = 0;\n+\tu32 sq_ring_size;\n+\tu8 sqshift, rqshift;\n+\n+\tqp->uk_attrs = info->uk_attrs;\n+\tif (info->max_sq_frag_cnt > qp->uk_attrs->max_hw_wq_frags ||\n+\t    info->max_rq_frag_cnt > qp->uk_attrs->max_hw_wq_frags)\n+\t\treturn IRDMA_ERR_INVALID_FRAG_COUNT;\n+\n+\tirdma_get_wqe_shift(qp->uk_attrs, info->max_rq_frag_cnt, 0, &rqshift);\n+\tif (qp->uk_attrs->hw_rev == IRDMA_GEN_1) {\n+\t\tirdma_get_wqe_shift(qp->uk_attrs, info->max_sq_frag_cnt,\n+\t\t\t\t    info->max_inline_data, &sqshift);\n+\t\tif (info->abi_ver > 4)\n+\t\t\trqshift = IRDMA_MAX_RQ_WQE_SHIFT_GEN1;\n+\t} else {\n+\t\tirdma_get_wqe_shift(qp->uk_attrs, info->max_sq_frag_cnt + 1,\n+\t\t\t\t    info->max_inline_data, &sqshift);\n+\t}\n+\tqp->qp_caps = info->qp_caps;\n+\tqp->sq_base = info->sq;\n+\tqp->rq_base = info->rq;\n+\tqp->shadow_area = info->shadow_area;\n+\tqp->sq_wrtrk_array = info->sq_wrtrk_array;\n+\tqp->rq_wrid_array = info->rq_wrid_array;\n+\tqp->wqe_alloc_db = info->wqe_alloc_db;\n+\tqp->qp_id = info->qp_id;\n+\tqp->sq_size = info->sq_size;\n+\tqp->push_mode = false;\n+\tqp->max_sq_frag_cnt = info->max_sq_frag_cnt;\n+\tsq_ring_size = qp->sq_size << sqshift;\n+\tIRDMA_RING_INIT(qp->sq_ring, sq_ring_size);\n+\tIRDMA_RING_INIT(qp->initial_ring, sq_ring_size);\n+\tif (info->first_sq_wq) {\n+\t\tirdma_setup_connection_wqes(qp, info);\n+\t\tqp->swqe_polarity = 1;\n+\t\tqp->first_sq_wq = true;\n+\t} else {\n+\t\tqp->swqe_polarity = 0;\n+\t}\n+\tqp->swqe_polarity_deferred = 1;\n+\tqp->rwqe_polarity = 0;\n+\tqp->rq_size = info->rq_size;\n+\tqp->max_rq_frag_cnt = info->max_rq_frag_cnt;\n+\tqp->max_inline_data = info->max_inline_data;\n+\tqp->rq_wqe_size = rqshift;\n+\tIRDMA_RING_INIT(qp->rq_ring, qp->rq_size);\n+\tqp->rq_wqe_size_multiplier = 1 << rqshift;\n+\tqp->qp_ops = iw_qp_uk_ops;\n+\tif (qp->uk_attrs->hw_rev == IRDMA_GEN_1)\n+\t\tqp->wqe_ops = iw_wqe_uk_ops_gen_1;\n+\telse\n+\t\tqp->wqe_ops = iw_wqe_uk_ops;\n+\n+\treturn ret_code;\n+}\n+\n+/**\n+ * irdma_cq_uk_init - initialize shared cq (user and kernel)\n+ * @cq: hw cq\n+ * @info: hw cq initialization info\n+ */\n+enum irdma_status_code irdma_cq_uk_init(struct irdma_cq_uk *cq,\n+\t\t\t\t\tstruct irdma_cq_uk_init_info *info)\n+{\n+\tcq->cq_base = (struct irdma_cqe *)info->cq_base;\n+\tcq->cq_id = info->cq_id;\n+\tcq->cq_size = info->cq_size;\n+\tcq->cqe_alloc_db = info->cqe_alloc_db;\n+\tcq->cq_ack_db = info->cq_ack_db;\n+\tcq->shadow_area = info->shadow_area;\n+\tcq->avoid_mem_cflct = 
info->avoid_mem_cflct;\n+\tIRDMA_RING_INIT(cq->cq_ring, cq->cq_size);\n+\tcq->polarity = 1;\n+\tcq->ops = iw_cq_ops;\n+\n+\treturn 0;\n+}\n+\n+/**\n+ * irdma_device_init_uk - setup routines for iwarp shared device\n+ * @dev: iwarp shared (user and kernel)\n+ */\n+void irdma_device_init_uk(struct irdma_dev_uk *dev)\n+{\n+\tdev->ops_uk = iw_device_uk_ops;\n+}\n+\n+/**\n+ * irdma_clean_cq - clean cq entries\n+ * @q: completion context\n+ * @cq: cq to clean\n+ */\n+void irdma_clean_cq(void *q, struct irdma_cq_uk *cq)\n+{\n+\t__le64 *cqe;\n+\tu64 qword3, comp_ctx;\n+\tu32 cq_head;\n+\tu8 polarity, temp;\n+\n+\tcq_head = cq->cq_ring.head;\n+\ttemp = cq->polarity;\n+\tdo {\n+\t\tif (cq->avoid_mem_cflct)\n+\t\t\tcqe = ((struct irdma_extended_cqe *)(cq->cq_base))[cq_head].buf;\n+\t\telse\n+\t\t\tcqe = cq->cq_base[cq_head].buf;\n+\t\tget_64bit_val(cqe, 24, &qword3);\n+\t\tpolarity = (u8)RS_64(qword3, IRDMA_CQ_VALID);\n+\n+\t\tif (polarity != temp)\n+\t\t\tbreak;\n+\n+\t\tget_64bit_val(cqe, 8, &comp_ctx);\n+\t\tif ((void *)(unsigned long)comp_ctx == q)\n+\t\t\tset_64bit_val(cqe, 8, 0);\n+\n+\t\tcq_head = (cq_head + 1) % cq->cq_ring.size;\n+\t\tif (!cq_head)\n+\t\t\ttemp ^= 1;\n+\t} while (true);\n+}\n+\n+/**\n+ * irdma_nop - post a nop\n+ * @qp: hw qp ptr\n+ * @wr_id: work request id\n+ * @signaled: signaled for completion\n+ * @post_sq: ring doorbell\n+ */\n+enum irdma_status_code irdma_nop(struct irdma_qp_uk *qp, u64 wr_id,\n+\t\t\t\t bool signaled, bool post_sq)\n+{\n+\t__le64 *wqe;\n+\tu64 hdr;\n+\tu32 wqe_idx;\n+\tstruct irdma_post_sq_info info = {};\n+\n+\tinfo.push_wqe = false;\n+\tinfo.wr_id = wr_id;\n+\twqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, IRDMA_QP_WQE_MIN_QUANTA,\n+\t\t\t\t\t 0, &info);\n+\tif (!wqe)\n+\t\treturn IRDMA_ERR_QP_TOOMANY_WRS_POSTED;\n+\n+\tirdma_clr_wqes(qp, wqe_idx);\n+\n+\tset_64bit_val(wqe, 0, 0);\n+\tset_64bit_val(wqe, 8, 0);\n+\tset_64bit_val(wqe, 16, 0);\n+\n+\thdr = LS_64(IRDMAQP_OP_NOP, IRDMAQPSQ_OPCODE) |\n+\t      LS_64(signaled, IRDMAQPSQ_SIGCOMPL) |\n+\t      LS_64(qp->swqe_polarity, IRDMAQPSQ_VALID);\n+\n+\tdma_wmb(); /* make sure WQE is populated before valid bit is set */\n+\n+\tset_64bit_val(wqe, 24, hdr);\n+\tif (post_sq)\n+\t\tirdma_qp_post_wr(qp);\n+\n+\treturn 0;\n+}\n+\n+/**\n+ * irdma_fragcnt_to_quanta_sq - calculate quanta based on fragment count for SQ\n+ * @frag_cnt: number of fragments\n+ * @quanta: quanta for frag_cnt\n+ */\n+enum irdma_status_code irdma_fragcnt_to_quanta_sq(u32 frag_cnt, u16 *quanta)\n+{\n+\tswitch (frag_cnt) {\n+\tcase 0:\n+\tcase 1:\n+\t\t*quanta = IRDMA_QP_WQE_MIN_QUANTA;\n+\t\tbreak;\n+\tcase 2:\n+\tcase 3:\n+\t\t*quanta = 2;\n+\t\tbreak;\n+\tcase 4:\n+\tcase 5:\n+\t\t*quanta = 3;\n+\t\tbreak;\n+\tcase 6:\n+\tcase 7:\n+\t\t*quanta = 4;\n+\t\tbreak;\n+\tcase 8:\n+\tcase 9:\n+\t\t*quanta = 5;\n+\t\tbreak;\n+\tcase 10:\n+\tcase 11:\n+\t\t*quanta = 6;\n+\t\tbreak;\n+\tcase 12:\n+\tcase 13:\n+\t\t*quanta = 7;\n+\t\tbreak;\n+\tcase 14:\n+\tcase 15: /* when immediate data is present */\n+\t\t*quanta = 8;\n+\t\tbreak;\n+\tdefault:\n+\t\treturn IRDMA_ERR_INVALID_FRAG_COUNT;\n+\t}\n+\n+\treturn 0;\n+}\n+\n+/**\n+ * irdma_fragcnt_to_wqesize_rq - calculate wqe size based on fragment count for RQ\n+ * @frag_cnt: number of fragments\n+ * @wqe_size: size in bytes given frag_cnt\n+ */\n+enum irdma_status_code irdma_fragcnt_to_wqesize_rq(u32 frag_cnt, u16 *wqe_size)\n+{\n+\tswitch (frag_cnt) {\n+\tcase 0:\n+\tcase 1:\n+\t\t*wqe_size = 32;\n+\t\tbreak;\n+\tcase 2:\n+\tcase 3:\n+\t\t*wqe_size = 64;\n+\t\tbreak;\n+\tcase 4:\n+\tcase 
5:\n+\tcase 6:\n+\tcase 7:\n+\t\t*wqe_size = 128;\n+\t\tbreak;\n+\tcase 8:\n+\tcase 9:\n+\tcase 10:\n+\tcase 11:\n+\tcase 12:\n+\tcase 13:\n+\tcase 14:\n+\t\t*wqe_size = 256;\n+\t\tbreak;\n+\tdefault:\n+\t\treturn IRDMA_ERR_INVALID_FRAG_COUNT;\n+\t}\n+\n+\treturn 0;\n+}\ndiff --git a/drivers/infiniband/hw/irdma/user.h b/drivers/infiniband/hw/irdma/user.h\nnew file mode 100644\nindex 0000000..d3fba4d\n--- /dev/null\n+++ b/drivers/infiniband/hw/irdma/user.h\n@@ -0,0 +1,449 @@\n+/* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB */\n+/* Copyright (c) 2019, Intel Corporation. */\n+\n+#ifndef IRDMA_USER_H\n+#define IRDMA_USER_H\n+\n+#define irdma_handle void *\n+#define irdma_adapter_handle irdma_handle\n+#define irdma_qp_handle irdma_handle\n+#define irdma_cq_handle irdma_handle\n+#define irdma_pd_id irdma_handle\n+#define irdma_stag_handle irdma_handle\n+#define irdma_stag_index u32\n+#define irdma_stag u32\n+#define irdma_stag_key u8\n+#define irdma_tagged_offset u64\n+#define irdma_access_privileges u32\n+#define irdma_physical_fragment u64\n+#define irdma_address_list u64 *\n+#define irdma_sgl struct irdma_sge *\n+\n+#define\tIRDMA_MAX_MR_SIZE       0x7FFFFFFFL\n+\n+#define IRDMA_ACCESS_FLAGS_LOCALREAD\t\t0x01\n+#define IRDMA_ACCESS_FLAGS_LOCALWRITE\t\t0x02\n+#define IRDMA_ACCESS_FLAGS_REMOTEREAD_ONLY\t0x04\n+#define IRDMA_ACCESS_FLAGS_REMOTEREAD\t\t0x05\n+#define IRDMA_ACCESS_FLAGS_REMOTEWRITE_ONLY\t0x08\n+#define IRDMA_ACCESS_FLAGS_REMOTEWRITE\t\t0x0a\n+#define IRDMA_ACCESS_FLAGS_BIND_WINDOW\t\t0x10\n+#define IRDMA_ACCESS_FLAGS_ALL\t\t\t0x1f\n+\n+#define IRDMA_OP_TYPE_RDMA_WRITE\t\t0x00\n+#define IRDMA_OP_TYPE_RDMA_READ\t\t\t0x01\n+#define IRDMA_OP_TYPE_SEND\t\t\t0x03\n+#define IRDMA_OP_TYPE_SEND_INV\t\t\t0x04\n+#define IRDMA_OP_TYPE_SEND_SOL\t\t\t0x05\n+#define IRDMA_OP_TYPE_SEND_SOL_INV\t\t0x06\n+#define IRDMA_OP_TYPE_RDMA_WRITE_SOL\t\t0x0d\n+#define IRDMA_OP_TYPE_BIND_MW\t\t\t0x08\n+#define IRDMA_OP_TYPE_FAST_REG_NSMR\t\t0x09\n+#define IRDMA_OP_TYPE_INV_STAG\t\t\t0x0a\n+#define IRDMA_OP_TYPE_RDMA_READ_INV_STAG\t0x0b\n+#define IRDMA_OP_TYPE_NOP\t\t\t0x0c\n+#define IRDMA_OP_TYPE_REC\t0x3e\n+#define IRDMA_OP_TYPE_REC_IMM\t0x3f\n+\n+#define IRDMA_FLUSH_MAJOR_ERR\t1\n+#define IRDMA_LEN_MAJOR_ERR\t2\n+\n+enum irdma_device_caps_const {\n+\tIRDMA_WQE_SIZE =\t\t\t4,\n+\tIRDMA_CQP_WQE_SIZE =\t\t\t8,\n+\tIRDMA_CQE_SIZE =\t\t\t4,\n+\tIRDMA_EXTENDED_CQE_SIZE =\t\t8,\n+\tIRDMA_AEQE_SIZE =\t\t\t2,\n+\tIRDMA_CEQE_SIZE =\t\t\t1,\n+\tIRDMA_CQP_CTX_SIZE =\t\t\t8,\n+\tIRDMA_SHADOW_AREA_SIZE =\t\t8,\n+\tIRDMA_QUERY_FPM_BUF_SIZE =\t\t176,\n+\tIRDMA_COMMIT_FPM_BUF_SIZE =\t\t176,\n+\tIRDMA_GATHER_STATS_BUF_SIZE =\t\t1024,\n+\tIRDMA_MIN_IW_QP_ID =\t\t\t0,\n+\tIRDMA_MAX_IW_QP_ID =\t\t\t262143,\n+\tIRDMA_MIN_CEQID =\t\t\t0,\n+\tIRDMA_MAX_CEQID =\t\t\t1023,\n+\tIRDMA_CEQ_MAX_COUNT =\t\t\tIRDMA_MAX_CEQID + 1,\n+\tIRDMA_MIN_CQID =\t\t\t0,\n+\tIRDMA_MAX_CQID =\t\t\t524287,\n+\tIRDMA_MIN_AEQ_ENTRIES =\t\t\t1,\n+\tIRDMA_MAX_AEQ_ENTRIES =\t\t\t524287,\n+\tIRDMA_MIN_CEQ_ENTRIES =\t\t\t1,\n+\tIRDMA_MAX_CEQ_ENTRIES =\t\t\t524288,\n+\tIRDMA_MIN_CQ_SIZE =\t\t\t1,\n+\tIRDMA_MAX_CQ_SIZE =\t\t\t1048575,\n+\tIRDMA_DB_ID_ZERO =\t\t\t0,\n+\tIRDMA_MAX_WQ_FRAGMENT_COUNT =\t\t13,\n+\tIRDMA_MAX_SGE_RD =\t\t\t13,\n+\tIRDMA_MAX_OUTBOUND_MSG_SIZE =\t\t2147483647,\n+\tIRDMA_MAX_INBOUND_MSG_SIZE =\t\t2147483647,\n+\tIRDMA_MAX_PUSH_PAGE_COUNT =\t\t4096,\n+\tIRDMA_MAX_PE_ENA_VF_COUNT =\t\t32,\n+\tIRDMA_MAX_VF_FPM_ID =\t\t\t47,\n+\tIRDMA_MAX_SQ_PAYLOAD_SIZE =\t\t2145386496,\n+\tIRDMA_MAX_INLINE_DATA_SIZE =\t\t96,\n+\tIRDMA_MAX_IRD_SIZE 
=\t\t\t127,\n+\tIRDMA_MAX_ORD_SIZE =\t\t\t255,\n+\tIRDMA_MAX_WQ_ENTRIES =\t\t\t32768,\n+\tIRDMA_Q2_BUF_SIZE =\t\t\t256,\n+\tIRDMA_QP_CTX_SIZE =\t\t\t256,\n+\tIRDMA_MAX_PDS =\t\t\t\t262144,\n+};\n+\n+enum irdma_addressing_type {\n+\tIRDMA_ADDR_TYPE_ZERO_BASED = 0,\n+\tIRDMA_ADDR_TYPE_VA_BASED   = 1,\n+};\n+\n+enum irdma_cmpl_status {\n+\tIRDMA_COMPL_STATUS_SUCCESS = 0,\n+\tIRDMA_COMPL_STATUS_FLUSHED,\n+\tIRDMA_COMPL_STATUS_INVALID_WQE,\n+\tIRDMA_COMPL_STATUS_QP_CATASTROPHIC,\n+\tIRDMA_COMPL_STATUS_REMOTE_TERMINATION,\n+\tIRDMA_COMPL_STATUS_INVALID_STAG,\n+\tIRDMA_COMPL_STATUS_BASE_BOUND_VIOLATION,\n+\tIRDMA_COMPL_STATUS_ACCESS_VIOLATION,\n+\tIRDMA_COMPL_STATUS_INVALID_PD_ID,\n+\tIRDMA_COMPL_STATUS_WRAP_ERROR,\n+\tIRDMA_COMPL_STATUS_STAG_INVALID_PDID,\n+\tIRDMA_COMPL_STATUS_RDMA_READ_ZERO_ORD,\n+\tIRDMA_COMPL_STATUS_QP_NOT_PRIVLEDGED,\n+\tIRDMA_COMPL_STATUS_STAG_NOT_INVALID,\n+\tIRDMA_COMPL_STATUS_INVALID_PHYS_BUF_SIZE,\n+\tIRDMA_COMPL_STATUS_INVALID_PHYS_BUF_ENTRY,\n+\tIRDMA_COMPL_STATUS_INVALID_FBO,\n+\tIRDMA_COMPL_STATUS_INVALID_LEN,\n+\tIRDMA_COMPL_STATUS_INVALID_ACCESS,\n+\tIRDMA_COMPL_STATUS_PHYS_BUF_LIST_TOO_LONG,\n+\tIRDMA_COMPL_STATUS_INVALID_VIRT_ADDRESS,\n+\tIRDMA_COMPL_STATUS_INVALID_REGION,\n+\tIRDMA_COMPL_STATUS_INVALID_WINDOW,\n+\tIRDMA_COMPL_STATUS_INVALID_TOTAL_LEN,\n+\tIRDMA_COMPL_STATUS_UNKNOWN,\n+};\n+\n+enum irdma_cmpl_notify {\n+\tIRDMA_CQ_COMPL_EVENT     = 0,\n+\tIRDMA_CQ_COMPL_SOLICITED = 1,\n+};\n+\n+enum irdma_qp_caps {\n+\tIRDMA_WRITE_WITH_IMM = 1,\n+\tIRDMA_SEND_WITH_IMM  = 2,\n+\tIRDMA_ROCE\t     = 4,\n+};\n+\n+struct irdma_qp_uk;\n+struct irdma_cq_uk;\n+struct irdma_qp_uk_init_info;\n+struct irdma_cq_uk_init_info;\n+\n+struct irdma_sge {\n+\tirdma_tagged_offset tag_off;\n+\tu32 len;\n+\tirdma_stag stag;\n+};\n+\n+struct irdma_ring {\n+\tu32 head;\n+\tu32 tail;\n+\tu32 size;\n+};\n+\n+struct irdma_cqe {\n+\t__le64 buf[IRDMA_CQE_SIZE];\n+};\n+\n+struct irdma_extended_cqe {\n+\t__le64 buf[IRDMA_EXTENDED_CQE_SIZE];\n+};\n+\n+struct irdma_post_send {\n+\tirdma_sgl sg_list;\n+\tu32 num_sges;\n+\tu32 qkey;\n+\tu32 dest_qp;\n+\tu32 ah_id;\n+};\n+\n+struct irdma_post_inline_send {\n+\tvoid *data;\n+\tu32 len;\n+\tu32 qkey;\n+\tu32 dest_qp;\n+\tu32 ah_id;\n+};\n+\n+struct irdma_rdma_write {\n+\tirdma_sgl lo_sg_list;\n+\tu32 num_lo_sges;\n+\tstruct irdma_sge rem_addr;\n+};\n+\n+struct irdma_inline_rdma_write {\n+\tvoid *data;\n+\tu32 len;\n+\tstruct irdma_sge rem_addr;\n+};\n+\n+struct irdma_rdma_read {\n+\tirdma_sgl lo_sg_list;\n+\tu32 num_lo_sges;\n+\tstruct irdma_sge rem_addr;\n+};\n+\n+struct irdma_bind_window {\n+\tirdma_stag mr_stag;\n+\tu64 bind_len;\n+\tvoid *va;\n+\tenum irdma_addressing_type addressing_type;\n+\tbool ena_reads;\n+\tbool ena_writes;\n+\tirdma_stag mw_stag;\n+\tbool mem_window_type_1;\n+};\n+\n+struct irdma_inv_local_stag {\n+\tirdma_stag target_stag;\n+};\n+\n+struct irdma_post_sq_info {\n+\tu64 wr_id;\n+\tu8 op_type;\n+\tu8 l4len;\n+\tbool signaled;\n+\tbool read_fence;\n+\tbool local_fence;\n+\tbool inline_data;\n+\tbool imm_data_valid;\n+\tbool push_wqe;\n+\tbool report_rtt;\n+\tbool udp_hdr;\n+\tu32 imm_data;\n+\tu32 stag_to_inv;\n+\tbool defer_flag;\n+\tunion {\n+\t\tstruct irdma_post_send send;\n+\t\tstruct irdma_rdma_write rdma_write;\n+\t\tstruct irdma_rdma_read rdma_read;\n+\t\tstruct irdma_bind_window bind_window;\n+\t\tstruct irdma_inv_local_stag inv_local_stag;\n+\t\tstruct irdma_inline_rdma_write inline_rdma_write;\n+\t\tstruct irdma_post_inline_send inline_send;\n+\t} op;\n+};\n+\n+struct irdma_post_rq_info {\n+\tu64 
wr_id;\n+\tirdma_sgl sg_list;\n+\tu32 num_sges;\n+};\n+\n+struct irdma_cq_poll_info {\n+\tu64 wr_id;\n+\tirdma_qp_handle qp_handle;\n+\tu32 bytes_xfered;\n+\tu32 tcp_seq_num_rtt;\n+\tu32 qp_id;\n+\tu32 ud_src_qpn;\n+\tu32 imm_data;\n+\tirdma_stag inv_stag; /* or L_R_Key */\n+\tenum irdma_cmpl_status comp_status;\n+\tu16 major_err;\n+\tu16 minor_err;\n+\tu16 ud_vlan;\n+\tu8 ud_smac[6];\n+\tu8 op_type;\n+\tbool stag_invalid_set; /* or L_R_Key set */\n+\tbool push_dropped;\n+\tbool error;\n+\tbool solicited_event;\n+\tbool ipv4;\n+\tbool ud_vlan_valid;\n+\tbool ud_smac_valid;\n+\tbool imm_valid;\n+};\n+\n+struct irdma_qp_uk_ops {\n+\tenum irdma_status_code (*iw_rdma_write)(struct irdma_qp_uk *qp,\n+\t\t\t\t\t\tstruct irdma_post_sq_info *info,\n+\t\t\t\t\t\tbool post_sq);\n+\tenum irdma_status_code (*iw_inline_send)(struct irdma_qp_uk *qp,\n+\t\t\t\t\t\t struct irdma_post_sq_info *info,\n+\t\t\t\t\t\t bool post_sq);\n+\tenum irdma_status_code (*iw_mw_bind)(struct irdma_qp_uk *qp,\n+\t\t\t\t\t     struct irdma_post_sq_info *info,\n+\t\t\t\t\t     bool post_sq);\n+\tenum irdma_status_code (*iw_post_nop)(struct irdma_qp_uk *qp, u64 wr_id,\n+\t\t\t\t\t      bool signaled, bool post_sq);\n+\tenum irdma_status_code (*iw_post_receive)(struct irdma_qp_uk *qp,\n+\t\t\t\t\t\t  struct irdma_post_rq_info *info);\n+\tvoid (*iw_qp_post_wr)(struct irdma_qp_uk *qp);\n+\tvoid (*iw_qp_ring_push_db)(struct irdma_qp_uk *qp, u32 wqe_index);\n+\tenum irdma_status_code (*iw_rdma_read)(struct irdma_qp_uk *qp,\n+\t\t\t\t\t       struct irdma_post_sq_info *info,\n+\t\t\t\t\t       bool inv_stag, bool post_sq);\n+\tenum irdma_status_code (*iw_inline_rdma_write)(struct irdma_qp_uk *qp,\n+\t\t\t\t\t\t       struct irdma_post_sq_info *info,\n+\t\t\t\t\t\t       bool post_sq);\n+\tenum irdma_status_code (*iw_send)(struct irdma_qp_uk *qp,\n+\t\t\t\t\t  struct irdma_post_sq_info *info,\n+\t\t\t\t\t  bool post_sq);\n+\tenum irdma_status_code (*iw_stag_local_invalidate)(struct irdma_qp_uk *qp,\n+\t\t\t\t\t\t\t   struct irdma_post_sq_info *info,\n+\t\t\t\t\t\t\t   bool post_sq);\n+};\n+\n+struct irdma_wqe_uk_ops {\n+\tvoid (*iw_copy_inline_data)(u8 *dest, u8 *src, u32 len, u8 polarity);\n+\tenum irdma_status_code (*iw_inline_data_size_to_quanta)(u32 data_size,\n+\t\t\t\t\t\t\t\tu16 *quanta,\n+\t\t\t\t\t\t\t\tu32 max_size);\n+\tvoid (*iw_set_fragment)(__le64 *wqe, u32 offset, struct irdma_sge *sge,\n+\t\t\t\tu8 valid);\n+\tvoid (*iw_set_mw_bind_wqe)(__le64 *wqe,\n+\t\t\t\t   struct irdma_bind_window *op_info);\n+};\n+\n+struct irdma_cq_ops {\n+\tvoid (*iw_cq_clean)(void *q, struct irdma_cq_uk *cq);\n+\tenum irdma_status_code (*iw_cq_poll_cmpl)(struct irdma_cq_uk *cq,\n+\t\t\t\t\t\t  struct irdma_cq_poll_info *info);\n+\tenum irdma_status_code (*iw_cq_post_entries)(struct irdma_cq_uk *cq,\n+\t\t\t\t\t\t     u8 count);\n+\tvoid (*iw_cq_request_notification)(struct irdma_cq_uk *cq,\n+\t\t\t\t\t   enum irdma_cmpl_notify cq_notify);\n+\tvoid (*iw_cq_resize)(struct irdma_cq_uk *cq, void *cq_base, int size);\n+\tvoid (*iw_cq_set_resized_cnt)(struct irdma_cq_uk *qp, u16 cnt);\n+};\n+\n+struct irdma_dev_uk;\n+\n+struct irdma_device_uk_ops {\n+\tenum irdma_status_code (*iw_cq_uk_init)(struct irdma_cq_uk *cq,\n+\t\t\t\t\t\tstruct irdma_cq_uk_init_info *info);\n+\tenum irdma_status_code (*iw_qp_uk_init)(struct irdma_qp_uk *qp,\n+\t\t\t\t\t\tstruct irdma_qp_uk_init_info *info);\n+};\n+\n+struct irdma_dev_uk {\n+\tstruct irdma_device_uk_ops ops_uk;\n+};\n+\n+struct irdma_sq_uk_wr_trk_info {\n+\tu64 wrid;\n+\tu32 wr_len;\n+\tu16 quanta;\n+\tu8 
reserved[2];\n+};\n+\n+struct irdma_qp_quanta {\n+\t__le64 elem[IRDMA_WQE_SIZE];\n+};\n+\n+struct irdma_qp_uk {\n+\tstruct irdma_qp_quanta *sq_base;\n+\tstruct irdma_qp_quanta *rq_base;\n+\tstruct irdma_uk_attrs *uk_attrs;\n+\tu32 __iomem *wqe_alloc_db;\n+\tstruct irdma_sq_uk_wr_trk_info *sq_wrtrk_array;\n+\tu64 *rq_wrid_array;\n+\t__le64 *shadow_area;\n+\tu32 *push_db;\n+\t__le64 *push_wqe;\n+\tstruct irdma_ring sq_ring;\n+\tstruct irdma_ring rq_ring;\n+\tstruct irdma_ring initial_ring;\n+\tu32 qp_id;\n+\tu32 qp_caps;\n+\tu32 sq_size;\n+\tu32 rq_size;\n+\tu32 max_sq_frag_cnt;\n+\tu32 max_rq_frag_cnt;\n+\tu32 max_inline_data;\n+\tstruct irdma_qp_uk_ops qp_ops;\n+\tstruct irdma_wqe_uk_ops wqe_ops;\n+\tu8 swqe_polarity;\n+\tu8 swqe_polarity_deferred;\n+\tu8 rwqe_polarity;\n+\tu8 rq_wqe_size;\n+\tu8 rq_wqe_size_multiplier;\n+\tbool deferred_flag;\n+\tbool push_mode; /* whether the last post wqe was pushed */\n+\tbool first_sq_wq;\n+\tvoid *back_qp;\n+\tbool force_fence;\n+\tbool sq_flush_complete; /* Indicates flush was seen and SQ was empty after the flush */\n+\tbool rq_flush_complete; /* Indicates flush was seen and RQ was empty after the flush */\n+\tu8 dbg_rq_flushed;\n+};\n+\n+struct irdma_cq_uk {\n+\tstruct irdma_cqe *cq_base;\n+\tu32 __iomem *cqe_alloc_db;\n+\tu32 __iomem *cq_ack_db;\n+\t__le64 *shadow_area;\n+\tu32 cq_id;\n+\tu32 cq_size;\n+\tstruct irdma_ring cq_ring;\n+\tu8 polarity;\n+\tbool avoid_mem_cflct;\n+\tstruct irdma_cq_ops ops;\n+};\n+\n+struct irdma_qp_uk_init_info {\n+\tstruct irdma_qp_quanta *sq;\n+\tstruct irdma_qp_quanta *rq;\n+\tstruct irdma_uk_attrs *uk_attrs;\n+\tu32 __iomem *wqe_alloc_db;\n+\t__le64 *shadow_area;\n+\tstruct irdma_sq_uk_wr_trk_info *sq_wrtrk_array;\n+\tu64 *rq_wrid_array;\n+\tu32 qp_id;\n+\tu32 qp_caps;\n+\tu32 sq_size;\n+\tu32 rq_size;\n+\tu32 max_sq_frag_cnt;\n+\tu32 max_rq_frag_cnt;\n+\tu32 max_inline_data;\n+\tu8 first_sq_wq;\n+\tint abi_ver;\n+};\n+\n+struct irdma_cq_uk_init_info {\n+\tu32 __iomem *cqe_alloc_db;\n+\tu32 __iomem *cq_ack_db;\n+\tstruct irdma_cqe *cq_base;\n+\t__le64 *shadow_area;\n+\tu32 cq_size;\n+\tu32 cq_id;\n+\tbool avoid_mem_cflct;\n+};\n+\n+void irdma_device_init_uk(struct irdma_dev_uk *dev);\n+void irdma_qp_post_wr(struct irdma_qp_uk *qp);\n+__le64 *irdma_qp_get_next_send_wqe(struct irdma_qp_uk *qp, u32 *wqe_idx,\n+\t\t\t\t   u16 quanta, u32 total_size,\n+\t\t\t\t   struct irdma_post_sq_info *info);\n+__le64 *irdma_qp_get_next_recv_wqe(struct irdma_qp_uk *qp, u32 *wqe_idx);\n+enum irdma_status_code irdma_cq_uk_init(struct irdma_cq_uk *cq,\n+\t\t\t\t\tstruct irdma_cq_uk_init_info *info);\n+enum irdma_status_code irdma_qp_uk_init(struct irdma_qp_uk *qp,\n+\t\t\t\t\tstruct irdma_qp_uk_init_info *info);\n+void irdma_clean_cq(void *q, struct irdma_cq_uk *cq);\n+enum irdma_status_code irdma_nop(struct irdma_qp_uk *qp, u64 wr_id,\n+\t\t\t\t bool signaled, bool post_sq);\n+enum irdma_status_code irdma_fragcnt_to_quanta_sq(u32 frag_cnt, u16 *quanta);\n+enum irdma_status_code irdma_fragcnt_to_wqesize_rq(u32 frag_cnt, u16 *wqe_size);\n+void irdma_get_wqe_shift(struct irdma_uk_attrs *uk_attrs, u32 sge,\n+\t\t\t u32 inline_data, u8 *shift);\n+enum irdma_status_code irdma_get_sqdepth(struct irdma_uk_attrs *uk_attrs,\n+\t\t\t\t\t u32 sq_size, u8 shift, u32 *wqdepth);\n+enum irdma_status_code irdma_get_rqdepth(struct irdma_uk_attrs *uk_attrs,\n+\t\t\t\t\t u32 rq_size, u8 shift, u32 *wqdepth);\n+void irdma_qp_push_wqe(struct irdma_qp_uk *qp, __le64 *wqe, u16 quanta,\n+\t\t       u32 wqe_idx, bool post_sq);\n+void irdma_clr_wqes(struct 
irdma_qp_uk *qp, u32 qp_wqe_idx);\n+#endif /* IRDMA_USER_H */\n",
    "prefixes": [
        "rdma-nxt",
        "11/16"
    ]
}
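
A standalone sketch of the GEN-2 WQE shift rule from irdma_get_wqe_shift() in the patch above. On GEN-2 hardware the base quantum is 32 bytes, so shifts 0-3 select 32-, 64-, 128- and 256-byte WQEs. wqe_shift_gen2() and the main() harness below are illustrative names, not driver code.

#include <stdio.h>
#include <stdint.h>

/* Mirrors only the hw_rev > IRDMA_GEN_1 branch of irdma_get_wqe_shift(). */
static uint8_t wqe_shift_gen2(uint32_t sge, uint32_t inline_data)
{
	if (sge <= 1 && inline_data <= 8)
		return 0;	/* 32-byte WQE */
	if (sge < 4 && inline_data <= 39)
		return 1;	/* 64-byte WQE */
	if (sge < 8 && inline_data <= 101)
		return 2;	/* 128-byte WQE */
	return 3;		/* 256-byte WQE */
}

int main(void)
{
	printf("3 SGEs, no inline   -> shift %u\n", wqe_shift_gen2(3, 0));	/* 1 */
	printf("7 SGEs, 100B inline -> shift %u\n", wqe_shift_gen2(7, 100));	/* 2 */
	printf("8 SGEs, no inline   -> shift %u\n", wqe_shift_gen2(8, 0));	/* 3 */
	return 0;
}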
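
irdma_get_sqdepth()/irdma_get_rqdepth() pad the requested ring size with reserved quanta, round up, and clamp against hardware limits. A minimal sketch of that clamping, assuming irdma_qp_round_up() (defined earlier in the patch, not shown here) rounds up to the next power of two; the constants below are stand-ins, not the driver's values.

#include <stdio.h>
#include <stdint.h>

#define SQ_RSVD		1	/* stand-in for IRDMA_SQ_RSVD */
#define SW_MIN_WQSIZE	8	/* stand-in for IRDMA_QP_SW_MIN_WQSIZE */
#define MAX_HW_QUANTA	32768	/* stand-in for uk_attrs->max_hw_wq_quanta */

/* Assumed behavior of irdma_qp_round_up(): next power of two. */
static uint32_t round_up_pow2(uint32_t n)
{
	uint32_t r = 1;

	while (r < n)
		r <<= 1;
	return r;
}

/* Same shape as irdma_get_sqdepth(): pad, round, clamp low, reject high. */
static int get_sqdepth(uint32_t sq_size, uint8_t shift, uint32_t *sqdepth)
{
	*sqdepth = round_up_pow2((sq_size << shift) + SQ_RSVD);

	if (*sqdepth < (uint32_t)(SW_MIN_WQSIZE << shift))
		*sqdepth = SW_MIN_WQSIZE << shift;
	else if (*sqdepth > MAX_HW_QUANTA)
		return -1;	/* stand-in for IRDMA_ERR_INVALID_SIZE */

	return 0;
}

int main(void)
{
	uint32_t depth;

	if (!get_sqdepth(100, 1, &depth))
		printf("sq depth = %u quanta\n", depth);	/* 256 */
	return 0;
}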
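
The switch in irdma_fragcnt_to_quanta_sq() maps every pair of fragment counts to one more quantum; for 0 <= frag_cnt <= 15 it reduces to quanta = frag_cnt / 2 + 1, assuming IRDMA_QP_WQE_MIN_QUANTA is 1 (its definition is not shown in this patch). A small self-check of that equivalence:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	/* Expected quanta per frag_cnt, copied from the switch in the patch. */
	static const uint16_t expected[16] = {
		1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8
	};
	uint32_t frag_cnt;

	for (frag_cnt = 0; frag_cnt < 16; frag_cnt++)
		assert((uint16_t)(frag_cnt / 2 + 1) == expected[frag_cnt]);

	return 0;
}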