get:
Show a patch.

patch:
Partially update a patch (only the fields supplied are changed).

put:
Fully update a patch (the supplied representation replaces the resource).

GET /api/patches/1124842/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 1124842,
    "url": "http://patchwork.ozlabs.org/api/patches/1124842/?format=api",
    "web_url": "http://patchwork.ozlabs.org/project/intel-wired-lan/patch/20190629185405.1601-5-shiraz.saleem@intel.com/",
    "project": {
        "id": 46,
        "url": "http://patchwork.ozlabs.org/api/projects/46/?format=api",
        "name": "Intel Wired Ethernet development",
        "link_name": "intel-wired-lan",
        "list_id": "intel-wired-lan.osuosl.org",
        "list_email": "intel-wired-lan@osuosl.org",
        "web_url": "",
        "scm_url": "",
        "webscm_url": "",
        "list_archive_url": "",
        "list_archive_url_format": "",
        "commit_url_format": ""
    },
    "msgid": "<20190629185405.1601-5-shiraz.saleem@intel.com>",
    "list_archive_url": null,
    "date": "2019-06-29T18:53:52",
    "name": "[rdma-next,04/17] RDMA/irdma: Add HMC backing store setup functions",
    "commit_ref": null,
    "pull_url": null,
    "state": "rejected",
    "archived": false,
    "hash": "0a77fe08710b2fb5cacd8cddf4bc567dfdf77493",
    "submitter": {
        "id": 69500,
        "url": "http://patchwork.ozlabs.org/api/people/69500/?format=api",
        "name": "Saleem, Shiraz",
        "email": "shiraz.saleem@intel.com"
    },
    "delegate": {
        "id": 68,
        "url": "http://patchwork.ozlabs.org/api/users/68/?format=api",
        "username": "jtkirshe",
        "first_name": "Jeff",
        "last_name": "Kirsher",
        "email": "jeffrey.t.kirsher@intel.com"
    },
    "mbox": "http://patchwork.ozlabs.org/project/intel-wired-lan/patch/20190629185405.1601-5-shiraz.saleem@intel.com/mbox/",
    "series": [
        {
            "id": 116886,
            "url": "http://patchwork.ozlabs.org/api/series/116886/?format=api",
            "web_url": "http://patchwork.ozlabs.org/project/intel-wired-lan/list/?series=116886",
            "date": "2019-06-29T18:53:48",
            "name": "Add unified Intel Ethernet RDMA driver (irdma)",
            "version": 1,
            "mbox": "http://patchwork.ozlabs.org/series/116886/mbox/"
        }
    ],
    "comments": "http://patchwork.ozlabs.org/api/patches/1124842/comments/",
    "check": "pending",
    "checks": "http://patchwork.ozlabs.org/api/patches/1124842/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<intel-wired-lan-bounces@osuosl.org>",
        "X-Original-To": [
            "incoming@patchwork.ozlabs.org",
            "intel-wired-lan@lists.osuosl.org"
        ],
        "Delivered-To": [
            "patchwork-incoming@bilbo.ozlabs.org",
            "intel-wired-lan@lists.osuosl.org"
        ],
        "Authentication-Results": [
            "ozlabs.org;\n\tspf=pass (mailfrom) smtp.mailfrom=osuosl.org\n\t(client-ip=140.211.166.136; helo=silver.osuosl.org;\n\tenvelope-from=intel-wired-lan-bounces@osuosl.org;\n\treceiver=<UNKNOWN>)",
            "ozlabs.org;\n\tdmarc=fail (p=none dis=none) header.from=intel.com"
        ],
        "Received": [
            "from silver.osuosl.org (smtp3.osuosl.org [140.211.166.136])\n\t(using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256\n\tbits)) (No client certificate requested)\n\tby ozlabs.org (Postfix) with ESMTPS id 45bjW01Z2Cz9s3Z\n\tfor <incoming@patchwork.ozlabs.org>;\n\tSun, 30 Jun 2019 04:55:07 +1000 (AEST)",
            "from localhost (localhost [127.0.0.1])\n\tby silver.osuosl.org (Postfix) with ESMTP id 864B8204C7;\n\tSat, 29 Jun 2019 18:55:06 +0000 (UTC)",
            "from silver.osuosl.org ([127.0.0.1])\n\tby localhost (.osuosl.org [127.0.0.1]) (amavisd-new, port 10024)\n\twith ESMTP id S3sLkJGWoYaO; Sat, 29 Jun 2019 18:54:48 +0000 (UTC)",
            "from ash.osuosl.org (ash.osuosl.org [140.211.166.34])\n\tby silver.osuosl.org (Postfix) with ESMTP id C128C204D6;\n\tSat, 29 Jun 2019 18:54:32 +0000 (UTC)",
            "from whitealder.osuosl.org (smtp1.osuosl.org [140.211.166.138])\n\tby ash.osuosl.org (Postfix) with ESMTP id 7F1FE1BF3AD\n\tfor <intel-wired-lan@lists.osuosl.org>;\n\tSat, 29 Jun 2019 18:54:29 +0000 (UTC)",
            "from localhost (localhost [127.0.0.1])\n\tby whitealder.osuosl.org (Postfix) with ESMTP id 79FEC86BC5\n\tfor <intel-wired-lan@lists.osuosl.org>;\n\tSat, 29 Jun 2019 18:54:29 +0000 (UTC)",
            "from whitealder.osuosl.org ([127.0.0.1])\n\tby localhost (.osuosl.org [127.0.0.1]) (amavisd-new, port 10024)\n\twith ESMTP id UboG0GQK8aYO for <intel-wired-lan@lists.osuosl.org>;\n\tSat, 29 Jun 2019 18:54:19 +0000 (UTC)",
            "from mga14.intel.com (mga14.intel.com [192.55.52.115])\n\tby whitealder.osuosl.org (Postfix) with ESMTPS id DC96186B19\n\tfor <intel-wired-lan@lists.osuosl.org>;\n\tSat, 29 Jun 2019 18:54:17 +0000 (UTC)",
            "from fmsmga004.fm.intel.com ([10.253.24.48])\n\tby fmsmga103.fm.intel.com with ESMTP/TLS/DHE-RSA-AES256-GCM-SHA384;\n\t29 Jun 2019 11:54:17 -0700",
            "from ssaleem-mobl.amr.corp.intel.com ([10.254.177.95])\n\tby fmsmga004.fm.intel.com with ESMTP; 29 Jun 2019 11:54:17 -0700"
        ],
        "X-Virus-Scanned": [
            "amavisd-new at osuosl.org",
            "amavisd-new at osuosl.org"
        ],
        "X-Greylist": "domain auto-whitelisted by SQLgrey-1.7.6",
        "X-Amp-Result": "SKIPPED(no attachment in message)",
        "X-Amp-File-Uploaded": "False",
        "X-ExtLoop1": "1",
        "X-IronPort-AV": "E=Sophos;i=\"5.63,432,1557212400\"; d=\"scan'208\";a=\"185972861\"",
        "From": "Shiraz Saleem <shiraz.saleem@intel.com>",
        "To": "intel-wired-lan@lists.osuosl.org",
        "Date": "Sat, 29 Jun 2019 13:53:52 -0500",
        "Message-Id": "<20190629185405.1601-5-shiraz.saleem@intel.com>",
        "X-Mailer": "git-send-email 2.21.0",
        "In-Reply-To": "<20190629185405.1601-1-shiraz.saleem@intel.com>",
        "References": "<20190629185405.1601-1-shiraz.saleem@intel.com>",
        "MIME-Version": "1.0",
        "Subject": "[Intel-wired-lan] [PATCH rdma-next 04/17] RDMA/irdma: Add HMC\n\tbacking store setup functions",
        "X-BeenThere": "intel-wired-lan@osuosl.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "Intel Wired Ethernet Linux Kernel Driver Development\n\t<intel-wired-lan.osuosl.org>",
        "List-Unsubscribe": "<https://lists.osuosl.org/mailman/options/intel-wired-lan>, \n\t<mailto:intel-wired-lan-request@osuosl.org?subject=unsubscribe>",
        "List-Archive": "<http://lists.osuosl.org/pipermail/intel-wired-lan/>",
        "List-Post": "<mailto:intel-wired-lan@osuosl.org>",
        "List-Help": "<mailto:intel-wired-lan-request@osuosl.org?subject=help>",
        "List-Subscribe": "<https://lists.osuosl.org/mailman/listinfo/intel-wired-lan>, \n\t<mailto:intel-wired-lan-request@osuosl.org?subject=subscribe>",
        "Cc": "Mustafa Ismail <mustafa.ismail@intel.com>,\n\tShiraz Saleem <shiraz.saleem@intel.com>",
        "Content-Type": "text/plain; charset=\"us-ascii\"",
        "Content-Transfer-Encoding": "7bit",
        "Errors-To": "intel-wired-lan-bounces@osuosl.org",
        "Sender": "\"Intel-wired-lan\" <intel-wired-lan-bounces@osuosl.org>"
    },
    "content": "From: Mustafa Ismail <mustafa.ismail@intel.com>\n\nHW uses host memory as a backing store for a number of\nprotocol context objects and queue state tracking.\nThe Host Memory Cache (HMC) is a component responsible for\nmanaging these objects stored in host memory.\n\nAdd the functions and data structures to manage the allocation\nof backing pages used by the HMC for the various objects\n\nSigned-off-by: Mustafa Ismail <mustafa.ismail@intel.com>\nSigned-off-by: Shiraz Saleem <shiraz.saleem@intel.com>\n---\n drivers/infiniband/hw/irdma/hmc.c | 706 ++++++++++++++++++++++++++++++++++++++\n drivers/infiniband/hw/irdma/hmc.h | 219 ++++++++++++\n 2 files changed, 925 insertions(+)\n create mode 100644 drivers/infiniband/hw/irdma/hmc.c\n create mode 100644 drivers/infiniband/hw/irdma/hmc.h",
    "diff": "diff --git a/drivers/infiniband/hw/irdma/hmc.c b/drivers/infiniband/hw/irdma/hmc.c\nnew file mode 100644\nindex 0000000..ea42fd4\n--- /dev/null\n+++ b/drivers/infiniband/hw/irdma/hmc.c\n@@ -0,0 +1,706 @@\n+// SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB\n+/* Copyright (c) 2019, Intel Corporation. */\n+\n+#include \"osdep.h\"\n+#include \"status.h\"\n+#include \"hmc.h\"\n+#include \"defs.h\"\n+#include \"type.h\"\n+#include \"protos.h\"\n+\n+/**\n+ * irdma_find_sd_index_limit - finds segment descriptor index limit\n+ * @hmc_info: pointer to the HMC configuration information structure\n+ * @type: type of HMC resources we're searching\n+ * @idx: starting index for the object\n+ * @cnt: number of objects we're trying to create\n+ * @sd_idx: pointer to return index of the segment descriptor in question\n+ * @sd_limit: pointer to return the maximum number of segment descriptors\n+ *\n+ * This function calculates the segment descriptor index and index limit\n+ * for the resource defined by irdma_hmc_rsrc_type.\n+ */\n+\n+static void irdma_find_sd_index_limit(struct irdma_hmc_info *hmc_info, u32 type,\n+\t\t\t\t      u32 idx, u32 cnt, u32 *sd_idx,\n+\t\t\t\t      u32 *sd_limit)\n+{\n+\tu64 fpm_addr, fpm_limit;\n+\n+\tfpm_addr = hmc_info->hmc_obj[(type)].base +\n+\t\t   hmc_info->hmc_obj[type].size * idx;\n+\tfpm_limit = fpm_addr + hmc_info->hmc_obj[type].size * cnt;\n+\t*sd_idx = (u32)(fpm_addr / IRDMA_HMC_DIRECT_BP_SIZE);\n+\t*sd_limit = (u32)((fpm_limit - 1) / IRDMA_HMC_DIRECT_BP_SIZE);\n+\t*sd_limit += 1;\n+}\n+\n+/**\n+ * irdma_find_pd_index_limit - finds page descriptor index limit\n+ * @hmc_info: pointer to the HMC configuration information struct\n+ * @type: HMC resource type we're examining\n+ * @idx: starting index for the object\n+ * @cnt: number of objects we're trying to create\n+ * @pd_idx: pointer to return page descriptor index\n+ * @pd_limit: pointer to return page descriptor index limit\n+ *\n+ * Calculates the page descriptor index and 
index limit for the resource\n+ * defined by irdma_hmc_rsrc_type.\n+ */\n+\n+static void irdma_find_pd_index_limit(struct irdma_hmc_info *hmc_info, u32 type,\n+\t\t\t\t      u32 idx, u32 cnt, u32 *pd_idx,\n+\t\t\t\t      u32 *pd_limit)\n+{\n+\tu64 fpm_adr, fpm_limit;\n+\n+\tfpm_adr = hmc_info->hmc_obj[type].base +\n+\t\t  hmc_info->hmc_obj[type].size * idx;\n+\tfpm_limit = fpm_adr + (hmc_info)->hmc_obj[(type)].size * (cnt);\n+\t*pd_idx = (u32)(fpm_adr / IRDMA_HMC_PAGED_BP_SIZE);\n+\t*pd_limit = (u32)((fpm_limit - 1) / IRDMA_HMC_PAGED_BP_SIZE);\n+\t*pd_limit += 1;\n+}\n+\n+/**\n+ * irdma_set_sd_entry - setup entry for sd programming\n+ * @pa: physical addr\n+ * @idx: sd index\n+ * @type: paged or direct sd\n+ * @entry: sd entry ptr\n+ */\n+static void irdma_set_sd_entry(u64 pa, u32 idx, enum irdma_sd_entry_type type,\n+\t\t\t       struct irdma_update_sd_entry *entry)\n+{\n+\tentry->data = pa | (IRDMA_HMC_MAX_BP_COUNT << IRDMA_PFHMC_SDDATALOW_PMSDBPCOUNT_S) |\n+\t\t      (((type == IRDMA_SD_TYPE_PAGED) ? 0 : 1) << IRDMA_PFHMC_SDDATALOW_PMSDTYPE_S) |\n+\t\t      (1 << IRDMA_PFHMC_SDDATALOW_PMSDVALID_S);\n+\tentry->cmd = (idx | (1 << IRDMA_PFHMC_SDCMD_PMSDWR_S) | (1 << 15));\n+}\n+\n+/**\n+ * irdma_clr_sd_entry - setup entry for sd clear\n+ * @idx: sd index\n+ * @type: paged or direct sd\n+ * @entry: sd entry ptr\n+ */\n+static void irdma_clr_sd_entry(u32 idx, enum irdma_sd_entry_type type,\n+\t\t\t       struct irdma_update_sd_entry *entry)\n+{\n+\tentry->data = (IRDMA_HMC_MAX_BP_COUNT << IRDMA_PFHMC_SDDATALOW_PMSDBPCOUNT_S) |\n+\t\t      (((type == IRDMA_SD_TYPE_PAGED) ? 
0 : 1) << IRDMA_PFHMC_SDDATALOW_PMSDTYPE_S);\n+\tentry->cmd = (idx | (1 << IRDMA_PFHMC_SDCMD_PMSDWR_S) | (1 << 15));\n+}\n+\n+/**\n+ * irdma_hmc_sd_one - setup 1 sd entry for cqp\n+ * @dev: pointer to the device structure\n+ * @hmc_fn_id: hmc's function id\n+ * @pa: physical addr\n+ * @sd_idx: sd index\n+ * @type: paged or direct sd\n+ * @setsd: flag to set or clear sd\n+ */\n+enum irdma_status_code irdma_hmc_sd_one(struct irdma_sc_dev *dev, u8 hmc_fn_id,\n+\t\t\t\t\tu64 pa, u32 sd_idx,\n+\t\t\t\t\tenum irdma_sd_entry_type type,\n+\t\t\t\t\tbool setsd)\n+{\n+\tstruct irdma_update_sds_info sdinfo;\n+\n+\tsdinfo.cnt = 1;\n+\tsdinfo.hmc_fn_id = hmc_fn_id;\n+\tif (setsd)\n+\t\tirdma_set_sd_entry(pa, sd_idx, type, sdinfo.entry);\n+\telse\n+\t\tirdma_clr_sd_entry(sd_idx, type, sdinfo.entry);\n+\treturn dev->cqp->process_cqp_sds(dev, &sdinfo);\n+}\n+\n+/**\n+ * irdma_hmc_sd_grp - setup group of sd entries for cqp\n+ * @dev: pointer to the device structure\n+ * @hmc_info: pointer to the HMC configuration information struct\n+ * @sd_index: sd index\n+ * @sd_cnt: number of sd entries\n+ * @setsd: flag to set or clear sd\n+ */\n+static enum irdma_status_code irdma_hmc_sd_grp(struct irdma_sc_dev *dev,\n+\t\t\t\t\t       struct irdma_hmc_info *hmc_info,\n+\t\t\t\t\t       u32 sd_index, u32 sd_cnt,\n+\t\t\t\t\t       bool setsd)\n+{\n+\tstruct irdma_hmc_sd_entry *sd_entry;\n+\tstruct irdma_update_sds_info sdinfo = {};\n+\tu64 pa;\n+\tu32 i;\n+\tenum irdma_status_code ret_code = 0;\n+\n+\tsdinfo.hmc_fn_id = hmc_info->hmc_fn_id;\n+\tfor (i = sd_index; i < sd_index + sd_cnt; i++) {\n+\t\tsd_entry = &hmc_info->sd_table.sd_entry[i];\n+\t\tif (!sd_entry || (!sd_entry->valid && setsd) ||\n+\t\t    (sd_entry->valid && !setsd))\n+\t\t\tcontinue;\n+\t\tif (setsd) {\n+\t\t\tpa = (sd_entry->entry_type == IRDMA_SD_TYPE_PAGED) ?\n+\t\t\t\t     sd_entry->u.pd_table.pd_page_addr.pa :\n+\t\t\t\t     sd_entry->u.bp.addr.pa;\n+\t\t\tirdma_set_sd_entry(pa, i, sd_entry->entry_type,\n+\t\t\t\t\t   
&sdinfo.entry[sdinfo.cnt]);\n+\t\t} else {\n+\t\t\tirdma_clr_sd_entry(i, sd_entry->entry_type,\n+\t\t\t\t\t   &sdinfo.entry[sdinfo.cnt]);\n+\t\t}\n+\t\tsdinfo.cnt++;\n+\t\tif (sdinfo.cnt == IRDMA_MAX_SD_ENTRIES) {\n+\t\t\tret_code = dev->cqp->process_cqp_sds(dev, &sdinfo);\n+\t\t\tif (ret_code) {\n+\t\t\t\tdev_dbg(rfdev_to_dev(dev),\n+\t\t\t\t\t\"HMC: sd_programming failed err=%d\\n\",\n+\t\t\t\t\tret_code);\n+\t\t\t\treturn ret_code;\n+\t\t\t}\n+\n+\t\t\tsdinfo.cnt = 0;\n+\t\t}\n+\t}\n+\tif (sdinfo.cnt)\n+\t\tret_code = dev->cqp->process_cqp_sds(dev, &sdinfo);\n+\n+\treturn ret_code;\n+}\n+\n+/**\n+ * irdma_hmc_finish_add_sd_reg - program sd entries for objects\n+ * @dev: pointer to the device structure\n+ * @info: create obj info\n+ */\n+static enum irdma_status_code\n+irdma_hmc_finish_add_sd_reg(struct irdma_sc_dev *dev,\n+\t\t\t    struct irdma_hmc_create_obj_info *info)\n+{\n+\tif (info->start_idx >= info->hmc_info->hmc_obj[info->rsrc_type].cnt)\n+\t\treturn IRDMA_ERR_INVALID_HMC_OBJ_INDEX;\n+\n+\tif ((info->start_idx + info->count) >\n+\t    info->hmc_info->hmc_obj[info->rsrc_type].cnt)\n+\t\treturn IRDMA_ERR_INVALID_HMC_OBJ_COUNT;\n+\n+\tif (!info->add_sd_cnt)\n+\t\treturn 0;\n+\treturn irdma_hmc_sd_grp(dev, info->hmc_info,\n+\t\t\t\tinfo->hmc_info->sd_indexes[0], info->add_sd_cnt,\n+\t\t\t\ttrue);\n+}\n+\n+/**\n+ * irdma_sc_create_hmc_obj - allocate backing store for hmc objects\n+ * @dev: pointer to the device structure\n+ * @info: pointer to irdma_hmc_create_obj_info struct\n+ *\n+ * This will allocate memory for PDs and backing pages and populate\n+ * the sd and pd entries.\n+ */\n+enum irdma_status_code\n+irdma_sc_create_hmc_obj(struct irdma_sc_dev *dev,\n+\t\t\tstruct irdma_hmc_create_obj_info *info)\n+{\n+\tstruct irdma_hmc_sd_entry *sd_entry;\n+\tu32 sd_idx, sd_lmt;\n+\tu32 pd_idx = 0, pd_lmt = 0;\n+\tu32 pd_idx1 = 0, pd_lmt1 = 0;\n+\tu32 i, j;\n+\tbool pd_error = false;\n+\tenum irdma_status_code ret_code = 0;\n+\n+\tif (info->start_idx >= 
info->hmc_info->hmc_obj[info->rsrc_type].cnt)\n+\t\treturn IRDMA_ERR_INVALID_HMC_OBJ_INDEX;\n+\n+\tif ((info->start_idx + info->count) >\n+\t    info->hmc_info->hmc_obj[info->rsrc_type].cnt) {\n+\t\tdev_dbg(rfdev_to_dev(dev),\n+\t\t\t\"HMC: error type %u, start = %u, req cnt %u, cnt = %u\\n\",\n+\t\t\tinfo->rsrc_type, info->start_idx, info->count,\n+\t\t\tinfo->hmc_info->hmc_obj[info->rsrc_type].cnt);\n+\t\treturn IRDMA_ERR_INVALID_HMC_OBJ_COUNT;\n+\t}\n+\n+\tirdma_find_sd_index_limit(info->hmc_info, info->rsrc_type,\n+\t\t\t\t  info->start_idx, info->count, &sd_idx,\n+\t\t\t\t  &sd_lmt);\n+\tif (sd_idx >= info->hmc_info->sd_table.sd_cnt ||\n+\t    sd_lmt > info->hmc_info->sd_table.sd_cnt) {\n+\t\treturn IRDMA_ERR_INVALID_SD_INDEX;\n+\t}\n+\n+\tirdma_find_pd_index_limit(info->hmc_info, info->rsrc_type,\n+\t\t\t\t  info->start_idx, info->count, &pd_idx,\n+\t\t\t\t  &pd_lmt);\n+\n+\tfor (j = sd_idx; j < sd_lmt; j++) {\n+\t\tret_code = irdma_add_sd_table_entry(dev->hw, info->hmc_info, j,\n+\t\t\t\t\t\t    info->entry_type,\n+\t\t\t\t\t\t    IRDMA_HMC_DIRECT_BP_SIZE);\n+\t\tif (ret_code)\n+\t\t\tgoto exit_sd_error;\n+\n+\t\tsd_entry = &info->hmc_info->sd_table.sd_entry[j];\n+\t\tif (sd_entry->entry_type == IRDMA_SD_TYPE_PAGED &&\n+\t\t    (dev->hmc_info == info->hmc_info &&\n+\t\t     info->rsrc_type != IRDMA_HMC_IW_PBLE)) {\n+\t\t\tpd_idx1 = max(pd_idx, (j * IRDMA_HMC_MAX_BP_COUNT));\n+\t\t\tpd_lmt1 = min(pd_lmt, (j + 1) * IRDMA_HMC_MAX_BP_COUNT);\n+\t\t\tfor (i = pd_idx1; i < pd_lmt1; i++) {\n+\t\t\t\t/* update the pd table entry */\n+\t\t\t\tret_code = irdma_add_pd_table_entry(dev,\n+\t\t\t\t\t\t\t\t    info->hmc_info,\n+\t\t\t\t\t\t\t\t    i, NULL);\n+\t\t\t\tif (ret_code) {\n+\t\t\t\t\tpd_error = true;\n+\t\t\t\t\tbreak;\n+\t\t\t\t}\n+\t\t\t}\n+\t\t\tif (pd_error) {\n+\t\t\t\twhile (i && (i > pd_idx1)) {\n+\t\t\t\t\tirdma_remove_pd_bp(dev, info->hmc_info,\n+\t\t\t\t\t\t\t   i - 1);\n+\t\t\t\t\ti--;\n+\t\t\t\t}\n+\t\t\t}\n+\t\t}\n+\t\tif 
(sd_entry->valid)\n+\t\t\tcontinue;\n+\n+\t\tinfo->hmc_info->sd_indexes[info->add_sd_cnt] = (u16)j;\n+\t\tinfo->add_sd_cnt++;\n+\t\tsd_entry->valid = true;\n+\t}\n+\treturn irdma_hmc_finish_add_sd_reg(dev, info);\n+\n+exit_sd_error:\n+\twhile (j && (j > sd_idx)) {\n+\t\tsd_entry = &info->hmc_info->sd_table.sd_entry[j - 1];\n+\t\tswitch (sd_entry->entry_type) {\n+\t\tcase IRDMA_SD_TYPE_PAGED:\n+\t\t\tpd_idx1 = max(pd_idx, (j - 1) * IRDMA_HMC_MAX_BP_COUNT);\n+\t\t\tpd_lmt1 = min(pd_lmt, (j * IRDMA_HMC_MAX_BP_COUNT));\n+\t\t\tfor (i = pd_idx1; i < pd_lmt1; i++)\n+\t\t\t\tirdma_prep_remove_pd_page(info->hmc_info, i);\n+\t\t\tbreak;\n+\t\tcase IRDMA_SD_TYPE_DIRECT:\n+\t\t\tirdma_prep_remove_pd_page(info->hmc_info, (j - 1));\n+\t\t\tbreak;\n+\t\tdefault:\n+\t\t\tret_code = IRDMA_ERR_INVALID_SD_TYPE;\n+\t\t\tbreak;\n+\t\t}\n+\t\tj--;\n+\t}\n+\n+\treturn ret_code;\n+}\n+\n+/**\n+ * irdma_finish_del_sd_reg - delete sd entries for objects\n+ * @dev: pointer to the device structure\n+ * @info: dele obj info\n+ * @reset: true if called before reset\n+ */\n+static enum irdma_status_code\n+irdma_finish_del_sd_reg(struct irdma_sc_dev *dev,\n+\t\t\tstruct irdma_hmc_del_obj_info *info, bool reset)\n+{\n+\tstruct irdma_hmc_sd_entry *sd_entry;\n+\tenum irdma_status_code ret_code = 0;\n+\tu32 i, sd_idx;\n+\tstruct irdma_dma_mem *mem;\n+\n+\tif (dev->is_pf && !reset)\n+\t\tret_code = irdma_hmc_sd_grp(dev, info->hmc_info,\n+\t\t\t\t\t    info->hmc_info->sd_indexes[0],\n+\t\t\t\t\t    info->del_sd_cnt, false);\n+\n+\tif (ret_code)\n+\t\tdev_dbg(rfdev_to_dev(dev), \"HMC: error cqp sd sd_grp\\n\");\n+\tfor (i = 0; i < info->del_sd_cnt; i++) {\n+\t\tsd_idx = info->hmc_info->sd_indexes[i];\n+\t\tsd_entry = &info->hmc_info->sd_table.sd_entry[sd_idx];\n+\t\tif (!sd_entry)\n+\t\t\tcontinue;\n+\t\tmem = (sd_entry->entry_type == IRDMA_SD_TYPE_PAGED) ?\n+\t\t\t      &sd_entry->u.pd_table.pd_page_addr :\n+\t\t\t      &sd_entry->u.bp.addr;\n+\n+\t\tif (!mem || !mem->va) 
{\n+\t\t\tdev_dbg(rfdev_to_dev(dev), \"HMC: error cqp sd mem\\n\");\n+\t\t} else {\n+\t\t\tdma_free_coherent(hw_to_dev(dev->hw), mem->size,\n+\t\t\t\t\t  mem->va, mem->pa);\n+\t\t\tmem->va = NULL;\n+\t\t}\n+\t}\n+\n+\treturn ret_code;\n+}\n+\n+/**\n+ * irdma_sc_del_hmc_obj - remove pe hmc objects\n+ * @dev: pointer to the device structure\n+ * @info: pointer to irdma_hmc_del_obj_info struct\n+ * @reset: true if called before reset\n+ *\n+ * This will de-populate the SDs and PDs.  It frees\n+ * the memory for PDS and backing storage.  After this function is returned,\n+ * caller should deallocate memory allocated previously for\n+ * book-keeping information about PDs and backing storage.\n+ */\n+enum irdma_status_code irdma_sc_del_hmc_obj(struct irdma_sc_dev *dev,\n+\t\t\t\t\t    struct irdma_hmc_del_obj_info *info,\n+\t\t\t\t\t    bool reset)\n+{\n+\tstruct irdma_hmc_pd_table *pd_table;\n+\tu32 sd_idx, sd_lmt;\n+\tu32 pd_idx, pd_lmt, rel_pd_idx;\n+\tu32 i, j;\n+\tenum irdma_status_code ret_code = 0;\n+\n+\tif (info->start_idx >= info->hmc_info->hmc_obj[info->rsrc_type].cnt) {\n+\t\tdev_dbg(rfdev_to_dev(dev),\n+\t\t\t\"HMC: error start_idx[%04d]  >= [type %04d].cnt[%04d]\\n\",\n+\t\t\tinfo->start_idx, info->rsrc_type,\n+\t\t\tinfo->hmc_info->hmc_obj[info->rsrc_type].cnt);\n+\t\treturn IRDMA_ERR_INVALID_HMC_OBJ_INDEX;\n+\t}\n+\n+\tif ((info->start_idx + info->count) >\n+\t    info->hmc_info->hmc_obj[info->rsrc_type].cnt) {\n+\t\tdev_dbg(rfdev_to_dev(dev),\n+\t\t\t\"HMC: error start_idx[%04d] + count %04d  >= [type %04d].cnt[%04d]\\n\",\n+\t\t\tinfo->start_idx, info->count, info->rsrc_type,\n+\t\t\tinfo->hmc_info->hmc_obj[info->rsrc_type].cnt);\n+\t\treturn IRDMA_ERR_INVALID_HMC_OBJ_COUNT;\n+\t}\n+\n+\tirdma_find_pd_index_limit(info->hmc_info, info->rsrc_type,\n+\t\t\t\t  info->start_idx, info->count, &pd_idx,\n+\t\t\t\t  &pd_lmt);\n+\n+\tfor (j = pd_idx; j < pd_lmt; j++) {\n+\t\tsd_idx = j / IRDMA_HMC_PD_CNT_IN_SD;\n+\n+\t\tif 
(!info->hmc_info->sd_table.sd_entry[sd_idx].valid)\n+\t\t\tcontinue;\n+\n+\t\tif (info->hmc_info->sd_table.sd_entry[sd_idx].entry_type !=\n+\t\t    IRDMA_SD_TYPE_PAGED)\n+\t\t\tcontinue;\n+\n+\t\trel_pd_idx = j % IRDMA_HMC_PD_CNT_IN_SD;\n+\t\tpd_table = &info->hmc_info->sd_table.sd_entry[sd_idx].u.pd_table;\n+\t\tif (pd_table->pd_entry &&\n+\t\t    pd_table->pd_entry[rel_pd_idx].valid) {\n+\t\t\tret_code = irdma_remove_pd_bp(dev, info->hmc_info, j);\n+\t\t\tif (ret_code) {\n+\t\t\t\tdev_dbg(rfdev_to_dev(dev),\n+\t\t\t\t\t\"HMC: remove_pd_bp error\\n\");\n+\t\t\t\treturn ret_code;\n+\t\t\t}\n+\t\t}\n+\t}\n+\n+\tirdma_find_sd_index_limit(info->hmc_info, info->rsrc_type,\n+\t\t\t\t  info->start_idx, info->count, &sd_idx,\n+\t\t\t\t  &sd_lmt);\n+\tif (sd_idx >= info->hmc_info->sd_table.sd_cnt ||\n+\t    sd_lmt > info->hmc_info->sd_table.sd_cnt) {\n+\t\tdev_dbg(rfdev_to_dev(dev), \"HMC: invalid sd_idx\\n\");\n+\t\treturn IRDMA_ERR_INVALID_SD_INDEX;\n+\t}\n+\n+\tfor (i = sd_idx; i < sd_lmt; i++) {\n+\t\tpd_table = &info->hmc_info->sd_table.sd_entry[i].u.pd_table;\n+\t\tif (!info->hmc_info->sd_table.sd_entry[i].valid)\n+\t\t\tcontinue;\n+\t\tswitch (info->hmc_info->sd_table.sd_entry[i].entry_type) {\n+\t\tcase IRDMA_SD_TYPE_DIRECT:\n+\t\t\tret_code = irdma_prep_remove_sd_bp(info->hmc_info, i);\n+\t\t\tif (!ret_code) {\n+\t\t\t\tinfo->hmc_info->sd_indexes[info->del_sd_cnt] =\n+\t\t\t\t\t(u16)i;\n+\t\t\t\tinfo->del_sd_cnt++;\n+\t\t\t}\n+\t\t\tbreak;\n+\t\tcase IRDMA_SD_TYPE_PAGED:\n+\t\t\tret_code = irdma_prep_remove_pd_page(info->hmc_info, i);\n+\t\t\tif (ret_code)\n+\t\t\t\tbreak;\n+\t\t\tif (dev->hmc_info != info->hmc_info &&\n+\t\t\t    info->rsrc_type == IRDMA_HMC_IW_PBLE &&\n+\t\t\t    pd_table->pd_entry) {\n+\t\t\t\tkfree(pd_table->pd_entry_virt_mem.va);\n+\t\t\t\tpd_table->pd_entry = NULL;\n+\t\t\t}\n+\t\t\tinfo->hmc_info->sd_indexes[info->del_sd_cnt] = (u16)i;\n+\t\t\tinfo->del_sd_cnt++;\n+\t\t\tbreak;\n+\t\tdefault:\n+\t\t\tbreak;\n+\t\t}\n+\t}\n+\treturn 
irdma_finish_del_sd_reg(dev, info, reset);\n+}\n+\n+/**\n+ * irdma_add_sd_table_entry - Adds a segment descriptor to the table\n+ * @hw: pointer to our hw struct\n+ * @hmc_info: pointer to the HMC configuration information struct\n+ * @sd_index: segment descriptor index to manipulate\n+ * @type: what type of segment descriptor we're manipulating\n+ * @direct_mode_sz: size to alloc in direct mode\n+ */\n+enum irdma_status_code irdma_add_sd_table_entry(struct irdma_hw *hw,\n+\t\t\t\t\t\tstruct irdma_hmc_info *hmc_info,\n+\t\t\t\t\t\tu32 sd_index,\n+\t\t\t\t\t\tenum irdma_sd_entry_type type,\n+\t\t\t\t\t\tu64 direct_mode_sz)\n+{\n+\tstruct irdma_hmc_sd_entry *sd_entry;\n+\tstruct irdma_dma_mem dma_mem;\n+\tu64 alloc_len;\n+\n+\tsd_entry = &hmc_info->sd_table.sd_entry[sd_index];\n+\tif (!sd_entry->valid) {\n+\t\tif (type == IRDMA_SD_TYPE_PAGED)\n+\t\t\talloc_len = IRDMA_HMC_PAGED_BP_SIZE;\n+\t\telse\n+\t\t\talloc_len = direct_mode_sz;\n+\n+\t\t/* allocate a 4K pd page or 2M backing page */\n+\t\tdma_mem.size = ALIGN(alloc_len, IRDMA_HMC_PD_BP_BUF_ALIGNMENT);\n+\t\tdma_mem.va = dma_alloc_coherent(hw_to_dev(hw),\n+\t\t\t\t\t\tdma_mem.size, &dma_mem.pa,\n+\t\t\t\t\t\tGFP_ATOMIC);\n+\t\tif (!dma_mem.va)\n+\t\t\treturn IRDMA_ERR_NO_MEMORY;\n+\n+\t\tif (type == IRDMA_SD_TYPE_PAGED) {\n+\t\t\tstruct irdma_virt_mem *vmem =\n+\t\t\t\t&sd_entry->u.pd_table.pd_entry_virt_mem;\n+\n+\t\t\tvmem->size = sizeof(struct irdma_hmc_pd_entry) * 512;\n+\t\t\tvmem->va = kzalloc(vmem->size, GFP_ATOMIC);\n+\t\t\tif (!vmem->va) {\n+\t\t\t\tdma_free_coherent(hw_to_dev(hw), dma_mem.size,\n+\t\t\t\t\t\t  dma_mem.va, dma_mem.pa);\n+\t\t\t\tdma_mem.va = NULL;\n+\t\t\t\treturn IRDMA_ERR_NO_MEMORY;\n+\t\t\t}\n+\t\t\tsd_entry->u.pd_table.pd_entry = vmem->va;\n+\n+\t\t\tmemcpy(&sd_entry->u.pd_table.pd_page_addr, &dma_mem,\n+\t\t\t       sizeof(sd_entry->u.pd_table.pd_page_addr));\n+\t\t} else {\n+\t\t\tmemcpy(&sd_entry->u.bp.addr, &dma_mem,\n+\t\t\t       
sizeof(sd_entry->u.bp.addr));\n+\n+\t\t\tsd_entry->u.bp.sd_pd_index = sd_index;\n+\t\t}\n+\n+\t\thmc_info->sd_table.sd_entry[sd_index].entry_type = type;\n+\t\tIRDMA_INC_SD_REFCNT(&hmc_info->sd_table);\n+\t}\n+\tif (sd_entry->entry_type == IRDMA_SD_TYPE_DIRECT)\n+\t\tIRDMA_INC_BP_REFCNT(&sd_entry->u.bp);\n+\n+\treturn 0;\n+}\n+\n+/**\n+ * irdma_add_pd_table_entry - Adds page descriptor to the specified table\n+ * @dev: pointer to our device structure\n+ * @hmc_info: pointer to the HMC configuration information structure\n+ * @pd_index: which page descriptor index to manipulate\n+ * @rsrc_pg: if not NULL, use preallocated page instead of allocating new one.\n+ *\n+ * This function:\n+ *\t1. Initializes the pd entry\n+ *\t2. Adds pd_entry in the pd_table\n+ *\t3. Mark the entry valid in irdma_hmc_pd_entry structure\n+ *\t4. Initializes the pd_entry's ref count to 1\n+ * assumptions:\n+ *\t1. The memory for pd should be pinned down, physically contiguous and\n+ *\t   aligned on 4K boundary and zeroed memory.\n+ *\t2. 
It should be 4K in size.\n+ */\n+enum irdma_status_code irdma_add_pd_table_entry(struct irdma_sc_dev *dev,\n+\t\t\t\t\t\tstruct irdma_hmc_info *hmc_info,\n+\t\t\t\t\t\tu32 pd_index,\n+\t\t\t\t\t\tstruct irdma_dma_mem *rsrc_pg)\n+{\n+\tstruct irdma_hmc_pd_table *pd_table;\n+\tstruct irdma_hmc_pd_entry *pd_entry;\n+\tstruct irdma_dma_mem mem;\n+\tstruct irdma_dma_mem *page = &mem;\n+\tu32 sd_idx, rel_pd_idx;\n+\tu64 *pd_addr;\n+\tu64 page_desc;\n+\n+\tif (pd_index / IRDMA_HMC_PD_CNT_IN_SD >= hmc_info->sd_table.sd_cnt)\n+\t\treturn IRDMA_ERR_INVALID_PAGE_DESC_INDEX;\n+\n+\tsd_idx = (pd_index / IRDMA_HMC_PD_CNT_IN_SD);\n+\tif (hmc_info->sd_table.sd_entry[sd_idx].entry_type !=\n+\t    IRDMA_SD_TYPE_PAGED)\n+\t\treturn 0;\n+\n+\trel_pd_idx = (pd_index % IRDMA_HMC_PD_CNT_IN_SD);\n+\tpd_table = &hmc_info->sd_table.sd_entry[sd_idx].u.pd_table;\n+\tpd_entry = &pd_table->pd_entry[rel_pd_idx];\n+\tif (!pd_entry->valid) {\n+\t\tif (rsrc_pg) {\n+\t\t\tpd_entry->rsrc_pg = true;\n+\t\t\tpage = rsrc_pg;\n+\t\t} else {\n+\t\t\tpage->size = ALIGN(IRDMA_HMC_PAGED_BP_SIZE,\n+\t\t\t\t\t   IRDMA_HMC_PD_BP_BUF_ALIGNMENT);\n+\t\t\tpage->va = dma_alloc_coherent(hw_to_dev(dev->hw),\n+\t\t\t\t\t\t      page->size, &page->pa,\n+\t\t\t\t\t\t      GFP_KERNEL);\n+\t\t\tif (!page->va)\n+\t\t\t\treturn IRDMA_ERR_NO_MEMORY;\n+\n+\t\t\tpd_entry->rsrc_pg = false;\n+\t\t}\n+\n+\t\tmemcpy(&pd_entry->bp.addr, page, sizeof(pd_entry->bp.addr));\n+\t\tpd_entry->bp.sd_pd_index = pd_index;\n+\t\tpd_entry->bp.entry_type = IRDMA_SD_TYPE_PAGED;\n+\t\tpage_desc = page->pa | 0x1;\n+\t\tpd_addr = pd_table->pd_page_addr.va;\n+\t\tpd_addr += rel_pd_idx;\n+\t\tmemcpy(pd_addr, &page_desc, sizeof(*pd_addr));\n+\t\tpd_entry->sd_index = sd_idx;\n+\t\tpd_entry->valid = true;\n+\t\tIRDMA_INC_PD_REFCNT(pd_table);\n+\t\tif (hmc_info->hmc_fn_id < dev->hw_attrs.first_hw_vf_fpm_id)\n+\t\t\tIRDMA_INVALIDATE_PF_HMC_PD(dev, sd_idx, rel_pd_idx);\n+\t\telse if (dev->hw->hmc.hmc_fn_id != 
hmc_info->hmc_fn_id)\n+\t\t\tIRDMA_INVALIDATE_VF_HMC_PD(dev, sd_idx, rel_pd_idx,\n+\t\t\t\t\t\t   hmc_info->hmc_fn_id);\n+\t}\n+\tIRDMA_INC_BP_REFCNT(&pd_entry->bp);\n+\n+\treturn 0;\n+}\n+\n+/**\n+ * irdma_remove_pd_bp - remove a backing page from a page descriptor\n+ * @dev: pointer to our HW structure\n+ * @hmc_info: pointer to the HMC configuration information structure\n+ * @idx: the page index\n+ *\n+ * This function:\n+ *\t1. Marks the entry in pd table (for paged address mode) or in sd table\n+ *\t   (for direct address mode) invalid.\n+ *\t2. Write to register PMPDINV to invalidate the backing page in FV cache\n+ *\t3. Decrement the ref count for the pd _entry\n+ * assumptions:\n+ *\t1. Caller can deallocate the memory used by backing storage after this\n+ *\t   function returns.\n+ */\n+enum irdma_status_code irdma_remove_pd_bp(struct irdma_sc_dev *dev,\n+\t\t\t\t\t  struct irdma_hmc_info *hmc_info,\n+\t\t\t\t\t  u32 idx)\n+{\n+\tstruct irdma_hmc_pd_entry *pd_entry;\n+\tstruct irdma_hmc_pd_table *pd_table;\n+\tstruct irdma_hmc_sd_entry *sd_entry;\n+\tu32 sd_idx, rel_pd_idx;\n+\tstruct irdma_dma_mem *mem;\n+\tu64 *pd_addr;\n+\n+\tsd_idx = idx / IRDMA_HMC_PD_CNT_IN_SD;\n+\trel_pd_idx = idx % IRDMA_HMC_PD_CNT_IN_SD;\n+\tif (sd_idx >= hmc_info->sd_table.sd_cnt)\n+\t\treturn IRDMA_ERR_INVALID_PAGE_DESC_INDEX;\n+\n+\tsd_entry = &hmc_info->sd_table.sd_entry[sd_idx];\n+\tif (sd_entry->entry_type != IRDMA_SD_TYPE_PAGED)\n+\t\treturn IRDMA_ERR_INVALID_SD_TYPE;\n+\n+\tpd_table = &hmc_info->sd_table.sd_entry[sd_idx].u.pd_table;\n+\tpd_entry = &pd_table->pd_entry[rel_pd_idx];\n+\tIRDMA_DEC_BP_REFCNT(&pd_entry->bp);\n+\tif (pd_entry->bp.ref_cnt)\n+\t\treturn 0;\n+\n+\tpd_entry->valid = false;\n+\tIRDMA_DEC_PD_REFCNT(pd_table);\n+\tpd_addr = pd_table->pd_page_addr.va;\n+\tpd_addr += rel_pd_idx;\n+\tmemset(pd_addr, 0, sizeof(u64));\n+\tif (dev->is_pf) {\n+\t\tif (dev->hmc_fn_id == hmc_info->hmc_fn_id)\n+\t\t\tIRDMA_INVALIDATE_PF_HMC_PD(dev, sd_idx, 
idx);\n+\t\telse\n+\t\t\tIRDMA_INVALIDATE_VF_HMC_PD(dev, sd_idx, idx,\n+\t\t\t\t\t\t   hmc_info->hmc_fn_id);\n+\t}\n+\n+\tif (!pd_entry->rsrc_pg) {\n+\t\tmem = &pd_entry->bp.addr;\n+\t\tif (!mem || !mem->va)\n+\t\t\treturn IRDMA_ERR_PARAM;\n+\n+\t\tdma_free_coherent(hw_to_dev(dev->hw), mem->size, mem->va,\n+\t\t\t\t  mem->pa);\n+\t\tmem->va = NULL;\n+\t}\n+\tif (!pd_table->ref_cnt)\n+\t\tkfree(pd_table->pd_entry_virt_mem.va);\n+\n+\treturn 0;\n+}\n+\n+/**\n+ * irdma_prep_remove_sd_bp - Prepares to remove a backing page from a sd entry\n+ * @hmc_info: pointer to the HMC configuration information structure\n+ * @idx: the page index\n+ */\n+enum irdma_status_code irdma_prep_remove_sd_bp(struct irdma_hmc_info *hmc_info,\n+\t\t\t\t\t       u32 idx)\n+{\n+\tstruct irdma_hmc_sd_entry *sd_entry;\n+\n+\tsd_entry = &hmc_info->sd_table.sd_entry[idx];\n+\tIRDMA_DEC_BP_REFCNT(&sd_entry->u.bp);\n+\tif (sd_entry->u.bp.ref_cnt)\n+\t\treturn IRDMA_ERR_NOT_READY;\n+\n+\tIRDMA_DEC_SD_REFCNT(&hmc_info->sd_table);\n+\tsd_entry->valid = false;\n+\n+\treturn 0;\n+}\n+\n+/**\n+ * irdma_prep_remove_pd_page - Prepares to remove a PD page from sd entry.\n+ * @hmc_info: pointer to the HMC configuration information structure\n+ * @idx: segment descriptor index to find the relevant page descriptor\n+ */\n+enum irdma_status_code\n+irdma_prep_remove_pd_page(struct irdma_hmc_info *hmc_info, u32 idx)\n+{\n+\tstruct irdma_hmc_sd_entry *sd_entry;\n+\n+\tsd_entry = &hmc_info->sd_table.sd_entry[idx];\n+\n+\tif (sd_entry->u.pd_table.ref_cnt)\n+\t\treturn IRDMA_ERR_NOT_READY;\n+\n+\tsd_entry->valid = false;\n+\tIRDMA_DEC_SD_REFCNT(&hmc_info->sd_table);\n+\n+\treturn 0;\n+}\ndiff --git a/drivers/infiniband/hw/irdma/hmc.h b/drivers/infiniband/hw/irdma/hmc.h\nnew file mode 100644\nindex 0000000..9a529f8\n--- /dev/null\n+++ b/drivers/infiniband/hw/irdma/hmc.h\n@@ -0,0 +1,219 @@\n+/* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB */\n+/* Copyright (c) 2019, Intel Corporation. 
*/\n+\n+#ifndef IRDMA_HMC_H\n+#define IRDMA_HMC_H\n+\n+#include \"defs.h\"\n+\n+#define IRDMA_HMC_MAX_BP_COUNT\t\t\t512\n+#define IRDMA_MAX_SD_ENTRIES\t\t\t11\n+#define IRDMA_HW_DBG_HMC_INVALID_BP_MARK\t0xca\n+#define IRDMA_HMC_INFO_SIGNATURE\t\t0x484d5347\n+#define IRDMA_HMC_PD_CNT_IN_SD\t\t\t512\n+#define IRDMA_HMC_DIRECT_BP_SIZE\t\t0x200000\n+#define IRDMA_HMC_MAX_SD_COUNT\t\t\t8192\n+#define IRDMA_HMC_PAGED_BP_SIZE\t\t\t4096\n+#define IRDMA_HMC_PD_BP_BUF_ALIGNMENT\t\t4096\n+#define IRDMA_FIRST_VF_FPM_ID\t\t\t8\n+#define FPM_MULTIPLIER\t\t\t\t1024\n+\n+#define IRDMA_INC_SD_REFCNT(sd_table)\t((sd_table)->ref_cnt++)\n+#define IRDMA_INC_PD_REFCNT(pd_table)\t((pd_table)->ref_cnt++)\n+#define IRDMA_INC_BP_REFCNT(bp)\t\t((bp)->ref_cnt++)\n+\n+#define IRDMA_DEC_SD_REFCNT(sd_table)\t((sd_table)->ref_cnt--)\n+#define IRDMA_DEC_PD_REFCNT(pd_table)\t((pd_table)->ref_cnt--)\n+#define IRDMA_DEC_BP_REFCNT(bp)\t\t((bp)->ref_cnt--)\n+\n+/**\n+ * IRDMA_INVALIDATE_PF_HMC_PD - Invalidates the pd cache in the hardware\n+ * @hw: pointer to our hw struct\n+ * @sd_idx: segment descriptor index\n+ * @pd_idx: page descriptor index\n+ */\n+#define IRDMA_INVALIDATE_PF_HMC_PD(dev, sd_idx, pd_idx)\t\t\t\\\n+\twr32((dev)->hw, (dev)->hw_regs[IRDMA_PFHMC_PDINV],\t\t\\\n+\t\t(((sd_idx) << IRDMA_PFHMC_PDINV_PMSDIDX_S) |\t\t\\\n+\t\t(0x1 << IRDMA_PFHMC_PDINV_PMSDPARTSEL_S) |\t\t\\\n+\t\t((pd_idx) << IRDMA_PFHMC_PDINV_PMPDIDX_S)))\n+\n+/**\n+ * IRDMA_INVALIDATE_VF_HMC_PD - Invalidates the pd cache in the hardware\n+ * @hw: pointer to our hw struct\n+ * @sd_idx: segment descriptor index\n+ * @pd_idx: page descriptor index\n+ * @hmc_fn_id: VF's function id\n+ */\n+#define IRDMA_INVALIDATE_VF_HMC_PD(dev, sd_idx, pd_idx, hmc_fn_id)\t\\\n+\twr32((dev)->hw,\t\t\t\t\t\t\t\\\n+\t     (dev)->hw_regs[IRDMA_GLHMC_VFPDINV] +\t\t\t\\\n+\t     4 * ((hmc_fn_id) - (dev)->hw_attrs.first_hw_vf_fpm_id),\t\\\n+\t     (((sd_idx) << IRDMA_PFHMC_PDINV_PMSDIDX_S) |\t\t\\\n+\t      ((pd_idx) << 
IRDMA_PFHMC_PDINV_PMPDIDX_S)))\n+\n+enum irdma_hmc_rsrc_type {\n+\tIRDMA_HMC_IW_QP\t\t = 0,\n+\tIRDMA_HMC_IW_CQ\t\t = 1,\n+\tIRDMA_HMC_IW_RESERVED\t = 2,\n+\tIRDMA_HMC_IW_HTE\t = 3,\n+\tIRDMA_HMC_IW_ARP\t = 4,\n+\tIRDMA_HMC_IW_APBVT_ENTRY = 5,\n+\tIRDMA_HMC_IW_MR\t\t = 6,\n+\tIRDMA_HMC_IW_XF\t\t = 7,\n+\tIRDMA_HMC_IW_XFFL\t = 8,\n+\tIRDMA_HMC_IW_Q1\t\t = 9,\n+\tIRDMA_HMC_IW_Q1FL\t = 10,\n+\tIRDMA_HMC_IW_TIMER       = 11,\n+\tIRDMA_HMC_IW_FSIMC       = 12,\n+\tIRDMA_HMC_IW_FSIAV       = 13,\n+\tIRDMA_HMC_IW_PBLE\t = 14,\n+\tIRDMA_HMC_IW_RRF\t = 15,\n+\tIRDMA_HMC_IW_RRFFL       = 16,\n+\tIRDMA_HMC_IW_HDR\t = 17,\n+\tIRDMA_HMC_IW_MD\t\t = 18,\n+\tIRDMA_HMC_IW_OOISC       = 19,\n+\tIRDMA_HMC_IW_OOISCFFL    = 20,\n+\tIRDMA_HMC_IW_MAX, /* Must be last entry */\n+};\n+\n+enum irdma_sd_entry_type {\n+\tIRDMA_SD_TYPE_INVALID = 0,\n+\tIRDMA_SD_TYPE_PAGED   = 1,\n+\tIRDMA_SD_TYPE_DIRECT  = 2,\n+};\n+\n+struct irdma_hmc_obj_info {\n+\tu64 base;\n+\tu32 max_cnt;\n+\tu32 cnt;\n+\tu64 size;\n+};\n+\n+struct irdma_hmc_bp {\n+\tenum irdma_sd_entry_type entry_type;\n+\tstruct irdma_dma_mem addr;\n+\tu32 sd_pd_index;\n+\tu32 ref_cnt;\n+};\n+\n+struct irdma_hmc_pd_entry {\n+\tstruct irdma_hmc_bp bp;\n+\tu32 sd_index;\n+\tbool rsrc_pg;\n+\tbool valid;\n+};\n+\n+struct irdma_hmc_pd_table {\n+\tstruct irdma_dma_mem pd_page_addr;\n+\tstruct irdma_hmc_pd_entry *pd_entry;\n+\tstruct irdma_virt_mem pd_entry_virt_mem;\n+\tu32 ref_cnt;\n+\tu32 sd_index;\n+};\n+\n+struct irdma_hmc_sd_entry {\n+\tenum irdma_sd_entry_type entry_type;\n+\tbool valid;\n+\tunion {\n+\t\tstruct irdma_hmc_pd_table pd_table;\n+\t\tstruct irdma_hmc_bp bp;\n+\t} u;\n+};\n+\n+struct irdma_hmc_sd_table {\n+\tstruct irdma_virt_mem addr;\n+\tu32 sd_cnt;\n+\tu32 ref_cnt;\n+\tstruct irdma_hmc_sd_entry *sd_entry;\n+};\n+\n+struct irdma_hmc_info {\n+\tu32 signature;\n+\tu8 hmc_fn_id;\n+\tu16 first_sd_index;\n+\tstruct irdma_hmc_obj_info *hmc_obj;\n+\tstruct irdma_virt_mem hmc_obj_virt_mem;\n+\tstruct irdma_hmc_sd_table 
sd_table;\n+\tu16 sd_indexes[IRDMA_HMC_MAX_SD_COUNT];\n+};\n+\n+struct irdma_update_sd_entry {\n+\tu64 cmd;\n+\tu64 data;\n+};\n+\n+struct irdma_update_sds_info {\n+\tu32 cnt;\n+\tu8 hmc_fn_id;\n+\tstruct irdma_update_sd_entry entry[IRDMA_MAX_SD_ENTRIES];\n+};\n+\n+struct irdma_ccq_cqe_info;\n+struct irdma_hmc_fcn_info {\n+\tvoid (*callback_fcn)(struct irdma_sc_dev *dev, void *cqp_callback_param,\n+\t\t\t     struct irdma_ccq_cqe_info *ccq_cqe_info);\n+\tvoid *cqp_callback_param;\n+\tu32 vf_id;\n+\tu16 iw_vf_idx;\n+\tbool free_fcn;\n+};\n+\n+struct irdma_hmc_create_obj_info {\n+\tstruct irdma_hmc_info *hmc_info;\n+\tstruct irdma_virt_mem add_sd_virt_mem;\n+\tu32 rsrc_type;\n+\tu32 start_idx;\n+\tu32 count;\n+\tu32 add_sd_cnt;\n+\tenum irdma_sd_entry_type entry_type;\n+\tbool is_pf;\n+};\n+\n+struct irdma_hmc_del_obj_info {\n+\tstruct irdma_hmc_info *hmc_info;\n+\tstruct irdma_virt_mem del_sd_virt_mem;\n+\tu32 rsrc_type;\n+\tu32 start_idx;\n+\tu32 count;\n+\tu32 del_sd_cnt;\n+\tbool is_pf;\n+};\n+\n+enum irdma_status_code irdma_copy_dma_mem(struct irdma_hw *hw, void *dest_buf,\n+\t\t\t\t\t  struct irdma_dma_mem *src_mem,\n+\t\t\t\t\t  u64 src_offset, u64 size);\n+enum irdma_status_code\n+irdma_sc_create_hmc_obj(struct irdma_sc_dev *dev,\n+\t\t\tstruct irdma_hmc_create_obj_info *info);\n+enum irdma_status_code irdma_sc_del_hmc_obj(struct irdma_sc_dev *dev,\n+\t\t\t\t\t    struct irdma_hmc_del_obj_info *info,\n+\t\t\t\t\t    bool reset);\n+enum irdma_status_code irdma_hmc_sd_one(struct irdma_sc_dev *dev, u8 hmc_fn_id,\n+\t\t\t\t\tu64 pa, u32 sd_idx,\n+\t\t\t\t\tenum irdma_sd_entry_type type,\n+\t\t\t\t\tbool setsd);\n+enum irdma_status_code\n+irdma_update_sds_noccq(struct irdma_sc_dev *dev,\n+\t\t       struct irdma_update_sds_info *info);\n+struct irdma_vfdev *irdma_vfdev_from_fpm(struct irdma_sc_dev *dev,\n+\t\t\t\t\t u8 hmc_fn_id);\n+struct irdma_hmc_info *irdma_vf_hmcinfo_from_fpm(struct irdma_sc_dev *dev,\n+\t\t\t\t\t\t u8 hmc_fn_id);\n+enum irdma_status_code 
irdma_add_sd_table_entry(struct irdma_hw *hw,\n+\t\t\t\t\t\tstruct irdma_hmc_info *hmc_info,\n+\t\t\t\t\t\tu32 sd_index,\n+\t\t\t\t\t\tenum irdma_sd_entry_type type,\n+\t\t\t\t\t\tu64 direct_mode_sz);\n+enum irdma_status_code irdma_add_pd_table_entry(struct irdma_sc_dev *dev,\n+\t\t\t\t\t\tstruct irdma_hmc_info *hmc_info,\n+\t\t\t\t\t\tu32 pd_index,\n+\t\t\t\t\t\tstruct irdma_dma_mem *rsrc_pg);\n+enum irdma_status_code irdma_remove_pd_bp(struct irdma_sc_dev *dev,\n+\t\t\t\t\t  struct irdma_hmc_info *hmc_info,\n+\t\t\t\t\t  u32 idx);\n+enum irdma_status_code irdma_prep_remove_sd_bp(struct irdma_hmc_info *hmc_info,\n+\t\t\t\t\t       u32 idx);\n+enum irdma_status_code\n+irdma_prep_remove_pd_page(struct irdma_hmc_info *hmc_info, u32 idx);\n+#endif /* IRDMA_HMC_H */\n",
    "prefixes": [
        "rdma-next",
        "04/17"
    ]
}