get:
Show a patch.

patch:
Partially update a patch; only the fields supplied in the request are changed.

put:
Update a patch, replacing its writable fields.
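
For illustration, the GET request shown below can be reproduced with any HTTP client. A minimal Python sketch using the requests library (the patch ID 2165662 and the field names come from the response on this page; read access to this public instance does not appear to require authentication):

import requests

# Fetch a single patch as JSON from the Patchwork REST API.
url = "http://patchwork.ozlabs.org/api/patches/2165662/"
resp = requests.get(url, headers={"Accept": "application/json"}, timeout=30)
resp.raise_for_status()

patch = resp.json()
print(patch["name"])   # patch subject, e.g. "[iwl-next,v5,06/15] libie: ..."
print(patch["state"])  # e.g. "under-review"
print(patch["mbox"])   # mbox URL suitable for feeding to `git am`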

GET /api/patches/2165662/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 2165662,
    "url": "http://patchwork.ozlabs.org/api/patches/2165662/?format=api",
    "web_url": "http://patchwork.ozlabs.org/project/intel-wired-lan/patch/20251117134912.18566-7-larysa.zaremba@intel.com/",
    "project": {
        "id": 46,
        "url": "http://patchwork.ozlabs.org/api/projects/46/?format=api",
        "name": "Intel Wired Ethernet development",
        "link_name": "intel-wired-lan",
        "list_id": "intel-wired-lan.osuosl.org",
        "list_email": "intel-wired-lan@osuosl.org",
        "web_url": "",
        "scm_url": "",
        "webscm_url": "",
        "list_archive_url": "",
        "list_archive_url_format": "",
        "commit_url_format": ""
    },
    "msgid": "<20251117134912.18566-7-larysa.zaremba@intel.com>",
    "list_archive_url": null,
    "date": "2025-11-17T13:48:46",
    "name": "[iwl-next,v5,06/15] libie: add bookkeeping support for control queue messages",
    "commit_ref": null,
    "pull_url": null,
    "state": "under-review",
    "archived": false,
    "hash": "c86f18e45f0a5c6299c8ceb8988dfb51087ad000",
    "submitter": {
        "id": 84900,
        "url": "http://patchwork.ozlabs.org/api/people/84900/?format=api",
        "name": "Larysa Zaremba",
        "email": "larysa.zaremba@intel.com"
    },
    "delegate": {
        "id": 109701,
        "url": "http://patchwork.ozlabs.org/api/users/109701/?format=api",
        "username": "anguy11",
        "first_name": "Anthony",
        "last_name": "Nguyen",
        "email": "anthony.l.nguyen@intel.com"
    },
    "mbox": "http://patchwork.ozlabs.org/project/intel-wired-lan/patch/20251117134912.18566-7-larysa.zaremba@intel.com/mbox/",
    "series": [
        {
            "id": 482391,
            "url": "http://patchwork.ozlabs.org/api/series/482391/?format=api",
            "web_url": "http://patchwork.ozlabs.org/project/intel-wired-lan/list/?series=482391",
            "date": "2025-11-17T13:48:40",
            "name": "Introduce iXD driver",
            "version": 5,
            "mbox": "http://patchwork.ozlabs.org/series/482391/mbox/"
        }
    ],
    "comments": "http://patchwork.ozlabs.org/api/patches/2165662/comments/",
    "check": "pending",
    "checks": "http://patchwork.ozlabs.org/api/patches/2165662/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<intel-wired-lan-bounces@osuosl.org>",
        "X-Original-To": [
            "incoming@patchwork.ozlabs.org",
            "intel-wired-lan@lists.osuosl.org"
        ],
        "Delivered-To": [
            "patchwork-incoming@legolas.ozlabs.org",
            "intel-wired-lan@lists.osuosl.org"
        ],
        "Authentication-Results": [
            "legolas.ozlabs.org;\n\tdkim=pass (2048-bit key;\n unprotected) header.d=osuosl.org header.i=@osuosl.org header.a=rsa-sha256\n header.s=default header.b=luu8c6Lb;\n\tdkim-atps=neutral",
            "legolas.ozlabs.org;\n spf=pass (sender SPF authorized) smtp.mailfrom=osuosl.org\n (client-ip=140.211.166.136; helo=smtp3.osuosl.org;\n envelope-from=intel-wired-lan-bounces@osuosl.org;\n receiver=patchwork.ozlabs.org)"
        ],
        "Received": [
            "from smtp3.osuosl.org (smtp3.osuosl.org [140.211.166.136])\n\t(using TLSv1.3 with cipher TLS_AES_256_GCM_SHA384 (256/256 bits)\n\t key-exchange X25519 server-signature ECDSA (secp384r1) server-digest SHA384)\n\t(No client certificate requested)\n\tby legolas.ozlabs.org (Postfix) with ESMTPS id 4d98Hl4Jsfz1yDb\n\tfor <incoming@patchwork.ozlabs.org>; Tue, 18 Nov 2025 00:49:39 +1100 (AEDT)",
            "from localhost (localhost [127.0.0.1])\n\tby smtp3.osuosl.org (Postfix) with ESMTP id A805760DAD;\n\tMon, 17 Nov 2025 13:49:37 +0000 (UTC)",
            "from smtp3.osuosl.org ([127.0.0.1])\n by localhost (smtp3.osuosl.org [127.0.0.1]) (amavis, port 10024) with ESMTP\n id 4eWlsZinXs3E; Mon, 17 Nov 2025 13:49:36 +0000 (UTC)",
            "from lists1.osuosl.org (lists1.osuosl.org [140.211.166.142])\n\tby smtp3.osuosl.org (Postfix) with ESMTP id 3BA5360717;\n\tMon, 17 Nov 2025 13:49:36 +0000 (UTC)",
            "from smtp3.osuosl.org (smtp3.osuosl.org [140.211.166.136])\n by lists1.osuosl.org (Postfix) with ESMTP id 319BAD5\n for <intel-wired-lan@lists.osuosl.org>; Mon, 17 Nov 2025 13:49:34 +0000 (UTC)",
            "from localhost (localhost [127.0.0.1])\n by smtp3.osuosl.org (Postfix) with ESMTP id 84EF260717\n for <intel-wired-lan@lists.osuosl.org>; Mon, 17 Nov 2025 13:49:33 +0000 (UTC)",
            "from smtp3.osuosl.org ([127.0.0.1])\n by localhost (smtp3.osuosl.org [127.0.0.1]) (amavis, port 10024) with ESMTP\n id yOtbHHsUdd4w for <intel-wired-lan@lists.osuosl.org>;\n Mon, 17 Nov 2025 13:49:32 +0000 (UTC)",
            "from mgamail.intel.com (mgamail.intel.com [198.175.65.12])\n by smtp3.osuosl.org (Postfix) with ESMTPS id 2723660DAD\n for <intel-wired-lan@lists.osuosl.org>; Mon, 17 Nov 2025 13:49:32 +0000 (UTC)",
            "from fmviesa007.fm.intel.com ([10.60.135.147])\n by orvoesa104.jf.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384;\n 17 Nov 2025 05:49:32 -0800",
            "from irvmail002.ir.intel.com ([10.43.11.120])\n by fmviesa007.fm.intel.com with ESMTP; 17 Nov 2025 05:49:25 -0800",
            "from mglak.igk.intel.com (mglak.igk.intel.com [10.237.112.146])\n by irvmail002.ir.intel.com (Postfix) with ESMTP id 8D4F737E36;\n Mon, 17 Nov 2025 13:49:23 +0000 (GMT)"
        ],
        "X-Virus-Scanned": [
            "amavis at osuosl.org",
            "amavis at osuosl.org"
        ],
        "X-Comment": "SPF check N/A for local connections - client-ip=140.211.166.142;\n helo=lists1.osuosl.org; envelope-from=intel-wired-lan-bounces@osuosl.org;\n receiver=<UNKNOWN> ",
        "DKIM-Filter": [
            "OpenDKIM Filter v2.11.0 smtp3.osuosl.org 3BA5360717",
            "OpenDKIM Filter v2.11.0 smtp3.osuosl.org 2723660DAD"
        ],
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed; d=osuosl.org;\n\ts=default; t=1763387376;\n\tbh=J6PP+g+7kVcPZ7VVEwhJa6geNSqRS+8hcf5sv6Qlt/8=;\n\th=From:To:Cc:Date:In-Reply-To:References:Subject:List-Id:\n\t List-Unsubscribe:List-Archive:List-Post:List-Help:List-Subscribe:\n\t From;\n\tb=luu8c6LbgYgGtIjoAj24Twiblo8nexXhE3IIIqIaxnzMfgDrenTfW2/bWY7nrqC5B\n\t MBFmpsfR2lYEAVGUgFsVsmdPZXOIEOaU2Ds01ArkmuifAICjAqwF4/PISkoKfV3QDC\n\t P6hdQEax4G+YqpZRqFAF4g45gP5PL8otPschbAlYwJK84gUqH7rXzpSrknriDPdcmw\n\t ZwOqeNHFLzs21FSwYyOcyRe5rQyq9z/ueCBw30J4iIuZVVLuJ4eSv6aAeQyYbVltvn\n\t UgzR9aCM6lKxXT9gFWFCXb35a3ZRttBryejwyHpAvvGruMt6ta+I/+4tqJn+YjSxYg\n\t KiXcSS1eI3qwg==",
        "Received-SPF": "Pass (mailfrom) identity=mailfrom; client-ip=198.175.65.12;\n helo=mgamail.intel.com; envelope-from=larysa.zaremba@intel.com;\n receiver=<UNKNOWN>",
        "DMARC-Filter": "OpenDMARC Filter v1.4.2 smtp3.osuosl.org 2723660DAD",
        "X-CSE-ConnectionGUID": [
            "Yn8gnRTgTXG6dMaZQcTCGQ==",
            "gy64cb9qR+OyidWqFYZA7g=="
        ],
        "X-CSE-MsgGUID": [
            "wlfOfAoUSBW+w2C763cSqA==",
            "WmiiTToYTEKKLgU3qDhD5Q=="
        ],
        "X-IronPort-AV": [
            "E=McAfee;i=\"6800,10657,11616\"; a=\"76846102\"",
            "E=Sophos;i=\"6.19,311,1754982000\"; d=\"scan'208\";a=\"76846102\"",
            "E=Sophos;i=\"6.19,311,1754982000\"; d=\"scan'208\";a=\"190115716\""
        ],
        "X-ExtLoop1": "1",
        "From": "Larysa Zaremba <larysa.zaremba@intel.com>",
        "To": "intel-wired-lan@lists.osuosl.org, Tony Nguyen <anthony.l.nguyen@intel.com>",
        "Cc": "aleksander.lobakin@intel.com, sridhar.samudrala@intel.com,\n \"Singhai, Anjali\" <anjali.singhai@intel.com>,\n Michal Swiatkowski <michal.swiatkowski@linux.intel.com>,\n Larysa Zaremba <larysa.zaremba@intel.com>,\n \"Fijalkowski, Maciej\" <maciej.fijalkowski@intel.com>,\n Emil Tantilov <emil.s.tantilov@intel.com>,\n Madhu Chittim <madhu.chittim@intel.com>, Josh Hay <joshua.a.hay@intel.com>,\n \"Keller, Jacob E\" <jacob.e.keller@intel.com>,\n jayaprakash.shanmugam@intel.com, natalia.wochtman@intel.com,\n Jiri Pirko <jiri@resnulli.us>, \"David S. Miller\" <davem@davemloft.net>,\n Eric Dumazet <edumazet@google.com>, Jakub Kicinski <kuba@kernel.org>,\n Paolo Abeni <pabeni@redhat.com>, Simon Horman <horms@kernel.org>,\n Jonathan Corbet <corbet@lwn.net>,\n Richard Cochran <richardcochran@gmail.com>,\n Przemek Kitszel <przemyslaw.kitszel@intel.com>,\n Andrew Lunn <andrew+netdev@lunn.ch>, netdev@vger.kernel.org,\n linux-doc@vger.kernel.org, linux-kernel@vger.kernel.org,\n Phani R Burra <phani.r.burra@intel.com>",
        "Date": "Mon, 17 Nov 2025 14:48:46 +0100",
        "Message-ID": "<20251117134912.18566-7-larysa.zaremba@intel.com>",
        "X-Mailer": "git-send-email 2.47.0",
        "In-Reply-To": "<20251117134912.18566-1-larysa.zaremba@intel.com>",
        "References": "<20251117134912.18566-1-larysa.zaremba@intel.com>",
        "MIME-Version": "1.0",
        "Content-Transfer-Encoding": "8bit",
        "X-Mailman-Original-DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/simple;\n d=intel.com; i=@intel.com; q=dns/txt; s=Intel;\n t=1763387372; x=1794923372;\n h=from:to:cc:subject:date:message-id:in-reply-to:\n references:mime-version:content-transfer-encoding;\n bh=sunIO+XPlLIf/gR79GUgKSAsqLx/LYKrNVaAlzwt/ww=;\n b=Ipc8VvngOZ4TYMKm+Xk6U6z5Dn2bDRPHYc6kYuv9mIcl0b9+Z4i2C4be\n VmMGklttFmrITOcskSYY1b2Qxg4KS7nLv1RFEXnO8KB4AEBa3e0UTQRTl\n lRAih1hBvV9rPh8Cy/T3oqHSGJSOpo7K+AkVu7OARVrTLtany0T+GeoRs\n ywzAGvES1V2188qup6eDXJjX3a/zP14izjumvbMl054W865j2XaHMf65P\n 8/7Grj842Y+QVUDYJI3UGUpAx2sADuOKl6yQHjlL6UuRjEale0W0WyEmU\n hcIs4ESOSsPKtbqjEFOhqe5jlB5Lcv0H9LVOQzpsrSt8wgC27LniFeP62\n Q==;",
        "X-Mailman-Original-Authentication-Results": [
            "smtp3.osuosl.org;\n dmarc=pass (p=none dis=none)\n header.from=intel.com",
            "smtp3.osuosl.org;\n dkim=pass (2048-bit key) header.d=intel.com header.i=@intel.com\n header.a=rsa-sha256 header.s=Intel header.b=Ipc8Vvng"
        ],
        "Subject": "[Intel-wired-lan] [PATCH iwl-next v5 06/15] libie: add bookkeeping\n support for control queue messages",
        "X-BeenThere": "intel-wired-lan@osuosl.org",
        "X-Mailman-Version": "2.1.30",
        "Precedence": "list",
        "List-Id": "Intel Wired Ethernet Linux Kernel Driver Development\n <intel-wired-lan.osuosl.org>",
        "List-Unsubscribe": "<https://lists.osuosl.org/mailman/options/intel-wired-lan>,\n <mailto:intel-wired-lan-request@osuosl.org?subject=unsubscribe>",
        "List-Archive": "<http://lists.osuosl.org/pipermail/intel-wired-lan/>",
        "List-Post": "<mailto:intel-wired-lan@osuosl.org>",
        "List-Help": "<mailto:intel-wired-lan-request@osuosl.org?subject=help>",
        "List-Subscribe": "<https://lists.osuosl.org/mailman/listinfo/intel-wired-lan>,\n <mailto:intel-wired-lan-request@osuosl.org?subject=subscribe>",
        "Errors-To": "intel-wired-lan-bounces@osuosl.org",
        "Sender": "\"Intel-wired-lan\" <intel-wired-lan-bounces@osuosl.org>"
    },
    "content": "From: Phani R Burra <phani.r.burra@intel.com>\n\nAll send control queue messages are allocated/freed in libie itself and\ntracked with the unique transaction (Xn) ids until they receive response or\ntime out. Responses can be received out of order, therefore transactions\nare stored in an array and tracked though a bitmap.\n\nPre-allocated DMA memory is used where possible. It reduces the driver\noverhead in handling memory allocation/free and message timeouts.\n\nReviewed-by: Maciej Fijalkowski <maciej.fijalkowski@intel.com>\nSigned-off-by: Phani R Burra <phani.r.burra@intel.com>\nCo-developed-by: Victor Raj <victor.raj@intel.com>\nSigned-off-by: Victor Raj <victor.raj@intel.com>\nCo-developed-by: Pavan Kumar Linga <pavan.kumar.linga@intel.com>\nSigned-off-by: Pavan Kumar Linga <pavan.kumar.linga@intel.com>\nCo-developed-by: Larysa Zaremba <larysa.zaremba@intel.com>\nSigned-off-by: Larysa Zaremba <larysa.zaremba@intel.com>\n---\n drivers/net/ethernet/intel/libie/controlq.c | 579 ++++++++++++++++++++\n include/linux/intel/libie/controlq.h        | 172 ++++++\n 2 files changed, 751 insertions(+)",
    "diff": "diff --git a/drivers/net/ethernet/intel/libie/controlq.c b/drivers/net/ethernet/intel/libie/controlq.c\nindex 80b0f1c2cc0a..f7097477d007 100644\n--- a/drivers/net/ethernet/intel/libie/controlq.c\n+++ b/drivers/net/ethernet/intel/libie/controlq.c\n@@ -602,6 +602,585 @@ u32 libie_ctlq_recv(struct libie_ctlq_info *ctlq, struct libie_ctlq_msg *msg,\n }\n EXPORT_SYMBOL_NS_GPL(libie_ctlq_recv, \"LIBIE_CP\");\n \n+/**\n+ * libie_ctlq_xn_pop_free - get a free Xn entry from the free list\n+ * @xnm: Xn transaction manager\n+ *\n+ * Retrieve a free Xn entry from the free list.\n+ *\n+ * Return: valid Xn entry pointer or NULL if there are no free Xn entries.\n+ */\n+static struct libie_ctlq_xn *\n+libie_ctlq_xn_pop_free(struct libie_ctlq_xn_manager *xnm)\n+{\n+\tstruct libie_ctlq_xn *xn;\n+\tu32 free_idx;\n+\n+\tguard(spinlock)(&xnm->free_xns_bm_lock);\n+\n+\tif (unlikely(xnm->shutdown))\n+\t\treturn NULL;\n+\n+\tfree_idx = find_next_bit(xnm->free_xns_bm, LIBIE_CTLQ_MAX_XN_ENTRIES,\n+\t\t\t\t 0);\n+\tif (free_idx == LIBIE_CTLQ_MAX_XN_ENTRIES)\n+\t\treturn NULL;\n+\n+\t__clear_bit(free_idx, xnm->free_xns_bm);\n+\txn = &xnm->ring[free_idx];\n+\txn->cookie = xnm->cookie++;\n+\n+\treturn xn;\n+}\n+\n+/**\n+ * __libie_ctlq_xn_push_free - unsafely push a Xn entry into the free list\n+ * @xnm: Xn transaction manager\n+ * @xn: xn entry to be added into the free list\n+ */\n+static void __libie_ctlq_xn_push_free(struct libie_ctlq_xn_manager *xnm,\n+\t\t\t\t      struct libie_ctlq_xn *xn)\n+{\n+\t__set_bit(xn->index, xnm->free_xns_bm);\n+\n+\tif (likely(!xnm->shutdown))\n+\t\treturn;\n+\n+\tif (bitmap_full(xnm->free_xns_bm, LIBIE_CTLQ_MAX_XN_ENTRIES))\n+\t\tcomplete(&xnm->can_destroy);\n+}\n+\n+/**\n+ * libie_ctlq_xn_push_free - push a Xn entry into the free list\n+ * @xnm: Xn transaction manager\n+ * @xn: xn entry to be added into the free list, not locked\n+ *\n+ * Safely add a used Xn entry back to the free list.\n+ */\n+static void libie_ctlq_xn_push_free(struct libie_ctlq_xn_manager *xnm,\n+\t\t\t\t    struct libie_ctlq_xn *xn)\n+{\n+\tguard(spinlock)(&xnm->free_xns_bm_lock);\n+\n+\t__libie_ctlq_xn_push_free(xnm, xn);\n+}\n+\n+/**\n+ * libie_ctlq_xn_put - put an Xn that will not be used in the current thread\n+ * @xnm: Xn transaction manager\n+ * @xn: async xn entry to be put for now, not locked\n+ *\n+ * If the Xn manager is being shutdown, nothing will handle the related\n+ * async request.\n+ */\n+static void libie_ctlq_xn_put(struct libie_ctlq_xn_manager *xnm,\n+\t\t\t      struct libie_ctlq_xn *xn)\n+{\n+\tguard(spinlock)(&xnm->free_xns_bm_lock);\n+\n+\tif (unlikely(xnm->shutdown))\n+\t\t__libie_ctlq_xn_push_free(xnm, xn);\n+}\n+\n+/**\n+ * libie_ctlq_xn_deinit_dma - free the DMA memory allocated for send messages\n+ * @dev: device pointer\n+ * @xnm: pointer to the transaction manager\n+ * @num_entries: number of Xn entries to free the DMA for\n+ */\n+static void libie_ctlq_xn_deinit_dma(struct device *dev,\n+\t\t\t\t     struct libie_ctlq_xn_manager *xnm,\n+\t\t\t\t      u32 num_entries)\n+{\n+\tfor (u32 i = 0; i < num_entries; i++) {\n+\t\tstruct libie_ctlq_xn *xn = &xnm->ring[i];\n+\n+\t\tlibie_cp_free_dma_mem(dev, xn->dma_mem);\n+\t\tkfree(xn->dma_mem);\n+\t}\n+}\n+\n+/**\n+ * libie_ctlq_xn_init_dma - pre-allocate DMA memory for send messages that use\n+ * stack variables\n+ * @dev: device pointer\n+ * @xnm: pointer to transaction manager\n+ *\n+ * Return: %0 on success or error if memory allocation fails\n+ */\n+static int libie_ctlq_xn_init_dma(struct device *dev,\n+\t\t\t\t  struct 
libie_ctlq_xn_manager *xnm)\n+{\n+\tu32 i;\n+\n+\tfor (i = 0; i < LIBIE_CTLQ_MAX_XN_ENTRIES; i++) {\n+\t\tstruct libie_ctlq_xn *xn = &xnm->ring[i];\n+\t\tstruct libie_cp_dma_mem *dma_mem;\n+\n+\t\tdma_mem = kzalloc(sizeof(*dma_mem), GFP_KERNEL);\n+\t\tif (!dma_mem)\n+\t\t\tgoto dealloc_dma;\n+\n+\t\tdma_mem->va = libie_cp_alloc_dma_mem(dev, dma_mem,\n+\t\t\t\t\t\t     LIBIE_CTLQ_MAX_BUF_LEN);\n+\t\tif (!dma_mem->va) {\n+\t\t\tkfree(dma_mem);\n+\t\t\tgoto dealloc_dma;\n+\t\t}\n+\n+\t\txn->dma_mem = dma_mem;\n+\t}\n+\n+\treturn 0;\n+\n+dealloc_dma:\n+\tlibie_ctlq_xn_deinit_dma(dev, xnm, i);\n+\n+\treturn -ENOMEM;\n+}\n+\n+/**\n+ * libie_ctlq_xn_process_recv - process Xn data in receive message\n+ * @params: Xn receive param information to handle a receive message\n+ * @ctlq_msg: received control queue message\n+ *\n+ * Process a control queue receive message and send a complete event\n+ * notification.\n+ *\n+ * Return: true if a message has been processed, false otherwise.\n+ */\n+static bool\n+libie_ctlq_xn_process_recv(struct libie_ctlq_xn_recv_params *params,\n+\t\t\t   struct libie_ctlq_msg *ctlq_msg)\n+{\n+\tstruct libie_ctlq_xn_manager *xnm = params->xnm;\n+\tstruct libie_ctlq_xn *xn;\n+\tu16 msg_cookie, xn_index;\n+\tstruct kvec *response;\n+\tint status;\n+\tu16 data;\n+\n+\tdata = ctlq_msg->sw_cookie;\n+\txn_index = FIELD_GET(LIBIE_CTLQ_XN_INDEX_M, data);\n+\tmsg_cookie = FIELD_GET(LIBIE_CTLQ_XN_COOKIE_M, data);\n+\tstatus = ctlq_msg->chnl_retval ? -EFAULT : 0;\n+\n+\txn = &xnm->ring[xn_index];\n+\tif (ctlq_msg->chnl_opcode != xn->virtchnl_opcode ||\n+\t    msg_cookie != xn->cookie)\n+\t\treturn false;\n+\n+\tspin_lock(&xn->xn_lock);\n+\tif (xn->state != LIBIE_CTLQ_XN_ASYNC &&\n+\t    xn->state != LIBIE_CTLQ_XN_WAITING) {\n+\t\tspin_unlock(&xn->xn_lock);\n+\t\treturn false;\n+\t}\n+\n+\tresponse = &ctlq_msg->recv_mem;\n+\tif (xn->state == LIBIE_CTLQ_XN_ASYNC) {\n+\t\txn->resp_cb(xn->send_ctx, response, status);\n+\t\txn->state = LIBIE_CTLQ_XN_IDLE;\n+\t\tspin_unlock(&xn->xn_lock);\n+\t\tlibie_ctlq_xn_push_free(xnm, xn);\n+\n+\t\treturn true;\n+\t}\n+\n+\txn->recv_mem = *response;\n+\txn->state = status ? 
LIBIE_CTLQ_XN_COMPLETED_FAILED :\n+\t\t\t     LIBIE_CTLQ_XN_COMPLETED_SUCCESS;\n+\n+\tcomplete(&xn->cmd_completion_event);\n+\tspin_unlock(&xn->xn_lock);\n+\n+\treturn true;\n+}\n+\n+/**\n+ * libie_xn_check_async_timeout - Check for asynchronous message timeouts\n+ * @xnm: Xn transaction manager\n+ *\n+ * Call the corresponding callback to notify the caller about the timeout.\n+ */\n+static void libie_xn_check_async_timeout(struct libie_ctlq_xn_manager *xnm)\n+{\n+\tu32 idx;\n+\n+\tfor_each_clear_bit(idx, xnm->free_xns_bm, LIBIE_CTLQ_MAX_XN_ENTRIES) {\n+\t\tstruct libie_ctlq_xn *xn = &xnm->ring[idx];\n+\t\tu64 timeout_ms;\n+\n+\t\tspin_lock(&xn->xn_lock);\n+\n+\t\ttimeout_ms = ktime_ms_delta(ktime_get(), xn->timestamp);\n+\t\tif (xn->state != LIBIE_CTLQ_XN_ASYNC ||\n+\t\t    timeout_ms < xn->timeout_ms) {\n+\t\t\tspin_unlock(&xn->xn_lock);\n+\t\t\tcontinue;\n+\t\t}\n+\n+\t\txn->resp_cb(xn->send_ctx, NULL, -ETIMEDOUT);\n+\t\txn->state = LIBIE_CTLQ_XN_IDLE;\n+\t\tspin_unlock(&xn->xn_lock);\n+\t\tlibie_ctlq_xn_push_free(xnm, xn);\n+\t}\n+}\n+\n+/**\n+ * libie_ctlq_xn_recv - process control queue receive message\n+ * @params: Xn receive param information to handle a receive message\n+ *\n+ * Process a receive message and update the receive queue buffer.\n+ *\n+ * Return: remaining budget.\n+ */\n+u32 libie_ctlq_xn_recv(struct libie_ctlq_xn_recv_params *params)\n+{\n+\tstruct libie_ctlq_msg ctlq_msg;\n+\tu32 budget = params->budget;\n+\n+\twhile (budget && libie_ctlq_recv(params->ctlq, &ctlq_msg, 1)) {\n+\t\tbudget--;\n+\t\tif (!libie_ctlq_xn_process_recv(params, &ctlq_msg))\n+\t\t\tparams->ctlq_msg_handler(params->xnm->ctx, &ctlq_msg);\n+\t}\n+\n+\tlibie_ctlq_post_rx_buffs(params->ctlq);\n+\tlibie_xn_check_async_timeout(params->xnm);\n+\n+\treturn budget;\n+}\n+EXPORT_SYMBOL_NS_GPL(libie_ctlq_xn_recv, \"LIBIE_CP\");\n+\n+/**\n+ * libie_cp_map_dma_mem - map a given virtual address for DMA\n+ * @dev: device information\n+ * @va: virtual address to be mapped\n+ * @size: size of the memory\n+ * @direction: DMA direction either from/to device\n+ * @dma_mem: memory for DMA information to be stored\n+ *\n+ * Return: true on success, false on DMA map failure.\n+ */\n+static bool libie_cp_map_dma_mem(struct device *dev, void *va, size_t size,\n+\t\t\t\t int direction,\n+\t\t\t\t  struct libie_cp_dma_mem *dma_mem)\n+{\n+\tdma_mem->pa = dma_map_single(dev, va, size, direction);\n+\n+\treturn dma_mapping_error(dev, dma_mem->pa) ? 
false : true;\n+}\n+\n+/**\n+ * libie_cp_unmap_dma_mem - unmap previously mapped DMA address\n+ * @dev: device information\n+ * @dma_mem: DMA memory information\n+ */\n+static void libie_cp_unmap_dma_mem(struct device *dev,\n+\t\t\t\t   const struct libie_cp_dma_mem *dma_mem)\n+{\n+\tdma_unmap_single(dev, dma_mem->pa, dma_mem->size,\n+\t\t\t dma_mem->direction);\n+}\n+\n+/**\n+ * libie_ctlq_xn_process_send - process and send a control queue message\n+ * @params: Xn send param information for sending a control queue message\n+ * @xn: Assigned Xn entry for tracking the control queue message\n+ *\n+ * Return: %0 on success, -%errno on failure.\n+ */\n+static\n+int libie_ctlq_xn_process_send(struct libie_ctlq_xn_send_params *params,\n+\t\t\t       struct libie_ctlq_xn *xn)\n+{\n+\tsize_t buf_len = params->send_buf.iov_len;\n+\tstruct device *dev = params->ctlq->dev;\n+\tvoid *buf = params->send_buf.iov_base;\n+\tstruct libie_cp_dma_mem *dma_mem;\n+\tu16 cookie;\n+\tint ret;\n+\n+\tif (!buf || !buf_len)\n+\t\treturn -EOPNOTSUPP;\n+\n+\tif (libie_cp_can_send_onstack(buf_len)) {\n+\t\tdma_mem = xn->dma_mem;\n+\t\tmemcpy(dma_mem->va, buf, buf_len);\n+\t} else {\n+\t\tdma_mem = &xn->send_dma_mem;\n+\t\tdma_mem->va = buf;\n+\t\tdma_mem->size = buf_len;\n+\t\tdma_mem->direction = DMA_TO_DEVICE;\n+\n+\t\tif (!libie_cp_map_dma_mem(dev, buf, buf_len, DMA_TO_DEVICE,\n+\t\t\t\t\t  dma_mem))\n+\t\t\treturn -ENOMEM;\n+\t}\n+\n+\tcookie = FIELD_PREP(LIBIE_CTLQ_XN_COOKIE_M, xn->cookie) |\n+\t\t FIELD_PREP(LIBIE_CTLQ_XN_INDEX_M, xn->index);\n+\n+\tscoped_guard(spinlock, &params->ctlq->lock) {\n+\t\tif (!params->ctlq_msg || params->resp_cb) {\n+\t\t\tstruct libie_ctlq_info *ctlq = params->ctlq;\n+\n+\t\t\t*ctlq->tx_msg[ctlq->next_to_use] =\n+\t\t\t\tparams->ctlq_msg ? *params->ctlq_msg :\n+\t\t\t\t(struct libie_ctlq_msg) {\n+\t\t\t\t\t.opcode = LIBIE_CTLQ_SEND_MSG_TO_CP\n+\t\t\t\t};\n+\t\t\tparams->ctlq_msg = ctlq->tx_msg[ctlq->next_to_use];\n+\t\t}\n+\n+\t\tparams->ctlq_msg->sw_cookie = cookie;\n+\t\tparams->ctlq_msg->send_mem = *dma_mem;\n+\t\tparams->ctlq_msg->data_len = buf_len;\n+\t\tparams->ctlq_msg->chnl_opcode = params->chnl_opcode;\n+\t\tret = libie_ctlq_send(params->ctlq, params->ctlq_msg, 1);\n+\t}\n+\n+\tif (ret && !libie_cp_can_send_onstack(buf_len))\n+\t\tlibie_cp_unmap_dma_mem(dev, dma_mem);\n+\n+\treturn ret;\n+}\n+\n+/**\n+ * libie_ctlq_xn_send - Function to send a control queue message\n+ * @params: Xn send param information for sending a control queue message\n+ *\n+ * Send a control queue (mailbox or config) message.\n+ * Based on the params value, the call can be completed synchronously or\n+ * asynchronously.\n+ *\n+ * Return: %0 on success, -%errno on failure.\n+ */\n+int libie_ctlq_xn_send(struct libie_ctlq_xn_send_params *params)\n+{\n+\tbool free_send = !libie_cp_can_send_onstack(params->send_buf.iov_len);\n+\tstruct libie_ctlq_xn *xn;\n+\tint ret;\n+\n+\tif (params->send_buf.iov_len > LIBIE_CTLQ_MAX_BUF_LEN) {\n+\t\tret = -EINVAL;\n+\t\tgoto free_buf;\n+\t}\n+\n+\txn = libie_ctlq_xn_pop_free(params->xnm);\n+\t/* no free transactions available */\n+\tif (unlikely(!xn)) {\n+\t\tret = -EAGAIN;\n+\t\tgoto free_buf;\n+\t}\n+\n+\tspin_lock(&xn->xn_lock);\n+\n+\txn->state = params->resp_cb ? 
LIBIE_CTLQ_XN_ASYNC :\n+\t\t\t\t      LIBIE_CTLQ_XN_WAITING;\n+\txn->ctlq = params->ctlq;\n+\txn->virtchnl_opcode = params->chnl_opcode;\n+\n+\tif (params->resp_cb) {\n+\t\txn->send_ctx = params->send_ctx;\n+\t\txn->resp_cb = params->resp_cb;\n+\t\txn->timeout_ms = params->timeout_ms;\n+\t\txn->timestamp = ktime_get();\n+\t}\n+\n+\tret = libie_ctlq_xn_process_send(params, xn);\n+\tif (ret)\n+\t\tgoto release_xn;\n+\telse\n+\t\tfree_send = false;\n+\n+\tspin_unlock(&xn->xn_lock);\n+\n+\tif (params->resp_cb) {\n+\t\tlibie_ctlq_xn_put(params->xnm, xn);\n+\t\treturn 0;\n+\t}\n+\n+\twait_for_completion_timeout(&xn->cmd_completion_event,\n+\t\t\t\t    msecs_to_jiffies(params->timeout_ms));\n+\n+\tspin_lock(&xn->xn_lock);\n+\tswitch (xn->state) {\n+\tcase LIBIE_CTLQ_XN_WAITING:\n+\t\tret = -ETIMEDOUT;\n+\t\tbreak;\n+\tcase LIBIE_CTLQ_XN_COMPLETED_SUCCESS:\n+\t\tparams->recv_mem = xn->recv_mem;\n+\t\tbreak;\n+\tdefault:\n+\t\tret = -EBADMSG;\n+\t\tbreak;\n+\t}\n+\n+\t/* Free the receive buffer in case of failure. On timeout, receive\n+\t * buffer is not allocated.\n+\t */\n+\tif (ret && ret != -ETIMEDOUT)\n+\t\tlibie_ctlq_release_rx_buf(&xn->recv_mem);\n+\n+release_xn:\n+\txn->state = LIBIE_CTLQ_XN_IDLE;\n+\treinit_completion(&xn->cmd_completion_event);\n+\tspin_unlock(&xn->xn_lock);\n+\tlibie_ctlq_xn_push_free(params->xnm, xn);\n+free_buf:\n+\tif (free_send)\n+\t\tparams->rel_tx_buf(params->send_buf.iov_base);\n+\n+\treturn ret;\n+}\n+EXPORT_SYMBOL_NS_GPL(libie_ctlq_xn_send, \"LIBIE_CP\");\n+\n+/**\n+ * libie_ctlq_xn_send_clean - cleanup the send control queue message buffers\n+ * @params: Xn clean param information for send complete handling\n+ *\n+ * Cleanup the send buffers for the given control queue, if force is set, then\n+ * clear all the outstanding send messages irrrespective their send status.\n+ * Force should be used during deinit or reset.\n+ *\n+ * Return: number of send buffers cleaned.\n+ */\n+u32 libie_ctlq_xn_send_clean(const struct libie_ctlq_xn_clean_params *params)\n+{\n+\tstruct libie_ctlq_info *ctlq = params->ctlq;\n+\tstruct device *dev = ctlq->dev;\n+\tu32 ntc, i;\n+\n+\tspin_lock(&ctlq->lock);\n+\tntc = ctlq->next_to_clean;\n+\n+\tfor (i = 0; i < params->num_msgs; i++) {\n+\t\tstruct libie_ctlq_msg *msg = ctlq->tx_msg[ntc];\n+\t\tstruct libie_ctlq_desc *desc;\n+\t\tu64 qword;\n+\n+\t\tdesc = &ctlq->descs[ntc];\n+\t\tqword = le64_to_cpu(desc->qword0);\n+\n+\t\tif (!FIELD_GET(LIBIE_CTLQ_DESC_FLAG_DD, qword))\n+\t\t\tbreak;\n+\n+\t\tdma_rmb();\n+\n+\t\tif (!libie_cp_can_send_onstack(msg->data_len)) {\n+\t\t\tlibie_cp_unmap_dma_mem(dev, &msg->send_mem);\n+\t\t\tparams->rel_tx_buf(msg->send_mem.va);\n+\t\t}\n+\n+\t\tmemset(msg, 0, sizeof(*msg));\n+\t\tdesc->qword0 = 0;\n+\n+\t\tif (unlikely(++ntc == ctlq->ring_len))\n+\t\t\tntc = 0;\n+\t}\n+\n+\tctlq->next_to_clean = ntc;\n+\tspin_unlock(&ctlq->lock);\n+\n+\treturn i;\n+}\n+EXPORT_SYMBOL_NS_GPL(libie_ctlq_xn_send_clean, \"LIBIE_CP\");\n+\n+/**\n+ * libie_ctlq_xn_deinit - deallocate and free the transaction manager resources\n+ * @xnm: pointer to the transaction manager\n+ * @ctx: controlq context structure\n+ *\n+ * All Rx processing must be stopped beforehand.\n+ */\n+void libie_ctlq_xn_deinit(struct libie_ctlq_xn_manager *xnm,\n+\t\t\t  struct libie_ctlq_ctx *ctx)\n+{\n+\tbool must_wait = false;\n+\tu32 i;\n+\n+\t/* Should be no new clear bits after this */\n+\tspin_lock(&xnm->free_xns_bm_lock);\n+\txnm->shutdown = true;\n+\n+\tfor_each_clear_bit(i, xnm->free_xns_bm, LIBIE_CTLQ_MAX_XN_ENTRIES) {\n+\t\tstruct libie_ctlq_xn 
*xn = &xnm->ring[i];\n+\n+\t\tspin_lock(&xn->xn_lock);\n+\n+\t\tif (xn->state == LIBIE_CTLQ_XN_WAITING ||\n+\t\t    xn->state == LIBIE_CTLQ_XN_IDLE) {\n+\t\t\tcomplete(&xn->cmd_completion_event);\n+\t\t\tmust_wait = true;\n+\t\t} else if (xn->state == LIBIE_CTLQ_XN_ASYNC) {\n+\t\t\t__libie_ctlq_xn_push_free(xnm, xn);\n+\t\t}\n+\n+\t\tspin_unlock(&xn->xn_lock);\n+\t}\n+\n+\tspin_unlock(&xnm->free_xns_bm_lock);\n+\n+\tif (must_wait)\n+\t\twait_for_completion(&xnm->can_destroy);\n+\n+\tlibie_ctlq_xn_deinit_dma(&ctx->mmio_info.pdev->dev, xnm,\n+\t\t\t\t LIBIE_CTLQ_MAX_XN_ENTRIES);\n+\tkfree(xnm);\n+\tlibie_ctlq_deinit(ctx);\n+}\n+EXPORT_SYMBOL_NS_GPL(libie_ctlq_xn_deinit, \"LIBIE_CP\");\n+\n+/**\n+ * libie_ctlq_xn_init - initialize the Xn transaction manager\n+ * @params: Xn init param information for allocating Xn manager resources\n+ *\n+ * Return: %0 on success, -%errno on failure.\n+ */\n+int libie_ctlq_xn_init(struct libie_ctlq_xn_init_params *params)\n+{\n+\tstruct libie_ctlq_xn_manager *xnm;\n+\tint ret;\n+\n+\tret = libie_ctlq_init(params->ctx, params->cctlq_info, params->num_qs);\n+\tif (ret)\n+\t\treturn ret;\n+\n+\txnm = kzalloc(sizeof(*xnm), GFP_KERNEL);\n+\tif (!xnm)\n+\t\tgoto ctlq_deinit;\n+\n+\tret = libie_ctlq_xn_init_dma(&params->ctx->mmio_info.pdev->dev, xnm);\n+\tif (ret)\n+\t\tgoto free_xnm;\n+\n+\tspin_lock_init(&xnm->free_xns_bm_lock);\n+\tinit_completion(&xnm->can_destroy);\n+\tbitmap_fill(xnm->free_xns_bm, LIBIE_CTLQ_MAX_XN_ENTRIES);\n+\n+\tfor (u32 i = 0; i < LIBIE_CTLQ_MAX_XN_ENTRIES; i++) {\n+\t\tstruct libie_ctlq_xn *xn = &xnm->ring[i];\n+\n+\t\txn->index = i;\n+\t\tinit_completion(&xn->cmd_completion_event);\n+\t\tspin_lock_init(&xn->xn_lock);\n+\t}\n+\txnm->ctx = params->ctx;\n+\tparams->xnm = xnm;\n+\n+\treturn 0;\n+\n+free_xnm:\n+\tkfree(xnm);\n+ctlq_deinit:\n+\tlibie_ctlq_deinit(params->ctx);\n+\n+\treturn -ENOMEM;\n+}\n+EXPORT_SYMBOL_NS_GPL(libie_ctlq_xn_init, \"LIBIE_CP\");\n+\n MODULE_DESCRIPTION(\"Control Plane communication API\");\n MODULE_IMPORT_NS(\"LIBETH\");\n MODULE_LICENSE(\"GPL\");\ndiff --git a/include/linux/intel/libie/controlq.h b/include/linux/intel/libie/controlq.h\nindex 534508fbb405..4a627670814e 100644\n--- a/include/linux/intel/libie/controlq.h\n+++ b/include/linux/intel/libie/controlq.h\n@@ -20,6 +20,8 @@\n #define LIBIE_CTLQ_SEND_MSG_TO_CP\t\t0x801\n #define LIBIE_CTLQ_SEND_MSG_TO_PEER\t\t0x804\n \n+#define LIBIE_CP_TX_COPYBREAK\t\t128\n+\n /**\n  * struct libie_ctlq_ctx - contains controlq info and MMIO region info\n  * @mmio_info: MMIO region info structure\n@@ -60,11 +62,13 @@ struct libie_ctlq_reg {\n  * @va: virtual address\n  * @pa: physical address\n  * @size: memory size\n+ * @direction: memory to device or device to memory\n  */\n struct libie_cp_dma_mem {\n \tvoid\t\t*va;\n \tdma_addr_t\tpa;\n \tsize_t\t\tsize;\n+\tint\t\tdirection;\n };\n \n /**\n@@ -246,4 +250,172 @@ u32 libie_ctlq_recv(struct libie_ctlq_info *ctlq, struct libie_ctlq_msg *msg,\n \n int libie_ctlq_post_rx_buffs(struct libie_ctlq_info *ctlq);\n \n+/* Only 8 bits are available in descriptor for Xn index */\n+#define LIBIE_CTLQ_MAX_XN_ENTRIES\t\t256\n+#define LIBIE_CTLQ_XN_COOKIE_M\t\t\tGENMASK(15, 8)\n+#define LIBIE_CTLQ_XN_INDEX_M\t\t\tGENMASK(7, 0)\n+\n+/**\n+ * enum libie_ctlq_xn_state - Transaction state of a virtchnl message\n+ * @LIBIE_CTLQ_XN_IDLE: transaction is available to use\n+ * @LIBIE_CTLQ_XN_WAITING: waiting for transaction to complete\n+ * @LIBIE_CTLQ_XN_COMPLETED_SUCCESS: transaction completed with success\n+ * @LIBIE_CTLQ_XN_COMPLETED_FAILED: 
transaction completed with failure\n+ * @LIBIE_CTLQ_XN_ASYNC: asynchronous virtchnl message transaction type\n+ */\n+enum libie_ctlq_xn_state {\n+\tLIBIE_CTLQ_XN_IDLE = 0,\n+\tLIBIE_CTLQ_XN_WAITING,\n+\tLIBIE_CTLQ_XN_COMPLETED_SUCCESS,\n+\tLIBIE_CTLQ_XN_COMPLETED_FAILED,\n+\tLIBIE_CTLQ_XN_ASYNC,\n+};\n+\n+/**\n+ * struct libie_ctlq_xn - structure representing a virtchnl transaction entry\n+ * @resp_cb: callback to handle the response of an asynchronous virtchnl message\n+ * @xn_lock: lock to protect the transaction entry state\n+ * @ctlq: send control queue information\n+ * @cmd_completion_event: signal when a reply is available\n+ * @dma_mem: DMA memory of send buffer that use stack variable\n+ * @send_dma_mem: DMA memory of send buffer\n+ * @recv_mem: receive buffer\n+ * @send_ctx: context for callback function\n+ * @timeout_ms: Xn transaction timeout in msecs\n+ * @timestamp: timestamp to record the Xn send\n+ * @virtchnl_opcode: virtchnl command opcode used for Xn transaction\n+ * @state: transaction state of a virtchnl message\n+ * @cookie: unique message identifier\n+ * @index: index of the transaction entry\n+ */\n+struct libie_ctlq_xn {\n+\tvoid (*resp_cb)(void *ctx, struct kvec *mem, int status);\n+\tspinlock_t\t\t\txn_lock;\t/* protects state */\n+\tstruct libie_ctlq_info\t\t*ctlq;\n+\tstruct completion\t\tcmd_completion_event;\n+\tstruct libie_cp_dma_mem\t*dma_mem;\n+\tstruct libie_cp_dma_mem\tsend_dma_mem;\n+\tstruct kvec\t\t\trecv_mem;\n+\tvoid\t\t\t\t*send_ctx;\n+\tu64\t\t\t\ttimeout_ms;\n+\tktime_t\t\t\t\ttimestamp;\n+\tu32\t\t\t\tvirtchnl_opcode;\n+\tenum libie_ctlq_xn_state\tstate;\n+\tu8\t\t\t\tcookie;\n+\tu8\t\t\t\tindex;\n+};\n+\n+/**\n+ * struct libie_ctlq_xn_manager - structure representing the array of virtchnl\n+ *\t\t\t\t   transaction entries\n+ * @ctx: pointer to controlq context structure\n+ * @free_xns_bm_lock: lock to protect the free Xn entries bit map\n+ * @free_xns_bm: bitmap that represents the free Xn entries\n+ * @ring: array of Xn entries\n+ * @can_destroy: completion triggered by the last returned transaction\n+ * @shutdown: shows the transactions the xnm shutdown is waiting for them\n+ * @cookie: unique message identifier\n+ */\n+struct libie_ctlq_xn_manager {\n+\tstruct libie_ctlq_ctx\t*ctx;\n+\tspinlock_t\t\tfree_xns_bm_lock;\t/* get/check entries */\n+\tDECLARE_BITMAP(free_xns_bm, LIBIE_CTLQ_MAX_XN_ENTRIES);\n+\tstruct libie_ctlq_xn\tring[LIBIE_CTLQ_MAX_XN_ENTRIES];\n+\tstruct completion\tcan_destroy;\n+\tbool\t\t\tshutdown;\n+\tu8\t\t\tcookie;\n+};\n+\n+/**\n+ * struct libie_ctlq_xn_send_params - structure representing send Xn entry\n+ * @resp_cb: callback to handle the response of an asynchronous virtchnl message\n+ * @rel_tx_buf: driver entry point for freeing the send buffer after send\n+ * @xnm: Xn manager to process Xn entries\n+ * @ctlq: send control queue information\n+ * @ctlq_msg: control queue message information\n+ * @send_buf: represents the buffer that carries outgoing information\n+ * @recv_mem: receive buffer\n+ * @send_ctx: context for call back function\n+ * @timeout_ms: virtchnl transaction timeout in msecs\n+ * @chnl_opcode: virtchnl message opcode\n+ */\n+struct libie_ctlq_xn_send_params {\n+\tvoid (*resp_cb)(void *ctx, struct kvec *mem, int status);\n+\tvoid (*rel_tx_buf)(const void *buf_va);\n+\tstruct libie_ctlq_xn_manager\t\t*xnm;\n+\tstruct libie_ctlq_info\t\t\t*ctlq;\n+\tstruct libie_ctlq_msg\t\t\t*ctlq_msg;\n+\tstruct kvec\t\t\t\tsend_buf;\n+\tstruct 
kvec\t\t\t\trecv_mem;\n+\tvoid\t\t\t\t\t*send_ctx;\n+\tu64\t\t\t\t\ttimeout_ms;\n+\tu32\t\t\t\t\tchnl_opcode;\n+};\n+\n+/**\n+ * libie_cp_can_send_onstack - can a message be sent using a stack variable\n+ * @size: ctlq data buffer size\n+ *\n+ * Return: %true if the message size is small enough for caller to pass\n+ *\t   an on-stack buffer, %false if kmalloc is needed\n+ */\n+static inline bool libie_cp_can_send_onstack(u32 size)\n+{\n+\treturn size <= LIBIE_CP_TX_COPYBREAK;\n+}\n+\n+/**\n+ * struct libie_ctlq_xn_recv_params - structure representing receive Xn entry\n+ * @ctlq_msg_handler: callback to handle a message originated from the peer\n+ * @xnm: Xn manager to process Xn entries\n+ * @ctlq: control queue information\n+ * @budget: maximum number of messages to process\n+ */\n+struct libie_ctlq_xn_recv_params {\n+\tvoid (*ctlq_msg_handler)(struct libie_ctlq_ctx *ctx,\n+\t\t\t\t struct libie_ctlq_msg *msg);\n+\tstruct libie_ctlq_xn_manager\t\t*xnm;\n+\tstruct libie_ctlq_info\t\t\t*ctlq;\n+\tu32\t\t\t\t\tbudget;\n+};\n+\n+/**\n+ * struct libie_ctlq_xn_clean_params - Data structure used for cleaning the\n+ * control queue messages\n+ * @rel_tx_buf: driver entry point for freeing the send buffer after send\n+ * @ctx: pointer to context structure\n+ * @ctlq: control queue information\n+ * @send_ctx: context for call back function\n+ * @num_msgs: number of messages to be cleaned\n+ */\n+struct libie_ctlq_xn_clean_params {\n+\tvoid (*rel_tx_buf)(const void *buf_va);\n+\tstruct libie_ctlq_ctx\t\t\t*ctx;\n+\tstruct libie_ctlq_info\t\t\t*ctlq;\n+\tvoid\t\t\t\t\t*send_ctx;\n+\tu16\t\t\t\t\tnum_msgs;\n+};\n+\n+/**\n+ * struct libie_ctlq_xn_init_params - Data structure used for initializing the\n+ * Xn transaction manager\n+ * @cctlq_info: control queue information\n+ * @ctx: pointer to controlq context structure\n+ * @xnm: Xn manager to process Xn entries\n+ * @num_qs: number of control queues needs to initialized\n+ */\n+struct libie_ctlq_xn_init_params {\n+\tstruct libie_ctlq_create_info\t\t*cctlq_info;\n+\tstruct libie_ctlq_ctx\t\t\t*ctx;\n+\tstruct libie_ctlq_xn_manager\t\t*xnm;\n+\tu32\t\t\t\t\tnum_qs;\n+};\n+\n+int libie_ctlq_xn_init(struct libie_ctlq_xn_init_params *params);\n+void libie_ctlq_xn_deinit(struct libie_ctlq_xn_manager *xnm,\n+\t\t\t  struct libie_ctlq_ctx *ctx);\n+int libie_ctlq_xn_send(struct libie_ctlq_xn_send_params *params);\n+u32 libie_ctlq_xn_recv(struct libie_ctlq_xn_recv_params *params);\n+u32 libie_ctlq_xn_send_clean(const struct libie_ctlq_xn_clean_params *params);\n+\n #endif /* __LIBIE_CONTROLQ_H */\n",
    "prefixes": [
        "iwl-next",
        "v5",
        "06/15"
    ]
}
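
The PATCH and PUT methods listed in the Allow header modify a patch rather than display it. The sketch below is a hedged example of a partial update via PATCH, assuming an API token for an account with maintainer rights on the intel-wired-lan project; the token value and the "accepted" state are placeholders, not values taken from this page:

import requests

# Partially update the patch: only the fields present in the JSON body change.
# Assumption: Patchwork accepts an API token via "Authorization: Token <key>",
# and the request is rejected unless the account maintains the patch's project.
url = "http://patchwork.ozlabs.org/api/patches/2165662/"
headers = {
    "Authorization": "Token REPLACE_WITH_API_TOKEN",  # placeholder token
    "Accept": "application/json",
}

resp = requests.patch(url, headers=headers, json={"state": "accepted"}, timeout=30)
resp.raise_for_status()
print(resp.json()["state"])  # reflects the updated state on success

A PUT request is issued the same way but is expected to carry every writable field, whereas PATCH changes only the fields it names.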