Patch Detail
get:
Show a patch.
patch:
Partially update a patch.
put:
Update a patch (full update).
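
These verbs follow the usual REST semantics, so the endpoint can be scripted directly. Below is a minimal Python sketch (using the third-party requests library) that reads this patch and then changes its state. The token value and the target state slug are placeholders, and updating a patch is assumed to require an account with maintainer rights on the project.

import requests

BASE = "https://patchwork.ozlabs.org/api"
PATCH_ID = 2165741
TOKEN = "0123456789abcdef"  # placeholder: generate a real token in your Patchwork profile

# get: show a patch (no authentication needed for public projects)
resp = requests.get(f"{BASE}/patches/{PATCH_ID}/")
resp.raise_for_status()
patch = resp.json()
print(patch["name"], "->", patch["state"])

# patch: partially update a patch -- only the supplied fields change
resp = requests.patch(
    f"{BASE}/patches/{PATCH_ID}/",
    headers={"Authorization": f"Token {TOKEN}"},
    json={"state": "accepted"},  # assumed state slug; this patch shows "under-review" below
)
resp.raise_for_status()
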
GET /api/patches/2165741/?format=api
{ "id": 2165741, "url": "http://patchwork.ozlabs.org/api/patches/2165741/?format=api", "web_url": "http://patchwork.ozlabs.org/project/intel-wired-lan/patch/20251117134912.18566-10-larysa.zaremba@intel.com/", "project": { "id": 46, "url": "http://patchwork.ozlabs.org/api/projects/46/?format=api", "name": "Intel Wired Ethernet development", "link_name": "intel-wired-lan", "list_id": "intel-wired-lan.osuosl.org", "list_email": "intel-wired-lan@osuosl.org", "web_url": "", "scm_url": "", "webscm_url": "", "list_archive_url": "", "list_archive_url_format": "", "commit_url_format": "" }, "msgid": "<20251117134912.18566-10-larysa.zaremba@intel.com>", "list_archive_url": null, "date": "2025-11-17T13:48:49", "name": "[iwl-next,v5,09/15] idpf: refactor idpf to use libie control queues", "commit_ref": null, "pull_url": null, "state": "under-review", "archived": false, "hash": "e500a749cca16c1f15c8f899c54c197917263ec5", "submitter": { "id": 84900, "url": "http://patchwork.ozlabs.org/api/people/84900/?format=api", "name": "Larysa Zaremba", "email": "larysa.zaremba@intel.com" }, "delegate": { "id": 109701, "url": "http://patchwork.ozlabs.org/api/users/109701/?format=api", "username": "anguy11", "first_name": "Anthony", "last_name": "Nguyen", "email": "anthony.l.nguyen@intel.com" }, "mbox": "http://patchwork.ozlabs.org/project/intel-wired-lan/patch/20251117134912.18566-10-larysa.zaremba@intel.com/mbox/", "series": [ { "id": 482391, "url": "http://patchwork.ozlabs.org/api/series/482391/?format=api", "web_url": "http://patchwork.ozlabs.org/project/intel-wired-lan/list/?series=482391", "date": "2025-11-17T13:48:40", "name": "Introduce iXD driver", "version": 5, "mbox": "http://patchwork.ozlabs.org/series/482391/mbox/" } ], "comments": "http://patchwork.ozlabs.org/api/patches/2165741/comments/", "check": "pending", "checks": "http://patchwork.ozlabs.org/api/patches/2165741/checks/", "tags": {}, "related": [], "headers": { "Return-Path": "<intel-wired-lan-bounces@osuosl.org>", "X-Original-To": [ "incoming@patchwork.ozlabs.org", "intel-wired-lan@lists.osuosl.org" ], "Delivered-To": [ "patchwork-incoming@legolas.ozlabs.org", "intel-wired-lan@lists.osuosl.org" ], "Authentication-Results": [ "legolas.ozlabs.org;\n\tdkim=pass (2048-bit key;\n unprotected) header.d=osuosl.org header.i=@osuosl.org header.a=rsa-sha256\n header.s=default header.b=mMHSXaez;\n\tdkim-atps=neutral", "legolas.ozlabs.org;\n spf=pass (sender SPF authorized) smtp.mailfrom=osuosl.org\n (client-ip=2605:bc80:3010::136; helo=smtp3.osuosl.org;\n envelope-from=intel-wired-lan-bounces@osuosl.org;\n receiver=patchwork.ozlabs.org)" ], "Received": [ "from smtp3.osuosl.org (smtp3.osuosl.org [IPv6:2605:bc80:3010::136])\n\t(using TLSv1.3 with cipher TLS_AES_256_GCM_SHA384 (256/256 bits)\n\t key-exchange X25519 server-signature ECDSA (secp384r1) server-digest SHA384)\n\t(No client certificate requested)\n\tby legolas.ozlabs.org (Postfix) with ESMTPS id 4d9Cpg1SWGz1xwc\n\tfor <incoming@patchwork.ozlabs.org>; Tue, 18 Nov 2025 03:28:10 +1100 (AEDT)", "from localhost (localhost [127.0.0.1])\n\tby smtp3.osuosl.org (Postfix) with ESMTP id DA04360590;\n\tMon, 17 Nov 2025 16:28:05 +0000 (UTC)", "from smtp3.osuosl.org ([127.0.0.1])\n by localhost (smtp3.osuosl.org [127.0.0.1]) (amavis, port 10024) with ESMTP\n id oL9vTObD1TE9; Mon, 17 Nov 2025 16:28:03 +0000 (UTC)", "from lists1.osuosl.org (lists1.osuosl.org [140.211.166.142])\n\tby smtp3.osuosl.org (Postfix) with ESMTP id F375260E1D;\n\tMon, 17 Nov 2025 16:28:02 +0000 (UTC)", "from smtp3.osuosl.org 
(smtp3.osuosl.org [140.211.166.136])\n by lists1.osuosl.org (Postfix) with ESMTP id 1E33DD5\n for <intel-wired-lan@lists.osuosl.org>; Mon, 17 Nov 2025 13:49:49 +0000 (UTC)", "from localhost (localhost [127.0.0.1])\n by smtp3.osuosl.org (Postfix) with ESMTP id A11D260DD3\n for <intel-wired-lan@lists.osuosl.org>; Mon, 17 Nov 2025 13:49:47 +0000 (UTC)", "from smtp3.osuosl.org ([127.0.0.1])\n by localhost (smtp3.osuosl.org [127.0.0.1]) (amavis, port 10024) with ESMTP\n id ABYO0HnAoUJ7 for <intel-wired-lan@lists.osuosl.org>;\n Mon, 17 Nov 2025 13:49:45 +0000 (UTC)", "from mgamail.intel.com (mgamail.intel.com [198.175.65.12])\n by smtp3.osuosl.org (Postfix) with ESMTPS id EE08260DD2\n for <intel-wired-lan@lists.osuosl.org>; Mon, 17 Nov 2025 13:49:44 +0000 (UTC)", "from fmviesa007.fm.intel.com ([10.60.135.147])\n by orvoesa104.jf.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384;\n 17 Nov 2025 05:49:43 -0800", "from irvmail002.ir.intel.com ([10.43.11.120])\n by fmviesa007.fm.intel.com with ESMTP; 17 Nov 2025 05:49:31 -0800", "from mglak.igk.intel.com (mglak.igk.intel.com [10.237.112.146])\n by irvmail002.ir.intel.com (Postfix) with ESMTP id 02B8037E3C;\n Mon, 17 Nov 2025 13:49:28 +0000 (GMT)" ], "X-Virus-Scanned": [ "amavis at osuosl.org", "amavis at osuosl.org" ], "X-Comment": "SPF check N/A for local connections - client-ip=140.211.166.142;\n helo=lists1.osuosl.org; envelope-from=intel-wired-lan-bounces@osuosl.org;\n receiver=<UNKNOWN> ", "DKIM-Filter": [ "OpenDKIM Filter v2.11.0 smtp3.osuosl.org F375260E1D", "OpenDKIM Filter v2.11.0 smtp3.osuosl.org EE08260DD2" ], "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed; d=osuosl.org;\n\ts=default; t=1763396883;\n\tbh=mj4UuOiT5a6Qc+Tx9xfPGXFt8r9oD3LPJWutU48kjIs=;\n\th=From:To:Cc:Date:In-Reply-To:References:Subject:List-Id:\n\t List-Unsubscribe:List-Archive:List-Post:List-Help:List-Subscribe:\n\t From;\n\tb=mMHSXaez378H9BqjR/Zdva5T+dojLnThz3E5YGprTM8rMHarVsI9OPiH3CcjTNto6\n\t 2ZSlh2NB94AE6hFL7KYJdRJHW+m0CfpoAFyoLXNnTGMC0WjfJb9fVWi4xSPTyIOlbT\n\t P7RYh52etyOWz40/3msiTmnbx7lLGtJnEDQwwPQJ2NM8MHOK1AZBCtGh0tZJnbLSTC\n\t Cbcy9gmsqmxZqSnuo/k8lrMfcamGUAsWqT5q8YvnLUD1xUCYK/RzDQzt4gfyV6cR8Q\n\t H6AiFdSsva2niBHxnRJfxeYBSS163+EP0b/hOyjIGeIKTVpGAjrshAd1y6x/OHXNkF\n\t uf5hVo66V6i8A==", "Received-SPF": "Pass (mailfrom) identity=mailfrom; client-ip=198.175.65.12;\n helo=mgamail.intel.com; envelope-from=larysa.zaremba@intel.com;\n receiver=<UNKNOWN>", "DMARC-Filter": "OpenDMARC Filter v1.4.2 smtp3.osuosl.org EE08260DD2", "X-CSE-ConnectionGUID": [ "5IzumTFTSj67TYDyGRvPNw==", "4Wa8BBEOSt6FTqHFGEJUqw==" ], "X-CSE-MsgGUID": [ "U9db+SisSdSVnzy/LnyHyw==", "W60yqc1kSYGfB0kTEbI4+Q==" ], "X-IronPort-AV": [ "E=McAfee;i=\"6800,10657,11616\"; a=\"76846188\"", "E=Sophos;i=\"6.19,311,1754982000\"; d=\"scan'208\";a=\"76846188\"", "E=Sophos;i=\"6.19,311,1754982000\"; d=\"scan'208\";a=\"190115756\"" ], "X-ExtLoop1": "1", "From": "Larysa Zaremba <larysa.zaremba@intel.com>", "To": "intel-wired-lan@lists.osuosl.org, Tony Nguyen <anthony.l.nguyen@intel.com>", "Cc": "aleksander.lobakin@intel.com, sridhar.samudrala@intel.com,\n \"Singhai, Anjali\" <anjali.singhai@intel.com>,\n Michal Swiatkowski <michal.swiatkowski@linux.intel.com>,\n Larysa Zaremba <larysa.zaremba@intel.com>,\n \"Fijalkowski, Maciej\" <maciej.fijalkowski@intel.com>,\n Emil Tantilov <emil.s.tantilov@intel.com>,\n Madhu Chittim <madhu.chittim@intel.com>, Josh Hay <joshua.a.hay@intel.com>,\n \"Keller, Jacob E\" <jacob.e.keller@intel.com>,\n jayaprakash.shanmugam@intel.com, natalia.wochtman@intel.com,\n Jiri Pirko 
<jiri@resnulli.us>, \"David S. Miller\" <davem@davemloft.net>,\n Eric Dumazet <edumazet@google.com>, Jakub Kicinski <kuba@kernel.org>,\n Paolo Abeni <pabeni@redhat.com>, Simon Horman <horms@kernel.org>,\n Jonathan Corbet <corbet@lwn.net>,\n Richard Cochran <richardcochran@gmail.com>,\n Przemek Kitszel <przemyslaw.kitszel@intel.com>,\n Andrew Lunn <andrew+netdev@lunn.ch>, netdev@vger.kernel.org,\n linux-doc@vger.kernel.org, linux-kernel@vger.kernel.org,\n Aleksandr Loktionov <aleksandr.loktionov@intel.com>", "Date": "Mon, 17 Nov 2025 14:48:49 +0100", "Message-ID": "<20251117134912.18566-10-larysa.zaremba@intel.com>", "X-Mailer": "git-send-email 2.47.0", "In-Reply-To": "<20251117134912.18566-1-larysa.zaremba@intel.com>", "References": "<20251117134912.18566-1-larysa.zaremba@intel.com>", "MIME-Version": "1.0", "Content-Transfer-Encoding": "8bit", "X-Mailman-Approved-At": "Mon, 17 Nov 2025 16:28:01 +0000", "X-Mailman-Original-DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/simple;\n d=intel.com; i=@intel.com; q=dns/txt; s=Intel;\n t=1763387385; x=1794923385;\n h=from:to:cc:subject:date:message-id:in-reply-to:\n references:mime-version:content-transfer-encoding;\n bh=l+MqCX8Jc03mgt/rVe7Jf3VMbKimj26TkmKVUcWAFJc=;\n b=S9JB8QYEtyFq6AnKvQVG9oI8K2LZuKJCR4t9uwOTBPjssMx3mmWecggC\n 5b4kjchUBKDTJU+AejwMGF5cQ8lxXiMBhFk6xApjCdloAWe2qQeX19E3S\n OrBIZ9rFh9NYk5VYrx60JSgtmLgtLfZQsXC/g/vSfHfa8zC2fkCRZSwjk\n 4hXveqjEkJnDuA0tkSJggjUhtSJyqA6wYxjWOOBsTcl1gH5eoDTj3fBki\n qrz+h1N2uGCNQSP4GL+5BIs4DKHFrRQk5eNchK4IFKdfcPS8IVQL4a4tX\n p3sAcX8if2ig9r+Ay13neU7oKWhvDSJSNEZZZKtOEqHG/13ha0w13zito\n w==;", "X-Mailman-Original-Authentication-Results": [ "smtp3.osuosl.org;\n dmarc=pass (p=none dis=none)\n header.from=intel.com", "smtp3.osuosl.org;\n dkim=pass (2048-bit key) header.d=intel.com header.i=@intel.com\n header.a=rsa-sha256 header.s=Intel header.b=S9JB8QYE" ], "Subject": "[Intel-wired-lan] [PATCH iwl-next v5 09/15] idpf: refactor idpf to\n use libie control queues", "X-BeenThere": "intel-wired-lan@osuosl.org", "X-Mailman-Version": "2.1.30", "Precedence": "list", "List-Id": "Intel Wired Ethernet Linux Kernel Driver Development\n <intel-wired-lan.osuosl.org>", "List-Unsubscribe": "<https://lists.osuosl.org/mailman/options/intel-wired-lan>,\n <mailto:intel-wired-lan-request@osuosl.org?subject=unsubscribe>", "List-Archive": "<http://lists.osuosl.org/pipermail/intel-wired-lan/>", "List-Post": "<mailto:intel-wired-lan@osuosl.org>", "List-Help": "<mailto:intel-wired-lan-request@osuosl.org?subject=help>", "List-Subscribe": "<https://lists.osuosl.org/mailman/listinfo/intel-wired-lan>,\n <mailto:intel-wired-lan-request@osuosl.org?subject=subscribe>", "Errors-To": "intel-wired-lan-bounces@osuosl.org", "Sender": "\"Intel-wired-lan\" <intel-wired-lan-bounces@osuosl.org>" }, "content": "From: Pavan Kumar Linga <pavan.kumar.linga@intel.com>\n\nSupport to initialize and configure controlqs, and manage their\ntransactions was introduced in libie. As part of it, most of the existing\ncontrolq structures are renamed and modified. Use those APIs in idpf and\nmake all the necessary changes.\n\nPreviously for the send and receive virtchnl messages, there used to be a\nmemcpy involved in controlq code to copy the buffer info passed by the send\nfunction into the controlq specific buffers. There was no restriction to\nuse automatic memory in that case. The new implementation in libie removed\ncopying of the send buffer info and introduced DMA mapping of the send\nbuffer itself. 
To accommodate it, use dynamic memory for the larger send\nbuffers. For smaller ones (<= 128 bytes) libie still can copy them into the\npre-allocated message memory.\n\nIn case of receive, idpf receives a page pool buffer allocated by the libie\nand care should be taken to release it after use in the idpf.\n\nThe changes are fairly trivial and localized, with a notable exception\nbeing the consolidation of idpf_vc_xn_shutdown and idpf_deinit_dflt_mbx\nunder the latter name. This has some additional consequences that are\naddressed in the following patches.\n\nThis refactoring introduces roughly additional 40KB of module storage used\nfor systems that only run idpf, so idpf + libie_cp + libie_pci takes about\n7% more storage than just idpf before refactoring.\n\nWe now pre-allocate small TX buffers, so that does increase the memory\nusage, but reduces the need to allocate. This results in additional 256 *\n128B of memory permanently used, increasing the worst-case memory usage by\n32KB but our ctlq RX buffers need to be of size 4096B anyway (not changed\nby the patchset), so this is hardly noticeable.\n\nAs for the timings, the fact that we are mostly limited by the HW response\ntime which is far from instant, is not changed by this refactor.\n\nReviewed-by: Aleksandr Loktionov <aleksandr.loktionov@intel.com>\nSigned-off-by: Pavan Kumar Linga <pavan.kumar.linga@intel.com>\nCo-developed-by: Larysa Zaremba <larysa.zaremba@intel.com>\nSigned-off-by: Larysa Zaremba <larysa.zaremba@intel.com>\n---\n drivers/net/ethernet/intel/idpf/Makefile | 2 -\n drivers/net/ethernet/intel/idpf/idpf.h | 28 +-\n .../net/ethernet/intel/idpf/idpf_controlq.c | 633 -------\n .../net/ethernet/intel/idpf/idpf_controlq.h | 142 --\n .../ethernet/intel/idpf/idpf_controlq_api.h | 177 --\n .../ethernet/intel/idpf/idpf_controlq_setup.c | 171 --\n drivers/net/ethernet/intel/idpf/idpf_dev.c | 60 +-\n .../net/ethernet/intel/idpf/idpf_ethtool.c | 20 +-\n drivers/net/ethernet/intel/idpf/idpf_lib.c | 67 +-\n drivers/net/ethernet/intel/idpf/idpf_main.c | 5 -\n drivers/net/ethernet/intel/idpf/idpf_mem.h | 20 -\n drivers/net/ethernet/intel/idpf/idpf_txrx.h | 2 +-\n drivers/net/ethernet/intel/idpf/idpf_vf_dev.c | 67 +-\n .../net/ethernet/intel/idpf/idpf_virtchnl.c | 1580 ++++++-----------\n .../net/ethernet/intel/idpf/idpf_virtchnl.h | 90 +-\n .../ethernet/intel/idpf/idpf_virtchnl_ptp.c | 239 ++-\n 16 files changed, 783 insertions(+), 2520 deletions(-)\n delete mode 100644 drivers/net/ethernet/intel/idpf/idpf_controlq.c\n delete mode 100644 drivers/net/ethernet/intel/idpf/idpf_controlq.h\n delete mode 100644 drivers/net/ethernet/intel/idpf/idpf_controlq_api.h\n delete mode 100644 drivers/net/ethernet/intel/idpf/idpf_controlq_setup.c\n delete mode 100644 drivers/net/ethernet/intel/idpf/idpf_mem.h", "diff": "diff --git a/drivers/net/ethernet/intel/idpf/Makefile b/drivers/net/ethernet/intel/idpf/Makefile\nindex 651ddee942bd..4aaafa175ec3 100644\n--- a/drivers/net/ethernet/intel/idpf/Makefile\n+++ b/drivers/net/ethernet/intel/idpf/Makefile\n@@ -6,8 +6,6 @@\n obj-$(CONFIG_IDPF) += idpf.o\n \n idpf-y := \\\n-\tidpf_controlq.o\t\t\\\n-\tidpf_controlq_setup.o\t\\\n \tidpf_dev.o\t\t\\\n \tidpf_ethtool.o\t\t\\\n \tidpf_idc.o\t\t\\\ndiff --git a/drivers/net/ethernet/intel/idpf/idpf.h b/drivers/net/ethernet/intel/idpf/idpf.h\nindex dfa7618ed261..0594f4a30f23 100644\n--- a/drivers/net/ethernet/intel/idpf/idpf.h\n+++ b/drivers/net/ethernet/intel/idpf/idpf.h\n@@ -27,7 +27,6 @@ struct idpf_rss_data;\n #include <linux/intel/virtchnl2.h>\n \n #include 
\"idpf_txrx.h\"\n-#include \"idpf_controlq.h\"\n \n #define GETMAXVAL(num_bits)\t\tGENMASK((num_bits) - 1, 0)\n \n@@ -37,11 +36,10 @@ struct idpf_rss_data;\n #define IDPF_NUM_FILTERS_PER_MSG\t20\n #define IDPF_NUM_DFLT_MBX_Q\t\t2\t/* includes both TX and RX */\n #define IDPF_DFLT_MBX_Q_LEN\t\t64\n-#define IDPF_DFLT_MBX_ID\t\t-1\n /* maximum number of times to try before resetting mailbox */\n #define IDPF_MB_MAX_ERR\t\t\t20\n #define IDPF_NUM_CHUNKS_PER_MSG(struct_sz, chunk_sz)\t\\\n-\t((IDPF_CTLQ_MAX_BUF_LEN - (struct_sz)) / (chunk_sz))\n+\t((LIBIE_CTLQ_MAX_BUF_LEN - (struct_sz)) / (chunk_sz))\n \n #define IDPF_WAIT_FOR_MARKER_TIMEO\t500\n #define IDPF_MAX_WAIT\t\t\t500\n@@ -202,8 +200,8 @@ struct idpf_vport_max_q {\n * @ptp_reg_init: PTP register initialization\n */\n struct idpf_reg_ops {\n-\tvoid (*ctlq_reg_init)(struct idpf_adapter *adapter,\n-\t\t\t struct idpf_ctlq_create_info *cq);\n+\tvoid (*ctlq_reg_init)(struct libie_mmio_info *mmio,\n+\t\t\t struct libie_ctlq_create_info *cctlq_info);\n \tint (*intr_reg_init)(struct idpf_vport *vport,\n \t\t\t struct idpf_q_vec_rsrc *rsrc);\n \tvoid (*mb_intr_reg_init)(struct idpf_adapter *adapter);\n@@ -606,8 +604,6 @@ struct idpf_vport_config {\n \tDECLARE_BITMAP(flags, IDPF_VPORT_CONFIG_FLAGS_NBITS);\n };\n \n-struct idpf_vc_xn_manager;\n-\n #define idpf_for_each_vport(adapter, iter) \\\n \tfor (struct idpf_vport **__##iter = &(adapter)->vports[0], \\\n \t *iter = (adapter)->max_vports ? *__##iter : NULL; \\\n@@ -625,8 +621,10 @@ struct idpf_vc_xn_manager;\n * @state: Init state machine\n * @flags: See enum idpf_flags\n * @reset_reg: See struct idpf_reset_reg\n- * @hw: Device access data\n * @ctlq_ctx: controlq context\n+ * @asq: Send control queue info\n+ * @arq: Receive control queue info\n+ * @xn_init_params: Xn transaction manager parameters\n * @num_avail_msix: Available number of MSIX vectors\n * @num_msix_entries: Number of entries in MSIX table\n * @msix_entries: MSIX table\n@@ -659,7 +657,6 @@ struct idpf_vc_xn_manager;\n * @stats_task: Periodic statistics retrieval task\n * @stats_wq: Workqueue for statistics task\n * @caps: Negotiated capabilities with device\n- * @vcxn_mngr: Virtchnl transaction manager\n * @dev_ops: See idpf_dev_ops\n * @cdev_info: IDC core device info pointer\n * @num_vfs: Number of allocated VFs through sysfs. 
PF does not directly talk\n@@ -683,8 +680,10 @@ struct idpf_adapter {\n \tenum idpf_state state;\n \tDECLARE_BITMAP(flags, IDPF_FLAGS_NBITS);\n \tstruct idpf_reset_reg reset_reg;\n-\tstruct idpf_hw hw;\n \tstruct libie_ctlq_ctx ctlq_ctx;\n+\tstruct libie_ctlq_info *asq;\n+\tstruct libie_ctlq_info *arq;\n+\tstruct libie_ctlq_xn_init_params xn_init_params;\n \tu16 num_avail_msix;\n \tu16 num_msix_entries;\n \tstruct msix_entry *msix_entries;\n@@ -721,7 +720,6 @@ struct idpf_adapter {\n \tstruct delayed_work stats_task;\n \tstruct workqueue_struct *stats_wq;\n \tstruct virtchnl2_get_capabilities caps;\n-\tstruct idpf_vc_xn_manager *vcxn_mngr;\n \n \tstruct idpf_dev_ops dev_ops;\n \tstruct iidc_rdma_core_dev_info *cdev_info;\n@@ -881,12 +879,12 @@ static inline u8 idpf_get_min_tx_pkt_len(struct idpf_adapter *adapter)\n */\n static inline bool idpf_is_reset_detected(struct idpf_adapter *adapter)\n {\n-\tif (!adapter->hw.arq)\n+\tstruct libie_ctlq_info *arq = adapter->arq;\n+\n+\tif (!arq)\n \t\treturn true;\n \n-\treturn !(readl(libie_pci_get_mmio_addr(&adapter->ctlq_ctx.mmio_info,\n-\t\t\t\t\t adapter->hw.arq->reg.len)) &\n-\t\t adapter->hw.arq->reg.len_mask);\n+\treturn !(readl(arq->reg.len) & arq->reg.len_mask);\n }\n \n /**\ndiff --git a/drivers/net/ethernet/intel/idpf/idpf_controlq.c b/drivers/net/ethernet/intel/idpf/idpf_controlq.c\ndeleted file mode 100644\nindex 89f6b39934d8..000000000000\n--- a/drivers/net/ethernet/intel/idpf/idpf_controlq.c\n+++ /dev/null\n@@ -1,633 +0,0 @@\n-// SPDX-License-Identifier: GPL-2.0-only\n-/* Copyright (C) 2023 Intel Corporation */\n-\n-#include \"idpf.h\"\n-\n-/**\n- * idpf_ctlq_setup_regs - initialize control queue registers\n- * @cq: pointer to the specific control queue\n- * @q_create_info: structs containing info for each queue to be initialized\n- */\n-static void idpf_ctlq_setup_regs(struct idpf_ctlq_info *cq,\n-\t\t\t\t struct idpf_ctlq_create_info *q_create_info)\n-{\n-\t/* set control queue registers in our local struct */\n-\tcq->reg.head = q_create_info->reg.head;\n-\tcq->reg.tail = q_create_info->reg.tail;\n-\tcq->reg.len = q_create_info->reg.len;\n-\tcq->reg.bah = q_create_info->reg.bah;\n-\tcq->reg.bal = q_create_info->reg.bal;\n-\tcq->reg.len_mask = q_create_info->reg.len_mask;\n-\tcq->reg.len_ena_mask = q_create_info->reg.len_ena_mask;\n-\tcq->reg.head_mask = q_create_info->reg.head_mask;\n-}\n-\n-/**\n- * idpf_ctlq_init_regs - Initialize control queue registers\n- * @hw: pointer to hw struct\n- * @cq: pointer to the specific Control queue\n- * @is_rxq: true if receive control queue, false otherwise\n- *\n- * Initialize registers. 
The caller is expected to have already initialized the\n- * descriptor ring memory and buffer memory\n- */\n-static void idpf_ctlq_init_regs(struct idpf_hw *hw, struct idpf_ctlq_info *cq,\n-\t\t\t\tbool is_rxq)\n-{\n-\tstruct libie_mmio_info *mmio = &hw->back->ctlq_ctx.mmio_info;\n-\n-\t/* Update tail to post pre-allocated buffers for rx queues */\n-\tif (is_rxq)\n-\t\twritel((u32)(cq->ring_size - 1),\n-\t\t libie_pci_get_mmio_addr(mmio, cq->reg.tail));\n-\n-\t/* For non-Mailbox control queues only TAIL need to be set */\n-\tif (cq->q_id != -1)\n-\t\treturn;\n-\n-\t/* Clear Head for both send or receive */\n-\twritel(0, libie_pci_get_mmio_addr(mmio, cq->reg.head));\n-\n-\t/* set starting point */\n-\twritel(lower_32_bits(cq->desc_ring.pa),\n-\t libie_pci_get_mmio_addr(mmio, cq->reg.bal));\n-\twritel(upper_32_bits(cq->desc_ring.pa),\n-\t libie_pci_get_mmio_addr(mmio, cq->reg.bah));\n-\twritel((cq->ring_size | cq->reg.len_ena_mask),\n-\t libie_pci_get_mmio_addr(mmio, cq->reg.len));\n-}\n-\n-/**\n- * idpf_ctlq_init_rxq_bufs - populate receive queue descriptors with buf\n- * @cq: pointer to the specific Control queue\n- *\n- * Record the address of the receive queue DMA buffers in the descriptors.\n- * The buffers must have been previously allocated.\n- */\n-static void idpf_ctlq_init_rxq_bufs(struct idpf_ctlq_info *cq)\n-{\n-\tint i;\n-\n-\tfor (i = 0; i < cq->ring_size; i++) {\n-\t\tstruct idpf_ctlq_desc *desc = IDPF_CTLQ_DESC(cq, i);\n-\t\tstruct idpf_dma_mem *bi = cq->bi.rx_buff[i];\n-\n-\t\t/* No buffer to post to descriptor, continue */\n-\t\tif (!bi)\n-\t\t\tcontinue;\n-\n-\t\tdesc->flags =\n-\t\t\tcpu_to_le16(IDPF_CTLQ_FLAG_BUF | IDPF_CTLQ_FLAG_RD);\n-\t\tdesc->opcode = 0;\n-\t\tdesc->datalen = cpu_to_le16(bi->size);\n-\t\tdesc->ret_val = 0;\n-\t\tdesc->v_opcode_dtype = 0;\n-\t\tdesc->v_retval = 0;\n-\t\tdesc->params.indirect.addr_high =\n-\t\t\tcpu_to_le32(upper_32_bits(bi->pa));\n-\t\tdesc->params.indirect.addr_low =\n-\t\t\tcpu_to_le32(lower_32_bits(bi->pa));\n-\t\tdesc->params.indirect.param0 = 0;\n-\t\tdesc->params.indirect.sw_cookie = 0;\n-\t\tdesc->params.indirect.v_flags = 0;\n-\t}\n-}\n-\n-/**\n- * idpf_ctlq_shutdown - shutdown the CQ\n- * @hw: pointer to hw struct\n- * @cq: pointer to the specific Control queue\n- *\n- * The main shutdown routine for any controq queue\n- */\n-static void idpf_ctlq_shutdown(struct idpf_hw *hw, struct idpf_ctlq_info *cq)\n-{\n-\tspin_lock(&cq->cq_lock);\n-\n-\t/* free ring buffers and the ring itself */\n-\tidpf_ctlq_dealloc_ring_res(hw, cq);\n-\n-\t/* Set ring_size to 0 to indicate uninitialized queue */\n-\tcq->ring_size = 0;\n-\n-\tspin_unlock(&cq->cq_lock);\n-}\n-\n-/**\n- * idpf_ctlq_add - add one control queue\n- * @hw: pointer to hardware struct\n- * @qinfo: info for queue to be created\n- * @cq_out: (output) double pointer to control queue to be created\n- *\n- * Allocate and initialize a control queue and add it to the control queue list.\n- * The cq parameter will be allocated/initialized and passed back to the caller\n- * if no errors occur.\n- *\n- * Note: idpf_ctlq_init must be called prior to any calls to idpf_ctlq_add\n- */\n-int idpf_ctlq_add(struct idpf_hw *hw,\n-\t\t struct idpf_ctlq_create_info *qinfo,\n-\t\t struct idpf_ctlq_info **cq_out)\n-{\n-\tstruct idpf_ctlq_info *cq;\n-\tbool is_rxq = false;\n-\tint err;\n-\n-\tcq = kzalloc(sizeof(*cq), GFP_KERNEL);\n-\tif (!cq)\n-\t\treturn -ENOMEM;\n-\n-\tcq->cq_type = qinfo->type;\n-\tcq->q_id = qinfo->id;\n-\tcq->buf_size = qinfo->buf_size;\n-\tcq->ring_size = 
qinfo->len;\n-\n-\tcq->next_to_use = 0;\n-\tcq->next_to_clean = 0;\n-\tcq->next_to_post = cq->ring_size - 1;\n-\n-\tswitch (qinfo->type) {\n-\tcase IDPF_CTLQ_TYPE_MAILBOX_RX:\n-\t\tis_rxq = true;\n-\t\tfallthrough;\n-\tcase IDPF_CTLQ_TYPE_MAILBOX_TX:\n-\t\terr = idpf_ctlq_alloc_ring_res(hw, cq);\n-\t\tbreak;\n-\tdefault:\n-\t\terr = -EBADR;\n-\t\tbreak;\n-\t}\n-\n-\tif (err)\n-\t\tgoto init_free_q;\n-\n-\tif (is_rxq) {\n-\t\tidpf_ctlq_init_rxq_bufs(cq);\n-\t} else {\n-\t\t/* Allocate the array of msg pointers for TX queues */\n-\t\tcq->bi.tx_msg = kcalloc(qinfo->len,\n-\t\t\t\t\tsizeof(struct idpf_ctlq_msg *),\n-\t\t\t\t\tGFP_KERNEL);\n-\t\tif (!cq->bi.tx_msg) {\n-\t\t\terr = -ENOMEM;\n-\t\t\tgoto init_dealloc_q_mem;\n-\t\t}\n-\t}\n-\n-\tidpf_ctlq_setup_regs(cq, qinfo);\n-\n-\tidpf_ctlq_init_regs(hw, cq, is_rxq);\n-\n-\tspin_lock_init(&cq->cq_lock);\n-\n-\tlist_add(&cq->cq_list, &hw->cq_list_head);\n-\n-\t*cq_out = cq;\n-\n-\treturn 0;\n-\n-init_dealloc_q_mem:\n-\t/* free ring buffers and the ring itself */\n-\tidpf_ctlq_dealloc_ring_res(hw, cq);\n-init_free_q:\n-\tkfree(cq);\n-\n-\treturn err;\n-}\n-\n-/**\n- * idpf_ctlq_remove - deallocate and remove specified control queue\n- * @hw: pointer to hardware struct\n- * @cq: pointer to control queue to be removed\n- */\n-void idpf_ctlq_remove(struct idpf_hw *hw,\n-\t\t struct idpf_ctlq_info *cq)\n-{\n-\tlist_del(&cq->cq_list);\n-\tidpf_ctlq_shutdown(hw, cq);\n-\tkfree(cq);\n-}\n-\n-/**\n- * idpf_ctlq_init - main initialization routine for all control queues\n- * @hw: pointer to hardware struct\n- * @num_q: number of queues to initialize\n- * @q_info: array of structs containing info for each queue to be initialized\n- *\n- * This initializes any number and any type of control queues. This is an all\n- * or nothing routine; if one fails, all previously allocated queues will be\n- * destroyed. 
This must be called prior to using the individual add/remove\n- * APIs.\n- */\n-int idpf_ctlq_init(struct idpf_hw *hw, u8 num_q,\n-\t\t struct idpf_ctlq_create_info *q_info)\n-{\n-\tstruct idpf_ctlq_info *cq, *tmp;\n-\tint err;\n-\tint i;\n-\n-\tINIT_LIST_HEAD(&hw->cq_list_head);\n-\n-\tfor (i = 0; i < num_q; i++) {\n-\t\tstruct idpf_ctlq_create_info *qinfo = q_info + i;\n-\n-\t\terr = idpf_ctlq_add(hw, qinfo, &cq);\n-\t\tif (err)\n-\t\t\tgoto init_destroy_qs;\n-\t}\n-\n-\treturn 0;\n-\n-init_destroy_qs:\n-\tlist_for_each_entry_safe(cq, tmp, &hw->cq_list_head, cq_list)\n-\t\tidpf_ctlq_remove(hw, cq);\n-\n-\treturn err;\n-}\n-\n-/**\n- * idpf_ctlq_deinit - destroy all control queues\n- * @hw: pointer to hw struct\n- */\n-void idpf_ctlq_deinit(struct idpf_hw *hw)\n-{\n-\tstruct idpf_ctlq_info *cq, *tmp;\n-\n-\tlist_for_each_entry_safe(cq, tmp, &hw->cq_list_head, cq_list)\n-\t\tidpf_ctlq_remove(hw, cq);\n-}\n-\n-/**\n- * idpf_ctlq_send - send command to Control Queue (CTQ)\n- * @hw: pointer to hw struct\n- * @cq: handle to control queue struct to send on\n- * @num_q_msg: number of messages to send on control queue\n- * @q_msg: pointer to array of queue messages to be sent\n- *\n- * The caller is expected to allocate DMAable buffers and pass them to the\n- * send routine via the q_msg struct / control queue specific data struct.\n- * The control queue will hold a reference to each send message until\n- * the completion for that message has been cleaned.\n- */\n-int idpf_ctlq_send(struct idpf_hw *hw, struct idpf_ctlq_info *cq,\n-\t\t u16 num_q_msg, struct idpf_ctlq_msg q_msg[])\n-{\n-\tstruct idpf_ctlq_desc *desc;\n-\tint num_desc_avail;\n-\tint err = 0;\n-\tint i;\n-\n-\tspin_lock(&cq->cq_lock);\n-\n-\t/* Ensure there are enough descriptors to send all messages */\n-\tnum_desc_avail = IDPF_CTLQ_DESC_UNUSED(cq);\n-\tif (num_desc_avail == 0 || num_desc_avail < num_q_msg) {\n-\t\terr = -ENOSPC;\n-\t\tgoto err_unlock;\n-\t}\n-\n-\tfor (i = 0; i < num_q_msg; i++) {\n-\t\tstruct idpf_ctlq_msg *msg = &q_msg[i];\n-\n-\t\tdesc = IDPF_CTLQ_DESC(cq, cq->next_to_use);\n-\n-\t\tdesc->opcode = cpu_to_le16(msg->opcode);\n-\t\tdesc->pfid_vfid = cpu_to_le16(msg->func_id);\n-\n-\t\tdesc->v_opcode_dtype = cpu_to_le32(msg->cookie.mbx.chnl_opcode);\n-\t\tdesc->v_retval = cpu_to_le32(msg->cookie.mbx.chnl_retval);\n-\n-\t\tdesc->flags = cpu_to_le16((msg->host_id & IDPF_HOST_ID_MASK) <<\n-\t\t\t\t\t IDPF_CTLQ_FLAG_HOST_ID_S);\n-\t\tif (msg->data_len) {\n-\t\t\tstruct idpf_dma_mem *buff = msg->ctx.indirect.payload;\n-\n-\t\t\tdesc->datalen |= cpu_to_le16(msg->data_len);\n-\t\t\tdesc->flags |= cpu_to_le16(IDPF_CTLQ_FLAG_BUF);\n-\t\t\tdesc->flags |= cpu_to_le16(IDPF_CTLQ_FLAG_RD);\n-\n-\t\t\t/* Update the address values in the desc with the pa\n-\t\t\t * value for respective buffer\n-\t\t\t */\n-\t\t\tdesc->params.indirect.addr_high =\n-\t\t\t\tcpu_to_le32(upper_32_bits(buff->pa));\n-\t\t\tdesc->params.indirect.addr_low =\n-\t\t\t\tcpu_to_le32(lower_32_bits(buff->pa));\n-\n-\t\t\tmemcpy(&desc->params, msg->ctx.indirect.context,\n-\t\t\t IDPF_INDIRECT_CTX_SIZE);\n-\t\t} else {\n-\t\t\tmemcpy(&desc->params, msg->ctx.direct,\n-\t\t\t IDPF_DIRECT_CTX_SIZE);\n-\t\t}\n-\n-\t\t/* Store buffer info */\n-\t\tcq->bi.tx_msg[cq->next_to_use] = msg;\n-\n-\t\t(cq->next_to_use)++;\n-\t\tif (cq->next_to_use == cq->ring_size)\n-\t\t\tcq->next_to_use = 0;\n-\t}\n-\n-\t/* Force memory write to complete before letting hardware\n-\t * know that there are new descriptors to fetch.\n-\t */\n-\tdma_wmb();\n-\n-\twritel(cq->next_to_use,\n-\t 
libie_pci_get_mmio_addr(&hw->back->ctlq_ctx.mmio_info,\n-\t\t\t\t cq->reg.tail));\n-\n-err_unlock:\n-\tspin_unlock(&cq->cq_lock);\n-\n-\treturn err;\n-}\n-\n-/**\n- * idpf_ctlq_clean_sq - reclaim send descriptors on HW write back for the\n- * requested queue\n- * @cq: pointer to the specific Control queue\n- * @clean_count: (input|output) number of descriptors to clean as input, and\n- * number of descriptors actually cleaned as output\n- * @msg_status: (output) pointer to msg pointer array to be populated; needs\n- * to be allocated by caller\n- *\n- * Returns an array of message pointers associated with the cleaned\n- * descriptors. The pointers are to the original ctlq_msgs sent on the cleaned\n- * descriptors. The status will be returned for each; any messages that failed\n- * to send will have a non-zero status. The caller is expected to free original\n- * ctlq_msgs and free or reuse the DMA buffers.\n- */\n-int idpf_ctlq_clean_sq(struct idpf_ctlq_info *cq, u16 *clean_count,\n-\t\t struct idpf_ctlq_msg *msg_status[])\n-{\n-\tstruct idpf_ctlq_desc *desc;\n-\tu16 i, num_to_clean;\n-\tu16 ntc, desc_err;\n-\n-\tif (*clean_count == 0)\n-\t\treturn 0;\n-\tif (*clean_count > cq->ring_size)\n-\t\treturn -EBADR;\n-\n-\tspin_lock(&cq->cq_lock);\n-\n-\tntc = cq->next_to_clean;\n-\n-\tnum_to_clean = *clean_count;\n-\n-\tfor (i = 0; i < num_to_clean; i++) {\n-\t\t/* Fetch next descriptor and check if marked as done */\n-\t\tdesc = IDPF_CTLQ_DESC(cq, ntc);\n-\t\tif (!(le16_to_cpu(desc->flags) & IDPF_CTLQ_FLAG_DD))\n-\t\t\tbreak;\n-\n-\t\t/* Ensure no other fields are read until DD flag is checked */\n-\t\tdma_rmb();\n-\n-\t\t/* strip off FW internal code */\n-\t\tdesc_err = le16_to_cpu(desc->ret_val) & 0xff;\n-\n-\t\tmsg_status[i] = cq->bi.tx_msg[ntc];\n-\t\tmsg_status[i]->status = desc_err;\n-\n-\t\tcq->bi.tx_msg[ntc] = NULL;\n-\n-\t\t/* Zero out any stale data */\n-\t\tmemset(desc, 0, sizeof(*desc));\n-\n-\t\tntc++;\n-\t\tif (ntc == cq->ring_size)\n-\t\t\tntc = 0;\n-\t}\n-\n-\tcq->next_to_clean = ntc;\n-\n-\tspin_unlock(&cq->cq_lock);\n-\n-\t/* Return number of descriptors actually cleaned */\n-\t*clean_count = i;\n-\n-\treturn 0;\n-}\n-\n-/**\n- * idpf_ctlq_post_rx_buffs - post buffers to descriptor ring\n- * @hw: pointer to hw struct\n- * @cq: pointer to control queue handle\n- * @buff_count: (input|output) input is number of buffers caller is trying to\n- * return; output is number of buffers that were not posted\n- * @buffs: array of pointers to dma mem structs to be given to hardware\n- *\n- * Caller uses this function to return DMA buffers to the descriptor ring after\n- * consuming them; buff_count will be the number of buffers.\n- *\n- * Note: this function needs to be called after a receive call even\n- * if there are no DMA buffers to be returned, i.e. 
buff_count = 0,\n- * buffs = NULL to support direct commands\n- */\n-int idpf_ctlq_post_rx_buffs(struct idpf_hw *hw, struct idpf_ctlq_info *cq,\n-\t\t\t u16 *buff_count, struct idpf_dma_mem **buffs)\n-{\n-\tstruct idpf_ctlq_desc *desc;\n-\tu16 ntp = cq->next_to_post;\n-\tbool buffs_avail = false;\n-\tu16 tbp = ntp + 1;\n-\tint i = 0;\n-\n-\tif (*buff_count > cq->ring_size)\n-\t\treturn -EBADR;\n-\n-\tif (*buff_count > 0)\n-\t\tbuffs_avail = true;\n-\n-\tspin_lock(&cq->cq_lock);\n-\n-\tif (tbp >= cq->ring_size)\n-\t\ttbp = 0;\n-\n-\tif (tbp == cq->next_to_clean)\n-\t\t/* Nothing to do */\n-\t\tgoto post_buffs_out;\n-\n-\t/* Post buffers for as many as provided or up until the last one used */\n-\twhile (ntp != cq->next_to_clean) {\n-\t\tdesc = IDPF_CTLQ_DESC(cq, ntp);\n-\n-\t\tif (cq->bi.rx_buff[ntp])\n-\t\t\tgoto fill_desc;\n-\t\tif (!buffs_avail) {\n-\t\t\t/* If the caller hasn't given us any buffers or\n-\t\t\t * there are none left, search the ring itself\n-\t\t\t * for an available buffer to move to this\n-\t\t\t * entry starting at the next entry in the ring\n-\t\t\t */\n-\t\t\ttbp = ntp + 1;\n-\n-\t\t\t/* Wrap ring if necessary */\n-\t\t\tif (tbp >= cq->ring_size)\n-\t\t\t\ttbp = 0;\n-\n-\t\t\twhile (tbp != cq->next_to_clean) {\n-\t\t\t\tif (cq->bi.rx_buff[tbp]) {\n-\t\t\t\t\tcq->bi.rx_buff[ntp] =\n-\t\t\t\t\t\tcq->bi.rx_buff[tbp];\n-\t\t\t\t\tcq->bi.rx_buff[tbp] = NULL;\n-\n-\t\t\t\t\t/* Found a buffer, no need to\n-\t\t\t\t\t * search anymore\n-\t\t\t\t\t */\n-\t\t\t\t\tbreak;\n-\t\t\t\t}\n-\n-\t\t\t\t/* Wrap ring if necessary */\n-\t\t\t\ttbp++;\n-\t\t\t\tif (tbp >= cq->ring_size)\n-\t\t\t\t\ttbp = 0;\n-\t\t\t}\n-\n-\t\t\tif (tbp == cq->next_to_clean)\n-\t\t\t\tgoto post_buffs_out;\n-\t\t} else {\n-\t\t\t/* Give back pointer to DMA buffer */\n-\t\t\tcq->bi.rx_buff[ntp] = buffs[i];\n-\t\t\ti++;\n-\n-\t\t\tif (i >= *buff_count)\n-\t\t\t\tbuffs_avail = false;\n-\t\t}\n-\n-fill_desc:\n-\t\tdesc->flags =\n-\t\t\tcpu_to_le16(IDPF_CTLQ_FLAG_BUF | IDPF_CTLQ_FLAG_RD);\n-\n-\t\t/* Post buffers to descriptor */\n-\t\tdesc->datalen = cpu_to_le16(cq->bi.rx_buff[ntp]->size);\n-\t\tdesc->params.indirect.addr_high =\n-\t\t\tcpu_to_le32(upper_32_bits(cq->bi.rx_buff[ntp]->pa));\n-\t\tdesc->params.indirect.addr_low =\n-\t\t\tcpu_to_le32(lower_32_bits(cq->bi.rx_buff[ntp]->pa));\n-\n-\t\tntp++;\n-\t\tif (ntp == cq->ring_size)\n-\t\t\tntp = 0;\n-\t}\n-\n-post_buffs_out:\n-\t/* Only update tail if buffers were actually posted */\n-\tif (cq->next_to_post != ntp) {\n-\t\tif (ntp)\n-\t\t\t/* Update next_to_post to ntp - 1 since current ntp\n-\t\t\t * will not have a buffer\n-\t\t\t */\n-\t\t\tcq->next_to_post = ntp - 1;\n-\t\telse\n-\t\t\t/* Wrap to end of end ring since current ntp is 0 */\n-\t\t\tcq->next_to_post = cq->ring_size - 1;\n-\n-\t\tdma_wmb();\n-\n-\t\twritel(cq->next_to_post,\n-\t\t libie_pci_get_mmio_addr(&hw->back->ctlq_ctx.mmio_info,\n-\t\t\t\t\t cq->reg.tail));\n-\t}\n-\n-\tspin_unlock(&cq->cq_lock);\n-\n-\t/* return the number of buffers that were not posted */\n-\t*buff_count = *buff_count - i;\n-\n-\treturn 0;\n-}\n-\n-/**\n- * idpf_ctlq_recv - receive control queue message call back\n- * @cq: pointer to control queue handle to receive on\n- * @num_q_msg: (input|output) input number of messages that should be received;\n- * output number of messages actually received\n- * @q_msg: (output) array of received control queue messages on this q;\n- * needs to be pre-allocated by caller for as many messages as requested\n- *\n- * Called by interrupt handler or polling mechanism. 
Caller is expected\n- * to free buffers\n- */\n-int idpf_ctlq_recv(struct idpf_ctlq_info *cq, u16 *num_q_msg,\n-\t\t struct idpf_ctlq_msg *q_msg)\n-{\n-\tu16 num_to_clean, ntc, flags;\n-\tstruct idpf_ctlq_desc *desc;\n-\tint err = 0;\n-\tu16 i;\n-\n-\t/* take the lock before we start messing with the ring */\n-\tspin_lock(&cq->cq_lock);\n-\n-\tntc = cq->next_to_clean;\n-\n-\tnum_to_clean = *num_q_msg;\n-\n-\tfor (i = 0; i < num_to_clean; i++) {\n-\t\t/* Fetch next descriptor and check if marked as done */\n-\t\tdesc = IDPF_CTLQ_DESC(cq, ntc);\n-\t\tflags = le16_to_cpu(desc->flags);\n-\n-\t\tif (!(flags & IDPF_CTLQ_FLAG_DD))\n-\t\t\tbreak;\n-\n-\t\t/* Ensure no other fields are read until DD flag is checked */\n-\t\tdma_rmb();\n-\n-\t\tq_msg[i].vmvf_type = (flags &\n-\t\t\t\t (IDPF_CTLQ_FLAG_FTYPE_VM |\n-\t\t\t\t IDPF_CTLQ_FLAG_FTYPE_PF)) >>\n-\t\t\t\t IDPF_CTLQ_FLAG_FTYPE_S;\n-\n-\t\tif (flags & IDPF_CTLQ_FLAG_ERR)\n-\t\t\terr = -EBADMSG;\n-\n-\t\tq_msg[i].cookie.mbx.chnl_opcode =\n-\t\t\t\tle32_to_cpu(desc->v_opcode_dtype);\n-\t\tq_msg[i].cookie.mbx.chnl_retval =\n-\t\t\t\tle32_to_cpu(desc->v_retval);\n-\n-\t\tq_msg[i].opcode = le16_to_cpu(desc->opcode);\n-\t\tq_msg[i].data_len = le16_to_cpu(desc->datalen);\n-\t\tq_msg[i].status = le16_to_cpu(desc->ret_val);\n-\n-\t\tif (desc->datalen) {\n-\t\t\tmemcpy(q_msg[i].ctx.indirect.context,\n-\t\t\t &desc->params.indirect, IDPF_INDIRECT_CTX_SIZE);\n-\n-\t\t\t/* Assign pointer to dma buffer to ctlq_msg array\n-\t\t\t * to be given to upper layer\n-\t\t\t */\n-\t\t\tq_msg[i].ctx.indirect.payload = cq->bi.rx_buff[ntc];\n-\n-\t\t\t/* Zero out pointer to DMA buffer info;\n-\t\t\t * will be repopulated by post buffers API\n-\t\t\t */\n-\t\t\tcq->bi.rx_buff[ntc] = NULL;\n-\t\t} else {\n-\t\t\tmemcpy(q_msg[i].ctx.direct, desc->params.raw,\n-\t\t\t IDPF_DIRECT_CTX_SIZE);\n-\t\t}\n-\n-\t\t/* Zero out stale data in descriptor */\n-\t\tmemset(desc, 0, sizeof(struct idpf_ctlq_desc));\n-\n-\t\tntc++;\n-\t\tif (ntc == cq->ring_size)\n-\t\t\tntc = 0;\n-\t}\n-\n-\tcq->next_to_clean = ntc;\n-\n-\tspin_unlock(&cq->cq_lock);\n-\n-\t*num_q_msg = i;\n-\tif (*num_q_msg == 0)\n-\t\terr = -ENOMSG;\n-\n-\treturn err;\n-}\ndiff --git a/drivers/net/ethernet/intel/idpf/idpf_controlq.h b/drivers/net/ethernet/intel/idpf/idpf_controlq.h\ndeleted file mode 100644\nindex acf595e9265f..000000000000\n--- a/drivers/net/ethernet/intel/idpf/idpf_controlq.h\n+++ /dev/null\n@@ -1,142 +0,0 @@\n-/* SPDX-License-Identifier: GPL-2.0-only */\n-/* Copyright (C) 2023 Intel Corporation */\n-\n-#ifndef _IDPF_CONTROLQ_H_\n-#define _IDPF_CONTROLQ_H_\n-\n-#include <linux/slab.h>\n-\n-#include \"idpf_controlq_api.h\"\n-\n-/* Maximum buffer length for all control queue types */\n-#define IDPF_CTLQ_MAX_BUF_LEN\t4096\n-\n-#define IDPF_CTLQ_DESC(R, i) \\\n-\t(&(((struct idpf_ctlq_desc *)((R)->desc_ring.va))[i]))\n-\n-#define IDPF_CTLQ_DESC_UNUSED(R) \\\n-\t((u16)((((R)->next_to_clean > (R)->next_to_use) ? 
0 : (R)->ring_size) + \\\n-\t (R)->next_to_clean - (R)->next_to_use - 1))\n-\n-/* Control Queue default settings */\n-#define IDPF_CTRL_SQ_CMD_TIMEOUT\t250 /* msecs */\n-\n-struct idpf_ctlq_desc {\n-\t/* Control queue descriptor flags */\n-\t__le16 flags;\n-\t/* Control queue message opcode */\n-\t__le16 opcode;\n-\t__le16 datalen;\t\t/* 0 for direct commands */\n-\tunion {\n-\t\t__le16 ret_val;\n-\t\t__le16 pfid_vfid;\n-#define IDPF_CTLQ_DESC_VF_ID_S\t0\n-#define IDPF_CTLQ_DESC_VF_ID_M\t(0x7FF << IDPF_CTLQ_DESC_VF_ID_S)\n-#define IDPF_CTLQ_DESC_PF_ID_S\t11\n-#define IDPF_CTLQ_DESC_PF_ID_M\t(0x1F << IDPF_CTLQ_DESC_PF_ID_S)\n-\t};\n-\n-\t/* Virtchnl message opcode and virtchnl descriptor type\n-\t * v_opcode=[27:0], v_dtype=[31:28]\n-\t */\n-\t__le32 v_opcode_dtype;\n-\t/* Virtchnl return value */\n-\t__le32 v_retval;\n-\tunion {\n-\t\tstruct {\n-\t\t\t__le32 param0;\n-\t\t\t__le32 param1;\n-\t\t\t__le32 param2;\n-\t\t\t__le32 param3;\n-\t\t} direct;\n-\t\tstruct {\n-\t\t\t__le32 param0;\n-\t\t\t__le16 sw_cookie;\n-\t\t\t/* Virtchnl flags */\n-\t\t\t__le16 v_flags;\n-\t\t\t__le32 addr_high;\n-\t\t\t__le32 addr_low;\n-\t\t} indirect;\n-\t\tu8 raw[16];\n-\t} params;\n-};\n-\n-/* Flags sub-structure\n- * |0 |1 |2 |3 |4 |5 |6 |7 |8 |9 |10 |11 |12 |13 |14 |15 |\n- * |DD |CMP|ERR| * RSV * |FTYPE | *RSV* |RD |VFC|BUF| HOST_ID |\n- */\n-/* command flags and offsets */\n-#define IDPF_CTLQ_FLAG_DD_S\t\t0\n-#define IDPF_CTLQ_FLAG_CMP_S\t\t1\n-#define IDPF_CTLQ_FLAG_ERR_S\t\t2\n-#define IDPF_CTLQ_FLAG_FTYPE_S\t\t6\n-#define IDPF_CTLQ_FLAG_RD_S\t\t10\n-#define IDPF_CTLQ_FLAG_VFC_S\t\t11\n-#define IDPF_CTLQ_FLAG_BUF_S\t\t12\n-#define IDPF_CTLQ_FLAG_HOST_ID_S\t13\n-\n-#define IDPF_CTLQ_FLAG_DD\tBIT(IDPF_CTLQ_FLAG_DD_S)\t/* 0x1\t */\n-#define IDPF_CTLQ_FLAG_CMP\tBIT(IDPF_CTLQ_FLAG_CMP_S)\t/* 0x2\t */\n-#define IDPF_CTLQ_FLAG_ERR\tBIT(IDPF_CTLQ_FLAG_ERR_S)\t/* 0x4\t */\n-#define IDPF_CTLQ_FLAG_FTYPE_VM\tBIT(IDPF_CTLQ_FLAG_FTYPE_S)\t/* 0x40\t */\n-#define IDPF_CTLQ_FLAG_FTYPE_PF\tBIT(IDPF_CTLQ_FLAG_FTYPE_S + 1)\t/* 0x80 */\n-#define IDPF_CTLQ_FLAG_RD\tBIT(IDPF_CTLQ_FLAG_RD_S)\t/* 0x400 */\n-#define IDPF_CTLQ_FLAG_VFC\tBIT(IDPF_CTLQ_FLAG_VFC_S)\t/* 0x800 */\n-#define IDPF_CTLQ_FLAG_BUF\tBIT(IDPF_CTLQ_FLAG_BUF_S)\t/* 0x1000 */\n-\n-/* Host ID is a special field that has 3b and not a 1b flag */\n-#define IDPF_CTLQ_FLAG_HOST_ID_M MAKE_MASK(0x7000UL, IDPF_CTLQ_FLAG_HOST_ID_S)\n-\n-struct idpf_mbxq_desc {\n-\tu8 pad[8];\t\t/* CTLQ flags/opcode/len/retval fields */\n-\tu32 chnl_opcode;\t/* avoid confusion with desc->opcode */\n-\tu32 chnl_retval;\t/* ditto for desc->retval */\n-\tu32 pf_vf_id;\t\t/* used by CP when sending to PF */\n-};\n-\n-/* Max number of MMIO regions not including the mailbox and rstat regions in\n- * the fallback case when the whole bar is mapped.\n- */\n-#define IDPF_MMIO_MAP_FALLBACK_MAX_REMAINING\t\t3\n-\n-struct idpf_mmio_reg {\n-\tvoid __iomem *vaddr;\n-\tresource_size_t addr_start;\n-\tresource_size_t addr_len;\n-};\n-\n-/* Define the driver hardware struct to replace other control structs as needed\n- * Align to ctlq_hw_info\n- */\n-struct idpf_hw {\n-\t/* Array of remaining LAN BAR regions */\n-\tint num_lan_regs;\n-\tstruct idpf_mmio_reg *lan_regs;\n-\n-\tstruct idpf_adapter *back;\n-\n-\t/* control queue - send and receive */\n-\tstruct idpf_ctlq_info *asq;\n-\tstruct idpf_ctlq_info *arq;\n-\n-\t/* pci info */\n-\tu16 device_id;\n-\tu16 vendor_id;\n-\tu16 subsystem_device_id;\n-\tu16 subsystem_vendor_id;\n-\tu8 revision_id;\n-\tbool adapter_stopped;\n-\n-\tstruct list_head 
cq_list_head;\n-};\n-\n-int idpf_ctlq_alloc_ring_res(struct idpf_hw *hw,\n-\t\t\t struct idpf_ctlq_info *cq);\n-\n-void idpf_ctlq_dealloc_ring_res(struct idpf_hw *hw, struct idpf_ctlq_info *cq);\n-\n-/* prototype for functions used for dynamic memory allocation */\n-void *idpf_alloc_dma_mem(struct idpf_hw *hw, struct idpf_dma_mem *mem,\n-\t\t\t u64 size);\n-void idpf_free_dma_mem(struct idpf_hw *hw, struct idpf_dma_mem *mem);\n-#endif /* _IDPF_CONTROLQ_H_ */\ndiff --git a/drivers/net/ethernet/intel/idpf/idpf_controlq_api.h b/drivers/net/ethernet/intel/idpf/idpf_controlq_api.h\ndeleted file mode 100644\nindex 3414c5f9a831..000000000000\n--- a/drivers/net/ethernet/intel/idpf/idpf_controlq_api.h\n+++ /dev/null\n@@ -1,177 +0,0 @@\n-/* SPDX-License-Identifier: GPL-2.0-only */\n-/* Copyright (C) 2023 Intel Corporation */\n-\n-#ifndef _IDPF_CONTROLQ_API_H_\n-#define _IDPF_CONTROLQ_API_H_\n-\n-#include \"idpf_mem.h\"\n-\n-struct idpf_hw;\n-\n-/* Used for queue init, response and events */\n-enum idpf_ctlq_type {\n-\tIDPF_CTLQ_TYPE_MAILBOX_TX\t= 0,\n-\tIDPF_CTLQ_TYPE_MAILBOX_RX\t= 1,\n-\tIDPF_CTLQ_TYPE_CONFIG_TX\t= 2,\n-\tIDPF_CTLQ_TYPE_CONFIG_RX\t= 3,\n-\tIDPF_CTLQ_TYPE_EVENT_RX\t\t= 4,\n-\tIDPF_CTLQ_TYPE_RDMA_TX\t\t= 5,\n-\tIDPF_CTLQ_TYPE_RDMA_RX\t\t= 6,\n-\tIDPF_CTLQ_TYPE_RDMA_COMPL\t= 7\n-};\n-\n-/* Generic Control Queue Structures */\n-struct idpf_ctlq_reg {\n-\t/* used for queue tracking */\n-\tu32 head;\n-\tu32 tail;\n-\t/* Below applies only to default mb (if present) */\n-\tu32 len;\n-\tu32 bah;\n-\tu32 bal;\n-\tu32 len_mask;\n-\tu32 len_ena_mask;\n-\tu32 head_mask;\n-};\n-\n-/* Generic queue msg structure */\n-struct idpf_ctlq_msg {\n-\tu8 vmvf_type; /* represents the source of the message on recv */\n-#define IDPF_VMVF_TYPE_VF 0\n-#define IDPF_VMVF_TYPE_VM 1\n-#define IDPF_VMVF_TYPE_PF 2\n-\tu8 host_id;\n-\t/* 3b field used only when sending a message to CP - to be used in\n-\t * combination with target func_id to route the message\n-\t */\n-#define IDPF_HOST_ID_MASK 0x7\n-\n-\tu16 opcode;\n-\tu16 data_len;\t/* data_len = 0 when no payload is attached */\n-\tunion {\n-\t\tu16 func_id;\t/* when sending a message */\n-\t\tu16 status;\t/* when receiving a message */\n-\t};\n-\tunion {\n-\t\tstruct {\n-\t\t\tu32 chnl_opcode;\n-\t\t\tu32 chnl_retval;\n-\t\t} mbx;\n-\t} cookie;\n-\tunion {\n-#define IDPF_DIRECT_CTX_SIZE\t16\n-#define IDPF_INDIRECT_CTX_SIZE\t8\n-\t\t/* 16 bytes of context can be provided or 8 bytes of context\n-\t\t * plus the address of a DMA buffer\n-\t\t */\n-\t\tu8 direct[IDPF_DIRECT_CTX_SIZE];\n-\t\tstruct {\n-\t\t\tu8 context[IDPF_INDIRECT_CTX_SIZE];\n-\t\t\tstruct idpf_dma_mem *payload;\n-\t\t} indirect;\n-\t\tstruct {\n-\t\t\tu32 rsvd;\n-\t\t\tu16 data;\n-\t\t\tu16 flags;\n-\t\t} sw_cookie;\n-\t} ctx;\n-};\n-\n-/* Generic queue info structures */\n-/* MB, CONFIG and EVENT q do not have extended info */\n-struct idpf_ctlq_create_info {\n-\tenum idpf_ctlq_type type;\n-\tint id; /* absolute queue offset passed as input\n-\t\t * -1 for default mailbox if present\n-\t\t */\n-\tu16 len; /* Queue length passed as input */\n-\tu16 buf_size; /* buffer size passed as input */\n-\tu64 base_address; /* output, HPA of the Queue start */\n-\tstruct idpf_ctlq_reg reg; /* registers accessed by ctlqs */\n-\n-\tint ext_info_size;\n-\tvoid *ext_info; /* Specific to q type */\n-};\n-\n-/* Control Queue information */\n-struct idpf_ctlq_info {\n-\tstruct list_head cq_list;\n-\n-\tenum idpf_ctlq_type cq_type;\n-\tint q_id;\n-\tspinlock_t cq_lock;\t\t/* control queue lock */\n-\t/* used for 
interrupt processing */\n-\tu16 next_to_use;\n-\tu16 next_to_clean;\n-\tu16 next_to_post;\t\t/* starting descriptor to post buffers\n-\t\t\t\t\t * to after recev\n-\t\t\t\t\t */\n-\n-\tstruct idpf_dma_mem desc_ring;\t/* descriptor ring memory\n-\t\t\t\t\t * idpf_dma_mem is defined in OSdep.h\n-\t\t\t\t\t */\n-\tunion {\n-\t\tstruct idpf_dma_mem **rx_buff;\n-\t\tstruct idpf_ctlq_msg **tx_msg;\n-\t} bi;\n-\n-\tu16 buf_size;\t\t\t/* queue buffer size */\n-\tu16 ring_size;\t\t\t/* Number of descriptors */\n-\tstruct idpf_ctlq_reg reg;\t/* registers accessed by ctlqs */\n-};\n-\n-/**\n- * enum idpf_mbx_opc - PF/VF mailbox commands\n- * @idpf_mbq_opc_send_msg_to_cp: used by PF or VF to send a message to its CP\n- * @idpf_mbq_opc_send_msg_to_peer_drv: used by PF or VF to send a message to\n- *\t\t\t\t any peer driver\n- */\n-enum idpf_mbx_opc {\n-\tidpf_mbq_opc_send_msg_to_cp\t\t= 0x0801,\n-\tidpf_mbq_opc_send_msg_to_peer_drv\t= 0x0804,\n-};\n-\n-/* API supported for control queue management */\n-/* Will init all required q including default mb. \"q_info\" is an array of\n- * create_info structs equal to the number of control queues to be created.\n- */\n-int idpf_ctlq_init(struct idpf_hw *hw, u8 num_q,\n-\t\t struct idpf_ctlq_create_info *q_info);\n-\n-/* Allocate and initialize a single control queue, which will be added to the\n- * control queue list; returns a handle to the created control queue\n- */\n-int idpf_ctlq_add(struct idpf_hw *hw,\n-\t\t struct idpf_ctlq_create_info *qinfo,\n-\t\t struct idpf_ctlq_info **cq);\n-\n-/* Deinitialize and deallocate a single control queue */\n-void idpf_ctlq_remove(struct idpf_hw *hw,\n-\t\t struct idpf_ctlq_info *cq);\n-\n-/* Sends messages to HW and will also free the buffer*/\n-int idpf_ctlq_send(struct idpf_hw *hw,\n-\t\t struct idpf_ctlq_info *cq,\n-\t\t u16 num_q_msg,\n-\t\t struct idpf_ctlq_msg q_msg[]);\n-\n-/* Receives messages and called by interrupt handler/polling\n- * initiated by app/process. 
Also caller is supposed to free the buffers\n- */\n-int idpf_ctlq_recv(struct idpf_ctlq_info *cq, u16 *num_q_msg,\n-\t\t struct idpf_ctlq_msg *q_msg);\n-\n-/* Reclaims send descriptors on HW write back */\n-int idpf_ctlq_clean_sq(struct idpf_ctlq_info *cq, u16 *clean_count,\n-\t\t struct idpf_ctlq_msg *msg_status[]);\n-\n-/* Indicate RX buffers are done being processed */\n-int idpf_ctlq_post_rx_buffs(struct idpf_hw *hw,\n-\t\t\t struct idpf_ctlq_info *cq,\n-\t\t\t u16 *buff_count,\n-\t\t\t struct idpf_dma_mem **buffs);\n-\n-/* Will destroy all q including the default mb */\n-void idpf_ctlq_deinit(struct idpf_hw *hw);\n-\n-#endif /* _IDPF_CONTROLQ_API_H_ */\ndiff --git a/drivers/net/ethernet/intel/idpf/idpf_controlq_setup.c b/drivers/net/ethernet/intel/idpf/idpf_controlq_setup.c\ndeleted file mode 100644\nindex a942a6385d06..000000000000\n--- a/drivers/net/ethernet/intel/idpf/idpf_controlq_setup.c\n+++ /dev/null\n@@ -1,171 +0,0 @@\n-// SPDX-License-Identifier: GPL-2.0-only\n-/* Copyright (C) 2023 Intel Corporation */\n-\n-#include \"idpf_controlq.h\"\n-\n-/**\n- * idpf_ctlq_alloc_desc_ring - Allocate Control Queue (CQ) rings\n- * @hw: pointer to hw struct\n- * @cq: pointer to the specific Control queue\n- */\n-static int idpf_ctlq_alloc_desc_ring(struct idpf_hw *hw,\n-\t\t\t\t struct idpf_ctlq_info *cq)\n-{\n-\tsize_t size = cq->ring_size * sizeof(struct idpf_ctlq_desc);\n-\n-\tcq->desc_ring.va = idpf_alloc_dma_mem(hw, &cq->desc_ring, size);\n-\tif (!cq->desc_ring.va)\n-\t\treturn -ENOMEM;\n-\n-\treturn 0;\n-}\n-\n-/**\n- * idpf_ctlq_alloc_bufs - Allocate Control Queue (CQ) buffers\n- * @hw: pointer to hw struct\n- * @cq: pointer to the specific Control queue\n- *\n- * Allocate the buffer head for all control queues, and if it's a receive\n- * queue, allocate DMA buffers\n- */\n-static int idpf_ctlq_alloc_bufs(struct idpf_hw *hw,\n-\t\t\t\tstruct idpf_ctlq_info *cq)\n-{\n-\tint i;\n-\n-\t/* Do not allocate DMA buffers for transmit queues */\n-\tif (cq->cq_type == IDPF_CTLQ_TYPE_MAILBOX_TX)\n-\t\treturn 0;\n-\n-\t/* We'll be allocating the buffer info memory first, then we can\n-\t * allocate the mapped buffers for the event processing\n-\t */\n-\tcq->bi.rx_buff = kcalloc(cq->ring_size, sizeof(struct idpf_dma_mem *),\n-\t\t\t\t GFP_KERNEL);\n-\tif (!cq->bi.rx_buff)\n-\t\treturn -ENOMEM;\n-\n-\t/* allocate the mapped buffers (except for the last one) */\n-\tfor (i = 0; i < cq->ring_size - 1; i++) {\n-\t\tstruct idpf_dma_mem *bi;\n-\t\tint num = 1; /* number of idpf_dma_mem to be allocated */\n-\n-\t\tcq->bi.rx_buff[i] = kcalloc(num, sizeof(struct idpf_dma_mem),\n-\t\t\t\t\t GFP_KERNEL);\n-\t\tif (!cq->bi.rx_buff[i])\n-\t\t\tgoto unwind_alloc_cq_bufs;\n-\n-\t\tbi = cq->bi.rx_buff[i];\n-\n-\t\tbi->va = idpf_alloc_dma_mem(hw, bi, cq->buf_size);\n-\t\tif (!bi->va) {\n-\t\t\t/* unwind will not free the failed entry */\n-\t\t\tkfree(cq->bi.rx_buff[i]);\n-\t\t\tgoto unwind_alloc_cq_bufs;\n-\t\t}\n-\t}\n-\n-\treturn 0;\n-\n-unwind_alloc_cq_bufs:\n-\t/* don't try to free the one that failed... 
*/\n-\ti--;\n-\tfor (; i >= 0; i--) {\n-\t\tidpf_free_dma_mem(hw, cq->bi.rx_buff[i]);\n-\t\tkfree(cq->bi.rx_buff[i]);\n-\t}\n-\tkfree(cq->bi.rx_buff);\n-\n-\treturn -ENOMEM;\n-}\n-\n-/**\n- * idpf_ctlq_free_desc_ring - Free Control Queue (CQ) rings\n- * @hw: pointer to hw struct\n- * @cq: pointer to the specific Control queue\n- *\n- * This assumes the posted send buffers have already been cleaned\n- * and de-allocated\n- */\n-static void idpf_ctlq_free_desc_ring(struct idpf_hw *hw,\n-\t\t\t\t struct idpf_ctlq_info *cq)\n-{\n-\tidpf_free_dma_mem(hw, &cq->desc_ring);\n-}\n-\n-/**\n- * idpf_ctlq_free_bufs - Free CQ buffer info elements\n- * @hw: pointer to hw struct\n- * @cq: pointer to the specific Control queue\n- *\n- * Free the DMA buffers for RX queues, and DMA buffer header for both RX and TX\n- * queues. The upper layers are expected to manage freeing of TX DMA buffers\n- */\n-static void idpf_ctlq_free_bufs(struct idpf_hw *hw, struct idpf_ctlq_info *cq)\n-{\n-\tvoid *bi;\n-\n-\tif (cq->cq_type == IDPF_CTLQ_TYPE_MAILBOX_RX) {\n-\t\tint i;\n-\n-\t\t/* free DMA buffers for rx queues*/\n-\t\tfor (i = 0; i < cq->ring_size; i++) {\n-\t\t\tif (cq->bi.rx_buff[i]) {\n-\t\t\t\tidpf_free_dma_mem(hw, cq->bi.rx_buff[i]);\n-\t\t\t\tkfree(cq->bi.rx_buff[i]);\n-\t\t\t}\n-\t\t}\n-\n-\t\tbi = (void *)cq->bi.rx_buff;\n-\t} else {\n-\t\tbi = (void *)cq->bi.tx_msg;\n-\t}\n-\n-\t/* free the buffer header */\n-\tkfree(bi);\n-}\n-\n-/**\n- * idpf_ctlq_dealloc_ring_res - Free memory allocated for control queue\n- * @hw: pointer to hw struct\n- * @cq: pointer to the specific Control queue\n- *\n- * Free the memory used by the ring, buffers and other related structures\n- */\n-void idpf_ctlq_dealloc_ring_res(struct idpf_hw *hw, struct idpf_ctlq_info *cq)\n-{\n-\t/* free ring buffers and the ring itself */\n-\tidpf_ctlq_free_bufs(hw, cq);\n-\tidpf_ctlq_free_desc_ring(hw, cq);\n-}\n-\n-/**\n- * idpf_ctlq_alloc_ring_res - allocate memory for descriptor ring and bufs\n- * @hw: pointer to hw struct\n- * @cq: pointer to control queue struct\n- *\n- * Do *NOT* hold cq_lock when calling this as the memory allocation routines\n- * called are not going to be atomic context safe\n- */\n-int idpf_ctlq_alloc_ring_res(struct idpf_hw *hw, struct idpf_ctlq_info *cq)\n-{\n-\tint err;\n-\n-\t/* allocate the ring memory */\n-\terr = idpf_ctlq_alloc_desc_ring(hw, cq);\n-\tif (err)\n-\t\treturn err;\n-\n-\t/* allocate buffers in the rings */\n-\terr = idpf_ctlq_alloc_bufs(hw, cq);\n-\tif (err)\n-\t\tgoto idpf_init_cq_free_ring;\n-\n-\t/* success! 
*/\n-\treturn 0;\n-\n-idpf_init_cq_free_ring:\n-\tidpf_free_dma_mem(hw, &cq->desc_ring);\n-\n-\treturn err;\n-}\ndiff --git a/drivers/net/ethernet/intel/idpf/idpf_dev.c b/drivers/net/ethernet/intel/idpf/idpf_dev.c\nindex 3a9355d40c90..9a8ce2396cf7 100644\n--- a/drivers/net/ethernet/intel/idpf/idpf_dev.c\n+++ b/drivers/net/ethernet/intel/idpf/idpf_dev.c\n@@ -10,44 +10,32 @@\n \n /**\n * idpf_ctlq_reg_init - initialize default mailbox registers\n- * @adapter: adapter structure\n- * @cq: pointer to the array of create control queues\n+ * @mmio: struct that contains MMIO region info\n+ * @cci: struct where the register offset pointer to be copied to\n */\n-static void idpf_ctlq_reg_init(struct idpf_adapter *adapter,\n-\t\t\t struct idpf_ctlq_create_info *cq)\n+static void idpf_ctlq_reg_init(struct libie_mmio_info *mmio,\n+\t\t\t struct libie_ctlq_create_info *cci)\n {\n-\tint i;\n-\n-\tfor (i = 0; i < IDPF_NUM_DFLT_MBX_Q; i++) {\n-\t\tstruct idpf_ctlq_create_info *ccq = cq + i;\n-\n-\t\tswitch (ccq->type) {\n-\t\tcase IDPF_CTLQ_TYPE_MAILBOX_TX:\n-\t\t\t/* set head and tail registers in our local struct */\n-\t\t\tccq->reg.head = PF_FW_ATQH;\n-\t\t\tccq->reg.tail = PF_FW_ATQT;\n-\t\t\tccq->reg.len = PF_FW_ATQLEN;\n-\t\t\tccq->reg.bah = PF_FW_ATQBAH;\n-\t\t\tccq->reg.bal = PF_FW_ATQBAL;\n-\t\t\tccq->reg.len_mask = PF_FW_ATQLEN_ATQLEN_M;\n-\t\t\tccq->reg.len_ena_mask = PF_FW_ATQLEN_ATQENABLE_M;\n-\t\t\tccq->reg.head_mask = PF_FW_ATQH_ATQH_M;\n-\t\t\tbreak;\n-\t\tcase IDPF_CTLQ_TYPE_MAILBOX_RX:\n-\t\t\t/* set head and tail registers in our local struct */\n-\t\t\tccq->reg.head = PF_FW_ARQH;\n-\t\t\tccq->reg.tail = PF_FW_ARQT;\n-\t\t\tccq->reg.len = PF_FW_ARQLEN;\n-\t\t\tccq->reg.bah = PF_FW_ARQBAH;\n-\t\t\tccq->reg.bal = PF_FW_ARQBAL;\n-\t\t\tccq->reg.len_mask = PF_FW_ARQLEN_ARQLEN_M;\n-\t\t\tccq->reg.len_ena_mask = PF_FW_ARQLEN_ARQENABLE_M;\n-\t\t\tccq->reg.head_mask = PF_FW_ARQH_ARQH_M;\n-\t\t\tbreak;\n-\t\tdefault:\n-\t\t\tbreak;\n-\t\t}\n-\t}\n+\tstruct libie_ctlq_reg *tx_reg = &cci[LIBIE_CTLQ_TYPE_TX].reg;\n+\tstruct libie_ctlq_reg *rx_reg = &cci[LIBIE_CTLQ_TYPE_RX].reg;\n+\n+\ttx_reg->head\t\t= libie_pci_get_mmio_addr(mmio, PF_FW_ATQH);\n+\ttx_reg->tail\t\t= libie_pci_get_mmio_addr(mmio, PF_FW_ATQT);\n+\ttx_reg->len\t\t= libie_pci_get_mmio_addr(mmio, PF_FW_ATQLEN);\n+\ttx_reg->addr_high\t= libie_pci_get_mmio_addr(mmio, PF_FW_ATQBAH);\n+\ttx_reg->addr_low\t= libie_pci_get_mmio_addr(mmio, PF_FW_ATQBAL);\n+\ttx_reg->len_mask\t= PF_FW_ATQLEN_ATQLEN_M;\n+\ttx_reg->len_ena_mask\t= PF_FW_ATQLEN_ATQENABLE_M;\n+\ttx_reg->head_mask\t= PF_FW_ATQH_ATQH_M;\n+\n+\trx_reg->head\t\t= libie_pci_get_mmio_addr(mmio, PF_FW_ARQH);\n+\trx_reg->tail\t\t= libie_pci_get_mmio_addr(mmio, PF_FW_ARQT);\n+\trx_reg->len\t\t= libie_pci_get_mmio_addr(mmio, PF_FW_ARQLEN);\n+\trx_reg->addr_high\t= libie_pci_get_mmio_addr(mmio, PF_FW_ARQBAH);\n+\trx_reg->addr_low\t= libie_pci_get_mmio_addr(mmio, PF_FW_ARQBAL);\n+\trx_reg->len_mask\t= PF_FW_ARQLEN_ARQLEN_M;\n+\trx_reg->len_ena_mask\t= PF_FW_ARQLEN_ARQENABLE_M;\n+\trx_reg->head_mask\t= PF_FW_ARQH_ARQH_M;\n }\n \n /**\ndiff --git a/drivers/net/ethernet/intel/idpf/idpf_ethtool.c b/drivers/net/ethernet/intel/idpf/idpf_ethtool.c\nindex 0b283852ad7c..961e5fec5e43 100644\n--- a/drivers/net/ethernet/intel/idpf/idpf_ethtool.c\n+++ b/drivers/net/ethernet/intel/idpf/idpf_ethtool.c\n@@ -207,7 +207,7 @@ static int idpf_add_flow_steer(struct net_device *netdev,\n \tspin_unlock_bh(&vport_config->flow_steer_list_lock);\n \n \tif (err)\n-\t\tgoto out;\n+\t\tgoto out_free_fltr;\n \n 
\trule->vport_id = cpu_to_le32(vport->vport_id);\n \trule->count = cpu_to_le32(1);\n@@ -233,18 +233,15 @@ static int idpf_add_flow_steer(struct net_device *netdev,\n \t\tidpf_fsteer_fill_tcp(hdrs, fsp, true);\n \t\tbreak;\n \tdefault:\n-\t\terr = -EINVAL;\n-\t\tgoto out;\n+\t\tgoto out_free_fltr;\n \t}\n \n \terr = idpf_add_del_fsteer_filters(vport->adapter, rule,\n \t\t\t\t\t VIRTCHNL2_OP_ADD_FLOW_RULE);\n-\tif (err)\n-\t\tgoto out;\n-\n-\tif (info->status != cpu_to_le32(VIRTCHNL2_FLOW_RULE_SUCCESS)) {\n-\t\terr = -EIO;\n-\t\tgoto out;\n+\tif (err || info->status != cpu_to_le32(VIRTCHNL2_FLOW_RULE_SUCCESS)) {\n+\t\t/* virtchnl2 rule is already consumed */\n+\t\tkfree(fltr);\n+\t\treturn err;\n \t}\n \n \t/* Save a copy of the user's flow spec so ethtool can later retrieve it */\n@@ -256,9 +253,10 @@ static int idpf_add_flow_steer(struct net_device *netdev,\n \n \tuser_config->num_fsteer_fltrs++;\n \tspin_unlock_bh(&vport_config->flow_steer_list_lock);\n-\tgoto out_free_rule;\n \n-out:\n+\treturn 0;\n+\n+out_free_fltr:\n \tkfree(fltr);\n out_free_rule:\n \tkfree(rule);\ndiff --git a/drivers/net/ethernet/intel/idpf/idpf_lib.c b/drivers/net/ethernet/intel/idpf/idpf_lib.c\nindex e15b1e8effc8..7751a81fc29d 100644\n--- a/drivers/net/ethernet/intel/idpf/idpf_lib.c\n+++ b/drivers/net/ethernet/intel/idpf/idpf_lib.c\n@@ -1363,6 +1363,7 @@ void idpf_statistics_task(struct work_struct *work)\n */\n void idpf_mbx_task(struct work_struct *work)\n {\n+\tstruct libie_ctlq_xn_recv_params xn_params;\n \tstruct idpf_adapter *adapter;\n \n \tadapter = container_of(work, struct idpf_adapter, mbx_task.work);\n@@ -1373,7 +1374,14 @@ void idpf_mbx_task(struct work_struct *work)\n \t\tqueue_delayed_work(adapter->mbx_wq, &adapter->mbx_task,\n \t\t\t\t usecs_to_jiffies(300));\n \n-\tidpf_recv_mb_msg(adapter, adapter->hw.arq);\n+\txn_params = (struct libie_ctlq_xn_recv_params) {\n+\t\t.xnm = adapter->xn_init_params.xnm,\n+\t\t.ctlq = adapter->arq,\n+\t\t.ctlq_msg_handler = idpf_recv_event_msg,\n+\t\t.budget = LIBIE_CTLQ_MAX_XN_ENTRIES,\n+\t};\n+\n+\tlibie_ctlq_xn_recv(&xn_params);\n }\n \n /**\n@@ -1907,7 +1915,6 @@ static void idpf_init_hard_reset(struct idpf_adapter *adapter)\n \t\tidpf_vc_core_deinit(adapter);\n \t\tif (!is_reset)\n \t\t\treg_ops->trigger_reset(adapter, IDPF_HR_FUNC_RESET);\n-\t\tidpf_deinit_dflt_mbx(adapter);\n \t} else {\n \t\tdev_err(dev, \"Unhandled hard reset cause\\n\");\n \t\terr = -EBADRQC;\n@@ -1972,19 +1979,11 @@ void idpf_vc_event_task(struct work_struct *work)\n \tif (test_bit(IDPF_REMOVE_IN_PROG, adapter->flags))\n \t\treturn;\n \n-\tif (test_bit(IDPF_HR_FUNC_RESET, adapter->flags))\n-\t\tgoto func_reset;\n-\n-\tif (test_bit(IDPF_HR_DRV_LOAD, adapter->flags))\n-\t\tgoto drv_load;\n-\n-\treturn;\n-\n-func_reset:\n-\tidpf_vc_xn_shutdown(adapter->vcxn_mngr);\n-drv_load:\n-\tset_bit(IDPF_HR_RESET_IN_PROG, adapter->flags);\n-\tidpf_init_hard_reset(adapter);\n+\tif (test_bit(IDPF_HR_FUNC_RESET, adapter->flags) ||\n+\t test_bit(IDPF_HR_DRV_LOAD, adapter->flags)) {\n+\t\tset_bit(IDPF_HR_RESET_IN_PROG, adapter->flags);\n+\t\tidpf_init_hard_reset(adapter);\n+\t}\n }\n \n /**\n@@ -2577,44 +2576,6 @@ static int idpf_set_mac(struct net_device *netdev, void *p)\n \treturn err;\n }\n \n-/**\n- * idpf_alloc_dma_mem - Allocate dma memory\n- * @hw: pointer to hw struct\n- * @mem: pointer to dma_mem struct\n- * @size: size of the memory to allocate\n- */\n-void *idpf_alloc_dma_mem(struct idpf_hw *hw, struct idpf_dma_mem *mem, u64 size)\n-{\n-\tstruct idpf_adapter *adapter = hw->back;\n-\tsize_t sz = 
ALIGN(size, 4096);\n-\n-\t/* The control queue resources are freed under a spinlock, contiguous\n-\t * pages will avoid IOMMU remapping and the use vmap (and vunmap in\n-\t * dma_free_*() path.\n-\t */\n-\tmem->va = dma_alloc_attrs(&adapter->pdev->dev, sz, &mem->pa,\n-\t\t\t\t GFP_KERNEL, DMA_ATTR_FORCE_CONTIGUOUS);\n-\tmem->size = sz;\n-\n-\treturn mem->va;\n-}\n-\n-/**\n- * idpf_free_dma_mem - Free the allocated dma memory\n- * @hw: pointer to hw struct\n- * @mem: pointer to dma_mem struct\n- */\n-void idpf_free_dma_mem(struct idpf_hw *hw, struct idpf_dma_mem *mem)\n-{\n-\tstruct idpf_adapter *adapter = hw->back;\n-\n-\tdma_free_attrs(&adapter->pdev->dev, mem->size,\n-\t\t mem->va, mem->pa, DMA_ATTR_FORCE_CONTIGUOUS);\n-\tmem->size = 0;\n-\tmem->va = NULL;\n-\tmem->pa = 0;\n-}\n-\n static int idpf_hwtstamp_set(struct net_device *netdev,\n \t\t\t struct kernel_hwtstamp_config *config,\n \t\t\t struct netlink_ext_ack *extack)\ndiff --git a/drivers/net/ethernet/intel/idpf/idpf_main.c b/drivers/net/ethernet/intel/idpf/idpf_main.c\nindex 9da02ce42605..5458a07ecf54 100644\n--- a/drivers/net/ethernet/intel/idpf/idpf_main.c\n+++ b/drivers/net/ethernet/intel/idpf/idpf_main.c\n@@ -125,7 +125,6 @@ static void idpf_remove(struct pci_dev *pdev)\n \n \t/* Be a good citizen and leave the device clean on exit */\n \tadapter->dev_ops.reg_ops.trigger_reset(adapter, IDPF_HR_FUNC_RESET);\n-\tidpf_deinit_dflt_mbx(adapter);\n \n \tif (!adapter->netdevs)\n \t\tgoto destroy_wqs;\n@@ -162,8 +161,6 @@ static void idpf_remove(struct pci_dev *pdev)\n \tadapter->vport_config = NULL;\n \tkfree(adapter->netdevs);\n \tadapter->netdevs = NULL;\n-\tkfree(adapter->vcxn_mngr);\n-\tadapter->vcxn_mngr = NULL;\n \n \tmutex_destroy(&adapter->vport_ctrl_lock);\n \tmutex_destroy(&adapter->vector_lock);\n@@ -186,7 +183,6 @@ static void idpf_shutdown(struct pci_dev *pdev)\n \tcancel_delayed_work_sync(&adapter->serv_task);\n \tcancel_delayed_work_sync(&adapter->vc_event_task);\n \tidpf_vc_core_deinit(adapter);\n-\tidpf_deinit_dflt_mbx(adapter);\n \n \tif (system_state == SYSTEM_POWER_OFF)\n \t\tpci_set_power_state(pdev, PCI_D3hot);\n@@ -238,7 +234,6 @@ static int idpf_cfg_device(struct idpf_adapter *adapter)\n \t\tpci_dbg(pdev, \"PCIe PTM is not supported by PCIe bus/controller\\n\");\n \n \tpci_set_drvdata(pdev, adapter);\n-\tadapter->hw.back = adapter;\n \n \treturn 0;\n }\ndiff --git a/drivers/net/ethernet/intel/idpf/idpf_mem.h b/drivers/net/ethernet/intel/idpf/idpf_mem.h\ndeleted file mode 100644\nindex 2aaabdc02dd2..000000000000\n--- a/drivers/net/ethernet/intel/idpf/idpf_mem.h\n+++ /dev/null\n@@ -1,20 +0,0 @@\n-/* SPDX-License-Identifier: GPL-2.0-only */\n-/* Copyright (C) 2023 Intel Corporation */\n-\n-#ifndef _IDPF_MEM_H_\n-#define _IDPF_MEM_H_\n-\n-#include <linux/io.h>\n-\n-struct idpf_dma_mem {\n-\tvoid *va;\n-\tdma_addr_t pa;\n-\tsize_t size;\n-};\n-\n-#define idpf_mbx_wr32(a, reg, value)\twritel((value), ((a)->mbx.vaddr + (reg)))\n-#define idpf_mbx_rd32(a, reg)\t\treadl((a)->mbx.vaddr + (reg))\n-#define idpf_mbx_wr64(a, reg, value)\twriteq((value), ((a)->mbx.vaddr + (reg)))\n-#define idpf_mbx_rd64(a, reg)\t\treadq((a)->mbx.vaddr + (reg))\n-\n-#endif /* _IDPF_MEM_H_ */\ndiff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.h b/drivers/net/ethernet/intel/idpf/idpf_txrx.h\nindex e8ca0186ac01..6796f010e382 100644\n--- a/drivers/net/ethernet/intel/idpf/idpf_txrx.h\n+++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.h\n@@ -236,7 +236,7 @@ enum idpf_tx_ctx_desc_eipt_offload {\n \t\t\t\t (sizeof(u16) * 
IDPF_RX_MAX_PTYPE_PROTO_IDS))\n #define IDPF_RX_PTYPE_HDR_SZ\tsizeof(struct virtchnl2_get_ptype_info)\n #define IDPF_RX_MAX_PTYPES_PER_BUF\t\\\n-\tDIV_ROUND_DOWN_ULL((IDPF_CTLQ_MAX_BUF_LEN - IDPF_RX_PTYPE_HDR_SZ), \\\n+\tDIV_ROUND_DOWN_ULL(LIBIE_CTLQ_MAX_BUF_LEN - IDPF_RX_PTYPE_HDR_SZ, \\\n \t\t\t IDPF_RX_MAX_PTYPE_SZ)\n \n #define IDPF_GET_PTYPE_SIZE(p) struct_size((p), proto_id, (p)->proto_id_count)\ndiff --git a/drivers/net/ethernet/intel/idpf/idpf_vf_dev.c b/drivers/net/ethernet/intel/idpf/idpf_vf_dev.c\nindex b7aa9538435e..f492ee241e56 100644\n--- a/drivers/net/ethernet/intel/idpf/idpf_vf_dev.c\n+++ b/drivers/net/ethernet/intel/idpf/idpf_vf_dev.c\n@@ -9,45 +9,32 @@\n \n /**\n * idpf_vf_ctlq_reg_init - initialize default mailbox registers\n- * @adapter: adapter structure\n- * @cq: pointer to the array of create control queues\n+ * @mmio: struct that contains MMIO region info\n+ * @cci: struct where the register offset pointers are copied to\n */\n-static void idpf_vf_ctlq_reg_init(struct idpf_adapter *adapter,\n-\t\t\t\t struct idpf_ctlq_create_info *cq)\n+static void idpf_vf_ctlq_reg_init(struct libie_mmio_info *mmio,\n+\t\t\t\t struct libie_ctlq_create_info *cci)\n {\n-\tresource_size_t mbx_start = adapter->dev_ops.static_reg_info[0].start;\n-\tint i;\n-\n-\tfor (i = 0; i < IDPF_NUM_DFLT_MBX_Q; i++) {\n-\t\tstruct idpf_ctlq_create_info *ccq = cq + i;\n-\n-\t\tswitch (ccq->type) {\n-\t\tcase IDPF_CTLQ_TYPE_MAILBOX_TX:\n-\t\t\t/* set head and tail registers in our local struct */\n-\t\t\tccq->reg.head = VF_ATQH - mbx_start;\n-\t\t\tccq->reg.tail = VF_ATQT - mbx_start;\n-\t\t\tccq->reg.len = VF_ATQLEN - mbx_start;\n-\t\t\tccq->reg.bah = VF_ATQBAH - mbx_start;\n-\t\t\tccq->reg.bal = VF_ATQBAL - mbx_start;\n-\t\t\tccq->reg.len_mask = VF_ATQLEN_ATQLEN_M;\n-\t\t\tccq->reg.len_ena_mask = VF_ATQLEN_ATQENABLE_M;\n-\t\t\tccq->reg.head_mask = VF_ATQH_ATQH_M;\n-\t\t\tbreak;\n-\t\tcase IDPF_CTLQ_TYPE_MAILBOX_RX:\n-\t\t\t/* set head and tail registers in our local struct */\n-\t\t\tccq->reg.head = VF_ARQH - mbx_start;\n-\t\t\tccq->reg.tail = VF_ARQT - mbx_start;\n-\t\t\tccq->reg.len = VF_ARQLEN - mbx_start;\n-\t\t\tccq->reg.bah = VF_ARQBAH - mbx_start;\n-\t\t\tccq->reg.bal = VF_ARQBAL - mbx_start;\n-\t\t\tccq->reg.len_mask = VF_ARQLEN_ARQLEN_M;\n-\t\t\tccq->reg.len_ena_mask = VF_ARQLEN_ARQENABLE_M;\n-\t\t\tccq->reg.head_mask = VF_ARQH_ARQH_M;\n-\t\t\tbreak;\n-\t\tdefault:\n-\t\t\tbreak;\n-\t\t}\n-\t}\n+\tstruct libie_ctlq_reg *tx_reg = &cci[LIBIE_CTLQ_TYPE_TX].reg;\n+\tstruct libie_ctlq_reg *rx_reg = &cci[LIBIE_CTLQ_TYPE_RX].reg;\n+\n+\ttx_reg->head\t\t= libie_pci_get_mmio_addr(mmio, VF_ATQH);\n+\ttx_reg->tail\t\t= libie_pci_get_mmio_addr(mmio, VF_ATQT);\n+\ttx_reg->len\t\t= libie_pci_get_mmio_addr(mmio, VF_ATQLEN);\n+\ttx_reg->addr_high\t= libie_pci_get_mmio_addr(mmio, VF_ATQBAH);\n+\ttx_reg->addr_low\t= libie_pci_get_mmio_addr(mmio, VF_ATQBAL);\n+\ttx_reg->len_mask\t= VF_ATQLEN_ATQLEN_M;\n+\ttx_reg->len_ena_mask\t= VF_ATQLEN_ATQENABLE_M;\n+\ttx_reg->head_mask\t= VF_ATQH_ATQH_M;\n+\n+\trx_reg->head\t\t= libie_pci_get_mmio_addr(mmio, VF_ARQH);\n+\trx_reg->tail\t\t= libie_pci_get_mmio_addr(mmio, VF_ARQT);\n+\trx_reg->len\t\t= libie_pci_get_mmio_addr(mmio, VF_ARQLEN);\n+\trx_reg->addr_high\t= libie_pci_get_mmio_addr(mmio, VF_ARQBAH);\n+\trx_reg->addr_low\t= libie_pci_get_mmio_addr(mmio, VF_ARQBAL);\n+\trx_reg->len_mask\t= VF_ARQLEN_ARQLEN_M;\n+\trx_reg->len_ena_mask\t= VF_ARQLEN_ARQENABLE_M;\n+\trx_reg->head_mask\t= VF_ARQH_ARQH_M;\n }\n \n /**\n@@ -161,11 +148,13 @@ static void 
idpf_vf_reset_reg_init(struct idpf_adapter *adapter)\n static void idpf_vf_trigger_reset(struct idpf_adapter *adapter,\n \t\t\t\t enum idpf_flags trig_cause)\n {\n+\tstruct libie_ctlq_xn_send_params xn_params = {\n+\t\t.chnl_opcode\t= VIRTCHNL2_OP_RESET_VF,\n+\t};\n \t/* Do not send VIRTCHNL2_OP_RESET_VF message on driver unload */\n \tif (trig_cause == IDPF_HR_FUNC_RESET &&\n \t !test_bit(IDPF_REMOVE_IN_PROG, adapter->flags))\n-\t\tidpf_send_mb_msg(adapter, adapter->hw.asq,\n-\t\t\t\t VIRTCHNL2_OP_RESET_VF, 0, NULL, 0);\n+\t\tidpf_send_mb_msg(adapter, &xn_params, NULL, 0);\n }\n \n /**\ndiff --git a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c\nindex 278247e456f4..132bbe5b9d7d 100644\n--- a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c\n+++ b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c\n@@ -9,20 +9,6 @@\n #include \"idpf_virtchnl.h\"\n #include \"idpf_ptp.h\"\n \n-/**\n- * struct idpf_vc_xn_manager - Manager for tracking transactions\n- * @ring: backing and lookup for transactions\n- * @free_xn_bm: bitmap for free transactions\n- * @xn_bm_lock: make bitmap access synchronous where necessary\n- * @salt: used to make cookie unique every message\n- */\n-struct idpf_vc_xn_manager {\n-\tstruct idpf_vc_xn ring[IDPF_VC_XN_RING_LEN];\n-\tDECLARE_BITMAP(free_xn_bm, IDPF_VC_XN_RING_LEN);\n-\tspinlock_t xn_bm_lock;\n-\tu8 salt;\n-};\n-\n /**\n * idpf_vid_to_vport - Translate vport id to vport pointer\n * @adapter: private data struct\n@@ -83,37 +69,43 @@ static void idpf_handle_event_link(struct idpf_adapter *adapter,\n \n /**\n * idpf_recv_event_msg - Receive virtchnl event message\n- * @adapter: Driver specific private structure\n+ * @ctx: control queue context\n * @ctlq_msg: message to copy from\n *\n * Receive virtchnl event message\n */\n-static void idpf_recv_event_msg(struct idpf_adapter *adapter,\n-\t\t\t\tstruct idpf_ctlq_msg *ctlq_msg)\n+void idpf_recv_event_msg(struct libie_ctlq_ctx *ctx,\n+\t\t\t struct libie_ctlq_msg *ctlq_msg)\n {\n-\tint payload_size = ctlq_msg->ctx.indirect.payload->size;\n+\tstruct kvec *buff = &ctlq_msg->recv_mem;\n+\tint payload_size = buff->iov_len;\n+\tstruct idpf_adapter *adapter;\n \tstruct virtchnl2_event *v2e;\n \tu32 event;\n \n+\tadapter = container_of(ctx, struct idpf_adapter, ctlq_ctx);\n \tif (payload_size < sizeof(*v2e)) {\n \t\tdev_err_ratelimited(&adapter->pdev->dev, \"Failed to receive valid payload for event msg (op %d len %d)\\n\",\n-\t\t\t\t ctlq_msg->cookie.mbx.chnl_opcode,\n+\t\t\t\t ctlq_msg->chnl_opcode,\n \t\t\t\t payload_size);\n-\t\treturn;\n+\t\tgoto free_rx_buf;\n \t}\n \n-\tv2e = (struct virtchnl2_event *)ctlq_msg->ctx.indirect.payload->va;\n+\tv2e = (struct virtchnl2_event *)buff->iov_base;\n \tevent = le32_to_cpu(v2e->event);\n \n \tswitch (event) {\n \tcase VIRTCHNL2_EVENT_LINK_CHANGE:\n \t\tidpf_handle_event_link(adapter, v2e);\n-\t\treturn;\n+\t\tbreak;\n \tdefault:\n \t\tdev_err(&adapter->pdev->dev,\n \t\t\t\"Unknown event %d from PF\\n\", event);\n \t\tbreak;\n \t}\n+\n+free_rx_buf:\n+\tlibie_ctlq_release_rx_buf(buff);\n }\n \n /**\n@@ -121,41 +113,19 @@ static void idpf_recv_event_msg(struct idpf_adapter *adapter,\n * @adapter: driver specific private structure\n * @asq: send control queue info\n *\n- * Reclaim the send mailbox queue entries to be used to send further messages\n- *\n- * Returns 0 on success, negative on failure\n+ * This is a helper function to clean the send mailbox queue entries.\n */\n-static int idpf_mb_clean(struct idpf_adapter *adapter,\n-\t\t\t 
struct idpf_ctlq_info *asq)\n+static void idpf_mb_clean(struct idpf_adapter *adapter,\n+\t\t\t struct libie_ctlq_info *asq)\n {\n-\tu16 i, num_q_msg = IDPF_DFLT_MBX_Q_LEN;\n-\tstruct idpf_ctlq_msg **q_msg;\n-\tstruct idpf_dma_mem *dma_mem;\n-\tint err;\n-\n-\tq_msg = kcalloc(num_q_msg, sizeof(struct idpf_ctlq_msg *), GFP_ATOMIC);\n-\tif (!q_msg)\n-\t\treturn -ENOMEM;\n-\n-\terr = idpf_ctlq_clean_sq(asq, &num_q_msg, q_msg);\n-\tif (err)\n-\t\tgoto err_kfree;\n-\n-\tfor (i = 0; i < num_q_msg; i++) {\n-\t\tif (!q_msg[i])\n-\t\t\tcontinue;\n-\t\tdma_mem = q_msg[i]->ctx.indirect.payload;\n-\t\tif (dma_mem)\n-\t\t\tdma_free_coherent(&adapter->pdev->dev, dma_mem->size,\n-\t\t\t\t\t dma_mem->va, dma_mem->pa);\n-\t\tkfree(q_msg[i]);\n-\t\tkfree(dma_mem);\n-\t}\n-\n-err_kfree:\n-\tkfree(q_msg);\n+\tstruct libie_ctlq_xn_clean_params clean_params = {\n+\t\t.ctx\t\t= &adapter->ctlq_ctx,\n+\t\t.ctlq\t\t= asq,\n+\t\t.rel_tx_buf\t= kfree,\n+\t\t.num_msgs\t= IDPF_DFLT_MBX_Q_LEN,\n+\t};\n \n-\treturn err;\n+\tlibie_ctlq_xn_send_clean(&clean_params);\n }\n \n #if IS_ENABLED(CONFIG_PTP_1588_CLOCK)\n@@ -189,7 +159,7 @@ static bool idpf_ptp_is_mb_msg(u32 op)\n * @ctlq_msg: Corresponding control queue message\n */\n static void idpf_prepare_ptp_mb_msg(struct idpf_adapter *adapter, u32 op,\n-\t\t\t\t struct idpf_ctlq_msg *ctlq_msg)\n+\t\t\t\t struct libie_ctlq_msg *ctlq_msg)\n {\n \t/* If the message is PTP-related and the secondary mailbox is available,\n \t * send the message through the secondary mailbox.\n@@ -197,525 +167,83 @@ static void idpf_prepare_ptp_mb_msg(struct idpf_adapter *adapter, u32 op,\n \tif (!idpf_ptp_is_mb_msg(op) || !adapter->ptp->secondary_mbx.valid)\n \t\treturn;\n \n-\tctlq_msg->opcode = idpf_mbq_opc_send_msg_to_peer_drv;\n+\tctlq_msg->opcode = LIBIE_CTLQ_SEND_MSG_TO_PEER;\n \tctlq_msg->func_id = adapter->ptp->secondary_mbx.peer_mbx_q_id;\n-\tctlq_msg->host_id = adapter->ptp->secondary_mbx.peer_id;\n+\tctlq_msg->flags = FIELD_PREP(LIBIE_CTLQ_DESC_FLAG_HOST_ID,\n+\t\t\t\t adapter->ptp->secondary_mbx.peer_id);\n }\n #else /* !CONFIG_PTP_1588_CLOCK */\n static void idpf_prepare_ptp_mb_msg(struct idpf_adapter *adapter, u32 op,\n-\t\t\t\t struct idpf_ctlq_msg *ctlq_msg)\n+\t\t\t\t struct libie_ctlq_msg *ctlq_msg)\n { }\n #endif /* CONFIG_PTP_1588_CLOCK */\n \n /**\n- * idpf_send_mb_msg - Send message over mailbox\n+ * idpf_send_mb_msg - send mailbox message to the device control plane\n * @adapter: driver specific private structure\n- * @asq: control queue to send message to\n- * @op: virtchnl opcode\n- * @msg_size: size of the payload\n- * @msg: pointer to buffer holding the payload\n- * @cookie: unique SW generated cookie per message\n+ * @xn_params: Xn send parameters to fill\n+ * @send_buf: buffer to send\n+ * @send_buf_size: size of the send buffer\n *\n- * Will prepare the control queue message and initiates the send api\n+ * Fill the Xn parameters with the required info to send a virtchnl message.\n+ * The send buffer is DMA-mapped by libie to avoid a memcpy.\n *\n- * Returns 0 on success, negative on failure\n- */\n-int idpf_send_mb_msg(struct idpf_adapter *adapter, struct idpf_ctlq_info *asq,\n-\t\t u32 op, u16 msg_size, u8 *msg, u16 cookie)\n-{\n-\tstruct idpf_ctlq_msg *ctlq_msg;\n-\tstruct idpf_dma_mem *dma_mem;\n-\tint err;\n-\n-\t/* If we are here and a reset is detected nothing much can be\n-\t * done. 
This thread should silently abort and expected to\n-\t * be corrected with a new run either by user or driver\n-\t * flows after reset\n-\t */\n-\tif (idpf_is_reset_detected(adapter))\n-\t\treturn 0;\n-\n-\terr = idpf_mb_clean(adapter, asq);\n-\tif (err)\n-\t\treturn err;\n-\n-\tctlq_msg = kzalloc(sizeof(*ctlq_msg), GFP_ATOMIC);\n-\tif (!ctlq_msg)\n-\t\treturn -ENOMEM;\n-\n-\tdma_mem = kzalloc(sizeof(*dma_mem), GFP_ATOMIC);\n-\tif (!dma_mem) {\n-\t\terr = -ENOMEM;\n-\t\tgoto dma_mem_error;\n-\t}\n-\n-\tctlq_msg->opcode = idpf_mbq_opc_send_msg_to_cp;\n-\tctlq_msg->func_id = 0;\n-\n-\tidpf_prepare_ptp_mb_msg(adapter, op, ctlq_msg);\n-\n-\tctlq_msg->data_len = msg_size;\n-\tctlq_msg->cookie.mbx.chnl_opcode = op;\n-\tctlq_msg->cookie.mbx.chnl_retval = 0;\n-\tdma_mem->size = IDPF_CTLQ_MAX_BUF_LEN;\n-\tdma_mem->va = dma_alloc_coherent(&adapter->pdev->dev, dma_mem->size,\n-\t\t\t\t\t &dma_mem->pa, GFP_ATOMIC);\n-\tif (!dma_mem->va) {\n-\t\terr = -ENOMEM;\n-\t\tgoto dma_alloc_error;\n-\t}\n-\n-\t/* It's possible we're just sending an opcode but no buffer */\n-\tif (msg && msg_size)\n-\t\tmemcpy(dma_mem->va, msg, msg_size);\n-\tctlq_msg->ctx.indirect.payload = dma_mem;\n-\tctlq_msg->ctx.sw_cookie.data = cookie;\n-\n-\terr = idpf_ctlq_send(&adapter->hw, asq, 1, ctlq_msg);\n-\tif (err)\n-\t\tgoto send_error;\n-\n-\treturn 0;\n-\n-send_error:\n-\tdma_free_coherent(&adapter->pdev->dev, dma_mem->size, dma_mem->va,\n-\t\t\t dma_mem->pa);\n-dma_alloc_error:\n-\tkfree(dma_mem);\n-dma_mem_error:\n-\tkfree(ctlq_msg);\n-\n-\treturn err;\n-}\n-\n-/* API for virtchnl \"transaction\" support (\"xn\" for short).\n- *\n- * We are reusing the completion lock to serialize the accesses to the\n- * transaction state for simplicity, but it could be its own separate synchro\n- * as well. For now, this API is only used from within a workqueue context;\n- * raw_spin_lock() is enough.\n- */\n-/**\n- * idpf_vc_xn_lock - Request exclusive access to vc transaction\n- * @xn: struct idpf_vc_xn* to access\n- */\n-#define idpf_vc_xn_lock(xn)\t\t\t\\\n-\traw_spin_lock(&(xn)->completed.wait.lock)\n-\n-/**\n- * idpf_vc_xn_unlock - Release exclusive access to vc transaction\n- * @xn: struct idpf_vc_xn* to access\n- */\n-#define idpf_vc_xn_unlock(xn)\t\t\\\n-\traw_spin_unlock(&(xn)->completed.wait.lock)\n-\n-/**\n- * idpf_vc_xn_release_bufs - Release reference to reply buffer(s) and\n- * reset the transaction state.\n- * @xn: struct idpf_vc_xn to update\n- */\n-static void idpf_vc_xn_release_bufs(struct idpf_vc_xn *xn)\n-{\n-\txn->reply.iov_base = NULL;\n-\txn->reply.iov_len = 0;\n-\n-\tif (xn->state != IDPF_VC_XN_SHUTDOWN)\n-\t\txn->state = IDPF_VC_XN_IDLE;\n-}\n-\n-/**\n- * idpf_vc_xn_init - Initialize virtchnl transaction object\n- * @vcxn_mngr: pointer to vc transaction manager struct\n- */\n-static void idpf_vc_xn_init(struct idpf_vc_xn_manager *vcxn_mngr)\n-{\n-\tint i;\n-\n-\tspin_lock_init(&vcxn_mngr->xn_bm_lock);\n-\n-\tfor (i = 0; i < ARRAY_SIZE(vcxn_mngr->ring); i++) {\n-\t\tstruct idpf_vc_xn *xn = &vcxn_mngr->ring[i];\n-\n-\t\txn->state = IDPF_VC_XN_IDLE;\n-\t\txn->idx = i;\n-\t\tidpf_vc_xn_release_bufs(xn);\n-\t\tinit_completion(&xn->completed);\n-\t}\n-\n-\tbitmap_fill(vcxn_mngr->free_xn_bm, IDPF_VC_XN_RING_LEN);\n-}\n-\n-/**\n- * idpf_vc_xn_shutdown - Uninitialize virtchnl transaction object\n- * @vcxn_mngr: pointer to vc transaction manager struct\n- *\n- * All waiting threads will be woken-up and their transaction aborted. 
Further\n- * operations on that object will fail.\n- */\n-void idpf_vc_xn_shutdown(struct idpf_vc_xn_manager *vcxn_mngr)\n-{\n-\tint i;\n-\n-\tspin_lock_bh(&vcxn_mngr->xn_bm_lock);\n-\tbitmap_zero(vcxn_mngr->free_xn_bm, IDPF_VC_XN_RING_LEN);\n-\tspin_unlock_bh(&vcxn_mngr->xn_bm_lock);\n-\n-\tfor (i = 0; i < ARRAY_SIZE(vcxn_mngr->ring); i++) {\n-\t\tstruct idpf_vc_xn *xn = &vcxn_mngr->ring[i];\n-\n-\t\tidpf_vc_xn_lock(xn);\n-\t\txn->state = IDPF_VC_XN_SHUTDOWN;\n-\t\tidpf_vc_xn_release_bufs(xn);\n-\t\tidpf_vc_xn_unlock(xn);\n-\t\tcomplete_all(&xn->completed);\n-\t}\n-}\n-\n-/**\n- * idpf_vc_xn_pop_free - Pop a free transaction from free list\n- * @vcxn_mngr: transaction manager to pop from\n- *\n- * Returns NULL if no free transactions\n- */\n-static\n-struct idpf_vc_xn *idpf_vc_xn_pop_free(struct idpf_vc_xn_manager *vcxn_mngr)\n-{\n-\tstruct idpf_vc_xn *xn = NULL;\n-\tunsigned long free_idx;\n-\n-\tspin_lock_bh(&vcxn_mngr->xn_bm_lock);\n-\tfree_idx = find_first_bit(vcxn_mngr->free_xn_bm, IDPF_VC_XN_RING_LEN);\n-\tif (free_idx == IDPF_VC_XN_RING_LEN)\n-\t\tgoto do_unlock;\n-\n-\tclear_bit(free_idx, vcxn_mngr->free_xn_bm);\n-\txn = &vcxn_mngr->ring[free_idx];\n-\txn->salt = vcxn_mngr->salt++;\n-\n-do_unlock:\n-\tspin_unlock_bh(&vcxn_mngr->xn_bm_lock);\n-\n-\treturn xn;\n-}\n-\n-/**\n- * idpf_vc_xn_push_free - Push a free transaction to free list\n- * @vcxn_mngr: transaction manager to push to\n- * @xn: transaction to push\n- */\n-static void idpf_vc_xn_push_free(struct idpf_vc_xn_manager *vcxn_mngr,\n-\t\t\t\t struct idpf_vc_xn *xn)\n-{\n-\tidpf_vc_xn_release_bufs(xn);\n-\tset_bit(xn->idx, vcxn_mngr->free_xn_bm);\n-}\n-\n-/**\n- * idpf_vc_xn_exec - Perform a send/recv virtchnl transaction\n- * @adapter: driver specific private structure with vcxn_mngr\n- * @params: parameters for this particular transaction including\n- * -vc_op: virtchannel operation to send\n- * -send_buf: kvec iov for send buf and len\n- * -recv_buf: kvec iov for recv buf and len (ignored if NULL)\n- * -timeout_ms: timeout waiting for a reply (milliseconds)\n- * -async: don't wait for message reply, will lose caller context\n- * -async_handler: callback to handle async replies\n- *\n- * @returns >= 0 for success, the size of the initial reply (may or may not be\n- * >= @recv_buf.iov_len, but we never overflow @@recv_buf_iov_base). < 0 for\n- * error.\n- */\n-ssize_t idpf_vc_xn_exec(struct idpf_adapter *adapter,\n-\t\t\tconst struct idpf_vc_xn_params *params)\n-{\n-\tconst struct kvec *send_buf = &params->send_buf;\n-\tstruct idpf_vc_xn *xn;\n-\tssize_t retval;\n-\tu16 cookie;\n-\n-\txn = idpf_vc_xn_pop_free(adapter->vcxn_mngr);\n-\t/* no free transactions available */\n-\tif (!xn)\n-\t\treturn -ENOSPC;\n-\n-\tidpf_vc_xn_lock(xn);\n-\tif (xn->state == IDPF_VC_XN_SHUTDOWN) {\n-\t\tretval = -ENXIO;\n-\t\tgoto only_unlock;\n-\t} else if (xn->state != IDPF_VC_XN_IDLE) {\n-\t\t/* We're just going to clobber this transaction even though\n-\t\t * it's not IDLE. If we don't reuse it we could theoretically\n-\t\t * eventually leak all the free transactions and not be able to\n-\t\t * send any messages. At least this way we make an attempt to\n-\t\t * remain functional even though something really bad is\n-\t\t * happening that's corrupting what was supposed to be free\n-\t\t * transactions.\n-\t\t */\n-\t\tWARN_ONCE(1, \"There should only be idle transactions in free list (idx %d op %d)\\n\",\n-\t\t\t xn->idx, xn->vc_op);\n-\t}\n-\n-\txn->reply = params->recv_buf;\n-\txn->reply_sz = 0;\n-\txn->state = params->async ? 
IDPF_VC_XN_ASYNC : IDPF_VC_XN_WAITING;\n-\txn->vc_op = params->vc_op;\n-\txn->async_handler = params->async_handler;\n-\tidpf_vc_xn_unlock(xn);\n-\n-\tif (!params->async)\n-\t\treinit_completion(&xn->completed);\n-\tcookie = FIELD_PREP(IDPF_VC_XN_SALT_M, xn->salt) |\n-\t\t FIELD_PREP(IDPF_VC_XN_IDX_M, xn->idx);\n-\n-\tretval = idpf_send_mb_msg(adapter, adapter->hw.asq, params->vc_op,\n-\t\t\t\t send_buf->iov_len, send_buf->iov_base,\n-\t\t\t\t cookie);\n-\tif (retval) {\n-\t\tidpf_vc_xn_lock(xn);\n-\t\tgoto release_and_unlock;\n-\t}\n-\n-\tif (params->async)\n-\t\treturn 0;\n-\n-\twait_for_completion_timeout(&xn->completed,\n-\t\t\t\t msecs_to_jiffies(params->timeout_ms));\n-\n-\t/* No need to check the return value; we check the final state of the\n-\t * transaction below. It's possible the transaction actually gets more\n-\t * timeout than specified if we get preempted here but after\n-\t * wait_for_completion_timeout returns. This should be non-issue\n-\t * however.\n-\t */\n-\tidpf_vc_xn_lock(xn);\n-\tswitch (xn->state) {\n-\tcase IDPF_VC_XN_SHUTDOWN:\n-\t\tretval = -ENXIO;\n-\t\tgoto only_unlock;\n-\tcase IDPF_VC_XN_WAITING:\n-\t\tdev_notice_ratelimited(&adapter->pdev->dev,\n-\t\t\t\t \"Transaction timed-out (op:%d cookie:%04x vc_op:%d salt:%02x timeout:%dms)\\n\",\n-\t\t\t\t params->vc_op, cookie, xn->vc_op,\n-\t\t\t\t xn->salt, params->timeout_ms);\n-\t\tretval = -ETIME;\n-\t\tbreak;\n-\tcase IDPF_VC_XN_COMPLETED_SUCCESS:\n-\t\tretval = xn->reply_sz;\n-\t\tbreak;\n-\tcase IDPF_VC_XN_COMPLETED_FAILED:\n-\t\tdev_notice_ratelimited(&adapter->pdev->dev, \"Transaction failed (op %d)\\n\",\n-\t\t\t\t params->vc_op);\n-\t\tretval = -EIO;\n-\t\tbreak;\n-\tdefault:\n-\t\t/* Invalid state. */\n-\t\tWARN_ON_ONCE(1);\n-\t\tretval = -EIO;\n-\t\tbreak;\n-\t}\n-\n-release_and_unlock:\n-\tidpf_vc_xn_push_free(adapter->vcxn_mngr, xn);\n-\t/* If we receive a VC reply after here, it will be dropped. 
*/\n-only_unlock:\n-\tidpf_vc_xn_unlock(xn);\n-\n-\treturn retval;\n-}\n-\n-/**\n- * idpf_vc_xn_forward_async - Handle async reply receives\n- * @adapter: private data struct\n- * @xn: transaction to handle\n- * @ctlq_msg: corresponding ctlq_msg\n+ * Clean up the mailbox queue entries of the previously sent message to\n+ * unmap and release the buffer.\n *\n- * For async sends we're going to lose the caller's context so, if an\n- * async_handler was provided, it can deal with the reply, otherwise we'll just\n- * check and report if there is an error.\n+ * Return: 0 if sending was successful or a reset is detected,\n+ *\t negative error code on failure.\n */\n-static int\n-idpf_vc_xn_forward_async(struct idpf_adapter *adapter, struct idpf_vc_xn *xn,\n-\t\t\t const struct idpf_ctlq_msg *ctlq_msg)\n+int idpf_send_mb_msg(struct idpf_adapter *adapter,\n+\t\t struct libie_ctlq_xn_send_params *xn_params,\n+\t\t void *send_buf, size_t send_buf_size)\n {\n-\tint err = 0;\n+\tstruct libie_ctlq_msg ctlq_msg = {};\n \n-\tif (ctlq_msg->cookie.mbx.chnl_opcode != xn->vc_op) {\n-\t\tdev_err_ratelimited(&adapter->pdev->dev, \"Async message opcode does not match transaction opcode (msg: %d) (xn: %d)\\n\",\n-\t\t\t\t ctlq_msg->cookie.mbx.chnl_opcode, xn->vc_op);\n-\t\txn->reply_sz = 0;\n-\t\terr = -EINVAL;\n-\t\tgoto release_bufs;\n-\t}\n+\tif (idpf_is_reset_detected(adapter)) {\n+\t\tif (!libie_cp_can_send_onstack(send_buf_size))\n+\t\t\tkfree(send_buf);\n \n-\tif (xn->async_handler) {\n-\t\terr = xn->async_handler(adapter, xn, ctlq_msg);\n-\t\tgoto release_bufs;\n+\t\treturn -EBUSY;\n \t}\n \n-\tif (ctlq_msg->cookie.mbx.chnl_retval) {\n-\t\txn->reply_sz = 0;\n-\t\tdev_err_ratelimited(&adapter->pdev->dev, \"Async message failure (op %d)\\n\",\n-\t\t\t\t ctlq_msg->cookie.mbx.chnl_opcode);\n-\t\terr = -EINVAL;\n-\t}\n+\tidpf_prepare_ptp_mb_msg(adapter, xn_params->chnl_opcode, &ctlq_msg);\n+\txn_params->ctlq_msg = ctlq_msg.opcode ? &ctlq_msg : NULL;\n \n-release_bufs:\n-\tidpf_vc_xn_push_free(adapter->vcxn_mngr, xn);\n+\txn_params->send_buf.iov_base = send_buf;\n+\txn_params->send_buf.iov_len = send_buf_size;\n+\txn_params->xnm = adapter->xn_init_params.xnm;\n+\txn_params->ctlq = xn_params->ctlq ? 
xn_params->ctlq : adapter->asq;\n+\txn_params->rel_tx_buf = kfree;\n \n-\treturn err;\n-}\n-\n-/**\n- * idpf_vc_xn_forward_reply - copy a reply back to receiving thread\n- * @adapter: driver specific private structure with vcxn_mngr\n- * @ctlq_msg: controlq message to send back to receiving thread\n- */\n-static int\n-idpf_vc_xn_forward_reply(struct idpf_adapter *adapter,\n-\t\t\t const struct idpf_ctlq_msg *ctlq_msg)\n-{\n-\tconst void *payload = NULL;\n-\tsize_t payload_size = 0;\n-\tstruct idpf_vc_xn *xn;\n-\tu16 msg_info;\n-\tint err = 0;\n-\tu16 xn_idx;\n-\tu16 salt;\n-\n-\tmsg_info = ctlq_msg->ctx.sw_cookie.data;\n-\txn_idx = FIELD_GET(IDPF_VC_XN_IDX_M, msg_info);\n-\tif (xn_idx >= ARRAY_SIZE(adapter->vcxn_mngr->ring)) {\n-\t\tdev_err_ratelimited(&adapter->pdev->dev, \"Out of bounds cookie received: %02x\\n\",\n-\t\t\t\t xn_idx);\n-\t\treturn -EINVAL;\n-\t}\n-\txn = &adapter->vcxn_mngr->ring[xn_idx];\n-\tidpf_vc_xn_lock(xn);\n-\tsalt = FIELD_GET(IDPF_VC_XN_SALT_M, msg_info);\n-\tif (xn->salt != salt) {\n-\t\tdev_err_ratelimited(&adapter->pdev->dev, \"Transaction salt does not match (exp:%d@%02x(%d) != got:%d@%02x)\\n\",\n-\t\t\t\t xn->vc_op, xn->salt, xn->state,\n-\t\t\t\t ctlq_msg->cookie.mbx.chnl_opcode, salt);\n-\t\tidpf_vc_xn_unlock(xn);\n-\t\treturn -EINVAL;\n-\t}\n-\n-\tswitch (xn->state) {\n-\tcase IDPF_VC_XN_WAITING:\n-\t\t/* success */\n-\t\tbreak;\n-\tcase IDPF_VC_XN_IDLE:\n-\t\tdev_err_ratelimited(&adapter->pdev->dev, \"Unexpected or belated VC reply (op %d)\\n\",\n-\t\t\t\t ctlq_msg->cookie.mbx.chnl_opcode);\n-\t\terr = -EINVAL;\n-\t\tgoto out_unlock;\n-\tcase IDPF_VC_XN_SHUTDOWN:\n-\t\t/* ENXIO is a bit special here as the recv msg loop uses that\n-\t\t * know if it should stop trying to clean the ring if we lost\n-\t\t * the virtchnl. 
We need to stop playing with registers and\n-\t\t * yield.\n-\t\t */\n-\t\terr = -ENXIO;\n-\t\tgoto out_unlock;\n-\tcase IDPF_VC_XN_ASYNC:\n-\t\terr = idpf_vc_xn_forward_async(adapter, xn, ctlq_msg);\n-\t\tidpf_vc_xn_unlock(xn);\n-\t\treturn err;\n-\tdefault:\n-\t\tdev_err_ratelimited(&adapter->pdev->dev, \"Overwriting VC reply (op %d)\\n\",\n-\t\t\t\t ctlq_msg->cookie.mbx.chnl_opcode);\n-\t\terr = -EBUSY;\n-\t\tgoto out_unlock;\n-\t}\n-\n-\tif (ctlq_msg->cookie.mbx.chnl_opcode != xn->vc_op) {\n-\t\tdev_err_ratelimited(&adapter->pdev->dev, \"Message opcode does not match transaction opcode (msg: %d) (xn: %d)\\n\",\n-\t\t\t\t ctlq_msg->cookie.mbx.chnl_opcode, xn->vc_op);\n-\t\txn->reply_sz = 0;\n-\t\txn->state = IDPF_VC_XN_COMPLETED_FAILED;\n-\t\terr = -EINVAL;\n-\t\tgoto out_unlock;\n-\t}\n-\n-\tif (ctlq_msg->cookie.mbx.chnl_retval) {\n-\t\txn->reply_sz = 0;\n-\t\txn->state = IDPF_VC_XN_COMPLETED_FAILED;\n-\t\terr = -EINVAL;\n-\t\tgoto out_unlock;\n-\t}\n-\n-\tif (ctlq_msg->data_len) {\n-\t\tpayload = ctlq_msg->ctx.indirect.payload->va;\n-\t\tpayload_size = ctlq_msg->data_len;\n-\t}\n+\tidpf_mb_clean(adapter, xn_params->ctlq);\n \n-\txn->reply_sz = payload_size;\n-\txn->state = IDPF_VC_XN_COMPLETED_SUCCESS;\n-\n-\tif (xn->reply.iov_base && xn->reply.iov_len && payload_size)\n-\t\tmemcpy(xn->reply.iov_base, payload,\n-\t\t min_t(size_t, xn->reply.iov_len, payload_size));\n-\n-out_unlock:\n-\tidpf_vc_xn_unlock(xn);\n-\t/* we _cannot_ hold lock while calling complete */\n-\tcomplete(&xn->completed);\n-\n-\treturn err;\n+\treturn libie_ctlq_xn_send(xn_params);\n }\n \n /**\n- * idpf_recv_mb_msg - Receive message over mailbox\n+ * idpf_send_mb_msg_kfree - send mailbox message and free the send buffer\n * @adapter: driver specific private structure\n- * @arq: control queue to receive message from\n+ * @xn_params: Xn send parameters to fill\n+ * @send_buf: buffer to send, can be released with kfree()\n+ * @send_buf_size: size of the send buffer\n *\n- * Will receive control queue message and posts the receive buffer. Returns 0\n- * on success and negative on failure.\n+ * libie_cp functions consume only buffers above a certain size,\n+ * smaller buffers are assumed to be on the stack. 
However, for some\n+ * commands with variable message size it makes sense to always use kzalloc(),\n+ * which means we have to free smaller buffers ourselves.\n+ *\n+ * Return: 0 if no unexpected errors were encountered,\n+ *\t negative error code otherwise.\n */\n-int idpf_recv_mb_msg(struct idpf_adapter *adapter, struct idpf_ctlq_info *arq)\n+static int idpf_send_mb_msg_kfree(struct idpf_adapter *adapter,\n+\t\t\t\t struct libie_ctlq_xn_send_params *xn_params,\n+\t\t\t\t void *send_buf, size_t send_buf_size)\n {\n-\tstruct idpf_ctlq_msg ctlq_msg;\n-\tstruct idpf_dma_mem *dma_mem;\n-\tint post_err, err;\n-\tu16 num_recv;\n-\n-\twhile (1) {\n-\t\t/* This will get <= num_recv messages and output how many\n-\t\t * actually received on num_recv.\n-\t\t */\n-\t\tnum_recv = 1;\n-\t\terr = idpf_ctlq_recv(arq, &num_recv, &ctlq_msg);\n-\t\tif (err || !num_recv)\n-\t\t\tbreak;\n+\tint err = idpf_send_mb_msg(adapter, xn_params, send_buf, send_buf_size);\n \n-\t\tif (ctlq_msg.data_len) {\n-\t\t\tdma_mem = ctlq_msg.ctx.indirect.payload;\n-\t\t} else {\n-\t\t\tdma_mem = NULL;\n-\t\t\tnum_recv = 0;\n-\t\t}\n-\n-\t\tif (ctlq_msg.cookie.mbx.chnl_opcode == VIRTCHNL2_OP_EVENT)\n-\t\t\tidpf_recv_event_msg(adapter, &ctlq_msg);\n-\t\telse\n-\t\t\terr = idpf_vc_xn_forward_reply(adapter, &ctlq_msg);\n-\n-\t\tpost_err = idpf_ctlq_post_rx_buffs(&adapter->hw, arq,\n-\t\t\t\t\t\t &num_recv, &dma_mem);\n-\n-\t\t/* If post failed clear the only buffer we supplied */\n-\t\tif (post_err) {\n-\t\t\tif (dma_mem)\n-\t\t\t\tdma_free_coherent(&adapter->pdev->dev,\n-\t\t\t\t\t\t dma_mem->size, dma_mem->va,\n-\t\t\t\t\t\t dma_mem->pa);\n-\t\t\tbreak;\n-\t\t}\n-\n-\t\t/* virtchnl trying to shutdown, stop cleaning */\n-\t\tif (err == -ENXIO)\n-\t\t\tbreak;\n-\t}\n+\tif (libie_cp_can_send_onstack(send_buf_size))\n+\t\tkfree(send_buf);\n \n \treturn err;\n }\n@@ -765,45 +293,41 @@ struct idpf_queue_set *idpf_alloc_queue_set(struct idpf_adapter *adapter,\n static int idpf_send_chunked_msg(struct idpf_adapter *adapter,\n \t\t\t\t const struct idpf_chunked_msg_params *params)\n {\n-\tstruct idpf_vc_xn_params xn_params = {\n-\t\t.vc_op\t\t= params->vc_op,\n+\tstruct libie_ctlq_xn_send_params xn_params = {\n \t\t.timeout_ms\t= IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC,\n+\t\t.chnl_opcode\t= params->vc_op,\n \t};\n \tconst void *pos = params->chunks;\n-\tu32 num_chunks, num_msgs, buf_sz;\n-\tvoid *buf __free(kfree) = NULL;\n \tu32 totqs = params->num_chunks;\n \tu32 vid = params->vport_id;\n+\tu32 num_chunks, num_msgs;\n \n-\tnum_chunks = min(IDPF_NUM_CHUNKS_PER_MSG(params->config_sz,\n-\t\t\t\t\t\t params->chunk_sz), totqs);\n+\tnum_chunks = IDPF_NUM_CHUNKS_PER_MSG(params->config_sz,\n+\t\t\t\t\t params->chunk_sz);\n \tnum_msgs = DIV_ROUND_UP(totqs, num_chunks);\n \n-\tbuf_sz = params->config_sz + num_chunks * params->chunk_sz;\n-\tbuf = kzalloc(buf_sz, GFP_KERNEL);\n-\tif (!buf)\n-\t\treturn -ENOMEM;\n-\n-\txn_params.send_buf.iov_base = buf;\n-\n \tfor (u32 i = 0; i < num_msgs; i++) {\n-\t\tssize_t reply_sz;\n+\t\tu32 buf_sz;\n+\t\tvoid *buf;\n+\t\tint err;\n \n-\t\tmemset(buf, 0, buf_sz);\n-\t\txn_params.send_buf.iov_len = buf_sz;\n+\t\tnum_chunks = min(num_chunks, totqs);\n+\t\tbuf_sz = params->config_sz + num_chunks * params->chunk_sz;\n+\t\tbuf = kzalloc(buf_sz, GFP_KERNEL);\n+\t\tif (!buf)\n+\t\t\treturn -ENOMEM;\n \n \t\tif (params->prepare_msg(vid, buf, pos, num_chunks) != buf_sz)\n \t\t\treturn -EINVAL;\n \n-\t\treply_sz = idpf_vc_xn_exec(adapter, &xn_params);\n-\t\tif (reply_sz < 0)\n-\t\t\treturn reply_sz;\n+\t\terr = 
idpf_send_mb_msg_kfree(adapter, &xn_params, buf, buf_sz);\n+\t\tif (err)\n+\t\t\treturn err;\n \n+\t\tlibie_ctlq_release_rx_buf(&xn_params.recv_mem);\n+\t\txn_params.recv_mem = (struct kvec) {};\n \t\tpos += num_chunks * params->chunk_sz;\n \t\ttotqs -= num_chunks;\n-\n-\t\tnum_chunks = min(num_chunks, totqs);\n-\t\tbuf_sz = params->config_sz + num_chunks * params->chunk_sz;\n \t}\n \n \treturn 0;\n@@ -878,11 +402,14 @@ static int idpf_wait_for_marker_event(struct idpf_vport *vport)\n */\n static int idpf_send_ver_msg(struct idpf_adapter *adapter)\n {\n-\tstruct idpf_vc_xn_params xn_params = {};\n+\tstruct libie_ctlq_xn_send_params xn_params = {\n+\t\t.timeout_ms\t= IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC,\n+\t\t.chnl_opcode\t= VIRTCHNL2_OP_VERSION,\n+\t};\n+\tstruct virtchnl2_version_info *vvi_recv;\n \tstruct virtchnl2_version_info vvi;\n-\tssize_t reply_sz;\n \tu32 major, minor;\n-\tint err = 0;\n+\tint err;\n \n \tif (adapter->virt_ver_maj) {\n \t\tvvi.major = cpu_to_le32(adapter->virt_ver_maj);\n@@ -892,24 +419,23 @@ static int idpf_send_ver_msg(struct idpf_adapter *adapter)\n \t\tvvi.minor = cpu_to_le32(IDPF_VIRTCHNL_VERSION_MINOR);\n \t}\n \n-\txn_params.vc_op = VIRTCHNL2_OP_VERSION;\n-\txn_params.send_buf.iov_base = &vvi;\n-\txn_params.send_buf.iov_len = sizeof(vvi);\n-\txn_params.recv_buf = xn_params.send_buf;\n-\txn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;\n+\terr = idpf_send_mb_msg(adapter, &xn_params, &vvi, sizeof(vvi));\n+\tif (err)\n+\t\treturn err;\n \n-\treply_sz = idpf_vc_xn_exec(adapter, &xn_params);\n-\tif (reply_sz < 0)\n-\t\treturn reply_sz;\n-\tif (reply_sz < sizeof(vvi))\n-\t\treturn -EIO;\n+\tif (xn_params.recv_mem.iov_len < sizeof(*vvi_recv)) {\n+\t\terr = -EIO;\n+\t\tgoto free_rx_buf;\n+\t}\n \n-\tmajor = le32_to_cpu(vvi.major);\n-\tminor = le32_to_cpu(vvi.minor);\n+\tvvi_recv = xn_params.recv_mem.iov_base;\n+\tmajor = le32_to_cpu(vvi_recv->major);\n+\tminor = le32_to_cpu(vvi_recv->minor);\n \n \tif (major > IDPF_VIRTCHNL_VERSION_MAJOR) {\n \t\tdev_warn(&adapter->pdev->dev, \"Virtchnl major version greater than supported\\n\");\n-\t\treturn -EINVAL;\n+\t\terr = -EINVAL;\n+\t\tgoto free_rx_buf;\n \t}\n \n \tif (major == IDPF_VIRTCHNL_VERSION_MAJOR &&\n@@ -927,6 +453,9 @@ static int idpf_send_ver_msg(struct idpf_adapter *adapter)\n \tadapter->virt_ver_maj = major;\n \tadapter->virt_ver_min = minor;\n \n+free_rx_buf:\n+\tlibie_ctlq_release_rx_buf(&xn_params.recv_mem);\n+\n \treturn err;\n }\n \n@@ -939,9 +468,12 @@ static int idpf_send_ver_msg(struct idpf_adapter *adapter)\n */\n static int idpf_send_get_caps_msg(struct idpf_adapter *adapter)\n {\n+\tstruct libie_ctlq_xn_send_params xn_params = {\n+\t\t.timeout_ms\t= IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC,\n+\t\t.chnl_opcode\t= VIRTCHNL2_OP_GET_CAPS,\n+\t};\n \tstruct virtchnl2_get_capabilities caps = {};\n-\tstruct idpf_vc_xn_params xn_params = {};\n-\tssize_t reply_sz;\n+\tint err;\n \n \tcaps.csum_caps =\n \t\tcpu_to_le32(VIRTCHNL2_CAP_TX_CSUM_L3_IPV4\t|\n@@ -1001,20 +533,22 @@ static int idpf_send_get_caps_msg(struct idpf_adapter *adapter)\n \t\t\t VIRTCHNL2_CAP_LOOPBACK\t\t|\n \t\t\t VIRTCHNL2_CAP_PTP);\n \n-\txn_params.vc_op = VIRTCHNL2_OP_GET_CAPS;\n-\txn_params.send_buf.iov_base = &caps;\n-\txn_params.send_buf.iov_len = sizeof(caps);\n-\txn_params.recv_buf.iov_base = &adapter->caps;\n-\txn_params.recv_buf.iov_len = sizeof(adapter->caps);\n-\txn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;\n-\n-\treply_sz = idpf_vc_xn_exec(adapter, &xn_params);\n-\tif (reply_sz < 0)\n-\t\treturn reply_sz;\n-\tif (reply_sz < 
sizeof(adapter->caps))\n-\t\treturn -EIO;\n+\terr = idpf_send_mb_msg(adapter, &xn_params, &caps, sizeof(caps));\n+\tif (err)\n+\t\treturn err;\n \n-\treturn 0;\n+\tif (xn_params.recv_mem.iov_len < sizeof(adapter->caps)) {\n+\t\terr = -EIO;\n+\t\tgoto free_rx_buf;\n+\t}\n+\n+\tmemcpy(&adapter->caps, xn_params.recv_mem.iov_base,\n+\t sizeof(adapter->caps));\n+\n+free_rx_buf:\n+\tlibie_ctlq_release_rx_buf(&xn_params.recv_mem);\n+\n+\treturn err;\n }\n \n /**\n@@ -1059,32 +593,29 @@ static void idpf_decfg_lan_memory_regions(struct idpf_adapter *adapter)\n */\n static int idpf_cfg_lan_memory_regions(struct idpf_adapter *adapter)\n {\n-\tstruct virtchnl2_get_lan_memory_regions *rcvd_regions __free(kfree);\n-\tstruct idpf_vc_xn_params xn_params = {\n-\t\t.vc_op = VIRTCHNL2_OP_GET_LAN_MEMORY_REGIONS,\n-\t\t.recv_buf.iov_len = IDPF_CTLQ_MAX_BUF_LEN,\n+\tstruct libie_ctlq_xn_send_params xn_params = {\n+\t\t.chnl_opcode = VIRTCHNL2_OP_GET_LAN_MEMORY_REGIONS,\n \t\t.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC,\n \t};\n-\tint num_regions, size;\n-\tssize_t reply_sz;\n+\tstruct virtchnl2_get_lan_memory_regions send_regions = {};\n+\tstruct virtchnl2_get_lan_memory_regions *rcvd_regions;\n+\tsize_t reply_sz, size;\n+\tint num_regions;\n \tint err = 0;\n \n-\trcvd_regions = kzalloc(IDPF_CTLQ_MAX_BUF_LEN, GFP_KERNEL);\n-\tif (!rcvd_regions)\n-\t\treturn -ENOMEM;\n-\n-\txn_params.recv_buf.iov_base = rcvd_regions;\n-\treply_sz = idpf_vc_xn_exec(adapter, &xn_params);\n-\tif (reply_sz < 0)\n-\t\treturn reply_sz;\n+\terr = idpf_send_mb_msg(adapter, &xn_params, &send_regions,\n+\t\t\t sizeof(send_regions));\n+\tif (err)\n+\t\treturn err;\n \n+\trcvd_regions = xn_params.recv_mem.iov_base;\n+\treply_sz = xn_params.recv_mem.iov_len;\n \tnum_regions = le16_to_cpu(rcvd_regions->num_memory_regions);\n \tsize = struct_size(rcvd_regions, mem_reg, num_regions);\n-\tif (reply_sz < size)\n-\t\treturn -EIO;\n-\n-\tif (size > IDPF_CTLQ_MAX_BUF_LEN)\n-\t\treturn -EINVAL;\n+\tif (reply_sz < size) {\n+\t\terr = -EIO;\n+\t\tgoto rel_rx_buf;\n+\t}\n \n \tfor (int i = 0; i < num_regions; i++) {\n \t\tstruct libie_mmio_info *mmio = &adapter->ctlq_ctx.mmio_info;\n@@ -1094,10 +625,14 @@ static int idpf_cfg_lan_memory_regions(struct idpf_adapter *adapter)\n \t\tlen = le64_to_cpu(rcvd_regions->mem_reg[i].size);\n \t\tif (!libie_pci_map_mmio_region(mmio, offset, len)) {\n \t\t\tidpf_decfg_lan_memory_regions(adapter);\n-\t\t\treturn -EIO;\n+\t\t\terr = -EIO;\n+\t\t\tgoto rel_rx_buf;\n \t\t}\n \t}\n \n+rel_rx_buf:\n+\tlibie_ctlq_release_rx_buf(&xn_params.recv_mem);\n+\n \treturn err;\n }\n \n@@ -1150,24 +685,27 @@ int idpf_add_del_fsteer_filters(struct idpf_adapter *adapter,\n \t\t\t\tstruct virtchnl2_flow_rule_add_del *rule,\n \t\t\t\tenum virtchnl2_op opcode)\n {\n+\tstruct libie_ctlq_xn_send_params xn_params = {\n+\t\t.chnl_opcode = opcode,\n+\t\t.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC,\n+\t};\n \tint rule_count = le32_to_cpu(rule->count);\n-\tstruct idpf_vc_xn_params xn_params = {};\n-\tssize_t reply_sz;\n+\tsize_t send_sz;\n+\tint err;\n \n \tif (opcode != VIRTCHNL2_OP_ADD_FLOW_RULE &&\n-\t opcode != VIRTCHNL2_OP_DEL_FLOW_RULE)\n+\t opcode != VIRTCHNL2_OP_DEL_FLOW_RULE) {\n+\t\tkfree(rule);\n \t\treturn -EINVAL;\n+\t}\n \n-\txn_params.vc_op = opcode;\n-\txn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;\n-\txn_params.async = false;\n-\txn_params.send_buf.iov_base = rule;\n-\txn_params.send_buf.iov_len = struct_size(rule, rule_info, rule_count);\n-\txn_params.recv_buf.iov_base = rule;\n-\txn_params.recv_buf.iov_len = 
struct_size(rule, rule_info, rule_count);\n+\tsend_sz = struct_size(rule, rule_info, rule_count);\n+\terr = idpf_send_mb_msg(adapter, &xn_params, rule, send_sz);\n+\tif (err)\n+\t\treturn err;\n \n-\treply_sz = idpf_vc_xn_exec(adapter, &xn_params);\n-\treturn reply_sz < 0 ? reply_sz : 0;\n+\tlibie_ctlq_release_rx_buf(&xn_params.recv_mem);\n+\treturn 0;\n }\n \n /**\n@@ -1539,11 +1077,13 @@ int idpf_queue_reg_init(struct idpf_vport *vport,\n int idpf_send_create_vport_msg(struct idpf_adapter *adapter,\n \t\t\t struct idpf_vport_max_q *max_q)\n {\n+\tstruct libie_ctlq_xn_send_params xn_params = {\n+\t\t.timeout_ms\t= IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC,\n+\t\t.chnl_opcode\t= VIRTCHNL2_OP_CREATE_VPORT,\n+\t};\n \tstruct virtchnl2_create_vport *vport_msg;\n-\tstruct idpf_vc_xn_params xn_params = {};\n \tu16 idx = adapter->next_vport;\n \tint err, buf_size;\n-\tssize_t reply_sz;\n \n \tbuf_size = sizeof(struct virtchnl2_create_vport);\n \tvport_msg = kzalloc(buf_size, GFP_KERNEL);\n@@ -1570,33 +1110,29 @@ int idpf_send_create_vport_msg(struct idpf_adapter *adapter,\n \t}\n \n \tif (!adapter->vport_params_recvd[idx]) {\n-\t\tadapter->vport_params_recvd[idx] = kzalloc(IDPF_CTLQ_MAX_BUF_LEN,\n-\t\t\t\t\t\t\t GFP_KERNEL);\n+\t\tadapter->vport_params_recvd[idx] =\n+\t\t\tkzalloc(LIBIE_CTLQ_MAX_BUF_LEN, GFP_KERNEL);\n \t\tif (!adapter->vport_params_recvd[idx]) {\n \t\t\terr = -ENOMEM;\n \t\t\tgoto rel_buf;\n \t\t}\n \t}\n \n-\txn_params.vc_op = VIRTCHNL2_OP_CREATE_VPORT;\n-\txn_params.send_buf.iov_base = vport_msg;\n-\txn_params.send_buf.iov_len = buf_size;\n-\txn_params.recv_buf.iov_base = adapter->vport_params_recvd[idx];\n-\txn_params.recv_buf.iov_len = IDPF_CTLQ_MAX_BUF_LEN;\n-\txn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;\n-\treply_sz = idpf_vc_xn_exec(adapter, &xn_params);\n-\tif (reply_sz < 0) {\n-\t\terr = reply_sz;\n-\t\tgoto free_vport_params;\n+\terr = idpf_send_mb_msg_kfree(adapter, &xn_params, vport_msg,\n+\t\t\t\t sizeof(*vport_msg));\n+\tif (err) {\n+\t\tkfree(adapter->vport_params_recvd[idx]);\n+\t\tadapter->vport_params_recvd[idx] = NULL;\n+\t\treturn err;\n \t}\n \n-\tkfree(vport_msg);\n+\tmemcpy(adapter->vport_params_recvd[idx], xn_params.recv_mem.iov_base,\n+\t xn_params.recv_mem.iov_len);\n+\n+\tlibie_ctlq_release_rx_buf(&xn_params.recv_mem);\n \n \treturn 0;\n \n-free_vport_params:\n-\tkfree(adapter->vport_params_recvd[idx]);\n-\tadapter->vport_params_recvd[idx] = NULL;\n rel_buf:\n \tkfree(vport_msg);\n \n@@ -1659,19 +1195,22 @@ int idpf_check_supported_desc_ids(struct idpf_vport *vport)\n */\n int idpf_send_destroy_vport_msg(struct idpf_adapter *adapter, u32 vport_id)\n {\n-\tstruct idpf_vc_xn_params xn_params = {};\n+\tstruct libie_ctlq_xn_send_params xn_params = {\n+\t\t.timeout_ms\t= IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC,\n+\t\t.chnl_opcode\t= VIRTCHNL2_OP_DESTROY_VPORT,\n+\t};\n \tstruct virtchnl2_vport v_id;\n-\tssize_t reply_sz;\n+\tint err;\n \n \tv_id.vport_id = cpu_to_le32(vport_id);\n \n-\txn_params.vc_op = VIRTCHNL2_OP_DESTROY_VPORT;\n-\txn_params.send_buf.iov_base = &v_id;\n-\txn_params.send_buf.iov_len = sizeof(v_id);\n-\txn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;\n-\treply_sz = idpf_vc_xn_exec(adapter, &xn_params);\n+\terr = idpf_send_mb_msg(adapter, &xn_params, &v_id, sizeof(v_id));\n+\tif (err)\n+\t\treturn err;\n+\n+\tlibie_ctlq_release_rx_buf(&xn_params.recv_mem);\n \n-\treturn reply_sz < 0 ? 
reply_sz : 0;\n+\treturn 0;\n }\n \n /**\n@@ -1684,19 +1223,22 @@ int idpf_send_destroy_vport_msg(struct idpf_adapter *adapter, u32 vport_id)\n */\n int idpf_send_enable_vport_msg(struct idpf_adapter *adapter, u32 vport_id)\n {\n-\tstruct idpf_vc_xn_params xn_params = {};\n+\tstruct libie_ctlq_xn_send_params xn_params = {\n+\t\t.timeout_ms\t= IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC,\n+\t\t.chnl_opcode\t= VIRTCHNL2_OP_ENABLE_VPORT,\n+\t};\n \tstruct virtchnl2_vport v_id;\n-\tssize_t reply_sz;\n+\tint err;\n \n \tv_id.vport_id = cpu_to_le32(vport_id);\n \n-\txn_params.vc_op = VIRTCHNL2_OP_ENABLE_VPORT;\n-\txn_params.send_buf.iov_base = &v_id;\n-\txn_params.send_buf.iov_len = sizeof(v_id);\n-\txn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;\n-\treply_sz = idpf_vc_xn_exec(adapter, &xn_params);\n+\terr = idpf_send_mb_msg(adapter, &xn_params, &v_id, sizeof(v_id));\n+\tif (err)\n+\t\treturn err;\n+\n+\tlibie_ctlq_release_rx_buf(&xn_params.recv_mem);\n \n-\treturn reply_sz < 0 ? reply_sz : 0;\n+\treturn 0;\n }\n \n /**\n@@ -1709,19 +1251,22 @@ int idpf_send_enable_vport_msg(struct idpf_adapter *adapter, u32 vport_id)\n */\n int idpf_send_disable_vport_msg(struct idpf_adapter *adapter, u32 vport_id)\n {\n-\tstruct idpf_vc_xn_params xn_params = {};\n+\tstruct libie_ctlq_xn_send_params xn_params = {\n+\t\t.timeout_ms\t= IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC,\n+\t\t.chnl_opcode\t= VIRTCHNL2_OP_DISABLE_VPORT,\n+\t};\n \tstruct virtchnl2_vport v_id;\n-\tssize_t reply_sz;\n+\tint err;\n \n \tv_id.vport_id = cpu_to_le32(vport_id);\n \n-\txn_params.vc_op = VIRTCHNL2_OP_DISABLE_VPORT;\n-\txn_params.send_buf.iov_base = &v_id;\n-\txn_params.send_buf.iov_len = sizeof(v_id);\n-\txn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;\n-\treply_sz = idpf_vc_xn_exec(adapter, &xn_params);\n+\terr = idpf_send_mb_msg(adapter, &xn_params, &v_id, sizeof(v_id));\n+\tif (err)\n+\t\treturn err;\n+\n+\tlibie_ctlq_release_rx_buf(&xn_params.recv_mem);\n \n-\treturn reply_sz < 0 ? reply_sz : 0;\n+\treturn 0;\n }\n \n /**\n@@ -2561,11 +2106,14 @@ int idpf_send_delete_queues_msg(struct idpf_adapter *adapter,\n \t\t\t\tstruct idpf_queue_id_reg_info *chunks,\n \t\t\t\tu32 vport_id)\n {\n-\tstruct virtchnl2_del_ena_dis_queues *eq __free(kfree) = NULL;\n-\tstruct idpf_vc_xn_params xn_params = {};\n-\tssize_t reply_sz;\n+\tstruct libie_ctlq_xn_send_params xn_params = {\n+\t\t.timeout_ms\t= IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC,\n+\t\t.chnl_opcode\t= VIRTCHNL2_OP_DEL_QUEUES,\n+\t};\n+\tstruct virtchnl2_del_ena_dis_queues *eq;\n+\tssize_t buf_size;\n \tu16 num_chunks;\n-\tint buf_size;\n+\tint err;\n \n \tnum_chunks = chunks->num_chunks;\n \tbuf_size = struct_size(eq, chunks.chunks, num_chunks);\n@@ -2580,13 +2128,13 @@ int idpf_send_delete_queues_msg(struct idpf_adapter *adapter,\n \tidpf_convert_reg_to_queue_chunks(eq->chunks.chunks, chunks->queue_chunks,\n \t\t\t\t\t num_chunks);\n \n-\txn_params.vc_op = VIRTCHNL2_OP_DEL_QUEUES;\n-\txn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;\n-\txn_params.send_buf.iov_base = eq;\n-\txn_params.send_buf.iov_len = buf_size;\n-\treply_sz = idpf_vc_xn_exec(adapter, &xn_params);\n+\terr = idpf_send_mb_msg_kfree(adapter, &xn_params, eq, buf_size);\n+\tif (err)\n+\t\treturn err;\n+\n+\tlibie_ctlq_release_rx_buf(&xn_params.recv_mem);\n \n-\treturn reply_sz < 0 ? 
reply_sz : 0;\n }\n \n /**\n@@ -2626,15 +2174,14 @@ int idpf_send_add_queues_msg(struct idpf_adapter *adapter,\n \t\t\t struct idpf_q_vec_rsrc *rsrc,\n \t\t\t u32 vport_id)\n {\n-\tstruct virtchnl2_add_queues *vc_msg __free(kfree) = NULL;\n-\tstruct idpf_vc_xn_params xn_params = {};\n+\tstruct libie_ctlq_xn_send_params xn_params = {\n+\t\t.timeout_ms\t= IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC,\n+\t\t.chnl_opcode\t= VIRTCHNL2_OP_ADD_QUEUES,\n+\t};\n+\tstruct virtchnl2_add_queues *vc_msg;\n \tstruct virtchnl2_add_queues aq = {};\n-\tssize_t reply_sz;\n-\tint size;\n-\n-\tvc_msg = kzalloc(IDPF_CTLQ_MAX_BUF_LEN, GFP_KERNEL);\n-\tif (!vc_msg)\n-\t\treturn -ENOMEM;\n+\tsize_t size;\n+\tint err;\n \n \taq.vport_id = cpu_to_le32(vport_id);\n \taq.num_tx_q = cpu_to_le16(rsrc->num_txq);\n@@ -2642,29 +2189,32 @@ int idpf_send_add_queues_msg(struct idpf_adapter *adapter,\n \taq.num_rx_q = cpu_to_le16(rsrc->num_rxq);\n \taq.num_rx_bufq = cpu_to_le16(rsrc->num_bufq);\n \n-\txn_params.vc_op = VIRTCHNL2_OP_ADD_QUEUES;\n-\txn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;\n-\txn_params.send_buf.iov_base = &aq;\n-\txn_params.send_buf.iov_len = sizeof(aq);\n-\txn_params.recv_buf.iov_base = vc_msg;\n-\txn_params.recv_buf.iov_len = IDPF_CTLQ_MAX_BUF_LEN;\n-\treply_sz = idpf_vc_xn_exec(adapter, &xn_params);\n-\tif (reply_sz < 0)\n-\t\treturn reply_sz;\n+\terr = idpf_send_mb_msg(adapter, &xn_params, &aq, sizeof(aq));\n+\tif (err)\n+\t\treturn err;\n+\n+\tvc_msg = xn_params.recv_mem.iov_base;\n \n \t/* compare vc_msg num queues with vport num queues */\n \tif (le16_to_cpu(vc_msg->num_tx_q) != rsrc->num_txq ||\n \t le16_to_cpu(vc_msg->num_rx_q) != rsrc->num_rxq ||\n \t le16_to_cpu(vc_msg->num_tx_complq) != rsrc->num_complq ||\n-\t le16_to_cpu(vc_msg->num_rx_bufq) != rsrc->num_bufq)\n-\t\treturn -EINVAL;\n+\t le16_to_cpu(vc_msg->num_rx_bufq) != rsrc->num_bufq) {\n+\t\terr = -EINVAL;\n+\t\tgoto free_rx_buf;\n+\t}\n \n \tsize = struct_size(vc_msg, chunks.chunks,\n \t\t\t le16_to_cpu(vc_msg->chunks.num_chunks));\n-\tif (reply_sz < size)\n+\tif (xn_params.recv_mem.iov_len < size)\n \t\treturn -EIO;\n \n-\treturn idpf_vport_init_queue_reg_chunks(vport_config, &vc_msg->chunks);\n+\terr = idpf_vport_init_queue_reg_chunks(vport_config, &vc_msg->chunks);\n+\n+free_rx_buf:\n+\tlibie_ctlq_release_rx_buf(&xn_params.recv_mem);\n+\n+\treturn err;\n }\n \n /**\n@@ -2676,49 +2226,47 @@ int idpf_send_add_queues_msg(struct idpf_adapter *adapter,\n */\n int idpf_send_alloc_vectors_msg(struct idpf_adapter *adapter, u16 num_vectors)\n {\n-\tstruct virtchnl2_alloc_vectors *rcvd_vec __free(kfree) = NULL;\n-\tstruct idpf_vc_xn_params xn_params = {};\n+\tstruct libie_ctlq_xn_send_params xn_params = {\n+\t\t.timeout_ms\t= IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC,\n+\t\t.chnl_opcode\t= VIRTCHNL2_OP_ALLOC_VECTORS,\n+\t};\n+\tstruct virtchnl2_alloc_vectors *rcvd_vec;\n \tstruct virtchnl2_alloc_vectors ac = {};\n-\tssize_t reply_sz;\n \tu16 num_vchunks;\n-\tint size;\n+\tint size, err;\n \n \tac.num_vectors = cpu_to_le16(num_vectors);\n \n-\trcvd_vec = kzalloc(IDPF_CTLQ_MAX_BUF_LEN, GFP_KERNEL);\n-\tif (!rcvd_vec)\n-\t\treturn -ENOMEM;\n+\terr = idpf_send_mb_msg(adapter, &xn_params, &ac, sizeof(ac));\n+\tif (err)\n+\t\treturn err;\n \n-\txn_params.vc_op = VIRTCHNL2_OP_ALLOC_VECTORS;\n-\txn_params.send_buf.iov_base = &ac;\n-\txn_params.send_buf.iov_len = sizeof(ac);\n-\txn_params.recv_buf.iov_base = rcvd_vec;\n-\txn_params.recv_buf.iov_len = IDPF_CTLQ_MAX_BUF_LEN;\n-\txn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;\n-\treply_sz = 
idpf_vc_xn_exec(adapter, &xn_params);\n-\tif (reply_sz < 0)\n-\t\treturn reply_sz;\n+\trcvd_vec = xn_params.recv_mem.iov_base;\n \n \tnum_vchunks = le16_to_cpu(rcvd_vec->vchunks.num_vchunks);\n \tsize = struct_size(rcvd_vec, vchunks.vchunks, num_vchunks);\n-\tif (reply_sz < size)\n-\t\treturn -EIO;\n-\n-\tif (size > IDPF_CTLQ_MAX_BUF_LEN)\n-\t\treturn -EINVAL;\n+\tif (xn_params.recv_mem.iov_len < size) {\n+\t\terr = -EIO;\n+\t\tgoto free_rx_buf;\n+\t}\n \n \tkfree(adapter->req_vec_chunks);\n \tadapter->req_vec_chunks = kmemdup(rcvd_vec, size, GFP_KERNEL);\n-\tif (!adapter->req_vec_chunks)\n-\t\treturn -ENOMEM;\n+\tif (!adapter->req_vec_chunks) {\n+\t\terr = -ENOMEM;\n+\t\tgoto free_rx_buf;\n+\t}\n \n \tif (le16_to_cpu(adapter->req_vec_chunks->num_vectors) < num_vectors) {\n \t\tkfree(adapter->req_vec_chunks);\n \t\tadapter->req_vec_chunks = NULL;\n-\t\treturn -EINVAL;\n+\t\terr = -EINVAL;\n \t}\n \n-\treturn 0;\n+free_rx_buf:\n+\tlibie_ctlq_release_rx_buf(&xn_params.recv_mem);\n+\n+\treturn err;\n }\n \n /**\n@@ -2730,24 +2278,28 @@ int idpf_send_alloc_vectors_msg(struct idpf_adapter *adapter, u16 num_vectors)\n int idpf_send_dealloc_vectors_msg(struct idpf_adapter *adapter)\n {\n \tstruct virtchnl2_alloc_vectors *ac = adapter->req_vec_chunks;\n-\tstruct virtchnl2_vector_chunks *vcs = &ac->vchunks;\n-\tstruct idpf_vc_xn_params xn_params = {};\n-\tssize_t reply_sz;\n-\tint buf_size;\n+\tstruct libie_ctlq_xn_send_params xn_params = {\n+\t\t.timeout_ms\t= IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC,\n+\t\t.chnl_opcode\t= VIRTCHNL2_OP_DEALLOC_VECTORS,\n+\t};\n+\tstruct virtchnl2_vector_chunks *vcs;\n+\tint buf_size, err;\n \n-\tbuf_size = struct_size(vcs, vchunks, le16_to_cpu(vcs->num_vchunks));\n+\tbuf_size = struct_size(&ac->vchunks, vchunks,\n+\t\t\t le16_to_cpu(ac->vchunks.num_vchunks));\n+\tvcs = kmemdup(&ac->vchunks, buf_size, GFP_KERNEL);\n+\tif (!vcs)\n+\t\treturn -ENOMEM;\n \n-\txn_params.vc_op = VIRTCHNL2_OP_DEALLOC_VECTORS;\n-\txn_params.send_buf.iov_base = vcs;\n-\txn_params.send_buf.iov_len = buf_size;\n-\txn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;\n-\treply_sz = idpf_vc_xn_exec(adapter, &xn_params);\n-\tif (reply_sz < 0)\n-\t\treturn reply_sz;\n+\terr = idpf_send_mb_msg_kfree(adapter, &xn_params, vcs, buf_size);\n+\tif (err)\n+\t\treturn err;\n \n \tkfree(adapter->req_vec_chunks);\n \tadapter->req_vec_chunks = NULL;\n \n+\tlibie_ctlq_release_rx_buf(&xn_params.recv_mem);\n+\n \treturn 0;\n }\n \n@@ -2771,18 +2323,22 @@ static int idpf_get_max_vfs(struct idpf_adapter *adapter)\n */\n int idpf_send_set_sriov_vfs_msg(struct idpf_adapter *adapter, u16 num_vfs)\n {\n+\tstruct libie_ctlq_xn_send_params xn_params = {\n+\t\t.timeout_ms\t= IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC,\n+\t\t.chnl_opcode\t= VIRTCHNL2_OP_SET_SRIOV_VFS,\n+\t};\n \tstruct virtchnl2_sriov_vfs_info svi = {};\n-\tstruct idpf_vc_xn_params xn_params = {};\n-\tssize_t reply_sz;\n+\tint err;\n \n \tsvi.num_vfs = cpu_to_le16(num_vfs);\n-\txn_params.vc_op = VIRTCHNL2_OP_SET_SRIOV_VFS;\n-\txn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;\n-\txn_params.send_buf.iov_base = &svi;\n-\txn_params.send_buf.iov_len = sizeof(svi);\n-\treply_sz = idpf_vc_xn_exec(adapter, &xn_params);\n \n-\treturn reply_sz < 0 ? 
reply_sz : 0;\n+\terr = idpf_send_mb_msg(adapter, &xn_params, &svi, sizeof(svi));\n+\tif (err)\n+\t\treturn err;\n+\n+\tlibie_ctlq_release_rx_buf(&xn_params.recv_mem);\n+\n+\treturn 0;\n }\n \n /**\n@@ -2795,10 +2351,14 @@ int idpf_send_set_sriov_vfs_msg(struct idpf_adapter *adapter, u16 num_vfs)\n int idpf_send_get_stats_msg(struct idpf_netdev_priv *np,\n \t\t\t struct idpf_port_stats *port_stats)\n {\n+\tstruct libie_ctlq_xn_send_params xn_params = {\n+\t\t.timeout_ms\t= IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC,\n+\t\t.chnl_opcode\t= VIRTCHNL2_OP_GET_STATS,\n+\t};\n \tstruct rtnl_link_stats64 *netstats = &np->netstats;\n+\tstruct virtchnl2_vport_stats *stats_recv;\n \tstruct virtchnl2_vport_stats stats_msg = {};\n-\tstruct idpf_vc_xn_params xn_params = {};\n-\tssize_t reply_sz;\n+\tint err;\n \n \n \t/* Don't send get_stats message if the link is down */\n@@ -2807,37 +2367,40 @@ int idpf_send_get_stats_msg(struct idpf_netdev_priv *np,\n \n \tstats_msg.vport_id = cpu_to_le32(np->vport_id);\n \n-\txn_params.vc_op = VIRTCHNL2_OP_GET_STATS;\n-\txn_params.send_buf.iov_base = &stats_msg;\n-\txn_params.send_buf.iov_len = sizeof(stats_msg);\n-\txn_params.recv_buf = xn_params.send_buf;\n-\txn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;\n+\terr = idpf_send_mb_msg(np->adapter, &xn_params, &stats_msg,\n+\t\t\t sizeof(stats_msg));\n+\tif (err)\n+\t\treturn err;\n \n-\treply_sz = idpf_vc_xn_exec(np->adapter, &xn_params);\n-\tif (reply_sz < 0)\n-\t\treturn reply_sz;\n-\tif (reply_sz < sizeof(stats_msg))\n-\t\treturn -EIO;\n+\tif (xn_params.recv_mem.iov_len < sizeof(*stats_recv)) {\n+\t\terr = -EIO;\n+\t\tgoto free_rx_buf;\n+\t}\n+\n+\tstats_recv = xn_params.recv_mem.iov_base;\n \n \tspin_lock_bh(&np->stats_lock);\n \n-\tnetstats->rx_packets = le64_to_cpu(stats_msg.rx_unicast) +\n-\t\t\t le64_to_cpu(stats_msg.rx_multicast) +\n-\t\t\t le64_to_cpu(stats_msg.rx_broadcast);\n-\tnetstats->tx_packets = le64_to_cpu(stats_msg.tx_unicast) +\n-\t\t\t le64_to_cpu(stats_msg.tx_multicast) +\n-\t\t\t le64_to_cpu(stats_msg.tx_broadcast);\n-\tnetstats->rx_bytes = le64_to_cpu(stats_msg.rx_bytes);\n-\tnetstats->tx_bytes = le64_to_cpu(stats_msg.tx_bytes);\n-\tnetstats->rx_errors = le64_to_cpu(stats_msg.rx_errors);\n-\tnetstats->tx_errors = le64_to_cpu(stats_msg.tx_errors);\n-\tnetstats->rx_dropped = le64_to_cpu(stats_msg.rx_discards);\n-\tnetstats->tx_dropped = le64_to_cpu(stats_msg.tx_discards);\n-\n-\tport_stats->vport_stats = stats_msg;\n+\tnetstats->rx_packets = le64_to_cpu(stats_recv->rx_unicast) +\n+\t\t\t le64_to_cpu(stats_recv->rx_multicast) +\n+\t\t\t le64_to_cpu(stats_recv->rx_broadcast);\n+\tnetstats->tx_packets = le64_to_cpu(stats_recv->tx_unicast) +\n+\t\t\t le64_to_cpu(stats_recv->tx_multicast) +\n+\t\t\t le64_to_cpu(stats_recv->tx_broadcast);\n+\tnetstats->rx_bytes = le64_to_cpu(stats_recv->rx_bytes);\n+\tnetstats->tx_bytes = le64_to_cpu(stats_recv->tx_bytes);\n+\tnetstats->rx_errors = le64_to_cpu(stats_recv->rx_errors);\n+\tnetstats->tx_errors = le64_to_cpu(stats_recv->tx_errors);\n+\tnetstats->rx_dropped = le64_to_cpu(stats_recv->rx_discards);\n+\tnetstats->tx_dropped = le64_to_cpu(stats_recv->tx_discards);\n+\n+\tport_stats->vport_stats = *stats_recv;\n \n \tspin_unlock_bh(&np->stats_lock);\n \n+free_rx_buf:\n+\tlibie_ctlq_release_rx_buf(&xn_params.recv_mem);\n+\n \treturn 0;\n }\n \n@@ -2854,12 +2417,14 @@ int idpf_send_get_set_rss_lut_msg(struct idpf_adapter *adapter,\n \t\t\t\t struct idpf_rss_data *rss_data,\n \t\t\t\t u32 vport_id, bool get)\n {\n-\tstruct virtchnl2_rss_lut *recv_rl __free(kfree) = 
NULL;\n-\tstruct virtchnl2_rss_lut *rl __free(kfree) = NULL;\n-\tstruct idpf_vc_xn_params xn_params = {};\n+\tstruct libie_ctlq_xn_send_params xn_params = {\n+\t\t.timeout_ms\t= IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC,\n+\t\t.chnl_opcode\t= get ? VIRTCHNL2_OP_GET_RSS_LUT :\n+\t\t\t\t\tVIRTCHNL2_OP_SET_RSS_LUT,\n+\t};\n+\tstruct virtchnl2_rss_lut *rl, *recv_rl;\n \tint buf_size, lut_buf_size;\n-\tssize_t reply_sz;\n-\tint i;\n+\tint i, err;\n \n \tbuf_size = struct_size(rl, lut, rss_data->rss_lut_size);\n \trl = kzalloc(buf_size, GFP_KERNEL);\n@@ -2867,36 +2432,30 @@ int idpf_send_get_set_rss_lut_msg(struct idpf_adapter *adapter,\n \t\treturn -ENOMEM;\n \n \trl->vport_id = cpu_to_le32(vport_id);\n-\n-\txn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;\n-\txn_params.send_buf.iov_base = rl;\n-\txn_params.send_buf.iov_len = buf_size;\n-\n-\tif (get) {\n-\t\trecv_rl = kzalloc(IDPF_CTLQ_MAX_BUF_LEN, GFP_KERNEL);\n-\t\tif (!recv_rl)\n-\t\t\treturn -ENOMEM;\n-\t\txn_params.vc_op = VIRTCHNL2_OP_GET_RSS_LUT;\n-\t\txn_params.recv_buf.iov_base = recv_rl;\n-\t\txn_params.recv_buf.iov_len = IDPF_CTLQ_MAX_BUF_LEN;\n-\t} else {\n+\tif (!get) {\n \t\trl->lut_entries = cpu_to_le16(rss_data->rss_lut_size);\n \t\tfor (i = 0; i < rss_data->rss_lut_size; i++)\n \t\t\trl->lut[i] = cpu_to_le32(rss_data->rss_lut[i]);\n-\n-\t\txn_params.vc_op = VIRTCHNL2_OP_SET_RSS_LUT;\n \t}\n-\treply_sz = idpf_vc_xn_exec(adapter, &xn_params);\n-\tif (reply_sz < 0)\n-\t\treturn reply_sz;\n+\n+\terr = idpf_send_mb_msg_kfree(adapter, &xn_params, rl, buf_size);\n+\tif (err)\n+\t\treturn err;\n+\n \tif (!get)\n-\t\treturn 0;\n-\tif (reply_sz < sizeof(struct virtchnl2_rss_lut))\n-\t\treturn -EIO;\n+\t\tgoto free_rx_buf;\n+\tif (xn_params.recv_mem.iov_len < sizeof(struct virtchnl2_rss_lut)) {\n+\t\terr = -EIO;\n+\t\tgoto free_rx_buf;\n+\t}\n+\n+\trecv_rl = xn_params.recv_mem.iov_base;\n \n \tlut_buf_size = le16_to_cpu(recv_rl->lut_entries) * sizeof(u32);\n-\tif (reply_sz < lut_buf_size)\n-\t\treturn -EIO;\n+\tif (xn_params.recv_mem.iov_len < lut_buf_size) {\n+\t\terr = -EIO;\n+\t\tgoto free_rx_buf;\n+\t}\n \n \t/* size didn't change, we can reuse existing lut buf */\n \tif (rss_data->rss_lut_size == le16_to_cpu(recv_rl->lut_entries))\n@@ -2908,13 +2467,16 @@ int idpf_send_get_set_rss_lut_msg(struct idpf_adapter *adapter,\n \trss_data->rss_lut = kzalloc(lut_buf_size, GFP_KERNEL);\n \tif (!rss_data->rss_lut) {\n \t\trss_data->rss_lut_size = 0;\n-\t\treturn -ENOMEM;\n+\t\terr = -ENOMEM;\n+\t\tgoto free_rx_buf;\n \t}\n \n do_memcpy:\n \tmemcpy(rss_data->rss_lut, recv_rl->lut, rss_data->rss_lut_size);\n+free_rx_buf:\n+\tlibie_ctlq_release_rx_buf(&xn_params.recv_mem);\n \n-\treturn 0;\n+\treturn err;\n }\n \n /**\n@@ -2930,12 +2492,14 @@ int idpf_send_get_set_rss_key_msg(struct idpf_adapter *adapter,\n \t\t\t\t struct idpf_rss_data *rss_data,\n \t\t\t\t u32 vport_id, bool get)\n {\n-\tstruct virtchnl2_rss_key *recv_rk __free(kfree) = NULL;\n-\tstruct virtchnl2_rss_key *rk __free(kfree) = NULL;\n-\tstruct idpf_vc_xn_params xn_params = {};\n-\tssize_t reply_sz;\n-\tint i, buf_size;\n-\tu16 key_size;\n+\tstruct libie_ctlq_xn_send_params xn_params = {\n+\t\t.timeout_ms\t= IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC,\n+\t\t.chnl_opcode\t= get ? 
VIRTCHNL2_OP_GET_RSS_KEY :\n+\t\t\t\t\tVIRTCHNL2_OP_SET_RSS_KEY,\n+\t};\n+\tstruct virtchnl2_rss_key *rk, *recv_rk;\n+\tu16 key_size, recv_len;\n+\tint i, buf_size, err;\n \n \tbuf_size = struct_size(rk, key_flex, rss_data->rss_key_size);\n \trk = kzalloc(buf_size, GFP_KERNEL);\n@@ -2943,37 +2507,32 @@ int idpf_send_get_set_rss_key_msg(struct idpf_adapter *adapter,\n \t\treturn -ENOMEM;\n \n \trk->vport_id = cpu_to_le32(vport_id);\n-\txn_params.send_buf.iov_base = rk;\n-\txn_params.send_buf.iov_len = buf_size;\n-\txn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;\n-\tif (get) {\n-\t\trecv_rk = kzalloc(IDPF_CTLQ_MAX_BUF_LEN, GFP_KERNEL);\n-\t\tif (!recv_rk)\n-\t\t\treturn -ENOMEM;\n-\n-\t\txn_params.vc_op = VIRTCHNL2_OP_GET_RSS_KEY;\n-\t\txn_params.recv_buf.iov_base = recv_rk;\n-\t\txn_params.recv_buf.iov_len = IDPF_CTLQ_MAX_BUF_LEN;\n-\t} else {\n+\tif (!get) {\n \t\trk->key_len = cpu_to_le16(rss_data->rss_key_size);\n \t\tfor (i = 0; i < rss_data->rss_key_size; i++)\n \t\t\trk->key_flex[i] = rss_data->rss_key[i];\n-\n-\t\txn_params.vc_op = VIRTCHNL2_OP_SET_RSS_KEY;\n \t}\n \n-\treply_sz = idpf_vc_xn_exec(adapter, &xn_params);\n-\tif (reply_sz < 0)\n-\t\treturn reply_sz;\n+\terr = idpf_send_mb_msg_kfree(adapter, &xn_params, rk, buf_size);\n+\tif (err)\n+\t\treturn err;\n+\n \tif (!get)\n-\t\treturn 0;\n-\tif (reply_sz < sizeof(struct virtchnl2_rss_key))\n-\t\treturn -EIO;\n+\t\tgoto free_rx_buf;\n+\n+\trecv_len = xn_params.recv_mem.iov_len;\n+\tif (recv_len < sizeof(struct virtchnl2_rss_key)) {\n+\t\terr = -EIO;\n+\t\tgoto free_rx_buf;\n+\t}\n \n+\trecv_rk = xn_params.recv_mem.iov_base;\n \tkey_size = min_t(u16, NETDEV_RSS_KEY_LEN,\n \t\t\t le16_to_cpu(recv_rk->key_len));\n-\tif (reply_sz < key_size)\n-\t\treturn -EIO;\n+\tif (recv_len < key_size) {\n+\t\terr = -EIO;\n+\t\tgoto free_rx_buf;\n+\t}\n \n \t/* key len didn't change, reuse existing buf */\n \tif (rss_data->rss_key_size == key_size)\n@@ -2984,13 +2543,16 @@ int idpf_send_get_set_rss_key_msg(struct idpf_adapter *adapter,\n \trss_data->rss_key = kzalloc(key_size, GFP_KERNEL);\n \tif (!rss_data->rss_key) {\n \t\trss_data->rss_key_size = 0;\n-\t\treturn -ENOMEM;\n+\t\terr = -ENOMEM;\n+\t\tgoto free_rx_buf;\n \t}\n \n do_memcpy:\n \tmemcpy(rss_data->rss_key, recv_rk->key_flex, rss_data->rss_key_size);\n+free_rx_buf:\n+\tlibie_ctlq_release_rx_buf(&xn_params.recv_mem);\n \n-\treturn 0;\n+\treturn err;\n }\n \n /**\n@@ -3167,15 +2729,19 @@ static void idpf_parse_protocol_ids(struct virtchnl2_ptype *ptype,\n */\n static int idpf_send_get_rx_ptype_msg(struct idpf_adapter *adapter)\n {\n-\tstruct virtchnl2_get_ptype_info *get_ptype_info __free(kfree) = NULL;\n-\tstruct virtchnl2_get_ptype_info *ptype_info __free(kfree) = NULL;\n \tstruct libeth_rx_pt *singleq_pt_lkup __free(kfree) = NULL;\n \tstruct libeth_rx_pt *splitq_pt_lkup __free(kfree) = NULL;\n-\tstruct idpf_vc_xn_params xn_params = {};\n+\tstruct libie_ctlq_xn_send_params xn_params = {\n+\t\t.timeout_ms\t= IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC,\n+\t\t.chnl_opcode\t= VIRTCHNL2_OP_GET_PTYPE_INFO,\n+\t};\n+\tstruct libeth_rx_pt *ptype_lkup __free(kfree) = NULL;\n+\tstruct virtchnl2_get_ptype_info *get_ptype_info;\n+\tstruct virtchnl2_get_ptype_info *ptype_info;\n+\tint err = 0, max_ptype = IDPF_RX_MAX_PTYPE;\n+\tint buf_size = sizeof(*get_ptype_info);\n \tint ptypes_recvd = 0, ptype_offset;\n-\tu32 max_ptype = IDPF_RX_MAX_PTYPE;\n \tu16 next_ptype_id = 0;\n-\tssize_t reply_sz;\n \n \tsingleq_pt_lkup = kcalloc(IDPF_RX_MAX_BASE_PTYPE,\n \t\t\t\t sizeof(*singleq_pt_lkup), 
GFP_KERNEL);\n@@ -3186,42 +2752,34 @@ static int idpf_send_get_rx_ptype_msg(struct idpf_adapter *adapter)\n \tif (!splitq_pt_lkup)\n \t\treturn -ENOMEM;\n \n-\tget_ptype_info = kzalloc(sizeof(*get_ptype_info), GFP_KERNEL);\n-\tif (!get_ptype_info)\n-\t\treturn -ENOMEM;\n-\n-\tptype_info = kzalloc(IDPF_CTLQ_MAX_BUF_LEN, GFP_KERNEL);\n-\tif (!ptype_info)\n-\t\treturn -ENOMEM;\n+\twhile (next_ptype_id < max_ptype) {\n+\t\tu16 num_ptypes;\n \n-\txn_params.vc_op = VIRTCHNL2_OP_GET_PTYPE_INFO;\n-\txn_params.send_buf.iov_base = get_ptype_info;\n-\txn_params.send_buf.iov_len = sizeof(*get_ptype_info);\n-\txn_params.recv_buf.iov_base = ptype_info;\n-\txn_params.recv_buf.iov_len = IDPF_CTLQ_MAX_BUF_LEN;\n-\txn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;\n+\t\tget_ptype_info = kzalloc(buf_size, GFP_KERNEL);\n+\t\tif (!get_ptype_info)\n+\t\t\treturn -ENOMEM;\n \n-\twhile (next_ptype_id < max_ptype) {\n \t\tget_ptype_info->start_ptype_id = cpu_to_le16(next_ptype_id);\n \n \t\tif ((next_ptype_id + IDPF_RX_MAX_PTYPES_PER_BUF) > max_ptype)\n-\t\t\tget_ptype_info->num_ptypes =\n-\t\t\t\tcpu_to_le16(max_ptype - next_ptype_id);\n+\t\t\tnum_ptypes = max_ptype - next_ptype_id;\n \t\telse\n-\t\t\tget_ptype_info->num_ptypes =\n-\t\t\t\tcpu_to_le16(IDPF_RX_MAX_PTYPES_PER_BUF);\n+\t\t\tnum_ptypes = IDPF_RX_MAX_PTYPES_PER_BUF;\n \n-\t\treply_sz = idpf_vc_xn_exec(adapter, &xn_params);\n-\t\tif (reply_sz < 0)\n-\t\t\treturn reply_sz;\n+\t\tget_ptype_info->num_ptypes = cpu_to_le16(num_ptypes);\n+\t\terr = idpf_send_mb_msg_kfree(adapter, &xn_params,\n+\t\t\t\t\t get_ptype_info, buf_size);\n+\t\tif (err)\n+\t\t\treturn err;\n \n+\t\tptype_info = xn_params.recv_mem.iov_base;\n \t\tptypes_recvd += le16_to_cpu(ptype_info->num_ptypes);\n-\t\tif (ptypes_recvd > max_ptype)\n-\t\t\treturn -EINVAL;\n-\n-\t\tnext_ptype_id = le16_to_cpu(get_ptype_info->start_ptype_id) +\n-\t\t\t\tle16_to_cpu(get_ptype_info->num_ptypes);\n+\t\tif (ptypes_recvd > max_ptype) {\n+\t\t\terr = -EINVAL;\n+\t\t\tgoto free_rx_buf;\n+\t\t}\n \n+\t\tnext_ptype_id = next_ptype_id + num_ptypes;\n \t\tptype_offset = IDPF_RX_PTYPE_HDR_SZ;\n \n \t\tfor (u16 i = 0; i < le16_to_cpu(ptype_info->num_ptypes); i++) {\n@@ -3236,8 +2794,10 @@ static int idpf_send_get_rx_ptype_msg(struct idpf_adapter *adapter)\n \t\t\tpt_8 = ptype->ptype_id_8;\n \n \t\t\tptype_offset += IDPF_GET_PTYPE_SIZE(ptype);\n-\t\t\tif (ptype_offset > IDPF_CTLQ_MAX_BUF_LEN)\n-\t\t\t\treturn -EINVAL;\n+\t\t\tif (ptype_offset > LIBIE_CTLQ_MAX_BUF_LEN) {\n+\t\t\t\terr = -EINVAL;\n+\t\t\t\tgoto free_rx_buf;\n+\t\t\t}\n \n \t\t\t/* 0xFFFF indicates end of ptypes */\n \t\t\tif (pt_10 == IDPF_INVALID_PTYPE_ID)\n@@ -3255,13 +2815,18 @@ static int idpf_send_get_rx_ptype_msg(struct idpf_adapter *adapter)\n \t\t\tif (!singleq_pt_lkup[pt_8].outer_ip)\n \t\t\t\tsingleq_pt_lkup[pt_8] = rx_pt;\n \t\t}\n+\n+\t\tlibie_ctlq_release_rx_buf(&xn_params.recv_mem);\n+\t\txn_params.recv_mem = (struct kvec) {};\n \t}\n \n out:\n \tadapter->splitq_pt_lkup = no_free_ptr(splitq_pt_lkup);\n \tadapter->singleq_pt_lkup = no_free_ptr(singleq_pt_lkup);\n+free_rx_buf:\n+\tlibie_ctlq_release_rx_buf(&xn_params.recv_mem);\n \n-\treturn 0;\n+\treturn err;\n }\n \n /**\n@@ -3289,40 +2854,24 @@ static void idpf_rel_rx_pt_lkup(struct idpf_adapter *adapter)\n int idpf_send_ena_dis_loopback_msg(struct idpf_adapter *adapter, u32 vport_id,\n \t\t\t\t bool loopback_ena)\n {\n-\tstruct idpf_vc_xn_params xn_params = {};\n+\tstruct libie_ctlq_xn_send_params xn_params = {\n+\t\t.timeout_ms\t= 
IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC,\n+\t\t.chnl_opcode\t= VIRTCHNL2_OP_LOOPBACK,\n+\t};\n \tstruct virtchnl2_loopback loopback;\n-\tssize_t reply_sz;\n+\tint err;\n \n \tloopback.vport_id = cpu_to_le32(vport_id);\n \tloopback.enable = loopback_ena;\n \n-\txn_params.vc_op = VIRTCHNL2_OP_LOOPBACK;\n-\txn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;\n-\txn_params.send_buf.iov_base = &loopback;\n-\txn_params.send_buf.iov_len = sizeof(loopback);\n-\treply_sz = idpf_vc_xn_exec(adapter, &xn_params);\n-\n-\treturn reply_sz < 0 ? reply_sz : 0;\n-}\n-\n-/**\n- * idpf_find_ctlq - Given a type and id, find ctlq info\n- * @hw: hardware struct\n- * @type: type of ctrlq to find\n- * @id: ctlq id to find\n- *\n- * Returns pointer to found ctlq info struct, NULL otherwise.\n- */\n-static struct idpf_ctlq_info *idpf_find_ctlq(struct idpf_hw *hw,\n-\t\t\t\t\t enum idpf_ctlq_type type, int id)\n-{\n-\tstruct idpf_ctlq_info *cq, *tmp;\n+\terr = idpf_send_mb_msg(adapter, &xn_params, &loopback,\n+\t\t\t sizeof(loopback));\n+\tif (err)\n+\t\treturn err;\n \n-\tlist_for_each_entry_safe(cq, tmp, &hw->cq_list_head, cq_list)\n-\t\tif (cq->q_id == id && cq->cq_type == type)\n-\t\t\treturn cq;\n+\tlibie_ctlq_release_rx_buf(&xn_params.recv_mem);\n \n-\treturn NULL;\n+\treturn 0;\n }\n \n /**\n@@ -3333,40 +2882,43 @@ static struct idpf_ctlq_info *idpf_find_ctlq(struct idpf_hw *hw,\n */\n int idpf_init_dflt_mbx(struct idpf_adapter *adapter)\n {\n-\tstruct idpf_ctlq_create_info ctlq_info[] = {\n+\tstruct libie_ctlq_ctx *ctx = &adapter->ctlq_ctx;\n+\tstruct libie_ctlq_create_info ctlq_info[] = {\n \t\t{\n-\t\t\t.type = IDPF_CTLQ_TYPE_MAILBOX_TX,\n-\t\t\t.id = IDPF_DFLT_MBX_ID,\n+\t\t\t.type = LIBIE_CTLQ_TYPE_TX,\n+\t\t\t.id = LIBIE_CTLQ_MBX_ID,\n \t\t\t.len = IDPF_DFLT_MBX_Q_LEN,\n-\t\t\t.buf_size = IDPF_CTLQ_MAX_BUF_LEN\n \t\t},\n \t\t{\n-\t\t\t.type = IDPF_CTLQ_TYPE_MAILBOX_RX,\n-\t\t\t.id = IDPF_DFLT_MBX_ID,\n+\t\t\t.type = LIBIE_CTLQ_TYPE_RX,\n+\t\t\t.id = LIBIE_CTLQ_MBX_ID,\n \t\t\t.len = IDPF_DFLT_MBX_Q_LEN,\n-\t\t\t.buf_size = IDPF_CTLQ_MAX_BUF_LEN\n \t\t}\n \t};\n-\tstruct idpf_hw *hw = &adapter->hw;\n+\tstruct libie_ctlq_xn_init_params params = {\n+\t\t.num_qs = IDPF_NUM_DFLT_MBX_Q,\n+\t\t.cctlq_info = ctlq_info,\n+\t\t.ctx = ctx,\n+\t};\n \tint err;\n \n-\tadapter->dev_ops.reg_ops.ctlq_reg_init(adapter, ctlq_info);\n+\tadapter->dev_ops.reg_ops.ctlq_reg_init(&ctx->mmio_info,\n+\t\t\t\t\t params.cctlq_info);\n \n-\terr = idpf_ctlq_init(hw, IDPF_NUM_DFLT_MBX_Q, ctlq_info);\n+\terr = libie_ctlq_xn_init(¶ms);\n \tif (err)\n \t\treturn err;\n \n-\thw->asq = idpf_find_ctlq(hw, IDPF_CTLQ_TYPE_MAILBOX_TX,\n-\t\t\t\t IDPF_DFLT_MBX_ID);\n-\thw->arq = idpf_find_ctlq(hw, IDPF_CTLQ_TYPE_MAILBOX_RX,\n-\t\t\t\t IDPF_DFLT_MBX_ID);\n-\n-\tif (!hw->asq || !hw->arq) {\n-\t\tidpf_ctlq_deinit(hw);\n-\n+\tadapter->asq = libie_find_ctlq(ctx, LIBIE_CTLQ_TYPE_TX,\n+\t\t\t\t LIBIE_CTLQ_MBX_ID);\n+\tadapter->arq = libie_find_ctlq(ctx, LIBIE_CTLQ_TYPE_RX,\n+\t\t\t\t LIBIE_CTLQ_MBX_ID);\n+\tif (!adapter->asq || !adapter->arq) {\n+\t\tlibie_ctlq_xn_deinit(params.xnm, ctx);\n \t\treturn -ENOENT;\n \t}\n \n+\tadapter->xn_init_params.xnm = params.xnm;\n \tadapter->state = __IDPF_VER_CHECK;\n \n \treturn 0;\n@@ -3378,12 +2930,14 @@ int idpf_init_dflt_mbx(struct idpf_adapter *adapter)\n */\n void idpf_deinit_dflt_mbx(struct idpf_adapter *adapter)\n {\n-\tif (adapter->hw.arq && adapter->hw.asq) {\n-\t\tidpf_mb_clean(adapter, adapter->hw.asq);\n-\t\tidpf_ctlq_deinit(&adapter->hw);\n+\tif (adapter->arq && adapter->asq) 
{\n+\t\tidpf_mb_clean(adapter, adapter->asq);\n+\t\tlibie_ctlq_xn_deinit(adapter->xn_init_params.xnm,\n+\t\t\t\t &adapter->ctlq_ctx);\n \t}\n-\tadapter->hw.arq = NULL;\n-\tadapter->hw.asq = NULL;\n+\n+\tadapter->arq = NULL;\n+\tadapter->asq = NULL;\n }\n \n /**\n@@ -3456,15 +3010,6 @@ int idpf_vc_core_init(struct idpf_adapter *adapter)\n \tu16 num_max_vports;\n \tint err = 0;\n \n-\tif (!adapter->vcxn_mngr) {\n-\t\tadapter->vcxn_mngr = kzalloc(sizeof(*adapter->vcxn_mngr), GFP_KERNEL);\n-\t\tif (!adapter->vcxn_mngr) {\n-\t\t\terr = -ENOMEM;\n-\t\t\tgoto init_failed;\n-\t\t}\n-\t}\n-\tidpf_vc_xn_init(adapter->vcxn_mngr);\n-\n \twhile (adapter->state != __IDPF_INIT_SW) {\n \t\tswitch (adapter->state) {\n \t\tcase __IDPF_VER_CHECK:\n@@ -3609,8 +3154,7 @@ int idpf_vc_core_init(struct idpf_adapter *adapter)\n \t * the mailbox again\n \t */\n \tadapter->state = __IDPF_VER_CHECK;\n-\tif (adapter->vcxn_mngr)\n-\t\tidpf_vc_xn_shutdown(adapter->vcxn_mngr);\n+\tidpf_deinit_dflt_mbx(adapter);\n \tset_bit(IDPF_HR_DRV_LOAD, adapter->flags);\n \tqueue_delayed_work(adapter->vc_event_wq, &adapter->vc_event_task,\n \t\t\t msecs_to_jiffies(task_delay));\n@@ -3633,7 +3177,7 @@ void idpf_vc_core_deinit(struct idpf_adapter *adapter)\n \t/* Avoid transaction timeouts when called during reset */\n \tremove_in_prog = test_bit(IDPF_REMOVE_IN_PROG, adapter->flags);\n \tif (!remove_in_prog)\n-\t\tidpf_vc_xn_shutdown(adapter->vcxn_mngr);\n+\t\tidpf_deinit_dflt_mbx(adapter);\n \n \tidpf_ptp_release(adapter);\n \tidpf_deinit_task(adapter);\n@@ -3642,7 +3186,7 @@ void idpf_vc_core_deinit(struct idpf_adapter *adapter)\n \tidpf_intr_rel(adapter);\n \n \tif (remove_in_prog)\n-\t\tidpf_vc_xn_shutdown(adapter->vcxn_mngr);\n+\t\tidpf_deinit_dflt_mbx(adapter);\n \n \tcancel_delayed_work_sync(&adapter->serv_task);\n \tcancel_delayed_work_sync(&adapter->mbx_task);\n@@ -4184,9 +3728,9 @@ static void idpf_set_mac_type(const u8 *default_mac_addr,\n \n /**\n * idpf_mac_filter_async_handler - Async callback for mac filters\n- * @adapter: private data struct\n- * @xn: transaction for message\n- * @ctlq_msg: received message\n+ * @ctx: controlq context structure\n+ * @buff: response buffer pointer and size\n+ * @status: async call return value\n *\n * In some scenarios driver can't sleep and wait for a reply (e.g.: stack is\n * holding rtnl_lock) when adding a new mac filter. 
It puts us in a difficult\n@@ -4194,13 +3738,14 @@ static void idpf_set_mac_type(const u8 *default_mac_addr,\n * ultimately do is remove it from our list of mac filters and report the\n * error.\n */\n-static int idpf_mac_filter_async_handler(struct idpf_adapter *adapter,\n-\t\t\t\t\t struct idpf_vc_xn *xn,\n-\t\t\t\t\t const struct idpf_ctlq_msg *ctlq_msg)\n+static void idpf_mac_filter_async_handler(void *ctx,\n+\t\t\t\t\t struct kvec *buff,\n+\t\t\t\t\t int status)\n {\n \tstruct virtchnl2_mac_addr_list *ma_list;\n \tstruct idpf_vport_config *vport_config;\n \tstruct virtchnl2_mac_addr *mac_addr;\n+\tstruct idpf_adapter *adapter = ctx;\n \tstruct idpf_mac_filter *f, *tmp;\n \tstruct list_head *ma_list_head;\n \tstruct idpf_vport *vport;\n@@ -4208,18 +3753,18 @@ static int idpf_mac_filter_async_handler(struct idpf_adapter *adapter,\n \tint i;\n \n \t/* if success we're done, we're only here if something bad happened */\n-\tif (!ctlq_msg->cookie.mbx.chnl_retval)\n-\t\treturn 0;\n+\tif (!status)\n+\t\tgoto free_mem;\n \n+\tma_list = buff->iov_base;\n \t/* make sure at least struct is there */\n-\tif (xn->reply_sz < sizeof(*ma_list))\n+\tif (buff->iov_len < sizeof(*ma_list))\n \t\tgoto invalid_payload;\n \n-\tma_list = ctlq_msg->ctx.indirect.payload->va;\n \tmac_addr = ma_list->mac_addr_list;\n \tnum_entries = le16_to_cpu(ma_list->num_mac_addr);\n \t/* we should have received a buffer at least this big */\n-\tif (xn->reply_sz < struct_size(ma_list, mac_addr_list, num_entries))\n+\tif (buff->iov_len < struct_size(ma_list, mac_addr_list, num_entries))\n \t\tgoto invalid_payload;\n \n \tvport = idpf_vid_to_vport(adapter, le32_to_cpu(ma_list->vport_id));\n@@ -4239,16 +3784,15 @@ static int idpf_mac_filter_async_handler(struct idpf_adapter *adapter,\n \t\t\tif (ether_addr_equal(mac_addr[i].addr, f->macaddr))\n \t\t\t\tlist_del(&f->list);\n \tspin_unlock_bh(&vport_config->mac_filter_list_lock);\n-\tdev_err_ratelimited(&adapter->pdev->dev, \"Received error sending MAC filter request (op %d)\\n\",\n-\t\t\t xn->vc_op);\n-\n-\treturn 0;\n+\tdev_err_ratelimited(&adapter->pdev->dev, \"Received error %d on sending MAC filter request\\n\",\n+\t\t\t status);\n+\tgoto free_mem;\n \n invalid_payload:\n-\tdev_err_ratelimited(&adapter->pdev->dev, \"Received invalid MAC filter payload (op %d) (len %zd)\\n\",\n-\t\t\t xn->vc_op, xn->reply_sz);\n-\n-\treturn -EINVAL;\n+\tdev_err_ratelimited(&adapter->pdev->dev, \"Received invalid MAC filter payload (len %zd)\\n\",\n+\t\t\t buff->iov_len);\n+free_mem:\n+\tlibie_ctlq_release_rx_buf(buff);\n }\n \n /**\n@@ -4267,19 +3811,21 @@ int idpf_add_del_mac_filters(struct idpf_adapter *adapter,\n \t\t\t const u8 *default_mac_addr, u32 vport_id,\n \t\t\t bool add, bool async)\n {\n-\tstruct virtchnl2_mac_addr_list *ma_list __free(kfree) = NULL;\n \tstruct virtchnl2_mac_addr *mac_addr __free(kfree) = NULL;\n-\tstruct idpf_vc_xn_params xn_params = {};\n+\tstruct libie_ctlq_xn_send_params xn_params = {\n+\t\t.timeout_ms\t= IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC,\n+\t\t.chnl_opcode\t= add ? VIRTCHNL2_OP_ADD_MAC_ADDR :\n+\t\t\t\t\tVIRTCHNL2_OP_DEL_MAC_ADDR,\n+\t};\n+\tstruct virtchnl2_mac_addr_list *ma_list;\n \tu32 num_msgs, total_filters = 0;\n \tstruct idpf_mac_filter *f;\n-\tssize_t reply_sz;\n-\tint i = 0, k;\n+\tint i = 0;\n \n-\txn_params.vc_op = add ? 
VIRTCHNL2_OP_ADD_MAC_ADDR :\n-\t\t\t\tVIRTCHNL2_OP_DEL_MAC_ADDR;\n-\txn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;\n-\txn_params.async = async;\n-\txn_params.async_handler = idpf_mac_filter_async_handler;\n+\tif (async) {\n+\t\txn_params.resp_cb = idpf_mac_filter_async_handler;\n+\t\txn_params.send_ctx = adapter;\n+\t}\n \n \tspin_lock_bh(&vport_config->mac_filter_list_lock);\n \n@@ -4334,32 +3880,31 @@ int idpf_add_del_mac_filters(struct idpf_adapter *adapter,\n \t */\n \tnum_msgs = DIV_ROUND_UP(total_filters, IDPF_NUM_FILTERS_PER_MSG);\n \n-\tfor (i = 0, k = 0; i < num_msgs; i++) {\n-\t\tu32 entries_size, buf_size, num_entries;\n+\tfor (u32 i = 0, k = 0; i < num_msgs; i++) {\n+\t\tu32 entries_size, num_entries;\n+\t\tsize_t buf_size;\n+\t\tint err;\n \n \t\tnum_entries = min_t(u32, total_filters,\n \t\t\t\t IDPF_NUM_FILTERS_PER_MSG);\n \t\tentries_size = sizeof(struct virtchnl2_mac_addr) * num_entries;\n \t\tbuf_size = struct_size(ma_list, mac_addr_list, num_entries);\n \n-\t\tif (!ma_list || num_entries != IDPF_NUM_FILTERS_PER_MSG) {\n-\t\t\tkfree(ma_list);\n-\t\t\tma_list = kzalloc(buf_size, GFP_ATOMIC);\n-\t\t\tif (!ma_list)\n-\t\t\t\treturn -ENOMEM;\n-\t\t} else {\n-\t\t\tmemset(ma_list, 0, buf_size);\n-\t\t}\n+\t\tma_list = kzalloc(buf_size, GFP_KERNEL);\n+\t\tif (!ma_list)\n+\t\t\treturn -ENOMEM;\n \n \t\tma_list->vport_id = cpu_to_le32(vport_id);\n \t\tma_list->num_mac_addr = cpu_to_le16(num_entries);\n \t\tmemcpy(ma_list->mac_addr_list, &mac_addr[k], entries_size);\n \n-\t\txn_params.send_buf.iov_base = ma_list;\n-\t\txn_params.send_buf.iov_len = buf_size;\n-\t\treply_sz = idpf_vc_xn_exec(adapter, &xn_params);\n-\t\tif (reply_sz < 0)\n-\t\t\treturn reply_sz;\n+\t\terr = idpf_send_mb_msg_kfree(adapter, &xn_params, ma_list,\n+\t\t\t\t\t buf_size);\n+\t\tif (err)\n+\t\t\treturn err;\n+\n+\t\tif (!async)\n+\t\t\tlibie_ctlq_release_rx_buf(&xn_params.recv_mem);\n \n \t\tk += num_entries;\n \t\ttotal_filters -= num_entries;\n@@ -4368,6 +3913,28 @@ int idpf_add_del_mac_filters(struct idpf_adapter *adapter,\n \treturn 0;\n }\n \n+/**\n+ * idpf_promiscuous_async_handler - async callback for promiscuous mode\n+ * @ctx: controlq context structure\n+ * @buff: response buffer pointer and size\n+ * @status: async call return value\n+ *\n+ * Nobody is waiting for the promiscuous virtchnl message response. 
Print\n+ * an error message if something went wrong and return.\n+ */\n+static void idpf_promiscuous_async_handler(void *ctx,\n+\t\t\t\t\t struct kvec *buff,\n+\t\t\t\t\t int status)\n+{\n+\tstruct idpf_adapter *adapter = ctx;\n+\n+\tif (status)\n+\t\tdev_err_ratelimited(&adapter->pdev->dev, \"Failed to set promiscuous mode: %d\\n\",\n+\t\t\t\t status);\n+\n+\tlibie_ctlq_release_rx_buf(buff);\n+}\n+\n /**\n * idpf_set_promiscuous - set promiscuous and send message to mailbox\n * @adapter: Driver specific private structure\n@@ -4382,9 +3949,13 @@ int idpf_set_promiscuous(struct idpf_adapter *adapter,\n \t\t\t struct idpf_vport_user_config_data *config_data,\n \t\t\t u32 vport_id)\n {\n-\tstruct idpf_vc_xn_params xn_params = {};\n+\tstruct libie_ctlq_xn_send_params xn_params = {\n+\t\t.timeout_ms\t= IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC,\n+\t\t.chnl_opcode\t= VIRTCHNL2_OP_CONFIG_PROMISCUOUS_MODE,\n+\t\t.resp_cb\t= idpf_promiscuous_async_handler,\n+\t\t.send_ctx\t= adapter,\n+\t};\n \tstruct virtchnl2_promisc_info vpi;\n-\tssize_t reply_sz;\n \tu16 flags = 0;\n \n \tif (test_bit(__IDPF_PROMISC_UC, config_data->user_flags))\n@@ -4395,15 +3966,7 @@ int idpf_set_promiscuous(struct idpf_adapter *adapter,\n \tvpi.vport_id = cpu_to_le32(vport_id);\n \tvpi.flags = cpu_to_le16(flags);\n \n-\txn_params.vc_op = VIRTCHNL2_OP_CONFIG_PROMISCUOUS_MODE;\n-\txn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;\n-\txn_params.send_buf.iov_base = &vpi;\n-\txn_params.send_buf.iov_len = sizeof(vpi);\n-\t/* setting promiscuous is only ever done asynchronously */\n-\txn_params.async = true;\n-\treply_sz = idpf_vc_xn_exec(adapter, &xn_params);\n-\n-\treturn reply_sz < 0 ? reply_sz : 0;\n+\treturn idpf_send_mb_msg(adapter, &xn_params, &vpi, sizeof(vpi));\n }\n \n /**\n@@ -4421,26 +3984,39 @@ int idpf_idc_rdma_vc_send_sync(struct iidc_rdma_core_dev_info *cdev_info,\n \t\t\t u8 *recv_msg, u16 *recv_len)\n {\n \tstruct idpf_adapter *adapter = pci_get_drvdata(cdev_info->pdev);\n-\tstruct idpf_vc_xn_params xn_params = { };\n-\tssize_t reply_sz;\n-\tu16 recv_size;\n+\tstruct libie_ctlq_xn_send_params xn_params = {\n+\t\t.chnl_opcode = VIRTCHNL2_OP_RDMA,\n+\t\t.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC,\n+\t};\n+\tu8 on_stack_buf[LIBIE_CP_TX_COPYBREAK];\n+\tvoid *send_buf;\n+\tint err;\n \n-\tif (!recv_msg || !recv_len || msg_size > IDPF_CTLQ_MAX_BUF_LEN)\n+\tif (!recv_msg || !recv_len || msg_size > LIBIE_CTLQ_MAX_BUF_LEN)\n \t\treturn -EINVAL;\n \n-\trecv_size = min_t(u16, *recv_len, IDPF_CTLQ_MAX_BUF_LEN);\n-\t*recv_len = 0;\n-\txn_params.vc_op = VIRTCHNL2_OP_RDMA;\n-\txn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;\n-\txn_params.send_buf.iov_base = send_msg;\n-\txn_params.send_buf.iov_len = msg_size;\n-\txn_params.recv_buf.iov_base = recv_msg;\n-\txn_params.recv_buf.iov_len = recv_size;\n-\treply_sz = idpf_vc_xn_exec(adapter, &xn_params);\n-\tif (reply_sz < 0)\n-\t\treturn reply_sz;\n-\t*recv_len = reply_sz;\n+\tif (!libie_cp_can_send_onstack(msg_size)) {\n+\t\tsend_buf = kzalloc(msg_size, GFP_KERNEL);\n+\t\tif (!send_buf)\n+\t\t\treturn -ENOMEM;\n+\t} else {\n+\t\tsend_buf = on_stack_buf;\n+\t}\n \n-\treturn 0;\n+\tmemcpy(send_buf, send_msg, msg_size);\n+\terr = idpf_send_mb_msg(adapter, &xn_params, send_buf, msg_size);\n+\tif (err)\n+\t\treturn err;\n+\n+\tif (xn_params.recv_mem.iov_len > *recv_len) {\n+\t\terr = -EINVAL;\n+\t\tgoto rel_buf;\n+\t}\n+\n+\t*recv_len = xn_params.recv_mem.iov_len;\n+\tmemcpy(recv_msg, xn_params.recv_mem.iov_base, 
*recv_len);\n+rel_buf:\n+\tlibie_ctlq_release_rx_buf(&xn_params.recv_mem);\n+\treturn err;\n }\n EXPORT_SYMBOL_GPL(idpf_idc_rdma_vc_send_sync);\ndiff --git a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.h b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.h\nindex 762b477e019c..be3fe8fa7327 100644\n--- a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.h\n+++ b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.h\n@@ -7,85 +7,6 @@\n #include <linux/intel/virtchnl2.h>\n \n #define IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC\t(60 * 1000)\n-#define IDPF_VC_XN_IDX_M\t\tGENMASK(7, 0)\n-#define IDPF_VC_XN_SALT_M\t\tGENMASK(15, 8)\n-#define IDPF_VC_XN_RING_LEN\t\tU8_MAX\n-\n-/**\n- * enum idpf_vc_xn_state - Virtchnl transaction status\n- * @IDPF_VC_XN_IDLE: not expecting a reply, ready to be used\n- * @IDPF_VC_XN_WAITING: expecting a reply, not yet received\n- * @IDPF_VC_XN_COMPLETED_SUCCESS: a reply was expected and received, buffer\n- *\t\t\t\t updated\n- * @IDPF_VC_XN_COMPLETED_FAILED: a reply was expected and received, but there\n- *\t\t\t\t was an error, buffer not updated\n- * @IDPF_VC_XN_SHUTDOWN: transaction object cannot be used, VC torn down\n- * @IDPF_VC_XN_ASYNC: transaction sent asynchronously and doesn't have the\n- *\t\t return context; a callback may be provided to handle\n- *\t\t return\n- */\n-enum idpf_vc_xn_state {\n-\tIDPF_VC_XN_IDLE = 1,\n-\tIDPF_VC_XN_WAITING,\n-\tIDPF_VC_XN_COMPLETED_SUCCESS,\n-\tIDPF_VC_XN_COMPLETED_FAILED,\n-\tIDPF_VC_XN_SHUTDOWN,\n-\tIDPF_VC_XN_ASYNC,\n-};\n-\n-struct idpf_vc_xn;\n-/* Callback for asynchronous messages */\n-typedef int (*async_vc_cb) (struct idpf_adapter *, struct idpf_vc_xn *,\n-\t\t\t const struct idpf_ctlq_msg *);\n-\n-/**\n- * struct idpf_vc_xn - Data structure representing virtchnl transactions\n- * @completed: virtchnl event loop uses that to signal when a reply is\n- *\t available, uses kernel completion API\n- * @state: virtchnl event loop stores the data below, protected by the\n- *\t completion's lock.\n- * @reply_sz: Original size of reply, may be > reply_buf.iov_len; it will be\n- *\t truncated on its way to the receiver thread according to\n- *\t reply_buf.iov_len.\n- * @reply: Reference to the buffer(s) where the reply data should be written\n- *\t to. May be 0-length (then NULL address permitted) if the reply data\n- *\t should be ignored.\n- * @async_handler: if sent asynchronously, a callback can be provided to handle\n- *\t\t the reply when it's received\n- * @vc_op: corresponding opcode sent with this transaction\n- * @idx: index used as retrieval on reply receive, used for cookie\n- * @salt: changed every message to make unique, used for cookie\n- */\n-struct idpf_vc_xn {\n-\tstruct completion completed;\n-\tenum idpf_vc_xn_state state;\n-\tsize_t reply_sz;\n-\tstruct kvec reply;\n-\tasync_vc_cb async_handler;\n-\tu32 vc_op;\n-\tu8 idx;\n-\tu8 salt;\n-};\n-\n-/**\n- * struct idpf_vc_xn_params - Parameters for executing transaction\n- * @send_buf: kvec for send buffer\n- * @recv_buf: kvec for recv buffer, may be NULL, must then have zero length\n- * @timeout_ms: timeout to wait for reply\n- * @async: send message asynchronously, will not wait on completion\n- * @async_handler: If sent asynchronously, optional callback handler. 
The user\n- *\t\t must be careful when using async handlers as the memory for\n- *\t\t the recv_buf _cannot_ be on stack if this is async.\n- * @vc_op: virtchnl op to send\n- */\n-struct idpf_vc_xn_params {\n-\tstruct kvec send_buf;\n-\tstruct kvec recv_buf;\n-\tint timeout_ms;\n-\tbool async;\n-\tasync_vc_cb async_handler;\n-\tu32 vc_op;\n-};\n \n struct idpf_adapter;\n struct idpf_netdev_priv;\n@@ -95,8 +16,6 @@ struct idpf_vport_max_q;\n struct idpf_vport_config;\n struct idpf_vport_user_config_data;\n \n-ssize_t idpf_vc_xn_exec(struct idpf_adapter *adapter,\n-\t\t\tconst struct idpf_vc_xn_params *params);\n int idpf_init_dflt_mbx(struct idpf_adapter *adapter);\n void idpf_deinit_dflt_mbx(struct idpf_adapter *adapter);\n int idpf_vc_core_init(struct idpf_adapter *adapter);\n@@ -123,9 +42,11 @@ bool idpf_sideband_action_ena(struct idpf_vport *vport,\n \t\t\t struct ethtool_rx_flow_spec *fsp);\n unsigned int idpf_fsteer_max_rules(struct idpf_vport *vport);\n \n-int idpf_recv_mb_msg(struct idpf_adapter *adapter, struct idpf_ctlq_info *arq);\n-int idpf_send_mb_msg(struct idpf_adapter *adapter, struct idpf_ctlq_info *asq,\n-\t\t u32 op, u16 msg_size, u8 *msg, u16 cookie);\n+void idpf_recv_event_msg(struct libie_ctlq_ctx *ctx,\n+\t\t\t struct libie_ctlq_msg *ctlq_msg);\n+int idpf_send_mb_msg(struct idpf_adapter *adapter,\n+\t\t struct libie_ctlq_xn_send_params *xn_params,\n+\t\t void *send_buf, size_t send_buf_size);\n \n struct idpf_queue_ptr {\n \tenum virtchnl2_queue_type\ttype;\n@@ -213,7 +134,6 @@ int idpf_send_get_set_rss_key_msg(struct idpf_adapter *adapter,\n int idpf_send_get_set_rss_lut_msg(struct idpf_adapter *adapter,\n \t\t\t\t struct idpf_rss_data *rss_data,\n \t\t\t\t u32 vport_id, bool get);\n-void idpf_vc_xn_shutdown(struct idpf_vc_xn_manager *vcxn_mngr);\n int idpf_idc_rdma_vc_send_sync(struct iidc_rdma_core_dev_info *cdev_info,\n \t\t\t u8 *send_msg, u16 msg_size,\n \t\t\t u8 *recv_msg, u16 *recv_len);\ndiff --git a/drivers/net/ethernet/intel/idpf/idpf_virtchnl_ptp.c b/drivers/net/ethernet/intel/idpf/idpf_virtchnl_ptp.c\nindex 82f26fc7bc08..a6cadba28905 100644\n--- a/drivers/net/ethernet/intel/idpf/idpf_virtchnl_ptp.c\n+++ b/drivers/net/ethernet/intel/idpf/idpf_virtchnl_ptp.c\n@@ -15,7 +15,6 @@\n */\n int idpf_ptp_get_caps(struct idpf_adapter *adapter)\n {\n-\tstruct virtchnl2_ptp_get_caps *recv_ptp_caps_msg __free(kfree) = NULL;\n \tstruct virtchnl2_ptp_get_caps send_ptp_caps_msg = {\n \t\t.caps = cpu_to_le32(VIRTCHNL2_CAP_PTP_GET_DEVICE_CLK_TIME |\n \t\t\t\t VIRTCHNL2_CAP_PTP_GET_DEVICE_CLK_TIME_MB |\n@@ -24,35 +23,34 @@ int idpf_ptp_get_caps(struct idpf_adapter *adapter)\n \t\t\t\t VIRTCHNL2_CAP_PTP_ADJ_DEVICE_CLK_MB |\n \t\t\t\t VIRTCHNL2_CAP_PTP_TX_TSTAMPS_MB)\n \t};\n-\tstruct idpf_vc_xn_params xn_params = {\n-\t\t.vc_op = VIRTCHNL2_OP_PTP_GET_CAPS,\n-\t\t.send_buf.iov_base = &send_ptp_caps_msg,\n-\t\t.send_buf.iov_len = sizeof(send_ptp_caps_msg),\n+\tstruct libie_ctlq_xn_send_params xn_params = {\n+\t\t.chnl_opcode = VIRTCHNL2_OP_PTP_GET_CAPS,\n \t\t.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC,\n \t};\n \tstruct virtchnl2_ptp_cross_time_reg_offsets cross_tstamp_offsets;\n \tstruct libie_mmio_info *mmio = &adapter->ctlq_ctx.mmio_info;\n \tstruct virtchnl2_ptp_clk_adj_reg_offsets clk_adj_offsets;\n \tstruct virtchnl2_ptp_clk_reg_offsets clock_offsets;\n+\tstruct virtchnl2_ptp_get_caps *recv_ptp_caps_msg;\n \tstruct idpf_ptp_secondary_mbx *scnd_mbx;\n \tstruct idpf_ptp *ptp = adapter->ptp;\n \tenum idpf_ptp_access access_type;\n \tu32 temp_offset;\n-\tint 
reply_sz;\n+\tsize_t reply_sz;\n+\tint err;\n \n-\trecv_ptp_caps_msg = kzalloc(sizeof(struct virtchnl2_ptp_get_caps),\n-\t\t\t\t GFP_KERNEL);\n-\tif (!recv_ptp_caps_msg)\n-\t\treturn -ENOMEM;\n+\terr = idpf_send_mb_msg(adapter, &xn_params, &send_ptp_caps_msg,\n+\t\t\t sizeof(send_ptp_caps_msg));\n+\tif (err)\n+\t\treturn err;\n \n-\txn_params.recv_buf.iov_base = recv_ptp_caps_msg;\n-\txn_params.recv_buf.iov_len = sizeof(*recv_ptp_caps_msg);\n+\treply_sz = xn_params.recv_mem.iov_len;\n+\tif (reply_sz != sizeof(*recv_ptp_caps_msg)) {\n+\t\terr = -EIO;\n+\t\tgoto free_resp;\n+\t}\n \n-\treply_sz = idpf_vc_xn_exec(adapter, &xn_params);\n-\tif (reply_sz < 0)\n-\t\treturn reply_sz;\n-\telse if (reply_sz != sizeof(*recv_ptp_caps_msg))\n-\t\treturn -EIO;\n+\trecv_ptp_caps_msg = xn_params.recv_mem.iov_base;\n \n \tptp->caps = le32_to_cpu(recv_ptp_caps_msg->caps);\n \tptp->base_incval = le64_to_cpu(recv_ptp_caps_msg->base_incval);\n@@ -113,7 +111,7 @@ int idpf_ptp_get_caps(struct idpf_adapter *adapter)\n discipline_clock:\n \taccess_type = ptp->adj_dev_clk_time_access;\n \tif (access_type != IDPF_PTP_DIRECT)\n-\t\treturn 0;\n+\t\tgoto free_resp;\n \n \tclk_adj_offsets = recv_ptp_caps_msg->clk_adj_offsets;\n \n@@ -146,7 +144,9 @@ int idpf_ptp_get_caps(struct idpf_adapter *adapter)\n \tptp->dev_clk_regs.phy_shadj_h =\n \t\tlibie_pci_get_mmio_addr(mmio, temp_offset);\n \n-\treturn 0;\n+free_resp:\n+\tlibie_ctlq_release_rx_buf(&xn_params.recv_mem);\n+\treturn err;\n }\n \n /**\n@@ -161,28 +161,34 @@ int idpf_ptp_get_caps(struct idpf_adapter *adapter)\n int idpf_ptp_get_dev_clk_time(struct idpf_adapter *adapter,\n \t\t\t struct idpf_ptp_dev_timers *dev_clk_time)\n {\n+\tstruct virtchnl2_ptp_get_dev_clk_time *get_dev_clk_time_resp;\n \tstruct virtchnl2_ptp_get_dev_clk_time get_dev_clk_time_msg;\n-\tstruct idpf_vc_xn_params xn_params = {\n-\t\t.vc_op = VIRTCHNL2_OP_PTP_GET_DEV_CLK_TIME,\n-\t\t.send_buf.iov_base = &get_dev_clk_time_msg,\n-\t\t.send_buf.iov_len = sizeof(get_dev_clk_time_msg),\n-\t\t.recv_buf.iov_base = &get_dev_clk_time_msg,\n-\t\t.recv_buf.iov_len = sizeof(get_dev_clk_time_msg),\n+\tstruct libie_ctlq_xn_send_params xn_params = {\n+\t\t.chnl_opcode = VIRTCHNL2_OP_PTP_GET_DEV_CLK_TIME,\n \t\t.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC,\n \t};\n-\tint reply_sz;\n+\tsize_t reply_sz;\n \tu64 dev_time;\n+\tint err;\n \n-\treply_sz = idpf_vc_xn_exec(adapter, &xn_params);\n-\tif (reply_sz < 0)\n-\t\treturn reply_sz;\n-\tif (reply_sz != sizeof(get_dev_clk_time_msg))\n-\t\treturn -EIO;\n+\terr = idpf_send_mb_msg(adapter, &xn_params, &get_dev_clk_time_msg,\n+\t\t\t sizeof(get_dev_clk_time_msg));\n+\tif (err)\n+\t\treturn err;\n \n-\tdev_time = le64_to_cpu(get_dev_clk_time_msg.dev_time_ns);\n+\treply_sz = xn_params.recv_mem.iov_len;\n+\tif (reply_sz != sizeof(*get_dev_clk_time_resp)) {\n+\t\terr = -EIO;\n+\t\tgoto free_resp;\n+\t}\n+\n+\tget_dev_clk_time_resp = xn_params.recv_mem.iov_base;\n+\tdev_time = le64_to_cpu(get_dev_clk_time_resp->dev_time_ns);\n \tdev_clk_time->dev_clk_time_ns = dev_time;\n \n-\treturn 0;\n+free_resp:\n+\tlibie_ctlq_release_rx_buf(&xn_params.recv_mem);\n+\treturn err;\n }\n \n /**\n@@ -198,27 +204,30 @@ int idpf_ptp_get_dev_clk_time(struct idpf_adapter *adapter,\n int idpf_ptp_get_cross_time(struct idpf_adapter *adapter,\n \t\t\t struct idpf_ptp_dev_timers *cross_time)\n {\n-\tstruct virtchnl2_ptp_get_cross_time cross_time_msg;\n-\tstruct idpf_vc_xn_params xn_params = {\n-\t\t.vc_op = VIRTCHNL2_OP_PTP_GET_CROSS_TIME,\n-\t\t.send_buf.iov_base = 
&cross_time_msg,\n-\t\t.send_buf.iov_len = sizeof(cross_time_msg),\n-\t\t.recv_buf.iov_base = &cross_time_msg,\n-\t\t.recv_buf.iov_len = sizeof(cross_time_msg),\n+\tstruct virtchnl2_ptp_get_cross_time cross_time_send, *cross_time_recv;\n+\tstruct libie_ctlq_xn_send_params xn_params = {\n+\t\t.chnl_opcode = VIRTCHNL2_OP_PTP_GET_CROSS_TIME,\n \t\t.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC,\n \t};\n-\tint reply_sz;\n+\tint err = 0;\n \n-\treply_sz = idpf_vc_xn_exec(adapter, &xn_params);\n-\tif (reply_sz < 0)\n-\t\treturn reply_sz;\n-\tif (reply_sz != sizeof(cross_time_msg))\n-\t\treturn -EIO;\n+\terr = idpf_send_mb_msg(adapter, &xn_params, &cross_time_send,\n+\t\t\t sizeof(cross_time_send));\n+\tif (err)\n+\t\treturn err;\n+\n+\tif (xn_params.recv_mem.iov_len != sizeof(*cross_time_recv)) {\n+\t\terr = -EIO;\n+\t\tgoto free_resp;\n+\t}\n \n-\tcross_time->dev_clk_time_ns = le64_to_cpu(cross_time_msg.dev_time_ns);\n-\tcross_time->sys_time_ns = le64_to_cpu(cross_time_msg.sys_time_ns);\n+\tcross_time_recv = xn_params.recv_mem.iov_base;\n+\tcross_time->dev_clk_time_ns = le64_to_cpu(cross_time_recv->dev_time_ns);\n+\tcross_time->sys_time_ns = le64_to_cpu(cross_time_recv->sys_time_ns);\n \n-\treturn 0;\n+free_resp:\n+\tlibie_ctlq_release_rx_buf(&xn_params.recv_mem);\n+\treturn err;\n }\n \n /**\n@@ -235,23 +244,18 @@ int idpf_ptp_set_dev_clk_time(struct idpf_adapter *adapter, u64 time)\n \tstruct virtchnl2_ptp_set_dev_clk_time set_dev_clk_time_msg = {\n \t\t.dev_time_ns = cpu_to_le64(time),\n \t};\n-\tstruct idpf_vc_xn_params xn_params = {\n-\t\t.vc_op = VIRTCHNL2_OP_PTP_SET_DEV_CLK_TIME,\n-\t\t.send_buf.iov_base = &set_dev_clk_time_msg,\n-\t\t.send_buf.iov_len = sizeof(set_dev_clk_time_msg),\n-\t\t.recv_buf.iov_base = &set_dev_clk_time_msg,\n-\t\t.recv_buf.iov_len = sizeof(set_dev_clk_time_msg),\n+\tstruct libie_ctlq_xn_send_params xn_params = {\n+\t\t.chnl_opcode = VIRTCHNL2_OP_PTP_SET_DEV_CLK_TIME,\n \t\t.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC,\n \t};\n-\tint reply_sz;\n+\tint err;\n \n-\treply_sz = idpf_vc_xn_exec(adapter, &xn_params);\n-\tif (reply_sz < 0)\n-\t\treturn reply_sz;\n-\tif (reply_sz != sizeof(set_dev_clk_time_msg))\n-\t\treturn -EIO;\n+\terr = idpf_send_mb_msg(adapter, &xn_params, &set_dev_clk_time_msg,\n+\t\t\t sizeof(set_dev_clk_time_msg));\n+\tif (!err)\n+\t\tlibie_ctlq_release_rx_buf(&xn_params.recv_mem);\n \n-\treturn 0;\n+\treturn err;\n }\n \n /**\n@@ -268,23 +272,18 @@ int idpf_ptp_adj_dev_clk_time(struct idpf_adapter *adapter, s64 delta)\n \tstruct virtchnl2_ptp_adj_dev_clk_time adj_dev_clk_time_msg = {\n \t\t.delta = cpu_to_le64(delta),\n \t};\n-\tstruct idpf_vc_xn_params xn_params = {\n-\t\t.vc_op = VIRTCHNL2_OP_PTP_ADJ_DEV_CLK_TIME,\n-\t\t.send_buf.iov_base = &adj_dev_clk_time_msg,\n-\t\t.send_buf.iov_len = sizeof(adj_dev_clk_time_msg),\n-\t\t.recv_buf.iov_base = &adj_dev_clk_time_msg,\n-\t\t.recv_buf.iov_len = sizeof(adj_dev_clk_time_msg),\n+\tstruct libie_ctlq_xn_send_params xn_params = {\n+\t\t.chnl_opcode = VIRTCHNL2_OP_PTP_ADJ_DEV_CLK_TIME,\n \t\t.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC,\n \t};\n-\tint reply_sz;\n+\tint err;\n \n-\treply_sz = idpf_vc_xn_exec(adapter, &xn_params);\n-\tif (reply_sz < 0)\n-\t\treturn reply_sz;\n-\tif (reply_sz != sizeof(adj_dev_clk_time_msg))\n-\t\treturn -EIO;\n+\terr = idpf_send_mb_msg(adapter, &xn_params, &adj_dev_clk_time_msg,\n+\t\t\t sizeof(adj_dev_clk_time_msg));\n+\tif (!err)\n+\t\tlibie_ctlq_release_rx_buf(&xn_params.recv_mem);\n \n-\treturn 0;\n+\treturn err;\n }\n \n /**\n@@ -302,23 +301,18 @@ int 
idpf_ptp_adj_dev_clk_fine(struct idpf_adapter *adapter, u64 incval)\n \tstruct virtchnl2_ptp_adj_dev_clk_fine adj_dev_clk_fine_msg = {\n \t\t.incval = cpu_to_le64(incval),\n \t};\n-\tstruct idpf_vc_xn_params xn_params = {\n-\t\t.vc_op = VIRTCHNL2_OP_PTP_ADJ_DEV_CLK_FINE,\n-\t\t.send_buf.iov_base = &adj_dev_clk_fine_msg,\n-\t\t.send_buf.iov_len = sizeof(adj_dev_clk_fine_msg),\n-\t\t.recv_buf.iov_base = &adj_dev_clk_fine_msg,\n-\t\t.recv_buf.iov_len = sizeof(adj_dev_clk_fine_msg),\n+\tstruct libie_ctlq_xn_send_params xn_params = {\n+\t\t.chnl_opcode = VIRTCHNL2_OP_PTP_ADJ_DEV_CLK_FINE,\n \t\t.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC,\n \t};\n-\tint reply_sz;\n+\tint err;\n \n-\treply_sz = idpf_vc_xn_exec(adapter, &xn_params);\n-\tif (reply_sz < 0)\n-\t\treturn reply_sz;\n-\tif (reply_sz != sizeof(adj_dev_clk_fine_msg))\n-\t\treturn -EIO;\n+\terr = idpf_send_mb_msg(adapter, &xn_params, &adj_dev_clk_fine_msg,\n+\t\t\t sizeof(adj_dev_clk_fine_msg));\n+\tif (!err)\n+\t\tlibie_ctlq_release_rx_buf(&xn_params.recv_mem);\n \n-\treturn 0;\n+\treturn err;\n }\n \n /**\n@@ -337,18 +331,16 @@ int idpf_ptp_get_vport_tstamps_caps(struct idpf_vport *vport)\n \tstruct virtchnl2_ptp_tx_tstamp_latch_caps tx_tstamp_latch_caps;\n \tstruct idpf_ptp_vport_tx_tstamp_caps *tstamp_caps;\n \tstruct idpf_ptp_tx_tstamp *ptp_tx_tstamp, *tmp;\n-\tstruct idpf_vc_xn_params xn_params = {\n-\t\t.vc_op = VIRTCHNL2_OP_PTP_GET_VPORT_TX_TSTAMP_CAPS,\n-\t\t.send_buf.iov_base = &send_tx_tstamp_caps,\n-\t\t.send_buf.iov_len = sizeof(send_tx_tstamp_caps),\n-\t\t.recv_buf.iov_len = IDPF_CTLQ_MAX_BUF_LEN,\n+\tstruct libie_ctlq_xn_send_params xn_params = {\n+\t\t.chnl_opcode = VIRTCHNL2_OP_PTP_GET_VPORT_TX_TSTAMP_CAPS,\n \t\t.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC,\n \t};\n \tenum idpf_ptp_access tstamp_access, get_dev_clk_access;\n \tstruct idpf_ptp *ptp = vport->adapter->ptp;\n \tstruct list_head *head;\n-\tint err = 0, reply_sz;\n+\tsize_t reply_sz;\n \tu16 num_latches;\n+\tint err = 0;\n \tu32 size;\n \n \tif (!ptp)\n@@ -360,19 +352,15 @@ int idpf_ptp_get_vport_tstamps_caps(struct idpf_vport *vport)\n \t get_dev_clk_access == IDPF_PTP_NONE)\n \t\treturn -EOPNOTSUPP;\n \n-\trcv_tx_tstamp_caps = kzalloc(IDPF_CTLQ_MAX_BUF_LEN, GFP_KERNEL);\n-\tif (!rcv_tx_tstamp_caps)\n-\t\treturn -ENOMEM;\n-\n \tsend_tx_tstamp_caps.vport_id = cpu_to_le32(vport->vport_id);\n-\txn_params.recv_buf.iov_base = rcv_tx_tstamp_caps;\n \n-\treply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);\n-\tif (reply_sz < 0) {\n-\t\terr = reply_sz;\n-\t\tgoto get_tstamp_caps_out;\n-\t}\n+\terr = idpf_send_mb_msg(vport->adapter, &xn_params, &send_tx_tstamp_caps,\n+\t\t\t sizeof(send_tx_tstamp_caps));\n+\tif (err)\n+\t\treturn err;\n \n+\trcv_tx_tstamp_caps = xn_params.recv_mem.iov_base;\n+\treply_sz = xn_params.recv_mem.iov_len;\n \tnum_latches = le16_to_cpu(rcv_tx_tstamp_caps->num_latches);\n \tsize = struct_size(rcv_tx_tstamp_caps, tstamp_latches, num_latches);\n \tif (reply_sz != size) {\n@@ -427,7 +415,7 @@ int idpf_ptp_get_vport_tstamps_caps(struct idpf_vport *vport)\n \t}\n \n \tvport->tx_tstamp_caps = tstamp_caps;\n-\tkfree(rcv_tx_tstamp_caps);\n+\tlibie_ctlq_release_rx_buf(&xn_params.recv_mem);\n \n \treturn 0;\n \n@@ -440,7 +428,7 @@ int idpf_ptp_get_vport_tstamps_caps(struct idpf_vport *vport)\n \n \tkfree(tstamp_caps);\n get_tstamp_caps_out:\n-\tkfree(rcv_tx_tstamp_caps);\n+\tlibie_ctlq_release_rx_buf(&xn_params.recv_mem);\n \n \treturn err;\n }\n@@ -537,9 +525,9 @@ idpf_ptp_get_tstamp_value(struct idpf_vport *vport,\n \n /**\n * 
idpf_ptp_get_tx_tstamp_async_handler - Async callback for getting Tx tstamps\n- * @adapter: Driver specific private structure\n- * @xn: transaction for message\n- * @ctlq_msg: received message\n+ * @ctx: adapter pointer\n+ * @mem: address and size of the response\n+ * @status: return value of the request\n *\n * Read the tstamps Tx tstamp values from a received message and put them\n * directly to the skb. The number of timestamps to read is specified by\n@@ -547,22 +535,21 @@ idpf_ptp_get_tstamp_value(struct idpf_vport *vport,\n *\n * Return: 0 on success, -errno otherwise.\n */\n-static int\n-idpf_ptp_get_tx_tstamp_async_handler(struct idpf_adapter *adapter,\n-\t\t\t\t struct idpf_vc_xn *xn,\n-\t\t\t\t const struct idpf_ctlq_msg *ctlq_msg)\n+static void\n+idpf_ptp_get_tx_tstamp_async_handler(void *ctx, struct kvec *mem, int status)\n {\n \tstruct virtchnl2_ptp_get_vport_tx_tstamp_latches *recv_tx_tstamp_msg;\n \tstruct idpf_ptp_vport_tx_tstamp_caps *tx_tstamp_caps;\n \tstruct virtchnl2_ptp_tx_tstamp_latch tstamp_latch;\n \tstruct idpf_ptp_tx_tstamp *tx_tstamp, *tmp;\n \tstruct idpf_vport *tstamp_vport = NULL;\n+\tstruct idpf_adapter *adapter = ctx;\n \tstruct list_head *head;\n \tu16 num_latches;\n \tu32 vport_id;\n \tint err = 0;\n \n-\trecv_tx_tstamp_msg = ctlq_msg->ctx.indirect.payload->va;\n+\trecv_tx_tstamp_msg = mem->iov_base;\n \tvport_id = le32_to_cpu(recv_tx_tstamp_msg->vport_id);\n \n \tidpf_for_each_vport(adapter, vport) {\n@@ -576,7 +563,7 @@ idpf_ptp_get_tx_tstamp_async_handler(struct idpf_adapter *adapter,\n \t}\n \n \tif (!tstamp_vport || !tstamp_vport->tx_tstamp_caps)\n-\t\treturn -EINVAL;\n+\t\tgoto free_resp;\n \n \ttx_tstamp_caps = tstamp_vport->tx_tstamp_caps;\n \tnum_latches = le16_to_cpu(recv_tx_tstamp_msg->num_latches);\n@@ -611,8 +598,8 @@ idpf_ptp_get_tx_tstamp_async_handler(struct idpf_adapter *adapter,\n \n unlock:\n \tspin_unlock_bh(&tx_tstamp_caps->latches_lock);\n-\n-\treturn err;\n+free_resp:\n+\tlibie_ctlq_release_rx_buf(mem);\n }\n \n /**\n@@ -628,15 +615,15 @@ int idpf_ptp_get_tx_tstamp(struct idpf_vport *vport)\n {\n \tstruct virtchnl2_ptp_get_vport_tx_tstamp_latches *send_tx_tstamp_msg;\n \tstruct idpf_ptp_vport_tx_tstamp_caps *tx_tstamp_caps;\n-\tstruct idpf_vc_xn_params xn_params = {\n-\t\t.vc_op = VIRTCHNL2_OP_PTP_GET_VPORT_TX_TSTAMP,\n+\tstruct libie_ctlq_xn_send_params xn_params = {\n+\t\t.chnl_opcode = VIRTCHNL2_OP_PTP_GET_VPORT_TX_TSTAMP,\n \t\t.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC,\n-\t\t.async = true,\n-\t\t.async_handler = idpf_ptp_get_tx_tstamp_async_handler,\n+\t\t.resp_cb = idpf_ptp_get_tx_tstamp_async_handler,\n+\t\t.send_ctx = vport->adapter,\n \t};\n \tstruct idpf_ptp_tx_tstamp *ptp_tx_tstamp;\n-\tint reply_sz, size, msg_size;\n \tstruct list_head *head;\n+\tint size, msg_size;\n \tbool state_upd;\n \tu16 id = 0;\n \n@@ -669,11 +656,7 @@ int idpf_ptp_get_tx_tstamp(struct idpf_vport *vport)\n \tmsg_size = struct_size(send_tx_tstamp_msg, tstamp_latches, id);\n \tsend_tx_tstamp_msg->vport_id = cpu_to_le32(vport->vport_id);\n \tsend_tx_tstamp_msg->num_latches = cpu_to_le16(id);\n-\txn_params.send_buf.iov_base = send_tx_tstamp_msg;\n-\txn_params.send_buf.iov_len = msg_size;\n-\n-\treply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);\n-\tkfree(send_tx_tstamp_msg);\n \n-\treturn min(reply_sz, 0);\n+\treturn idpf_send_mb_msg(vport->adapter, &xn_params, send_tx_tstamp_msg,\n+\t\t\t\tmsg_size);\n }\n", "prefixes": [ "iwl-next", "v5", "09/15" ] }