get:
Show a patch.

patch:
Partially update a patch (only the fields supplied in the request are changed).

put:
Update a patch (full replacement of the writable fields).

GET /api/1.1/patches/2221021/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 2221021,
    "url": "http://patchwork.ozlabs.org/api/1.1/patches/2221021/?format=api",
    "web_url": "http://patchwork.ozlabs.org/project/linuxppc-dev/patch/20260408-ibmvfc-fpin-support-v1-4-52b06c464e03@linux.ibm.com/",
    "project": {
        "id": 2,
        "url": "http://patchwork.ozlabs.org/api/1.1/projects/2/?format=api",
        "name": "Linux PPC development",
        "link_name": "linuxppc-dev",
        "list_id": "linuxppc-dev.lists.ozlabs.org",
        "list_email": "linuxppc-dev@lists.ozlabs.org",
        "web_url": "https://github.com/linuxppc/wiki/wiki",
        "scm_url": "https://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux.git",
        "webscm_url": "https://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux.git/"
    },
    "msgid": "<20260408-ibmvfc-fpin-support-v1-4-52b06c464e03@linux.ibm.com>",
    "date": "2026-04-08T17:07:45",
    "name": "[4/5] ibmvfc: use async sub-queue for FPIN messages",
    "commit_ref": null,
    "pull_url": null,
    "state": "new",
    "archived": false,
    "hash": "6254adcfb1585ab94984d5bb54607c300b32271b",
    "submitter": {
        "id": 93089,
        "url": "http://patchwork.ozlabs.org/api/1.1/people/93089/?format=api",
        "name": "Dave Marquardt via B4 Relay",
        "email": "devnull+davemarq.linux.ibm.com@kernel.org"
    },
    "delegate": null,
    "mbox": "http://patchwork.ozlabs.org/project/linuxppc-dev/patch/20260408-ibmvfc-fpin-support-v1-4-52b06c464e03@linux.ibm.com/mbox/",
    "series": [
        {
            "id": 499164,
            "url": "http://patchwork.ozlabs.org/api/1.1/series/499164/?format=api",
            "web_url": "http://patchwork.ozlabs.org/project/linuxppc-dev/list/?series=499164",
            "date": "2026-04-08T17:07:44",
            "name": "ibmvfc: make ibmvfc support FPIN messages",
            "version": 1,
            "mbox": "http://patchwork.ozlabs.org/series/499164/mbox/"
        }
    ],
    "comments": "http://patchwork.ozlabs.org/api/patches/2221021/comments/",
    "check": "pending",
    "checks": "http://patchwork.ozlabs.org/api/patches/2221021/checks/",
    "tags": {},
    "headers": {
        "Return-Path": "\n <linuxppc-dev+bounces-19521-incoming=patchwork.ozlabs.org@lists.ozlabs.org>",
        "X-Original-To": [
            "incoming@patchwork.ozlabs.org",
            "linuxppc-dev@lists.ozlabs.org"
        ],
        "Delivered-To": "patchwork-incoming@legolas.ozlabs.org",
        "Authentication-Results": [
            "legolas.ozlabs.org;\n\tdkim=pass (2048-bit key;\n unprotected) header.d=kernel.org header.i=@kernel.org header.a=rsa-sha256\n header.s=k20201202 header.b=m3vG4mao;\n\tdkim-atps=neutral",
            "legolas.ozlabs.org;\n spf=pass (sender SPF authorized) smtp.mailfrom=lists.ozlabs.org\n (client-ip=2404:9400:21b9:f100::1; helo=lists.ozlabs.org;\n envelope-from=linuxppc-dev+bounces-19521-incoming=patchwork.ozlabs.org@lists.ozlabs.org;\n receiver=patchwork.ozlabs.org)",
            "lists.ozlabs.org;\n arc=none smtp.remote-ip=\"2600:3c0a:e001:78e:0:1991:8:25\"",
            "lists.ozlabs.org;\n dmarc=pass (p=quarantine dis=none) header.from=kernel.org",
            "lists.ozlabs.org;\n\tdkim=pass (2048-bit key;\n unprotected) header.d=kernel.org header.i=@kernel.org header.a=rsa-sha256\n header.s=k20201202 header.b=m3vG4mao;\n\tdkim-atps=neutral",
            "lists.ozlabs.org;\n spf=pass (sender SPF authorized) smtp.mailfrom=kernel.org\n (client-ip=2600:3c0a:e001:78e:0:1991:8:25; helo=sea.source.kernel.org;\n envelope-from=devnull+davemarq.linux.ibm.com@kernel.org;\n receiver=lists.ozlabs.org)"
        ],
        "Received": [
            "from lists.ozlabs.org (lists.ozlabs.org\n [IPv6:2404:9400:21b9:f100::1])\n\t(using TLSv1.3 with cipher TLS_AES_256_GCM_SHA384 (256/256 bits)\n\t key-exchange x25519 server-signature ECDSA (secp384r1 raw public key)\n server-digest SHA384)\n\t(No client certificate requested)\n\tby legolas.ozlabs.org (Postfix) with ESMTPS id 4frTzV6KM6z1xv0\n\tfor <incoming@patchwork.ozlabs.org>; Thu, 09 Apr 2026 03:08:22 +1000 (AEST)",
            "from boromir.ozlabs.org (localhost [127.0.0.1])\n\tby lists.ozlabs.org (Postfix) with ESMTP id 4frTz32MH9z2ytx;\n\tThu, 09 Apr 2026 03:07:59 +1000 (AEST)",
            "from sea.source.kernel.org (sea.source.kernel.org\n [IPv6:2600:3c0a:e001:78e:0:1991:8:25])\n\t(using TLSv1.3 with cipher TLS_AES_256_GCM_SHA384 (256/256 bits)\n\t key-exchange x25519 server-signature RSA-PSS (2048 bits) server-digest\n SHA256)\n\t(No client certificate requested)\n\tby lists.ozlabs.org (Postfix) with ESMTPS id 4frTyw5CYnz2yd7\n\tfor <linuxppc-dev@lists.ozlabs.org>; Thu, 09 Apr 2026 03:07:52 +1000 (AEST)",
            "from smtp.kernel.org (transwarp.subspace.kernel.org [100.75.92.58])\n\tby sea.source.kernel.org (Postfix) with ESMTP id AEE914442E;\n\tWed,  8 Apr 2026 17:07:49 +0000 (UTC)",
            "by smtp.kernel.org (Postfix) with ESMTPS id 92215C2BCB4;\n\tWed,  8 Apr 2026 17:07:49 +0000 (UTC)",
            "from aws-us-west-2-korg-lkml-1.web.codeaurora.org\n (localhost.localdomain [127.0.0.1])\n\tby smtp.lore.kernel.org (Postfix) with ESMTP id 88E4310F9961;\n\tWed,  8 Apr 2026 17:07:49 +0000 (UTC)"
        ],
        "ARC-Seal": "i=1; a=rsa-sha256; d=lists.ozlabs.org; s=201707; t=1775668079;\n\tcv=none;\n b=fT3GXWI04Mar2K+vYtjuWh0JrLXRG2sqQiGrrCW/05oFGhfM+MFSN2NgLQsvWcBMqIbN30hiHvC/LpYlmz0Vv3lyQMKcsSDMTeNxcmD/zp3+U1w8bCoWsAcuXqkzEao/Bwxc+gnlB4sAKVtlkmn3iCMtsOlGwmhYvyi1lXDmMWdswIRxjuL752HxpPdjjs+oDp6CyndjMv0jRkXzk31up5CyC4ZoN2q9qMksVT/FCZ7Ls4BAv6R0yCseVXRcpF/NymlMv82c+BuvSKpYpm+nCdWhzAvn6ONBrLJ4JJY6tIBzu+uWiIeJ72s7G1LYI8GA5hn+edxoS1qAV52isyQ4QQ==",
        "ARC-Message-Signature": "i=1; a=rsa-sha256; d=lists.ozlabs.org; s=201707;\n\tt=1775668079; c=relaxed/relaxed;\n\tbh=VSdSCsLD9Vpk5fUzpxZ99SVepfZrzeHuWAV94bCZx6Y=;\n\th=From:Date:Subject:MIME-Version:Content-Type:Message-Id:References:\n\t In-Reply-To:To:Cc;\n b=R1wf3uRXYovuKNJesV21QKz7iSRA81Fw7TUVY3beFLrlzzAmbWk2P0JOAQHgkSyi6nYmvmoTKYQNQFuPmazPcIuxToOzusgU7R/lV9wBoZDcqIzx57wpGCU2BVsj97nKP83uZg0Ctdw9sQrAz0I+tJuYKyKzKhKyER24MYCFHTYKQ8QhRyg+UIUG17YuLZIlRaCG7XU6MXaROhG/+uSkYPA0PV4VWZ3NYqJSOrPZi4E6Rz4rZzjaSB2eVXQbMhJ7Z9we0otehrXxLNdAkhmTqV3lNa5PhZf5JzTDSoyhEws5oGShrg6FAi/3JXc3JQbSIKhmRv67ZAERqamH8HPTgg==",
        "ARC-Authentication-Results": "i=1; lists.ozlabs.org;\n dmarc=pass (p=quarantine dis=none) header.from=kernel.org;\n dkim=pass (2048-bit key;\n unprotected) header.d=kernel.org header.i=@kernel.org header.a=rsa-sha256\n header.s=k20201202 header.b=m3vG4mao; dkim-atps=neutral;\n spf=pass (client-ip=2600:3c0a:e001:78e:0:1991:8:25;\n helo=sea.source.kernel.org;\n envelope-from=devnull+davemarq.linux.ibm.com@kernel.org;\n receiver=lists.ozlabs.org) smtp.mailfrom=kernel.org",
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/simple; d=kernel.org;\n\ts=k20201202; t=1775668069;\n\tbh=zW6pWMZAsiP4lwVlKWyw8uP6eNO/BsUuOmFvMREaXY8=;\n\th=From:Date:Subject:References:In-Reply-To:To:Cc:Reply-To:From;\n\tb=m3vG4maoWB5+Vy2kLXPGw4JUBnitvX0bO9P3dPmBQwXxvC/mviC7R0IvHuasrbE6h\n\t bLs1AtiJlLzHXR/A1AUtFcNJkWVMLfOfE++Ig3h6/JE6qPC9/wbc7im2K3exFQLjOx\n\t fKzUZ32QrQW+J7YRJoEYkeIt0DDXjz3ZbyEzUhF6piiLmyVb9vRVq5usETwszsvD9c\n\t /an35zBdPiT2yiX14qZmNXJd2PM0kGnkkwIg/dbrlQhtutjr1tluRzuPG0IAf6Xubb\n\t uo3z+bX8mUVpLJIZ/WEVNRShfHonZucNMxUm4W+YqT6whWi6Hup5XF+ax4xOo86mxf\n\t rFGkLmC1LFPLg==",
        "From": "Dave Marquardt via B4 Relay <devnull+davemarq.linux.ibm.com@kernel.org>",
        "Date": "Wed, 08 Apr 2026 12:07:45 -0500",
        "Subject": "[PATCH 4/5] ibmvfc: use async sub-queue for FPIN messages",
        "X-Mailing-List": "linuxppc-dev@lists.ozlabs.org",
        "List-Id": "<linuxppc-dev.lists.ozlabs.org>",
        "List-Help": "<mailto:linuxppc-dev+help@lists.ozlabs.org>",
        "List-Owner": "<mailto:linuxppc-dev+owner@lists.ozlabs.org>",
        "List-Post": "<mailto:linuxppc-dev@lists.ozlabs.org>",
        "List-Archive": "<https://lore.kernel.org/linuxppc-dev/>,\n  <https://lists.ozlabs.org/pipermail/linuxppc-dev/>",
        "List-Subscribe": "<mailto:linuxppc-dev+subscribe@lists.ozlabs.org>,\n  <mailto:linuxppc-dev+subscribe-digest@lists.ozlabs.org>,\n  <mailto:linuxppc-dev+subscribe-nomail@lists.ozlabs.org>",
        "List-Unsubscribe": "<mailto:linuxppc-dev+unsubscribe@lists.ozlabs.org>",
        "Precedence": "list",
        "MIME-Version": "1.0",
        "Content-Type": "text/plain; charset=\"utf-8\"",
        "Content-Transfer-Encoding": "7bit",
        "Message-Id": "<20260408-ibmvfc-fpin-support-v1-4-52b06c464e03@linux.ibm.com>",
        "References": "<20260408-ibmvfc-fpin-support-v1-0-52b06c464e03@linux.ibm.com>",
        "In-Reply-To": "<20260408-ibmvfc-fpin-support-v1-0-52b06c464e03@linux.ibm.com>",
        "To": "\"James E.J. Bottomley\" <James.Bottomley@HansenPartnership.com>,\n \"Martin K. Petersen\" <martin.petersen@oracle.com>,\n Madhavan Srinivasan <maddy@linux.ibm.com>,\n Michael Ellerman <mpe@ellerman.id.au>, Nicholas Piggin <npiggin@gmail.com>,\n \"Christophe Leroy (CS GROUP)\" <chleroy@kernel.org>,\n Tyrel Datwyler <tyreld@linux.ibm.com>",
        "Cc": "linux-kernel@vger.kernel.org, linux-scsi@vger.kernel.org,\n linuxppc-dev@lists.ozlabs.org, Brian King <brking@linux.ibm.com>,\n Greg Joyce <gjoyce@linux.ibm.com>, Kyle Mahlkuch <kmahlkuc@linux.ibm.com>,\n Dave Marquardt <davemarq@linux.ibm.com>",
        "X-Mailer": "b4 0.15.1",
        "X-Developer-Signature": "v=1; a=ed25519-sha256; t=1775668068; l=23596;\n i=davemarq@linux.ibm.com; s=20260216; h=from:subject:message-id;\n bh=yxFkgxkcN0WiR0tHGPw55PFL9hgdZ6iAncVKx5i+7t8=;\n b=Cv5rV+VK+yRNI9py9uv+D6Gam8zx+kUIBP6CF4ku06BiP9OVx2TS9OEpoRuwyx/qdTXWfgqyP\n D9kQFd2Wi5qBTAadME4847ncXUjkkqSVCnZ776nS4wt5JJd7rcN/B42",
        "X-Developer-Key": "i=davemarq@linux.ibm.com; a=ed25519;\n pk=vy0/nfobrje6EqZxuyw6a3ZstytG8WK2vf5Y3xtGrEg=",
        "X-Endpoint-Received": "by B4 Relay for davemarq@linux.ibm.com/20260216 with\n auth_id=689",
        "X-Original-From": "Dave Marquardt <davemarq@linux.ibm.com>",
        "Reply-To": "davemarq@linux.ibm.com",
        "X-Spam-Status": "No, score=-0.2 required=3.0 tests=DKIMWL_WL_HIGH,DKIM_SIGNED,\n\tDKIM_VALID,DKIM_VALID_AU,DKIM_VALID_EF,SPF_HELO_NONE,SPF_PASS,\n\tT_FILL_THIS_FORM_SHORT autolearn=disabled version=4.0.1 OzLabs 8",
        "X-Spam-Checker-Version": "SpamAssassin 4.0.1 (2024-03-25) on lists.ozlabs.org"
    },
    "content": "From: Dave Marquardt <davemarq@linux.ibm.com>\n\n- allocate async sub-queue\n- allocate interrupt and set up handler\n- negotiate use of async sub-queue with NPIV (VIOS)\n- refactor ibmvfc_basic_fpin_to_desc() and ibmvfc_full_fpin_to_desc()\n  into common routine\n- add KUnit test to verify async sub-queue is allocated\n---\n drivers/scsi/ibmvscsi/ibmvfc.c       | 325 ++++++++++++++++++++++++++++++++---\n drivers/scsi/ibmvscsi/ibmvfc.h       |  29 +++-\n drivers/scsi/ibmvscsi/ibmvfc_kunit.c |  52 +++---\n 3 files changed, 363 insertions(+), 43 deletions(-)",
    "diff": "diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c\nindex 803fc3caa14d..26e39b367022 100644\n--- a/drivers/scsi/ibmvscsi/ibmvfc.c\n+++ b/drivers/scsi/ibmvscsi/ibmvfc.c\n@@ -1471,6 +1471,13 @@ static void ibmvfc_gather_partition_info(struct ibmvfc_host *vhost)\n \tof_node_put(rootdn);\n }\n \n+static __be64 ibmvfc_npiv_chan_caps[] = {\n+\tcpu_to_be64(IBMVFC_CAN_USE_CHANNELS | IBMVFC_USE_ASYNC_SUBQ |\n+\t\t    IBMVFC_YES_SCSI | IBMVFC_CAN_HANDLE_FPIN),\n+\tcpu_to_be64(IBMVFC_CAN_USE_CHANNELS),\n+};\n+#define IBMVFC_NPIV_CHAN_CAPS_SIZE (sizeof(ibmvfc_npiv_chan_caps)/sizeof(__be64))\n+\n /**\n  * ibmvfc_set_login_info - Setup info for NPIV login\n  * @vhost:\tibmvfc host struct\n@@ -1486,6 +1493,8 @@ static void ibmvfc_set_login_info(struct ibmvfc_host *vhost)\n \tconst char *location;\n \tu16 max_cmds;\n \n+\tENTER;\n+\n \tmax_cmds = scsi_qdepth + IBMVFC_NUM_INTERNAL_REQ;\n \tif (mq_enabled)\n \t\tmax_cmds += (scsi_qdepth + IBMVFC_NUM_INTERNAL_SUBQ_REQ) *\n@@ -1509,8 +1518,12 @@ static void ibmvfc_set_login_info(struct ibmvfc_host *vhost)\n \t\tcpu_to_be64(IBMVFC_CAN_MIGRATE | IBMVFC_CAN_SEND_VF_WWPN |\n \t\t\t    IBMVFC_CAN_USE_NOOP_CMD);\n \n-\tif (vhost->mq_enabled || vhost->using_channels)\n-\t\tlogin_info->capabilities |= cpu_to_be64(IBMVFC_CAN_USE_CHANNELS);\n+\tif (vhost->mq_enabled || vhost->using_channels) {\n+\t\tif (vhost->login_cap_index >= IBMVFC_NPIV_CHAN_CAPS_SIZE)\n+\t\t\tlogin_info->capabilities |= cpu_to_be64(IBMVFC_CAN_USE_CHANNELS);\n+\t\telse\n+\t\t\tlogin_info->capabilities |= ibmvfc_npiv_chan_caps[vhost->login_cap_index];\n+\t}\n \n \tlogin_info->async.va = cpu_to_be64(vhost->async_crq.msg_token);\n \tlogin_info->async.len = cpu_to_be32(async_crq->size *\n@@ -1524,6 +1537,8 @@ static void ibmvfc_set_login_info(struct ibmvfc_host *vhost)\n \tlocation = of_get_property(of_node, \"ibm,loc-code\", NULL);\n \tlocation = location ? 
location : dev_name(vhost->dev);\n \tstrscpy(login_info->drc_name, location, sizeof(login_info->drc_name));\n+\n+\tLEAVE;\n }\n \n /**\n@@ -3323,7 +3338,7 @@ ibmvfc_common_fpin_to_desc(u8 fpin_status, __be64 wwpn, __be16 modifier,\n  * non-NULL - pointer to populated struct fc_els_fpin\n  */\n static struct fc_els_fpin *\n-/*XXX*/ibmvfc_basic_fpin_to_desc(struct ibmvfc_async_crq *crq)\n+ibmvfc_basic_fpin_to_desc(struct ibmvfc_async_crq *crq)\n {\n \treturn ibmvfc_common_fpin_to_desc(crq->fpin_status, crq->wwpn,\n \t\t\t\t\t  cpu_to_be16(0),\n@@ -3332,6 +3347,29 @@ static struct fc_els_fpin *\n \t\t\t\t\t  cpu_to_be32(1));\n }\n \n+/**\n+ * ibmvfc_full_fpin_to_desc(): allocate and populate a struct fc_els_fpin struct\n+ * containing a descriptor.\n+ * @ibmvfc_fpin: Pointer to async subq FPIN data\n+ *\n+ * Allocate a struct fc_els_fpin containing a descriptor and populate\n+ * based on data from *ibmvfc_fpin.\n+ *\n+ * Return:\n+ * NULL     - unable to allocate structure\n+ * non-NULL - pointer to populated struct fc_els_fpin\n+ */\n+static struct fc_els_fpin *\n+ibmvfc_full_fpin_to_desc(struct ibmvfc_async_subq *ibmvfc_fpin)\n+{\n+\treturn ibmvfc_common_fpin_to_desc(ibmvfc_fpin->fpin_status,\n+\t\t\t\t\t  ibmvfc_fpin->wwpn,\n+\t\t\t\t\t  cpu_to_be16(0),\n+\t\t\t\t\t  cpu_to_be32(IBMVFC_FPIN_DEFAULT_EVENT_PERIOD),\n+\t\t\t\t\t  cpu_to_be32(IBMVFC_FPIN_DEFAULT_EVENT_THRESHOLD),\n+\t\t\t\t\t  cpu_to_be32(1));\n+}\n+\n /**\n  * ibmvfc_handle_async - Handle an async event from the adapter\n  * @crq:\tcrq to process\n@@ -3449,6 +3487,120 @@ VISIBLE_IF_KUNIT void ibmvfc_handle_async(struct ibmvfc_async_crq *crq,\n }\n EXPORT_SYMBOL_IF_KUNIT(ibmvfc_handle_async);\n \n+VISIBLE_IF_KUNIT void ibmvfc_handle_asyncq(struct ibmvfc_crq *crq_instance,\n+\t\t\t\t\t   struct ibmvfc_host *vhost,\n+\t\t\t\t\t   struct list_head *evt_doneq)\n+{\n+\tstruct ibmvfc_async_subq *crq = (struct ibmvfc_async_subq *)crq_instance;\n+\tconst struct ibmvfc_async_desc *desc = 
ibmvfc_get_ae_desc(be16_to_cpu(crq->event));\n+\tstruct ibmvfc_target *tgt;\n+\tstruct fc_els_fpin *fpin;\n+\n+\tibmvfc_log(vhost, desc->log_level,\n+\t\t   \"%s event received. wwpn: %llx, node_name: %llx%s event 0x%x\\n\",\n+\t\t   desc->desc, be64_to_cpu(crq->wwpn), be64_to_cpu(crq->id.node_name),\n+\t\t   ibmvfc_get_link_state(crq->link_state), be16_to_cpu(crq->event));\n+\n+\tswitch (be16_to_cpu(crq->event)) {\n+\tcase IBMVFC_AE_RESUME:\n+\t\tswitch (crq->link_state) {\n+\t\tcase IBMVFC_AE_LS_LINK_DOWN:\n+\t\t\tibmvfc_link_down(vhost, IBMVFC_LINK_DOWN);\n+\t\t\tbreak;\n+\t\tcase IBMVFC_AE_LS_LINK_DEAD:\n+\t\t\tibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);\n+\t\t\tbreak;\n+\t\tcase IBMVFC_AE_LS_LINK_UP:\n+\t\tcase IBMVFC_AE_LS_LINK_BOUNCED:\n+\t\tdefault:\n+\t\t\tvhost->events_to_log |= IBMVFC_AE_LINKUP;\n+\t\t\tvhost->delay_init = 1;\n+\t\t\t__ibmvfc_reset_host(vhost);\n+\t\t\tbreak;\n+\t\t}\n+\n+\t\tbreak;\n+\tcase IBMVFC_AE_LINK_UP:\n+\t\tvhost->events_to_log |= IBMVFC_AE_LINKUP;\n+\t\tvhost->delay_init = 1;\n+\t\t__ibmvfc_reset_host(vhost);\n+\t\tbreak;\n+\tcase IBMVFC_AE_SCN_FABRIC:\n+\tcase IBMVFC_AE_SCN_DOMAIN:\n+\t\tvhost->events_to_log |= IBMVFC_AE_RSCN;\n+\t\tif (vhost->state < IBMVFC_HALTED) {\n+\t\t\tvhost->delay_init = 1;\n+\t\t\t__ibmvfc_reset_host(vhost);\n+\t\t}\n+\t\tbreak;\n+\tcase IBMVFC_AE_SCN_NPORT:\n+\tcase IBMVFC_AE_SCN_GROUP:\n+\t\tvhost->events_to_log |= IBMVFC_AE_RSCN;\n+\t\tibmvfc_reinit_host(vhost);\n+\t\tbreak;\n+\tcase IBMVFC_AE_ELS_LOGO:\n+\tcase IBMVFC_AE_ELS_PRLO:\n+\tcase IBMVFC_AE_ELS_PLOGI:\n+\t\tlist_for_each_entry(tgt, &vhost->targets, queue) {\n+\t\t\tif (!crq->wwpn && !crq->id.node_name)\n+\t\t\t\tbreak;\n+\t#ifdef notyet\n+\t\t\tif (cpu_to_be64(tgt->scsi_id) != acrq->scsi_id)\n+\t\t\t\tcontinue;\n+\t#endif\n+\t\t\tif (crq->wwpn && cpu_to_be64(tgt->ids.port_name) != crq->wwpn)\n+\t\t\t\tcontinue;\n+\t\t\tif (crq->id.node_name &&\n+\t\t\t    cpu_to_be64(tgt->ids.node_name) != crq->id.node_name)\n+\t\t\t\tcontinue;\n+\t\t\tif 
(tgt->need_login && be64_to_cpu(crq->event) == IBMVFC_AE_ELS_LOGO)\n+\t\t\t\ttgt->logo_rcvd = 1;\n+\t\t\tif (!tgt->need_login || be64_to_cpu(crq->event) == IBMVFC_AE_ELS_PLOGI) {\n+\t\t\t\tibmvfc_del_tgt(tgt);\n+\t\t\t\tibmvfc_reinit_host(vhost);\n+\t\t\t}\n+\t\t}\n+\t\tbreak;\n+\tcase IBMVFC_AE_LINK_DOWN:\n+\tcase IBMVFC_AE_ADAPTER_FAILED:\n+\t\tibmvfc_link_down(vhost, IBMVFC_LINK_DOWN);\n+\t\tbreak;\n+\tcase IBMVFC_AE_LINK_DEAD:\n+\t\tibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);\n+\t\tbreak;\n+\tcase IBMVFC_AE_HALT:\n+\t\tibmvfc_link_down(vhost, IBMVFC_HALTED);\n+\t\tbreak;\n+\tcase IBMVFC_AE_FPIN:\n+\t\tif (!crq->wwpn && !crq->id.node_name)\n+\t\t\tbreak;\n+\t\tlist_for_each_entry(tgt, &vhost->targets, queue) {\n+\t\t\tif (crq->wwpn && cpu_to_be64(tgt->ids.port_name) != crq->wwpn)\n+\t\t\t\tcontinue;\n+\t\t\tif (crq->id.node_name &&\n+\t\t\t    cpu_to_be64(tgt->ids.node_name) != crq->id.node_name)\n+\t\t\t\tcontinue;\n+\t\t\tif (!tgt->rport)\n+\t\t\t\tcontinue;\n+\t\t\tfpin = ibmvfc_full_fpin_to_desc(crq);\n+\t\t\tif (fpin) {\n+\t\t\t\tfc_host_fpin_rcv(tgt->vhost->host,\n+\t\t\t\t\t\t sizeof(*fpin) + be32_to_cpu(fpin->desc_len),\n+\t\t\t\t\t\t (char *)fpin, 0);\n+\t\t\t\tkfree(fpin);\n+\t\t\t} else\n+\t\t\t\tdev_err(vhost->dev,\n+\t\t\t\t\t\"FPIN event %u received, unable to process\\n\",\n+\t\t\t\t\tcrq->fpin_status);\n+\t\t}\n+\t\tbreak;\n+\tdefault:\n+\t\tdev_err(vhost->dev, \"Unknown async event received: %d\\n\", crq->event);\n+\t\tbreak;\n+\t}\n+}\n+EXPORT_SYMBOL_IF_KUNIT(ibmvfc_handle_asyncq);\n+\n /**\n  * ibmvfc_handle_crq - Handles and frees received events in the CRQ\n  * @crq:\tCommand/Response queue\n@@ -3500,6 +3652,7 @@ VISIBLE_IF_KUNIT void ibmvfc_handle_crq(struct ibmvfc_crq *crq, struct ibmvfc_ho\n \t\t\tdev_err(vhost->dev, \"Host partner adapter deregistered or failed (rc=%d)\\n\", crq->format);\n \t\t\tibmvfc_purge_requests(vhost, DID_ERROR);\n \t\t\tibmvfc_link_down(vhost, IBMVFC_LINK_DOWN);\n+\t\t\tvhost->login_cap_index++;\n 
\t\t\tibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_RESET);\n \t\t} else {\n \t\t\tdev_err(vhost->dev, \"Received unknown transport event from partner (rc=%d)\\n\", crq->format);\n@@ -4078,6 +4231,13 @@ static void ibmvfc_handle_scrq(struct ibmvfc_crq *crq, struct ibmvfc_host *vhost\n \tspin_unlock(&evt->queue->l_lock);\n }\n \n+/**\n+ * ibmvfc_next_scrq - Returns the next entry in message subqueue\n+ * @scrq:\tPointer to message subqueue\n+ *\n+ * Returns:\n+ *\tPointer to next entry in queue / NULL if empty\n+ **/\n static struct ibmvfc_crq *ibmvfc_next_scrq(struct ibmvfc_queue *scrq)\n {\n \tstruct ibmvfc_crq *crq;\n@@ -4093,6 +4253,65 @@ static struct ibmvfc_crq *ibmvfc_next_scrq(struct ibmvfc_queue *scrq)\n \treturn crq;\n }\n \n+static void ibmvfc_drain_async_subq(struct ibmvfc_queue *scrq)\n+{\n+\tstruct ibmvfc_crq *crq;\n+\tstruct ibmvfc_event *evt, *temp;\n+\tunsigned long flags;\n+\tint done = 0;\n+\tLIST_HEAD(evt_doneq);\n+\n+\tENTER;\n+\n+\tspin_lock_irqsave(scrq->q_lock, flags);\n+\twhile (!done) {\n+\t\twhile ((crq = ibmvfc_next_scrq(scrq)) != NULL) {\n+\t\t\tibmvfc_handle_asyncq(crq, scrq->vhost, &evt_doneq);\n+\t\t\tcrq->valid = 0;\n+\t\t\twmb();\n+\t\t}\n+\n+\t\tibmvfc_toggle_scrq_irq(scrq, 1);\n+\t\tcrq = ibmvfc_next_scrq(scrq);\n+\t\tif (crq != NULL) {\n+\t\t\tibmvfc_toggle_scrq_irq(scrq, 0);\n+\t\t\tibmvfc_handle_asyncq(crq, scrq->vhost, &evt_doneq);\n+\t\t\tcrq->valid = 0;\n+\t\t\twmb();\n+\t\t} else\n+\t\t\tdone = 1;\n+\t}\n+\tspin_unlock_irqrestore(scrq->q_lock, flags);\n+\n+\tlist_for_each_entry_safe(evt, temp, &evt_doneq, queue_list) {\n+\t\ttimer_delete(&evt->timer);\n+\t\tlist_del(&evt->queue_list);\n+\t\tibmvfc_trc_end(evt);\n+\t\tevt->done(evt);\n+\t}\n+\tLEAVE;\n+}\n+\n+/**\n+ * ibmvfc_interrupt_asyncq - Handle an async event from the adapter\n+ * @irq:           interrupt request\n+ * @scrq_instance: async subq\n+ *\n+ **/\n+static irqreturn_t ibmvfc_interrupt_asyncq(int irq, void *scrq_instance)\n+{\n+\tstruct ibmvfc_queue *scrq = 
(struct ibmvfc_queue *)scrq_instance;\n+\n+\tENTER;\n+\n+\tibmvfc_toggle_scrq_irq(scrq, 0);\n+\tibmvfc_drain_async_subq(scrq);\n+\n+\tLEAVE;\n+\n+\treturn IRQ_HANDLED;\n+}\n+\n static void ibmvfc_drain_sub_crq(struct ibmvfc_queue *scrq)\n {\n \tstruct ibmvfc_crq *crq;\n@@ -5316,6 +5535,8 @@ static void ibmvfc_channel_setup_done(struct ibmvfc_event *evt)\n \t\t\tfor (i = 0; i < active_queues; i++)\n \t\t\t\tscrqs->scrqs[i].vios_cookie =\n \t\t\t\t\tbe64_to_cpu(setup->channel_handles[i]);\n+\t\t\tscrqs->async_scrq->vios_cookie =\n+\t\t\t\tbe64_to_cpu(setup->asyncSubqHandle);\n \n \t\t\tibmvfc_dbg(vhost, \"Using %u channels\\n\",\n \t\t\t\t   vhost->scsi_scrqs.active_queues);\n@@ -5366,6 +5587,7 @@ static void ibmvfc_channel_setup(struct ibmvfc_host *vhost)\n \t\tsetup_buf->num_scsi_subq_channels = cpu_to_be32(num_channels);\n \t\tfor (i = 0; i < num_channels; i++)\n \t\t\tsetup_buf->channel_handles[i] = cpu_to_be64(scrqs->scrqs[i].cookie);\n+\t\tsetup_buf->asyncSubqHandle = cpu_to_be64(scrqs->async_scrq->cookie);\n \t}\n \n \tibmvfc_init_event(evt, ibmvfc_channel_setup_done, IBMVFC_MAD_FORMAT);\n@@ -5461,6 +5683,8 @@ static void ibmvfc_npiv_login_done(struct ibmvfc_event *evt)\n \tunsigned int npiv_max_sectors;\n \tint level = IBMVFC_DEFAULT_LOG_LEVEL;\n \n+\tENTER;\n+\n \tswitch (mad_status) {\n \tcase IBMVFC_MAD_SUCCESS:\n \t\tibmvfc_free_event(evt);\n@@ -5540,6 +5764,8 @@ static void ibmvfc_npiv_login_done(struct ibmvfc_event *evt)\n \t\tibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_QUERY);\n \t\twake_up(&vhost->work_wait_q);\n \t}\n+\n+\tLEAVE;\n }\n \n /**\n@@ -6188,14 +6414,26 @@ static int ibmvfc_init_crq(struct ibmvfc_host *vhost)\n \treturn retrc;\n }\n \n-static int ibmvfc_register_channel(struct ibmvfc_host *vhost,\n-\t\t\t\t   struct ibmvfc_channels *channels,\n-\t\t\t\t   int index)\n+static inline char *ibmvfc_channel_index(struct ibmvfc_channels *channels,\n+\t\t\t\t\t struct ibmvfc_queue *scrq,\n+\t\t\t\t\t char *buf, size_t bufsize)\n+{\n+\tif 
(scrq < channels->scrqs || scrq >= channels->scrqs + channels->active_queues)\n+\t\tstrscpy(buf, \"async\", 6);\n+\telse\n+\t\tsnprintf(buf, bufsize, \"%ld\", scrq - channels->scrqs);\n+\treturn buf;\n+}\n+\n+static int ibmvfc_register_channel_handler(struct ibmvfc_host *vhost,\n+\t\t\t\t\t   struct ibmvfc_channels *channels,\n+\t\t\t\t\t   struct ibmvfc_queue *scrq,\n+\t\t\t\t\t   irq_handler_t irq)\n {\n \tstruct device *dev = vhost->dev;\n \tstruct vio_dev *vdev = to_vio_dev(dev);\n-\tstruct ibmvfc_queue *scrq = &channels->scrqs[index];\n \tint rc = -ENOMEM;\n+\tchar buf[16];\n \n \tENTER;\n \n@@ -6214,20 +6452,23 @@ static int ibmvfc_register_channel(struct ibmvfc_host *vhost,\n \n \tif (!scrq->irq) {\n \t\trc = -EINVAL;\n-\t\tdev_err(dev, \"Error mapping sub-crq[%d] irq\\n\", index);\n+\t\tdev_err(dev, \"Error mapping sub-crq[%s] irq\\n\",\n+\t\t\tibmvfc_channel_index(channels, scrq, buf, sizeof(buf)));\n \t\tgoto irq_failed;\n \t}\n \n \tswitch (channels->protocol) {\n \tcase IBMVFC_PROTO_SCSI:\n-\t\tsnprintf(scrq->name, sizeof(scrq->name), \"ibmvfc-%x-scsi%d\",\n-\t\t\t vdev->unit_address, index);\n-\t\tscrq->handler = ibmvfc_interrupt_mq;\n+\t\tsnprintf(scrq->name, sizeof(scrq->name), \"ibmvfc-%x-scsi%s\",\n+\t\t\t vdev->unit_address,\n+\t\t\t ibmvfc_channel_index(channels, scrq, buf, sizeof(buf)));\n+\t\tscrq->handler = irq;\n \t\tbreak;\n \tcase IBMVFC_PROTO_NVME:\n-\t\tsnprintf(scrq->name, sizeof(scrq->name), \"ibmvfc-%x-nvmf%d\",\n-\t\t\t vdev->unit_address, index);\n-\t\tscrq->handler = ibmvfc_interrupt_mq;\n+\t\tsnprintf(scrq->name, sizeof(scrq->name), \"ibmvfc-%x-nvmf%s\",\n+\t\t\t vdev->unit_address,\n+\t\t\t ibmvfc_channel_index(channels, scrq, buf, sizeof(buf)));\n+\t\tscrq->handler = irq;\n \t\tbreak;\n \tdefault:\n \t\tdev_err(dev, \"Unknown channel protocol (%d)\\n\",\n@@ -6238,12 +6479,14 @@ static int ibmvfc_register_channel(struct ibmvfc_host *vhost,\n \trc = request_irq(scrq->irq, scrq->handler, 0, scrq->name, scrq);\n \n \tif (rc) 
{\n-\t\tdev_err(dev, \"Couldn't register sub-crq[%d] irq\\n\", index);\n+\t\tdev_err(dev, \"Couldn't register sub-crq[%s] irq\\n\",\n+\t\t\tibmvfc_channel_index(channels, scrq, buf, sizeof(buf)));\n \t\tirq_dispose_mapping(scrq->irq);\n \t\tgoto irq_failed;\n \t}\n \n-\tscrq->hwq_id = index;\n+\tif (scrq >= channels->scrqs && scrq < channels->scrqs + channels->active_queues)\n+\t\tscrq->hwq_id = scrq - channels->scrqs;\n \n \tLEAVE;\n \treturn 0;\n@@ -6257,13 +6500,21 @@ static int ibmvfc_register_channel(struct ibmvfc_host *vhost,\n \treturn rc;\n }\n \n+static inline int\n+ibmvfc_register_channel(struct ibmvfc_host *vhost,\n+\t\t\tstruct ibmvfc_channels *channels,\n+\t\t\tstruct ibmvfc_queue *scrq)\n+{\n+\treturn ibmvfc_register_channel_handler(vhost, channels, scrq, ibmvfc_interrupt_mq);\n+}\n+\n static void ibmvfc_deregister_channel(struct ibmvfc_host *vhost,\n \t\t\t\t      struct ibmvfc_channels *channels,\n-\t\t\t\t      int index)\n+\t\t\t\t      struct ibmvfc_queue *scrq)\n {\n \tstruct device *dev = vhost->dev;\n \tstruct vio_dev *vdev = to_vio_dev(dev);\n-\tstruct ibmvfc_queue *scrq = &channels->scrqs[index];\n+\tchar buf[16];\n \tlong rc;\n \n \tENTER;\n@@ -6278,7 +6529,8 @@ static void ibmvfc_deregister_channel(struct ibmvfc_host *vhost,\n \t} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));\n \n \tif (rc)\n-\t\tdev_err(dev, \"Failed to free sub-crq[%d]: rc=%ld\\n\", index, rc);\n+\t\tdev_err(dev, \"Failed to free sub-crq[%s]: rc=%ld\\n\",\n+\t\t\tibmvfc_channel_index(channels, scrq, buf, sizeof(buf)), rc);\n \n \t/* Clean out the queue */\n \tmemset(scrq->msgs.crq, 0, PAGE_SIZE);\n@@ -6296,10 +6548,19 @@ static void ibmvfc_reg_sub_crqs(struct ibmvfc_host *vhost,\n \tif (!vhost->mq_enabled || !channels->scrqs)\n \t\treturn;\n \n+\tif (ibmvfc_register_channel_handler(vhost, channels,\n+\t\t\t\t\t    channels->async_scrq,\n+\t\t\t\t\t    ibmvfc_interrupt_asyncq))\n+\t\treturn;\n+\n \tfor (i = 0; i < channels->max_queues; i++) {\n-\t\tif 
(ibmvfc_register_channel(vhost, channels, i)) {\n+\t\tif (ibmvfc_register_channel(vhost, channels, &channels->scrqs[i])) {\n \t\t\tfor (j = i; j > 0; j--)\n-\t\t\t\tibmvfc_deregister_channel(vhost, channels, j - 1);\n+\t\t\t\tibmvfc_deregister_channel(\n+\t\t\t\t\tvhost, channels, &channels->scrqs[j - 1]);\n+\t\t\tibmvfc_deregister_channel(vhost, channels,\n+\t\t\t\t\t\t\tchannels->async_scrq);\n+\n \t\t\tvhost->do_enquiry = 0;\n \t\t\treturn;\n \t\t}\n@@ -6318,7 +6579,8 @@ static void ibmvfc_dereg_sub_crqs(struct ibmvfc_host *vhost,\n \t\treturn;\n \n \tfor (i = 0; i < channels->max_queues; i++)\n-\t\tibmvfc_deregister_channel(vhost, channels, i);\n+\t\tibmvfc_deregister_channel(vhost, channels, &channels->scrqs[i]);\n+\tibmvfc_deregister_channel(vhost, channels, channels->async_scrq);\n \n \tLEAVE;\n }\n@@ -6334,6 +6596,21 @@ static int ibmvfc_alloc_channels(struct ibmvfc_host *vhost,\n \tif (!channels->scrqs)\n \t\treturn -ENOMEM;\n \n+\tchannels->async_scrq = kzalloc_obj(*channels->async_scrq, GFP_KERNEL);\n+\n+\tif (!channels->async_scrq) {\n+\t\tkfree(channels->scrqs);\n+\t\treturn -ENOMEM;\n+\t}\n+\n+\trc = ibmvfc_alloc_queue(vhost, channels->async_scrq,\n+\t\t\t\tIBMVFC_SUB_CRQ_FMT);\n+\tif (rc) {\n+\t\tkfree(channels->scrqs);\n+\t\tkfree(channels->async_scrq);\n+\t\treturn rc;\n+\t}\n+\n \tfor (i = 0; i < channels->max_queues; i++) {\n \t\tscrq = &channels->scrqs[i];\n \t\trc = ibmvfc_alloc_queue(vhost, scrq, IBMVFC_SUB_CRQ_FMT);\n@@ -6345,6 +6622,9 @@ static int ibmvfc_alloc_channels(struct ibmvfc_host *vhost,\n \t\t\tkfree(channels->scrqs);\n \t\t\tchannels->scrqs = NULL;\n \t\t\tchannels->active_queues = 0;\n+\t\t\tibmvfc_free_queue(vhost, channels->async_scrq);\n+\t\t\tkfree(channels->async_scrq);\n+\t\t\tchannels->async_scrq = NULL;\n \t\t\treturn rc;\n \t\t}\n \t}\n@@ -6629,6 +6909,7 @@ static int ibmvfc_probe(struct vio_dev *vdev, const struct vio_device_id *id)\n \tvhost->using_channels = 0;\n \tvhost->do_enquiry = 1;\n \tvhost->scan_timeout = 
0;\n+\tvhost->login_cap_index = 0;\n \n \tstrcpy(vhost->partition_name, \"UNKNOWN\");\n \tinit_waitqueue_head(&vhost->work_wait_q);\ndiff --git a/drivers/scsi/ibmvscsi/ibmvfc.h b/drivers/scsi/ibmvscsi/ibmvfc.h\nindex 4f680c5d9558..b9f22613d144 100644\n--- a/drivers/scsi/ibmvscsi/ibmvfc.h\n+++ b/drivers/scsi/ibmvscsi/ibmvfc.h\n@@ -182,6 +182,9 @@ struct ibmvfc_npiv_login {\n #define IBMVFC_CAN_HANDLE_FPIN\t\t0x04\n #define IBMVFC_CAN_USE_MAD_VERSION\t0x08\n #define IBMVFC_CAN_SEND_VF_WWPN\t\t0x10\n+#define IBMVFC_YES_NVMEOF\t\t0x20\n+#define IBMVFC_YES_SCSI\t\t\t0x40\n+#define IBMVFC_USE_ASYNC_SUBQ\t\t0x100\n #define IBMVFC_CAN_USE_NOOP_CMD\t\t0x200\n \t__be64 node_name;\n \tstruct srp_direct_buf async;\n@@ -231,6 +234,7 @@ struct ibmvfc_npiv_login_resp {\n #define IBMVFC_CAN_SUPPORT_CHANNELS\t0x80\n #define IBMVFC_SUPPORT_NVMEOF\t\t0x100\n #define IBMVFC_SUPPORT_SCSI\t\t0x200\n+#define IBMVFC_SUPPORT_ASYNC_SUBQ\t0x800\n #define IBMVFC_SUPPORT_NOOP_CMD\t\t0x1000\n \t__be32 max_cmds;\n \t__be32 scsi_id_sz;\n@@ -565,7 +569,7 @@ struct ibmvfc_channel_setup_mad {\n \tstruct srp_direct_buf buffer;\n } __packed __aligned(8);\n \n-#define IBMVFC_MAX_CHANNELS\t502\n+#define IBMVFC_MAX_CHANNELS\t501\n \n struct ibmvfc_channel_setup {\n \t__be32 flags;\n@@ -580,6 +584,7 @@ struct ibmvfc_channel_setup {\n \tstruct srp_direct_buf buffer;\n \t__be64 reserved2[5];\n \t__be64 channel_handles[IBMVFC_MAX_CHANNELS];\n+\t__be64 asyncSubqHandle;\n } __packed __aligned(8);\n \n struct ibmvfc_connection_info {\n@@ -710,6 +715,25 @@ struct ibmvfc_async_crq {\n \t__be64 reserved;\n } __packed __aligned(8);\n \n+struct ibmvfc_async_subq {\n+\tvolatile u8 valid;\n+#define IBMVFC_ASYNC_ID_IS_ASSOC_ID\t0x01\n+#define IBMVFC_FC_EEH\t\t\t0x04\n+#define IBMVFC_FC_FW_UPDATE\t\t0x08\n+#define IBMVFC_FC_FW_DUMP\t\t0x10\n+\tu8 flags;\n+\tu8 link_state;\n+\tu8 fpin_status;\n+\t__be16 event;\n+\t__be16 pad;\n+\tvolatile __be64 wwpn;\n+\tvolatile __be64 nport_id;\n+\tunion {\n+\t\t__be64 
node_name;\n+\t\t__be64 assoc_id;\n+\t} id;\n+} __packed __aligned(8);\n+\n union ibmvfc_iu {\n \tstruct ibmvfc_mad_common mad_common;\n \tstruct ibmvfc_npiv_login_mad npiv_login;\n@@ -849,6 +873,7 @@ struct ibmvfc_queue {\n \n struct ibmvfc_channels {\n \tstruct ibmvfc_queue *scrqs;\n+\tstruct ibmvfc_queue *async_scrq;\n \tenum ibmvfc_protocol protocol;\n \tunsigned int active_queues;\n \tunsigned int desired_queues;\n@@ -989,6 +1014,8 @@ static inline int ibmvfc_check_caps(struct ibmvfc_host *vhost, unsigned long cap\n \n #ifdef VISIBLE_IF_KUNIT\n VISIBLE_IF_KUNIT void ibmvfc_handle_async(struct ibmvfc_async_crq *crq, struct ibmvfc_host *vhost);\n+VISIBLE_IF_KUNIT void ibmvfc_handle_asyncq(struct ibmvfc_crq *crq_instance,\n+\t\t\t\t\t   struct ibmvfc_host *vhost, struct list_head *evt_doneq);\n VISIBLE_IF_KUNIT struct list_head *ibmvfc_get_headp(void);\n VISIBLE_IF_KUNIT void ibmvfc_handle_crq(struct ibmvfc_crq *crq,\n \t\t\t\t\tstruct ibmvfc_host *vhost,\ndiff --git a/drivers/scsi/ibmvscsi/ibmvfc_kunit.c b/drivers/scsi/ibmvscsi/ibmvfc_kunit.c\nindex 3359e4ebebe2..3a41127c4e81 100644\n--- a/drivers/scsi/ibmvscsi/ibmvfc_kunit.c\n+++ b/drivers/scsi/ibmvscsi/ibmvfc_kunit.c\n@@ -22,14 +22,14 @@ MODULE_IMPORT_NS(\"EXPORTED_FOR_KUNIT_TESTING\");\n static void ibmvfc_handle_fpin_event_test(struct kunit *test)\n {\n \tu64 *stats[IBMVFC_AE_FPIN_CONGESTION_CLEARED + 1] = { NULL };\n-\tu64 post[IBMVFC_AE_FPIN_CONGESTION_CLEARED + 1];\n-\tu64 pre[IBMVFC_AE_FPIN_CONGESTION_CLEARED + 1];\n \tenum ibmvfc_ae_fpin_status fs;\n-\tstruct ibmvfc_async_crq crq;\n+\tstruct ibmvfc_async_subq crq;\n \tstruct ibmvfc_target *tgt;\n \tstruct ibmvfc_host *vhost;\n \tstruct list_head *queue;\n \tstruct list_head *headp;\n+\tLIST_HEAD(evt_doneq);\n+\tu64 pre, post;\n \n \n \theadp = ibmvfc_get_headp();\n@@ -52,31 +52,23 @@ static void ibmvfc_handle_fpin_event_test(struct kunit *test)\n \t\tcrq.valid = 0x80;\n \t\tcrq.link_state = IBMVFC_AE_LS_LINK_UP;\n \t\tcrq.fpin_status = 
fs;\n-\t\tcrq.event = cpu_to_be64(IBMVFC_AE_FPIN);\n-\t\tcrq.scsi_id = cpu_to_be64(tgt->scsi_id);\n+\t\tcrq.event = cpu_to_be16(IBMVFC_AE_FPIN);\n \t\tcrq.wwpn = cpu_to_be64(tgt->wwpn);\n-\t\tcrq.node_name = cpu_to_be64(tgt->ids.node_name);\n-\t\tpre[fs] = *stats[fs];\n-\t\tibmvfc_handle_async(&crq, vhost);\n-\t\tpost[fs] = *stats[fs];\n-\t\tKUNIT_EXPECT_EQ(test, post[fs], pre[fs]+1);\n+\t\tcrq.id.node_name = cpu_to_be64(tgt->ids.node_name);\n+\t\tpre = *stats[fs];\n+\t\tibmvfc_handle_asyncq((struct ibmvfc_crq *)&crq, vhost, &evt_doneq);\n+\t\tpost = *stats[fs];\n+\t\tKUNIT_EXPECT_EQ(test, post, pre+1);\n \t}\n \n \t/* bad path */\n-\tfor (fs = IBMVFC_AE_FPIN_LINK_CONGESTED; fs <= IBMVFC_AE_FPIN_CONGESTION_CLEARED; fs++)\n-\t\tpre[fs] = *stats[fs];\n \tcrq.valid = 0x80;\n \tcrq.link_state = IBMVFC_AE_LS_LINK_UP;\n \tcrq.fpin_status = 0; /* bad value */\n-\tcrq.event = cpu_to_be64(IBMVFC_AE_FPIN);\n-\tcrq.scsi_id = cpu_to_be64(tgt->scsi_id);\n+\tcrq.event = cpu_to_be16(IBMVFC_AE_FPIN);\n \tcrq.wwpn = cpu_to_be64(tgt->wwpn);\n-\tcrq.node_name = cpu_to_be64(tgt->ids.node_name);\n-\tibmvfc_handle_async(&crq, vhost);\n-\tfor (fs = IBMVFC_AE_FPIN_LINK_CONGESTED; fs <= IBMVFC_AE_FPIN_CONGESTION_CLEARED; fs++) {\n-\t\tpost[fs] = *stats[fs];\n-\t\tKUNIT_EXPECT_EQ(test, pre[fs], post[fs]);\n-\t}\n+\tcrq.id.node_name = cpu_to_be64(tgt->ids.node_name);\n+\tibmvfc_handle_asyncq((struct ibmvfc_crq *)&crq, vhost, &evt_doneq);\n }\n \n /**\n@@ -105,9 +97,29 @@ static void ibmvfc_noop_test(struct kunit *test)\n \tibmvfc_handle_crq(&crq, vhost, &evtq);\n }\n \n+/**\n+ * ibmvfc_async_subq_test - unit test for allocating async subqueue\n+ * @test: pointer to kunit structure\n+ *\n+ * Return: void\n+ */\n+static void ibmvfc_async_subq_test(struct kunit *test)\n+{\n+\tstruct ibmvfc_host *vhost;\n+\tstruct list_head *queue;\n+\tstruct list_head *headp;\n+\n+\theadp = ibmvfc_get_headp();\n+\tqueue = headp->next;\n+\tvhost = container_of(queue, struct ibmvfc_host, 
queue);\n+\n+\tKUNIT_EXPECT_NOT_NULL(test, vhost->scsi_scrqs.async_scrq);\n+}\n+\n static struct kunit_case ibmvfc_fpin_test_cases[] = {\n \tKUNIT_CASE(ibmvfc_handle_fpin_event_test),\n \tKUNIT_CASE(ibmvfc_noop_test),\n+\tKUNIT_CASE(ibmvfc_async_subq_test),\n \t{},\n };\n \n",
    "prefixes": [
        "4/5"
    ]
}