get:
Show a patch.

patch:
Partially update a patch (only the fields provided are modified).

put:
Update a patch.

GET /api/1.1/patches/2221937/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 2221937,
    "url": "http://patchwork.ozlabs.org/api/1.1/patches/2221937/?format=api",
    "web_url": "http://patchwork.ozlabs.org/project/linux-pci/patch/20260410-sdxi-base-v1-23-1d184cb5c60a@amd.com/",
    "project": {
        "id": 28,
        "url": "http://patchwork.ozlabs.org/api/1.1/projects/28/?format=api",
        "name": "Linux PCI development",
        "link_name": "linux-pci",
        "list_id": "linux-pci.vger.kernel.org",
        "list_email": "linux-pci@vger.kernel.org",
        "web_url": null,
        "scm_url": null,
        "webscm_url": null
    },
    "msgid": "<20260410-sdxi-base-v1-23-1d184cb5c60a@amd.com>",
    "date": "2026-04-10T13:07:33",
    "name": "[23/23] dmaengine: sdxi: Add DMA engine provider",
    "commit_ref": null,
    "pull_url": null,
    "state": "new",
    "archived": false,
    "hash": "763ade9fe8e493b2fd118eafb599c0320ca81f10",
    "submitter": {
        "id": 91626,
        "url": "http://patchwork.ozlabs.org/api/1.1/people/91626/?format=api",
        "name": "Nathan Lynch via B4 Relay",
        "email": "devnull+nathan.lynch.amd.com@kernel.org"
    },
    "delegate": null,
    "mbox": "http://patchwork.ozlabs.org/project/linux-pci/patch/20260410-sdxi-base-v1-23-1d184cb5c60a@amd.com/mbox/",
    "series": [
        {
            "id": 499458,
            "url": "http://patchwork.ozlabs.org/api/1.1/series/499458/?format=api",
            "web_url": "http://patchwork.ozlabs.org/project/linux-pci/list/?series=499458",
            "date": "2026-04-10T13:07:10",
            "name": "dmaengine: Smart Data Accelerator Interface (SDXI) basic support",
            "version": 1,
            "mbox": "http://patchwork.ozlabs.org/series/499458/mbox/"
        }
    ],
    "comments": "http://patchwork.ozlabs.org/api/patches/2221937/comments/",
    "check": "pending",
    "checks": "http://patchwork.ozlabs.org/api/patches/2221937/checks/",
    "tags": {},
    "headers": {
        "Return-Path": "\n <linux-pci+bounces-52333-incoming=patchwork.ozlabs.org@vger.kernel.org>",
        "X-Original-To": [
            "incoming@patchwork.ozlabs.org",
            "linux-pci@vger.kernel.org"
        ],
        "Delivered-To": "patchwork-incoming@legolas.ozlabs.org",
        "Authentication-Results": [
            "legolas.ozlabs.org;\n\tdkim=pass (2048-bit key;\n unprotected) header.d=kernel.org header.i=@kernel.org header.a=rsa-sha256\n header.s=k20201202 header.b=uKHtM19h;\n\tdkim-atps=neutral",
            "legolas.ozlabs.org;\n spf=pass (sender SPF authorized) smtp.mailfrom=vger.kernel.org\n (client-ip=2600:3c04:e001:36c::12fc:5321; helo=tor.lore.kernel.org;\n envelope-from=linux-pci+bounces-52333-incoming=patchwork.ozlabs.org@vger.kernel.org;\n receiver=patchwork.ozlabs.org)",
            "smtp.subspace.kernel.org;\n\tdkim=pass (2048-bit key) header.d=kernel.org header.i=@kernel.org\n header.b=\"uKHtM19h\"",
            "smtp.subspace.kernel.org;\n arc=none smtp.client-ip=10.30.226.201"
        ],
        "Received": [
            "from tor.lore.kernel.org (tor.lore.kernel.org\n [IPv6:2600:3c04:e001:36c::12fc:5321])\n\t(using TLSv1.3 with cipher TLS_AES_256_GCM_SHA384 (256/256 bits)\n\t key-exchange x25519)\n\t(No client certificate requested)\n\tby legolas.ozlabs.org (Postfix) with ESMTPS id 4fscZ32KNdz1yGb\n\tfor <incoming@patchwork.ozlabs.org>; Fri, 10 Apr 2026 23:08:43 +1000 (AEST)",
            "from smtp.subspace.kernel.org (conduit.subspace.kernel.org\n [100.90.174.1])\n\tby tor.lore.kernel.org (Postfix) with ESMTP id DD3ED30406B7\n\tfor <incoming@patchwork.ozlabs.org>; Fri, 10 Apr 2026 13:08:06 +0000 (UTC)",
            "from localhost.localdomain (localhost.localdomain [127.0.0.1])\n\tby smtp.subspace.kernel.org (Postfix) with ESMTP id 660073C343F;\n\tFri, 10 Apr 2026 13:07:51 +0000 (UTC)",
            "from smtp.kernel.org (aws-us-west-2-korg-mail-1.web.codeaurora.org\n [10.30.226.201])\n\t(using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits))\n\t(No client certificate requested)\n\tby smtp.subspace.kernel.org (Postfix) with ESMTPS id 2B23C3C13FF;\n\tFri, 10 Apr 2026 13:07:51 +0000 (UTC)",
            "by smtp.kernel.org (Postfix) with ESMTPS id 1027AC19425;\n\tFri, 10 Apr 2026 13:07:51 +0000 (UTC)",
            "from aws-us-west-2-korg-lkml-1.web.codeaurora.org\n (localhost.localdomain [127.0.0.1])\n\tby smtp.lore.kernel.org (Postfix) with ESMTP id 093DDF44863;\n\tFri, 10 Apr 2026 13:07:51 +0000 (UTC)"
        ],
        "ARC-Seal": "i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116;\n\tt=1775826471; cv=none;\n b=rJQGHoeKFPotWiuOtzFj8LLi41Hnhbclhu+TzJbgYnJnMqV2/JTzWRFgivNaGd2gtNlPGhs4iwg9rQDbUT4h0R7BOFeNgxbKPQV+g/wZkC8EhtQZLqIXBiJbdEYsm/zqU3gIb6u9IHMwsr0uH0Hn71tCAjDfGYSsq/NIvAxG6uI=",
        "ARC-Message-Signature": "i=1; a=rsa-sha256; d=subspace.kernel.org;\n\ts=arc-20240116; t=1775826471; c=relaxed/simple;\n\tbh=7+KRjDF/uWK4e36nnjIyOUDuwqE7B2w+FV3Sra/SIT8=;\n\th=From:Date:Subject:MIME-Version:Content-Type:Message-Id:References:\n\t In-Reply-To:To:Cc;\n b=kkWL2a1lDxZpriZoAGR7GDE77NwtLjskQa02IH3BMt02mj4mkyBwttRGnp5yGNdao4HCy8PvuyTvzN0QXC/NyU6JBViyDXZFGxqB6WrWc6SjCd2U1UQcFbF8uUz//bkqp2FIb8913iPDSUqm6gWTl8YU+68KlnHwMdtTm75lXNA=",
        "ARC-Authentication-Results": "i=1; smtp.subspace.kernel.org;\n dkim=pass (2048-bit key) header.d=kernel.org header.i=@kernel.org\n header.b=uKHtM19h; arc=none smtp.client-ip=10.30.226.201",
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/simple; d=kernel.org;\n\ts=k20201202; t=1775826471;\n\tbh=7+KRjDF/uWK4e36nnjIyOUDuwqE7B2w+FV3Sra/SIT8=;\n\th=From:Date:Subject:References:In-Reply-To:To:Cc:Reply-To:From;\n\tb=uKHtM19hcatpHR3kXdCb7yJVnDJDoTSr1JXFc4hTqExhHLK/wb3gXF20xRCXpkfTN\n\t zhW/z7jWJT1d0RwrBTYraSy977Mwt3vn/b68iE+PPjTBiFA0BM09O99niE83gmy2Xq\n\t ZF9Adg7h2TlSUjPYuAZuZ7kYH6R4k9rNsRBUo7G1ZVVyluCErMA6Ok3xBJR2BFIoXO\n\t BW8138P2r29MeuMJbXEFMaFLivFW9InoiFtH8FKCmj2PKQz9JVj38rxKA1707oXqI+\n\t 2s+sGcUswYoOPhOf7amY9HL+Q1FUjQchzgObFzpVtcNxVYd/oLzq3vslONj/KRQG8T\n\t 4xTKjEw5rrM3g==",
        "From": "Nathan Lynch via B4 Relay <devnull+nathan.lynch.amd.com@kernel.org>",
        "Date": "Fri, 10 Apr 2026 08:07:33 -0500",
        "Subject": "[PATCH 23/23] dmaengine: sdxi: Add DMA engine provider",
        "Precedence": "bulk",
        "X-Mailing-List": "linux-pci@vger.kernel.org",
        "List-Id": "<linux-pci.vger.kernel.org>",
        "List-Subscribe": "<mailto:linux-pci+subscribe@vger.kernel.org>",
        "List-Unsubscribe": "<mailto:linux-pci+unsubscribe@vger.kernel.org>",
        "MIME-Version": "1.0",
        "Content-Type": "text/plain; charset=\"utf-8\"",
        "Content-Transfer-Encoding": "7bit",
        "Message-Id": "<20260410-sdxi-base-v1-23-1d184cb5c60a@amd.com>",
        "References": "<20260410-sdxi-base-v1-0-1d184cb5c60a@amd.com>",
        "In-Reply-To": "<20260410-sdxi-base-v1-0-1d184cb5c60a@amd.com>",
        "To": "Vinod Koul <vkoul@kernel.org>",
        "Cc": "Wei Huang <wei.huang2@amd.com>,\n Mario Limonciello <mario.limonciello@amd.com>,\n Bjorn Helgaas <bhelgaas@google.com>,\n Jonathan Cameron <jonathan.cameron@huawei.com>,\n Stephen Bates <Stephen.Bates@amd.com>, PradeepVineshReddy.Kodamati@amd.com,\n John.Kariuki@amd.com, linux-pci@vger.kernel.org,\n linux-kernel@vger.kernel.org, dmaengine@vger.kernel.org,\n Nathan Lynch <nathan.lynch@amd.com>",
        "X-Mailer": "b4 0.15.2",
        "X-Developer-Signature": "v=1; a=ed25519-sha256; t=1775826467; l=18394;\n i=nathan.lynch@amd.com; s=20260410; h=from:subject:message-id;\n bh=RkHTf9ZFYIm82czlOAy+YS4SszHAEh+BDuoSSbFLJAQ=;\n b=ZGgmeZ5GXVl76GqjjjtNOhtqmVZNKlXikU3n4S5Yz7cU8P8lPJY7ajJOo9t+KTIfr4/9DpYEd\n j67+KCRGuR4AAlX5y9dj5ZWPkcJ7EIqet+QDnLE33CbUwMc/u+kExiB",
        "X-Developer-Key": "i=nathan.lynch@amd.com; a=ed25519;\n pk=PK4ozhq+/z9/2Jl5rgDmvHa9raVomv79qM8p1RAFpEw=",
        "X-Endpoint-Received": "by B4 Relay for nathan.lynch@amd.com/20260410 with\n auth_id=728",
        "X-Original-From": "Nathan Lynch <nathan.lynch@amd.com>",
        "Reply-To": "nathan.lynch@amd.com"
    },
    "content": "From: Nathan Lynch <nathan.lynch@amd.com>\n\nRegister a DMA engine provider that implements memcpy. The number of\nchannels per SDXI function can be controlled via a module\nparameter (dma_channels). The provider uses the virt-dma library.\n\nThis survives dmatest runs with both polled and interrupt-signaled\ncompletion modes, with the following debug options and sanitizers\nenabled:\n\nCONFIG_DEBUG_KMEMLEAK=y\nCONFIG_KASAN=y\nCONFIG_PROVE_LOCKING=y\nCONFIG_SLUB_DEBUG_ON=y\nCONFIG_UBSAN=y\n\nExample test:\n  $ qemu-system-x86_64 -m 4G -smp 4 -kernel ~/bzImage -nographic \\\n    -append 'console=ttyS0 debug sdxi.dma_channels=2 dmatest.polled=0 \\\n     dmatest.iterations=10000 dmatest.run=1 dmatest.threads_per_chan=2 \\\n     sdxi.dyndbg=+p' -device vfio-pci,host=0000:01:02.1 \\\n     -initrd ~/rootfs.cpio -M q35 -accel kvm\n  [...]\n  # dmesg | grep -i -e sdxi -e dmatest\n  dmatest: No channels configured, continue with any\n  sdxi 0000:00:03.0: allocated 64 vectors\n  sdxi 0000:00:03.0: sdxi_dev_stop: function state: stopped\n  sdxi 0000:00:03.0: SDXI 1.0 device found\n  sdxi 0000:00:03.0: sdxi_dev_start: function state: active\n  sdxi 0000:00:03.0: activated\n  dmatest: Added 2 threads using dma0chan0\n  dmatest: Added 2 threads using dma0chan1\n  dmatest: Started 2 threads using dma0chan0\n  dmatest: Started 2 threads using dma0chan1\n  dmatest: dma0chan1-copy1: summary 10000 tests, 0 failures\n  dmatest: dma0chan1-copy0: summary 10000 tests, 0 failures\n  dmatest: dma0chan0-copy1: summary 10000 tests, 0 failures\n  dmatest: dma0chan0-copy0: summary 10000 tests, 0 failures\n\nCo-developed-by: Wei Huang <wei.huang2@amd.com>\nSigned-off-by: Wei Huang <wei.huang2@amd.com>\nSigned-off-by: Nathan Lynch <nathan.lynch@amd.com>\n---\n drivers/dma/sdxi/Kconfig  |   1 +\n drivers/dma/sdxi/Makefile |   1 +\n drivers/dma/sdxi/device.c |   2 +\n drivers/dma/sdxi/dma.c    | 497 ++++++++++++++++++++++++++++++++++++++++++++++\n drivers/dma/sdxi/dma.h    |  12 
++\n 5 files changed, 513 insertions(+)",
    "diff": "diff --git a/drivers/dma/sdxi/Kconfig b/drivers/dma/sdxi/Kconfig\nindex 39343eb85614..41158e77b991 100644\n--- a/drivers/dma/sdxi/Kconfig\n+++ b/drivers/dma/sdxi/Kconfig\n@@ -1,6 +1,7 @@\n config SDXI\n \ttristate \"SDXI support\"\n \tselect DMA_ENGINE\n+\tselect DMA_VIRTUAL_CHANNELS\n \thelp\n \t  Enable support for Smart Data Accelerator Interface (SDXI)\n \t  Platform Data Mover devices. SDXI is a vendor-neutral\ndiff --git a/drivers/dma/sdxi/Makefile b/drivers/dma/sdxi/Makefile\nindex 419c71c2ef6a..80b1871fe7b5 100644\n--- a/drivers/dma/sdxi/Makefile\n+++ b/drivers/dma/sdxi/Makefile\n@@ -6,6 +6,7 @@ sdxi-objs += \\\n \tcontext.o     \\\n \tdescriptor.o  \\\n \tdevice.o      \\\n+\tdma.o         \\\n \tring.o\n \n sdxi-$(CONFIG_PCI_MSI) += pci.o\ndiff --git a/drivers/dma/sdxi/device.c b/drivers/dma/sdxi/device.c\nindex 8b11197c5781..e159c9939fb4 100644\n--- a/drivers/dma/sdxi/device.c\n+++ b/drivers/dma/sdxi/device.c\n@@ -16,6 +16,7 @@\n #include <linux/xarray.h>\n \n #include \"context.h\"\n+#include \"dma.h\"\n #include \"hw.h\"\n #include \"mmio.h\"\n #include \"sdxi.h\"\n@@ -290,6 +291,7 @@ static int sdxi_device_init(struct sdxi_dev *sdxi)\n \tif (err)\n \t\treturn err;\n \n+\tsdxi_dma_register(sdxi);\n \treturn 0;\n }\n \ndiff --git a/drivers/dma/sdxi/dma.c b/drivers/dma/sdxi/dma.c\nnew file mode 100644\nindex 000000000000..238b3140c90f\n--- /dev/null\n+++ b/drivers/dma/sdxi/dma.c\n@@ -0,0 +1,497 @@\n+// SPDX-License-Identifier: GPL-2.0-only\n+/*\n+ * SDXI dmaengine provider\n+ *\n+ * Copyright Advanced Micro Devices, Inc.\n+ */\n+\n+#include <linux/cleanup.h>\n+#include <linux/delay.h>\n+#include <linux/dev_printk.h>\n+#include <linux/container_of.h>\n+#include <linux/dma-mapping.h>\n+#include <linux/dmaengine.h>\n+#include <linux/list.h>\n+#include <linux/module.h>\n+#include <linux/overflow.h>\n+#include <linux/spinlock.h>\n+\n+#include \"../dmaengine.h\"\n+#include \"../virt-dma.h\"\n+#include \"completion.h\"\n+#include 
\"context.h\"\n+#include \"descriptor.h\"\n+#include \"dma.h\"\n+#include \"ring.h\"\n+#include \"sdxi.h\"\n+\n+static unsigned short dma_channels = 1;\n+module_param(dma_channels, ushort, 0644);\n+MODULE_PARM_DESC(dma_channels, \"DMA channels per function (default: 1)\");\n+\n+/*\n+ * An SDXI context is allocated for each channel configured.\n+ *\n+ * Each context has a descriptor ring with a minimum of 1K entries.\n+ * SDXI supports a variety of primitive operations, e.g. copy,\n+ * interrupt, nop. Each Linux virtual DMA descriptor may be composed\n+ * of a grouping of SDXI descriptors in the ring. E.g. two SDXI\n+ * descriptors (copy, then interrupt) to implement a\n+ * dma_async_tx_descriptor for memcpy with DMA_PREP_INTERRUPT flag.\n+ *\n+ * dma_device->device_prep_dma_* functions reserve space in the\n+ * descriptor ring and serialize SDXI descriptors implementing the\n+ * operation to the reserved slots, leaving their valid (vl) bits\n+ * clear. A single virtual descriptor is added to the allocated list.\n+ *\n+ * dma_async_tx_descriptor->tx_submit() invokes vchan_tx_submit(),\n+ * which merely assigns a cookie and moves the txd to the submitted\n+ * list without entering the SDXI provider code.\n+ *\n+ * dma_device->device_issue_pending() (sdxi_dma_issue_pending()) sets vl\n+ * on each SDXI descriptor reachable from the submitted list, then\n+ * rings the context doorbell. 
The submitted txds are moved to the\n+ * issued list via vchan_issue_pending().\n+ */\n+\n+struct sdxi_dma_chan {\n+\tstruct virt_dma_chan vchan;\n+\tstruct sdxi_cxt *cxt;\n+\tunsigned int vector;\n+\tunsigned int irq;\n+\tstruct sdxi_akey_ent *akey;\n+};\n+\n+struct sdxi_dma_dev {\n+\tstruct dma_device dma_dev;\n+\tsize_t nr_channels;\n+\tstruct sdxi_dma_chan sdchan[] __counted_by(nr_channels);\n+};\n+\n+/*\n+ * A virtual descriptor can correspond to a group of SDXI hardware descriptors.\n+ */\n+struct sdxi_dma_desc {\n+\tstruct virt_dma_desc vdesc;\n+\tstruct sdxi_ring_resv resv;\n+\tstruct sdxi_completion *completion;\n+};\n+\n+static struct sdxi_dma_chan *to_sdxi_dma_chan(const struct dma_chan *dma_chan)\n+{\n+\tconst struct virt_dma_chan *vchan;\n+\n+\tvchan = container_of_const(dma_chan, struct virt_dma_chan, chan);\n+\treturn container_of(vchan, struct sdxi_dma_chan, vchan);\n+}\n+\n+static struct sdxi_dma_desc *\n+to_sdxi_dma_desc(const struct virt_dma_desc *vdesc)\n+{\n+\treturn container_of(vdesc, struct sdxi_dma_desc, vdesc);\n+}\n+\n+static void sdxi_tx_desc_free(struct virt_dma_desc *vdesc)\n+{\n+\tstruct sdxi_dma_desc *sddesc = to_sdxi_dma_desc(vdesc);\n+\n+\tsdxi_completion_free(sddesc->completion);\n+\tkfree(to_sdxi_dma_desc(vdesc));\n+}\n+\n+static struct sdxi_dma_desc *\n+prep_memcpy_intr(struct dma_chan *dma_chan, const struct sdxi_copy *params)\n+{\n+\tstruct sdxi_cxt *cxt = to_sdxi_dma_chan(dma_chan)->cxt;\n+\tstruct sdxi_akey_ent *akey = to_sdxi_dma_chan(dma_chan)->akey;\n+\tstruct sdxi_desc *copy, *intr;\n+\n+\tstruct sdxi_completion *comp __free(sdxi_completion) = sdxi_completion_alloc(cxt->sdxi);\n+\tif (!comp)\n+\t\treturn NULL;\n+\n+\tstruct sdxi_dma_desc *sddesc __free(kfree) = kzalloc(sizeof(*sddesc), GFP_NOWAIT);\n+\tif (!sddesc)\n+\t\treturn NULL;\n+\n+\tif (sdxi_ring_try_reserve(cxt->ring_state, 2, &sddesc->resv))\n+\t\treturn NULL;\n+\n+\tcopy = sdxi_ring_resv_next(&sddesc->resv);\n+\t(void)sdxi_encode_copy(copy, params); /* Caller 
checked validity. */\n+\tsdxi_desc_set_fence(copy); /* Conservatively fence every descriptor. */\n+\tsdxi_completion_attach(copy, comp);\n+\n+\tsddesc->completion = no_free_ptr(comp);\n+\n+\tintr = sdxi_ring_resv_next(&sddesc->resv);\n+\tsdxi_encode_intr(intr, &(const struct sdxi_intr) {\n+\t\t\t.akey = sdxi_akey_index(cxt, akey),\n+\t\t});\n+\t/* Raise the interrupt only after the copy has completed. */\n+\tsdxi_desc_set_fence(intr);\n+\treturn_ptr(sddesc);\n+}\n+\n+static struct sdxi_dma_desc *\n+prep_memcpy_polled(struct dma_chan *dma_chan, const struct sdxi_copy *params)\n+{\n+\tstruct sdxi_cxt *cxt = to_sdxi_dma_chan(dma_chan)->cxt;\n+\tstruct sdxi_desc *copy;\n+\n+\tstruct sdxi_completion *comp __free(sdxi_completion) = sdxi_completion_alloc(cxt->sdxi);\n+\tif (!comp)\n+\t\treturn NULL;\n+\n+\tstruct sdxi_dma_desc *sddesc __free(kfree) = kzalloc(sizeof(*sddesc), GFP_NOWAIT);\n+\tif (!sddesc)\n+\t\treturn NULL;\n+\n+\tif (sdxi_ring_try_reserve(cxt->ring_state, 1, &sddesc->resv))\n+\t\treturn NULL;\n+\n+\tcopy = sdxi_ring_resv_next(&sddesc->resv);\n+\t(void)sdxi_encode_copy(copy, params); /* Caller checked validity. 
*/\n+\tsdxi_completion_attach(copy, comp);\n+\n+\tsddesc->completion = no_free_ptr(comp);\n+\treturn_ptr(sddesc);\n+}\n+\n+static struct dma_async_tx_descriptor *\n+sdxi_dma_prep_memcpy(struct dma_chan *dma_chan, dma_addr_t dst,\n+\t\t     dma_addr_t src, size_t len, unsigned long flags)\n+{\n+\tstruct sdxi_akey_ent *akey = to_sdxi_dma_chan(dma_chan)->akey;\n+\tstruct sdxi_cxt *cxt = to_sdxi_dma_chan(dma_chan)->cxt;\n+\tu16 akey_index = sdxi_akey_index(cxt, akey);\n+\tstruct sdxi_dma_desc *sddesc;\n+\tstruct sdxi_copy copy = {\n+\t\t.src = src,\n+\t\t.dst = dst,\n+\t\t.src_akey = akey_index,\n+\t\t.dst_akey = akey_index,\n+\t\t.len = len,\n+\t};\n+\n+\t/*\n+\t * Perform a trial encode to a dummy descriptor on the stack\n+\t * so we can reject bad inputs without touching the ring\n+\t * state.\n+\t */\n+\tif (sdxi_encode_copy(&(struct sdxi_desc){}, &copy))\n+\t\treturn NULL;\n+\n+\tsddesc = (flags & DMA_PREP_INTERRUPT) ?\n+\t\tprep_memcpy_intr(dma_chan, &copy) :\n+\t\tprep_memcpy_polled(dma_chan, &copy);\n+\n+\tif (!sddesc)\n+\t\treturn NULL;\n+\n+\treturn vchan_tx_prep(to_virt_chan(dma_chan), &sddesc->vdesc, flags);\n+}\n+\n+static enum dma_status sdxi_tx_status(struct dma_chan *chan,\n+\t\t\t\t      dma_cookie_t cookie,\n+\t\t\t\t      struct dma_tx_state *state)\n+{\n+\tstruct sdxi_dma_chan *sdchan = to_sdxi_dma_chan(chan);\n+\tstruct sdxi_dma_desc *sddesc;\n+\tenum dma_status status;\n+\tstruct virt_dma_desc *vdesc;\n+\n+\tstatus = dma_cookie_status(chan, cookie, state);\n+\tif (status == DMA_COMPLETE)\n+\t\treturn status;\n+\n+\tguard(spinlock_irqsave)(&sdchan->vchan.lock);\n+\n+\tvdesc = vchan_find_desc(&sdchan->vchan, cookie);\n+\tif (!vdesc)\n+\t\treturn status;\n+\n+\tsddesc = to_sdxi_dma_desc(vdesc);\n+\n+\tif (WARN_ON_ONCE(!sddesc->completion))\n+\t\treturn DMA_ERROR;\n+\n+\tif (!sdxi_completion_signaled(sddesc->completion))\n+\t\treturn DMA_IN_PROGRESS;\n+\n+\tif (sdxi_completion_errored(sddesc->completion))\n+\t\treturn 
DMA_ERROR;\n+\n+\tlist_del(&vdesc->node);\n+\tvchan_cookie_complete(vdesc);\n+\n+\treturn dma_cookie_status(chan, cookie, state);\n+}\n+\n+static void sdxi_dma_issue_pending(struct dma_chan *dma_chan)\n+{\n+\tstruct virt_dma_chan *vchan = to_virt_chan(dma_chan);\n+\tstruct virt_dma_desc *vdesc;\n+\tu64 dbval = 0;\n+\n+\tscoped_guard(spinlock_irqsave, &vchan->lock) {\n+\t\t/*\n+\t\t * This can happen with racing submitters.\n+\t\t */\n+\t\tif (list_empty(&vchan->desc_submitted))\n+\t\t\treturn;\n+\n+\t\tlist_for_each_entry(vdesc, &vchan->desc_submitted, node) {\n+\t\t\tstruct sdxi_dma_desc *sddesc = to_sdxi_dma_desc(vdesc);\n+\t\t\tstruct sdxi_desc *hwdesc;\n+\n+\t\t\tsdxi_ring_resv_foreach(&sddesc->resv, hwdesc)\n+\t\t\t\tsdxi_desc_make_valid(hwdesc);\n+\t\t\t/*\n+\t\t\t * The reservations ought to be ordered\n+\t\t\t * ascending, but use umax() just in case.\n+\t\t\t */\n+\t\t\tdbval = umax(sdxi_ring_resv_dbval(&sddesc->resv), dbval);\n+\t\t}\n+\n+\t\tvchan_issue_pending(vchan);\n+\t}\n+\n+\t/*\n+\t * The implementation is required to handle out-of-order\n+\t * doorbell updates; we can do this after dropping the\n+\t * lock.\n+\t */\n+\tsdxi_cxt_push_doorbell(to_sdxi_dma_chan(dma_chan)->cxt, dbval);\n+}\n+\n+static int sdxi_dma_terminate_all(struct dma_chan *dma_chan)\n+{\n+\tstruct virt_dma_chan *vchan = to_virt_chan(dma_chan);\n+\tu64 dbval = 0;\n+\n+\t/*\n+\t * Allocated and submitted txds are in the ring but not valid\n+\t * yet. Overwrite them with nops and then set their valid\n+\t * bits.\n+\t *\n+\t * The implementation may start consuming these as soon as the\n+\t * valid bits flip. 
sdxi_dma_synchronize() will ensure they're\n+\t * all done.\n+\t */\n+\tscoped_guard(spinlock_irqsave, &vchan->lock) {\n+\t\tstruct virt_dma_desc *vdesc;\n+\t\tLIST_HEAD(head);\n+\n+\t\tlist_splice_tail_init(&vchan->desc_allocated, &head);\n+\t\tlist_splice_tail_init(&vchan->desc_submitted, &head);\n+\n+\t\tif (list_empty(&head))\n+\t\t\treturn 0;\n+\n+\t\tlist_for_each_entry(vdesc, &head, node) {\n+\t\t\tstruct sdxi_dma_desc *sddesc = to_sdxi_dma_desc(vdesc);\n+\t\t\tstruct sdxi_desc *hwdesc;\n+\n+\t\t\tsdxi_ring_resv_foreach(&sddesc->resv, hwdesc) {\n+\t\t\t\tsdxi_serialize_nop(hwdesc);\n+\t\t\t\tsdxi_desc_make_valid(hwdesc);\n+\t\t\t}\n+\n+\t\t\tdbval = umax(sdxi_ring_resv_dbval(&sddesc->resv), dbval);\n+\t\t}\n+\n+\t\tlist_splice_tail(&head, &vchan->desc_terminated);\n+\t}\n+\n+\tsdxi_cxt_push_doorbell(to_sdxi_dma_chan(dma_chan)->cxt, dbval);\n+\n+\treturn 0;\n+}\n+\n+static void sdxi_dma_synchronize(struct dma_chan *dma_chan)\n+{\n+\tstruct sdxi_cxt *cxt = to_sdxi_dma_chan(dma_chan)->cxt;\n+\tstruct sdxi_ring_resv resv;\n+\tstruct sdxi_desc *nop;\n+\n+\t/* Submit a single nop with fence and wait for it to complete. 
*/\n+\n+\tif (sdxi_ring_reserve(cxt->ring_state, 1, &resv))\n+\t\treturn;\n+\n+\tstruct sdxi_completion *comp __free(sdxi_completion) = sdxi_completion_alloc(cxt->sdxi);\n+\tif (!comp)\n+\t\treturn;\n+\n+\tnop = sdxi_ring_resv_next(&resv);\n+\tsdxi_serialize_nop(nop);\n+\tsdxi_completion_attach(nop, comp);\n+\tsdxi_desc_set_fence(nop);\n+\tsdxi_desc_make_valid(nop);\n+\tsdxi_cxt_push_doorbell(cxt, sdxi_ring_resv_dbval(&resv));\n+\tsdxi_completion_poll(comp);\n+\n+\tvchan_synchronize(to_virt_chan(dma_chan));\n+}\n+\n+static irqreturn_t sdxi_dma_cxt_irq(int irq, void *data)\n+{\n+\tstruct sdxi_dma_chan *sdchan = data;\n+\tstruct virt_dma_chan *vchan = &sdchan->vchan;\n+\tstruct virt_dma_desc *vdesc;\n+\tbool completed = false;\n+\n+\tguard(spinlock_irqsave)(&vchan->lock);\n+\n+\twhile ((vdesc = vchan_next_desc(vchan))) {\n+\t\tstruct sdxi_dma_desc *sddesc = to_sdxi_dma_desc(vdesc);\n+\n+\t\tif (!sdxi_completion_signaled(sddesc->completion))\n+\t\t\tbreak;\n+\n+\t\tlist_del(&vdesc->node);\n+\t\tvchan_cookie_complete(&sddesc->vdesc);\n+\t\tcompleted = true;\n+\t}\n+\n+\tif (completed)\n+\t\tsdxi_ring_wake_up(sdchan->cxt->ring_state);\n+\n+\treturn IRQ_HANDLED;\n+}\n+\n+static int sdxi_dma_alloc_chan_resources(struct dma_chan *dma_chan)\n+{\n+\tstruct sdxi_dev *sdxi = dev_get_drvdata(dma_chan->device->dev);\n+\tstruct sdxi_dma_chan *sdchan = to_sdxi_dma_chan(dma_chan);\n+\tint vector, irq, err;\n+\n+\tsdchan->cxt = sdxi_cxt_new(sdxi);\n+\tif (!sdchan->cxt)\n+\t\treturn -ENOMEM;\n+\t/*\n+\t * This irq and akey setup should perhaps all be pushed into\n+\t * the context allocation.\n+\t */\n+\terr = vector = sdxi_alloc_vector(sdxi);\n+\tif (vector < 0)\n+\t\tgoto exit_cxt;\n+\n+\tsdchan->vector = vector;\n+\n+\terr = irq = sdxi_vector_to_irq(sdxi, vector);\n+\tif (irq < 0)\n+\t\tgoto free_vector;\n+\n+\tsdchan->irq = irq;\n+\n+\t/*\n+\t * Note this akey entry is used for both the completion\n+\t * interrupt and source and destination access for copies.\n+\t 
*/\n+\tsdchan->akey = sdxi_alloc_akey(sdchan->cxt);\n+\tif (!sdchan->akey)\n+\t\tgoto free_vector;\n+\n+\t*sdchan->akey = (typeof(*sdchan->akey)) {\n+\t\t.intr_num = cpu_to_le16(FIELD_PREP(SDXI_AKEY_ENT_VL, 1) |\n+\t\t\t\t\tFIELD_PREP(SDXI_AKEY_ENT_IV, 1) |\n+\t\t\t\t\tFIELD_PREP(SDXI_AKEY_ENT_INTR_NUM,\n+\t\t\t\t\t\t   vector)),\n+\t};\n+\n+\terr = request_irq(sdchan->irq, sdxi_dma_cxt_irq,\n+\t\t\t  IRQF_TRIGGER_NONE, \"SDXI DMAengine\", sdchan);\n+\tif (err)\n+\t\tgoto free_akey;\n+\n+\terr = sdxi_start_cxt(sdchan->cxt);\n+\tif (err)\n+\t\tgoto free_irq;\n+\n+\treturn 0;\n+free_irq:\n+\tfree_irq(sdchan->irq, sdchan);\n+free_akey:\n+\tsdxi_free_akey(sdchan->cxt, sdchan->akey);\n+free_vector:\n+\tsdxi_free_vector(sdxi, vector);\n+exit_cxt:\n+\tsdxi_cxt_exit(sdchan->cxt);\n+\treturn err;\n+}\n+\n+static void sdxi_dma_free_chan_resources(struct dma_chan *dma_chan)\n+{\n+\tstruct sdxi_dma_chan *sdchan = to_sdxi_dma_chan(dma_chan);\n+\n+\tsdxi_stop_cxt(sdchan->cxt);\n+\tfree_irq(sdchan->irq, sdchan);\n+\tsdxi_free_vector(sdchan->cxt->sdxi, sdchan->vector);\n+\tsdxi_free_akey(sdchan->cxt, sdchan->akey);\n+\tvchan_free_chan_resources(to_virt_chan(dma_chan));\n+\tsdxi_cxt_exit(sdchan->cxt);\n+}\n+\n+int sdxi_dma_register(struct sdxi_dev *sdxi)\n+{\n+\tstruct device *dev = sdxi_to_dev(sdxi);\n+\tstruct sdxi_dma_dev *sddev;\n+\tstruct dma_device *dma_dev;\n+\tint err;\n+\n+\tif (!dma_channels)\n+\t\treturn 0;\n+\t/*\n+\t * Note that this code assumes the device supports the\n+\t * interrupt operation group (IntrGrp), which is optional. 
See\n+\t * SDXI 1.0 Table 6-1 SDXI Operation Groups.\n+\t *\n+\t * TODO: check sdxi->op_grp_cap for IntrGrp support and error\n+\t * out if it's missing.\n+\t */\n+\n+\tsddev = devm_kzalloc(dev, struct_size(sddev, sdchan, dma_channels),\n+\t\t\t     GFP_KERNEL);\n+\tif (!sddev)\n+\t\treturn -ENOMEM;\n+\n+\tsddev->nr_channels = dma_channels;\n+\n+\tdma_dev = &sddev->dma_dev;\n+\t*dma_dev = (typeof(*dma_dev)) {\n+\t\t.dev                 = sdxi_to_dev(sdxi),\n+\t\t.src_addr_widths     = DMA_SLAVE_BUSWIDTH_64_BYTES,\n+\t\t.dst_addr_widths     = DMA_SLAVE_BUSWIDTH_64_BYTES,\n+\t\t.directions          = BIT(DMA_MEM_TO_MEM),\n+\t\t.residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR,\n+\n+\t\t.device_alloc_chan_resources = sdxi_dma_alloc_chan_resources,\n+\t\t.device_free_chan_resources  = sdxi_dma_free_chan_resources,\n+\n+\t\t.device_prep_dma_memcpy = sdxi_dma_prep_memcpy,\n+\n+\t\t.device_terminate_all = sdxi_dma_terminate_all,\n+\t\t.device_synchronize = sdxi_dma_synchronize,\n+\t\t.device_tx_status = sdxi_tx_status,\n+\t\t.device_issue_pending = sdxi_dma_issue_pending,\n+\t};\n+\n+\tdma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);\n+\tdma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));\n+\tINIT_LIST_HEAD(&dma_dev->channels);\n+\n+\tfor (size_t i = 0; i < sddev->nr_channels; ++i) {\n+\t\tstruct sdxi_dma_chan *sdchan = &sddev->sdchan[i];\n+\n+\t\tsdchan->vchan.desc_free = sdxi_tx_desc_free;\n+\t\tvchan_init(&sdchan->vchan, &sddev->dma_dev);\n+\t}\n+\n+\terr = dmaenginem_async_device_register(dma_dev);\n+\tif (err)\n+\t\treturn dev_warn_probe(dev, err, \"failed to register dma device\\n\");\n+\n+\treturn 0;\n+}\ndiff --git a/drivers/dma/sdxi/dma.h b/drivers/dma/sdxi/dma.h\nnew file mode 100644\nindex 000000000000..4ff3c2cb67fc\n--- /dev/null\n+++ b/drivers/dma/sdxi/dma.h\n@@ -0,0 +1,12 @@\n+/* SPDX-License-Identifier: GPL-2.0-only */\n+/* Copyright Advanced Micro Devices, Inc. 
*/\n+\n+#ifndef DMA_SDXI_DMA_H\n+#define DMA_SDXI_DMA_H\n+\n+struct sdxi_dev;\n+\n+int sdxi_dma_register(struct sdxi_dev *sdxi);\n+void sdxi_dma_unregister(struct sdxi_dev *sdxi);\n+\n+#endif /* DMA_SDXI_DMA_H */\n",
    "prefixes": [
        "23/23"
    ]
}