get:
Show a patch.

patch:
Update a patch (partial update: only the fields supplied in the request body are changed).

put:
Update a patch (full update of the writable fields).
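
For example, the patch shown below can be fetched and updated with any HTTP client. The short Python sketch that follows is illustrative only and is not part of the API output: it assumes the third-party requests library, a Patchwork API token with sufficient rights for write operations (shown as a placeholder), and an example state name; valid state names depend on the instance configuration.

import requests

API = "http://patchwork.ozlabs.org/api"
TOKEN = "replace-with-your-api-token"  # hypothetical placeholder, not a real token

# GET: show a patch (read access normally needs no authentication)
patch = requests.get(API + "/patches/808352/").json()
print(patch["name"], patch["state"])

# PATCH: partial update -- send only the fields to change.
resp = requests.patch(
    API + "/patches/808352/",
    headers={"Authorization": "Token " + TOKEN},
    json={"state": "under-review"},  # example state name; instance-dependent
)
resp.raise_for_status()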

GET /api/patches/808352/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 808352,
    "url": "http://patchwork.ozlabs.org/api/patches/808352/?format=api",
    "web_url": "http://patchwork.ozlabs.org/project/linux-imx/patch/1504198860-12951-20-git-send-email-Dave.Martin@arm.com/",
    "project": {
        "id": 19,
        "url": "http://patchwork.ozlabs.org/api/projects/19/?format=api",
        "name": "Linux IMX development",
        "link_name": "linux-imx",
        "list_id": "linux-imx-kernel.lists.patchwork.ozlabs.org",
        "list_email": "linux-imx-kernel@lists.patchwork.ozlabs.org",
        "web_url": null,
        "scm_url": null,
        "webscm_url": null,
        "list_archive_url": "",
        "list_archive_url_format": "",
        "commit_url_format": ""
    },
    "msgid": "<1504198860-12951-20-git-send-email-Dave.Martin@arm.com>",
    "list_archive_url": null,
    "date": "2017-08-31T17:00:51",
    "name": "[v2,19/28] arm64/sve: ptrace and ELF coredump support",
    "commit_ref": null,
    "pull_url": null,
    "state": "new",
    "archived": false,
    "hash": "46451158798203cf2f87da7e2b8a27fb171e7b99",
    "submitter": {
        "id": 26612,
        "url": "http://patchwork.ozlabs.org/api/people/26612/?format=api",
        "name": "Dave Martin",
        "email": "Dave.Martin@arm.com"
    },
    "delegate": null,
    "mbox": "http://patchwork.ozlabs.org/project/linux-imx/patch/1504198860-12951-20-git-send-email-Dave.Martin@arm.com/mbox/",
    "series": [
        {
            "id": 883,
            "url": "http://patchwork.ozlabs.org/api/series/883/?format=api",
            "web_url": "http://patchwork.ozlabs.org/project/linux-imx/list/?series=883",
            "date": "2017-08-31T17:00:33",
            "name": "ARM Scalable Vector Extension (SVE)",
            "version": 2,
            "mbox": "http://patchwork.ozlabs.org/series/883/mbox/"
        }
    ],
    "comments": "http://patchwork.ozlabs.org/api/patches/808352/comments/",
    "check": "pending",
    "checks": "http://patchwork.ozlabs.org/api/patches/808352/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<linux-arm-kernel-bounces+incoming-imx=patchwork.ozlabs.org@lists.infradead.org>",
        "X-Original-To": "incoming-imx@patchwork.ozlabs.org",
        "Delivered-To": "patchwork-incoming-imx@bilbo.ozlabs.org",
        "Authentication-Results": [
            "ozlabs.org;\n\tspf=none (mailfrom) smtp.mailfrom=lists.infradead.org\n\t(client-ip=65.50.211.133; helo=bombadil.infradead.org;\n\tenvelope-from=linux-arm-kernel-bounces+incoming-imx=patchwork.ozlabs.org@lists.infradead.org;\n\treceiver=<UNKNOWN>)",
            "ozlabs.org; dkim=pass (2048-bit key;\n\tunprotected) header.d=lists.infradead.org\n\theader.i=@lists.infradead.org\n\theader.b=\"JK8/DlyY\"; dkim-atps=neutral"
        ],
        "Received": [
            "from bombadil.infradead.org (bombadil.infradead.org\n\t[65.50.211.133])\n\t(using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256\n\tbits)) (No client certificate requested)\n\tby ozlabs.org (Postfix) with ESMTPS id 3xjpjZ0fn1z9s81\n\tfor <incoming-imx@patchwork.ozlabs.org>;\n\tFri,  1 Sep 2017 03:08:18 +1000 (AEST)",
            "from localhost ([127.0.0.1] helo=bombadil.infradead.org)\n\tby bombadil.infradead.org with esmtp (Exim 4.87 #1 (Red Hat Linux))\n\tid 1dnSww-0004rk-CC; Thu, 31 Aug 2017 17:08:14 +0000",
            "from usa-sjc-mx-foss1.foss.arm.com ([217.140.101.70]\n\thelo=foss.arm.com)\n\tby bombadil.infradead.org with esmtp (Exim 4.87 #1 (Red Hat Linux))\n\tid 1dnSr0-0007H7-Ng for linux-arm-kernel@lists.infradead.org;\n\tThu, 31 Aug 2017 17:02:39 +0000",
            "from usa-sjc-imap-foss1.foss.arm.com (unknown [10.72.51.249])\n\tby usa-sjc-mx-foss1.foss.arm.com (Postfix) with ESMTP id 44EA21AFA;\n\tThu, 31 Aug 2017 10:01:53 -0700 (PDT)",
            "from e103592.cambridge.arm.com (usa-sjc-imap-foss1.foss.arm.com\n\t[10.72.51.249])\n\tby usa-sjc-imap-foss1.foss.arm.com (Postfix) with ESMTPA id\n\tDB3E63F58F; Thu, 31 Aug 2017 10:01:50 -0700 (PDT)"
        ],
        "DKIM-Signature": "v=1; a=rsa-sha256; q=dns/txt; c=relaxed/relaxed;\n\td=lists.infradead.org; s=bombadil.20170209; h=Sender:\n\tContent-Transfer-Encoding:Content-Type:Cc:List-Subscribe:List-Help:List-Post:\n\tList-Archive:List-Unsubscribe:List-Id:MIME-Version:References:In-Reply-To:\n\tMessage-Id:Date:Subject:To:From:Reply-To:Content-ID:Content-Description:\n\tResent-Date:Resent-From:Resent-Sender:Resent-To:Resent-Cc:Resent-Message-ID:\n\tList-Owner; bh=+9gJ3P/0+wdJbfmj4LvZSpvwwqw1SXH4Fhdy1852+Zs=;\n\tb=JK8/DlyYlPTiDp\n\tv96x9soiFd3ymjGL4WcOZtU4lqWDpFvimUIT1L5iU3agNqwZqFiKHu88OflkuKFBFHYd5ULYIwO6/\n\thdQ2riw3iagZp5h1fd3NquLVAxLItBy7LG8VRhs0NcomsnddzRU64UzMXpgUfqWMHfm2Tlg3Ek/z0\n\tYmpXZGU4Gu/8zLJRTXtoxRfx75y8cewJd+fzs3KQTD/OZhwiVjKPsIsrzTYKKTb7mfX3TBdT0kDlw\n\tUrXQa0mcvORBcriRVM8C313lYqosqltfVxfdAIbeumM4MI9HLbInDUGFIM00eUk+zaoG3AXfYwBZ8\n\tfMaHavQ6Y4VOO4qhGlyw==;",
        "From": "Dave Martin <Dave.Martin@arm.com>",
        "To": "linux-arm-kernel@lists.infradead.org",
        "Subject": "[PATCH v2 19/28] arm64/sve: ptrace and ELF coredump support",
        "Date": "Thu, 31 Aug 2017 18:00:51 +0100",
        "Message-Id": "<1504198860-12951-20-git-send-email-Dave.Martin@arm.com>",
        "X-Mailer": "git-send-email 2.1.4",
        "In-Reply-To": "<1504198860-12951-1-git-send-email-Dave.Martin@arm.com>",
        "References": "<1504198860-12951-1-git-send-email-Dave.Martin@arm.com>",
        "MIME-Version": "1.0",
        "X-CRM114-Version": "20100106-BlameMichelson ( TRE 0.8.0 (BSD) ) MR-646709E3 ",
        "X-CRM114-CacheID": "sfid-20170831_100207_103364_1D678E04 ",
        "X-CRM114-Status": "GOOD (  22.15  )",
        "X-Spam-Score": "-6.9 (------)",
        "X-Spam-Report": "SpamAssassin version 3.4.1 on bombadil.infradead.org summary:\n\tContent analysis details:   (-6.9 points)\n\tpts rule name              description\n\t---- ----------------------\n\t--------------------------------------------------\n\t-5.0 RCVD_IN_DNSWL_HI RBL: Sender listed at http://www.dnswl.org/,\n\thigh trust [217.140.101.70 listed in list.dnswl.org]\n\t-0.0 SPF_PASS               SPF: sender matches SPF record\n\t-0.0 RP_MATCHES_RCVD Envelope sender domain matches handover relay\n\tdomain\n\t-1.9 BAYES_00               BODY: Bayes spam probability is 0 to 1%\n\t[score: 0.0000]",
        "X-BeenThere": "linux-arm-kernel@lists.infradead.org",
        "X-Mailman-Version": "2.1.21",
        "Precedence": "list",
        "List-Unsubscribe": "<http://lists.infradead.org/mailman/options/linux-arm-kernel>,\n\t<mailto:linux-arm-kernel-request@lists.infradead.org?subject=unsubscribe>",
        "List-Archive": "<http://lists.infradead.org/pipermail/linux-arm-kernel/>",
        "List-Post": "<mailto:linux-arm-kernel@lists.infradead.org>",
        "List-Help": "<mailto:linux-arm-kernel-request@lists.infradead.org?subject=help>",
        "List-Subscribe": "<http://lists.infradead.org/mailman/listinfo/linux-arm-kernel>,\n\t<mailto:linux-arm-kernel-request@lists.infradead.org?subject=subscribe>",
        "Cc": "linux-arch@vger.kernel.org, libc-alpha@sourceware.org, gdb@sourceware.org,\n\tArd Biesheuvel <ard.biesheuvel@linaro.org>, Szabolcs Nagy\n\t<szabolcs.nagy@arm.com>, Catalin Marinas <catalin.marinas@arm.com>,\n\tYao Qi <Yao.Qi@arm.com>, Alan Hayward <alan.hayward@arm.com>,\n\tWill Deacon <will.deacon@arm.com>, Oleg Nesterov <oleg@redhat.com>,\n\tAlexander Viro <viro@zeniv.linux.org.uk>, \n\tRichard Sandiford <richard.sandiford@arm.com>, =?utf-8?q?Alex_Benn?=\n\t=?utf-8?b?w6ll?= <alex.bennee@linaro.org>,  kvmarm@lists.cs.columbia.edu",
        "Content-Type": "text/plain; charset=\"utf-8\"",
        "Content-Transfer-Encoding": "base64",
        "Sender": "\"linux-arm-kernel\" <linux-arm-kernel-bounces@lists.infradead.org>",
        "Errors-To": "linux-arm-kernel-bounces+incoming-imx=patchwork.ozlabs.org@lists.infradead.org",
        "List-Id": "linux-imx-kernel.lists.patchwork.ozlabs.org"
    },
    "content": "This patch defines and implements a new regset NT_ARM_SVE, which\ndescribes a thread's SVE register state.  This allows a debugger to\nmanipulate the SVE state, as well as being included in ELF\ncoredumps for post-mortem debugging.\n\nBecause the regset size and layout are dependent on the thread's\ncurrent vector length, it is not possible to define a C struct to\ndescribe the regset contents as is done for existing regsets.\nInstead, and for the same reasons, NT_ARM_SVE is based on the\nfreeform variable-layout approach used for the SVE signal frame.\n\nAdditionally, to reduce debug overhead when debugging threads that\nmight or might not have live SVE register state, NT_ARM_SVE may be\npresented in one of two different formats: the old struct\nuser_fpsimd_state format is embedded for describing the state of a\nthread with no live SVE state, whereas a new variable-layout\nstructure is embedded for describing live SVE state.  This avoids a\ndebugger needing to poll NT_PRFPREG in addition to NT_ARM_SVE, and\nallows existing userspace code to handle the non-SVE case without\ntoo much modification.\n\nFor this to work, NT_ARM_SVE is defined with a fixed-format header\nof type struct user_sve_header, which the recipient can use to\nfigure out the content, size and layout of the reset of the regset.\nAccessor macros are defined to allow the vector-length-dependent\nparts of the regset to be manipulated.\n\nSigned-off-by: Alan Hayward <alan.hayward@arm.com>\nSigned-off-by: Dave Martin <Dave.Martin@arm.com>\nCc: Alex Bennée <alex.bennee@linaro.org>\n\n---\n\nChanges since v1\n----------------\n\nOther changes related to Alex Bennée's comments:\n\n* Migrate to SVE_VQ_BYTES instead of magic numbers.\n\nRequested by Alex Bennée:\n\n* Thin out BUG_ON()s:\nRedundant BUG_ON()s and ones that just check invariants are removed.\nImportant sanity-checks are migrated to WARN_ON()s, with some\nminimal best-effort patch-up code.\n\nOther:\n\n* [ABI fix] Bail out with -EIO if attempting to set the\nSVE regs for an unsupported VL, instead of misparsing the regset data.\n\n* Replace some in-kernel open-coded arithmetic with ALIGN()/\nDIV_ROUND_UP().\n---\n arch/arm64/include/asm/fpsimd.h      |  13 +-\n arch/arm64/include/uapi/asm/ptrace.h | 135 ++++++++++++++++++\n arch/arm64/kernel/fpsimd.c           |  40 +++++-\n arch/arm64/kernel/ptrace.c           | 270 +++++++++++++++++++++++++++++++++--\n include/uapi/linux/elf.h             |   1 +\n 5 files changed, 449 insertions(+), 10 deletions(-)",
    "diff": "diff --git a/arch/arm64/include/asm/fpsimd.h b/arch/arm64/include/asm/fpsimd.h\nindex 6c22624..2723cca 100644\n--- a/arch/arm64/include/asm/fpsimd.h\n+++ b/arch/arm64/include/asm/fpsimd.h\n@@ -38,13 +38,16 @@ struct fpsimd_state {\n \t\t\t__uint128_t vregs[32];\n \t\t\tu32 fpsr;\n \t\t\tu32 fpcr;\n+\t\t\t/*\n+\t\t\t * For ptrace compatibility, pad to next 128-bit\n+\t\t\t * boundary here if extending this struct.\n+\t\t\t */\n \t\t};\n \t};\n \t/* the id of the last cpu to have restored this state */\n \tunsigned int cpu;\n };\n \n-\n #if defined(__KERNEL__) && defined(CONFIG_COMPAT)\n /* Masks for extracting the FPSR and FPCR from the FPSCR */\n #define VFP_FPSCR_STAT_MASK\t0xf800009f\n@@ -89,6 +92,10 @@ extern void sve_alloc(struct task_struct *task);\n extern void fpsimd_release_thread(struct task_struct *task);\n extern void fpsimd_dup_sve(struct task_struct *dst,\n \t\t\t   struct task_struct const *src);\n+extern void fpsimd_sync_to_sve(struct task_struct *task);\n+extern void sve_sync_to_fpsimd(struct task_struct *task);\n+extern void sve_sync_from_fpsimd_zeropad(struct task_struct *task);\n+\n extern int sve_set_vector_length(struct task_struct *task,\n \t\t\t\t unsigned long vl, unsigned long flags);\n \n@@ -103,6 +110,10 @@ static void __maybe_unused sve_alloc(struct task_struct *task) { }\n static void __maybe_unused fpsimd_release_thread(struct task_struct *task) { }\n static void __maybe_unused fpsimd_dup_sve(struct task_struct *dst,\n \t\t\t\t\t  struct task_struct const *src) { }\n+static void __maybe_unused sve_sync_to_fpsimd(struct task_struct *task) { }\n+static void __maybe_unused sve_sync_from_fpsimd_zeropad(\n+\tstruct task_struct *task) { }\n+\n static void __maybe_unused sve_init_vq_map(void) { }\n static void __maybe_unused sve_update_vq_map(void) { }\n static int __maybe_unused sve_verify_vq_map(void) { return 0; }\ndiff --git a/arch/arm64/include/uapi/asm/ptrace.h b/arch/arm64/include/uapi/asm/ptrace.h\nindex d1ff83d..1915ab0 100644\n--- a/arch/arm64/include/uapi/asm/ptrace.h\n+++ b/arch/arm64/include/uapi/asm/ptrace.h\n@@ -22,6 +22,7 @@\n #include <linux/types.h>\n \n #include <asm/hwcap.h>\n+#include <asm/sigcontext.h>\n \n \n /*\n@@ -63,6 +64,8 @@\n \n #ifndef __ASSEMBLY__\n \n+#include <linux/prctl.h>\n+\n /*\n  * User structures for general purpose, floating point and debug registers.\n  */\n@@ -90,6 +93,138 @@ struct user_hwdebug_state {\n \t}\t\tdbg_regs[16];\n };\n \n+/* SVE/FP/SIMD state (NT_ARM_SVE) */\n+\n+struct user_sve_header {\n+\t__u32 size; /* total meaningful regset content in bytes */\n+\t__u32 max_size; /* maxmium possible size for this thread */\n+\t__u16 vl; /* current vector length */\n+\t__u16 max_vl; /* maximum possible vector length */\n+\t__u16 flags;\n+\t__u16 __reserved;\n+};\n+\n+/* Definitions for user_sve_header.flags: */\n+#define SVE_PT_REGS_MASK\t\t(1 << 0)\n+\n+/* Flags: must be kept in sync with prctl interface in <linux/ptrace.h> */\n+#define SVE_PT_REGS_FPSIMD\t\t0\n+#define SVE_PT_REGS_SVE\t\t\tSVE_PT_REGS_MASK\n+\n+#define SVE_PT_VL_INHERIT\t\t(PR_SVE_VL_INHERIT >> 16)\n+#define SVE_PT_VL_ONEXEC\t\t(PR_SVE_SET_VL_ONEXEC >> 16)\n+\n+\n+/*\n+ * The remainder of the SVE state follows struct user_sve_header.  
The\n+ * total size of the SVE state (including header) depends on the\n+ * metadata in the header:  SVE_PT_SIZE(vq, flags) gives the total size\n+ * of the state in bytes, including the header.\n+ *\n+ * Refer to <asm/sigcontext.h> for details of how to pass the correct\n+ * \"vq\" argument to these macros.\n+ */\n+\n+/* Offset from the start of struct user_sve_header to the register data */\n+#define SVE_PT_REGS_OFFSET\t\t\t\t\t\\\n+\t((sizeof(struct sve_context) + (SVE_VQ_BYTES - 1))\t\\\n+\t\t/ SVE_VQ_BYTES * SVE_VQ_BYTES)\n+\n+/*\n+ * The register data content and layout depends on the value of the\n+ * flags field.\n+ */\n+\n+/*\n+ * (flags & SVE_PT_REGS_MASK) == SVE_PT_REGS_FPSIMD case:\n+ *\n+ * The payload starts at offset SVE_PT_FPSIMD_OFFSET, and is of type\n+ * struct user_fpsimd_state.  Additional data might be appended in the\n+ * future: use SVE_PT_FPSIMD_SIZE(vq, flags) to compute the total size.\n+ * SVE_PT_FPSIMD_SIZE(vq, flags) will never be less than\n+ * sizeof(struct user_fpsimd_state).\n+ */\n+\n+#define SVE_PT_FPSIMD_OFFSET\t\tSVE_PT_REGS_OFFSET\n+\n+#define SVE_PT_FPSIMD_SIZE(vq, flags)\t(sizeof(struct user_fpsimd_state))\n+\n+/*\n+ * (flags & SVE_PT_REGS_MASK) == SVE_PT_REGS_SVE case:\n+ *\n+ * The payload starts at offset SVE_PT_SVE_OFFSET, and is of size\n+ * SVE_PT_SVE_SIZE(vq, flags).\n+ *\n+ * Additional macros describe the contents and layout of the payload.\n+ * For each, SVE_PT_SVE_x_OFFSET(args) is the start offset relative to\n+ * the start of struct user_sve_header, and SVE_PT_SVE_x_SIZE(args) is\n+ * the size in bytes:\n+ *\n+ *\tx\ttype\t\t\t\tdescription\n+ *\t-\t----\t\t\t\t-----------\n+ *\tZREGS\t\t\\\n+ *\tZREG\t\t|\n+ *\tPREGS\t\t| refer to <asm/sigcontext.h>\n+ *\tPREG\t\t|\n+ *\tFFR\t\t/\n+ *\n+ *\tFPSR\tuint32_t\t\t\tFPSR\n+ *\tFPCR\tuint32_t\t\t\tFPCR\n+ *\n+ * Additional data might be appended in the future.\n+ */\n+\n+#define SVE_PT_SVE_ZREG_SIZE(vq)\tSVE_SIG_ZREG_SIZE(vq)\n+#define SVE_PT_SVE_PREG_SIZE(vq)\tSVE_SIG_PREG_SIZE(vq)\n+#define SVE_PT_SVE_FFR_SIZE(vq)\t\tSVE_SIG_FFR_SIZE(vq)\n+#define SVE_PT_SVE_FPSR_SIZE\t\tsizeof(__u32)\n+#define SVE_PT_SVE_FPCR_SIZE\t\tsizeof(__u32)\n+\n+#define __SVE_SIG_TO_PT(offset) \\\n+\t((offset) - SVE_SIG_REGS_OFFSET + SVE_PT_REGS_OFFSET)\n+\n+#define SVE_PT_SVE_OFFSET\t\tSVE_PT_REGS_OFFSET\n+\n+#define SVE_PT_SVE_ZREGS_OFFSET \\\n+\t__SVE_SIG_TO_PT(SVE_SIG_ZREGS_OFFSET)\n+#define SVE_PT_SVE_ZREG_OFFSET(vq, n) \\\n+\t__SVE_SIG_TO_PT(SVE_SIG_ZREG_OFFSET(vq, n))\n+#define SVE_PT_SVE_ZREGS_SIZE(vq) \\\n+\t(SVE_PT_SVE_ZREG_OFFSET(vq, SVE_NUM_ZREGS) - SVE_PT_SVE_ZREGS_OFFSET)\n+\n+#define SVE_PT_SVE_PREGS_OFFSET(vq) \\\n+\t__SVE_SIG_TO_PT(SVE_SIG_PREGS_OFFSET(vq))\n+#define SVE_PT_SVE_PREG_OFFSET(vq, n) \\\n+\t__SVE_SIG_TO_PT(SVE_SIG_PREG_OFFSET(vq, n))\n+#define SVE_PT_SVE_PREGS_SIZE(vq) \\\n+\t(SVE_PT_SVE_PREG_OFFSET(vq, SVE_NUM_PREGS) - \\\n+\t\tSVE_PT_SVE_PREGS_OFFSET(vq))\n+\n+#define SVE_PT_SVE_FFR_OFFSET(vq) \\\n+\t__SVE_SIG_TO_PT(SVE_SIG_FFR_OFFSET(vq))\n+\n+#define SVE_PT_SVE_FPSR_OFFSET(vq)\t\t\t\t\\\n+\t((SVE_PT_SVE_FFR_OFFSET(vq) + SVE_PT_SVE_FFR_SIZE(vq) +\t\\\n+\t\t\t(SVE_VQ_BYTES - 1))\t\t\t\\\n+\t\t/ SVE_VQ_BYTES * SVE_VQ_BYTES)\n+#define SVE_PT_SVE_FPCR_OFFSET(vq) \\\n+\t(SVE_PT_SVE_FPSR_OFFSET(vq) + SVE_PT_SVE_FPSR_SIZE)\n+\n+/*\n+ * Any future extension appended after FPCR must be aligned to the next\n+ * 128-bit boundary.\n+ */\n+\n+#define SVE_PT_SVE_SIZE(vq, flags)\t\t\t\t\t\\\n+\t((SVE_PT_SVE_FPCR_OFFSET(vq) + SVE_PT_SVE_FPCR_SIZE\t\t\\\n+\t\t\t- SVE_PT_SVE_OFFSET + (SVE_VQ_BYTES - 
1))\t\\\n+\t\t/ SVE_VQ_BYTES * SVE_VQ_BYTES)\n+\n+#define SVE_PT_SIZE(vq, flags)\t\t\t\t\t\t\\\n+\t (((flags) & SVE_PT_REGS_MASK) == SVE_PT_REGS_SVE ?\t\t\\\n+\t\t  SVE_PT_SVE_OFFSET + SVE_PT_SVE_SIZE(vq, flags)\t\\\n+\t\t: SVE_PT_FPSIMD_OFFSET + SVE_PT_FPSIMD_SIZE(vq, flags))\n+\n #endif /* __ASSEMBLY__ */\n \n #endif /* _UAPI__ASM_PTRACE_H */\ndiff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c\nindex fff9fcf..361c019 100644\n--- a/arch/arm64/kernel/fpsimd.c\n+++ b/arch/arm64/kernel/fpsimd.c\n@@ -303,6 +303,37 @@ void sve_alloc(struct task_struct *task)\n \tBUG_ON(!task->thread.sve_state);\n }\n \n+void fpsimd_sync_to_sve(struct task_struct *task)\n+{\n+\tif (!test_tsk_thread_flag(task, TIF_SVE))\n+\t\tfpsimd_to_sve(task);\n+}\n+\n+void sve_sync_to_fpsimd(struct task_struct *task)\n+{\n+\tif (test_tsk_thread_flag(task, TIF_SVE))\n+\t\tsve_to_fpsimd(task);\n+}\n+\n+void sve_sync_from_fpsimd_zeropad(struct task_struct *task)\n+{\n+\tunsigned int vq;\n+\tvoid *sst = task->thread.sve_state;\n+\tstruct fpsimd_state const *fst = &task->thread.fpsimd_state;\n+\tunsigned int i;\n+\n+\tif (!test_tsk_thread_flag(task, TIF_SVE))\n+\t\treturn;\n+\n+\tvq = sve_vq_from_vl(task->thread.sve_vl);\n+\n+\tmemset(sst, 0, SVE_SIG_REGS_SIZE(vq));\n+\n+\tfor (i = 0; i < 32; ++i)\n+\t\tmemcpy(ZREG(sst, vq, i), &fst->vregs[i],\n+\t\t       sizeof(fst->vregs[i]));\n+}\n+\n /*\n  * Handle SVE state across fork():\n  *\n@@ -459,10 +490,17 @@ static void __init sve_efi_setup(void)\n \t * This is evidence of a crippled system and we are returning void,\n \t * so no attempt is made to handle this situation here.\n \t */\n-\tBUG_ON(!sve_vl_valid(sve_max_vl));\n+\tif (!sve_vl_valid(sve_max_vl))\n+\t\tgoto fail;\n+\n \tefi_sve_state = __alloc_percpu(\n \t\tSVE_SIG_REGS_SIZE(sve_vq_from_vl(sve_max_vl)), SVE_VQ_BYTES);\n \tif (!efi_sve_state)\n+\t\tgoto fail;\n+\n+\treturn;\n+\n+fail:\n \t\tpanic(\"Cannot allocate percpu memory for EFI SVE save/restore\");\n }\n \ndiff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c\nindex 9cbb612..5ef4735b 100644\n--- a/arch/arm64/kernel/ptrace.c\n+++ b/arch/arm64/kernel/ptrace.c\n@@ -32,6 +32,7 @@\n #include <linux/security.h>\n #include <linux/init.h>\n #include <linux/signal.h>\n+#include <linux/string.h>\n #include <linux/uaccess.h>\n #include <linux/perf_event.h>\n #include <linux/hw_breakpoint.h>\n@@ -40,6 +41,7 @@\n #include <linux/elf.h>\n \n #include <asm/compat.h>\n+#include <asm/cpufeature.h>\n #include <asm/debug-monitors.h>\n #include <asm/pgtable.h>\n #include <asm/stacktrace.h>\n@@ -618,33 +620,66 @@ static int gpr_set(struct task_struct *target, const struct user_regset *regset,\n /*\n  * TODO: update fp accessors for lazy context switching (sync/flush hwstate)\n  */\n-static int fpr_get(struct task_struct *target, const struct user_regset *regset,\n-\t\t   unsigned int pos, unsigned int count,\n-\t\t   void *kbuf, void __user *ubuf)\n+static int __fpr_get(struct task_struct *target,\n+\t\t     const struct user_regset *regset,\n+\t\t     unsigned int pos, unsigned int count,\n+\t\t     void *kbuf, void __user *ubuf, unsigned int start_pos)\n {\n \tstruct user_fpsimd_state *uregs;\n+\n+\tsve_sync_to_fpsimd(target);\n+\n \turegs = &target->thread.fpsimd_state.user_fpsimd;\n \n+\treturn user_regset_copyout(&pos, &count, &kbuf, &ubuf, uregs,\n+\t\t\t\t   start_pos, start_pos + sizeof(*uregs));\n+}\n+\n+static int fpr_get(struct task_struct *target, const struct user_regset *regset,\n+\t\t   unsigned int pos, unsigned int count,\n+\t\t   void 
*kbuf, void __user *ubuf)\n+{\n \tif (target == current)\n \t\tfpsimd_preserve_current_state();\n \n-\treturn user_regset_copyout(&pos, &count, &kbuf, &ubuf, uregs, 0, -1);\n+\treturn __fpr_get(target, regset, pos, count, kbuf, ubuf, 0);\n }\n \n-static int fpr_set(struct task_struct *target, const struct user_regset *regset,\n-\t\t   unsigned int pos, unsigned int count,\n-\t\t   const void *kbuf, const void __user *ubuf)\n+static int __fpr_set(struct task_struct *target,\n+\t\t     const struct user_regset *regset,\n+\t\t     unsigned int pos, unsigned int count,\n+\t\t     const void *kbuf, const void __user *ubuf,\n+\t\t     unsigned int start_pos)\n {\n \tint ret;\n \tstruct user_fpsimd_state newstate =\n \t\ttarget->thread.fpsimd_state.user_fpsimd;\n \n-\tret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &newstate, 0, -1);\n+\tsve_sync_to_fpsimd(target);\n+\n+\tret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &newstate,\n+\t\t\t\t start_pos, start_pos + sizeof(newstate));\n \tif (ret)\n \t\treturn ret;\n \n \ttarget->thread.fpsimd_state.user_fpsimd = newstate;\n+\n+\treturn ret;\n+}\n+\n+static int fpr_set(struct task_struct *target, const struct user_regset *regset,\n+\t\t   unsigned int pos, unsigned int count,\n+\t\t   const void *kbuf, const void __user *ubuf)\n+{\n+\tint ret;\n+\n+\tret = __fpr_set(target, regset, pos, count, kbuf, ubuf, 0);\n+\tif (ret)\n+\t\treturn ret;\n+\n+\tsve_sync_from_fpsimd_zeropad(target);\n \tfpsimd_flush_task_state(target);\n+\n \treturn ret;\n }\n \n@@ -702,6 +737,210 @@ static int system_call_set(struct task_struct *target,\n \treturn ret;\n }\n \n+#ifdef CONFIG_ARM64_SVE\n+\n+static void sve_init_header_from_task(struct user_sve_header *header,\n+\t\t\t\t      struct task_struct *target)\n+{\n+\tunsigned int vq;\n+\n+\tmemset(header, 0, sizeof(*header));\n+\n+\theader->flags = test_tsk_thread_flag(target, TIF_SVE) ?\n+\t\tSVE_PT_REGS_SVE : SVE_PT_REGS_FPSIMD;\n+\tif (test_tsk_thread_flag(target, TIF_SVE_VL_INHERIT))\n+\t\theader->flags |= SVE_PT_VL_INHERIT;\n+\n+\theader->vl = target->thread.sve_vl;\n+\tvq = sve_vq_from_vl(header->vl);\n+\n+\tif (WARN_ON(!sve_vl_valid(sve_max_vl)))\n+\t\theader->max_vl = header->vl;\n+\n+\theader->size = SVE_PT_SIZE(vq, header->flags);\n+\theader->max_size = SVE_PT_SIZE(sve_vq_from_vl(header->max_vl),\n+\t\t\t\t      SVE_PT_REGS_SVE);\n+}\n+\n+static unsigned int sve_size_from_header(struct user_sve_header const *header)\n+{\n+\treturn ALIGN(header->size, SVE_VQ_BYTES);\n+}\n+\n+static unsigned int sve_get_size(struct task_struct *target,\n+\t\t\t\t const struct user_regset *regset)\n+{\n+\tstruct user_sve_header header;\n+\n+\tif (!system_supports_sve())\n+\t\treturn 0;\n+\n+\tsve_init_header_from_task(&header, target);\n+\treturn sve_size_from_header(&header);\n+}\n+\n+static int sve_get(struct task_struct *target,\n+\t\t   const struct user_regset *regset,\n+\t\t   unsigned int pos, unsigned int count,\n+\t\t   void *kbuf, void __user *ubuf)\n+{\n+\tint ret;\n+\tstruct user_sve_header header;\n+\tunsigned int vq;\n+\tunsigned long start, end;\n+\n+\tif (!system_supports_sve())\n+\t\treturn -EINVAL;\n+\n+\t/* Header */\n+\tsve_init_header_from_task(&header, target);\n+\tvq = sve_vq_from_vl(header.vl);\n+\n+\tret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &header,\n+\t\t\t\t  0, sizeof(header));\n+\tif (ret)\n+\t\treturn ret;\n+\n+\tif (target == current)\n+\t\tfpsimd_preserve_current_state();\n+\n+\t/* Registers: FPSIMD-only case */\n+\n+\tBUILD_BUG_ON(SVE_PT_FPSIMD_OFFSET != 
sizeof(header));\n+\tif ((header.flags & SVE_PT_REGS_MASK) == SVE_PT_REGS_FPSIMD)\n+\t\treturn __fpr_get(target, regset, pos, count, kbuf, ubuf,\n+\t\t\t\t SVE_PT_FPSIMD_OFFSET);\n+\n+\t/* Otherwise: full SVE case */\n+\n+\tBUILD_BUG_ON(SVE_PT_SVE_OFFSET != sizeof(header));\n+\tstart = SVE_PT_SVE_OFFSET;\n+\tend = SVE_PT_SVE_FFR_OFFSET(vq) + SVE_PT_SVE_FFR_SIZE(vq);\n+\tret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,\n+\t\t\t\t  target->thread.sve_state,\n+\t\t\t\t  start, end);\n+\tif (ret)\n+\t\treturn ret;\n+\n+\tstart = end;\n+\tend = SVE_PT_SVE_FPSR_OFFSET(vq);\n+\tret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,\n+\t\t\t\t       start, end);\n+\tif (ret)\n+\t\treturn ret;\n+\n+\t/*\n+\t * Copy fpsr, and fpcr which must follow contiguously in\n+\t * struct fpsimd_state:\n+\t */\n+\tstart = end;\n+\tend = SVE_PT_SVE_FPCR_OFFSET(vq) + SVE_PT_SVE_FPCR_SIZE;\n+\tret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,\n+\t\t\t\t  &target->thread.fpsimd_state.fpsr,\n+\t\t\t\t  start, end);\n+\tif (ret)\n+\t\treturn ret;\n+\n+\tstart = end;\n+\tend = sve_size_from_header(&header);\n+\treturn user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,\n+\t\t\t\t\tstart, end);\n+}\n+\n+static int sve_set(struct task_struct *target,\n+\t\t   const struct user_regset *regset,\n+\t\t   unsigned int pos, unsigned int count,\n+\t\t   const void *kbuf, const void __user *ubuf)\n+{\n+\tint ret;\n+\tstruct user_sve_header header;\n+\tunsigned int vq;\n+\tunsigned long start, end;\n+\n+\tif (!system_supports_sve())\n+\t\treturn -EINVAL;\n+\n+\t/* Header */\n+\tif (count < sizeof(header))\n+\t\treturn -EINVAL;\n+\tret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &header,\n+\t\t\t\t 0, sizeof(header));\n+\tif (ret)\n+\t\tgoto out;\n+\n+\t/*\n+\t * Apart from PT_SVE_REGS_MASK, all PT_SVE_* flags are consumed by\n+\t * sve_set_vector_length(), which will also validate them for us:\n+\t */\n+\tret = sve_set_vector_length(target, header.vl,\n+\t\t\t\t    header.flags & ~SVE_PT_REGS_MASK);\n+\tif (ret)\n+\t\tgoto out;\n+\n+\t/* Actual VL set may be less than the user asked for: */\n+\tvq = sve_vq_from_vl(target->thread.sve_vl);\n+\n+\t/* Registers: FPSIMD-only case */\n+\n+\tBUILD_BUG_ON(SVE_PT_FPSIMD_OFFSET != sizeof(header));\n+\tif ((header.flags & SVE_PT_REGS_MASK) == SVE_PT_REGS_FPSIMD) {\n+\t\tsve_sync_to_fpsimd(target);\n+\n+\t\tret = __fpr_set(target, regset, pos, count, kbuf, ubuf,\n+\t\t\t\tSVE_PT_FPSIMD_OFFSET);\n+\t\tclear_tsk_thread_flag(target, TIF_SVE);\n+\t\tgoto out;\n+\t}\n+\n+\t/* Otherwise: full SVE case */\n+\n+\t/*\n+\t * If setting a different VL from the requested VL and there is\n+\t * register data, the data layout will be wrong: don't even\n+\t * try to set the registers in this case.\n+\t */\n+\tif (count && vq != sve_vq_from_vl(header.vl)) {\n+\t\tret = -EIO;\n+\t\tgoto out;\n+\t}\n+\n+\tsve_alloc(target);\n+\tfpsimd_sync_to_sve(target);\n+\tset_tsk_thread_flag(target, TIF_SVE);\n+\n+\tBUILD_BUG_ON(SVE_PT_SVE_OFFSET != sizeof(header));\n+\tstart = SVE_PT_SVE_OFFSET;\n+\tend = SVE_PT_SVE_FFR_OFFSET(vq) + SVE_PT_SVE_FFR_SIZE(vq);\n+\tret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,\n+\t\t\t\t target->thread.sve_state,\n+\t\t\t\t start, end);\n+\tif (ret)\n+\t\tgoto out;\n+\n+\tstart = end;\n+\tend = SVE_PT_SVE_FPSR_OFFSET(vq);\n+\tret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,\n+\t\t\t\t\tstart, end);\n+\tif (ret)\n+\t\tgoto out;\n+\n+\t/*\n+\t * Copy fpsr, and fpcr which must follow contiguously in\n+\t * struct fpsimd_state:\n+\t */\n+\tstart = 
end;\n+\tend = SVE_PT_SVE_FPCR_OFFSET(vq) + SVE_PT_SVE_FPCR_SIZE;\n+\tret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,\n+\t\t\t\t &target->thread.fpsimd_state.fpsr,\n+\t\t\t\t start, end);\n+\n+out:\n+\tfpsimd_flush_task_state(target);\n+\treturn ret;\n+}\n+\n+#endif /* CONFIG_ARM64_SVE */\n+\n enum aarch64_regset {\n \tREGSET_GPR,\n \tREGSET_FPR,\n@@ -711,6 +950,9 @@ enum aarch64_regset {\n \tREGSET_HW_WATCH,\n #endif\n \tREGSET_SYSTEM_CALL,\n+#ifdef CONFIG_ARM64_SVE\n+\tREGSET_SVE,\n+#endif\n };\n \n static const struct user_regset aarch64_regsets[] = {\n@@ -768,6 +1010,18 @@ static const struct user_regset aarch64_regsets[] = {\n \t\t.get = system_call_get,\n \t\t.set = system_call_set,\n \t},\n+#ifdef CONFIG_ARM64_SVE\n+\t[REGSET_SVE] = { /* Scalable Vector Extension */\n+\t\t.core_note_type = NT_ARM_SVE,\n+\t\t.n = DIV_ROUND_UP(SVE_PT_SIZE(SVE_VQ_MAX, SVE_PT_REGS_SVE),\n+\t\t\t\t  SVE_VQ_BYTES),\n+\t\t.size = SVE_VQ_BYTES,\n+\t\t.align = SVE_VQ_BYTES,\n+\t\t.get = sve_get,\n+\t\t.set = sve_set,\n+\t\t.get_size = sve_get_size,\n+\t},\n+#endif\n };\n \n static const struct user_regset_view user_aarch64_view = {\ndiff --git a/include/uapi/linux/elf.h b/include/uapi/linux/elf.h\nindex b5280db..735b8f4 100644\n--- a/include/uapi/linux/elf.h\n+++ b/include/uapi/linux/elf.h\n@@ -416,6 +416,7 @@ typedef struct elf64_shdr {\n #define NT_ARM_HW_BREAK\t0x402\t\t/* ARM hardware breakpoint registers */\n #define NT_ARM_HW_WATCH\t0x403\t\t/* ARM hardware watchpoint registers */\n #define NT_ARM_SYSTEM_CALL\t0x404\t/* ARM system call number */\n+#define NT_ARM_SVE\t0x405\t\t/* ARM Scalable Vector Extension registers */\n #define NT_METAG_CBUF\t0x500\t\t/* Metag catch buffer registers */\n #define NT_METAG_RPIPE\t0x501\t\t/* Metag read pipeline state */\n #define NT_METAG_TLS\t0x502\t\t/* Metag TLS pointer */\n",
    "prefixes": [
        "v2",
        "19/28"
    ]
}
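
The response is hypermedia-style: the "mbox", "comments" and "checks" fields are themselves URLs that can be fetched directly. The sketch below, again assuming the requests library, downloads the raw patch and lists its comments; the comment field names used in the loop are assumptions based on typical Patchwork comment objects rather than anything shown in the output above.

import requests

patch = requests.get("http://patchwork.ozlabs.org/api/patches/808352/").json()

# Download the raw patch (mbox format) via the link embedded in the response
mbox_text = requests.get(patch["mbox"]).text

# The "comments" field is an API URL returning a JSON list of comment objects
comments = requests.get(patch["comments"]).json()
for comment in comments:
    # Field names here ("date", "submitter") are assumed to mirror the patch object above
    print(comment.get("date"), comment.get("submitter", {}).get("name"))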