get:
Show a patch.

patch:
Update a patch (partial update: only the fields supplied in the request are changed).

put:
Update a patch (full update).

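The exchange below was captured through the browsable API renderer (the ?format=api query string); for programmatic access the same endpoint returns plain JSON. As a minimal sketch, assuming nothing beyond the public read-only API, the patch shown below could be fetched with Python's requests library:

import requests

# URL taken from the "url" field of the response below.
url = "http://patchwork.ozlabs.org/api/patches/808339/"
resp = requests.get(url, headers={"Accept": "application/json"})
resp.raise_for_status()

patch = resp.json()
print(patch["name"])   # "[v2,19/28] arm64/sve: ptrace and ELF coredump support"
print(patch["state"])  # "new"
print(patch["mbox"])   # mbox download URL, suitable for feeding to "git am"
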
GET /api/patches/808339/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 808339,
    "url": "http://patchwork.ozlabs.org/api/patches/808339/?format=api",
    "web_url": "http://patchwork.ozlabs.org/project/glibc/patch/1504198860-12951-20-git-send-email-Dave.Martin@arm.com/",
    "project": {
        "id": 41,
        "url": "http://patchwork.ozlabs.org/api/projects/41/?format=api",
        "name": "GNU C Library",
        "link_name": "glibc",
        "list_id": "libc-alpha.sourceware.org",
        "list_email": "libc-alpha@sourceware.org",
        "web_url": "",
        "scm_url": "",
        "webscm_url": "",
        "list_archive_url": "",
        "list_archive_url_format": "",
        "commit_url_format": ""
    },
    "msgid": "<1504198860-12951-20-git-send-email-Dave.Martin@arm.com>",
    "list_archive_url": null,
    "date": "2017-08-31T17:00:51",
    "name": "[v2,19/28] arm64/sve: ptrace and ELF coredump support",
    "commit_ref": null,
    "pull_url": null,
    "state": "new",
    "archived": false,
    "hash": "46451158798203cf2f87da7e2b8a27fb171e7b99",
    "submitter": {
        "id": 26612,
        "url": "http://patchwork.ozlabs.org/api/people/26612/?format=api",
        "name": "Dave Martin",
        "email": "Dave.Martin@arm.com"
    },
    "delegate": null,
    "mbox": "http://patchwork.ozlabs.org/project/glibc/patch/1504198860-12951-20-git-send-email-Dave.Martin@arm.com/mbox/",
    "series": [
        {
            "id": 882,
            "url": "http://patchwork.ozlabs.org/api/series/882/?format=api",
            "web_url": "http://patchwork.ozlabs.org/project/glibc/list/?series=882",
            "date": "2017-08-31T17:00:32",
            "name": "ARM Scalable Vector Extension (SVE)",
            "version": 2,
            "mbox": "http://patchwork.ozlabs.org/series/882/mbox/"
        }
    ],
    "comments": "http://patchwork.ozlabs.org/api/patches/808339/comments/",
    "check": "pending",
    "checks": "http://patchwork.ozlabs.org/api/patches/808339/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<libc-alpha-return-83976-incoming=patchwork.ozlabs.org@sourceware.org>",
        "X-Original-To": "incoming@patchwork.ozlabs.org",
        "Delivered-To": [
            "patchwork-incoming@bilbo.ozlabs.org",
            "mailing list libc-alpha@sourceware.org"
        ],
        "Authentication-Results": [
            "ozlabs.org;\n\tspf=pass (mailfrom) smtp.mailfrom=sourceware.org\n\t(client-ip=209.132.180.131; helo=sourceware.org;\n\tenvelope-from=libc-alpha-return-83976-incoming=patchwork.ozlabs.org@sourceware.org;\n\treceiver=<UNKNOWN>)",
            "ozlabs.org; dkim=pass (1024-bit key;\n\tsecure) header.d=sourceware.org header.i=@sourceware.org\n\theader.b=\"ftWHvU/8\"; dkim-atps=neutral",
            "sourceware.org; auth=none"
        ],
        "Received": [
            "from sourceware.org (server1.sourceware.org [209.132.180.131])\n\t(using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256\n\tbits)) (No client certificate requested)\n\tby ozlabs.org (Postfix) with ESMTPS id 3xjpcq4PGLz9s81\n\tfor <incoming@patchwork.ozlabs.org>;\n\tFri,  1 Sep 2017 03:04:11 +1000 (AEST)",
            "(qmail 83574 invoked by alias); 31 Aug 2017 17:02:04 -0000",
            "(qmail 83514 invoked by uid 89); 31 Aug 2017 17:02:03 -0000"
        ],
        "DomainKey-Signature": "a=rsa-sha1; c=nofws; d=sourceware.org; h=list-id\n\t:list-unsubscribe:list-subscribe:list-archive:list-post\n\t:list-help:sender:from:to:cc:subject:date:message-id:in-reply-to\n\t:references:mime-version:content-type:content-transfer-encoding;\n\tq=dns; s=default; b=Fl8y4AOdpIiIj90FK6N8p8hsLSDVHvA/r56uO2lnCAk\n\ttCHLuRVfNjL+0Iamy11TxN2/pS9b0lKmKL5bJO3d/MZDWXczacZ5/xISIjfXfSAW\n\tsJlh59u5wozd59oucbiTLKJ7y0bePqYIHKmpZ1+N2ILVQjU/YVTJcP4kBHD7oB6s\n\t=",
        "DKIM-Signature": "v=1; a=rsa-sha1; c=relaxed; d=sourceware.org; h=list-id\n\t:list-unsubscribe:list-subscribe:list-archive:list-post\n\t:list-help:sender:from:to:cc:subject:date:message-id:in-reply-to\n\t:references:mime-version:content-type:content-transfer-encoding;\n\ts=default; bh=/V7KVSoOtR2mbyzgGWDX1dp17W0=; b=ftWHvU/8wwUIrmIT2\n\tPm+WN+F5KwmYFZKcKuQu3Rt9TCqGz5EALFpnw8eER/NPFGIdnPuCPRFKCasZUR1m\n\tM/9SxhkHohMVYEiJTOm9qc1eWGw8yU07S6IYPx409xG0sZHp06qZC/ADMYzf4eOL\n\tVidhCqYCIuxVE9lEF6gTdqTR2g=",
        "Mailing-List": "contact libc-alpha-help@sourceware.org; run by ezmlm",
        "Precedence": "bulk",
        "List-Id": "<libc-alpha.sourceware.org>",
        "List-Unsubscribe": "<mailto:libc-alpha-unsubscribe-incoming=patchwork.ozlabs.org@sourceware.org>",
        "List-Subscribe": "<mailto:libc-alpha-subscribe@sourceware.org>",
        "List-Archive": "<http://sourceware.org/ml/libc-alpha/>",
        "List-Post": "<mailto:libc-alpha@sourceware.org>",
        "List-Help": "<mailto:libc-alpha-help@sourceware.org>,\n\t<http://sourceware.org/ml/#faqs>",
        "Sender": "libc-alpha-owner@sourceware.org",
        "X-Virus-Found": "No",
        "X-Spam-SWARE-Status": "No, score=-26.9 required=5.0 tests=BAYES_00, GIT_PATCH_0,\n\tGIT_PATCH_1, GIT_PATCH_2, GIT_PATCH_3, RP_MATCHES_RCVD,\n\tSPF_PASS autolearn=ham version=3.3.2 spammy=",
        "X-Spam-User": "qpsmtpd, 2 recipients",
        "X-HELO": "foss.arm.com",
        "From": "Dave Martin <Dave.Martin@arm.com>",
        "To": "linux-arm-kernel@lists.infradead.org",
        "Cc": "Catalin Marinas <catalin.marinas@arm.com>, Will Deacon\n\t<will.deacon@arm.com>, \tArd Biesheuvel <ard.biesheuvel@linaro.org>,\n\t=?utf-8?q?Alex_Benn=C3=A9?= =?utf-8?q?e?= <alex.bennee@linaro.org>,\n\tSzabolcs Nagy <szabolcs.nagy@arm.com>, Richard Sandiford\n\t<richard.sandiford@arm.com>, \tkvmarm@lists.cs.columbia.edu,\n\tlibc-alpha@sourceware.org, \tlinux-arch@vger.kernel.org,\n\tgdb@sourceware.org, \tAlan Hayward <alan.hayward@arm.com>,\n\tYao Qi <Yao.Qi@arm.com>, \tOleg Nesterov <oleg@redhat.com>, Alexander Viro\n\t<viro@zeniv.linux.org.uk>",
        "Subject": "[PATCH v2 19/28] arm64/sve: ptrace and ELF coredump support",
        "Date": "Thu, 31 Aug 2017 18:00:51 +0100",
        "Message-Id": "<1504198860-12951-20-git-send-email-Dave.Martin@arm.com>",
        "In-Reply-To": "<1504198860-12951-1-git-send-email-Dave.Martin@arm.com>",
        "References": "<1504198860-12951-1-git-send-email-Dave.Martin@arm.com>",
        "MIME-Version": "1.0",
        "Content-Type": "text/plain; charset=UTF-8",
        "Content-Transfer-Encoding": "8bit"
    },
    "content": "This patch defines and implements a new regset NT_ARM_SVE, which\ndescribes a thread's SVE register state.  This allows a debugger to\nmanipulate the SVE state, as well as being included in ELF\ncoredumps for post-mortem debugging.\n\nBecause the regset size and layout are dependent on the thread's\ncurrent vector length, it is not possible to define a C struct to\ndescribe the regset contents as is done for existing regsets.\nInstead, and for the same reasons, NT_ARM_SVE is based on the\nfreeform variable-layout approach used for the SVE signal frame.\n\nAdditionally, to reduce debug overhead when debugging threads that\nmight or might not have live SVE register state, NT_ARM_SVE may be\npresented in one of two different formats: the old struct\nuser_fpsimd_state format is embedded for describing the state of a\nthread with no live SVE state, whereas a new variable-layout\nstructure is embedded for describing live SVE state.  This avoids a\ndebugger needing to poll NT_PRFPREG in addition to NT_ARM_SVE, and\nallows existing userspace code to handle the non-SVE case without\ntoo much modification.\n\nFor this to work, NT_ARM_SVE is defined with a fixed-format header\nof type struct user_sve_header, which the recipient can use to\nfigure out the content, size and layout of the reset of the regset.\nAccessor macros are defined to allow the vector-length-dependent\nparts of the regset to be manipulated.\n\nSigned-off-by: Alan Hayward <alan.hayward@arm.com>\nSigned-off-by: Dave Martin <Dave.Martin@arm.com>\nCc: Alex Bennée <alex.bennee@linaro.org>\n\n---\n\nChanges since v1\n----------------\n\nOther changes related to Alex Bennée's comments:\n\n* Migrate to SVE_VQ_BYTES instead of magic numbers.\n\nRequested by Alex Bennée:\n\n* Thin out BUG_ON()s:\nRedundant BUG_ON()s and ones that just check invariants are removed.\nImportant sanity-checks are migrated to WARN_ON()s, with some\nminimal best-effort patch-up code.\n\nOther:\n\n* [ABI fix] Bail out with -EIO if attempting to set the\nSVE regs for an unsupported VL, instead of misparsing the regset data.\n\n* Replace some in-kernel open-coded arithmetic with ALIGN()/\nDIV_ROUND_UP().\n---\n arch/arm64/include/asm/fpsimd.h      |  13 +-\n arch/arm64/include/uapi/asm/ptrace.h | 135 ++++++++++++++++++\n arch/arm64/kernel/fpsimd.c           |  40 +++++-\n arch/arm64/kernel/ptrace.c           | 270 +++++++++++++++++++++++++++++++++--\n include/uapi/linux/elf.h             |   1 +\n 5 files changed, 449 insertions(+), 10 deletions(-)",
    "diff": "diff --git a/arch/arm64/include/asm/fpsimd.h b/arch/arm64/include/asm/fpsimd.h\nindex 6c22624..2723cca 100644\n--- a/arch/arm64/include/asm/fpsimd.h\n+++ b/arch/arm64/include/asm/fpsimd.h\n@@ -38,13 +38,16 @@ struct fpsimd_state {\n \t\t\t__uint128_t vregs[32];\n \t\t\tu32 fpsr;\n \t\t\tu32 fpcr;\n+\t\t\t/*\n+\t\t\t * For ptrace compatibility, pad to next 128-bit\n+\t\t\t * boundary here if extending this struct.\n+\t\t\t */\n \t\t};\n \t};\n \t/* the id of the last cpu to have restored this state */\n \tunsigned int cpu;\n };\n \n-\n #if defined(__KERNEL__) && defined(CONFIG_COMPAT)\n /* Masks for extracting the FPSR and FPCR from the FPSCR */\n #define VFP_FPSCR_STAT_MASK\t0xf800009f\n@@ -89,6 +92,10 @@ extern void sve_alloc(struct task_struct *task);\n extern void fpsimd_release_thread(struct task_struct *task);\n extern void fpsimd_dup_sve(struct task_struct *dst,\n \t\t\t   struct task_struct const *src);\n+extern void fpsimd_sync_to_sve(struct task_struct *task);\n+extern void sve_sync_to_fpsimd(struct task_struct *task);\n+extern void sve_sync_from_fpsimd_zeropad(struct task_struct *task);\n+\n extern int sve_set_vector_length(struct task_struct *task,\n \t\t\t\t unsigned long vl, unsigned long flags);\n \n@@ -103,6 +110,10 @@ static void __maybe_unused sve_alloc(struct task_struct *task) { }\n static void __maybe_unused fpsimd_release_thread(struct task_struct *task) { }\n static void __maybe_unused fpsimd_dup_sve(struct task_struct *dst,\n \t\t\t\t\t  struct task_struct const *src) { }\n+static void __maybe_unused sve_sync_to_fpsimd(struct task_struct *task) { }\n+static void __maybe_unused sve_sync_from_fpsimd_zeropad(\n+\tstruct task_struct *task) { }\n+\n static void __maybe_unused sve_init_vq_map(void) { }\n static void __maybe_unused sve_update_vq_map(void) { }\n static int __maybe_unused sve_verify_vq_map(void) { return 0; }\ndiff --git a/arch/arm64/include/uapi/asm/ptrace.h b/arch/arm64/include/uapi/asm/ptrace.h\nindex d1ff83d..1915ab0 100644\n--- a/arch/arm64/include/uapi/asm/ptrace.h\n+++ b/arch/arm64/include/uapi/asm/ptrace.h\n@@ -22,6 +22,7 @@\n #include <linux/types.h>\n \n #include <asm/hwcap.h>\n+#include <asm/sigcontext.h>\n \n \n /*\n@@ -63,6 +64,8 @@\n \n #ifndef __ASSEMBLY__\n \n+#include <linux/prctl.h>\n+\n /*\n  * User structures for general purpose, floating point and debug registers.\n  */\n@@ -90,6 +93,138 @@ struct user_hwdebug_state {\n \t}\t\tdbg_regs[16];\n };\n \n+/* SVE/FP/SIMD state (NT_ARM_SVE) */\n+\n+struct user_sve_header {\n+\t__u32 size; /* total meaningful regset content in bytes */\n+\t__u32 max_size; /* maxmium possible size for this thread */\n+\t__u16 vl; /* current vector length */\n+\t__u16 max_vl; /* maximum possible vector length */\n+\t__u16 flags;\n+\t__u16 __reserved;\n+};\n+\n+/* Definitions for user_sve_header.flags: */\n+#define SVE_PT_REGS_MASK\t\t(1 << 0)\n+\n+/* Flags: must be kept in sync with prctl interface in <linux/ptrace.h> */\n+#define SVE_PT_REGS_FPSIMD\t\t0\n+#define SVE_PT_REGS_SVE\t\t\tSVE_PT_REGS_MASK\n+\n+#define SVE_PT_VL_INHERIT\t\t(PR_SVE_VL_INHERIT >> 16)\n+#define SVE_PT_VL_ONEXEC\t\t(PR_SVE_SET_VL_ONEXEC >> 16)\n+\n+\n+/*\n+ * The remainder of the SVE state follows struct user_sve_header.  
The\n+ * total size of the SVE state (including header) depends on the\n+ * metadata in the header:  SVE_PT_SIZE(vq, flags) gives the total size\n+ * of the state in bytes, including the header.\n+ *\n+ * Refer to <asm/sigcontext.h> for details of how to pass the correct\n+ * \"vq\" argument to these macros.\n+ */\n+\n+/* Offset from the start of struct user_sve_header to the register data */\n+#define SVE_PT_REGS_OFFSET\t\t\t\t\t\\\n+\t((sizeof(struct sve_context) + (SVE_VQ_BYTES - 1))\t\\\n+\t\t/ SVE_VQ_BYTES * SVE_VQ_BYTES)\n+\n+/*\n+ * The register data content and layout depends on the value of the\n+ * flags field.\n+ */\n+\n+/*\n+ * (flags & SVE_PT_REGS_MASK) == SVE_PT_REGS_FPSIMD case:\n+ *\n+ * The payload starts at offset SVE_PT_FPSIMD_OFFSET, and is of type\n+ * struct user_fpsimd_state.  Additional data might be appended in the\n+ * future: use SVE_PT_FPSIMD_SIZE(vq, flags) to compute the total size.\n+ * SVE_PT_FPSIMD_SIZE(vq, flags) will never be less than\n+ * sizeof(struct user_fpsimd_state).\n+ */\n+\n+#define SVE_PT_FPSIMD_OFFSET\t\tSVE_PT_REGS_OFFSET\n+\n+#define SVE_PT_FPSIMD_SIZE(vq, flags)\t(sizeof(struct user_fpsimd_state))\n+\n+/*\n+ * (flags & SVE_PT_REGS_MASK) == SVE_PT_REGS_SVE case:\n+ *\n+ * The payload starts at offset SVE_PT_SVE_OFFSET, and is of size\n+ * SVE_PT_SVE_SIZE(vq, flags).\n+ *\n+ * Additional macros describe the contents and layout of the payload.\n+ * For each, SVE_PT_SVE_x_OFFSET(args) is the start offset relative to\n+ * the start of struct user_sve_header, and SVE_PT_SVE_x_SIZE(args) is\n+ * the size in bytes:\n+ *\n+ *\tx\ttype\t\t\t\tdescription\n+ *\t-\t----\t\t\t\t-----------\n+ *\tZREGS\t\t\\\n+ *\tZREG\t\t|\n+ *\tPREGS\t\t| refer to <asm/sigcontext.h>\n+ *\tPREG\t\t|\n+ *\tFFR\t\t/\n+ *\n+ *\tFPSR\tuint32_t\t\t\tFPSR\n+ *\tFPCR\tuint32_t\t\t\tFPCR\n+ *\n+ * Additional data might be appended in the future.\n+ */\n+\n+#define SVE_PT_SVE_ZREG_SIZE(vq)\tSVE_SIG_ZREG_SIZE(vq)\n+#define SVE_PT_SVE_PREG_SIZE(vq)\tSVE_SIG_PREG_SIZE(vq)\n+#define SVE_PT_SVE_FFR_SIZE(vq)\t\tSVE_SIG_FFR_SIZE(vq)\n+#define SVE_PT_SVE_FPSR_SIZE\t\tsizeof(__u32)\n+#define SVE_PT_SVE_FPCR_SIZE\t\tsizeof(__u32)\n+\n+#define __SVE_SIG_TO_PT(offset) \\\n+\t((offset) - SVE_SIG_REGS_OFFSET + SVE_PT_REGS_OFFSET)\n+\n+#define SVE_PT_SVE_OFFSET\t\tSVE_PT_REGS_OFFSET\n+\n+#define SVE_PT_SVE_ZREGS_OFFSET \\\n+\t__SVE_SIG_TO_PT(SVE_SIG_ZREGS_OFFSET)\n+#define SVE_PT_SVE_ZREG_OFFSET(vq, n) \\\n+\t__SVE_SIG_TO_PT(SVE_SIG_ZREG_OFFSET(vq, n))\n+#define SVE_PT_SVE_ZREGS_SIZE(vq) \\\n+\t(SVE_PT_SVE_ZREG_OFFSET(vq, SVE_NUM_ZREGS) - SVE_PT_SVE_ZREGS_OFFSET)\n+\n+#define SVE_PT_SVE_PREGS_OFFSET(vq) \\\n+\t__SVE_SIG_TO_PT(SVE_SIG_PREGS_OFFSET(vq))\n+#define SVE_PT_SVE_PREG_OFFSET(vq, n) \\\n+\t__SVE_SIG_TO_PT(SVE_SIG_PREG_OFFSET(vq, n))\n+#define SVE_PT_SVE_PREGS_SIZE(vq) \\\n+\t(SVE_PT_SVE_PREG_OFFSET(vq, SVE_NUM_PREGS) - \\\n+\t\tSVE_PT_SVE_PREGS_OFFSET(vq))\n+\n+#define SVE_PT_SVE_FFR_OFFSET(vq) \\\n+\t__SVE_SIG_TO_PT(SVE_SIG_FFR_OFFSET(vq))\n+\n+#define SVE_PT_SVE_FPSR_OFFSET(vq)\t\t\t\t\\\n+\t((SVE_PT_SVE_FFR_OFFSET(vq) + SVE_PT_SVE_FFR_SIZE(vq) +\t\\\n+\t\t\t(SVE_VQ_BYTES - 1))\t\t\t\\\n+\t\t/ SVE_VQ_BYTES * SVE_VQ_BYTES)\n+#define SVE_PT_SVE_FPCR_OFFSET(vq) \\\n+\t(SVE_PT_SVE_FPSR_OFFSET(vq) + SVE_PT_SVE_FPSR_SIZE)\n+\n+/*\n+ * Any future extension appended after FPCR must be aligned to the next\n+ * 128-bit boundary.\n+ */\n+\n+#define SVE_PT_SVE_SIZE(vq, flags)\t\t\t\t\t\\\n+\t((SVE_PT_SVE_FPCR_OFFSET(vq) + SVE_PT_SVE_FPCR_SIZE\t\t\\\n+\t\t\t- SVE_PT_SVE_OFFSET + (SVE_VQ_BYTES - 
1))\t\\\n+\t\t/ SVE_VQ_BYTES * SVE_VQ_BYTES)\n+\n+#define SVE_PT_SIZE(vq, flags)\t\t\t\t\t\t\\\n+\t (((flags) & SVE_PT_REGS_MASK) == SVE_PT_REGS_SVE ?\t\t\\\n+\t\t  SVE_PT_SVE_OFFSET + SVE_PT_SVE_SIZE(vq, flags)\t\\\n+\t\t: SVE_PT_FPSIMD_OFFSET + SVE_PT_FPSIMD_SIZE(vq, flags))\n+\n #endif /* __ASSEMBLY__ */\n \n #endif /* _UAPI__ASM_PTRACE_H */\ndiff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c\nindex fff9fcf..361c019 100644\n--- a/arch/arm64/kernel/fpsimd.c\n+++ b/arch/arm64/kernel/fpsimd.c\n@@ -303,6 +303,37 @@ void sve_alloc(struct task_struct *task)\n \tBUG_ON(!task->thread.sve_state);\n }\n \n+void fpsimd_sync_to_sve(struct task_struct *task)\n+{\n+\tif (!test_tsk_thread_flag(task, TIF_SVE))\n+\t\tfpsimd_to_sve(task);\n+}\n+\n+void sve_sync_to_fpsimd(struct task_struct *task)\n+{\n+\tif (test_tsk_thread_flag(task, TIF_SVE))\n+\t\tsve_to_fpsimd(task);\n+}\n+\n+void sve_sync_from_fpsimd_zeropad(struct task_struct *task)\n+{\n+\tunsigned int vq;\n+\tvoid *sst = task->thread.sve_state;\n+\tstruct fpsimd_state const *fst = &task->thread.fpsimd_state;\n+\tunsigned int i;\n+\n+\tif (!test_tsk_thread_flag(task, TIF_SVE))\n+\t\treturn;\n+\n+\tvq = sve_vq_from_vl(task->thread.sve_vl);\n+\n+\tmemset(sst, 0, SVE_SIG_REGS_SIZE(vq));\n+\n+\tfor (i = 0; i < 32; ++i)\n+\t\tmemcpy(ZREG(sst, vq, i), &fst->vregs[i],\n+\t\t       sizeof(fst->vregs[i]));\n+}\n+\n /*\n  * Handle SVE state across fork():\n  *\n@@ -459,10 +490,17 @@ static void __init sve_efi_setup(void)\n \t * This is evidence of a crippled system and we are returning void,\n \t * so no attempt is made to handle this situation here.\n \t */\n-\tBUG_ON(!sve_vl_valid(sve_max_vl));\n+\tif (!sve_vl_valid(sve_max_vl))\n+\t\tgoto fail;\n+\n \tefi_sve_state = __alloc_percpu(\n \t\tSVE_SIG_REGS_SIZE(sve_vq_from_vl(sve_max_vl)), SVE_VQ_BYTES);\n \tif (!efi_sve_state)\n+\t\tgoto fail;\n+\n+\treturn;\n+\n+fail:\n \t\tpanic(\"Cannot allocate percpu memory for EFI SVE save/restore\");\n }\n \ndiff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c\nindex 9cbb612..5ef4735b 100644\n--- a/arch/arm64/kernel/ptrace.c\n+++ b/arch/arm64/kernel/ptrace.c\n@@ -32,6 +32,7 @@\n #include <linux/security.h>\n #include <linux/init.h>\n #include <linux/signal.h>\n+#include <linux/string.h>\n #include <linux/uaccess.h>\n #include <linux/perf_event.h>\n #include <linux/hw_breakpoint.h>\n@@ -40,6 +41,7 @@\n #include <linux/elf.h>\n \n #include <asm/compat.h>\n+#include <asm/cpufeature.h>\n #include <asm/debug-monitors.h>\n #include <asm/pgtable.h>\n #include <asm/stacktrace.h>\n@@ -618,33 +620,66 @@ static int gpr_set(struct task_struct *target, const struct user_regset *regset,\n /*\n  * TODO: update fp accessors for lazy context switching (sync/flush hwstate)\n  */\n-static int fpr_get(struct task_struct *target, const struct user_regset *regset,\n-\t\t   unsigned int pos, unsigned int count,\n-\t\t   void *kbuf, void __user *ubuf)\n+static int __fpr_get(struct task_struct *target,\n+\t\t     const struct user_regset *regset,\n+\t\t     unsigned int pos, unsigned int count,\n+\t\t     void *kbuf, void __user *ubuf, unsigned int start_pos)\n {\n \tstruct user_fpsimd_state *uregs;\n+\n+\tsve_sync_to_fpsimd(target);\n+\n \turegs = &target->thread.fpsimd_state.user_fpsimd;\n \n+\treturn user_regset_copyout(&pos, &count, &kbuf, &ubuf, uregs,\n+\t\t\t\t   start_pos, start_pos + sizeof(*uregs));\n+}\n+\n+static int fpr_get(struct task_struct *target, const struct user_regset *regset,\n+\t\t   unsigned int pos, unsigned int count,\n+\t\t   void 
*kbuf, void __user *ubuf)\n+{\n \tif (target == current)\n \t\tfpsimd_preserve_current_state();\n \n-\treturn user_regset_copyout(&pos, &count, &kbuf, &ubuf, uregs, 0, -1);\n+\treturn __fpr_get(target, regset, pos, count, kbuf, ubuf, 0);\n }\n \n-static int fpr_set(struct task_struct *target, const struct user_regset *regset,\n-\t\t   unsigned int pos, unsigned int count,\n-\t\t   const void *kbuf, const void __user *ubuf)\n+static int __fpr_set(struct task_struct *target,\n+\t\t     const struct user_regset *regset,\n+\t\t     unsigned int pos, unsigned int count,\n+\t\t     const void *kbuf, const void __user *ubuf,\n+\t\t     unsigned int start_pos)\n {\n \tint ret;\n \tstruct user_fpsimd_state newstate =\n \t\ttarget->thread.fpsimd_state.user_fpsimd;\n \n-\tret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &newstate, 0, -1);\n+\tsve_sync_to_fpsimd(target);\n+\n+\tret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &newstate,\n+\t\t\t\t start_pos, start_pos + sizeof(newstate));\n \tif (ret)\n \t\treturn ret;\n \n \ttarget->thread.fpsimd_state.user_fpsimd = newstate;\n+\n+\treturn ret;\n+}\n+\n+static int fpr_set(struct task_struct *target, const struct user_regset *regset,\n+\t\t   unsigned int pos, unsigned int count,\n+\t\t   const void *kbuf, const void __user *ubuf)\n+{\n+\tint ret;\n+\n+\tret = __fpr_set(target, regset, pos, count, kbuf, ubuf, 0);\n+\tif (ret)\n+\t\treturn ret;\n+\n+\tsve_sync_from_fpsimd_zeropad(target);\n \tfpsimd_flush_task_state(target);\n+\n \treturn ret;\n }\n \n@@ -702,6 +737,210 @@ static int system_call_set(struct task_struct *target,\n \treturn ret;\n }\n \n+#ifdef CONFIG_ARM64_SVE\n+\n+static void sve_init_header_from_task(struct user_sve_header *header,\n+\t\t\t\t      struct task_struct *target)\n+{\n+\tunsigned int vq;\n+\n+\tmemset(header, 0, sizeof(*header));\n+\n+\theader->flags = test_tsk_thread_flag(target, TIF_SVE) ?\n+\t\tSVE_PT_REGS_SVE : SVE_PT_REGS_FPSIMD;\n+\tif (test_tsk_thread_flag(target, TIF_SVE_VL_INHERIT))\n+\t\theader->flags |= SVE_PT_VL_INHERIT;\n+\n+\theader->vl = target->thread.sve_vl;\n+\tvq = sve_vq_from_vl(header->vl);\n+\n+\tif (WARN_ON(!sve_vl_valid(sve_max_vl)))\n+\t\theader->max_vl = header->vl;\n+\n+\theader->size = SVE_PT_SIZE(vq, header->flags);\n+\theader->max_size = SVE_PT_SIZE(sve_vq_from_vl(header->max_vl),\n+\t\t\t\t      SVE_PT_REGS_SVE);\n+}\n+\n+static unsigned int sve_size_from_header(struct user_sve_header const *header)\n+{\n+\treturn ALIGN(header->size, SVE_VQ_BYTES);\n+}\n+\n+static unsigned int sve_get_size(struct task_struct *target,\n+\t\t\t\t const struct user_regset *regset)\n+{\n+\tstruct user_sve_header header;\n+\n+\tif (!system_supports_sve())\n+\t\treturn 0;\n+\n+\tsve_init_header_from_task(&header, target);\n+\treturn sve_size_from_header(&header);\n+}\n+\n+static int sve_get(struct task_struct *target,\n+\t\t   const struct user_regset *regset,\n+\t\t   unsigned int pos, unsigned int count,\n+\t\t   void *kbuf, void __user *ubuf)\n+{\n+\tint ret;\n+\tstruct user_sve_header header;\n+\tunsigned int vq;\n+\tunsigned long start, end;\n+\n+\tif (!system_supports_sve())\n+\t\treturn -EINVAL;\n+\n+\t/* Header */\n+\tsve_init_header_from_task(&header, target);\n+\tvq = sve_vq_from_vl(header.vl);\n+\n+\tret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &header,\n+\t\t\t\t  0, sizeof(header));\n+\tif (ret)\n+\t\treturn ret;\n+\n+\tif (target == current)\n+\t\tfpsimd_preserve_current_state();\n+\n+\t/* Registers: FPSIMD-only case */\n+\n+\tBUILD_BUG_ON(SVE_PT_FPSIMD_OFFSET != 
sizeof(header));\n+\tif ((header.flags & SVE_PT_REGS_MASK) == SVE_PT_REGS_FPSIMD)\n+\t\treturn __fpr_get(target, regset, pos, count, kbuf, ubuf,\n+\t\t\t\t SVE_PT_FPSIMD_OFFSET);\n+\n+\t/* Otherwise: full SVE case */\n+\n+\tBUILD_BUG_ON(SVE_PT_SVE_OFFSET != sizeof(header));\n+\tstart = SVE_PT_SVE_OFFSET;\n+\tend = SVE_PT_SVE_FFR_OFFSET(vq) + SVE_PT_SVE_FFR_SIZE(vq);\n+\tret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,\n+\t\t\t\t  target->thread.sve_state,\n+\t\t\t\t  start, end);\n+\tif (ret)\n+\t\treturn ret;\n+\n+\tstart = end;\n+\tend = SVE_PT_SVE_FPSR_OFFSET(vq);\n+\tret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,\n+\t\t\t\t       start, end);\n+\tif (ret)\n+\t\treturn ret;\n+\n+\t/*\n+\t * Copy fpsr, and fpcr which must follow contiguously in\n+\t * struct fpsimd_state:\n+\t */\n+\tstart = end;\n+\tend = SVE_PT_SVE_FPCR_OFFSET(vq) + SVE_PT_SVE_FPCR_SIZE;\n+\tret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,\n+\t\t\t\t  &target->thread.fpsimd_state.fpsr,\n+\t\t\t\t  start, end);\n+\tif (ret)\n+\t\treturn ret;\n+\n+\tstart = end;\n+\tend = sve_size_from_header(&header);\n+\treturn user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,\n+\t\t\t\t\tstart, end);\n+}\n+\n+static int sve_set(struct task_struct *target,\n+\t\t   const struct user_regset *regset,\n+\t\t   unsigned int pos, unsigned int count,\n+\t\t   const void *kbuf, const void __user *ubuf)\n+{\n+\tint ret;\n+\tstruct user_sve_header header;\n+\tunsigned int vq;\n+\tunsigned long start, end;\n+\n+\tif (!system_supports_sve())\n+\t\treturn -EINVAL;\n+\n+\t/* Header */\n+\tif (count < sizeof(header))\n+\t\treturn -EINVAL;\n+\tret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &header,\n+\t\t\t\t 0, sizeof(header));\n+\tif (ret)\n+\t\tgoto out;\n+\n+\t/*\n+\t * Apart from PT_SVE_REGS_MASK, all PT_SVE_* flags are consumed by\n+\t * sve_set_vector_length(), which will also validate them for us:\n+\t */\n+\tret = sve_set_vector_length(target, header.vl,\n+\t\t\t\t    header.flags & ~SVE_PT_REGS_MASK);\n+\tif (ret)\n+\t\tgoto out;\n+\n+\t/* Actual VL set may be less than the user asked for: */\n+\tvq = sve_vq_from_vl(target->thread.sve_vl);\n+\n+\t/* Registers: FPSIMD-only case */\n+\n+\tBUILD_BUG_ON(SVE_PT_FPSIMD_OFFSET != sizeof(header));\n+\tif ((header.flags & SVE_PT_REGS_MASK) == SVE_PT_REGS_FPSIMD) {\n+\t\tsve_sync_to_fpsimd(target);\n+\n+\t\tret = __fpr_set(target, regset, pos, count, kbuf, ubuf,\n+\t\t\t\tSVE_PT_FPSIMD_OFFSET);\n+\t\tclear_tsk_thread_flag(target, TIF_SVE);\n+\t\tgoto out;\n+\t}\n+\n+\t/* Otherwise: full SVE case */\n+\n+\t/*\n+\t * If setting a different VL from the requested VL and there is\n+\t * register data, the data layout will be wrong: don't even\n+\t * try to set the registers in this case.\n+\t */\n+\tif (count && vq != sve_vq_from_vl(header.vl)) {\n+\t\tret = -EIO;\n+\t\tgoto out;\n+\t}\n+\n+\tsve_alloc(target);\n+\tfpsimd_sync_to_sve(target);\n+\tset_tsk_thread_flag(target, TIF_SVE);\n+\n+\tBUILD_BUG_ON(SVE_PT_SVE_OFFSET != sizeof(header));\n+\tstart = SVE_PT_SVE_OFFSET;\n+\tend = SVE_PT_SVE_FFR_OFFSET(vq) + SVE_PT_SVE_FFR_SIZE(vq);\n+\tret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,\n+\t\t\t\t target->thread.sve_state,\n+\t\t\t\t start, end);\n+\tif (ret)\n+\t\tgoto out;\n+\n+\tstart = end;\n+\tend = SVE_PT_SVE_FPSR_OFFSET(vq);\n+\tret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,\n+\t\t\t\t\tstart, end);\n+\tif (ret)\n+\t\tgoto out;\n+\n+\t/*\n+\t * Copy fpsr, and fpcr which must follow contiguously in\n+\t * struct fpsimd_state:\n+\t */\n+\tstart = 
end;\n+\tend = SVE_PT_SVE_FPCR_OFFSET(vq) + SVE_PT_SVE_FPCR_SIZE;\n+\tret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,\n+\t\t\t\t &target->thread.fpsimd_state.fpsr,\n+\t\t\t\t start, end);\n+\n+out:\n+\tfpsimd_flush_task_state(target);\n+\treturn ret;\n+}\n+\n+#endif /* CONFIG_ARM64_SVE */\n+\n enum aarch64_regset {\n \tREGSET_GPR,\n \tREGSET_FPR,\n@@ -711,6 +950,9 @@ enum aarch64_regset {\n \tREGSET_HW_WATCH,\n #endif\n \tREGSET_SYSTEM_CALL,\n+#ifdef CONFIG_ARM64_SVE\n+\tREGSET_SVE,\n+#endif\n };\n \n static const struct user_regset aarch64_regsets[] = {\n@@ -768,6 +1010,18 @@ static const struct user_regset aarch64_regsets[] = {\n \t\t.get = system_call_get,\n \t\t.set = system_call_set,\n \t},\n+#ifdef CONFIG_ARM64_SVE\n+\t[REGSET_SVE] = { /* Scalable Vector Extension */\n+\t\t.core_note_type = NT_ARM_SVE,\n+\t\t.n = DIV_ROUND_UP(SVE_PT_SIZE(SVE_VQ_MAX, SVE_PT_REGS_SVE),\n+\t\t\t\t  SVE_VQ_BYTES),\n+\t\t.size = SVE_VQ_BYTES,\n+\t\t.align = SVE_VQ_BYTES,\n+\t\t.get = sve_get,\n+\t\t.set = sve_set,\n+\t\t.get_size = sve_get_size,\n+\t},\n+#endif\n };\n \n static const struct user_regset_view user_aarch64_view = {\ndiff --git a/include/uapi/linux/elf.h b/include/uapi/linux/elf.h\nindex b5280db..735b8f4 100644\n--- a/include/uapi/linux/elf.h\n+++ b/include/uapi/linux/elf.h\n@@ -416,6 +416,7 @@ typedef struct elf64_shdr {\n #define NT_ARM_HW_BREAK\t0x402\t\t/* ARM hardware breakpoint registers */\n #define NT_ARM_HW_WATCH\t0x403\t\t/* ARM hardware watchpoint registers */\n #define NT_ARM_SYSTEM_CALL\t0x404\t/* ARM system call number */\n+#define NT_ARM_SVE\t0x405\t\t/* ARM Scalable Vector Extension registers */\n #define NT_METAG_CBUF\t0x500\t\t/* Metag catch buffer registers */\n #define NT_METAG_RPIPE\t0x501\t\t/* Metag read pipeline state */\n #define NT_METAG_TLS\t0x502\t\t/* Metag TLS pointer */\n",
    "prefixes": [
        "v2",
        "19/28"
    ]
}
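
As the Allow header above indicates, the resource also accepts PUT and PATCH for updates. The sketch below shows a partial update with Python's requests library; it is illustrative only: the token is a placeholder, write access is limited to users the Patchwork instance authorises (typically a project maintainer with an API token from their profile), and the "state" value must be one of the state names configured on that instance.

import requests

url = "http://patchwork.ozlabs.org/api/patches/808339/"

# Placeholder token: a real one would come from the user's Patchwork profile.
headers = {
    "Authorization": "Token 0123456789abcdef0123456789abcdef",
    "Accept": "application/json",
}

# PATCH changes only the fields named in the request body.
resp = requests.patch(url, headers=headers,
                      json={"state": "under-review", "archived": False})
resp.raise_for_status()

print(resp.json()["state"])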