get:
Show a patch.

patch:
Partially update a patch.

put:
Update a patch.

GET /api/1.1/patches/2228806/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 2228806,
    "url": "http://patchwork.ozlabs.org/api/1.1/patches/2228806/?format=api",
    "web_url": "http://patchwork.ozlabs.org/project/linuxppc-dev/patch/20260427122742.210074-8-mkchauras@gmail.com/",
    "project": {
        "id": 2,
        "url": "http://patchwork.ozlabs.org/api/1.1/projects/2/?format=api",
        "name": "Linux PPC development",
        "link_name": "linuxppc-dev",
        "list_id": "linuxppc-dev.lists.ozlabs.org",
        "list_email": "linuxppc-dev@lists.ozlabs.org",
        "web_url": "https://github.com/linuxppc/wiki/wiki",
        "scm_url": "https://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux.git",
        "webscm_url": "https://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux.git/"
    },
    "msgid": "<20260427122742.210074-8-mkchauras@gmail.com>",
    "date": "2026-04-27T12:27:41",
    "name": "[v5,7/8] powerpc: Enable GENERIC_ENTRY feature",
    "commit_ref": null,
    "pull_url": null,
    "state": "new",
    "archived": false,
    "hash": "1472860f200b1f3df3f47b7ff31b925f12cd0fd1",
    "submitter": {
        "id": 92575,
        "url": "http://patchwork.ozlabs.org/api/1.1/people/92575/?format=api",
        "name": "Mukesh Kumar Chaurasiya",
        "email": "mkchauras@gmail.com"
    },
    "delegate": null,
    "mbox": "http://patchwork.ozlabs.org/project/linuxppc-dev/patch/20260427122742.210074-8-mkchauras@gmail.com/mbox/",
    "series": [
        {
            "id": 501638,
            "url": "http://patchwork.ozlabs.org/api/1.1/series/501638/?format=api",
            "web_url": "http://patchwork.ozlabs.org/project/linuxppc-dev/list/?series=501638",
            "date": "2026-04-27T12:27:34",
            "name": "Generic IRQ entry/exit support for powerpc",
            "version": 5,
            "mbox": "http://patchwork.ozlabs.org/series/501638/mbox/"
        }
    ],
    "comments": "http://patchwork.ozlabs.org/api/patches/2228806/comments/",
    "check": "pending",
    "checks": "http://patchwork.ozlabs.org/api/patches/2228806/checks/",
    "tags": {},
    "headers": {
        "Return-Path": "\n <linuxppc-dev+bounces-20166-incoming=patchwork.ozlabs.org@lists.ozlabs.org>",
        "X-Original-To": [
            "incoming@patchwork.ozlabs.org",
            "linuxppc-dev@lists.ozlabs.org"
        ],
        "Delivered-To": "patchwork-incoming@legolas.ozlabs.org",
        "Authentication-Results": [
            "legolas.ozlabs.org;\n\tdkim=pass (2048-bit key;\n unprotected) header.d=gmail.com header.i=@gmail.com header.a=rsa-sha256\n header.s=20251104 header.b=P5vfBb4O;\n\tdkim-atps=neutral",
            "legolas.ozlabs.org;\n spf=pass (sender SPF authorized) smtp.mailfrom=lists.ozlabs.org\n (client-ip=112.213.38.117; helo=lists.ozlabs.org;\n envelope-from=linuxppc-dev+bounces-20166-incoming=patchwork.ozlabs.org@lists.ozlabs.org;\n receiver=patchwork.ozlabs.org)",
            "lists.ozlabs.org;\n arc=none smtp.remote-ip=\"2607:f8b0:4864:20::42d\"",
            "lists.ozlabs.org;\n dmarc=pass (p=none dis=none) header.from=gmail.com",
            "lists.ozlabs.org;\n\tdkim=pass (2048-bit key;\n unprotected) header.d=gmail.com header.i=@gmail.com header.a=rsa-sha256\n header.s=20251104 header.b=P5vfBb4O;\n\tdkim-atps=neutral",
            "lists.ozlabs.org;\n spf=pass (sender SPF authorized) smtp.mailfrom=gmail.com\n (client-ip=2607:f8b0:4864:20::42d; helo=mail-pf1-x42d.google.com;\n envelope-from=mkchauras@gmail.com; receiver=lists.ozlabs.org)"
        ],
        "Received": [
            "from lists.ozlabs.org (lists.ozlabs.org [112.213.38.117])\n\t(using TLSv1.3 with cipher TLS_AES_256_GCM_SHA384 (256/256 bits)\n\t key-exchange x25519)\n\t(No client certificate requested)\n\tby legolas.ozlabs.org (Postfix) with ESMTPS id 4g42tz1y0Rz1yHv\n\tfor <incoming@patchwork.ozlabs.org>; Mon, 27 Apr 2026 22:29:31 +1000 (AEST)",
            "from boromir.ozlabs.org (localhost [127.0.0.1])\n\tby lists.ozlabs.org (Postfix) with ESMTP id 4g42tz0Vwnz2ypW;\n\tMon, 27 Apr 2026 22:29:31 +1000 (AEST)",
            "from mail-pf1-x42d.google.com (mail-pf1-x42d.google.com\n [IPv6:2607:f8b0:4864:20::42d])\n\t(using TLSv1.3 with cipher TLS_AES_256_GCM_SHA384 (256/256 bits)\n\t key-exchange x25519 server-signature RSA-PSS (2048 bits) server-digest\n SHA256)\n\t(No client certificate requested)\n\tby lists.ozlabs.org (Postfix) with ESMTPS id 4g42tx1pGSz2y2B\n\tfor <linuxppc-dev@lists.ozlabs.org>; Mon, 27 Apr 2026 22:29:29 +1000 (AEST)",
            "by mail-pf1-x42d.google.com with SMTP id\n d2e1a72fcca58-82f0884bcfaso6898025b3a.1\n        for <linuxppc-dev@lists.ozlabs.org>;\n Mon, 27 Apr 2026 05:29:29 -0700 (PDT)",
            "from li-1a3e774c-28e4-11b2-a85c-acc9f2883e29.ibm.com ([129.41.58.4])\n        by smtp.gmail.com with ESMTPSA id\n d2e1a72fcca58-82f8e9f7735sm32733466b3a.21.2026.04.27.05.29.13\n        (version=TLS1_3 cipher=TLS_AES_256_GCM_SHA384 bits=256/256);\n        Mon, 27 Apr 2026 05:29:26 -0700 (PDT)"
        ],
        "ARC-Seal": "i=1; a=rsa-sha256; d=lists.ozlabs.org; s=201707; t=1777292970;\n\tcv=none;\n b=VYiLN4TRCo6B85/pj5D+bT/wPn+JfVoBvfoy3SPiEIPDC/i4aBWVgen+IKFE44oJj4Jg7guRoio+T4KLsivN2JpSBTCtlsDYtCIi351XA3rXYpDURtXC7rIpmV2snLCGgSWaeQLvqMCSD+zLUhXLM6PlXRCtS+5B4qXAkvQS97bTrbXt/o4xSFfjgl26IsRYje3801RedHGT8E8leKVtomNXi+p9ZXNwZBDbPipKLbTWe23tCyzVIcQZfz2VwgBTh5nMWE+JtqMKj917vKfQ+HPCmbzKeQmPggg4fn0OUzXzH20ODP0fUtgbNVBJW+YUGKjQ4SBNLU7YMouVLLy5kg==",
        "ARC-Message-Signature": "i=1; a=rsa-sha256; d=lists.ozlabs.org; s=201707;\n\tt=1777292970; c=relaxed/relaxed;\n\tbh=BcERRxmHDEF5joRyKsvM8IXhICq8izEhRkUJG7EtPXQ=;\n\th=From:To:Cc:Subject:Date:Message-ID:In-Reply-To:References:\n\t MIME-Version:Content-Type;\n b=kCOmY89qnW1IUYij6N7KY/1JmpFX7e0bjPe7OuWddeO6Ydpnl/G0ncneYHgLGu8W/zeuuCClKEMJtBlE60A8ZGtJ7AOGwVCEgRi1SNMIEi308ITE4vWr6CHW0kwyNoBSguLT74Tu1DBVEZBZQFt2d2/yfT2P9MTtUhPVoXQSmJ3GpAd2MHnKyiDYOAHmQ6UJQUdlJWEkqBFuWoA0SKDl/YM40gB5YZPG9Rkp3tVM91hyk5feBGA53OwgKgyAVrcnT6OIieWcQv4PG0pzOT0QJCrPthO6EXrczxSsIMMR9YqlVMYBdgomYVgMGY92BEg+jWQmM+41hSx+SCcb0WFDIA==",
        "ARC-Authentication-Results": "i=1; lists.ozlabs.org;\n dmarc=pass (p=none dis=none) header.from=gmail.com; dkim=pass (2048-bit key;\n unprotected) header.d=gmail.com header.i=@gmail.com header.a=rsa-sha256\n header.s=20251104 header.b=P5vfBb4O; dkim-atps=neutral;\n spf=pass (client-ip=2607:f8b0:4864:20::42d; helo=mail-pf1-x42d.google.com;\n envelope-from=mkchauras@gmail.com;\n receiver=lists.ozlabs.org) smtp.mailfrom=gmail.com",
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed;\n        d=gmail.com; s=20251104; t=1777292967; x=1777897767;\n darn=lists.ozlabs.org;\n        h=content-transfer-encoding:mime-version:references:in-reply-to\n         :message-id:date:subject:cc:to:from:from:to:cc:subject:date\n         :message-id:reply-to;\n        bh=BcERRxmHDEF5joRyKsvM8IXhICq8izEhRkUJG7EtPXQ=;\n        b=P5vfBb4OFY+5eZrO/83EOAdof5SM3mdZ3blo2CZdNzY3GHYaLrd82Mb9kIeKTWtY0v\n         SbKL8Ox4upLQ8gRf/5tEGQgvsJMEK4uMsVv7Z9bDZ8NHF6uxzQ7FoBQinu1RZsqTlqfA\n         eEqSlaPkcOpnvca3ov3iJb6hxqXkgVhFV7AifDm7s7TcaVI7LjDYuY9y+so9jBWHgW4t\n         ldtvcHfk6PFLNJ42oplgW9JO0vacHLC6caQC1uHMduptVzOLAasWA01bbnp4HWf/R2zG\n         r5HbSer7IWVgKl8q+R92Q1wO9QzAI6kZnQa90g9ulQ0hzkwVntg/gXLEk65kFiHJji36\n         VIKw==",
        "X-Google-DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed;\n        d=1e100.net; s=20251104; t=1777292967; x=1777897767;\n        h=content-transfer-encoding:mime-version:references:in-reply-to\n         :message-id:date:subject:cc:to:from:x-gm-gg:x-gm-message-state:from\n         :to:cc:subject:date:message-id:reply-to;\n        bh=BcERRxmHDEF5joRyKsvM8IXhICq8izEhRkUJG7EtPXQ=;\n        b=gFdXZtjM5xyc2Bs9kML8PTZf3vNkfIALTdsTgw0hDZievKQxFn8i2qwo5Rlg2XhlbR\n         fmgBDJe0T7Ygtu2mQWS2AyVYprrU/v73XB5cvYw42HB9+CmRxEIA2dV9e8jeAGJY/7Wz\n         YgMQhs+ox7zhGLXV9xmLgLOJPHoy2IyYFrIYm44IIqoc6aHVlzfSZb8tStTPC11RyEJ0\n         TeYET338e5LNuqJsJoj7cQAKqih9Ne4NfZ6nX3Mqxyc3rFz5yf65q6Oj5EH3YB47qHNF\n         1Fj/ZF7elRrcSXj624ob76Jeu7dZmQ6zp+JgTAzRQpMYRJVgmjl2yb4FCzIphLkDEfie\n         yxRQ==",
        "X-Forwarded-Encrypted": "i=1;\n AFNElJ95zc70hSV/mDB/sX8G8F43X72K/Wq8+eK5dxXAx2+TuEgbyHFsQrnfUTAkzDiHvlc1Mim5FTYspqGdmm4=@lists.ozlabs.org",
        "X-Gm-Message-State": "AOJu0Yx+g+PQz6OhwfNmtOCc2HkjSycTlz2q1vCvbdwm/SwiK7wJKFe1\n\tVhRMAOPspiYwUOLYNa3whCv+cerGkPSrAPO9+mCHF5na7FBc09dxb9es",
        "X-Gm-Gg": "AeBDiesI4JKg7DZrVi9ay6HM8HDr6XYIg7FiESm3oafMGtleizwszAAXQyg6mY5oIwj\n\t+Q2KDDmO+OBjAGiZVNReTX1XyPjDYuv58pGQRMV2WSdZq+bN0HB148+IdXnI5DH6/N+ErY5PQ/u\n\t4Ni+Q3VGJNdaCEoo0MkWnYWT5pHQe+MFKN5siDXaoanDG05YCVhsk2IAOVZLo37zJnHeYpkW+gZ\n\tOR5+qOMqtKdFPFDMKR2c8qBCUFqfrERL0LzLPluEx+Y/mbodd4SvJIGa8Y455VRQt4Chov2d1a8\n\tE/SaNDFa1BuA+to4P/2DyqgSVxb/a8377vlKtjKZ3QkqVQpeHUctcKDIUGX5trLJGwJnqfIWA/f\n\tqg8E6VJ2EaIPbKUrdArcOUafueo6CQfxRJg6JB2LtmOfIlhIX7MP7qtXVm3DxUO6Ew9JOYwgppv\n\tDVJYdXmcBTTVtHimGbZ6ITr7d4mB3cf8O8/pa1G3I76Y0gtik0Pl/5UXssWXNa2m5CkZISXQ+UT\n\tE+nrw==",
        "X-Received": "by 2002:a05:6a00:1d88:b0:82d:24f:2510 with SMTP id\n d2e1a72fcca58-82f8c9512c8mr45441683b3a.50.1777292966884;\n        Mon, 27 Apr 2026 05:29:26 -0700 (PDT)",
        "From": "\"Mukesh Kumar Chaurasiya (IBM)\" <mkchauras@gmail.com>",
        "To": "maddy@linux.ibm.com,\n\tmpe@ellerman.id.au,\n\tnpiggin@gmail.com,\n\tchleroy@kernel.org,\n\tryabinin.a.a@gmail.com,\n\tglider@google.com,\n\tandreyknvl@gmail.com,\n\tdvyukov@google.com,\n\tvincenzo.frascino@arm.com,\n\toleg@redhat.com,\n\tkees@kernel.org,\n\tluto@amacapital.net,\n\twad@chromium.org,\n\tmchauras@linux.ibm.com,\n\tsshegde@linux.ibm.com,\n\tthuth@redhat.com,\n\truanjinjie@huawei.com,\n\takpm@linux-foundation.org,\n\tmacro@orcam.me.uk,\n\tldv@strace.io,\n\tcharlie@rivosinc.com,\n\tdeller@gmx.de,\n\tkevin.brodsky@arm.com,\n\tritesh.list@gmail.com,\n\tyeoreum.yun@arm.com,\n\tagordeev@linux.ibm.com,\n\tsegher@kernel.crashing.org,\n\tmark.rutland@arm.com,\n\tryan.roberts@arm.com,\n\tpmladek@suse.com,\n\tfeng.tang@linux.alibaba.com,\n\tpeterz@infradead.org,\n\tkan.liang@linux.intel.com,\n\tlinuxppc-dev@lists.ozlabs.org,\n\tlinux-kernel@vger.kernel.org,\n\tkasan-dev@googlegroups.com",
        "Cc": "Mukesh Kumar Chaurasiya <mkchauras@linux.ibm.com>,\n\tSamir M <samir@linux.ibm.com>,\n\tDavid Gow <davidgow@google.com>,\n\tVenkat Rao Bagalkote <venkat88@linux.ibm.com>",
        "Subject": "[PATCH v5 7/8] powerpc: Enable GENERIC_ENTRY feature",
        "Date": "Mon, 27 Apr 2026 17:57:41 +0530",
        "Message-ID": "<20260427122742.210074-8-mkchauras@gmail.com>",
        "X-Mailer": "git-send-email 2.53.0",
        "In-Reply-To": "<20260427122742.210074-1-mkchauras@gmail.com>",
        "References": "<20260427122742.210074-1-mkchauras@gmail.com>",
        "X-Mailing-List": "linuxppc-dev@lists.ozlabs.org",
        "List-Id": "<linuxppc-dev.lists.ozlabs.org>",
        "List-Help": "<mailto:linuxppc-dev+help@lists.ozlabs.org>",
        "List-Owner": "<mailto:linuxppc-dev+owner@lists.ozlabs.org>",
        "List-Post": "<mailto:linuxppc-dev@lists.ozlabs.org>",
        "List-Archive": "<https://lore.kernel.org/linuxppc-dev/>,\n  <https://lists.ozlabs.org/pipermail/linuxppc-dev/>",
        "List-Subscribe": "<mailto:linuxppc-dev+subscribe@lists.ozlabs.org>,\n  <mailto:linuxppc-dev+subscribe-digest@lists.ozlabs.org>,\n  <mailto:linuxppc-dev+subscribe-nomail@lists.ozlabs.org>",
        "List-Unsubscribe": "<mailto:linuxppc-dev+unsubscribe@lists.ozlabs.org>",
        "Precedence": "list",
        "MIME-Version": "1.0",
        "Content-Type": "text/plain; charset=UTF-8",
        "Content-Transfer-Encoding": "8bit",
        "X-Spam-Status": "No, score=-0.2 required=3.0 tests=DKIM_SIGNED,DKIM_VALID,\n\tDKIM_VALID_AU,DKIM_VALID_EF,FREEMAIL_FROM,RCVD_IN_DNSWL_NONE,\n\tSPF_HELO_NONE,SPF_PASS autolearn=disabled version=4.0.1 OzLabs 8",
        "X-Spam-Checker-Version": "SpamAssassin 4.0.1 (2024-03-25) on lists.ozlabs.org"
    },
    "content": "From: Mukesh Kumar Chaurasiya <mkchauras@linux.ibm.com>\n\nEnable the generic IRQ entry/exit infrastructure on PowerPC by selecting\nGENERIC_ENTRY and integrating the architecture-specific interrupt and\nsyscall handlers with the generic entry/exit APIs.\n\nThis change replaces PowerPC’s local interrupt entry/exit handling with\ncalls to the generic irqentry_* helpers, aligning the architecture with\nthe common kernel entry model. The macros that define interrupt, async,\nand NMI handlers are updated to use irqentry_enter()/irqentry_exit()\nand irqentry_nmi_enter()/irqentry_nmi_exit() where applicable also\nconvert the PowerPC syscall entry and exit paths to use the generic\nentry/exit framework and integrating with the common syscall handling\nroutines.\n\nKey updates include:\n - The architecture now selects GENERIC_ENTRY in Kconfig.\n - Replace interrupt_enter/exit_prepare() with arch_interrupt_* helpers.\n - Integrate irqentry_enter()/exit() in standard and async interrupt paths.\n - Integrate irqentry_nmi_enter()/exit() in NMI handlers.\n - Remove redundant irq_enter()/irq_exit() calls now handled generically.\n - Use irqentry_exit_cond_resched() for preemption checks.\n - interrupt.c and syscall.c are simplified to delegate context\n   management and user exit handling to the generic entry path.\n - The new pt_regs field `exit_flags` introduced earlier is now used\n   to carry per-syscall exit state flags (e.g. 
_TIF_RESTOREALL).\n - Remove unused code.\n\nThis change establishes the necessary wiring for PowerPC to use the\ngeneric IRQ entry/exit framework while maintaining existing semantics.\nThis aligns PowerPC with the common entry code used by other\narchitectures and reduces duplicated logic around syscall tracing,\ncontext tracking, and signal handling.\n\nThe performance benchmarks from perf bench basic syscall are below:\n\nperf bench syscall usec/op (-ve is improvement)\n\n| Syscall | Base        | test        | change % |\n| ------- | ----------- | ----------- | -------- |\n| basic   | 0.093543    | 0.093023    | -0.56    |\n| execve  | 446.557781  | 450.107172  | +0.79    |\n| fork    | 1142.204391 | 1156.377214 | +1.24    |\n| getpgid | 0.097666    | 0.092677    | -5.11    |\n\nperf bench syscall ops/sec (+ve is improvement)\n\n| Syscall | Base     | New      | change % |\n| ------- | -------- | -------- | -------- |\n| basic   | 10690548 | 10750140 | +0.56    |\n| execve  | 2239     | 2221     | -0.80    |\n| fork    | 875      | 864      | -1.26    |\n| getpgid | 10239026 | 10790324 | +5.38    |\n\nIPI latency benchmark (-ve is improvement)\n\n| Metric         | Base (ns)     | New (ns)      | % Change |\n| -------------- | ------------- | ------------- | -------- |\n| Dry run        | 583136.56     | 584136.35     | 0.17%    |\n| Self IPI       | 4167393.42    | 4149093.90    | -0.44%   |\n| Normal IPI     | 61769347.82   | 61753728.39   | -0.03%   |\n| Broadcast IPI  | 2235584825.02 | 2227521401.45 | -0.36%   |\n| Broadcast lock | 2164964433.31 | 2125658641.76 | -1.82%   |\n\nThats very close to performance earlier with arch specific handling.\n\nSigned-off-by: Mukesh Kumar Chaurasiya <mkchauras@linux.ibm.com>\nTested-by: Samir M <samir@linux.ibm.com>\nTested-by: David Gow <davidgow@google.com>\nTested-by: Venkat Rao Bagalkote <venkat88@linux.ibm.com>\nReviewed-by: Shrikanth Hegde <sshegde@linux.ibm.com>\n---\n arch/powerpc/Kconfig                 |   1 
+\n arch/powerpc/include/asm/interrupt.h | 384 +++++----------------------\n arch/powerpc/include/asm/kasan.h     |  15 +-\n arch/powerpc/kernel/interrupt.c      | 250 +++--------------\n arch/powerpc/kernel/ptrace/ptrace.c  |   3 -\n arch/powerpc/kernel/signal.c         |   8 +\n arch/powerpc/kernel/syscall.c        | 119 +--------\n 7 files changed, 124 insertions(+), 656 deletions(-)",
    "diff": "diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig\nindex e93df95b79e7..81642206f7de 100644\n--- a/arch/powerpc/Kconfig\n+++ b/arch/powerpc/Kconfig\n@@ -206,6 +206,7 @@ config PPC\n \tselect GENERIC_CPU_AUTOPROBE\n \tselect GENERIC_CPU_VULNERABILITIES\tif PPC_BARRIER_NOSPEC\n \tselect GENERIC_EARLY_IOREMAP\n+\tselect GENERIC_ENTRY\n \tselect GENERIC_GETTIMEOFDAY\n \tselect GENERIC_IDLE_POLL_SETUP\n \tselect GENERIC_IOREMAP\ndiff --git a/arch/powerpc/include/asm/interrupt.h b/arch/powerpc/include/asm/interrupt.h\nindex 0e2cddf8bd21..fb42a664ae54 100644\n--- a/arch/powerpc/include/asm/interrupt.h\n+++ b/arch/powerpc/include/asm/interrupt.h\n@@ -66,11 +66,9 @@\n \n #ifndef __ASSEMBLER__\n \n-#include <linux/context_tracking.h>\n-#include <linux/hardirq.h>\n-#include <asm/cputime.h>\n-#include <asm/firmware.h>\n-#include <asm/ftrace.h>\n+#include <linux/sched/debug.h> /* for show_regs */\n+#include <linux/irq-entry-common.h>\n+\n #include <asm/kprobes.h>\n #include <asm/runlatch.h>\n \n@@ -88,308 +86,6 @@ do {\t\t\t\t\t\t\t\t\t\\\n #define INT_SOFT_MASK_BUG_ON(regs, cond)\n #endif\n \n-#ifdef CONFIG_PPC_BOOK3S_64\n-extern char __end_soft_masked[];\n-bool search_kernel_soft_mask_table(unsigned long addr);\n-unsigned long search_kernel_restart_table(unsigned long addr);\n-\n-DECLARE_STATIC_KEY_FALSE(interrupt_exit_not_reentrant);\n-\n-static inline bool is_implicit_soft_masked(struct pt_regs *regs)\n-{\n-\tif (user_mode(regs))\n-\t\treturn false;\n-\n-\tif (regs->nip >= (unsigned long)__end_soft_masked)\n-\t\treturn false;\n-\n-\treturn search_kernel_soft_mask_table(regs->nip);\n-}\n-\n-static inline void srr_regs_clobbered(void)\n-{\n-\tlocal_paca->srr_valid = 0;\n-\tlocal_paca->hsrr_valid = 0;\n-}\n-#else\n-static inline unsigned long search_kernel_restart_table(unsigned long addr)\n-{\n-\treturn 0;\n-}\n-\n-static inline bool is_implicit_soft_masked(struct pt_regs *regs)\n-{\n-\treturn false;\n-}\n-\n-static inline void 
srr_regs_clobbered(void)\n-{\n-}\n-#endif\n-\n-static inline void nap_adjust_return(struct pt_regs *regs)\n-{\n-#ifdef CONFIG_PPC_970_NAP\n-\tif (unlikely(test_thread_local_flags(_TLF_NAPPING))) {\n-\t\t/* Can avoid a test-and-clear because NMIs do not call this */\n-\t\tclear_thread_local_flags(_TLF_NAPPING);\n-\t\tregs_set_return_ip(regs, (unsigned long)power4_idle_nap_return);\n-\t}\n-#endif\n-}\n-\n-static inline void booke_restore_dbcr0(void)\n-{\n-#ifdef CONFIG_PPC_ADV_DEBUG_REGS\n-\tunsigned long dbcr0 = current->thread.debug.dbcr0;\n-\n-\tif (IS_ENABLED(CONFIG_PPC32) && unlikely(dbcr0 & DBCR0_IDM)) {\n-\t\tmtspr(SPRN_DBSR, -1);\n-\t\tmtspr(SPRN_DBCR0, global_dbcr0[smp_processor_id()]);\n-\t}\n-#endif\n-}\n-\n-static inline void interrupt_enter_prepare(struct pt_regs *regs)\n-{\n-#ifdef CONFIG_PPC64\n-\tirq_soft_mask_set(IRQS_ALL_DISABLED);\n-\n-\t/*\n-\t * If the interrupt was taken with HARD_DIS clear, then enable MSR[EE].\n-\t * Asynchronous interrupts get here with HARD_DIS set (see below), so\n-\t * this enables MSR[EE] for synchronous interrupts. IRQs remain\n-\t * soft-masked. 
The interrupt handler may later call\n-\t * interrupt_cond_local_irq_enable() to achieve a regular process\n-\t * context.\n-\t */\n-\tif (!(local_paca->irq_happened & PACA_IRQ_HARD_DIS)) {\n-\t\tINT_SOFT_MASK_BUG_ON(regs, !(regs->msr & MSR_EE));\n-\t\t__hard_irq_enable();\n-\t} else {\n-\t\t__hard_RI_enable();\n-\t}\n-\t/* Enable MSR[RI] early, to support kernel SLB and hash faults */\n-#endif\n-\n-\tif (!regs_irqs_disabled(regs))\n-\t\ttrace_hardirqs_off();\n-\n-\tif (user_mode(regs)) {\n-\t\tkuap_lock();\n-\t\tCT_WARN_ON(ct_state() != CT_STATE_USER);\n-\t\tuser_exit_irqoff();\n-\n-\t\taccount_cpu_user_entry();\n-\t\taccount_stolen_time();\n-\t} else {\n-\t\tkuap_save_and_lock(regs);\n-\t\t/*\n-\t\t * CT_WARN_ON comes here via program_check_exception,\n-\t\t * so avoid recursion.\n-\t\t */\n-\t\tif (TRAP(regs) != INTERRUPT_PROGRAM)\n-\t\t\tCT_WARN_ON(ct_state() != CT_STATE_KERNEL &&\n-\t\t\t\t   ct_state() != CT_STATE_IDLE);\n-\t\tINT_SOFT_MASK_BUG_ON(regs, is_implicit_soft_masked(regs));\n-\t\tINT_SOFT_MASK_BUG_ON(regs, regs_irqs_disabled(regs) &&\n-\t\t\t\t     search_kernel_restart_table(regs->nip));\n-\t}\n-\tINT_SOFT_MASK_BUG_ON(regs, !regs_irqs_disabled(regs) &&\n-\t\t\t     !(regs->msr & MSR_EE));\n-\n-\tbooke_restore_dbcr0();\n-}\n-\n-/*\n- * Care should be taken to note that interrupt_exit_prepare and\n- * interrupt_async_exit_prepare do not necessarily return immediately to\n- * regs context (e.g., if regs is usermode, we don't necessarily return to\n- * user mode). 
Other interrupts might be taken between here and return,\n- * context switch / preemption may occur in the exit path after this, or a\n- * signal may be delivered, etc.\n- *\n- * The real interrupt exit code is platform specific, e.g.,\n- * interrupt_exit_user_prepare / interrupt_exit_kernel_prepare for 64s.\n- *\n- * However interrupt_nmi_exit_prepare does return directly to regs, because\n- * NMIs do not do \"exit work\" or replay soft-masked interrupts.\n- */\n-static inline void interrupt_exit_prepare(struct pt_regs *regs)\n-{\n-}\n-\n-static inline void interrupt_async_enter_prepare(struct pt_regs *regs)\n-{\n-#ifdef CONFIG_PPC64\n-\t/* Ensure interrupt_enter_prepare does not enable MSR[EE] */\n-\tlocal_paca->irq_happened |= PACA_IRQ_HARD_DIS;\n-#endif\n-\tinterrupt_enter_prepare(regs);\n-#ifdef CONFIG_PPC_BOOK3S_64\n-\t/*\n-\t * RI=1 is set by interrupt_enter_prepare, so this thread flags access\n-\t * has to come afterward (it can cause SLB faults).\n-\t */\n-\tif (cpu_has_feature(CPU_FTR_CTRL) &&\n-\t    !test_thread_local_flags(_TLF_RUNLATCH))\n-\t\t__ppc64_runlatch_on();\n-#endif\n-\tirq_enter();\n-}\n-\n-static inline void interrupt_async_exit_prepare(struct pt_regs *regs)\n-{\n-\t/*\n-\t * Adjust at exit so the main handler sees the true NIA. 
This must\n-\t * come before irq_exit() because irq_exit can enable interrupts, and\n-\t * if another interrupt is taken before nap_adjust_return has run\n-\t * here, then that interrupt would return directly to idle nap return.\n-\t */\n-\tnap_adjust_return(regs);\n-\n-\tirq_exit();\n-\tinterrupt_exit_prepare(regs);\n-}\n-\n-struct interrupt_nmi_state {\n-#ifdef CONFIG_PPC64\n-\tu8 irq_soft_mask;\n-\tu8 irq_happened;\n-\tu8 ftrace_enabled;\n-\tu64 softe;\n-#endif\n-};\n-\n-static inline bool nmi_disables_ftrace(struct pt_regs *regs)\n-{\n-\t/* Allow DEC and PMI to be traced when they are soft-NMI */\n-\tif (IS_ENABLED(CONFIG_PPC_BOOK3S_64)) {\n-\t\tif (TRAP(regs) == INTERRUPT_DECREMENTER)\n-\t\t       return false;\n-\t\tif (TRAP(regs) == INTERRUPT_PERFMON)\n-\t\t       return false;\n-\t}\n-\tif (IS_ENABLED(CONFIG_PPC_BOOK3E_64)) {\n-\t\tif (TRAP(regs) == INTERRUPT_PERFMON)\n-\t\t\treturn false;\n-\t}\n-\n-\treturn true;\n-}\n-\n-static inline void interrupt_nmi_enter_prepare(struct pt_regs *regs, struct interrupt_nmi_state *state)\n-{\n-#ifdef CONFIG_PPC64\n-\tstate->irq_soft_mask = local_paca->irq_soft_mask;\n-\tstate->irq_happened = local_paca->irq_happened;\n-\tstate->softe = regs->softe;\n-\n-\t/*\n-\t * Set IRQS_ALL_DISABLED unconditionally so irqs_disabled() does\n-\t * the right thing, and set IRQ_HARD_DIS. We do not want to reconcile\n-\t * because that goes through irq tracing which we don't want in NMI.\n-\t */\n-\tlocal_paca->irq_soft_mask = IRQS_ALL_DISABLED;\n-\tlocal_paca->irq_happened |= PACA_IRQ_HARD_DIS;\n-\n-\tif (!(regs->msr & MSR_EE) || is_implicit_soft_masked(regs)) {\n-\t\t/*\n-\t\t * Adjust regs->softe to be soft-masked if it had not been\n-\t\t * reconcied (e.g., interrupt entry with MSR[EE]=0 but softe\n-\t\t * not yet set disabled), or if it was in an implicit soft\n-\t\t * masked state. 
This makes regs_irqs_disabled(regs)\n-\t\t * behave as expected.\n-\t\t */\n-\t\tregs->softe = IRQS_ALL_DISABLED;\n-\t}\n-\n-\t__hard_RI_enable();\n-\n-\t/* Don't do any per-CPU operations until interrupt state is fixed */\n-\n-\tif (nmi_disables_ftrace(regs)) {\n-\t\tstate->ftrace_enabled = this_cpu_get_ftrace_enabled();\n-\t\tthis_cpu_set_ftrace_enabled(0);\n-\t}\n-#endif\n-\n-\t/* If data relocations are enabled, it's safe to use nmi_enter() */\n-\tif (mfmsr() & MSR_DR) {\n-\t\tnmi_enter();\n-\t\treturn;\n-\t}\n-\n-\t/*\n-\t * But do not use nmi_enter() for pseries hash guest taking a real-mode\n-\t * NMI because not everything it touches is within the RMA limit.\n-\t */\n-\tif (IS_ENABLED(CONFIG_PPC_BOOK3S_64) &&\n-\t    firmware_has_feature(FW_FEATURE_LPAR) &&\n-\t    !radix_enabled())\n-\t\treturn;\n-\n-\t/*\n-\t * Likewise, don't use it if we have some form of instrumentation (like\n-\t * KASAN shadow) that is not safe to access in real mode (even on radix)\n-\t */\n-\tif (IS_ENABLED(CONFIG_KASAN))\n-\t\treturn;\n-\n-\t/*\n-\t * Likewise, do not use it in real mode if percpu first chunk is not\n-\t * embedded. 
With CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK enabled there\n-\t * are chances where percpu allocation can come from vmalloc area.\n-\t */\n-\tif (percpu_first_chunk_is_paged)\n-\t\treturn;\n-\n-\t/* Otherwise, it should be safe to call it */\n-\tnmi_enter();\n-}\n-\n-static inline void interrupt_nmi_exit_prepare(struct pt_regs *regs, struct interrupt_nmi_state *state)\n-{\n-\tif (mfmsr() & MSR_DR) {\n-\t\t// nmi_exit if relocations are on\n-\t\tnmi_exit();\n-\t} else if (IS_ENABLED(CONFIG_PPC_BOOK3S_64) &&\n-\t\t   firmware_has_feature(FW_FEATURE_LPAR) &&\n-\t\t   !radix_enabled()) {\n-\t\t// no nmi_exit for a pseries hash guest taking a real mode exception\n-\t} else if (IS_ENABLED(CONFIG_KASAN)) {\n-\t\t// no nmi_exit for KASAN in real mode\n-\t} else if (percpu_first_chunk_is_paged) {\n-\t\t// no nmi_exit if percpu first chunk is not embedded\n-\t} else {\n-\t\tnmi_exit();\n-\t}\n-\n-\t/*\n-\t * nmi does not call nap_adjust_return because nmi should not create\n-\t * new work to do (must use irq_work for that).\n-\t */\n-\n-#ifdef CONFIG_PPC64\n-#ifdef CONFIG_PPC_BOOK3S\n-\tif (regs_irqs_disabled(regs)) {\n-\t\tunsigned long rst = search_kernel_restart_table(regs->nip);\n-\t\tif (rst)\n-\t\t\tregs_set_return_ip(regs, rst);\n-\t}\n-#endif\n-\n-\tif (nmi_disables_ftrace(regs))\n-\t\tthis_cpu_set_ftrace_enabled(state->ftrace_enabled);\n-\n-\t/* Check we didn't change the pending interrupt mask. */\n-\tWARN_ON_ONCE((state->irq_happened | PACA_IRQ_HARD_DIS) != local_paca->irq_happened);\n-\tregs->softe = state->softe;\n-\tlocal_paca->irq_happened = state->irq_happened;\n-\tlocal_paca->irq_soft_mask = state->irq_soft_mask;\n-#endif\n-}\n-\n /*\n  * Don't use noinstr here like x86, but rather add NOKPROBE_SYMBOL to each\n  * function definition. 
The reason for this is the noinstr section is placed\n@@ -470,11 +166,14 @@ static __always_inline void ____##func(struct pt_regs *regs);\t\t\\\n \t\t\t\t\t\t\t\t\t\\\n interrupt_handler void func(struct pt_regs *regs)\t\t\t\\\n {\t\t\t\t\t\t\t\t\t\\\n-\tinterrupt_enter_prepare(regs);\t\t\t\t\t\\\n-\t\t\t\t\t\t\t\t\t\\\n+\tirqentry_state_t state;\t\t\t\t\t\t\\\n+\tarch_interrupt_enter_prepare(regs);\t\t\t\t\\\n+\tstate = irqentry_enter(regs);\t\t\t\t\t\\\n+\tinstrumentation_begin();\t\t\t\t\t\\\n \t____##func (regs);\t\t\t\t\t\t\\\n-\t\t\t\t\t\t\t\t\t\\\n-\tinterrupt_exit_prepare(regs);\t\t\t\t\t\\\n+\tinstrumentation_end();\t\t\t\t\t\t\\\n+\tarch_interrupt_exit_prepare(regs);\t\t\t\t\\\n+\tirqentry_exit(regs, state);\t\t\t\t\t\\\n }\t\t\t\t\t\t\t\t\t\\\n NOKPROBE_SYMBOL(func);\t\t\t\t\t\t\t\\\n \t\t\t\t\t\t\t\t\t\\\n@@ -504,12 +203,15 @@ static __always_inline long ____##func(struct pt_regs *regs);\t\t\\\n interrupt_handler long func(struct pt_regs *regs)\t\t\t\\\n {\t\t\t\t\t\t\t\t\t\\\n \tlong ret;\t\t\t\t\t\t\t\\\n+\tirqentry_state_t state;\t\t\t\t\t\t\\\n \t\t\t\t\t\t\t\t\t\\\n-\tinterrupt_enter_prepare(regs);\t\t\t\t\t\\\n-\t\t\t\t\t\t\t\t\t\\\n+\tarch_interrupt_enter_prepare(regs);\t\t\t\t\\\n+\tstate = irqentry_enter(regs);\t\t\t\t\t\\\n+\tinstrumentation_begin();\t\t\t\t\t\\\n \tret = ____##func (regs);\t\t\t\t\t\\\n-\t\t\t\t\t\t\t\t\t\\\n-\tinterrupt_exit_prepare(regs);\t\t\t\t\t\\\n+\tinstrumentation_end();\t\t\t\t\t\t\\\n+\tarch_interrupt_exit_prepare(regs);\t\t\t\t\\\n+\tirqentry_exit(regs, state);\t\t\t\t\t\\\n \t\t\t\t\t\t\t\t\t\\\n \treturn ret;\t\t\t\t\t\t\t\\\n }\t\t\t\t\t\t\t\t\t\\\n@@ -538,11 +240,16 @@ static __always_inline void ____##func(struct pt_regs *regs);\t\t\\\n \t\t\t\t\t\t\t\t\t\\\n interrupt_handler void func(struct pt_regs *regs)\t\t\t\\\n {\t\t\t\t\t\t\t\t\t\\\n-\tinterrupt_async_enter_prepare(regs);\t\t\t\t\\\n-\t\t\t\t\t\t\t\t\t\\\n+\tirqentry_state_t 
state;\t\t\t\t\t\t\\\n+\tarch_interrupt_async_enter_prepare(regs);\t\t\t\\\n+\tstate = irqentry_enter(regs);\t\t\t\t\t\\\n+\tinstrumentation_begin();\t\t\t\t\t\\\n+\tirq_enter_rcu();\t\t\t\t\t\t\\\n \t____##func (regs);\t\t\t\t\t\t\\\n-\t\t\t\t\t\t\t\t\t\\\n-\tinterrupt_async_exit_prepare(regs);\t\t\t\t\\\n+\tirq_exit_rcu();\t\t\t\t\t\t\t\\\n+\tinstrumentation_end();\t\t\t\t\t\t\\\n+\tarch_interrupt_async_exit_prepare(regs);\t\t\t\\\n+\tirqentry_exit(regs, state);\t\t\t\t\t\\\n }\t\t\t\t\t\t\t\t\t\\\n NOKPROBE_SYMBOL(func);\t\t\t\t\t\t\t\\\n \t\t\t\t\t\t\t\t\t\\\n@@ -572,14 +279,43 @@ ____##func(struct pt_regs *regs);\t\t\t\t\t\\\n \t\t\t\t\t\t\t\t\t\\\n interrupt_handler long func(struct pt_regs *regs)\t\t\t\\\n {\t\t\t\t\t\t\t\t\t\\\n-\tstruct interrupt_nmi_state state;\t\t\t\t\\\n+\tirqentry_state_t state;\t\t\t\t\t\t\\\n+\tstruct interrupt_nmi_state nmi_state;\t\t\t\t\\\n \tlong ret;\t\t\t\t\t\t\t\\\n \t\t\t\t\t\t\t\t\t\\\n-\tinterrupt_nmi_enter_prepare(regs, &state);\t\t\t\\\n-\t\t\t\t\t\t\t\t\t\\\n+\tarch_interrupt_nmi_enter_prepare(regs, &nmi_state);\t\t\\\n+\tif (mfmsr() & MSR_DR) {\t\t\t\t\t\t\\\n+\t\t/* nmi_entry if relocations are on */\t\t\t\\\n+\t\tstate = irqentry_nmi_enter(regs);\t\t\t\\\n+\t} else if (IS_ENABLED(CONFIG_PPC_BOOK3S_64) &&\t\t\t\\\n+\t\t   firmware_has_feature(FW_FEATURE_LPAR) &&\t\t\\\n+\t\t   !radix_enabled()) {\t\t\t\t\t\\\n+\t\t/* no nmi_entry for a pseries hash guest\t\t\\\n+\t\t * taking a real mode exception */\t\t\t\\\n+\t} else if (IS_ENABLED(CONFIG_KASAN)) {\t\t\t\t\\\n+\t\t/* no nmi_entry for KASAN in real mode */\t\t\\\n+\t} else if (percpu_first_chunk_is_paged) {\t\t\t\\\n+\t\t/* no nmi_entry if percpu first chunk is not embedded */\\\n+\t} else {\t\t\t\t\t\t\t\\\n+\t\tstate = irqentry_nmi_enter(regs);\t\t\t\\\n+\t}\t\t\t\t\t\t\t\t\\\n \tret = ____##func (regs);\t\t\t\t\t\\\n-\t\t\t\t\t\t\t\t\t\\\n-\tinterrupt_nmi_exit_prepare(regs, &state);\t\t\t\\\n+\tarch_interrupt_nmi_exit_prepare(regs, &nmi_state);\t\t\\\n+\tif 
(mfmsr() & MSR_DR) {\t\t\t\t\t\t\\\n+\t\t/* nmi_exit if relocations are on */\t\t\t\\\n+\t\tirqentry_nmi_exit(regs, state);\t\t\t\t\\\n+\t} else if (IS_ENABLED(CONFIG_PPC_BOOK3S_64) &&\t\t\t\\\n+\t\t   firmware_has_feature(FW_FEATURE_LPAR) &&\t\t\\\n+\t\t   !radix_enabled()) {\t\t\t\t\t\\\n+\t\t/* no nmi_exit for a pseries hash guest\t\t\t\\\n+\t\t * taking a real mode exception */\t\t\t\\\n+\t} else if (IS_ENABLED(CONFIG_KASAN)) {\t\t\t\t\\\n+\t\t/* no nmi_exit for KASAN in real mode */\t\t\\\n+\t} else if (percpu_first_chunk_is_paged) {\t\t\t\\\n+\t\t/* no nmi_exit if percpu first chunk is not embedded */\t\\\n+\t} else {\t\t\t\t\t\t\t\\\n+\t\tirqentry_nmi_exit(regs, state);\t\t\t\t\\\n+\t}\t\t\t\t\t\t\t\t\\\n \t\t\t\t\t\t\t\t\t\\\n \treturn ret;\t\t\t\t\t\t\t\\\n }\t\t\t\t\t\t\t\t\t\\\ndiff --git a/arch/powerpc/include/asm/kasan.h b/arch/powerpc/include/asm/kasan.h\nindex 045804a86f98..a690e7da53c2 100644\n--- a/arch/powerpc/include/asm/kasan.h\n+++ b/arch/powerpc/include/asm/kasan.h\n@@ -3,14 +3,19 @@\n #define __ASM_KASAN_H\n \n #if defined(CONFIG_KASAN) && !defined(CONFIG_CC_HAS_KASAN_MEMINTRINSIC_PREFIX)\n-#define _GLOBAL_KASAN(fn)\t_GLOBAL(__##fn)\n-#define _GLOBAL_TOC_KASAN(fn)\t_GLOBAL_TOC(__##fn)\n-#define EXPORT_SYMBOL_KASAN(fn)\tEXPORT_SYMBOL(__##fn)\n-#else\n+#define _GLOBAL_KASAN(fn)\t\t\t\\\n+\t_GLOBAL(fn);\t\t\t\t\\\n+\t_GLOBAL(__##fn)\n+#define _GLOBAL_TOC_KASAN(fn)\t\t\t\\\n+\t_GLOBAL_TOC(fn);\t\t\t\\\n+\t_GLOBAL_TOC(__##fn)\n+#define EXPORT_SYMBOL_KASAN(fn)\t\t\t\\\n+\tEXPORT_SYMBOL(__##fn)\n+#else /* CONFIG_KASAN && !CONFIG_CC_HAS_KASAN_MEMINTRINSIC_PREFIX */\n #define _GLOBAL_KASAN(fn)\t_GLOBAL(fn)\n #define _GLOBAL_TOC_KASAN(fn)\t_GLOBAL_TOC(fn)\n #define EXPORT_SYMBOL_KASAN(fn)\n-#endif\n+#endif /* CONFIG_KASAN && !CONFIG_CC_HAS_KASAN_MEMINTRINSIC_PREFIX */\n \n #ifndef __ASSEMBLER__\n \ndiff --git a/arch/powerpc/kernel/interrupt.c b/arch/powerpc/kernel/interrupt.c\nindex 666eadb589a5..89a999be1352 100644\n--- 
a/arch/powerpc/kernel/interrupt.c\n+++ b/arch/powerpc/kernel/interrupt.c\n@@ -1,6 +1,7 @@\n // SPDX-License-Identifier: GPL-2.0-or-later\n \n #include <linux/context_tracking.h>\n+#include <linux/entry-common.h>\n #include <linux/err.h>\n #include <linux/compat.h>\n #include <linux/rseq.h>\n@@ -25,10 +26,6 @@\n unsigned long global_dbcr0[NR_CPUS];\n #endif\n \n-#if defined(CONFIG_PREEMPT_DYNAMIC)\n-DEFINE_STATIC_KEY_TRUE(sk_dynamic_irqentry_exit_cond_resched);\n-#endif\n-\n #ifdef CONFIG_PPC_BOOK3S_64\n DEFINE_STATIC_KEY_FALSE(interrupt_exit_not_reentrant);\n static inline bool exit_must_hard_disable(void)\n@@ -78,181 +75,6 @@ static notrace __always_inline bool prep_irq_for_enabled_exit(bool restartable)\n \treturn true;\n }\n \n-static notrace void booke_load_dbcr0(void)\n-{\n-#ifdef CONFIG_PPC_ADV_DEBUG_REGS\n-\tunsigned long dbcr0 = current->thread.debug.dbcr0;\n-\n-\tif (likely(!(dbcr0 & DBCR0_IDM)))\n-\t\treturn;\n-\n-\t/*\n-\t * Check to see if the dbcr0 register is set up to debug.\n-\t * Use the internal debug mode bit to do this.\n-\t */\n-\tmtmsr(mfmsr() & ~MSR_DE);\n-\tif (IS_ENABLED(CONFIG_PPC32)) {\n-\t\tisync();\n-\t\tglobal_dbcr0[smp_processor_id()] = mfspr(SPRN_DBCR0);\n-\t}\n-\tmtspr(SPRN_DBCR0, dbcr0);\n-\tmtspr(SPRN_DBSR, -1);\n-#endif\n-}\n-\n-static notrace void check_return_regs_valid(struct pt_regs *regs)\n-{\n-#ifdef CONFIG_PPC_BOOK3S_64\n-\tunsigned long trap, srr0, srr1;\n-\tstatic bool warned;\n-\tu8 *validp;\n-\tchar *h;\n-\n-\tif (trap_is_scv(regs))\n-\t\treturn;\n-\n-\ttrap = TRAP(regs);\n-\t// EE in HV mode sets HSRRs like 0xea0\n-\tif (cpu_has_feature(CPU_FTR_HVMODE) && trap == INTERRUPT_EXTERNAL)\n-\t\ttrap = 0xea0;\n-\n-\tswitch (trap) {\n-\tcase 0x980:\n-\tcase INTERRUPT_H_DATA_STORAGE:\n-\tcase 0xe20:\n-\tcase 0xe40:\n-\tcase INTERRUPT_HMI:\n-\tcase 0xe80:\n-\tcase 0xea0:\n-\tcase INTERRUPT_H_FAC_UNAVAIL:\n-\tcase 0x1200:\n-\tcase 0x1500:\n-\tcase 0x1600:\n-\tcase 0x1800:\n-\t\tvalidp = &local_paca->hsrr_valid;\n-\t\tif 
(!READ_ONCE(*validp))\n-\t\t\treturn;\n-\n-\t\tsrr0 = mfspr(SPRN_HSRR0);\n-\t\tsrr1 = mfspr(SPRN_HSRR1);\n-\t\th = \"H\";\n-\n-\t\tbreak;\n-\tdefault:\n-\t\tvalidp = &local_paca->srr_valid;\n-\t\tif (!READ_ONCE(*validp))\n-\t\t\treturn;\n-\n-\t\tsrr0 = mfspr(SPRN_SRR0);\n-\t\tsrr1 = mfspr(SPRN_SRR1);\n-\t\th = \"\";\n-\t\tbreak;\n-\t}\n-\n-\tif (srr0 == regs->nip && srr1 == regs->msr)\n-\t\treturn;\n-\n-\t/*\n-\t * A NMI / soft-NMI interrupt may have come in after we found\n-\t * srr_valid and before the SRRs are loaded. The interrupt then\n-\t * comes in and clobbers SRRs and clears srr_valid. Then we load\n-\t * the SRRs here and test them above and find they don't match.\n-\t *\n-\t * Test validity again after that, to catch such false positives.\n-\t *\n-\t * This test in general will have some window for false negatives\n-\t * and may not catch and fix all such cases if an NMI comes in\n-\t * later and clobbers SRRs without clearing srr_valid, but hopefully\n-\t * such things will get caught most of the time, statistically\n-\t * enough to be able to get a warning out.\n-\t */\n-\tif (!READ_ONCE(*validp))\n-\t\treturn;\n-\n-\tif (!data_race(warned)) {\n-\t\tdata_race(warned = true);\n-\t\tprintk(\"%sSRR0 was: %lx should be: %lx\\n\", h, srr0, regs->nip);\n-\t\tprintk(\"%sSRR1 was: %lx should be: %lx\\n\", h, srr1, regs->msr);\n-\t\tshow_regs(regs);\n-\t}\n-\n-\tWRITE_ONCE(*validp, 0); /* fixup */\n-#endif\n-}\n-\n-static notrace unsigned long\n-interrupt_exit_user_prepare_main(unsigned long ret, struct pt_regs *regs)\n-{\n-\tunsigned long ti_flags;\n-\n-again:\n-\tti_flags = read_thread_flags();\n-\twhile (unlikely(ti_flags & (_TIF_USER_WORK_MASK & ~_TIF_RESTORE_TM))) {\n-\t\tlocal_irq_enable();\n-\t\tif (ti_flags & (_TIF_NEED_RESCHED | _TIF_NEED_RESCHED_LAZY)) {\n-\t\t\tschedule();\n-\t\t} else {\n-\t\t\t/*\n-\t\t\t * SIGPENDING must restore signal handler function\n-\t\t\t * argument GPRs, and some non-volatiles (e.g., r1).\n-\t\t\t * Restore all for now. 
This could be made lighter.\n-\t\t\t */\n-\t\t\tif (ti_flags & _TIF_SIGPENDING)\n-\t\t\t\tret |= _TIF_RESTOREALL;\n-\t\t\tdo_notify_resume(regs, ti_flags);\n-\t\t}\n-\t\tlocal_irq_disable();\n-\t\tti_flags = read_thread_flags();\n-\t}\n-\n-\tif (IS_ENABLED(CONFIG_PPC_BOOK3S_64) && IS_ENABLED(CONFIG_PPC_FPU)) {\n-\t\tif (IS_ENABLED(CONFIG_PPC_TRANSACTIONAL_MEM) &&\n-\t\t\t\tunlikely((ti_flags & _TIF_RESTORE_TM))) {\n-\t\t\trestore_tm_state(regs);\n-\t\t} else {\n-\t\t\tunsigned long mathflags = MSR_FP;\n-\n-\t\t\tif (cpu_has_feature(CPU_FTR_VSX))\n-\t\t\t\tmathflags |= MSR_VEC | MSR_VSX;\n-\t\t\telse if (cpu_has_feature(CPU_FTR_ALTIVEC))\n-\t\t\t\tmathflags |= MSR_VEC;\n-\n-\t\t\t/*\n-\t\t\t * If userspace MSR has all available FP bits set,\n-\t\t\t * then they are live and no need to restore. If not,\n-\t\t\t * it means the regs were given up and restore_math\n-\t\t\t * may decide to restore them (to avoid taking an FP\n-\t\t\t * fault).\n-\t\t\t */\n-\t\t\tif ((regs->msr & mathflags) != mathflags)\n-\t\t\t\trestore_math(regs);\n-\t\t}\n-\t}\n-\n-\tcheck_return_regs_valid(regs);\n-\n-\tuser_enter_irqoff();\n-\tif (!prep_irq_for_enabled_exit(true)) {\n-\t\tuser_exit_irqoff();\n-\t\tlocal_irq_enable();\n-\t\tlocal_irq_disable();\n-\t\tgoto again;\n-\t}\n-\n-#ifdef CONFIG_PPC_TRANSACTIONAL_MEM\n-\tlocal_paca->tm_scratch = regs->msr;\n-#endif\n-\n-\tbooke_load_dbcr0();\n-\n-\taccount_cpu_user_exit();\n-\n-\t/* Restore user access locks last */\n-\tkuap_user_restore(regs);\n-\n-\treturn ret;\n-}\n-\n /*\n  * This should be called after a syscall returns, with r3 the return value\n  * from the syscall. 
If this function returns non-zero, the system call\n@@ -267,17 +89,12 @@ notrace unsigned long syscall_exit_prepare(unsigned long r3,\n \t\t\t\t\t   long scv)\n {\n \tunsigned long ti_flags;\n-\tunsigned long ret = 0;\n \tbool is_not_scv = !IS_ENABLED(CONFIG_PPC_BOOK3S_64) || !scv;\n \n-\tCT_WARN_ON(ct_state() == CT_STATE_USER);\n-\n \tkuap_assert_locked();\n \n \tregs->result = r3;\n-\n-\t/* Check whether the syscall is issued inside a restartable sequence */\n-\trseq_syscall(regs);\n+\tregs->exit_flags = 0;\n \n \tti_flags = read_thread_flags();\n \n@@ -290,7 +107,7 @@ notrace unsigned long syscall_exit_prepare(unsigned long r3,\n \n \tif (unlikely(ti_flags & _TIF_PERSYSCALL_MASK)) {\n \t\tif (ti_flags & _TIF_RESTOREALL)\n-\t\t\tret = _TIF_RESTOREALL;\n+\t\t\tregs->exit_flags = _TIF_RESTOREALL;\n \t\telse\n \t\t\tregs->gpr[3] = r3;\n \t\tclear_bits(_TIF_PERSYSCALL_MASK, &current_thread_info()->flags);\n@@ -299,18 +116,28 @@ notrace unsigned long syscall_exit_prepare(unsigned long r3,\n \t}\n \n \tif (unlikely(ti_flags & _TIF_SYSCALL_DOTRACE)) {\n-\t\tdo_syscall_trace_leave(regs);\n-\t\tret |= _TIF_RESTOREALL;\n+\t\tregs->exit_flags |= _TIF_RESTOREALL;\n \t}\n \n-\tlocal_irq_disable();\n-\tret = interrupt_exit_user_prepare_main(ret, regs);\n+\tsyscall_exit_to_user_mode(regs);\n+\n+again:\n+\tuser_enter_irqoff();\n+\tif (!prep_irq_for_enabled_exit(true)) {\n+\t\tuser_exit_irqoff();\n+\t\tlocal_irq_enable();\n+\t\tlocal_irq_disable();\n+\t\tgoto again;\n+\t}\n+\n+\t/* Restore user access locks last */\n+\tkuap_user_restore(regs);\n \n #ifdef CONFIG_PPC64\n-\tregs->exit_result = ret;\n+\tregs->exit_result = regs->exit_flags;\n #endif\n \n-\treturn ret;\n+\treturn regs->exit_flags;\n }\n \n #ifdef CONFIG_PPC64\n@@ -330,13 +157,16 @@ notrace unsigned long syscall_exit_restart(unsigned long r3, struct pt_regs *reg\n \tset_kuap(AMR_KUAP_BLOCKED);\n #endif\n 
\n-\ttrace_hardirqs_off();\n-\tuser_exit_irqoff();\n-\taccount_cpu_user_entry();\n-\n-\tBUG_ON(!user_mode(regs));\n+again:\n+\tuser_enter_irqoff();\n+\tif (!prep_irq_for_enabled_exit(true)) {\n+\t\tuser_exit_irqoff();\n+\t\tlocal_irq_enable();\n+\t\tlocal_irq_disable();\n+\t\tgoto again;\n+\t}\n \n-\tregs->exit_result = interrupt_exit_user_prepare_main(regs->exit_result, regs);\n+\tregs->exit_result |= regs->exit_flags;\n \n \treturn regs->exit_result;\n }\n@@ -348,7 +178,6 @@ notrace unsigned long interrupt_exit_user_prepare(struct pt_regs *regs)\n \n \tBUG_ON(regs_is_unrecoverable(regs));\n \tBUG_ON(regs_irqs_disabled(regs));\n-\tCT_WARN_ON(ct_state() == CT_STATE_USER);\n \n \t/*\n \t * We don't need to restore AMR on the way back to userspace for KUAP.\n@@ -357,8 +186,21 @@ notrace unsigned long interrupt_exit_user_prepare(struct pt_regs *regs)\n \tkuap_assert_locked();\n \n \tlocal_irq_disable();\n+\tregs->exit_flags = 0;\n+again:\n+\tcheck_return_regs_valid(regs);\n+\tuser_enter_irqoff();\n+\tif (!prep_irq_for_enabled_exit(true)) {\n+\t\tuser_exit_irqoff();\n+\t\tlocal_irq_enable();\n+\t\tlocal_irq_disable();\n+\t\tgoto again;\n+\t}\n+\n+\t/* Restore user access locks last */\n+\tkuap_user_restore(regs);\n \n-\tret = interrupt_exit_user_prepare_main(0, regs);\n+\tret = regs->exit_flags;\n \n #ifdef CONFIG_PPC64\n \tregs->exit_result = ret;\n@@ -400,13 +242,6 @@ notrace unsigned long interrupt_exit_kernel_prepare(struct pt_regs *regs)\n \t\t/* Returning to a kernel context with local irqs enabled. 
*/\n \t\tWARN_ON_ONCE(!(regs->msr & MSR_EE));\n again:\n-\t\tif (need_irq_preemption()) {\n-\t\t\t/* Return to preemptible kernel context */\n-\t\t\tif (unlikely(read_thread_flags() & _TIF_NEED_RESCHED)) {\n-\t\t\t\tif (preempt_count() == 0)\n-\t\t\t\t\tpreempt_schedule_irq();\n-\t\t\t}\n-\t\t}\n \n \t\tcheck_return_regs_valid(regs);\n \n@@ -479,7 +314,6 @@ notrace unsigned long interrupt_exit_user_restart(struct pt_regs *regs)\n #endif\n \n \ttrace_hardirqs_off();\n-\tuser_exit_irqoff();\n \taccount_cpu_user_entry();\n \n \tBUG_ON(!user_mode(regs));\ndiff --git a/arch/powerpc/kernel/ptrace/ptrace.c b/arch/powerpc/kernel/ptrace/ptrace.c\nindex 2134b6d155ff..f006a03a0211 100644\n--- a/arch/powerpc/kernel/ptrace/ptrace.c\n+++ b/arch/powerpc/kernel/ptrace/ptrace.c\n@@ -21,9 +21,6 @@\n #include <asm/switch_to.h>\n #include <asm/debug.h>\n \n-#define CREATE_TRACE_POINTS\n-#include <trace/events/syscalls.h>\n-\n #include \"ptrace-decl.h\"\n \n /*\ndiff --git a/arch/powerpc/kernel/signal.c b/arch/powerpc/kernel/signal.c\nindex aa17e62f3754..9f1847b4742e 100644\n--- a/arch/powerpc/kernel/signal.c\n+++ b/arch/powerpc/kernel/signal.c\n@@ -6,6 +6,7 @@\n  *    Extracted from signal_32.c and signal_64.c\n  */\n \n+#include <linux/entry-common.h>\n #include <linux/resume_user_mode.h>\n #include <linux/signal.h>\n #include <linux/uprobes.h>\n@@ -368,3 +369,10 @@ void signal_fault(struct task_struct *tsk, struct pt_regs *regs,\n \t\tprintk_ratelimited(regs->msr & MSR_64BIT ? 
fm64 : fm32, tsk->comm,\n \t\t\t\t   task_pid_nr(tsk), where, ptr, regs->nip, regs->link);\n }\n+\n+void arch_do_signal_or_restart(struct pt_regs *regs)\n+{\n+\tBUG_ON(regs != current->thread.regs);\n+\tregs->exit_flags |= _TIF_RESTOREALL;\n+\tdo_signal(current);\n+}\ndiff --git a/arch/powerpc/kernel/syscall.c b/arch/powerpc/kernel/syscall.c\nindex 52d6e10eab22..a9da2af6efa8 100644\n--- a/arch/powerpc/kernel/syscall.c\n+++ b/arch/powerpc/kernel/syscall.c\n@@ -3,6 +3,7 @@\n #include <linux/compat.h>\n #include <linux/context_tracking.h>\n #include <linux/randomize_kstack.h>\n+#include <linux/entry-common.h>\n \n #include <asm/interrupt.h>\n #include <asm/kup.h>\n@@ -18,124 +19,10 @@ notrace long system_call_exception(struct pt_regs *regs, unsigned long r0)\n \tlong ret;\n \tsyscall_fn f;\n \n-\tkuap_lock();\n-\n-\tif (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))\n-\t\tBUG_ON(irq_soft_mask_return() != IRQS_ALL_DISABLED);\n-\n-\ttrace_hardirqs_off(); /* finish reconciling */\n-\n-\tCT_WARN_ON(ct_state() == CT_STATE_KERNEL);\n-\tuser_exit_irqoff();\n-\n \tadd_random_kstack_offset();\n+\tr0 = syscall_enter_from_user_mode(regs, r0);\n \n-\tBUG_ON(regs_is_unrecoverable(regs));\n-\tBUG_ON(!user_mode(regs));\n-\tBUG_ON(regs_irqs_disabled(regs));\n-\n-#ifdef CONFIG_PPC_PKEY\n-\tif (mmu_has_feature(MMU_FTR_PKEY)) {\n-\t\tunsigned long amr, iamr;\n-\t\tbool flush_needed = false;\n-\t\t/*\n-\t\t * When entering from userspace we mostly have the AMR/IAMR\n-\t\t * different from kernel default values. 
Hence don't compare.\n-\t\t */\n-\t\tamr = mfspr(SPRN_AMR);\n-\t\tiamr = mfspr(SPRN_IAMR);\n-\t\tregs->amr  = amr;\n-\t\tregs->iamr = iamr;\n-\t\tif (mmu_has_feature(MMU_FTR_KUAP)) {\n-\t\t\tmtspr(SPRN_AMR, AMR_KUAP_BLOCKED);\n-\t\t\tflush_needed = true;\n-\t\t}\n-\t\tif (mmu_has_feature(MMU_FTR_BOOK3S_KUEP)) {\n-\t\t\tmtspr(SPRN_IAMR, AMR_KUEP_BLOCKED);\n-\t\t\tflush_needed = true;\n-\t\t}\n-\t\tif (flush_needed)\n-\t\t\tisync();\n-\t} else\n-#endif\n-\t\tkuap_assert_locked();\n-\n-\tbooke_restore_dbcr0();\n-\n-\taccount_cpu_user_entry();\n-\n-\taccount_stolen_time();\n-\n-\t/*\n-\t * This is not required for the syscall exit path, but makes the\n-\t * stack frame look nicer. If this was initialised in the first stack\n-\t * frame, or if the unwinder was taught the first stack frame always\n-\t * returns to user with IRQS_ENABLED, this store could be avoided!\n-\t */\n-\tirq_soft_mask_regs_set_state(regs, IRQS_ENABLED);\n-\n-\t/*\n-\t * If system call is called with TM active, set _TIF_RESTOREALL to\n-\t * prevent RFSCV being used to return to userspace, because POWER9\n-\t * TM implementation has problems with this instruction returning to\n-\t * transactional state. Final register values are not relevant because\n-\t * the transaction will be aborted upon return anyway. Or in the case\n-\t * of unsupported_scv SIGILL fault, the return state does not much\n-\t * matter because it's an edge case.\n-\t */\n-\tif (IS_ENABLED(CONFIG_PPC_TRANSACTIONAL_MEM) &&\n-\t\t\tunlikely(MSR_TM_TRANSACTIONAL(regs->msr)))\n-\t\tset_bits(_TIF_RESTOREALL, &current_thread_info()->flags);\n-\n-\t/*\n-\t * If the system call was made with a transaction active, doom it and\n-\t * return without performing the system call. 
Unless it was an\n-\t * unsupported scv vector, in which case it's treated like an illegal\n-\t * instruction.\n-\t */\n-#ifdef CONFIG_PPC_TRANSACTIONAL_MEM\n-\tif (unlikely(MSR_TM_TRANSACTIONAL(regs->msr)) &&\n-\t    !trap_is_unsupported_scv(regs)) {\n-\t\t/* Enable TM in the kernel, and disable EE (for scv) */\n-\t\thard_irq_disable();\n-\t\tmtmsr(mfmsr() | MSR_TM);\n-\n-\t\t/* tabort, this dooms the transaction, nothing else */\n-\t\tasm volatile(\".long 0x7c00071d | ((%0) << 16)\"\n-\t\t\t\t:: \"r\"(TM_CAUSE_SYSCALL|TM_CAUSE_PERSISTENT));\n-\n-\t\t/*\n-\t\t * Userspace will never see the return value. Execution will\n-\t\t * resume after the tbegin. of the aborted transaction with the\n-\t\t * checkpointed register state. A context switch could occur\n-\t\t * or signal delivered to the process before resuming the\n-\t\t * doomed transaction context, but that should all be handled\n-\t\t * as expected.\n-\t\t */\n-\t\treturn -ENOSYS;\n-\t}\n-#endif // CONFIG_PPC_TRANSACTIONAL_MEM\n-\n-\tlocal_irq_enable();\n-\n-\tif (unlikely(read_thread_flags() & _TIF_SYSCALL_DOTRACE)) {\n-\t\tif (unlikely(trap_is_unsupported_scv(regs))) {\n-\t\t\t/* Unsupported scv vector */\n-\t\t\t_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);\n-\t\t\treturn regs->gpr[3];\n-\t\t}\n-\t\t/*\n-\t\t * We use the return value of do_syscall_trace_enter() as the\n-\t\t * syscall number. If the syscall was rejected for any reason\n-\t\t * do_syscall_trace_enter() returns an invalid syscall number\n-\t\t * and the test against NR_syscalls will fail and the return\n-\t\t * value to be used is in regs->gpr[3].\n-\t\t */\n-\t\tr0 = do_syscall_trace_enter(regs);\n-\t\tif (unlikely(r0 >= NR_syscalls))\n-\t\t\treturn regs->gpr[3];\n-\n-\t} else if (unlikely(r0 >= NR_syscalls)) {\n+\tif (unlikely(r0 >= NR_syscalls)) {\n \t\tif (unlikely(trap_is_unsupported_scv(regs))) {\n \t\t\t/* Unsupported scv vector */\n \t\t\t_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);\n",
    "prefixes": [
        "v5",
        "7/8"
    ]
}