Patch Detail
get:
Show a patch.
patch:
Partially update a patch (only the fields provided are changed).
put:
Fully update a patch (replaces the writable fields).
GET /api/patches/2218964/?format=api
{ "id": 2218964, "url": "http://patchwork.ozlabs.org/api/patches/2218964/?format=api", "web_url": "http://patchwork.ozlabs.org/project/linuxppc-dev/patch/20260402072701.628293-11-ruanjinjie@huawei.com/", "project": { "id": 2, "url": "http://patchwork.ozlabs.org/api/projects/2/?format=api", "name": "Linux PPC development", "link_name": "linuxppc-dev", "list_id": "linuxppc-dev.lists.ozlabs.org", "list_email": "linuxppc-dev@lists.ozlabs.org", "web_url": "https://github.com/linuxppc/wiki/wiki", "scm_url": "https://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux.git", "webscm_url": "https://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux.git/", "list_archive_url": "https://lore.kernel.org/linuxppc-dev/", "list_archive_url_format": "https://lore.kernel.org/linuxppc-dev/{}/", "commit_url_format": "https://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux.git/commit/?id={}" }, "msgid": "<20260402072701.628293-11-ruanjinjie@huawei.com>", "list_archive_url": "https://lore.kernel.org/linuxppc-dev/20260402072701.628293-11-ruanjinjie@huawei.com/", "date": "2026-04-02T07:26:56", "name": "[v12,10/15] x86/kexec: Use crash_prepare_headers() helper to simplify code", "commit_ref": null, "pull_url": null, "state": "handled-elsewhere", "archived": false, "hash": "4b2c53f1f2b00ccc3a3bc764105025297f8fbf70", "submitter": { "id": 84791, "url": "http://patchwork.ozlabs.org/api/people/84791/?format=api", "name": "Jinjie Ruan", "email": "ruanjinjie@huawei.com" }, "delegate": null, "mbox": "http://patchwork.ozlabs.org/project/linuxppc-dev/patch/20260402072701.628293-11-ruanjinjie@huawei.com/mbox/", "series": [ { "id": 498443, "url": "http://patchwork.ozlabs.org/api/series/498443/?format=api", "web_url": "http://patchwork.ozlabs.org/project/linuxppc-dev/list/?series=498443", "date": "2026-04-02T07:26:46", "name": "arm64/riscv: Add support for crashkernel CMA reservation", "version": 12, "mbox": "http://patchwork.ozlabs.org/series/498443/mbox/" } ], "comments": 
"http://patchwork.ozlabs.org/api/patches/2218964/comments/", "check": "pending", "checks": "http://patchwork.ozlabs.org/api/patches/2218964/checks/", "tags": {}, "related": [], "headers": { "Return-Path": "\n <linuxppc-dev+bounces-19201-incoming=patchwork.ozlabs.org@lists.ozlabs.org>", "X-Original-To": [ "incoming@patchwork.ozlabs.org", "linuxppc-dev@lists.ozlabs.org" ], "Delivered-To": "patchwork-incoming@legolas.ozlabs.org", "Authentication-Results": [ "legolas.ozlabs.org;\n\tdkim=pass (1024-bit key;\n unprotected) header.d=huawei.com header.i=@huawei.com header.a=rsa-sha256\n header.s=dkim header.b=aqeG4E+L;\n\tdkim-atps=neutral", "legolas.ozlabs.org;\n spf=pass (sender SPF authorized) smtp.mailfrom=lists.ozlabs.org\n (client-ip=2404:9400:21b9:f100::1; helo=lists.ozlabs.org;\n envelope-from=linuxppc-dev+bounces-19201-incoming=patchwork.ozlabs.org@lists.ozlabs.org;\n receiver=patchwork.ozlabs.org)", "lists.ozlabs.org;\n arc=none smtp.remote-ip=113.46.200.225", "lists.ozlabs.org;\n dmarc=pass (p=quarantine dis=none) header.from=huawei.com", "lists.ozlabs.org;\n\tdkim=pass (1024-bit key;\n unprotected) header.d=huawei.com header.i=@huawei.com header.a=rsa-sha256\n header.s=dkim header.b=aqeG4E+L;\n\tdkim-atps=neutral", "lists.ozlabs.org;\n spf=pass (sender SPF authorized) smtp.mailfrom=huawei.com\n (client-ip=113.46.200.225; helo=canpmsgout10.his.huawei.com;\n envelope-from=ruanjinjie@huawei.com; receiver=lists.ozlabs.org)" ], "Received": [ "from lists.ozlabs.org (lists.ozlabs.org\n [IPv6:2404:9400:21b9:f100::1])\n\t(using TLSv1.3 with cipher TLS_AES_256_GCM_SHA384 (256/256 bits)\n\t key-exchange x25519)\n\t(No client certificate requested)\n\tby legolas.ozlabs.org (Postfix) with ESMTPS id 4fmYMY5GTkz1yCs\n\tfor <incoming@patchwork.ozlabs.org>; Thu, 02 Apr 2026 18:27:05 +1100 (AEDT)", "from boromir.ozlabs.org (localhost [127.0.0.1])\n\tby lists.ozlabs.org (Postfix) with ESMTP id 4fmYLg2bg5z30FR;\n\tThu, 02 Apr 2026 18:26:19 +1100 (AEDT)", "from 
canpmsgout10.his.huawei.com (canpmsgout10.his.huawei.com\n [113.46.200.225])\n\t(using TLSv1.3 with cipher TLS_AES_256_GCM_SHA384 (256/256 bits)\n\t key-exchange x25519 server-signature RSA-PSS (2048 bits) server-digest\n SHA256)\n\t(No client certificate requested)\n\tby lists.ozlabs.org (Postfix) with ESMTPS id 4fmYLf3LCsz2yhG\n\tfor <linuxppc-dev@lists.ozlabs.org>; Thu, 02 Apr 2026 18:26:18 +1100 (AEDT)", "from mail.maildlp.com (unknown [172.19.163.200])\n\tby canpmsgout10.his.huawei.com (SkyGuard) with ESMTPS id 4fmYCW1kzxz1K981;\n\tThu, 2 Apr 2026 15:20:07 +0800 (CST)", "from dggpemf500011.china.huawei.com (unknown [7.185.36.131])\n\tby mail.maildlp.com (Postfix) with ESMTPS id 51DEB4056D;\n\tThu, 2 Apr 2026 15:26:15 +0800 (CST)", "from huawei.com (10.90.53.73) by dggpemf500011.china.huawei.com\n (7.185.36.131) with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.2.1544.11; Thu, 2 Apr\n 2026 15:26:12 +0800" ], "ARC-Seal": "i=1; a=rsa-sha256; d=lists.ozlabs.org; s=201707; t=1775114779;\n\tcv=none;\n b=mnJSs1PTZb80gNP9odPKgh6Pwm62HuyocBl6cFkZZ1TuH6tokU5Yw9irLzZA1uNHXnn0Tt97+agvHHfOu1aA0CInxH8cqwGU9uFJxF+QkPQ6S8vPQXZXIIPHxGyOs9O7aY6Q3a823DXZgrSaSilSspFmPvz2Eku56NlDycf4sMMEWcUKQZrYR3uAFZmxsbzQW+mK9JRkMH2D4nuo2PuELx4hpK5c9wjURW4UR/MQ4SO9kYmIqfZvnRCR4qSZ+GgoPACOp2Ccc3vi+wMEptWNTu4Wrv2RhhPQ5zbm7yBF4gNJq3uHtPgJ+BVWUSN6sbR2rtQPwHSiUiB7VqxpbizcFA==", "ARC-Message-Signature": "i=1; a=rsa-sha256; d=lists.ozlabs.org; s=201707;\n\tt=1775114779; c=relaxed/relaxed;\n\tbh=GPCukH3EUrSMBYCRHaxwWi6/BWowSCWPu0qInnaCPMw=;\n\th=From:To:CC:Subject:Date:Message-ID:In-Reply-To:References:\n\t MIME-Version:Content-Type;\n 
b=IXpIIzQkeRsTcv92gktuFlTsU1DwdipQAU5YtKc28Zif1CZhH5tXPn7YQMBerHrc+FuCTpqBjgZIR8DF2/3FCzFdgBy2nkp8tv0W60kOzgEayz37R4XGk4OgHqQ0lgcvAdDAd1dxYG3mG57/OoK1bExNJwuMBS7YkHoWSe45wI4e+27icplDuyf2HlcaxT5vDzqbzdB/3z+Gy/l3gb+L02/J5HYctnDEz1Tpo6OnINo8qbo+jLnn6GmzkTcH7YcARfU0/1fz5krXiD1GpMys4rn60ibpNSZtSbsYo7WJBqpNrwaoZaXnGUXez6Bb/jJNGO/R2du2pb+9LmtUGE7RlQ==", "ARC-Authentication-Results": "i=1; lists.ozlabs.org;\n dmarc=pass (p=quarantine dis=none) header.from=huawei.com;\n dkim=pass (1024-bit key;\n unprotected) header.d=huawei.com header.i=@huawei.com header.a=rsa-sha256\n header.s=dkim header.b=aqeG4E+L; dkim-atps=neutral;\n spf=pass (client-ip=113.46.200.225; helo=canpmsgout10.his.huawei.com;\n envelope-from=ruanjinjie@huawei.com;\n receiver=lists.ozlabs.org) smtp.mailfrom=huawei.com", "dkim-signature": "v=1; a=rsa-sha256; d=huawei.com; s=dkim;\n\tc=relaxed/relaxed; q=dns/txt;\n\th=From;\n\tbh=GPCukH3EUrSMBYCRHaxwWi6/BWowSCWPu0qInnaCPMw=;\n\tb=aqeG4E+LpS7IctWlo82J8VkIJRmmQsg+LEWrb1zS9/Aner5/I9/Gcd/F7ajWqjBZX1+Z68FD1\n\txqpH7y3lcqLgF0nkso8Z24O1H+QraYXD6KVQStR1FL1/MNPZ6ws4xVj+QHcAhQ37WlJfidb9zgu\n\tF1hw9EfeC9JbCIAB3WZBbW8=", "From": "Jinjie Ruan <ruanjinjie@huawei.com>", "To": "<corbet@lwn.net>, <skhan@linuxfoundation.org>, <catalin.marinas@arm.com>,\n\t<will@kernel.org>, <chenhuacai@kernel.org>, <kernel@xen0n.name>,\n\t<maddy@linux.ibm.com>, <mpe@ellerman.id.au>, <npiggin@gmail.com>,\n\t<chleroy@kernel.org>, <pjw@kernel.org>, <palmer@dabbelt.com>,\n\t<aou@eecs.berkeley.edu>, <alex@ghiti.fr>, <tglx@kernel.org>,\n\t<mingo@redhat.com>, <bp@alien8.de>, <dave.hansen@linux.intel.com>,\n\t<hpa@zytor.com>, <robh@kernel.org>, <saravanak@kernel.org>,\n\t<akpm@linux-foundation.org>, <bhe@redhat.com>, <vgoyal@redhat.com>,\n\t<dyoung@redhat.com>, <rdunlap@infradead.org>, <peterz@infradead.org>,\n\t<pawan.kumar.gupta@linux.intel.com>, <feng.tang@linux.alibaba.com>,\n\t<dapeng1.mi@linux.intel.com>, <kees@kernel.org>, <elver@google.com>,\n\t<paulmck@kernel.org>, <lirongqing@baidu.com>, 
<rppt@kernel.org>,\n\t<leitao@debian.org>, <ardb@kernel.org>, <jbohac@suse.cz>,\n\t<cfsworks@gmail.com>, <tangyouling@kylinos.cn>, <sourabhjain@linux.ibm.com>,\n\t<ritesh.list@gmail.com>, <hbathini@linux.ibm.com>, <eajames@linux.ibm.com>,\n\t<guoren@kernel.org>, <songshuaishuai@tinylab.org>, <kevin.brodsky@arm.com>,\n\t<vishal.moola@gmail.com>, <junhui.liu@pigmoral.tech>, <coxu@redhat.com>,\n\t<fuqiang.wang@easystack.cn>, <liaoyuanhong@vivo.com>,\n\t<takahiro.akashi@linaro.org>, <james.morse@arm.com>, <lizhengyu3@huawei.com>,\n\t<x86@kernel.org>, <linux-doc@vger.kernel.org>,\n\t<linux-kernel@vger.kernel.org>, <linux-arm-kernel@lists.infradead.org>,\n\t<loongarch@lists.linux.dev>, <linuxppc-dev@lists.ozlabs.org>,\n\t<linux-riscv@lists.infradead.org>, <devicetree@vger.kernel.org>,\n\t<kexec@lists.infradead.org>", "CC": "<ruanjinjie@huawei.com>", "Subject": "[PATCH v12 10/15] x86/kexec: Use crash_prepare_headers() helper to\n simplify code", "Date": "Thu, 2 Apr 2026 15:26:56 +0800", "Message-ID": "<20260402072701.628293-11-ruanjinjie@huawei.com>", "X-Mailer": "git-send-email 2.34.1", "In-Reply-To": "<20260402072701.628293-1-ruanjinjie@huawei.com>", "References": "<20260402072701.628293-1-ruanjinjie@huawei.com>", "X-Mailing-List": "linuxppc-dev@lists.ozlabs.org", "List-Id": "<linuxppc-dev.lists.ozlabs.org>", "List-Help": "<mailto:linuxppc-dev+help@lists.ozlabs.org>", "List-Owner": "<mailto:linuxppc-dev+owner@lists.ozlabs.org>", "List-Post": "<mailto:linuxppc-dev@lists.ozlabs.org>", "List-Archive": "<https://lore.kernel.org/linuxppc-dev/>,\n <https://lists.ozlabs.org/pipermail/linuxppc-dev/>", "List-Subscribe": "<mailto:linuxppc-dev+subscribe@lists.ozlabs.org>,\n <mailto:linuxppc-dev+subscribe-digest@lists.ozlabs.org>,\n <mailto:linuxppc-dev+subscribe-nomail@lists.ozlabs.org>", "List-Unsubscribe": "<mailto:linuxppc-dev+unsubscribe@lists.ozlabs.org>", "Precedence": "list", "MIME-Version": "1.0", "Content-Transfer-Encoding": "8bit", "Content-Type": "text/plain", 
"X-Originating-IP": "[10.90.53.73]", "X-ClientProxiedBy": "kwepems100002.china.huawei.com (7.221.188.206) To\n dggpemf500011.china.huawei.com (7.185.36.131)", "X-Spam-Status": "No, score=-0.2 required=3.0 tests=DKIM_SIGNED,DKIM_VALID,\n\tDKIM_VALID_AU,DKIM_VALID_EF,SPF_HELO_NONE,SPF_PASS autolearn=disabled\n\tversion=4.0.1 OzLabs 8", "X-Spam-Checker-Version": "SpamAssassin 4.0.1 (2024-03-25) on lists.ozlabs.org" }, "content": "Use the newly introduced crash_prepare_headers() function to replace\nthe existing prepare_elf_headers(), allocate cmem and exclude crash kernel\nmemory in the crash core, which reduce code duplication.\n\nOnly the following three architecture functions need to be implemented:\n- arch_get_system_nr_ranges(). Call get_nr_ram_ranges_callback()\n to pre-count the max number of memory ranges.\n\n- arch_crash_populate_cmem(). Use prepare_elf64_ram_headers_callback()\n to collect the memory ranges and fills them into cmem.\n\n- arch_crash_exclude_ranges(). Exclude the low 1M for x86.\n\nBy the way, remove the unused \"nr_mem_ranges\" in\narch_crash_handle_hotplug_event().\n\nCc: Thomas Gleixner <tglx@kernel.org>\nCc: Ingo Molnar <mingo@redhat.com>\nCc: Borislav Petkov <bp@alien8.de>\nCc: Dave Hansen <dave.hansen@linux.intel.com>\nCc: Andrew Morton <akpm@linux-foundation.org>\nCc: Vivek Goyal <vgoyal@redhat.com>\nReviewed-by: Sourabh Jain <sourabhjain@linux.ibm.com>\nAcked-by: Baoquan He <bhe@redhat.com>\nAcked-by: Mike Rapoport (Microsoft) <rppt@kernel.org>\nSigned-off-by: Jinjie Ruan <ruanjinjie@huawei.com>\n---\n arch/x86/kernel/crash.c | 89 +++++------------------------------------\n 1 file changed, 11 insertions(+), 78 deletions(-)", "diff": "diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c\nindex 7fa6d45ebe3f..10ef24611f2a 100644\n--- a/arch/x86/kernel/crash.c\n+++ b/arch/x86/kernel/crash.c\n@@ -152,16 +152,8 @@ static int get_nr_ram_ranges_callback(struct resource *res, void *arg)\n \treturn 0;\n }\n \n-/* Gather all the 
required information to prepare elf headers for ram regions */\n-static struct crash_mem *fill_up_crash_elf_data(void)\n+unsigned int arch_get_system_nr_ranges(void)\n {\n-\tunsigned int nr_ranges = 0;\n-\tstruct crash_mem *cmem;\n-\n-\twalk_system_ram_res(0, -1, &nr_ranges, get_nr_ram_ranges_callback);\n-\tif (!nr_ranges)\n-\t\treturn NULL;\n-\n \t/*\n \t * Exclusion of crash region, crashk_low_res and/or crashk_cma_ranges\n \t * may cause range splits. So add extra slots here.\n@@ -176,49 +168,16 @@ static struct crash_mem *fill_up_crash_elf_data(void)\n \t * But in order to lest the low 1M could be changed in the future,\n \t * (e.g. [start, 1M]), add a extra slot.\n \t */\n-\tnr_ranges += 3 + crashk_cma_cnt;\n-\tcmem = vzalloc(struct_size(cmem, ranges, nr_ranges));\n-\tif (!cmem)\n-\t\treturn NULL;\n-\n-\tcmem->max_nr_ranges = nr_ranges;\n+\tunsigned int nr_ranges = 3 + crashk_cma_cnt;\n \n-\treturn cmem;\n+\twalk_system_ram_res(0, -1, &nr_ranges, get_nr_ram_ranges_callback);\n+\treturn nr_ranges;\n }\n \n-/*\n- * Look for any unwanted ranges between mstart, mend and remove them. 
This\n- * might lead to split and split ranges are put in cmem->ranges[] array\n- */\n-static int elf_header_exclude_ranges(struct crash_mem *cmem)\n+int arch_crash_exclude_ranges(struct crash_mem *cmem)\n {\n-\tint ret = 0;\n-\tint i;\n-\n \t/* Exclude the low 1M because it is always reserved */\n-\tret = crash_exclude_mem_range(cmem, 0, SZ_1M - 1);\n-\tif (ret)\n-\t\treturn ret;\n-\n-\t/* Exclude crashkernel region */\n-\tret = crash_exclude_mem_range(cmem, crashk_res.start, crashk_res.end);\n-\tif (ret)\n-\t\treturn ret;\n-\n-\tif (crashk_low_res.end)\n-\t\tret = crash_exclude_mem_range(cmem, crashk_low_res.start,\n-\t\t\t\t\t crashk_low_res.end);\n-\tif (ret)\n-\t\treturn ret;\n-\n-\tfor (i = 0; i < crashk_cma_cnt; ++i) {\n-\t\tret = crash_exclude_mem_range(cmem, crashk_cma_ranges[i].start,\n-\t\t\t\t\t crashk_cma_ranges[i].end);\n-\t\tif (ret)\n-\t\t\treturn ret;\n-\t}\n-\n-\treturn 0;\n+\treturn crash_exclude_mem_range(cmem, 0, SZ_1M - 1);\n }\n \n static int prepare_elf64_ram_headers_callback(struct resource *res, void *arg)\n@@ -235,35 +194,9 @@ static int prepare_elf64_ram_headers_callback(struct resource *res, void *arg)\n \treturn 0;\n }\n \n-/* Prepare elf headers. 
Return addr and size */\n-static int prepare_elf_headers(void **addr, unsigned long *sz,\n-\t\t\t unsigned long *nr_mem_ranges)\n+int arch_crash_populate_cmem(struct crash_mem *cmem)\n {\n-\tstruct crash_mem *cmem;\n-\tint ret;\n-\n-\tcmem = fill_up_crash_elf_data();\n-\tif (!cmem)\n-\t\treturn -ENOMEM;\n-\n-\tret = walk_system_ram_res(0, -1, cmem, prepare_elf64_ram_headers_callback);\n-\tif (ret)\n-\t\tgoto out;\n-\n-\t/* Exclude unwanted mem ranges */\n-\tret = elf_header_exclude_ranges(cmem);\n-\tif (ret)\n-\t\tgoto out;\n-\n-\t/* Return the computed number of memory ranges, for hotplug usage */\n-\t*nr_mem_ranges = cmem->nr_ranges;\n-\n-\t/* By default prepare 64bit headers */\n-\tret = crash_prepare_elf64_headers(cmem, IS_ENABLED(CONFIG_X86_64), addr, sz);\n-\n-out:\n-\tvfree(cmem);\n-\treturn ret;\n+\treturn walk_system_ram_res(0, -1, cmem, prepare_elf64_ram_headers_callback);\n }\n #endif\n \n@@ -421,7 +354,8 @@ int crash_load_segments(struct kimage *image)\n \t\t\t\t .buf_max = ULONG_MAX, .top_down = false };\n \n \t/* Prepare elf headers and add a segment */\n-\tret = prepare_elf_headers(&kbuf.buffer, &kbuf.bufsz, &pnum);\n+\tret = crash_prepare_headers(IS_ENABLED(CONFIG_X86_64), &kbuf.buffer,\n+\t\t\t\t &kbuf.bufsz, &pnum);\n \tif (ret)\n \t\treturn ret;\n \n@@ -514,7 +448,6 @@ unsigned int arch_crash_get_elfcorehdr_size(void)\n void arch_crash_handle_hotplug_event(struct kimage *image, void *arg)\n {\n \tvoid *elfbuf = NULL, *old_elfcorehdr;\n-\tunsigned long nr_mem_ranges;\n \tunsigned long mem, memsz;\n \tunsigned long elfsz = 0;\n \n@@ -532,7 +465,7 @@ void arch_crash_handle_hotplug_event(struct kimage *image, void *arg)\n \t * Create the new elfcorehdr reflecting the changes to CPU and/or\n \t * memory resources.\n \t */\n-\tif (prepare_elf_headers(&elfbuf, &elfsz, &nr_mem_ranges)) {\n+\tif (crash_prepare_headers(IS_ENABLED(CONFIG_X86_64), &elfbuf, &elfsz, NULL)) {\n \t\tpr_err(\"unable to create new elfcorehdr\");\n \t\tgoto out;\n \t}\n", 
"prefixes": [ "v12", "10/15" ] }