Patch Detail
get:
Show a patch.
patch:
Partially update a patch (only the supplied fields are changed).
put:
Update a patch (full replacement of writable fields).
GET /api/1.2/patches/2219974/?format=api
{ "id": 2219974, "url": "http://patchwork.ozlabs.org/api/1.2/patches/2219974/?format=api", "web_url": "http://patchwork.ozlabs.org/project/linuxppc-dev/patch/20260405125240.2558577-50-songmuchun@bytedance.com/", "project": { "id": 2, "url": "http://patchwork.ozlabs.org/api/1.2/projects/2/?format=api", "name": "Linux PPC development", "link_name": "linuxppc-dev", "list_id": "linuxppc-dev.lists.ozlabs.org", "list_email": "linuxppc-dev@lists.ozlabs.org", "web_url": "https://github.com/linuxppc/wiki/wiki", "scm_url": "https://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux.git", "webscm_url": "https://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux.git/", "list_archive_url": "https://lore.kernel.org/linuxppc-dev/", "list_archive_url_format": "https://lore.kernel.org/linuxppc-dev/{}/", "commit_url_format": "https://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux.git/commit/?id={}" }, "msgid": "<20260405125240.2558577-50-songmuchun@bytedance.com>", "list_archive_url": "https://lore.kernel.org/linuxppc-dev/20260405125240.2558577-50-songmuchun@bytedance.com/", "date": "2026-04-05T12:52:40", "name": "[49/49] mm: consolidate struct page power-of-2 size checks for HVO", "commit_ref": null, "pull_url": null, "state": "new", "archived": false, "hash": "08c01cd01b6735bc95aef18f7f0ca8eee8797fc1", "submitter": { "id": 78930, "url": "http://patchwork.ozlabs.org/api/1.2/people/78930/?format=api", "name": "Muchun Song", "email": "songmuchun@bytedance.com" }, "delegate": null, "mbox": "http://patchwork.ozlabs.org/project/linuxppc-dev/patch/20260405125240.2558577-50-songmuchun@bytedance.com/mbox/", "series": [ { "id": 498783, "url": "http://patchwork.ozlabs.org/api/1.2/series/498783/?format=api", "web_url": "http://patchwork.ozlabs.org/project/linuxppc-dev/list/?series=498783", "date": "2026-04-05T12:51:51", "name": "mm: Generalize vmemmap optimization for DAX and HugeTLB", "version": 1, "mbox": "http://patchwork.ozlabs.org/series/498783/mbox/" } ], "comments": 
"http://patchwork.ozlabs.org/api/patches/2219974/comments/", "check": "pending", "checks": "http://patchwork.ozlabs.org/api/patches/2219974/checks/", "tags": {}, "related": [], "headers": { "Return-Path": "\n <linuxppc-dev+bounces-19379-incoming=patchwork.ozlabs.org@lists.ozlabs.org>", "X-Original-To": [ "incoming@patchwork.ozlabs.org", "linuxppc-dev@lists.ozlabs.org" ], "Delivered-To": "patchwork-incoming@legolas.ozlabs.org", "Authentication-Results": [ "legolas.ozlabs.org;\n\tdkim=pass (2048-bit key;\n unprotected) header.d=bytedance.com header.i=@bytedance.com\n header.a=rsa-sha256 header.s=google header.b=Ztpm31+s;\n\tdkim-atps=neutral", "legolas.ozlabs.org;\n spf=pass (sender SPF authorized) smtp.mailfrom=lists.ozlabs.org\n (client-ip=2404:9400:21b9:f100::1; helo=lists.ozlabs.org;\n envelope-from=linuxppc-dev+bounces-19379-incoming=patchwork.ozlabs.org@lists.ozlabs.org;\n receiver=patchwork.ozlabs.org)", "lists.ozlabs.org;\n arc=none smtp.remote-ip=\"2607:f8b0:4864:20::102c\"", "lists.ozlabs.org;\n dmarc=pass (p=quarantine dis=none) header.from=bytedance.com", "lists.ozlabs.org;\n\tdkim=pass (2048-bit key;\n unprotected) header.d=bytedance.com header.i=@bytedance.com\n header.a=rsa-sha256 header.s=google header.b=Ztpm31+s;\n\tdkim-atps=neutral", "lists.ozlabs.org;\n spf=pass (sender SPF authorized) smtp.mailfrom=bytedance.com\n (client-ip=2607:f8b0:4864:20::102c; helo=mail-pj1-x102c.google.com;\n envelope-from=songmuchun@bytedance.com; receiver=lists.ozlabs.org)" ], "Received": [ "from lists.ozlabs.org (lists.ozlabs.org\n [IPv6:2404:9400:21b9:f100::1])\n\t(using TLSv1.3 with cipher TLS_AES_256_GCM_SHA384 (256/256 bits)\n\t key-exchange x25519)\n\t(No client certificate requested)\n\tby legolas.ozlabs.org (Postfix) with ESMTPS id 4fpXbC72lcz1xy1\n\tfor <incoming@patchwork.ozlabs.org>; Sun, 05 Apr 2026 22:59:03 +1000 (AEST)", "from boromir.ozlabs.org (localhost [127.0.0.1])\n\tby lists.ozlabs.org (Postfix) with ESMTP id 4fpXb84pXzz3fJf;\n\tSun, 05 Apr 2026 
22:59:00 +1000 (AEST)", "from mail-pj1-x102c.google.com (mail-pj1-x102c.google.com\n [IPv6:2607:f8b0:4864:20::102c])\n\t(using TLSv1.3 with cipher TLS_AES_256_GCM_SHA384 (256/256 bits)\n\t key-exchange x25519 server-signature RSA-PSS (2048 bits) server-digest\n SHA256)\n\t(No client certificate requested)\n\tby lists.ozlabs.org (Postfix) with ESMTPS id 4fpXb759Hqz3069\n\tfor <linuxppc-dev@lists.ozlabs.org>; Sun, 05 Apr 2026 22:58:59 +1000 (AEST)", "by mail-pj1-x102c.google.com with SMTP id\n 98e67ed59e1d1-35dac556bb2so2064940a91.1\n for <linuxppc-dev@lists.ozlabs.org>;\n Sun, 05 Apr 2026 05:58:59 -0700 (PDT)", "from n232-176-004.byted.org ([36.110.163.97])\n by smtp.gmail.com with ESMTPSA id\n 98e67ed59e1d1-35de66b4808sm3748505a91.2.2026.04.05.05.58.51\n (version=TLS1_3 cipher=TLS_AES_256_GCM_SHA384 bits=256/256);\n Sun, 05 Apr 2026 05:58:57 -0700 (PDT)" ], "ARC-Seal": "i=1; a=rsa-sha256; d=lists.ozlabs.org; s=201707; t=1775393940;\n\tcv=none;\n b=kOQpC0+FNBQt0iA5IEf2eYGbo0vF8q7yGIN58ubs0PuAeyJJNoO5ItrBqcK71TzLugmbu57BfEN+dDwoR423x0Oxo92Q8aqh91td4BOsxYLGodY6ut1LHtEF0JgCDb+pfUJLGGWQH5iB5nRVUGNcSXd3P1ni8Hb+40oC+/i1UA3msaQCo6t9OKm/yXbrOhk6sL3bF5qQT3n36A4jUJhKxc7iaxsd5jsrdwrw1AcfoRdbc6GS3h8wRH2q9cM+bdte7FX9EzENQuVm2mTFAQJdynz8luOEYy9z/7kcE1ctNqRvrL7pVJta9pXEO1U71oEj6ixNHeCOPZ79791aMzLp2A==", "ARC-Message-Signature": "i=1; a=rsa-sha256; d=lists.ozlabs.org; s=201707;\n\tt=1775393940; c=relaxed/relaxed;\n\tbh=5OTMoz77Yb7VHAB553cXlMYhsYoA1Jus+1lDf2tDpdk=;\n\th=From:To:Cc:Subject:Date:Message-Id:In-Reply-To:References:\n\t MIME-Version;\n b=b9mSWS8SYioPqLBhrWLKuWdqngqYOpCcA1Gx8YMGhZktbY8EHYqgqO7yXikLbP6B3piYXE7j/WnpvEdYiD6t5sQ+b3MSAEzMPdseUu2VNSoOkyr2e9cUBYXpuWHNJLmIvRR2E8wJ8pUR3X+Ki49hGzokWosWtKsUcaPSmvM6aVIBQEvmW5tbnlWnNcCN0YLaevihdBnO7NzKWBnFYCYsZfViWn9NzyN0S354NWmPDLgq8j1tZQcdhbzgwREU3j0SQ+zG0B4iMvZnmPA3lmZQfADG0cTG0+VXP9FH4W8xbPWG9VZHiYcGm3CvT9mar626yabMvOMwzNDWFpqFvvB75A==", "ARC-Authentication-Results": "i=1; lists.ozlabs.org;\n dmarc=pass (p=quarantine dis=none) 
header.from=bytedance.com;\n dkim=pass (2048-bit key;\n unprotected) header.d=bytedance.com header.i=@bytedance.com\n header.a=rsa-sha256 header.s=google header.b=Ztpm31+s; dkim-atps=neutral;\n spf=pass (client-ip=2607:f8b0:4864:20::102c; helo=mail-pj1-x102c.google.com;\n envelope-from=songmuchun@bytedance.com;\n receiver=lists.ozlabs.org) smtp.mailfrom=bytedance.com", "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed;\n d=bytedance.com; s=google; t=1775393938; x=1775998738;\n darn=lists.ozlabs.org;\n h=content-transfer-encoding:mime-version:references:in-reply-to\n :message-id:date:subject:cc:to:from:from:to:cc:subject:date\n :message-id:reply-to;\n bh=5OTMoz77Yb7VHAB553cXlMYhsYoA1Jus+1lDf2tDpdk=;\n b=Ztpm31+smr6/8ppB7M3Y5ScID2YptBeEwyyySnoBS1fslL0v+bMjLjDLqxQgYexaps\n s2sriB7Z7yxrhzXxw0LXdY7CZK77ZFu0rOkZs5KtrE22aq9+yHOQkW5rjw3lK52gGrkr\n MF6RT1C+13qEjwVMq5r8BN5mpxxodWpIw6VSeiKOFTumDE2MxWbMYIpE1edDDMyWyVri\n 2XMyjRZyVnmqpoxGjX59FzRp1baMPjyUSfsirPsE0JFCdfkXRWffgwYFI1Pt6D1S5684\n rbeh9aZ/ix3AVVuoBLX5IMTsnAMLCkvRz3tFB/GL3j+GAcxUhlYbQ/kv/Jfq5tyfM2Ut\n jmfw==", "X-Google-DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed;\n d=1e100.net; s=20251104; t=1775393938; x=1775998738;\n h=content-transfer-encoding:mime-version:references:in-reply-to\n :message-id:date:subject:cc:to:from:x-gm-gg:x-gm-message-state:from\n :to:cc:subject:date:message-id:reply-to;\n bh=5OTMoz77Yb7VHAB553cXlMYhsYoA1Jus+1lDf2tDpdk=;\n b=ckVIPHUeDZjFPZNL14E93jg95K5YkP923xi2lXppW6Hb/5kOXCYU7ogKo3taagxdwS\n Lulr0pckzkkm05Jb4uIqmbq27w82ZXwuYL4/ajTX0JdJbHMv6Ffy/fGjHxGsLnuSommA\n 5Ql4icQQr/XeGKNrNGOPfB1gpGTY36cT45+i1sShmnBrqcvNG/vc8e7YU53O5PxW3dmu\n pEKOTUnlWfQ582fYqeEVUVz/y+6LTKXFa9dujfoWMVF3DxAq3si6BBzirsGR4AdhJ35a\n utiKaSITs5aG5/SH41zzYGWbvT2SffksDn4anahEMi2K4yf01i2XPoHOkZLKPhcBMqlx\n 0vYQ==", "X-Forwarded-Encrypted": "i=1;\n AJvYcCU84zIvC5TzeybVE4ew3oO9cZAqC9OR2YZ8kYNwQdg1dEwuFC6AQfK63UfWWvFAi+Kfwi8RuPyZOjqiicU=@lists.ozlabs.org", "X-Gm-Message-State": 
"AOJu0YwOpmc3DiB9HodeNwvyuFOs28z8u54TPMxX1LD0Nu0LEs5hNYvB\n\t8QnojppVRJHE+2OaesVEXXuObEAST4cK8MJ6BSq5Hk2VRGuSHqxQR5gTHpgNml/3824=", "X-Gm-Gg": "AeBDieu0k93F8buQxKmROBKmrger8/bim6l0YARhjdWjLe9enr4MuI4QJMXnN2RTtyy\n\tDWm1PkVzYCnoOO9143FFAd7gLtUSDZlCNCT6how/UrxUBiDDxGtwyxqMfyIVN1irucyQmRMYd3T\n\tWwV3oc+K4MMKDEvWYzRSD7CyZ4KTv80/oSLOYhtDt64+10lKj1YbSdLS8H0NL5wmukrakduavNr\n\tOn1DOk8xBT9lyvZNmFhraz2IpfkzYUN2ivZMywPvK6IXRHlWge5U3cdmt+B6RKsC1jegNc4b7R3\n\tJnViOM+fyg3VTrg6ml8wR+krVlX63CjAtxhmLIOmZ9JxUjC1j6UyuO4tRRpizW+eUIte5z0IQq2\n\tB1yxJSf4Ho6weOxHPVXzhmxsdjgr5gTXuFUBpT1JPF8troDsbG9HOCQRMVzkloMGeWGYJ+wx0nQ\n\t2DQCRyUA6/WzaFjsBk67dEY45PgUeoPWRy3+oRZig7RdI=", "X-Received": "by 2002:a17:90b:2b4e:b0:356:35a5:4a64 with SMTP id\n 98e67ed59e1d1-35de6842dc2mr8500150a91.4.1775393937697;\n Sun, 05 Apr 2026 05:58:57 -0700 (PDT)", "From": "Muchun Song <songmuchun@bytedance.com>", "To": "Andrew Morton <akpm@linux-foundation.org>,\n\tDavid Hildenbrand <david@kernel.org>,\n\tMuchun Song <muchun.song@linux.dev>,\n\tOscar Salvador <osalvador@suse.de>,\n\tMichael Ellerman <mpe@ellerman.id.au>,\n\tMadhavan Srinivasan <maddy@linux.ibm.com>", "Cc": "Lorenzo Stoakes <ljs@kernel.org>,\n\t\"Liam R . 
Howlett\" <Liam.Howlett@oracle.com>,\n\tVlastimil Babka <vbabka@kernel.org>,\n\tMike Rapoport <rppt@kernel.org>,\n\tSuren Baghdasaryan <surenb@google.com>,\n\tMichal Hocko <mhocko@suse.com>,\n\tNicholas Piggin <npiggin@gmail.com>,\n\tChristophe Leroy <chleroy@kernel.org>,\n\taneesh.kumar@linux.ibm.com,\n\tjoao.m.martins@oracle.com,\n\tlinux-mm@kvack.org,\n\tlinuxppc-dev@lists.ozlabs.org,\n\tlinux-kernel@vger.kernel.org,\n\tMuchun Song <songmuchun@bytedance.com>", "Subject": "[PATCH 49/49] mm: consolidate struct page power-of-2 size checks for\n HVO", "Date": "Sun, 5 Apr 2026 20:52:40 +0800", "Message-Id": "<20260405125240.2558577-50-songmuchun@bytedance.com>", "X-Mailer": "git-send-email 2.20.1", "In-Reply-To": "<20260405125240.2558577-1-songmuchun@bytedance.com>", "References": "<20260405125240.2558577-1-songmuchun@bytedance.com>", "X-Mailing-List": "linuxppc-dev@lists.ozlabs.org", "List-Id": "<linuxppc-dev.lists.ozlabs.org>", "List-Help": "<mailto:linuxppc-dev+help@lists.ozlabs.org>", "List-Owner": "<mailto:linuxppc-dev+owner@lists.ozlabs.org>", "List-Post": "<mailto:linuxppc-dev@lists.ozlabs.org>", "List-Archive": "<https://lore.kernel.org/linuxppc-dev/>,\n <https://lists.ozlabs.org/pipermail/linuxppc-dev/>", "List-Subscribe": "<mailto:linuxppc-dev+subscribe@lists.ozlabs.org>,\n <mailto:linuxppc-dev+subscribe-digest@lists.ozlabs.org>,\n <mailto:linuxppc-dev+subscribe-nomail@lists.ozlabs.org>", "List-Unsubscribe": "<mailto:linuxppc-dev+unsubscribe@lists.ozlabs.org>", "Precedence": "list", "MIME-Version": "1.0", "Content-Transfer-Encoding": "8bit", "X-Spam-Status": "No, score=-0.2 required=3.0 tests=DKIM_SIGNED,DKIM_VALID,\n\tDKIM_VALID_AU,DKIM_VALID_EF,RCVD_IN_DNSWL_NONE,SPF_HELO_NONE,SPF_PASS\n\tautolearn=disabled version=4.0.1 OzLabs 8", "X-Spam-Checker-Version": "SpamAssassin 4.0.1 (2024-03-25) on lists.ozlabs.org" }, "content": "The Hugepage Vmemmap Optimization (HVO) requires that struct page\nsize is a power of two. 
This size is evaluated by the C compiler\nand currently cannot be natively evaluated by Kconfig. Therefore,\nthe condition is_power_of_2(sizeof(struct page)) was scattered\nacross several macros and static inline functions.\n\nExtract the check into a preprocessor macro\nSTRUCT_PAGE_SIZE_IS_POWER_OF_2 evaluated during the Kbuild process.\n\nDefine SPARSEMEM_VMEMMAP_OPTIMIZATION_ENABLED as a master toggle\nthat is 1 only if both Kconfig CONFIG_SPARSEMEM_VMEMMAP_OPTIMIZATION\nand the power of 2 size check are true.\n\nThis allows us to completely remove all scattered sizeof(struct page)\nchecks, making the code much cleaner and eliminating redundant logic.\n\nAdditionally, mm/hugetlb_vmemmap.c and its corresponding header are now\nguarded by SPARSEMEM_VMEMMAP_OPTIMIZATION_ENABLED. This brings an added\nbenefit: when struct page size is not a power of 2, the compiler can\nentirely optimize away the unused functions in mm/hugetlb_vmemmap.c,\nreducing kernel image size.\n\nSigned-off-by: Muchun Song <songmuchun@bytedance.com>\n---\n include/linux/mm_types.h | 2 ++\n include/linux/mm_types_task.h | 4 ++++\n include/linux/mmzone.h | 32 +++++++++++++++-----------------\n include/linux/page-flags.h | 28 ++++------------------------\n kernel/bounds.c | 2 ++\n mm/hugetlb_vmemmap.c | 2 ++\n mm/hugetlb_vmemmap.h | 4 +---\n mm/internal.h | 3 ---\n mm/sparse.c | 6 ++----\n mm/util.c | 2 +-\n 10 files changed, 33 insertions(+), 52 deletions(-)", "diff": "diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h\nindex a308e2c23b82..6de6c0c20f8b 100644\n--- a/include/linux/mm_types.h\n+++ b/include/linux/mm_types.h\n@@ -15,7 +15,9 @@\n #include <linux/cpumask.h>\n #include <linux/uprobes.h>\n #include <linux/rcupdate.h>\n+#ifndef __GENERATING_BOUNDS_H\n #include <linux/page-flags-layout.h>\n+#endif\n #include <linux/workqueue.h>\n #include <linux/seqlock.h>\n #include <linux/percpu_counter.h>\ndiff --git a/include/linux/mm_types_task.h b/include/linux/mm_types_task.h\nindex 
11bf319d78ec..09e5039fff97 100644\n--- a/include/linux/mm_types_task.h\n+++ b/include/linux/mm_types_task.h\n@@ -17,7 +17,11 @@\n #include <asm/tlbbatch.h>\n #endif\n \n+#ifndef __GENERATING_BOUNDS_H\n #define ALLOC_SPLIT_PTLOCKS\t(SPINLOCK_SIZE > BITS_PER_LONG/8)\n+#else\n+#define ALLOC_SPLIT_PTLOCKS\t0\n+#endif\n \n /*\n * When updating this, please also update struct resident_page_types[] in\ndiff --git a/include/linux/mmzone.h b/include/linux/mmzone.h\nindex a6900f585f9b..3a46cb0bfaaa 100644\n--- a/include/linux/mmzone.h\n+++ b/include/linux/mmzone.h\n@@ -96,27 +96,26 @@\n \n #define MAX_FOLIO_NR_PAGES\t(1UL << MAX_FOLIO_ORDER)\n \n-/*\n- * Hugepage Vmemmap Optimization (HVO) requires struct pages of the head page to\n- * be naturally aligned with regard to the folio size.\n- *\n- * HVO which is only active if the size of struct page is a power of 2.\n- */\n-#define MAX_FOLIO_VMEMMAP_ALIGN\t\t\t\t\t\\\n-\t(IS_ENABLED(CONFIG_SPARSEMEM_VMEMMAP_OPTIMIZATION) &&\t\\\n-\t is_power_of_2(sizeof(struct page)) ?\t\t\t\\\n-\t MAX_FOLIO_NR_PAGES * sizeof(struct page) : 0)\n-\n /* The number of vmemmap pages required by a vmemmap-optimized folio. 
*/\n #define OPTIMIZED_FOLIO_VMEMMAP_PAGES\t\t1\n #define OPTIMIZED_FOLIO_VMEMMAP_SIZE\t\t(OPTIMIZED_FOLIO_VMEMMAP_PAGES * PAGE_SIZE)\n #define OPTIMIZED_FOLIO_VMEMMAP_PAGE_STRUCTS\t(OPTIMIZED_FOLIO_VMEMMAP_SIZE / sizeof(struct page))\n #define OPTIMIZABLE_FOLIO_MIN_ORDER\t\t(ilog2(OPTIMIZED_FOLIO_VMEMMAP_PAGE_STRUCTS) + 1)\n \n+#if defined(CONFIG_SPARSEMEM_VMEMMAP_OPTIMIZATION) && STRUCT_PAGE_SIZE_IS_POWER_OF_2\n+#define SPARSEMEM_VMEMMAP_OPTIMIZATION_ENABLED\t1\n+/*\n+ * Hugepage Vmemmap Optimization (HVO) requires struct pages of the head page to\n+ * be naturally aligned with regard to the folio size.\n+ */\n+#define MAX_FOLIO_VMEMMAP_ALIGN\t\t\t(MAX_FOLIO_NR_PAGES * sizeof(struct page))\n #define __NR_OPTIMIZABLE_FOLIO_SIZES\t\t(MAX_FOLIO_ORDER - OPTIMIZABLE_FOLIO_MIN_ORDER + 1)\n #define NR_OPTIMIZABLE_FOLIO_SIZES\t\t\\\n-\t((__NR_OPTIMIZABLE_FOLIO_SIZES > 0 &&\t\\\n-\t IS_ENABLED(CONFIG_SPARSEMEM_VMEMMAP_OPTIMIZATION)) ? __NR_OPTIMIZABLE_FOLIO_SIZES : 0)\n+\t(__NR_OPTIMIZABLE_FOLIO_SIZES > 0 ? __NR_OPTIMIZABLE_FOLIO_SIZES : 0)\n+#else\n+#define MAX_FOLIO_VMEMMAP_ALIGN\t\t\t0\n+#define NR_OPTIMIZABLE_FOLIO_SIZES\t\t0\n+#endif\n \n enum migratetype {\n \tMIGRATE_UNMOVABLE,\n@@ -2015,7 +2014,7 @@ struct mem_section {\n \t */\n \tstruct page_ext *page_ext;\n #endif\n-#ifdef CONFIG_SPARSEMEM_VMEMMAP_OPTIMIZATION\n+#ifdef SPARSEMEM_VMEMMAP_OPTIMIZATION_ENABLED\n \t/*\n \t * The order of compound pages in this section. 
Typically, the section\n \t * holds compound pages of this order; a larger compound page will span\n@@ -2208,7 +2207,7 @@ static inline bool pfn_section_first_valid(struct mem_section *ms, unsigned long\n }\n #endif\n \n-#ifdef CONFIG_SPARSEMEM_VMEMMAP_OPTIMIZATION\n+#ifdef SPARSEMEM_VMEMMAP_OPTIMIZATION_ENABLED\n static inline void section_set_order(struct mem_section *section, unsigned int order)\n {\n \tVM_BUG_ON(section->order && order && section->order != order);\n@@ -2267,8 +2266,7 @@ static inline void section_set_compound_range(unsigned long pfn,\n \n static inline bool section_vmemmap_optimizable(const struct mem_section *section)\n {\n-\treturn is_power_of_2(sizeof(struct page)) &&\n-\t section_order(section) >= OPTIMIZABLE_FOLIO_MIN_ORDER;\n+\treturn section_order(section) >= OPTIMIZABLE_FOLIO_MIN_ORDER;\n }\n \n void sparse_init_early_section(int nid, struct page *map, unsigned long pnum,\ndiff --git a/include/linux/page-flags.h b/include/linux/page-flags.h\nindex 12665b34586c..bea934d49750 100644\n--- a/include/linux/page-flags.h\n+++ b/include/linux/page-flags.h\n@@ -198,32 +198,12 @@ enum pageflags {\n \n #ifndef __GENERATING_BOUNDS_H\n \n-/*\n- * For tail pages, if the size of struct page is power-of-2 ->compound_info\n- * encodes the mask that converts the address of the tail page address to\n- * the head page address.\n- *\n- * Otherwise, ->compound_info has direct pointer to head pages.\n- */\n-static __always_inline bool compound_info_has_mask(void)\n-{\n-\t/*\n-\t * The approach with mask would work in the wider set of conditions,\n-\t * but it requires validating that struct pages are naturally aligned\n-\t * for all orders up to the MAX_FOLIO_ORDER, which can be tricky.\n-\t */\n-\tif (!IS_ENABLED(CONFIG_SPARSEMEM_VMEMMAP_OPTIMIZATION))\n-\t\treturn false;\n-\n-\treturn is_power_of_2(sizeof(struct page));\n-}\n-\n static __always_inline unsigned long _compound_head(const struct page *page)\n {\n \tunsigned long info = 
READ_ONCE(page->compound_info);\n \tunsigned long mask;\n \n-\tif (!compound_info_has_mask()) {\n+\tif (!IS_ENABLED(SPARSEMEM_VMEMMAP_OPTIMIZATION_ENABLED)) {\n \t\t/* Bit 0 encodes PageTail() */\n \t\tif (info & 1)\n \t\t\treturn info - 1;\n@@ -232,8 +212,8 @@ static __always_inline unsigned long _compound_head(const struct page *page)\n \t}\n \n \t/*\n-\t * If compound_info_has_mask() is true the rest of the info encodes\n-\t * the mask that converts the address of the tail page to the head page.\n+\t * If HVO is enabled the rest of the info encodes the mask that converts\n+\t * the address of the tail page to the head page.\n \t *\n \t * No need to clear bit 0 in the mask as 'page' always has it clear.\n \t *\n@@ -257,7 +237,7 @@ static __always_inline void set_compound_head(struct page *tail,\n \tunsigned int shift;\n \tunsigned long mask;\n \n-\tif (!compound_info_has_mask()) {\n+\tif (!IS_ENABLED(SPARSEMEM_VMEMMAP_OPTIMIZATION_ENABLED)) {\n \t\tWRITE_ONCE(tail->compound_info, (unsigned long)head | 1);\n \t\treturn;\n \t}\ndiff --git a/kernel/bounds.c b/kernel/bounds.c\nindex 02b619eb6106..ff2ec3834d32 100644\n--- a/kernel/bounds.c\n+++ b/kernel/bounds.c\n@@ -8,6 +8,7 @@\n #define __GENERATING_BOUNDS_H\n #define COMPILE_OFFSETS\n /* Include headers that define the enum constants of interest */\n+#include <linux/mm_types.h>\n #include <linux/page-flags.h>\n #include <linux/mmzone.h>\n #include <linux/kbuild.h>\n@@ -30,6 +31,7 @@ int main(void)\n \tDEFINE(LRU_GEN_WIDTH, 0);\n \tDEFINE(__LRU_REFS_WIDTH, 0);\n #endif\n+\tDEFINE(STRUCT_PAGE_SIZE_IS_POWER_OF_2, is_power_of_2(sizeof(struct page)));\n \t/* End of constants */\n \n \treturn 0;\ndiff --git a/mm/hugetlb_vmemmap.c b/mm/hugetlb_vmemmap.c\nindex d595ef759bc2..0347341be156 100644\n--- a/mm/hugetlb_vmemmap.c\n+++ b/mm/hugetlb_vmemmap.c\n@@ -21,6 +21,7 @@\n #include \"hugetlb_vmemmap.h\"\n #include \"internal.h\"\n \n+#ifdef SPARSEMEM_VMEMMAP_OPTIMIZATION_ENABLED\n /**\n * struct vmemmap_remap_walk - walk 
vmemmap page table\n *\n@@ -693,3 +694,4 @@ static int __init hugetlb_vmemmap_init(void)\n \treturn 0;\n }\n late_initcall(hugetlb_vmemmap_init);\n+#endif\ndiff --git a/mm/hugetlb_vmemmap.h b/mm/hugetlb_vmemmap.h\nindex 0022f9c5a101..bd576ef41ee7 100644\n--- a/mm/hugetlb_vmemmap.h\n+++ b/mm/hugetlb_vmemmap.h\n@@ -12,7 +12,7 @@\n #include <linux/io.h>\n #include <linux/memblock.h>\n \n-#ifdef CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP\n+#if defined(CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP) && defined(SPARSEMEM_VMEMMAP_OPTIMIZATION_ENABLED)\n int hugetlb_vmemmap_restore_folio(const struct hstate *h, struct folio *folio);\n long hugetlb_vmemmap_restore_folios(const struct hstate *h,\n \t\t\t\t\tstruct list_head *folio_list,\n@@ -34,8 +34,6 @@ static inline unsigned int hugetlb_vmemmap_optimizable_size(const struct hstate\n {\n \tint size = hugetlb_vmemmap_size(h) - OPTIMIZED_FOLIO_VMEMMAP_SIZE;\n \n-\tif (!is_power_of_2(sizeof(struct page)))\n-\t\treturn 0;\n \treturn size > 0 ? size : 0;\n }\n #else\ndiff --git a/mm/internal.h b/mm/internal.h\nindex 02064f21bfe1..121c9076f09a 100644\n--- a/mm/internal.h\n+++ b/mm/internal.h\n@@ -1026,9 +1026,6 @@ static inline bool vmemmap_page_optimizable(const struct page *page)\n \tunsigned long pfn = page_to_pfn(page);\n \tunsigned int order = section_order(__pfn_to_section(pfn));\n \n-\tif (!is_power_of_2(sizeof(struct page)))\n-\t\treturn false;\n-\n \treturn (pfn & ((1L << order) - 1)) >= OPTIMIZED_FOLIO_VMEMMAP_PAGE_STRUCTS;\n }\n \ndiff --git a/mm/sparse.c b/mm/sparse.c\nindex 77bb0113bac5..7375f66a58d5 100644\n--- a/mm/sparse.c\n+++ b/mm/sparse.c\n@@ -404,10 +404,8 @@ void __init sparse_init(void)\n \tunsigned long pnum_end, pnum_begin, map_count = 1;\n \tint nid_begin;\n \n-\tif (compound_info_has_mask()) {\n-\t\tVM_WARN_ON_ONCE(!IS_ALIGNED((unsigned long) pfn_to_page(0),\n-\t\t\t\t MAX_FOLIO_VMEMMAP_ALIGN));\n-\t}\n+\tVM_WARN_ON_ONCE(IS_ENABLED(SPARSEMEM_VMEMMAP_OPTIMIZATION_ENABLED) &&\n+\t\t\t!IS_ALIGNED((unsigned 
long)pfn_to_page(0), MAX_FOLIO_VMEMMAP_ALIGN));\n \n \tpnum_begin = first_present_section_nr();\n \tnid_begin = sparse_early_nid(__nr_to_section(pnum_begin));\ndiff --git a/mm/util.c b/mm/util.c\nindex f063fd4de1e8..783b2081ea74 100644\n--- a/mm/util.c\n+++ b/mm/util.c\n@@ -1348,7 +1348,7 @@ void snapshot_page(struct page_snapshot *ps, const struct page *page)\n \t\tfoliop = (struct folio *)page;\n \t} else {\n \t\t/* See compound_head() */\n-\t\tif (compound_info_has_mask()) {\n+\t\tif (IS_ENABLED(SPARSEMEM_VMEMMAP_OPTIMIZATION_ENABLED)) {\n \t\t\tunsigned long p = (unsigned long)page;\n \n \t\t\tfoliop = (struct folio *)(p & info);\n", "prefixes": [ "49/49" ] }