get:
Show a patch.

patch:
Partially update a patch (only the supplied fields are changed).

put:
Update a patch.

GET /api/1.2/patches/2219962/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 2219962,
    "url": "http://patchwork.ozlabs.org/api/1.2/patches/2219962/?format=api",
    "web_url": "http://patchwork.ozlabs.org/project/linuxppc-dev/patch/20260405125240.2558577-38-songmuchun@bytedance.com/",
    "project": {
        "id": 2,
        "url": "http://patchwork.ozlabs.org/api/1.2/projects/2/?format=api",
        "name": "Linux PPC development",
        "link_name": "linuxppc-dev",
        "list_id": "linuxppc-dev.lists.ozlabs.org",
        "list_email": "linuxppc-dev@lists.ozlabs.org",
        "web_url": "https://github.com/linuxppc/wiki/wiki",
        "scm_url": "https://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux.git",
        "webscm_url": "https://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux.git/",
        "list_archive_url": "https://lore.kernel.org/linuxppc-dev/",
        "list_archive_url_format": "https://lore.kernel.org/linuxppc-dev/{}/",
        "commit_url_format": "https://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux.git/commit/?id={}"
    },
    "msgid": "<20260405125240.2558577-38-songmuchun@bytedance.com>",
    "list_archive_url": "https://lore.kernel.org/linuxppc-dev/20260405125240.2558577-38-songmuchun@bytedance.com/",
    "date": "2026-04-05T12:52:28",
    "name": "[37/49] mm/sparse-vmemmap: unify DAX and HugeTLB vmemmap optimization",
    "commit_ref": null,
    "pull_url": null,
    "state": "new",
    "archived": false,
    "hash": "04f618714110d14f07fa810042ea770f0f285f4f",
    "submitter": {
        "id": 78930,
        "url": "http://patchwork.ozlabs.org/api/1.2/people/78930/?format=api",
        "name": "Muchun Song",
        "email": "songmuchun@bytedance.com"
    },
    "delegate": null,
    "mbox": "http://patchwork.ozlabs.org/project/linuxppc-dev/patch/20260405125240.2558577-38-songmuchun@bytedance.com/mbox/",
    "series": [
        {
            "id": 498783,
            "url": "http://patchwork.ozlabs.org/api/1.2/series/498783/?format=api",
            "web_url": "http://patchwork.ozlabs.org/project/linuxppc-dev/list/?series=498783",
            "date": "2026-04-05T12:51:51",
            "name": "mm: Generalize vmemmap optimization for DAX and HugeTLB",
            "version": 1,
            "mbox": "http://patchwork.ozlabs.org/series/498783/mbox/"
        }
    ],
    "comments": "http://patchwork.ozlabs.org/api/patches/2219962/comments/",
    "check": "pending",
    "checks": "http://patchwork.ozlabs.org/api/patches/2219962/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "\n <linuxppc-dev+bounces-19367-incoming=patchwork.ozlabs.org@lists.ozlabs.org>",
        "X-Original-To": [
            "incoming@patchwork.ozlabs.org",
            "linuxppc-dev@lists.ozlabs.org"
        ],
        "Delivered-To": "patchwork-incoming@legolas.ozlabs.org",
        "Authentication-Results": [
            "legolas.ozlabs.org;\n\tdkim=pass (2048-bit key;\n unprotected) header.d=bytedance.com header.i=@bytedance.com\n header.a=rsa-sha256 header.s=google header.b=TqYvIa//;\n\tdkim-atps=neutral",
            "legolas.ozlabs.org;\n spf=pass (sender SPF authorized) smtp.mailfrom=lists.ozlabs.org\n (client-ip=112.213.38.117; helo=lists.ozlabs.org;\n envelope-from=linuxppc-dev+bounces-19367-incoming=patchwork.ozlabs.org@lists.ozlabs.org;\n receiver=patchwork.ozlabs.org)",
            "lists.ozlabs.org;\n arc=none smtp.remote-ip=\"2607:f8b0:4864:20::1035\"",
            "lists.ozlabs.org;\n dmarc=pass (p=quarantine dis=none) header.from=bytedance.com",
            "lists.ozlabs.org;\n\tdkim=pass (2048-bit key;\n unprotected) header.d=bytedance.com header.i=@bytedance.com\n header.a=rsa-sha256 header.s=google header.b=TqYvIa//;\n\tdkim-atps=neutral",
            "lists.ozlabs.org;\n spf=pass (sender SPF authorized) smtp.mailfrom=bytedance.com\n (client-ip=2607:f8b0:4864:20::1035; helo=mail-pj1-x1035.google.com;\n envelope-from=songmuchun@bytedance.com; receiver=lists.ozlabs.org)"
        ],
        "Received": [
            "from lists.ozlabs.org (lists.ozlabs.org [112.213.38.117])\n\t(using TLSv1.3 with cipher TLS_AES_256_GCM_SHA384 (256/256 bits)\n\t key-exchange x25519)\n\t(No client certificate requested)\n\tby legolas.ozlabs.org (Postfix) with ESMTPS id 4fpXYf3ZFYz1xy1\n\tfor <incoming@patchwork.ozlabs.org>; Sun, 05 Apr 2026 22:57:42 +1000 (AEST)",
            "from boromir.ozlabs.org (localhost [127.0.0.1])\n\tby lists.ozlabs.org (Postfix) with ESMTP id 4fpXYM1x4zz3f1p;\n\tSun, 05 Apr 2026 22:57:27 +1000 (AEST)",
            "from mail-pj1-x1035.google.com (mail-pj1-x1035.google.com\n [IPv6:2607:f8b0:4864:20::1035])\n\t(using TLSv1.3 with cipher TLS_AES_256_GCM_SHA384 (256/256 bits)\n\t key-exchange x25519 server-signature RSA-PSS (2048 bits) server-digest\n SHA256)\n\t(No client certificate requested)\n\tby lists.ozlabs.org (Postfix) with ESMTPS id 4fpXYL1hp8z2yvc\n\tfor <linuxppc-dev@lists.ozlabs.org>; Sun, 05 Apr 2026 22:57:26 +1000 (AEST)",
            "by mail-pj1-x1035.google.com with SMTP id\n 98e67ed59e1d1-35d9923eec5so1861088a91.2\n        for <linuxppc-dev@lists.ozlabs.org>;\n Sun, 05 Apr 2026 05:57:26 -0700 (PDT)",
            "from n232-176-004.byted.org ([36.110.163.97])\n        by smtp.gmail.com with ESMTPSA id\n 98e67ed59e1d1-35de66b4808sm3748505a91.2.2026.04.05.05.57.16\n        (version=TLS1_3 cipher=TLS_AES_256_GCM_SHA384 bits=256/256);\n        Sun, 05 Apr 2026 05:57:23 -0700 (PDT)"
        ],
        "ARC-Seal": "i=1; a=rsa-sha256; d=lists.ozlabs.org; s=201707; t=1775393847;\n\tcv=none;\n b=MYG40oUeMawD3aHeoFzSd4Fn6A/RZ+AJdtueWr22q0exIz/LMM3oSuqeOz3b+EGwr+d5wjwPgmFCfpaBMfe5N6TXHtlwinbSCAle6FB7wh8XsieyqHuiZ/cWNvsXqYOrcCCyuvHp0bhyFpWYnFo/lEA/FfMjMaCWZ4ty7y2/Lz10HI6F/b9Fm5W7FnVo0s6t9EmyWhjO7xydVCACDKJkQ2VS/mzKz/QP6HAQqQlawb1LIIsSQ/739FrMlr+7fve7/ZMCfYtmnMJL7GEeWHU4wb124H1sgwy/Xrqk4GjA7cF6gTwiTsoCQdiPovHq2SGllpLNjZjxgQjNF+xA9QOfGQ==",
        "ARC-Message-Signature": "i=1; a=rsa-sha256; d=lists.ozlabs.org; s=201707;\n\tt=1775393847; c=relaxed/relaxed;\n\tbh=Un8QJIOHiLQFUGTVr1x/7L06qAEVN23h4FxFsc9SMV0=;\n\th=From:To:Cc:Subject:Date:Message-Id:In-Reply-To:References:\n\t MIME-Version;\n b=IM35fB/+K/V977dBiS2WYmktwYzF5GaQGdz+MSmxx7Rhb01+txr5opJndU7hRYT0iDL8krq1CT9NchbPu8tyq/V29/5yqFamqee2xu2GIyM4LlLv6689hMEieH6WerGJnjEam6dqoF9jyp2tV4Aq39VqqLCe/8kRqzrhYKzxKHRJvML323qcW/q0cJyKm07mq47C6joqBstfVOP26JdCoqGYUPHRnNA+l7tUuTAfpX5PmOA3wQZ/rA0j9raNdUuw3gGzfg/9G11Ntqy/pSq62/1Bzd8HqM8LinBf0+R0ZAHMnRFNcsW64IWB23n/RPkW40RSgXxtmyabL04JD+MeLw==",
        "ARC-Authentication-Results": "i=1; lists.ozlabs.org;\n dmarc=pass (p=quarantine dis=none) header.from=bytedance.com;\n dkim=pass (2048-bit key;\n unprotected) header.d=bytedance.com header.i=@bytedance.com\n header.a=rsa-sha256 header.s=google header.b=TqYvIa//; dkim-atps=neutral;\n spf=pass (client-ip=2607:f8b0:4864:20::1035; helo=mail-pj1-x1035.google.com;\n envelope-from=songmuchun@bytedance.com;\n receiver=lists.ozlabs.org) smtp.mailfrom=bytedance.com",
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed;\n        d=bytedance.com; s=google; t=1775393844; x=1775998644;\n darn=lists.ozlabs.org;\n        h=content-transfer-encoding:mime-version:references:in-reply-to\n         :message-id:date:subject:cc:to:from:from:to:cc:subject:date\n         :message-id:reply-to;\n        bh=Un8QJIOHiLQFUGTVr1x/7L06qAEVN23h4FxFsc9SMV0=;\n        b=TqYvIa//9irIUrXXuOjQOnI2GQN8BA5lEYS2zYq0cQnr4I1dq4EqAlxnHQzyu7L9m/\n         lr87MfkVATRgZ6zZ7+3OPMlBRkycoBzZ2LgQCfClKYF33g2yPK3mufYVufXj3RrNG/yz\n         eflFASdrlQ2WVqKJ6wM3TCfQuLmbXiGw4pybM2FG6lXP1gx9kmDu9uoKU+N8l0sI1UP/\n         +NHPp7ugpenvnJccpNYqgNOjGBQe0ym+MaMHdrQk2yC25FqD+Jc8pbm/s+Jq8u2w0WHk\n         ysYAl4DgF3XvE8C8I81hRtraGV0zwiN66lvk9ljEQS+zyDiAihsxu4Is1HUAY3olbtT1\n         3gKw==",
        "X-Google-DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed;\n        d=1e100.net; s=20251104; t=1775393844; x=1775998644;\n        h=content-transfer-encoding:mime-version:references:in-reply-to\n         :message-id:date:subject:cc:to:from:x-gm-gg:x-gm-message-state:from\n         :to:cc:subject:date:message-id:reply-to;\n        bh=Un8QJIOHiLQFUGTVr1x/7L06qAEVN23h4FxFsc9SMV0=;\n        b=rFMajnuAcqFp02GK/wd8s/yvFOQsTBakX3Cc8j/etQZdMEZPXUTlbhg6FiflvAKzIT\n         5gZuKbAOnFo9f+xrgNtLKSgvPQQekNF58/ch6FFPeayW5AWwGWhxZjtpbEkIuO3yJIys\n         wT7Qc7OaYNL7EeivGa2UMdW+v+9zMo8UeUNAAA/UEhfIYgOsNFv9+F8Kuz3qg6HK4zMP\n         gqaGJelm1raXT5CGqTHiyWgu/ZclHzRcJMj/WrVMqVUPRuA9VQmkOiAf9OFjWNZa4WjG\n         VkPYGCg1d/a0YrEz6p9F8mXzWgNFyNQzzIKWanmXL+FLb2SIch1c8R6lV5OE6z2R8C0a\n         Kcxg==",
        "X-Forwarded-Encrypted": "i=1;\n AJvYcCVi0Kmc6IZ0fDBv9dX/Fx8ijnTe+omWc8eBwQzPFPpd5HSVoOqo70I7D8G+ekBEZyJ9kW/M5M2SLpx/LKk=@lists.ozlabs.org",
        "X-Gm-Message-State": "AOJu0YwsPyHw+p9/0azc/24vlB55/S2BTd3nQIzd/Oyv6+pHtlnrLdiG\n\tRISjxfS68EOiJxCSZrKYDe0vQvpavo6IcCbnxnSxmHjJDLiOQawKXcfNNLMBNJJ7OMM=",
        "X-Gm-Gg": "AeBDietVxGqHR28cbwEXNQBnd/7Pr1oqh6WKDZobl/yVhlNSbjiCWcWqb8AO0+9QDuE\n\tUUKI8oZtdsHzAglo/eJfCIennpKgaQnMfiQzYanHSyB9I5iul87JOxAhg0tsiathRfn1bJIWL3j\n\tRlgOXD9IG9u6Mq+J771MOcmEVq+ddQRMNck8a4Fy/bsbOAFyEJqzQQqX6ZoVcHt5XN6hUXmSLj/\n\tCucNyTooTFRJ8MxwfB2si7DKnEQ0hK7iClOrcY7XRRXGemVdFBceEu1Lw4dRIccPDZxXdrXmPru\n\t4Dyplc4U790suCvD3Kblq03/QbmC+6W68lrJ3Vz3EJwHzm/OsDrPfSzVG3Zrs1dIQgUIcGPShvb\n\t/BwA32NdwGB68ICFyVZalYQEq3PbOG42jCTNrFES4DVY8n66AYDbt9qrGgMvbWgQ9a51n8Sgv5d\n\tuTyJ+ApLtYqHfZLQ+h5KFWJmsvXznDiKWlGZMusonzeYA=",
        "X-Received": "by 2002:a17:90b:4fc4:b0:35d:a374:b385 with SMTP id\n 98e67ed59e1d1-35de6a1951bmr7999215a91.29.1775393844264;\n        Sun, 05 Apr 2026 05:57:24 -0700 (PDT)",
        "From": "Muchun Song <songmuchun@bytedance.com>",
        "To": "Andrew Morton <akpm@linux-foundation.org>,\n\tDavid Hildenbrand <david@kernel.org>,\n\tMuchun Song <muchun.song@linux.dev>,\n\tOscar Salvador <osalvador@suse.de>,\n\tMichael Ellerman <mpe@ellerman.id.au>,\n\tMadhavan Srinivasan <maddy@linux.ibm.com>",
        "Cc": "Lorenzo Stoakes <ljs@kernel.org>,\n\t\"Liam R . Howlett\" <Liam.Howlett@oracle.com>,\n\tVlastimil Babka <vbabka@kernel.org>,\n\tMike Rapoport <rppt@kernel.org>,\n\tSuren Baghdasaryan <surenb@google.com>,\n\tMichal Hocko <mhocko@suse.com>,\n\tNicholas Piggin <npiggin@gmail.com>,\n\tChristophe Leroy <chleroy@kernel.org>,\n\taneesh.kumar@linux.ibm.com,\n\tjoao.m.martins@oracle.com,\n\tlinux-mm@kvack.org,\n\tlinuxppc-dev@lists.ozlabs.org,\n\tlinux-kernel@vger.kernel.org,\n\tMuchun Song <songmuchun@bytedance.com>",
        "Subject": "[PATCH 37/49] mm/sparse-vmemmap: unify DAX and HugeTLB vmemmap\n optimization",
        "Date": "Sun,  5 Apr 2026 20:52:28 +0800",
        "Message-Id": "<20260405125240.2558577-38-songmuchun@bytedance.com>",
        "X-Mailer": "git-send-email 2.20.1",
        "In-Reply-To": "<20260405125240.2558577-1-songmuchun@bytedance.com>",
        "References": "<20260405125240.2558577-1-songmuchun@bytedance.com>",
        "X-Mailing-List": "linuxppc-dev@lists.ozlabs.org",
        "List-Id": "<linuxppc-dev.lists.ozlabs.org>",
        "List-Help": "<mailto:linuxppc-dev+help@lists.ozlabs.org>",
        "List-Owner": "<mailto:linuxppc-dev+owner@lists.ozlabs.org>",
        "List-Post": "<mailto:linuxppc-dev@lists.ozlabs.org>",
        "List-Archive": "<https://lore.kernel.org/linuxppc-dev/>,\n  <https://lists.ozlabs.org/pipermail/linuxppc-dev/>",
        "List-Subscribe": "<mailto:linuxppc-dev+subscribe@lists.ozlabs.org>,\n  <mailto:linuxppc-dev+subscribe-digest@lists.ozlabs.org>,\n  <mailto:linuxppc-dev+subscribe-nomail@lists.ozlabs.org>",
        "List-Unsubscribe": "<mailto:linuxppc-dev+unsubscribe@lists.ozlabs.org>",
        "Precedence": "list",
        "MIME-Version": "1.0",
        "Content-Transfer-Encoding": "8bit",
        "X-Spam-Status": "No, score=-0.2 required=3.0 tests=DKIM_SIGNED,DKIM_VALID,\n\tDKIM_VALID_AU,DKIM_VALID_EF,RCVD_IN_DNSWL_NONE,SPF_HELO_NONE,SPF_PASS\n\tautolearn=disabled version=4.0.1 OzLabs 8",
        "X-Spam-Checker-Version": "SpamAssassin 4.0.1 (2024-03-25) on lists.ozlabs.org"
    },
    "content": "The ultimate goal of the recent refactoring series is to unify the vmemmap\noptimization logic for both DAX and HugeTLB under a common framework\n(CONFIG_SPARSEMEM_VMEMMAP_OPTIMIZATION).\n\nA key breakthrough in this unification is that DAX now only requires 1\nvmemmap page to be preserved (the head page), aligning its requirements\nexactly with HugeTLB. Previously, DAX optimization relied on a dedicated\nupper-level function, vmemmap_populate_compound_pages, which handled the\nmanual allocation of the head page AND the first tail page before reusing\nthe shared tail page for the rest.\n\nBecause DAX and HugeTLB are now perfectly aligned in their optimization\nrequirements (1 reserved page + reused shared tail pages), this patch\neliminates the dedicated compound page mapping loop entirely. Instead, it\npushes the optimization decision down to the lowest level in\nvmemmap_pte_populate. Now, all mapping requests flow through the standard\nvmemmap_populate_basepages.\n\nSigned-off-by: Muchun Song <songmuchun@bytedance.com>\n---\n arch/powerpc/mm/book3s64/radix_pgtable.c |  13 +-\n include/linux/mm.h                       |   2 +-\n mm/mm_init.c                             |   2 +-\n mm/sparse-vmemmap.c                      | 185 +++++------------------\n 4 files changed, 40 insertions(+), 162 deletions(-)",
    "diff": "diff --git a/arch/powerpc/mm/book3s64/radix_pgtable.c b/arch/powerpc/mm/book3s64/radix_pgtable.c\nindex 5ce3deb464d5..714d5cdc10ec 100644\n--- a/arch/powerpc/mm/book3s64/radix_pgtable.c\n+++ b/arch/powerpc/mm/book3s64/radix_pgtable.c\n@@ -1326,17 +1326,8 @@ static int __meminit vmemmap_populate_compound_pages(unsigned long start_pfn,\n \t\t\t\t\treturn -ENOMEM;\n \t\t\t\tvmemmap_verify(pte, node, addr, addr + PAGE_SIZE);\n \n-\t\t\t\t/*\n-\t\t\t\t * Populate the tail pages vmemmap page\n-\t\t\t\t * It can fall in different pmd, hence\n-\t\t\t\t * vmemmap_populate_address()\n-\t\t\t\t */\n-\t\t\t\tpte = radix__vmemmap_populate_address(addr + PAGE_SIZE, node, NULL, NULL);\n-\t\t\t\tif (!pte)\n-\t\t\t\t\treturn -ENOMEM;\n-\n-\t\t\t\taddr_pfn += 2;\n-\t\t\t\tnext = addr + 2 * PAGE_SIZE;\n+\t\t\t\taddr_pfn += 1;\n+\t\t\t\tnext = addr + PAGE_SIZE;\n \t\t\t\tcontinue;\n \t\t\t}\n \ndiff --git a/include/linux/mm.h b/include/linux/mm.h\nindex 15841829b7eb..bceef0dc578b 100644\n--- a/include/linux/mm.h\n+++ b/include/linux/mm.h\n@@ -4912,7 +4912,7 @@ static inline void vmem_altmap_free(struct vmem_altmap *altmap,\n }\n #endif\n \n-#define VMEMMAP_RESERVE_NR\t2\n+#define VMEMMAP_RESERVE_NR\tOPTIMIZED_FOLIO_VMEMMAP_PAGES\n #ifdef CONFIG_ARCH_WANT_OPTIMIZE_DAX_VMEMMAP\n static inline bool __vmemmap_can_optimize(struct vmem_altmap *altmap,\n \t\t\t\t\t  struct dev_pagemap *pgmap)\ndiff --git a/mm/mm_init.c b/mm/mm_init.c\nindex 636a0f9644f6..6b23b5f02544 100644\n--- a/mm/mm_init.c\n+++ b/mm/mm_init.c\n@@ -1066,7 +1066,7 @@ static void __ref __init_zone_device_page(struct page *page, unsigned long pfn,\n  * initialize is a lot smaller that the total amount of struct pages being\n  * mapped. This is a paired / mild layering violation with explicit knowledge\n  * of how the sparse_vmemmap internals handle compound pages in the lack\n- * of an altmap. See vmemmap_populate_compound_pages().\n+ * of an altmap.\n  */\n static inline unsigned long compound_nr_pages(struct vmem_altmap *altmap,\n \t\t\t\t\t      struct dev_pagemap *pgmap,\ndiff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c\nindex 1867b5dcc73c..fd7b0e1e5aba 100644\n--- a/mm/sparse-vmemmap.c\n+++ b/mm/sparse-vmemmap.c\n@@ -152,46 +152,40 @@ static pte_t * __meminit vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, in\n \t\t\t\t\t      struct vmem_altmap *altmap,\n \t\t\t\t\t      unsigned long ptpfn)\n {\n-\tpte_t *pte = pte_offset_kernel(pmd, addr);\n-\n-\tif (pte_none(ptep_get(pte))) {\n-\t\tpte_t entry;\n-\n-\t\tif (vmemmap_page_optimizable((struct page *)addr) &&\n-\t\t    ptpfn == (unsigned long)-1) {\n-\t\t\tstruct page *page;\n-\t\t\tunsigned long pfn = page_to_pfn((struct page *)addr);\n-\t\t\tconst struct mem_section *ms = __pfn_to_section(pfn);\n-\n-\t\t\tpage = vmemmap_shared_tail_page(section_order(ms),\n-\t\t\t\t\t\t\tsection_to_zone(ms, node));\n-\t\t\tif (!page)\n-\t\t\t\treturn NULL;\n-\t\t\tptpfn = page_to_pfn(page);\n-\t\t}\n+\tpte_t entry, *pte = pte_offset_kernel(pmd, addr);\n \n-\t\tif (ptpfn == (unsigned long)-1) {\n-\t\t\tvoid *p = vmemmap_alloc_block_buf(PAGE_SIZE, node, altmap);\n-\n-\t\t\tif (!p)\n-\t\t\t\treturn NULL;\n-\t\t\tptpfn = PHYS_PFN(__pa(p));\n-\t\t} else {\n-\t\t\t/*\n-\t\t\t * When a PTE/PMD entry is freed from the init_mm\n-\t\t\t * there's a free_pages() call to this page allocated\n-\t\t\t * above. Thus this get_page() is paired with the\n-\t\t\t * put_page_testzero() on the freeing path.\n-\t\t\t * This can only called by certain ZONE_DEVICE path,\n-\t\t\t * and through vmemmap_populate_compound_pages() when\n-\t\t\t * slab is available.\n-\t\t\t */\n-\t\t\tif (slab_is_available())\n-\t\t\t\tget_page(pfn_to_page(ptpfn));\n-\t\t}\n-\t\tentry = pfn_pte(ptpfn, PAGE_KERNEL);\n-\t\tset_pte_at(&init_mm, addr, pte, entry);\n+\tif (!pte_none(ptep_get(pte)))\n+\t\treturn pte;\n+\n+\t/* See layout diagram in Documentation/mm/vmemmap_dedup.rst. */\n+\tif (vmemmap_page_optimizable((struct page *)addr)) {\n+\t\tstruct page *page;\n+\t\tunsigned long pfn = page_to_pfn((struct page *)addr);\n+\t\tconst struct mem_section *ms = __pfn_to_section(pfn);\n+\n+\t\tpage = vmemmap_shared_tail_page(section_order(ms),\n+\t\t\t\t\t\tsection_to_zone(ms, node));\n+\t\tif (!page)\n+\t\t\treturn NULL;\n+\n+\t\t/*\n+\t\t * When a PTE entry is freed, a free_pages() call occurs. This\n+\t\t * get_page() pairs with put_page_testzero() on the freeing\n+\t\t * path. This can only occur when slab is available.\n+\t\t */\n+\t\tif (slab_is_available())\n+\t\t\tget_page(page);\n+\t\tptpfn = page_to_pfn(page);\n+\t} else {\n+\t\tvoid *p = vmemmap_alloc_block_buf(PAGE_SIZE, node, altmap);\n+\n+\t\tif (!p)\n+\t\t\treturn NULL;\n+\t\tptpfn = PHYS_PFN(__pa(p));\n \t}\n+\tentry = pfn_pte(ptpfn, PAGE_KERNEL);\n+\tset_pte_at(&init_mm, addr, pte, entry);\n+\n \treturn pte;\n }\n \n@@ -287,17 +281,15 @@ static pte_t * __meminit vmemmap_populate_address(unsigned long addr, int node,\n \treturn pte;\n }\n \n-static int __meminit vmemmap_populate_range(unsigned long start,\n-\t\t\t\t\t    unsigned long end, int node,\n-\t\t\t\t\t    struct vmem_altmap *altmap,\n-\t\t\t\t\t    unsigned long ptpfn)\n+int __meminit vmemmap_populate_basepages(unsigned long start, unsigned long end,\n+\t\t\t\t\t int node, struct vmem_altmap *altmap,\n+\t\t\t\t\t struct dev_pagemap *pgmap)\n {\n \tunsigned long addr = start;\n \tpte_t *pte;\n \n \tfor (; addr < end; addr += PAGE_SIZE) {\n-\t\tpte = vmemmap_populate_address(addr, node, altmap,\n-\t\t\t\t\t       ptpfn);\n+\t\tpte = vmemmap_populate_address(addr, node, altmap, -1);\n \t\tif (!pte)\n \t\t\treturn -ENOMEM;\n \t}\n@@ -305,19 +297,6 @@ static int __meminit vmemmap_populate_range(unsigned long start,\n \treturn 0;\n }\n \n-static int __meminit vmemmap_populate_compound_pages(unsigned long start,\n-\t\t\t\t\t\t     unsigned long end, int node,\n-\t\t\t\t\t\t     struct dev_pagemap *pgmap);\n-\n-int __meminit vmemmap_populate_basepages(unsigned long start, unsigned long end,\n-\t\t\t\t\t int node, struct vmem_altmap *altmap,\n-\t\t\t\t\t struct dev_pagemap *pgmap)\n-{\n-\tif (vmemmap_can_optimize(altmap, pgmap))\n-\t\treturn vmemmap_populate_compound_pages(start, end, node, pgmap);\n-\treturn vmemmap_populate_range(start, end, node, altmap, -1);\n-}\n-\n /*\n  * Write protect the mirrored tail page structs for HVO. This will be\n  * called from the hugetlb code when gathering and initializing the\n@@ -397,9 +376,6 @@ int __meminit vmemmap_populate_hugepages(unsigned long start, unsigned long end,\n \tpud_t *pud;\n \tpmd_t *pmd;\n \n-\tif (vmemmap_can_optimize(altmap, pgmap))\n-\t\treturn vmemmap_populate_compound_pages(start, end, node, pgmap);\n-\n \tfor (addr = start; addr < end; addr = next) {\n \t\tunsigned long pfn = page_to_pfn((struct page *)addr);\n \t\tconst struct mem_section *ms = __pfn_to_section(pfn);\n@@ -447,95 +423,6 @@ int __meminit vmemmap_populate_hugepages(unsigned long start, unsigned long end,\n \treturn 0;\n }\n \n-/*\n- * For compound pages bigger than section size (e.g. x86 1G compound\n- * pages with 2M subsection size) fill the rest of sections as tail\n- * pages.\n- *\n- * Note that memremap_pages() resets @nr_range value and will increment\n- * it after each range successful onlining. Thus the value or @nr_range\n- * at section memmap populate corresponds to the in-progress range\n- * being onlined here.\n- */\n-static bool __meminit reuse_compound_section(unsigned long start_pfn,\n-\t\t\t\t\t     struct dev_pagemap *pgmap)\n-{\n-\tunsigned long nr_pages = pgmap_vmemmap_nr(pgmap);\n-\tunsigned long offset = start_pfn -\n-\t\tPHYS_PFN(pgmap->ranges[pgmap->nr_range].start);\n-\n-\treturn !IS_ALIGNED(offset, nr_pages) && nr_pages > PAGES_PER_SUBSECTION;\n-}\n-\n-static int __meminit vmemmap_populate_compound_pages(unsigned long start,\n-\t\t\t\t\t\t     unsigned long end, int node,\n-\t\t\t\t\t\t     struct dev_pagemap *pgmap)\n-{\n-\tunsigned long size, addr;\n-\tpte_t *pte;\n-\tint rc;\n-\tunsigned long start_pfn = page_to_pfn((struct page *)start);\n-\tconst struct mem_section *ms = __pfn_to_section(start_pfn);\n-\tstruct page *tail;\n-\n-\t/* This may occur in sub-section scenarios. */\n-\tif (!section_vmemmap_optimizable(ms))\n-\t\treturn vmemmap_populate_range(start, end, node, NULL, -1);\n-\n-\ttail = vmemmap_shared_tail_page(section_order(ms),\n-\t\t\t\t\tsection_to_zone(ms, node));\n-\tif (!tail)\n-\t\treturn -ENOMEM;\n-\n-\tif (reuse_compound_section(start_pfn, pgmap))\n-\t\treturn vmemmap_populate_range(start, end, node, NULL,\n-\t\t\t\t\t      page_to_pfn(tail));\n-\n-\tsize = min(end - start, pgmap_vmemmap_nr(pgmap) * sizeof(struct page));\n-\tfor (addr = start; addr < end; addr += size) {\n-\t\tunsigned long next, last = addr + size;\n-\t\tvoid *p;\n-\n-\t\t/* Populate the head page vmemmap page */\n-\t\tpte = vmemmap_populate_address(addr, node, NULL, -1);\n-\t\tif (!pte)\n-\t\t\treturn -ENOMEM;\n-\n-\t\t/*\n-\t\t * Allocate manually since vmemmap_populate_address() will assume DAX\n-\t\t * only needs 1 vmemmap page to be reserved, however DAX now needs 2\n-\t\t * vmemmap pages. This is a temporary solution and will be unified\n-\t\t * with HugeTLB in the future.\n-\t\t */\n-\t\tp = vmemmap_alloc_block_buf(PAGE_SIZE, node, NULL);\n-\t\tif (!p)\n-\t\t\treturn -ENOMEM;\n-\n-\t\t/* Populate the tail pages vmemmap page */\n-\t\tnext = addr + PAGE_SIZE;\n-\t\tpte = vmemmap_populate_address(next, node, NULL, PHYS_PFN(__pa(p)));\n-\t\t/*\n-\t\t * get_page() is called above. Since we are not actually\n-\t\t * reusing it, to avoid a memory leak, we call put_page() here.\n-\t\t */\n-\t\tput_page(virt_to_page(p));\n-\t\tif (!pte)\n-\t\t\treturn -ENOMEM;\n-\n-\t\t/*\n-\t\t * Reuse the shared vmemmap page for the rest of tail pages\n-\t\t * See layout diagram in Documentation/mm/vmemmap_dedup.rst\n-\t\t */\n-\t\tnext += PAGE_SIZE;\n-\t\trc = vmemmap_populate_range(next, last, node, NULL,\n-\t\t\t\t\t    page_to_pfn(tail));\n-\t\tif (rc)\n-\t\t\treturn -ENOMEM;\n-\t}\n-\n-\treturn 0;\n-}\n-\n struct page * __meminit __populate_section_memmap(unsigned long pfn,\n \t\tunsigned long nr_pages, int nid, struct vmem_altmap *altmap,\n \t\tstruct dev_pagemap *pgmap)\n",
    "prefixes": [
        "37/49"
    ]
}