get:
Show a patch.

patch:
Partially update a patch; only the fields supplied in the request body are changed.

put:
Update a patch, replacing the full set of writable fields.

GET /api/patches/2202535/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 2202535,
    "url": "http://patchwork.ozlabs.org/api/patches/2202535/?format=api",
    "web_url": "http://patchwork.ozlabs.org/project/linuxppc-dev/patch/20260227200848.114019-15-david@kernel.org/",
    "project": {
        "id": 2,
        "url": "http://patchwork.ozlabs.org/api/projects/2/?format=api",
        "name": "Linux PPC development",
        "link_name": "linuxppc-dev",
        "list_id": "linuxppc-dev.lists.ozlabs.org",
        "list_email": "linuxppc-dev@lists.ozlabs.org",
        "web_url": "https://github.com/linuxppc/wiki/wiki",
        "scm_url": "https://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux.git",
        "webscm_url": "https://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux.git/",
        "list_archive_url": "https://lore.kernel.org/linuxppc-dev/",
        "list_archive_url_format": "https://lore.kernel.org/linuxppc-dev/{}/",
        "commit_url_format": "https://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux.git/commit/?id={}"
    },
    "msgid": "<20260227200848.114019-15-david@kernel.org>",
    "list_archive_url": "https://lore.kernel.org/linuxppc-dev/20260227200848.114019-15-david@kernel.org/",
    "date": "2026-02-27T20:08:45",
    "name": "[v1,14/16] mm: rename zap_page_range_single() to zap_vma_range()",
    "commit_ref": null,
    "pull_url": null,
    "state": "new",
    "archived": false,
    "hash": "706b451376743354fcefb6afc5bb93f06d2a0435",
    "submitter": {
        "id": 92023,
        "url": "http://patchwork.ozlabs.org/api/people/92023/?format=api",
        "name": "David Hildenbrand (Arm)",
        "email": "david@kernel.org"
    },
    "delegate": null,
    "mbox": "http://patchwork.ozlabs.org/project/linuxppc-dev/patch/20260227200848.114019-15-david@kernel.org/mbox/",
    "series": [
        {
            "id": 493807,
            "url": "http://patchwork.ozlabs.org/api/series/493807/?format=api",
            "web_url": "http://patchwork.ozlabs.org/project/linuxppc-dev/list/?series=493807",
            "date": "2026-02-27T20:08:31",
            "name": "mm: cleanups around unmapping / zapping",
            "version": 1,
            "mbox": "http://patchwork.ozlabs.org/series/493807/mbox/"
        }
    ],
    "comments": "http://patchwork.ozlabs.org/api/patches/2202535/comments/",
    "check": "pending",
    "checks": "http://patchwork.ozlabs.org/api/patches/2202535/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "\n <linuxppc-dev+bounces-17422-incoming=patchwork.ozlabs.org@lists.ozlabs.org>",
        "X-Original-To": [
            "incoming@patchwork.ozlabs.org",
            "linuxppc-dev@lists.ozlabs.org"
        ],
        "Delivered-To": "patchwork-incoming@legolas.ozlabs.org",
        "Authentication-Results": [
            "legolas.ozlabs.org;\n\tdkim=pass (2048-bit key;\n unprotected) header.d=kernel.org header.i=@kernel.org header.a=rsa-sha256\n header.s=k20201202 header.b=JmPRcYz6;\n\tdkim-atps=neutral",
            "legolas.ozlabs.org;\n spf=pass (sender SPF authorized) smtp.mailfrom=lists.ozlabs.org\n (client-ip=2404:9400:21b9:f100::1; helo=lists.ozlabs.org;\n envelope-from=linuxppc-dev+bounces-17422-incoming=patchwork.ozlabs.org@lists.ozlabs.org;\n receiver=patchwork.ozlabs.org)",
            "lists.ozlabs.org;\n arc=none smtp.remote-ip=172.105.4.254",
            "lists.ozlabs.org;\n dmarc=pass (p=quarantine dis=none) header.from=kernel.org",
            "lists.ozlabs.org;\n\tdkim=pass (2048-bit key;\n unprotected) header.d=kernel.org header.i=@kernel.org header.a=rsa-sha256\n header.s=k20201202 header.b=JmPRcYz6;\n\tdkim-atps=neutral",
            "lists.ozlabs.org;\n spf=pass (sender SPF authorized) smtp.mailfrom=kernel.org\n (client-ip=172.105.4.254; helo=tor.source.kernel.org;\n envelope-from=david@kernel.org; receiver=lists.ozlabs.org)"
        ],
        "Received": [
            "from lists.ozlabs.org (lists.ozlabs.org\n [IPv6:2404:9400:21b9:f100::1])\n\t(using TLSv1.3 with cipher TLS_AES_256_GCM_SHA384 (256/256 bits)\n\t key-exchange x25519)\n\t(No client certificate requested)\n\tby legolas.ozlabs.org (Postfix) with ESMTPS id 4fMzz31jLSz1xxx\n\tfor <incoming@patchwork.ozlabs.org>; Sat, 28 Feb 2026 07:13:03 +1100 (AEDT)",
            "from boromir.ozlabs.org (localhost [127.0.0.1])\n\tby lists.ozlabs.org (Postfix) with ESMTP id 4fMzz301Lrz3c5y;\n\tSat, 28 Feb 2026 07:13:03 +1100 (AEDT)",
            "from tor.source.kernel.org (tor.source.kernel.org [172.105.4.254])\n\t(using TLSv1.3 with cipher TLS_AES_256_GCM_SHA384 (256/256 bits)\n\t key-exchange x25519)\n\t(No client certificate requested)\n\tby lists.ozlabs.org (Postfix) with ESMTPS id 4fMzz20X82z30Sv\n\tfor <linuxppc-dev@lists.ozlabs.org>; Sat, 28 Feb 2026 07:13:01 +1100 (AEDT)",
            "from smtp.kernel.org (transwarp.subspace.kernel.org [100.75.92.58])\n\tby tor.source.kernel.org (Postfix) with ESMTP id DF79160126;\n\tFri, 27 Feb 2026 20:12:59 +0000 (UTC)",
            "by smtp.kernel.org (Postfix) with ESMTPSA id 4803DC116C6;\n\tFri, 27 Feb 2026 20:12:44 +0000 (UTC)"
        ],
        "ARC-Seal": "i=1; a=rsa-sha256; d=lists.ozlabs.org; s=201707; t=1772223182;\n\tcv=none;\n b=dCnx0MSjIHhPYd5EW/wtxD7B37QYPp6fN4P8sC7gjx1QWpC3SlBl0nNASdojn9b5aesyterJSrWlBPZbITCCBsUSIa1sS/Z0O9j4KivHC8rs4scwQHPuF/KXIAnlLfUQlRWfc+idNUo2Kt6QgwpVemTiVeHJ1feJ6x1QYlQEi5Deekx1M+1GK3NAOQdW4jaiDI1zV5aPnS+Hgu8j3ZS2Glpg4RMQ/Z87osUxJqICUsc1RF8vW7FN3U/OxwyAA2TwZKkWyogdBElLHcbNfg5D0h9XFTdEiOaUkzWU7qxID0KCYyrm1achbP24cJE7nW+D8vDf/l5Fpo2fRfH8yiprrg==",
        "ARC-Message-Signature": "i=1; a=rsa-sha256; d=lists.ozlabs.org; s=201707;\n\tt=1772223182; c=relaxed/relaxed;\n\tbh=A4zxOggyb9U7iPva++/crQzwEJduJ7dDQydh3VuCXl0=;\n\th=From:To:Cc:Subject:Date:Message-ID:In-Reply-To:References:\n\t MIME-Version;\n b=KMM9Fwvt+FoFWgl6X1sxeqHY2LtIFm2W3d+Ia3CmxEgI0h0D2dczs76+qERtXz2ks4Zti3jTY8+2bRKTgd+QVbwOJxH3FaZ2FCCTy+NIVTb4xAxYtR2wyObmfpDVIsbC1q/m20yZjokRRvn1hr9LueRVaWkWErapNa11tvSdltFpeiwfQ5awdSXYmFMid6RYaxI0xI+H9oSjZDCzHYSB8NuhzPskUIokJk+bVHTLHH6tYMPfFaF+nrFGYd+U7bvTN2tsY05MP+KvNmE8yXcw0XC/J7cTYpXUvweLCLll8F5sMG7a79hEMktbtsxPFmYrWMwRUQP/AC+511o7M22eYw==",
        "ARC-Authentication-Results": "i=1; lists.ozlabs.org;\n dmarc=pass (p=quarantine dis=none) header.from=kernel.org;\n dkim=pass (2048-bit key;\n unprotected) header.d=kernel.org header.i=@kernel.org header.a=rsa-sha256\n header.s=k20201202 header.b=JmPRcYz6; dkim-atps=neutral;\n spf=pass (client-ip=172.105.4.254; helo=tor.source.kernel.org;\n envelope-from=david@kernel.org;\n receiver=lists.ozlabs.org) smtp.mailfrom=kernel.org",
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/simple; d=kernel.org;\n\ts=k20201202; t=1772223179;\n\tbh=EEBn7CIEirsPder1HwHu0zod3+a5jIJfhj8EcLUi1fg=;\n\th=From:To:Cc:Subject:Date:In-Reply-To:References:From;\n\tb=JmPRcYz6lMupOBBWcIq2xxzw3ZjqLWbh14Q/EzEzzNOPR2bXzATrQ2or0dA35uMRH\n\t qQSbMuAtuGPbagAqsNu+bWeITN3fvbriqugq0wAAivv3oBid5ydJfxz6kgH0ggobHr\n\t FcqROWqyx31WFNdZnxZqeabN5HI3T4ByIsihTtRpKekFZlg/CHSxlrBVD1TU/sPfLX\n\t PtMx1nnJm8R4PN5iGseQ/FJczpNis/rWhHyMfhYrvINAqI8qSVqCSM1beRCreQXnWF\n\t uj0oTpBBfR/1UoZQaN9rvvSM//RXewIO1Dwy6rYXGjaI0s2HIGsuNkmVQpYm8jSfVj\n\t CC7PaJu+lCODg==",
        "From": "\"David Hildenbrand (Arm)\" <david@kernel.org>",
        "To": "linux-kernel@vger.kernel.org",
        "Cc": "\"linux-mm @ kvack . org\" <linux-mm@kvack.org>,\n \"David Hildenbrand (Arm)\" <david@kernel.org>,\n Andrew Morton <akpm@linux-foundation.org>,\n Lorenzo Stoakes <lorenzo.stoakes@oracle.com>,\n \"Liam R. Howlett\" <Liam.Howlett@oracle.com>,\n Vlastimil Babka <vbabka@kernel.org>, Mike Rapoport <rppt@kernel.org>,\n Suren Baghdasaryan <surenb@google.com>, Michal Hocko <mhocko@suse.com>,\n Jann Horn <jannh@google.com>, Pedro Falcato <pfalcato@suse.de>,\n David Rientjes <rientjes@google.com>, Shakeel Butt <shakeel.butt@linux.dev>,\n \"Matthew Wilcox (Oracle)\" <willy@infradead.org>,\n Alice Ryhl <aliceryhl@google.com>, Madhavan Srinivasan <maddy@linux.ibm.com>,\n Michael Ellerman <mpe@ellerman.id.au>,\n Christian Borntraeger <borntraeger@linux.ibm.com>,\n Janosch Frank <frankja@linux.ibm.com>,\n Claudio Imbrenda <imbrenda@linux.ibm.com>,\n Alexander Gordeev <agordeev@linux.ibm.com>,\n Gerald Schaefer <gerald.schaefer@linux.ibm.com>,\n Heiko Carstens <hca@linux.ibm.com>, Vasily Gorbik <gor@linux.ibm.com>,\n Jarkko Sakkinen <jarkko@kernel.org>, Thomas Gleixner <tglx@kernel.org>,\n Ingo Molnar <mingo@redhat.com>, Borislav Petkov <bp@alien8.de>,\n Greg Kroah-Hartman <gregkh@linuxfoundation.org>, =?utf-8?q?Arve_Hj=C3=B8nne?=\n\t=?utf-8?q?v=C3=A5g?= <arve@android.com>, Todd Kjos <tkjos@android.com>,\n Christian Brauner <brauner@kernel.org>, Carlos Llamas <cmllamas@google.com>,\n Ian Abbott <abbotti@mev.co.uk>,\n H Hartley Sweeten <hsweeten@visionengravers.com>,\n Jani Nikula <jani.nikula@linux.intel.com>,\n Joonas Lahtinen <joonas.lahtinen@linux.intel.com>,\n Rodrigo Vivi <rodrigo.vivi@intel.com>, Tvrtko Ursulin <tursulin@ursulin.net>,\n David Airlie <airlied@gmail.com>, Simona Vetter <simona@ffwll.ch>,\n Jason Gunthorpe <jgg@ziepe.ca>, Leon Romanovsky <leon@kernel.org>,\n Dimitri Sivanich <dimitri.sivanich@hpe.com>, Arnd Bergmann <arnd@arndb.de>,\n Alexei Starovoitov <ast@kernel.org>, Daniel Borkmann <daniel@iogearbox.net>,\n Andrii Nakryiko 
<andrii@kernel.org>, Peter Zijlstra <peterz@infradead.org>,\n Arnaldo Carvalho de Melo <acme@kernel.org>,\n Namhyung Kim <namhyung@kernel.org>, Andy Lutomirski <luto@kernel.org>,\n Vincenzo Frascino <vincenzo.frascino@arm.com>,\n Eric Dumazet <edumazet@google.com>, Neal Cardwell <ncardwell@google.com>,\n \"David S. Miller\" <davem@davemloft.net>, David Ahern <dsahern@kernel.org>,\n Jakub Kicinski <kuba@kernel.org>, Paolo Abeni <pabeni@redhat.com>,\n Miguel Ojeda <ojeda@kernel.org>, linuxppc-dev@lists.ozlabs.org,\n kvm@vger.kernel.org, linux-s390@vger.kernel.org, linux-sgx@vger.kernel.org,\n intel-gfx@lists.freedesktop.org, dri-devel@lists.freedesktop.org,\n linux-rdma@vger.kernel.org, bpf@vger.kernel.org,\n linux-perf-users@vger.kernel.org, linux-fsdevel@vger.kernel.org,\n netdev@vger.kernel.org, rust-for-linux@vger.kernel.org, x86@kernel.org",
        "Subject": "[PATCH v1 14/16] mm: rename zap_page_range_single() to\n zap_vma_range()",
        "Date": "Fri, 27 Feb 2026 21:08:45 +0100",
        "Message-ID": "<20260227200848.114019-15-david@kernel.org>",
        "X-Mailer": "git-send-email 2.43.0",
        "In-Reply-To": "<20260227200848.114019-1-david@kernel.org>",
        "References": "<20260227200848.114019-1-david@kernel.org>",
        "X-Mailing-List": "linuxppc-dev@lists.ozlabs.org",
        "List-Id": "<linuxppc-dev.lists.ozlabs.org>",
        "List-Help": "<mailto:linuxppc-dev+help@lists.ozlabs.org>",
        "List-Owner": "<mailto:linuxppc-dev+owner@lists.ozlabs.org>",
        "List-Post": "<mailto:linuxppc-dev@lists.ozlabs.org>",
        "List-Archive": "<https://lore.kernel.org/linuxppc-dev/>,\n  <https://lists.ozlabs.org/pipermail/linuxppc-dev/>",
        "List-Subscribe": "<mailto:linuxppc-dev+subscribe@lists.ozlabs.org>,\n  <mailto:linuxppc-dev+subscribe-digest@lists.ozlabs.org>,\n  <mailto:linuxppc-dev+subscribe-nomail@lists.ozlabs.org>",
        "List-Unsubscribe": "<mailto:linuxppc-dev+unsubscribe@lists.ozlabs.org>",
        "Precedence": "list",
        "MIME-Version": "1.0",
        "Content-Transfer-Encoding": "8bit",
        "X-Spam-Status": "No, score=-0.2 required=3.0 tests=DKIMWL_WL_HIGH,DKIM_SIGNED,\n\tDKIM_VALID,DKIM_VALID_AU,DKIM_VALID_EF,SPF_HELO_NONE,SPF_PASS\n\tautolearn=disabled version=4.0.1 OzLabs 8",
        "X-Spam-Checker-Version": "SpamAssassin 4.0.1 (2024-03-25) on lists.ozlabs.org"
    },
    "content": "Let's rename it to make it better match our new naming scheme.\n\nWhile at it, polish the kerneldoc.\n\nSigned-off-by: David Hildenbrand (Arm) <david@kernel.org>\n---\n arch/s390/mm/gmap_helpers.c          |  2 +-\n drivers/android/binder/page_range.rs |  4 ++--\n drivers/android/binder_alloc.c       |  2 +-\n include/linux/mm.h                   |  4 ++--\n kernel/bpf/arena.c                   |  2 +-\n kernel/events/core.c                 |  2 +-\n mm/madvise.c                         |  4 ++--\n mm/memory.c                          | 14 +++++++-------\n net/ipv4/tcp.c                       |  6 +++---\n rust/kernel/mm/virt.rs               |  4 ++--\n 10 files changed, 22 insertions(+), 22 deletions(-)",
    "diff": "diff --git a/arch/s390/mm/gmap_helpers.c b/arch/s390/mm/gmap_helpers.c\nindex ae2d59a19313..f8789ffcc05c 100644\n--- a/arch/s390/mm/gmap_helpers.c\n+++ b/arch/s390/mm/gmap_helpers.c\n@@ -89,7 +89,7 @@ void gmap_helper_discard(struct mm_struct *mm, unsigned long vmaddr, unsigned lo\n \t\tif (!vma)\n \t\t\treturn;\n \t\tif (!is_vm_hugetlb_page(vma))\n-\t\t\tzap_page_range_single(vma, vmaddr, min(end, vma->vm_end) - vmaddr);\n+\t\t\tzap_vma_range(vma, vmaddr, min(end, vma->vm_end) - vmaddr);\n \t\tvmaddr = vma->vm_end;\n \t}\n }\ndiff --git a/drivers/android/binder/page_range.rs b/drivers/android/binder/page_range.rs\nindex fdd97112ef5c..2fddd4ed8d4c 100644\n--- a/drivers/android/binder/page_range.rs\n+++ b/drivers/android/binder/page_range.rs\n@@ -130,7 +130,7 @@ pub(crate) struct ShrinkablePageRange {\n     pid: Pid,\n     /// The mm for the relevant process.\n     mm: ARef<Mm>,\n-    /// Used to synchronize calls to `vm_insert_page` and `zap_page_range_single`.\n+    /// Used to synchronize calls to `vm_insert_page` and `zap_vma_range`.\n     #[pin]\n     mm_lock: Mutex<()>,\n     /// Spinlock protecting changes to pages.\n@@ -719,7 +719,7 @@ fn drop(self: Pin<&mut Self>) {\n \n     if let Some(vma) = mmap_read.vma_lookup(vma_addr) {\n         let user_page_addr = vma_addr + (page_index << PAGE_SHIFT);\n-        vma.zap_page_range_single(user_page_addr, PAGE_SIZE);\n+        vma.zap_vma_range(user_page_addr, PAGE_SIZE);\n     }\n \n     drop(mmap_read);\ndiff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c\nindex dd2046bd5cde..e4488ad86a65 100644\n--- a/drivers/android/binder_alloc.c\n+++ b/drivers/android/binder_alloc.c\n@@ -1185,7 +1185,7 @@ enum lru_status binder_alloc_free_page(struct list_head *item,\n \tif (vma) {\n \t\ttrace_binder_unmap_user_start(alloc, index);\n \n-\t\tzap_page_range_single(vma, page_addr, PAGE_SIZE);\n+\t\tzap_vma_range(vma, page_addr, PAGE_SIZE);\n \n \t\ttrace_binder_unmap_user_end(alloc, index);\n 
\t}\ndiff --git a/include/linux/mm.h b/include/linux/mm.h\nindex 4bd1500b9630..833bedd3f739 100644\n--- a/include/linux/mm.h\n+++ b/include/linux/mm.h\n@@ -2835,7 +2835,7 @@ struct page *vm_normal_page_pud(struct vm_area_struct *vma, unsigned long addr,\n \n void zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,\n \t\t  unsigned long size);\n-void zap_page_range_single(struct vm_area_struct *vma, unsigned long address,\n+void zap_vma_range(struct vm_area_struct *vma, unsigned long address,\n \t\t\t   unsigned long size);\n /**\n  * zap_vma - zap all page table entries in a vma\n@@ -2843,7 +2843,7 @@ void zap_page_range_single(struct vm_area_struct *vma, unsigned long address,\n  */\n static inline void zap_vma(struct vm_area_struct *vma)\n {\n-\tzap_page_range_single(vma, vma->vm_start, vma->vm_end - vma->vm_start);\n+\tzap_vma_range(vma, vma->vm_start, vma->vm_end - vma->vm_start);\n }\n struct mmu_notifier_range;\n \ndiff --git a/kernel/bpf/arena.c b/kernel/bpf/arena.c\nindex c34510d83b1f..37843c6a4764 100644\n--- a/kernel/bpf/arena.c\n+++ b/kernel/bpf/arena.c\n@@ -656,7 +656,7 @@ static void zap_pages(struct bpf_arena *arena, long uaddr, long page_cnt)\n \tguard(mutex)(&arena->lock);\n \t/* iterate link list under lock */\n \tlist_for_each_entry(vml, &arena->vma_list, head)\n-\t\tzap_page_range_single(vml->vma, uaddr, PAGE_SIZE * page_cnt);\n+\t\tzap_vma_range(vml->vma, uaddr, PAGE_SIZE * page_cnt);\n }\n \n static void arena_free_pages(struct bpf_arena *arena, long uaddr, long page_cnt, bool sleepable)\ndiff --git a/kernel/events/core.c b/kernel/events/core.c\nindex c94c56c94104..5ee02817c3bc 100644\n--- a/kernel/events/core.c\n+++ b/kernel/events/core.c\n@@ -7215,7 +7215,7 @@ static int map_range(struct perf_buffer *rb, struct vm_area_struct *vma)\n #ifdef CONFIG_MMU\n \t/* Clear any partial mappings on error. 
*/\n \tif (err)\n-\t\tzap_page_range_single(vma, vma->vm_start, nr_pages * PAGE_SIZE);\n+\t\tzap_vma_range(vma, vma->vm_start, nr_pages * PAGE_SIZE);\n #endif\n \n \treturn err;\ndiff --git a/mm/madvise.c b/mm/madvise.c\nindex fb5fcdff2b66..6e66f56ff1a6 100644\n--- a/mm/madvise.c\n+++ b/mm/madvise.c\n@@ -832,7 +832,7 @@ static int madvise_free_single_vma(struct madvise_behavior *madv_behavior)\n  * Application no longer needs these pages.  If the pages are dirty,\n  * it's OK to just throw them away.  The app will be more careful about\n  * data it wants to keep.  Be sure to free swap resources too.  The\n- * zap_page_range_single call sets things up for shrink_active_list to actually\n+ * zap_vma_range call sets things up for shrink_active_list to actually\n  * free these pages later if no one else has touched them in the meantime,\n  * although we could add these pages to a global reuse list for\n  * shrink_active_list to pick up before reclaiming other pages.\n@@ -1191,7 +1191,7 @@ static long madvise_guard_install(struct madvise_behavior *madv_behavior)\n \t\t * OK some of the range have non-guard pages mapped, zap\n \t\t * them. 
This leaves existing guard pages in place.\n \t\t */\n-\t\tzap_page_range_single(vma, range->start, range->end - range->start);\n+\t\tzap_vma_range(vma, range->start, range->end - range->start);\n \t}\n \n \t/*\ndiff --git a/mm/memory.c b/mm/memory.c\nindex e611e9af4e85..dd737b6d28c0 100644\n--- a/mm/memory.c\n+++ b/mm/memory.c\n@@ -2215,14 +2215,14 @@ void zap_vma_range_batched(struct mmu_gather *tlb,\n }\n \n /**\n- * zap_page_range_single - remove user pages in a given range\n- * @vma: vm_area_struct holding the applicable pages\n- * @address: starting address of pages to zap\n+ * zap_vma_range - zap all page table entries in a vma range\n+ * @vma: the vma covering the range to zap\n+ * @address: starting address of the range to zap\n  * @size: number of bytes to zap\n  *\n- * The range must fit into one VMA.\n+ * The provided address range must be fully contained within @vma.\n  */\n-void zap_page_range_single(struct vm_area_struct *vma, unsigned long address,\n+void zap_vma_range(struct vm_area_struct *vma, unsigned long address,\n \t\tunsigned long size)\n {\n \tstruct mmu_gather tlb;\n@@ -2250,7 +2250,7 @@ void zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,\n \t    \t\t!(vma->vm_flags & VM_PFNMAP))\n \t\treturn;\n \n-\tzap_page_range_single(vma, address, size);\n+\tzap_vma_range(vma, address, size);\n }\n EXPORT_SYMBOL_GPL(zap_vma_ptes);\n \n@@ -3018,7 +3018,7 @@ static int remap_pfn_range_notrack(struct vm_area_struct *vma, unsigned long add\n \t * maintain page reference counts, and callers may free\n \t * pages due to the error. 
So zap it early.\n \t */\n-\tzap_page_range_single(vma, addr, size);\n+\tzap_vma_range(vma, addr, size);\n \treturn error;\n }\n \ndiff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c\nindex befcde27dee7..cb4477ef1529 100644\n--- a/net/ipv4/tcp.c\n+++ b/net/ipv4/tcp.c\n@@ -2104,7 +2104,7 @@ static int tcp_zerocopy_vm_insert_batch_error(struct vm_area_struct *vma,\n \t\tmaybe_zap_len = total_bytes_to_map -  /* All bytes to map */\n \t\t\t\t*length + /* Mapped or pending */\n \t\t\t\t(pages_remaining * PAGE_SIZE); /* Failed map. */\n-\t\tzap_page_range_single(vma, *address, maybe_zap_len);\n+\t\tzap_vma_range(vma, *address, maybe_zap_len);\n \t\terr = 0;\n \t}\n \n@@ -2112,7 +2112,7 @@ static int tcp_zerocopy_vm_insert_batch_error(struct vm_area_struct *vma,\n \t\tunsigned long leftover_pages = pages_remaining;\n \t\tint bytes_mapped;\n \n-\t\t/* We called zap_page_range_single, try to reinsert. */\n+\t\t/* We called zap_vma_range, try to reinsert. */\n \t\terr = vm_insert_pages(vma, *address,\n \t\t\t\t      pending_pages,\n \t\t\t\t      &pages_remaining);\n@@ -2269,7 +2269,7 @@ static int tcp_zerocopy_receive(struct sock *sk,\n \ttotal_bytes_to_map = avail_len & ~(PAGE_SIZE - 1);\n \tif (total_bytes_to_map) {\n \t\tif (!(zc->flags & TCP_RECEIVE_ZEROCOPY_FLAG_TLB_CLEAN_HINT))\n-\t\t\tzap_page_range_single(vma, address, total_bytes_to_map);\n+\t\t\tzap_vma_range(vma, address, total_bytes_to_map);\n \t\tzc->length = total_bytes_to_map;\n \t\tzc->recv_skip_hint = 0;\n \t} else {\ndiff --git a/rust/kernel/mm/virt.rs b/rust/kernel/mm/virt.rs\nindex b8e59e4420f3..04b3cc925d67 100644\n--- a/rust/kernel/mm/virt.rs\n+++ b/rust/kernel/mm/virt.rs\n@@ -113,7 +113,7 @@ pub fn end(&self) -> usize {\n     /// kernel goes further in freeing unused page tables, but for the purposes of this operation\n     /// we must only assume that the leaf level is cleared.\n     #[inline]\n-    pub fn zap_page_range_single(&self, address: usize, size: usize) {\n+    pub fn zap_vma_range(&self, address: 
usize, size: usize) {\n         let (end, did_overflow) = address.overflowing_add(size);\n         if did_overflow || address < self.start() || self.end() < end {\n             // TODO: call WARN_ONCE once Rust version of it is added\n@@ -124,7 +124,7 @@ pub fn zap_page_range_single(&self, address: usize, size: usize) {\n         // sufficient for this method call. This method has no requirements on the vma flags. The\n         // address range is checked to be within the vma.\n         unsafe {\n-            bindings::zap_page_range_single(self.as_ptr(), address, size)\n+            bindings::zap_vma_range(self.as_ptr(), address, size)\n         };\n     }\n \n",
    "prefixes": [
        "v1",
        "14/16"
    ]
}