get:
Show a patch.

patch:
Partially update a patch.

put:
Update a patch.

GET /api/patches/811788/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 811788,
    "url": "http://patchwork.ozlabs.org/api/patches/811788/?format=api",
    "web_url": "http://patchwork.ozlabs.org/project/linuxppc-dev/patch/1504894024-2750-7-git-send-email-ldufour@linux.vnet.ibm.com/",
    "project": {
        "id": 2,
        "url": "http://patchwork.ozlabs.org/api/projects/2/?format=api",
        "name": "Linux PPC development",
        "link_name": "linuxppc-dev",
        "list_id": "linuxppc-dev.lists.ozlabs.org",
        "list_email": "linuxppc-dev@lists.ozlabs.org",
        "web_url": "https://github.com/linuxppc/wiki/wiki",
        "scm_url": "https://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux.git",
        "webscm_url": "https://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux.git/",
        "list_archive_url": "https://lore.kernel.org/linuxppc-dev/",
        "list_archive_url_format": "https://lore.kernel.org/linuxppc-dev/{}/",
        "commit_url_format": "https://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux.git/commit/?id={}"
    },
    "msgid": "<1504894024-2750-7-git-send-email-ldufour@linux.vnet.ibm.com>",
    "list_archive_url": "https://lore.kernel.org/linuxppc-dev/1504894024-2750-7-git-send-email-ldufour@linux.vnet.ibm.com/",
    "date": "2017-09-08T18:06:50",
    "name": "[v3,06/20] mm: RCU free VMAs",
    "commit_ref": null,
    "pull_url": null,
    "state": "not-applicable",
    "archived": false,
    "hash": "8041e98c3af4ed3b2c6b25217a3df39c53a16118",
    "submitter": {
        "id": 40248,
        "url": "http://patchwork.ozlabs.org/api/people/40248/?format=api",
        "name": "Laurent Dufour",
        "email": "ldufour@linux.vnet.ibm.com"
    },
    "delegate": null,
    "mbox": "http://patchwork.ozlabs.org/project/linuxppc-dev/patch/1504894024-2750-7-git-send-email-ldufour@linux.vnet.ibm.com/mbox/",
    "series": [
        {
            "id": 2269,
            "url": "http://patchwork.ozlabs.org/api/series/2269/?format=api",
            "web_url": "http://patchwork.ozlabs.org/project/linuxppc-dev/list/?series=2269",
            "date": "2017-09-08T18:06:44",
            "name": "Speculative page faults",
            "version": 3,
            "mbox": "http://patchwork.ozlabs.org/series/2269/mbox/"
        }
    ],
    "comments": "http://patchwork.ozlabs.org/api/patches/811788/comments/",
    "check": "pending",
    "checks": "http://patchwork.ozlabs.org/api/patches/811788/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<linuxppc-dev-bounces+patchwork-incoming=ozlabs.org@lists.ozlabs.org>",
        "X-Original-To": [
            "patchwork-incoming@ozlabs.org",
            "linuxppc-dev@lists.ozlabs.org"
        ],
        "Delivered-To": [
            "patchwork-incoming@ozlabs.org",
            "linuxppc-dev@lists.ozlabs.org"
        ],
        "Received": [
            "from lists.ozlabs.org (lists.ozlabs.org [103.22.144.68])\n\t(using TLSv1.2 with cipher ADH-AES256-GCM-SHA384 (256/256 bits))\n\t(No client certificate requested)\n\tby ozlabs.org (Postfix) with ESMTPS id 3xpm5B2RDpz9s7C\n\tfor <patchwork-incoming@ozlabs.org>;\n\tSat,  9 Sep 2017 04:27:26 +1000 (AEST)",
            "from lists.ozlabs.org (lists.ozlabs.org [IPv6:2401:3900:2:1::3])\n\tby lists.ozlabs.org (Postfix) with ESMTP id 3xpm5973bVzDqGZ\n\tfor <patchwork-incoming@ozlabs.org>;\n\tSat,  9 Sep 2017 04:27:25 +1000 (AEST)",
            "from mx0a-001b2d01.pphosted.com (mx0a-001b2d01.pphosted.com\n\t[148.163.156.1])\n\t(using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256\n\tbits)) (No client certificate requested)\n\tby lists.ozlabs.org (Postfix) with ESMTPS id 3xplfK13XkzDrWS\n\tfor <linuxppc-dev@lists.ozlabs.org>;\n\tSat,  9 Sep 2017 04:07:36 +1000 (AEST)",
            "from pps.filterd (m0098393.ppops.net [127.0.0.1])\n\tby mx0a-001b2d01.pphosted.com (8.16.0.21/8.16.0.21) with SMTP id\n\tv88I5U4p105569\n\tfor <linuxppc-dev@lists.ozlabs.org>; Fri, 8 Sep 2017 14:07:35 -0400",
            "from e06smtp13.uk.ibm.com (e06smtp13.uk.ibm.com [195.75.94.109])\n\tby mx0a-001b2d01.pphosted.com with ESMTP id 2cuvgwkkch-1\n\t(version=TLSv1.2 cipher=AES256-SHA bits=256 verify=NOT)\n\tfor <linuxppc-dev@lists.ozlabs.org>; Fri, 08 Sep 2017 14:07:34 -0400",
            "from localhost\n\tby e06smtp13.uk.ibm.com with IBM ESMTP SMTP Gateway: Authorized Use\n\tOnly! Violators will be prosecuted\n\tfor <linuxppc-dev@lists.ozlabs.org> from <ldufour@linux.vnet.ibm.com>;\n\tFri, 8 Sep 2017 19:07:32 +0100",
            "from b06cxnps4074.portsmouth.uk.ibm.com (9.149.109.196)\n\tby e06smtp13.uk.ibm.com (192.168.101.143) with IBM ESMTP SMTP\n\tGateway: Authorized Use Only! Violators will be prosecuted; \n\tFri, 8 Sep 2017 19:07:25 +0100",
            "from d06av24.portsmouth.uk.ibm.com (mk.ibm.com [9.149.105.60])\n\tby b06cxnps4074.portsmouth.uk.ibm.com (8.14.9/8.14.9/NCO v10.0) with\n\tESMTP id v88I7PFV23265484; Fri, 8 Sep 2017 18:07:25 GMT",
            "from d06av24.portsmouth.uk.ibm.com (unknown [127.0.0.1])\n\tby IMSVA (Postfix) with ESMTP id A631342045;\n\tFri,  8 Sep 2017 19:03:52 +0100 (BST)",
            "from d06av24.portsmouth.uk.ibm.com (unknown [127.0.0.1])\n\tby IMSVA (Postfix) with ESMTP id D3E244203F;\n\tFri,  8 Sep 2017 19:03:50 +0100 (BST)",
            "from nimbus.lab.toulouse-stg.fr.ibm.com (unknown [9.145.31.125])\n\tby d06av24.portsmouth.uk.ibm.com (Postfix) with ESMTP;\n\tFri,  8 Sep 2017 19:03:50 +0100 (BST)"
        ],
        "Authentication-Results": "ozlabs.org;\n\tspf=none (mailfrom) smtp.mailfrom=linux.vnet.ibm.com\n\t(client-ip=148.163.156.1; helo=mx0a-001b2d01.pphosted.com;\n\tenvelope-from=ldufour@linux.vnet.ibm.com; receiver=<UNKNOWN>)",
        "From": "Laurent Dufour <ldufour@linux.vnet.ibm.com>",
        "To": "paulmck@linux.vnet.ibm.com, peterz@infradead.org,\n\takpm@linux-foundation.org, kirill@shutemov.name, ak@linux.intel.com, \n\tmhocko@kernel.org, dave@stgolabs.net, jack@suse.cz,\n\tMatthew Wilcox <willy@infradead.org>, benh@kernel.crashing.org,\n\tmpe@ellerman.id.au, paulus@samba.org,\n\tThomas Gleixner <tglx@linutronix.de>, Ingo Molnar <mingo@redhat.com>, \n\thpa@zytor.com, Will Deacon <will.deacon@arm.com>,\n\tSergey Senozhatsky <sergey.senozhatsky@gmail.com>",
        "Subject": "[PATCH v3 06/20] mm: RCU free VMAs",
        "Date": "Fri,  8 Sep 2017 20:06:50 +0200",
        "X-Mailer": "git-send-email 2.7.4",
        "In-Reply-To": "<1504894024-2750-1-git-send-email-ldufour@linux.vnet.ibm.com>",
        "References": "<1504894024-2750-1-git-send-email-ldufour@linux.vnet.ibm.com>",
        "X-TM-AS-GCONF": "00",
        "x-cbid": "17090818-0012-0000-0000-00000577A9A6",
        "X-IBM-AV-DETECTION": "SAVI=unused REMOTE=unused XFE=unused",
        "x-cbparentid": "17090818-0013-0000-0000-000018F0B122",
        "Message-Id": "<1504894024-2750-7-git-send-email-ldufour@linux.vnet.ibm.com>",
        "X-Proofpoint-Virus-Version": "vendor=fsecure engine=2.50.10432:, ,\n\tdefinitions=2017-09-08_12:, , signatures=0",
        "X-Proofpoint-Spam-Details": "rule=outbound_notspam policy=outbound score=0\n\tspamscore=0 suspectscore=2\n\tmalwarescore=0 phishscore=0 adultscore=0 bulkscore=0 classifier=spam\n\tadjust=0 reason=mlx scancount=1 engine=8.0.1-1707230000\n\tdefinitions=main-1709080270",
        "X-BeenThere": "linuxppc-dev@lists.ozlabs.org",
        "X-Mailman-Version": "2.1.23",
        "Precedence": "list",
        "List-Id": "Linux on PowerPC Developers Mail List\n\t<linuxppc-dev.lists.ozlabs.org>",
        "List-Unsubscribe": "<https://lists.ozlabs.org/options/linuxppc-dev>,\n\t<mailto:linuxppc-dev-request@lists.ozlabs.org?subject=unsubscribe>",
        "List-Archive": "<http://lists.ozlabs.org/pipermail/linuxppc-dev/>",
        "List-Post": "<mailto:linuxppc-dev@lists.ozlabs.org>",
        "List-Help": "<mailto:linuxppc-dev-request@lists.ozlabs.org?subject=help>",
        "List-Subscribe": "<https://lists.ozlabs.org/listinfo/linuxppc-dev>,\n\t<mailto:linuxppc-dev-request@lists.ozlabs.org?subject=subscribe>",
        "Cc": "linuxppc-dev@lists.ozlabs.org, x86@kernel.org,\n\tlinux-kernel@vger.kernel.org, npiggin@gmail.com, linux-mm@kvack.org,\n\tTim Chen <tim.c.chen@linux.intel.com>, \n\tharen@linux.vnet.ibm.com, khandual@linux.vnet.ibm.com",
        "Errors-To": "linuxppc-dev-bounces+patchwork-incoming=ozlabs.org@lists.ozlabs.org",
        "Sender": "\"Linuxppc-dev\"\n\t<linuxppc-dev-bounces+patchwork-incoming=ozlabs.org@lists.ozlabs.org>"
    },
    "content": "From: Peter Zijlstra <peterz@infradead.org>\n\nManage the VMAs with SRCU such that we can do a lockless VMA lookup.\n\nWe put the fput(vma->vm_file) in the SRCU callback, this keeps files\nvalid during speculative faults, this is possible due to the delayed\nfput work by Al Viro -- do we need srcu_barrier() in unmount\nsomeplace?\n\nWe guard the mm_rb tree with a seqlock (this could be a seqcount but\nwe'd have to disable preemption around the write side in order to make\nthe retry loop in __read_seqcount_begin() work) such that we can know\nif the rb tree walk was correct. We cannot trust the restult of a\nlockless tree walk in the face of concurrent tree rotations; although\nwe can trust on the termination of such walks -- tree rotations\nguarantee the end result is a tree again after all.\n\nFurthermore, we rely on the WMB implied by the\nwrite_seqlock/count_begin() to separate the VMA initialization and the\npublishing stores, analogous to the RELEASE in rcu_assign_pointer().\nWe also rely on the RMB from read_seqretry() to separate the vma load\nfrom further loads like the smp_read_barrier_depends() in regular\nRCU.\n\nWe must not touch the vmacache while doing SRCU lookups as that is not\nproperly serialized against changes. 
We update gap information after\npublishing the VMA, but A) we don't use that and B) the seqlock\nread side would fix that anyhow.\n\nWe clear vma->vm_rb for nodes removed from the vma tree such that we\ncan easily detect such 'dead' nodes, we rely on the WMB from\nwrite_sequnlock() to separate the tree removal and clearing the node.\n\nProvide find_vma_srcu() which wraps the required magic.\n\nSigned-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>\n\n[Remove the warnings in description about the SRCU global lock which\n has been removed now]\n[Rename vma_is_dead() to vma_has_changed() and move its adding to the next\n patch]\nSigned-off-by: Laurent Dufour <ldufour@linux.vnet.ibm.com>\n---\n include/linux/mm_types.h |   2 +\n kernel/fork.c            |   1 +\n mm/init-mm.c             |   1 +\n mm/internal.h            |   5 +++\n mm/mmap.c                | 100 +++++++++++++++++++++++++++++++++++------------\n 5 files changed, 83 insertions(+), 26 deletions(-)",
    "diff": "diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h\nindex df9a530c8ca1..19ed70f79873 100644\n--- a/include/linux/mm_types.h\n+++ b/include/linux/mm_types.h\n@@ -345,6 +345,7 @@ struct vm_area_struct {\n #endif\n \tstruct vm_userfaultfd_ctx vm_userfaultfd_ctx;\n \tseqcount_t vm_sequence;\n+\tstruct rcu_head vm_rcu_head;\n } __randomize_layout;\n \n struct core_thread {\n@@ -362,6 +363,7 @@ struct kioctx_table;\n struct mm_struct {\n \tstruct vm_area_struct *mmap;\t\t/* list of VMAs */\n \tstruct rb_root mm_rb;\n+\tseqlock_t mm_seq;\n \tu32 vmacache_seqnum;                   /* per-thread vmacache */\n #ifdef CONFIG_MMU\n \tunsigned long (*get_unmapped_area) (struct file *filp,\ndiff --git a/kernel/fork.c b/kernel/fork.c\nindex 59c7a8775dd7..bb8205faf3c4 100644\n--- a/kernel/fork.c\n+++ b/kernel/fork.c\n@@ -808,6 +808,7 @@ static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p,\n \tmm->mmap = NULL;\n \tmm->mm_rb = RB_ROOT;\n \tmm->vmacache_seqnum = 0;\n+\tseqlock_init(&mm->mm_seq);\n \tatomic_set(&mm->mm_users, 1);\n \tatomic_set(&mm->mm_count, 1);\n \tinit_rwsem(&mm->mmap_sem);\ndiff --git a/mm/init-mm.c b/mm/init-mm.c\nindex 975e49f00f34..2b1fa061684f 100644\n--- a/mm/init-mm.c\n+++ b/mm/init-mm.c\n@@ -16,6 +16,7 @@\n \n struct mm_struct init_mm = {\n \t.mm_rb\t\t= RB_ROOT,\n+\t.mm_seq\t\t= __SEQLOCK_UNLOCKED(init_mm.mm_seq),\n \t.pgd\t\t= swapper_pg_dir,\n \t.mm_users\t= ATOMIC_INIT(2),\n \t.mm_count\t= ATOMIC_INIT(1),\ndiff --git a/mm/internal.h b/mm/internal.h\nindex 0aaa05af7833..84360184eafd 100644\n--- a/mm/internal.h\n+++ b/mm/internal.h\n@@ -40,6 +40,11 @@ void page_writeback_init(void);\n \n int do_swap_page(struct vm_fault *vmf);\n \n+extern struct srcu_struct vma_srcu;\n+\n+extern struct vm_area_struct *find_vma_srcu(struct mm_struct *mm,\n+\t\t\t\t\t    unsigned long addr);\n+\n void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *start_vma,\n \t\tunsigned long floor, unsigned long 
ceiling);\n \ndiff --git a/mm/mmap.c b/mm/mmap.c\nindex 04e72314274d..ad125d8c2e14 100644\n--- a/mm/mmap.c\n+++ b/mm/mmap.c\n@@ -160,6 +160,23 @@ void unlink_file_vma(struct vm_area_struct *vma)\n \t}\n }\n \n+DEFINE_SRCU(vma_srcu);\n+\n+static void __free_vma(struct rcu_head *head)\n+{\n+\tstruct vm_area_struct *vma =\n+\t\tcontainer_of(head, struct vm_area_struct, vm_rcu_head);\n+\n+\tif (vma->vm_file)\n+\t\tfput(vma->vm_file);\n+\tkmem_cache_free(vm_area_cachep, vma);\n+}\n+\n+static void free_vma(struct vm_area_struct *vma)\n+{\n+\tcall_srcu(&vma_srcu, &vma->vm_rcu_head, __free_vma);\n+}\n+\n /*\n  * Close a vm structure and free it, returning the next.\n  */\n@@ -170,10 +187,8 @@ static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)\n \tmight_sleep();\n \tif (vma->vm_ops && vma->vm_ops->close)\n \t\tvma->vm_ops->close(vma);\n-\tif (vma->vm_file)\n-\t\tfput(vma->vm_file);\n \tmpol_put(vma_policy(vma));\n-\tkmem_cache_free(vm_area_cachep, vma);\n+\tfree_vma(vma);\n \treturn next;\n }\n \n@@ -411,26 +426,37 @@ static void vma_gap_update(struct vm_area_struct *vma)\n }\n \n static inline void vma_rb_insert(struct vm_area_struct *vma,\n-\t\t\t\t struct rb_root *root)\n+\t\t\t\t struct mm_struct *mm)\n {\n+\tstruct rb_root *root = &mm->mm_rb;\n+\n \t/* All rb_subtree_gap values must be consistent prior to insertion */\n \tvalidate_mm_rb(root, NULL);\n \n \trb_insert_augmented(&vma->vm_rb, root, &vma_gap_callbacks);\n }\n \n-static void __vma_rb_erase(struct vm_area_struct *vma, struct rb_root *root)\n+static void __vma_rb_erase(struct vm_area_struct *vma, struct mm_struct *mm)\n {\n+\tstruct rb_root *root = &mm->mm_rb;\n \t/*\n \t * Note rb_erase_augmented is a fairly large inline function,\n \t * so make sure we instantiate it only once with our desired\n \t * augmented rbtree callbacks.\n \t */\n+\twrite_seqlock(&mm->mm_seq);\n \trb_erase_augmented(&vma->vm_rb, root, &vma_gap_callbacks);\n+\twrite_sequnlock(&mm->mm_seq); /* wmb */\n+\n+\t/*\n+\t * 
Ensure the removal is complete before clearing the node.\n+\t * Matched by vma_has_changed()/handle_speculative_fault().\n+\t */\n+\tRB_CLEAR_NODE(&vma->vm_rb);\n }\n \n static __always_inline void vma_rb_erase_ignore(struct vm_area_struct *vma,\n-\t\t\t\t\t\tstruct rb_root *root,\n+\t\t\t\t\t\tstruct mm_struct *mm,\n \t\t\t\t\t\tstruct vm_area_struct *ignore)\n {\n \t/*\n@@ -438,21 +464,21 @@ static __always_inline void vma_rb_erase_ignore(struct vm_area_struct *vma,\n \t * with the possible exception of the \"next\" vma being erased if\n \t * next->vm_start was reduced.\n \t */\n-\tvalidate_mm_rb(root, ignore);\n+\tvalidate_mm_rb(&mm->mm_rb, ignore);\n \n-\t__vma_rb_erase(vma, root);\n+\t__vma_rb_erase(vma, mm);\n }\n \n static __always_inline void vma_rb_erase(struct vm_area_struct *vma,\n-\t\t\t\t\t struct rb_root *root)\n+\t\t\t\t\t struct mm_struct *mm)\n {\n \t/*\n \t * All rb_subtree_gap values must be consistent prior to erase,\n \t * with the possible exception of the vma being erased.\n \t */\n-\tvalidate_mm_rb(root, vma);\n+\tvalidate_mm_rb(&mm->mm_rb, vma);\n \n-\t__vma_rb_erase(vma, root);\n+\t__vma_rb_erase(vma, mm);\n }\n \n /*\n@@ -569,10 +595,12 @@ void __vma_link_rb(struct mm_struct *mm, struct vm_area_struct *vma,\n \t * immediately update the gap to the correct value. 
Finally we\n \t * rebalance the rbtree after all augmented values have been set.\n \t */\n+\twrite_seqlock(&mm->mm_seq);\n \trb_link_node(&vma->vm_rb, rb_parent, rb_link);\n \tvma->rb_subtree_gap = 0;\n \tvma_gap_update(vma);\n-\tvma_rb_insert(vma, &mm->mm_rb);\n+\tvma_rb_insert(vma, mm);\n+\twrite_sequnlock(&mm->mm_seq);\n }\n \n static void __vma_link_file(struct vm_area_struct *vma)\n@@ -648,7 +676,7 @@ static __always_inline void __vma_unlink_common(struct mm_struct *mm,\n {\n \tstruct vm_area_struct *next;\n \n-\tvma_rb_erase_ignore(vma, &mm->mm_rb, ignore);\n+\tvma_rb_erase_ignore(vma, mm, ignore);\n \tnext = vma->vm_next;\n \tif (has_prev)\n \t\tprev->vm_next = next;\n@@ -902,15 +930,13 @@ int __vma_adjust(struct vm_area_struct *vma, unsigned long start,\n \t}\n \n \tif (remove_next) {\n-\t\tif (file) {\n+\t\tif (file)\n \t\t\tuprobe_munmap(next, next->vm_start, next->vm_end);\n-\t\t\tfput(file);\n-\t\t}\n \t\tif (next->anon_vma)\n \t\t\tanon_vma_merge(vma, next);\n \t\tmm->map_count--;\n \t\tmpol_put(vma_policy(next));\n-\t\tkmem_cache_free(vm_area_cachep, next);\n+\t\tfree_vma(next);\n \t\twrite_seqcount_end(&next->vm_sequence);\n \t\t/*\n \t\t * In mprotect's case 6 (see comments on vma_merge),\n@@ -2131,15 +2157,10 @@ get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,\n EXPORT_SYMBOL(get_unmapped_area);\n \n /* Look up the first VMA which satisfies  addr < vm_end,  NULL if none. */\n-struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)\n+static struct vm_area_struct *__find_vma(struct mm_struct *mm, unsigned long addr)\n {\n \tstruct rb_node *rb_node;\n-\tstruct vm_area_struct *vma;\n-\n-\t/* Check the cache first. 
*/\n-\tvma = vmacache_find(mm, addr);\n-\tif (likely(vma))\n-\t\treturn vma;\n+\tstruct vm_area_struct *vma = NULL;\n \n \trb_node = mm->mm_rb.rb_node;\n \n@@ -2157,13 +2178,40 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)\n \t\t\trb_node = rb_node->rb_right;\n \t}\n \n+\treturn vma;\n+}\n+\n+struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)\n+{\n+\tstruct vm_area_struct *vma;\n+\n+\t/* Check the cache first. */\n+\tvma = vmacache_find(mm, addr);\n+\tif (likely(vma))\n+\t\treturn vma;\n+\n+\tvma = __find_vma(mm, addr);\n \tif (vma)\n \t\tvmacache_update(addr, vma);\n \treturn vma;\n }\n-\n EXPORT_SYMBOL(find_vma);\n \n+struct vm_area_struct *find_vma_srcu(struct mm_struct *mm, unsigned long addr)\n+{\n+\tstruct vm_area_struct *vma;\n+\tunsigned int seq;\n+\n+\tWARN_ON_ONCE(!srcu_read_lock_held(&vma_srcu));\n+\n+\tdo {\n+\t\tseq = read_seqbegin(&mm->mm_seq);\n+\t\tvma = __find_vma(mm, addr);\n+\t} while (read_seqretry(&mm->mm_seq, seq));\n+\n+\treturn vma;\n+}\n+\n /*\n  * Same as find_vma, but also return a pointer to the previous VMA in *pprev.\n  */\n@@ -2531,7 +2579,7 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,\n \tinsertion_point = (prev ? &prev->vm_next : &mm->mmap);\n \tvma->vm_prev = NULL;\n \tdo {\n-\t\tvma_rb_erase(vma, &mm->mm_rb);\n+\t\tvma_rb_erase(vma, mm);\n \t\tmm->map_count--;\n \t\ttail_vma = vma;\n \t\tvma = vma->vm_next;\n",
    "prefixes": [
        "v3",
        "06/20"
    ]
}