get:
Show a patch.

patch:
Update a patch (partial update: only the fields supplied in the request body are changed).

put:
Update a patch (full update).
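
For reference, here is a minimal sketch of driving these operations from Python with the third-party `requests` library. The endpoint and patch ID match the example response below; the token value and the exact set of writable fields (for instance "state" and "archived") are assumptions about a typical Patchwork deployment, so check your instance's schema and your account's maintainer rights before relying on them.

import requests

BASE = "http://patchwork.ozlabs.org/api"
PATCH_ID = 811787           # the patch shown in the example response below
TOKEN = "0123456789abcdef"  # hypothetical API token from your Patchwork profile

# GET: show a patch. No authentication is needed to read public projects.
resp = requests.get(f"{BASE}/patches/{PATCH_ID}/")
resp.raise_for_status()
patch = resp.json()
print(patch["name"], "/", patch["state"])

# PATCH: partial update. Send only the fields being changed; updating a patch
# normally requires a token for an account with maintainer rights on the project.
resp = requests.patch(
    f"{BASE}/patches/{PATCH_ID}/",
    headers={"Authorization": f"Token {TOKEN}"},
    json={"state": "accepted", "archived": True},
)
resp.raise_for_status()

# PUT takes the same arguments (requests.put) but is conventionally used to
# supply the full set of writable fields in one request.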

GET /api/patches/811787/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 811787,
    "url": "http://patchwork.ozlabs.org/api/patches/811787/?format=api",
    "web_url": "http://patchwork.ozlabs.org/project/linuxppc-dev/patch/1504894024-2750-6-git-send-email-ldufour@linux.vnet.ibm.com/",
    "project": {
        "id": 2,
        "url": "http://patchwork.ozlabs.org/api/projects/2/?format=api",
        "name": "Linux PPC development",
        "link_name": "linuxppc-dev",
        "list_id": "linuxppc-dev.lists.ozlabs.org",
        "list_email": "linuxppc-dev@lists.ozlabs.org",
        "web_url": "https://github.com/linuxppc/wiki/wiki",
        "scm_url": "https://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux.git",
        "webscm_url": "https://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux.git/",
        "list_archive_url": "https://lore.kernel.org/linuxppc-dev/",
        "list_archive_url_format": "https://lore.kernel.org/linuxppc-dev/{}/",
        "commit_url_format": "https://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux.git/commit/?id={}"
    },
    "msgid": "<1504894024-2750-6-git-send-email-ldufour@linux.vnet.ibm.com>",
    "list_archive_url": "https://lore.kernel.org/linuxppc-dev/1504894024-2750-6-git-send-email-ldufour@linux.vnet.ibm.com/",
    "date": "2017-09-08T18:06:49",
    "name": "[v3,05/20] mm: Protect VMA modifications using VMA sequence count",
    "commit_ref": null,
    "pull_url": null,
    "state": "not-applicable",
    "archived": false,
    "hash": "4ca1a641f53880f56f85986159b3a0dcaa25919d",
    "submitter": {
        "id": 40248,
        "url": "http://patchwork.ozlabs.org/api/people/40248/?format=api",
        "name": "Laurent Dufour",
        "email": "ldufour@linux.vnet.ibm.com"
    },
    "delegate": null,
    "mbox": "http://patchwork.ozlabs.org/project/linuxppc-dev/patch/1504894024-2750-6-git-send-email-ldufour@linux.vnet.ibm.com/mbox/",
    "series": [
        {
            "id": 2269,
            "url": "http://patchwork.ozlabs.org/api/series/2269/?format=api",
            "web_url": "http://patchwork.ozlabs.org/project/linuxppc-dev/list/?series=2269",
            "date": "2017-09-08T18:06:44",
            "name": "Speculative page faults",
            "version": 3,
            "mbox": "http://patchwork.ozlabs.org/series/2269/mbox/"
        }
    ],
    "comments": "http://patchwork.ozlabs.org/api/patches/811787/comments/",
    "check": "pending",
    "checks": "http://patchwork.ozlabs.org/api/patches/811787/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<linuxppc-dev-bounces+patchwork-incoming=ozlabs.org@lists.ozlabs.org>",
        "X-Original-To": [
            "patchwork-incoming@ozlabs.org",
            "linuxppc-dev@lists.ozlabs.org"
        ],
        "Delivered-To": [
            "patchwork-incoming@ozlabs.org",
            "linuxppc-dev@lists.ozlabs.org"
        ],
        "Received": [
            "from lists.ozlabs.org (lists.ozlabs.org [IPv6:2401:3900:2:1::3])\n\t(using TLSv1.2 with cipher ADH-AES256-GCM-SHA384 (256/256 bits))\n\t(No client certificate requested)\n\tby ozlabs.org (Postfix) with ESMTPS id 3xpm214s9qz9s7C\n\tfor <patchwork-incoming@ozlabs.org>;\n\tSat,  9 Sep 2017 04:24:41 +1000 (AEST)",
            "from lists.ozlabs.org (lists.ozlabs.org [IPv6:2401:3900:2:1::3])\n\tby lists.ozlabs.org (Postfix) with ESMTP id 3xpm213VrmzDqh4\n\tfor <patchwork-incoming@ozlabs.org>;\n\tSat,  9 Sep 2017 04:24:41 +1000 (AEST)",
            "from mx0a-001b2d01.pphosted.com (mx0b-001b2d01.pphosted.com\n\t[148.163.158.5])\n\t(using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256\n\tbits)) (No client certificate requested)\n\tby lists.ozlabs.org (Postfix) with ESMTPS id 3xplfG27snzDrXd\n\tfor <linuxppc-dev@lists.ozlabs.org>;\n\tSat,  9 Sep 2017 04:07:34 +1000 (AEST)",
            "from pps.filterd (m0098417.ppops.net [127.0.0.1])\n\tby mx0a-001b2d01.pphosted.com (8.16.0.21/8.16.0.21) with SMTP id\n\tv88I4Peh074079\n\tfor <linuxppc-dev@lists.ozlabs.org>; Fri, 8 Sep 2017 14:07:31 -0400",
            "from e06smtp11.uk.ibm.com (e06smtp11.uk.ibm.com [195.75.94.107])\n\tby mx0a-001b2d01.pphosted.com with ESMTP id 2cux3968hy-1\n\t(version=TLSv1.2 cipher=AES256-SHA bits=256 verify=NOT)\n\tfor <linuxppc-dev@lists.ozlabs.org>; Fri, 08 Sep 2017 14:07:31 -0400",
            "from localhost\n\tby e06smtp11.uk.ibm.com with IBM ESMTP SMTP Gateway: Authorized Use\n\tOnly! Violators will be prosecuted\n\tfor <linuxppc-dev@lists.ozlabs.org> from <ldufour@linux.vnet.ibm.com>;\n\tFri, 8 Sep 2017 19:07:29 +0100",
            "from b06cxnps4076.portsmouth.uk.ibm.com (9.149.109.198)\n\tby e06smtp11.uk.ibm.com (192.168.101.141) with IBM ESMTP SMTP\n\tGateway: Authorized Use Only! Violators will be prosecuted; \n\tFri, 8 Sep 2017 19:07:23 +0100",
            "from d06av24.portsmouth.uk.ibm.com (d06av24.portsmouth.uk.ibm.com\n\t[9.149.105.60])\n\tby b06cxnps4076.portsmouth.uk.ibm.com (8.14.9/8.14.9/NCO v10.0) with\n\tESMTP id v88I7MRo15597714; Fri, 8 Sep 2017 18:07:22 GMT",
            "from d06av24.portsmouth.uk.ibm.com (unknown [127.0.0.1])\n\tby IMSVA (Postfix) with ESMTP id B41B442041;\n\tFri,  8 Sep 2017 19:03:49 +0100 (BST)",
            "from d06av24.portsmouth.uk.ibm.com (unknown [127.0.0.1])\n\tby IMSVA (Postfix) with ESMTP id E8A074203F;\n\tFri,  8 Sep 2017 19:03:47 +0100 (BST)",
            "from nimbus.lab.toulouse-stg.fr.ibm.com (unknown [9.145.31.125])\n\tby d06av24.portsmouth.uk.ibm.com (Postfix) with ESMTP;\n\tFri,  8 Sep 2017 19:03:47 +0100 (BST)"
        ],
        "Authentication-Results": "ozlabs.org;\n\tspf=none (mailfrom) smtp.mailfrom=linux.vnet.ibm.com\n\t(client-ip=148.163.158.5; helo=mx0a-001b2d01.pphosted.com;\n\tenvelope-from=ldufour@linux.vnet.ibm.com; receiver=<UNKNOWN>)",
        "From": "Laurent Dufour <ldufour@linux.vnet.ibm.com>",
        "To": "paulmck@linux.vnet.ibm.com, peterz@infradead.org,\n\takpm@linux-foundation.org, kirill@shutemov.name, ak@linux.intel.com, \n\tmhocko@kernel.org, dave@stgolabs.net, jack@suse.cz,\n\tMatthew Wilcox <willy@infradead.org>, benh@kernel.crashing.org,\n\tmpe@ellerman.id.au, paulus@samba.org,\n\tThomas Gleixner <tglx@linutronix.de>, Ingo Molnar <mingo@redhat.com>, \n\thpa@zytor.com, Will Deacon <will.deacon@arm.com>,\n\tSergey Senozhatsky <sergey.senozhatsky@gmail.com>",
        "Subject": "[PATCH v3 05/20] mm: Protect VMA modifications using VMA sequence\n\tcount",
        "Date": "Fri,  8 Sep 2017 20:06:49 +0200",
        "X-Mailer": "git-send-email 2.7.4",
        "In-Reply-To": "<1504894024-2750-1-git-send-email-ldufour@linux.vnet.ibm.com>",
        "References": "<1504894024-2750-1-git-send-email-ldufour@linux.vnet.ibm.com>",
        "X-TM-AS-GCONF": "00",
        "x-cbid": "17090818-0040-0000-0000-000003F89DB9",
        "X-IBM-AV-DETECTION": "SAVI=unused REMOTE=unused XFE=unused",
        "x-cbparentid": "17090818-0041-0000-0000-00002099A538",
        "Message-Id": "<1504894024-2750-6-git-send-email-ldufour@linux.vnet.ibm.com>",
        "X-Proofpoint-Virus-Version": "vendor=fsecure engine=2.50.10432:, ,\n\tdefinitions=2017-09-08_12:, , signatures=0",
        "X-Proofpoint-Spam-Details": "rule=outbound_notspam policy=outbound score=0\n\tspamscore=0 suspectscore=2\n\tmalwarescore=0 phishscore=0 adultscore=0 bulkscore=0 classifier=spam\n\tadjust=0 reason=mlx scancount=1 engine=8.0.1-1707230000\n\tdefinitions=main-1709080270",
        "X-BeenThere": "linuxppc-dev@lists.ozlabs.org",
        "X-Mailman-Version": "2.1.23",
        "Precedence": "list",
        "List-Id": "Linux on PowerPC Developers Mail List\n\t<linuxppc-dev.lists.ozlabs.org>",
        "List-Unsubscribe": "<https://lists.ozlabs.org/options/linuxppc-dev>,\n\t<mailto:linuxppc-dev-request@lists.ozlabs.org?subject=unsubscribe>",
        "List-Archive": "<http://lists.ozlabs.org/pipermail/linuxppc-dev/>",
        "List-Post": "<mailto:linuxppc-dev@lists.ozlabs.org>",
        "List-Help": "<mailto:linuxppc-dev-request@lists.ozlabs.org?subject=help>",
        "List-Subscribe": "<https://lists.ozlabs.org/listinfo/linuxppc-dev>,\n\t<mailto:linuxppc-dev-request@lists.ozlabs.org?subject=subscribe>",
        "Cc": "linuxppc-dev@lists.ozlabs.org, x86@kernel.org,\n\tlinux-kernel@vger.kernel.org, npiggin@gmail.com, linux-mm@kvack.org,\n\tTim Chen <tim.c.chen@linux.intel.com>, \n\tharen@linux.vnet.ibm.com, khandual@linux.vnet.ibm.com",
        "Errors-To": "linuxppc-dev-bounces+patchwork-incoming=ozlabs.org@lists.ozlabs.org",
        "Sender": "\"Linuxppc-dev\"\n\t<linuxppc-dev-bounces+patchwork-incoming=ozlabs.org@lists.ozlabs.org>"
    },
    "content": "The VMA sequence count has been introduced to allow fast detection of\nVMA modification when running a page fault handler without holding\nthe mmap_sem.\n\nThis patch provides protection against the VMA modification done in :\n\t- madvise()\n\t- mremap()\n\t- mpol_rebind_policy()\n\t- vma_replace_policy()\n\t- change_prot_numa()\n\t- mlock(), munlock()\n\t- mprotect()\n\t- mmap_region()\n\t- collapse_huge_page()\n\t- userfaultd registering services\n\nIn addition, VMA fields which will be read during the speculative fault\npath needs to be written using WRITE_ONCE to prevent write to be split\nand intermediate values to be pushed to other CPUs.\n\nSigned-off-by: Laurent Dufour <ldufour@linux.vnet.ibm.com>\n---\n fs/proc/task_mmu.c |  5 ++++-\n fs/userfaultfd.c   | 17 +++++++++++++----\n mm/khugepaged.c    |  3 +++\n mm/madvise.c       |  6 +++++-\n mm/mempolicy.c     | 51 ++++++++++++++++++++++++++++++++++-----------------\n mm/mlock.c         | 13 ++++++++-----\n mm/mmap.c          | 17 ++++++++++-------\n mm/mprotect.c      |  4 +++-\n mm/mremap.c        |  7 +++++++\n 9 files changed, 87 insertions(+), 36 deletions(-)",
    "diff": "diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c\nindex 5589b4bd4b85..550bbc852143 100644\n--- a/fs/proc/task_mmu.c\n+++ b/fs/proc/task_mmu.c\n@@ -1152,8 +1152,11 @@ static ssize_t clear_refs_write(struct file *file, const char __user *buf,\n \t\t\t\t\tgoto out_mm;\n \t\t\t\t}\n \t\t\t\tfor (vma = mm->mmap; vma; vma = vma->vm_next) {\n-\t\t\t\t\tvma->vm_flags &= ~VM_SOFTDIRTY;\n+\t\t\t\t\twrite_seqcount_begin(&vma->vm_sequence);\n+\t\t\t\t\tWRITE_ONCE(vma->vm_flags,\n+\t\t\t\t\t\t   vma->vm_flags & ~VM_SOFTDIRTY);\n \t\t\t\t\tvma_set_page_prot(vma);\n+\t\t\t\t\twrite_seqcount_end(&vma->vm_sequence);\n \t\t\t\t}\n \t\t\t\tdowngrade_write(&mm->mmap_sem);\n \t\t\t\tbreak;\ndiff --git a/fs/userfaultfd.c b/fs/userfaultfd.c\nindex ef4b48d1ea42..856570f327c3 100644\n--- a/fs/userfaultfd.c\n+++ b/fs/userfaultfd.c\n@@ -634,8 +634,11 @@ int dup_userfaultfd(struct vm_area_struct *vma, struct list_head *fcs)\n \n \toctx = vma->vm_userfaultfd_ctx.ctx;\n \tif (!octx || !(octx->features & UFFD_FEATURE_EVENT_FORK)) {\n+\t\twrite_seqcount_begin(&vma->vm_sequence);\n \t\tvma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;\n-\t\tvma->vm_flags &= ~(VM_UFFD_WP | VM_UFFD_MISSING);\n+\t\tWRITE_ONCE(vma->vm_flags,\n+\t\t\t   vma->vm_flags & ~(VM_UFFD_WP | VM_UFFD_MISSING));\n+\t\twrite_seqcount_end(&vma->vm_sequence);\n \t\treturn 0;\n \t}\n \n@@ -860,8 +863,10 @@ static int userfaultfd_release(struct inode *inode, struct file *file)\n \t\t\tvma = prev;\n \t\telse\n \t\t\tprev = vma;\n-\t\tvma->vm_flags = new_flags;\n+\t\twrite_seqcount_begin(&vma->vm_sequence);\n+\t\tWRITE_ONCE(vma->vm_flags, new_flags);\n \t\tvma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;\n+\t\twrite_seqcount_end(&vma->vm_sequence);\n \t}\n \tup_write(&mm->mmap_sem);\n \tmmput(mm);\n@@ -1379,8 +1384,10 @@ static int userfaultfd_register(struct userfaultfd_ctx *ctx,\n \t\t * the next vma was merged into the current one and\n \t\t * the current one has not been updated yet.\n \t\t */\n-\t\tvma->vm_flags = new_flags;\n+\t\twrite_seqcount_begin(&vma->vm_sequence);\n+\t\tWRITE_ONCE(vma->vm_flags, new_flags);\n \t\tvma->vm_userfaultfd_ctx.ctx = ctx;\n+\t\twrite_seqcount_end(&vma->vm_sequence);\n \n \tskip:\n \t\tprev = vma;\n@@ -1537,8 +1544,10 @@ static int userfaultfd_unregister(struct userfaultfd_ctx *ctx,\n \t\t * the next vma was merged into the current one and\n \t\t * the current one has not been updated yet.\n \t\t */\n-\t\tvma->vm_flags = new_flags;\n+\t\twrite_seqcount_begin(&vma->vm_sequence);\n+\t\tWRITE_ONCE(vma->vm_flags, new_flags);\n \t\tvma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;\n+\t\twrite_seqcount_end(&vma->vm_sequence);\n \n \tskip:\n \t\tprev = vma;\ndiff --git a/mm/khugepaged.c b/mm/khugepaged.c\nindex c01f177a1120..56dd994c05d0 100644\n--- a/mm/khugepaged.c\n+++ b/mm/khugepaged.c\n@@ -1005,6 +1005,7 @@ static void collapse_huge_page(struct mm_struct *mm,\n \tif (mm_find_pmd(mm, address) != pmd)\n \t\tgoto out;\n \n+\twrite_seqcount_begin(&vma->vm_sequence);\n \tanon_vma_lock_write(vma->anon_vma);\n \n \tpte = pte_offset_map(pmd, address);\n@@ -1040,6 +1041,7 @@ static void collapse_huge_page(struct mm_struct *mm,\n \t\tpmd_populate(mm, pmd, pmd_pgtable(_pmd));\n \t\tspin_unlock(pmd_ptl);\n \t\tanon_vma_unlock_write(vma->anon_vma);\n+\t\twrite_seqcount_end(&vma->vm_sequence);\n \t\tresult = SCAN_FAIL;\n \t\tgoto out;\n \t}\n@@ -1074,6 +1076,7 @@ static void collapse_huge_page(struct mm_struct *mm,\n \tset_pmd_at(mm, address, pmd, _pmd);\n \tupdate_mmu_cache_pmd(vma, address, pmd);\n 
\tspin_unlock(pmd_ptl);\n+\twrite_seqcount_end(&vma->vm_sequence);\n \n \t*hpage = NULL;\n \ndiff --git a/mm/madvise.c b/mm/madvise.c\nindex 21261ff0466f..bedb0ec25c77 100644\n--- a/mm/madvise.c\n+++ b/mm/madvise.c\n@@ -183,7 +183,9 @@ static long madvise_behavior(struct vm_area_struct *vma,\n \t/*\n \t * vm_flags is protected by the mmap_sem held in write mode.\n \t */\n-\tvma->vm_flags = new_flags;\n+\twrite_seqcount_begin(&vma->vm_sequence);\n+\tWRITE_ONCE(vma->vm_flags, new_flags);\n+\twrite_seqcount_end(&vma->vm_sequence);\n out:\n \treturn error;\n }\n@@ -451,9 +453,11 @@ static void madvise_free_page_range(struct mmu_gather *tlb,\n \t\t.private = tlb,\n \t};\n \n+\twrite_seqcount_begin(&vma->vm_sequence);\n \ttlb_start_vma(tlb, vma);\n \twalk_page_range(addr, end, &free_walk);\n \ttlb_end_vma(tlb, vma);\n+\twrite_seqcount_end(&vma->vm_sequence);\n }\n \n static int madvise_free_single_vma(struct vm_area_struct *vma,\ndiff --git a/mm/mempolicy.c b/mm/mempolicy.c\nindex 006ba625c0b8..ac1096b1be21 100644\n--- a/mm/mempolicy.c\n+++ b/mm/mempolicy.c\n@@ -379,8 +379,11 @@ void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)\n \tstruct vm_area_struct *vma;\n \n \tdown_write(&mm->mmap_sem);\n-\tfor (vma = mm->mmap; vma; vma = vma->vm_next)\n+\tfor (vma = mm->mmap; vma; vma = vma->vm_next) {\n+\t\twrite_seqcount_begin(&vma->vm_sequence);\n \t\tmpol_rebind_policy(vma->vm_policy, new);\n+\t\twrite_seqcount_end(&vma->vm_sequence);\n+\t}\n \tup_write(&mm->mmap_sem);\n }\n \n@@ -578,9 +581,11 @@ unsigned long change_prot_numa(struct vm_area_struct *vma,\n {\n \tint nr_updated;\n \n+\twrite_seqcount_begin(&vma->vm_sequence);\n \tnr_updated = change_protection(vma, addr, end, PAGE_NONE, 0, 1);\n \tif (nr_updated)\n \t\tcount_vm_numa_events(NUMA_PTE_UPDATES, nr_updated);\n+\twrite_seqcount_end(&vma->vm_sequence);\n \n \treturn nr_updated;\n }\n@@ -681,6 +686,7 @@ static int vma_replace_policy(struct vm_area_struct *vma,\n \tif (IS_ERR(new))\n \t\treturn PTR_ERR(new);\n \n+\twrite_seqcount_begin(&vma->vm_sequence);\n \tif (vma->vm_ops && vma->vm_ops->set_policy) {\n \t\terr = vma->vm_ops->set_policy(vma, new);\n \t\tif (err)\n@@ -688,11 +694,17 @@ static int vma_replace_policy(struct vm_area_struct *vma,\n \t}\n \n \told = vma->vm_policy;\n-\tvma->vm_policy = new; /* protected by mmap_sem */\n+\t/*\n+\t * The speculative page fault handler access this field without\n+\t * hodling the mmap_sem.\n+\t */\n+\tWRITE_ONCE(vma->vm_policy,  new);\n+\twrite_seqcount_end(&vma->vm_sequence);\n \tmpol_put(old);\n \n \treturn 0;\n  err_out:\n+\twrite_seqcount_end(&vma->vm_sequence);\n \tmpol_put(new);\n \treturn err;\n }\n@@ -1562,23 +1574,28 @@ COMPAT_SYSCALL_DEFINE6(mbind, compat_ulong_t, start, compat_ulong_t, len,\n struct mempolicy *__get_vma_policy(struct vm_area_struct *vma,\n \t\t\t\t\t\tunsigned long addr)\n {\n-\tstruct mempolicy *pol = NULL;\n+\tstruct mempolicy *pol;\n \n-\tif (vma) {\n-\t\tif (vma->vm_ops && vma->vm_ops->get_policy) {\n-\t\t\tpol = vma->vm_ops->get_policy(vma, addr);\n-\t\t} else if (vma->vm_policy) {\n-\t\t\tpol = vma->vm_policy;\n+\tif (!vma)\n+\t\treturn NULL;\n \n-\t\t\t/*\n-\t\t\t * shmem_alloc_page() passes MPOL_F_SHARED policy with\n-\t\t\t * a pseudo vma whose vma->vm_ops=NULL. 
Take a reference\n-\t\t\t * count on these policies which will be dropped by\n-\t\t\t * mpol_cond_put() later\n-\t\t\t */\n-\t\t\tif (mpol_needs_cond_ref(pol))\n-\t\t\t\tmpol_get(pol);\n-\t\t}\n+\tif (vma->vm_ops && vma->vm_ops->get_policy)\n+\t\treturn vma->vm_ops->get_policy(vma, addr);\n+\n+\t/*\n+\t * This could be called without holding the mmap_sem in the\n+\t * speculative page fault handler's path.\n+\t */\n+\tpol = READ_ONCE(vma->vm_policy);\n+\tif (pol) {\n+\t\t/*\n+\t\t * shmem_alloc_page() passes MPOL_F_SHARED policy with\n+\t\t * a pseudo vma whose vma->vm_ops=NULL. Take a reference\n+\t\t * count on these policies which will be dropped by\n+\t\t * mpol_cond_put() later\n+\t\t */\n+\t\tif (mpol_needs_cond_ref(pol))\n+\t\t\tmpol_get(pol);\n \t}\n \n \treturn pol;\ndiff --git a/mm/mlock.c b/mm/mlock.c\nindex dfc6f1912176..4793a96cbc35 100644\n--- a/mm/mlock.c\n+++ b/mm/mlock.c\n@@ -438,7 +438,9 @@ static unsigned long __munlock_pagevec_fill(struct pagevec *pvec,\n void munlock_vma_pages_range(struct vm_area_struct *vma,\n \t\t\t     unsigned long start, unsigned long end)\n {\n-\tvma->vm_flags &= VM_LOCKED_CLEAR_MASK;\n+\twrite_seqcount_begin(&vma->vm_sequence);\n+\tWRITE_ONCE(vma->vm_flags, vma->vm_flags & VM_LOCKED_CLEAR_MASK);\n+\twrite_seqcount_end(&vma->vm_sequence);\n \n \twhile (start < end) {\n \t\tstruct page *page;\n@@ -561,10 +563,11 @@ static int mlock_fixup(struct vm_area_struct *vma, struct vm_area_struct **prev,\n \t * It's okay if try_to_unmap_one unmaps a page just after we\n \t * set VM_LOCKED, populate_vma_page_range will bring it back.\n \t */\n-\n-\tif (lock)\n-\t\tvma->vm_flags = newflags;\n-\telse\n+\tif (lock) {\n+\t\twrite_seqcount_begin(&vma->vm_sequence);\n+\t\tWRITE_ONCE(vma->vm_flags, newflags);\n+\t\twrite_seqcount_end(&vma->vm_sequence);\n+\t} else\n \t\tmunlock_vma_pages_range(vma, start, end);\n \n out:\ndiff --git a/mm/mmap.c b/mm/mmap.c\nindex 0a0012c7e50c..04e72314274d 100644\n--- a/mm/mmap.c\n+++ b/mm/mmap.c\n@@ -826,17 +826,18 @@ int __vma_adjust(struct vm_area_struct *vma, unsigned long start,\n \t}\n \n \tif (start != vma->vm_start) {\n-\t\tvma->vm_start = start;\n+\t\tWRITE_ONCE(vma->vm_start, start);\n \t\tstart_changed = true;\n \t}\n \tif (end != vma->vm_end) {\n-\t\tvma->vm_end = end;\n+\t\tWRITE_ONCE(vma->vm_end, end);\n \t\tend_changed = true;\n \t}\n-\tvma->vm_pgoff = pgoff;\n+\tWRITE_ONCE(vma->vm_pgoff, pgoff);\n \tif (adjust_next) {\n-\t\tnext->vm_start += adjust_next << PAGE_SHIFT;\n-\t\tnext->vm_pgoff += adjust_next;\n+\t\tWRITE_ONCE(next->vm_start,\n+\t\t\t   next->vm_start + (adjust_next << PAGE_SHIFT));\n+\t\tWRITE_ONCE(next->vm_pgoff, next->vm_pgoff + adjust_next);\n \t}\n \n \tif (root) {\n@@ -1735,6 +1736,7 @@ unsigned long mmap_region(struct file *file, unsigned long addr,\n out:\n \tperf_event_mmap(vma);\n \n+\twrite_seqcount_begin(&vma->vm_sequence);\n \tvm_stat_account(mm, vm_flags, len >> PAGE_SHIFT);\n \tif (vm_flags & VM_LOCKED) {\n \t\tif (!((vm_flags & VM_SPECIAL) || is_vm_hugetlb_page(vma) ||\n@@ -1757,6 +1759,7 @@ unsigned long mmap_region(struct file *file, unsigned long addr,\n \tvma->vm_flags |= VM_SOFTDIRTY;\n \n \tvma_set_page_prot(vma);\n+\twrite_seqcount_end(&vma->vm_sequence);\n \n \treturn addr;\n \n@@ -2385,8 +2388,8 @@ int expand_downwards(struct vm_area_struct *vma,\n \t\t\t\t\tmm->locked_vm += grow;\n \t\t\t\tvm_stat_account(mm, vma->vm_flags, grow);\n \t\t\t\tanon_vma_interval_tree_pre_update_vma(vma);\n-\t\t\t\tvma->vm_start = address;\n-\t\t\t\tvma->vm_pgoff -= 
grow;\n+\t\t\t\tWRITE_ONCE(vma->vm_start, address);\n+\t\t\t\tWRITE_ONCE(vma->vm_pgoff, vma->vm_pgoff - grow);\n \t\t\t\tanon_vma_interval_tree_post_update_vma(vma);\n \t\t\t\tvma_gap_update(vma);\n \t\t\t\tspin_unlock(&mm->page_table_lock);\ndiff --git a/mm/mprotect.c b/mm/mprotect.c\nindex 6d3e2f082290..0c9aa0b1a74e 100644\n--- a/mm/mprotect.c\n+++ b/mm/mprotect.c\n@@ -358,7 +358,8 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,\n \t * vm_flags and vm_page_prot are protected by the mmap_sem\n \t * held in write mode.\n \t */\n-\tvma->vm_flags = newflags;\n+\twrite_seqcount_begin(&vma->vm_sequence);\n+\tWRITE_ONCE(vma->vm_flags, newflags);\n \tdirty_accountable = vma_wants_writenotify(vma, vma->vm_page_prot);\n \tvma_set_page_prot(vma);\n \n@@ -373,6 +374,7 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,\n \t\t\t(newflags & VM_WRITE)) {\n \t\tpopulate_vma_page_range(vma, start, end, NULL);\n \t}\n+\twrite_seqcount_end(&vma->vm_sequence);\n \n \tvm_stat_account(mm, oldflags, -nrpages);\n \tvm_stat_account(mm, newflags, nrpages);\ndiff --git a/mm/mremap.c b/mm/mremap.c\nindex cfec004c4ff9..240618950215 100644\n--- a/mm/mremap.c\n+++ b/mm/mremap.c\n@@ -301,6 +301,10 @@ static unsigned long move_vma(struct vm_area_struct *vma,\n \tif (!new_vma)\n \t\treturn -ENOMEM;\n \n+\twrite_seqcount_begin(&vma->vm_sequence);\n+\twrite_seqcount_begin_nested(&new_vma->vm_sequence,\n+\t\t\t\t    SINGLE_DEPTH_NESTING);\n+\n \tmoved_len = move_page_tables(vma, old_addr, new_vma, new_addr, old_len,\n \t\t\t\t     need_rmap_locks);\n \tif (moved_len < old_len) {\n@@ -317,6 +321,7 @@ static unsigned long move_vma(struct vm_area_struct *vma,\n \t\t */\n \t\tmove_page_tables(new_vma, new_addr, vma, old_addr, moved_len,\n \t\t\t\t true);\n+\t\twrite_seqcount_end(&vma->vm_sequence);\n \t\tvma = new_vma;\n \t\told_len = new_len;\n \t\told_addr = new_addr;\n@@ -325,7 +330,9 @@ static unsigned long move_vma(struct vm_area_struct *vma,\n \t\tmremap_userfaultfd_prep(new_vma, uf);\n \t\tarch_remap(mm, old_addr, old_addr + old_len,\n \t\t\t   new_addr, new_addr + new_len);\n+\t\twrite_seqcount_end(&vma->vm_sequence);\n \t}\n+\twrite_seqcount_end(&new_vma->vm_sequence);\n \n \t/* Conceal VM_ACCOUNT so old reservation is not undone */\n \tif (vm_flags & VM_ACCOUNT) {\n",
    "prefixes": [
        "v3",
        "05/20"
    ]
}
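
Because the response is plain JSON, the linked resources are straightforward to consume programmatically. As a hedged illustration (the output file name and the choice to expand the templates with the message ID are my own, not part of the API contract), the `mbox` URL can be downloaded directly, and the `{}` templates in the project object can be filled in with the bare message ID or a commit SHA; filling `list_archive_url_format` with the msgid stripped of its angle brackets reproduces the `list_archive_url` value shown above.

import requests

patch = requests.get("http://patchwork.ozlabs.org/api/patches/811787/").json()
project = patch["project"]

# Download the patch as an mbox, e.g. to feed to `git am` later.
with open("patch.mbox", "wb") as f:   # file name is illustrative
    f.write(requests.get(patch["mbox"]).content)

# The *_format fields are URL templates with a "{}" placeholder. The archive
# template takes the message ID without its angle brackets; the commit template
# takes a commit SHA (commit_ref is null for this patch, hence the guard).
msgid = patch["msgid"].strip("<>")
print(project["list_archive_url_format"].format(msgid))
if patch["commit_ref"]:
    print(project["commit_url_format"].format(patch["commit_ref"]))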