[2/2] powerpc: split hugepage when using subpage protection

Message ID 1368778503-23230-2-git-send-email-aneesh.kumar@linux.vnet.ibm.com (mailing list archive)
State Superseded

Commit Message

Aneesh Kumar K.V May 17, 2013, 8:15 a.m. UTC
From: "Aneesh Kumar K.V" <aneesh.kumar@linux.vnet.ibm.com>

We find all the VMAs that overlap the range and mark them so that we don't
allocate hugepages in that range. We also split any existing huge pages so
that the normal-page hash entries can be invalidated and new pages faulted
in with the new protection bits.

Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
---
 arch/powerpc/mm/subpage-prot.c | 27 +++++++++++++++++++++++++++
 1 file changed, 27 insertions(+)

Comments

Aneesh Kumar K.V May 17, 2013, 8:36 a.m. UTC | #1
"Aneesh Kumar K.V" <aneesh.kumar@linux.vnet.ibm.com> writes:

> From: "Aneesh Kumar K.V" <aneesh.kumar@linux.vnet.ibm.com>
>
> We find all the VMAs that overlap the range and mark them so that we don't
> allocate hugepages in that range. We also split any existing huge pages so
> that the normal-page hash entries can be invalidated and new pages faulted
> in with the new protection bits.
>
> Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
> ---
>  arch/powerpc/mm/subpage-prot.c | 27 +++++++++++++++++++++++++++
>  1 file changed, 27 insertions(+)
>
> diff --git a/arch/powerpc/mm/subpage-prot.c b/arch/powerpc/mm/subpage-prot.c
> index 7c415dd..33fd329 100644
> --- a/arch/powerpc/mm/subpage-prot.c
> +++ b/arch/powerpc/mm/subpage-prot.c
> @@ -130,6 +130,14 @@ static void subpage_prot_clear(unsigned long addr, unsigned long len)
>  	up_write(&mm->mmap_sem);
>  }
>
> +static int subpage_walk_pmd_entry(pmd_t *pmd, unsigned long addr,
> +				  unsigned long end, struct mm_walk *walk)
> +{
> +	struct vm_area_struct *vma = walk->private;
> +	split_huge_page_pmd(vma, addr, pmd);
> +	return 0;
> +}
> +
>  /*
>   * Copy in a subpage protection map for an address range.
>   * The map has 2 bits per 4k subpage, so 32 bits per 64k page.
> @@ -149,6 +157,12 @@ long sys_subpage_prot(unsigned long addr, unsigned long len, u32 __user *map)
>  	size_t nw;
>  	unsigned long next, limit;
>  	int err;
> +	struct vm_area_struct *vma;
> +
> +	struct mm_walk subpage_proto_walk = {
> +		.mm = mm,
> +		.pmd_entry = subpage_walk_pmd_entry,
> +	};
>
>  	/* Check parameters */
>  	if ((addr & ~PAGE_MASK) || (len & ~PAGE_MASK) ||
> @@ -168,6 +182,19 @@ long sys_subpage_prot(unsigned long addr, unsigned long len, u32 __user *map)
>  		return -EFAULT;
>
>  	down_write(&mm->mmap_sem);
> +
> +	/*
> +	 * We don't try too hard, we just mark all the vma in that range
> +	 * VM_NOHUGEPAGE and split them.
> +	 */
> +	for (vma = find_vma(mm, addr);
> +	     (vma && vma->vm_end < (addr + len)); vma = vma->vm_next) {

This should be (missed a git commit --amend; see the corrected loop sketched after this mail):

	     (vma && vma->vm_start < (addr + len)); vma = vma->vm_next) {

> +		vma->vm_flags |= VM_NOHUGEPAGE;
> +		subpage_proto_walk.private = vma;
> +		walk_page_range(vma->vm_start, vma->vm_end,
> +				&subpage_proto_walk);
> +	}
>  	for (limit = addr + len; addr < limit; addr = next) {
>  		next = pmd_addr_end(addr, limit);
>  		err = -ENOMEM;

-aneesh
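
For clarity, here is the loop as it reads with the corrected bound from the
follow-up above. This is only a sketch restating the hunk, not a separate
change: find_vma() already guarantees vma->vm_end > addr, so a VMA overlaps
[addr, addr + len) whenever vma->vm_start < addr + len; testing vma->vm_end
instead would skip a VMA that starts inside the range but ends at or beyond
addr + len.

	/*
	 * Sketch of the corrected loop (restates the hunk above with the
	 * vm_start bound): mark every VMA overlapping [addr, addr + len)
	 * VM_NOHUGEPAGE and split any transparent huge pages it maps, so
	 * the 4K hash entries can be invalidated and re-faulted with the
	 * new subpage protection bits.
	 */
	for (vma = find_vma(mm, addr);
	     vma && vma->vm_start < (addr + len); vma = vma->vm_next) {
		vma->vm_flags |= VM_NOHUGEPAGE;
		subpage_proto_walk.private = vma;
		walk_page_range(vma->vm_start, vma->vm_end,
				&subpage_proto_walk);
	}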

Patch

diff --git a/arch/powerpc/mm/subpage-prot.c b/arch/powerpc/mm/subpage-prot.c
index 7c415dd..33fd329 100644
--- a/arch/powerpc/mm/subpage-prot.c
+++ b/arch/powerpc/mm/subpage-prot.c
@@ -130,6 +130,14 @@ static void subpage_prot_clear(unsigned long addr, unsigned long len)
 	up_write(&mm->mmap_sem);
 }
 
+static int subpage_walk_pmd_entry(pmd_t *pmd, unsigned long addr,
+				  unsigned long end, struct mm_walk *walk)
+{
+	struct vm_area_struct *vma = walk->private;
+	split_huge_page_pmd(vma, addr, pmd);
+	return 0;
+}
+
 /*
  * Copy in a subpage protection map for an address range.
  * The map has 2 bits per 4k subpage, so 32 bits per 64k page.
@@ -149,6 +157,12 @@ long sys_subpage_prot(unsigned long addr, unsigned long len, u32 __user *map)
 	size_t nw;
 	unsigned long next, limit;
 	int err;
+	struct vm_area_struct *vma;
+
+	struct mm_walk subpage_proto_walk = {
+		.mm = mm,
+		.pmd_entry = subpage_walk_pmd_entry,
+	};
 
 	/* Check parameters */
 	if ((addr & ~PAGE_MASK) || (len & ~PAGE_MASK) ||
@@ -168,6 +182,19 @@ long sys_subpage_prot(unsigned long addr, unsigned long len, u32 __user *map)
 		return -EFAULT;
 
 	down_write(&mm->mmap_sem);
+
+	/*
+	 * We don't try too hard, we just mark all the vma in that range
+	 * VM_NOHUGEPAGE and split them.
+	 */
+	for (vma = find_vma(mm, addr);
+	     (vma && vma->vm_end < (addr + len)); vma = vma->vm_next) {
+
+		vma->vm_flags |= VM_NOHUGEPAGE;
+		subpage_proto_walk.private = vma;
+		walk_page_range(vma->vm_start, vma->vm_end,
+				&subpage_proto_walk);
+	}
 	for (limit = addr + len; addr < limit; addr = next) {
 		next = pmd_addr_end(addr, limit);
 		err = -ENOMEM;
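
As a footnote to the comment above sys_subpage_prot() ("The map has 2 bits
per 4k subpage, so 32 bits per 64k page"), the arithmetic is simply
64K / 4K = 16 subpages per base page, times 2 protection bits each. The
macro names below are illustrative only, not the kernel's:

	/* Illustrative arithmetic only; these macro names are made up. */
	#define SUBPAGE_SIZE_4K		(4UL << 10)	/* 4K subpage */
	#define BASE_PAGE_SIZE_64K	(64UL << 10)	/* 64K base page */
	#define SUBPAGES_PER_PAGE	(BASE_PAGE_SIZE_64K / SUBPAGE_SIZE_4K)	/* 16 */
	#define SPP_BITS_PER_SUBPAGE	2
	#define SPP_BITS_PER_PAGE	(SUBPAGES_PER_PAGE * SPP_BITS_PER_SUBPAGE)	/* 32 */
	/* i.e. one u32 word of the user-supplied map covers one 64K page. */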