
[1/1,CVE-2012-2390,LUCID,NATTY] hugetlb: fix resv_map leak in error path

Message ID 1341524433-10088-2-git-send-email-brad.figg@canonical.com
State New

Commit Message

Brad Figg July 5, 2012, 9:40 p.m. UTC
From: Dave Hansen <dave@linux.vnet.ibm.com>

CVE-2012-2390

BugLink: http://bugs.launchpad.net/bugs/1004621

When called for anonymous (non-shared) mappings, hugetlb_reserve_pages()
does a resv_map_alloc().  It depends on code in hugetlbfs's
vm_ops->close() to release that allocation.

However, in the mmap() failure path, we do a plain unmap_region() without
the remove_vma() which actually calls vm_ops->close().

This is a decent fix.  This leak could get reintroduced if new code (say,
after hugetlb_reserve_pages() in hugetlbfs_file_mmap()) decides to return
an error.  But, I think it would have to unroll the reservation anyway.

Christoph's test case:

	http://marc.info/?l=linux-mm&m=133728900729735

This patch applies to 3.4 and later.  A version for earlier kernels is at
https://lkml.org/lkml/2012/5/22/418.

Signed-off-by: Dave Hansen <dave@linux.vnet.ibm.com>
Acked-by: Mel Gorman <mel@csn.ul.ie>
Acked-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Reported-by: Christoph Lameter <cl@linux.com>
Tested-by: Christoph Lameter <cl@linux.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: <stable@vger.kernel.org>	[2.6.32+]
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
(cherry picked from commit c50ac050811d6485616a193eb0f37bfbd191cc89)
Signed-off-by: Brad Figg <brad.figg@canonical.com>
---
 mm/hugetlb.c |   28 ++++++++++++++++++++++------
 1 file changed, 22 insertions(+), 6 deletions(-)
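
A note on the mechanism the commit message describes: the resv_map is a refcounted allocation whose last reference is normally dropped by the hugetlbfs close() hook, so any teardown path that skips that hook leaks it. Below is a minimal, self-contained userspace model of that lifecycle; every name here (resv_map_model, resv_map_put_model, reserve_model, ...) is invented for illustration and this is not the kernel code.

/*
 * Simplified userspace model of the lifecycle above -- NOT the kernel code.
 * A refcounted "reservation map" is allocated when a private mapping is set
 * up and is normally freed when the close() hook drops the last reference;
 * an error path that tears the mapping down without that hook (the
 * unmap_region()-without-remove_vma() case) leaks it unless it does its own
 * put, which is what the patch adds.
 */
#include <stdio.h>
#include <stdlib.h>

struct resv_map_model {
	int refs;			/* stand-in for the kref */
};

static struct resv_map_model *resv_map_alloc_model(void)
{
	struct resv_map_model *map = calloc(1, sizeof(*map));

	if (map)
		map->refs = 1;
	return map;
}

/*
 * Release a reference, playing the role of the patch's resv_map_put()
 * (the real helper takes the vma and looks the map up through it).
 */
static void resv_map_put_model(struct resv_map_model *map)
{
	if (!map)
		return;
	if (--map->refs == 0)
		free(map);
}

/* Setup step, like hugetlb_reserve_pages() for a private mapping. */
static struct resv_map_model *reserve_model(int fail)
{
	struct resv_map_model *map = resv_map_alloc_model();

	if (!map)
		return NULL;
	if (fail) {
		/* Fixed behaviour: the error path unwinds its own allocation. */
		resv_map_put_model(map);
		return NULL;
	}
	return map;
}

int main(void)
{
	/* Success: the close() hook (modelled by the put) frees the map. */
	struct resv_map_model *map = reserve_model(0);

	resv_map_put_model(map);

	/*
	 * Failure: before the fix the error return skipped the put and the
	 * allocation was never freed; with the fix reserve_model() cleans up.
	 */
	if (reserve_model(1) == NULL)
		printf("error path cleaned up after itself\n");
	return 0;
}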

Comments

Herton Ronaldo Krzesinski July 6, 2012, 6:24 p.m. UTC | #1
On Thu, Jul 05, 2012 at 02:40:33PM -0700, Brad Figg wrote:
> From: Dave Hansen <dave@linux.vnet.ibm.com>
> 
> CVE-2012-2390
> 
> BugLink: http://bugs.launchpad.net/bugs/1004621
> 
> When called for anonymous (non-shared) mappings, hugetlb_reserve_pages()
> does a resv_map_alloc().  It depends on code in hugetlbfs's
> vm_ops->close() to release that allocation.
> 
> However, in the mmap() failure path, we do a plain unmap_region() without
> the remove_vma() which actually calls vm_ops->close().
> 
> This is a decent fix.  This leak could get reintroduced if new code (say,
> after hugetlb_reserve_pages() in hugetlbfs_file_mmap()) decides to return
> an error.  But, I think it would have to unroll the reservation anyway.
> 
> Christoph's test case:
> 
> 	http://marc.info/?l=linux-mm&m=133728900729735
> 
> This patch applies to 3.4 and later.  A version for earlier kernels is at
> https://lkml.org/lkml/2012/5/22/418.
> 
> Signed-off-by: Dave Hansen <dave@linux.vnet.ibm.com>
> Acked-by: Mel Gorman <mel@csn.ul.ie>
> Acked-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
> Reported-by: Christoph Lameter <cl@linux.com>
> Tested-by: Christoph Lameter <cl@linux.com>
> Cc: Andrea Arcangeli <aarcange@redhat.com>
> Cc: <stable@vger.kernel.org>	[2.6.32+]
> Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
> (cherry picked from commit c50ac050811d6485616a193eb0f37bfbd191cc89)
> Signed-off-by: Brad Figg <brad.figg@canonical.com>
> ---
>  mm/hugetlb.c |   28 ++++++++++++++++++++++------
>  1 file changed, 22 insertions(+), 6 deletions(-)
> 
> diff --git a/mm/hugetlb.c b/mm/hugetlb.c
> index 20f9240..3d61035 100644
> --- a/mm/hugetlb.c
> +++ b/mm/hugetlb.c
> @@ -1772,6 +1772,15 @@ static void hugetlb_vm_op_open(struct vm_area_struct *vma)
>  		kref_get(&reservations->refs);
>  }
>  
> +static void resv_map_put(struct vm_area_struct *vma)
> +{
> +	struct resv_map *reservations = vma_resv_map(vma);
> +
> +	if (!reservations)
> +		return;
> +	kref_put(&reservations->refs, resv_map_release);
> +}
> +
>  static void hugetlb_vm_op_close(struct vm_area_struct *vma)
>  {
>  	struct hstate *h = hstate_vma(vma);
> @@ -1788,7 +1797,7 @@ static void hugetlb_vm_op_close(struct vm_area_struct *vma)
>  		reserve = (end - start) -
>  			region_count(&reservations->regions, start, end);
>  
> -		kref_put(&reservations->refs, resv_map_release);
> +		resv_map_put(vma);
>  
>  		if (reserve) {
>  			hugetlb_acct_memory(h, -reserve);
> @@ -2472,12 +2481,16 @@ int hugetlb_reserve_pages(struct inode *inode,
>  		set_vma_resv_flags(vma, HPAGE_RESV_OWNER);
>  	}
>  
> -	if (chg < 0)
> -		return chg;
> +	if (chg < 0) {
> +		ret = chg;
> +		goto out_err;
> +	}
>  
>  	/* There must be enough pages in the subpool for the mapping */
> -	if (hugepage_subpool_get_pages(spool, chg))
> -		return -ENOSPC;
> +	if (hugepage_subpool_get_pages(spool, chg)) {
> +		ret = -ENOSPC;
> +		goto out_err;
> +	}
>  
>  	/*
>  	 * Check enough hugepages are available for the reservation.
> @@ -2486,7 +2499,7 @@ int hugetlb_reserve_pages(struct inode *inode,
>  	ret = hugetlb_acct_memory(h, chg);
>  	if (ret < 0) {
>  		hugepage_subpool_put_pages(spool, chg);
> -		return ret;
> +		goto out_err;
>  	}
>  
>  	/*
> @@ -2503,6 +2516,9 @@ int hugetlb_reserve_pages(struct inode *inode,
>  	if (!vma || vma->vm_flags & VM_MAYSHARE)
>  		region_add(&inode->i_mapping->private_list, from, to);
>  	return 0;
> +out_err:
> +	resv_map_put(vma);
> +	return ret;

Looks like we also have to pick 4523e1458566a0e8ecfaff90f380dd23acc44d27
on top after applying this, to avoid issues here (a short sketch of why
follows the quoted patch below).

>  }
>  
>  void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed)
> -- 
> 1.7.9.5
> 
> 
> -- 
> kernel-team mailing list
> kernel-team@lists.ubuntu.com
> https://lists.ubuntu.com/mailman/listinfo/kernel-team
>
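
On the follow-up Herton cites: as I understand it (an assumption drawn from the referenced commit id, not spelled out in this thread), hugetlb_reserve_pages() is also called without a VMA, e.g. from hugetlb_file_setup() for the shmget()/MAP_HUGETLB case, and the new out_err label would then hand a NULL vma to resv_map_put(), which looks the map up through the vma. A minimal userspace sketch of the shape of the extra guard (invented names, not the kernel code):

#include <stddef.h>
#include <stdio.h>

struct vma_model { void *private_data; };	/* stand-in for vm_area_struct */

/* Like vma_resv_map(): it reads through the vma, so vma must not be NULL. */
static void *vma_resv_map_model(struct vma_model *vma)
{
	return vma->private_data;
}

static void resv_map_put_model(struct vma_model *vma)
{
	void *map = vma_resv_map_model(vma);

	if (!map)
		return;
	/* ...dropping the reference / freeing the map would go here... */
}

/* Error-path cleanup with the kind of guard the follow-up adds. */
static void out_err_cleanup_model(struct vma_model *vma)
{
	if (vma)	/* no vma on the shmget()/hugetlb_file_setup() path */
		resv_map_put_model(vma);
}

int main(void)
{
	out_err_cleanup_model(NULL);	/* safe only because of the guard */
	printf("NULL-vma error path handled\n");
	return 0;
}
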
Tim Gardner July 6, 2012, 6:57 p.m. UTC | #2


Patch

diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 20f9240..3d61035 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1772,6 +1772,15 @@  static void hugetlb_vm_op_open(struct vm_area_struct *vma)
 		kref_get(&reservations->refs);
 }
 
+static void resv_map_put(struct vm_area_struct *vma)
+{
+	struct resv_map *reservations = vma_resv_map(vma);
+
+	if (!reservations)
+		return;
+	kref_put(&reservations->refs, resv_map_release);
+}
+
 static void hugetlb_vm_op_close(struct vm_area_struct *vma)
 {
 	struct hstate *h = hstate_vma(vma);
@@ -1788,7 +1797,7 @@  static void hugetlb_vm_op_close(struct vm_area_struct *vma)
 		reserve = (end - start) -
 			region_count(&reservations->regions, start, end);
 
-		kref_put(&reservations->refs, resv_map_release);
+		resv_map_put(vma);
 
 		if (reserve) {
 			hugetlb_acct_memory(h, -reserve);
@@ -2472,12 +2481,16 @@  int hugetlb_reserve_pages(struct inode *inode,
 		set_vma_resv_flags(vma, HPAGE_RESV_OWNER);
 	}
 
-	if (chg < 0)
-		return chg;
+	if (chg < 0) {
+		ret = chg;
+		goto out_err;
+	}
 
 	/* There must be enough pages in the subpool for the mapping */
-	if (hugepage_subpool_get_pages(spool, chg))
-		return -ENOSPC;
+	if (hugepage_subpool_get_pages(spool, chg)) {
+		ret = -ENOSPC;
+		goto out_err;
+	}
 
 	/*
 	 * Check enough hugepages are available for the reservation.
@@ -2486,7 +2499,7 @@  int hugetlb_reserve_pages(struct inode *inode,
 	ret = hugetlb_acct_memory(h, chg);
 	if (ret < 0) {
 		hugepage_subpool_put_pages(spool, chg);
-		return ret;
+		goto out_err;
 	}
 
 	/*
@@ -2503,6 +2516,9 @@  int hugetlb_reserve_pages(struct inode *inode,
 	if (!vma || vma->vm_flags & VM_MAYSHARE)
 		region_add(&inode->i_mapping->private_list, from, to);
 	return 0;
+out_err:
+	resv_map_put(vma);
+	return ret;
 }
 
 void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed)