
powerpc/mm: Use refcount_t for refcount

Message ID 20190808071808.6531-1-hslester96@gmail.com (mailing list archive)
State Changes Requested
Headers show
Series powerpc/mm: Use refcount_t for refcount

Checks

Context                          Check    Description
snowpatch_ozlabs/apply_patch     success  Successfully applied on branch next (f3365d1a959d5c6527efe3d38276acc9b58e3f3f)
snowpatch_ozlabs/build-ppc64le   success  Build succeeded
snowpatch_ozlabs/build-ppc64be   success  Build succeeded
snowpatch_ozlabs/build-ppc64e    success  Build succeeded
snowpatch_ozlabs/build-pmac32    success  Build succeeded
snowpatch_ozlabs/checkpatch      success  total: 0 errors, 0 warnings, 0 checks, 81 lines checked

Commit Message

Chuhong Yuan Aug. 8, 2019, 7:18 a.m. UTC
Reference counters should use refcount_t rather than atomic_t,
because the refcount_t implementation can prevent overflows
and detect possible use-after-free.
So convert these atomic_t ref counters to refcount_t.

Signed-off-by: Chuhong Yuan <hslester96@gmail.com>
---
 arch/powerpc/mm/book3s64/mmu_context.c | 2 +-
 arch/powerpc/mm/book3s64/pgtable.c     | 7 +++----
 arch/powerpc/mm/pgtable-frag.c         | 9 ++++-----
 include/linux/mm_types.h               | 3 ++-
 4 files changed, 10 insertions(+), 11 deletions(-)
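
To make the stated rationale concrete, below is a minimal userspace
model of the documented refcount_t semantics: saturate instead of
wrapping on overflow, and refuse to hit zero again on underflow. It
is only an illustration (single-threaded, with invented names), not
the kernel's lockless implementation:

#include <limits.h>
#include <stdbool.h>
#include <stdio.h>

#define REFCOUNT_SATURATED	(INT_MIN / 2)

struct refcount_model { int refs; };

/* An atomic_t-style increment would silently wrap at INT_MAX;
 * a refcount_t pins itself to a poison value instead. */
static void model_inc(struct refcount_model *r)
{
	if (r->refs < 0 || r->refs == INT_MAX) {
		r->refs = REFCOUNT_SATURATED;
		fprintf(stderr, "refcount: saturated on inc\n");
		return;
	}
	r->refs++;
}

/* Returns true only on the final put; a decrement of an already
 * zero or saturated counter pins the value and warns. */
static bool model_dec_and_test(struct refcount_model *r)
{
	if (r->refs <= 0) {
		r->refs = REFCOUNT_SATURATED;
		fprintf(stderr, "refcount: underflow on dec\n");
		return false;
	}
	return --r->refs == 0;
}

Because a saturated counter can never reach zero again, a corrupted
reference count cannot trigger a second free; that is the
use-after-free protection the commit message refers to.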

Comments

Michael Ellerman Aug. 9, 2019, 12:36 p.m. UTC | #1
Chuhong Yuan <hslester96@gmail.com> writes:
> Reference counters should use refcount_t rather than atomic_t,
> because the refcount_t implementation can prevent overflows
> and detect possible use-after-free.
> So convert these atomic_t ref counters to refcount_t.
>
> Signed-off-by: Chuhong Yuan <hslester96@gmail.com>

Thanks.

We don't have a fast implementation of refcount_t, so I'm worried this
could cause a measurable performance regression.

Did you benchmark it at all?

cheers

> diff --git a/arch/powerpc/mm/book3s64/mmu_context.c b/arch/powerpc/mm/book3s64/mmu_context.c
> index 2d0cb5ba9a47..f836fd5a6abc 100644
> --- a/arch/powerpc/mm/book3s64/mmu_context.c
> +++ b/arch/powerpc/mm/book3s64/mmu_context.c
> @@ -231,7 +231,7 @@ static void pmd_frag_destroy(void *pmd_frag)
>  	/* drop all the pending references */
>  	count = ((unsigned long)pmd_frag & ~PAGE_MASK) >> PMD_FRAG_SIZE_SHIFT;
>  	/* We allow PTE_FRAG_NR fragments from a PTE page */
> -	if (atomic_sub_and_test(PMD_FRAG_NR - count, &page->pt_frag_refcount)) {
> +	if (refcount_sub_and_test(PMD_FRAG_NR - count, &page->pt_frag_refcount)) {
>  		pgtable_pmd_page_dtor(page);
>  		__free_page(page);
>  	}
> diff --git a/arch/powerpc/mm/book3s64/pgtable.c b/arch/powerpc/mm/book3s64/pgtable.c
> index 7d0e0d0d22c4..40056896ce4e 100644
> --- a/arch/powerpc/mm/book3s64/pgtable.c
> +++ b/arch/powerpc/mm/book3s64/pgtable.c
> @@ -277,7 +277,7 @@ static pmd_t *__alloc_for_pmdcache(struct mm_struct *mm)
>  		return NULL;
>  	}
>  
> -	atomic_set(&page->pt_frag_refcount, 1);
> +	refcount_set(&page->pt_frag_refcount, 1);
>  
>  	ret = page_address(page);
>  	/*
> @@ -294,7 +294,7 @@ static pmd_t *__alloc_for_pmdcache(struct mm_struct *mm)
>  	 * count.
>  	 */
>  	if (likely(!mm->context.pmd_frag)) {
> -		atomic_set(&page->pt_frag_refcount, PMD_FRAG_NR);
> +		refcount_set(&page->pt_frag_refcount, PMD_FRAG_NR);
>  		mm->context.pmd_frag = ret + PMD_FRAG_SIZE;
>  	}
>  	spin_unlock(&mm->page_table_lock);
> @@ -317,8 +317,7 @@ void pmd_fragment_free(unsigned long *pmd)
>  {
>  	struct page *page = virt_to_page(pmd);
>  
> -	BUG_ON(atomic_read(&page->pt_frag_refcount) <= 0);
> -	if (atomic_dec_and_test(&page->pt_frag_refcount)) {
> +	if (refcount_dec_and_test(&page->pt_frag_refcount)) {
>  		pgtable_pmd_page_dtor(page);
>  		__free_page(page);
>  	}
> diff --git a/arch/powerpc/mm/pgtable-frag.c b/arch/powerpc/mm/pgtable-frag.c
> index a7b05214760c..4ef8231b677f 100644
> --- a/arch/powerpc/mm/pgtable-frag.c
> +++ b/arch/powerpc/mm/pgtable-frag.c
> @@ -24,7 +24,7 @@ void pte_frag_destroy(void *pte_frag)
>  	/* drop all the pending references */
>  	count = ((unsigned long)pte_frag & ~PAGE_MASK) >> PTE_FRAG_SIZE_SHIFT;
>  	/* We allow PTE_FRAG_NR fragments from a PTE page */
> -	if (atomic_sub_and_test(PTE_FRAG_NR - count, &page->pt_frag_refcount)) {
> +	if (refcount_sub_and_test(PTE_FRAG_NR - count, &page->pt_frag_refcount)) {
>  		pgtable_page_dtor(page);
>  		__free_page(page);
>  	}
> @@ -71,7 +71,7 @@ static pte_t *__alloc_for_ptecache(struct mm_struct *mm, int kernel)
>  			return NULL;
>  	}
>  
> -	atomic_set(&page->pt_frag_refcount, 1);
> +	refcount_set(&page->pt_frag_refcount, 1);
>  
>  	ret = page_address(page);
>  	/*
> @@ -87,7 +87,7 @@ static pte_t *__alloc_for_ptecache(struct mm_struct *mm, int kernel)
>  	 * count.
>  	 */
>  	if (likely(!pte_frag_get(&mm->context))) {
> -		atomic_set(&page->pt_frag_refcount, PTE_FRAG_NR);
> +		refcount_set(&page->pt_frag_refcount, PTE_FRAG_NR);
>  		pte_frag_set(&mm->context, ret + PTE_FRAG_SIZE);
>  	}
>  	spin_unlock(&mm->page_table_lock);
> @@ -110,8 +110,7 @@ void pte_fragment_free(unsigned long *table, int kernel)
>  {
>  	struct page *page = virt_to_page(table);
>  
> -	BUG_ON(atomic_read(&page->pt_frag_refcount) <= 0);
> -	if (atomic_dec_and_test(&page->pt_frag_refcount)) {
> +	if (refcount_dec_and_test(&page->pt_frag_refcount)) {
>  		if (!kernel)
>  			pgtable_page_dtor(page);
>  		__free_page(page);
> diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
> index 3a37a89eb7a7..7fe23a3faf95 100644
> --- a/include/linux/mm_types.h
> +++ b/include/linux/mm_types.h
> @@ -14,6 +14,7 @@
>  #include <linux/uprobes.h>
>  #include <linux/page-flags-layout.h>
>  #include <linux/workqueue.h>
> +#include <linux/refcount.h>
>  
>  #include <asm/mmu.h>
>  
> @@ -147,7 +148,7 @@ struct page {
>  			unsigned long _pt_pad_2;	/* mapping */
>  			union {
>  				struct mm_struct *pt_mm; /* x86 pgds only */
> -				atomic_t pt_frag_refcount; /* powerpc */
> +				refcount_t pt_frag_refcount; /* powerpc */
>  			};
>  #if ALLOC_SPLIT_PTLOCKS
>  			spinlock_t *ptl;
> -- 
> 2.20.1
Chuhong Yuan Aug. 10, 2019, 1:25 a.m. UTC | #2
On Fri, Aug 9, 2019 at 8:36 PM Michael Ellerman <mpe@ellerman.id.au> wrote:
>
> Chuhong Yuan <hslester96@gmail.com> writes:
> > Reference counters should use refcount_t rather than atomic_t,
> > because the refcount_t implementation can prevent overflows
> > and detect possible use-after-free.
> > So convert these atomic_t ref counters to refcount_t.
> >
> > Signed-off-by: Chuhong Yuan <hslester96@gmail.com>
>
> Thanks.
>
> We don't have a fast implementation of refcount_t, so I'm worried this
> could cause a measurable performance regression.
>
> Did you benchmark it at all?
>

I did not benchmark it, and I don't have a testing environment...
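
A first answer could come from a userspace microbenchmark of the same
shape, before setting up a kernel test. The checked increment below is
a stand-in modelled on a refcount-style increment, not the kernel
code, and absolute numbers are machine-specific:

#include <stdatomic.h>
#include <stdio.h>
#include <time.h>

#define ITERS 100000000UL

static atomic_int counter;

/* Plain atomic increment: the atomic_t baseline. */
static void plain_inc(atomic_int *c)
{
	atomic_fetch_add_explicit(c, 1, memory_order_relaxed);
}

/* The same increment plus a refcount-style sanity check. */
static void checked_inc(atomic_int *c)
{
	int old = atomic_fetch_add_explicit(c, 1, memory_order_relaxed);

	if (__builtin_expect(old <= 0, 0))
		fprintf(stderr, "refcount misuse\n");
}

static double run(void (*fn)(atomic_int *))
{
	struct timespec a, b;

	atomic_store(&counter, 1);
	clock_gettime(CLOCK_MONOTONIC, &a);
	for (unsigned long i = 0; i < ITERS; i++)
		fn(&counter);
	clock_gettime(CLOCK_MONOTONIC, &b);
	return (b.tv_sec - a.tv_sec) + (b.tv_nsec - a.tv_nsec) / 1e9;
}

int main(void)
{
	printf("plain:   %.2fs\n", run(plain_inc));
	printf("checked: %.2fs\n", run(checked_inc));
	return 0;
}
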

> cheers
>
> > diff --git a/arch/powerpc/mm/book3s64/mmu_context.c b/arch/powerpc/mm/book3s64/mmu_context.c
> > index 2d0cb5ba9a47..f836fd5a6abc 100644
> > --- a/arch/powerpc/mm/book3s64/mmu_context.c
> > +++ b/arch/powerpc/mm/book3s64/mmu_context.c
> > @@ -231,7 +231,7 @@ static void pmd_frag_destroy(void *pmd_frag)
> >       /* drop all the pending references */
> >       count = ((unsigned long)pmd_frag & ~PAGE_MASK) >> PMD_FRAG_SIZE_SHIFT;
> >       /* We allow PTE_FRAG_NR fragments from a PTE page */
> > -     if (atomic_sub_and_test(PMD_FRAG_NR - count, &page->pt_frag_refcount)) {
> > +     if (refcount_sub_and_test(PMD_FRAG_NR - count, &page->pt_frag_refcount)) {
> >               pgtable_pmd_page_dtor(page);
> >               __free_page(page);
> >       }
> > diff --git a/arch/powerpc/mm/book3s64/pgtable.c b/arch/powerpc/mm/book3s64/pgtable.c
> > index 7d0e0d0d22c4..40056896ce4e 100644
> > --- a/arch/powerpc/mm/book3s64/pgtable.c
> > +++ b/arch/powerpc/mm/book3s64/pgtable.c
> > @@ -277,7 +277,7 @@ static pmd_t *__alloc_for_pmdcache(struct mm_struct *mm)
> >               return NULL;
> >       }
> >
> > -     atomic_set(&page->pt_frag_refcount, 1);
> > +     refcount_set(&page->pt_frag_refcount, 1);
> >
> >       ret = page_address(page);
> >       /*
> > @@ -294,7 +294,7 @@ static pmd_t *__alloc_for_pmdcache(struct mm_struct *mm)
> >        * count.
> >        */
> >       if (likely(!mm->context.pmd_frag)) {
> > -             atomic_set(&page->pt_frag_refcount, PMD_FRAG_NR);
> > +             refcount_set(&page->pt_frag_refcount, PMD_FRAG_NR);
> >               mm->context.pmd_frag = ret + PMD_FRAG_SIZE;
> >       }
> >       spin_unlock(&mm->page_table_lock);
> > @@ -317,8 +317,7 @@ void pmd_fragment_free(unsigned long *pmd)
> >  {
> >       struct page *page = virt_to_page(pmd);
> >
> > -     BUG_ON(atomic_read(&page->pt_frag_refcount) <= 0);
> > -     if (atomic_dec_and_test(&page->pt_frag_refcount)) {
> > +     if (refcount_dec_and_test(&page->pt_frag_refcount)) {
> >               pgtable_pmd_page_dtor(page);
> >               __free_page(page);
> >       }
> > diff --git a/arch/powerpc/mm/pgtable-frag.c b/arch/powerpc/mm/pgtable-frag.c
> > index a7b05214760c..4ef8231b677f 100644
> > --- a/arch/powerpc/mm/pgtable-frag.c
> > +++ b/arch/powerpc/mm/pgtable-frag.c
> > @@ -24,7 +24,7 @@ void pte_frag_destroy(void *pte_frag)
> >       /* drop all the pending references */
> >       count = ((unsigned long)pte_frag & ~PAGE_MASK) >> PTE_FRAG_SIZE_SHIFT;
> >       /* We allow PTE_FRAG_NR fragments from a PTE page */
> > -     if (atomic_sub_and_test(PTE_FRAG_NR - count, &page->pt_frag_refcount)) {
> > +     if (refcount_sub_and_test(PTE_FRAG_NR - count, &page->pt_frag_refcount)) {
> >               pgtable_page_dtor(page);
> >               __free_page(page);
> >       }
> > @@ -71,7 +71,7 @@ static pte_t *__alloc_for_ptecache(struct mm_struct *mm, int kernel)
> >                       return NULL;
> >       }
> >
> > -     atomic_set(&page->pt_frag_refcount, 1);
> > +     refcount_set(&page->pt_frag_refcount, 1);
> >
> >       ret = page_address(page);
> >       /*
> > @@ -87,7 +87,7 @@ static pte_t *__alloc_for_ptecache(struct mm_struct *mm, int kernel)
> >        * count.
> >        */
> >       if (likely(!pte_frag_get(&mm->context))) {
> > -             atomic_set(&page->pt_frag_refcount, PTE_FRAG_NR);
> > +             refcount_set(&page->pt_frag_refcount, PTE_FRAG_NR);
> >               pte_frag_set(&mm->context, ret + PTE_FRAG_SIZE);
> >       }
> >       spin_unlock(&mm->page_table_lock);
> > @@ -110,8 +110,7 @@ void pte_fragment_free(unsigned long *table, int kernel)
> >  {
> >       struct page *page = virt_to_page(table);
> >
> > -     BUG_ON(atomic_read(&page->pt_frag_refcount) <= 0);
> > -     if (atomic_dec_and_test(&page->pt_frag_refcount)) {
> > +     if (refcount_dec_and_test(&page->pt_frag_refcount)) {
> >               if (!kernel)
> >                       pgtable_page_dtor(page);
> >               __free_page(page);
> > diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
> > index 3a37a89eb7a7..7fe23a3faf95 100644
> > --- a/include/linux/mm_types.h
> > +++ b/include/linux/mm_types.h
> > @@ -14,6 +14,7 @@
> >  #include <linux/uprobes.h>
> >  #include <linux/page-flags-layout.h>
> >  #include <linux/workqueue.h>
> > +#include <linux/refcount.h>
> >
> >  #include <asm/mmu.h>
> >
> > @@ -147,7 +148,7 @@ struct page {
> >                       unsigned long _pt_pad_2;        /* mapping */
> >                       union {
> >                               struct mm_struct *pt_mm; /* x86 pgds only */
> > -                             atomic_t pt_frag_refcount; /* powerpc */
> > +                             refcount_t pt_frag_refcount; /* powerpc */
> >                       };
> >  #if ALLOC_SPLIT_PTLOCKS
> >                       spinlock_t *ptl;
> > --
> > 2.20.1
Christophe Leroy Nov. 26, 2021, 5:55 p.m. UTC | #3
On 09/08/2019 at 14:36, Michael Ellerman wrote:
> Chuhong Yuan <hslester96@gmail.com> writes:
>> Reference counters should use refcount_t rather than atomic_t,
>> because the refcount_t implementation can prevent overflows
>> and detect possible use-after-free.
>> So convert these atomic_t ref counters to refcount_t.
>>
>> Signed-off-by: Chuhong Yuan <hslester96@gmail.com>
> 
> Thanks.
> 
> We don't have a fast implementation of refcount_t, so I'm worried this
> could cause a measurable performance regression.

The fast implementations have been removed by commit
https://github.com/linuxppc/linux/commit/fb041bb7c0a918b95c6889fc965cdc4a75b4c0ca

The generic implementation is now considered good enough for
everybody.
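
For reference, the checked increment in that generic implementation
has roughly the following shape: one relaxed atomic fetch-add plus two
normally-untaken branches, rather than a cmpxchg retry loop. This is a
simplified sketch, not the exact upstream source:

#include <linux/atomic.h>
#include <linux/refcount.h>

static inline void sketch_refcount_inc(refcount_t *r)
{
	/* One atomic RMW, the same cost as atomic_inc()... */
	int old = atomic_fetch_add_relaxed(1, &r->refs);

	/* ...plus cheap checks the branch predictor hides. */
	if (unlikely(old == 0))
		refcount_warn_saturate(r, REFCOUNT_ADD_UAF);
	else if (unlikely(old < 0))
		refcount_warn_saturate(r, REFCOUNT_ADD_OVF);
}

The extra cost over a bare atomic increment is therefore a couple of
predictable branches, which is the basis of the claim above.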

However, this series doesn't apply anymore and needs a rebase:

Applying: powerpc/mm: Use refcount_t for refcount
Using index info to reconstruct a base tree...
M	arch/powerpc/mm/book3s64/mmu_context.c
M	arch/powerpc/mm/book3s64/pgtable.c
M	arch/powerpc/mm/pgtable-frag.c
M	include/linux/mm_types.h
Falling back to patching base and 3-way merge...
Auto-merging include/linux/mm_types.h
CONFLICT (content): Merge conflict in include/linux/mm_types.h
Auto-merging arch/powerpc/mm/pgtable-frag.c
CONFLICT (content): Merge conflict in arch/powerpc/mm/pgtable-frag.c
Auto-merging arch/powerpc/mm/book3s64/pgtable.c
CONFLICT (content): Merge conflict in arch/powerpc/mm/book3s64/pgtable.c
Auto-merging arch/powerpc/mm/book3s64/mmu_context.c
Patch failed at 0001 powerpc/mm: Use refcount_t for refcount
When you have resolved this problem, run "git am --continue".
If you prefer to skip this patch, run "git am --skip" instead.
To restore the original branch and stop patching, run "git am --abort".

Thanks
Christophe


> 
> Did you benchmark it at all?
> 
> cheers
> 
>> diff --git a/arch/powerpc/mm/book3s64/mmu_context.c b/arch/powerpc/mm/book3s64/mmu_context.c
>> index 2d0cb5ba9a47..f836fd5a6abc 100644
>> --- a/arch/powerpc/mm/book3s64/mmu_context.c
>> +++ b/arch/powerpc/mm/book3s64/mmu_context.c
>> @@ -231,7 +231,7 @@ static void pmd_frag_destroy(void *pmd_frag)
>>   	/* drop all the pending references */
>>   	count = ((unsigned long)pmd_frag & ~PAGE_MASK) >> PMD_FRAG_SIZE_SHIFT;
>>   	/* We allow PTE_FRAG_NR fragments from a PTE page */
>> -	if (atomic_sub_and_test(PMD_FRAG_NR - count, &page->pt_frag_refcount)) {
>> +	if (refcount_sub_and_test(PMD_FRAG_NR - count, &page->pt_frag_refcount)) {
>>   		pgtable_pmd_page_dtor(page);
>>   		__free_page(page);
>>   	}
>> diff --git a/arch/powerpc/mm/book3s64/pgtable.c b/arch/powerpc/mm/book3s64/pgtable.c
>> index 7d0e0d0d22c4..40056896ce4e 100644
>> --- a/arch/powerpc/mm/book3s64/pgtable.c
>> +++ b/arch/powerpc/mm/book3s64/pgtable.c
>> @@ -277,7 +277,7 @@ static pmd_t *__alloc_for_pmdcache(struct mm_struct *mm)
>>   		return NULL;
>>   	}
>>   
>> -	atomic_set(&page->pt_frag_refcount, 1);
>> +	refcount_set(&page->pt_frag_refcount, 1);
>>   
>>   	ret = page_address(page);
>>   	/*
>> @@ -294,7 +294,7 @@ static pmd_t *__alloc_for_pmdcache(struct mm_struct *mm)
>>   	 * count.
>>   	 */
>>   	if (likely(!mm->context.pmd_frag)) {
>> -		atomic_set(&page->pt_frag_refcount, PMD_FRAG_NR);
>> +		refcount_set(&page->pt_frag_refcount, PMD_FRAG_NR);
>>   		mm->context.pmd_frag = ret + PMD_FRAG_SIZE;
>>   	}
>>   	spin_unlock(&mm->page_table_lock);
>> @@ -317,8 +317,7 @@ void pmd_fragment_free(unsigned long *pmd)
>>   {
>>   	struct page *page = virt_to_page(pmd);
>>   
>> -	BUG_ON(atomic_read(&page->pt_frag_refcount) <= 0);
>> -	if (atomic_dec_and_test(&page->pt_frag_refcount)) {
>> +	if (refcount_dec_and_test(&page->pt_frag_refcount)) {
>>   		pgtable_pmd_page_dtor(page);
>>   		__free_page(page);
>>   	}
>> diff --git a/arch/powerpc/mm/pgtable-frag.c b/arch/powerpc/mm/pgtable-frag.c
>> index a7b05214760c..4ef8231b677f 100644
>> --- a/arch/powerpc/mm/pgtable-frag.c
>> +++ b/arch/powerpc/mm/pgtable-frag.c
>> @@ -24,7 +24,7 @@ void pte_frag_destroy(void *pte_frag)
>>   	/* drop all the pending references */
>>   	count = ((unsigned long)pte_frag & ~PAGE_MASK) >> PTE_FRAG_SIZE_SHIFT;
>>   	/* We allow PTE_FRAG_NR fragments from a PTE page */
>> -	if (atomic_sub_and_test(PTE_FRAG_NR - count, &page->pt_frag_refcount)) {
>> +	if (refcount_sub_and_test(PTE_FRAG_NR - count, &page->pt_frag_refcount)) {
>>   		pgtable_page_dtor(page);
>>   		__free_page(page);
>>   	}
>> @@ -71,7 +71,7 @@ static pte_t *__alloc_for_ptecache(struct mm_struct *mm, int kernel)
>>   			return NULL;
>>   	}
>>   
>> -	atomic_set(&page->pt_frag_refcount, 1);
>> +	refcount_set(&page->pt_frag_refcount, 1);
>>   
>>   	ret = page_address(page);
>>   	/*
>> @@ -87,7 +87,7 @@ static pte_t *__alloc_for_ptecache(struct mm_struct *mm, int kernel)
>>   	 * count.
>>   	 */
>>   	if (likely(!pte_frag_get(&mm->context))) {
>> -		atomic_set(&page->pt_frag_refcount, PTE_FRAG_NR);
>> +		refcount_set(&page->pt_frag_refcount, PTE_FRAG_NR);
>>   		pte_frag_set(&mm->context, ret + PTE_FRAG_SIZE);
>>   	}
>>   	spin_unlock(&mm->page_table_lock);
>> @@ -110,8 +110,7 @@ void pte_fragment_free(unsigned long *table, int kernel)
>>   {
>>   	struct page *page = virt_to_page(table);
>>   
>> -	BUG_ON(atomic_read(&page->pt_frag_refcount) <= 0);
>> -	if (atomic_dec_and_test(&page->pt_frag_refcount)) {
>> +	if (refcount_dec_and_test(&page->pt_frag_refcount)) {
>>   		if (!kernel)
>>   			pgtable_page_dtor(page);
>>   		__free_page(page);
>> diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
>> index 3a37a89eb7a7..7fe23a3faf95 100644
>> --- a/include/linux/mm_types.h
>> +++ b/include/linux/mm_types.h
>> @@ -14,6 +14,7 @@
>>   #include <linux/uprobes.h>
>>   #include <linux/page-flags-layout.h>
>>   #include <linux/workqueue.h>
>> +#include <linux/refcount.h>
>>   
>>   #include <asm/mmu.h>
>>   
>> @@ -147,7 +148,7 @@ struct page {
>>   			unsigned long _pt_pad_2;	/* mapping */
>>   			union {
>>   				struct mm_struct *pt_mm; /* x86 pgds only */
>> -				atomic_t pt_frag_refcount; /* powerpc */
>> +				refcount_t pt_frag_refcount; /* powerpc */
>>   			};
>>   #if ALLOC_SPLIT_PTLOCKS
>>   			spinlock_t *ptl;
>> -- 
>> 2.20.1

Patch

diff --git a/arch/powerpc/mm/book3s64/mmu_context.c b/arch/powerpc/mm/book3s64/mmu_context.c
index 2d0cb5ba9a47..f836fd5a6abc 100644
--- a/arch/powerpc/mm/book3s64/mmu_context.c
+++ b/arch/powerpc/mm/book3s64/mmu_context.c
@@ -231,7 +231,7 @@  static void pmd_frag_destroy(void *pmd_frag)
 	/* drop all the pending references */
 	count = ((unsigned long)pmd_frag & ~PAGE_MASK) >> PMD_FRAG_SIZE_SHIFT;
 	/* We allow PTE_FRAG_NR fragments from a PTE page */
-	if (atomic_sub_and_test(PMD_FRAG_NR - count, &page->pt_frag_refcount)) {
+	if (refcount_sub_and_test(PMD_FRAG_NR - count, &page->pt_frag_refcount)) {
 		pgtable_pmd_page_dtor(page);
 		__free_page(page);
 	}
diff --git a/arch/powerpc/mm/book3s64/pgtable.c b/arch/powerpc/mm/book3s64/pgtable.c
index 7d0e0d0d22c4..40056896ce4e 100644
--- a/arch/powerpc/mm/book3s64/pgtable.c
+++ b/arch/powerpc/mm/book3s64/pgtable.c
@@ -277,7 +277,7 @@  static pmd_t *__alloc_for_pmdcache(struct mm_struct *mm)
 		return NULL;
 	}
 
-	atomic_set(&page->pt_frag_refcount, 1);
+	refcount_set(&page->pt_frag_refcount, 1);
 
 	ret = page_address(page);
 	/*
@@ -294,7 +294,7 @@  static pmd_t *__alloc_for_pmdcache(struct mm_struct *mm)
 	 * count.
 	 */
 	if (likely(!mm->context.pmd_frag)) {
-		atomic_set(&page->pt_frag_refcount, PMD_FRAG_NR);
+		refcount_set(&page->pt_frag_refcount, PMD_FRAG_NR);
 		mm->context.pmd_frag = ret + PMD_FRAG_SIZE;
 	}
 	spin_unlock(&mm->page_table_lock);
@@ -317,8 +317,7 @@  void pmd_fragment_free(unsigned long *pmd)
 {
 	struct page *page = virt_to_page(pmd);
 
-	BUG_ON(atomic_read(&page->pt_frag_refcount) <= 0);
-	if (atomic_dec_and_test(&page->pt_frag_refcount)) {
+	if (refcount_dec_and_test(&page->pt_frag_refcount)) {
 		pgtable_pmd_page_dtor(page);
 		__free_page(page);
 	}
diff --git a/arch/powerpc/mm/pgtable-frag.c b/arch/powerpc/mm/pgtable-frag.c
index a7b05214760c..4ef8231b677f 100644
--- a/arch/powerpc/mm/pgtable-frag.c
+++ b/arch/powerpc/mm/pgtable-frag.c
@@ -24,7 +24,7 @@  void pte_frag_destroy(void *pte_frag)
 	/* drop all the pending references */
 	count = ((unsigned long)pte_frag & ~PAGE_MASK) >> PTE_FRAG_SIZE_SHIFT;
 	/* We allow PTE_FRAG_NR fragments from a PTE page */
-	if (atomic_sub_and_test(PTE_FRAG_NR - count, &page->pt_frag_refcount)) {
+	if (refcount_sub_and_test(PTE_FRAG_NR - count, &page->pt_frag_refcount)) {
 		pgtable_page_dtor(page);
 		__free_page(page);
 	}
@@ -71,7 +71,7 @@  static pte_t *__alloc_for_ptecache(struct mm_struct *mm, int kernel)
 			return NULL;
 	}
 
-	atomic_set(&page->pt_frag_refcount, 1);
+	refcount_set(&page->pt_frag_refcount, 1);
 
 	ret = page_address(page);
 	/*
@@ -87,7 +87,7 @@  static pte_t *__alloc_for_ptecache(struct mm_struct *mm, int kernel)
 	 * count.
 	 */
 	if (likely(!pte_frag_get(&mm->context))) {
-		atomic_set(&page->pt_frag_refcount, PTE_FRAG_NR);
+		refcount_set(&page->pt_frag_refcount, PTE_FRAG_NR);
 		pte_frag_set(&mm->context, ret + PTE_FRAG_SIZE);
 	}
 	spin_unlock(&mm->page_table_lock);
@@ -110,8 +110,7 @@  void pte_fragment_free(unsigned long *table, int kernel)
 {
 	struct page *page = virt_to_page(table);
 
-	BUG_ON(atomic_read(&page->pt_frag_refcount) <= 0);
-	if (atomic_dec_and_test(&page->pt_frag_refcount)) {
+	if (refcount_dec_and_test(&page->pt_frag_refcount)) {
 		if (!kernel)
 			pgtable_page_dtor(page);
 		__free_page(page);
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 3a37a89eb7a7..7fe23a3faf95 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -14,6 +14,7 @@ 
 #include <linux/uprobes.h>
 #include <linux/page-flags-layout.h>
 #include <linux/workqueue.h>
+#include <linux/refcount.h>
 
 #include <asm/mmu.h>
 
@@ -147,7 +148,7 @@  struct page {
 			unsigned long _pt_pad_2;	/* mapping */
 			union {
 				struct mm_struct *pt_mm; /* x86 pgds only */
-				atomic_t pt_frag_refcount; /* powerpc */
+				refcount_t pt_frag_refcount; /* powerpc */
 			};
 #if ALLOC_SPLIT_PTLOCKS
 			spinlock_t *ptl;
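
As background for what pt_frag_refcount is counting in the patch
above: each page-table page is carved into PTE_FRAG_NR (or
PMD_FRAG_NR) fragments, and the counter holds one reference per
fragment not yet freed. Below is a toy single-threaded model of the
scheme, with illustrative names rather than the kernel's:

#include <stdbool.h>

#define FRAG_NR 4	/* stands in for PTE_FRAG_NR */

struct frag_page {
	int refs;	/* models pt_frag_refcount */
};

/* A page kept for fragment allocation claims references for all
 * fragments up front, mirroring refcount_set(..., PTE_FRAG_NR)
 * in the patch. */
static void frag_page_init(struct frag_page *p)
{
	p->refs = FRAG_NR;
}

/* Freeing one fragment drops one reference; the last drop tells
 * the caller to free the backing page, mirroring
 * refcount_dec_and_test(). */
static bool frag_free(struct frag_page *p)
{
	return --p->refs == 0;
}

/* Context teardown with 'handed' fragments already given out:
 * drop the never-handed-out remainder in one go, mirroring
 * refcount_sub_and_test(FRAG_NR - handed, ...). */
static bool frag_destroy(struct frag_page *p, int handed)
{
	p->refs -= FRAG_NR - handed;
	return p->refs == 0;
}

Fragments still outstanding after frag_destroy() are released later
through frag_free(), and whichever drop reaches zero frees the page.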