powerpc/mm: numa pte should be handled via slow path in get_user_pages_fast

Message ID: 1396454859-5274-1-git-send-email-aneesh.kumar@linux.vnet.ibm.com (mailing list archive)
State: Accepted
Delegated to: Benjamin Herrenschmidt

Commit Message

Aneesh Kumar K.V April 2, 2014, 4:07 p.m. UTC
From: "Aneesh Kumar K.V" <aneesh.kumar@linux.vnet.ibm.com>

We need to handle NUMA ptes via the slow path

Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
---
 arch/powerpc/mm/gup.c | 13 +++++++++++++
 1 file changed, 13 insertions(+)
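
For context, the reason bailing out with "return 0" is sufficient: the lockless walker reports a short count to its caller, and get_user_pages_fast() then hands the remainder to the regular, mmap_sem-protected get_user_pages(), which can run handle_mm_fault() and so service the NUMA hinting fault. Below is a minimal sketch of that caller-side fallback pattern, simplified from the 3.14-era code (the lockless walk itself is elided; treat details as illustrative rather than the exact powerpc implementation):

int get_user_pages_fast(unsigned long start, int nr_pages, int write,
			struct page **pages)
{
	struct mm_struct *mm = current->mm;
	int nr = 0;

	/*
	 * ... lockless, IRQs-off walk of the page tables goes here,
	 * incrementing nr for each page it pins; any entry it cannot
	 * handle (now including pmd_numa/pte_numa entries) makes it
	 * stop early ...
	 */

	if (nr < nr_pages) {
		int ret;

		/* Slow path: takes mmap_sem and may fault pages in. */
		down_read(&mm->mmap_sem);
		ret = get_user_pages(current, mm,
				     start + (nr << PAGE_SHIFT),
				     nr_pages - nr, write, 0,
				     pages + nr, NULL);
		up_read(&mm->mmap_sem);

		/* Combine the two counts, being careful about errors. */
		if (ret < 0)
			return nr ? nr : ret;
		return nr + ret;
	}
	return nr;
}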

Comments

Benjamin Herrenschmidt April 6, 2014, 11:49 p.m. UTC | #1
On Wed, 2014-04-02 at 21:37 +0530, Aneesh Kumar K.V wrote:
> From: "Aneesh Kumar K.V" <aneesh.kumar@linux.vnet.ibm.com>
> 
> We need to handle NUMA ptes via the slow path

Is this -stable material ? If yes how far back ?

Cheers,
Ben.

Aneesh Kumar K.V April 7, 2014, 7:38 a.m. UTC | #2
Benjamin Herrenschmidt <benh@kernel.crashing.org> writes:

> On Wed, 2014-04-02 at 21:37 +0530, Aneesh Kumar K.V wrote:
>> From: "Aneesh Kumar K.V" <aneesh.kumar@linux.vnet.ibm.com>
>> 
>> We need to handle NUMA ptes via the slow path
>
> Is this -stable material ? If yes how far back ?

I am not sure we really need to backport this. We got the NUMA faulting bits
in 3.14. Currently there are two outstanding patches: this one, and
http://mid.gmane.org/1390292129-15871-1-git-send-email-pingfank@linux.vnet.ibm.com

powernv: kvm: make _PAGE_NUMA take effect

I would not consider either of them critical enough to affect the general
user of the 3.14 kernel.

-aneesh


Patch

diff --git a/arch/powerpc/mm/gup.c b/arch/powerpc/mm/gup.c
index c5f734e20b0f..d8746684f606 100644
--- a/arch/powerpc/mm/gup.c
+++ b/arch/powerpc/mm/gup.c
@@ -36,6 +36,11 @@ static noinline int gup_pte_range(pmd_t pmd, unsigned long addr,
 	do {
 		pte_t pte = ACCESS_ONCE(*ptep);
 		struct page *page;
+		/*
+		 * Similar to the PMD case, NUMA hinting must take slow path
+		 */
+		if (pte_numa(pte))
+			return 0;
 
 		if ((pte_val(pte) & mask) != result)
 			return 0;
@@ -75,6 +80,14 @@ static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end,
 		if (pmd_none(pmd) || pmd_trans_splitting(pmd))
 			return 0;
 		if (pmd_huge(pmd) || pmd_large(pmd)) {
+			/*
+			 * NUMA hinting faults need to be handled in the GUP
+			 * slowpath for accounting purposes and so that they
+			 * can be serialised against THP migration.
+			 */
+			if (pmd_numa(pmd))
+				return 0;
+
 			if (!gup_hugepte((pte_t *)pmdp, PMD_SIZE, addr, next,
 					 write, pages, nr))
 				return 0;
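
As background on what the new checks are filtering: a NUMA hinting entry is a mapping whose access permissions the automatic NUMA balancer has temporarily revoked, so that the next hardware access traps and can be accounted (and the page possibly migrated closer to the accessing node). A rough sketch of the test follows, with illustrative flag names rather than the exact powerpc bit layout:

/*
 * Sketch only: the entry still carries a valid pfn, but the
 * present/accessible bit has been traded for a software _PAGE_NUMA
 * bit, so hardware access faults while the kernel still knows the
 * mapping is logically present.  Names here are illustrative
 * assumptions, not the powerpc definitions.
 */
static inline int pte_numa_sketch(pte_t pte)
{
	return (pte_val(pte) & (_PAGE_NUMA | _PAGE_PRESENT)) == _PAGE_NUMA;
}

This is also why the fast path cannot simply pin the page itself: it runs with interrupts disabled and without mmap_sem, so it has no context in which to account the hinting fault or serialise against a THP migration already in flight. Returning 0 defers all of that to the slow path sketched earlier.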