
powerpc/mm/hash/4k: Free hugetlb page table caches correctly.

Message ID 20180614103152.7344-1-aneesh.kumar@linux.ibm.com (mailing list archive)
State Accepted
Commit fadd03c615922d8521a2e76d4ba2335891cb2790
Series powerpc/mm/hash/4k: Free hugetlb page table caches correctly.

Commit Message

Aneesh Kumar K V June 14, 2018, 10:31 a.m. UTC
With a 4k page size, hugetlb hugepage directories are allocated from their own slab
cache. With commit 0c4d26802 ("powerpc/book3s64/mm: Simplify the rcu callback for page table free")
we stopped freeing these allocated hugepd tables.

Update pgtable_free to handle hugetlb hugepd directory tables.

Fixes: 0c4d26802 ("powerpc/book3s64/mm: Simplify the rcu callback for page table free")
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
---
 arch/powerpc/include/asm/book3s/32/pgalloc.h  |  1 +
 .../include/asm/book3s/64/pgtable-4k.h        | 21 +++++++++++++++++++
 .../include/asm/book3s/64/pgtable-64k.h       |  9 ++++++++
 arch/powerpc/include/asm/book3s/64/pgtable.h  |  5 +++++
 arch/powerpc/include/asm/nohash/32/pgalloc.h  |  1 +
 arch/powerpc/include/asm/nohash/64/pgalloc.h  |  1 +
 arch/powerpc/mm/hugetlbpage.c                 |  3 ++-
 arch/powerpc/mm/pgtable-book3s64.c            | 12 +++++++++++
 8 files changed, 52 insertions(+), 1 deletion(-)
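
The underlying rule here is the usual slab pairing: a hugepd directory allocated with kmem_cache_alloc() from its own cache has to be handed back to that same cache with kmem_cache_free(); after 0c4d26802 the generic free path no longer did that for hugepd tables. Below is a minimal sketch of that pairing. It is a generic illustration only, not code from this patch: the cache name, object size, and the hugepd_*_example functions are all made up.

#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/slab.h>

/* Hypothetical cache standing in for PGT_CACHE(H_16M_CACHE_INDEX) */
static struct kmem_cache *hugepd_16m_cache;

static int hugepd_cache_example_init(void)
{
	/* name and size are illustrative; the kernel sizes its page
	 * table caches from the page table geometry */
	hugepd_16m_cache = kmem_cache_create("hugepd-16m-example",
					     16 * sizeof(void *), 0, 0, NULL);
	return hugepd_16m_cache ? 0 : -ENOMEM;
}

static void *hugepd_example_alloc(void)
{
	return kmem_cache_alloc(hugepd_16m_cache, GFP_KERNEL);
}

static void hugepd_example_free(void *hugepd)
{
	/*
	 * The object must go back to the cache it came from; skipping
	 * this step, which is what happened to hugepd tables after
	 * 0c4d26802, leaks the object.
	 */
	kmem_cache_free(hugepd_16m_cache, hugepd);
}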

Comments

Michael Ellerman June 19, 2018, 11:17 a.m. UTC | #1
"Aneesh Kumar K.V" <aneesh.kumar@linux.ibm.com> writes:

> With a 4k page size, hugetlb hugepage directories are allocated from their own slab
> cache. With commit 0c4d26802 ("powerpc/book3s64/mm: Simplify the rcu callback for page table free")
> we stopped freeing these allocated hugepd tables.
>
> Update pgtable_free to handle hugetlb hugepd directory tables.
>
> Fixes: 0c4d26802 ("powerpc/book3s64/mm: Simplify the rcu callback for page table free")
> Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
> ---
>  arch/powerpc/include/asm/book3s/32/pgalloc.h  |  1 +
>  .../include/asm/book3s/64/pgtable-4k.h        | 21 +++++++++++++++++++
>  .../include/asm/book3s/64/pgtable-64k.h       |  9 ++++++++
>  arch/powerpc/include/asm/book3s/64/pgtable.h  |  5 +++++
>  arch/powerpc/include/asm/nohash/32/pgalloc.h  |  1 +
>  arch/powerpc/include/asm/nohash/64/pgalloc.h  |  1 +
>  arch/powerpc/mm/hugetlbpage.c                 |  3 ++-
>  arch/powerpc/mm/pgtable-book3s64.c            | 12 +++++++++++

Fails with 4K=y HUGETLBFS=n:

  arch/powerpc/mm/pgtable-book3s64.c:415:16: error: ‘H_16M_CACHE_INDEX’ undeclared (first use in this function); did you mean ‘H_PUD_CACHE_INDEX’?

...

> diff --git a/arch/powerpc/mm/pgtable-book3s64.c b/arch/powerpc/mm/pgtable-book3s64.c
> index c1f4ca45c93a..468c3d83a2aa 100644
> --- a/arch/powerpc/mm/pgtable-book3s64.c
> +++ b/arch/powerpc/mm/pgtable-book3s64.c
> @@ -409,6 +409,18 @@ static inline void pgtable_free(void *table, int index)
>  	case PUD_INDEX:
>  		kmem_cache_free(PGT_CACHE(PUD_CACHE_INDEX), table);
>  		break;
> +#ifdef CONFIG_PPC_4K_PAGES
> +		/* 16M hugepd directory at pud level */
> +	case HTLB_16M_INDEX:
> +		BUILD_BUG_ON(H_16M_CACHE_INDEX <= 0);
> +		kmem_cache_free(PGT_CACHE(H_16M_CACHE_INDEX), table);
> +		break;
> +		/* 16G hugepd directory at the pgd level */
> +	case HTLB_16G_INDEX:
> +		BUILD_BUG_ON(H_16G_CACHE_INDEX <= 0);
> +		kmem_cache_free(PGT_CACHE(H_16G_CACHE_INDEX), table);
> +		break;
> +#endif

Because this isn't protected by CONFIG_HUGETLBFS.

I assume this is correct?

diff --git a/arch/powerpc/mm/pgtable-book3s64.c b/arch/powerpc/mm/pgtable-book3s64.c
index 468c3d83a2aa..9b7007fd075e 100644
--- a/arch/powerpc/mm/pgtable-book3s64.c
+++ b/arch/powerpc/mm/pgtable-book3s64.c
@@ -409,7 +409,7 @@ static inline void pgtable_free(void *table, int index)
 	case PUD_INDEX:
 		kmem_cache_free(PGT_CACHE(PUD_CACHE_INDEX), table);
 		break;
-#ifdef CONFIG_PPC_4K_PAGES
+#if defined(CONFIG_PPC_4K_PAGES) && defined (CONFIG_HUGETLBFS)
 		/* 16M hugepd directory at pud level */
 	case HTLB_16M_INDEX:
 		BUILD_BUG_ON(H_16M_CACHE_INDEX <= 0);


cheers
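
For context on why the guard matters: in pgtable-4k.h (full hunk in the patch below) the new constants are only defined in the CONFIG_HUGETLB_PAGE branch, so with HUGETLBFS=n the case labels in pgtable_free() reference macros the compiler never sees. A simplified sketch of the header layout (not the literal file contents):

/* arch/powerpc/include/asm/book3s/64/pgtable-4k.h, simplified */
#ifdef CONFIG_HUGETLB_PAGE
/* real pmd_huge()/pud_huge()/hugepd_ok() live here, plus the new bits: */
#define H_16M_CACHE_INDEX (PAGE_SHIFT + H_PTE_INDEX_SIZE + H_PMD_INDEX_SIZE - 24)
#define H_16G_CACHE_INDEX \
	(PAGE_SHIFT + H_PTE_INDEX_SIZE + H_PMD_INDEX_SIZE + H_PUD_INDEX_SIZE - 34)
static inline int get_hugepd_cache_index(int index);	/* -> HTLB_16M/16G_INDEX */
#else /* !CONFIG_HUGETLB_PAGE */
/* stub pmd_huge()/pud_huge(); no H_16M_CACHE_INDEX or H_16G_CACHE_INDEX here */
#endif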
Aneesh Kumar K V June 19, 2018, 1:25 p.m. UTC | #2
On 06/19/2018 04:47 PM, Michael Ellerman wrote:
> "Aneesh Kumar K.V" <aneesh.kumar@linux.ibm.com> writes:
> 
>> With a 4k page size, hugetlb hugepage directories are allocated from their own slab
>> cache. With commit 0c4d26802 ("powerpc/book3s64/mm: Simplify the rcu callback for page table free")
>> we stopped freeing these allocated hugepd tables.
>>
>> Update pgtable_free to handle hugetlb hugepd directory tables.
>>
>> Fixes: 0c4d26802 ("powerpc/book3s64/mm: Simplify the rcu callback for page table free")
>> Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
>> ---
>>   arch/powerpc/include/asm/book3s/32/pgalloc.h  |  1 +
>>   .../include/asm/book3s/64/pgtable-4k.h        | 21 +++++++++++++++++++
>>   .../include/asm/book3s/64/pgtable-64k.h       |  9 ++++++++
>>   arch/powerpc/include/asm/book3s/64/pgtable.h  |  5 +++++
>>   arch/powerpc/include/asm/nohash/32/pgalloc.h  |  1 +
>>   arch/powerpc/include/asm/nohash/64/pgalloc.h  |  1 +
>>   arch/powerpc/mm/hugetlbpage.c                 |  3 ++-
>>   arch/powerpc/mm/pgtable-book3s64.c            | 12 +++++++++++
> 
> Fails with 4K=y HUGETLBFS=n:
> 
>    arch/powerpc/mm/pgtable-book3s64.c:415:16: error: ‘H_16M_CACHE_INDEX’ undeclared (first use in this function); did you mean ‘H_PUD_CACHE_INDEX’?
> 
> ...
> 
>> diff --git a/arch/powerpc/mm/pgtable-book3s64.c b/arch/powerpc/mm/pgtable-book3s64.c
>> index c1f4ca45c93a..468c3d83a2aa 100644
>> --- a/arch/powerpc/mm/pgtable-book3s64.c
>> +++ b/arch/powerpc/mm/pgtable-book3s64.c
>> @@ -409,6 +409,18 @@ static inline void pgtable_free(void *table, int index)
>>   	case PUD_INDEX:
>>   		kmem_cache_free(PGT_CACHE(PUD_CACHE_INDEX), table);
>>   		break;
>> +#ifdef CONFIG_PPC_4K_PAGES
>> +		/* 16M hugepd directory at pud level */
>> +	case HTLB_16M_INDEX:
>> +		BUILD_BUG_ON(H_16M_CACHE_INDEX <= 0);
>> +		kmem_cache_free(PGT_CACHE(H_16M_CACHE_INDEX), table);
>> +		break;
>> +		/* 16G hugepd directory at the pgd level */
>> +	case HTLB_16G_INDEX:
>> +		BUILD_BUG_ON(H_16G_CACHE_INDEX <= 0);
>> +		kmem_cache_free(PGT_CACHE(H_16G_CACHE_INDEX), table);
>> +		break;
>> +#endif
> 
> Because this isn't protected by CONFIG_HUGETLBFS.
> 
> I assume this is correct?
> 
> diff --git a/arch/powerpc/mm/pgtable-book3s64.c b/arch/powerpc/mm/pgtable-book3s64.c
> index 468c3d83a2aa..9b7007fd075e 100644
> --- a/arch/powerpc/mm/pgtable-book3s64.c
> +++ b/arch/powerpc/mm/pgtable-book3s64.c
> @@ -409,7 +409,7 @@ static inline void pgtable_free(void *table, int index)
>   	case PUD_INDEX:
>   		kmem_cache_free(PGT_CACHE(PUD_CACHE_INDEX), table);
>   		break;
> -#ifdef CONFIG_PPC_4K_PAGES
> +#if defined(CONFIG_PPC_4K_PAGES) && defined (CONFIG_HUGETLBFS)
>   		/* 16M hugepd directory at pud level */
>   	case HTLB_16M_INDEX:
>   		BUILD_BUG_ON(H_16M_CACHE_INDEX <= 0);
> 
> 
> cheers
> 

Sorry, I missed that. Can we use #ifdef CONFIG_HUGETLB_PAGE instead? That is what 
we use to protect those definitions in pgtable-4k.h.
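
Something like the following, i.e. the same cases but guarded the way pgtable-4k.h guards the definitions (sketch only, untested, just to illustrate the suggestion):

#if defined(CONFIG_PPC_4K_PAGES) && defined(CONFIG_HUGETLB_PAGE)
		/* 16M hugepd directory at pud level */
	case HTLB_16M_INDEX:
		BUILD_BUG_ON(H_16M_CACHE_INDEX <= 0);
		kmem_cache_free(PGT_CACHE(H_16M_CACHE_INDEX), table);
		break;
		/* 16G hugepd directory at the pgd level */
	case HTLB_16G_INDEX:
		BUILD_BUG_ON(H_16G_CACHE_INDEX <= 0);
		kmem_cache_free(PGT_CACHE(H_16G_CACHE_INDEX), table);
		break;
#endif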

-aneesh
Michael Ellerman June 23, 2018, 12:56 p.m. UTC | #3
On Thu, 2018-06-14 at 10:31:52 UTC, "Aneesh Kumar K.V" wrote:
> With a 4k page size, hugetlb hugepage directories are allocated from their own slab
> cache. With commit 0c4d26802 ("powerpc/book3s64/mm: Simplify the rcu callback for page table free")
> we stopped freeing these allocated hugepd tables.
>
> Update pgtable_free to handle hugetlb hugepd directory tables.
>
> Fixes: 0c4d26802 ("powerpc/book3s64/mm: Simplify the rcu callback for page table free")
> Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>

Applied to powerpc fixes, thanks.

https://git.kernel.org/powerpc/c/fadd03c615922d8521a2e76d4ba233

cheers

Patch

diff --git a/arch/powerpc/include/asm/book3s/32/pgalloc.h b/arch/powerpc/include/asm/book3s/32/pgalloc.h
index 6a6673907e45..e4633803fe43 100644
--- a/arch/powerpc/include/asm/book3s/32/pgalloc.h
+++ b/arch/powerpc/include/asm/book3s/32/pgalloc.h
@@ -108,6 +108,7 @@  static inline void pgtable_free(void *table, unsigned index_size)
 }
 
 #define check_pgt_cache()	do { } while (0)
+#define get_hugepd_cache_index(x)  (x)
 
 #ifdef CONFIG_SMP
 static inline void pgtable_free_tlb(struct mmu_gather *tlb,
diff --git a/arch/powerpc/include/asm/book3s/64/pgtable-4k.h b/arch/powerpc/include/asm/book3s/64/pgtable-4k.h
index af5f2baac80f..a069dfcac9a9 100644
--- a/arch/powerpc/include/asm/book3s/64/pgtable-4k.h
+++ b/arch/powerpc/include/asm/book3s/64/pgtable-4k.h
@@ -49,6 +49,27 @@  static inline int hugepd_ok(hugepd_t hpd)
 }
 #define is_hugepd(hpd)		(hugepd_ok(hpd))
 
+/*
+ * 16M and 16G huge page directory tables are allocated from slab cache
+ *
+ */
+#define H_16M_CACHE_INDEX (PAGE_SHIFT + H_PTE_INDEX_SIZE + H_PMD_INDEX_SIZE - 24)
+#define H_16G_CACHE_INDEX                                                      \
+	(PAGE_SHIFT + H_PTE_INDEX_SIZE + H_PMD_INDEX_SIZE + H_PUD_INDEX_SIZE - 34)
+
+static inline int get_hugepd_cache_index(int index)
+{
+	switch (index) {
+	case H_16M_CACHE_INDEX:
+		return HTLB_16M_INDEX;
+	case H_16G_CACHE_INDEX:
+		return HTLB_16G_INDEX;
+	default:
+		BUG();
+	}
+	/* should not reach */
+}
+
 #else /* !CONFIG_HUGETLB_PAGE */
 static inline int pmd_huge(pmd_t pmd) { return 0; }
 static inline int pud_huge(pud_t pud) { return 0; }
diff --git a/arch/powerpc/include/asm/book3s/64/pgtable-64k.h b/arch/powerpc/include/asm/book3s/64/pgtable-64k.h
index fb4b3ba52339..d7ee249d6890 100644
--- a/arch/powerpc/include/asm/book3s/64/pgtable-64k.h
+++ b/arch/powerpc/include/asm/book3s/64/pgtable-64k.h
@@ -45,8 +45,17 @@  static inline int hugepd_ok(hugepd_t hpd)
 {
 	return 0;
 }
+
 #define is_hugepd(pdep)			0
 
+/*
+ * This should never get called
+ */
+static inline int get_hugepd_cache_index(int index)
+{
+	BUG();
+}
+
 #else /* !CONFIG_HUGETLB_PAGE */
 static inline int pmd_huge(pmd_t pmd) { return 0; }
 static inline int pud_huge(pud_t pud) { return 0; }
diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h
index 42fe7c2ff2df..1022f622397f 100644
--- a/arch/powerpc/include/asm/book3s/64/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/64/pgtable.h
@@ -287,6 +287,11 @@  enum pgtable_index {
 	PMD_INDEX,
 	PUD_INDEX,
 	PGD_INDEX,
+	/*
+	 * Below are used with 4k page size and hugetlb
+	 */
+	HTLB_16M_INDEX,
+	HTLB_16G_INDEX,
 };
 
 extern unsigned long __vmalloc_start;
diff --git a/arch/powerpc/include/asm/nohash/32/pgalloc.h b/arch/powerpc/include/asm/nohash/32/pgalloc.h
index 1707781d2f20..9de40eb614da 100644
--- a/arch/powerpc/include/asm/nohash/32/pgalloc.h
+++ b/arch/powerpc/include/asm/nohash/32/pgalloc.h
@@ -109,6 +109,7 @@  static inline void pgtable_free(void *table, unsigned index_size)
 }
 
 #define check_pgt_cache()	do { } while (0)
+#define get_hugepd_cache_index(x)	(x)
 
 #ifdef CONFIG_SMP
 static inline void pgtable_free_tlb(struct mmu_gather *tlb,
diff --git a/arch/powerpc/include/asm/nohash/64/pgalloc.h b/arch/powerpc/include/asm/nohash/64/pgalloc.h
index 0e693f322cb2..e2d62d033708 100644
--- a/arch/powerpc/include/asm/nohash/64/pgalloc.h
+++ b/arch/powerpc/include/asm/nohash/64/pgalloc.h
@@ -141,6 +141,7 @@  static inline void pgtable_free(void *table, int shift)
 	}
 }
 
+#define get_hugepd_cache_index(x)	(x)
 #ifdef CONFIG_SMP
 static inline void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int shift)
 {
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index 7c5f479c5c00..8a9a49c13865 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -337,7 +337,8 @@  static void free_hugepd_range(struct mmu_gather *tlb, hugepd_t *hpdp, int pdshif
 	if (shift >= pdshift)
 		hugepd_free(tlb, hugepte);
 	else
-		pgtable_free_tlb(tlb, hugepte, pdshift - shift);
+		pgtable_free_tlb(tlb, hugepte,
+				 get_hugepd_cache_index(pdshift - shift));
 }
 
 static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
diff --git a/arch/powerpc/mm/pgtable-book3s64.c b/arch/powerpc/mm/pgtable-book3s64.c
index c1f4ca45c93a..468c3d83a2aa 100644
--- a/arch/powerpc/mm/pgtable-book3s64.c
+++ b/arch/powerpc/mm/pgtable-book3s64.c
@@ -409,6 +409,18 @@  static inline void pgtable_free(void *table, int index)
 	case PUD_INDEX:
 		kmem_cache_free(PGT_CACHE(PUD_CACHE_INDEX), table);
 		break;
+#ifdef CONFIG_PPC_4K_PAGES
+		/* 16M hugepd directory at pud level */
+	case HTLB_16M_INDEX:
+		BUILD_BUG_ON(H_16M_CACHE_INDEX <= 0);
+		kmem_cache_free(PGT_CACHE(H_16M_CACHE_INDEX), table);
+		break;
+		/* 16G hugepd directory at the pgd level */
+	case HTLB_16G_INDEX:
+		BUILD_BUG_ON(H_16G_CACHE_INDEX <= 0);
+		kmem_cache_free(PGT_CACHE(H_16G_CACHE_INDEX), table);
+		break;
+#endif
 		/* We don't free pgd table via RCU callback */
 	default:
 		BUG();
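
End-to-end, the hugepd free path now runs: free_hugepd_range() converts the page-table shift into one of the new HTLB_* enum values via get_hugepd_cache_index(), pgtable_free_tlb() carries that index along with the table, and pgtable_free() maps it back to the matching slab cache. The -24 and -34 in the cache index formulas are log2(16M) and log2(16G), so each directory appears sized to cover exactly the range one pud (respectively pgd) entry maps. Below is a small self-contained userspace model of that dispatch; it only illustrates the control flow, with kmem_cache_free() replaced by prints and the shift values (4 and 3) made up rather than taken from the real geometry:

#include <stdio.h>
#include <stdlib.h>

/* Mirrors enum pgtable_index from book3s/64/pgtable.h (values illustrative) */
enum pgtable_index {
	PTE_INDEX = 0,
	PMD_INDEX,
	PUD_INDEX,
	PGD_INDEX,
	HTLB_16M_INDEX,
	HTLB_16G_INDEX,
};

/* Stand-ins for H_16M_CACHE_INDEX / H_16G_CACHE_INDEX; the kernel derives
 * these from PAGE_SHIFT and the H_*_INDEX_SIZE geometry. */
#define EX_16M_CACHE_INDEX 4
#define EX_16G_CACHE_INDEX 3

/* Model of get_hugepd_cache_index(): cache shift -> pgtable_index value */
static int get_hugepd_cache_index(int shift)
{
	switch (shift) {
	case EX_16M_CACHE_INDEX:
		return HTLB_16M_INDEX;
	case EX_16G_CACHE_INDEX:
		return HTLB_16G_INDEX;
	default:
		abort();		/* BUG() in the kernel */
	}
}

/* Model of pgtable_free(): route the table to the right "cache" */
static void pgtable_free(void *table, int index)
{
	switch (index) {
	case PMD_INDEX:
	case PUD_INDEX:
		printf("return %p to the pmd/pud cache\n", table);
		break;
	case HTLB_16M_INDEX:
		printf("return %p to the 16M hugepd cache\n", table);
		break;
	case HTLB_16G_INDEX:
		printf("return %p to the 16G hugepd cache\n", table);
		break;
	default:
		abort();		/* pgd tables are not freed via this path */
	}
	free(table);
}

int main(void)
{
	void *hugepd = malloc(64);

	/*
	 * free_hugepd_range() does, in effect:
	 *   pgtable_free_tlb(tlb, hugepte,
	 *                    get_hugepd_cache_index(pdshift - shift));
	 * and pgtable_free() later receives that index.
	 */
	pgtable_free(hugepd, get_hugepd_cache_index(EX_16M_CACHE_INDEX));
	return 0;
}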