@@ -139,4 +139,20 @@ static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table,
pgtable_page_dtor(table);
pgtable_free_tlb(tlb, page_address(table), 0);
}
+
+static inline void pgd_ctor(void *addr)
+{
+ memset(addr, 0, PGD_TABLE_SIZE);
+}
+
+static inline void pud_ctor(void *addr)
+{
+ memset(addr, 0, PUD_TABLE_SIZE);
+}
+
+static inline void pmd_ctor(void *addr)
+{
+ memset(addr, 0, PMD_TABLE_SIZE);
+}
+
#endif /* _ASM_POWERPC_BOOK3S_32_PGALLOC_H */
@@ -73,10 +73,16 @@ static inline void radix__pgd_free(struct mm_struct *mm, pgd_t *pgd)
 static inline pgd_t *pgd_alloc(struct mm_struct *mm)
 {
+	pgd_t *pgd;
+
 	if (radix_enabled())
 		return radix__pgd_alloc(mm);
-	return kmem_cache_alloc(PGT_CACHE(PGD_INDEX_SIZE),
-		pgtable_gfp_flags(mm, GFP_KERNEL));
+	pgd = kmem_cache_alloc(PGT_CACHE(PGD_INDEX_SIZE),
+			       pgtable_gfp_flags(mm, GFP_KERNEL));
+	if (unlikely(!pgd))
+		return pgd;
+	/* Zero the whole table: constructors no longer do it for us. */
+	memset(pgd, 0, PGD_TABLE_SIZE);
+	return pgd;
 }
 static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
@@ -93,8 +96,14 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
 {
-	return kmem_cache_alloc(PGT_CACHE(PUD_CACHE_INDEX),
-		pgtable_gfp_flags(mm, GFP_KERNEL));
+	pud_t *pud;
+
+	pud = kmem_cache_alloc(PGT_CACHE(PUD_CACHE_INDEX),
+			       pgtable_gfp_flags(mm, GFP_KERNEL));
+	if (unlikely(!pud))
+		return pud;
+	/* Zero the whole table: constructors no longer do it for us. */
+	memset(pud, 0, PUD_TABLE_SIZE);
+	return pud;
 }
 static inline void pud_free(struct mm_struct *mm, pud_t *pud)
static inline void pud_free(struct mm_struct *mm, pud_t *pud)
@@ -120,8 +126,14 @@ static inline void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pud,
 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
 {
-	return kmem_cache_alloc(PGT_CACHE(PMD_CACHE_INDEX),
-		pgtable_gfp_flags(mm, GFP_KERNEL));
+	pmd_t *pmd;
+
+	pmd = kmem_cache_alloc(PGT_CACHE(PMD_CACHE_INDEX),
+			       pgtable_gfp_flags(mm, GFP_KERNEL));
+	if (unlikely(!pmd))
+		return pmd;
+	/* Zero the whole table: constructors no longer do it for us. */
+	memset(pmd, 0, PMD_TABLE_SIZE);
+	return pmd;
 }
 static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
@@ -218,4 +228,16 @@ static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table,
#define check_pgt_cache() do { } while (0)
+static inline void pgd_ctor(void *addr)
+{
+}
+
+static inline void pud_ctor(void *addr)
+{
+}
+
+static inline void pmd_ctor(void *addr)
+{
+}
+
#endif /* _ASM_POWERPC_BOOK3S_64_PGALLOC_H */
@@ -21,4 +21,20 @@ static inline void tlb_flush_pgtable(struct mmu_gather *tlb,
#else
#include <asm/nohash/32/pgalloc.h>
#endif
+
+static inline void pgd_ctor(void *addr)
+{
+ memset(addr, 0, PGD_TABLE_SIZE);
+}
+
+static inline void pud_ctor(void *addr)
+{
+ memset(addr, 0, PUD_TABLE_SIZE);
+}
+
+static inline void pmd_ctor(void *addr)
+{
+ memset(addr, 0, PMD_TABLE_SIZE);
+}
+
#endif /* _ASM_POWERPC_NOHASH_PGALLOC_H */
@@ -25,21 +25,6 @@
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
-static void pgd_ctor(void *addr)
-{
- memset(addr, 0, PGD_TABLE_SIZE);
-}
-
-static void pud_ctor(void *addr)
-{
- memset(addr, 0, PUD_TABLE_SIZE);
-}
-
-static void pmd_ctor(void *addr)
-{
- memset(addr, 0, PMD_TABLE_SIZE);
-}
-
struct kmem_cache *pgtable_cache[MAX_PGTABLE_INDEX_SIZE];
EXPORT_SYMBOL_GPL(pgtable_cache); /* used by kvm_hv module */
Now that we are using the second half of the table to store slot details,
and we don't clear them in huge_pte_get_and_clear(), we need to make sure
we zero out the range on allocation.

Simplify this by doing the object initialization after kmem_cache_alloc()
and updating the constructors to do nothing.

Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
---
 arch/powerpc/include/asm/book3s/32/pgalloc.h | 16 +++++++++++++
 arch/powerpc/include/asm/book3s/64/pgalloc.h | 34 +++++++++++++++++++++++-----
 arch/powerpc/include/asm/nohash/pgalloc.h    | 16 +++++++++++++
 arch/powerpc/mm/init-common.c                | 15 ------------
 4 files changed, 60 insertions(+), 21 deletions(-)