@@ -164,15 +164,15 @@ static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table,
#else /* if CONFIG_PPC_64K_PAGES */
/*
- * we support 16 fragments per PTE page.
+ * we support 8 fragments per PTE page.
*/
-#define PTE_FRAG_NR 16
+#define PTE_FRAG_NR 8
/*
- * We use a 2K PTE page fragment and another 2K for storing
- * real_pte_t hash index
+ * We use a 2K PTE page fragment and another 4K for storing
+ * real_pte_t hash index, rounding the entire thing to 8K.
*/
-#define PTE_FRAG_SIZE_SHIFT 12
-#define PTE_FRAG_SIZE (2 * PTRS_PER_PTE * sizeof(pte_t))
+#define PTE_FRAG_SIZE_SHIFT 13
+#define PTE_FRAG_SIZE (1UL << PTE_FRAG_SIZE_SHIFT)
extern pte_t *page_table_alloc(struct mm_struct *, unsigned long, int);
extern void page_table_free(struct mm_struct *, unsigned long *, int);
We will use the increased size to store more information of the 4K pte
when using 64K page size. The idea is to free up bits in pte_t.

Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
---
 arch/powerpc/include/asm/pgalloc-64.h | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)