@@ -233,7 +233,7 @@ typedef struct {
#endif
#ifdef CONFIG_PPC_64K_PAGES
/* for 4K PTE fragment support */
- struct page *pgtable_page;
+ void *pte_frag;
#endif
} mm_context_t;
@@ -511,7 +511,7 @@ typedef struct {
#endif /* CONFIG_PPC_ICSWX */
#ifdef CONFIG_PPC_64K_PAGES
/* for 4K PTE fragment support */
- struct page *pgtable_page;
+ void *pte_frag;
#endif
} mm_context_t;
@@ -149,6 +149,16 @@ static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table,
}
#else /* if CONFIG_PPC_64K_PAGES */
+/*
+ * we support 16 fragments per PTE page.
+ */
+#define PTE_FRAG_NR 16
+/*
+ * We use a 2K PTE page fragment and another 2K for storing
+ * real_pte_t hash index
+ */
+#define PTE_FRAG_SIZE_SHIFT 12
+#define PTE_FRAG_SIZE (1UL << PTE_FRAG_SIZE_SHIFT)
extern pte_t *page_table_alloc(struct mm_struct *, unsigned long, int);
extern void page_table_free(struct mm_struct *, unsigned long *, int);
@@ -584,7 +584,7 @@ void __init setup_arch(char **cmdline_p)
init_mm.end_data = (unsigned long) _edata;
init_mm.brk = klimit;
#ifdef CONFIG_PPC_64K_PAGES
- init_mm.context.pgtable_page = NULL;
+ init_mm.context.pte_frag = NULL;
#endif
irqstack_early_init();
exc_lvl_early_init();
@@ -23,6 +23,7 @@
#include <linux/slab.h>
#include <asm/mmu_context.h>
+#include <asm/pgalloc.h>
#include "icswx.h"
@@ -86,7 +87,7 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
#endif /* CONFIG_PPC_ICSWX */
#ifdef CONFIG_PPC_64K_PAGES
- mm->context.pgtable_page = NULL;
+ mm->context.pte_frag = NULL;
#endif
return 0;
}
@@ -103,16 +104,19 @@ EXPORT_SYMBOL_GPL(__destroy_context);
static void destroy_pagetable_page(struct mm_struct *mm)
{
int count;
+ void *pte_frag;
struct page *page;
- page = mm->context.pgtable_page;
- if (!page)
+ pte_frag = mm->context.pte_frag;
+ if (!pte_frag)
return;
+ page = virt_to_page(pte_frag);
/* drop all the pending references */
- count = atomic_read(&page->_mapcount) + 1;
- /* We allow PTE_FRAG_NR(16) fragments from a PTE page */
- count = atomic_sub_return(16 - count, &page->_count);
+ count = ((unsigned long)pte_frag &
+ (PAGE_SIZE - 1)) >> PTE_FRAG_SIZE_SHIFT;
+ /* We allow PTE_FRAG_NR fragments from a PTE page */
+ count = atomic_sub_return(PTE_FRAG_NR - count, &page->_count);
if (!count) {
pgtable_page_dtor(page);
page_mapcount_reset(page);
@@ -352,66 +352,50 @@ struct page *pmd_page(pmd_t pmd)
}
#ifdef CONFIG_PPC_64K_PAGES
-/*
- * we support 16 fragments per PTE page. This is limited by how many
- * bits we can pack in page->_mapcount. We use the first half for
- * tracking the usage for rcu page table free.
- */
-#define PTE_FRAG_NR 16
-/*
- * We use a 2K PTE page fragment and another 2K for storing
- * real_pte_t hash index
- */
-#define PTE_FRAG_SIZE (2 * PTRS_PER_PTE * sizeof(pte_t))
-
static pte_t *get_from_cache(struct mm_struct *mm)
{
- int index;
- pte_t *ret = NULL;
- struct page *page;
+ void *pte_frag, *ret;
spin_lock(&mm->page_table_lock);
- page = mm->context.pgtable_page;
- if (page) {
- void *p = page_address(page);
- index = atomic_add_return(1, &page->_mapcount);
- ret = (pte_t *) (p + (index * PTE_FRAG_SIZE));
+ ret = mm->context.pte_frag;
+ if (ret) {
+ pte_frag = ret + PTE_FRAG_SIZE; /* next free fragment */
/*
- * If we have taken up all the fragments mark PTE page NULL
+ * If the next fragment crosses the page we have used them all up; mark PTE page NULL
*/
+ if (((unsigned long)pte_frag & (PAGE_SIZE - 1)) == 0)
+ pte_frag = NULL;
+ mm->context.pte_frag = pte_frag;
}
spin_unlock(&mm->page_table_lock);
- return ret;
+ return (pte_t *)ret;
}
static pte_t *__alloc_for_cache(struct mm_struct *mm, int kernel)
{
- pte_t *ret = NULL;
+ void *ret = NULL;
struct page *page = alloc_page(GFP_KERNEL | __GFP_NOTRACK |
__GFP_REPEAT | __GFP_ZERO);
if (!page)
return NULL;
+ ret = page_address(page);
spin_lock(&mm->page_table_lock);
/*
- * If we find pgtable_page set, we return
- * the allocated page with single fragement
+ * If we find pte_frag set, we return
+ * the allocated page with single fragment
* count.
*/
- if (likely(!mm->context.pgtable_page)) {
+ if (likely(!mm->context.pte_frag)) {
atomic_set(&page->_count, PTE_FRAG_NR);
- atomic_set(&page->_mapcount, 0);
- mm->context.pgtable_page = page;
+ mm->context.pte_frag = ret + PTE_FRAG_SIZE;
}
spin_unlock(&mm->page_table_lock);
- ret = (unsigned long *)page_address(page);
if (!kernel)
pgtable_page_ctor(page);
- return ret;
+ return (pte_t *)ret;
}
pte_t *page_table_alloc(struct mm_struct *mm, unsigned long vmaddr, int kernel)