Patchwork [6/8] mm: Make transparent huge code not depend upon the details of pgtable_t

login
register
mail settings
Submitter David Miller
Date Oct. 2, 2012, 10:27 p.m.
Message ID <20121002.182718.250164928532772411.davem@davemloft.net>
Download mbox | patch
Permalink /patch/188670/
State RFC
Delegated to: David Miller
Headers show

Comments

David Miller - Oct. 2, 2012, 10:27 p.m.
The code currently assumes that pgtable_t is a struct page pointer.

Fix this by pushing pgtable management behind arch helper functions.

Signed-off-by: David S. Miller <davem@davemloft.net>
---
 arch/x86/include/asm/pgalloc.h |   26 ++++++++++++++++++++++++++
 mm/huge_memory.c               |   22 ++--------------------
 2 files changed, 28 insertions(+), 20 deletions(-)
Andrea Arcangeli - Oct. 2, 2012, 10:49 p.m.
Hi Dave,

On Tue, Oct 02, 2012 at 06:27:18PM -0400, David Miller wrote:
> 
> The code currently assumes that pgtable_t is a struct page pointer.
> 
> Fix this by pushing pgtable management behind arch helper functions.

This should be fixed in -mm already; it's from the s390x support.
--
To unsubscribe from this list: send the line "unsubscribe sparclinux" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html

Patch

diff --git a/arch/x86/include/asm/pgalloc.h b/arch/x86/include/asm/pgalloc.h
index b4389a4..f2a12e9 100644
--- a/arch/x86/include/asm/pgalloc.h
+++ b/arch/x86/include/asm/pgalloc.h
@@ -136,4 +136,30 @@  static inline void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pud,
 #endif	/* PAGETABLE_LEVELS > 3 */
 #endif	/* PAGETABLE_LEVELS > 2 */
 
+static inline void pmd_huge_pte_insert(struct mm_struct *mm, pgtable_t pgtable)
+{
+	/* FIFO */
+	if (!mm->pmd_huge_pte)
+		INIT_LIST_HEAD(&pgtable->lru);
+	else
+		list_add(&pgtable->lru, &mm->pmd_huge_pte->lru);
+	mm->pmd_huge_pte = pgtable;
+}
+
+static inline pgtable_t pmd_huge_pte_remove(struct mm_struct *mm)
+{
+	pgtable_t pgtable;
+
+	/* FIFO */
+	pgtable = mm->pmd_huge_pte;
+	if (list_empty(&pgtable->lru))
+		mm->pmd_huge_pte = NULL;
+	else {
+		mm->pmd_huge_pte = list_entry(pgtable->lru.next,
+					      struct page, lru);
+		list_del(&pgtable->lru);
+	}
+	return pgtable;
+}
+
 #endif /* _ASM_X86_PGALLOC_H */
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 29414c1..5d44785 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -616,12 +616,7 @@  static void prepare_pmd_huge_pte(pgtable_t pgtable,
 {
 	assert_spin_locked(&mm->page_table_lock);
 
-	/* FIFO */
-	if (!mm->pmd_huge_pte)
-		INIT_LIST_HEAD(&pgtable->lru);
-	else
-		list_add(&pgtable->lru, &mm->pmd_huge_pte->lru);
-	mm->pmd_huge_pte = pgtable;
+	pmd_huge_pte_insert(mm, pgtable);
 }
 
 static inline pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma)
@@ -805,20 +800,9 @@  out:
 /* no "address" argument so destroys page coloring of some arch */
 pgtable_t get_pmd_huge_pte(struct mm_struct *mm)
 {
-	pgtable_t pgtable;
-
 	assert_spin_locked(&mm->page_table_lock);
 
-	/* FIFO */
-	pgtable = mm->pmd_huge_pte;
-	if (list_empty(&pgtable->lru))
-		mm->pmd_huge_pte = NULL;
-	else {
-		mm->pmd_huge_pte = list_entry(pgtable->lru.next,
-					      struct page, lru);
-		list_del(&pgtable->lru);
-	}
-	return pgtable;
+	return pmd_huge_pte_remove(mm);
 }
 
 static int do_huge_pmd_wp_page_fallback(struct mm_struct *mm,
@@ -1971,8 +1955,6 @@  static void collapse_huge_page(struct mm_struct *mm,
 	pte_unmap(pte);
 	__SetPageUptodate(new_page);
 	pgtable = pmd_pgtable(_pmd);
-	VM_BUG_ON(page_count(pgtable) != 1);
-	VM_BUG_ON(page_mapcount(pgtable) != 0);
 
 	_pmd = mk_pmd(new_page, vma->vm_page_prot);
 	_pmd = maybe_pmd_mkwrite(pmd_mkdirty(_pmd), vma);