
[07/13] powerpc: Add hugepage support to 64-bit tablewalk code for FSL_BOOK3E

Message ID 13182798791090-git-send-email-beckyb@kernel.crashing.org (mailing list archive)
State Accepted, archived
Delegated to: Benjamin Herrenschmidt

Commit Message

Becky Bruce Oct. 10, 2011, 8:50 p.m. UTC
From: Becky Bruce <beckyb@kernel.crashing.org>

Before hugetlb, at each level of the table, we test for
!0 to determine if we have a valid table entry.  With hugetlb, this
compare becomes:
        < 0 is a normal entry
        0 is an invalid entry
        > 0 is huge

This works because the hugepage code pulls the top bit off the entry
(which for non-huge entries always has the top bit set) as an
indicator that we have a hugepage.

Signed-off-by: Becky Bruce <beckyb@kernel.crashing.org>
---
 arch/powerpc/mm/tlb_low_64e.S |   14 +++++++-------
 1 file changed, 7 insertions(+), 7 deletions(-)
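
To make the commit message's three-way compare concrete, here is a minimal C sketch of the classification (hypothetical names, not kernel code): the table entry is treated as a signed 64-bit value, so a normal kernel pointer, which always has the top bit set, tests negative; an empty slot tests zero; and a hugepage entry, whose top bit has been stripped, tests positive.

#include <stdint.h>

/* Illustrative sketch only: hypothetical names, not the kernel's API. */
enum entry_kind { ENTRY_NORMAL, ENTRY_INVALID, ENTRY_HUGE };

enum entry_kind classify_entry(int64_t entry)
{
	if (entry < 0)		/* top bit set: normal pointer to the next level */
		return ENTRY_NORMAL;
	if (entry == 0)		/* empty slot: fault */
		return ENTRY_INVALID;
	return ENTRY_HUGE;	/* top bit cleared by the hugepage code */
}

Read this way, the switch below from cmpldi/beq (unsigned compare, branch only if zero) to cmpdi/bge (signed compare, branch on anything non-negative) makes the bolted handler bail to the fault path for both empty and huge entries, and keep walking only on a normal, negative entry.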

Patch

diff --git a/arch/powerpc/mm/tlb_low_64e.S b/arch/powerpc/mm/tlb_low_64e.S
index 71d5d9a..ff672bd 100644
--- a/arch/powerpc/mm/tlb_low_64e.S
+++ b/arch/powerpc/mm/tlb_low_64e.S
@@ -136,22 +136,22 @@  ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_USE_TLBRSRV)
 #ifndef CONFIG_PPC_64K_PAGES
 	rldicl	r15,r16,64-PUD_SHIFT+3,64-PUD_INDEX_SIZE-3
 	clrrdi	r15,r15,3
-	cmpldi	cr0,r14,0
-	beq	tlb_miss_fault_bolted	/* Bad pgd entry */
+	cmpdi	cr0,r14,0
+	bge	tlb_miss_fault_bolted	/* Bad pgd entry or hugepage; bail */
 	ldx	r14,r14,r15		/* grab pud entry */
 #endif /* CONFIG_PPC_64K_PAGES */
 
 	rldicl	r15,r16,64-PMD_SHIFT+3,64-PMD_INDEX_SIZE-3
 	clrrdi	r15,r15,3
-	cmpldi	cr0,r14,0
-	beq	tlb_miss_fault_bolted
+	cmpdi	cr0,r14,0
+	bge	tlb_miss_fault_bolted
 	ldx	r14,r14,r15		/* Grab pmd entry */
 
 	rldicl	r15,r16,64-PAGE_SHIFT+3,64-PTE_INDEX_SIZE-3
 	clrrdi	r15,r15,3
-	cmpldi	cr0,r14,0
-	beq	tlb_miss_fault_bolted
-	ldx	r14,r14,r15		/* Grab PTE */
+	cmpdi	cr0,r14,0
+	bge	tlb_miss_fault_bolted
+	ldx	r14,r14,r15		/* Grab PTE, normal (!huge) page */
 
 	/* Check if required permissions are met */
 	andc.	r15,r11,r14
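
For readers who don't follow PowerPC assembly, here is a rough C-level rendering of one step of the patched walk (hypothetical names, not the kernel's actual C code); the same step is applied when loading the PUD, PMD and PTE entries, and a zero return corresponds to branching to tlb_miss_fault_bolted.

#include <stdint.h>

/*
 * Illustrative sketch of one level of the walk above.  'entry' is the
 * value loaded at the previous level and 'index' is the table index
 * derived from the faulting address (the rldicl/clrrdi register math
 * in the assembly).
 */
int64_t walk_one_level(int64_t entry, uint64_t index)
{
	const int64_t *table;

	/* cmpdi cr0,r14,0 ; bge tlb_miss_fault_bolted */
	if (entry >= 0)
		return 0;	/* invalid (== 0) or huge (> 0): take the fault path */

	/* ldx: a negative entry is a normal pointer to the next-level table */
	table = (const int64_t *)entry;
	return table[index];
}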