powerpc/8xx: Map a second 8M text page at startup when needed.

Message ID 27edd227d3b9dcda4e2ce7d00d9fddeb7a5520a1.1545241146.git.christophe.leroy@c-s.fr (mailing list archive)
State Superseded
Series powerpc/8xx: Map a second 8M text page at startup when needed.

Checks

Context Check Description
snowpatch_ozlabs/apply_patch success next/apply_patch Successfully applied
snowpatch_ozlabs/build-ppc64le success build succeeded & removed 0 sparse warning(s)
snowpatch_ozlabs/build-ppc64be success build succeeded & removed 0 sparse warning(s)
snowpatch_ozlabs/build-ppc64e success build succeeded & removed 0 sparse warning(s)
snowpatch_ozlabs/build-pmac32 success build succeeded & removed 0 sparse warning(s)
snowpatch_ozlabs/checkpatch success total: 0 errors, 0 warnings, 0 checks, 49 lines checked

Commit Message

Christophe Leroy Dec. 20, 2018, 5:48 a.m. UTC
Some debug setups, like CONFIG_KASAN, generate huge kernels
with a text size over the 8M limit.

This patch maps a second 8M page when _einittext is over 8M.

Signed-off-by: Christophe Leroy <christophe.leroy@c-s.fr>
---
 arch/powerpc/kernel/head_8xx.S | 27 +++++++++++++++++++++++++--
 arch/powerpc/mm/8xx_mmu.c      |  4 ++++
 2 files changed, 29 insertions(+), 2 deletions(-)

Comments

Christoph Hellwig Dec. 20, 2018, 8:24 a.m. UTC | #1
On Thu, Dec 20, 2018 at 05:48:25AM +0000, Christophe Leroy wrote:
> Some debug setups, like CONFIG_KASAN, generate huge kernels
> with a text size over the 8M limit.
> 
> This patch maps a second 8M page when _einittext is over 8M.

Do we also need a check to generate a useful warning if we ever overflow
the 16MB?
Christophe Leroy Dec. 20, 2018, 8:50 a.m. UTC | #2
On 20/12/2018 at 09:24, Christoph Hellwig wrote:
> On Thu, Dec 20, 2018 at 05:48:25AM +0000, Christophe Leroy wrote:
>> Some debug setups, like CONFIG_KASAN, generate huge kernels
>> with a text size over the 8M limit.
>>
>> This patch maps a second 8M page when _einittext is over 8M.
> 
> Do we also need a check to generate a useful warning if we ever overflow
> the 16MB?
> 

I don't think any other platform does that (the 40x also maps 16MB,
book3s/601 maps 24MB).

But why not, could do that in another patch.

Is there an easy way to get the link to fail when CONFIG_PIN_TLB_TEXT is
set and _einittext is above 16MB?

Or should we just map up to 24MB on the 8xx and consider that we are on
the safe side with that much?

Christophe
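
As an illustration of the boot-time warning Christoph asks about, a check along
the following lines could sit next to mmu_mapin_ram() in arch/powerpc/mm/8xx_mmu.c.
This is a sketch only, not part of the patch; the helper name is made up here.
Making the link itself fail for the CONFIG_PIN_TLB_TEXT case would instead need
an ASSERT() on _einittext in the linker script, which is a separate change.

/* Sketch only: warn at boot if the kernel/init text has outgrown the
 * 16M that two pinned 8M ITLB entries can cover.
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sizes.h>
#include <asm/page.h>
#include <asm/sections.h>

static void __init check_pinned_text_limit(void)
{
	unsigned long text_top = (unsigned long)_einittext - PAGE_OFFSET;

	if (IS_ENABLED(CONFIG_PIN_TLB_TEXT) && text_top > SZ_16M)
		pr_warn("kernel text ends %lu bytes above KERNELBASE, beyond the 16M pinned ITLB mapping\n",
			text_top);
}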

Patch

diff --git a/arch/powerpc/kernel/head_8xx.S b/arch/powerpc/kernel/head_8xx.S
index b171b7c0a0e7..f6bc4392ea9f 100644
--- a/arch/powerpc/kernel/head_8xx.S
+++ b/arch/powerpc/kernel/head_8xx.S
@@ -334,8 +334,8 @@  InstructionTLBMiss:
 	rlwinm	r10, r10, 16, 0xfff8
 	cmpli	cr0, r10, PAGE_OFFSET@h
 #ifndef CONFIG_PIN_TLB_TEXT
-	/* It is assumed that kernel code fits into the first 8M page */
-0:	cmpli	cr7, r10, (PAGE_OFFSET + 0x0800000)@h
+	/* It is assumed that kernel code fits into the first two 8M pages */
+0:	cmpli	cr7, r10, (PAGE_OFFSET + 0x1000000)@h
 	patch_site	0b, patch__itlbmiss_linmem_top
 #endif
 #endif
@@ -904,6 +904,29 @@  initial_mmu:
 	li	r8, MI_BOOTINIT		/* Create RPN for address 0 */
 	mtspr	SPRN_MI_RPN, r8		/* Store TLB entry */
 
+	/* Map a second 8M page if needed */
+	lis	r9, _einittext@h	/* Upper half of end of kernel/init text */
+	lis	r8, (PAGE_OFFSET + 0x0800000)@h	/* Upper half of the 8M boundary */
+	cmplw	cr0, r9, r8
+	blt	1f			/* Text fits in the first 8M page */
+
+#ifdef CONFIG_PIN_TLB_TEXT
+	lis	r8, MI_RSV4I@h
+	ori	r8, r8, 0x1d00
+
+	mtspr	SPRN_MI_CTR, r8	/* Set instruction MMU control */
+#endif
+
+	lis	r8, (KERNELBASE + 0x800000)@h	/* Create vaddr for TLB */
+	ori	r8, r8, MI_EVALID	/* Mark it valid */
+	mtspr	SPRN_MI_EPN, r8
+	li	r8, MI_PS8MEG /* Set 8M byte page */
+	ori	r8, r8, MI_SVALID	/* Make it valid */
+	mtspr	SPRN_MI_TWC, r8
+	li	r8, MI_BOOTINIT		/* Create RPN for address 0 */
+	addis	r8, r8, 0x80
+	mtspr	SPRN_MI_RPN, r8		/* Store TLB entry */
+1:
 	lis	r8, MI_APG_INIT@h	/* Set protection modes */
 	ori	r8, r8, MI_APG_INIT@l
 	mtspr	SPRN_MI_AP, r8
diff --git a/arch/powerpc/mm/8xx_mmu.c b/arch/powerpc/mm/8xx_mmu.c
index e2b6687ebb50..1bdbfbf9fe16 100644
--- a/arch/powerpc/mm/8xx_mmu.c
+++ b/arch/powerpc/mm/8xx_mmu.c
@@ -122,6 +122,10 @@  unsigned long __init mmu_mapin_ram(unsigned long top)
 #endif
 	} else {
 		mapped = top & ~(LARGE_PAGE_SIZE_8M - 1);
+#ifndef CONFIG_PIN_TLB_TEXT
+		mmu_patch_cmp_limit(&patch__itlbmiss_linmem_top,
+				    _ALIGN(__pa(_einittext), 8 << 20));
+#endif
 	}
 
 	mmu_patch_cmp_limit(&patch__dtlbmiss_linmem_top, mapped);
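
For context, mmu_patch_cmp_limit() itself is not part of this diff. Assuming it
works along the lines sketched below, it rewrites the 16-bit immediate of the
cmpli instruction recorded by the patch_site annotation, so the TLB miss handlers
compare faulting addresses against the new 8M-aligned end of the kernel text
instead of the build-time constant. The signature is taken from the call sites
above; the body is an assumption and the real helper may differ in detail.

/* Assumed behaviour only; the real helper lives in arch/powerpc/mm/8xx_mmu.c. */
static void mmu_patch_cmp_limit(s32 *site, unsigned long mapped)
{
	/* Replace the low 16 bits of "cmpli crN, r10, limit@h" with the
	 * upper half of the new virtual limit.
	 */
	modify_instruction_site(site, 0xffff, (unsigned long)__va(mapped) >> 16);
}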