Patchwork [3/4] powerpc: Don't update r10 early in the call

Submitter Aneesh Kumar K.V
Date Feb. 14, 2013, 8:36 a.m.
Message ID <1360830983-1812-3-git-send-email-aneesh.kumar@linux.vnet.ibm.com>
Permalink /patch/220399/
State Superseded

Comments

Aneesh Kumar K.V - Feb. 14, 2013, 8:36 a.m.
From: "Aneesh Kumar K.V" <aneesh.kumar@linux.vnet.ibm.com>

Compute the ESID only on the code paths that actually need it, instead of once at function entry. This enables us to use r10 as a scratch register in the early part of the code (see the sketch after the diffstat).

Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
---
 arch/powerpc/mm/slb_low.S |    8 +++++---
 1 file changed, 5 insertions(+), 3 deletions(-)
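A minimal C sketch (not part of the patch) of what the two shifts compute, assuming SID_SHIFT is 28, the literal 28 that the hunks below replace, for 256MB segments; the helper names are hypothetical:

#include <stdint.h>

#define SID_SHIFT 28	/* 256MB segments: ESID = EA >> 28 */

/* srdi r9,r3,60: the top 4 bits of the effective address select the region */
static inline uint64_t get_region(uint64_t ea)
{
	return ea >> 60;
}

/* srdi r10,r3,SID_SHIFT: the ESID, now computed only where it is consumed */
static inline uint64_t get_esid(uint64_t ea)
{
	return ea >> SID_SHIFT;
}

The patch moves the second shift from function entry into each of the three paths that consume the ESID (the kernel linear mapping, the other kernel regions at label 1:, and user addresses at label 0:), so r10 stays free as scratch until then.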

Patch

diff --git a/arch/powerpc/mm/slb_low.S b/arch/powerpc/mm/slb_low.S
index 487f998..2a233cb 100644
--- a/arch/powerpc/mm/slb_low.S
+++ b/arch/powerpc/mm/slb_low.S
@@ -34,7 +34,6 @@  _GLOBAL(slb_allocate_realmode)
 	/* r3 = faulting address */
 
 	srdi	r9,r3,60		/* get region */
-	srdi	r10,r3,28		/* get esid */
 	cmpldi	cr7,r9,0xc		/* cmp PAGE_OFFSET for later use */
 
 	/* r3 = address, r10 = esid, cr7 = <> PAGE_OFFSET */
@@ -63,7 +62,7 @@  _GLOBAL(slb_miss_kernel_load_linear)
 	subi	r9,r9,(0xc + 4 + 1)
 	lis	r10, 8
 	add	r9,r9,r10
-	srdi	r10,r3,28 /* FIXME!! doing it twice */
+	srdi	r10,r3,SID_SHIFT	/* get esid */
 	/*
 	 * for 1T we shift 12 bits more.  slb_finish_load_1T will do
 	 * the necessary adjustment
@@ -75,6 +74,7 @@  END_MMU_FTR_SECTION_IFCLR(MMU_FTR_1T_SEGMENT)
 	b	slb_finish_load_1T
 
 1:
+	srdi	r10,r3,SID_SHIFT	/* get esid */
 #ifdef CONFIG_SPARSEMEM_VMEMMAP
 	/* Check virtual memmap region. To be patches at kernel boot */
 	cmpldi	cr0,r9,0xf
@@ -116,9 +116,11 @@  BEGIN_FTR_SECTION
 END_MMU_FTR_SECTION_IFCLR(MMU_FTR_1T_SEGMENT)
 	b	slb_finish_load_1T
 
-0:	/* user address: proto-VSID = context << 15 | ESID. First check
+0:	/*
+	 * user address: proto-VSID = context << 15 | ESID. First check
 	 * if the address is within the boundaries of the user region
 	 */
+	srdi	r10,r3,SID_SHIFT	/* get esid */
 	srdi.	r9,r10,USER_ESID_BITS
 	bne-	8f			/* invalid ea bits set */
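For context, a hedged C rendering of the user-address check in the last hunk. The USER_ESID_BITS value used here is an assumption for illustration (the in-file comment pairs the check with a context << 15 proto-VSID layout), and the function name is made up:

#include <stdint.h>

#define SID_SHIFT      28	/* ESID = EA >> SID_SHIFT for 256MB segments */
#define USER_ESID_BITS 16	/* assumed width of a valid user ESID */

/*
 * Non-zero when the effective address has bits set above the user
 * region, i.e. the case where the code above branches to 8f.
 */
static int invalid_user_ea(uint64_t ea)
{
	uint64_t esid = ea >> SID_SHIFT;	/* srdi r10,r3,SID_SHIFT */
	return (esid >> USER_ESID_BITS) != 0;	/* srdi. r9,r10,USER_ESID_BITS */
}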