Patchwork [-V4,11/12] arch/powerpc: properly offset the context bits for 1T segments

login
register
mail settings
Submitter Aneesh Kumar K.V
Date July 25, 2012, 12:58 p.m.
Message ID <1343221085-30661-12-git-send-email-aneesh.kumar@linux.vnet.ibm.com>
Download mbox | patch
Permalink /patch/173159/
State Not Applicable
Headers show

Comments

Aneesh Kumar K.V - July 25, 2012, 12:58 p.m.
From: "Aneesh Kumar K.V" <aneesh.kumar@linux.vnet.ibm.com>

We should do rldimi r10,r9,USER_ESID_BITS,0 only after populating
r10 with ESID bits.

Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
---
 arch/powerpc/mm/slb_low.S |   12 ++++++++----
 1 file changed, 8 insertions(+), 4 deletions(-)
Paul Mackerras - July 30, 2012, 12:58 a.m.
On Wed, Jul 25, 2012 at 06:28:04PM +0530, Aneesh Kumar K.V wrote:
> From: "Aneesh Kumar K.V" <aneesh.kumar@linux.vnet.ibm.com>
> 
> We should do rldimi r10,r9,USER_ESID_BITS,0 only after populating
> r10 with ESID bits.

This needs a lot more explanation as to what the problem is that this
patch aims to fix.  Is it a problem today without your other patches,
or is it introduced by previous patches?

In any case I think there is an error in the patch, see below...

>  0:	/* user address: proto-VSID = context << 15 | ESID. First check
> @@ -155,13 +157,16 @@ END_MMU_FTR_SECTION_IFCLR(MMU_FTR_1T_SEGMENT)
>  	ld	r9,PACACONTEXTID(r13)
>  BEGIN_FTR_SECTION
>  	cmpldi	r10,0x1000
> +	bge	9f
>  END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
>  	rldimi	r10,r9,USER_ESID_BITS,0
> +	b	slb_finish_load
>  BEGIN_FTR_SECTION
> -	bge	slb_finish_load_1T
> +9:
> +	srdi	r10,r10,40-28		/* get 1T ESID */
> +	rldimi	r10,r9,USER_ESID_BITS,0

Shouldn't this one be USER_ESID_BITS_1T?  And in that case, since
USER_ESID_BITS == USER_ESID_BITS_1T + 12, I think the patch would
then introduce no change in behaviour (other than being slightly
slower than the current code).  Or am I missing something? -- in
that case we really need a longer and better explanation with the
patch.

Paul.
Aneesh Kumar K.V - July 30, 2012, 5:36 a.m.
Paul Mackerras <paulus@samba.org> writes:

> On Wed, Jul 25, 2012 at 06:28:04PM +0530, Aneesh Kumar K.V wrote:
>> From: "Aneesh Kumar K.V" <aneesh.kumar@linux.vnet.ibm.com>
>> 
>> We should do rldimi r10,r9,USER_ESID_BITS,0 only after populating
>> r10 with ESID bits.
>
> This needs a lot more explanation as to what the problem is that this
> patch aims to fix.  Is it a problem today without your other patches,
> or is it introduced by previous patches?
>
> In any case I think there is an error in the patch, see below...
>
>>  0:	/* user address: proto-VSID = context << 15 | ESID. First check
>> @@ -155,13 +157,16 @@ END_MMU_FTR_SECTION_IFCLR(MMU_FTR_1T_SEGMENT)
>>  	ld	r9,PACACONTEXTID(r13)
>>  BEGIN_FTR_SECTION
>>  	cmpldi	r10,0x1000
>> +	bge	9f
>>  END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
>>  	rldimi	r10,r9,USER_ESID_BITS,0
>> +	b	slb_finish_load
>>  BEGIN_FTR_SECTION
>> -	bge	slb_finish_load_1T
>> +9:
>> +	srdi	r10,r10,40-28		/* get 1T ESID */
>> +	rldimi	r10,r9,USER_ESID_BITS,0
>
> Shouldn't this one be USER_ESID_BITS_1T?  And in that case, since
> USER_ESID_BITS == USER_ESID_BITS_1T + 12, I think the patch would
> then introduce no change in behaviour (other than being slightly
> slower than the current code).  Or am I missing something? -- in
> that case we really need a longer and better explanation with the
> patch.

Ok I missed that we used USER_ESID_BITS there. This patch can be
dropped. I looked at the 1T code, and assumed that we were wrongly
shifting the context bits there.

-aneesh

Patch

diff --git a/arch/powerpc/mm/slb_low.S b/arch/powerpc/mm/slb_low.S
index db2cb3f..7bd8438 100644
--- a/arch/powerpc/mm/slb_low.S
+++ b/arch/powerpc/mm/slb_low.S
@@ -59,6 +59,7 @@  _GLOBAL(slb_miss_kernel_load_linear)
 BEGIN_FTR_SECTION
 	b	slb_finish_load
 END_MMU_FTR_SECTION_IFCLR(MMU_FTR_1T_SEGMENT)
+	srdi	r10,r10,40-28		/* get 1T ESID */
 	b	slb_finish_load_1T
 
 1:
@@ -88,6 +89,7 @@  _GLOBAL(slb_miss_kernel_load_vmemmap)
 BEGIN_FTR_SECTION
 	b	slb_finish_load
 END_MMU_FTR_SECTION_IFCLR(MMU_FTR_1T_SEGMENT)
+	srdi	r10,r10,40-28		/* get 1T ESID */
 	b	slb_finish_load_1T
 
 0:	/* user address: proto-VSID = context << 15 | ESID. First check
@@ -155,13 +157,16 @@  END_MMU_FTR_SECTION_IFCLR(MMU_FTR_1T_SEGMENT)
 	ld	r9,PACACONTEXTID(r13)
 BEGIN_FTR_SECTION
 	cmpldi	r10,0x1000
+	bge	9f
 END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
 	rldimi	r10,r9,USER_ESID_BITS,0
+	b	slb_finish_load
 BEGIN_FTR_SECTION
-	bge	slb_finish_load_1T
+9:
+	srdi	r10,r10,40-28		/* get 1T ESID */
+	rldimi	r10,r9,USER_ESID_BITS,0
+	b	slb_finish_load_1T
 END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
-	b	slb_finish_load
-
 8:	/* invalid EA */
 	li	r10,0			/* BAD_VSID */
 	li	r11,SLB_VSID_USER	/* flags don't much matter */
@@ -292,7 +297,6 @@  _GLOBAL(slb_compare_rr_to_size)
  * r3 = EA, r10 = proto-VSID, r11 = flags, clobbers r9
  */
 slb_finish_load_1T:
-	srdi	r10,r10,40-28		/* get 1T ESID */
 	ASM_VSID_SCRAMBLE(r10,r9,1T)
 	/*
 	 * bits above VSID_BITS_1T need to be ignored from r10