KVM: PPC: Book3S PR: Rework SLB switching code

Message ID 1400157833-52294-1-git-send-email-agraf@suse.de
State New, archived

Commit Message

Alexander Graf May 15, 2014, 12:43 p.m. UTC
On LPAR guest systems Linux enables the shadow SLB to indicate to the
hypervisor a number of SLB entries that always have to be available.

Today we go through this shadow SLB and disable all ESIDs' valid bits.
However, pHyp doesn't like this approach very much and honors us with
fancy machine checks.

Fortunately the shadow SLB descriptor also has an entry that indicates
the number of valid entries following. During the lifetime of a guest
we can just swap that value to 0 and don't have to worry about the
SLB restoration magic.

While we're touching the code, let's also make it more readable (get
rid of rldicl), allow it to deal with a dynamic number of bolted
SLB entries and only do shadow SLB swizzling on LPAR systems.

Signed-off-by: Alexander Graf <agraf@suse.de>
---
 arch/powerpc/kvm/book3s_64_slb.S | 90 +++++++++++++++++++---------------------
 1 file changed, 43 insertions(+), 47 deletions(-)

Comments

Paul Mackerras May 17, 2014, 5:36 a.m. UTC | #1
On Thu, May 15, 2014 at 02:43:53PM +0200, Alexander Graf wrote:
> On LPAR guest systems Linux enables the shadow SLB to indicate to the
> hypervisor a number of SLB entries that always have to be available.
> 
> Today we go through this shadow SLB and disable all ESIDs' valid bits.
> However, pHyp doesn't like this approach very much and honors us with
> fancy machine checks.
> 
> Fortunately the shadow SLB descriptor also has an entry that indicates
> the number of valid entries following. During the lifetime of a guest
> we can just swap that value to 0 and don't have to worry about the
> SLB restoration magic.

I think this is a great idea; I have been thinking we should do
something like this.

> While we're touching the code, let's also make it more readable (get
> rid of rldicl), allow it to deal with a dynamic number of bolted
> SLB entries and only do shadow SLB swizzling on LPAR systems.
> 
> Signed-off-by: Alexander Graf <agraf@suse.de>

[snip]

> +#define SHADOW_SLB_ENTRY_LEN	0x10

Normally we'd define structure offsets/sizes like this in
asm-offsets.c.  However, since the structure can't change I guess this
is OK.
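
For reference, the layout those offsets hard-code is part of the hypervisor
interface, so it really is fixed; a sketch of struct slb_shadow as defined in
arch/powerpc/include/asm/lppaca.h around this time:

	struct slb_shadow {
		__be32	persistent;	/* number of persistent SLBs */
		__be32	buffer_length;	/* total shadow buffer length */
		__be64	reserved;
		struct	{
			__be64	esid;
			__be64	vsid;
		} save_area[SLB_NUM_BOLTED];
	};

Each save_area slot is 0x10 bytes (hence SHADOW_SLB_ENTRY_LEN), and the stb to
offset 3 quoted below writes the least significant byte of the big-endian
persistent field.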

>  	/* Fill SLB with our shadow */
>  
> +	lis	r7, SLB_ESID_V@h
> +
>  	lbz	r12, SVCPU_SLB_MAX(r3)
>  	mulli	r12, r12, 16
>  	addi	r12, r12, SVCPU_SLB
> @@ -99,7 +76,7 @@ slb_loop_enter:
>  
>  	ld	r10, 0(r11)
>  
> -	rldicl. r0, r10, 37, 63
> +	and.	r9, r10, r7

Or...
	andis.	r9, r10, SLB_ESID_V@h
and save a register and an instruction.
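
(For readers following along: SLB_ESID_V is 0x08000000, so all three forms
test the same valid bit. A rough side-by-side, using the register assignments
from the patch:)

	rldicl.	r0, r10, 37, 63		/* old: rotate the V bit into bit 63 and test */

	lis	r7, SLB_ESID_V@h	/* patch: build the mask in r7, */
	and.	r9, r10, r7		/* then AND it in, setting cr0 */

	andis.	r9, r10, SLB_ESID_V@h	/* suggested: same test, one instruction, no r7 */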

>  	cmpd	cr0, r11, r12
>  	blt	slb_loop_enter
>  
> +	isync
> +	sync

Why?

> +BEGIN_FW_FTR_SECTION
> +
> +	/* Declare SLB shadow as SLB_NUM_BOLTED entries big */
> +
> +	li	r8, SLB_NUM_BOLTED
> +	stb	r8, 3(r11)

Currently it's true that slb_shadow.persistent is always
SLB_NUM_BOLTED, but if you are going to embed that assumption here in
the KVM code you should at least add some comments over in
arch/powerpc/mm/slb.c and in arch/powerpc/kernel/paca.c (where
slb_shadow.persistent gets initialized) warning people that if they
break that assumption they need to fix KVM code as well.
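
A note along these lines (hypothetical wording) at each of those sites would do:

	/*
	 * KVM Book3S PR (arch/powerpc/kvm/book3s_64_slb.S) rewrites
	 * slb_shadow.persistent around guest entry/exit and assumes it
	 * is always SLB_NUM_BOLTED.  If you change how many entries are
	 * bolted, or how this field is maintained, fix the KVM code as
	 * well.
	 */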

Paul.
Alexander Graf May 28, 2014, 2:13 p.m. UTC | #2
On 17.05.14 07:36, Paul Mackerras wrote:
> On Thu, May 15, 2014 at 02:43:53PM +0200, Alexander Graf wrote:
>> On LPAR guest systems Linux enables the shadow SLB to indicate to the
>> hypervisor a number of SLB entries that always have to be available.
>>
>> Today we go through this shadow SLB and disable all ESIDs' valid bits.
>> However, pHyp doesn't like this approach very much and honors us with
>> fancy machine checks.
>>
>> Fortunately the shadow SLB descriptor also has an entry that indicates
>> the number of valid entries following. During the lifetime of a guest
>> we can just swap that value to 0 and don't have to worry about the
>> SLB restoration magic.
> I think this is a great idea; I have been thinking we should do
> something like this.
>
>> While we're touching the code, let's also make it more readable (get
>> rid of rldicl), allow it to deal with a dynamic number of bolted
>> SLB entries and only do shadow SLB swizzling on LPAR systems.
>>
>> Signed-off-by: Alexander Graf <agraf@suse.de>
> [snip]
>
>> +#define SHADOW_SLB_ENTRY_LEN	0x10
> Normally we'd define structure offsets/sizes like this in
> asm-offsets.c.  However, since the structure can't change I guess this
> is OK.
>
>>   	/* Fill SLB with our shadow */
>>   
>> +	lis	r7, SLB_ESID_V@h
>> +
>>   	lbz	r12, SVCPU_SLB_MAX(r3)
>>   	mulli	r12, r12, 16
>>   	addi	r12, r12, SVCPU_SLB
>> @@ -99,7 +76,7 @@ slb_loop_enter:
>>   
>>   	ld	r10, 0(r11)
>>   
>> -	rldicl. r0, r10, 37, 63
>> +	and.	r9, r10, r7
> Or...
> 	andis.	r9, r10, SLB_ESID_V@h
> and save a register and an instruction.

Good idea :)

>
>>   	cmpd	cr0, r11, r12
>>   	blt	slb_loop_enter
>>   
>> +	isync
>> +	sync
> Why?

Hrm, I guess I was trying to narrow down why things broke. I'll omit it 
and see whether my test machine can still successfully run PR KVM.
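
(For context, one plausible reading of Paul's question: slbmte only requires a
context-synchronizing instruction such as isync before the new translations
are relied upon, and sync is a storage barrier with no role in SLB updates, so
the pair looks like debugging leftovers rather than an architectural
requirement.)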

>
>> +BEGIN_FW_FTR_SECTION
>> +
>> +	/* Declare SLB shadow as SLB_NUM_BOLTED entries big */
>> +
>> +	li	r8, SLB_NUM_BOLTED
>> +	stb	r8, 3(r11)
> Currently it's true that slb_shadow.persistent is always
> SLB_NUM_BOLTED, but if you are going to embed that assumption here in

We had that assumption before too ;)

> the KVM code you should at least add some comments over in
> arch/powerpc/mm/slb.c and in arch/powerpc/kernel/paca.c (where
> slb_shadow.persistent gets initialized) warning people that if they
> break that assumption they need to fix KVM code as well.

but I like warnings, so I'll add some.


Alex

Patch

diff --git a/arch/powerpc/kvm/book3s_64_slb.S b/arch/powerpc/kvm/book3s_64_slb.S
index 596140e..aa2e2da 100644
--- a/arch/powerpc/kvm/book3s_64_slb.S
+++ b/arch/powerpc/kvm/book3s_64_slb.S
@@ -17,29 +17,9 @@ 
  * Authors: Alexander Graf <agraf@suse.de>
  */
 
-#define SHADOW_SLB_ESID(num)	(SLBSHADOW_SAVEAREA + (num * 0x10))
-#define SHADOW_SLB_VSID(num)	(SLBSHADOW_SAVEAREA + (num * 0x10) + 0x8)
-#define UNBOLT_SLB_ENTRY(num) \
-	li	r11, SHADOW_SLB_ESID(num);	\
-	LDX_BE	r9, r12, r11;			\
-	/* Invalid? Skip. */;			\
-	rldicl. r0, r9, 37, 63;			\
-	beq	slb_entry_skip_ ## num;		\
-	xoris	r9, r9, SLB_ESID_V@h;		\
-	STDX_BE	r9, r12, r11;			\
-  slb_entry_skip_ ## num:
-
-#define REBOLT_SLB_ENTRY(num) \
-	li	r8, SHADOW_SLB_ESID(num);	\
-	li	r7, SHADOW_SLB_VSID(num);	\
-	LDX_BE	r10, r11, r8;			\
-	cmpdi	r10, 0;				\
-	beq	slb_exit_skip_ ## num;		\
-	oris	r10, r10, SLB_ESID_V@h;		\
-	LDX_BE	r9, r11, r7;			\
-	slbmte	r9, r10;			\
-	STDX_BE	r10, r11, r8;			\
-slb_exit_skip_ ## num:
+#define SHADOW_SLB_ENTRY_LEN	0x10
+#define OFFSET_ESID(x)		(SHADOW_SLB_ENTRY_LEN * x)
+#define OFFSET_VSID(x)		((SHADOW_SLB_ENTRY_LEN * x) + 8)
 
 /******************************************************************************
  *                                                                            *
@@ -63,20 +43,15 @@  slb_exit_skip_ ## num:
 	 * SVCPU[LR]  = guest LR
 	 */
 
-	/* Remove LPAR shadow entries */
+BEGIN_FW_FTR_SECTION
 
-#if SLB_NUM_BOLTED == 3
+	/* Declare SLB shadow as 0 entries big */
 
-	ld	r12, PACA_SLBSHADOWPTR(r13)
+	ld	r11, PACA_SLBSHADOWPTR(r13)
+	li	r8, 0
+	stb	r8, 3(r11)
 
-	/* Remove bolted entries */
-	UNBOLT_SLB_ENTRY(0)
-	UNBOLT_SLB_ENTRY(1)
-	UNBOLT_SLB_ENTRY(2)
-	
-#else
-#error unknown number of bolted entries
-#endif
+END_FW_FTR_SECTION_IFSET(FW_FEATURE_LPAR)
 
 	/* Flush SLB */
 
@@ -86,6 +61,8 @@  slb_exit_skip_ ## num:
 
 	/* Fill SLB with our shadow */
 
+	lis	r7, SLB_ESID_V@h
+
 	lbz	r12, SVCPU_SLB_MAX(r3)
 	mulli	r12, r12, 16
 	addi	r12, r12, SVCPU_SLB
@@ -99,7 +76,7 @@  slb_loop_enter:
 
 	ld	r10, 0(r11)
 
-	rldicl. r0, r10, 37, 63
+	and.	r9, r10, r7
 	beq	slb_loop_enter_skip
 
 	ld	r9, 8(r11)
@@ -110,6 +87,9 @@  slb_loop_enter_skip:
 	cmpd	cr0, r11, r12
 	blt	slb_loop_enter
 
+	isync
+	sync
+
 slb_do_enter:
 
 .endm
@@ -136,23 +116,39 @@  slb_do_enter:
 	 *
 	 */
 
-	/* Restore bolted entries from the shadow and fix it along the way */
-
-	/* We don't store anything in entry 0, so we don't need to take care of it */
+	/* Remove all SLB entries that are in use. */
 	slbia
-	isync
 
-#if SLB_NUM_BOLTED == 3
+	/* Restore bolted entries from the shadow */
 
 	ld	r11, PACA_SLBSHADOWPTR(r13)
 
-	REBOLT_SLB_ENTRY(0)
-	REBOLT_SLB_ENTRY(1)
-	REBOLT_SLB_ENTRY(2)
-	
-#else
-#error unknown number of bolted entries
-#endif
+BEGIN_FW_FTR_SECTION
+
+	/* Declare SLB shadow as SLB_NUM_BOLTED entries big */
+
+	li	r8, SLB_NUM_BOLTED
+	stb	r8, 3(r11)
+
+END_FW_FTR_SECTION_IFSET(FW_FEATURE_LPAR)
+
+	/* Manually load all entries from shadow SLB */
+
+	li	r8, SLBSHADOW_SAVEAREA
+	li	r7, SLBSHADOW_SAVEAREA + 8
+
+	.rept	SLB_NUM_BOLTED
+	LDX_BE	r10, r11, r8
+	cmpdi	r10, 0
+	beq	1f
+	LDX_BE	r9, r11, r7
+	slbmte	r9, r10
+1:	addi	r7, r7, SHADOW_SLB_ENTRY_LEN
+	addi	r8, r8, SHADOW_SLB_ENTRY_LEN
+	.endr
+
+	isync
+	sync
 
 slb_do_exit: