Patchwork KVM: PPC: Save/Restore CR over vcpu_run

login
register
mail settings
Submitter Alexander Graf
Date March 5, 2012, 4:02 p.m.
Message ID <1330963334-5717-1-git-send-email-agraf@suse.de>
Download mbox | patch
Permalink /patch/144708/
State New
Headers show

Comments

Alexander Graf - March 5, 2012, 4:02 p.m.
On PPC, CR2-CR4 are nonvolatile, thus have to be saved across function calls.
We didn't respect that for any architecture until Paul spotted it in his
patch for Book3S-HV. This patch saves/restores CR for all KVM capable PPC hosts.

Signed-off-by: Alexander Graf <agraf@suse.de>
---
 arch/powerpc/kvm/book3s_interrupts.S  |    7 +++++++
 arch/powerpc/kvm/booke_interrupts.S   |    7 ++++++-
 arch/powerpc/kvm/bookehv_interrupts.S |    8 +++++++-
 3 files changed, 20 insertions(+), 2 deletions(-)
Scott Wood - March 5, 2012, 10:54 p.m.
On 03/05/2012 10:02 AM, Alexander Graf wrote:
> @@ -442,6 +444,7 @@ heavyweight_exit:
>  
>  	/* Return to kvm_vcpu_run(). */
>  	mtlr	r5
> +	mtcr	r6
>  	addi	r1, r1, HOST_STACK_SIZE
>  	/* r3 still contains the return code from kvmppc_handle_exit(). */
>  	blr
> @@ -459,6 +462,9 @@ _GLOBAL(__kvmppc_vcpu_run)
>  	mflr	r3
>  	PPC_STL	r3, HOST_STACK_LR(r1)
>  
> +	mfcr	r5
> +	stw	r5, HOST_CR(r1)

If you move the mfcr before the PPC_STL they should be able to run in
parallel.  Otherwise on e500mc mfcr will wait for PPC_STL to take its 3
cycles and then mfcr will take 5 cycles before the stw of HOST_CR.
Alternatively, consider using mcrf/mtocrf three times.

Similar issues in booke_interrupts.S (except we can't assume mtocrf
exists there), but I'm less worried about that one as it still needs an
optimization pass in general.

-Scott

--
To unsubscribe from this list: send the line "unsubscribe kvm-ppc" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html

Patch

diff --git a/arch/powerpc/kvm/book3s_interrupts.S b/arch/powerpc/kvm/book3s_interrupts.S
index 0a8515a..3e35383 100644
--- a/arch/powerpc/kvm/book3s_interrupts.S
+++ b/arch/powerpc/kvm/book3s_interrupts.S
@@ -84,6 +84,10 @@  kvm_start_entry:
 	/* Save non-volatile registers (r14 - r31) */
 	SAVE_NVGPRS(r1)
 
+	/* Save CR */
+	mfcr	r14
+	stw	r14, _CCR(r1)
+
 	/* Save LR */
 	PPC_STL	r0, _LINK(r1)
 
@@ -165,6 +169,9 @@  kvm_exit_loop:
 	PPC_LL	r4, _LINK(r1)
 	mtlr	r4
 
+	lwz	r14, _CCR(r1)
+	mtcr	r14
+
 	/* Restore non-volatile host registers (r14 - r31) */
 	REST_NVGPRS(r1)
 
diff --git a/arch/powerpc/kvm/booke_interrupts.S b/arch/powerpc/kvm/booke_interrupts.S
index 10d8ef6..c8c4b87 100644
--- a/arch/powerpc/kvm/booke_interrupts.S
+++ b/arch/powerpc/kvm/booke_interrupts.S
@@ -34,7 +34,8 @@ 
 /* r2 is special: it holds 'current', and it made nonvolatile in the
  * kernel with the -ffixed-r2 gcc option. */
 #define HOST_R2         12
-#define HOST_NV_GPRS    16
+#define HOST_CR         16
+#define HOST_NV_GPRS    20
 #define HOST_NV_GPR(n)  (HOST_NV_GPRS + ((n - 14) * 4))
 #define HOST_MIN_STACK_SIZE (HOST_NV_GPR(31) + 4)
 #define HOST_STACK_SIZE (((HOST_MIN_STACK_SIZE + 15) / 16) * 16) /* Align. */
@@ -296,8 +297,10 @@  heavyweight_exit:
 
 	/* Return to kvm_vcpu_run(). */
 	lwz	r4, HOST_STACK_LR(r1)
+	lwz	r5, HOST_CR(r1)
 	addi	r1, r1, HOST_STACK_SIZE
 	mtlr	r4
+	mtcr	r5
 	/* r3 still contains the return code from kvmppc_handle_exit(). */
 	blr
 
@@ -314,6 +317,8 @@  _GLOBAL(__kvmppc_vcpu_run)
 	stw	r3, HOST_RUN(r1)
 	mflr	r3
 	stw	r3, HOST_STACK_LR(r1)
+	mfcr	r5
+	stw	r5, HOST_CR(r1)
 
 	/* Save host non-volatile register state to stack. */
 	stw	r14, HOST_NV_GPR(r14)(r1)
diff --git a/arch/powerpc/kvm/bookehv_interrupts.S b/arch/powerpc/kvm/bookehv_interrupts.S
index 63fc5f0..3989b5a 100644
--- a/arch/powerpc/kvm/bookehv_interrupts.S
+++ b/arch/powerpc/kvm/bookehv_interrupts.S
@@ -49,7 +49,8 @@ 
  * kernel with the -ffixed-r2 gcc option.
  */
 #define HOST_R2         (3 * LONGBYTES)
-#define HOST_NV_GPRS    (4 * LONGBYTES)
+#define HOST_CR         (4 * LONGBYTES)
+#define HOST_NV_GPRS    (5 * LONGBYTES)
 #define HOST_NV_GPR(n)  (HOST_NV_GPRS + ((n - 14) * LONGBYTES))
 #define HOST_MIN_STACK_SIZE (HOST_NV_GPR(31) + LONGBYTES)
 #define HOST_STACK_SIZE ((HOST_MIN_STACK_SIZE + 15) & ~15) /* Align. */
@@ -396,6 +397,7 @@  skip_nv_load:
 heavyweight_exit:
 	/* Not returning to guest. */
 	PPC_LL	r5, HOST_STACK_LR(r1)
+	lwz	r6, HOST_CR(r1)
 
 	/*
 	 * We already saved guest volatile register state; now save the
@@ -442,6 +444,7 @@  heavyweight_exit:
 
 	/* Return to kvm_vcpu_run(). */
 	mtlr	r5
+	mtcr	r6
 	addi	r1, r1, HOST_STACK_SIZE
 	/* r3 still contains the return code from kvmppc_handle_exit(). */
 	blr
@@ -459,6 +462,9 @@  _GLOBAL(__kvmppc_vcpu_run)
 	mflr	r3
 	PPC_STL	r3, HOST_STACK_LR(r1)
 
+	mfcr	r5
+	stw	r5, HOST_CR(r1)
+
 	/* Save host non-volatile register state to stack. */
 	PPC_STL	r14, HOST_NV_GPR(r14)(r1)
 	PPC_STL	r15, HOST_NV_GPR(r15)(r1)