Patchwork KVM: PPC: Save/Restore CR over vcpu_run

login
register
mail settings
Submitter Alexander Graf
Date March 12, 2012, 7:26 p.m.
Message ID <1331580410-25847-1-git-send-email-agraf@suse.de>
Download mbox | patch
Permalink /patch/146222/
State New
Headers show

Comments

Alexander Graf - March 12, 2012, 7:26 p.m.
On PPC, CR fields CR2-CR4 are nonvolatile and thus have to be preserved across function calls.
We didn't respect that on any architecture until Paul spotted it in his
patch for Book3S-HV. This patch saves/restores CR for all KVM capable PPC hosts.

Signed-off-by: Alexander Graf <agraf@suse.de>

---

v1 -> v2:

  - optimize bookehv path
---
 arch/powerpc/kvm/book3s_interrupts.S  |    7 +++++++
 arch/powerpc/kvm/booke_interrupts.S   |    7 ++++++-
 arch/powerpc/kvm/bookehv_interrupts.S |    8 +++++++-
 3 files changed, 20 insertions(+), 2 deletions(-)

Patch

diff --git a/arch/powerpc/kvm/book3s_interrupts.S b/arch/powerpc/kvm/book3s_interrupts.S
index 0a8515a..3e35383 100644
--- a/arch/powerpc/kvm/book3s_interrupts.S
+++ b/arch/powerpc/kvm/book3s_interrupts.S
@@ -84,6 +84,10 @@  kvm_start_entry:
 	/* Save non-volatile registers (r14 - r31) */
 	SAVE_NVGPRS(r1)
 
+	/* Save CR */
+	mfcr	r14
+	stw	r14, _CCR(r1)
+
 	/* Save LR */
 	PPC_STL	r0, _LINK(r1)
 
@@ -165,6 +169,9 @@  kvm_exit_loop:
 	PPC_LL	r4, _LINK(r1)
 	mtlr	r4
 
+	lwz	r14, _CCR(r1)
+	mtcr	r14
+
 	/* Restore non-volatile host registers (r14 - r31) */
 	REST_NVGPRS(r1)
 
diff --git a/arch/powerpc/kvm/booke_interrupts.S b/arch/powerpc/kvm/booke_interrupts.S
index 10d8ef6..c8c4b87 100644
--- a/arch/powerpc/kvm/booke_interrupts.S
+++ b/arch/powerpc/kvm/booke_interrupts.S
@@ -34,7 +34,8 @@ 
 /* r2 is special: it holds 'current', and it made nonvolatile in the
  * kernel with the -ffixed-r2 gcc option. */
 #define HOST_R2         12
-#define HOST_NV_GPRS    16
+#define HOST_CR         16
+#define HOST_NV_GPRS    20
 #define HOST_NV_GPR(n)  (HOST_NV_GPRS + ((n - 14) * 4))
 #define HOST_MIN_STACK_SIZE (HOST_NV_GPR(31) + 4)
 #define HOST_STACK_SIZE (((HOST_MIN_STACK_SIZE + 15) / 16) * 16) /* Align. */
@@ -296,8 +297,10 @@  heavyweight_exit:
 
 	/* Return to kvm_vcpu_run(). */
 	lwz	r4, HOST_STACK_LR(r1)
+	lwz	r5, HOST_CR(r1)
 	addi	r1, r1, HOST_STACK_SIZE
 	mtlr	r4
+	mtcr	r5
 	/* r3 still contains the return code from kvmppc_handle_exit(). */
 	blr
 
@@ -314,6 +317,8 @@  _GLOBAL(__kvmppc_vcpu_run)
 	stw	r3, HOST_RUN(r1)
 	mflr	r3
 	stw	r3, HOST_STACK_LR(r1)
+	mfcr	r5
+	stw	r5, HOST_CR(r1)
 
 	/* Save host non-volatile register state to stack. */
 	stw	r14, HOST_NV_GPR(r14)(r1)
diff --git a/arch/powerpc/kvm/bookehv_interrupts.S b/arch/powerpc/kvm/bookehv_interrupts.S
index 63fc5f0..b71ddaf 100644
--- a/arch/powerpc/kvm/bookehv_interrupts.S
+++ b/arch/powerpc/kvm/bookehv_interrupts.S
@@ -49,7 +49,8 @@ 
  * kernel with the -ffixed-r2 gcc option.
  */
 #define HOST_R2         (3 * LONGBYTES)
-#define HOST_NV_GPRS    (4 * LONGBYTES)
+#define HOST_CR         (4 * LONGBYTES)
+#define HOST_NV_GPRS    (5 * LONGBYTES)
 #define HOST_NV_GPR(n)  (HOST_NV_GPRS + ((n - 14) * LONGBYTES))
 #define HOST_MIN_STACK_SIZE (HOST_NV_GPR(31) + LONGBYTES)
 #define HOST_STACK_SIZE ((HOST_MIN_STACK_SIZE + 15) & ~15) /* Align. */
@@ -396,6 +397,7 @@  skip_nv_load:
 heavyweight_exit:
 	/* Not returning to guest. */
 	PPC_LL	r5, HOST_STACK_LR(r1)
+	lwz	r6, HOST_CR(r1)
 
 	/*
 	 * We already saved guest volatile register state; now save the
@@ -442,6 +444,7 @@  heavyweight_exit:
 
 	/* Return to kvm_vcpu_run(). */
 	mtlr	r5
+	mtcr	r6
 	addi	r1, r1, HOST_STACK_SIZE
 	/* r3 still contains the return code from kvmppc_handle_exit(). */
 	blr
@@ -457,8 +460,11 @@  _GLOBAL(__kvmppc_vcpu_run)
 	/* Save host state to stack. */
 	PPC_STL	r3, HOST_RUN(r1)
 	mflr	r3
+	mfcr	r5
 	PPC_STL	r3, HOST_STACK_LR(r1)
 
+	stw	r5, HOST_CR(r1)
+
 	/* Save host non-volatile register state to stack. */
 	PPC_STL	r14, HOST_NV_GPR(r14)(r1)
 	PPC_STL	r15, HOST_NV_GPR(r15)(r1)