[1/2] KVM: PPC: bookehv: Use a macro for saving/restoring guest registers to/from their 64-bit copies.

Message ID: 1335353203-18766-1-git-send-email-Varun.Sethi@freescale.com
State: New, archived

Commit Message

Varun Sethi April 25, 2012, 11:26 a.m. UTC
Introduce PPC_STD/PPC_LD macros for saving/restoring guest registers to/from their 64-bit copies, replacing the open-coded #ifdef CONFIG_64BIT sequences (see the expansion sketch below).

Signed-off-by: Varun Sethi <Varun.Sethi@freescale.com>
---
 arch/powerpc/include/asm/kvm_asm.h    |    8 ++++++++
 arch/powerpc/kvm/bookehv_interrupts.S |   24 ++++--------------------
 2 files changed, 12 insertions(+), 20 deletions(-)
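
For reference, a minimal sketch of how the new macros expand at a call site. This is just the two branches of the #ifdef blocks removed below, shown side by side; the + 4 low-word offset assumes the big-endian layout of these cores:

	PPC_STD(r6, VCPU_SHARED_MSR, r11)
	/* CONFIG_64BIT: std r6, (VCPU_SHARED_MSR)(r11)     */
	/* 32-bit:       stw r6, (VCPU_SHARED_MSR + 4)(r11) */

	PPC_LD(r9, VCPU_SHARED_MSR, r11)
	/* CONFIG_64BIT: ld  r9, (VCPU_SHARED_MSR)(r11)     */
	/* 32-bit:       lwz r9, (VCPU_SHARED_MSR + 4)(r11) */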

Patch

diff --git a/arch/powerpc/include/asm/kvm_asm.h b/arch/powerpc/include/asm/kvm_asm.h
index 0978152..7d4018d 100644
--- a/arch/powerpc/include/asm/kvm_asm.h
+++ b/arch/powerpc/include/asm/kvm_asm.h
@@ -20,6 +20,14 @@ 
 #ifndef __POWERPC_KVM_ASM_H__
 #define __POWERPC_KVM_ASM_H__
 
+#ifdef CONFIG_64BIT
+#define PPC_STD(sreg, offset, areg)  std sreg, (offset)(areg)
+#define PPC_LD(treg, offset, areg)   ld treg, (offset)(areg)
+#else
+#define PPC_STD(sreg, offset, areg)  stw sreg, (offset+4)(areg)
+#define PPC_LD(treg, offset, areg)   lwz treg, (offset+4)(areg)
+#endif
+
 /* IVPR must be 64KiB-aligned. */
 #define VCPU_SIZE_ORDER 4
 #define VCPU_SIZE_LOG   (VCPU_SIZE_ORDER + 12)
diff --git a/arch/powerpc/kvm/bookehv_interrupts.S b/arch/powerpc/kvm/bookehv_interrupts.S
index 909e96e..41d3485 100644
--- a/arch/powerpc/kvm/bookehv_interrupts.S
+++ b/arch/powerpc/kvm/bookehv_interrupts.S
@@ -93,11 +93,7 @@ 
 #endif
 
 	oris	r8, r6, MSR_CE@h
-#ifdef CONFIG_64BIT
-	std	r6, (VCPU_SHARED_MSR)(r11)
-#else
-	stw	r6, (VCPU_SHARED_MSR + 4)(r11)
-#endif
+	PPC_STD(r6, VCPU_SHARED_MSR, r11)
 	ori	r8, r8, MSR_ME | MSR_RI
 	PPC_STL	r5, VCPU_PC(r4)
 
@@ -335,11 +331,7 @@  _GLOBAL(kvmppc_resume_host)
 	stw	r5, VCPU_SHARED_MAS0(r11)
 	mfspr	r7, SPRN_MAS2
 	stw	r6, VCPU_SHARED_MAS1(r11)
-#ifdef CONFIG_64BIT
-	std	r7, (VCPU_SHARED_MAS2)(r11)
-#else
-	stw	r7, (VCPU_SHARED_MAS2 + 4)(r11)
-#endif
+	PPC_STD(r7, VCPU_SHARED_MAS2, r11)
 	mfspr	r5, SPRN_MAS3
 	mfspr	r6, SPRN_MAS4
 	stw	r5, VCPU_SHARED_MAS7_3+4(r11)
@@ -527,11 +519,7 @@  lightweight_exit:
 	stw	r3, VCPU_HOST_MAS6(r4)
 	lwz	r3, VCPU_SHARED_MAS0(r11)
 	lwz	r5, VCPU_SHARED_MAS1(r11)
-#ifdef CONFIG_64BIT
-	ld	r6, (VCPU_SHARED_MAS2)(r11)
-#else
-	lwz	r6, (VCPU_SHARED_MAS2 + 4)(r11)
-#endif
+	PPC_LD(r6, VCPU_SHARED_MAS2, r11)
 	lwz	r7, VCPU_SHARED_MAS7_3+4(r11)
 	lwz	r8, VCPU_SHARED_MAS4(r11)
 	mtspr	SPRN_MAS0, r3
@@ -565,11 +553,7 @@  lightweight_exit:
 	PPC_LL	r6, VCPU_CTR(r4)
 	PPC_LL	r7, VCPU_CR(r4)
 	PPC_LL	r8, VCPU_PC(r4)
-#ifdef CONFIG_64BIT
-	ld	r9, (VCPU_SHARED_MSR)(r11)
-#else
-	lwz	r9, (VCPU_SHARED_MSR + 4)(r11)
-#endif
+	PPC_LD(r9, VCPU_SHARED_MSR, r11)
 	PPC_LL	r0, VCPU_GPR(r0)(r4)
 	PPC_LL	r1, VCPU_GPR(r1)(r4)
 	PPC_LL	r2, VCPU_GPR(r2)(r4)
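
Why the low word sits at offset + 4 on 32-bit: the vcpu shared area declares MSR/MAS2 as 64-bit fields, and on a big-endian 32-bit host the significant word of such a field is the second one. A hypothetical standalone C illustration (not from the patch; the struct and field names are made up):

	#include <stdint.h>
	#include <stdio.h>

	/* Stand-in for a 64-bit field in the vcpu shared area. */
	struct shared { uint64_t msr; };

	int main(void)
	{
		struct shared s = { .msr = 0x12345678ULL };
		const uint32_t *w = (const uint32_t *)&s.msr;

		/* On a big-endian target the low 32 bits land in w[1],
		 * i.e. at byte offset 4 -- exactly what the 32-bit
		 * PPC_STD/PPC_LD variants account for.  On a
		 * little-endian target the words would be swapped. */
		printf("w[0]=%08x w[1]=%08x\n", (unsigned)w[0], (unsigned)w[1]);
		return 0;
	}

Note the contrast with the PPC_STL/PPC_LL helpers already used in this file: those load/store a register-width (long) field, whereas PPC_STD/PPC_LD always address a fixed 64-bit field, which is why only the latter need the + 4 adjustment on 32-bit.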