Patchwork [2/2] KVM: PPC: bookehv: Use lwz/stw instead of PPC_LL/PPC_STL for 32-bit fields

Submitter Mihai Caraman
Date April 16, 2012, 2:08 p.m.
Message ID <1334585334-14934-2-git-send-email-mihai.caraman@freescale.com>
Download mbox | patch
Permalink /patch/152866/
State New
Headers show

Comments

Mihai Caraman - April 16, 2012, 2:08 p.m.
Interrupt code used the PPC_LL/PPC_STL macros to load/store some of the u32 fields,
which led to memory overflows on 64-bit. Use lwz/stw instead.

Signed-off-by: Mihai Caraman <mihai.caraman@freescale.com>
---
 arch/powerpc/kvm/bookehv_interrupts.S |   16 ++++++++--------
 1 files changed, 8 insertions(+), 8 deletions(-)
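
For reference, PPC_LL/PPC_STL expand to doubleword accesses on 64-bit builds, so applying them to a 4-byte vcpu field reads or writes 8 bytes and clobbers whatever follows the field. A minimal sketch of the expected macro definitions, modelled on arch/powerpc/include/asm/asm-compat.h (the exact spelling in the tree may differ):

	/* Sketch only: pointer-sized load/store helpers, asm-compat.h style. */
	#ifdef __powerpc64__
	#define PPC_LL	stringify_in_c(ld)	/* 8-byte load  */
	#define PPC_STL	stringify_in_c(std)	/* 8-byte store */
	#else
	#define PPC_LL	stringify_in_c(lwz)	/* 4-byte load  */
	#define PPC_STL	stringify_in_c(stw)	/* 4-byte store */
	#endif

With the patch, the u32 fields touched here (VCPU_CR, VCPU_VRSAVE and the exit-timing TBL/TBU slots) are always accessed with lwz/stw, while pointer-sized fields keep using PPC_LL/PPC_STL.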
Alexander Graf - April 26, 2012, 11:40 a.m.
On 16.04.2012, at 16:08, Mihai Caraman wrote:

> Interrupt code used the PPC_LL/PPC_STL macros to load/store some of the u32 fields,
> which led to memory overflows on 64-bit. Use lwz/stw instead.

Phew, pretty ugly :). Applied to kvm-ppc-next.


Alex


Patch

diff --git a/arch/powerpc/kvm/bookehv_interrupts.S b/arch/powerpc/kvm/bookehv_interrupts.S
index 0ca987d..e351c53 100644
--- a/arch/powerpc/kvm/bookehv_interrupts.S
+++ b/arch/powerpc/kvm/bookehv_interrupts.S
@@ -95,9 +95,9 @@ 
 	mfspr	r8, SPRN_TBRL
 	mfspr	r9, SPRN_TBRU
 	cmpw	r9, r7
-	PPC_STL	r8, VCPU_TIMING_EXIT_TBL(r4)
+	stw	r8, VCPU_TIMING_EXIT_TBL(r4)
 	bne-	1b
-	PPC_STL	r9, VCPU_TIMING_EXIT_TBU(r4)
+	stw	r9, VCPU_TIMING_EXIT_TBU(r4)
 #endif
 
 	oris	r8, r6, MSR_CE@h
@@ -228,7 +228,7 @@  _GLOBAL(kvmppc_handler_\intno\()_\srr1)
 	PPC_STL	r4, VCPU_GPR(r4)(r11)
 	PPC_LL	r4, THREAD_NORMSAVE(0)(r10)
 	PPC_STL	r5, VCPU_GPR(r5)(r11)
-	PPC_STL	r13, VCPU_CR(r11)
+	stw	r13, VCPU_CR(r11)
 	mfspr	r5, \srr0
 	PPC_STL	r3, VCPU_GPR(r10)(r11)
 	PPC_LL	r3, THREAD_NORMSAVE(2)(r10)
@@ -255,7 +255,7 @@  _GLOBAL(kvmppc_handler_\intno\()_\srr1)
 	PPC_STL	r4, VCPU_GPR(r4)(r11)
 	PPC_LL	r4, GPR9(r8)
 	PPC_STL	r5, VCPU_GPR(r5)(r11)
-	PPC_STL	r9, VCPU_CR(r11)
+	stw	r9, VCPU_CR(r11)
 	mfspr	r5, \srr0
 	PPC_STL	r3, VCPU_GPR(r8)(r11)
 	PPC_LL	r3, GPR10(r8)
@@ -327,7 +327,7 @@  _GLOBAL(kvmppc_resume_host)
 	mfspr	r6, SPRN_SPRG4
 	PPC_STL	r5, VCPU_LR(r4)
 	mfspr	r7, SPRN_SPRG5
-	PPC_STL	r3, VCPU_VRSAVE(r4)
+	stw	r3, VCPU_VRSAVE(r4)
 	PPC_STD(r6, VCPU_SHARED_SPRG4, r11)
 	mfspr	r8, SPRN_SPRG6
 	PPC_STD(r7, VCPU_SHARED_SPRG5, r11)
@@ -571,7 +571,7 @@  lightweight_exit:
 	PPC_LL	r3, VCPU_LR(r4)
 	PPC_LL	r5, VCPU_XER(r4)
 	PPC_LL	r6, VCPU_CTR(r4)
-	PPC_LL	r7, VCPU_CR(r4)
+	lwz	r7, VCPU_CR(r4)
 	PPC_LL	r8, VCPU_PC(r4)
 #ifdef CONFIG_64BIT
 	ld	r9, (VCPU_SHARED_MSR)(r11)
@@ -598,9 +598,9 @@  lightweight_exit:
 	mfspr	r9, SPRN_TBRL
 	mfspr	r8, SPRN_TBRU
 	cmpw	r8, r6
-	PPC_STL	r9, VCPU_TIMING_LAST_ENTER_TBL(r4)
+	stw	r9, VCPU_TIMING_LAST_ENTER_TBL(r4)
 	bne	1b
-	PPC_STL	r8, VCPU_TIMING_LAST_ENTER_TBU(r4)
+	stw	r8, VCPU_TIMING_LAST_ENTER_TBU(r4)
 #endif
 
 	/*