
[1/1] KVM: PPC: Book3S: correct width in XER handling

Message ID d0600fe7952351dfd176c97d416a6a351812a78b.1432099563.git.sam.bobroff@au1.ibm.com
State Superseded, archived

Commit Message

Sam Bobroff May 20, 2015, 5:26 a.m. UTC
In 64-bit kernels, the Fixed-Point Exception Register (XER) is a 64-bit
field (e.g. in kvm_regs and kvm_vcpu_arch), and in most places it is
accessed as such.

This patch corrects the places where it is accessed as a 32-bit field by a
64-bit kernel.  In some cases this is via a 32-bit load or store
instruction which, depending on endianness, will cause either the
lower or upper 32 bits to be missed.  In another case the value is cast
to u32, causing the upper 32 bits to be cleared.

This patch corrects those places by extending the access methods to
64 bits.

Signed-off-by: Sam Bobroff <sam.bobroff@au1.ibm.com>
---

 arch/powerpc/include/asm/kvm_book3s.h   |    4 ++--
 arch/powerpc/kvm/book3s_hv_rmhandlers.S |    6 +++---
 arch/powerpc/kvm/book3s_segment.S       |    4 ++--
 3 files changed, 7 insertions(+), 7 deletions(-)
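
A minimal user-space sketch of the u32-cast truncation described in the
commit message; the struct and the two setters below are stand-ins for
kvm_vcpu_arch and kvmppc_set_xer, not code from the kernel tree:

#include <stdio.h>

/* Stand-in for the relevant part of kvm_vcpu_arch; in the kernel the
 * xer field is already ulong on 64-bit builds. */
struct fake_vcpu_arch {
	unsigned long xer;
};

/* Old-style accessor: the u32 parameter silently drops the upper
 * 32 bits of whatever the caller passes in. */
static void set_xer_u32(struct fake_vcpu_arch *a, unsigned int val)
{
	a->xer = val;
}

/* Widened accessor, as in the patch: the full value survives. */
static void set_xer_ulong(struct fake_vcpu_arch *a, unsigned long val)
{
	a->xer = val;
}

int main(void)
{
	unsigned long xer = 0x123456789abcdef0UL;
	struct fake_vcpu_arch a;

	set_xer_u32(&a, xer);             /* implicit conversion truncates */
	printf("u32 accessor:   %#lx\n", a.xer);   /* prints 0x9abcdef0 */

	set_xer_ulong(&a, xer);
	printf("ulong accessor: %#lx\n", a.xer);   /* prints the full value */
	return 0;
}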

Comments

Paul Mackerras May 20, 2015, 10:26 p.m. UTC | #1
On Wed, May 20, 2015 at 03:26:12PM +1000, Sam Bobroff wrote:
> In 64 bit kernels, the Fixed Point Exception Register (XER) is a 64
> bit field (e.g. in kvm_regs and kvm_vcpu_arch) and in most places it is
> accessed as such.
> 
> This patch corrects places where it is accessed as a 32 bit field by a
> 64 bit kernel.  In some cases this is via a 32 bit load or store
> instruction which, depending on endianness, will cause either the
> lower or upper 32 bits to be missed.  In another case it is cast as a
> u32, causing the upper 32 bits to be cleared.
> 
> This patch corrects those places by extending the access methods to
> 64 bits.
> 
> Signed-off-by: Sam Bobroff <sam.bobroff@au1.ibm.com>

Acked-by: Paul Mackerras <paulus@samba.org>
Scott Wood May 20, 2015, 10:35 p.m. UTC | #2
On Wed, 2015-05-20 at 15:26 +1000, Sam Bobroff wrote:
> In 64 bit kernels, the Fixed Point Exception Register (XER) is a 64
> bit field (e.g. in kvm_regs and kvm_vcpu_arch) and in most places it is
> accessed as such.
> 
> This patch corrects places where it is accessed as a 32 bit field by a
> 64 bit kernel.  In some cases this is via a 32 bit load or store
> instruction which, depending on endianness, will cause either the
> lower or upper 32 bits to be missed.  In another case it is cast as a
> u32, causing the upper 32 bits to be cleared.
> 
> This patch corrects those places by extending the access methods to
> 64 bits.
> 
> Signed-off-by: Sam Bobroff <sam.bobroff@au1.ibm.com>
> ---
> 
>  arch/powerpc/include/asm/kvm_book3s.h   |    4 ++--
>  arch/powerpc/kvm/book3s_hv_rmhandlers.S |    6 +++---
>  arch/powerpc/kvm/book3s_segment.S       |    4 ++--
>  3 files changed, 7 insertions(+), 7 deletions(-)

It's nominally a 64-bit register, but the upper 32 bits are reserved in
ISA 2.06.  Do newer ISAs or certain implementations define things in the
upper 32 bits, or is this just about the asm accesses being wrong on
big-endian?

Also, I see the same issue in the booke code (CCing Mihai).

-Scott

> diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h
> index b91e74a..05a875a 100644
> --- a/arch/powerpc/include/asm/kvm_book3s.h
> +++ b/arch/powerpc/include/asm/kvm_book3s.h
> @@ -225,12 +225,12 @@ static inline u32 kvmppc_get_cr(struct kvm_vcpu *vcpu)
>  	return vcpu->arch.cr;
>  }
>  
> -static inline void kvmppc_set_xer(struct kvm_vcpu *vcpu, u32 val)
> +static inline void kvmppc_set_xer(struct kvm_vcpu *vcpu, ulong val)
>  {
>  	vcpu->arch.xer = val;
>  }
>  
> -static inline u32 kvmppc_get_xer(struct kvm_vcpu *vcpu)
> +static inline ulong kvmppc_get_xer(struct kvm_vcpu *vcpu)
>  {
>  	return vcpu->arch.xer;
>  }
> diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
> index 4d70df2..d75be59 100644
> --- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
> +++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
> @@ -870,7 +870,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
>  	blt	hdec_soon
>  
>  	ld	r6, VCPU_CTR(r4)
> -	lwz	r7, VCPU_XER(r4)
> +	ld	r7, VCPU_XER(r4)
>  
>  	mtctr	r6
>  	mtxer	r7
> @@ -1103,7 +1103,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
>  	mfctr	r3
>  	mfxer	r4
>  	std	r3, VCPU_CTR(r9)
> -	stw	r4, VCPU_XER(r9)
> +	std	r4, VCPU_XER(r9)
>  
>  	/* If this is a page table miss then see if it's theirs or ours */
>  	cmpwi	r12, BOOK3S_INTERRUPT_H_DATA_STORAGE
> @@ -1675,7 +1675,7 @@ kvmppc_hdsi:
>  	bl	kvmppc_msr_interrupt
>  fast_interrupt_c_return:
>  6:	ld	r7, VCPU_CTR(r9)
> -	lwz	r8, VCPU_XER(r9)
> +	ld	r8, VCPU_XER(r9)
>  	mtctr	r7
>  	mtxer	r8
>  	mr	r4, r9
> diff --git a/arch/powerpc/kvm/book3s_segment.S b/arch/powerpc/kvm/book3s_segment.S
> index acee37c..ca8f174 100644
> --- a/arch/powerpc/kvm/book3s_segment.S
> +++ b/arch/powerpc/kvm/book3s_segment.S
> @@ -123,7 +123,7 @@ no_dcbz32_on:
>  	PPC_LL	r8, SVCPU_CTR(r3)
>  	PPC_LL	r9, SVCPU_LR(r3)
>  	lwz	r10, SVCPU_CR(r3)
> -	lwz	r11, SVCPU_XER(r3)
> +	PPC_LL	r11, SVCPU_XER(r3)
>  
>  	mtctr	r8
>  	mtlr	r9
> @@ -237,7 +237,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
>  	mfctr	r8
>  	mflr	r9
>  
> -	stw	r5, SVCPU_XER(r13)
> +	PPC_STL	r5, SVCPU_XER(r13)
>  	PPC_STL	r6, SVCPU_FAULT_DAR(r13)
>  	stw	r7, SVCPU_FAULT_DSISR(r13)
>  	PPC_STL	r8, SVCPU_CTR(r13)


Paul Mackerras May 21, 2015, 2:51 a.m. UTC | #3
On Wed, May 20, 2015 at 05:35:08PM -0500, Scott Wood wrote:
> 
> It's nominally a 64-bit register, but the upper 32 bits are reserved in
> ISA 2.06.  Do newer ISAs or certain implementations define things in the
> upper 32 bits, or is this just about the asm accesses being wrong on
> big-endian?

It's primarily about the asm accesses being wrong.

Paul.
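
To make the big-endian failure mode concrete, a small stand-alone C
sketch (an illustration of what lwz/stw on the 64-bit xer slot amount
to, not code from the patch):

#include <stdio.h>
#include <stdint.h>
#include <string.h>

int main(void)
{
	/* SO and OV set: the architected XER bits live in the low 32-bit word. */
	uint64_t xer = 0x00000000c0000000ULL;
	uint32_t word;

	/* A 32-bit load at the base of the field, which is what lwz with
	 * offset VCPU_XER amounts to, reads bytes 0-3 of the 64-bit slot. */
	memcpy(&word, &xer, sizeof(word));

	/* On big-endian those bytes are the reserved upper half, so the
	 * guest's real XER bits are lost (prints 0); on little-endian the
	 * low word happens to be read and the bug stays hidden
	 * (prints 0xc0000000). */
	printf("32-bit load sees: %#x\n", (unsigned)word);
	return 0;
}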
Alexander Graf May 25, 2015, 9:08 p.m. UTC | #4
On 20.05.15 07:26, Sam Bobroff wrote:
> In 64 bit kernels, the Fixed Point Exception Register (XER) is a 64
> bit field (e.g. in kvm_regs and kvm_vcpu_arch) and in most places it is
> accessed as such.
> 
> This patch corrects places where it is accessed as a 32 bit field by a
> 64 bit kernel.  In some cases this is via a 32 bit load or store
> instruction which, depending on endianness, will cause either the
> lower or upper 32 bits to be missed.  In another case it is cast as a
> u32, causing the upper 32 bits to be cleared.
> 
> This patch corrects those places by extending the access methods to
> 64 bits.
> 
> Signed-off-by: Sam Bobroff <sam.bobroff@au1.ibm.com>
> ---
> 
>  arch/powerpc/include/asm/kvm_book3s.h   |    4 ++--
>  arch/powerpc/kvm/book3s_hv_rmhandlers.S |    6 +++---
>  arch/powerpc/kvm/book3s_segment.S       |    4 ++--
>  3 files changed, 7 insertions(+), 7 deletions(-)
> 
> diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h
> index b91e74a..05a875a 100644
> --- a/arch/powerpc/include/asm/kvm_book3s.h
> +++ b/arch/powerpc/include/asm/kvm_book3s.h
> @@ -225,12 +225,12 @@ static inline u32 kvmppc_get_cr(struct kvm_vcpu *vcpu)
>  	return vcpu->arch.cr;
>  }
>  
> -static inline void kvmppc_set_xer(struct kvm_vcpu *vcpu, u32 val)
> +static inline void kvmppc_set_xer(struct kvm_vcpu *vcpu, ulong val)
>  {
>  	vcpu->arch.xer = val;
>  }
>  
> -static inline u32 kvmppc_get_xer(struct kvm_vcpu *vcpu)
> +static inline ulong kvmppc_get_xer(struct kvm_vcpu *vcpu)
>  {
>  	return vcpu->arch.xer;
>  }
> diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
> index 4d70df2..d75be59 100644
> --- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
> +++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
> @@ -870,7 +870,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
>  	blt	hdec_soon
>  
>  	ld	r6, VCPU_CTR(r4)
> -	lwz	r7, VCPU_XER(r4)
> +	ld	r7, VCPU_XER(r4)
>  
>  	mtctr	r6
>  	mtxer	r7
> @@ -1103,7 +1103,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
>  	mfctr	r3
>  	mfxer	r4
>  	std	r3, VCPU_CTR(r9)
> -	stw	r4, VCPU_XER(r9)
> +	std	r4, VCPU_XER(r9)
>  
>  	/* If this is a page table miss then see if it's theirs or ours */
>  	cmpwi	r12, BOOK3S_INTERRUPT_H_DATA_STORAGE
> @@ -1675,7 +1675,7 @@ kvmppc_hdsi:
>  	bl	kvmppc_msr_interrupt
>  fast_interrupt_c_return:
>  6:	ld	r7, VCPU_CTR(r9)
> -	lwz	r8, VCPU_XER(r9)
> +	ld	r8, VCPU_XER(r9)
>  	mtctr	r7
>  	mtxer	r8
>  	mr	r4, r9
> diff --git a/arch/powerpc/kvm/book3s_segment.S b/arch/powerpc/kvm/book3s_segment.S
> index acee37c..ca8f174 100644
> --- a/arch/powerpc/kvm/book3s_segment.S
> +++ b/arch/powerpc/kvm/book3s_segment.S
> @@ -123,7 +123,7 @@ no_dcbz32_on:
>  	PPC_LL	r8, SVCPU_CTR(r3)
>  	PPC_LL	r9, SVCPU_LR(r3)
>  	lwz	r10, SVCPU_CR(r3)
> -	lwz	r11, SVCPU_XER(r3)
> +	PPC_LL	r11, SVCPU_XER(r3)

struct kvmppc_book3s_shadow_vcpu {
        bool in_use;
        ulong gpr[14];
        u32 cr;
        u32 xer;
[...]

so at least this change looks wrong. Please double-check all fields in
your patch again.


Alex
Sam Bobroff May 26, 2015, 12:14 a.m. UTC | #5
On Mon, May 25, 2015 at 11:08:08PM +0200, Alexander Graf wrote:
> 
> 
> On 20.05.15 07:26, Sam Bobroff wrote:
> > In 64 bit kernels, the Fixed Point Exception Register (XER) is a 64
> > bit field (e.g. in kvm_regs and kvm_vcpu_arch) and in most places it is
> > accessed as such.
> > 
> > This patch corrects places where it is accessed as a 32 bit field by a
> > 64 bit kernel.  In some cases this is via a 32 bit load or store
> > instruction which, depending on endianness, will cause either the
> > lower or upper 32 bits to be missed.  In another case it is cast as a
> > u32, causing the upper 32 bits to be cleared.
> > 
> > This patch corrects those places by extending the access methods to
> > 64 bits.
> > 
> > Signed-off-by: Sam Bobroff <sam.bobroff@au1.ibm.com>
> > ---
> > 
> >  arch/powerpc/include/asm/kvm_book3s.h   |    4 ++--
> >  arch/powerpc/kvm/book3s_hv_rmhandlers.S |    6 +++---
> >  arch/powerpc/kvm/book3s_segment.S       |    4 ++--
> >  3 files changed, 7 insertions(+), 7 deletions(-)
> > 
> > diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h
> > index b91e74a..05a875a 100644
> > --- a/arch/powerpc/include/asm/kvm_book3s.h
> > +++ b/arch/powerpc/include/asm/kvm_book3s.h
> > @@ -225,12 +225,12 @@ static inline u32 kvmppc_get_cr(struct kvm_vcpu *vcpu)
> >  	return vcpu->arch.cr;
> >  }
> >  
> > -static inline void kvmppc_set_xer(struct kvm_vcpu *vcpu, u32 val)
> > +static inline void kvmppc_set_xer(struct kvm_vcpu *vcpu, ulong val)
> >  {
> >  	vcpu->arch.xer = val;
> >  }
> >  
> > -static inline u32 kvmppc_get_xer(struct kvm_vcpu *vcpu)
> > +static inline ulong kvmppc_get_xer(struct kvm_vcpu *vcpu)
> >  {
> >  	return vcpu->arch.xer;
> >  }
> > diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
> > index 4d70df2..d75be59 100644
> > --- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
> > +++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
> > @@ -870,7 +870,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
> >  	blt	hdec_soon
> >  
> >  	ld	r6, VCPU_CTR(r4)
> > -	lwz	r7, VCPU_XER(r4)
> > +	ld	r7, VCPU_XER(r4)
> >  
> >  	mtctr	r6
> >  	mtxer	r7
> > @@ -1103,7 +1103,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
> >  	mfctr	r3
> >  	mfxer	r4
> >  	std	r3, VCPU_CTR(r9)
> > -	stw	r4, VCPU_XER(r9)
> > +	std	r4, VCPU_XER(r9)
> >  
> >  	/* If this is a page table miss then see if it's theirs or ours */
> >  	cmpwi	r12, BOOK3S_INTERRUPT_H_DATA_STORAGE
> > @@ -1675,7 +1675,7 @@ kvmppc_hdsi:
> >  	bl	kvmppc_msr_interrupt
> >  fast_interrupt_c_return:
> >  6:	ld	r7, VCPU_CTR(r9)
> > -	lwz	r8, VCPU_XER(r9)
> > +	ld	r8, VCPU_XER(r9)
> >  	mtctr	r7
> >  	mtxer	r8
> >  	mr	r4, r9
> > diff --git a/arch/powerpc/kvm/book3s_segment.S b/arch/powerpc/kvm/book3s_segment.S
> > index acee37c..ca8f174 100644
> > --- a/arch/powerpc/kvm/book3s_segment.S
> > +++ b/arch/powerpc/kvm/book3s_segment.S
> > @@ -123,7 +123,7 @@ no_dcbz32_on:
> >  	PPC_LL	r8, SVCPU_CTR(r3)
> >  	PPC_LL	r9, SVCPU_LR(r3)
> >  	lwz	r10, SVCPU_CR(r3)
> > -	lwz	r11, SVCPU_XER(r3)
> > +	PPC_LL	r11, SVCPU_XER(r3)
> 
> struct kvmppc_book3s_shadow_vcpu {
>         bool in_use;
>         ulong gpr[14];
>         u32 cr;
>         u32 xer;
> [...]
> 
> so at least this change looks wrong. Please double-check all fields in
> your patch again.
> 
> 
> Alex

Thanks for the review and the catch!

The xer field in kvm_vcpu_arch is already ulong, so it looks like the one in
kvmppc_book3s_shadow_vcpu is the only other case. I'll fix that and repost.

Cheers,
Sam.
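
For reference, a sketch of the kind of follow-up Sam describes here; the
actual repost may differ, and the field list is abbreviated to the part
Alex quoted (types as in the kernel headers):

struct kvmppc_book3s_shadow_vcpu {
	bool in_use;
	ulong gpr[14];
	u32 cr;
	ulong xer;	/* was u32; widened so PPC_LL/PPC_STL in
			 * book3s_segment.S access a register-sized slot */
	/* ... remaining fields unchanged ... */
};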

Alexander Graf May 26, 2015, 12:23 a.m. UTC | #6
On 26.05.15 02:14, Sam Bobroff wrote:
> On Mon, May 25, 2015 at 11:08:08PM +0200, Alexander Graf wrote:
>>
>>
>> On 20.05.15 07:26, Sam Bobroff wrote:
>>> In 64 bit kernels, the Fixed Point Exception Register (XER) is a 64
>>> bit field (e.g. in kvm_regs and kvm_vcpu_arch) and in most places it is
>>> accessed as such.
>>>
>>> This patch corrects places where it is accessed as a 32 bit field by a
>>> 64 bit kernel.  In some cases this is via a 32 bit load or store
>>> instruction which, depending on endianness, will cause either the
>>> lower or upper 32 bits to be missed.  In another case it is cast as a
>>> u32, causing the upper 32 bits to be cleared.
>>>
>>> This patch corrects those places by extending the access methods to
>>> 64 bits.
>>>
>>> Signed-off-by: Sam Bobroff <sam.bobroff@au1.ibm.com>
>>> ---
>>>
>>>  arch/powerpc/include/asm/kvm_book3s.h   |    4 ++--
>>>  arch/powerpc/kvm/book3s_hv_rmhandlers.S |    6 +++---
>>>  arch/powerpc/kvm/book3s_segment.S       |    4 ++--
>>>  3 files changed, 7 insertions(+), 7 deletions(-)
>>>
>>> diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h
>>> index b91e74a..05a875a 100644
>>> --- a/arch/powerpc/include/asm/kvm_book3s.h
>>> +++ b/arch/powerpc/include/asm/kvm_book3s.h
>>> @@ -225,12 +225,12 @@ static inline u32 kvmppc_get_cr(struct kvm_vcpu *vcpu)
>>>  	return vcpu->arch.cr;
>>>  }
>>>  
>>> -static inline void kvmppc_set_xer(struct kvm_vcpu *vcpu, u32 val)
>>> +static inline void kvmppc_set_xer(struct kvm_vcpu *vcpu, ulong val)
>>>  {
>>>  	vcpu->arch.xer = val;
>>>  }
>>>  
>>> -static inline u32 kvmppc_get_xer(struct kvm_vcpu *vcpu)
>>> +static inline ulong kvmppc_get_xer(struct kvm_vcpu *vcpu)
>>>  {
>>>  	return vcpu->arch.xer;
>>>  }
>>> diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
>>> index 4d70df2..d75be59 100644
>>> --- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
>>> +++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
>>> @@ -870,7 +870,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
>>>  	blt	hdec_soon
>>>  
>>>  	ld	r6, VCPU_CTR(r4)
>>> -	lwz	r7, VCPU_XER(r4)
>>> +	ld	r7, VCPU_XER(r4)
>>>  
>>>  	mtctr	r6
>>>  	mtxer	r7
>>> @@ -1103,7 +1103,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
>>>  	mfctr	r3
>>>  	mfxer	r4
>>>  	std	r3, VCPU_CTR(r9)
>>> -	stw	r4, VCPU_XER(r9)
>>> +	std	r4, VCPU_XER(r9)
>>>  
>>>  	/* If this is a page table miss then see if it's theirs or ours */
>>>  	cmpwi	r12, BOOK3S_INTERRUPT_H_DATA_STORAGE
>>> @@ -1675,7 +1675,7 @@ kvmppc_hdsi:
>>>  	bl	kvmppc_msr_interrupt
>>>  fast_interrupt_c_return:
>>>  6:	ld	r7, VCPU_CTR(r9)
>>> -	lwz	r8, VCPU_XER(r9)
>>> +	ld	r8, VCPU_XER(r9)
>>>  	mtctr	r7
>>>  	mtxer	r8
>>>  	mr	r4, r9
>>> diff --git a/arch/powerpc/kvm/book3s_segment.S b/arch/powerpc/kvm/book3s_segment.S
>>> index acee37c..ca8f174 100644
>>> --- a/arch/powerpc/kvm/book3s_segment.S
>>> +++ b/arch/powerpc/kvm/book3s_segment.S
>>> @@ -123,7 +123,7 @@ no_dcbz32_on:
>>>  	PPC_LL	r8, SVCPU_CTR(r3)
>>>  	PPC_LL	r9, SVCPU_LR(r3)
>>>  	lwz	r10, SVCPU_CR(r3)
>>> -	lwz	r11, SVCPU_XER(r3)
>>> +	PPC_LL	r11, SVCPU_XER(r3)
>>
>> struct kvmppc_book3s_shadow_vcpu {
>>         bool in_use;
>>         ulong gpr[14];
>>         u32 cr;
>>         u32 xer;
>> [...]
>>
>> so at least this change looks wrong. Please double-check all fields in
>> your patch again.
>>
>>
>> Alex
> 
> Thanks for the review and the catch!
> 
> The xer field in kvm_vcpu_arch is already ulong, so it looks like the one in
> kvmppc_book3s_shadow_vcpu is the only other case. I'll fix that and repost.

I guess, given that the one in pt_regs is also ulong, going ulong rather
than u32 is the better choice, yes.

While you're at it, could you please do a grep -i xer across all kvm (.c
and .h) files and sanity-check that we're staying in sync?


Thanks!

Alex

Patch

diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h
index b91e74a..05a875a 100644
--- a/arch/powerpc/include/asm/kvm_book3s.h
+++ b/arch/powerpc/include/asm/kvm_book3s.h
@@ -225,12 +225,12 @@  static inline u32 kvmppc_get_cr(struct kvm_vcpu *vcpu)
 	return vcpu->arch.cr;
 }
 
-static inline void kvmppc_set_xer(struct kvm_vcpu *vcpu, u32 val)
+static inline void kvmppc_set_xer(struct kvm_vcpu *vcpu, ulong val)
 {
 	vcpu->arch.xer = val;
 }
 
-static inline u32 kvmppc_get_xer(struct kvm_vcpu *vcpu)
+static inline ulong kvmppc_get_xer(struct kvm_vcpu *vcpu)
 {
 	return vcpu->arch.xer;
 }
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index 4d70df2..d75be59 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -870,7 +870,7 @@  END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
 	blt	hdec_soon
 
 	ld	r6, VCPU_CTR(r4)
-	lwz	r7, VCPU_XER(r4)
+	ld	r7, VCPU_XER(r4)
 
 	mtctr	r6
 	mtxer	r7
@@ -1103,7 +1103,7 @@  END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
 	mfctr	r3
 	mfxer	r4
 	std	r3, VCPU_CTR(r9)
-	stw	r4, VCPU_XER(r9)
+	std	r4, VCPU_XER(r9)
 
 	/* If this is a page table miss then see if it's theirs or ours */
 	cmpwi	r12, BOOK3S_INTERRUPT_H_DATA_STORAGE
@@ -1675,7 +1675,7 @@  kvmppc_hdsi:
 	bl	kvmppc_msr_interrupt
 fast_interrupt_c_return:
 6:	ld	r7, VCPU_CTR(r9)
-	lwz	r8, VCPU_XER(r9)
+	ld	r8, VCPU_XER(r9)
 	mtctr	r7
 	mtxer	r8
 	mr	r4, r9
diff --git a/arch/powerpc/kvm/book3s_segment.S b/arch/powerpc/kvm/book3s_segment.S
index acee37c..ca8f174 100644
--- a/arch/powerpc/kvm/book3s_segment.S
+++ b/arch/powerpc/kvm/book3s_segment.S
@@ -123,7 +123,7 @@  no_dcbz32_on:
 	PPC_LL	r8, SVCPU_CTR(r3)
 	PPC_LL	r9, SVCPU_LR(r3)
 	lwz	r10, SVCPU_CR(r3)
-	lwz	r11, SVCPU_XER(r3)
+	PPC_LL	r11, SVCPU_XER(r3)
 
 	mtctr	r8
 	mtlr	r9
@@ -237,7 +237,7 @@  END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
 	mfctr	r8
 	mflr	r9
 
-	stw	r5, SVCPU_XER(r13)
+	PPC_STL	r5, SVCPU_XER(r13)
 	PPC_STL	r6, SVCPU_FAULT_DAR(r13)
 	stw	r7, SVCPU_FAULT_DSISR(r13)
 	PPC_STL	r8, SVCPU_CTR(r13)