diff mbox

[v5,4/5] KVM: PPC: Allow kvmppc_get_last_inst() to fail

Message ID 1405596148-1507-5-git-send-email-mihai.caraman@freescale.com (mailing list archive)
State Not Applicable
Headers show

Commit Message

Mihai Caraman July 17, 2014, 11:22 a.m. UTC
On book3e, guest last instruction is read on the exit path using load
external pid (lwepx) dedicated instruction. This load operation may fail
due to TLB eviction and execute-but-not-read entries.

This patch lays down the path for an alternative solution to read the guest
last instruction, by allowing the kvmppc_get_last_inst() function to fail.
Architecture-specific implementations of kvmppc_load_last_inst() may read
last guest instruction and instruct the emulation layer to re-execute the
guest in case of failure.

Make kvmppc_get_last_inst() definition common between architectures.

Signed-off-by: Mihai Caraman <mihai.caraman@freescale.com>
---
v5
 - don't swap when load fail
 - convert the return value space of kvmppc_ld()

v4:
 - these changes compile on book3s, please validate the functionality and
   do the necessary adaptations!
 - common declaration and enum for kvmppc_load_last_inst()
 - remove kvmppc_read_inst() in a preceding patch

v3:
 - rework patch description
 - add common definition for kvmppc_get_last_inst()
 - check return values in book3s code

v2:
 - integrated kvmppc_get_last_inst() in book3s code and checked build
 - addressed cosmetic feedback

 arch/powerpc/include/asm/kvm_book3s.h    | 26 -------------
 arch/powerpc/include/asm/kvm_booke.h     |  5 ---
 arch/powerpc/include/asm/kvm_ppc.h       | 25 +++++++++++++
 arch/powerpc/kvm/book3s.c                | 17 +++++++++
 arch/powerpc/kvm/book3s_64_mmu_hv.c      | 17 +++------
 arch/powerpc/kvm/book3s_paired_singles.c | 38 ++++++++++++-------
 arch/powerpc/kvm/book3s_pr.c             | 63 ++++++++++++++++++++++----------
 arch/powerpc/kvm/booke.c                 |  3 ++
 arch/powerpc/kvm/e500_mmu_host.c         |  6 +++
 arch/powerpc/kvm/emulate.c               | 18 ++++++---
 arch/powerpc/kvm/powerpc.c               | 11 +++++-
 11 files changed, 144 insertions(+), 85 deletions(-)

Comments

Alexander Graf July 17, 2014, 2:20 p.m. UTC | #1
On 17.07.14 13:22, Mihai Caraman wrote:
> On book3e, guest last instruction is read on the exit path using load
> external pid (lwepx) dedicated instruction. This load operation may fail
> due to TLB eviction and execute-but-not-read entries.
>
> This patch lay down the path for an alternative solution to read the guest
> last instruction, by allowing kvmppc_get_lat_inst() function to fail.
> Architecture specific implmentations of kvmppc_load_last_inst() may read
> last guest instruction and instruct the emulation layer to re-execute the
> guest in case of failure.
>
> Make kvmppc_get_last_inst() definition common between architectures.
>
> Signed-off-by: Mihai Caraman <mihai.caraman@freescale.com>
> ---
> v5
>   - don't swap when load fail
>   - convert the return value space of kvmppc_ld()
>
> v4:
>   - these changes compile on book3s, please validate the functionality and
>     do the necessary adaptations!
>   - common declaration and enum for kvmppc_load_last_inst()
>   - remove kvmppc_read_inst() in a preceding patch
>
> v3:
>   - rework patch description
>   - add common definition for kvmppc_get_last_inst()
>   - check return values in book3s code
>
> v2:
>   - integrated kvmppc_get_last_inst() in book3s code and checked build
>   - addressed cosmetic feedback
>
>   arch/powerpc/include/asm/kvm_book3s.h    | 26 -------------
>   arch/powerpc/include/asm/kvm_booke.h     |  5 ---
>   arch/powerpc/include/asm/kvm_ppc.h       | 25 +++++++++++++
>   arch/powerpc/kvm/book3s.c                | 17 +++++++++
>   arch/powerpc/kvm/book3s_64_mmu_hv.c      | 17 +++------
>   arch/powerpc/kvm/book3s_paired_singles.c | 38 ++++++++++++-------
>   arch/powerpc/kvm/book3s_pr.c             | 63 ++++++++++++++++++++++----------
>   arch/powerpc/kvm/booke.c                 |  3 ++
>   arch/powerpc/kvm/e500_mmu_host.c         |  6 +++
>   arch/powerpc/kvm/emulate.c               | 18 ++++++---
>   arch/powerpc/kvm/powerpc.c               | 11 +++++-
>   11 files changed, 144 insertions(+), 85 deletions(-)
>
> diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h
> index 20fb6f2..a86ca65 100644
> --- a/arch/powerpc/include/asm/kvm_book3s.h
> +++ b/arch/powerpc/include/asm/kvm_book3s.h
> @@ -276,32 +276,6 @@ static inline bool kvmppc_need_byteswap(struct kvm_vcpu *vcpu)
>   	return (kvmppc_get_msr(vcpu) & MSR_LE) != (MSR_KERNEL & MSR_LE);
>   }
>   
> -static inline u32 kvmppc_get_last_inst_internal(struct kvm_vcpu *vcpu, ulong pc)
> -{
> -	/* Load the instruction manually if it failed to do so in the
> -	 * exit path */
> -	if (vcpu->arch.last_inst == KVM_INST_FETCH_FAILED)
> -		kvmppc_ld(vcpu, &pc, sizeof(u32), &vcpu->arch.last_inst, false);
> -
> -	return kvmppc_need_byteswap(vcpu) ? swab32(vcpu->arch.last_inst) :
> -		vcpu->arch.last_inst;
> -}
> -
> -static inline u32 kvmppc_get_last_inst(struct kvm_vcpu *vcpu)
> -{
> -	return kvmppc_get_last_inst_internal(vcpu, kvmppc_get_pc(vcpu));
> -}
> -
> -/*
> - * Like kvmppc_get_last_inst(), but for fetching a sc instruction.
> - * Because the sc instruction sets SRR0 to point to the following
> - * instruction, we have to fetch from pc - 4.
> - */
> -static inline u32 kvmppc_get_last_sc(struct kvm_vcpu *vcpu)
> -{
> -	return kvmppc_get_last_inst_internal(vcpu, kvmppc_get_pc(vcpu) - 4);
> -}
> -
>   static inline ulong kvmppc_get_fault_dar(struct kvm_vcpu *vcpu)
>   {
>   	return vcpu->arch.fault_dar;
> diff --git a/arch/powerpc/include/asm/kvm_booke.h b/arch/powerpc/include/asm/kvm_booke.h
> index c7aed61..cbb1990 100644
> --- a/arch/powerpc/include/asm/kvm_booke.h
> +++ b/arch/powerpc/include/asm/kvm_booke.h
> @@ -69,11 +69,6 @@ static inline bool kvmppc_need_byteswap(struct kvm_vcpu *vcpu)
>   	return false;
>   }
>   
> -static inline u32 kvmppc_get_last_inst(struct kvm_vcpu *vcpu)
> -{
> -	return vcpu->arch.last_inst;
> -}
> -
>   static inline void kvmppc_set_ctr(struct kvm_vcpu *vcpu, ulong val)
>   {
>   	vcpu->arch.ctr = val;
> diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h
> index e2fd5a1..7f9c634 100644
> --- a/arch/powerpc/include/asm/kvm_ppc.h
> +++ b/arch/powerpc/include/asm/kvm_ppc.h
> @@ -47,6 +47,11 @@ enum emulation_result {
>   	EMULATE_EXIT_USER,    /* emulation requires exit to user-space */
>   };
>   
> +enum instruction_type {
> +	INST_GENERIC,
> +	INST_SC,		/* system call */
> +};
> +
>   extern int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu);
>   extern int __kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu);
>   extern void kvmppc_handler_highmem(void);
> @@ -62,6 +67,9 @@ extern int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
>   			       u64 val, unsigned int bytes,
>   			       int is_default_endian);
>   
> +extern int kvmppc_load_last_inst(struct kvm_vcpu *vcpu,
> +				 enum instruction_type type, u32 *inst);
> +
>   extern int kvmppc_emulate_instruction(struct kvm_run *run,
>                                         struct kvm_vcpu *vcpu);
>   extern int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu);
> @@ -234,6 +242,23 @@ struct kvmppc_ops {
>   extern struct kvmppc_ops *kvmppc_hv_ops;
>   extern struct kvmppc_ops *kvmppc_pr_ops;
>   
> +static inline int kvmppc_get_last_inst(struct kvm_vcpu *vcpu,
> +					enum instruction_type type, u32 *inst)
> +{
> +	int ret = EMULATE_DONE;
> +
> +	/* Load the instruction manually if it failed to do so in the
> +	 * exit path */
> +	if (vcpu->arch.last_inst == KVM_INST_FETCH_FAILED)
> +		ret = kvmppc_load_last_inst(vcpu, type, &vcpu->arch.last_inst);
> +
> +
> +	*inst = (ret == EMULATE_DONE && kvmppc_need_byteswap(vcpu)) ?
> +		swab32(vcpu->arch.last_inst) : vcpu->arch.last_inst;

This makes even less sense than the previous version. Either you treat 
inst as "definitely overwritten" or as "preserves previous data on failure".

So either you unconditionally swap like you did before or you do

if (ret == EMULATE_DONE)
     *inst = kvmppc_need_byteswap(vcpu) ? swab32(vcpu->arch.last_inst) : 
vcpu->arch.last_inst;

> +
> +	return ret;
> +}
> +
>   static inline bool is_kvmppc_hv_enabled(struct kvm *kvm)
>   {
>   	return kvm->arch.kvm_ops == kvmppc_hv_ops;
> diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
> index 31facfc..522be6b 100644
> --- a/arch/powerpc/kvm/book3s.c
> +++ b/arch/powerpc/kvm/book3s.c
> @@ -488,6 +488,23 @@ mmio:
>   }
>   EXPORT_SYMBOL_GPL(kvmppc_ld);
>   
> +int kvmppc_load_last_inst(struct kvm_vcpu *vcpu, enum instruction_type type,
> +					 u32 *inst)
> +{
> +	ulong pc = kvmppc_get_pc(vcpu);
> +	int r;
> +
> +	if (type == INST_SC)
> +		pc -= 4;
> +
> +	r = kvmppc_ld(vcpu, &pc, sizeof(u32), &vcpu->arch.last_inst, false);

inst is unused?

The rest looks pretty nice though :).


Alex
Mihai Caraman July 18, 2014, 9:05 a.m. UTC | #2
> -----Original Message-----
> From: Alexander Graf [mailto:agraf@suse.de]
> Sent: Thursday, July 17, 2014 5:21 PM
> To: Caraman Mihai Claudiu-B02008; kvm-ppc@vger.kernel.org
> Cc: kvm@vger.kernel.org; linuxppc-dev@lists.ozlabs.org
> Subject: Re: [PATCH v5 4/5] KVM: PPC: Alow kvmppc_get_last_inst() to fail
> 
> 
> On 17.07.14 13:22, Mihai Caraman wrote:
> > On book3e, guest last instruction is read on the exit path using load
> > external pid (lwepx) dedicated instruction. This load operation may
> fail
> > due to TLB eviction and execute-but-not-read entries.
> >
> > This patch lay down the path for an alternative solution to read the
> guest
> > last instruction, by allowing kvmppc_get_lat_inst() function to fail.
> > Architecture specific implmentations of kvmppc_load_last_inst() may
> read
> > last guest instruction and instruct the emulation layer to re-execute
> the
> > guest in case of failure.
> >
> > Make kvmppc_get_last_inst() definition common between architectures.
> >
> > Signed-off-by: Mihai Caraman <mihai.caraman@freescale.com>
> > ---

...

> > diff --git a/arch/powerpc/include/asm/kvm_ppc.h
> b/arch/powerpc/include/asm/kvm_ppc.h
> > index e2fd5a1..7f9c634 100644
> > --- a/arch/powerpc/include/asm/kvm_ppc.h
> > +++ b/arch/powerpc/include/asm/kvm_ppc.h
> > @@ -47,6 +47,11 @@ enum emulation_result {
> >   	EMULATE_EXIT_USER,    /* emulation requires exit to user-space */
> >   };
> >
> > +enum instruction_type {
> > +	INST_GENERIC,
> > +	INST_SC,		/* system call */
> > +};
> > +
> >   extern int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu
> *vcpu);
> >   extern int __kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu
> *vcpu);
> >   extern void kvmppc_handler_highmem(void);
> > @@ -62,6 +67,9 @@ extern int kvmppc_handle_store(struct kvm_run *run,
> struct kvm_vcpu *vcpu,
> >   			       u64 val, unsigned int bytes,
> >   			       int is_default_endian);
> >
> > +extern int kvmppc_load_last_inst(struct kvm_vcpu *vcpu,
> > +				 enum instruction_type type, u32 *inst);
> > +
> >   extern int kvmppc_emulate_instruction(struct kvm_run *run,
> >                                         struct kvm_vcpu *vcpu);
> >   extern int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu
> *vcpu);
> > @@ -234,6 +242,23 @@ struct kvmppc_ops {
> >   extern struct kvmppc_ops *kvmppc_hv_ops;
> >   extern struct kvmppc_ops *kvmppc_pr_ops;
> >
> > +static inline int kvmppc_get_last_inst(struct kvm_vcpu *vcpu,
> > +					enum instruction_type type, u32 *inst)
> > +{
> > +	int ret = EMULATE_DONE;
> > +
> > +	/* Load the instruction manually if it failed to do so in the
> > +	 * exit path */
> > +	if (vcpu->arch.last_inst == KVM_INST_FETCH_FAILED)
> > +		ret = kvmppc_load_last_inst(vcpu, type, &vcpu-
> >arch.last_inst);
> > +
> > +
> > +	*inst = (ret == EMULATE_DONE && kvmppc_need_byteswap(vcpu)) ?
> > +		swab32(vcpu->arch.last_inst) : vcpu->arch.last_inst;
> 
> This makes even less sense than the previous version. Either you treat
> inst as "definitely overwritten" or as "preserves previous data on
> failure".

Both v4 and v5 versions treat inst as "definitely overwritten".

> 
> So either you unconditionally swap like you did before

Setting aside its symmetry, KVM_INST_FETCH_FAILED is operated on
in host endianness, so it doesn't need a byte swap.

I agree with your reasoning if last_inst is initialized and compared with
data in guest endianness, which is not the case yet for KVM_INST_FETCH_FAILED.

> or you do
> 
> if (ret == EMULATE_DONE)
>      *inst = kvmppc_need_byteswap(vcpu) ? swab32(vcpu->arch.last_inst) :
> vcpu->arch.last_inst;

-Mike
Mihai Caraman July 21, 2014, 9:59 a.m. UTC | #3
> -----Original Message-----

> From: Linuxppc-dev [mailto:linuxppc-dev-

> bounces+mihai.caraman=freescale.com@lists.ozlabs.org] On Behalf Of

> mihai.caraman@freescale.com

> Sent: Friday, July 18, 2014 12:06 PM

> To: Alexander Graf; kvm-ppc@vger.kernel.org

> Cc: linuxppc-dev@lists.ozlabs.org; kvm@vger.kernel.org

> Subject: RE: [PATCH v5 4/5] KVM: PPC: Alow kvmppc_get_last_inst() to fail

> 

> > -----Original Message-----

> > From: Alexander Graf [mailto:agraf@suse.de]

> > Sent: Thursday, July 17, 2014 5:21 PM

> > To: Caraman Mihai Claudiu-B02008; kvm-ppc@vger.kernel.org

> > Cc: kvm@vger.kernel.org; linuxppc-dev@lists.ozlabs.org

> > Subject: Re: [PATCH v5 4/5] KVM: PPC: Alow kvmppc_get_last_inst() to

> fail

> >

> >

> > On 17.07.14 13:22, Mihai Caraman wrote:

> > > On book3e, guest last instruction is read on the exit path using load

> > > external pid (lwepx) dedicated instruction. This load operation may

> > fail

> > > due to TLB eviction and execute-but-not-read entries.

> > >

> > > This patch lay down the path for an alternative solution to read the

> > guest

> > > last instruction, by allowing kvmppc_get_lat_inst() function to fail.

> > > Architecture specific implmentations of kvmppc_load_last_inst() may

> > read

> > > last guest instruction and instruct the emulation layer to re-execute

> > the

> > > guest in case of failure.

> > >

> > > Make kvmppc_get_last_inst() definition common between architectures.

> > >

> > > Signed-off-by: Mihai Caraman <mihai.caraman@freescale.com>

> > > ---

> 

> ...

> 

> > > diff --git a/arch/powerpc/include/asm/kvm_ppc.h

> > b/arch/powerpc/include/asm/kvm_ppc.h

> > > index e2fd5a1..7f9c634 100644

> > > --- a/arch/powerpc/include/asm/kvm_ppc.h

> > > +++ b/arch/powerpc/include/asm/kvm_ppc.h

> > > @@ -47,6 +47,11 @@ enum emulation_result {

> > >   	EMULATE_EXIT_USER,    /* emulation requires exit to user-

> space */

> > >   };

> > >

> > > +enum instruction_type {

> > > +	INST_GENERIC,

> > > +	INST_SC,		/* system call */

> > > +};

> > > +

> > >   extern int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu

> > *vcpu);

> > >   extern int __kvmppc_vcpu_run(struct kvm_run *kvm_run, struct

> kvm_vcpu

> > *vcpu);

> > >   extern void kvmppc_handler_highmem(void);

> > > @@ -62,6 +67,9 @@ extern int kvmppc_handle_store(struct kvm_run *run,

> > struct kvm_vcpu *vcpu,

> > >   			       u64 val, unsigned int bytes,

> > >   			       int is_default_endian);

> > >

> > > +extern int kvmppc_load_last_inst(struct kvm_vcpu *vcpu,

> > > +				 enum instruction_type type, u32 *inst);

> > > +

> > >   extern int kvmppc_emulate_instruction(struct kvm_run *run,

> > >                                         struct kvm_vcpu *vcpu);

> > >   extern int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu

> > *vcpu);

> > > @@ -234,6 +242,23 @@ struct kvmppc_ops {

> > >   extern struct kvmppc_ops *kvmppc_hv_ops;

> > >   extern struct kvmppc_ops *kvmppc_pr_ops;

> > >

> > > +static inline int kvmppc_get_last_inst(struct kvm_vcpu *vcpu,

> > > +					enum instruction_type type, u32 *inst)

> > > +{

> > > +	int ret = EMULATE_DONE;

> > > +

> > > +	/* Load the instruction manually if it failed to do so in the

> > > +	 * exit path */

> > > +	if (vcpu->arch.last_inst == KVM_INST_FETCH_FAILED)

> > > +		ret = kvmppc_load_last_inst(vcpu, type, &vcpu-

> > >arch.last_inst);

> > > +

> > > +

> > > +	*inst = (ret == EMULATE_DONE && kvmppc_need_byteswap(vcpu)) ?

> > > +		swab32(vcpu->arch.last_inst) : vcpu->arch.last_inst;

> >

> > This makes even less sense than the previous version. Either you treat

> > inst as "definitely overwritten" or as "preserves previous data on

> > failure".

> 

> Both v4 and v5 versions treat inst as "definitely overwritten".

> 

> >

> > So either you unconditionally swap like you did before

> 

> If we make abstraction of its symmetry, KVM_INST_FETCH_FAILED is operated

> in host endianness, so it doesn't need byte swap.

> 

> I agree with your reasoning if last_inst is initialized and compared with

> data in guest endianess, which is not the case yet for

> KVM_INST_FETCH_FAILED.


Alex, are you relying on the fact that the KVM_INST_FETCH_FAILED value is symmetrical?
With a non-symmetrical value like 0xDEADBEEF, and considering a little-endian guest
on a big-endian host, we need to fix the kvm logic to initialize and compare last_inst
with the 0xEFBEADDE swapped value.

Your suggestion to unconditionally swap makes sense only with the above fix; otherwise
inst may end up with the 0xEFBEADDE swapped value, which is wrong.

-Mike
Alexander Graf July 22, 2014, 9:21 p.m. UTC | #4
On 21.07.14 11:59, mihai.caraman@freescale.com wrote:
>> -----Original Message-----
>> From: Linuxppc-dev [mailto:linuxppc-dev-
>> bounces+mihai.caraman=freescale.com@lists.ozlabs.org] On Behalf Of
>> mihai.caraman@freescale.com
>> Sent: Friday, July 18, 2014 12:06 PM
>> To: Alexander Graf; kvm-ppc@vger.kernel.org
>> Cc: linuxppc-dev@lists.ozlabs.org; kvm@vger.kernel.org
>> Subject: RE: [PATCH v5 4/5] KVM: PPC: Alow kvmppc_get_last_inst() to fail
>>
>>> -----Original Message-----
>>> From: Alexander Graf [mailto:agraf@suse.de]
>>> Sent: Thursday, July 17, 2014 5:21 PM
>>> To: Caraman Mihai Claudiu-B02008; kvm-ppc@vger.kernel.org
>>> Cc: kvm@vger.kernel.org; linuxppc-dev@lists.ozlabs.org
>>> Subject: Re: [PATCH v5 4/5] KVM: PPC: Alow kvmppc_get_last_inst() to
>> fail
>>>
>>> On 17.07.14 13:22, Mihai Caraman wrote:
>>>> On book3e, guest last instruction is read on the exit path using load
>>>> external pid (lwepx) dedicated instruction. This load operation may
>>> fail
>>>> due to TLB eviction and execute-but-not-read entries.
>>>>
>>>> This patch lay down the path for an alternative solution to read the
>>> guest
>>>> last instruction, by allowing kvmppc_get_lat_inst() function to fail.
>>>> Architecture specific implmentations of kvmppc_load_last_inst() may
>>> read
>>>> last guest instruction and instruct the emulation layer to re-execute
>>> the
>>>> guest in case of failure.
>>>>
>>>> Make kvmppc_get_last_inst() definition common between architectures.
>>>>
>>>> Signed-off-by: Mihai Caraman <mihai.caraman@freescale.com>
>>>> ---
>> ...
>>
>>>> diff --git a/arch/powerpc/include/asm/kvm_ppc.h
>>> b/arch/powerpc/include/asm/kvm_ppc.h
>>>> index e2fd5a1..7f9c634 100644
>>>> --- a/arch/powerpc/include/asm/kvm_ppc.h
>>>> +++ b/arch/powerpc/include/asm/kvm_ppc.h
>>>> @@ -47,6 +47,11 @@ enum emulation_result {
>>>>    	EMULATE_EXIT_USER,    /* emulation requires exit to user-
>> space */
>>>>    };
>>>>
>>>> +enum instruction_type {
>>>> +	INST_GENERIC,
>>>> +	INST_SC,		/* system call */
>>>> +};
>>>> +
>>>>    extern int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu
>>> *vcpu);
>>>>    extern int __kvmppc_vcpu_run(struct kvm_run *kvm_run, struct
>> kvm_vcpu
>>> *vcpu);
>>>>    extern void kvmppc_handler_highmem(void);
>>>> @@ -62,6 +67,9 @@ extern int kvmppc_handle_store(struct kvm_run *run,
>>> struct kvm_vcpu *vcpu,
>>>>    			       u64 val, unsigned int bytes,
>>>>    			       int is_default_endian);
>>>>
>>>> +extern int kvmppc_load_last_inst(struct kvm_vcpu *vcpu,
>>>> +				 enum instruction_type type, u32 *inst);
>>>> +
>>>>    extern int kvmppc_emulate_instruction(struct kvm_run *run,
>>>>                                          struct kvm_vcpu *vcpu);
>>>>    extern int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu
>>> *vcpu);
>>>> @@ -234,6 +242,23 @@ struct kvmppc_ops {
>>>>    extern struct kvmppc_ops *kvmppc_hv_ops;
>>>>    extern struct kvmppc_ops *kvmppc_pr_ops;
>>>>
>>>> +static inline int kvmppc_get_last_inst(struct kvm_vcpu *vcpu,
>>>> +					enum instruction_type type, u32 *inst)
>>>> +{
>>>> +	int ret = EMULATE_DONE;
>>>> +
>>>> +	/* Load the instruction manually if it failed to do so in the
>>>> +	 * exit path */
>>>> +	if (vcpu->arch.last_inst == KVM_INST_FETCH_FAILED)
>>>> +		ret = kvmppc_load_last_inst(vcpu, type, &vcpu-
>>>> arch.last_inst);
>>>> +
>>>> +
>>>> +	*inst = (ret == EMULATE_DONE && kvmppc_need_byteswap(vcpu)) ?
>>>> +		swab32(vcpu->arch.last_inst) : vcpu->arch.last_inst;
>>> This makes even less sense than the previous version. Either you treat
>>> inst as "definitely overwritten" or as "preserves previous data on
>>> failure".
>> Both v4 and v5 versions treat inst as "definitely overwritten".
>>
>>> So either you unconditionally swap like you did before
>> If we make abstraction of its symmetry, KVM_INST_FETCH_FAILED is operated
>> in host endianness, so it doesn't need byte swap.
>>
>> I agree with your reasoning if last_inst is initialized and compared with
>> data in guest endianess, which is not the case yet for
>> KVM_INST_FETCH_FAILED.
> Alex, are you relying on the fact that KVM_INST_FETCH_FAILED value is symmetrical?
> With a non symmetrical value like 0xDEADBEEF, and considering a little-endian guest
> on a big-endian host, we need to fix kvm logic to initialize and compare last_inst
> with 0xEFBEADDE swaped value.
>
> Your suggestion to unconditionally swap makes sense only with the above fix, otherwise
> inst may end up with 0xEFBEADDE swaped value with is wrong.

Only for *inst which we would treat as "undefined" after the function 
returned EMULATE_AGAIN. last_inst stays  unmodified.


Alex
Mihai Caraman July 23, 2014, 8:24 a.m. UTC | #5
> -----Original Message-----

> From: kvm-ppc-owner@vger.kernel.org [mailto:kvm-ppc-

> owner@vger.kernel.org] On Behalf Of Alexander Graf

> Sent: Wednesday, July 23, 2014 12:21 AM

> To: Caraman Mihai Claudiu-B02008

> Cc: kvm-ppc@vger.kernel.org; linuxppc-dev@lists.ozlabs.org;

> kvm@vger.kernel.org

> Subject: Re: [PATCH v5 4/5] KVM: PPC: Alow kvmppc_get_last_inst() to fail

> 

> 

> On 21.07.14 11:59, mihai.caraman@freescale.com wrote:

> >> -----Original Message-----

> >> From: Linuxppc-dev [mailto:linuxppc-dev-

> >> bounces+mihai.caraman=freescale.com@lists.ozlabs.org] On Behalf Of

> >> mihai.caraman@freescale.com

> >> Sent: Friday, July 18, 2014 12:06 PM

> >> To: Alexander Graf; kvm-ppc@vger.kernel.org

> >> Cc: linuxppc-dev@lists.ozlabs.org; kvm@vger.kernel.org

> >> Subject: RE: [PATCH v5 4/5] KVM: PPC: Alow kvmppc_get_last_inst() to

> fail

> >>

> >>> -----Original Message-----

> >>> From: Alexander Graf [mailto:agraf@suse.de]

> >>> Sent: Thursday, July 17, 2014 5:21 PM

> >>> To: Caraman Mihai Claudiu-B02008; kvm-ppc@vger.kernel.org

> >>> Cc: kvm@vger.kernel.org; linuxppc-dev@lists.ozlabs.org

> >>> Subject: Re: [PATCH v5 4/5] KVM: PPC: Alow kvmppc_get_last_inst() to

> >> fail

> >>>

> >>> On 17.07.14 13:22, Mihai Caraman wrote:

> >>>> On book3e, guest last instruction is read on the exit path using

> load

> >>>> external pid (lwepx) dedicated instruction. This load operation may

> >>> fail

> >>>> due to TLB eviction and execute-but-not-read entries.

> >>>>

> >>>> This patch lay down the path for an alternative solution to read the

> >>> guest

> >>>> last instruction, by allowing kvmppc_get_lat_inst() function to

> fail.

> >>>> Architecture specific implmentations of kvmppc_load_last_inst() may

> >>> read

> >>>> last guest instruction and instruct the emulation layer to re-

> execute

> >>> the

> >>>> guest in case of failure.

> >>>>

> >>>> Make kvmppc_get_last_inst() definition common between architectures.

> >>>>

> >>>> Signed-off-by: Mihai Caraman <mihai.caraman@freescale.com>

> >>>> ---

> >> ...

> >>

> >>>> diff --git a/arch/powerpc/include/asm/kvm_ppc.h

> >>> b/arch/powerpc/include/asm/kvm_ppc.h

> >>>> index e2fd5a1..7f9c634 100644

> >>>> --- a/arch/powerpc/include/asm/kvm_ppc.h

> >>>> +++ b/arch/powerpc/include/asm/kvm_ppc.h

> >>>> @@ -47,6 +47,11 @@ enum emulation_result {

> >>>>    	EMULATE_EXIT_USER,    /* emulation requires exit to user-

> >> space */

> >>>>    };

> >>>>

> >>>> +enum instruction_type {

> >>>> +	INST_GENERIC,

> >>>> +	INST_SC,		/* system call */

> >>>> +};

> >>>> +

> >>>>    extern int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct

> kvm_vcpu

> >>> *vcpu);

> >>>>    extern int __kvmppc_vcpu_run(struct kvm_run *kvm_run, struct

> >> kvm_vcpu

> >>> *vcpu);

> >>>>    extern void kvmppc_handler_highmem(void);

> >>>> @@ -62,6 +67,9 @@ extern int kvmppc_handle_store(struct kvm_run

> *run,

> >>> struct kvm_vcpu *vcpu,

> >>>>    			       u64 val, unsigned int bytes,

> >>>>    			       int is_default_endian);

> >>>>

> >>>> +extern int kvmppc_load_last_inst(struct kvm_vcpu *vcpu,

> >>>> +				 enum instruction_type type, u32 *inst);

> >>>> +

> >>>>    extern int kvmppc_emulate_instruction(struct kvm_run *run,

> >>>>                                          struct kvm_vcpu *vcpu);

> >>>>    extern int kvmppc_emulate_mmio(struct kvm_run *run, struct

> kvm_vcpu

> >>> *vcpu);

> >>>> @@ -234,6 +242,23 @@ struct kvmppc_ops {

> >>>>    extern struct kvmppc_ops *kvmppc_hv_ops;

> >>>>    extern struct kvmppc_ops *kvmppc_pr_ops;

> >>>>

> >>>> +static inline int kvmppc_get_last_inst(struct kvm_vcpu *vcpu,

> >>>> +					enum instruction_type type, u32

> *inst)

> >>>> +{

> >>>> +	int ret = EMULATE_DONE;

> >>>> +

> >>>> +	/* Load the instruction manually if it failed to do so in the

> >>>> +	 * exit path */

> >>>> +	if (vcpu->arch.last_inst == KVM_INST_FETCH_FAILED)

> >>>> +		ret = kvmppc_load_last_inst(vcpu, type, &vcpu-

> >>>> arch.last_inst);

> >>>> +

> >>>> +

> >>>> +	*inst = (ret == EMULATE_DONE && kvmppc_need_byteswap(vcpu)) ?

> >>>> +		swab32(vcpu->arch.last_inst) : vcpu->arch.last_inst;

> >>> This makes even less sense than the previous version. Either you

> treat

> >>> inst as "definitely overwritten" or as "preserves previous data on

> >>> failure".

> >> Both v4 and v5 versions treat inst as "definitely overwritten".

> >>

> >>> So either you unconditionally swap like you did before

> >> If we make abstraction of its symmetry, KVM_INST_FETCH_FAILED is

> operated

> >> in host endianness, so it doesn't need byte swap.

> >>

> >> I agree with your reasoning if last_inst is initialized and compared

> with

> >> data in guest endianess, which is not the case yet for

> >> KVM_INST_FETCH_FAILED.

> > Alex, are you relying on the fact that KVM_INST_FETCH_FAILED value is

> symmetrical?

> > With a non symmetrical value like 0xDEADBEEF, and considering a little-

> endian guest

> > on a big-endian host, we need to fix kvm logic to initialize and

> compare last_inst

> > with 0xEFBEADDE swaped value.

> >

> > Your suggestion to unconditionally swap makes sense only with the above

> fix, otherwise

> > inst may end up with 0xEFBEADDE swaped value with is wrong.

> 

> Only for *inst which we would treat as "undefined" after the function

> returned EMULATE_AGAIN. 


Right. With this do you acknowledge that v5 (definitely overwritten approach)
is ok?

-Mike
Alexander Graf July 23, 2014, 8:39 a.m. UTC | #6
Am 23.07.2014 um 10:24 schrieb "mihai.caraman@freescale.com" <mihai.caraman@freescale.com>:

>> -----Original Message-----
>> From: kvm-ppc-owner@vger.kernel.org [mailto:kvm-ppc-
>> owner@vger.kernel.org] On Behalf Of Alexander Graf
>> Sent: Wednesday, July 23, 2014 12:21 AM
>> To: Caraman Mihai Claudiu-B02008
>> Cc: kvm-ppc@vger.kernel.org; linuxppc-dev@lists.ozlabs.org;
>> kvm@vger.kernel.org
>> Subject: Re: [PATCH v5 4/5] KVM: PPC: Alow kvmppc_get_last_inst() to fail
>> 
>> 
>> On 21.07.14 11:59, mihai.caraman@freescale.com wrote:
>>>> -----Original Message-----
>>>> From: Linuxppc-dev [mailto:linuxppc-dev-
>>>> bounces+mihai.caraman=freescale.com@lists.ozlabs.org] On Behalf Of
>>>> mihai.caraman@freescale.com
>>>> Sent: Friday, July 18, 2014 12:06 PM
>>>> To: Alexander Graf; kvm-ppc@vger.kernel.org
>>>> Cc: linuxppc-dev@lists.ozlabs.org; kvm@vger.kernel.org
>>>> Subject: RE: [PATCH v5 4/5] KVM: PPC: Alow kvmppc_get_last_inst() to
>> fail
>>>> 
>>>>> -----Original Message-----
>>>>> From: Alexander Graf [mailto:agraf@suse.de]
>>>>> Sent: Thursday, July 17, 2014 5:21 PM
>>>>> To: Caraman Mihai Claudiu-B02008; kvm-ppc@vger.kernel.org
>>>>> Cc: kvm@vger.kernel.org; linuxppc-dev@lists.ozlabs.org
>>>>> Subject: Re: [PATCH v5 4/5] KVM: PPC: Alow kvmppc_get_last_inst() to
>>>> fail
>>>>> 
>>>>>> On 17.07.14 13:22, Mihai Caraman wrote:
>>>>>> On book3e, guest last instruction is read on the exit path using
>> load
>>>>>> external pid (lwepx) dedicated instruction. This load operation may
>>>>> fail
>>>>>> due to TLB eviction and execute-but-not-read entries.
>>>>>> 
>>>>>> This patch lay down the path for an alternative solution to read the
>>>>> guest
>>>>>> last instruction, by allowing kvmppc_get_lat_inst() function to
>> fail.
>>>>>> Architecture specific implmentations of kvmppc_load_last_inst() may
>>>>> read
>>>>>> last guest instruction and instruct the emulation layer to re-
>> execute
>>>>> the
>>>>>> guest in case of failure.
>>>>>> 
>>>>>> Make kvmppc_get_last_inst() definition common between architectures.
>>>>>> 
>>>>>> Signed-off-by: Mihai Caraman <mihai.caraman@freescale.com>
>>>>>> ---
>>>> ...
>>>> 
>>>>>> diff --git a/arch/powerpc/include/asm/kvm_ppc.h
>>>>> b/arch/powerpc/include/asm/kvm_ppc.h
>>>>>> index e2fd5a1..7f9c634 100644
>>>>>> --- a/arch/powerpc/include/asm/kvm_ppc.h
>>>>>> +++ b/arch/powerpc/include/asm/kvm_ppc.h
>>>>>> @@ -47,6 +47,11 @@ enum emulation_result {
>>>>>>       EMULATE_EXIT_USER,    /* emulation requires exit to user-
>>>> space */
>>>>>>   };
>>>>>> 
>>>>>> +enum instruction_type {
>>>>>> +    INST_GENERIC,
>>>>>> +    INST_SC,        /* system call */
>>>>>> +};
>>>>>> +
>>>>>>   extern int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct
>> kvm_vcpu
>>>>> *vcpu);
>>>>>>   extern int __kvmppc_vcpu_run(struct kvm_run *kvm_run, struct
>>>> kvm_vcpu
>>>>> *vcpu);
>>>>>>   extern void kvmppc_handler_highmem(void);
>>>>>> @@ -62,6 +67,9 @@ extern int kvmppc_handle_store(struct kvm_run
>> *run,
>>>>> struct kvm_vcpu *vcpu,
>>>>>>                      u64 val, unsigned int bytes,
>>>>>>                      int is_default_endian);
>>>>>> 
>>>>>> +extern int kvmppc_load_last_inst(struct kvm_vcpu *vcpu,
>>>>>> +                 enum instruction_type type, u32 *inst);
>>>>>> +
>>>>>>   extern int kvmppc_emulate_instruction(struct kvm_run *run,
>>>>>>                                         struct kvm_vcpu *vcpu);
>>>>>>   extern int kvmppc_emulate_mmio(struct kvm_run *run, struct
>> kvm_vcpu
>>>>> *vcpu);
>>>>>> @@ -234,6 +242,23 @@ struct kvmppc_ops {
>>>>>>   extern struct kvmppc_ops *kvmppc_hv_ops;
>>>>>>   extern struct kvmppc_ops *kvmppc_pr_ops;
>>>>>> 
>>>>>> +static inline int kvmppc_get_last_inst(struct kvm_vcpu *vcpu,
>>>>>> +                    enum instruction_type type, u32
>> *inst)
>>>>>> +{
>>>>>> +    int ret = EMULATE_DONE;
>>>>>> +
>>>>>> +    /* Load the instruction manually if it failed to do so in the
>>>>>> +     * exit path */
>>>>>> +    if (vcpu->arch.last_inst == KVM_INST_FETCH_FAILED)
>>>>>> +        ret = kvmppc_load_last_inst(vcpu, type, &vcpu-
>>>>>> arch.last_inst);
>>>>>> +
>>>>>> +
>>>>>> +    *inst = (ret == EMULATE_DONE && kvmppc_need_byteswap(vcpu)) ?
>>>>>> +        swab32(vcpu->arch.last_inst) : vcpu->arch.last_inst;
>>>>> This makes even less sense than the previous version. Either you
>> treat
>>>>> inst as "definitely overwritten" or as "preserves previous data on
>>>>> failure".
>>>> Both v4 and v5 versions treat inst as "definitely overwritten".
>>>> 
>>>>> So either you unconditionally swap like you did before
>>>> If we make abstraction of its symmetry, KVM_INST_FETCH_FAILED is
>> operated
>>>> in host endianness, so it doesn't need byte swap.
>>>> 
>>>> I agree with your reasoning if last_inst is initialized and compared
>> with
>>>> data in guest endianess, which is not the case yet for
>>>> KVM_INST_FETCH_FAILED.
>>> Alex, are you relying on the fact that KVM_INST_FETCH_FAILED value is
>> symmetrical?
>>> With a non symmetrical value like 0xDEADBEEF, and considering a little-
>> endian guest
>>> on a big-endian host, we need to fix kvm logic to initialize and
>> compare last_inst
>>> with 0xEFBEADDE swaped value.
>>> 
>>> Your suggestion to unconditionally swap makes sense only with the above
>> fix, otherwise
>>> inst may end up with 0xEFBEADDE swaped value with is wrong.
>> 
>> Only for *inst which we would treat as "undefined" after the function
>> returned EMULATE_AGAIN.
> 
> Right. With this do you acknowledge that v5 (definitely overwritten approach)
> is ok?

I think I'm starting to understand your logic of v5. You write fetch_failed into *inst unswapped if the fetch failed.

I think that's ok, but I definitely do not like the code flow - it's too hard to understand at a glimpse. Just rewrite it to swab at local variable level, preferably with if()s and comments what this is about and have a single unconditional *inst = fetched_inst; at the end of the function.

Alex
Mihai Caraman July 23, 2014, 10:06 a.m. UTC | #7
> > Right. With this do you acknowledge that v5 (definitely overwritten
> approach)
> > is ok?
> 
> I think I'm starting to understand your logic of v5. You write
> fetch_failed into *inst unswapped if the fetch failed.

"v5
  - don't swap when load fails" :)

> 
> I think that's ok, but I definitely do not like the code flow - it's too
> hard to understand at a glimpse. Just rewrite it to swab at local
> variable level, preferably with if()s and comments what this is about and
> have a single unconditional *inst = fetched_inst; at the end of the
> function.

I will incorporate these change requests into v6.

Thanks,
-Mike
diff mbox

Patch

diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h
index 20fb6f2..a86ca65 100644
--- a/arch/powerpc/include/asm/kvm_book3s.h
+++ b/arch/powerpc/include/asm/kvm_book3s.h
@@ -276,32 +276,6 @@  static inline bool kvmppc_need_byteswap(struct kvm_vcpu *vcpu)
 	return (kvmppc_get_msr(vcpu) & MSR_LE) != (MSR_KERNEL & MSR_LE);
 }
 
-static inline u32 kvmppc_get_last_inst_internal(struct kvm_vcpu *vcpu, ulong pc)
-{
-	/* Load the instruction manually if it failed to do so in the
-	 * exit path */
-	if (vcpu->arch.last_inst == KVM_INST_FETCH_FAILED)
-		kvmppc_ld(vcpu, &pc, sizeof(u32), &vcpu->arch.last_inst, false);
-
-	return kvmppc_need_byteswap(vcpu) ? swab32(vcpu->arch.last_inst) :
-		vcpu->arch.last_inst;
-}
-
-static inline u32 kvmppc_get_last_inst(struct kvm_vcpu *vcpu)
-{
-	return kvmppc_get_last_inst_internal(vcpu, kvmppc_get_pc(vcpu));
-}
-
-/*
- * Like kvmppc_get_last_inst(), but for fetching a sc instruction.
- * Because the sc instruction sets SRR0 to point to the following
- * instruction, we have to fetch from pc - 4.
- */
-static inline u32 kvmppc_get_last_sc(struct kvm_vcpu *vcpu)
-{
-	return kvmppc_get_last_inst_internal(vcpu, kvmppc_get_pc(vcpu) - 4);
-}
-
 static inline ulong kvmppc_get_fault_dar(struct kvm_vcpu *vcpu)
 {
 	return vcpu->arch.fault_dar;
diff --git a/arch/powerpc/include/asm/kvm_booke.h b/arch/powerpc/include/asm/kvm_booke.h
index c7aed61..cbb1990 100644
--- a/arch/powerpc/include/asm/kvm_booke.h
+++ b/arch/powerpc/include/asm/kvm_booke.h
@@ -69,11 +69,6 @@  static inline bool kvmppc_need_byteswap(struct kvm_vcpu *vcpu)
 	return false;
 }
 
-static inline u32 kvmppc_get_last_inst(struct kvm_vcpu *vcpu)
-{
-	return vcpu->arch.last_inst;
-}
-
 static inline void kvmppc_set_ctr(struct kvm_vcpu *vcpu, ulong val)
 {
 	vcpu->arch.ctr = val;
diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h
index e2fd5a1..7f9c634 100644
--- a/arch/powerpc/include/asm/kvm_ppc.h
+++ b/arch/powerpc/include/asm/kvm_ppc.h
@@ -47,6 +47,11 @@  enum emulation_result {
 	EMULATE_EXIT_USER,    /* emulation requires exit to user-space */
 };
 
+enum instruction_type {
+	INST_GENERIC,
+	INST_SC,		/* system call */
+};
+
 extern int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu);
 extern int __kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu);
 extern void kvmppc_handler_highmem(void);
@@ -62,6 +67,9 @@  extern int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
 			       u64 val, unsigned int bytes,
 			       int is_default_endian);
 
+extern int kvmppc_load_last_inst(struct kvm_vcpu *vcpu,
+				 enum instruction_type type, u32 *inst);
+
 extern int kvmppc_emulate_instruction(struct kvm_run *run,
                                       struct kvm_vcpu *vcpu);
 extern int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu);
@@ -234,6 +242,23 @@  struct kvmppc_ops {
 extern struct kvmppc_ops *kvmppc_hv_ops;
 extern struct kvmppc_ops *kvmppc_pr_ops;
 
+static inline int kvmppc_get_last_inst(struct kvm_vcpu *vcpu,
+					enum instruction_type type, u32 *inst)
+{
+	int ret = EMULATE_DONE;
+
+	/* Load the instruction manually if it failed to do so in the
+	 * exit path */
+	if (vcpu->arch.last_inst == KVM_INST_FETCH_FAILED)
+		ret = kvmppc_load_last_inst(vcpu, type, &vcpu->arch.last_inst);
+
+
+	*inst = (ret == EMULATE_DONE && kvmppc_need_byteswap(vcpu)) ?
+		swab32(vcpu->arch.last_inst) : vcpu->arch.last_inst;
+
+	return ret;
+}
+
 static inline bool is_kvmppc_hv_enabled(struct kvm *kvm)
 {
 	return kvm->arch.kvm_ops == kvmppc_hv_ops;
diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
index 31facfc..522be6b 100644
--- a/arch/powerpc/kvm/book3s.c
+++ b/arch/powerpc/kvm/book3s.c
@@ -488,6 +488,23 @@  mmio:
 }
 EXPORT_SYMBOL_GPL(kvmppc_ld);
 
+int kvmppc_load_last_inst(struct kvm_vcpu *vcpu, enum instruction_type type,
+					 u32 *inst)
+{
+	ulong pc = kvmppc_get_pc(vcpu);
+	int r;
+
+	if (type == INST_SC)
+		pc -= 4;
+
+	r = kvmppc_ld(vcpu, &pc, sizeof(u32), &vcpu->arch.last_inst, false);
+	if (r == EMULATE_DONE)
+		return r;
+	else
+		return EMULATE_AGAIN;
+}
+EXPORT_SYMBOL_GPL(kvmppc_load_last_inst);
+
 int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
 {
 	return 0;
diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
index 2d154d9..fa944a3 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -530,21 +530,14 @@  static int instruction_is_store(unsigned int instr)
 static int kvmppc_hv_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu,
 				  unsigned long gpa, gva_t ea, int is_store)
 {
-	int ret;
 	u32 last_inst;
-	unsigned long srr0 = kvmppc_get_pc(vcpu);
 
-	/* We try to load the last instruction.  We don't let
-	 * emulate_instruction do it as it doesn't check what
-	 * kvmppc_ld returns.
+	/*
 	 * If we fail, we just return to the guest and try executing it again.
 	 */
-	if (vcpu->arch.last_inst == KVM_INST_FETCH_FAILED) {
-		ret = kvmppc_ld(vcpu, &srr0, sizeof(u32), &last_inst, false);
-		if (ret != EMULATE_DONE || last_inst == KVM_INST_FETCH_FAILED)
-			return RESUME_GUEST;
-		vcpu->arch.last_inst = last_inst;
-	}
+	if (kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst) !=
+		EMULATE_DONE)
+		return RESUME_GUEST;
 
 	/*
 	 * WARNING: We do not know for sure whether the instruction we just
@@ -558,7 +551,7 @@  static int kvmppc_hv_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu,
 	 * we just return and retry the instruction.
 	 */
 
-	if (instruction_is_store(kvmppc_get_last_inst(vcpu)) != !!is_store)
+	if (instruction_is_store(last_inst) != !!is_store)
 		return RESUME_GUEST;
 
 	/*
diff --git a/arch/powerpc/kvm/book3s_paired_singles.c b/arch/powerpc/kvm/book3s_paired_singles.c
index 6c8011f..bfb8035 100644
--- a/arch/powerpc/kvm/book3s_paired_singles.c
+++ b/arch/powerpc/kvm/book3s_paired_singles.c
@@ -639,26 +639,36 @@  static int kvmppc_ps_one_in(struct kvm_vcpu *vcpu, bool rc,
 
 int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu)
 {
-	u32 inst = kvmppc_get_last_inst(vcpu);
+	u32 inst;
 	enum emulation_result emulated = EMULATE_DONE;
+	int ax_rd, ax_ra, ax_rb, ax_rc;
+	short full_d;
+	u64 *fpr_d, *fpr_a, *fpr_b, *fpr_c;
 
-	int ax_rd = inst_get_field(inst, 6, 10);
-	int ax_ra = inst_get_field(inst, 11, 15);
-	int ax_rb = inst_get_field(inst, 16, 20);
-	int ax_rc = inst_get_field(inst, 21, 25);
-	short full_d = inst_get_field(inst, 16, 31);
-
-	u64 *fpr_d = &VCPU_FPR(vcpu, ax_rd);
-	u64 *fpr_a = &VCPU_FPR(vcpu, ax_ra);
-	u64 *fpr_b = &VCPU_FPR(vcpu, ax_rb);
-	u64 *fpr_c = &VCPU_FPR(vcpu, ax_rc);
-
-	bool rcomp = (inst & 1) ? true : false;
-	u32 cr = kvmppc_get_cr(vcpu);
+	bool rcomp;
+	u32 cr;
 #ifdef DEBUG
 	int i;
 #endif
 
+	emulated = kvmppc_get_last_inst(vcpu, INST_GENERIC, &inst);
+	if (emulated != EMULATE_DONE)
+		return emulated;
+
+	ax_rd = inst_get_field(inst, 6, 10);
+	ax_ra = inst_get_field(inst, 11, 15);
+	ax_rb = inst_get_field(inst, 16, 20);
+	ax_rc = inst_get_field(inst, 21, 25);
+	full_d = inst_get_field(inst, 16, 31);
+
+	fpr_d = &VCPU_FPR(vcpu, ax_rd);
+	fpr_a = &VCPU_FPR(vcpu, ax_ra);
+	fpr_b = &VCPU_FPR(vcpu, ax_rb);
+	fpr_c = &VCPU_FPR(vcpu, ax_rc);
+
+	rcomp = (inst & 1) ? true : false;
+	cr = kvmppc_get_cr(vcpu);
+
 	if (!kvmppc_inst_is_paired_single(vcpu, inst))
 		return EMULATE_FAIL;
 
diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c
index 02a983e..af67e93 100644
--- a/arch/powerpc/kvm/book3s_pr.c
+++ b/arch/powerpc/kvm/book3s_pr.c
@@ -1018,15 +1018,24 @@  int kvmppc_handle_exit_pr(struct kvm_run *run, struct kvm_vcpu *vcpu,
 	{
 		enum emulation_result er;
 		ulong flags;
+		u32 last_inst;
+		int emul;
 
 program_interrupt:
 		flags = vcpu->arch.shadow_srr1 & 0x1f0000ull;
 
+		emul = kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst);
+		if (emul != EMULATE_DONE) {
+			r = RESUME_GUEST;
+			break;
+		}
+
 		if (kvmppc_get_msr(vcpu) & MSR_PR) {
 #ifdef EXIT_DEBUG
-			printk(KERN_INFO "Userspace triggered 0x700 exception at 0x%lx (0x%x)\n", kvmppc_get_pc(vcpu), kvmppc_get_last_inst(vcpu));
+			pr_info("Userspace triggered 0x700 exception at 0x%lx (0x%x)\n",
+				kvmppc_get_pc(vcpu), last_inst);
 #endif
-			if ((kvmppc_get_last_inst(vcpu) & 0xff0007ff) !=
+			if ((last_inst & 0xff0007ff) !=
 			    (INS_DCBZ & 0xfffffff7)) {
 				kvmppc_core_queue_program(vcpu, flags);
 				r = RESUME_GUEST;
@@ -1045,7 +1054,7 @@  program_interrupt:
 			break;
 		case EMULATE_FAIL:
 			printk(KERN_CRIT "%s: emulation at %lx failed (%08x)\n",
-			       __func__, kvmppc_get_pc(vcpu), kvmppc_get_last_inst(vcpu));
+			       __func__, kvmppc_get_pc(vcpu), last_inst);
 			kvmppc_core_queue_program(vcpu, flags);
 			r = RESUME_GUEST;
 			break;
@@ -1062,8 +1071,23 @@  program_interrupt:
 		break;
 	}
 	case BOOK3S_INTERRUPT_SYSCALL:
+	{
+		u32 last_sc;
+		int emul;
+
+		/* Get last sc for papr */
+		if (vcpu->arch.papr_enabled) {
+			/* The sc instruction points SRR0 to the next inst */
+			emul = kvmppc_get_last_inst(vcpu, INST_SC, &last_sc);
+			if (emul != EMULATE_DONE) {
+				kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) - 4);
+				r = RESUME_GUEST;
+				break;
+			}
+		}
+
 		if (vcpu->arch.papr_enabled &&
-		    (kvmppc_get_last_sc(vcpu) == 0x44000022) &&
+		    (last_sc == 0x44000022) &&
 		    !(kvmppc_get_msr(vcpu) & MSR_PR)) {
 			/* SC 1 papr hypercalls */
 			ulong cmd = kvmppc_get_gpr(vcpu, 3);
@@ -1108,27 +1132,16 @@  program_interrupt:
 			r = RESUME_GUEST;
 		}
 		break;
+	}
 	case BOOK3S_INTERRUPT_FP_UNAVAIL:
 	case BOOK3S_INTERRUPT_ALTIVEC:
 	case BOOK3S_INTERRUPT_VSX:
 	{
 		int ext_msr = 0;
 		int emul;
-		ulong pc;
 		u32 last_inst;
 
-		if (vcpu->arch.hflags & BOOK3S_HFLAG_PAIRED_SINGLE) {
-			/* Emulate the instruction */
-
-			pc = kvmppc_get_pc(vcpu);
-			last_inst = kvmppc_get_last_inst(vcpu);
-			emul = kvmppc_ld(vcpu, &pc, sizeof(u32), &last_inst,
-					 false);
-			if (emul == EMULATE_DONE)
-				goto program_interrupt;
-			else
-				r = RESUME_GUEST;
-		} else {
+		if (!(vcpu->arch.hflags & BOOK3S_HFLAG_PAIRED_SINGLE)) {
 			/* Do paired single emulation */
 
 			switch (exit_nr) {
@@ -1146,14 +1159,24 @@  program_interrupt:
 			}
 
 			r = kvmppc_handle_ext(vcpu, exit_nr, ext_msr);
+			break;
+		}
+
+		emul = kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst);
+		if (emul == EMULATE_DONE) {
+			/* we need to emulate this instruction */
+			goto program_interrupt;
+			break;
+		} else {
+			r = RESUME_GUEST;
 		}
+
 		break;
 	}
 	case BOOK3S_INTERRUPT_ALIGNMENT:
 	{
-		ulong pc = kvmppc_get_pc(vcpu);
-		u32 last_inst = kvmppc_get_last_inst(vcpu);
-		int emul = kvmppc_ld(vcpu, &pc, sizeof(u32), &last_inst, false);
+		u32 last_inst;
+		int emul = kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst);
 
 		if (emul == EMULATE_DONE) {
 			u32 dsisr;
diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
index ab62109..34a42b9 100644
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -752,6 +752,9 @@  static int emulation_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
 		 * they were actually modified by emulation. */
 		return RESUME_GUEST_NV;
 
+	case EMULATE_AGAIN:
+		return RESUME_GUEST;
+
 	case EMULATE_DO_DCR:
 		run->exit_reason = KVM_EXIT_DCR;
 		return RESUME_HOST;
diff --git a/arch/powerpc/kvm/e500_mmu_host.c b/arch/powerpc/kvm/e500_mmu_host.c
index 79677d7..4385c14 100644
--- a/arch/powerpc/kvm/e500_mmu_host.c
+++ b/arch/powerpc/kvm/e500_mmu_host.c
@@ -610,6 +610,12 @@  void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 eaddr, gpa_t gpaddr,
 	}
 }
 
+int kvmppc_load_last_inst(struct kvm_vcpu *vcpu, enum instruction_type type,
+			  u32 *instr)
+{
+	return EMULATE_AGAIN;
+}
+
 /************* MMU Notifiers *************/
 
 int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
diff --git a/arch/powerpc/kvm/emulate.c b/arch/powerpc/kvm/emulate.c
index da86d9b..c5c64b6 100644
--- a/arch/powerpc/kvm/emulate.c
+++ b/arch/powerpc/kvm/emulate.c
@@ -224,19 +224,25 @@  static int kvmppc_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt)
  * from opcode tables in the future. */
 int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
 {
-	u32 inst = kvmppc_get_last_inst(vcpu);
-	int ra = get_ra(inst);
-	int rs = get_rs(inst);
-	int rt = get_rt(inst);
-	int sprn = get_sprn(inst);
-	enum emulation_result emulated = EMULATE_DONE;
+	u32 inst;
+	int ra, rs, rt, sprn;
+	enum emulation_result emulated;
 	int advance = 1;
 
 	/* this default type might be overwritten by subcategories */
 	kvmppc_set_exit_type(vcpu, EMULATED_INST_EXITS);
 
+	emulated = kvmppc_get_last_inst(vcpu, INST_GENERIC, &inst);
+	if (emulated != EMULATE_DONE)
+		return emulated;
+
 	pr_debug("Emulating opcode %d / %d\n", get_op(inst), get_xop(inst));
 
+	ra = get_ra(inst);
+	rs = get_rs(inst);
+	rt = get_rt(inst);
+	sprn = get_sprn(inst);
+
 	switch (get_op(inst)) {
 	case OP_TRAP:
 #ifdef CONFIG_PPC_BOOK3S
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index fe0257a..cfa6cfa 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -280,6 +280,9 @@  int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu)
 		 * actually modified. */
 		r = RESUME_GUEST_NV;
 		break;
+	case EMULATE_AGAIN:
+		r = RESUME_GUEST;
+		break;
 	case EMULATE_DO_MMIO:
 		run->exit_reason = KVM_EXIT_MMIO;
 		/* We must reload nonvolatiles because "update" load/store
@@ -289,11 +292,15 @@  int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu)
 		r = RESUME_HOST_NV;
 		break;
 	case EMULATE_FAIL:
+	{
+		u32 last_inst;
+
+	kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst);
 		/* XXX Deliver Program interrupt to guest. */
-		printk(KERN_EMERG "%s: emulation failed (%08x)\n", __func__,
-		       kvmppc_get_last_inst(vcpu));
+		pr_emerg("%s: emulation failed (%08x)\n", __func__, last_inst);
 		r = RESUME_HOST;
 		break;
+	}
 	default:
 		WARN_ON(1);
 		r = RESUME_GUEST;