[v4,7/8] KVM: PPC: Ultravisor: Enter a secure guest

Message ID 20190628200825.31049-8-cclaudio@linux.ibm.com
State Superseded
Series kvmppc: Paravirtualize KVM to support ultravisor

Commit Message

Claudio Carvalho June 28, 2019, 8:08 p.m. UTC
From: Sukadev Bhattiprolu <sukadev@linux.vnet.ibm.com>

To enter a secure guest, we have to go through the ultravisor;
therefore, we do a ucall when entering a secure guest.

This change is needed for any sort of entry to the secure guest from the
hypervisor, whether it is a return from an hcall, a return from a
hypervisor interrupt, or the first time that a secure guest vCPU is run.

If we are returning from an hcall, the results are already in the
appropriate registers (R3-R12), except for R3, R6 and R7. R3 holds the
status of the reflected hcall, so we move it to R0 for the ultravisor
and set R3 to the UV_RETURN ucall number. R6 and R7 were used above as
temporary registers, hence we restore them.
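
For readers unfamiliar with ucalls: a ucall is the ultravisor analogue
of an hcall, entered with "sc 2" rather than "sc 1", with the ucall
number passed in R3 and the status normally returned in R3. As a rough
illustration (a sketch only, not the wrapper this series provides), a
C caller could look like:

static inline unsigned long ucall_sketch(unsigned long opcode,
					 unsigned long arg)
{
	register unsigned long r3 asm("r3") = opcode;
	register unsigned long r4 asm("r4") = arg;

	/* Trap to the ultravisor; R3 comes back holding the status. */
	asm volatile("sc 2"
		     : "+r" (r3), "+r" (r4)
		     :
		     : "r0", "r5", "r6", "r7", "r8", "r9", "r10",
		       "r11", "r12", "cr0", "memory");
	return r3;
}

UV_RETURN is the one ucall with a different convention, as described
above: the hcall status travels in R0 because R3 must carry the ucall
number.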

Have fast_guest_return check the kvm_arch.secure_guest field so that a
new CPU enters the ultravisor when started (in response to an RTAS
start-cpu call).
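
In C terms, the added check has this shape (a sketch; enter_guest(),
ret_to_ultra() and hrfid_to_guest() are hypothetical stand-ins for the
assembly paths, not functions this patch adds):

/* Hypothetical stand-ins for the two assembly exit paths. */
static void ret_to_ultra(struct kvm_vcpu *vcpu);	/* sc 2 to the ultravisor */
static void hrfid_to_guest(struct kvm_vcpu *vcpu);	/* hrfid straight to the guest */

static void enter_guest(struct kvm_vcpu *vcpu)
{
	if (vcpu->kvm->arch.secure_guest)
		ret_to_ultra(vcpu);
	else
		hrfid_to_guest(vcpu);
}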

Thanks to input from Paul Mackerras, Ram Pai and Mike Anderson.

Signed-off-by: Sukadev Bhattiprolu <sukadev@linux.vnet.ibm.com>
[ Pass SRR1 in r11 for UV_RETURN, fix kvmppc_msr_interrupt to preserve
  the MSR_S bit ]
Signed-off-by: Paul Mackerras <paulus@ozlabs.org>
[ Fix UV_RETURN ucall number and arch.secure_guest check ]
Signed-off-by: Ram Pai <linuxram@us.ibm.com>
[ Save the actual R3 in R0 for the ultravisor and use R3 for the
  UV_RETURN ucall number. Update commit message and ret_to_ultra comment ]
Signed-off-by: Claudio Carvalho <cclaudio@linux.ibm.com>
---
 arch/powerpc/include/asm/kvm_host.h       |  1 +
 arch/powerpc/include/asm/ultravisor-api.h |  1 +
 arch/powerpc/kernel/asm-offsets.c         |  1 +
 arch/powerpc/kvm/book3s_hv_rmhandlers.S   | 40 +++++++++++++++++++----
 4 files changed, 37 insertions(+), 6 deletions(-)

Comments

Claudio Carvalho July 8, 2019, 8:52 p.m. UTC | #1
On 7/8/19 5:53 PM, janani wrote:
> On 2019-06-28 15:08, Claudio Carvalho wrote:
>> From: Sukadev Bhattiprolu <sukadev@linux.vnet.ibm.com>
>>
>> To enter a secure guest, we have to go through the ultravisor, therefore
>> we do a ucall when we are entering a secure guest.
>>
>> This change is needed for any sort of entry to the secure guest from the
>> hypervisor, whether it is a return from an hcall, a return from a
>> hypervisor interrupt, or the first time that a secure guest vCPU is run.
>>
>> If we are returning from an hcall, the results are already in the
>> appropriate registers R3:12, except for R3, R6 and R7. R3 has the status
>> of the reflected hcall, therefore we move it to R0 for the ultravisor and
>> set R3 to the UV_RETURN ucall number. R6,7 were used as temporary
>> registers, hence we restore them.
>>
>> Have fast_guest_return check the kvm_arch.secure_guest field so that a
>> new CPU enters UV when started (in response to a RTAS start-cpu call).
>>
>> Thanks to input from Paul Mackerras, Ram Pai and Mike Anderson.
>>
>> Signed-off-by: Sukadev Bhattiprolu <sukadev@linux.vnet.ibm.com>
>> [ Pass SRR1 in r11 for UV_RETURN, fix kvmppc_msr_interrupt to preserve
>>   the MSR_S bit ]
>> Signed-off-by: Paul Mackerras <paulus@ozlabs.org>
>> [ Fix UV_RETURN ucall number and arch.secure_guest check ]
>> Signed-off-by: Ram Pai <linuxram@us.ibm.com>
>> [ Save the actual R3 in R0 for the ultravisor and use R3 for the
>>   UV_RETURN ucall number. Update commit message and ret_to_ultra comment ]
>> Signed-off-by: Claudio Carvalho <cclaudio@linux.ibm.com>
>  Reviewed-by: Janani Janakiraman <janani@linux.ibm.com>


Thanks Janani for reviewing the patchset.

Claudio


janani July 8, 2019, 8:53 p.m. UTC | #2
On 2019-06-28 15:08, Claudio Carvalho wrote:
> From: Sukadev Bhattiprolu <sukadev@linux.vnet.ibm.com>
> 
> To enter a secure guest, we have to go through the ultravisor, 
> therefore
> we do a ucall when we are entering a secure guest.
> 
> This change is needed for any sort of entry to the secure guest from 
> the
> hypervisor, whether it is a return from an hcall, a return from a
> hypervisor interrupt, or the first time that a secure guest vCPU is 
> run.
> 
> If we are returning from an hcall, the results are already in the
> appropriate registers R3:12, except for R3, R6 and R7. R3 has the 
> status
> of the reflected hcall, therefore we move it to R0 for the ultravisor 
> and
> set R3 to the UV_RETURN ucall number. R6,7 were used as temporary
> registers, hence we restore them.
> 
> Have fast_guest_return check the kvm_arch.secure_guest field so that a
> new CPU enters UV when started (in response to a RTAS start-cpu call).
> 
> Thanks to input from Paul Mackerras, Ram Pai and Mike Anderson.
> 
> Signed-off-by: Sukadev Bhattiprolu <sukadev@linux.vnet.ibm.com>
> [ Pass SRR1 in r11 for UV_RETURN, fix kvmppc_msr_interrupt to preserve
>   the MSR_S bit ]
> Signed-off-by: Paul Mackerras <paulus@ozlabs.org>
> [ Fix UV_RETURN ucall number and arch.secure_guest check ]
> Signed-off-by: Ram Pai <linuxram@us.ibm.com>
> [ Save the actual R3 in R0 for the ultravisor and use R3 for the
>   UV_RETURN ucall number. Update commit message and ret_to_ultra 
> comment ]
> Signed-off-by: Claudio Carvalho <cclaudio@linux.ibm.com>
  Reviewed-by: Janani Janakiraman <janani@linux.ibm.com>
Michael Ellerman July 11, 2019, 12:57 p.m. UTC | #3
Claudio Carvalho <cclaudio@linux.ibm.com> writes:
> From: Sukadev Bhattiprolu <sukadev@linux.vnet.ibm.com>
>
> To enter a secure guest, we have to go through the ultravisor, therefore
> we do a ucall when we are entering a secure guest.
>
> This change is needed for any sort of entry to the secure guest from the
> hypervisor, whether it is a return from an hcall, a return from a
> hypervisor interrupt, or the first time that a secure guest vCPU is run.
>
> If we are returning from an hcall, the results are already in the
> appropriate registers R3:12, except for R3, R6 and R7. R3 has the status
> of the reflected hcall, therefore we move it to R0 for the ultravisor and
> set R3 to the UV_RETURN ucall number. R6,7 were used as temporary
> registers, hence we restore them.

This is another case where some documentation would help people to
review the code.

> Have fast_guest_return check the kvm_arch.secure_guest field so that a
> new CPU enters UV when started (in response to a RTAS start-cpu call).
>
> Thanks to input from Paul Mackerras, Ram Pai and Mike Anderson.
>
> [...]
>
> diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
> index cffb365d9d02..89813ca987c2 100644
> --- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
> +++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
> @@ -36,6 +36,7 @@
>  #include <asm/asm-compat.h>
>  #include <asm/feature-fixups.h>
>  #include <asm/cpuidle.h>
> +#include <asm/ultravisor-api.h>
>  
>  /* Sign-extend HDEC if not on POWER9 */
>  #define EXTEND_HDEC(reg)			\
> @@ -1092,16 +1093,12 @@ BEGIN_FTR_SECTION
>  END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
>  
>  	ld	r5, VCPU_LR(r4)
> -	ld	r6, VCPU_CR(r4)
>  	mtlr	r5
> -	mtcr	r6
>  
>  	ld	r1, VCPU_GPR(R1)(r4)
>  	ld	r2, VCPU_GPR(R2)(r4)
>  	ld	r3, VCPU_GPR(R3)(r4)
>  	ld	r5, VCPU_GPR(R5)(r4)
> -	ld	r6, VCPU_GPR(R6)(r4)
> -	ld	r7, VCPU_GPR(R7)(r4)
>  	ld	r8, VCPU_GPR(R8)(r4)
>  	ld	r9, VCPU_GPR(R9)(r4)
>  	ld	r10, VCPU_GPR(R10)(r4)
> @@ -1119,10 +1116,38 @@ BEGIN_FTR_SECTION
>  	mtspr	SPRN_HDSISR, r0
>  END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
>  
> +	ld	r6, VCPU_KVM(r4)
> +	lbz	r7, KVM_SECURE_GUEST(r6)
> +	cmpdi	r7, 0

You could hoist the load of r6 and r7 to here?

> +	bne	ret_to_ultra
> +
> +	lwz	r6, VCPU_CR(r4)
> +	mtcr	r6
> +
> +	ld	r7, VCPU_GPR(R7)(r4)
> +	ld	r6, VCPU_GPR(R6)(r4)
>  	ld	r0, VCPU_GPR(R0)(r4)
>  	ld	r4, VCPU_GPR(R4)(r4)
>  	HRFI_TO_GUEST
>  	b	.
> +/*
> + * We are entering a secure guest, so we have to invoke the ultravisor to do
> + * that. If we are returning from a hcall, the results are already in the
> + * appropriate registers R3:12, except for R3, R6 and R7. R3 has the status of
> + * the reflected hcall, therefore we move it to R0 for the ultravisor and set
> + * R3 to the UV_RETURN ucall number. R6,7 were used as temporary registers
> + * above, hence we restore them.
> + */
> +ret_to_ultra:
> +	lwz	r6, VCPU_CR(r4)
> +	mtcr	r6
> +	mfspr	r11, SPRN_SRR1
> +	mr	r0, r3
> +	LOAD_REG_IMMEDIATE(r3, UV_RETURN)

Worth open coding to save three instructions?

> +	ld	r7, VCPU_GPR(R7)(r4)
> +	ld	r6, VCPU_GPR(R6)(r4)
> +	ld	r4, VCPU_GPR(R4)(r4)
> +	sc	2
>  
>  /*
>   * Enter the guest on a P9 or later system where we have exactly
> @@ -3318,13 +3343,16 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_RADIX)
>   *   r0 is used as a scratch register
>   */
>  kvmppc_msr_interrupt:
> +	andis.	r0, r11, MSR_S@h
>  	rldicl	r0, r11, 64 - MSR_TS_S_LG, 62
> -	cmpwi	r0, 2 /* Check if we are in transactional state..  */
> +	cmpwi	cr1, r0, 2 /* Check if we are in transactional state..  */
>  	ld	r11, VCPU_INTR_MSR(r9)
> -	bne	1f
> +	bne	cr1, 1f
>  	/* ... if transactional, change to suspended */
>  	li	r0, 1
>  1:	rldimi	r11, r0, MSR_TS_S_LG, 63 - MSR_TS_T_LG
> +	beqlr
> +	oris	r11, r11, MSR_S@h		/* preserve MSR_S bit setting */
>  	blr

I don't see this part mentioned in the change log?

It's also pretty subtle, a comment might be helpful.

cheers
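
To make the MSR_S subtlety concrete, here is an illustrative C-level
model of what the patched kvmppc_msr_interrupt computes (a sketch
assuming the MSR bit definitions from asm/reg.h; the real code is the
assembly in the hunk above):

#define MSR_S		(1UL << 22)		/* secure mode */
#define MSR_TS_S_LG	33			/* TS field: "suspended" bit */
#define MSR_TS_MASK	(3UL << MSR_TS_S_LG)

static unsigned long msr_interrupt(unsigned long guest_msr,
				   unsigned long intr_msr)
{
	unsigned long ts = (guest_msr & MSR_TS_MASK) >> MSR_TS_S_LG;
	unsigned long msr = intr_msr & ~MSR_TS_MASK;

	if (ts == 2)			/* transactional ... */
		ts = 1;			/* ... becomes suspended */
	msr |= ts << MSR_TS_S_LG;

	if (guest_msr & MSR_S)		/* keep a secure guest secure */
		msr |= MSR_S;
	return msr;
}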
Nicholas Piggin July 12, 2019, 2:03 a.m. UTC | #4
Claudio Carvalho's on June 29, 2019 6:08 am:
> From: Sukadev Bhattiprolu <sukadev@linux.vnet.ibm.com>
> 
> To enter a secure guest, we have to go through the ultravisor, therefore
> we do a ucall when we are entering a secure guest.
> 
> This change is needed for any sort of entry to the secure guest from the
> hypervisor, whether it is a return from an hcall, a return from a
> hypervisor interrupt, or the first time that a secure guest vCPU is run.
> [...]
> diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
> index cffb365d9d02..89813ca987c2 100644
> --- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
> +++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
> @@ -36,6 +36,7 @@
>  #include <asm/asm-compat.h>
>  #include <asm/feature-fixups.h>
>  #include <asm/cpuidle.h>
> +#include <asm/ultravisor-api.h>
>  
>  /* Sign-extend HDEC if not on POWER9 */
>  #define EXTEND_HDEC(reg)			\
> @@ -1092,16 +1093,12 @@ BEGIN_FTR_SECTION
>  END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
>  
>  	ld	r5, VCPU_LR(r4)
> -	ld	r6, VCPU_CR(r4)
>  	mtlr	r5
> -	mtcr	r6
>  
>  	ld	r1, VCPU_GPR(R1)(r4)
>  	ld	r2, VCPU_GPR(R2)(r4)
>  	ld	r3, VCPU_GPR(R3)(r4)
>  	ld	r5, VCPU_GPR(R5)(r4)
> -	ld	r6, VCPU_GPR(R6)(r4)
> -	ld	r7, VCPU_GPR(R7)(r4)
>  	ld	r8, VCPU_GPR(R8)(r4)
>  	ld	r9, VCPU_GPR(R9)(r4)
>  	ld	r10, VCPU_GPR(R10)(r4)

Just to try to be less arbitrary about things, could you use regs
adjacent to r4? That's generally good because then our loads have a
chance of getting paired up (though that may not help on some CPUs).

Thanks,
Nick
Sukadev Bhattiprolu July 18, 2019, 2:47 a.m. UTC | #5
Michael Ellerman [mpe@ellerman.id.au] wrote:
> Claudio Carvalho <cclaudio@linux.ibm.com> writes:
> > [...]
> > diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
> > index cffb365d9d02..89813ca987c2 100644
> > --- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
> > +++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
> > @@ -36,6 +36,7 @@
> >  #include <asm/asm-compat.h>
> >  #include <asm/feature-fixups.h>
> >  #include <asm/cpuidle.h>
> > +#include <asm/ultravisor-api.h>
> >  
> >  /* Sign-extend HDEC if not on POWER9 */
> >  #define EXTEND_HDEC(reg)			\
> > @@ -1092,16 +1093,12 @@ BEGIN_FTR_SECTION
> >  END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
> >  
> >  	ld	r5, VCPU_LR(r4)
> > -	ld	r6, VCPU_CR(r4)
> >  	mtlr	r5
> > -	mtcr	r6
> >  
> >  	ld	r1, VCPU_GPR(R1)(r4)
> >  	ld	r2, VCPU_GPR(R2)(r4)
> >  	ld	r3, VCPU_GPR(R3)(r4)
> >  	ld	r5, VCPU_GPR(R5)(r4)
> > -	ld	r6, VCPU_GPR(R6)(r4)
> > -	ld	r7, VCPU_GPR(R7)(r4)
> >  	ld	r8, VCPU_GPR(R8)(r4)
> >  	ld	r9, VCPU_GPR(R9)(r4)
> >  	ld	r10, VCPU_GPR(R10)(r4)
> > @@ -1119,10 +1116,38 @@ BEGIN_FTR_SECTION
> >  	mtspr	SPRN_HDSISR, r0
> >  END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
> >  
> > +	ld	r6, VCPU_KVM(r4)
> > +	lbz	r7, KVM_SECURE_GUEST(r6)
> > +	cmpdi	r7, 0
> 
> You could hoist the load of r6 and r7 to here?

We could move the 'ld r7' here. r6 is used to restore the CR below,
so it (r6) has to stay there?

> 
> > +	bne	ret_to_ultra
> > +
> > +	lwz	r6, VCPU_CR(r4)
> > +	mtcr	r6
> > +
> > +	ld	r7, VCPU_GPR(R7)(r4)
> > +	ld	r6, VCPU_GPR(R6)(r4)
> >  	ld	r0, VCPU_GPR(R0)(r4)
> >  	ld	r4, VCPU_GPR(R4)(r4)
> >  	HRFI_TO_GUEST
> >  	b	.
> > +/*
> > + * We are entering a secure guest, so we have to invoke the ultravisor to do
> > + * that. If we are returning from a hcall, the results are already in the
> > + * appropriate registers R3:12, except for R3, R6 and R7. R3 has the status of
> > + * the reflected hcall, therefore we move it to R0 for the ultravisor and set
> > + * R3 to the UV_RETURN ucall number. R6,7 were used as temporary registers
> > + * above, hence we restore them.
> > + */
> > +ret_to_ultra:
> > +	lwz	r6, VCPU_CR(r4)
> > +	mtcr	r6
> > +	mfspr	r11, SPRN_SRR1
> > +	mr	r0, r3
> > +	LOAD_REG_IMMEDIATE(r3, UV_RETURN)
> 
> Worth open coding to save three instructions?

Yes, good point:

-       LOAD_REG_IMMEDIATE(r3, UV_RETURN)
+
+       li      r3, 0
+       oris    r3, r3, (UV_RETURN)@__AS_ATHIGH
+       ori     r3, r3, (UV_RETURN)@l
+

Michael Ellerman July 22, 2019, 11:05 a.m. UTC | #6
Sukadev Bhattiprolu <sukadev@linux.vnet.ibm.com> writes:
> Michael Ellerman [mpe@ellerman.id.au] wrote:
>> Claudio Carvalho <cclaudio@linux.ibm.com> writes:
>> > From: Sukadev Bhattiprolu <sukadev@linux.vnet.ibm.com>
>> >
>> > To enter a secure guest, we have to go through the ultravisor, therefore
>> > we do a ucall when we are entering a secure guest.
>> >
>> > This change is needed for any sort of entry to the secure guest from the
>> > hypervisor, whether it is a return from an hcall, a return from a
>> > hypervisor interrupt, or the first time that a secure guest vCPU is run.
>> >
>> > If we are returning from an hcall, the results are already in the
>> > appropriate registers R3:12, except for R3, R6 and R7. R3 has the status
>> > of the reflected hcall, therefore we move it to R0 for the ultravisor and
>> > set R3 to the UV_RETURN ucall number. R6,7 were used as temporary
>> > registers, hence we restore them.
>> 
>> This is another case where some documentation would help people to
>> review the code.
>> 
>> > Have fast_guest_return check the kvm_arch.secure_guest field so that a
>> > new CPU enters UV when started (in response to a RTAS start-cpu call).
>> >
>> > Thanks to input from Paul Mackerras, Ram Pai and Mike Anderson.
>> >
>> > Signed-off-by: Sukadev Bhattiprolu <sukadev@linux.vnet.ibm.com>
>> > [ Pass SRR1 in r11 for UV_RETURN, fix kvmppc_msr_interrupt to preserve
>> >   the MSR_S bit ]
>> > Signed-off-by: Paul Mackerras <paulus@ozlabs.org>
>> > [ Fix UV_RETURN ucall number and arch.secure_guest check ]
>> > Signed-off-by: Ram Pai <linuxram@us.ibm.com>
>> > [ Save the actual R3 in R0 for the ultravisor and use R3 for the
>> >   UV_RETURN ucall number. Update commit message and ret_to_ultra comment ]
>> > Signed-off-by: Claudio Carvalho <cclaudio@linux.ibm.com>
>> > ---
>> >  arch/powerpc/include/asm/kvm_host.h       |  1 +
>> >  arch/powerpc/include/asm/ultravisor-api.h |  1 +
>> >  arch/powerpc/kernel/asm-offsets.c         |  1 +
>> >  arch/powerpc/kvm/book3s_hv_rmhandlers.S   | 40 +++++++++++++++++++----
>> >  4 files changed, 37 insertions(+), 6 deletions(-)
>> >
>> > diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
>> > index cffb365d9d02..89813ca987c2 100644
>> > --- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
>> > +++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
>> > @@ -36,6 +36,7 @@
>> >  #include <asm/asm-compat.h>
>> >  #include <asm/feature-fixups.h>
>> >  #include <asm/cpuidle.h>
>> > +#include <asm/ultravisor-api.h>
>> >  
>> >  /* Sign-extend HDEC if not on POWER9 */
>> >  #define EXTEND_HDEC(reg)			\
>> > @@ -1092,16 +1093,12 @@ BEGIN_FTR_SECTION
>> >  END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
>> >  
>> >  	ld	r5, VCPU_LR(r4)
>> > -	ld	r6, VCPU_CR(r4)
>> >  	mtlr	r5
>> > -	mtcr	r6
>> >  
>> >  	ld	r1, VCPU_GPR(R1)(r4)
>> >  	ld	r2, VCPU_GPR(R2)(r4)
>> >  	ld	r3, VCPU_GPR(R3)(r4)
>> >  	ld	r5, VCPU_GPR(R5)(r4)
>> > -	ld	r6, VCPU_GPR(R6)(r4)
>> > -	ld	r7, VCPU_GPR(R7)(r4)
>> >  	ld	r8, VCPU_GPR(R8)(r4)
>> >  	ld	r9, VCPU_GPR(R9)(r4)
>> >  	ld	r10, VCPU_GPR(R10)(r4)
>> > @@ -1119,10 +1116,38 @@ BEGIN_FTR_SECTION
>> >  	mtspr	SPRN_HDSISR, r0
>> >  END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
>> >  
>> > +	ld	r6, VCPU_KVM(r4)
>> > +	lbz	r7, KVM_SECURE_GUEST(r6)
>> > +	cmpdi	r7, 0
>> 
>> You could hoist the load of r6 and r7 to here?
>
> we could move 'ld r7' here. r6 is used to restore CR below so
> it (r6) has to stay there?

It's used to restore CR in both paths, so both paths load VCPU_CR(r4)
into r6. So we could instead do that load once, before the branch?

>> > +	bne	ret_to_ultra
>> > +
>> > +	lwz	r6, VCPU_CR(r4)
>> > +	mtcr	r6
>> > +
>> > +	ld	r7, VCPU_GPR(R7)(r4)
>> > +	ld	r6, VCPU_GPR(R6)(r4)
>> >  	ld	r0, VCPU_GPR(R0)(r4)
>> >  	ld	r4, VCPU_GPR(R4)(r4)
>> >  	HRFI_TO_GUEST
>> >  	b	.
>> > +/*
>> > + * We are entering a secure guest, so we have to invoke the ultravisor to do
>> > + * that. If we are returning from a hcall, the results are already in the
>> > + * appropriate registers R3:12, except for R3, R6 and R7. R3 has the status of
>> > + * the reflected hcall, therefore we move it to R0 for the ultravisor and set
>> > + * R3 to the UV_RETURN ucall number. R6,7 were used as temporary registers
>> > + * above, hence we restore them.
>> > + */
>> > +ret_to_ultra:
>> > +	lwz	r6, VCPU_CR(r4)
>> > +	mtcr	r6
>> > +	mfspr	r11, SPRN_SRR1
>> > +	mr	r0, r3
>> > +	LOAD_REG_IMMEDIATE(r3, UV_RETURN)
>> 
>> Worth open coding to save three instructions?
>
> Yes, good point:
>
> -       LOAD_REG_IMMEDIATE(r3, UV_RETURN)
> +
> +       li      r3, 0
> +       oris    r3, r3, (UV_RETURN)@__AS_ATHIGH
> +       ori     r3, r3, (UV_RETURN)@l

This should do it, no?

       li      r3, 0
       ori     r3, r3, UV_RETURN


cheers
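
For the instruction-count arithmetic in this exchange: at the time of
this series, LOAD_REG_IMMEDIATE built an arbitrary 64-bit constant in
five instructions (reproduced approximately from
arch/powerpc/include/asm/ppc_asm.h), so a two-instruction li/ori pair
saves three. Note that UV_RETURN (0xF11C) has bit 15 set, so a bare
"li r3, UV_RETURN" would sign-extend it; hence li-then-ori:

/* arch/powerpc/include/asm/ppc_asm.h (approximate, circa this series) */
#define LOAD_REG_IMMEDIATE(reg, expr)		\
	lis	reg, (expr)@highest;		\
	ori	reg, reg, (expr)@higher;	\
	rldicr	reg, reg, 32, 31;		\
	oris	reg, reg, (expr)@__AS_ATHIGH;	\
	ori	reg, reg, (expr)@l;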
Patch

diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index 013c76a0a03e..184becb62ea4 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -294,6 +294,7 @@  struct kvm_arch {
 	cpumask_t cpu_in_guest;
 	u8 radix;
 	u8 fwnmi_enabled;
+	u8 secure_guest;
 	bool threads_indep;
 	bool nested_enable;
 	pgd_t *pgtable;
diff --git a/arch/powerpc/include/asm/ultravisor-api.h b/arch/powerpc/include/asm/ultravisor-api.h
index 141940771add..7c4d0b4ced12 100644
--- a/arch/powerpc/include/asm/ultravisor-api.h
+++ b/arch/powerpc/include/asm/ultravisor-api.h
@@ -19,5 +19,6 @@ 
 
 /* opcodes */
 #define UV_WRITE_PATE			0xF104
+#define UV_RETURN			0xF11C
 
 #endif /* _ASM_POWERPC_ULTRAVISOR_API_H */
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 8e02444e9d3d..44742724513e 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -508,6 +508,7 @@  int main(void)
 	OFFSET(KVM_VRMA_SLB_V, kvm, arch.vrma_slb_v);
 	OFFSET(KVM_RADIX, kvm, arch.radix);
 	OFFSET(KVM_FWNMI, kvm, arch.fwnmi_enabled);
+	OFFSET(KVM_SECURE_GUEST, kvm, arch.secure_guest);
 	OFFSET(VCPU_DSISR, kvm_vcpu, arch.shregs.dsisr);
 	OFFSET(VCPU_DAR, kvm_vcpu, arch.shregs.dar);
 	OFFSET(VCPU_VPA, kvm_vcpu, arch.vpa.pinned_addr);
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index cffb365d9d02..89813ca987c2 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -36,6 +36,7 @@ 
 #include <asm/asm-compat.h>
 #include <asm/feature-fixups.h>
 #include <asm/cpuidle.h>
+#include <asm/ultravisor-api.h>
 
 /* Sign-extend HDEC if not on POWER9 */
 #define EXTEND_HDEC(reg)			\
@@ -1092,16 +1093,12 @@  BEGIN_FTR_SECTION
 END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
 
 	ld	r5, VCPU_LR(r4)
-	ld	r6, VCPU_CR(r4)
 	mtlr	r5
-	mtcr	r6
 
 	ld	r1, VCPU_GPR(R1)(r4)
 	ld	r2, VCPU_GPR(R2)(r4)
 	ld	r3, VCPU_GPR(R3)(r4)
 	ld	r5, VCPU_GPR(R5)(r4)
-	ld	r6, VCPU_GPR(R6)(r4)
-	ld	r7, VCPU_GPR(R7)(r4)
 	ld	r8, VCPU_GPR(R8)(r4)
 	ld	r9, VCPU_GPR(R9)(r4)
 	ld	r10, VCPU_GPR(R10)(r4)
@@ -1119,10 +1116,38 @@  BEGIN_FTR_SECTION
 	mtspr	SPRN_HDSISR, r0
 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
 
+	ld	r6, VCPU_KVM(r4)
+	lbz	r7, KVM_SECURE_GUEST(r6)
+	cmpdi	r7, 0
+	bne	ret_to_ultra
+
+	lwz	r6, VCPU_CR(r4)
+	mtcr	r6
+
+	ld	r7, VCPU_GPR(R7)(r4)
+	ld	r6, VCPU_GPR(R6)(r4)
 	ld	r0, VCPU_GPR(R0)(r4)
 	ld	r4, VCPU_GPR(R4)(r4)
 	HRFI_TO_GUEST
 	b	.
+/*
+ * We are entering a secure guest, so we have to invoke the ultravisor to do
+ * that. If we are returning from a hcall, the results are already in the
+ * appropriate registers R3:12, except for R3, R6 and R7. R3 has the status of
+ * the reflected hcall, therefore we move it to R0 for the ultravisor and set
+ * R3 to the UV_RETURN ucall number. R6,7 were used as temporary registers
+ * above, hence we restore them.
+ */
+ret_to_ultra:
+	lwz	r6, VCPU_CR(r4)
+	mtcr	r6
+	mfspr	r11, SPRN_SRR1
+	mr	r0, r3
+	LOAD_REG_IMMEDIATE(r3, UV_RETURN)
+	ld	r7, VCPU_GPR(R7)(r4)
+	ld	r6, VCPU_GPR(R6)(r4)
+	ld	r4, VCPU_GPR(R4)(r4)
+	sc	2
 
 /*
  * Enter the guest on a P9 or later system where we have exactly
@@ -3318,13 +3343,16 @@  END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_RADIX)
  *   r0 is used as a scratch register
  */
 kvmppc_msr_interrupt:
+	andis.	r0, r11, MSR_S@h
 	rldicl	r0, r11, 64 - MSR_TS_S_LG, 62
-	cmpwi	r0, 2 /* Check if we are in transactional state..  */
+	cmpwi	cr1, r0, 2 /* Check if we are in transactional state..  */
 	ld	r11, VCPU_INTR_MSR(r9)
-	bne	1f
+	bne	cr1, 1f
 	/* ... if transactional, change to suspended */
 	li	r0, 1
 1:	rldimi	r11, r0, MSR_TS_S_LG, 63 - MSR_TS_T_LG
+	beqlr
+	oris	r11, r11, MSR_S@h		/* preserve MSR_S bit setting */
 	blr
 
 /*