diff mbox series

[13/13] KVM: PPC: Ultravisor: Have fast_guest_return check secure_guest

Message ID 1548172784-27414-14-git-send-email-linuxram@us.ibm.com
State Changes Requested
Headers show
Series KVM: PPC: Paravirtualize KVM to support Ultravisor | expand

Commit Message

Ram Pai Jan. 22, 2019, 3:59 p.m. UTC
From: Sukadev Bhattiprolu <sukadev@linux.vnet.ibm.com>

fast_guest_return checks if HSRR1 has the MSR_S bit set to determine
if we should return to UV. The problem is that when a new CPU starts
up (in response to an RTAS start-cpu call), it will not have the MSR_S
bit set in HSRR1 yet, so the new CPU will not enter UV.

Have fast_guest_return check the kvm_arch.secure_guest field instead
so even the new CPU will enter UV.

Thanks to input from Paul Mackerras, Ram Pai, Mike Anderson.

Signed-off-by: Sukadev Bhattiprolu <sukadev@linux.ibm.com>
---
 arch/powerpc/include/asm/kvm_host.h     |  1 +
 arch/powerpc/kernel/asm-offsets.c       |  1 +
 arch/powerpc/kvm/book3s_hv_rmhandlers.S | 12 +++++++-----
 3 files changed, 9 insertions(+), 5 deletions(-)
diff mbox series

Patch

diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index 0f98f00..162005a 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -288,6 +288,7 @@  struct kvm_arch {
 	cpumask_t cpu_in_guest;
 	u8 radix;
 	u8 fwnmi_enabled;
+	u8 secure_guest;
 	bool threads_indep;
 	bool nested_enable;
 	pgd_t *pgtable;
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 9ffc72d..05f8a79 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -495,6 +495,7 @@  int main(void)
 	OFFSET(KVM_VRMA_SLB_V, kvm, arch.vrma_slb_v);
 	OFFSET(KVM_RADIX, kvm, arch.radix);
 	OFFSET(KVM_FWNMI, kvm, arch.fwnmi_enabled);
+	OFFSET(KVM_SECURE_GUEST, kvm, arch.secure_guest);
 	OFFSET(VCPU_DSISR, kvm_vcpu, arch.shregs.dsisr);
 	OFFSET(VCPU_DAR, kvm_vcpu, arch.shregs.dar);
 	OFFSET(VCPU_VPA, kvm_vcpu, arch.vpa.pinned_addr);
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index 627b823..b1710c8 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -1099,7 +1099,6 @@  END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
 	ld	r2, VCPU_GPR(R2)(r4)
 	ld	r3, VCPU_GPR(R3)(r4)
 	ld	r5, VCPU_GPR(R5)(r4)
-	ld	r7, VCPU_GPR(R7)(r4)
 	ld	r8, VCPU_GPR(R8)(r4)
 	ld	r9, VCPU_GPR(R9)(r4)
 	ld	r10, VCPU_GPR(R10)(r4)
@@ -1117,13 +1116,15 @@  BEGIN_FTR_SECTION
 	mtspr	SPRN_HDSISR, r0
 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
 
-	mfspr	r6, SPRN_HSRR1
-	andis.	r6, r6, MSR_S@high
-	bne	ret_to_ultra
+	ld	r6, VCPU_KVM(r4)
+	lbz	r7, KVM_SECURE_GUEST(r6)
+	cmpdi	r7, 1
+	beq	ret_to_ultra
 
 	lwz	r6, VCPU_CR(r4)
 	mtcr	r6
 
+	ld	r7, VCPU_GPR(R7)(r4)
 	ld	r6, VCPU_GPR(R6)(r4)
 	ld	r0, VCPU_GPR(R0)(r4)
 	ld	r4, VCPU_GPR(R4)(r4)
@@ -1133,7 +1134,7 @@  END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
  * The hcall we just completed was from Ultravisor. Use UV_RETURN
  * ultra call to return to the Ultravisor. Results from the hcall
  * are already in the appropriate registers (r3:12), except for
- * R6 which we used as a temporary register above. Restore that,
+ * R6,7 which we used as temporary registers above. Restore them,
  * and set R0 to the ucall number (UV_RETURN).
  */
 ret_to_ultra:
@@ -1141,6 +1142,7 @@  ret_to_ultra:
 	mtcr	r6
 	mfspr	r11, SPRN_SRR1
 	LOAD_REG_IMMEDIATE(r0, UV_RETURN)
+	ld	r7, VCPU_GPR(R7)(r4)
 	ld	r6, VCPU_GPR(R6)(r4)
 	ld	r4, VCPU_GPR(R4)(r4)
 	sc	2