
[11/15] KVM: PPC: Book3S HV: Add little-endian guest support

Message ID 1383995103-24732-12-git-send-email-paulus@samba.org
State New, archived

Commit Message

Paul Mackerras Nov. 9, 2013, 11:04 a.m. UTC
From: Anton Blanchard <anton@samba.org>

Add support for the H_SET_MODE hcall so we can select the
endianness of our exceptions.

We create a guest MSR from scratch when delivering exceptions in
a few places.  Instead of extracting LPCR[ILE] and inserting it
into MSR_LE each time, we add a new per-vcpu field, intr_msr, which
contains the entire MSR value to use.

Modified by mikey to use a case statement so that more H_SET_MODE
calls can be added easily later.

Modified by paulus to call kvmppc_update_lpcr and take kvm->lock.

Signed-off-by: Anton Blanchard <anton@samba.org>
Signed-off-by: Michael Neuling <mikey@neuling.org>
Signed-off-by: Paul Mackerras <paulus@samba.org>
---
 arch/powerpc/include/asm/hvcall.h       |  6 ++++
 arch/powerpc/include/asm/kvm_host.h     |  1 +
 arch/powerpc/include/asm/kvm_ppc.h      |  2 ++
 arch/powerpc/kernel/asm-offsets.c       |  1 +
 arch/powerpc/kvm/book3s_64_mmu_hv.c     |  2 +-
 arch/powerpc/kvm/book3s_hv.c            | 49 +++++++++++++++++++++++++++++++++
 arch/powerpc/kvm/book3s_hv_rmhandlers.S | 12 +++-----
 7 files changed, 64 insertions(+), 9 deletions(-)
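
For context, the guest side of this is a single hcall; below is a minimal
sketch (not part of this patch, modeled on the pseries hcall wrapper
declared in asm/hvcall.h) of how a guest kernel could request little-endian
interrupt delivery:

#include <asm/hvcall.h>

/*
 * Guest-side sketch: ask the hypervisor to switch interrupt delivery
 * endianness.  mflags = 1 selects little-endian, 0 big-endian; the two
 * value arguments must be zero, matching the H_P3/H_P4 checks in
 * kvmppc_h_set_mode() below.
 */
static long set_interrupt_endian(unsigned long mflags)
{
	return plpar_hcall_norets(H_SET_MODE, mflags,
				  H_SET_MODE_RESOURCE_LE, 0, 0);
}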

Comments

Alexander Graf Nov. 30, 2013, 4:30 p.m. UTC | #1
On 09.11.2013, at 12:04, Paul Mackerras <paulus@samba.org> wrote:

> From: Anton Blanchard <anton@samba.org>
> 
> Add support for the H_SET_MODE hcall so we can select the
> endianness of our exceptions.
> 
> We create a guest MSR from scratch when delivering exceptions in
> a few places.  Instead of extracting LPCR[ILE] and inserting it
> into MSR_LE each time, we add a new per-vcpu field, intr_msr, which
> contains the entire MSR value to use.
> 
> Modified by mikey to use a case statement so that more H_SET_MODE
> calls can be added easily later.
> 
> Modified by paulus to call kvmppc_update_lpcr and take kvm->lock.

Please rework this so that

  1) H_SET_MODE becomes a multiplexer similar to H_RTAS. By default all H_SET_MODE hypercalls go to user space and get handled there. User space can enable certain calls to be handled directly by KVM instead
  2) vcpus get a new ioctl (ENABLE_CAP maybe?) that allows us to set and unset LE mode. The H_SET_MODE_RESOURCE_LE call goes through all vcpus and triggers that ioctl from user space. That way it can also remember that it's in this mode.
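
A rough sketch of what the user-space half of proposal 2) could look like,
assuming a new vcpu capability were added for this; KVM_CAP_PPC_INTR_LE is a
placeholder name, not an existing capability:

#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Placeholder: no such capability exists in this series. */
#define KVM_CAP_PPC_INTR_LE	0

/*
 * Hypothetical QEMU-side handler: when an H_SET_MODE(H_SET_MODE_RESOURCE_LE)
 * exit is handled in user space, walk every vcpu and flip its interrupt
 * endianness through KVM_ENABLE_CAP, so user space also remembers the mode.
 */
static int vcpu_set_intr_le(int vcpu_fd, int little_endian)
{
	struct kvm_enable_cap cap;

	memset(&cap, 0, sizeof(cap));
	cap.cap = KVM_CAP_PPC_INTR_LE;	/* placeholder capability number */
	cap.args[0] = little_endian;

	return ioctl(vcpu_fd, KVM_ENABLE_CAP, &cap);
}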


Alex

Alexander Graf Nov. 30, 2013, 4:38 p.m. UTC | #2
On 30.11.2013, at 17:30, Alexander Graf <agraf@suse.de> wrote:

> 
> On 09.11.2013, at 12:04, Paul Mackerras <paulus@samba.org> wrote:
> 
>> [...]
> 
> Please rework this so that
> 
>  1) H_SET_MODE becomes a multiplexer similar to H_RTAS. By default all H_SET_MODE hypercalls go to user space and get handled there. User space can enable certain calls to be handled directly by KVM instead
>  2) vcpus get a new ioctl (ENABLE_CAP maybe?

Ah, we had an idea a while back to make this a ONE_REG on LPCR, with KVM filtering out bits it doesn't want to see modified, right? That should work just fine here.
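
For reference, a sketch of that ONE_REG variant from the user-space side,
using the KVM_REG_PPC_LPCR id added earlier in this series (LPCR_ILE value
taken from arch/powerpc/include/asm/reg.h; error handling kept minimal):

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

#define LPCR_ILE	0x02000000	/* interrupt little-endian, per asm/reg.h */

/*
 * Sketch: read LPCR through ONE_REG, flip the ILE bit and write it back,
 * relying on KVM to filter out any bits user space must not change.
 */
static int vcpu_set_ile(int vcpu_fd, int little_endian)
{
	uint64_t lpcr;
	struct kvm_one_reg reg = {
		.id   = KVM_REG_PPC_LPCR,
		.addr = (uintptr_t)&lpcr,
	};

	if (ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg) < 0)
		return -1;

	if (little_endian)
		lpcr |= LPCR_ILE;
	else
		lpcr &= ~LPCR_ILE;

	return ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);
}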


Alex


Patch

diff --git a/arch/powerpc/include/asm/hvcall.h b/arch/powerpc/include/asm/hvcall.h
index 0c7f2bf..089935f 100644
--- a/arch/powerpc/include/asm/hvcall.h
+++ b/arch/powerpc/include/asm/hvcall.h
@@ -274,6 +274,12 @@ 
 /* Platform specific hcalls, used by KVM */
 #define H_RTAS			0xf000
 
+/* Values for 2nd argument to H_SET_MODE */
+#define H_SET_MODE_RESOURCE_SET_CIABR		1
+#define H_SET_MODE_RESOURCE_SET_DAWR		2
+#define H_SET_MODE_RESOURCE_ADDR_TRANS_MODE	3
+#define H_SET_MODE_RESOURCE_LE			4
+
 #ifndef __ASSEMBLY__
 
 /**
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index d8178e4..7287cb7 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -643,6 +643,7 @@  struct kvm_vcpu_arch {
 	spinlock_t tbacct_lock;
 	u64 busy_stolen;
 	u64 busy_preempt;
+	unsigned long intr_msr;
 #endif
 };
 
diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h
index c8317fb..18f09c8 100644
--- a/arch/powerpc/include/asm/kvm_ppc.h
+++ b/arch/powerpc/include/asm/kvm_ppc.h
@@ -169,6 +169,8 @@  extern int kvmppc_xics_get_xive(struct kvm *kvm, u32 irq, u32 *server,
 				u32 *priority);
 extern int kvmppc_xics_int_on(struct kvm *kvm, u32 irq);
 extern int kvmppc_xics_int_off(struct kvm *kvm, u32 irq);
+extern void kvmppc_update_lpcr(struct kvm *kvm, unsigned long lpcr,
+			       unsigned long mask);
 
 union kvmppc_one_reg {
 	u32	wval;
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index a371715..c9c6a93 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -492,6 +492,7 @@  int main(void)
 	DEFINE(VCPU_DAR, offsetof(struct kvm_vcpu, arch.shregs.dar));
 	DEFINE(VCPU_VPA, offsetof(struct kvm_vcpu, arch.vpa.pinned_addr));
 	DEFINE(VCPU_VPA_DIRTY, offsetof(struct kvm_vcpu, arch.vpa.dirty));
+	DEFINE(VCPU_INTR_MSR, offsetof(struct kvm_vcpu, arch.intr_msr));
 #endif
 #ifdef CONFIG_PPC_BOOK3S
 	DEFINE(VCPU_VCPUID, offsetof(struct kvm_vcpu, vcpu_id));
diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
index 47bbeaf..488cc61 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -262,7 +262,7 @@  int kvmppc_mmu_hv_init(void)
 
 static void kvmppc_mmu_book3s_64_hv_reset_msr(struct kvm_vcpu *vcpu)
 {
-	kvmppc_set_msr(vcpu, MSR_SF | MSR_ME);
+	kvmppc_set_msr(vcpu, vcpu->arch.intr_msr);
 }
 
 /*
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 444cdd4..2473400 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -549,6 +549,48 @@  static void kvmppc_create_dtl_entry(struct kvm_vcpu *vcpu,
 	vcpu->arch.dtl.dirty = true;
 }
 
+static int kvmppc_h_set_mode(struct kvm_vcpu *vcpu, unsigned long mflags,
+			     unsigned long resource, unsigned long value1,
+			     unsigned long value2)
+{
+	struct kvm *kvm = vcpu->kvm;
+	struct kvm_vcpu *v;
+	int n;
+
+	switch (resource) {
+	case H_SET_MODE_RESOURCE_LE:
+		if (value1)
+			return H_P3;
+		if (value2)
+			return H_P4;
+
+		switch (mflags) {
+		case 0:
+			mutex_lock(&kvm->lock);
+			kvmppc_update_lpcr(kvm, 0, LPCR_ILE);
+			kvm_for_each_vcpu(n, v, kvm)
+				v->arch.intr_msr &= ~MSR_LE;
+			mutex_unlock(&kvm->lock);
+			kick_all_cpus_sync();
+			return H_SUCCESS;
+
+		case 1:
+			mutex_lock(&kvm->lock);
+			kvmppc_update_lpcr(kvm, LPCR_ILE, LPCR_ILE);
+			kvm_for_each_vcpu(n, v, kvm)
+				v->arch.intr_msr |= MSR_LE;
+			mutex_unlock(&kvm->lock);
+			kick_all_cpus_sync();
+			return H_SUCCESS;
+
+		default:
+			return H_UNSUPPORTED_FLAG_START;
+		}
+	default:
+		 return H_P2;
+	}
+}
+
 int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
 {
 	unsigned long req = kvmppc_get_gpr(vcpu, 3);
@@ -612,6 +654,12 @@  int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
 
 		/* Send the error out to userspace via KVM_RUN */
 		return rc;
+	case H_SET_MODE:
+		ret = kvmppc_h_set_mode(vcpu, kvmppc_get_gpr(vcpu, 4),
+					kvmppc_get_gpr(vcpu, 5),
+					kvmppc_get_gpr(vcpu, 6),
+					kvmppc_get_gpr(vcpu, 7));
+		break;
 
 	case H_XIRR:
 	case H_CPPR:
@@ -1189,6 +1237,7 @@  static struct kvm_vcpu *kvmppc_core_vcpu_create_hv(struct kvm *kvm,
 	spin_lock_init(&vcpu->arch.vpa_update_lock);
 	spin_lock_init(&vcpu->arch.tbacct_lock);
 	vcpu->arch.busy_preempt = TB_NIL;
+	vcpu->arch.intr_msr = MSR_SF | MSR_ME;
 
 	kvmppc_mmu_book3s_hv_init(vcpu);
 
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index 56ee76e..4edff25 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -852,8 +852,7 @@  END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
 12:	mtspr	SPRN_SRR0, r10
 	mr	r10,r0
 	mtspr	SPRN_SRR1, r11
-	li	r11,(MSR_ME << 1) | 1	/* synthesize MSR_SF | MSR_ME */
-	rotldi	r11,r11,63
+	ld	r11, VCPU_INTR_MSR(r4)
 5:
 
 fast_guest_return:
@@ -1614,8 +1613,7 @@  kvmppc_hdsi:
 	mtspr	SPRN_SRR0, r10
 	mtspr	SPRN_SRR1, r11
 	li	r10, BOOK3S_INTERRUPT_DATA_STORAGE
-	li	r11, (MSR_ME << 1) | 1	/* synthesize MSR_SF | MSR_ME */
-	rotldi	r11, r11, 63
+	ld	r11, VCPU_INTR_MSR(r9)
 fast_interrupt_c_return:
 6:	ld	r7, VCPU_CTR(r9)
 	lwz	r8, VCPU_XER(r9)
@@ -1684,8 +1682,7 @@  kvmppc_hisi:
 1:	mtspr	SPRN_SRR0, r10
 	mtspr	SPRN_SRR1, r11
 	li	r10, BOOK3S_INTERRUPT_INST_STORAGE
-	li	r11, (MSR_ME << 1) | 1	/* synthesize MSR_SF | MSR_ME */
-	rotldi	r11, r11, 63
+	ld	r11, VCPU_INTR_MSR(r9)
 	b	fast_interrupt_c_return
 
 3:	ld	r6, VCPU_KVM(r9)	/* not relocated, use VRMA */
@@ -2019,8 +2016,7 @@  machine_check_realmode:
 	beq	mc_cont
 	/* If not, deliver a machine check.  SRR0/1 are already set */
 	li	r10, BOOK3S_INTERRUPT_MACHINE_CHECK
-	li	r11, (MSR_ME << 1) | 1	/* synthesize MSR_SF | MSR_ME */
-	rotldi	r11, r11, 63
+	ld	r11, VCPU_INTR_MSR(r9)
 	b	fast_interrupt_c_return
 
 /*
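
As an aside on the assembly hunks above: the removed li/rotldi pair builds
MSR_SF | MSR_ME without needing a 64-bit immediate.  A small stand-alone C
check of that equivalence (MSR bit positions per arch/powerpc/include/asm/reg.h):

#include <assert.h>
#include <stdint.h>

#define MSR_SF	(1ULL << 63)	/* 64-bit mode */
#define MSR_ME	(1ULL << 12)	/* machine check enable */

/* rotate a 64-bit value left by n bits (0 < n < 64), as rotldi does */
static uint64_t rotl64(uint64_t v, unsigned int n)
{
	return (v << n) | (v >> (64 - n));
}

int main(void)
{
	/* li r11,(MSR_ME << 1) | 1 ; rotldi r11,r11,63 */
	uint64_t synthesized = rotl64((MSR_ME << 1) | 1, 63);

	assert(synthesized == (MSR_SF | MSR_ME));
	return 0;
}

Loading VCPU_INTR_MSR instead lets the same interrupt-delivery paths include
MSR_LE without any extra instructions.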