
[v6] KVM: PPC: Book3S: MMIO emulation support for little endian guests

Message ID 1389202526-20426-1-git-send-email-clg@fr.ibm.com
State New, archived

Commit Message

Cédric Le Goater Jan. 8, 2014, 5:35 p.m. UTC
MMIO emulation reads the last instruction executed by the guest
and then emulates it. If the guest is running in Little Endian order,
or more generally in a different endian order from the host, the
instruction needs to be byte-swapped before being emulated.
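
For illustration only (not part of the patch): a little endian guest
encodes "lwz r0,0(r1)" (0x80010000) as the byte sequence 00 00 01 80
in memory, so a big endian host fetching that word sees 0x00000180
and has to swap it back before decoding. The hypothetical helper
below just applies swab32(), the same routine the patch uses:

    #include <linux/types.h>
    #include <linux/swab.h>

    /* Sketch: recover the opcode a BE host fetched from LE guest memory. */
    static inline u32 fixup_fetched_inst(u32 fetched)
    {
    	return swab32(fetched);		/* e.g. 0x00000180 -> 0x80010000 */
    }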

This patch adds a helper routine which compares the endian order of
the host and the guest in order to decide whether a byteswap is
needed. It is then used to byteswap the guest's last instruction
into the endian order of the host before MMIO emulation is
performed.
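
The test itself just compares the guest's MSR_LE bit with the byte
order the host kernel runs in; this is the kvmppc_need_byteswap()
helper added below:

    static inline bool kvmppc_need_byteswap(struct kvm_vcpu *vcpu)
    {
    	/* swap when guest and host disagree on MSR_LE */
    	return (vcpu->arch.shared->msr & MSR_LE) != (MSR_KERNEL & MSR_LE);
    }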

Finally, kvmppc_handle_load() and kvmppc_handle_store() are modified
to reverse the endianness of the MMIO data if required.
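
In both routines the change is limited to inverting the is_bigendian
argument when guest and host byte order differ, so that the existing
MMIO byte-reversal logic kicks in for the mismatched case:

    if (kvmppc_need_byteswap(vcpu))
    	is_bigendian = !is_bigendian;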

Signed-off-by: Cédric Le Goater <clg@fr.ibm.com>
---

This patch was tested with Big Endian and Little Endian HV guests
and Big Endian PR guests on 3.13 (plus an h_set_mode hack).

Changes in v6:

 - removed asm changes (Alexander Graf)
 - byteswaps last_inst when used in kvmppc_get_last_inst()
 - postponed Split Little Endian support

Changes in v5:

 - changed register usage slightly (paulus@samba.org)
 - added #ifdef CONFIG_PPC64 in book3s_segment.S (paulus@samba.org)
 - added support for little endian host
 - added support for Split Little Endian (SLE)

Changes in v4:

 - got rid of useless helper routine kvmppc_ld_inst(). (Alexander Graf)

Changes in v3:

 - moved kvmppc_need_byteswap() in kvmppc_ld32. It previously was in
   kvmppc_ld_inst(). (Alexander Graf)

Changes in v2:

 - replaced rldicl. by andi. to test the MSR_LE bit in the guest
   exit paths. (Paul Mackerras)

 - moved the byte swapping logic to kvmppc_handle_load() and
   kvmppc_handle_store() by changing the meaning of the is_bigendian
   parameter. (Paul Mackerras)
	

 arch/powerpc/include/asm/kvm_book3s.h |   15 +++++++++++++--
 arch/powerpc/kvm/book3s_64_mmu_hv.c   |    2 +-
 arch/powerpc/kvm/emulate.c            |    1 -
 arch/powerpc/kvm/powerpc.c            |    6 ++++++
 4 files changed, 20 insertions(+), 4 deletions(-)

Patch

diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h
index bc23b1ba7980..00499f5f16bc 100644
--- a/arch/powerpc/include/asm/kvm_book3s.h
+++ b/arch/powerpc/include/asm/kvm_book3s.h
@@ -271,6 +271,17 @@  static inline ulong kvmppc_get_pc(struct kvm_vcpu *vcpu)
 	return vcpu->arch.pc;
 }
 
+static inline bool kvmppc_need_byteswap(struct kvm_vcpu *vcpu)
+{
+	return (vcpu->arch.shared->msr & MSR_LE) != (MSR_KERNEL & MSR_LE);
+}
+
+static inline u32 kvmppc_byteswap_last_inst(struct kvm_vcpu *vcpu)
+{
+	return kvmppc_need_byteswap(vcpu) ? swab32(vcpu->arch.last_inst) :
+		vcpu->arch.last_inst;
+}
+
 static inline u32 kvmppc_get_last_inst(struct kvm_vcpu *vcpu)
 {
 	ulong pc = kvmppc_get_pc(vcpu);
@@ -280,7 +291,7 @@  static inline u32 kvmppc_get_last_inst(struct kvm_vcpu *vcpu)
 	if (vcpu->arch.last_inst == KVM_INST_FETCH_FAILED)
 		kvmppc_ld(vcpu, &pc, sizeof(u32), &vcpu->arch.last_inst, false);
 
-	return vcpu->arch.last_inst;
+	return kvmppc_byteswap_last_inst(vcpu);
 }
 
 /*
@@ -297,7 +308,7 @@  static inline u32 kvmppc_get_last_sc(struct kvm_vcpu *vcpu)
 	if (vcpu->arch.last_inst == KVM_INST_FETCH_FAILED)
 		kvmppc_ld(vcpu, &pc, sizeof(u32), &vcpu->arch.last_inst, false);
 
-	return vcpu->arch.last_inst;
+	return kvmppc_byteswap_last_inst(vcpu);
 }
 
 static inline ulong kvmppc_get_fault_dar(struct kvm_vcpu *vcpu)
diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
index 79e992d8c823..ff10fba29878 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -562,7 +562,7 @@  static int kvmppc_hv_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu,
 	 * we just return and retry the instruction.
 	 */
 
-	if (instruction_is_store(vcpu->arch.last_inst) != !!is_store)
+	if (instruction_is_store(kvmppc_byteswap_last_inst(vcpu)) != !!is_store)
 		return RESUME_GUEST;
 
 	/*
diff --git a/arch/powerpc/kvm/emulate.c b/arch/powerpc/kvm/emulate.c
index 2f9a0873b44f..c2b887be2c29 100644
--- a/arch/powerpc/kvm/emulate.c
+++ b/arch/powerpc/kvm/emulate.c
@@ -219,7 +219,6 @@  static int kvmppc_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt)
  * lmw
  * stmw
  *
- * XXX is_bigendian should depend on MMU mapping or MSR[LE]
  */
 /* XXX Should probably auto-generate instruction decoding for a particular core
  * from opcode tables in the future. */
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 9ae97686e9f4..b2adea28f2f0 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -677,6 +677,9 @@  int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
 {
 	int idx, ret;
 
+	if (kvmppc_need_byteswap(vcpu))
+		is_bigendian = !is_bigendian;
+
 	if (bytes > sizeof(run->mmio.data)) {
 		printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__,
 		       run->mmio.len);
@@ -727,6 +730,9 @@  int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
 	void *data = run->mmio.data;
 	int idx, ret;
 
+	if (kvmppc_need_byteswap(vcpu))
+		is_bigendian = !is_bigendian;
+
 	if (bytes > sizeof(run->mmio.data)) {
 		printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__,
 		       run->mmio.len);