
[v4,3/3] KVM: PPC: Book3S: MMIO emulation support for little endian guests

Message ID 1381250607-30652-4-git-send-email-clg@fr.ibm.com
State New, archived

Commit Message

Cédric Le Goater Oct. 8, 2013, 4:43 p.m. UTC
MMIO emulation reads the last instruction executed by the guest
and then emulates it. If the guest is running in Little Endian mode,
the instruction needs to be byte-swapped before being emulated.
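
Roughly, the fix-up amounts to the following sketch (illustration
only, not code from this patch; 'raw_inst' stands for the 32-bit
instruction image as read from guest memory):

	u32 inst = raw_inst;

	if (kvmppc_need_byteswap(vcpu))	/* guest and host endianness differ */
		inst = swab32(inst);	/* byte-swap before decoding */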

This patch stores the last instruction in the endian order of the
host, byte-swapping it if needed. The common code which fetches
'last_inst' uses a helper routine kvmppc_need_byteswap(), and the
exit paths for the Book3S PR and HV guests use their own version
in assembly.
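
Such a helper boils down to testing MSR_LE in the guest MSR. A
minimal sketch (the accessor used for the guest MSR is illustrative
here and depends on the Book3S flavour):

	static inline bool kvmppc_need_byteswap(struct kvm_vcpu *vcpu)
	{
		/* a little endian guest on a big endian host needs a swap */
		return vcpu->arch.shared->msr & MSR_LE;
	}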

Finally, the meaning of the 'is_bigendian' argument of the
routines kvmppc_handle_load() and kvmppc_handle_store() is
slightly changed to indicate a possible byte-reverse operation.
This is used in conjunction with kvmppc_is_bigendian() to
determine whether the data of the emulated load or store should
be byte-swapped.
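
Concretely, in kvmppc_handle_load() and kvmppc_handle_store() below,
the effective byte order of the MMIO data is derived from the guest
endianness and the 'not_reverse' argument:

	int is_bigendian = not_reverse;

	if (!kvmppc_is_bigendian(vcpu))	/* little endian guest: invert */
		is_bigendian = !not_reverse;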

Signed-off-by: Cédric Le Goater <clg@fr.ibm.com>
---

Changes in v3:

 - moved kvmppc_need_byteswap() into kvmppc_ld32(). It was
   previously in kvmppc_ld_inst(). (Alexander Graf)

Changes in v2:

 - replaced rldicl. by andi. to test the MSR_LE bit in the guest
   exit paths. (Paul Mackerras)

 - moved the byte swapping logic to kvmppc_handle_load() and
   kvmppc_handle_store() by changing the is_bigendian parameter
   meaning. (Paul Mackerras)

 arch/powerpc/include/asm/kvm_book3s.h   |    9 ++++++++-
 arch/powerpc/include/asm/kvm_ppc.h      |   10 +++++-----
 arch/powerpc/kvm/book3s_hv_rmhandlers.S |   11 +++++++++++
 arch/powerpc/kvm/book3s_segment.S       |   10 ++++++++++
 arch/powerpc/kvm/emulate.c              |    1 -
 arch/powerpc/kvm/powerpc.c              |   16 ++++++++++++----
 6 files changed, 46 insertions(+), 11 deletions(-)

Patch

diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h
index 22ec875..ac06434 100644
--- a/arch/powerpc/include/asm/kvm_book3s.h
+++ b/arch/powerpc/include/asm/kvm_book3s.h
@@ -283,7 +283,14 @@  static inline bool kvmppc_is_bigendian(struct kvm_vcpu *vcpu)
 static inline int kvmppc_ld32(struct kvm_vcpu *vcpu, ulong *eaddr,
 			      u32 *ptr, bool data)
 {
-	return kvmppc_ld(vcpu, eaddr, sizeof(u32), ptr, data);
+	int ret;
+
+	ret = kvmppc_ld(vcpu, eaddr, sizeof(u32), ptr, data);
+
+	if (kvmppc_need_byteswap(vcpu))
+		*ptr = swab32(*ptr);
+
+	return ret;
 }
 
 static inline u32 kvmppc_get_last_inst(struct kvm_vcpu *vcpu)
diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h
index b15554a..3769a13 100644
--- a/arch/powerpc/include/asm/kvm_ppc.h
+++ b/arch/powerpc/include/asm/kvm_ppc.h
@@ -53,13 +53,13 @@  extern void kvmppc_handler_highmem(void);
 
 extern void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu);
 extern int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
-                              unsigned int rt, unsigned int bytes,
-                              int is_bigendian);
+			      unsigned int rt, unsigned int bytes,
+			      int not_reverse);
 extern int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu,
-                               unsigned int rt, unsigned int bytes,
-                               int is_bigendian);
+			       unsigned int rt, unsigned int bytes,
+			       int not_reverse);
 extern int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
-                               u64 val, unsigned int bytes, int is_bigendian);
+			       u64 val, unsigned int bytes, int not_reverse);
 
 extern int kvmppc_emulate_instruction(struct kvm_run *run,
                                       struct kvm_vcpu *vcpu);
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index 77f1baa..ff7da8b 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -1404,10 +1404,21 @@  fast_interrupt_c_return:
 	lwz	r8, 0(r10)
 	mtmsrd	r3
 
+	ld	r0, VCPU_MSR(r9)
+
+	andi.	r10, r0, MSR_LE
+
 	/* Store the result */
 	stw	r8, VCPU_LAST_INST(r9)
 
+	beq	after_inst_store
+
+	/* Swap and store the result */
+	addi	r11, r9, VCPU_LAST_INST
+	stwbrx	r8, 0, r11
+
 	/* Unset guest mode. */
+after_inst_store:
 	li	r0, KVM_GUEST_MODE_HOST_HV
 	stb	r0, HSTATE_IN_GUEST(r13)
 	b	guest_exit_cont
diff --git a/arch/powerpc/kvm/book3s_segment.S b/arch/powerpc/kvm/book3s_segment.S
index 1abe478..677ef7a 100644
--- a/arch/powerpc/kvm/book3s_segment.S
+++ b/arch/powerpc/kvm/book3s_segment.S
@@ -287,8 +287,18 @@  ld_last_inst:
 	sync
 
 #endif
+	ld	r8, SVCPU_SHADOW_SRR1(r13)
+
+	andi.	r10, r8, MSR_LE
+
 	stw	r0, SVCPU_LAST_INST(r13)
 
+	beq	no_ld_last_inst
+
+	/* swap and store the result */
+	addi	r11, r13, SVCPU_LAST_INST
+	stwbrx	r0, 0, r11
+
 no_ld_last_inst:
 
 	/* Unset guest mode */
diff --git a/arch/powerpc/kvm/emulate.c b/arch/powerpc/kvm/emulate.c
index 751cd45..5e38004 100644
--- a/arch/powerpc/kvm/emulate.c
+++ b/arch/powerpc/kvm/emulate.c
@@ -219,7 +219,6 @@  static int kvmppc_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt)
  * lmw
  * stmw
  *
- * XXX is_bigendian should depend on MMU mapping or MSR[LE]
  */
 /* XXX Should probably auto-generate instruction decoding for a particular core
  * from opcode tables in the future. */
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 07c0106..6950f2b 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -625,9 +625,13 @@  static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu,
 }
 
 int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
-                       unsigned int rt, unsigned int bytes, int is_bigendian)
+			unsigned int rt, unsigned int bytes, int not_reverse)
 {
 	int idx, ret;
+	int is_bigendian = not_reverse;
+
+	if (!kvmppc_is_bigendian(vcpu))
+		is_bigendian = !not_reverse;
 
 	if (bytes > sizeof(run->mmio.data)) {
 		printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__,
@@ -662,21 +666,25 @@  int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
 
 /* Same as above, but sign extends */
 int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu,
-                        unsigned int rt, unsigned int bytes, int is_bigendian)
+			unsigned int rt, unsigned int bytes, int not_reverse)
 {
 	int r;
 
 	vcpu->arch.mmio_sign_extend = 1;
-	r = kvmppc_handle_load(run, vcpu, rt, bytes, is_bigendian);
+	r = kvmppc_handle_load(run, vcpu, rt, bytes, not_reverse);
 
 	return r;
 }
 
 int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
-                        u64 val, unsigned int bytes, int is_bigendian)
+			u64 val, unsigned int bytes, int not_reverse)
 {
 	void *data = run->mmio.data;
 	int idx, ret;
+	int is_bigendian = not_reverse;
+
+	if (!kvmppc_is_bigendian(vcpu))
+		is_bigendian = !not_reverse;
 
 	if (bytes > sizeof(run->mmio.data)) {
 		printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__,