
KVM: PPC: Book3S HV: Save/restore SIAR and SDAR along with other PMU registers

Message ID 20130711105120.GA21353@iris.ozlabs.ibm.com
State New, archived

Commit Message

Paul Mackerras July 11, 2013, 10:51 a.m. UTC
Currently HV-style KVM does not save and restore the SIAR and SDAR
registers in the PMU (performance monitor unit) on guest entry and
exit.  The result is that performance monitoring tools in the guest
could get false information about where a program was executing and
what data it was accessing at the time of a performance monitor
interrupt.  This fixes it by saving and restoring these registers
along with the other PMU registers on guest entry/exit.

This also provides a way for userspace to access these values for a
vcpu via the one_reg interface.  There is a gap between the values for
MMCRA and SIAR in order to leave room for two more MMCR registers
that exist on POWER8.
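
As an illustration only (not part of the patch), a minimal userspace sketch
reading the new registers through the one_reg interface might look like the
following, assuming a vcpu fd already obtained through the usual KVM ioctls
and <linux/kvm.h> headers that carry the new defines:

/* Illustrative sketch: fetch the guest's SIAR/SDAR via KVM_GET_ONE_REG. */
#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int read_sampling_regs(int vcpu_fd, uint64_t *siar, uint64_t *sdar)
{
	struct kvm_one_reg reg;

	/* Point the kernel at the buffer to fill, then ask for SIAR. */
	reg.id = KVM_REG_PPC_SIAR;
	reg.addr = (uintptr_t)siar;
	if (ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg) < 0)
		return -1;

	/* Same again for SDAR. */
	reg.id = KVM_REG_PPC_SDAR;
	reg.addr = (uintptr_t)sdar;
	if (ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg) < 0)
		return -1;

	return 0;
}

KVM_SET_ONE_REG is symmetric, so userspace can write the values back the
same way.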

Signed-off-by: Paul Mackerras <paulus@samba.org>
---
This is against Alex Graf's kvm-ppc-queue branch.

 arch/powerpc/include/asm/kvm_host.h     |    2 ++
 arch/powerpc/include/uapi/asm/kvm.h     |    2 ++
 arch/powerpc/kernel/asm-offsets.c       |    2 ++
 arch/powerpc/kvm/book3s_hv.c            |   12 ++++++++++++
 arch/powerpc/kvm/book3s_hv_rmhandlers.S |    8 ++++++++
 5 files changed, 26 insertions(+)

Comments

Alexander Graf July 11, 2013, 11:06 a.m. UTC | #1
On 11.07.2013, at 12:51, Paul Mackerras wrote:

> Currently HV-style KVM does not save and restore the SIAR and SDAR
> registers in the PMU (performance monitor unit) on guest entry and
> exit.  The result is that performance monitoring tools in the guest
> could get false information about where a program was executing and
> what data it was accessing at the time of a performance monitor
> interrupt.  This fixes it by saving and restoring these registers
> along with the other PMU registers on guest entry/exit.
> 
> This also provides a way for userspace to access these values for a
> vcpu via the one_reg interface.  There is a gap between the values for
> MMCRA and SIAR in order to leave room for two more MMCR registers
> that exist on POWER8.

Can we add the ONE_REG defines for those right away in a preceding patch? No need for the implementation yet if that requires additional work that needs to go through Ben.

> 
> Signed-off-by: Paul Mackerras <paulus@samba.org>
> ---
> This is against Alex Graf's kvm-ppc-queue branch.
> 
> arch/powerpc/include/asm/kvm_host.h     |    2 ++
> arch/powerpc/include/uapi/asm/kvm.h     |    2 ++
> arch/powerpc/kernel/asm-offsets.c       |    2 ++
> arch/powerpc/kvm/book3s_hv.c            |   12 ++++++++++++
> arch/powerpc/kvm/book3s_hv_rmhandlers.S |    8 ++++++++
> 5 files changed, 26 insertions(+)
> 
> diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
> index 3328353..91b833d 100644
> --- a/arch/powerpc/include/asm/kvm_host.h
> +++ b/arch/powerpc/include/asm/kvm_host.h
> @@ -498,6 +498,8 @@ struct kvm_vcpu_arch {
> 
> 	u64 mmcr[3];
> 	u32 pmc[8];
> +	u64 siar;
> +	u64 sdar;
> 
> #ifdef CONFIG_KVM_EXIT_TIMING
> 	struct mutex exit_timing_lock;
> diff --git a/arch/powerpc/include/uapi/asm/kvm.h b/arch/powerpc/include/uapi/asm/kvm.h
> index 0fb1a6e..3cf47c8 100644
> --- a/arch/powerpc/include/uapi/asm/kvm.h
> +++ b/arch/powerpc/include/uapi/asm/kvm.h
> @@ -429,6 +429,8 @@ struct kvm_get_htab_header {
> #define KVM_REG_PPC_MMCR0	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x10)
> #define KVM_REG_PPC_MMCR1	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x11)
> #define KVM_REG_PPC_MMCRA	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x12)
> +#define KVM_REG_PPC_SIAR	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x15)
> +#define KVM_REG_PPC_SDAR	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x16)

These are missing entries in Documentation/virtual/kvm/api.txt :).


Otherwise looks good.

Alex
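
For reference, the api.txt entries being asked for would presumably be two
more rows in the one_reg register table in Documentation/virtual/kvm/api.txt,
alongside the other PMU registers, roughly:

  PPC   | KVM_REG_PPC_SIAR              | 64
  PPC   | KVM_REG_PPC_SDAR              | 64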



Patch

diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index 3328353..91b833d 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -498,6 +498,8 @@  struct kvm_vcpu_arch {
 
 	u64 mmcr[3];
 	u32 pmc[8];
+	u64 siar;
+	u64 sdar;
 
 #ifdef CONFIG_KVM_EXIT_TIMING
 	struct mutex exit_timing_lock;
diff --git a/arch/powerpc/include/uapi/asm/kvm.h b/arch/powerpc/include/uapi/asm/kvm.h
index 0fb1a6e..3cf47c8 100644
--- a/arch/powerpc/include/uapi/asm/kvm.h
+++ b/arch/powerpc/include/uapi/asm/kvm.h
@@ -429,6 +429,8 @@  struct kvm_get_htab_header {
 #define KVM_REG_PPC_MMCR0	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x10)
 #define KVM_REG_PPC_MMCR1	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x11)
 #define KVM_REG_PPC_MMCRA	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x12)
+#define KVM_REG_PPC_SIAR	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x15)
+#define KVM_REG_PPC_SDAR	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x16)
 
 #define KVM_REG_PPC_PMC1	(KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x18)
 #define KVM_REG_PPC_PMC2	(KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x19)
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 6f16ffa..ad66058 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -505,6 +505,8 @@  int main(void)
 	DEFINE(VCPU_PRODDED, offsetof(struct kvm_vcpu, arch.prodded));
 	DEFINE(VCPU_MMCR, offsetof(struct kvm_vcpu, arch.mmcr));
 	DEFINE(VCPU_PMC, offsetof(struct kvm_vcpu, arch.pmc));
+	DEFINE(VCPU_SIAR, offsetof(struct kvm_vcpu, arch.siar));
+	DEFINE(VCPU_SDAR, offsetof(struct kvm_vcpu, arch.sdar));
 	DEFINE(VCPU_SLB, offsetof(struct kvm_vcpu, arch.slb));
 	DEFINE(VCPU_SLB_MAX, offsetof(struct kvm_vcpu, arch.slb_max));
 	DEFINE(VCPU_SLB_NR, offsetof(struct kvm_vcpu, arch.slb_nr));
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 2b95c45..ee2352a 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -750,6 +750,12 @@  int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *val)
 		i = id - KVM_REG_PPC_PMC1;
 		*val = get_reg_val(id, vcpu->arch.pmc[i]);
 		break;
+	case KVM_REG_PPC_SIAR:
+		*val = get_reg_val(id, vcpu->arch.siar);
+		break;
+	case KVM_REG_PPC_SDAR:
+		*val = get_reg_val(id, vcpu->arch.sdar);
+		break;
 #ifdef CONFIG_VSX
 	case KVM_REG_PPC_FPR0 ... KVM_REG_PPC_FPR31:
 		if (cpu_has_feature(CPU_FTR_VSX)) {
@@ -834,6 +840,12 @@  int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *val)
 		i = id - KVM_REG_PPC_PMC1;
 		vcpu->arch.pmc[i] = set_reg_val(id, *val);
 		break;
+	case KVM_REG_PPC_SIAR:
+		vcpu->arch.siar = set_reg_val(id, *val);
+		break;
+	case KVM_REG_PPC_SDAR:
+		vcpu->arch.sdar = set_reg_val(id, *val);
+		break;
 #ifdef CONFIG_VSX
 	case KVM_REG_PPC_FPR0 ... KVM_REG_PPC_FPR31:
 		if (cpu_has_feature(CPU_FTR_VSX)) {
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index 60dce5b..bfb4b0a 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -196,8 +196,12 @@  END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
 	ld	r3, VCPU_MMCR(r4)
 	ld	r5, VCPU_MMCR + 8(r4)
 	ld	r6, VCPU_MMCR + 16(r4)
+	ld	r7, VCPU_SIAR(r4)
+	ld	r8, VCPU_SDAR(r4)
 	mtspr	SPRN_MMCR1, r5
 	mtspr	SPRN_MMCRA, r6
+	mtspr	SPRN_SIAR, r7
+	mtspr	SPRN_SDAR, r8
 	mtspr	SPRN_MMCR0, r3
 	isync
 
@@ -1122,9 +1126,13 @@  END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
 	std	r3, VCPU_MMCR(r9)	/* if not, set saved MMCR0 to FC */
 	b	22f
 21:	mfspr	r5, SPRN_MMCR1
+	mfspr	r7, SPRN_SIAR
+	mfspr	r8, SPRN_SDAR
 	std	r4, VCPU_MMCR(r9)
 	std	r5, VCPU_MMCR + 8(r9)
 	std	r6, VCPU_MMCR + 16(r9)
+	std	r7, VCPU_SIAR(r9)
+	std	r8, VCPU_SDAR(r9)
 	mfspr	r3, SPRN_PMC1
 	mfspr	r4, SPRN_PMC2
 	mfspr	r5, SPRN_PMC3