
[v4,2/2] powerpc/pseries/svm: Disable BHRB/EBB/PMU access

Message ID 20200122055054.6482-2-sukadev@linux.ibm.com (mailing list archive)
State Not Applicable
Series [v4,1/2] powerpc/pseries/svm: Use FW_FEATURE to detect SVM

Checks

Context                         Check    Description
snowpatch_ozlabs/apply_patch    warning  Failed to apply on branch powerpc/merge (20862247a368dbb75d6e97d82345999adaacf3cc)
snowpatch_ozlabs/apply_patch    warning  Failed to apply on branch powerpc/next (3a9d970f17e05a7b26f782beb8f7f2118d1741ea)
snowpatch_ozlabs/apply_patch    success  Successfully applied on branch linus/master (d96d875ef5dd372f533059a44f98e92de9cf0d42)
snowpatch_ozlabs/build-ppc64le  success  Build succeeded
snowpatch_ozlabs/build-ppc64be  success  Build succeeded
snowpatch_ozlabs/build-ppc64e   success  Build succeeded
snowpatch_ozlabs/build-pmac32   success  Build succeeded
snowpatch_ozlabs/checkpatch     warning  total: 0 errors, 2 warnings, 5 checks, 140 lines checked
snowpatch_ozlabs/needsstable    success  Patch has no Fixes tags

Commit Message

Sukadev Bhattiprolu Jan. 22, 2020, 5:50 a.m. UTC
The Ultravisor currently disables some CPU features, such as BHRB, EBB
and the PMU, in secure virtual machines (SVMs). Skip accessing the
associated registers in SVMs to avoid taking a Program Interrupt.

Basic performance monitoring in SVMs is likely to be enabled in the
future, once the necessary security mechanisms are added to the
Ultravisor. Some features, such as BHRB or counting events in HV mode
(e.g. perf stat -e cycles:h), may remain restricted for longer.
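
The guard pattern used by the C changes in this patch is roughly the
following sketch (illustrative only, not part of the patch; the helper
name is made up, while is_secure_guest(), mfspr() and the SPRN_* macros
are existing kernel definitions from asm/svm.h and asm/reg.h):

	/*
	 * Skip EBB/BHRB SPR access when running as a secure guest under
	 * the Ultravisor; the access would otherwise raise a Program
	 * Interrupt.
	 */
	static void save_ebb_sprs(struct thread_struct *t)	/* hypothetical */
	{
		if (is_secure_guest())
			return;
		t->bescr = mfspr(SPRN_BESCR);
		t->ebbhr = mfspr(SPRN_EBBHR);
		t->ebbrr = mfspr(SPRN_EBBRR);
	}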

Signed-off-by: Sukadev Bhattiprolu <sukadev@linux.ibm.com>
Acked-by: Madhavan Srinivasan <maddy@linux.ibm.com>
---
Changelog[v4]
	- [Paul Mackerras] Drop is_secure_guest() checks in HV-only code
	  and indicate if the disabling of PMU is temporary.
	- For consistency, also skip registering PMUs in secure guests.

Changelog[v2]
	- [Michael Ellerman] Optimize the code using FW_FEATURE_SVM
	- Merged EBB/BHRB and PMU patches into one and reorganized code.
	- Fix some build errors reported by <lkp@intel.org>
---
 arch/powerpc/kernel/cpu_setup_power.S | 21 +++++++++++++++++++
 arch/powerpc/kernel/process.c         | 23 ++++++++++++--------
 arch/powerpc/perf/power9-pmu.c        | 10 +++++++++
 arch/powerpc/xmon/xmon.c              | 30 +++++++++++++++++----------
 4 files changed, 64 insertions(+), 20 deletions(-)

Patch

diff --git a/arch/powerpc/kernel/cpu_setup_power.S b/arch/powerpc/kernel/cpu_setup_power.S
index a460298c7ddb..9e895d8db468 100644
--- a/arch/powerpc/kernel/cpu_setup_power.S
+++ b/arch/powerpc/kernel/cpu_setup_power.S
@@ -206,14 +206,35 @@  __init_PMU_HV_ISA207:
 	blr
 
 __init_PMU:
+#ifdef CONFIG_PPC_SVM
+	/*
+	 * SVM's are restricted from accessing PMU, so skip.
+	 */
+	mfmsr   r5
+	rldicl  r5, r5, 64-MSR_S_LG, 62
+	cmpwi   r5,1
+	beq     skip1
+#endif
 	li	r5,0
 	mtspr	SPRN_MMCRA,r5
 	mtspr	SPRN_MMCR0,r5
 	mtspr	SPRN_MMCR1,r5
 	mtspr	SPRN_MMCR2,r5
+skip1:
 	blr
 
 __init_PMU_ISA207:
+
+#ifdef CONFIG_PPC_SVM
+	/*
+	 * SVM's are restricted from accessing PMU, so skip.
+	*/
+	mfmsr   r5
+	rldicl  r5, r5, 64-MSR_S_LG, 62
+	cmpwi   r5,1
+	beq     skip2
+#endif
 	li	r5,0
 	mtspr	SPRN_MMCRS,r5
+skip2:
 	blr
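
The mfmsr/rldicl/cmpwi sequence added above tests MSR[S], the bit that
identifies a secure guest, and branches past the PMU SPR writes when it
is set. A rough C equivalent (illustrative only, not part of the patch;
mfmsr() and MSR_S are existing kernel definitions, the helper name is
made up) is:

	/* True when running as a secure guest, i.e. MSR[S] is set. */
	static inline bool msr_secure_bit_set(void)
	{
		return (mfmsr() & MSR_S) != 0;
	}
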
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 639ceae7da9d..83c7c4119305 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -64,6 +64,7 @@ 
 #include <asm/asm-prototypes.h>
 #include <asm/stacktrace.h>
 #include <asm/hw_breakpoint.h>
+#include <asm/svm.h>
 
 #include <linux/kprobes.h>
 #include <linux/kdebug.h>
@@ -1059,9 +1060,11 @@  static inline void save_sprs(struct thread_struct *t)
 		t->dscr = mfspr(SPRN_DSCR);
 
 	if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
-		t->bescr = mfspr(SPRN_BESCR);
-		t->ebbhr = mfspr(SPRN_EBBHR);
-		t->ebbrr = mfspr(SPRN_EBBRR);
+		if (!is_secure_guest()) {
+			t->bescr = mfspr(SPRN_BESCR);
+			t->ebbhr = mfspr(SPRN_EBBHR);
+			t->ebbrr = mfspr(SPRN_EBBRR);
+		}
 
 		t->fscr = mfspr(SPRN_FSCR);
 
@@ -1097,12 +1100,14 @@  static inline void restore_sprs(struct thread_struct *old_thread,
 	}
 
 	if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
-		if (old_thread->bescr != new_thread->bescr)
-			mtspr(SPRN_BESCR, new_thread->bescr);
-		if (old_thread->ebbhr != new_thread->ebbhr)
-			mtspr(SPRN_EBBHR, new_thread->ebbhr);
-		if (old_thread->ebbrr != new_thread->ebbrr)
-			mtspr(SPRN_EBBRR, new_thread->ebbrr);
+		if (!is_secure_guest()) {
+			if (old_thread->bescr != new_thread->bescr)
+				mtspr(SPRN_BESCR, new_thread->bescr);
+			if (old_thread->ebbhr != new_thread->ebbhr)
+				mtspr(SPRN_EBBHR, new_thread->ebbhr);
+			if (old_thread->ebbrr != new_thread->ebbrr)
+				mtspr(SPRN_EBBRR, new_thread->ebbrr);
+		}
 
 		if (old_thread->fscr != new_thread->fscr)
 			mtspr(SPRN_FSCR, new_thread->fscr);
diff --git a/arch/powerpc/perf/power9-pmu.c b/arch/powerpc/perf/power9-pmu.c
index 08c3ef796198..c6eca682180d 100644
--- a/arch/powerpc/perf/power9-pmu.c
+++ b/arch/powerpc/perf/power9-pmu.c
@@ -10,6 +10,7 @@ 
 #define pr_fmt(fmt)	"power9-pmu: " fmt
 
 #include "isa207-common.h"
+#include <asm/svm.h>
 
 /*
  * Raw event encoding for Power9:
@@ -446,6 +447,15 @@  int init_power9_pmu(void)
 	    strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc64/power9"))
 		return -ENODEV;
 
+	/*
+	 * Disable PMUs in secure guests until we evaluate security
+	 * exposure and add relevant functionality in Ultravisor.
+	 */
+	if (is_secure_guest()) {
+		printk("Not registering Performance Monitor in secure guest\n");
+		return 0;
+	}
+
 	/* Blacklist events */
 	if (!(pvr & PVR_POWER9_CUMULUS)) {
 		if ((PVR_CFG(pvr) == 2) && (PVR_MIN(pvr) == 1)) {
diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c
index 8057aafd5f5e..2d6c4963ec3c 100644
--- a/arch/powerpc/xmon/xmon.c
+++ b/arch/powerpc/xmon/xmon.c
@@ -53,6 +53,7 @@ 
 #include <asm/firmware.h>
 #include <asm/code-patching.h>
 #include <asm/sections.h>
+#include <asm/svm.h>
 
 #ifdef CONFIG_PPC64
 #include <asm/hvcall.h>
@@ -1861,17 +1862,24 @@  static void dump_207_sprs(void)
 			mfspr(SPRN_TEXASR));
 	}
 
-	printf("mmcr0  = %.16lx  mmcr1 = %.16lx mmcr2  = %.16lx\n",
-		mfspr(SPRN_MMCR0), mfspr(SPRN_MMCR1), mfspr(SPRN_MMCR2));
-	printf("pmc1   = %.8lx pmc2 = %.8lx  pmc3 = %.8lx  pmc4   = %.8lx\n",
-		mfspr(SPRN_PMC1), mfspr(SPRN_PMC2),
-		mfspr(SPRN_PMC3), mfspr(SPRN_PMC4));
-	printf("mmcra  = %.16lx   siar = %.16lx pmc5   = %.8lx\n",
-		mfspr(SPRN_MMCRA), mfspr(SPRN_SIAR), mfspr(SPRN_PMC5));
-	printf("sdar   = %.16lx   sier = %.16lx pmc6   = %.8lx\n",
-		mfspr(SPRN_SDAR), mfspr(SPRN_SIER), mfspr(SPRN_PMC6));
-	printf("ebbhr  = %.16lx  ebbrr = %.16lx bescr  = %.16lx\n",
-		mfspr(SPRN_EBBHR), mfspr(SPRN_EBBRR), mfspr(SPRN_BESCR));
+	if (!is_secure_guest()) {
+		printf("mmcr0  = %.16lx  mmcr1 = %.16lx mmcr2  = %.16lx\n",
+			mfspr(SPRN_MMCR0), mfspr(SPRN_MMCR1),
+			mfspr(SPRN_MMCR2));
+		printf("pmc1   = %.8lx pmc2 = %.8lx  pmc3 = %.8lx  pmc4   = %.8lx\n",
+			mfspr(SPRN_PMC1), mfspr(SPRN_PMC2),
+			mfspr(SPRN_PMC3), mfspr(SPRN_PMC4));
+		printf("mmcra  = %.16lx   siar = %.16lx pmc5   = %.8lx\n",
+			mfspr(SPRN_MMCRA), mfspr(SPRN_SIAR), mfspr(SPRN_PMC5));
+		printf("sdar   = %.16lx   sier = %.16lx pmc6   = %.8lx\n",
+			mfspr(SPRN_SDAR), mfspr(SPRN_SIER), mfspr(SPRN_PMC6));
+
+		printf("ebbhr  = %.16lx  ebbrr = %.16lx bescr  = %.16lx\n",
+			mfspr(SPRN_EBBHR), mfspr(SPRN_EBBRR),
+			mfspr(SPRN_BESCR));
+	}
+
+
 	printf("iamr   = %.16lx\n", mfspr(SPRN_IAMR));
 
 	if (!(msr & MSR_HV))