[v2,2/2] powerpc/perf: Fix unit_sel/cache_sel checks

Message ID 1507558360-27145-2-git-send-email-maddy@linux.vnet.ibm.com (mailing list archive)
State Accepted
Commit 2d46d4877b1afd14059393a48bdb8ce27955174c
Series [v2,1/2] powerpc/perf: Cleanup cache_sel bits comment

Commit Message

maddy Oct. 9, 2017, 2:12 p.m. UTC
The raw event code has a couple of fields, "unit" and "cache", which capture
the unit to monitor for a given pmcxsel and the cache reload qualifier to
program in MMCR1.

isa207_get_constraint() uses the "unit" field to update the MMCRC (L2/L3)
Event Bus Control fields with the "cache" bits of the raw event code.
These fields are power8 specific and not supported by the PowerISA v3.0 PMU,
so wrap the checks to make them power8 specific. Similarly, the "cache" bit
field is used to update MMCR1[16:17], and this check can also be made power8
specific.

Fixes: 7ffd948fae4cd ("powerpc/perf: factor out power8 pmu functions")
Signed-off-by: Madhavan Srinivasan <maddy@linux.vnet.ibm.com>
---
These changes should not have any impact, since none of the published events
in the power9 pmu-events JSON use the "cache" fields.

Changelog v1:
- Cleanup previously used macro
- Update the constraint check

 arch/powerpc/perf/isa207-common.c | 25 ++++++++++++++++++-------
 arch/powerpc/perf/isa207-common.h |  4 ++--
 2 files changed, 20 insertions(+), 9 deletions(-)
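
For context, a rough userspace sketch (not part of this series) of how the
"unit" and "cache" fields described in the commit message sit in a raw event
code; the field positions are assumptions mirroring isa207-common.h, and only
EVENT_CACHE_SEL_SHIFT actually appears in the patch below:

#include <stdint.h>
#include <stdio.h>

/* Assumed field positions, mirroring isa207-common.h; not defined in this patch */
#define EVENT_UNIT_SHIFT	12
#define EVENT_UNIT_MASK		0xfULL
#define EVENT_CACHE_SEL_SHIFT	20
#define EVENT_CACHE_SEL_MASK	0xffULL

int main(void)
{
	/* Made-up raw event code: unit = 6, cache reload qualifier = 1 */
	uint64_t event = (1ULL << EVENT_CACHE_SEL_SHIFT) | (6ULL << EVENT_UNIT_SHIFT);

	uint64_t unit  = (event >> EVENT_UNIT_SHIFT) & EVENT_UNIT_MASK;
	uint64_t cache = (event >> EVENT_CACHE_SEL_SHIFT) & EVENT_CACHE_SEL_MASK;

	/* "unit" selects what to monitor; "cache" is the reload qualifier for MMCR1 */
	printf("unit=%llu cache=%llu\n",
	       (unsigned long long)unit, (unsigned long long)cache);
	return 0;
}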

Comments

Michael Ellerman Oct. 10, 2017, 4:47 a.m. UTC | #1
Madhavan Srinivasan <maddy@linux.vnet.ibm.com> writes:

> diff --git a/arch/powerpc/perf/isa207-common.c b/arch/powerpc/perf/isa207-common.c
> index 2efee3f196f5..9fd2e5c7a063 100644
> --- a/arch/powerpc/perf/isa207-common.c
> +++ b/arch/powerpc/perf/isa207-common.c
> @@ -285,10 +293,10 @@ int isa207_get_constraint(u64 event, unsigned long *maskp, unsigned long *valp)
>  		 * have a cache selector of zero. The bank selector (bit 3) is
>  		 * irrelevant, as long as the rest of the value is 0.
>  		 */
> -		if (cache & 0x7)
> +		if (!cpu_has_feature(CPU_FTR_ARCH_300) && (cache & 0x7))
>  			return -1;
>  
> -	} else if (event & EVENT_IS_L1) {
> +	} else if (cpu_has_feature(CPU_FTR_ARCH_300) || (event & EVENT_IS_L1)) {
>  		mask  |= CNST_L1_QUAL_MASK;
>  		value |= CNST_L1_QUAL_VAL(cache);

I don't understand this. You're saying all non L2/L3 events are L1
events on Power9. That seems wrong.

>  	}
> @@ -391,11 +399,14 @@ int isa207_compute_mmcr(u64 event[], int n_ev,
>  		/* In continuous sampling mode, update SDAR on TLB miss */
>  		mmcra_sdar_mode(event[i], &mmcra);
>  
> -		if (event[i] & EVENT_IS_L1) {
> -			cache = event[i] >> EVENT_CACHE_SEL_SHIFT;
> -			mmcr1 |= (cache & 1) << MMCR1_IC_QUAL_SHIFT;
> -			cache >>= 1;
> -			mmcr1 |= (cache & 1) << MMCR1_DC_QUAL_SHIFT;
> +		if (cpu_has_feature(CPU_FTR_ARCH_300)) {
> +			cache = dc_ic_rld_quad_l1_sel(event[i]);
> +			mmcr1 |= (cache) << MMCR1_DC_IC_QUAL_SHIFT;
> +		} else {
> +			if (event[i] & EVENT_IS_L1) {
> +				cache = dc_ic_rld_quad_l1_sel(event[i]);
> +				mmcr1 |= (cache) << MMCR1_DC_IC_QUAL_SHIFT;
> +			}
>  		}

I also don't understand this. Both legs of the if have the exact same
code, so at the very least it's factored badly.

But why are we doing it for all events on Power9?
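
(For reference, a minimal sketch of the factoring being hinted at, assuming the
Power9 behaviour really is intended for all events; an untested fragment built
from the lines quoted above:)

	if (cpu_has_feature(CPU_FTR_ARCH_300) || (event[i] & EVENT_IS_L1)) {
		cache = dc_ic_rld_quad_l1_sel(event[i]);
		mmcr1 |= cache << MMCR1_DC_IC_QUAL_SHIFT;
	}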


cheers

Patch

diff --git a/arch/powerpc/perf/isa207-common.c b/arch/powerpc/perf/isa207-common.c
index 2efee3f196f5..9fd2e5c7a063 100644
--- a/arch/powerpc/perf/isa207-common.c
+++ b/arch/powerpc/perf/isa207-common.c
@@ -150,6 +150,14 @@  static bool is_thresh_cmp_valid(u64 event)
 	return true;
 }
 
+static unsigned int dc_ic_rld_quad_l1_sel(u64 event)
+{
+	unsigned int cache;
+
+	cache = (event >> EVENT_CACHE_SEL_SHIFT) & MMCR1_DC_IC_QUAL_MASK;
+	return cache;
+}
+
 static inline u64 isa207_find_source(u64 idx, u32 sub_idx)
 {
 	u64 ret = PERF_MEM_NA;
@@ -285,10 +293,10 @@  int isa207_get_constraint(u64 event, unsigned long *maskp, unsigned long *valp)
 		 * have a cache selector of zero. The bank selector (bit 3) is
 		 * irrelevant, as long as the rest of the value is 0.
 		 */
-		if (cache & 0x7)
+		if (!cpu_has_feature(CPU_FTR_ARCH_300) && (cache & 0x7))
 			return -1;
 
-	} else if (event & EVENT_IS_L1) {
+	} else if (cpu_has_feature(CPU_FTR_ARCH_300) || (event & EVENT_IS_L1)) {
 		mask  |= CNST_L1_QUAL_MASK;
 		value |= CNST_L1_QUAL_VAL(cache);
 	}
@@ -391,11 +399,14 @@  int isa207_compute_mmcr(u64 event[], int n_ev,
 		/* In continuous sampling mode, update SDAR on TLB miss */
 		mmcra_sdar_mode(event[i], &mmcra);
 
-		if (event[i] & EVENT_IS_L1) {
-			cache = event[i] >> EVENT_CACHE_SEL_SHIFT;
-			mmcr1 |= (cache & 1) << MMCR1_IC_QUAL_SHIFT;
-			cache >>= 1;
-			mmcr1 |= (cache & 1) << MMCR1_DC_QUAL_SHIFT;
+		if (cpu_has_feature(CPU_FTR_ARCH_300)) {
+			cache = dc_ic_rld_quad_l1_sel(event[i]);
+			mmcr1 |= (cache) << MMCR1_DC_IC_QUAL_SHIFT;
+		} else {
+			if (event[i] & EVENT_IS_L1) {
+				cache = dc_ic_rld_quad_l1_sel(event[i]);
+				mmcr1 |= (cache) << MMCR1_DC_IC_QUAL_SHIFT;
+			}
 		}
 
 		if (is_event_marked(event[i])) {
diff --git a/arch/powerpc/perf/isa207-common.h b/arch/powerpc/perf/isa207-common.h
index 6c737d675792..493e5cc5fa8a 100644
--- a/arch/powerpc/perf/isa207-common.h
+++ b/arch/powerpc/perf/isa207-common.h
@@ -232,8 +232,8 @@ 
 #define MMCR1_COMBINE_SHIFT(pmc)	(35 - ((pmc) - 1))
 #define MMCR1_PMCSEL_SHIFT(pmc)		(24 - (((pmc) - 1)) * 8)
 #define MMCR1_FAB_SHIFT			36
-#define MMCR1_DC_QUAL_SHIFT		47
-#define MMCR1_IC_QUAL_SHIFT		46
+#define MMCR1_DC_IC_QUAL_MASK		0x3
+#define MMCR1_DC_IC_QUAL_SHIFT		46
 
 /* MMCR1 Combine bits macro for power9 */
 #define p9_MMCR1_COMBINE_SHIFT(pmc)	(38 - ((pmc - 1) * 2))
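
As a quick sanity check on the MMCR1 macro change, a standalone userspace
sketch (not part of the patch) showing that the combined
MMCR1_DC_IC_QUAL_MASK/SHIFT encoding sets the same MMCR1 bits as the separate
IC (46) and DC (47) shifts it replaces:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Shift/mask values copied from the hunks above */
#define MMCR1_DC_QUAL_SHIFT	47
#define MMCR1_IC_QUAL_SHIFT	46
#define MMCR1_DC_IC_QUAL_MASK	0x3ULL
#define MMCR1_DC_IC_QUAL_SHIFT	46

int main(void)
{
	for (uint64_t cache = 0; cache < 4; cache++) {
		/* Old code: IC qualifier from bit 0, DC qualifier from bit 1 */
		uint64_t old_mmcr1 = ((cache & 1) << MMCR1_IC_QUAL_SHIFT) |
				     (((cache >> 1) & 1) << MMCR1_DC_QUAL_SHIFT);
		/* New code: both bits written as one two-bit field */
		uint64_t new_mmcr1 = (cache & MMCR1_DC_IC_QUAL_MASK)
				     << MMCR1_DC_IC_QUAL_SHIFT;

		assert(old_mmcr1 == new_mmcr1);
	}
	printf("DC/IC qualifier encodings agree for cache = 0..3\n");
	return 0;
}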