@@ -165,6 +165,10 @@ struct dtl_entry {
#define DTL_LOG_FAULT 0x4
#define DTL_LOG_ALL (DTL_LOG_CEDE | DTL_LOG_PREEMPT | DTL_LOG_FAULT)
+extern u8 dtl_mask;
+int set_dtl_mask(int cpu, int mask);
+void reset_dtl_mask(int cpu);
+
extern struct kmem_cache *dtl_cache;
/*
@@ -104,7 +104,8 @@ static int dtl_start(struct dtl *dtl)
dtlr->write_ptr = dtl->buf;
/* enable event logging */
- lppaca_of(dtl->cpu).dtl_enable_mask |= dtl_event_mask;
+ if (set_dtl_mask(dtl->cpu, dtl_event_mask))
+ return -EBUSY;
dtl_consumer = consume_dtle;
atomic_inc(&dtl_count);
@@ -121,7 +122,7 @@ static void dtl_stop(struct dtl *dtl)
dtlr->buf = NULL;
/* restore dtl_enable_mask */
- lppaca_of(dtl->cpu).dtl_enable_mask = DTL_LOG_PREEMPT;
+ reset_dtl_mask(dtl->cpu);
if (atomic_dec_and_test(&dtl_count))
dtl_consumer = NULL;
@@ -58,12 +58,74 @@
#define HBR_AVPN 0x0200000000000000UL
#define HBR_ANDCOND 0x0100000000000000UL
+u8 dtl_mask = DTL_LOG_PREEMPT;
+static u8 dtl_mask_global_refctr, dtl_mask_percpu_inuse;
+static DEFINE_MUTEX(dtl_mask_mutex);
/* in hvCall.S */
EXPORT_SYMBOL(plpar_hcall);
EXPORT_SYMBOL(plpar_hcall9);
EXPORT_SYMBOL(plpar_hcall_norets);
+/*
+ * set_dtl_mask() - enable DTL event logging for one cpu or globally.
+ * @cpu:  cpu whose lppaca dtl_enable_mask to set, or -1 to set the
+ *        mask on all present cpus (recorded in dtl_mask so cpus added
+ *        later also pick it up, see vpa_init()).
+ * @mask: DTL_LOG_* bits to enable.
+ *
+ * Per-cpu and global users are mutually exclusive: a global request is
+ * refused while any per-cpu user is active and vice versa.  A second
+ * global user must request the same mask as the first.
+ *
+ * Return: 0 on success, -EBUSY if the other mode is in use or a
+ * conflicting global mask is already set.
+ */
+int set_dtl_mask(int cpu, int mask)
+{
+ int rc = 0;
+
+ mutex_lock(&dtl_mask_mutex);
+
+ /* Don't allow mixing per-cpu and global users. */
+ if ((cpu == -1 && dtl_mask_percpu_inuse) ||
+ (cpu >= 0 && dtl_mask_global_refctr)) {
+ rc = -EBUSY;
+ goto out;
+ }
+
+ if (cpu >= 0) {
+ dtl_mask_percpu_inuse++;
+ lppaca_of(cpu).dtl_enable_mask = mask;
+ goto out;
+ }
+
+ /* An additional global user must agree with the existing mask. */
+ if (dtl_mask_global_refctr && mask != dtl_mask) {
+ rc = -EBUSY;
+ goto out;
+ }
+
+ /* First global user: propagate the mask to all present cpus. */
+ if (!dtl_mask_global_refctr) {
+ dtl_mask = mask;
+ for_each_present_cpu(cpu)
+ lppaca_of(cpu).dtl_enable_mask = mask;
+ }
+
+ dtl_mask_global_refctr++;
+
+out:
+ mutex_unlock(&dtl_mask_mutex);
+
+ return rc;
+}
+
+/*
+ * reset_dtl_mask() - undo a successful set_dtl_mask() for @cpu, or for
+ * the global mask when @cpu is -1.  Once the last user for a mode goes
+ * away, the affected cpus are restored to the default DTL_LOG_PREEMPT.
+ *
+ * NOTE(review): neither counter is checked for underflow — callers must
+ * strictly pair this with a successful set_dtl_mask(); confirm all
+ * callers do.
+ */
+void reset_dtl_mask(int cpu)
+{
+ mutex_lock(&dtl_mask_mutex);
+
+ if (cpu >= 0) {
+ /* Per-cpu user done: restore this cpu's default mask. */
+ dtl_mask_percpu_inuse--;
+ lppaca_of(cpu).dtl_enable_mask = DTL_LOG_PREEMPT;
+ goto out;
+ }
+
+ dtl_mask_global_refctr--;
+
+ /* Last global user gone: restore the default on all present cpus. */
+ if (!dtl_mask_global_refctr) {
+ dtl_mask = DTL_LOG_PREEMPT;
+ for_each_present_cpu(cpu)
+ lppaca_of(cpu).dtl_enable_mask = dtl_mask;
+ }
+
+out:
+ mutex_unlock(&dtl_mask_mutex);
+}
+
void vpa_init(int cpu)
{
int hwcpu = get_hard_smp_processor_id(cpu);
@@ -125,7 +187,7 @@ void vpa_init(int cpu)
pr_err("WARNING: DTL registration of cpu %d (hw %d) "
"failed with %ld\n", smp_processor_id(),
hwcpu, ret);
- lppaca_of(cpu).dtl_enable_mask = DTL_LOG_PREEMPT;
+ lppaca_of(cpu).dtl_enable_mask = dtl_mask;
}
}
@@ -293,7 +293,7 @@ static int alloc_dispatch_logs(void)
pr_err("WARNING: DTL registration of cpu %d (hw %d) failed "
"with %d\n", smp_processor_id(),
hard_smp_processor_id(), ret);
- get_paca()->lppaca_ptr->dtl_enable_mask = DTL_LOG_PREEMPT;
+ get_paca()->lppaca_ptr->dtl_enable_mask = dtl_mask;
return 0;
}
In a subsequent patch, we want to be able to change the DTL enable mask globally for all cpus. This conflicts with the current debugfs interface that provides access to the DTL buffer contents. To ensure consistent behavior, we introduce helpers to change the DTL enable mask on either a specific cpu, or globally for all cpus. Setting the DTL enable mask globally prevents changes to cpu-specific DTL enable mask, and vice versa. We also introduce 'dtl_mask' so that when the DTL enable mask is changed globally, new cpus also honor that. Signed-off-by: Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com> --- arch/powerpc/include/asm/lppaca.h | 4 ++ arch/powerpc/platforms/pseries/dtl.c | 5 +- arch/powerpc/platforms/pseries/lpar.c | 64 +++++++++++++++++++++++++- arch/powerpc/platforms/pseries/setup.c | 2 +- 4 files changed, 71 insertions(+), 4 deletions(-)