
powerpc/mce: Remove per cpu variables from MCE handlers

Message ID 20201204102310.76213-1-ganeshgr@linux.ibm.com (mailing list archive)
State Superseded
Series powerpc/mce: Remove per cpu variables from MCE handlers

Checks

Context Check Description
snowpatch_ozlabs/apply_patch success Successfully applied on branch powerpc/merge (a1aeabd25a36d9e019381278e543e2d538dd44a7)
snowpatch_ozlabs/build-ppc64le success Build succeeded
snowpatch_ozlabs/build-ppc64be success Build succeeded
snowpatch_ozlabs/build-ppc64e success Build succeeded
snowpatch_ozlabs/build-pmac32 success Build succeeded
snowpatch_ozlabs/checkpatch success total: 0 errors, 0 warnings, 0 checks, 166 lines checked
snowpatch_ozlabs/needsstable success Patch has no Fixes tags

Commit Message

Ganesh Goudar Dec. 4, 2020, 10:23 a.m. UTC
Access to per-cpu variables requires translation to be enabled on
pseries machines running in hash MMU mode. Since part of the MCE
handler runs in real mode, and the MCE handling code is shared
between the pseries and powernv platforms, it becomes difficult to
manage these variables differently per platform. So keep these
variables in the paca instead of making them per-cpu variables, to
avoid complications.

The maximum recursion depth of a machine check is 4. Given that
limit, reduce MAX_MC_EVT, the size of the event arrays, from 100 to 10.

Signed-off-by: Ganesh Goudar <ganeshgr@linux.ibm.com>
---
 arch/powerpc/include/asm/mce.h  |  2 +-
 arch/powerpc/include/asm/paca.h | 12 ++++++++
 arch/powerpc/kernel/mce.c       | 54 +++++++++++++--------------------
 3 files changed, 34 insertions(+), 34 deletions(-)
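
For context, a minimal sketch of the access-pattern difference the
commit message relies on. The function below is hypothetical (per-cpu
MCE state is exactly what this patch removes), but get_paca(),
local_paca/r13 and the paca's data_offset field are existing kernel
facilities:

/*
 * Hypothetical illustration: why a paca field is real-mode safe where
 * a per-cpu variable is not, on pseries with the hash MMU.
 */
static int mce_nest_count_sketch(void)
{
	/*
	 * A per-cpu read resolves to a vmlinux symbol address plus this
	 * CPU's data_offset (itself stored in the paca). The resulting
	 * address can lie outside the RMA, so dereferencing it with the
	 * MMU off is unsafe:
	 *
	 *	return __this_cpu_read(mce_nest_count);
	 */

	/*
	 * A paca read goes through r13 (local_paca), and the paca is
	 * allocated low enough to be addressable in real mode:
	 */
	return get_paca()->mce_nest_count;
}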

Comments

Michael Ellerman Dec. 8, 2020, 10:31 a.m. UTC | #1
Ganesh Goudar <ganeshgr@linux.ibm.com> writes:
> diff --git a/arch/powerpc/include/asm/paca.h b/arch/powerpc/include/asm/paca.h
> index 9454d29ff4b4..4769954efa7d 100644
> --- a/arch/powerpc/include/asm/paca.h
> +++ b/arch/powerpc/include/asm/paca.h
> @@ -273,6 +274,17 @@ struct paca_struct {
>  #ifdef CONFIG_MMIOWB
>  	struct mmiowb_state mmiowb_state;
>  #endif
> +#ifdef CONFIG_PPC_BOOK3S_64
> +	int mce_nest_count;
> +	struct machine_check_event mce_event[MAX_MC_EVT];
> +	/* Queue for delayed MCE events. */
> +	int mce_queue_count;
> +	struct machine_check_event mce_event_queue[MAX_MC_EVT];
> +
> +	/* Queue for delayed MCE UE events. */
> +	int mce_ue_count;
> +	struct machine_check_event  mce_ue_event_queue[MAX_MC_EVT];
> +#endif /* CONFIG_PPC_BOOK3S_64 */
>  } ____cacheline_aligned;

How much does this expand the paca by?

pahole can tell you.

cheers
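
For example, something like this (invocation from memory; output
abridged, with the size as reported in the reply below):

  $ pahole -C paca_struct vmlinux
  struct paca_struct {
  	...
  	/* size: 4480, cachelines: 70, members: ... */
  };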
Ganesh Goudar Dec. 8, 2020, 10:46 a.m. UTC | #2
On 12/8/20 4:01 PM, Michael Ellerman wrote:
> Ganesh Goudar <ganeshgr@linux.ibm.com> writes:
>> diff --git a/arch/powerpc/include/asm/paca.h b/arch/powerpc/include/asm/paca.h
>> index 9454d29ff4b4..4769954efa7d 100644
>> --- a/arch/powerpc/include/asm/paca.h
>> +++ b/arch/powerpc/include/asm/paca.h
>> @@ -273,6 +274,17 @@ struct paca_struct {
>>   #ifdef CONFIG_MMIOWB
>>   	struct mmiowb_state mmiowb_state;
>>   #endif
>> +#ifdef CONFIG_PPC_BOOK3S_64
>> +	int mce_nest_count;
>> +	struct machine_check_event mce_event[MAX_MC_EVT];
>> +	/* Queue for delayed MCE events. */
>> +	int mce_queue_count;
>> +	struct machine_check_event mce_event_queue[MAX_MC_EVT];
>> +
>> +	/* Queue for delayed MCE UE events. */
>> +	int mce_ue_count;
>> +	struct machine_check_event  mce_ue_event_queue[MAX_MC_EVT];
>> +#endif /* CONFIG_PPC_BOOK3S_64 */
>>   } ____cacheline_aligned;
> How much does this expand the paca by?

The paca is 4480 bytes; these fields add another 2160 bytes, expanding it by ~48%.
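
(As a cross-check, inferring a per-event size the thread doesn't
state: 2160 bytes across three MAX_MC_EVT arrays of 10 entries each
works out to 2160 / 30 = 72 bytes per struct machine_check_event,
with the three int counters presumably folding into padding, and
2160 / 4480 ≈ 48%, matching the figure above.)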
Mahesh J Salgaonkar Dec. 8, 2020, 12:04 p.m. UTC | #3
On 12/8/20 4:16 PM, Ganesh wrote:
> 
> On 12/8/20 4:01 PM, Michael Ellerman wrote:
>> Ganesh Goudar <ganeshgr@linux.ibm.com> writes:
>>> diff --git a/arch/powerpc/include/asm/paca.h
>>> b/arch/powerpc/include/asm/paca.h
>>> index 9454d29ff4b4..4769954efa7d 100644
>>> --- a/arch/powerpc/include/asm/paca.h
>>> +++ b/arch/powerpc/include/asm/paca.h
>>> @@ -273,6 +274,17 @@ struct paca_struct {
>>>   #ifdef CONFIG_MMIOWB
>>>       struct mmiowb_state mmiowb_state;
>>>   #endif
>>> +#ifdef CONFIG_PPC_BOOK3S_64
>>> +    int mce_nest_count;
>>> +    struct machine_check_event mce_event[MAX_MC_EVT];
>>> +    /* Queue for delayed MCE events. */
>>> +    int mce_queue_count;
>>> +    struct machine_check_event mce_event_queue[MAX_MC_EVT];
>>> +
>>> +    /* Queue for delayed MCE UE events. */
>>> +    int mce_ue_count;
>>> +    struct machine_check_event  mce_ue_event_queue[MAX_MC_EVT];
>>> +#endif /* CONFIG_PPC_BOOK3S_64 */
>>>   } ____cacheline_aligned;
>> How much does this expand the paca by?
> 
> The paca is 4480 bytes; these fields add another 2160 bytes, expanding
> it by ~48%.
> 

Should we dynamically allocate these arrays early, similar to
paca->mce_faulty_slbs, so that we don't bump up the paca size?

Thanks,
-Mahesh.
Michael Ellerman Dec. 9, 2020, 5:09 a.m. UTC | #4
Mahesh Jagannath Salgaonkar <mahesh@linux.ibm.com> writes:
> On 12/8/20 4:16 PM, Ganesh wrote:
>> 
>> On 12/8/20 4:01 PM, Michael Ellerman wrote:
>>> Ganesh Goudar <ganeshgr@linux.ibm.com> writes:
>>>> diff --git a/arch/powerpc/include/asm/paca.h
>>>> b/arch/powerpc/include/asm/paca.h
>>>> index 9454d29ff4b4..4769954efa7d 100644
>>>> --- a/arch/powerpc/include/asm/paca.h
>>>> +++ b/arch/powerpc/include/asm/paca.h
>>>> @@ -273,6 +274,17 @@ struct paca_struct {
>>>>   #ifdef CONFIG_MMIOWB
>>>>       struct mmiowb_state mmiowb_state;
>>>>   #endif
>>>> +#ifdef CONFIG_PPC_BOOK3S_64
>>>> +    int mce_nest_count;
>>>> +    struct machine_check_event mce_event[MAX_MC_EVT];
>>>> +    /* Queue for delayed MCE events. */
>>>> +    int mce_queue_count;
>>>> +    struct machine_check_event mce_event_queue[MAX_MC_EVT];
>>>> +
>>>> +    /* Queue for delayed MCE UE events. */
>>>> +    int mce_ue_count;
>>>> +    struct machine_check_event  mce_ue_event_queue[MAX_MC_EVT];
>>>> +#endif /* CONFIG_PPC_BOOK3S_64 */
>>>>   } ____cacheline_aligned;
>>> How much does this expand the paca by?
>> 
>> The paca is 4480 bytes; these fields add another 2160 bytes, expanding
>> it by ~48%.
>
> Should we dynamically allocate these arrays early, similar to
> paca->mce_faulty_slbs, so that we don't bump up the paca size?

Yeah I think that would be preferable.

That way those allocations can be normal node-local allocations on bare
metal, or when using radix. (Or even on KVM).

In fact what we probably want is a separate struct for all the MCE
related data, eg something like:

struct mce_stuff {
  int nest_count;
  /* Queue for delayed MCE events. */
  int queue_count;
  /* Queue for delayed MCE UE events. */
  int ue_count;

  struct machine_check_event events[MAX_MC_EVT];
  struct machine_check_event event_queue[MAX_MC_EVT];
  struct machine_check_event ue_event_queue[MAX_MC_EVT];
};

And then you allocate one of those per CPU, inside the RMO for pseries
with hash, and node-local otherwise.

cheers
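
To make that concrete, a minimal allocation sketch. alloc_mce_stuff()
is made up; alloc_paca_data() is the existing (currently static)
helper in arch/powerpc/kernel/paca.c, so reusing it from elsewhere is
an assumption, and whether early boot should use early_radix_enabled()
rather than radix_enabled() is left open:

static struct mce_stuff *alloc_mce_stuff(int cpu)
{
	/* pseries + hash: must be addressable in real mode, i.e. in the RMA. */
	unsigned long limit = ppc64_rma_size;

	/*
	 * With radix, or on bare metal, real mode can reach all of memory,
	 * so pass no upper bound (0 is MEMBLOCK_ALLOC_ACCESSIBLE) and let
	 * memblock pick node-local memory for the given cpu.
	 */
	if (radix_enabled() || !firmware_has_feature(FW_FEATURE_LPAR))
		limit = 0;

	return alloc_paca_data(sizeof(struct mce_stuff), L1_CACHE_BYTES,
			       limit, cpu);
}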

Patch

diff --git a/arch/powerpc/include/asm/mce.h b/arch/powerpc/include/asm/mce.h
index 89aa8248a57d..feef45f2b51b 100644
--- a/arch/powerpc/include/asm/mce.h
+++ b/arch/powerpc/include/asm/mce.h
@@ -204,7 +204,7 @@  struct mce_error_info {
 	bool			ignore_event;
 };
 
-#define MAX_MC_EVT	100
+#define MAX_MC_EVT	10
 
 /* Release flags for get_mce_event() */
 #define MCE_EVENT_RELEASE	true
diff --git a/arch/powerpc/include/asm/paca.h b/arch/powerpc/include/asm/paca.h
index 9454d29ff4b4..4769954efa7d 100644
--- a/arch/powerpc/include/asm/paca.h
+++ b/arch/powerpc/include/asm/paca.h
@@ -29,6 +29,7 @@ 
 #include <asm/hmi.h>
 #include <asm/cpuidle.h>
 #include <asm/atomic.h>
+#include <asm/mce.h>
 
 #include <asm-generic/mmiowb_types.h>
 
@@ -273,6 +274,17 @@  struct paca_struct {
 #ifdef CONFIG_MMIOWB
 	struct mmiowb_state mmiowb_state;
 #endif
+#ifdef CONFIG_PPC_BOOK3S_64
+	int mce_nest_count;
+	struct machine_check_event mce_event[MAX_MC_EVT];
+	/* Queue for delayed MCE events. */
+	int mce_queue_count;
+	struct machine_check_event mce_event_queue[MAX_MC_EVT];
+
+	/* Queue for delayed MCE UE events. */
+	int mce_ue_count;
+	struct machine_check_event  mce_ue_event_queue[MAX_MC_EVT];
+#endif /* CONFIG_PPC_BOOK3S_64 */
 } ____cacheline_aligned;
 
 extern void copy_mm_to_paca(struct mm_struct *mm);
diff --git a/arch/powerpc/kernel/mce.c b/arch/powerpc/kernel/mce.c
index 63702c0badb9..5f53d02d6cbb 100644
--- a/arch/powerpc/kernel/mce.c
+++ b/arch/powerpc/kernel/mce.c
@@ -22,18 +22,6 @@ 
 #include <asm/mce.h>
 #include <asm/nmi.h>
 
-static DEFINE_PER_CPU(int, mce_nest_count);
-static DEFINE_PER_CPU(struct machine_check_event[MAX_MC_EVT], mce_event);
-
-/* Queue for delayed MCE events. */
-static DEFINE_PER_CPU(int, mce_queue_count);
-static DEFINE_PER_CPU(struct machine_check_event[MAX_MC_EVT], mce_event_queue);
-
-/* Queue for delayed MCE UE events. */
-static DEFINE_PER_CPU(int, mce_ue_count);
-static DEFINE_PER_CPU(struct machine_check_event[MAX_MC_EVT],
-					mce_ue_event_queue);
-
 static void machine_check_process_queued_event(struct irq_work *work);
 static void machine_check_ue_irq_work(struct irq_work *work);
 static void machine_check_ue_event(struct machine_check_event *evt);
@@ -103,8 +91,8 @@  void save_mce_event(struct pt_regs *regs, long handled,
 		    struct mce_error_info *mce_err,
 		    uint64_t nip, uint64_t addr, uint64_t phys_addr)
 {
-	int index = __this_cpu_inc_return(mce_nest_count) - 1;
-	struct machine_check_event *mce = this_cpu_ptr(&mce_event[index]);
+	int index = get_paca()->mce_nest_count++;
+	struct machine_check_event *mce = &get_paca()->mce_event[index];
 
 	/*
 	 * Return if we don't have enough space to log mce event.
@@ -191,7 +179,7 @@  void save_mce_event(struct pt_regs *regs, long handled,
  */
 int get_mce_event(struct machine_check_event *mce, bool release)
 {
-	int index = __this_cpu_read(mce_nest_count) - 1;
+	int index = get_paca()->mce_nest_count - 1;
 	struct machine_check_event *mc_evt;
 	int ret = 0;
 
@@ -201,7 +189,7 @@  int get_mce_event(struct machine_check_event *mce, bool release)
 
 	/* Check if we have MCE info to process. */
 	if (index < MAX_MC_EVT) {
-		mc_evt = this_cpu_ptr(&mce_event[index]);
+		mc_evt = &get_paca()->mce_event[index];
 		/* Copy the event structure and release the original */
 		if (mce)
 			*mce = *mc_evt;
@@ -211,7 +199,7 @@  int get_mce_event(struct machine_check_event *mce, bool release)
 	}
 	/* Decrement the count to free the slot. */
 	if (release)
-		__this_cpu_dec(mce_nest_count);
+		get_paca()->mce_nest_count--;
 
 	return ret;
 }
@@ -233,13 +221,13 @@  static void machine_check_ue_event(struct machine_check_event *evt)
 {
 	int index;
 
-	index = __this_cpu_inc_return(mce_ue_count) - 1;
+	index = get_paca()->mce_ue_count++;
 	/* If queue is full, just return for now. */
 	if (index >= MAX_MC_EVT) {
-		__this_cpu_dec(mce_ue_count);
+		get_paca()->mce_ue_count--;
 		return;
 	}
-	memcpy(this_cpu_ptr(&mce_ue_event_queue[index]), evt, sizeof(*evt));
+	memcpy(&get_paca()->mce_ue_event_queue[index], evt, sizeof(*evt));
 
 	/* Queue work to process this event later. */
 	irq_work_queue(&mce_ue_event_irq_work);
@@ -256,13 +244,13 @@  void machine_check_queue_event(void)
 	if (!get_mce_event(&evt, MCE_EVENT_RELEASE))
 		return;
 
-	index = __this_cpu_inc_return(mce_queue_count) - 1;
+	index = get_paca()->mce_queue_count++;
 	/* If queue is full, just return for now. */
 	if (index >= MAX_MC_EVT) {
-		__this_cpu_dec(mce_queue_count);
+		get_paca()->mce_queue_count--;
 		return;
 	}
-	memcpy(this_cpu_ptr(&mce_event_queue[index]), &evt, sizeof(evt));
+	memcpy(&get_paca()->mce_event_queue[index], &evt, sizeof(evt));
 
 	/* Queue irq work to process this event later. */
 	irq_work_queue(&mce_event_process_work);
@@ -289,9 +277,9 @@  static void machine_process_ue_event(struct work_struct *work)
 	int index;
 	struct machine_check_event *evt;
 
-	while (__this_cpu_read(mce_ue_count) > 0) {
-		index = __this_cpu_read(mce_ue_count) - 1;
-		evt = this_cpu_ptr(&mce_ue_event_queue[index]);
+	while (get_paca()->mce_ue_count > 0) {
+		index = get_paca()->mce_ue_count - 1;
+		evt = &get_paca()->mce_ue_event_queue[index];
 		blocking_notifier_call_chain(&mce_notifier_list, 0, evt);
 #ifdef CONFIG_MEMORY_FAILURE
 		/*
@@ -304,7 +292,7 @@  static void machine_process_ue_event(struct work_struct *work)
 		 */
 		if (evt->error_type == MCE_ERROR_TYPE_UE) {
 			if (evt->u.ue_error.ignore_event) {
-				__this_cpu_dec(mce_ue_count);
+				get_paca()->mce_ue_count--;
 				continue;
 			}
 
@@ -320,7 +308,7 @@  static void machine_process_ue_event(struct work_struct *work)
 					"was generated\n");
 		}
 #endif
-		__this_cpu_dec(mce_ue_count);
+		get_paca()->mce_ue_count--;
 	}
 }
 /*
@@ -338,17 +326,17 @@  static void machine_check_process_queued_event(struct irq_work *work)
 	 * For now just print it to console.
 	 * TODO: log this error event to FSP or nvram.
 	 */
-	while (__this_cpu_read(mce_queue_count) > 0) {
-		index = __this_cpu_read(mce_queue_count) - 1;
-		evt = this_cpu_ptr(&mce_event_queue[index]);
+	while (get_paca()->mce_queue_count > 0) {
+		index = get_paca()->mce_queue_count - 1;
+		evt = &get_paca()->mce_event_queue[index];
 
 		if (evt->error_type == MCE_ERROR_TYPE_UE &&
 		    evt->u.ue_error.ignore_event) {
-			__this_cpu_dec(mce_queue_count);
+			get_paca()->mce_queue_count--;
 			continue;
 		}
 		machine_check_print_event_info(evt, false, false);
-		__this_cpu_dec(mce_queue_count);
+		get_paca()->mce_queue_count--;
 	}
 }