@@ -7,6 +7,7 @@
* Same as asm-generic/percpu.h, except that we store the per cpu offset
* in the paca. Based on the x86-64 implementation.
*/
+#define PER_CPU_OFFSET_POISON 0xfeeeeeeeeeeeeeeeULL
#ifdef CONFIG_SMP
@@ -198,7 +198,7 @@ void __init initialise_paca(struct paca_struct *new_paca, int cpu)
new_paca->hw_cpu_id = 0xffff;
new_paca->kexec_state = KEXEC_STATE_NONE;
new_paca->__current = &init_task;
- new_paca->data_offset = 0xfeeeeeeeeeeeeeeeULL;
+ new_paca->data_offset = PER_CPU_OFFSET_POISON;
#ifdef CONFIG_PPC_64S_HASH_MMU
new_paca->slb_shadow_ptr = NULL;
#endif
@@ -362,6 +362,7 @@ void __init early_setup(unsigned long dt_ptr)
* So set up a temporary paca. It will be replaced below once we know
* what CPU we are on.
*/
+ __per_cpu_offset[0] = 0;
initialise_paca(&boot_paca, 0);
fixup_boot_paca(&boot_paca);
WARN_ON(local_paca != 0);
@@ -828,7 +829,7 @@ static __init int pcpu_cpu_to_node(int cpu)
return early_cpu_to_node(cpu);
}
-unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
+unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = { [0 ... NR_CPUS-1 ] = PER_CPU_OFFSET_POISON };
EXPORT_SYMBOL(__per_cpu_offset);
void __init setup_per_cpu_areas(void)
If the boot CPU tries to access per-cpu data of other CPUs before per-cpu
areas are set up, it will unexpectedly use offset 0. Try to catch such
accesses by poisoning the __per_cpu_offset array.

Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
---
With the early boot machine check handler added to 64s, this might be
worth another try. We did have a bug in mce_init() that was using
cpu_to_node() too early, which would have been caught by this.

Thanks,
Nick

 arch/powerpc/include/asm/percpu.h | 1 +
 arch/powerpc/kernel/paca.c        | 2 +-
 arch/powerpc/kernel/setup_64.c    | 3 ++-
 3 files changed, 4 insertions(+), 2 deletions(-)