@@ -311,6 +311,8 @@ void slb_setup_new_exec(void)
 	struct mm_struct *mm = current->mm;
 	unsigned long exec = 0x10000000;
 
+	WARN_ON(irqs_disabled());
+
 	/*
 	 * preload cache can only be used to determine whether a SLB
 	 * entry exists if it does not start to overflow.
@@ -318,6 +320,8 @@ void slb_setup_new_exec(void)
 	if (ti->slb_preload_nr + 2 > SLB_PRELOAD_NR)
 		return;
 
+	hard_irq_disable();
+
 	/*
	 * We have no good place to clear the slb preload cache on exec,
 	 * flush_thread is about the earliest arch hook but that happens
@@ -347,6 +351,8 @@ void slb_setup_new_exec(void)
 
 	/* see switch_slb */
 	asm volatile("isync" : : : "memory");
+
+	local_irq_enable();
 }
 
 void preload_new_slb_context(unsigned long start, unsigned long sp)
@@ -355,10 +361,14 @@ void preload_new_slb_context(unsigned long start, unsigned long sp)
 	struct mm_struct *mm = current->mm;
 	unsigned long heap = mm->start_brk;
 
+	WARN_ON(irqs_disabled());
+
 	/* see above */
 	if (ti->slb_preload_nr + 3 > SLB_PRELOAD_NR)
 		return;
 
+	hard_irq_disable();
+
 	/* Userspace entry address. */
 	if (!is_kernel_addr(start)) {
 		if (preload_add(ti, start))
@@ -379,6 +389,8 @@ void preload_new_slb_context(unsigned long start, unsigned long sp)
 
 	/* see switch_slb */
 	asm volatile("isync" : : : "memory");
+
+	local_irq_enable();
 }
 
 /* Flush all user entries from the segment table of the current process. */
slb_setup_new_exec() and preload_new_slb_context() run with interrupts
and preemption enabled, so the SLB preload cache and SLB allocator state
they modify can be corrupted by a re-entrant interrupt or by another
process touching them. Hard disable interrupts over these regions.

Fixes: 5e46e29e6a97 ("powerpc/64s/hash: convert SLB miss handlers to C")
Fixes: 89ca4e126a3f ("powerpc/64s/hash: Add a SLB preload cache")
Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
---
 arch/powerpc/mm/slb.c | 12 ++++++++++++
 1 file changed, 12 insertions(+)
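
For reviewers, here is roughly what slb_setup_new_exec() looks like with
the patch applied. This is a sketch, not part of the diff: the body
between the hunks (the exec and mmap_base preloads and the
slb_allocate_user() calls) is reproduced from the surrounding code in
arch/powerpc/mm/slb.c as I read it upstream, so take it as an
illustration of the interrupt discipline rather than authoritative.

/*
 * Illustrative reconstruction; only the WARN_ON, hard_irq_disable()
 * and local_irq_enable() lines are added by this patch.
 */
void slb_setup_new_exec(void)
{
	struct thread_info *ti = current_thread_info();
	struct mm_struct *mm = current->mm;
	unsigned long exec = 0x10000000;

	/* Callers must arrive with interrupts on; see local_irq_enable() below. */
	WARN_ON(irqs_disabled());

	/*
	 * The preload cache can only be used as a membership test while
	 * it has room; bail before it would overflow.
	 */
	if (ti->slb_preload_nr + 2 > SLB_PRELOAD_NR)
		return;

	/*
	 * Hard disable (clear MSR[EE]): a merely soft-masked interrupt
	 * could still arrive and touch the SLB, so a soft disable would
	 * not make the updates below safe.
	 */
	hard_irq_disable();

	/* Preload the common executable link address... */
	if (!is_kernel_addr(exec)) {
		if (preload_add(ti, exec))
			slb_allocate_user(mm, exec);
	}

	/* ...and the library/mmap region. */
	if (!is_kernel_addr(mm->mmap_base)) {
		if (preload_add(ti, mm->mmap_base))
			slb_allocate_user(mm, mm->mmap_base);
	}

	/* see switch_slb */
	asm volatile("isync" : : : "memory");

	/* Re-enables both the soft mask and MSR[EE]. */
	local_irq_enable();
}

The choice of hard_irq_disable() over local_irq_disable() matters on
64s: local_irq_disable() only soft-masks, so interrupts still arrive
(they are noted for replay) and their entry can itself take SLB faults,
whereas hard_irq_disable() clears MSR[EE] so nothing can touch the SLB
re-entrantly. local_irq_enable() undoes both the soft mask and the hard
disable, which is what the WARN_ON(irqs_disabled()) at entry documents:
callers come in with interrupts enabled, so the unconditional enable at
the end is correct.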