diff mbox series

[v3,1/6] powerpc/64/interrupt: make normal synchronous interrupts enable MSR[EE] if possible

Message ID 20210922145452.352571-2-npiggin@gmail.com (mailing list archive)
State Accepted
Headers show
Series powerpc/64s: interrupt speedups | expand

Commit Message

Nicholas Piggin Sept. 22, 2021, 2:54 p.m. UTC
Make synchronous interrupt handler entry wrappers enable MSR[EE] if
MSR[EE] was enabled in the interrupted context. IRQs are soft-disabled
at this point so there is no change to high level code, but it means a
masked interrupt could fire.

This is a performance disadvantage for interrupts which do not later
call interrupt_cond_local_irq_enable(), because an additional mtmsrd
or wrtee instruction is executed. However the important synchronous
interrupts (e.g., page fault) do enable interrupts, so the performance
disadvantage is mostly avoided.

In the next patch, MSR[RI] enabling can be combined with MSR[EE]
enabling, which mitigates the performance drop for the former and gives
a performance advantage for the latter interrupts, on 64s machines. 64e
is coming along for the ride for now to avoid divergences with 64s in
this tricky code.

Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
---
 arch/powerpc/include/asm/interrupt.h | 19 ++++++++++++++++++-
 1 file changed, 18 insertions(+), 1 deletion(-)

Comments

Christophe Leroy Sept. 23, 2021, 5:05 a.m. UTC | #1
Le 22/09/2021 à 16:54, Nicholas Piggin a écrit :
> Make synchronous interrupt handler entry wrappers enable MSR[EE] if
> MSR[EE] was enabled in the interrupted context. IRQs are soft-disabled
> at this point so there is no change to high level code, but it's a
> masked interrupt could fire.
> 
> This is a performance disadvantage for interrupts which do not later
> call interrupt_cond_local_irq_enable(), because an an additional mtmsrd
> or wrtee instruction is executed. However the important synchronous
> interrupts (e.g., page fault) do enable interrupts, so the performance
> disadvantage is mostly avoided.
> 
> In the next patch, MSR[RI] enabling can be combined with MSR[EE]
> enabling, which mitigates the performance drop for the former and gives
> a performance advanage for the latter interrupts, on 64s machines. 64e
> is coming along for the ride for now to avoid divergences with 64s in
> this tricky code.
> 
> Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
> ---
>   arch/powerpc/include/asm/interrupt.h | 19 ++++++++++++++++++-
>   1 file changed, 18 insertions(+), 1 deletion(-)
> 
> diff --git a/arch/powerpc/include/asm/interrupt.h b/arch/powerpc/include/asm/interrupt.h
> index b76ab848aa0d..3802390d8eea 100644
> --- a/arch/powerpc/include/asm/interrupt.h
> +++ b/arch/powerpc/include/asm/interrupt.h
> @@ -150,7 +150,20 @@ static inline void interrupt_enter_prepare(struct pt_regs *regs, struct interrup
>   #ifdef CONFIG_PPC64
>   	if (irq_soft_mask_set_return(IRQS_ALL_DISABLED) == IRQS_ENABLED)
>   		trace_hardirqs_off();
> -	local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
> +
> +	/*
> +	 * If the interrupt was taken with HARD_DIS clear, then enable MSR[EE].
> +	 * Asynchronous interrupts get here with HARD_DIS set (see below), so
> +	 * this enables MSR[EE] for synchronous interrupts. IRQs remain
> +	 * soft-masked. The interrupt handler may later call
> +	 * interrupt_cond_local_irq_enable() to achieve a regular process
> +	 * context.
> +	 */
> +	if (!(local_paca->irq_happened & PACA_IRQ_HARD_DIS)) {
> +		if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
> +			BUG_ON(!(regs->msr & MSR_EE));

Could be:
	BUG_ON(IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG) && !(regs->msr & 
MSR_EE));

> +		__hard_irq_enable();
> +	}
>   
>   	if (user_mode(regs)) {
>   		CT_WARN_ON(ct_state() != CONTEXT_USER);
> @@ -200,6 +213,10 @@ static inline void interrupt_exit_prepare(struct pt_regs *regs, struct interrupt
>   
>   static inline void interrupt_async_enter_prepare(struct pt_regs *regs, struct interrupt_state *state)
>   {
> +#ifdef CONFIG_PPC64
> +	/* Ensure interrupt_enter_prepare does not enable MSR[EE] */
> +	local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
> +#endif
>   #ifdef CONFIG_PPC_BOOK3S_64
>   	if (cpu_has_feature(CPU_FTR_CTRL) &&
>   	    !test_thread_local_flags(_TLF_RUNLATCH))
>
diff mbox series

Patch

diff --git a/arch/powerpc/include/asm/interrupt.h b/arch/powerpc/include/asm/interrupt.h
index b76ab848aa0d..3802390d8eea 100644
--- a/arch/powerpc/include/asm/interrupt.h
+++ b/arch/powerpc/include/asm/interrupt.h
@@ -150,7 +150,20 @@  static inline void interrupt_enter_prepare(struct pt_regs *regs, struct interrup
 #ifdef CONFIG_PPC64
 	if (irq_soft_mask_set_return(IRQS_ALL_DISABLED) == IRQS_ENABLED)
 		trace_hardirqs_off();
-	local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
+
+	/*
+	 * If the interrupt was taken with HARD_DIS clear, then enable MSR[EE].
+	 * Asynchronous interrupts get here with HARD_DIS set (see below), so
+	 * this enables MSR[EE] for synchronous interrupts. IRQs remain
+	 * soft-masked. The interrupt handler may later call
+	 * interrupt_cond_local_irq_enable() to achieve a regular process
+	 * context.
+	 */
+	if (!(local_paca->irq_happened & PACA_IRQ_HARD_DIS)) {
+		if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
+			BUG_ON(!(regs->msr & MSR_EE));
+		__hard_irq_enable();
+	}
 
 	if (user_mode(regs)) {
 		CT_WARN_ON(ct_state() != CONTEXT_USER);
@@ -200,6 +213,10 @@  static inline void interrupt_exit_prepare(struct pt_regs *regs, struct interrupt
 
 static inline void interrupt_async_enter_prepare(struct pt_regs *regs, struct interrupt_state *state)
 {
+#ifdef CONFIG_PPC64
+	/* Ensure interrupt_enter_prepare does not enable MSR[EE] */
+	local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
+#endif
 #ifdef CONFIG_PPC_BOOK3S_64
 	if (cpu_has_feature(CPU_FTR_CTRL) &&
 	    !test_thread_local_flags(_TLF_RUNLATCH))