
powerpc/irq: Remove arch_local_irq_restore() for !CONFIG_CC_HAS_ASM_GOTO

Message ID: 58df50c9e77e2ed945bacdead30412770578886b.1652715336.git.christophe.leroy@csgroup.eu
State: Accepted
Series: powerpc/irq: Remove arch_local_irq_restore() for !CONFIG_CC_HAS_ASM_GOTO

Checks

Context                                      Check    Description
snowpatch_ozlabs/github-powerpc_ppctests     success  Successfully ran 10 jobs.
snowpatch_ozlabs/github-powerpc_selftests    success  Successfully ran 10 jobs.
snowpatch_ozlabs/github-powerpc_kernel_qemu  success  Successfully ran 24 jobs.
snowpatch_ozlabs/github-powerpc_clang        success  Successfully ran 7 jobs.
snowpatch_ozlabs/github-powerpc_sparse       success  Successfully ran 4 jobs.

Commit Message

Christophe Leroy May 16, 2022, 3:36 p.m. UTC
All supported versions of GCC support asm goto.

Remove the !CONFIG_CC_HAS_ASM_GOTO version of arch_local_irq_restore().

Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
---
 arch/powerpc/kernel/irq.c | 77 ---------------------------------------
 1 file changed, 77 deletions(-)
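
For readers who haven't met the feature: asm goto (supported by GCC since 4.5
and by clang since 9) lets an inline-assembly statement jump directly to a C
label, so the compiler does not have to materialize a condition code into a
register and re-test it in C. Below is a minimal, hypothetical powerpc64
sketch of the pattern; it is illustrative only, not code from the kernel tree.

/*
 * Hypothetical illustration only, not code from the kernel tree:
 * inline asm that branches straight to a C label. Builds with
 * GCC >= 4.5 or clang >= 9 targeting powerpc64.
 */
#include <stdio.h>

static int is_zero(unsigned long x)
{
	/* Compare x with zero and jump directly to the C label
	 * not_zero when it isn't; no flag value flows back to C. */
	asm goto("cmpdi %0, 0\n\t"
		 "bne %l[not_zero]"
		 : /* asm goto permits no output operands before GCC 11 */
		 : "r" (x)
		 : "cc"
		 : not_zero);
	return 1;
not_zero:
	return 0;
}

int main(void)
{
	printf("%d %d\n", is_zero(0), is_zero(42));	/* prints: 1 0 */
	return 0;
}

In the kernel the same compiler feature underpins jump labels/static keys,
and it is exactly what the CONFIG_CC_HAS_ASM_GOTO gate being removed here
was probing for.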

Comments

Michael Ellerman May 17, 2022, 12:48 p.m. UTC | #1
Christophe Leroy <christophe.leroy@csgroup.eu> writes:
> All supported versions of GCC support asm goto.

I thought clang was the one that only recently added support for asm
goto.

<looks>

Apparently clang added support in 2019, in clang 9. The earliest clang
we claim to support is 11.

So this patch is good; I'll just adjust the changelog to say GCC/clang.

cheers
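
For context on the gate itself: in kernels of this era the symbol was set by
a configure-time probe, and with a compiler floor of GCC 5.1 / clang 11 the
probe always succeeds, which is what makes the fallback dead code. A sketch
of that probe, paraphrased from init/Kconfig around v5.18 rather than quoted
from this patch:

# Paraphrased sketch of the probe (init/Kconfig, ~v5.18); the helper
# script compile-tests a trivial asm goto statement with $(CC).
config CC_HAS_ASM_GOTO
	def_bool $(success,$(srctree)/scripts/gcc-goto.sh $(CC))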

Michael Ellerman May 24, 2022, 11:09 a.m. UTC | #2
On Mon, 16 May 2022 17:36:04 +0200, Christophe Leroy wrote:
> All supported versions of GCC support asm goto.
> 
> Remove the !CONFIG_CC_HAS_ASM_GOTO version of arch_local_irq_restore().
> 
> 

Applied to powerpc/next.

[1/1] powerpc/irq: Remove arch_local_irq_restore() for !CONFIG_CC_HAS_ASM_GOTO
      https://git.kernel.org/powerpc/c/5fe855169f9782c669f640b66242662209ffb72a

cheers

Patch

diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index c768cde03e36..dd09919c3c66 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -216,7 +216,6 @@ static inline void replay_soft_interrupts_irqrestore(void)
 #define replay_soft_interrupts_irqrestore() replay_soft_interrupts()
 #endif
 
-#ifdef CONFIG_CC_HAS_ASM_GOTO
 notrace void arch_local_irq_restore(unsigned long mask)
 {
 	unsigned char irq_happened;
@@ -312,82 +311,6 @@ notrace void arch_local_irq_restore(unsigned long mask)
 	__hard_irq_enable();
 	preempt_enable();
 }
-#else
-notrace void arch_local_irq_restore(unsigned long mask)
-{
-	unsigned char irq_happened;
-
-	/* Write the new soft-enabled value */
-	irq_soft_mask_set(mask);
-	if (mask)
-		return;
-
-	if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
-		WARN_ON_ONCE(in_nmi() || in_hardirq());
-
-	/*
-	 * From this point onward, we can take interrupts, preempt,
-	 * etc... unless we got hard-disabled. We check if an event
-	 * happened. If none happened, we know we can just return.
-	 *
-	 * We may have preempted before the check below, in which case
-	 * we are checking the "new" CPU instead of the old one. This
-	 * is only a problem if an event happened on the "old" CPU.
-	 *
-	 * External interrupt events will have caused interrupts to
-	 * be hard-disabled, so there is no problem, we
-	 * cannot have preempted.
-	 */
-	irq_happened = get_irq_happened();
-	if (!irq_happened) {
-		if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
-			WARN_ON_ONCE(!(mfmsr() & MSR_EE));
-		return;
-	}
-
-	/* We need to hard disable to replay. */
-	if (!(irq_happened & PACA_IRQ_HARD_DIS)) {
-		if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
-			WARN_ON_ONCE(!(mfmsr() & MSR_EE));
-		__hard_irq_disable();
-		local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
-	} else {
-		/*
-		 * We should already be hard disabled here. We had bugs
-		 * where that wasn't the case so let's dbl check it and
-		 * warn if we are wrong. Only do that when IRQ tracing
-		 * is enabled as mfmsr() can be costly.
-		 */
-		if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG)) {
-			if (WARN_ON_ONCE(mfmsr() & MSR_EE))
-				__hard_irq_disable();
-		}
-
-		if (irq_happened == PACA_IRQ_HARD_DIS) {
-			local_paca->irq_happened = 0;
-			__hard_irq_enable();
-			return;
-		}
-	}
-
-	/*
-	 * Disable preempt here, so that the below preempt_enable will
-	 * perform resched if required (a replayed interrupt may set
-	 * need_resched).
-	 */
-	preempt_disable();
-	irq_soft_mask_set(IRQS_ALL_DISABLED);
-	trace_hardirqs_off();
-
-	replay_soft_interrupts_irqrestore();
-	local_paca->irq_happened = 0;
-
-	trace_hardirqs_on();
-	irq_soft_mask_set(IRQS_ENABLED);
-	__hard_irq_enable();
-	preempt_enable();
-}
-#endif
 EXPORT_SYMBOL(arch_local_irq_restore);
 
 /*