
[RFC,v4,11/12] powerpc: Add a Kconfig and a function to set new soft_enabled mask

Message ID 1472409448-18172-12-git-send-email-maddy@linux.vnet.ibm.com (mailing list archive)
State Superseded

Commit Message

maddy Aug. 28, 2016, 6:37 p.m. UTC
A new Kconfig option, CONFIG_IRQ_DEBUG_SUPPORT, is added to guard a
WARN_ON that flags the use of soft_irq_set_mask() for disabling the
lower bitmask interrupts.

The code under CONFIG_TRACE_IRQFLAGS in arch_local_irq_restore() has
also been moved under the new Kconfig option, as suggested.

The patch also adds a new soft_irq_set_mask() to update paca->soft_enabled.

Signed-off-by: Madhavan Srinivasan <maddy@linux.vnet.ibm.com>
---
 arch/powerpc/Kconfig              |  4 ++++
 arch/powerpc/include/asm/hw_irq.h | 17 +++++++++++++++++
 arch/powerpc/kernel/irq.c         |  4 ++--
 3 files changed, 23 insertions(+), 2 deletions(-)

Comments

Nicholas Piggin Aug. 29, 2016, 12:23 a.m. UTC | #1
On Mon, 29 Aug 2016 00:07:27 +0530
Madhavan Srinivasan <maddy@linux.vnet.ibm.com> wrote:

> New Kconfig is added "CONFIG_IRQ_DEBUG_SUPPORT" to add a warn_on
> to alert the usage of soft_irq_set_mask() for disabling lower
> bitmask interrupts.
> 
> Have also moved the code under the CONFIG_TRACE_IRQFLAGS in
> arch_local_irq_restore() to new Kconfig as suggested.
> 
> Patch also adds a new soft_irq_set_mask() to update paca->soft_enabled.
> 
> Signed-off-by: Madhavan Srinivasan <maddy@linux.vnet.ibm.com>
> ---
>  arch/powerpc/Kconfig              |  4 ++++
>  arch/powerpc/include/asm/hw_irq.h | 17 +++++++++++++++++
>  arch/powerpc/kernel/irq.c         |  4 ++--
>  3 files changed, 23 insertions(+), 2 deletions(-)
> 
> diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
> index 927d2ab2ce08..878f05925340 100644
> --- a/arch/powerpc/Kconfig
> +++ b/arch/powerpc/Kconfig
> @@ -51,6 +51,10 @@ config TRACE_IRQFLAGS_SUPPORT
>  	bool
>  	default y
>  
> +config IRQ_DEBUG_SUPPORT
> +	bool
> +	default n
> +
>  config LOCKDEP_SUPPORT
>  	bool
>  	default y
> diff --git a/arch/powerpc/include/asm/hw_irq.h b/arch/powerpc/include/asm/hw_irq.h
> index 415734c07cfa..9f71559ce868 100644
> --- a/arch/powerpc/include/asm/hw_irq.h
> +++ b/arch/powerpc/include/asm/hw_irq.h
> @@ -81,6 +81,23 @@ static inline unsigned long arch_local_irq_disable(void)
>  	return flags;
>  }
>  
> +static inline unsigned long soft_irq_set_mask(int value)
> +{
> +	unsigned long flags, zero;
> +
> +#ifdef CONFIG_IRQ_DEBUG_SUPPORT
> +	WARN_ON(value <= IRQ_DISABLE_MASK_LINUX);
> +#endif
> +	asm volatile(
> +		"li %1,%3; lbz %0,%2(13); stb %1,%2(13)"
> +		: "=r" (flags), "=&r" (zero)
> +		: "i" (offsetof(struct paca_struct, soft_enabled)),\
> +		 "i" (value)
> +		: "memory");
> +
> +	return flags;
> +}

I think the warning condition should be something like

WARN_ON((value & local_paca->soft_enabled) != local_paca->soft_enabled);

We never want to re-enable an irq bit, because this function does not
do the necessary replay. A small comment may be in order too.
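
For concreteness, with that check and a comment folded in, the function
might read as follows (a sketch only; the inline asm is unchanged from
the patch and the comment wording here is illustrative, not from the
series):

static inline unsigned long soft_irq_set_mask(int value)
{
	unsigned long flags, zero;

#ifdef CONFIG_IRQ_DEBUG_SUPPORT
	/*
	 * This function must only add bits to the mask, never clear
	 * them: it does not replay interrupts that may have become
	 * pending while a bit was set.
	 */
	WARN_ON((value & local_paca->soft_enabled) != local_paca->soft_enabled);
#endif
	asm volatile(
		"li %1,%3; lbz %0,%2(13); stb %1,%2(13)"
		: "=r" (flags), "=&r" (zero)
		: "i" (offsetof(struct paca_struct, soft_enabled)),
		  "i" (value)
		: "memory");

	return flags;
}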

Thanks,
Nick
Nicholas Piggin Aug. 29, 2016, 1:41 a.m. UTC | #2
On Mon, 29 Aug 2016 00:07:27 +0530
Madhavan Srinivasan <maddy@linux.vnet.ibm.com> wrote:

> New Kconfig is added "CONFIG_IRQ_DEBUG_SUPPORT" to add a warn_on
> to alert the usage of soft_irq_set_mask() for disabling lower
> bitmask interrupts.
> 
> Have also moved the code under the CONFIG_TRACE_IRQFLAGS in
> arch_local_irq_restore() to new Kconfig as suggested.
> 
> Patch also adds a new soft_irq_set_mask() to update paca->soft_enabled.
> 
> Signed-off-by: Madhavan Srinivasan <maddy@linux.vnet.ibm.com>
> ---
>  arch/powerpc/Kconfig              |  4 ++++
>  arch/powerpc/include/asm/hw_irq.h | 17 +++++++++++++++++
>  arch/powerpc/kernel/irq.c         |  4 ++--
>  3 files changed, 23 insertions(+), 2 deletions(-)
> 
> diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
> index 927d2ab2ce08..878f05925340 100644
> --- a/arch/powerpc/Kconfig
> +++ b/arch/powerpc/Kconfig
> @@ -51,6 +51,10 @@ config TRACE_IRQFLAGS_SUPPORT
>  	bool
>  	default y
>  
> +config IRQ_DEBUG_SUPPORT
> +	bool
> +	default n
> +
>  config LOCKDEP_SUPPORT
>  	bool
>  	default y
> diff --git a/arch/powerpc/include/asm/hw_irq.h b/arch/powerpc/include/asm/hw_irq.h
> index 415734c07cfa..9f71559ce868 100644
> --- a/arch/powerpc/include/asm/hw_irq.h
> +++ b/arch/powerpc/include/asm/hw_irq.h
> @@ -81,6 +81,23 @@ static inline unsigned long arch_local_irq_disable(void)
>  	return flags;
>  }
>  
> +static inline unsigned long soft_irq_set_mask(int value)
> +{
> +	unsigned long flags, zero;
> +
> +#ifdef CONFIG_IRQ_DEBUG_SUPPORT
> +	WARN_ON(value <= IRQ_DISABLE_MASK_LINUX);
> +#endif
> +	asm volatile(
> +		"li %1,%3; lbz %0,%2(13); stb %1,%2(13)"
> +		: "=r" (flags), "=&r" (zero)
> +		: "i" (offsetof(struct paca_struct, soft_enabled)),\
> +		 "i" (value)
> +		: "memory");
> +
> +	return flags;
> +}

One other thing, if we have:

local_irq_save(flags);              // disable LINUX mask -> LINUX
local_irq_and_pmu_save(flags);      // disable PMU   mask -> LINUX|PMU
local_irq_and_pmu_restore(flags);   // enable  PMU   mask -> LINUX
local_irq_restore(flags);           // enable  LINUX mask -> NONE

Then the nested code that re-enables PMUs will not replay PMU interrupt
until the outer irq is re-enabled. I don't *think* this is a problem,
but it probably should be commented.
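
A rough sketch of what such wrappers could look like, built on
soft_irq_set_mask() and arch_local_irq_restore(); the helper names
below follow this discussion and IRQ_DISABLE_MASK_PMU is assumed from
elsewhere in the series, so the real patches may differ:

/*
 * Sketch only.  Restoring the saved mask can leave the LINUX bit set,
 * so a pending PMU interrupt is only replayed once the outer
 * local_irq_restore() clears the mask completely.
 */
#define local_irq_and_pmu_save(flags)					\
	do {								\
		(flags) = soft_irq_set_mask(IRQ_DISABLE_MASK_LINUX |	\
					    IRQ_DISABLE_MASK_PMU);	\
	} while (0)

#define local_irq_and_pmu_restore(flags)				\
	do {								\
		arch_local_irq_restore(flags);				\
	} while (0)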

Which brings us to arch_local_irq_restore() (from another patch):


@@ -208,7 +209,7 @@ notrace void arch_local_irq_restore(unsigned long en)
 
 	/* Write the new soft-enabled value */
 	set_soft_enabled(en);
-	if (!en)
+	if (en == IRQ_DISABLE_MASK_LINUX)
 		return;
 	/*
 	 * From this point onward, we can take interrupts, preempt,

I think this is a bit buggy for some cases of nested disables. For the
above it is okay, but if we have:

local_irq_and_pmu_save(flags);
local_irq_and_pmu_save(flags);
local_irq_and_pmu_restore(flags);
local_irq_and_pmu_restore(flags);


The first restore will restore LINUX|PMU mask, which will not be caught
by this check.

Testing instead for non-zero (any IRQ bits masked) should work. We should
probably also add an IRQ_DEBUG_SUPPORT check to ensure LINUX bit is always
one of the outer-most bits to be cleared (theoretically we could support
masking other bits without masking LINUX, but let's disallow that until a
compelling use case shows up).
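
One possible reading of both suggestions, sketched against the hunk
quoted above (a fragment of arch_local_irq_restore(), not the actual
series code):

	/* Write the new soft-enabled value */
	set_soft_enabled(en);
#ifdef CONFIG_IRQ_DEBUG_SUPPORT
	/*
	 * The LINUX bit must be one of the outer-most bits: it may not
	 * be cleared while any other bits remain masked.
	 */
	WARN_ON(en && !(en & IRQ_DISABLE_MASK_LINUX));
#endif
	if (en)		/* any bits still masked: no replay yet */
		return;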

Thanks,
Nick
maddy Aug. 29, 2016, 5:58 a.m. UTC | #3
On Monday 29 August 2016 07:11 AM, Nicholas Piggin wrote:
> On Mon, 29 Aug 2016 00:07:27 +0530
> Madhavan Srinivasan <maddy@linux.vnet.ibm.com> wrote:
>
>> New Kconfig is added "CONFIG_IRQ_DEBUG_SUPPORT" to add a warn_on
>> to alert the usage of soft_irq_set_mask() for disabling lower
>> bitmask interrupts.
>>
>> Have also moved the code under the CONFIG_TRACE_IRQFLAGS in
>> arch_local_irq_restore() to new Kconfig as suggested.
>>
>> Patch also adds a new soft_irq_set_mask() to update paca->soft_enabled.
>>
>> Signed-off-by: Madhavan Srinivasan <maddy@linux.vnet.ibm.com>
>> ---
>>   arch/powerpc/Kconfig              |  4 ++++
>>   arch/powerpc/include/asm/hw_irq.h | 17 +++++++++++++++++
>>   arch/powerpc/kernel/irq.c         |  4 ++--
>>   3 files changed, 23 insertions(+), 2 deletions(-)
>>
>> diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
>> index 927d2ab2ce08..878f05925340 100644
>> --- a/arch/powerpc/Kconfig
>> +++ b/arch/powerpc/Kconfig
>> @@ -51,6 +51,10 @@ config TRACE_IRQFLAGS_SUPPORT
>>   	bool
>>   	default y
>>   
>> +config IRQ_DEBUG_SUPPORT
>> +	bool
>> +	default n
>> +
>>   config LOCKDEP_SUPPORT
>>   	bool
>>   	default y
>> diff --git a/arch/powerpc/include/asm/hw_irq.h b/arch/powerpc/include/asm/hw_irq.h
>> index 415734c07cfa..9f71559ce868 100644
>> --- a/arch/powerpc/include/asm/hw_irq.h
>> +++ b/arch/powerpc/include/asm/hw_irq.h
>> @@ -81,6 +81,23 @@ static inline unsigned long arch_local_irq_disable(void)
>>   	return flags;
>>   }
>>   
>> +static inline unsigned long soft_irq_set_mask(int value)
>> +{
>> +	unsigned long flags, zero;
>> +
>> +#ifdef CONFIG_IRQ_DEBUG_SUPPORT
>> +	WARN_ON(value <= IRQ_DISABLE_MASK_LINUX);
>> +#endif
>> +	asm volatile(
>> +		"li %1,%3; lbz %0,%2(13); stb %1,%2(13)"
>> +		: "=r" (flags), "=&r" (zero)
>> +		: "i" (offsetof(struct paca_struct, soft_enabled)),\
>> +		 "i" (value)
>> +		: "memory");
>> +
>> +	return flags;
>> +}
> One other thing, if we have:
>
> local_irq_save(flags);              // disable LINUX mask -> LINUX
> local_irq_and_pmu_save(flags);      // disable PMU   mask -> LINUX|PMU
> local_irq_and_pmu_restore(flags);   // enable  PMU   mask -> LINUX
> local_irq_restore(flags);           // enable  LINUX mask -> NONE
>
> Then the nested code that re-enables PMUs will not replay PMU interrupt
> until the outer irq is re-enabled. I don't *think* this is a problem,
> but it probably should be commented.

Yes. That is right. Will comment.

>
> Which brings us to arch_local_irq_restore() (from another patch):
>
>
> @@ -208,7 +209,7 @@ notrace void arch_local_irq_restore(unsigned long en)
>
>   	/* Write the new soft-enabled value */
>   	set_soft_enabled(en);
> -	if (!en)
> +	if (en == IRQ_DISABLE_MASK_LINUX)
>   		return;
>   	/*
>   	 * From this point onward, we can take interrupts, preempt,
>
> I think this is a bit buggy for some cases of nested disables. For the
> above it is okay, but if we have:
>
> local_irq_and_pmu_save(flags);
> local_irq_and_pmu_save(flags);
> local_irq_and_pmu_restore(flags);
> local_irq_and_pmu_restore(flags);
>
>
> The first restore will restore LINUX|PMU mask, which will not be caught
> by this check.

Nice catch :). Yes, this should be a non-zero check.

Also, looking more closely, another possible issue is with this sequence:

local_irq_and_pmu_save(flags);
local_irq_save(flags);

Currently set_soft_enabled() does a plain store, which will reset
the PMI bit in the above sequence. So set_soft_enabled() needs to
be reworked.
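
For illustration, the direction of that rework could be something like
the following plain C sketch; the helper name is made up here and the
r13/inline-asm details the real helper would use are ignored:

/*
 * Illustrative only: OR new bits into paca->soft_enabled instead of
 * overwriting it, so an inner local_irq_save() cannot clear an outer
 * PMU bit.
 */
static inline unsigned long soft_irq_or_mask(unsigned long mask)
{
	unsigned long flags = local_paca->soft_enabled;

	local_paca->soft_enabled = flags | mask;
	barrier();	/* compiler barrier, like the "memory" clobber above */

	return flags;	/* old mask, for the matching restore */
}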

Maddy
> Testing instead for non-zero (any IRQ bits masked) should work. We should
> probably also add an IRQ_DEBUG_SUPPORT check to ensure LINUX bit is always
> one of the outer-most bits to be cleared (theoretically we could support
> masking other bits without masking LINUX, but let's disallow that until a
> compelling use case shows up).
>
> Thanks,
> Nick
>

Patch

diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 927d2ab2ce08..878f05925340 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -51,6 +51,10 @@  config TRACE_IRQFLAGS_SUPPORT
 	bool
 	default y
 
+config IRQ_DEBUG_SUPPORT
+	bool
+	default n
+
 config LOCKDEP_SUPPORT
 	bool
 	default y
diff --git a/arch/powerpc/include/asm/hw_irq.h b/arch/powerpc/include/asm/hw_irq.h
index 415734c07cfa..9f71559ce868 100644
--- a/arch/powerpc/include/asm/hw_irq.h
+++ b/arch/powerpc/include/asm/hw_irq.h
@@ -81,6 +81,23 @@  static inline unsigned long arch_local_irq_disable(void)
 	return flags;
 }
 
+static inline unsigned long soft_irq_set_mask(int value)
+{
+	unsigned long flags, zero;
+
+#ifdef CONFIG_IRQ_DEBUG_SUPPORT
+	WARN_ON(value <= IRQ_DISABLE_MASK_LINUX);
+#endif
+	asm volatile(
+		"li %1,%3; lbz %0,%2(13); stb %1,%2(13)"
+		: "=r" (flags), "=&r" (zero)
+		: "i" (offsetof(struct paca_struct, soft_enabled)),\
+		 "i" (value)
+		: "memory");
+
+	return flags;
+}
+
 extern void arch_local_irq_restore(unsigned long);
 
 static inline void arch_local_irq_enable(void)
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index db453a5c0514..57343a55111e 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -243,7 +243,7 @@  notrace void arch_local_irq_restore(unsigned long en)
 	 */
 	if (unlikely(irq_happened != PACA_IRQ_HARD_DIS))
 		__hard_irq_disable();
-#ifdef CONFIG_TRACE_IRQFLAGS
+#ifdef CONFIG_IRQ_DEBUG_SUPPORT
 	else {
 		/*
 		 * We should already be hard disabled here. We had bugs
@@ -254,7 +254,7 @@  notrace void arch_local_irq_restore(unsigned long en)
 		if (WARN_ON(mfmsr() & MSR_EE))
 			__hard_irq_disable();
 	}
-#endif /* CONFIG_TRACE_IRQFLAGS */
+#endif /* CONFIG_IRQ_DEBUG_SUPPORT */
 
 	set_soft_enabled(IRQ_DISABLE_MASK_LINUX);