powerpc: Add compiler memory barrier to mtmsr macro

Submitter Paul Mackerras
Date June 12, 2009, noon
Message ID <18994.17266.721207.48859@cargo.ozlabs.ibm.com>
Permalink /patch/28634/
State Accepted
Commit 4c75f84f2c781beb230031234ed961d28771a764
Delegated to: Benjamin Herrenschmidt

Comments

Paul Mackerras - June 12, 2009, noon
On 32-bit non-Book E, local_irq_restore() turns into just mtmsr(),
which doesn't currently have a compiler memory barrier.  This means
that accesses to memory inside a local_irq_save/restore section,
or a spin_lock_irqsave/spin_unlock_irqrestore section on UP, can
be reordered by the compiler to occur outside that section.
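
As a minimal sketch of the problem (hypothetical code, not from the
kernel; the function and variable names are made up):

static int shared_counter;

static inline void mtmsr_no_clobber(unsigned long v)
{
	/* old form: asm volatile, but no "memory" clobber */
	asm volatile("mtmsr %0" : : "r" (v));
}

void restore_and_count(unsigned long saved_msr)
{
	shared_counter++;		/* meant to run with IRQs off */
	mtmsr_no_clobber(saved_msr);	/* may re-enable interrupts */
	/*
	 * Nothing tells the compiler that the asm touches memory, so
	 * it is free to sink the increment below the mtmsr, i.e.
	 * outside the protected section.
	 */
}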

To fix this, add a compiler memory barrier to mtmsr() for both
32-bit and 64-bit.  Having a compiler memory barrier in mtmsr() makes
sense because it will almost always be changing something about the
context in which memory accesses are done, so in general we don't want
memory accesses getting moved from one side of an mtmsr to the other.

With the barrier in mtmsr(), some of the explicit barriers in
hw_irq.h are now redundant, so remove them.
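
For illustration, the non-Book-E local_irq_enable() path after this
patch looks roughly like this (a sketch; SET_MSR_EE() is assumed here
to expand to mtmsr() on 32-bit):

static inline void local_irq_enable(void)
{
	unsigned long msr;

	msr = mfmsr();
	/*
	 * SET_MSR_EE() ends up in mtmsr(), which now carries the
	 * "memory" clobber, so the explicit "" asm barrier that used
	 * to precede mfmsr() is no longer needed.
	 */
	SET_MSR_EE(msr | MSR_EE);
}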

Signed-off-by: Paul Mackerras <paulus@samba.org>
---
 arch/powerpc/include/asm/hw_irq.h |    5 ++---
 arch/powerpc/include/asm/reg.h    |    4 ++--
 2 files changed, 4 insertions(+), 5 deletions(-)

Patch

diff --git a/arch/powerpc/include/asm/hw_irq.h b/arch/powerpc/include/asm/hw_irq.h
index 20a44d0..7eada1a 100644
--- a/arch/powerpc/include/asm/hw_irq.h
+++ b/arch/powerpc/include/asm/hw_irq.h
@@ -80,7 +80,7 @@ static inline void local_irq_disable(void)
 	__asm__ __volatile__("wrteei 0": : :"memory");
 #else
 	unsigned long msr;
-	__asm__ __volatile__("": : :"memory");
+
 	msr = mfmsr();
 	SET_MSR_EE(msr & ~MSR_EE);
 #endif
@@ -92,7 +92,7 @@ static inline void local_irq_enable(void)
 	__asm__ __volatile__("wrteei 1": : :"memory");
 #else
 	unsigned long msr;
-	__asm__ __volatile__("": : :"memory");
+
 	msr = mfmsr();
 	SET_MSR_EE(msr | MSR_EE);
 #endif
@@ -108,7 +108,6 @@ static inline void local_irq_save_ptr(unsigned long *flags)
 #else
 	SET_MSR_EE(msr & ~MSR_EE);
 #endif
-	__asm__ __volatile__("": : :"memory");
 }
 
 #define local_save_flags(flags)	((flags) = mfmsr())
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index fb359b0..a3c28e4 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -745,11 +745,11 @@ 
 			asm volatile("mfmsr %0" : "=r" (rval)); rval;})
 #ifdef CONFIG_PPC64
 #define __mtmsrd(v, l)	asm volatile("mtmsrd %0," __stringify(l) \
-				     : : "r" (v))
+				     : : "r" (v) : "memory")
 #define mtmsrd(v)	__mtmsrd((v), 0)
 #define mtmsr(v)	mtmsrd(v)
 #else
-#define mtmsr(v)	asm volatile("mtmsr %0" : : "r" (v))
+#define mtmsr(v)	asm volatile("mtmsr %0" : : "r" (v) : "memory")
 #endif
 
 #define mfspr(rn)	({unsigned long rval; \