[bpf-next,1/3] tools: add smp_* barrier variants to include infrastructure

Message ID: 20181017144156.16639-2-daniel@iogearbox.net
State: Changes Requested, archived
Delegated to: BPF Maintainers
Series: improve and fix barriers for walking perf rb

Commit Message

Daniel Borkmann Oct. 17, 2018, 2:41 p.m. UTC
Add definitions for smp_rmb(), smp_wmb(), and smp_mb() to the tools
include infrastructure. This patch adds the implementations for x86-64
and arm64, and has all other archs, which do not have them implemented
at this point, fall back to the mandatory barriers, so that optimized
variants can be added successively by those who have access to test
machines. The x86-64 smp_mb() uses a lock + add combination on an
address below the red zone.
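
For context, the user-space consumer side of the perf ring buffer that
this series targets pairs the barriers roughly as follows. This is only
a sketch, assuming a mapped struct perf_event_mmap_page *pc and a
hypothetical process_event() callback, and ignoring wrap-around of
events at the end of the data area:

#include <linux/perf_event.h>
#include <asm/barrier.h>

void process_event(struct perf_event_header *hdr);	/* hypothetical */

static void drain(struct perf_event_mmap_page *pc, void *base, __u64 mask)
{
	__u64 head = pc->data_head;
	__u64 tail = pc->data_tail;

	smp_rmb();	/* order the head load before the event data loads */

	while (tail != head) {
		struct perf_event_header *hdr = base + (tail & mask);

		process_event(hdr);
		tail += hdr->size;
	}

	smp_mb();	/* complete the event loads before publishing the tail */
	pc->data_tail = tail;
}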

Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
Cc: Will Deacon <will.deacon@arm.com>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
---
 tools/arch/arm64/include/asm/barrier.h | 10 ++++++++++
 tools/arch/x86/include/asm/barrier.h   |  9 ++++++---
 tools/include/asm/barrier.h            | 11 +++++++++++
 3 files changed, 27 insertions(+), 3 deletions(-)

Patch

diff --git a/tools/arch/arm64/include/asm/barrier.h b/tools/arch/arm64/include/asm/barrier.h
index 40bde6b..acf1f06 100644
--- a/tools/arch/arm64/include/asm/barrier.h
+++ b/tools/arch/arm64/include/asm/barrier.h
@@ -14,4 +14,14 @@ 
 #define wmb()		asm volatile("dmb ishst" ::: "memory")
 #define rmb()		asm volatile("dmb ishld" ::: "memory")
 
+/*
+ * The kernel uses dmb variants on arm64 for the smp_*() barriers: pretty much
+ * the same implementations as the mb()/wmb()/rmb() above, though for the
+ * latter the kernel uses dsb. In any case, should the mb()/wmb()/rmb() above
+ * ever change, make sure the smp_*() below are not changed along with them.
+ */
+#define smp_mb()	asm volatile("dmb ish" ::: "memory")
+#define smp_wmb()	asm volatile("dmb ishst" ::: "memory")
+#define smp_rmb()	asm volatile("dmb ishld" ::: "memory")
+
 #endif /* _TOOLS_LINUX_ASM_AARCH64_BARRIER_H */
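
As a non-normative annotation (not part of the patch): all three map to
dmb instructions scoped to the inner shareable domain, differing only in
the access types they order:

	smp_mb();	/* dmb ish:   full barrier, inner shareable domain */
	smp_wmb();	/* dmb ishst: orders stores against later stores */
	smp_rmb();	/* dmb ishld: orders loads against later loads and stores */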
diff --git a/tools/arch/x86/include/asm/barrier.h b/tools/arch/x86/include/asm/barrier.h
index 8774dee..c97c0c5 100644
--- a/tools/arch/x86/include/asm/barrier.h
+++ b/tools/arch/x86/include/asm/barrier.h
@@ -21,9 +21,12 @@ 
 #define rmb()	asm volatile("lock; addl $0,0(%%esp)" ::: "memory")
 #define wmb()	asm volatile("lock; addl $0,0(%%esp)" ::: "memory")
 #elif defined(__x86_64__)
-#define mb() 	asm volatile("mfence":::"memory")
-#define rmb()	asm volatile("lfence":::"memory")
-#define wmb()	asm volatile("sfence" ::: "memory")
+#define mb()      asm volatile("mfence" ::: "memory")
+#define rmb()     asm volatile("lfence" ::: "memory")
+#define wmb()     asm volatile("sfence" ::: "memory")
+#define smp_rmb() barrier()
+#define smp_wmb() barrier()
+#define smp_mb()  asm volatile("lock; addl $0,-132(%%rsp)" ::: "memory", "cc")
 #endif
 
 #endif /* _TOOLS_LINUX_ASM_X86_BARRIER_H */
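
On x86-64's TSO memory model, loads are not reordered with other loads
and stores are not reordered with other stores, so smp_rmb() and
smp_wmb() only need to stop the compiler from reordering, which is what
barrier() does. The one visible reordering is store->load, which the
lock; addl full barrier forbids; the -132(%rsp) offset keeps the 4-byte
dummy operand just below the 128-byte red zone that the x86-64 ABI
reserves for leaf-function scratch data. A minimal store-buffer sketch
where that full barrier matters, assuming READ_ONCE()/WRITE_ONCE() from
tools' <linux/compiler.h>:

#include <linux/compiler.h>
#include <asm/barrier.h>

int X, Y;
int r0, r1;

/* Runs on CPU 0. */
void cpu0(void)
{
	WRITE_ONCE(X, 1);
	smp_mb();	/* order the store to X before the load from Y */
	r0 = READ_ONCE(Y);
}

/* Runs on CPU 1. */
void cpu1(void)
{
	WRITE_ONCE(Y, 1);
	smp_mb();	/* order the store to Y before the load from X */
	r1 = READ_ONCE(X);
}

/*
 * With both smp_mb()s, the outcome r0 == 0 && r1 == 0 is forbidden;
 * without them, the store buffer allows it.
 */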
diff --git a/tools/include/asm/barrier.h b/tools/include/asm/barrier.h
index 391d942..e4c8845 100644
--- a/tools/include/asm/barrier.h
+++ b/tools/include/asm/barrier.h
@@ -1,4 +1,5 @@ 
 /* SPDX-License-Identifier: GPL-2.0 */
+#include <linux/compiler.h>
 #if defined(__i386__) || defined(__x86_64__)
 #include "../../arch/x86/include/asm/barrier.h"
 #elif defined(__arm__)
@@ -26,3 +27,13 @@ 
 #else
 #include <asm-generic/barrier.h>
 #endif
+/* Fallback definitions for archs that haven't been updated yet. */
+#ifndef smp_rmb
+# define smp_rmb()	rmb()
+#endif
+#ifndef smp_wmb
+# define smp_wmb()	wmb()
+#endif
+#ifndef smp_mb
+# define smp_mb()	mb()
+#endif
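
With these fallbacks in place, tools code can use the smp_*() names
unconditionally: on an arch without dedicated variants they resolve to
the mandatory barriers, which are stronger but still correct. A minimal,
hypothetical publish/consume sketch of how portable callers would use
them:

#include <linux/compiler.h>	/* READ_ONCE()/WRITE_ONCE() */
#include <asm/barrier.h>

int payload;
int ready;

void publish(void)
{
	WRITE_ONCE(payload, 42);
	smp_wmb();	/* order the payload store before the flag store */
	WRITE_ONCE(ready, 1);
}

int try_consume(int *out)
{
	if (!READ_ONCE(ready))
		return 0;
	smp_rmb();	/* order the flag load before the payload load */
	*out = READ_ONCE(payload);
	return 1;
}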