@@ -14,4 +14,14 @@
#define wmb() asm volatile("dmb ishst" ::: "memory")
#define rmb() asm volatile("dmb ishld" ::: "memory")
+/*
+ * Kernel uses dmb variants on arm64 for smp_*() barriers. Pretty much the same
+ * implementation as above mb()/wmb()/rmb(), though for the latter kernel uses
+ * dsb. In any case, should above mb()/wmb()/rmb() change, make sure the below
+ * smp_*() don't.
+ */
+#define smp_mb() asm volatile("dmb ish" ::: "memory")
+#define smp_wmb() asm volatile("dmb ishst" ::: "memory")
+#define smp_rmb() asm volatile("dmb ishld" ::: "memory")
+
#endif /* _TOOLS_LINUX_ASM_AARCH64_BARRIER_H */
@@ -21,9 +21,12 @@
#define rmb() asm volatile("lock; addl $0,0(%%esp)" ::: "memory")
#define wmb() asm volatile("lock; addl $0,0(%%esp)" ::: "memory")
#elif defined(__x86_64__)
-#define mb() asm volatile("mfence":::"memory")
-#define rmb() asm volatile("lfence":::"memory")
-#define wmb() asm volatile("sfence" ::: "memory")
+#define mb() asm volatile("mfence" ::: "memory")
+#define rmb() asm volatile("lfence" ::: "memory")
+#define wmb() asm volatile("sfence" ::: "memory")
+#define smp_rmb() barrier()
+#define smp_wmb() barrier()
+#define smp_mb() asm volatile("lock; addl $0,-132(%%rsp)" ::: "memory", "cc")
#endif
#endif /* _TOOLS_LINUX_ASM_X86_BARRIER_H */
@@ -1,4 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
+#include <linux/compiler.h>
#if defined(__i386__) || defined(__x86_64__)
#include "../../arch/x86/include/asm/barrier.h"
#elif defined(__arm__)
@@ -26,3 +27,13 @@
#else
#include <asm-generic/barrier.h>
#endif
+/* Fallback definitions for archs that haven't been updated yet. */
+#ifndef smp_rmb
+# define smp_rmb() rmb()
+#endif
+#ifndef smp_wmb
+# define smp_wmb() wmb()
+#endif
+#ifndef smp_mb
+# define smp_mb() mb()
+#endif
Add the definitions for smp_rmb(), smp_wmb(), and smp_mb() to the tools include infrastructure. This patch adds the implementation for x86-64 and arm64, and falls back to the existing mb()/wmb()/rmb() definitions for other archs which do not have it implemented at this point, so that the remaining archs can be added successively by those who have access to test machines. The x86-64 smp_mb() uses a lock + add combination on an address below the red zone. Signed-off-by: Daniel Borkmann <daniel@iogearbox.net> Cc: Peter Zijlstra <peterz@infradead.org> Cc: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com> Cc: Will Deacon <will.deacon@arm.com> Cc: Arnaldo Carvalho de Melo <acme@redhat.com> --- tools/arch/arm64/include/asm/barrier.h | 10 ++++++++++ tools/arch/x86/include/asm/barrier.h | 9 ++++++--- tools/include/asm/barrier.h | 11 +++++++++++ 3 files changed, 27 insertions(+), 3 deletions(-)