@@ -21,6 +21,7 @@
#include <linux/kallsyms.h>
#include <linux/if_vlan.h>
#include <linux/vmalloc.h>
+#include <linux/locallock.h>
#include <net/sch_generic.h>
@@ -559,7 +560,20 @@ struct sk_filter {
DECLARE_STATIC_KEY_FALSE(bpf_stats_enabled_key);
-#define BPF_PROG_RUN(prog, ctx) ({ \
+#ifdef CONFIG_PREEMPT_RT_FULL
+DECLARE_LOCAL_IRQ_LOCK(bpf_invoke_lock);
+#define bpf_prog_lock() local_lock(bpf_invoke_lock)
+#define bpf_prog_unlock() local_unlock(bpf_invoke_lock)
+#else
+#define bpf_prog_lock() preempt_disable()
+#define bpf_prog_unlock() preempt_enable()
+#endif
+
+/* The running BPF program must not migrate off the current CPU:
+ * BPF programs access per-cpu maps and other per-cpu data structures
+ * that are shared between BPF program execution and kernel execution.
+ */
+#define __BPF_PROG_RUN(prog, ctx) ({ \
u32 ret; \
cant_sleep(); \
if (static_branch_unlikely(&bpf_stats_enabled_key)) { \
@@ -576,6 +590,13 @@ DECLARE_STATIC_KEY_FALSE(bpf_stats_enabled_key);
} \
ret; })
+#define BPF_PROG_RUN(prog, ctx) ({ \
+ u32 ret; \
+ bpf_prog_lock(); \
+ ret = __BPF_PROG_RUN(prog, ctx); \
+ bpf_prog_unlock(); \
+ ret; })
+
#define BPF_SKB_CB_LEN QDISC_CB_PRIV_LEN
struct bpf_skb_data_end {
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -2217,6 +2217,11 @@ int __weak bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
DEFINE_STATIC_KEY_FALSE(bpf_stats_enabled_key);
EXPORT_SYMBOL(bpf_stats_enabled_key);
+#ifdef CONFIG_PREEMPT_RT_FULL
+DEFINE_LOCAL_IRQ_LOCK(bpf_invoke_lock);
+EXPORT_SYMBOL(bpf_invoke_lock);
+#endif
+
/* All definitions of tracepoints related to BPF. */
#define CREATE_TRACE_POINTS
#include <linux/bpf_trace.h>
For now simply surround every invocation of BPF programs with a call to
the locking primitive. The next step will be pulling the local lock out
to the necessary areas of the various call sites.

Signed-off-by: David S. Miller <davem@davemloft.net>
---
 include/linux/filter.h | 23 ++++++++++++++++++++++-
 kernel/bpf/core.c      |  5 +++++
 2 files changed, 27 insertions(+), 1 deletion(-)
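
To make the stated next step concrete, here is a minimal sketch of what
pulling the lock out to a call site could look like. It is illustration
only and not part of this patch: run_prog_on_batch() is a hypothetical
helper, and the only identifiers taken from the patch itself are
bpf_prog_lock(), bpf_prog_unlock() and __BPF_PROG_RUN().

#include <linux/filter.h>
#include <linux/skbuff.h>

/* Hypothetical call site: take the lock once around a batch of
 * invocations instead of once per BPF_PROG_RUN().  On
 * CONFIG_PREEMPT_RT_FULL this becomes local_lock(bpf_invoke_lock),
 * otherwise preempt_disable(), so the programs cannot migrate away
 * from their per-cpu maps while they run.
 */
static void run_prog_on_batch(const struct bpf_prog *prog,
			      struct sk_buff **skbs, u32 *results,
			      int n)
{
	int i;

	bpf_prog_lock();
	for (i = 0; i < n; i++)
		results[i] = __BPF_PROG_RUN(prog, skbs[i]);
	bpf_prog_unlock();
}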