[RFC,v1,3/7] bpf: Remove preemption disable from simple call sites.
diff mbox series

Message ID 20191207.160422.913393771404629217.davem@davemloft.net
State RFC
Delegated to: BPF Maintainers
Headers show
Series
  • bpf: Make RT friendly.
Related show

Commit Message

David Miller Dec. 8, 2019, 12:04 a.m. UTC
All of these cases are strictly of the form:

	preempt_disable();
	BPF_PROG_RUN(...);
	preempt_enable();

BPF_PROG_RUN() is now a wrapper around __BPF_PROG_RUN()
which disables migration (via RT local locking primitives
on RT kernels, or by disabling preemption otherwise).

Therefore, this is a trivially correct transformation.

Signed-off-by: David S. Miller <davem@davemloft.net>
---
 include/linux/filter.h    | 2 --
 kernel/trace/bpf_trace.c  | 2 --
 net/core/flow_dissector.c | 2 --
 net/kcm/kcmsock.c         | 2 --
 4 files changed, 8 deletions(-)

Patch
diff mbox series

diff --git a/include/linux/filter.h b/include/linux/filter.h
index 1f4a782b6184..a64adc7751e8 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -714,9 +714,7 @@  static inline u32 bpf_prog_run_clear_cb(const struct bpf_prog *prog,
 	if (unlikely(prog->cb_access))
 		memset(cb_data, 0, BPF_SKB_CB_LEN);
 
-	preempt_disable();
 	res = BPF_PROG_RUN(prog, skb);
-	preempt_enable();
 	return res;
 }
 
diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
index ffc91d4935ac..cc4873cfaab2 100644
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -1455,9 +1455,7 @@  static __always_inline
 void __bpf_trace_run(struct bpf_prog *prog, u64 *args)
 {
 	rcu_read_lock();
-	preempt_disable();
 	(void) BPF_PROG_RUN(prog, args);
-	preempt_enable();
 	rcu_read_unlock();
 }
 
diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
index ca871657a4c4..ffd384ba929f 100644
--- a/net/core/flow_dissector.c
+++ b/net/core/flow_dissector.c
@@ -888,9 +888,7 @@  bool bpf_flow_dissect(struct bpf_prog *prog, struct bpf_flow_dissector *ctx,
 		     (int)FLOW_DISSECTOR_F_STOP_AT_ENCAP);
 	flow_keys->flags = flags;
 
-	preempt_disable();
 	result = BPF_PROG_RUN(prog, ctx);
-	preempt_enable();
 
 	flow_keys->nhoff = clamp_t(u16, flow_keys->nhoff, nhoff, hlen);
 	flow_keys->thoff = clamp_t(u16, flow_keys->thoff,
diff --git a/net/kcm/kcmsock.c b/net/kcm/kcmsock.c
index ea9e73428ed9..d5f9d4d8e06c 100644
--- a/net/kcm/kcmsock.c
+++ b/net/kcm/kcmsock.c
@@ -380,9 +380,7 @@  static int kcm_parse_func_strparser(struct strparser *strp, struct sk_buff *skb)
 	struct bpf_prog *prog = psock->bpf_prog;
 	int res;
 
-	preempt_disable();
 	res = BPF_PROG_RUN(prog, skb);
-	preempt_enable();
 	return res;
 }