@@ -714,9 +714,7 @@ static inline u32 bpf_prog_run_clear_cb(const struct bpf_prog *prog,
if (unlikely(prog->cb_access))
memset(cb_data, 0, BPF_SKB_CB_LEN);
- preempt_disable();
res = BPF_PROG_RUN(prog, skb);
- preempt_enable();
return res;
}
@@ -1455,9 +1455,7 @@ static __always_inline
void __bpf_trace_run(struct bpf_prog *prog, u64 *args)
{
rcu_read_lock();
- preempt_disable();
(void) BPF_PROG_RUN(prog, args);
- preempt_enable();
rcu_read_unlock();
}
@@ -888,9 +888,7 @@ bool bpf_flow_dissect(struct bpf_prog *prog, struct bpf_flow_dissector *ctx,
(int)FLOW_DISSECTOR_F_STOP_AT_ENCAP);
flow_keys->flags = flags;
- preempt_disable();
result = BPF_PROG_RUN(prog, ctx);
- preempt_enable();
flow_keys->nhoff = clamp_t(u16, flow_keys->nhoff, nhoff, hlen);
flow_keys->thoff = clamp_t(u16, flow_keys->thoff,
@@ -380,9 +380,7 @@ static int kcm_parse_func_strparser(struct strparser *strp, struct sk_buff *skb)
struct bpf_prog *prog = psock->bpf_prog;
int res;
- preempt_disable();
res = BPF_PROG_RUN(prog, skb);
- preempt_enable();
return res;
}
All of these cases are strictly of the form:

	preempt_disable();
	BPF_PROG_RUN(...);
	preempt_enable();

BPF_PROG_RUN() is now a wrapper around __BPF_PROG_RUN() which disables
migration (via RT local locking primitives or preemption disabling).
Therefore, this is a trivially correct transformation.

Signed-off-by: David S. Miller <davem@davemloft.net>
---
 include/linux/filter.h    | 2 --
 kernel/trace/bpf_trace.c  | 2 --
 net/core/flow_dissector.c | 2 --
 net/kcm/kcmsock.c         | 2 --
 4 files changed, 8 deletions(-)