
[RFC,bpf-next,v2,4/9] net: flow_dissector: prepare for no-skb use case

Message ID 20190319221948.170441-5-sdf@google.com
State RFC
Delegated to: BPF Maintainers
Series net: flow_dissector: trigger BPF hook when called from eth_get_headlen

Commit Message

Stanislav Fomichev March 19, 2019, 10:19 p.m. UTC
Create helpers that will be reused by both the skb and no-skb versions:
* init_flow_keys - initialize flow keys
* clamp_flow_keys - clamp flow keys when done

Rename (to have a consistent bpf_flow_ prefix):
* __skb_flow_bpf_dissect to bpf_flow_dissect_skb
* __skb_flow_bpf_to_target to bpf_flow_keys_to_target

Signed-off-by: Stanislav Fomichev <sdf@google.com>
---
 include/linux/skbuff.h    |  8 +++---
 net/bpf/test_run.c        |  6 ++---
 net/core/flow_dissector.c | 56 +++++++++++++++++++++++++--------------
 3 files changed, 43 insertions(+), 27 deletions(-)
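
For readers skimming the diff below, here is a minimal standalone sketch, not part of
the patch, of what the two new helpers are meant to guarantee. The fake_* names are
hypothetical simplifications of the kernel types, and the control-block bookkeeping
done by the real init_flow_keys() is left out:

/* Userspace sketch of the init_flow_keys()/clamp_flow_keys() semantics. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

struct fake_flow_keys {		/* simplified stand-in for bpf_flow_keys */
	uint16_t nhoff;		/* network header offset */
	uint16_t thoff;		/* transport header offset */
};

static uint16_t clamp_u16(uint16_t v, uint16_t lo, uint16_t hi)
{
	return v < lo ? lo : (v > hi ? hi : v);
}

/* Mirrors the intent of init_flow_keys(): zero the keys and seed both
 * offsets with the network header offset before the program runs. */
static void init_keys(struct fake_flow_keys *k, uint16_t nhoff)
{
	k->nhoff = nhoff;
	k->thoff = nhoff;
}

/* Mirrors the intent of clamp_flow_keys(): after the program ran, force
 * 0 <= nhoff <= thoff <= hlen so later users cannot read past the
 * dissected buffer. */
static void clamp_keys(struct fake_flow_keys *k, uint16_t hlen)
{
	k->nhoff = clamp_u16(k->nhoff, 0, hlen);
	k->thoff = clamp_u16(k->thoff, k->nhoff, hlen);
}

int main(void)
{
	struct fake_flow_keys k = { 0 };

	init_keys(&k, 14);	/* e.g. Ethernet header length */
	k.thoff = 2000;		/* pretend a buggy program overshot */
	clamp_keys(&k, 128);	/* hlen of the linear buffer */

	assert(k.nhoff <= k.thoff && k.thoff <= 128);
	printf("nhoff=%u thoff=%u\n", (unsigned)k.nhoff, (unsigned)k.thoff);
	return 0;
}
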

Patch

diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 4ca4c60cbacb..194dbc2985e5 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -1278,10 +1278,10 @@  static inline int skb_flow_dissector_bpf_prog_detach(const union bpf_attr *attr)
 struct net *skb_net(const struct sk_buff *skb);
 
 struct bpf_flow_keys;
-bool __skb_flow_bpf_dissect(struct bpf_prog *prog,
-			    const struct sk_buff *skb,
-			    struct flow_dissector *flow_dissector,
-			    struct bpf_flow_keys *flow_keys);
+bool bpf_flow_dissect_skb(struct bpf_prog *prog,
+			  const struct sk_buff *skb,
+			  struct flow_dissector *flow_dissector,
+			  struct bpf_flow_keys *flow_keys);
 bool __skb_flow_dissect(struct net *net,
 			const struct sk_buff *skb,
 			struct flow_dissector *flow_dissector,
diff --git a/net/bpf/test_run.c b/net/bpf/test_run.c
index fab142b796ef..512773a95ad5 100644
--- a/net/bpf/test_run.c
+++ b/net/bpf/test_run.c
@@ -300,9 +300,9 @@  int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
 	preempt_disable();
 	time_start = ktime_get_ns();
 	for (i = 0; i < repeat; i++) {
-		retval = __skb_flow_bpf_dissect(prog, skb,
-						&flow_keys_dissector,
-						&flow_keys);
+		retval = bpf_flow_dissect_skb(prog, skb,
+					      &flow_keys_dissector,
+					      &flow_keys);
 
 		if (signal_pending(current)) {
 			preempt_enable();
diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
index e13165e7528c..ab43f9bd7ec4 100644
--- a/net/core/flow_dissector.c
+++ b/net/core/flow_dissector.c
@@ -629,9 +629,9 @@  static bool skb_flow_dissect_allowed(int *num_hdrs)
 	return (*num_hdrs <= MAX_FLOW_DISSECT_HDRS);
 }
 
-static void __skb_flow_bpf_to_target(const struct bpf_flow_keys *flow_keys,
-				     struct flow_dissector *flow_dissector,
-				     void *target_container)
+static void bpf_flow_keys_to_target(const struct bpf_flow_keys *flow_keys,
+				    struct flow_dissector *flow_dissector,
+				    void *target_container)
 {
 	struct flow_dissector_key_control *key_control;
 	struct flow_dissector_key_basic *key_basic;
@@ -683,10 +683,32 @@  static void __skb_flow_bpf_to_target(const struct bpf_flow_keys *flow_keys,
 	}
 }
 
-bool __skb_flow_bpf_dissect(struct bpf_prog *prog,
-			    const struct sk_buff *skb,
-			    struct flow_dissector *flow_dissector,
-			    struct bpf_flow_keys *flow_keys)
+static inline void init_flow_keys(struct bpf_flow_keys *flow_keys,
+				  const struct sk_buff *skb, int nhoff)
+{
+	struct bpf_skb_data_end *cb = (struct bpf_skb_data_end *)skb->cb;
+
+	memset(cb, 0, sizeof(*cb));
+	memset(flow_keys, 0, sizeof(*flow_keys));
+
+	flow_keys->nhoff = nhoff;
+	flow_keys->thoff = nhoff;
+
+	cb->qdisc_cb.flow_keys = flow_keys;
+}
+
+static inline void clamp_flow_keys(struct bpf_flow_keys *flow_keys,
+				   int hlen)
+{
+	flow_keys->nhoff = clamp_t(u16, flow_keys->nhoff, 0, hlen);
+	flow_keys->thoff = clamp_t(u16, flow_keys->thoff,
+				   flow_keys->nhoff, hlen);
+}
+
+bool bpf_flow_dissect_skb(struct bpf_prog *prog,
+			  const struct sk_buff *skb,
+			  struct flow_dissector *flow_dissector,
+			  struct bpf_flow_keys *flow_keys)
 {
 	struct bpf_skb_data_end cb_saved;
 	struct bpf_skb_data_end *cb;
@@ -702,13 +724,9 @@  bool __skb_flow_bpf_dissect(struct bpf_prog *prog,
 
 	/* Save Control Block */
 	memcpy(&cb_saved, cb, sizeof(cb_saved));
-	memset(cb, 0, sizeof(*cb));
 
 	/* Pass parameters to the BPF program */
-	memset(flow_keys, 0, sizeof(*flow_keys));
-	cb->qdisc_cb.flow_keys = flow_keys;
-	flow_keys->nhoff = skb_network_offset(skb);
-	flow_keys->thoff = flow_keys->nhoff;
+	init_flow_keys(flow_keys, skb, skb_network_offset(skb));
 
 	bpf_compute_data_pointers((struct sk_buff *)skb);
 	result = BPF_PROG_RUN(prog, skb);
@@ -716,9 +734,7 @@  bool __skb_flow_bpf_dissect(struct bpf_prog *prog,
 	/* Restore state */
 	memcpy(cb, &cb_saved, sizeof(cb_saved));
 
-	flow_keys->nhoff = clamp_t(u16, flow_keys->nhoff, 0, skb->len);
-	flow_keys->thoff = clamp_t(u16, flow_keys->thoff,
-				   flow_keys->nhoff, skb->len);
+	clamp_flow_keys(flow_keys, skb->len);
 
 	return result == BPF_OK;
 }
@@ -806,11 +822,11 @@  bool __skb_flow_dissect(struct net *net,
 			attached = rcu_dereference(net->flow_dissector_prog);
 
 		if (attached) {
-			ret = __skb_flow_bpf_dissect(attached, skb,
-						     flow_dissector,
-						     &flow_keys);
-			__skb_flow_bpf_to_target(&flow_keys, flow_dissector,
-						 target_container);
+			ret = bpf_flow_dissect_skb(attached, skb,
+						   flow_dissector,
+						   &flow_keys);
+			bpf_flow_keys_to_target(&flow_keys, flow_dissector,
+						target_container);
 			rcu_read_unlock();
 			return ret;
 		}
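
Putting the pieces together, a rough userspace analogue of the sequence that
bpf_flow_dissect_skb() now follows (save the control block, init_flow_keys(), run the
program, restore state, clamp_flow_keys()). Every toy_* name below is a hypothetical
stand-in, not a kernel API:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct toy_keys { uint16_t nhoff, thoff; };
struct toy_ctx  { uint8_t cb[8]; struct toy_keys *keys; };

/* "BPF program": parses nothing, just claims the transport header
 * starts 20 bytes after the network header. */
static bool toy_prog(struct toy_ctx *ctx)
{
	ctx->keys->thoff = ctx->keys->nhoff + 20;
	return true;
}

static uint16_t clamp_u16(uint16_t v, uint16_t lo, uint16_t hi)
{
	return v < lo ? lo : (v > hi ? hi : v);
}

static bool toy_dissect(bool (*prog)(struct toy_ctx *),
			struct toy_ctx *ctx, struct toy_keys *keys,
			uint16_t nhoff, uint16_t hlen)
{
	uint8_t saved_cb[sizeof(ctx->cb)];
	bool ok;

	memcpy(saved_cb, ctx->cb, sizeof(saved_cb));	/* save control block */

	memset(keys, 0, sizeof(*keys));			/* init_flow_keys()   */
	keys->nhoff = nhoff;
	keys->thoff = nhoff;
	ctx->keys = keys;

	ok = prog(ctx);					/* BPF_PROG_RUN()     */

	memcpy(ctx->cb, saved_cb, sizeof(saved_cb));	/* restore state      */

	keys->nhoff = clamp_u16(keys->nhoff, 0, hlen);	/* clamp_flow_keys()  */
	keys->thoff = clamp_u16(keys->thoff, keys->nhoff, hlen);
	return ok;
}

int main(void)
{
	struct toy_ctx ctx = { 0 };
	struct toy_keys keys;

	if (toy_dissect(toy_prog, &ctx, &keys, 14, 64))
		printf("nhoff=%u thoff=%u\n",
		       (unsigned)keys.nhoff, (unsigned)keys.thoff);
	return 0;
}

Factoring init_flow_keys()/clamp_flow_keys() out of this sequence is presumably what
lets the no-skb path later in the series (the eth_get_headlen case) reuse the same
setup and bounds-checking without duplicating it.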