@@ -398,6 +398,14 @@ enum bpf_func_id {
*/
BPF_FUNC_skb_change_tail,
+ /**
+ * bpf_netpolicy(skb)
+ * Netpolicy tc extension: select a proper Tx queue for the skb's socket
+ * @skb: pointer to skb
+ * Return: 0 on success or negative error
+ */
+ BPF_FUNC_netpolicy,
+
__BPF_FUNC_MAX_ID,
};
@@ -3285,8 +3285,8 @@ struct netdev_queue *netdev_pick_tx(struct net_device *dev,
#ifdef CONFIG_NETPOLICY
struct netpolicy_instance *instance;
- queue_index = -1;
- if (dev->netpolicy && sk) {
+ queue_index = sk_tx_queue_get(sk);
+ if ((queue_index < 0) && dev->netpolicy && sk) {
instance = netpolicy_find_instance(sk);
if (instance) {
if (!instance->dev)
@@ -2351,6 +2351,38 @@ static const struct bpf_func_proto bpf_skb_set_tunnel_opt_proto = {
.arg3_type = ARG_CONST_STACK_SIZE,
};
+#ifdef CONFIG_NETPOLICY
+/* Pick a netpolicy Tx queue for skb->sk and cache it there; 0 or -errno. */
+static u64 bpf_netpolicy(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
+{
+	struct sk_buff *skb = (struct sk_buff *) (unsigned long) r1;
+	struct netpolicy_instance *instance;
+	struct sock *sk = skb->sk;
+	int queue_index;
+
+	if (!skb->dev || !skb->dev->netpolicy || !sk)
+		return -EINVAL;
+	instance = netpolicy_find_instance(sk);
+	if (!instance)
+		return -ENOENT;
+	if (!instance->dev)
+		instance->dev = skb->dev;
+	queue_index = netpolicy_pick_queue(instance, false);
+	if (queue_index < 0)
+		return queue_index;
+	if (sk_fullsock(sk) && rcu_access_pointer(sk->sk_dst_cache))
+		sk_tx_queue_set(sk, queue_index);
+	return 0;
+}
+
+static const struct bpf_func_proto bpf_netpolicy_proto = {
+	.func		= bpf_netpolicy,
+	.gpl_only	= false,
+	.ret_type	= RET_INTEGER,
+	.arg1_type	= ARG_PTR_TO_CTX,
+};
+#endif
+
static const struct bpf_func_proto *
bpf_get_skb_set_tunnel_proto(enum bpf_func_id which)
{
@@ -2515,6 +2547,10 @@ tc_cls_act_func_proto(enum bpf_func_id func_id)
return &bpf_get_smp_processor_id_proto;
case BPF_FUNC_skb_under_cgroup:
return &bpf_skb_under_cgroup_proto;
+#ifdef CONFIG_NETPOLICY
+ case BPF_FUNC_netpolicy:
+ return &bpf_netpolicy_proto;
+#endif
default:
return sk_filter_func_proto(func_id);
}
@@ -85,6 +85,7 @@ always += xdp2_kern.o
always += test_current_task_under_cgroup_kern.o
always += trace_event_kern.o
always += sampleip_kern.o
+always += netpolicy_kern.o
HOSTCFLAGS += -I$(objtree)/usr/include
@@ -88,6 +88,8 @@ static int (*bpf_l4_csum_replace)(void *ctx, int off, int from, int to, int flag
(void *) BPF_FUNC_l4_csum_replace;
static int (*bpf_skb_under_cgroup)(void *ctx, void *map, int index) =
(void *) BPF_FUNC_skb_under_cgroup;
+static int (*bpf_netpolicy)(void *ctx) =
+ (void *) BPF_FUNC_netpolicy;
#if defined(__x86_64__)