diff mbox series

[2/3,nf-next] netfilter:nf_flow_table: Support bridge type flow offload

Message ID 1561545148-11978-2-git-send-email-wenxu@ucloud.cn
State Awaiting Upstream
Delegated to: David Miller
Headers show
Series [1/3,nf-next] netfilter:nf_flow_table: Refactor flow_offload_tuple to support more offload method | expand

Commit Message

wenxu June 26, 2019, 10:32 a.m. UTC
From: wenxu <wenxu@ucloud.cn>

With the nf_conntrack_bridge function, the bridge family can do
conntrack itself. The flow offload function is based on
conntrack, so flows in the bridge with conntrack can be
offloaded.

Signed-off-by: wenxu <wenxu@ucloud.cn>
---
 include/net/netfilter/nf_flow_table.h | 30 +++++++++++-
 net/netfilter/nf_flow_table_core.c    | 53 ++++++++++++++++-----
 net/netfilter/nf_flow_table_ip.c      | 41 +++++++++++++---
 net/netfilter/nft_flow_offload.c      | 89 ++++++++++++++++++++++++++++++++---
 4 files changed, 185 insertions(+), 28 deletions(-)

Comments

Florian Westphal June 26, 2019, 6:38 p.m. UTC | #1
wenxu@ucloud.cn <wenxu@ucloud.cn> wrote:
> diff --git a/net/netfilter/nf_flow_table_ip.c b/net/netfilter/nf_flow_table_ip.c
> index 0016bb8..9af01ef 100644
> --- a/net/netfilter/nf_flow_table_ip.c
> +++ b/net/netfilter/nf_flow_table_ip.c
> -	neigh_xmit(NEIGH_ARP_TABLE, outdev, &nexthop, skb);
> +	if (family == NFPROTO_IPV4) {
> +		iph = ip_hdr(skb);
> +		ip_decrease_ttl(iph);
> +
> +		nexthop = rt_nexthop(rt, flow->tuplehash[!dir].tuple.src_v4.s_addr);
> +		skb_dst_set_noref(skb, &rt->dst);
> +		neigh_xmit(NEIGH_ARP_TABLE, outdev, &nexthop, skb);
> +	} else {
> +		const struct net_bridge_port *p;
> +
> +		if (vlan_tag && (p = br_port_get_rtnl_rcu(state->in)))
> +			__vlan_hwaccel_put_tag(skb, p->br->vlan_proto, vlan_tag);
> +		else
> +			__vlan_hwaccel_clear_tag(skb);
> +
> +		br_dev_queue_push_xmit(state->net, state->sk, skb);

Won't that result in a module dep on bridge?

Whats the idea with this patch?

Do you see a performance improvement when bypassing bridge layer? If so,
how much?

I just wonder if its really cheaper than not using bridge conntrack in
the first place :-)
Pablo Neira Ayuso June 26, 2019, 6:40 p.m. UTC | #2
On Wed, Jun 26, 2019 at 06:32:27PM +0800, wenxu@ucloud.cn wrote:
> From: wenxu <wenxu@ucloud.cn>
> 
> With nf_conntrack_bridge function. The bridge family can do
> conntrack it self. The flow offload function based on the
> conntrack. So the flow in the bridge wih conntrack can be
> offloaded.
> 
> Signed-off-by: wenxu <wenxu@ucloud.cn>
> ---
>  include/net/netfilter/nf_flow_table.h | 30 +++++++++++-
>  net/netfilter/nf_flow_table_core.c    | 53 ++++++++++++++++-----
>  net/netfilter/nf_flow_table_ip.c      | 41 +++++++++++++---
>  net/netfilter/nft_flow_offload.c      | 89 ++++++++++++++++++++++++++++++++---
>  4 files changed, 185 insertions(+), 28 deletions(-)
> 
> diff --git a/include/net/netfilter/nf_flow_table.h b/include/net/netfilter/nf_flow_table.h
> index 968be64..9a0cf27 100644
> --- a/include/net/netfilter/nf_flow_table.h
> +++ b/include/net/netfilter/nf_flow_table.h
> @@ -33,8 +33,22 @@ enum flow_offload_tuple_dir {
>  	FLOW_OFFLOAD_DIR_MAX = IP_CT_DIR_MAX
>  };
>  
> +enum flow_offload_tuple_type {
> +	FLOW_OFFLOAD_TYPE_INET,
> +	FLOW_OFFLOAD_TYPE_BRIDGE,
> +};
> +
> +struct dst_br_port {
> +	struct net_device *dev;
> +	u16	dst_vlan_tag;
> +};
> +
>  struct flow_offload_dst {
> -	struct dst_entry		*dst_cache;
> +	enum flow_offload_tuple_type type;
> +	union {
> +		struct dst_entry		*dst_cache;
> +		struct dst_br_port		dst_port;
> +	};
>  };
>  
>  struct flow_offload_tuple {
> @@ -52,6 +66,7 @@ struct flow_offload_tuple {
>  	};
>  
>  	int				iifidx;
> +	u16				vlan_tag;
>  
>  	u8				l3proto;
>  	u8				l4proto;
> @@ -89,8 +104,19 @@ struct nf_flow_route {
>  	} tuple[FLOW_OFFLOAD_DIR_MAX];
>  };
>  
> +struct nf_flow_forward {
> +	struct {
> +		struct dst_br_port	dst_port;
> +		u16 vlan_tag;
> +	} tuple[FLOW_OFFLOAD_DIR_MAX];
> +};
> +
>  struct nf_flow_data {
> -	struct nf_flow_route route;
> +	enum flow_offload_tuple_type type;
> +	union {
> +		struct nf_flow_route route;
> +		struct nf_flow_forward forward;
> +	};
>  };
>  
>  struct flow_offload *flow_offload_alloc(struct nf_conn *ct,
> diff --git a/net/netfilter/nf_flow_table_core.c b/net/netfilter/nf_flow_table_core.c
> index 125ce1c..19ee69c 100644
> --- a/net/netfilter/nf_flow_table_core.c
> +++ b/net/netfilter/nf_flow_table_core.c
> @@ -29,16 +29,38 @@ struct flow_offload_entry {
>  {
>  	struct flow_offload_tuple *ft = &flow->tuplehash[dir].tuple;
>  	struct nf_conntrack_tuple *ctt = &ct->tuplehash[dir].tuple;
> -	struct dst_entry *other_dst = date->route.tuple[!dir].dst;
> -	struct dst_entry *dst = data->route.tuple[dir].dst;
>  
> +	struct dst_entry *other_dst;
> +	struct dst_entry *dst;
> +	struct dst_br_port other_dst_port;
> +	struct dst_br_port dst_port;

Please, reverse xmas tree for variable definitions.

> +	if (data->type == FLOW_OFFLOAD_TYPE_BRIDGE) {

Could you add functions for these?

        nf_flow_fill_bridge_dst(...)

> +		other_dst_port = data->forward.tuple[!dir].dst_port;
> +		dst_port = data->forward.tuple[dir].dst_port;
> +
> +		ft->iifidx = other_dst_port.dev->ifindex;
> +		ft->dst.dst_port = dst_port;
> +		ft->vlan_tag = data->forward.tuple[dir].vlan_tag;
> +	} else {

You could probably make an initial patch to add this function, so this
patch becomes smaller and easier to review:

                nf_flow_fill_inet_dst(...)

to wrap the code below.

> +		other_dst = data->route.tuple[!dir].dst;
> +		dst = data->route.tuple[dir].dst;
> +
> +		ft->iifidx = other_dst->dev->ifindex;
> +		ft->dst.dst_cache = dst;
> +	}
> +
> +	ft->dst.type = data->type;
>  	ft->dir = dir;
>  
>  	switch (ctt->src.l3num) {
>  	case NFPROTO_IPV4:
>  		ft->src_v4 = ctt->src.u3.in;
>  		ft->dst_v4 = ctt->dst.u3.in;
> -		ft->mtu = ip_dst_mtu_maybe_forward(dst, true);
> +		if (data->type == FLOW_OFFLOAD_TYPE_BRIDGE)
> +			ft->mtu = dst_port.dev->mtu;
> +		else
> +			ft->mtu = ip_dst_mtu_maybe_forward(dst, true);
>  		break;
>  	case NFPROTO_IPV6:
>  		ft->src_v6 = ctt->src.u3.in6;
> @@ -51,9 +73,6 @@ struct flow_offload_entry {
>  	ft->l4proto = ctt->dst.protonum;
>  	ft->src_port = ctt->src.u.tcp.port;
>  	ft->dst_port = ctt->dst.u.tcp.port;
> -
> -	ft->iifidx = other_dst->dev->ifindex;
> -	ft->dst_cache = dst;
>  }
>  
>  struct flow_offload *
> @@ -72,11 +91,13 @@ struct flow_offload *
>  
>  	flow = &entry->flow;
>  
> -	if (!dst_hold_safe(data->route.tuple[FLOW_OFFLOAD_DIR_ORIGINAL].dst))
> -		goto err_dst_cache_original;
> +	if (data->type == FLOW_OFFLOAD_TYPE_INET) {

Place this code below in a function?

> +		if (!dst_hold_safe(data->route.tuple[FLOW_OFFLOAD_DIR_ORIGINAL].dst))
> +			goto err_dst_cache_original;
>  
> -	if (!dst_hold_safe(data->route.tuple[FLOW_OFFLOAD_DIR_REPLY].dst))
> -		goto err_dst_cache_reply;
> +		if (!dst_hold_safe(data->route.tuple[FLOW_OFFLOAD_DIR_REPLY].dst))
> +			goto err_dst_cache_reply;
> +	}
>  
>  	entry->ct = ct;
>  
> @@ -91,7 +112,8 @@ struct flow_offload *
>  	return flow;
>  
>  err_dst_cache_reply:
> -	dst_release(data->route.tuple[FLOW_OFFLOAD_DIR_ORIGINAL].dst);
> +	if (data->type == FLOW_OFFLOAD_TYPE_INET)
> +		dst_release(data->route.tuple[FLOW_OFFLOAD_DIR_ORIGINAL].dst);

Same thing here, place this code in a function.

>  err_dst_cache_original:
>  	kfree(entry);
>  err_ct_refcnt:
> @@ -139,8 +161,13 @@ void flow_offload_free(struct flow_offload *flow)
>  {
>  	struct flow_offload_entry *e;
>  
> -	dst_release(flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.dst.dst_cache);
> -	dst_release(flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst.dst_cache);
> +	if (flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.dst.type == FLOW_OFFLOAD_TYPE_INET) {

Place this code in a function.

Better use switch() rather than if().

> +		dst_release(flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.dst.dst_cache);
> +		dst_release(flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst.dst_cache);
> +	} else {
> +		dev_put(flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.dst.dst_port.dev);
> +		dev_put(flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst.dst_port.dev);
> +	}
>  	e = container_of(flow, struct flow_offload_entry, flow);
>  	if (flow->flags & FLOW_OFFLOAD_DYING)
>  		nf_ct_delete(e->ct, 0, 0);
> diff --git a/net/netfilter/nf_flow_table_ip.c b/net/netfilter/nf_flow_table_ip.c
> index 0016bb8..9af01ef 100644
> --- a/net/netfilter/nf_flow_table_ip.c
> +++ b/net/netfilter/nf_flow_table_ip.c
> @@ -16,6 +16,8 @@
>  #include <linux/tcp.h>
>  #include <linux/udp.h>
>  
> +#include "../bridge/br_private.h"
> +
>  static int nf_flow_state_check(struct flow_offload *flow, int proto,
>  			       struct sk_buff *skb, unsigned int thoff)
>  {
> @@ -220,6 +222,7 @@ static bool nf_flow_exceeds_mtu(const struct sk_buff *skb, unsigned int mtu)
>  {
>  	struct flow_offload_tuple_rhash *tuplehash;
>  	struct nf_flowtable *flow_table = priv;
> +	int family = flow_table->type->family;
>  	struct flow_offload_tuple tuple = {};
>  	enum flow_offload_tuple_dir dir;
>  	struct flow_offload *flow;
> @@ -228,6 +231,7 @@ static bool nf_flow_exceeds_mtu(const struct sk_buff *skb, unsigned int mtu)
>  	unsigned int thoff;
>  	struct iphdr *iph;
>  	__be32 nexthop;
> +	u16 vlan_tag;
>  
>  	if (skb->protocol != htons(ETH_P_IP))
>  		return NF_ACCEPT;
> @@ -235,14 +239,25 @@ static bool nf_flow_exceeds_mtu(const struct sk_buff *skb, unsigned int mtu)
>  	if (nf_flow_tuple_ip(skb, state->in, &tuple) < 0)
>  		return NF_ACCEPT;
>  
> +	if (family != NFPROTO_BRIDGE && family != NFPROTO_IPV4)
> +		return NF_ACCEPT;
> +
> +	if (family == NFPROTO_BRIDGE && skb_vlan_tag_present(skb))
> +		tuple.vlan_tag = skb_vlan_tag_get_id(skb);
> +
>  	tuplehash = flow_offload_lookup(flow_table, &tuple);
>  	if (tuplehash == NULL)
>  		return NF_ACCEPT;
>  
>  	dir = tuplehash->tuple.dir;
>  	flow = container_of(tuplehash, struct flow_offload, tuplehash[dir]);
> -	rt = (struct rtable *)flow->tuplehash[dir].tuple.dst.dst_cache;
> -	outdev = rt->dst.dev;
> +	if (family == NFPROTO_IPV4) {
> +		rt = (struct rtable *)flow->tuplehash[dir].tuple.dst.dst_cache;
> +		outdev = rt->dst.dev;
> +	} else {
> +		vlan_tag = flow->tuplehash[dir].tuple.dst.dst_port.dst_vlan_tag;
> +		outdev = flow->tuplehash[dir].tuple.dst.dst_port.dev;
> +	}
>  
>  	if (unlikely(nf_flow_exceeds_mtu(skb, flow->tuplehash[dir].tuple.mtu)))
>  		return NF_ACCEPT;
> @@ -258,13 +273,25 @@ static bool nf_flow_exceeds_mtu(const struct sk_buff *skb, unsigned int mtu)
>  		return NF_DROP;
>  
>  	flow->timeout = (u32)jiffies + NF_FLOW_TIMEOUT;
> -	iph = ip_hdr(skb);
> -	ip_decrease_ttl(iph);
>  
>  	skb->dev = outdev;
> -	nexthop = rt_nexthop(rt, flow->tuplehash[!dir].tuple.src_v4.s_addr);
> -	skb_dst_set_noref(skb, &rt->dst);
> -	neigh_xmit(NEIGH_ARP_TABLE, outdev, &nexthop, skb);
> +	if (family == NFPROTO_IPV4) {
> +		iph = ip_hdr(skb);
> +		ip_decrease_ttl(iph);
> +
> +		nexthop = rt_nexthop(rt, flow->tuplehash[!dir].tuple.src_v4.s_addr);
> +		skb_dst_set_noref(skb, &rt->dst);
> +		neigh_xmit(NEIGH_ARP_TABLE, outdev, &nexthop, skb);
> +	} else {
> +		const struct net_bridge_port *p;
> +
> +		if (vlan_tag && (p = br_port_get_rtnl_rcu(state->in)))
> +			__vlan_hwaccel_put_tag(skb, p->br->vlan_proto, vlan_tag);
> +		else
> +			__vlan_hwaccel_clear_tag(skb);
> +
> +		br_dev_queue_push_xmit(state->net, state->sk, skb);
> +	}

Probably you can place common code into functions, the make a function
that uses these function to build the bridge and the inet datapath
into independent functions.

Instead of all these if() branches so often to reuse code, which makes
the code hard to follow.

>  	return NF_STOLEN;
>  }
> diff --git a/net/netfilter/nft_flow_offload.c b/net/netfilter/nft_flow_offload.c
> index cdb7c46..c88396a 100644
> --- a/net/netfilter/nft_flow_offload.c
> +++ b/net/netfilter/nft_flow_offload.c
> @@ -14,6 +14,8 @@
>  #include <linux/netfilter/nf_conntrack_common.h>
>  #include <net/netfilter/nf_flow_table.h>
>  
> +#include "../bridge/br_private.h"
> +
>  struct nft_flow_offload {
>  	struct nft_flowtable	*flowtable;
>  };
> @@ -49,6 +51,58 @@ static int nft_flow_route(const struct nft_pktinfo *pkt,
>  	return 0;
>  }
>  
> +static int nft_flow_forward(const struct nft_pktinfo *pkt,
> +			    const struct nf_conn *ct,
> +			    struct nf_flow_forward *forward,
> +			    enum ip_conntrack_dir dir)
> +{
> +	struct net_bridge_vlan_group *vg;
> +	const struct net_bridge_port *p;
> +	u16 vid = 0;
> +
> +	if (skb_vlan_tag_present(pkt->skb))
> +		vid = skb_vlan_tag_get_id(pkt->skb);
> +
> +	forward->tuple[dir].dst_port.dst_vlan_tag = vid;
> +	forward->tuple[!dir].vlan_tag = vid;
> +	forward->tuple[dir].dst_port.dev = dev_get_by_index(dev_net(nft_out(pkt)),
> +							    nft_out(pkt)->ifindex);
> +	forward->tuple[!dir].dst_port.dev = dev_get_by_index(dev_net(nft_in(pkt)),
> +							     nft_in(pkt)->ifindex);
> +
> +	rtnl_lock();

rtnl_lock() from the packet path?

> +	p = br_port_get_rtnl_rcu(nft_out(pkt));
> +	if (p) {
> +		if (!br_opt_get(p->br, BROPT_VLAN_ENABLED))
> +			goto out;
> +
> +		if (!vid) {
> +			vg = nbp_vlan_group_rcu(p);
> +			vid = br_get_pvid(vg);
> +		}
> +
> +		if (vid) {
> +			struct bridge_vlan_info info;
> +
> +			if (br_vlan_get_info(nft_in(pkt), vid, &info) == 0 &&
> +			    info.flags & BRIDGE_VLAN_INFO_UNTAGGED)
> +				vid = 0;
> +		}
> +	} else {
> +		rtnl_unlock();
> +		dev_put(forward->tuple[dir].dst_port.dev);
> +		dev_put(forward->tuple[!dir].dst_port.dev);
> +		return -ENOENT;
> +	}
> +
> +out:
> +	rtnl_unlock();
> +	forward->tuple[!dir].dst_port.dst_vlan_tag = vid;
> +	forward->tuple[dir].vlan_tag = vid;
> +
> +	return 0;
> +}
> +
>  static bool nft_flow_offload_skip(struct sk_buff *skb, int family)
>  {
>  	if (skb_sec_path(skb))
> @@ -61,6 +115,15 @@ static bool nft_flow_offload_skip(struct sk_buff *skb, int family)
>  
>  		if (unlikely(opt->optlen))
>  			return true;
> +	} else if (family == NFPROTO_BRIDGE) {
> +		const struct iphdr *iph;
> +
> +		if (skb->protocol != htons(ETH_P_IP))
> +			return true;
> +
> +		iph = ip_hdr(skb);
> +		if (iph->ihl > 5)
> +			return true;
>  	}
>  
>  	return false;
> @@ -76,11 +139,12 @@ static void nft_flow_offload_eval(const struct nft_expr *expr,
>  	struct nf_flow_data data;
>  	struct flow_offload *flow;
>  	enum ip_conntrack_dir dir;
> +	int family = nft_pf(pkt);
>  	bool is_tcp = false;
>  	struct nf_conn *ct;
>  	int ret;
>  
> -	if (nft_flow_offload_skip(pkt->skb, nft_pf(pkt)))
> +	if (nft_flow_offload_skip(pkt->skb, family))
>  		goto out;
>  
>  	ct = nf_ct_get(pkt->skb, &ctinfo);
> @@ -108,8 +172,15 @@ static void nft_flow_offload_eval(const struct nft_expr *expr,
>  		goto out;
>  
>  	dir = CTINFO2DIR(ctinfo);
> -	if (nft_flow_route(pkt, ct, &data.route, dir) < 0)
> -		goto err_flow_route;
> +	if (family == NFPROTO_BRIDGE) {
> +		data.type = FLOW_OFFLOAD_TYPE_BRIDGE;
> +		if (nft_flow_forward(pkt, ct, &data.forward, dir) < 0)
> +			goto err_flow_data;
> +	} else {
> +		data.type = FLOW_OFFLOAD_TYPE_INET;
> +		if (nft_flow_route(pkt, ct, &data.route, dir) < 0)
> +			goto err_flow_data;
> +	}
>  
>  	flow = flow_offload_alloc(ct, &data);
>  	if (!flow)
> @@ -124,14 +195,20 @@ static void nft_flow_offload_eval(const struct nft_expr *expr,
>  	if (ret < 0)
>  		goto err_flow_add;
>  
> -	dst_release(data.route.tuple[!dir].dst);
> +	if (family != NFPROTO_BRIDGE)
> +		dst_release(data.route.tuple[!dir].dst);
>  	return;
>  
>  err_flow_add:
>  	flow_offload_free(flow);
>  err_flow_alloc:
> -	dst_release(data.route.tuple[!dir].dst);
> -err_flow_route:
> +	if (family == NFPROTO_BRIDGE) {
> +		dev_put(data.forward.tuple[dir].dst_port.dev);
> +		dev_put(data.forward.tuple[!dir].dst_port.dev);
> +	} else {
> +		dst_release(data.route.tuple[!dir].dst);
> +	}
> +err_flow_data:
>  	clear_bit(IPS_OFFLOAD_BIT, &ct->status);
>  out:
>  	regs->verdict.code = NFT_BREAK;
> -- 
> 1.8.3.1
>
Florian Westphal June 26, 2019, 7:19 p.m. UTC | #3
Florian Westphal <fw@strlen.de> wrote:
> wenxu@ucloud.cn <wenxu@ucloud.cn> wrote:
> > diff --git a/net/netfilter/nf_flow_table_ip.c b/net/netfilter/nf_flow_table_ip.c
> > index 0016bb8..9af01ef 100644
> > --- a/net/netfilter/nf_flow_table_ip.c
> > +++ b/net/netfilter/nf_flow_table_ip.c
> > -	neigh_xmit(NEIGH_ARP_TABLE, outdev, &nexthop, skb);
> > +	if (family == NFPROTO_IPV4) {
> > +		iph = ip_hdr(skb);
> > +		ip_decrease_ttl(iph);
> > +
> > +		nexthop = rt_nexthop(rt, flow->tuplehash[!dir].tuple.src_v4.s_addr);
> > +		skb_dst_set_noref(skb, &rt->dst);
> > +		neigh_xmit(NEIGH_ARP_TABLE, outdev, &nexthop, skb);
> > +	} else {
> > +		const struct net_bridge_port *p;
> > +
> > +		if (vlan_tag && (p = br_port_get_rtnl_rcu(state->in)))
> > +			__vlan_hwaccel_put_tag(skb, p->br->vlan_proto, vlan_tag);
> > +		else
> > +			__vlan_hwaccel_clear_tag(skb);
> > +
> > +		br_dev_queue_push_xmit(state->net, state->sk, skb);
> 
> Won't that result in a module dep on bridge?
> 
> Whats the idea with this patch?
> 
> Do you see a performance improvement when bypassing bridge layer? If so,
> how much?
> 
> I just wonder if its really cheaper than not using bridge conntrack in
> the first place :-)

Addendum: Did you look at the nftables fwd expression?  Maybe you can use
it as a simpler way to speed things up?
wenxu June 27, 2019, 6:22 a.m. UTC | #4
On 6/27/2019 3:19 AM, Florian Westphal wrote:
> Florian Westphal <fw@strlen.de> wrote:
>> wenxu@ucloud.cn <wenxu@ucloud.cn> wrote:
>>> diff --git a/net/netfilter/nf_flow_table_ip.c b/net/netfilter/nf_flow_table_ip.c
>>> index 0016bb8..9af01ef 100644
>>> --- a/net/netfilter/nf_flow_table_ip.c
>>> +++ b/net/netfilter/nf_flow_table_ip.c
>>> -	neigh_xmit(NEIGH_ARP_TABLE, outdev, &nexthop, skb);
>>> +	if (family == NFPROTO_IPV4) {
>>> +		iph = ip_hdr(skb);
>>> +		ip_decrease_ttl(iph);
>>> +
>>> +		nexthop = rt_nexthop(rt, flow->tuplehash[!dir].tuple.src_v4.s_addr);
>>> +		skb_dst_set_noref(skb, &rt->dst);
>>> +		neigh_xmit(NEIGH_ARP_TABLE, outdev, &nexthop, skb);
>>> +	} else {
>>> +		const struct net_bridge_port *p;
>>> +
>>> +		if (vlan_tag && (p = br_port_get_rtnl_rcu(state->in)))
>>> +			__vlan_hwaccel_put_tag(skb, p->br->vlan_proto, vlan_tag);
>>> +		else
>>> +			__vlan_hwaccel_clear_tag(skb);
>>> +
>>> +		br_dev_queue_push_xmit(state->net, state->sk, skb);
>> Won't that result in a module dep on bridge?
I  will fix it in version 2
>>
>> Whats the idea with this patch?
>>
>> Do you see a performance improvement when bypassing bridge layer? If so,
>> how much?
>>
>> I just wonder if its really cheaper than not using bridge conntrack in
>> the first place :-)

This patch is based on the conntrack function in bridge. It will bypass the fdb lookup

and conntrack lookup to get a performance improvement. More importantly, it prepares

for hardware offload in the future, when nf_tables adds hardware offload support.

>
Pablo Neira Ayuso June 27, 2019, 12:58 p.m. UTC | #5
On Thu, Jun 27, 2019 at 02:22:36PM +0800, wenxu wrote:
> On 6/27/2019 3:19 AM, Florian Westphal wrote:
> > Florian Westphal <fw@strlen.de> wrote:
[...]
> >> Whats the idea with this patch?
> >>
> >> Do you see a performance improvement when bypassing bridge layer? If so,
> >> how much?
> >>
> >> I just wonder if its really cheaper than not using bridge conntrack in
> >> the first place :-)
> 
> This patch is based on the conntrack function in bridge.  It will
> bypass the fdb lookup and conntrack lookup to get the performance 
> improvement. The more important things for hardware offload in the
> future with nf_tables add hardware offload support

Florian would like to see numbers / benchmark.
wenxu June 28, 2019, 3:37 a.m. UTC | #6
On 6/27/2019 8:58 PM, Pablo Neira Ayuso wrote:
> On Thu, Jun 27, 2019 at 02:22:36PM +0800, wenxu wrote:
>> On 6/27/2019 3:19 AM, Florian Westphal wrote:
>>> Florian Westphal <fw@strlen.de> wrote:
> [...]
>>>> Whats the idea with this patch?
>>>>
>>>> Do you see a performance improvement when bypassing bridge layer? If so,
>>>> how much?
>>>>
>>>> I just wonder if its really cheaper than not using bridge conntrack in
>>>> the first place :-)
>> This patch is based on the conntrack function in bridge.  It will
>> bypass the fdb lookup and conntrack lookup to get the performance 
>> improvement. The more important things for hardware offload in the
>> future with nf_tables add hardware offload support
> Florian would like to see numbers / benchmark.


I just did a simple performance test with the following setup.

ip netns add ns21
ip netns add ns22
ip l add dev veth21 type veth peer name eth0 netns ns21
ip l add dev veth22 type veth peer name eth0 netns ns22
ifconfig veth21 up
ifconfig veth22 up
ip netns exec ns21 ip a a dev eth0 10.0.0.7/24
ip netns exec ns22 ip a a dev eth0 10.0.0.8/24
ip netns exec ns21 ifconfig eth0 up
ip netns exec ns22 ifconfig eth0 up

ip l add dev br0 type bridge vlan_filtering 1
brctl addif br0 veth21
brctl addif br0 veth22

ifconfig br0 up

bridge vlan add dev veth21 vid 200 pvid untagged
bridge vlan add dev veth22 vid 200 pvid untagged

nft add table bridge firewall
nft add chain bridge firewall zones { type filter hook prerouting priority - 300 \; }
nft add rule bridge firewall zones counter ct zone set iif map { "veth21" : 2, "veth22" : 2 }

nft add chain bridge firewall rule-200-ingress
nft add rule bridge firewall rule-200-ingress ct zone 2 ct state established,related counter accept
nft add rule bridge firewall rule-200-ingress ct zone 2 ct state invalid counter drop
nft add rule bridge firewall rule-200-ingress ct zone 2 tcp dport 23 ct state new counter accept
nft add rule bridge firewall rule-200-ingress counter drop

nft add chain bridge firewall rule-200-egress
nft add rule bridge firewall rule-200-egress ct zone 2 ct state established,related counter accept
nft add rule bridge firewall rule-200-egress ct zone 2 ct state invalid counter drop
nft add rule bridge firewall rule-200-egress ct zone 2 tcp dport 23 ct state new counter drop
nft add rule bridge firewall rule-200-egress counter accept

nft add chain bridge firewall rules-all { type filter hook prerouting priority - 150 \; }
nft add rule bridge firewall rules-all counter meta protocol ip iif vmap { "veth22" : jump rule-200-ingress, "veth21" : jump rule-200-egress }



netns21 communication with ns22


ns21 iperf to 10.0.0.8 with dport 22 in ns22


first time with OFFLOAD enable

nft add flowtable bridge firewall fb2 { hook ingress priority 0 \; devices = { veth21, veth22 } \; }
nft add chain bridge firewall ftb-all {type filter hook forward priority 0 \; policy accept \; }
nft add rule bridge firewall ftb-all counter ct zone 2 ip protocol tcp flow offload @fb2

# iperf -c 10.0.0.8 -p 22 -t 60 -i2
------------------------------------------------------------
Client connecting to 10.0.0.8, TCP port 22
TCP window size: 85.0 KByte (default)
------------------------------------------------------------
[  3] local 10.0.0.7 port 60014 connected with 10.0.0.8 port 22
[ ID] Interval       Transfer     Bandwidth
[  3]  0.0- 2.0 sec  10.8 GBytes  46.5 Gbits/sec
[  3]  2.0- 4.0 sec  10.9 GBytes  46.7 Gbits/sec
[  3]  4.0- 6.0 sec  10.9 GBytes  46.8 Gbits/sec
[  3]  6.0- 8.0 sec  11.0 GBytes  47.2 Gbits/sec
[  3]  8.0-10.0 sec  11.0 GBytes  47.1 Gbits/sec
[  3] 10.0-12.0 sec  11.0 GBytes  47.1 Gbits/sec
[  3] 12.0-14.0 sec  11.7 GBytes  50.4 Gbits/sec
[  3] 14.0-16.0 sec  12.0 GBytes  51.6 Gbits/sec
[  3] 16.0-18.0 sec  12.0 GBytes  51.6 Gbits/sec
[  3] 18.0-20.0 sec  12.0 GBytes  51.6 Gbits/sec
[  3] 20.0-22.0 sec  12.0 GBytes  51.5 Gbits/sec
[  3] 22.0-24.0 sec  12.0 GBytes  51.4 Gbits/sec
[  3] 24.0-26.0 sec  12.0 GBytes  51.3 Gbits/sec
[  3] 26.0-28.0 sec  12.0 GBytes  51.7 Gbits/sec
[  3] 28.0-30.0 sec  12.0 GBytes  51.6 Gbits/sec
[  3] 30.0-32.0 sec  12.0 GBytes  51.6 Gbits/sec
[  3] 32.0-34.0 sec  12.0 GBytes  51.6 Gbits/sec
[  3] 34.0-36.0 sec  12.0 GBytes  51.5 Gbits/sec
[  3] 36.0-38.0 sec  12.0 GBytes  51.5 Gbits/sec
[  3] 38.0-40.0 sec  12.0 GBytes  51.6 Gbits/sec
[  3] 40.0-42.0 sec  12.0 GBytes  51.6 Gbits/sec
[  3] 42.0-44.0 sec  12.0 GBytes  51.5 Gbits/sec
[  3] 44.0-46.0 sec  12.0 GBytes  51.4 Gbits/sec
[  3] 46.0-48.0 sec  12.0 GBytes  51.4 Gbits/sec
[  3] 48.0-50.0 sec  12.0 GBytes  51.5 Gbits/sec
[  3] 50.0-52.0 sec  12.0 GBytes  51.6 Gbits/sec
[  3] 52.0-54.0 sec  12.0 GBytes  51.6 Gbits/sec
[  3] 54.0-56.0 sec  12.0 GBytes  51.5 Gbits/sec
[  3] 56.0-58.0 sec  11.9 GBytes  51.2 Gbits/sec
[  3] 58.0-60.0 sec  11.8 GBytes  50.7 Gbits/sec
[  3]  0.0-60.0 sec   353 GBytes  50.5 Gbits/sec


The second time without any offload:
# iperf -c 10.0.0.8 -p 22 -t 60 -i2
------------------------------------------------------------
Client connecting to 10.0.0.8, TCP port 22
TCP window size: 85.0 KByte (default)
------------------------------------------------------------
[  3] local 10.0.0.7 port 60536 connected with 10.0.0.8 port 22
[ ID] Interval       Transfer     Bandwidth
[  3]  0.0- 2.0 sec  8.88 GBytes  38.1 Gbits/sec
[  3]  2.0- 4.0 sec  9.02 GBytes  38.7 Gbits/sec
[  3]  4.0- 6.0 sec  9.02 GBytes  38.8 Gbits/sec
[  3]  6.0- 8.0 sec  9.05 GBytes  38.9 Gbits/sec
[  3]  8.0-10.0 sec  9.05 GBytes  38.9 Gbits/sec
[  3] 10.0-12.0 sec  9.04 GBytes  38.8 Gbits/sec
[  3] 12.0-14.0 sec  9.05 GBytes  38.9 Gbits/sec
[  3] 14.0-16.0 sec  9.05 GBytes  38.9 Gbits/sec
[  3] 16.0-18.0 sec  9.06 GBytes  38.9 Gbits/sec
[  3] 18.0-20.0 sec  9.07 GBytes  39.0 Gbits/sec
[  3] 20.0-22.0 sec  9.07 GBytes  38.9 Gbits/sec
[  3] 22.0-24.0 sec  9.06 GBytes  38.9 Gbits/sec
[  3] 24.0-26.0 sec  9.05 GBytes  38.9 Gbits/sec
[  3] 26.0-28.0 sec  9.05 GBytes  38.9 Gbits/sec
[  3] 28.0-30.0 sec  9.06 GBytes  38.9 Gbits/sec
[  3] 30.0-32.0 sec  9.06 GBytes  38.9 Gbits/sec
[  3] 32.0-34.0 sec  9.07 GBytes  38.9 Gbits/sec
[  3] 34.0-36.0 sec  9.05 GBytes  38.9 Gbits/sec
[  3] 36.0-38.0 sec  9.03 GBytes  38.8 Gbits/sec
[  3] 38.0-40.0 sec  9.03 GBytes  38.8 Gbits/sec
[  3] 40.0-42.0 sec  9.05 GBytes  38.9 Gbits/sec
[  3] 42.0-44.0 sec  9.03 GBytes  38.8 Gbits/sec
[  3] 44.0-46.0 sec  9.04 GBytes  38.8 Gbits/sec
[  3] 46.0-48.0 sec  9.05 GBytes  38.9 Gbits/sec
[  3] 48.0-50.0 sec  9.05 GBytes  38.9 Gbits/sec
[  3] 50.0-52.0 sec  9.05 GBytes  38.9 Gbits/sec
[  3] 52.0-54.0 sec  9.06 GBytes  38.9 Gbits/sec
[  3] 54.0-56.0 sec  9.05 GBytes  38.9 Gbits/sec
[  3] 56.0-58.0 sec  9.05 GBytes  38.9 Gbits/sec
[  3] 58.0-60.0 sec  9.05 GBytes  38.9 Gbits/sec
[  3]  0.0-60.0 sec   271 GBytes  38.8 Gbits/sec




>
Florian Westphal June 28, 2019, 6:06 a.m. UTC | #7
wenxu <wenxu@ucloud.cn> wrote:
> ns21 iperf to 10.0.0.8 with dport 22 in ns22
> first time with OFFLOAD enable
> 
> nft add flowtable bridge firewall fb2 { hook ingress priority 0 \; devices = { veth21, veth22 } \; }
> nft add chain bridge firewall ftb-all {type filter hook forward priority 0 \; policy accept \; }
> nft add rule bridge firewall ftb-all counter ct zone 2 ip protocol tcp flow offload @fb2
> 
> # iperf -c 10.0.0.8 -p 22 -t 60 -i2
[..]
> [  3]  0.0-60.0 sec   353 GBytes  50.5 Gbits/sec
> 
> The second time on any offload:
> # iperf -c 10.0.0.8 -p 22 -t 60 -i2
> [  3]  0.0-60.0 sec   271 GBytes  38.8 Gbits/sec

Wow, this is pretty impressive.  Do you have numbers without
offload and no connection tracking?

Is this with CONFIG_RETPOLINE=y (just curious)?
wenxu June 28, 2019, 9:51 a.m. UTC | #8
On 6/28/2019 2:06 PM, Florian Westphal wrote:
> wenxu <wenxu@ucloud.cn> wrote:
>> ns21 iperf to 10.0.0.8 with dport 22 in ns22
>> first time with OFFLOAD enable
>>
>> nft add flowtable bridge firewall fb2 { hook ingress priority 0 \; devices = { veth21, veth22 } \; }
>> nft add chain bridge firewall ftb-all {type filter hook forward priority 0 \; policy accept \; }
>> nft add rule bridge firewall ftb-all counter ct zone 2 ip protocol tcp flow offload @fb2
>>
>> # iperf -c 10.0.0.8 -p 22 -t 60 -i2
> [..]
>> [  3]  0.0-60.0 sec   353 GBytes  50.5 Gbits/sec
>>
>> The second time on any offload:
>> # iperf -c 10.0.0.8 -p 22 -t 60 -i2
>> [  3]  0.0-60.0 sec   271 GBytes  38.8 Gbits/sec
> Wow, this is pretty impressive.  Do you have numbers without
> offload and no connection tracking?

There is no other connection  on the bridge in zone 2

>
> Is this with CONFIG_RETPOLINE=y (just curious)?
Yes, it is enable.
diff mbox series

Patch

diff --git a/include/net/netfilter/nf_flow_table.h b/include/net/netfilter/nf_flow_table.h
index 968be64..9a0cf27 100644
--- a/include/net/netfilter/nf_flow_table.h
+++ b/include/net/netfilter/nf_flow_table.h
@@ -33,8 +33,22 @@  enum flow_offload_tuple_dir {
 	FLOW_OFFLOAD_DIR_MAX = IP_CT_DIR_MAX
 };
 
+enum flow_offload_tuple_type {
+	FLOW_OFFLOAD_TYPE_INET,
+	FLOW_OFFLOAD_TYPE_BRIDGE,
+};
+
+struct dst_br_port {
+	struct net_device *dev;
+	u16	dst_vlan_tag;
+};
+
 struct flow_offload_dst {
-	struct dst_entry		*dst_cache;
+	enum flow_offload_tuple_type type;
+	union {
+		struct dst_entry		*dst_cache;
+		struct dst_br_port		dst_port;
+	};
 };
 
 struct flow_offload_tuple {
@@ -52,6 +66,7 @@  struct flow_offload_tuple {
 	};
 
 	int				iifidx;
+	u16				vlan_tag;
 
 	u8				l3proto;
 	u8				l4proto;
@@ -89,8 +104,19 @@  struct nf_flow_route {
 	} tuple[FLOW_OFFLOAD_DIR_MAX];
 };
 
+struct nf_flow_forward {
+	struct {
+		struct dst_br_port	dst_port;
+		u16 vlan_tag;
+	} tuple[FLOW_OFFLOAD_DIR_MAX];
+};
+
 struct nf_flow_data {
-	struct nf_flow_route route;
+	enum flow_offload_tuple_type type;
+	union {
+		struct nf_flow_route route;
+		struct nf_flow_forward forward;
+	};
 };
 
 struct flow_offload *flow_offload_alloc(struct nf_conn *ct,
diff --git a/net/netfilter/nf_flow_table_core.c b/net/netfilter/nf_flow_table_core.c
index 125ce1c..19ee69c 100644
--- a/net/netfilter/nf_flow_table_core.c
+++ b/net/netfilter/nf_flow_table_core.c
@@ -29,16 +29,38 @@  struct flow_offload_entry {
 {
 	struct flow_offload_tuple *ft = &flow->tuplehash[dir].tuple;
 	struct nf_conntrack_tuple *ctt = &ct->tuplehash[dir].tuple;
-	struct dst_entry *other_dst = date->route.tuple[!dir].dst;
-	struct dst_entry *dst = data->route.tuple[dir].dst;
 
+	struct dst_entry *other_dst;
+	struct dst_entry *dst;
+	struct dst_br_port other_dst_port;
+	struct dst_br_port dst_port;
+
+	if (data->type == FLOW_OFFLOAD_TYPE_BRIDGE) {
+		other_dst_port = data->forward.tuple[!dir].dst_port;
+		dst_port = data->forward.tuple[dir].dst_port;
+
+		ft->iifidx = other_dst_port.dev->ifindex;
+		ft->dst.dst_port = dst_port;
+		ft->vlan_tag = data->forward.tuple[dir].vlan_tag;
+	} else {
+		other_dst = data->route.tuple[!dir].dst;
+		dst = data->route.tuple[dir].dst;
+
+		ft->iifidx = other_dst->dev->ifindex;
+		ft->dst.dst_cache = dst;
+	}
+
+	ft->dst.type = data->type;
 	ft->dir = dir;
 
 	switch (ctt->src.l3num) {
 	case NFPROTO_IPV4:
 		ft->src_v4 = ctt->src.u3.in;
 		ft->dst_v4 = ctt->dst.u3.in;
-		ft->mtu = ip_dst_mtu_maybe_forward(dst, true);
+		if (data->type == FLOW_OFFLOAD_TYPE_BRIDGE)
+			ft->mtu = dst_port.dev->mtu;
+		else
+			ft->mtu = ip_dst_mtu_maybe_forward(dst, true);
 		break;
 	case NFPROTO_IPV6:
 		ft->src_v6 = ctt->src.u3.in6;
@@ -51,9 +73,6 @@  struct flow_offload_entry {
 	ft->l4proto = ctt->dst.protonum;
 	ft->src_port = ctt->src.u.tcp.port;
 	ft->dst_port = ctt->dst.u.tcp.port;
-
-	ft->iifidx = other_dst->dev->ifindex;
-	ft->dst_cache = dst;
 }
 
 struct flow_offload *
@@ -72,11 +91,13 @@  struct flow_offload *
 
 	flow = &entry->flow;
 
-	if (!dst_hold_safe(data->route.tuple[FLOW_OFFLOAD_DIR_ORIGINAL].dst))
-		goto err_dst_cache_original;
+	if (data->type == FLOW_OFFLOAD_TYPE_INET) {
+		if (!dst_hold_safe(data->route.tuple[FLOW_OFFLOAD_DIR_ORIGINAL].dst))
+			goto err_dst_cache_original;
 
-	if (!dst_hold_safe(data->route.tuple[FLOW_OFFLOAD_DIR_REPLY].dst))
-		goto err_dst_cache_reply;
+		if (!dst_hold_safe(data->route.tuple[FLOW_OFFLOAD_DIR_REPLY].dst))
+			goto err_dst_cache_reply;
+	}
 
 	entry->ct = ct;
 
@@ -91,7 +112,8 @@  struct flow_offload *
 	return flow;
 
 err_dst_cache_reply:
-	dst_release(data->route.tuple[FLOW_OFFLOAD_DIR_ORIGINAL].dst);
+	if (data->type == FLOW_OFFLOAD_TYPE_INET)
+		dst_release(data->route.tuple[FLOW_OFFLOAD_DIR_ORIGINAL].dst);
 err_dst_cache_original:
 	kfree(entry);
 err_ct_refcnt:
@@ -139,8 +161,13 @@  void flow_offload_free(struct flow_offload *flow)
 {
 	struct flow_offload_entry *e;
 
-	dst_release(flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.dst.dst_cache);
-	dst_release(flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst.dst_cache);
+	if (flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.dst.type == FLOW_OFFLOAD_TYPE_INET) {
+		dst_release(flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.dst.dst_cache);
+		dst_release(flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst.dst_cache);
+	} else {
+		dev_put(flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.dst.dst_port.dev);
+		dev_put(flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst.dst_port.dev);
+	}
 	e = container_of(flow, struct flow_offload_entry, flow);
 	if (flow->flags & FLOW_OFFLOAD_DYING)
 		nf_ct_delete(e->ct, 0, 0);
diff --git a/net/netfilter/nf_flow_table_ip.c b/net/netfilter/nf_flow_table_ip.c
index 0016bb8..9af01ef 100644
--- a/net/netfilter/nf_flow_table_ip.c
+++ b/net/netfilter/nf_flow_table_ip.c
@@ -16,6 +16,8 @@ 
 #include <linux/tcp.h>
 #include <linux/udp.h>
 
+#include "../bridge/br_private.h"
+
 static int nf_flow_state_check(struct flow_offload *flow, int proto,
 			       struct sk_buff *skb, unsigned int thoff)
 {
@@ -220,6 +222,7 @@  static bool nf_flow_exceeds_mtu(const struct sk_buff *skb, unsigned int mtu)
 {
 	struct flow_offload_tuple_rhash *tuplehash;
 	struct nf_flowtable *flow_table = priv;
+	int family = flow_table->type->family;
 	struct flow_offload_tuple tuple = {};
 	enum flow_offload_tuple_dir dir;
 	struct flow_offload *flow;
@@ -228,6 +231,7 @@  static bool nf_flow_exceeds_mtu(const struct sk_buff *skb, unsigned int mtu)
 	unsigned int thoff;
 	struct iphdr *iph;
 	__be32 nexthop;
+	u16 vlan_tag;
 
 	if (skb->protocol != htons(ETH_P_IP))
 		return NF_ACCEPT;
@@ -235,14 +239,25 @@  static bool nf_flow_exceeds_mtu(const struct sk_buff *skb, unsigned int mtu)
 	if (nf_flow_tuple_ip(skb, state->in, &tuple) < 0)
 		return NF_ACCEPT;
 
+	if (family != NFPROTO_BRIDGE && family != NFPROTO_IPV4)
+		return NF_ACCEPT;
+
+	if (family == NFPROTO_BRIDGE && skb_vlan_tag_present(skb))
+		tuple.vlan_tag = skb_vlan_tag_get_id(skb);
+
 	tuplehash = flow_offload_lookup(flow_table, &tuple);
 	if (tuplehash == NULL)
 		return NF_ACCEPT;
 
 	dir = tuplehash->tuple.dir;
 	flow = container_of(tuplehash, struct flow_offload, tuplehash[dir]);
-	rt = (struct rtable *)flow->tuplehash[dir].tuple.dst.dst_cache;
-	outdev = rt->dst.dev;
+	if (family == NFPROTO_IPV4) {
+		rt = (struct rtable *)flow->tuplehash[dir].tuple.dst.dst_cache;
+		outdev = rt->dst.dev;
+	} else {
+		vlan_tag = flow->tuplehash[dir].tuple.dst.dst_port.dst_vlan_tag;
+		outdev = flow->tuplehash[dir].tuple.dst.dst_port.dev;
+	}
 
 	if (unlikely(nf_flow_exceeds_mtu(skb, flow->tuplehash[dir].tuple.mtu)))
 		return NF_ACCEPT;
@@ -258,13 +273,25 @@  static bool nf_flow_exceeds_mtu(const struct sk_buff *skb, unsigned int mtu)
 		return NF_DROP;
 
 	flow->timeout = (u32)jiffies + NF_FLOW_TIMEOUT;
-	iph = ip_hdr(skb);
-	ip_decrease_ttl(iph);
 
 	skb->dev = outdev;
-	nexthop = rt_nexthop(rt, flow->tuplehash[!dir].tuple.src_v4.s_addr);
-	skb_dst_set_noref(skb, &rt->dst);
-	neigh_xmit(NEIGH_ARP_TABLE, outdev, &nexthop, skb);
+	if (family == NFPROTO_IPV4) {
+		iph = ip_hdr(skb);
+		ip_decrease_ttl(iph);
+
+		nexthop = rt_nexthop(rt, flow->tuplehash[!dir].tuple.src_v4.s_addr);
+		skb_dst_set_noref(skb, &rt->dst);
+		neigh_xmit(NEIGH_ARP_TABLE, outdev, &nexthop, skb);
+	} else {
+		const struct net_bridge_port *p = br_port_get_rtnl_rcu(state->in);
+
+		if (vlan_tag && p)
+			__vlan_hwaccel_put_tag(skb, p->br->vlan_proto, vlan_tag);
+		else
+			__vlan_hwaccel_clear_tag(skb);
+
+		br_dev_queue_push_xmit(state->net, state->sk, skb);
+	}
 
 	return NF_STOLEN;
 }
diff --git a/net/netfilter/nft_flow_offload.c b/net/netfilter/nft_flow_offload.c
index cdb7c46..c88396a 100644
--- a/net/netfilter/nft_flow_offload.c
+++ b/net/netfilter/nft_flow_offload.c
@@ -14,6 +14,8 @@ 
 #include <linux/netfilter/nf_conntrack_common.h>
 #include <net/netfilter/nf_flow_table.h>
 
+#include "../bridge/br_private.h"
+
 struct nft_flow_offload {
 	struct nft_flowtable	*flowtable;
 };
@@ -49,6 +51,58 @@  static int nft_flow_route(const struct nft_pktinfo *pkt,
 	return 0;
 }
 
+static int nft_flow_forward(const struct nft_pktinfo *pkt,
+			    const struct nf_conn *ct,
+			    struct nf_flow_forward *forward,
+			    enum ip_conntrack_dir dir)
+{
+	struct net_bridge_vlan_group *vg;
+	const struct net_bridge_port *p;
+	u16 vid = 0;
+
+	if (skb_vlan_tag_present(pkt->skb))
+		vid = skb_vlan_tag_get_id(pkt->skb);
+
+	forward->tuple[dir].dst_port.dst_vlan_tag = vid;
+	forward->tuple[!dir].vlan_tag = vid;
+	forward->tuple[dir].dst_port.dev = nft_out(pkt);
+	dev_hold(forward->tuple[dir].dst_port.dev);
+	forward->tuple[!dir].dst_port.dev = nft_in(pkt);
+	dev_hold(forward->tuple[!dir].dst_port.dev);
+
+	rtnl_lock();
+	p = br_port_get_rtnl_rcu(nft_out(pkt));
+	if (p) {
+		if (!br_opt_get(p->br, BROPT_VLAN_ENABLED))
+			goto out;
+
+		if (!vid) {
+			vg = nbp_vlan_group_rcu(p);
+			vid = br_get_pvid(vg);
+		}
+
+		if (vid) {
+			struct bridge_vlan_info info;
+
+			if (br_vlan_get_info(nft_in(pkt), vid, &info) == 0 &&
+			    info.flags & BRIDGE_VLAN_INFO_UNTAGGED)
+				vid = 0;
+		}
+	} else {
+		rtnl_unlock();
+		dev_put(forward->tuple[dir].dst_port.dev);
+		dev_put(forward->tuple[!dir].dst_port.dev);
+		return -ENOENT;
+	}
+
+out:
+	rtnl_unlock();
+	forward->tuple[!dir].dst_port.dst_vlan_tag = vid;
+	forward->tuple[dir].vlan_tag = vid;
+
+	return 0;
+}
+
 static bool nft_flow_offload_skip(struct sk_buff *skb, int family)
 {
 	if (skb_sec_path(skb))
@@ -61,6 +115,15 @@  static bool nft_flow_offload_skip(struct sk_buff *skb, int family)
 
 		if (unlikely(opt->optlen))
 			return true;
+	} else if (family == NFPROTO_BRIDGE) {
+		const struct iphdr *iph;
+
+		if (skb->protocol != htons(ETH_P_IP))
+			return true;
+
+		iph = ip_hdr(skb);
+		if (iph->ihl > 5)
+			return true;
 	}
 
 	return false;
@@ -76,11 +139,12 @@  static void nft_flow_offload_eval(const struct nft_expr *expr,
 	struct nf_flow_data data;
 	struct flow_offload *flow;
 	enum ip_conntrack_dir dir;
+	int family = nft_pf(pkt);
 	bool is_tcp = false;
 	struct nf_conn *ct;
 	int ret;
 
-	if (nft_flow_offload_skip(pkt->skb, nft_pf(pkt)))
+	if (nft_flow_offload_skip(pkt->skb, family))
 		goto out;
 
 	ct = nf_ct_get(pkt->skb, &ctinfo);
@@ -108,8 +172,15 @@  static void nft_flow_offload_eval(const struct nft_expr *expr,
 		goto out;
 
 	dir = CTINFO2DIR(ctinfo);
-	if (nft_flow_route(pkt, ct, &data.route, dir) < 0)
-		goto err_flow_route;
+	if (family == NFPROTO_BRIDGE) {
+		data.type = FLOW_OFFLOAD_TYPE_BRIDGE;
+		if (nft_flow_forward(pkt, ct, &data.forward, dir) < 0)
+			goto err_flow_data;
+	} else {
+		data.type = FLOW_OFFLOAD_TYPE_INET;
+		if (nft_flow_route(pkt, ct, &data.route, dir) < 0)
+			goto err_flow_data;
+	}
 
 	flow = flow_offload_alloc(ct, &data);
 	if (!flow)
@@ -124,14 +195,20 @@  static void nft_flow_offload_eval(const struct nft_expr *expr,
 	if (ret < 0)
 		goto err_flow_add;
 
-	dst_release(data.route.tuple[!dir].dst);
+	if (family != NFPROTO_BRIDGE)
+		dst_release(data.route.tuple[!dir].dst);
 	return;
 
 err_flow_add:
 	flow_offload_free(flow);
 err_flow_alloc:
-	dst_release(data.route.tuple[!dir].dst);
-err_flow_route:
+	if (family == NFPROTO_BRIDGE) {
+		dev_put(data.forward.tuple[dir].dst_port.dev);
+		dev_put(data.forward.tuple[!dir].dst_port.dev);
+	} else {
+		dst_release(data.route.tuple[!dir].dst);
+	}
+err_flow_data:
 	clear_bit(IPS_OFFLOAD_BIT, &ct->status);
 out:
 	regs->verdict.code = NFT_BREAK;