@@ -319,6 +319,42 @@ int nf_flow_rule_route_ipv6(struct net *net, const struct flow_offload *flow,
int nf_flow_table_offload_init(void);
void nf_flow_table_offload_exit(void);
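+/* Prepend a PPPoE session header and PPP protocol field to the skb. */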
+static inline int nf_flow_pppoe_push(struct sk_buff *skb, u16 id)
+{
+ struct ppp_hdr {
+ struct pppoe_hdr hdr;
+ __be16 proto;
+ } *ph;
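+ /* PPPoE payload length includes the 2-byte PPP protocol field */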
+ int data_len = skb->len + 2;
+ __be16 proto;
+
+ if (skb_cow_head(skb, PPPOE_SES_HLEN))
+ return -1;
+
+ if (skb->protocol == htons(ETH_P_IP))
+ proto = htons(PPP_IP);
+ else if (skb->protocol == htons(ETH_P_IPV6))
+ proto = htons(PPP_IPV6);
+ else
+ return -1;
+
+ __skb_push(skb, PPPOE_SES_HLEN);
+ skb_reset_network_header(skb);
+
+ ph = (struct ppp_hdr *)(skb->data);
+ ph->hdr.ver = 1;
+ ph->hdr.type = 1;
+ ph->hdr.code = 0;
+ ph->hdr.sid = htons(id);
+ ph->hdr.length = htons(data_len);
+ ph->proto = proto;
+ skb->protocol = htons(ETH_P_PPP_SES);
+
+ return 0;
+}
+
static inline __be16 nf_flow_pppoe_proto(const struct sk_buff *skb)
{
__be16 proto;
@@ -304,6 +304,9 @@ static void nf_flow_encap_push(struct sk_buff *skb,
tuplehash->tuple.encap[i].proto,
tuplehash->tuple.encap[i].id);
break;
+ case htons(ETH_P_PPP_SES):
+ nf_flow_pppoe_push(skb, tuplehash->tuple.encap[i].id);
+ break;
}
}
}
@@ -126,13 +126,9 @@ static void nft_dev_path_info(const struct net_device_path_stack *stack,
info->encap[info->num_encaps].id = path->encap.id;
info->encap[info->num_encaps].proto = path->encap.proto;
info->num_encaps++;
- if (path->type == DEV_PATH_PPPOE) {
- if (!info->outdev)
- info->outdev = path->dev;
+ if (path->type == DEV_PATH_PPPOE)
memcpy(info->h_dest, path->encap.h_dest, ETH_ALEN);
- }
- if (path->type == DEV_PATH_VLAN)
- info->xmit_type = FLOW_OFFLOAD_XMIT_DIRECT;
+ info->xmit_type = FLOW_OFFLOAD_XMIT_DIRECT;
break;
case DEV_PATH_BRIDGE:
if (is_zero_ether_addr(info->h_source))
@@ -160,8 +156,7 @@ static void nft_dev_path_info(const struct net_device_path_stack *stack,
break;
}
}
- if (!info->outdev)
- info->outdev = info->indev;
+ info->outdev = info->indev;
info->hw_outdev = info->indev;