diff mbox series

[bpf-next,v2,3/3] selftests: bpf: add test_lwt_ip_encap selftest

Message ID 20190124193418.81674-4-posk@google.com
State Changes Requested
Delegated to: BPF Maintainers
Headers show
Series bpf: add BPF_LWT_ENCAP_IP option to bpf_lwt_push_encap | expand

Commit Message

Peter Oskolkov Jan. 24, 2019, 7:34 p.m. UTC
This patch adds a bpf self-test to cover BPF_LWT_ENCAP_IP mode
in bpf_lwt_push_encap.

Covered:
- encapping in LWT_IN and LWT_XMIT
- IPv4 and IPv6

Signed-off-by: Peter Oskolkov <posk@google.com>
---
 tools/testing/selftests/bpf/Makefile          |   5 +-
 .../testing/selftests/bpf/test_lwt_ip_encap.c | 125 +++++++
 .../selftests/bpf/test_lwt_ip_encap.sh        | 316 ++++++++++++++++++
 3 files changed, 444 insertions(+), 2 deletions(-)
 create mode 100644 tools/testing/selftests/bpf/test_lwt_ip_encap.c
 create mode 100755 tools/testing/selftests/bpf/test_lwt_ip_encap.sh

Comments

Alexei Starovoitov Jan. 26, 2019, 9:46 p.m. UTC | #1
On Thu, Jan 24, 2019 at 11:34 AM Peter Oskolkov <posk@google.com> wrote:
>
> This patch adds a bpf self-test to cover BPF_LWT_ENCAP_IP mode
> in bpf_lwt_push_encap.
>
> Covered:
> - encapping in LWT_IN and LWT_XMIT
> - IPv4 and IPv6
>
> Signed-off-by: Peter Oskolkov <posk@google.com>
> ---
>  tools/testing/selftests/bpf/Makefile          |   5 +-

no longer applies cleanly.
please rebase.
thanks
Alexei Starovoitov Jan. 26, 2019, 9:47 p.m. UTC | #2
On Sat, Jan 26, 2019 at 1:46 PM Alexei Starovoitov
<alexei.starovoitov@gmail.com> wrote:
>
> On Thu, Jan 24, 2019 at 11:34 AM Peter Oskolkov <posk@google.com> wrote:
> >
> > This patch adds a bpf self-test to cover BPF_LWT_ENCAP_IP mode
> > in bpf_lwt_push_encap.
> >
> > Covered:
> > - encapping in LWT_IN and LWT_XMIT
> > - IPv4 and IPv6
> >
> > Signed-off-by: Peter Oskolkov <posk@google.com>
> > ---
> >  tools/testing/selftests/bpf/Makefile          |   5 +-
>
> no longer applies cleanly.
> please rebase.
> thanks

oops. wrong thread.
Meant to say that Stanislav's patch doesn't apply.
David Ahern Jan. 28, 2019, 8:31 p.m. UTC | #3
On 1/24/19 12:34 PM, Peter Oskolkov wrote:
> This patch adds a bpf self-test to cover BPF_LWT_ENCAP_IP mode
> in bpf_lwt_push_encap.
> 
> Covered:
> - encapping in LWT_IN and LWT_XMIT
> - IPv4 and IPv6
> 
> Signed-off-by: Peter Oskolkov <posk@google.com>
> ---
>  tools/testing/selftests/bpf/Makefile          |   5 +-
>  .../testing/selftests/bpf/test_lwt_ip_encap.c | 125 +++++++
>  .../selftests/bpf/test_lwt_ip_encap.sh        | 316 ++++++++++++++++++
>  3 files changed, 444 insertions(+), 2 deletions(-)
>  create mode 100644 tools/testing/selftests/bpf/test_lwt_ip_encap.c
>  create mode 100755 tools/testing/selftests/bpf/test_lwt_ip_encap.sh
> 
> diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile
> index 70229de510f5..407c51cc5f07 100644
> --- a/tools/testing/selftests/bpf/Makefile
> +++ b/tools/testing/selftests/bpf/Makefile
> @@ -39,7 +39,7 @@ TEST_GEN_FILES = test_pkt_access.o test_xdp.o test_l4lb.o test_tcp_estats.o test
>  	get_cgroup_id_kern.o socket_cookie_prog.o test_select_reuseport_kern.o \
>  	test_skb_cgroup_id_kern.o bpf_flow.o netcnt_prog.o \
>  	test_sk_lookup_kern.o test_xdp_vlan.o test_queue_map.o test_stack_map.o \
> -	xdp_dummy.o test_map_in_map.o
> +	xdp_dummy.o test_map_in_map.o test_lwt_ip_encap.o
>  
>  # Order correspond to 'make run_tests' order
>  TEST_PROGS := test_kmod.sh \
> @@ -53,7 +53,8 @@ TEST_PROGS := test_kmod.sh \
>  	test_lirc_mode2.sh \
>  	test_skb_cgroup_id.sh \
>  	test_flow_dissector.sh \
> -	test_xdp_vlan.sh
> +	test_xdp_vlan.sh \
> +	test_lwt_ip_encap.sh
>  
>  TEST_PROGS_EXTENDED := with_addr.sh \
>  	tcp_client.py \
> diff --git a/tools/testing/selftests/bpf/test_lwt_ip_encap.c b/tools/testing/selftests/bpf/test_lwt_ip_encap.c
> new file mode 100644
> index 000000000000..a7014277f3fe
> --- /dev/null
> +++ b/tools/testing/selftests/bpf/test_lwt_ip_encap.c
> @@ -0,0 +1,125 @@
> +// SPDX-License-Identifier: GPL-2.0
> +#include <linux/bpf.h>
> +#include <string.h>
> +#include "bpf_helpers.h"
> +#include "bpf_endian.h"
> +
> +#define BPF_LWT_ENCAP_IP 2
> +#define BPF_LWT_REROUTE 128

if you sync bpf.h header to tools/include/uapi/linux/bpf.h you should
not need to hard code those.


> +
> +struct iphdr {
> +#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
> +	__u8	ihl:4,
> +		version:4;
> +#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
> +	__u8	version:4,
> +		ihl:4;
> +#else
> +#error "Fix your compiler's __BYTE_ORDER__?!"
> +#endif
> +	__u8	tos;
> +	__be16	tot_len;
> +	__be16	id;
> +	__be16	frag_off;
> +	__u8	ttl;
> +	__u8	protocol;
> +	__sum16	check;
> +	__be32	saddr;
> +	__be32	daddr;
> +};
> +
> +struct ipv6hdr {
> +#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
> +	__u8	priority:4,
> +		version:4;
> +#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
> +	__u8	version:4,
> +		priority:4;
> +#else
> +#error "Fix your compiler's __BYTE_ORDER__?!"
> +#endif
> +	__u8	flow_lbl[3];
> +
> +	__be16	payload_len;
> +	__u8	nexthdr;
> +	__u8	hop_limit;
> +
> +	__u8	saddr[16];
> +	__u8	daddr[16];
> +};

you should be using proper header files for iphdr and ipv6hdr


> +
> +struct grehdr {
> +	__be16 flags;
> +	__be16 protocol;
> +};
> +
> +SEC("encap_gre")
> +int bpf_lwt_encap_gre(struct __sk_buff *skb)
> +{
> +	struct encap_hdr {
> +		struct iphdr iph;
> +		struct grehdr greh;
> +	} hdr;
> +	int err;
> +
> +	memset(&hdr, 0, sizeof(struct encap_hdr));
> +
> +	hdr.iph.ihl = 5;
> +	hdr.iph.version = 4;
> +	hdr.iph.ttl = 0x40;
> +	hdr.iph.protocol = 47;  /* IPPROTO_GRE */
> +#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
> +	hdr.iph.saddr = 0x640110ac;  /* 172.16.1.100 */
> +	hdr.iph.daddr = 0x641010ac;  /* 172.16.16.100 */
> +#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
> +	hdr.iph.saddr = 0xac100164;  /* 172.16.1.100 */
> +	hdr.iph.daddr = 0xac101064;  /* 172.16.16.100 */
> +#else
> +#error "Fix your compiler's __BYTE_ORDER__?!"
> +#endif
> +	hdr.iph.tot_len = bpf_htons(skb->len + sizeof(struct encap_hdr));
> +
> +	hdr.greh.protocol = skb->protocol;
> +
> +	err = bpf_lwt_push_encap(skb, BPF_LWT_ENCAP_IP, &hdr,
> +				 sizeof(struct encap_hdr));
> +	if (err)
> +		return BPF_DROP;
> +	return BPF_LWT_REROUTE;
> +}
> +
> +SEC("encap_gre6")
> +int bpf_lwt_encap_gre6(struct __sk_buff *skb)
> +{
> +	struct encap_hdr {
> +		struct ipv6hdr ip6hdr;
> +		struct grehdr greh;
> +	} hdr;
> +	int err;
> +
> +	memset(&hdr, 0, sizeof(struct encap_hdr));
> +
> +	hdr.ip6hdr.version = 6;
> +	hdr.ip6hdr.payload_len = bpf_htons(skb->len + sizeof(struct grehdr));
> +	hdr.ip6hdr.nexthdr = 47;  /* IPPROTO_GRE */
> +	hdr.ip6hdr.hop_limit = 0x40;
> +	/* fb01::1 */
> +	hdr.ip6hdr.saddr[0] = 0xfb;
> +	hdr.ip6hdr.saddr[1] = 1;
> +	hdr.ip6hdr.saddr[15] = 1;
> +	/* fb10::1 */
> +	hdr.ip6hdr.daddr[0] = 0xfb;
> +	hdr.ip6hdr.daddr[1] = 0x10;
> +	hdr.ip6hdr.daddr[15] = 1;
> +
> +	hdr.greh.protocol = skb->protocol;
> +
> +	err = bpf_lwt_push_encap(skb, BPF_LWT_ENCAP_IP, &hdr,
> +				 sizeof(struct encap_hdr));
> +	if (err)
> +		return BPF_DROP;
> +
> +	return BPF_LWT_REROUTE;
> +}
> +
> +char _license[] SEC("license") = "GPL";
> diff --git a/tools/testing/selftests/bpf/test_lwt_ip_encap.sh b/tools/testing/selftests/bpf/test_lwt_ip_encap.sh
> new file mode 100755
> index 000000000000..4f511587bb74
> --- /dev/null
> +++ b/tools/testing/selftests/bpf/test_lwt_ip_encap.sh
> @@ -0,0 +1,316 @@
> +#!/bin/bash
> +# SPDX-License-Identifier: GPL-2.0
> +#
> +# Setup/topology:
> +#
> +#    NS1             NS2             NS3
> +#   veth1 <---> veth2   veth3 <---> veth4 (the top route)
> +#   veth5 <---> veth6   veth7 <---> veth8 (the bottom route)
> +#
> +#   each vethN gets IPv[4|6]_N address
> +#
> +#   IPv*_SRC = IPv*_1
> +#   IPv*_DST = IPv*_4
> +#
> +#   all tests test pings from IPv*_SRC to IPv*_DST
> +#
> +#   by default, routes are configured to allow packets to go
> +#   IP*_1 <=> IP*_2 <=> IP*_3 <=> IP*_4 (the top route)
> +#
> +#   a GRE device is installed in NS3 with IPv*_GRE, and
> +#   NS1/NS2 are configured to route packets to IPv*_GRE via IP*_8
> +#   (the bottom route)
> +#
> +# Tests:
> +#
> +#   1. routes NS2->IPv*_DST are brought down, so the only way a ping
> +#      from IP*_SRC to IP*_DST can work is via IPv*_GRE
> +#
> +#   2a. in an egress test, a bpf LWT_XMIT program is installed on veth1
> +#       that encaps the packets with an IP/GRE header to route to IPv*_GRE
> +#
> +#       ping: SRC->[encap at veth1:egress]->GRE:decap->DST
> +#       ping replies go DST->SRC directly
> +#
> +#   2b. in an ingress test, a bpf LWT_IN program is installed on veth2
> +#       that encaps the packets with an IP/GRE header to route to IPv*_GRE
> +#
> +#       ping: SRC->[encap at veth2:ingress]->GRE:decap->DST
> +#       ping replies go DST->SRC directly
> +
> +set +x  # debug OFF

please don't do that. Being able to run a shell script with bash -x is
really helpful.

> +set -e  # exit on error
> +
> +if [[ $EUID -ne 0 ]]; then
> +	echo "This script must be run as root"
> +	echo "FAIL"
> +	exit 1
> +fi
> +
> +readonly NS1="ns1-$(mktemp -u XXXXXX)"
> +readonly NS2="ns2-$(mktemp -u XXXXXX)"
> +readonly NS3="ns3-$(mktemp -u XXXXXX)"
> +
> +readonly IPv4_1="172.16.1.100"
> +readonly IPv4_2="172.16.2.100"
> +readonly IPv4_3="172.16.3.100"
> +readonly IPv4_4="172.16.4.100"
> +readonly IPv4_5="172.16.5.100"
> +readonly IPv4_6="172.16.6.100"
> +readonly IPv4_7="172.16.7.100"
> +readonly IPv4_8="172.16.8.100"
> +readonly IPv4_GRE="172.16.16.100"
> +
> +readonly IPv4_SRC=$IPv4_1
> +readonly IPv4_DST=$IPv4_4
> +
> +readonly IPv6_1="fb01::1"
> +readonly IPv6_2="fb02::1"
> +readonly IPv6_3="fb03::1"
> +readonly IPv6_4="fb04::1"
> +readonly IPv6_5="fb05::1"
> +readonly IPv6_6="fb06::1"
> +readonly IPv6_7="fb07::1"
> +readonly IPv6_8="fb08::1"
> +readonly IPv6_GRE="fb10::1"
> +
> +readonly IPv6_SRC=$IPv6_1
> +readonly IPv6_DST=$IPv6_4
> +
> +setup() {
> +set -e  # exit on error
> +	# create devices and namespaces
> +	ip netns add "${NS1}"
> +	ip netns add "${NS2}"
> +	ip netns add "${NS3}"
> +
> +	ip link add veth1 type veth peer name veth2
> +	ip link add veth3 type veth peer name veth4
> +	ip link add veth5 type veth peer name veth6
> +	ip link add veth7 type veth peer name veth8
> +
> +	ip netns exec ${NS2} sysctl -w net.ipv4.ip_forward=1 2>&1 > /dev/null
> +	ip netns exec ${NS2} sysctl -w net.ipv6.conf.all.forwarding=1 2>&1 > /dev/null

-q on the sysctl will squash the output. Errors are worth seeing

> +
> +	ip link set veth1 netns ${NS1}
> +	ip link set veth2 netns ${NS2}
> +	ip link set veth3 netns ${NS2}
> +	ip link set veth4 netns ${NS3}
> +	ip link set veth5 netns ${NS1}
> +	ip link set veth6 netns ${NS2}
> +	ip link set veth7 netns ${NS2}
> +	ip link set veth8 netns ${NS3}
> +
> +	# configure addresses: the top route (1-2-3-4)
> +	ip -netns ${NS1}    addr add ${IPv4_1}/24  dev veth1
> +	ip -netns ${NS2}    addr add ${IPv4_2}/24  dev veth2
> +	ip -netns ${NS2}    addr add ${IPv4_3}/24  dev veth3
> +	ip -netns ${NS3}    addr add ${IPv4_4}/24  dev veth4
> +	ip -netns ${NS1} -6 addr add ${IPv6_1}/128 dev veth1
> +	ip -netns ${NS2} -6 addr add ${IPv6_2}/128 dev veth2
> +	ip -netns ${NS2} -6 addr add ${IPv6_3}/128 dev veth3
> +	ip -netns ${NS3} -6 addr add ${IPv6_4}/128 dev veth4
> +
> +	# configure addresses: the bottom route (5-6-7-8)
> +	ip -netns ${NS1}    addr add ${IPv4_5}/24  dev veth5
> +	ip -netns ${NS2}    addr add ${IPv4_6}/24  dev veth6
> +	ip -netns ${NS2}    addr add ${IPv4_7}/24  dev veth7
> +	ip -netns ${NS3}    addr add ${IPv4_8}/24  dev veth8
> +	ip -netns ${NS1} -6 addr add ${IPv6_5}/128 dev veth5
> +	ip -netns ${NS2} -6 addr add ${IPv6_6}/128 dev veth6
> +	ip -netns ${NS2} -6 addr add ${IPv6_7}/128 dev veth7
> +	ip -netns ${NS3} -6 addr add ${IPv6_8}/128 dev veth8
> +
> +
> +	ip -netns ${NS1} link set dev veth1 up
> +	ip -netns ${NS2} link set dev veth2 up
> +	ip -netns ${NS2} link set dev veth3 up
> +	ip -netns ${NS3} link set dev veth4 up
> +	ip -netns ${NS1} link set dev veth5 up
> +	ip -netns ${NS2} link set dev veth6 up
> +	ip -netns ${NS2} link set dev veth7 up
> +	ip -netns ${NS3} link set dev veth8 up
> +
> +	# configure routes: IP*_SRC -> veth1/IP*_2 (= top route) default;
> +	# the bottom route to specific bottom addresses
> +
> +	# NS1
> +	# top route
> +	ip -netns ${NS1}    route add ${IPv4_2}/32  dev veth1
> +	ip -netns ${NS1}    route add default dev veth1 via ${IPv4_2}  # go top by default
> +	ip -netns ${NS1} -6 route add ${IPv6_2}/128 dev veth1
> +	ip -netns ${NS1} -6 route add default dev veth1 via ${IPv6_2}  # go top by default
> +	# bottom route
> +	ip -netns ${NS1}    route add ${IPv4_6}/32  dev veth5
> +	ip -netns ${NS1}    route add ${IPv4_7}/32  dev veth5 via ${IPv4_6}
> +	ip -netns ${NS1}    route add ${IPv4_8}/32  dev veth5 via ${IPv4_6}
> +	ip -netns ${NS1} -6 route add ${IPv6_6}/128 dev veth5
> +	ip -netns ${NS1} -6 route add ${IPv6_7}/128 dev veth5 via ${IPv6_6}
> +	ip -netns ${NS1} -6 route add ${IPv6_8}/128 dev veth5 via ${IPv6_6}
> +
> +	# NS2
> +	# top route
> +	ip -netns ${NS2}    route add ${IPv4_1}/32  dev veth2
> +	ip -netns ${NS2}    route add ${IPv4_4}/32  dev veth3
> +	ip -netns ${NS2} -6 route add ${IPv6_1}/128 dev veth2
> +	ip -netns ${NS2} -6 route add ${IPv6_4}/128 dev veth3
> +	# bottom route
> +	ip -netns ${NS2}    route add ${IPv4_5}/32  dev veth6
> +	ip -netns ${NS2}    route add ${IPv4_8}/32  dev veth7
> +	ip -netns ${NS2} -6 route add ${IPv6_5}/128 dev veth6
> +	ip -netns ${NS2} -6 route add ${IPv6_8}/128 dev veth7
> +
> +	# NS3
> +	# top route
> +	ip -netns ${NS3}    route add ${IPv4_3}/32  dev veth4
> +	ip -netns ${NS3}    route add ${IPv4_1}/32  dev veth4 via ${IPv4_3}
> +	ip -netns ${NS3}    route add ${IPv4_2}/32  dev veth4 via ${IPv4_3}
> +	ip -netns ${NS3} -6 route add ${IPv6_3}/128 dev veth4
> +	ip -netns ${NS3} -6 route add ${IPv6_1}/128 dev veth4 via ${IPv6_3}
> +	ip -netns ${NS3} -6 route add ${IPv6_2}/128 dev veth4 via ${IPv6_3}
> +	# bottom route
> +	ip -netns ${NS3}    route add ${IPv4_7}/32  dev veth8
> +	ip -netns ${NS3}    route add ${IPv4_5}/32  dev veth8 via ${IPv4_7}
> +	ip -netns ${NS3}    route add ${IPv4_6}/32  dev veth8 via ${IPv4_7}
> +	ip -netns ${NS3} -6 route add ${IPv6_7}/128 dev veth8
> +	ip -netns ${NS3} -6 route add ${IPv6_5}/128 dev veth8 via ${IPv6_7}
> +	ip -netns ${NS3} -6 route add ${IPv6_6}/128 dev veth8 via ${IPv6_7}
> +
> +	# configure IPv4 GRE device in NS3, and a route to it via the "bottom" route
> +	ip -netns ${NS3} tunnel add gre_dev mode gre remote ${IPv4_1} local ${IPv4_GRE} ttl 255
> +	ip -netns ${NS3} link set gre_dev up
> +	ip -netns ${NS3} addr add ${IPv4_GRE} dev gre_dev
> +	ip -netns ${NS1} route add ${IPv4_GRE}/32 dev veth5 via ${IPv4_6}
> +	ip -netns ${NS2} route add ${IPv4_GRE}/32 dev veth7 via ${IPv4_8}
> +
> +
> +	# configure IPv6 GRE device in NS3, and a route to it via the "bottom" route
> +	ip -netns ${NS3} -6 tunnel add name gre6_dev mode ip6gre remote ${IPv6_1} local ${IPv6_GRE} ttl 255
> +	ip -netns ${NS3} link set gre6_dev up
> +	ip -netns ${NS3} -6 addr add ${IPv6_GRE} dev gre6_dev
> +	ip -netns ${NS1} -6 route add ${IPv6_GRE}/128 dev veth5 via ${IPv6_6}
> +	ip -netns ${NS2} -6 route add ${IPv6_GRE}/128 dev veth7 via ${IPv6_8}
> +
> +	# rp_filter gets confused by what these tests are doing, so disable it
> +	ip netns exec ${NS1} sysctl -w net.ipv4.conf.all.rp_filter=0 2>&1 > /dev/null
> +	ip netns exec ${NS2} sysctl -w net.ipv4.conf.all.rp_filter=0 2>&1 > /dev/null
> +	ip netns exec ${NS3} sysctl -w net.ipv4.conf.all.rp_filter=0 2>&1 > /dev/null
> +}
> +
> +cleanup() {
> +	ip netns del ${NS1} 2> /dev/null
> +	ip netns del ${NS2} 2> /dev/null
> +	ip netns del ${NS3} 2> /dev/null
> +}
> +
> +trap cleanup EXIT
> +
> +test_ping() {
> +	local readonly PROTO=$1
> +	local readonly EXPECTED=$2
> +	local RET=0
> +
> +	set +e
> +	if [ "${PROTO}" == "IPv4" ] ; then
> +		ip netns exec ${NS1} ping  -c 1 -W 1 -I ${IPv4_SRC} ${IPv4_DST} 2>&1 > /dev/null
> +		# ip netns exec ${NS1} ping  -c 1 -W 10 -I ${IPv4_SRC} ${IPv4_DST}
> +		RET=$?
> +	elif [ "${PROTO}" == "IPv6" ] ; then
> +		ip netns exec ${NS1} ping6 -c 1 -W 6 -I ${IPv6_SRC} ${IPv6_DST} 2>&1 > /dev/null
> +		# ip netns exec ${NS1} ping6 -c 1 -W 6 -I ${IPv6_SRC} ${IPv6_DST}
> +		RET=$?
> +	else
> +		echo "test_ping: unknown PROTO: ${PROTO}"
> +		exit 1
> +	fi
> +	set -e
> +
> +	if [ "0" != "${RET}" ]; then
> +		RET=1
> +	fi
> +
> +	if [ "${EXPECTED}" != "${RET}" ] ; then
> +		echo "FAIL: test_ping: ${RET}"
> +		exit 1
> +	fi
> +}
> +
> +test_egress() {
> +	local readonly ENCAP=$1
> +	echo "starting egress ${ENCAP} encap test"
> +	setup
> +
> +	# need to wait a bit for IPv6 to autoconf, otherwise
> +	# ping6 sometimes fails with "unable to bind to address"
> +	sleep 1

My attempts to run this test script fail:
$ ./test_lwt_ip_encap.sh
starting egress IPv4 encap test
PASS
starting egress IPv6 encap test
ping: bind icmp socket: Cannot assign requested address
FAIL: test_ping: 1

Adding 'nodad' for IPv6 addresses avoids the race of DAD completing
before the ping6 is attempted. And it removes the need for the sleep 1.


> +
> +	# by default, pings work
> +	test_ping IPv4 0
> +	test_ping IPv6 0
> +
> +	# remove NS2->DST routes, ping fails
> +	ip -netns ${NS2}    route del ${IPv4_DST}/32  dev veth3
> +	ip -netns ${NS2} -6 route del ${IPv6_DST}/128 dev veth3
> +	test_ping IPv4 1
> +	test_ping IPv6 1
> +
> +	# install replacement routes (LWT/eBPF), pings succeed
> +	if [ "${ENCAP}" == "IPv4" ] ; then
> +		ip -netns ${NS1} route add ${IPv4_DST} encap bpf xmit obj test_lwt_ip_encap.o sec encap_gre dev veth1
> +		ip -netns ${NS1} -6 route add ${IPv6_DST} encap bpf xmit obj test_lwt_ip_encap.o sec encap_gre dev veth1
> +	elif [ "${ENCAP}" == "IPv6" ] ; then
> +		ip -netns ${NS1} route add ${IPv4_DST} encap bpf xmit obj test_lwt_ip_encap.o sec encap_gre6 dev veth1
> +		ip -netns ${NS1} -6 route add ${IPv6_DST} encap bpf xmit obj test_lwt_ip_encap.o sec encap_gre6 dev veth1
> +	else
> +		echo "FAIL: unknown encap ${ENCAP}"
> +	fi
> +	test_ping IPv4 0
> +	test_ping IPv6 0
> +
> +	cleanup
> +	echo "PASS"
> +}
> +
> +test_ingress() {
> +	local readonly ENCAP=$1
> +	echo "starting ingress ${ENCAP} encap test"
> +	setup
> +
> +	# need to wait a bit for IPv6 to autoconf, otherwise
> +	# ping6 sometimes fails with "unable to bind to address"
> +	sleep 1
> +
> +	# by default, pings work
> +	test_ping IPv4 0
> +	test_ping IPv6 0
> +
> +	# remove NS2->DST routes, pings fail
> +	ip -netns ${NS2}    route del ${IPv4_DST}/32  dev veth3
> +	ip -netns ${NS2} -6 route del ${IPv6_DST}/128 dev veth3
> +	test_ping IPv4 1
> +	test_ping IPv6 1
> +
> +	# install replacement routes (LWT/eBPF), pings succeed
> +	if [ "${ENCAP}" == "IPv4" ] ; then
> +		ip -netns ${NS2} route add ${IPv4_DST} encap bpf in obj test_lwt_ip_encap.o sec encap_gre dev veth2
> +		ip -netns ${NS2} -6 route add ${IPv6_DST} encap bpf in obj test_lwt_ip_encap.o sec encap_gre dev veth2
> +	elif [ "${ENCAP}" == "IPv6" ] ; then
> +		ip -netns ${NS2} route add ${IPv4_DST} encap bpf in obj test_lwt_ip_encap.o sec encap_gre6 dev veth2
> +		ip -netns ${NS2} -6 route add ${IPv6_DST} encap bpf in obj test_lwt_ip_encap.o sec encap_gre6 dev veth2
> +	else
> +		echo "FAIL: unknown encap ${ENCAP}"
> +	fi
> +	test_ping IPv4 0
> +	test_ping IPv6 0
> +
> +	cleanup
> +	echo "PASS"
> +}
> +
> +test_egress IPv4
> +test_egress IPv6
> +
> +test_ingress IPv4
> +test_ingress IPv6
> +
> +echo "all tests passed"
> 

And adding a VRF version of this test fails as I suspected. See comments
on patch 2.
diff mbox series

Patch

diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile
index 70229de510f5..407c51cc5f07 100644
--- a/tools/testing/selftests/bpf/Makefile
+++ b/tools/testing/selftests/bpf/Makefile
@@ -39,7 +39,7 @@  TEST_GEN_FILES = test_pkt_access.o test_xdp.o test_l4lb.o test_tcp_estats.o test
 	get_cgroup_id_kern.o socket_cookie_prog.o test_select_reuseport_kern.o \
 	test_skb_cgroup_id_kern.o bpf_flow.o netcnt_prog.o \
 	test_sk_lookup_kern.o test_xdp_vlan.o test_queue_map.o test_stack_map.o \
-	xdp_dummy.o test_map_in_map.o
+	xdp_dummy.o test_map_in_map.o test_lwt_ip_encap.o
 
 # Order correspond to 'make run_tests' order
 TEST_PROGS := test_kmod.sh \
@@ -53,7 +53,8 @@  TEST_PROGS := test_kmod.sh \
 	test_lirc_mode2.sh \
 	test_skb_cgroup_id.sh \
 	test_flow_dissector.sh \
-	test_xdp_vlan.sh
+	test_xdp_vlan.sh \
+	test_lwt_ip_encap.sh
 
 TEST_PROGS_EXTENDED := with_addr.sh \
 	tcp_client.py \
diff --git a/tools/testing/selftests/bpf/test_lwt_ip_encap.c b/tools/testing/selftests/bpf/test_lwt_ip_encap.c
new file mode 100644
index 000000000000..a7014277f3fe
--- /dev/null
+++ b/tools/testing/selftests/bpf/test_lwt_ip_encap.c
@@ -0,0 +1,125 @@ 
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/bpf.h>
+#include <string.h>
+#include "bpf_helpers.h"
+#include "bpf_endian.h"
+
+#define BPF_LWT_ENCAP_IP 2
+#define BPF_LWT_REROUTE 128
+
+struct iphdr {
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+	__u8	ihl:4,
+		version:4;
+#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+	__u8	version:4,
+		ihl:4;
+#else
+#error "Fix your compiler's __BYTE_ORDER__?!"
+#endif
+	__u8	tos;
+	__be16	tot_len;
+	__be16	id;
+	__be16	frag_off;
+	__u8	ttl;
+	__u8	protocol;
+	__sum16	check;
+	__be32	saddr;
+	__be32	daddr;
+};
+
+struct ipv6hdr {
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+	__u8	priority:4,
+		version:4;
+#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+	__u8	version:4,
+		priority:4;
+#else
+#error "Fix your compiler's __BYTE_ORDER__?!"
+#endif
+	__u8	flow_lbl[3];
+
+	__be16	payload_len;
+	__u8	nexthdr;
+	__u8	hop_limit;
+
+	__u8	saddr[16];
+	__u8	daddr[16];
+};
+
+struct grehdr {
+	__be16 flags;
+	__be16 protocol;
+};
+
+SEC("encap_gre")
+int bpf_lwt_encap_gre(struct __sk_buff *skb)
+{
+	struct encap_hdr {
+		struct iphdr iph;
+		struct grehdr greh;
+	} hdr;
+	int err;
+
+	memset(&hdr, 0, sizeof(struct encap_hdr));
+
+	hdr.iph.ihl = 5;
+	hdr.iph.version = 4;
+	hdr.iph.ttl = 0x40;
+	hdr.iph.protocol = 47;  /* IPPROTO_GRE */
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+	hdr.iph.saddr = 0x640110ac;  /* 172.16.1.100 */
+	hdr.iph.daddr = 0x641010ac;  /* 172.16.16.100 */
+#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+	hdr.iph.saddr = 0xac100164;  /* 172.16.1.100 */
+	hdr.iph.daddr = 0xac101064;  /* 172.16.16.100 */
+#else
+#error "Fix your compiler's __BYTE_ORDER__?!"
+#endif
+	hdr.iph.tot_len = bpf_htons(skb->len + sizeof(struct encap_hdr));
+
+	hdr.greh.protocol = skb->protocol;
+
+	err = bpf_lwt_push_encap(skb, BPF_LWT_ENCAP_IP, &hdr,
+				 sizeof(struct encap_hdr));
+	if (err)
+		return BPF_DROP;
+	return BPF_LWT_REROUTE;
+}
+
+SEC("encap_gre6")
+int bpf_lwt_encap_gre6(struct __sk_buff *skb)
+{
+	struct encap_hdr {
+		struct ipv6hdr ip6hdr;
+		struct grehdr greh;
+	} hdr;
+	int err;
+
+	memset(&hdr, 0, sizeof(struct encap_hdr));
+
+	hdr.ip6hdr.version = 6;
+	hdr.ip6hdr.payload_len = bpf_htons(skb->len + sizeof(struct grehdr));
+	hdr.ip6hdr.nexthdr = 47;  /* IPPROTO_GRE */
+	hdr.ip6hdr.hop_limit = 0x40;
+	/* fb01::1 */
+	hdr.ip6hdr.saddr[0] = 0xfb;
+	hdr.ip6hdr.saddr[1] = 1;
+	hdr.ip6hdr.saddr[15] = 1;
+	/* fb10::1 */
+	hdr.ip6hdr.daddr[0] = 0xfb;
+	hdr.ip6hdr.daddr[1] = 0x10;
+	hdr.ip6hdr.daddr[15] = 1;
+
+	hdr.greh.protocol = skb->protocol;
+
+	err = bpf_lwt_push_encap(skb, BPF_LWT_ENCAP_IP, &hdr,
+				 sizeof(struct encap_hdr));
+	if (err)
+		return BPF_DROP;
+
+	return BPF_LWT_REROUTE;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/test_lwt_ip_encap.sh b/tools/testing/selftests/bpf/test_lwt_ip_encap.sh
new file mode 100755
index 000000000000..4f511587bb74
--- /dev/null
+++ b/tools/testing/selftests/bpf/test_lwt_ip_encap.sh
@@ -0,0 +1,316 @@ 
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+#
+# Setup/topology:
+#
+#    NS1             NS2             NS3
+#   veth1 <---> veth2   veth3 <---> veth4 (the top route)
+#   veth5 <---> veth6   veth7 <---> veth8 (the bottom route)
+#
+#   each vethN gets IPv[4|6]_N address
+#
+#   IPv*_SRC = IPv*_1
+#   IPv*_DST = IPv*_4
+#
+#   all tests test pings from IPv*_SRC to IPv*_DST
+#
+#   by default, routes are configured to allow packets to go
+#   IP*_1 <=> IP*_2 <=> IP*_3 <=> IP*_4 (the top route)
+#
+#   a GRE device is installed in NS3 with IPv*_GRE, and
+#   NS1/NS2 are configured to route packets to IPv*_GRE via IP*_8
+#   (the bottom route)
+#
+# Tests:
+#
+#   1. routes NS2->IPv*_DST are brought down, so the only way a ping
+#      from IP*_SRC to IP*_DST can work is via IPv*_GRE
+#
+#   2a. in an egress test, a bpf LWT_XMIT program is installed on veth1
+#       that encaps the packets with an IP/GRE header to route to IPv*_GRE
+#
+#       ping: SRC->[encap at veth1:egress]->GRE:decap->DST
+#       ping replies go DST->SRC directly
+#
+#   2b. in an ingress test, a bpf LWT_IN program is installed on veth2
+#       that encaps the packets with an IP/GRE header to route to IPv*_GRE
+#
+#       ping: SRC->[encap at veth2:ingress]->GRE:decap->DST
+#       ping replies go DST->SRC directly
+
+set +x  # debug OFF
+set -e  # exit on error
+
+if [[ $EUID -ne 0 ]]; then
+	echo "This script must be run as root"
+	echo "FAIL"
+	exit 1
+fi
+
+readonly NS1="ns1-$(mktemp -u XXXXXX)"
+readonly NS2="ns2-$(mktemp -u XXXXXX)"
+readonly NS3="ns3-$(mktemp -u XXXXXX)"
+
+readonly IPv4_1="172.16.1.100"
+readonly IPv4_2="172.16.2.100"
+readonly IPv4_3="172.16.3.100"
+readonly IPv4_4="172.16.4.100"
+readonly IPv4_5="172.16.5.100"
+readonly IPv4_6="172.16.6.100"
+readonly IPv4_7="172.16.7.100"
+readonly IPv4_8="172.16.8.100"
+readonly IPv4_GRE="172.16.16.100"
+
+readonly IPv4_SRC=$IPv4_1
+readonly IPv4_DST=$IPv4_4
+
+readonly IPv6_1="fb01::1"
+readonly IPv6_2="fb02::1"
+readonly IPv6_3="fb03::1"
+readonly IPv6_4="fb04::1"
+readonly IPv6_5="fb05::1"
+readonly IPv6_6="fb06::1"
+readonly IPv6_7="fb07::1"
+readonly IPv6_8="fb08::1"
+readonly IPv6_GRE="fb10::1"
+
+readonly IPv6_SRC=$IPv6_1
+readonly IPv6_DST=$IPv6_4
+
+setup() {
+set -e  # exit on error
+	# create devices and namespaces
+	ip netns add "${NS1}"
+	ip netns add "${NS2}"
+	ip netns add "${NS3}"
+
+	ip link add veth1 type veth peer name veth2
+	ip link add veth3 type veth peer name veth4
+	ip link add veth5 type veth peer name veth6
+	ip link add veth7 type veth peer name veth8
+
+	ip netns exec ${NS2} sysctl -w net.ipv4.ip_forward=1 2>&1 > /dev/null
+	ip netns exec ${NS2} sysctl -w net.ipv6.conf.all.forwarding=1 2>&1 > /dev/null
+
+	ip link set veth1 netns ${NS1}
+	ip link set veth2 netns ${NS2}
+	ip link set veth3 netns ${NS2}
+	ip link set veth4 netns ${NS3}
+	ip link set veth5 netns ${NS1}
+	ip link set veth6 netns ${NS2}
+	ip link set veth7 netns ${NS2}
+	ip link set veth8 netns ${NS3}
+
+	# configure addresses: the top route (1-2-3-4)
+	ip -netns ${NS1}    addr add ${IPv4_1}/24  dev veth1
+	ip -netns ${NS2}    addr add ${IPv4_2}/24  dev veth2
+	ip -netns ${NS2}    addr add ${IPv4_3}/24  dev veth3
+	ip -netns ${NS3}    addr add ${IPv4_4}/24  dev veth4
+	ip -netns ${NS1} -6 addr add ${IPv6_1}/128 dev veth1
+	ip -netns ${NS2} -6 addr add ${IPv6_2}/128 dev veth2
+	ip -netns ${NS2} -6 addr add ${IPv6_3}/128 dev veth3
+	ip -netns ${NS3} -6 addr add ${IPv6_4}/128 dev veth4
+
+	# configure addresses: the bottom route (5-6-7-8)
+	ip -netns ${NS1}    addr add ${IPv4_5}/24  dev veth5
+	ip -netns ${NS2}    addr add ${IPv4_6}/24  dev veth6
+	ip -netns ${NS2}    addr add ${IPv4_7}/24  dev veth7
+	ip -netns ${NS3}    addr add ${IPv4_8}/24  dev veth8
+	ip -netns ${NS1} -6 addr add ${IPv6_5}/128 dev veth5
+	ip -netns ${NS2} -6 addr add ${IPv6_6}/128 dev veth6
+	ip -netns ${NS2} -6 addr add ${IPv6_7}/128 dev veth7
+	ip -netns ${NS3} -6 addr add ${IPv6_8}/128 dev veth8
+
+
+	ip -netns ${NS1} link set dev veth1 up
+	ip -netns ${NS2} link set dev veth2 up
+	ip -netns ${NS2} link set dev veth3 up
+	ip -netns ${NS3} link set dev veth4 up
+	ip -netns ${NS1} link set dev veth5 up
+	ip -netns ${NS2} link set dev veth6 up
+	ip -netns ${NS2} link set dev veth7 up
+	ip -netns ${NS3} link set dev veth8 up
+
+	# configure routes: IP*_SRC -> veth1/IP*_2 (= top route) default;
+	# the bottom route to specific bottom addresses
+
+	# NS1
+	# top route
+	ip -netns ${NS1}    route add ${IPv4_2}/32  dev veth1
+	ip -netns ${NS1}    route add default dev veth1 via ${IPv4_2}  # go top by default
+	ip -netns ${NS1} -6 route add ${IPv6_2}/128 dev veth1
+	ip -netns ${NS1} -6 route add default dev veth1 via ${IPv6_2}  # go top by default
+	# bottom route
+	ip -netns ${NS1}    route add ${IPv4_6}/32  dev veth5
+	ip -netns ${NS1}    route add ${IPv4_7}/32  dev veth5 via ${IPv4_6}
+	ip -netns ${NS1}    route add ${IPv4_8}/32  dev veth5 via ${IPv4_6}
+	ip -netns ${NS1} -6 route add ${IPv6_6}/128 dev veth5
+	ip -netns ${NS1} -6 route add ${IPv6_7}/128 dev veth5 via ${IPv6_6}
+	ip -netns ${NS1} -6 route add ${IPv6_8}/128 dev veth5 via ${IPv6_6}
+
+	# NS2
+	# top route
+	ip -netns ${NS2}    route add ${IPv4_1}/32  dev veth2
+	ip -netns ${NS2}    route add ${IPv4_4}/32  dev veth3
+	ip -netns ${NS2} -6 route add ${IPv6_1}/128 dev veth2
+	ip -netns ${NS2} -6 route add ${IPv6_4}/128 dev veth3
+	# bottom route
+	ip -netns ${NS2}    route add ${IPv4_5}/32  dev veth6
+	ip -netns ${NS2}    route add ${IPv4_8}/32  dev veth7
+	ip -netns ${NS2} -6 route add ${IPv6_5}/128 dev veth6
+	ip -netns ${NS2} -6 route add ${IPv6_8}/128 dev veth7
+
+	# NS3
+	# top route
+	ip -netns ${NS3}    route add ${IPv4_3}/32  dev veth4
+	ip -netns ${NS3}    route add ${IPv4_1}/32  dev veth4 via ${IPv4_3}
+	ip -netns ${NS3}    route add ${IPv4_2}/32  dev veth4 via ${IPv4_3}
+	ip -netns ${NS3} -6 route add ${IPv6_3}/128 dev veth4
+	ip -netns ${NS3} -6 route add ${IPv6_1}/128 dev veth4 via ${IPv6_3}
+	ip -netns ${NS3} -6 route add ${IPv6_2}/128 dev veth4 via ${IPv6_3}
+	# bottom route
+	ip -netns ${NS3}    route add ${IPv4_7}/32  dev veth8
+	ip -netns ${NS3}    route add ${IPv4_5}/32  dev veth8 via ${IPv4_7}
+	ip -netns ${NS3}    route add ${IPv4_6}/32  dev veth8 via ${IPv4_7}
+	ip -netns ${NS3} -6 route add ${IPv6_7}/128 dev veth8
+	ip -netns ${NS3} -6 route add ${IPv6_5}/128 dev veth8 via ${IPv6_7}
+	ip -netns ${NS3} -6 route add ${IPv6_6}/128 dev veth8 via ${IPv6_7}
+
+	# configure IPv4 GRE device in NS3, and a route to it via the "bottom" route
+	ip -netns ${NS3} tunnel add gre_dev mode gre remote ${IPv4_1} local ${IPv4_GRE} ttl 255
+	ip -netns ${NS3} link set gre_dev up
+	ip -netns ${NS3} addr add ${IPv4_GRE} dev gre_dev
+	ip -netns ${NS1} route add ${IPv4_GRE}/32 dev veth5 via ${IPv4_6}
+	ip -netns ${NS2} route add ${IPv4_GRE}/32 dev veth7 via ${IPv4_8}
+
+
+	# configure IPv6 GRE device in NS3, and a route to it via the "bottom" route
+	ip -netns ${NS3} -6 tunnel add name gre6_dev mode ip6gre remote ${IPv6_1} local ${IPv6_GRE} ttl 255
+	ip -netns ${NS3} link set gre6_dev up
+	ip -netns ${NS3} -6 addr add ${IPv6_GRE} dev gre6_dev
+	ip -netns ${NS1} -6 route add ${IPv6_GRE}/128 dev veth5 via ${IPv6_6}
+	ip -netns ${NS2} -6 route add ${IPv6_GRE}/128 dev veth7 via ${IPv6_8}
+
+	# rp_filter gets confused by what these tests are doing, so disable it
+	ip netns exec ${NS1} sysctl -w net.ipv4.conf.all.rp_filter=0 2>&1 > /dev/null
+	ip netns exec ${NS2} sysctl -w net.ipv4.conf.all.rp_filter=0 2>&1 > /dev/null
+	ip netns exec ${NS3} sysctl -w net.ipv4.conf.all.rp_filter=0 2>&1 > /dev/null
+}
+
+cleanup() {
+	ip netns del ${NS1} 2> /dev/null
+	ip netns del ${NS2} 2> /dev/null
+	ip netns del ${NS3} 2> /dev/null
+}
+
+trap cleanup EXIT
+
+test_ping() {
+	local readonly PROTO=$1
+	local readonly EXPECTED=$2
+	local RET=0
+
+	set +e
+	if [ "${PROTO}" == "IPv4" ] ; then
+		ip netns exec ${NS1} ping  -c 1 -W 1 -I ${IPv4_SRC} ${IPv4_DST} 2>&1 > /dev/null
+		# ip netns exec ${NS1} ping  -c 1 -W 10 -I ${IPv4_SRC} ${IPv4_DST}
+		RET=$?
+	elif [ "${PROTO}" == "IPv6" ] ; then
+		ip netns exec ${NS1} ping6 -c 1 -W 6 -I ${IPv6_SRC} ${IPv6_DST} 2>&1 > /dev/null
+		# ip netns exec ${NS1} ping6 -c 1 -W 6 -I ${IPv6_SRC} ${IPv6_DST}
+		RET=$?
+	else
+		echo "test_ping: unknown PROTO: ${PROTO}"
+		exit 1
+	fi
+	set -e
+
+	if [ "0" != "${RET}" ]; then
+		RET=1
+	fi
+
+	if [ "${EXPECTED}" != "${RET}" ] ; then
+		echo "FAIL: test_ping: ${RET}"
+		exit 1
+	fi
+}
+
+test_egress() {
+	local readonly ENCAP=$1
+	echo "starting egress ${ENCAP} encap test"
+	setup
+
+	# need to wait a bit for IPv6 to autoconf, otherwise
+	# ping6 sometimes fails with "unable to bind to address"
+	sleep 1
+
+	# by default, pings work
+	test_ping IPv4 0
+	test_ping IPv6 0
+
+	# remove NS2->DST routes, ping fails
+	ip -netns ${NS2}    route del ${IPv4_DST}/32  dev veth3
+	ip -netns ${NS2} -6 route del ${IPv6_DST}/128 dev veth3
+	test_ping IPv4 1
+	test_ping IPv6 1
+
+	# install replacement routes (LWT/eBPF), pings succeed
+	if [ "${ENCAP}" == "IPv4" ] ; then
+		ip -netns ${NS1} route add ${IPv4_DST} encap bpf xmit obj test_lwt_ip_encap.o sec encap_gre dev veth1
+		ip -netns ${NS1} -6 route add ${IPv6_DST} encap bpf xmit obj test_lwt_ip_encap.o sec encap_gre dev veth1
+	elif [ "${ENCAP}" == "IPv6" ] ; then
+		ip -netns ${NS1} route add ${IPv4_DST} encap bpf xmit obj test_lwt_ip_encap.o sec encap_gre6 dev veth1
+		ip -netns ${NS1} -6 route add ${IPv6_DST} encap bpf xmit obj test_lwt_ip_encap.o sec encap_gre6 dev veth1
+	else
+		echo "FAIL: unknown encap ${ENCAP}"
+	fi
+	test_ping IPv4 0
+	test_ping IPv6 0
+
+	cleanup
+	echo "PASS"
+}
+
+test_ingress() {
+	local readonly ENCAP=$1
+	echo "starting ingress ${ENCAP} encap test"
+	setup
+
+	# need to wait a bit for IPv6 to autoconf, otherwise
+	# ping6 sometimes fails with "unable to bind to address"
+	sleep 1
+
+	# by default, pings work
+	test_ping IPv4 0
+	test_ping IPv6 0
+
+	# remove NS2->DST routes, pings fail
+	ip -netns ${NS2}    route del ${IPv4_DST}/32  dev veth3
+	ip -netns ${NS2} -6 route del ${IPv6_DST}/128 dev veth3
+	test_ping IPv4 1
+	test_ping IPv6 1
+
+	# install replacement routes (LWT/eBPF), pings succeed
+	if [ "${ENCAP}" == "IPv4" ] ; then
+		ip -netns ${NS2} route add ${IPv4_DST} encap bpf in obj test_lwt_ip_encap.o sec encap_gre dev veth2
+		ip -netns ${NS2} -6 route add ${IPv6_DST} encap bpf in obj test_lwt_ip_encap.o sec encap_gre dev veth2
+	elif [ "${ENCAP}" == "IPv6" ] ; then
+		ip -netns ${NS2} route add ${IPv4_DST} encap bpf in obj test_lwt_ip_encap.o sec encap_gre6 dev veth2
+		ip -netns ${NS2} -6 route add ${IPv6_DST} encap bpf in obj test_lwt_ip_encap.o sec encap_gre6 dev veth2
+	else
+		echo "FAIL: unknown encap ${ENCAP}"
+	fi
+	test_ping IPv4 0
+	test_ping IPv6 0
+
+	cleanup
+	echo "PASS"
+}
+
+test_egress IPv4
+test_egress IPv6
+
+test_ingress IPv4
+test_ingress IPv6
+
+echo "all tests passed"