diff mbox series

[bpf-next,v4,2/2] samples/bpf: Add xdp_sample_pkts example

Message ID 152821020092.23694.4231922206556589059.stgit@alrua-kau
State Changes Requested, archived
Delegated to: BPF Maintainers
Headers show
Series [bpf-next,v4,1/2] trace_helpers.c: Add helpers to poll multiple perf FDs for events | expand

Commit Message

Toke Høiland-Jørgensen June 5, 2018, 2:50 p.m. UTC
Add an example program showing how to sample packets from XDP using the
perf event buffer. The example userspace program just prints the ethernet
header for every packet sampled.

Signed-off-by: Toke Høiland-Jørgensen <toke@toke.dk>
---
 samples/bpf/Makefile               |    4 +
 samples/bpf/xdp_sample_pkts_kern.c |   62 +++++++++++++
 samples/bpf/xdp_sample_pkts_user.c |  176 ++++++++++++++++++++++++++++++++++++
 3 files changed, 242 insertions(+)
 create mode 100644 samples/bpf/xdp_sample_pkts_kern.c
 create mode 100644 samples/bpf/xdp_sample_pkts_user.c

Comments

Jakub Kicinski June 5, 2018, 10:24 p.m. UTC | #1
On Tue, 05 Jun 2018 16:50:00 +0200, Toke Høiland-Jørgensen wrote:
> Add an example program showing how to sample packets from XDP using the
> perf event buffer. The example userspace program just prints the ethernet
> header for every packet sampled.
> 
> Signed-off-by: Toke Høiland-Jørgensen <toke@toke.dk>

> diff --git a/samples/bpf/xdp_sample_pkts_kern.c b/samples/bpf/xdp_sample_pkts_kern.c
> new file mode 100644
> index 000000000000..4560522ca015
> --- /dev/null
> +++ b/samples/bpf/xdp_sample_pkts_kern.c
> @@ -0,0 +1,62 @@
> +#include <linux/ptrace.h>
> +#include <linux/version.h>
> +#include <uapi/linux/bpf.h>
> +#include "bpf_helpers.h"
> +
> +#define SAMPLE_SIZE 64ul
> +#define MAX_CPUS 24

That may be a lil' too few for modern HW with hyper-threading on ;)  
My development machine says:

$ ncpus
28

128, maybe?

> +#define bpf_printk(fmt, ...)					\
> +({								\
> +	       char ____fmt[] = fmt;				\
> +	       bpf_trace_printk(____fmt, sizeof(____fmt),	\
> +				##__VA_ARGS__);			\
> +})
> +
> +struct bpf_map_def SEC("maps") my_map = {
> +	.type = BPF_MAP_TYPE_PERF_EVENT_ARRAY,
> +	.key_size = sizeof(int),
> +	.value_size = sizeof(u32),
> +	.max_entries = MAX_CPUS,
> +};
> +
> +SEC("xdp_sample")
> +int xdp_sample_prog(struct xdp_md *ctx)
> +{
> +	void *data_end = (void *)(long)ctx->data_end;
> +	void *data = (void *)(long)ctx->data;
> +
> +        /* Metadata will be in the perf event before the packet data. */
> +	struct S {
> +		u16 cookie;
> +		u16 pkt_len;
> +	} __attribute__((packed)) metadata;
> +
> +	if (data + SAMPLE_SIZE < data_end) {
> +		/* The XDP perf_event_output handler will use the upper 32 bits
> +		 * of the flags argument as a number of bytes to include of the
> +		 * packet payload in the event data. If the size is too big, the
> +		 * call to bpf_perf_event_output will fail and return -EFAULT.
> +		 *
> +		 * See bpf_xdp_event_output in net/core/filter.c.
> +		 *
> +		 * The BPF_F_CURRENT_CPU flag means that the event output fd
> +		 * will be indexed by the CPU number in the event map.
> +		 */
> +		u64 flags = (SAMPLE_SIZE << 32) | BPF_F_CURRENT_CPU;
> +		int ret;
> +
> +		metadata.cookie = 0xdead;
> +		metadata.pkt_len = (u16)(data_end - data);
> +
> +		ret = bpf_perf_event_output(ctx, &my_map, flags,
> +				      &metadata, sizeof(metadata));
> +		if(ret)

Please run checkpatch --strict on the samples.

> +			bpf_printk("perf_event_output failed: %d\n", ret);
> +	}
> +
> +	return XDP_PASS;
> +}
> +
> +char _license[] SEC("license") = "GPL";
> +u32 _version SEC("version") = LINUX_VERSION_CODE;
Toke Høiland-Jørgensen June 6, 2018, 11:01 a.m. UTC | #2
Jakub Kicinski <jakub.kicinski@netronome.com> writes:

> On Tue, 05 Jun 2018 16:50:00 +0200, Toke Høiland-Jørgensen wrote:
>> Add an example program showing how to sample packets from XDP using the
>> perf event buffer. The example userspace program just prints the ethernet
>> header for every packet sampled.
>> 
>> Signed-off-by: Toke Høiland-Jørgensen <toke@toke.dk>
>
>> diff --git a/samples/bpf/xdp_sample_pkts_kern.c b/samples/bpf/xdp_sample_pkts_kern.c
>> new file mode 100644
>> index 000000000000..4560522ca015
>> --- /dev/null
>> +++ b/samples/bpf/xdp_sample_pkts_kern.c
>> @@ -0,0 +1,62 @@
>> +#include <linux/ptrace.h>
>> +#include <linux/version.h>
>> +#include <uapi/linux/bpf.h>
>> +#include "bpf_helpers.h"
>> +
>> +#define SAMPLE_SIZE 64ul
>> +#define MAX_CPUS 24
>
> That may be a lil' too few for modern HW with hyper-threading on ;)  
> My development machine says:
>
> $ ncpus
> 28
>
> 128, maybe?

What? 24 CPUs should be enough for everyone, surely? ;)

(I just took twice the number of CPUs in my largest machine; but fair
point, other people have access to more expensive toys I guess... will fix)

>> +#define bpf_printk(fmt, ...)					\
>> +({								\
>> +	       char ____fmt[] = fmt;				\
>> +	       bpf_trace_printk(____fmt, sizeof(____fmt),	\
>> +				##__VA_ARGS__);			\
>> +})
>> +
>> +struct bpf_map_def SEC("maps") my_map = {
>> +	.type = BPF_MAP_TYPE_PERF_EVENT_ARRAY,
>> +	.key_size = sizeof(int),
>> +	.value_size = sizeof(u32),
>> +	.max_entries = MAX_CPUS,
>> +};
>> +
>> +SEC("xdp_sample")
>> +int xdp_sample_prog(struct xdp_md *ctx)
>> +{
>> +	void *data_end = (void *)(long)ctx->data_end;
>> +	void *data = (void *)(long)ctx->data;
>> +
>> +        /* Metadata will be in the perf event before the packet data. */
>> +	struct S {
>> +		u16 cookie;
>> +		u16 pkt_len;
>> +	} __attribute__((packed)) metadata;
>> +
>> +	if (data + SAMPLE_SIZE < data_end) {
>> +		/* The XDP perf_event_output handler will use the upper 32 bits
>> +		 * of the flags argument as a number of bytes to include of the
>> +		 * packet payload in the event data. If the size is too big, the
>> +		 * call to bpf_perf_event_output will fail and return -EFAULT.
>> +		 *
>> +		 * See bpf_xdp_event_output in net/core/filter.c.
>> +		 *
>> +		 * The BPF_F_CURRENT_CPU flag means that the event output fd
>> +		 * will be indexed by the CPU number in the event map.
>> +		 */
>> +		u64 flags = (SAMPLE_SIZE << 32) | BPF_F_CURRENT_CPU;
>> +		int ret;
>> +
>> +		metadata.cookie = 0xdead;
>> +		metadata.pkt_len = (u16)(data_end - data);
>> +
>> +		ret = bpf_perf_event_output(ctx, &my_map, flags,
>> +				      &metadata, sizeof(metadata));
>> +		if(ret)
>
> Please run checkpatch --strict on the samples.

Will do!

>> +			bpf_printk("perf_event_output failed: %d\n", ret);
>> +	}
>> +
>> +	return XDP_PASS;
>> +}
>> +
>> +char _license[] SEC("license") = "GPL";
>> +u32 _version SEC("version") = LINUX_VERSION_CODE;
Jesper Dangaard Brouer June 6, 2018, 11:33 a.m. UTC | #3
On Wed, 06 Jun 2018 13:01:52 +0200
Toke Høiland-Jørgensen <toke@toke.dk> wrote:

> Jakub Kicinski <jakub.kicinski@netronome.com> writes:
> 
> > On Tue, 05 Jun 2018 16:50:00 +0200, Toke Høiland-Jørgensen wrote:  
> >> Add an example program showing how to sample packets from XDP using the
> >> perf event buffer. The example userspace program just prints the ethernet
> >> header for every packet sampled.
> >> 
> >> Signed-off-by: Toke Høiland-Jørgensen <toke@toke.dk>  
> >  
> >> diff --git a/samples/bpf/xdp_sample_pkts_kern.c b/samples/bpf/xdp_sample_pkts_kern.c
> >> new file mode 100644
> >> index 000000000000..4560522ca015
> >> --- /dev/null
> >> +++ b/samples/bpf/xdp_sample_pkts_kern.c
> >> @@ -0,0 +1,62 @@
> >> +#include <linux/ptrace.h>
> >> +#include <linux/version.h>
> >> +#include <uapi/linux/bpf.h>
> >> +#include "bpf_helpers.h"
> >> +
> >> +#define SAMPLE_SIZE 64ul
> >> +#define MAX_CPUS 24  
> >
> > That may be a lil' too few for modern HW with hyper-threading on ;)  
> > My development machine says:
> >
> > $ ncpus
> > 28
> >
> > 128, maybe?  
> 
> What? 24 CPUs should be enough for everyone, surely? ;)
> 
> (I just took twice the number of CPUs in my largest machine; but fair
> point, other people have access to more expensive toys I guess... will fix)
> 
> >> +#define bpf_printk(fmt, ...)					\
> >> +({								\
> >> +	       char ____fmt[] = fmt;				\
> >> +	       bpf_trace_printk(____fmt, sizeof(____fmt),	\
> >> +				##__VA_ARGS__);			\
> >> +})
> >> +
> >> +struct bpf_map_def SEC("maps") my_map = {
> >> +	.type = BPF_MAP_TYPE_PERF_EVENT_ARRAY,
> >> +	.key_size = sizeof(int),
> >> +	.value_size = sizeof(u32),
> >> +	.max_entries = MAX_CPUS,
> >> +};

I agree, we should increase it to something larger than 24, just to
avoid people getting surprised, but this is only sample code.

At some point I want us to add another example, that shows how to adjust
this at runtime, which is actually possible with bpf_load.c via a map
callback that Martin added.  I've not looked at how libbpf can support
this.  I do have sample code[1] that could be adjusted to do this, but
I don't want to create dependencies to bpf_load.c, as we all agree that
everything should be moved to use libbpf (tools/lib/bpf).

[1] https://github.com/netoptimizer/prototype-kernel/blob/master/kernel/samples/bpf/xdp_ddos01_blacklist_user.c#L186-L218
diff mbox series

Patch

diff --git a/samples/bpf/Makefile b/samples/bpf/Makefile
index 1303af10e54d..9ea2f7b64869 100644
--- a/samples/bpf/Makefile
+++ b/samples/bpf/Makefile
@@ -52,6 +52,7 @@  hostprogs-y += xdp_adjust_tail
 hostprogs-y += xdpsock
 hostprogs-y += xdp_fwd
 hostprogs-y += task_fd_query
+hostprogs-y += xdp_sample_pkts
 
 # Libbpf dependencies
 LIBBPF = $(TOOLS_PATH)/lib/bpf/libbpf.a
@@ -107,6 +108,7 @@  xdp_adjust_tail-objs := xdp_adjust_tail_user.o
 xdpsock-objs := bpf_load.o xdpsock_user.o
 xdp_fwd-objs := bpf_load.o xdp_fwd_user.o
 task_fd_query-objs := bpf_load.o task_fd_query_user.o $(TRACE_HELPERS)
+xdp_sample_pkts-objs := xdp_sample_pkts_user.o $(TRACE_HELPERS)
 
 # Tell kbuild to always build the programs
 always := $(hostprogs-y)
@@ -163,6 +165,7 @@  always += xdp_adjust_tail_kern.o
 always += xdpsock_kern.o
 always += xdp_fwd_kern.o
 always += task_fd_query_kern.o
+always += xdp_sample_pkts_kern.o
 
 HOSTCFLAGS += -I$(objtree)/usr/include
 HOSTCFLAGS += -I$(srctree)/tools/lib/
@@ -179,6 +182,7 @@  HOSTCFLAGS_spintest_user.o += -I$(srctree)/tools/lib/bpf/
 HOSTCFLAGS_trace_event_user.o += -I$(srctree)/tools/lib/bpf/
 HOSTCFLAGS_sampleip_user.o += -I$(srctree)/tools/lib/bpf/
 HOSTCFLAGS_task_fd_query_user.o += -I$(srctree)/tools/lib/bpf/
+HOSTCFLAGS_xdp_sample_pkts_user.o += -I$(srctree)/tools/lib/bpf/
 
 HOST_LOADLIBES		+= $(LIBBPF) -lelf
 HOSTLOADLIBES_tracex4		+= -lrt
diff --git a/samples/bpf/xdp_sample_pkts_kern.c b/samples/bpf/xdp_sample_pkts_kern.c
new file mode 100644
index 000000000000..4560522ca015
--- /dev/null
+++ b/samples/bpf/xdp_sample_pkts_kern.c
@@ -0,0 +1,62 @@ 
+#include <linux/ptrace.h>
+#include <linux/version.h>
+#include <uapi/linux/bpf.h>
+#include "bpf_helpers.h"
+
+#define SAMPLE_SIZE 64ul
+#define MAX_CPUS 24
+
+#define bpf_printk(fmt, ...)					\
+({								\
+	       char ____fmt[] = fmt;				\
+	       bpf_trace_printk(____fmt, sizeof(____fmt),	\
+				##__VA_ARGS__);			\
+})
+
+struct bpf_map_def SEC("maps") my_map = {
+	.type = BPF_MAP_TYPE_PERF_EVENT_ARRAY,
+	.key_size = sizeof(int),
+	.value_size = sizeof(u32),
+	.max_entries = MAX_CPUS,
+};
+
+SEC("xdp_sample")
+int xdp_sample_prog(struct xdp_md *ctx)
+{
+	void *data_end = (void *)(long)ctx->data_end;
+	void *data = (void *)(long)ctx->data;
+
+        /* Metadata will be in the perf event before the packet data. */
+	struct S {
+		u16 cookie;
+		u16 pkt_len;
+	} __attribute__((packed)) metadata;
+
+	if (data + SAMPLE_SIZE < data_end) {
+		/* The XDP perf_event_output handler will use the upper 32 bits
+		 * of the flags argument as a number of bytes to include of the
+		 * packet payload in the event data. If the size is too big, the
+		 * call to bpf_perf_event_output will fail and return -EFAULT.
+		 *
+		 * See bpf_xdp_event_output in net/core/filter.c.
+		 *
+		 * The BPF_F_CURRENT_CPU flag means that the event output fd
+		 * will be indexed by the CPU number in the event map.
+		 */
+		u64 flags = (SAMPLE_SIZE << 32) | BPF_F_CURRENT_CPU;
+		int ret;
+
+		metadata.cookie = 0xdead;
+		metadata.pkt_len = (u16)(data_end - data);
+
+		ret = bpf_perf_event_output(ctx, &my_map, flags,
+				      &metadata, sizeof(metadata));
+		if(ret)
+			bpf_printk("perf_event_output failed: %d\n", ret);
+	}
+
+	return XDP_PASS;
+}
+
+char _license[] SEC("license") = "GPL";
+u32 _version SEC("version") = LINUX_VERSION_CODE;
diff --git a/samples/bpf/xdp_sample_pkts_user.c b/samples/bpf/xdp_sample_pkts_user.c
new file mode 100644
index 000000000000..672392d48ce3
--- /dev/null
+++ b/samples/bpf/xdp_sample_pkts_user.c
@@ -0,0 +1,176 @@ 
+/* This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ */
+#include <stdio.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <stdbool.h>
+#include <string.h>
+#include <fcntl.h>
+#include <poll.h>
+#include <linux/perf_event.h>
+#include <linux/bpf.h>
+#include <net/if.h>
+#include <errno.h>
+#include <assert.h>
+#include <sys/sysinfo.h>
+#include <sys/syscall.h>
+#include <sys/ioctl.h>
+#include <sys/mman.h>
+#include <time.h>
+#include <signal.h>
+#include <libbpf.h>
+#include <bpf/bpf.h>
+
+#include "perf-sys.h"
+#include "trace_helpers.h"
+
+#define MAX_CPUS 24
+static int pmu_fds[MAX_CPUS], if_idx = 0;
+static struct perf_event_mmap_page *headers[MAX_CPUS];
+static char *if_name;
+
+static int do_attach(int idx, int fd, const char *name)
+{
+	int err;
+
+	err = bpf_set_link_xdp_fd(idx, fd, 0);
+	if (err < 0)
+		printf("ERROR: failed to attach program to %s\n", name);
+
+	return err;
+}
+
+static int do_detach(int idx, const char *name)
+{
+	int err;
+
+	err = bpf_set_link_xdp_fd(idx, -1, 0);
+	if (err < 0)
+		printf("ERROR: failed to detach program from %s\n", name);
+
+	return err;
+}
+
+#define SAMPLE_SIZE 64
+
+static int print_bpf_output(void *data, int size)
+{
+	struct {
+		__u16 cookie;
+		__u16 pkt_len;
+		__u8  pkt_data[SAMPLE_SIZE];
+	} __attribute__((packed)) *e = data;
+	int i;
+
+	if (e->cookie != 0xdead) {
+		printf("BUG cookie %x sized %d\n",
+		       e->cookie, size);
+		return LIBBPF_PERF_EVENT_ERROR;
+	}
+
+	printf("Pkt len: %-5d bytes. Ethernet hdr: ", e->pkt_len);
+	for (i = 0; i < 14 && i < e->pkt_len; i++)
+		printf("%02x ", e->pkt_data[i]);
+	printf("\n");
+
+	return LIBBPF_PERF_EVENT_CONT;
+}
+
+static void test_bpf_perf_event(int map_fd, int num)
+{
+	struct perf_event_attr attr = {
+		.sample_type = PERF_SAMPLE_RAW,
+		.type = PERF_TYPE_SOFTWARE,
+		.config = PERF_COUNT_SW_BPF_OUTPUT,
+		.wakeup_events = 1, /* get an fd notification for every event */
+	};
+	int i;
+
+	for (i = 0; i < num; i++) {
+		int key = i;
+
+		pmu_fds[i] = sys_perf_event_open(&attr, -1/*pid*/, i/*cpu*/, -1/*group_fd*/, 0);
+
+		assert(pmu_fds[i] >= 0);
+		assert(bpf_map_update_elem(map_fd, &key, &pmu_fds[i], BPF_ANY) == 0);
+		ioctl(pmu_fds[i], PERF_EVENT_IOC_ENABLE, 0);
+	}
+}
+
+static void sig_handler(int signo)
+{
+	do_detach(if_idx, if_name);
+	exit(0);
+}
+
+int main(int argc, char **argv)
+{
+	struct bpf_prog_load_attr prog_load_attr = {
+		.prog_type	= BPF_PROG_TYPE_XDP,
+	};
+	struct bpf_object *obj;
+	struct bpf_map *map;
+	int prog_fd, map_fd;
+	char filename[256];
+	int ret, err, i;
+	int numcpus;
+
+	if (argc < 2) {
+		printf("Usage: %s <ifname>\n", argv[0]);
+		return 1;
+	}
+
+	numcpus = get_nprocs();
+	if (numcpus > MAX_CPUS)
+		numcpus = MAX_CPUS;
+
+	snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]);
+	prog_load_attr.file = filename;
+
+	if (bpf_prog_load_xattr(&prog_load_attr, &obj, &prog_fd))
+		return 1;
+
+	if (!prog_fd) {
+		printf("load_bpf_file: %s\n", strerror(errno));
+		return 1;
+	}
+
+	map = bpf_map__next(NULL, obj);
+	if (!map) {
+		printf("finding a map in obj file failed\n");
+		return 1;
+	}
+	map_fd = bpf_map__fd(map);
+
+	if_idx = if_nametoindex(argv[1]);
+	if (!if_idx)
+		if_idx = strtoul(argv[1], NULL, 0);
+
+	if (!if_idx) {
+		fprintf(stderr, "Invalid ifname\n");
+		return 1;
+	}
+	if_name = argv[1];
+	err = do_attach(if_idx, prog_fd, argv[1]);
+	if (err)
+		return err;
+
+	if (signal(SIGINT, sig_handler) ||
+	    signal(SIGHUP, sig_handler) ||
+	    signal(SIGTERM, sig_handler)) {
+		perror("signal");
+		return 1;
+	}
+
+	test_bpf_perf_event(map_fd, numcpus);
+
+	for (i = 0; i < numcpus; i++)
+		if (perf_event_mmap_header(pmu_fds[i], &headers[i]) < 0)
+			return 1;
+
+	ret = perf_event_poller_multi(pmu_fds, headers, numcpus, print_bpf_output);
+	kill(0, SIGINT);
+	return ret;
+}