[2/2] tools: bpftool: add probes for eBPF program types

Message ID 20190529103808.17882-3-stefan.bader@canonical.com
State New
Series Fix compilation of net selftests

Commit Message

Stefan Bader May 29, 2019, 10:38 a.m. UTC
From: Quentin Monnet <quentin.monnet@netronome.com>

BugLink: https://bugs.launchpad.net/bugs/1829812

Introduce probes for supported BPF program types in libbpf, and call them
from bpftool to test which types are available on the system. Each probe
simply consists of loading a very basic program of that type and seeing
whether the verifier complains or not.

Sample output:

    # bpftool feature probe kernel
    ...
    Scanning eBPF program types...
    eBPF program_type socket_filter is available
    eBPF program_type kprobe is available
    eBPF program_type sched_cls is available
    ...

    # bpftool --json --pretty feature probe kernel
    {
        ...
        "program_types": {
            "have_socket_filter_prog_type": true,
            "have_kprobe_prog_type": true,
            "have_sched_cls_prog_type": true,
            ...
        }
    }
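
For reference, a minimal sketch of how a program linked against libbpf could
call the new probe directly; the include paths and the program type picked
here are only an example:

    #include <stdio.h>
    #include <linux/bpf.h>

    #include "libbpf.h"

    int main(void)
    {
            /* Second argument is an ifindex: 0 probes the host itself,
             * a non-zero value probes offload on that network device.
             */
            if (bpf_probe_prog_type(BPF_PROG_TYPE_SOCKET_FILTER, 0))
                    printf("socket_filter is available\n");
            else
                    printf("socket_filter is not available\n");
            return 0;
    }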

v5:
- In libbpf.map, move global symbol to a new LIBBPF_0.0.2 section.
- Rename (non-API function) prog_load() as probe_load().

v3:
- Get kernel version for checking kprobes availability from libbpf
  instead of from bpftool. Do not pass kernel_version as an argument
  when calling libbpf probes.
- Use a switch with all enum values for setting specific program
  parameters just before probing, so that gcc complains at compile time
  (-Wswitch-enum) if new prog types were added to the kernel but libbpf
  was not updated.
- Add a comment in libbpf.h about setrlimit() usage to allow many
  consecutive probe attempts (see the sketch right after this list).
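
A minimal sketch of that setrlimit() usage on the caller's side (the choice
of RLIM_INFINITY here is just an example; any sufficiently large limit
works):

    #include <sys/resource.h>

    /* Raise the locked-memory limit so that many consecutive probe
     * loads do not start failing once the per-user limit is reached.
     */
    static int bump_memlock_rlimit(void)
    {
            struct rlimit rlim = {
                    .rlim_cur = RLIM_INFINITY,
                    .rlim_max = RLIM_INFINITY,
            };

            return setrlimit(RLIMIT_MEMLOCK, &rlim);
    }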

v2:
- Move probes from bpftool to libbpf.
- Remove C-style macros output from this patch.

Signed-off-by: Quentin Monnet <quentin.monnet@netronome.com>
Reviewed-by: Jakub Kicinski <jakub.kicinski@netronome.com>
Reviewed-by: Stanislav Fomichev <sdf@google.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>

(cherry picked from commit 1bf4b05810fe38c5f09973295e8d4234a4fd5d87)
[smb: fuzz 1 in hunk #2 of tools/bpf/bpftool/feature.c]
Signed-off-by: Stefan Bader <stefan.bader@canonical.com>
---
 tools/bpf/bpftool/feature.c   | 48 +++++++++++++++++-
 tools/lib/bpf/Build           |  2 +-
 tools/lib/bpf/libbpf.h        | 11 ++++
 tools/lib/bpf/libbpf.map      |  5 ++
 tools/lib/bpf/libbpf_probes.c | 95 +++++++++++++++++++++++++++++++++++
 5 files changed, 159 insertions(+), 2 deletions(-)
 create mode 100644 tools/lib/bpf/libbpf_probes.c
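
For reference, the kern_version value that the new get_kernel_version()
helper in libbpf_probes.c passes along for kprobe probes packs the running
kernel release as (version << 16) + (subversion << 8) + patchlevel; a 4.15.0
kernel, for example, is encoded as (4 << 16) + (15 << 8) + 0 = 265984.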

Patch

diff --git a/tools/bpf/bpftool/feature.c b/tools/bpf/bpftool/feature.c
index 954fb12a5228..d0022448c610 100644
--- a/tools/bpf/bpftool/feature.c
+++ b/tools/bpf/bpftool/feature.c
@@ -1,6 +1,7 @@ 
 // SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
 /* Copyright (c) 2019 Netronome Systems, Inc. */
 
+#include <ctype.h>
 #include <errno.h>
 #include <string.h>
 #include <unistd.h>
@@ -10,6 +11,7 @@ 
 #include <linux/limits.h>
 
 #include <bpf.h>
+#include <libbpf.h>
 
 #include "main.h"
 
@@ -40,6 +42,17 @@  print_start_section(const char *json_title, const char *plain_title)
 	}
 }
 
+static void
+print_end_then_start_section(const char *json_title, const char *plain_title)
+{
+	if (json_output)
+		jsonw_end_object(json_wtr);
+	else
+		printf("\n");
+
+	print_start_section(json_title, plain_title);
+}
+
 /* Probing functions */
 
 static bool probe_bpf_syscall(void)
@@ -56,9 +69,33 @@  static bool probe_bpf_syscall(void)
 	return res;
 }
 
+static void probe_prog_type(enum bpf_prog_type prog_type, bool *supported_types)
+{
+	const char *plain_comment = "eBPF program_type ";
+	char feat_name[128], plain_desc[128];
+	size_t maxlen;
+	bool res;
+
+	res = bpf_probe_prog_type(prog_type, 0);
+
+	supported_types[prog_type] |= res;
+
+	maxlen = sizeof(plain_desc) - strlen(plain_comment) - 1;
+	if (strlen(prog_type_name[prog_type]) > maxlen) {
+		p_info("program type name too long");
+		return;
+	}
+
+	sprintf(feat_name, "have_%s_prog_type", prog_type_name[prog_type]);
+	sprintf(plain_desc, "%s%s", plain_comment, prog_type_name[prog_type]);
+	print_bool_feature(feat_name, plain_desc, res);
+}
+
 static int do_probe(int argc, char **argv)
 {
 	enum probe_component target = COMPONENT_UNSPEC;
+	bool supported_types[128] = {};
+	unsigned int i;
 
 	/* Detection assumes user has sufficient privileges (CAP_SYS_ADMIN).
 	 * Let's approximate, and restrict usage to root user only.
@@ -91,8 +128,17 @@  static int do_probe(int argc, char **argv)
 	print_start_section("syscall_config",
 			    "Scanning system call availability...");
 
-	probe_bpf_syscall();
+	if (!probe_bpf_syscall())
+		/* bpf() syscall unavailable, don't probe other BPF features */
+		goto exit_close_json;
+
+	print_end_then_start_section("program_types",
+				     "Scanning eBPF program types...");
+
+	for (i = BPF_PROG_TYPE_UNSPEC + 1; i < ARRAY_SIZE(prog_type_name); i++)
+		probe_prog_type(i, supported_types);
 
+exit_close_json:
 	if (json_output) {
 		/* End current "section" of probes */
 		jsonw_end_object(json_wtr);
diff --git a/tools/lib/bpf/Build b/tools/lib/bpf/Build
index 197b40f5b5c6..bfd9bfc82c3b 100644
--- a/tools/lib/bpf/Build
+++ b/tools/lib/bpf/Build
@@ -1 +1 @@ 
-libbpf-y := libbpf.o bpf.o nlattr.o btf.o libbpf_errno.o str_error.o netlink.o bpf_prog_linfo.o
+libbpf-y := libbpf.o bpf.o nlattr.o btf.o libbpf_errno.o str_error.o netlink.o bpf_prog_linfo.o libbpf_probes.o
diff --git a/tools/lib/bpf/libbpf.h b/tools/lib/bpf/libbpf.h
index 5f68d7b75215..8e63821109ab 100644
--- a/tools/lib/bpf/libbpf.h
+++ b/tools/lib/bpf/libbpf.h
@@ -355,6 +355,17 @@  LIBBPF_API const struct bpf_line_info *
 bpf_prog_linfo__lfind(const struct bpf_prog_linfo *prog_linfo,
 		      __u32 insn_off, __u32 nr_skip);
 
+/*
+ * Probe for supported system features
+ *
+ * Note that running many of these probes in a short amount of time can cause
+ * the kernel to reach the maximal size of lockable memory allowed for the
+ * user, causing subsequent probes to fail. In this case, the caller may want
+ * to adjust that limit with setrlimit().
+ */
+LIBBPF_API bool bpf_probe_prog_type(enum bpf_prog_type prog_type,
+				    __u32 ifindex);
+
 #ifdef __cplusplus
 } /* extern "C" */
 #endif
diff --git a/tools/lib/bpf/libbpf.map b/tools/lib/bpf/libbpf.map
index cd02cd4e2cc3..c7ec3ffa24e9 100644
--- a/tools/lib/bpf/libbpf.map
+++ b/tools/lib/bpf/libbpf.map
@@ -124,3 +124,8 @@  LIBBPF_0.0.1 {
 	local:
 		*;
 };
+
+LIBBPF_0.0.2 {
+	global:
+		bpf_probe_prog_type;
+} LIBBPF_0.0.1;
diff --git a/tools/lib/bpf/libbpf_probes.c b/tools/lib/bpf/libbpf_probes.c
new file mode 100644
index 000000000000..056c0c186f2a
--- /dev/null
+++ b/tools/lib/bpf/libbpf_probes.c
@@ -0,0 +1,95 @@ 
+// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
+/* Copyright (c) 2019 Netronome Systems, Inc. */
+
+#include <errno.h>
+#include <unistd.h>
+#include <sys/utsname.h>
+
+#include <linux/filter.h>
+#include <linux/kernel.h>
+
+#include "bpf.h"
+#include "libbpf.h"
+
+static int get_kernel_version(void)
+{
+	int version, subversion, patchlevel;
+	struct utsname utsn;
+
+	/* Return 0 on failure, and attempt to probe with empty kversion */
+	if (uname(&utsn))
+		return 0;
+
+	if (sscanf(utsn.release, "%d.%d.%d",
+		   &version, &subversion, &patchlevel) != 3)
+		return 0;
+
+	return (version << 16) + (subversion << 8) + patchlevel;
+}
+
+static void
+probe_load(enum bpf_prog_type prog_type, const struct bpf_insn *insns,
+	   size_t insns_cnt, char *buf, size_t buf_len, __u32 ifindex)
+{
+	struct bpf_load_program_attr xattr = {};
+	int fd;
+
+	switch (prog_type) {
+	case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
+		xattr.expected_attach_type = BPF_CGROUP_INET4_CONNECT;
+		break;
+	case BPF_PROG_TYPE_KPROBE:
+		xattr.kern_version = get_kernel_version();
+		break;
+	case BPF_PROG_TYPE_UNSPEC:
+	case BPF_PROG_TYPE_SOCKET_FILTER:
+	case BPF_PROG_TYPE_SCHED_CLS:
+	case BPF_PROG_TYPE_SCHED_ACT:
+	case BPF_PROG_TYPE_TRACEPOINT:
+	case BPF_PROG_TYPE_XDP:
+	case BPF_PROG_TYPE_PERF_EVENT:
+	case BPF_PROG_TYPE_CGROUP_SKB:
+	case BPF_PROG_TYPE_CGROUP_SOCK:
+	case BPF_PROG_TYPE_LWT_IN:
+	case BPF_PROG_TYPE_LWT_OUT:
+	case BPF_PROG_TYPE_LWT_XMIT:
+	case BPF_PROG_TYPE_SOCK_OPS:
+	case BPF_PROG_TYPE_SK_SKB:
+	case BPF_PROG_TYPE_CGROUP_DEVICE:
+	case BPF_PROG_TYPE_SK_MSG:
+	case BPF_PROG_TYPE_RAW_TRACEPOINT:
+	case BPF_PROG_TYPE_LWT_SEG6LOCAL:
+	case BPF_PROG_TYPE_LIRC_MODE2:
+	case BPF_PROG_TYPE_SK_REUSEPORT:
+	case BPF_PROG_TYPE_FLOW_DISSECTOR:
+	default:
+		break;
+	}
+
+	xattr.prog_type = prog_type;
+	xattr.insns = insns;
+	xattr.insns_cnt = insns_cnt;
+	xattr.license = "GPL";
+	xattr.prog_ifindex = ifindex;
+
+	fd = bpf_load_program_xattr(&xattr, buf, buf_len);
+	if (fd >= 0)
+		close(fd);
+}
+
+bool bpf_probe_prog_type(enum bpf_prog_type prog_type, __u32 ifindex)
+{
+	struct bpf_insn insns[2] = {
+		BPF_MOV64_IMM(BPF_REG_0, 0),
+		BPF_EXIT_INSN()
+	};
+
+	if (ifindex && prog_type == BPF_PROG_TYPE_SCHED_CLS)
+		/* nfp returns -EINVAL on exit(0) with TC offload */
+		insns[0].imm = 2;
+
+	errno = 0;
+	probe_load(prog_type, insns, ARRAY_SIZE(insns), NULL, 0, ifindex);
+
+	return errno != EINVAL && errno != EOPNOTSUPP;
+}