
[v2,bpf-next,3/7] libbpf: add kprobe/uprobe attach API

Message ID 20190621045555.4152743-4-andriin@fb.com
State Changes Requested
Delegated to: BPF Maintainers
Series libbpf: add tracing attach APIs

Commit Message

Andrii Nakryiko June 21, 2019, 4:55 a.m. UTC
Add ability to attach to kernel and user probes and retprobes.
Implementation depends on perf event support for kprobes/uprobes.

Signed-off-by: Andrii Nakryiko <andriin@fb.com>
---
 tools/lib/bpf/libbpf.c   | 207 +++++++++++++++++++++++++++++++++++++++
 tools/lib/bpf/libbpf.h   |   8 ++
 tools/lib/bpf/libbpf.map |   2 +
 3 files changed, 217 insertions(+)
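
Both helpers return a perf event fd on success (or a negative error),
which can later be handed to libbpf_perf_event_disable_and_close()
added earlier in this series. A minimal caller-side sketch (the probed
function name, binary path, pid and offset are purely illustrative):

/* sketch: prog_k/prog_u are already loaded kprobe/uprobe BPF programs;
 * needs <bpf/libbpf.h>
 */
static int attach_probes(struct bpf_program *prog_k, struct bpf_program *prog_u,
			 pid_t target_pid, size_t func_offset)
{
	int kfd, ufd;

	kfd = bpf_program__attach_kprobe(prog_k, false /* !retprobe */,
					 "do_sys_open");
	if (kfd < 0)
		return kfd;	/* negative errno-style error code */

	ufd = bpf_program__attach_uprobe(prog_u, false /* !retprobe */,
					 target_pid, "/usr/lib64/libc.so.6",
					 func_offset);
	if (ufd < 0) {
		libbpf_perf_event_disable_and_close(kfd);
		return ufd;
	}

	/* ... consume data, then detach and close both events ... */

	libbpf_perf_event_disable_and_close(ufd);
	libbpf_perf_event_disable_and_close(kfd);
	return 0;
}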

Comments

Daniel Borkmann June 26, 2019, 2:25 p.m. UTC | #1
On 06/21/2019 06:55 AM, Andrii Nakryiko wrote:
> Add ability to attach to kernel and user probes and retprobes.
> Implementation depends on perf event support for kprobes/uprobes.
> 
> Signed-off-by: Andrii Nakryiko <andriin@fb.com>
> ---
>  tools/lib/bpf/libbpf.c   | 207 +++++++++++++++++++++++++++++++++++++++
>  tools/lib/bpf/libbpf.h   |   8 ++
>  tools/lib/bpf/libbpf.map |   2 +
>  3 files changed, 217 insertions(+)
> 
> diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
> index 2bb1fa008be3..d506772df350 100644
> --- a/tools/lib/bpf/libbpf.c
> +++ b/tools/lib/bpf/libbpf.c
> @@ -3969,6 +3969,213 @@ int bpf_program__attach_perf_event(struct bpf_program *prog, int pfd)
>  	return 0;
>  }
>  
> +static int parse_uint(const char *buf)
> +{
> +	int ret;
> +
> +	errno = 0;
> +	ret = (int)strtol(buf, NULL, 10);
> +	if (errno) {
> +		ret = -errno;
> +		pr_debug("failed to parse '%s' as unsigned int\n", buf);
> +		return ret;
> +	}
> +	if (ret < 0) {
> +		pr_debug("failed to parse '%s' as unsigned int\n", buf);
> +		return -EINVAL;
> +	}
> +	return ret;
> +}
> +
> +static int parse_uint_from_file(const char* file)
> +{
> +	char buf[STRERR_BUFSIZE];
> +	int fd, ret;
> +
> +	fd = open(file, O_RDONLY);
> +	if (fd < 0) {
> +		ret = -errno;
> +		pr_debug("failed to open '%s': %s\n", file,
> +			 libbpf_strerror_r(ret, buf, sizeof(buf)));
> +		return ret;
> +	}
> +	ret = read(fd, buf, sizeof(buf));
> +	ret = ret < 0 ? -errno : ret;
> +	close(fd);
> +	if (ret < 0) {
> +		pr_debug("failed to read '%s': %s\n", file,
> +			libbpf_strerror_r(ret, buf, sizeof(buf)));
> +		return ret;
> +	}
> +	if (ret == 0 || ret >= sizeof(buf)) {
> +		buf[sizeof(buf) - 1] = 0;
> +		pr_debug("unexpected input from '%s': '%s'\n", file, buf);
> +		return -EINVAL;
> +	}
> +	return parse_uint(buf);
> +}
> +
> +static int determine_kprobe_perf_type(void)
> +{
> +	const char *file = "/sys/bus/event_source/devices/kprobe/type";
> +	return parse_uint_from_file(file);
> +}
> +
> +static int determine_uprobe_perf_type(void)
> +{
> +	const char *file = "/sys/bus/event_source/devices/uprobe/type";
> +	return parse_uint_from_file(file);
> +}
> +
> +static int parse_config_from_file(const char *file)
> +{
> +	char buf[STRERR_BUFSIZE];
> +	int fd, ret;
> +
> +	fd = open(file, O_RDONLY);
> +	if (fd < 0) {
> +		ret = -errno;
> +		pr_debug("failed to open '%s': %s\n", file,
> +			 libbpf_strerror_r(ret, buf, sizeof(buf)));
> +		return ret;
> +	}
> +	ret = read(fd, buf, sizeof(buf));
> +	ret = ret < 0 ? -errno : ret;
> +	close(fd);
> +	if (ret < 0) {
> +		pr_debug("failed to read '%s': %s\n", file,
> +			libbpf_strerror_r(ret, buf, sizeof(buf)));
> +		return ret;
> +	}
> +	if (ret == 0 || ret >= sizeof(buf)) {
> +		buf[sizeof(buf) - 1] = 0;
> +		pr_debug("unexpected input from '%s': '%s'\n", file, buf);
> +		return -EINVAL;
> +	}
> +	if (strncmp(buf, "config:", 7)) {
> +		pr_debug("expected 'config:' prefix, found '%s'\n", buf);
> +		return -EINVAL;
> +	}
> +	return parse_uint(buf + 7);
> +}
> +
> +static int determine_kprobe_retprobe_bit(void)
> +{
> +	const char *file = "/sys/bus/event_source/devices/kprobe/format/retprobe";
> +	return parse_config_from_file(file);
> +}
> +
> +static int determine_uprobe_retprobe_bit(void)
> +{
> +	const char *file = "/sys/bus/event_source/devices/uprobe/format/retprobe";
> +	return parse_config_from_file(file);
> +}
> +
> +static int perf_event_open_probe(bool uprobe, bool retprobe, const char* name,
> +				 uint64_t offset, int pid)
> +{
> +	struct perf_event_attr attr = {};
> +	char errmsg[STRERR_BUFSIZE];
> +	int type, pfd, err;
> +
> +	type = uprobe ? determine_uprobe_perf_type()
> +		      : determine_kprobe_perf_type();
> +	if (type < 0) {
> +		pr_warning("failed to determine %s perf type: %s\n",
> +			   uprobe ? "uprobe" : "kprobe",
> +			   libbpf_strerror_r(type, errmsg, sizeof(errmsg)));
> +		return type;
> +	}
> +	if (retprobe) {
> +		int bit = uprobe ? determine_uprobe_retprobe_bit()
> +				 : determine_kprobe_retprobe_bit();
> +
> +		if (bit < 0) {
> +			pr_warning("failed to determine %s retprobe bit: %s\n",
> +				   uprobe ? "uprobe" : "kprobe",
> +				   libbpf_strerror_r(bit, errmsg,
> +						     sizeof(errmsg)));
> +			return bit;
> +		}
> +		attr.config |= 1 << bit;
> +	}
> +	attr.size = sizeof(attr);
> +	attr.type = type;
> +	attr.config1 = (uint64_t)(void *)name; /* kprobe_func or uprobe_path */
> +	attr.config2 = offset;		       /* kprobe_addr or probe_offset */
> +
> +	/* pid filter is meaningful only for uprobes */
> +	pfd = syscall(__NR_perf_event_open, &attr,
> +		      pid < 0 ? -1 : pid /* pid */,
> +		      pid == -1 ? 0 : -1 /* cpu */,
> +		      -1 /* group_fd */, PERF_FLAG_FD_CLOEXEC);
> +	if (pfd < 0) {
> +		err = -errno;
> +		pr_warning("%s perf_event_open() failed: %s\n",
> +			   uprobe ? "uprobe" : "kprobe",
> +			   libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
> +		return err;
> +	}
> +	return pfd;
> +}
> +
> +int bpf_program__attach_kprobe(struct bpf_program *prog, bool retprobe,
> +			       const char *func_name)
> +{
> +	char errmsg[STRERR_BUFSIZE];
> +	int pfd, err;
> +
> +	pfd = perf_event_open_probe(false /* uprobe */, retprobe, func_name,
> +				    0 /* offset */, -1 /* pid */);
> +	if (pfd < 0) {
> +		pr_warning("program '%s': failed to create %s '%s' perf event: %s\n",
> +			   bpf_program__title(prog, false),
> +			   retprobe ? "kretprobe" : "kprobe", func_name,
> +			   libbpf_strerror_r(pfd, errmsg, sizeof(errmsg)));
> +		return pfd;
> +	}
> +	err = bpf_program__attach_perf_event(prog, pfd);
> +	if (err) {
> +		libbpf_perf_event_disable_and_close(pfd);
> +		pr_warning("program '%s': failed to attach to %s '%s': %s\n",
> +			   bpf_program__title(prog, false),
> +			   retprobe ? "kretprobe" : "kprobe", func_name,
> +			   libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
> +		return err;
> +	}
> +	return pfd;
> +}

I do like that we facilitate usage by adding these APIs to libbpf, but my $0.02
would be that they should be designed slightly differently. See it as a nit, but
given it's exposed in libbpf.map and therefore immutable in future it's worth
considering; right now with this set here you have:

int bpf_program__attach_kprobe(struct bpf_program *prog, bool retprobe,
			       const char *func_name)
int bpf_program__attach_uprobe(struct bpf_program *prog, bool retprobe,
			       pid_t pid, const char *binary_path,
			       size_t func_offset)
int bpf_program__attach_tracepoint(struct bpf_program *prog,
				   const char *tp_category,
				   const char *tp_name)
int bpf_program__attach_raw_tracepoint(struct bpf_program *prog,
				       const char *tp_name)
int bpf_program__attach_perf_event(struct bpf_program *prog, int pfd)
int libbpf_perf_event_disable_and_close(int pfd)

So the idea is that all the bpf_program__attach_*() APIs return an fd that you
can later on pass into libbpf_perf_event_disable_and_close(). I think there is
a bit of a disconnect in that the bpf_program__attach_*() APIs try to do too
many things at once. For example, the bpf_program__attach_raw_tracepoint() fd
has nothing to do with perf, so passing it to libbpf_perf_event_disable_and_close()
kind of works, but is hacky since there's no PERF_EVENT_IOC_DISABLE for it so this
would always error if a user cares to check the return code. In the kernel, we
use anon inode for this kind of object. Also, if a user tries to add more than
one program to the same event, we need to recreate a new event fd every time.
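
Concretely, the caller-side pattern those signatures imply is roughly
the following (a sketch; the probe and tracepoint names are made up):

	int kfd, rfd;

	kfd = bpf_program__attach_kprobe(kprobe_prog, false, "do_sys_open");
	/* ... */
	/* kfd is a perf event fd, so this disables and closes it as intended */
	libbpf_perf_event_disable_and_close(kfd);

	rfd = bpf_program__attach_raw_tracepoint(raw_tp_prog, "sched_switch");
	/* ... */
	/* rfd is a raw tracepoint fd, not a perf event fd: the close() part
	 * still works, but the PERF_EVENT_IOC_DISABLE ioctl inside always
	 * fails, so a caller that checks the return value sees an error
	 */
	libbpf_perf_event_disable_and_close(rfd);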

What this boils down to is that this should get a proper abstraction, e.g. as
in struct libbpf_event which holds the event object. There should be helper
functions like libbpf_event_create_{kprobe,uprobe,tracepoint,raw_tracepoint} returning
such a struct libbpf_event object on success, and a single libbpf_event_destroy()
that does the event specific teardown. bpf_program__attach_event() can then take
care of only attaching the program to it. Having an object for this is also more
extensible than just a fd number. Nice thing is that this can also be completely
internal to libbpf.c as with struct bpf_program and other abstractions where we
don't expose the internals in the public header.
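
For illustration, the shape being proposed would be roughly the
following (a sketch following the description above; the parameter
lists are only assumed to mirror the existing attach_*() signatures,
they are not from an actual patch):

	struct libbpf_event;	/* opaque, internal to libbpf.c */

	struct libbpf_event *libbpf_event_create_kprobe(bool retprobe,
							 const char *func_name);
	struct libbpf_event *libbpf_event_create_uprobe(bool retprobe, pid_t pid,
							 const char *binary_path,
							 size_t func_offset);
	struct libbpf_event *libbpf_event_create_tracepoint(const char *tp_category,
							     const char *tp_name);
	struct libbpf_event *libbpf_event_create_raw_tracepoint(const char *tp_name);

	int bpf_program__attach_event(struct bpf_program *prog,
				      struct libbpf_event *event);
	int libbpf_event_destroy(struct libbpf_event *event);	/* event-specific teardown */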

Thanks,
Daniel
Andrii Nakryiko June 26, 2019, 10:15 p.m. UTC | #2
On Wed, Jun 26, 2019 at 7:25 AM Daniel Borkmann <daniel@iogearbox.net> wrote:
>
> On 06/21/2019 06:55 AM, Andrii Nakryiko wrote:
> > Add ability to attach to kernel and user probes and retprobes.
> > Implementation depends on perf event support for kprobes/uprobes.
> >
> > Signed-off-by: Andrii Nakryiko <andriin@fb.com>
> > ---

<snip>

> > +}
>
> I do like that we facilitate usage by adding these APIs to libbpf, but my $0.02
> would be that they should be designed slightly differently. See it as a nit, but
> given it's exposed in libbpf.map and therefore immutable in future it's worth
> considering; right now with this set here you have:
>
> int bpf_program__attach_kprobe(struct bpf_program *prog, bool retprobe,
>                                const char *func_name)
> int bpf_program__attach_uprobe(struct bpf_program *prog, bool retprobe,
>                                pid_t pid, const char *binary_path,
>                                size_t func_offset)
> int bpf_program__attach_tracepoint(struct bpf_program *prog,
>                                    const char *tp_category,
>                                    const char *tp_name)
> int bpf_program__attach_raw_tracepoint(struct bpf_program *prog,
>                                        const char *tp_name)
> int bpf_program__attach_perf_event(struct bpf_program *prog, int pfd)
> int libbpf_perf_event_disable_and_close(int pfd)
>
> So the idea is that all the bpf_program__attach_*() APIs return an fd that you
> can later on pass into libbpf_perf_event_disable_and_close(). I think there is
> a bit of a disconnect in that the bpf_program__attach_*() APIs try to do too
> many things at once. For example, the bpf_program__attach_raw_tracepoint() fd
> has nothing to do with perf, so passing it to libbpf_perf_event_disable_and_close()
> kind of works, but is hacky since there's no PERF_EVENT_IOC_DISABLE for it so this
> would always error if a user cares to check the return code. In the kernel, we

Yeah, you are absolutely right; I missed that it's not creating a
perf event under the covers, to be honest.

> use anon inode for this kind of object. Also, if a user tries to add more than
> one program to the same event, we need to recreate a new event fd every time.
>
> What this boils down to is that this should get a proper abstraction, e.g. as
> in struct libbpf_event which holds the event object. There should be helper
> functions like libbpf_event_create_{kprobe,uprobe,tracepoint,raw_tracepoint} returning
> such a struct libbpf_event object on success, and a single libbpf_event_destroy()
> that does the event specific teardown. bpf_program__attach_event() can then take
> care of only attaching the program to it. Having an object for this is also more
> extensible than just a fd number. Nice thing is that this can also be completely
> internal to libbpf.c as with struct bpf_program and other abstractions where we
> don't expose the internals in the public header.

Yeah, I totally agree, I think this is a great idea! I don't
particularly like "event" name, that seems very overloaded term. Do
you mind if I call this "bpf_hook" instead of "libbpf_event"? I've
always thought about these different points in the system to which one
can attach BPF program as hooks exposed from kernel :)

Would it also make sense to do attaching to non-tracing hooks using
the same mechanism (e.g., all the per-cgroup stuff, sysctl, etc)? Not
sure how people do that today, will check to see how it's done, but I
think nothing should conceptually prevent doing that using the same
abstract bpf_hook way, right?

>
> Thanks,
> Daniel
Daniel Borkmann June 27, 2019, 9:16 p.m. UTC | #3
On 06/27/2019 12:15 AM, Andrii Nakryiko wrote:
> On Wed, Jun 26, 2019 at 7:25 AM Daniel Borkmann <daniel@iogearbox.net> wrote:
[...]
>> What this boils down to is that this should get a proper abstraction, e.g. as
>> in struct libbpf_event which holds the event object. There should be helper
>> functions like libbpf_event_create_{kprobe,uprobe,tracepoint,raw_tracepoint} returning
>> such a struct libbpf_event object on success, and a single libbpf_event_destroy()
>> that does the event specific teardown. bpf_program__attach_event() can then take
>> care of only attaching the program to it. Having an object for this is also more
>> extensible than just a fd number. Nice thing is that this can also be completely
>> internal to libbpf.c as with struct bpf_program and other abstractions where we
>> don't expose the internals in the public header.
> 
> Yeah, I totally agree, I think this is a great idea! I don't
> particularly like "event" name, that seems very overloaded term. Do
> you mind if I call this "bpf_hook" instead of "libbpf_event"? I've
> always thought about these different points in the system to which one
> can attach BPF program as hooks exposed from kernel :)
> 
> Would it also make sense to do attaching to non-tracing hooks using
> the same mechanism (e.g., all the per-cgroup stuff, sysctl, etc)? Not
> sure how people do that today, will check to see how it's done, but I
> think nothing should conceptually prevent doing that using the same
> abstract bpf_hook way, right?

I think if we abstract it this way, then absolutely. If I grok the naming conventions
from the README right, then this would be under 'bpf_hook__' prefix. :)

Thanks,
Daniel
Andrii Nakryiko June 27, 2019, 9:42 p.m. UTC | #4
On Thu, Jun 27, 2019 at 2:16 PM Daniel Borkmann <daniel@iogearbox.net> wrote:
>
> On 06/27/2019 12:15 AM, Andrii Nakryiko wrote:
> > On Wed, Jun 26, 2019 at 7:25 AM Daniel Borkmann <daniel@iogearbox.net> wrote:
> [...]
> >> What this boils down to is that this should get a proper abstraction, e.g. as
> >> in struct libbpf_event which holds the event object. There should be helper
> >> functions like libbpf_event_create_{kprobe,uprobe,tracepoint,raw_tracepoint} returning
> >> such a struct libbpf_event object on success, and a single libbpf_event_destroy()
> >> that does the event specific teardown. bpf_program__attach_event() can then take
> >> care of only attaching the program to it. Having an object for this is also more
> >> extensible than just a fd number. Nice thing is that this can also be completely
> >> internal to libbpf.c as with struct bpf_program and other abstractions where we
> >> don't expose the internals in the public header.
> >
> > Yeah, I totally agree, I think this is a great idea! I don't
> > particularly like "event" name, that seems very overloaded term. Do
> > you mind if I call this "bpf_hook" instead of "libbpf_event"? I've
> > always thought about these different points in the system to which one
> > can attach BPF program as hooks exposed from kernel :)
> >
> > Would it also make sense to do attaching to non-tracing hooks using
> > the same mechanism (e.g., all the per-cgroup stuff, sysctl, etc)? Not
> > sure how people do that today, will check to see how it's done, but I
> > think nothing should conceptually prevent doing that using the same
> > abstract bpf_hook way, right?
>
> I think if we abstract it this way, then absolutely. If I grok the naming conventions
> from the README right, then this would be under 'bpf_hook__' prefix. :)

Yeah, so this is what I had, API-wise:

struct bpf_hook;

LIBBPF_API struct bpf_hook *bpf_hook__new_perf_event(int pfd);
LIBBPF_API struct bpf_hook *bpf_hook__new_kprobe(bool retprobe,
                                                 const char *func_name);
LIBBPF_API struct bpf_hook *bpf_hook__new_uprobe(bool retprobe, pid_t pid,
                                                 const char *binary_path,
                                                 size_t func_offset);
LIBBPF_API struct bpf_hook *bpf_hook__new_tracepoint(const char *tp_category,
                                                     const char *tp_name);
LIBBPF_API struct bpf_hook *bpf_hook__new_raw_tracepoint(const char *tp_name);

LIBBPF_API int bpf_hook__attach_program(struct bpf_hook *hook,
                                        struct bpf_program *prog);
LIBBPF_API int bpf_hook__free(struct bpf_hook *hook);


You'd use bpf_hook__new_xxx to create a struct bpf_hook, which then gets
attached using the generic bpf_hook__attach_program and detached/freed
with the generic bpf_hook__free.
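
In caller code that flow looks roughly like this (a sketch using the
declarations above; the kprobe target and the assumption that the
constructors return NULL on error are illustrative):

	struct bpf_hook *hook;
	int err;

	hook = bpf_hook__new_kprobe(false /* retprobe */, "do_sys_open");
	if (!hook)			/* assuming NULL on failure */
		return -1;

	err = bpf_hook__attach_program(hook, prog);
	if (err) {
		bpf_hook__free(hook);	/* extra cleanup needed on this path */
		return err;
	}

	/* ... later, detach and free whatever the hook type is ... */
	bpf_hook__free(hook);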

But once I converted the selftests, I realized that this generic
bpf_hook__attach_program is kind of unnecessary and is just a
boilerplate extra function that everyone has to call. So now I'm
leaning towards a hybrid approach:

- bpf_program__attach_xxx will create some specific struct bpf_hook
*and* attach bpf_program to it;
- bpf_hook__free(struct bpf_hook *) would still be used to detach/free
resources, abstracting away specifics of detaching.

There are a few benefits to this, I think:
1. Less error checking and clean-up for the caller: attach either
succeeds (and you'll eventually have to do bpf_hook__free) or it
fails, and then nothing needs to be cleaned up. With separate
create/attach, if create succeeds but attach fails, you'll have to do
an extra bpf_hook__free call. Bundling create/attach does prevent the
theoretical use case of creating a hook in one place and then passing
it generically into another place for attachment, but that seems like
a far-fetched corner case, which can be implemented at the
application level, if necessary.
2. bpf_program__attach has more context for helpful log messages if
something goes wrong. E.g., once
bpf_program__attach_tracepoint(tp_category, tp_name) has created the
perf event FD for the tracepoint, it can discard tp_category and
tp_name and use only the FD for attachment. But if attachment fails,
we don't really know which tracepoint failed to attach. To facilitate
that, you'd need to allocate/copy tp_category/tp_name (just in case,
for logging), which is a PITA. With bundled attach, you can log a
nice error with context right there, with no overhead.

This still allows doing cgroup/flow/sysctl/etc. attachment in a
similar, uniform way (but that's for another set of patches).
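
With the bundled approach the same caller side shrinks to roughly this
(again a sketch; it assumes the attach call hands back the new
bpf_hook, the exact return type isn't spelled out above):

	struct bpf_hook *hook;

	hook = bpf_program__attach_kprobe(prog, false /* retprobe */,
					  "do_sys_open");
	if (!hook)	/* either create + attach both succeeded, or there is
			 * nothing to clean up */
		return -1;

	/* ... later ... */
	bpf_hook__free(hook);	/* detach + free, regardless of hook type */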

Also, I renamed bpf_hook to bpf_link, as it seems to better convey
the connection between the attachment point (the hook) and the
bpf_program. An alternative might be bpf_assoc; I'll mention that as
well in the cover letter.

Anyways, I think it's better usability without losing anything
(realistically) in terms of flexibility for users. I'll post v2 later
today.


>
> Thanks,
> Daniel

Patch

diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
index 2bb1fa008be3..d506772df350 100644
--- a/tools/lib/bpf/libbpf.c
+++ b/tools/lib/bpf/libbpf.c
@@ -3969,6 +3969,213 @@  int bpf_program__attach_perf_event(struct bpf_program *prog, int pfd)
 	return 0;
 }
 
+static int parse_uint(const char *buf)
+{
+	int ret;
+
+	errno = 0;
+	ret = (int)strtol(buf, NULL, 10);
+	if (errno) {
+		ret = -errno;
+		pr_debug("failed to parse '%s' as unsigned int\n", buf);
+		return ret;
+	}
+	if (ret < 0) {
+		pr_debug("failed to parse '%s' as unsigned int\n", buf);
+		return -EINVAL;
+	}
+	return ret;
+}
+
+static int parse_uint_from_file(const char* file)
+{
+	char buf[STRERR_BUFSIZE];
+	int fd, ret;
+
+	fd = open(file, O_RDONLY);
+	if (fd < 0) {
+		ret = -errno;
+		pr_debug("failed to open '%s': %s\n", file,
+			 libbpf_strerror_r(ret, buf, sizeof(buf)));
+		return ret;
+	}
+	ret = read(fd, buf, sizeof(buf));
+	ret = ret < 0 ? -errno : ret;
+	close(fd);
+	if (ret < 0) {
+		pr_debug("failed to read '%s': %s\n", file,
+			libbpf_strerror_r(ret, buf, sizeof(buf)));
+		return ret;
+	}
+	if (ret == 0 || ret >= sizeof(buf)) {
+		buf[sizeof(buf) - 1] = 0;
+		pr_debug("unexpected input from '%s': '%s'\n", file, buf);
+		return -EINVAL;
+	}
+	return parse_uint(buf);
+}
+
+static int determine_kprobe_perf_type(void)
+{
+	const char *file = "/sys/bus/event_source/devices/kprobe/type";
+	return parse_uint_from_file(file);
+}
+
+static int determine_uprobe_perf_type(void)
+{
+	const char *file = "/sys/bus/event_source/devices/uprobe/type";
+	return parse_uint_from_file(file);
+}
+
+static int parse_config_from_file(const char *file)
+{
+	char buf[STRERR_BUFSIZE];
+	int fd, ret;
+
+	fd = open(file, O_RDONLY);
+	if (fd < 0) {
+		ret = -errno;
+		pr_debug("failed to open '%s': %s\n", file,
+			 libbpf_strerror_r(ret, buf, sizeof(buf)));
+		return ret;
+	}
+	ret = read(fd, buf, sizeof(buf));
+	ret = ret < 0 ? -errno : ret;
+	close(fd);
+	if (ret < 0) {
+		pr_debug("failed to read '%s': %s\n", file,
+			libbpf_strerror_r(ret, buf, sizeof(buf)));
+		return ret;
+	}
+	if (ret == 0 || ret >= sizeof(buf)) {
+		buf[sizeof(buf) - 1] = 0;
+		pr_debug("unexpected input from '%s': '%s'\n", file, buf);
+		return -EINVAL;
+	}
+	if (strncmp(buf, "config:", 7)) {
+		pr_debug("expected 'config:' prefix, found '%s'\n", buf);
+		return -EINVAL;
+	}
+	return parse_uint(buf + 7);
+}
+
+static int determine_kprobe_retprobe_bit(void)
+{
+	const char *file = "/sys/bus/event_source/devices/kprobe/format/retprobe";
+	return parse_config_from_file(file);
+}
+
+static int determine_uprobe_retprobe_bit(void)
+{
+	const char *file = "/sys/bus/event_source/devices/uprobe/format/retprobe";
+	return parse_config_from_file(file);
+}
+
+static int perf_event_open_probe(bool uprobe, bool retprobe, const char* name,
+				 uint64_t offset, int pid)
+{
+	struct perf_event_attr attr = {};
+	char errmsg[STRERR_BUFSIZE];
+	int type, pfd, err;
+
+	type = uprobe ? determine_uprobe_perf_type()
+		      : determine_kprobe_perf_type();
+	if (type < 0) {
+		pr_warning("failed to determine %s perf type: %s\n",
+			   uprobe ? "uprobe" : "kprobe",
+			   libbpf_strerror_r(type, errmsg, sizeof(errmsg)));
+		return type;
+	}
+	if (retprobe) {
+		int bit = uprobe ? determine_uprobe_retprobe_bit()
+				 : determine_kprobe_retprobe_bit();
+
+		if (bit < 0) {
+			pr_warning("failed to determine %s retprobe bit: %s\n",
+				   uprobe ? "uprobe" : "kprobe",
+				   libbpf_strerror_r(bit, errmsg,
+						     sizeof(errmsg)));
+			return bit;
+		}
+		attr.config |= 1 << bit;
+	}
+	attr.size = sizeof(attr);
+	attr.type = type;
+	attr.config1 = (uint64_t)(void *)name; /* kprobe_func or uprobe_path */
+	attr.config2 = offset;		       /* kprobe_addr or probe_offset */
+
+	/* pid filter is meaningful only for uprobes */
+	pfd = syscall(__NR_perf_event_open, &attr,
+		      pid < 0 ? -1 : pid /* pid */,
+		      pid == -1 ? 0 : -1 /* cpu */,
+		      -1 /* group_fd */, PERF_FLAG_FD_CLOEXEC);
+	if (pfd < 0) {
+		err = -errno;
+		pr_warning("%s perf_event_open() failed: %s\n",
+			   uprobe ? "uprobe" : "kprobe",
+			   libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
+		return err;
+	}
+	return pfd;
+}
+
+int bpf_program__attach_kprobe(struct bpf_program *prog, bool retprobe,
+			       const char *func_name)
+{
+	char errmsg[STRERR_BUFSIZE];
+	int pfd, err;
+
+	pfd = perf_event_open_probe(false /* uprobe */, retprobe, func_name,
+				    0 /* offset */, -1 /* pid */);
+	if (pfd < 0) {
+		pr_warning("program '%s': failed to create %s '%s' perf event: %s\n",
+			   bpf_program__title(prog, false),
+			   retprobe ? "kretprobe" : "kprobe", func_name,
+			   libbpf_strerror_r(pfd, errmsg, sizeof(errmsg)));
+		return pfd;
+	}
+	err = bpf_program__attach_perf_event(prog, pfd);
+	if (err) {
+		libbpf_perf_event_disable_and_close(pfd);
+		pr_warning("program '%s': failed to attach to %s '%s': %s\n",
+			   bpf_program__title(prog, false),
+			   retprobe ? "kretprobe" : "kprobe", func_name,
+			   libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
+		return err;
+	}
+	return pfd;
+}
+
+int bpf_program__attach_uprobe(struct bpf_program *prog, bool retprobe,
+			       pid_t pid, const char *binary_path,
+			       size_t func_offset)
+{
+	char errmsg[STRERR_BUFSIZE];
+	int pfd, err;
+
+	pfd = perf_event_open_probe(true /* uprobe */, retprobe,
+				    binary_path, func_offset, pid);
+	if (pfd < 0) {
+		pr_warning("program '%s': failed to create %s '%s:0x%zx' perf event: %s\n",
+			   bpf_program__title(prog, false),
+			   retprobe ? "uretprobe" : "uprobe",
+			   binary_path, func_offset,
+			   libbpf_strerror_r(pfd, errmsg, sizeof(errmsg)));
+		return pfd;
+	}
+	err = bpf_program__attach_perf_event(prog, pfd);
+	if (err) {
+		libbpf_perf_event_disable_and_close(pfd);
+		pr_warning("program '%s': failed to attach to %s '%s:0x%zx': %s\n",
+			   bpf_program__title(prog, false),
+			   retprobe ? "uretprobe" : "uprobe",
+			   binary_path, func_offset,
+			   libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
+		return err;
+	}
+	return pfd;
+}
+
 enum bpf_perf_event_ret
 bpf_perf_event_read_simple(void *mmap_mem, size_t mmap_size, size_t page_size,
 			   void **copy_mem, size_t *copy_size,
diff --git a/tools/lib/bpf/libbpf.h b/tools/lib/bpf/libbpf.h
index 76db1bbc0dac..a7264f06aa5f 100644
--- a/tools/lib/bpf/libbpf.h
+++ b/tools/lib/bpf/libbpf.h
@@ -168,6 +168,14 @@  LIBBPF_API void bpf_program__unload(struct bpf_program *prog);
 LIBBPF_API int libbpf_perf_event_disable_and_close(int pfd);
 LIBBPF_API int bpf_program__attach_perf_event(struct bpf_program *prog,
 					      int pfd);
+LIBBPF_API int bpf_program__attach_kprobe(struct bpf_program *prog,
+					  bool retprobe,
+					  const char *func_name);
+LIBBPF_API int bpf_program__attach_uprobe(struct bpf_program *prog,
+					  bool retprobe,
+					  pid_t pid,
+					  const char *binary_path,
+					  size_t func_offset);
 
 struct bpf_insn;
 
diff --git a/tools/lib/bpf/libbpf.map b/tools/lib/bpf/libbpf.map
index d27406982b5a..1a982c2e1751 100644
--- a/tools/lib/bpf/libbpf.map
+++ b/tools/lib/bpf/libbpf.map
@@ -172,7 +172,9 @@  LIBBPF_0.0.4 {
 		btf_dump__new;
 		btf__parse_elf;
 		bpf_object__load_xattr;
+		bpf_program__attach_kprobe;
 		bpf_program__attach_perf_event;
+		bpf_program__attach_uprobe;
 		libbpf_num_possible_cpus;
 		libbpf_perf_event_disable_and_close;
 } LIBBPF_0.0.3;