[bpf-next,v2,1/6] libbpf: Unpin auto-pinned maps if loading fails

Message ID 157324878624.910124.5124587166846797199.stgit@toke.dk
State Changes Requested
Delegated to: BPF Maintainers
Series libbpf: Fix pinning and error message bugs and add new getters

Commit Message

Toke Høiland-Jørgensen Nov. 8, 2019, 9:33 p.m. UTC
From: Toke Høiland-Jørgensen <toke@redhat.com>

Since the automatic map-pinning happens during load, it will leave pinned
maps around if the load fails at a later stage. Fix this by unpinning any
pinned maps on cleanup. To avoid unpinning pinned maps that were reused
rather than newly pinned, add a new boolean property on struct bpf_map to
keep track of whether that map was reused or not; and only unpin those maps
that were not reused.

Fixes: 57a00f41644f ("libbpf: Add auto-pinning of maps when loading BPF objects")
Acked-by: Song Liu <songliubraving@fb.com>
Signed-off-by: Toke Høiland-Jørgensen <toke@redhat.com>
---
 tools/lib/bpf/libbpf.c |   16 +++++++++++++---
 1 file changed, 13 insertions(+), 3 deletions(-)
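
For context, the failure mode being fixed here can be reproduced roughly as
below. This is only a sketch: the object path, pin root and map names are
made up, and it assumes the maps in the object declare
__uint(pinning, LIBBPF_PIN_BY_NAME) so that auto-pinning kicks in during load.

#include <bpf/libbpf.h>

int main(void)
{
	/* Hypothetical pin root; the maps in prog.o are assumed to opt in
	 * to auto-pinning via __uint(pinning, LIBBPF_PIN_BY_NAME).
	 */
	DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts,
			    .pin_root_path = "/sys/fs/bpf/myapp");
	struct bpf_object *obj;
	int err;

	obj = bpf_object__open_file("prog.o", &opts);
	if (libbpf_get_error(obj))
		return 1;

	/* If a program in the object fails verification, the maps have
	 * already been created and auto-pinned by the time load fails.
	 * Before this patch, the pins under /sys/fs/bpf/myapp were left
	 * behind; with it, pins created by the failed load are removed
	 * again, while pins that were reused from an earlier run are kept.
	 */
	err = bpf_object__load(obj);

	bpf_object__close(obj);
	return err ? 1 : 0;
}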

Comments

David Miller Nov. 8, 2019, 9:41 p.m. UTC | #1
From: Toke Høiland-Jørgensen <toke@redhat.com>
Date: Fri, 08 Nov 2019 22:33:06 +0100

> From: Toke Høiland-Jørgensen <toke@redhat.com>
> 
> Since the automatic map-pinning happens during load, it will leave pinned
> maps around if the load fails at a later stage. Fix this by unpinning any
> pinned maps on cleanup. To avoid unpinning pinned maps that were reused
> rather than newly pinned, add a new boolean property on struct bpf_map to
> keep track of whether that map was reused or not; and only unpin those maps
> that were not reused.
> 
> Fixes: 57a00f41644f ("libbpf: Add auto-pinning of maps when loading BPF objects")
> Acked-by: Song Liu <songliubraving@fb.com>
> Signed-off-by: Toke Høiland-Jørgensen <toke@redhat.com>

Acked-by: David S. Miller <davem@davemloft.net>
Andrii Nakryiko Nov. 8, 2019, 10:40 p.m. UTC | #2
On Fri, Nov 8, 2019 at 1:33 PM Toke Høiland-Jørgensen <toke@redhat.com> wrote:
>
> From: Toke Høiland-Jørgensen <toke@redhat.com>
>
> Since the automatic map-pinning happens during load, it will leave pinned
> maps around if the load fails at a later stage. Fix this by unpinning any
> pinned maps on cleanup. To avoid unpinning pinned maps that were reused
> rather than newly pinned, add a new boolean property on struct bpf_map to
> keep track of whether that map was reused or not; and only unpin those maps
> that were not reused.
>
> Fixes: 57a00f41644f ("libbpf: Add auto-pinning of maps when loading BPF objects")
> Acked-by: Song Liu <songliubraving@fb.com>
> Signed-off-by: Toke Høiland-Jørgensen <toke@redhat.com>
> ---
>  tools/lib/bpf/libbpf.c |   16 +++++++++++++---
>  1 file changed, 13 insertions(+), 3 deletions(-)
>
> diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
> index be4af95d5a2c..cea61b2ec9d3 100644
> --- a/tools/lib/bpf/libbpf.c
> +++ b/tools/lib/bpf/libbpf.c
> @@ -229,6 +229,7 @@ struct bpf_map {
>         enum libbpf_map_type libbpf_type;
>         char *pin_path;
>         bool pinned;
> +       bool was_reused;

nit: just reused, similar to pinned?

>  };
>
>  struct bpf_secdata {
> @@ -1995,6 +1996,7 @@ int bpf_map__reuse_fd(struct bpf_map *map, int fd)
>         map->def.map_flags = info.map_flags;
>         map->btf_key_type_id = info.btf_key_type_id;
>         map->btf_value_type_id = info.btf_value_type_id;
> +       map->was_reused = true;
>
>         return 0;
>
> @@ -4007,15 +4009,18 @@ bpf_object__open_buffer(const void *obj_buf, size_t obj_buf_sz,
>         return bpf_object__open_mem(obj_buf, obj_buf_sz, &opts);
>  }
>
> -int bpf_object__unload(struct bpf_object *obj)
> +static int __bpf_object__unload(struct bpf_object *obj, bool unpin)
>  {
>         size_t i;
>
>         if (!obj)
>                 return -EINVAL;
>
> -       for (i = 0; i < obj->nr_maps; i++)
> +       for (i = 0; i < obj->nr_maps; i++) {
>                 zclose(obj->maps[i].fd);
> +               if (unpin && obj->maps[i].pinned && !obj->maps[i].was_reused)
> +                       bpf_map__unpin(&obj->maps[i], NULL);
> +       }
>
>         for (i = 0; i < obj->nr_programs; i++)
>                 bpf_program__unload(&obj->programs[i]);
> @@ -4023,6 +4028,11 @@ int bpf_object__unload(struct bpf_object *obj)
>         return 0;
>  }
>
> +int bpf_object__unload(struct bpf_object *obj)
> +{
> +       return __bpf_object__unload(obj, false);
> +}
> +
>  int bpf_object__load_xattr(struct bpf_object_load_attr *attr)
>  {
>         struct bpf_object *obj;
> @@ -4047,7 +4057,7 @@ int bpf_object__load_xattr(struct bpf_object_load_attr *attr)
>
>         return 0;
>  out:
> -       bpf_object__unload(obj);
> +       __bpf_object__unload(obj, true);

given this is the only (special) case of auto-unpinning auto-pinned
maps, why not do a trivial loop here, instead of having this extra
unpin flag and an extra __bpf_object__unload function?

>         pr_warn("failed to load object '%s'\n", obj->path);
>         return err;
>  }
>
Toke Høiland-Jørgensen Nov. 8, 2019, 11:33 p.m. UTC | #3
Andrii Nakryiko <andrii.nakryiko@gmail.com> writes:

> On Fri, Nov 8, 2019 at 1:33 PM Toke Høiland-Jørgensen <toke@redhat.com> wrote:
>>
>> From: Toke Høiland-Jørgensen <toke@redhat.com>
>>
>> Since the automatic map-pinning happens during load, it will leave pinned
>> maps around if the load fails at a later stage. Fix this by unpinning any
>> pinned maps on cleanup. To avoid unpinning pinned maps that were reused
>> rather than newly pinned, add a new boolean property on struct bpf_map to
>> keep track of whether that map was reused or not; and only unpin those maps
>> that were not reused.
>>
>> Fixes: 57a00f41644f ("libbpf: Add auto-pinning of maps when loading BPF objects")
>> Acked-by: Song Liu <songliubraving@fb.com>
>> Signed-off-by: Toke Høiland-Jørgensen <toke@redhat.com>
>> ---
>>  tools/lib/bpf/libbpf.c |   16 +++++++++++++---
>>  1 file changed, 13 insertions(+), 3 deletions(-)
>>
>> diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
>> index be4af95d5a2c..cea61b2ec9d3 100644
>> --- a/tools/lib/bpf/libbpf.c
>> +++ b/tools/lib/bpf/libbpf.c
>> @@ -229,6 +229,7 @@ struct bpf_map {
>>         enum libbpf_map_type libbpf_type;
>>         char *pin_path;
>>         bool pinned;
>> +       bool was_reused;
>
> nit: just reused, similar to pinned?
>
>>  };
>>
>>  struct bpf_secdata {
>> @@ -1995,6 +1996,7 @@ int bpf_map__reuse_fd(struct bpf_map *map, int fd)
>>         map->def.map_flags = info.map_flags;
>>         map->btf_key_type_id = info.btf_key_type_id;
>>         map->btf_value_type_id = info.btf_value_type_id;
>> +       map->was_reused = true;
>>
>>         return 0;
>>
>> @@ -4007,15 +4009,18 @@ bpf_object__open_buffer(const void *obj_buf, size_t obj_buf_sz,
>>         return bpf_object__open_mem(obj_buf, obj_buf_sz, &opts);
>>  }
>>
>> -int bpf_object__unload(struct bpf_object *obj)
>> +static int __bpf_object__unload(struct bpf_object *obj, bool unpin)
>>  {
>>         size_t i;
>>
>>         if (!obj)
>>                 return -EINVAL;
>>
>> -       for (i = 0; i < obj->nr_maps; i++)
>> +       for (i = 0; i < obj->nr_maps; i++) {
>>                 zclose(obj->maps[i].fd);
>> +               if (unpin && obj->maps[i].pinned && !obj->maps[i].was_reused)
>> +                       bpf_map__unpin(&obj->maps[i], NULL);
>> +       }
>>
>>         for (i = 0; i < obj->nr_programs; i++)
>>                 bpf_program__unload(&obj->programs[i]);
>> @@ -4023,6 +4028,11 @@ int bpf_object__unload(struct bpf_object *obj)
>>         return 0;
>>  }
>>
>> +int bpf_object__unload(struct bpf_object *obj)
>> +{
>> +       return __bpf_object__unload(obj, false);
>> +}
>> +
>>  int bpf_object__load_xattr(struct bpf_object_load_attr *attr)
>>  {
>>         struct bpf_object *obj;
>> @@ -4047,7 +4057,7 @@ int bpf_object__load_xattr(struct bpf_object_load_attr *attr)
>>
>>         return 0;
>>  out:
>> -       bpf_object__unload(obj);
>> +       __bpf_object__unload(obj, true);
>
> given this is the only (special) case of auto-unpinning auto-pinned
> maps, why not do a trivial loop here, instead of having this extra
> unpin flag and an extra __bpf_object__unload function?

Oh, you mean just do a loop in addition to the call to __unload? Sure, I
guess we can do that instead...

-Toke
Andrii Nakryiko Nov. 8, 2019, 11:35 p.m. UTC | #4
On Fri, Nov 8, 2019 at 3:33 PM Toke Høiland-Jørgensen <toke@redhat.com> wrote:
>
> Andrii Nakryiko <andrii.nakryiko@gmail.com> writes:
>
> > On Fri, Nov 8, 2019 at 1:33 PM Toke Høiland-Jørgensen <toke@redhat.com> wrote:
> >>
> >> From: Toke Høiland-Jørgensen <toke@redhat.com>
> >>
> >> Since the automatic map-pinning happens during load, it will leave pinned
> >> maps around if the load fails at a later stage. Fix this by unpinning any
> >> pinned maps on cleanup. To avoid unpinning pinned maps that were reused
> >> rather than newly pinned, add a new boolean property on struct bpf_map to
> >> keep track of whether that map was reused or not; and only unpin those maps
> >> that were not reused.
> >>
> >> Fixes: 57a00f41644f ("libbpf: Add auto-pinning of maps when loading BPF objects")
> >> Acked-by: Song Liu <songliubraving@fb.com>
> >> Signed-off-by: Toke Høiland-Jørgensen <toke@redhat.com>
> >> ---
> >>  tools/lib/bpf/libbpf.c |   16 +++++++++++++---
> >>  1 file changed, 13 insertions(+), 3 deletions(-)
> >>
> >> diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
> >> index be4af95d5a2c..cea61b2ec9d3 100644
> >> --- a/tools/lib/bpf/libbpf.c
> >> +++ b/tools/lib/bpf/libbpf.c
> >> @@ -229,6 +229,7 @@ struct bpf_map {
> >>         enum libbpf_map_type libbpf_type;
> >>         char *pin_path;
> >>         bool pinned;
> >> +       bool was_reused;
> >
> > nit: just reused, similar to pinned?
> >
> >>  };
> >>
> >>  struct bpf_secdata {
> >> @@ -1995,6 +1996,7 @@ int bpf_map__reuse_fd(struct bpf_map *map, int fd)
> >>         map->def.map_flags = info.map_flags;
> >>         map->btf_key_type_id = info.btf_key_type_id;
> >>         map->btf_value_type_id = info.btf_value_type_id;
> >> +       map->was_reused = true;
> >>
> >>         return 0;
> >>
> >> @@ -4007,15 +4009,18 @@ bpf_object__open_buffer(const void *obj_buf, size_t obj_buf_sz,
> >>         return bpf_object__open_mem(obj_buf, obj_buf_sz, &opts);
> >>  }
> >>
> >> -int bpf_object__unload(struct bpf_object *obj)
> >> +static int __bpf_object__unload(struct bpf_object *obj, bool unpin)
> >>  {
> >>         size_t i;
> >>
> >>         if (!obj)
> >>                 return -EINVAL;
> >>
> >> -       for (i = 0; i < obj->nr_maps; i++)
> >> +       for (i = 0; i < obj->nr_maps; i++) {
> >>                 zclose(obj->maps[i].fd);
> >> +               if (unpin && obj->maps[i].pinned && !obj->maps[i].was_reused)
> >> +                       bpf_map__unpin(&obj->maps[i], NULL);
> >> +       }
> >>
> >>         for (i = 0; i < obj->nr_programs; i++)
> >>                 bpf_program__unload(&obj->programs[i]);
> >> @@ -4023,6 +4028,11 @@ int bpf_object__unload(struct bpf_object *obj)
> >>         return 0;
> >>  }
> >>
> >> +int bpf_object__unload(struct bpf_object *obj)
> >> +{
> >> +       return __bpf_object__unload(obj, false);
> >> +}
> >> +
> >>  int bpf_object__load_xattr(struct bpf_object_load_attr *attr)
> >>  {
> >>         struct bpf_object *obj;
> >> @@ -4047,7 +4057,7 @@ int bpf_object__load_xattr(struct bpf_object_load_attr *attr)
> >>
> >>         return 0;
> >>  out:
> >> -       bpf_object__unload(obj);
> >> +       __bpf_object__unload(obj, true);
> >
> > given this is the only (special) case of auto-unpinning auto-pinned
> > maps, why not do a trivial loop here, instead of having this extra
> > unpin flag and an extra __bpf_object__unload function?
>
> Oh, you mean just do a loop in addition to the call to __unload? Sure, I
> guess we can do that instead...

I think that's cleaner, because it's custom clean-up logic in one
place, rather than a supported feature of unload.

>
> -Toke
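
For reference, the alternative suggested above would keep bpf_object__unload()
untouched and do the unpinning directly in the error path of
bpf_object__load_xattr(). A rough sketch of that error-path tail (not the
actual follow-up revision; i would be a size_t declared at the top of the
function) could look like:

out:
	/* unpin only the maps that this load attempt auto-pinned; pins
	 * reused from pre-existing maps are left alone
	 */
	for (i = 0; i < obj->nr_maps; i++)
		if (obj->maps[i].pinned && !obj->maps[i].was_reused)
			bpf_map__unpin(&obj->maps[i], NULL);

	bpf_object__unload(obj);
	pr_warn("failed to load object '%s'\n", obj->path);
	return err;

With that shape, the was_reused flag (or just "reused", per the nit above) is
still needed, but the extra unpin parameter and the __bpf_object__unload()
wrapper go away.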

Patch

diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
index be4af95d5a2c..cea61b2ec9d3 100644
--- a/tools/lib/bpf/libbpf.c
+++ b/tools/lib/bpf/libbpf.c
@@ -229,6 +229,7 @@ struct bpf_map {
 	enum libbpf_map_type libbpf_type;
 	char *pin_path;
 	bool pinned;
+	bool was_reused;
 };
 
 struct bpf_secdata {
@@ -1995,6 +1996,7 @@ int bpf_map__reuse_fd(struct bpf_map *map, int fd)
 	map->def.map_flags = info.map_flags;
 	map->btf_key_type_id = info.btf_key_type_id;
 	map->btf_value_type_id = info.btf_value_type_id;
+	map->was_reused = true;
 
 	return 0;
 
@@ -4007,15 +4009,18 @@ bpf_object__open_buffer(const void *obj_buf, size_t obj_buf_sz,
 	return bpf_object__open_mem(obj_buf, obj_buf_sz, &opts);
 }
 
-int bpf_object__unload(struct bpf_object *obj)
+static int __bpf_object__unload(struct bpf_object *obj, bool unpin)
 {
 	size_t i;
 
 	if (!obj)
 		return -EINVAL;
 
-	for (i = 0; i < obj->nr_maps; i++)
+	for (i = 0; i < obj->nr_maps; i++) {
 		zclose(obj->maps[i].fd);
+		if (unpin && obj->maps[i].pinned && !obj->maps[i].was_reused)
+			bpf_map__unpin(&obj->maps[i], NULL);
+	}
 
 	for (i = 0; i < obj->nr_programs; i++)
 		bpf_program__unload(&obj->programs[i]);
@@ -4023,6 +4028,11 @@ int bpf_object__unload(struct bpf_object *obj)
 	return 0;
 }
 
+int bpf_object__unload(struct bpf_object *obj)
+{
+	return __bpf_object__unload(obj, false);
+}
+
 int bpf_object__load_xattr(struct bpf_object_load_attr *attr)
 {
 	struct bpf_object *obj;
@@ -4047,7 +4057,7 @@ int bpf_object__load_xattr(struct bpf_object_load_attr *attr)
 
 	return 0;
 out:
-	bpf_object__unload(obj);
+	__bpf_object__unload(obj, true);
 	pr_warn("failed to load object '%s'\n", obj->path);
 	return err;
 }