
[06/14] bpf: Add bpf_kallsyms_tree tree

Message ID: 20200208154209.1797988-7-jolsa@kernel.org
State: Superseded
Delegated to: BPF Maintainers
Series: bpf: Add trampoline and dispatcher to /proc/kallsyms

Commit Message

Jiri Olsa Feb. 8, 2020, 3:42 p.m. UTC
The bpf_tree is used both for kallsyms iteration and for searching
the exception tables of bpf programs; the latter is needed only
for bpf_prog objects.

Add bpf_kallsyms_tree, which will hold symbols for all bpf_prog,
bpf_trampoline and bpf_dispatcher objects, and keep bpf_tree only
for the bpf_prog exception table search so that it stays fast.

Signed-off-by: Jiri Olsa <jolsa@kernel.org>
---
 include/linux/bpf.h |  1 +
 kernel/bpf/core.c   | 60 ++++++++++++++++++++++++++++++++++++++++-----
 2 files changed, 55 insertions(+), 6 deletions(-)
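
For context, the patch builds on the kernel's RCU latch tree
(include/linux/rbtree_latch.h). Below is a minimal, illustrative
sketch of how an address lookup through the new tree works;
lookup_example is an invented name, and the real callers are shown
in the patch itself. latch_tree_find() is safe under rcu_read_lock(),
which is why the kallsyms lookup path needs no extra locking:

/*
 * Illustrative sketch only: resolving an address through the new
 * tree. The result must be used inside the same RCU read-side
 * critical section as the lookup.
 */
static void lookup_example(unsigned long addr)
{
	struct latch_tree_node *n;
	struct bpf_ksym *ksym;

	rcu_read_lock();
	n = latch_tree_find((void *)addr, &bpf_kallsyms_tree,
			    &bpf_kallsyms_tree_ops);
	if (n) {
		ksym = container_of(n, struct bpf_ksym, tnode);
		pr_info("%lx is inside bpf symbol %s\n", addr, ksym->name);
	}
	rcu_read_unlock();
}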

Comments

Andrii Nakryiko Feb. 11, 2020, 6:21 p.m. UTC | #1
On Sat, Feb 8, 2020 at 7:43 AM Jiri Olsa <jolsa@kernel.org> wrote:
>
> The bpf_tree is used both for kallsyms iteration and for searching
> the exception tables of bpf programs; the latter is needed only
> for bpf_prog objects.
>
> Add bpf_kallsyms_tree, which will hold symbols for all bpf_prog,
> bpf_trampoline and bpf_dispatcher objects, and keep bpf_tree only
> for the bpf_prog exception table search so that it stays fast.
>
> Signed-off-by: Jiri Olsa <jolsa@kernel.org>
> ---
>  include/linux/bpf.h |  1 +
>  kernel/bpf/core.c   | 60 ++++++++++++++++++++++++++++++++++++++++-----
>  2 files changed, 55 insertions(+), 6 deletions(-)
>
> diff --git a/include/linux/bpf.h b/include/linux/bpf.h
> index da67ca3afa2f..151d7b1c8435 100644
> --- a/include/linux/bpf.h
> +++ b/include/linux/bpf.h
> @@ -468,6 +468,7 @@ struct bpf_ksym {
>         unsigned long            end;
>         char                     name[KSYM_NAME_LEN];
>         struct list_head         lnode;
> +       struct latch_tree_node   tnode;
>  };
>
>  enum bpf_tramp_prog_type {
> diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
> index b9b7077e60f3..1daa72341450 100644
> --- a/kernel/bpf/core.c
> +++ b/kernel/bpf/core.c
> @@ -606,8 +606,46 @@ static const struct latch_tree_ops bpf_tree_ops = {
>         .comp   = bpf_tree_comp,
>  };
>
> +static __always_inline unsigned long
> +bpf_get_ksym_start(struct latch_tree_node *n)

I thought static functions are never marked as inline in kernel
sources. Are there some special cases when it's ok/necessary?

> +{
> +       const struct bpf_ksym *ksym;
> +
> +       ksym = container_of(n, struct bpf_ksym, tnode);
> +       return ksym->start;
> +}
> +
> +static __always_inline bool
> +bpf_ksym_tree_less(struct latch_tree_node *a,
> +                  struct latch_tree_node *b)
> +{
> +       return bpf_get_ksym_start(a) < bpf_get_ksym_start(b);
> +}
> +
> +static __always_inline int
> +bpf_ksym_tree_comp(void *key, struct latch_tree_node *n)
> +{
> +       unsigned long val = (unsigned long)key;
> +       const struct bpf_ksym *ksym;
> +
> +       ksym = container_of(n, struct bpf_ksym, tnode);
> +
> +       if (val < ksym->start)
> +               return -1;
> +       if (val >= ksym->end)
> +               return  1;
> +
> +       return 0;
> +}
> +
> +static const struct latch_tree_ops bpf_kallsyms_tree_ops = {

Given all the helper functions use bpf_ksym_tree and bpf_ksym
(bpf_ksym_find) prefixes, call this bpf_ksym_tree_ops?

> +       .less   = bpf_ksym_tree_less,
> +       .comp   = bpf_ksym_tree_comp,
> +};
> +
>  static DEFINE_SPINLOCK(bpf_lock);
>  static LIST_HEAD(bpf_kallsyms);
> +static struct latch_tree_root bpf_kallsyms_tree __cacheline_aligned;

same as above, bpf_ksym_tree for consistency?

>  static struct latch_tree_root bpf_tree __cacheline_aligned;
>
>  static void bpf_prog_ksym_node_add(struct bpf_prog_aux *aux)
> @@ -615,6 +653,7 @@ static void bpf_prog_ksym_node_add(struct bpf_prog_aux *aux)
>         WARN_ON_ONCE(!list_empty(&aux->ksym.lnode));
>         list_add_tail_rcu(&aux->ksym.lnode, &bpf_kallsyms);
>         latch_tree_insert(&aux->ksym_tnode, &bpf_tree, &bpf_tree_ops);
> +       latch_tree_insert(&aux->ksym.tnode, &bpf_kallsyms_tree, &bpf_kallsyms_tree_ops);
>  }
>

[...]
Jiri Olsa Feb. 12, 2020, 10:49 a.m. UTC | #2
On Tue, Feb 11, 2020 at 10:21:10AM -0800, Andrii Nakryiko wrote:
> On Sat, Feb 8, 2020 at 7:43 AM Jiri Olsa <jolsa@kernel.org> wrote:
> >
> > The bpf_tree is used both for kallsyms iteration and for searching
> > the exception tables of bpf programs; the latter is needed only
> > for bpf_prog objects.
> >
> > Add bpf_kallsyms_tree, which will hold symbols for all bpf_prog,
> > bpf_trampoline and bpf_dispatcher objects, and keep bpf_tree only
> > for the bpf_prog exception table search so that it stays fast.
> >
> > Signed-off-by: Jiri Olsa <jolsa@kernel.org>
> > ---
> >  include/linux/bpf.h |  1 +
> >  kernel/bpf/core.c   | 60 ++++++++++++++++++++++++++++++++++++++++-----
> >  2 files changed, 55 insertions(+), 6 deletions(-)
> >
> > diff --git a/include/linux/bpf.h b/include/linux/bpf.h
> > index da67ca3afa2f..151d7b1c8435 100644
> > --- a/include/linux/bpf.h
> > +++ b/include/linux/bpf.h
> > @@ -468,6 +468,7 @@ struct bpf_ksym {
> >         unsigned long            end;
> >         char                     name[KSYM_NAME_LEN];
> >         struct list_head         lnode;
> > +       struct latch_tree_node   tnode;
> >  };
> >
> >  enum bpf_tramp_prog_type {
> > diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
> > index b9b7077e60f3..1daa72341450 100644
> > --- a/kernel/bpf/core.c
> > +++ b/kernel/bpf/core.c
> > @@ -606,8 +606,46 @@ static const struct latch_tree_ops bpf_tree_ops = {
> >         .comp   = bpf_tree_comp,
> >  };
> >
> > +static __always_inline unsigned long
> > +bpf_get_ksym_start(struct latch_tree_node *n)
> 
> I thought static functions are never marked as inline in kernel
> sources. Are there some special cases when it's ok/necessary?

I followed the other latch tree ops functions and did not think
much about that... will check

> 
> > +{
> > +       const struct bpf_ksym *ksym;
> > +
> > +       ksym = container_of(n, struct bpf_ksym, tnode);
> > +       return ksym->start;
> > +}
> > +
> > +static __always_inline bool
> > +bpf_ksym_tree_less(struct latch_tree_node *a,
> > +                  struct latch_tree_node *b)
> > +{
> > +       return bpf_get_ksym_start(a) < bpf_get_ksym_start(b);
> > +}
> > +
> > +static __always_inline int
> > +bpf_ksym_tree_comp(void *key, struct latch_tree_node *n)
> > +{
> > +       unsigned long val = (unsigned long)key;
> > +       const struct bpf_ksym *ksym;
> > +
> > +       ksym = container_of(n, struct bpf_ksym, tnode);
> > +
> > +       if (val < ksym->start)
> > +               return -1;
> > +       if (val >= ksym->end)
> > +               return  1;
> > +
> > +       return 0;
> > +}
> > +
> > +static const struct latch_tree_ops bpf_kallsyms_tree_ops = {
> 
> Given all the helper functions use bpf_ksym_tree and bpf_ksym
> (bpf_ksym_find) prefixes, call this bpf_ksym_tree_ops?

right, should be bpf_ksym_tree_ops as you said

> 
> > +       .less   = bpf_ksym_tree_less,
> > +       .comp   = bpf_ksym_tree_comp,
> > +};
> > +
> >  static DEFINE_SPINLOCK(bpf_lock);
> >  static LIST_HEAD(bpf_kallsyms);
> > +static struct latch_tree_root bpf_kallsyms_tree __cacheline_aligned;
> 
> same as above, bpf_ksym_tree for consistency?

right, thanks

jirka
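
For reference on the __always_inline exchange above, here is a
minimal side-by-side sketch; the names ksym_start_plain and
ksym_start_forced are invented for illustration. Kernel style
usually leaves inlining of small static functions to the compiler,
while the existing bpf_tree ops in kernel/bpf/core.c force inlining
of their tiny latch-tree callbacks, which is what this patch mirrors:

/* Usual kernel style: plain static, compiler decides about inlining. */
static unsigned long ksym_start_plain(struct latch_tree_node *n)
{
	return container_of(n, struct bpf_ksym, tnode)->start;
}

/*
 * Style of the existing bpf_tree ops: __always_inline forces
 * inlining even when the compiler would not, keeping the hot
 * comparison callbacks free of function-call overhead.
 */
static __always_inline unsigned long
ksym_start_forced(struct latch_tree_node *n)
{
	return container_of(n, struct bpf_ksym, tnode)->start;
}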

Patch

diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index da67ca3afa2f..151d7b1c8435 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -468,6 +468,7 @@ struct bpf_ksym {
 	unsigned long		 end;
 	char			 name[KSYM_NAME_LEN];
 	struct list_head	 lnode;
+	struct latch_tree_node	 tnode;
 };
 
 enum bpf_tramp_prog_type {
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index b9b7077e60f3..1daa72341450 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -606,8 +606,46 @@ static const struct latch_tree_ops bpf_tree_ops = {
 	.comp	= bpf_tree_comp,
 };
 
+static __always_inline unsigned long
+bpf_get_ksym_start(struct latch_tree_node *n)
+{
+	const struct bpf_ksym *ksym;
+
+	ksym = container_of(n, struct bpf_ksym, tnode);
+	return ksym->start;
+}
+
+static __always_inline bool
+bpf_ksym_tree_less(struct latch_tree_node *a,
+		   struct latch_tree_node *b)
+{
+	return bpf_get_ksym_start(a) < bpf_get_ksym_start(b);
+}
+
+static __always_inline int
+bpf_ksym_tree_comp(void *key, struct latch_tree_node *n)
+{
+	unsigned long val = (unsigned long)key;
+	const struct bpf_ksym *ksym;
+
+	ksym = container_of(n, struct bpf_ksym, tnode);
+
+	if (val < ksym->start)
+		return -1;
+	if (val >= ksym->end)
+		return  1;
+
+	return 0;
+}
+
+static const struct latch_tree_ops bpf_kallsyms_tree_ops = {
+	.less	= bpf_ksym_tree_less,
+	.comp	= bpf_ksym_tree_comp,
+};
+
 static DEFINE_SPINLOCK(bpf_lock);
 static LIST_HEAD(bpf_kallsyms);
+static struct latch_tree_root bpf_kallsyms_tree __cacheline_aligned;
 static struct latch_tree_root bpf_tree __cacheline_aligned;
 
 static void bpf_prog_ksym_node_add(struct bpf_prog_aux *aux)
@@ -615,6 +653,7 @@ static void bpf_prog_ksym_node_add(struct bpf_prog_aux *aux)
 	WARN_ON_ONCE(!list_empty(&aux->ksym.lnode));
 	list_add_tail_rcu(&aux->ksym.lnode, &bpf_kallsyms);
 	latch_tree_insert(&aux->ksym_tnode, &bpf_tree, &bpf_tree_ops);
+	latch_tree_insert(&aux->ksym.tnode, &bpf_kallsyms_tree, &bpf_kallsyms_tree_ops);
 }
 
 static void bpf_prog_ksym_node_del(struct bpf_prog_aux *aux)
@@ -623,6 +662,7 @@ static void bpf_prog_ksym_node_del(struct bpf_prog_aux *aux)
 		return;
 
 	latch_tree_erase(&aux->ksym_tnode, &bpf_tree, &bpf_tree_ops);
+	latch_tree_erase(&aux->ksym.tnode, &bpf_kallsyms_tree, &bpf_kallsyms_tree_ops);
 	list_del_rcu(&aux->ksym.lnode);
 }
 
@@ -671,19 +711,27 @@ static struct bpf_prog *bpf_prog_kallsyms_find(unsigned long addr)
 	       NULL;
 }
 
+static struct bpf_ksym *bpf_ksym_find(unsigned long addr)
+{
+	struct latch_tree_node *n;
+
+	n = latch_tree_find((void *)addr, &bpf_kallsyms_tree, &bpf_kallsyms_tree_ops);
+	return n ? container_of(n, struct bpf_ksym, tnode) : NULL;
+}
+
 const char *__bpf_address_lookup(unsigned long addr, unsigned long *size,
 				 unsigned long *off, char *sym)
 {
-	struct bpf_prog *prog;
+	struct bpf_ksym *ksym;
 	char *ret = NULL;
 
 	rcu_read_lock();
-	prog = bpf_prog_kallsyms_find(addr);
-	if (prog) {
-		unsigned long symbol_start = prog->aux->ksym.start;
-		unsigned long symbol_end = prog->aux->ksym.end;
+	ksym = bpf_ksym_find(addr);
+	if (ksym) {
+		unsigned long symbol_start = ksym->start;
+		unsigned long symbol_end = ksym->end;
 
-		strncpy(sym, prog->aux->ksym.name, KSYM_NAME_LEN);
+		strncpy(sym, ksym->name, KSYM_NAME_LEN);
 
 		ret = sym;
 		if (size)