--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -573,6 +573,9 @@ struct bpf_image {
#define BPF_IMAGE_SIZE (PAGE_SIZE - sizeof(struct bpf_image))
bool is_bpf_image_address(unsigned long address);
void *bpf_image_alloc(void);
+/* Called only from JIT-enabled code, so there's no need for stubs. */
+void bpf_ksym_add(struct bpf_ksym *ksym);
+void bpf_ksym_del(struct bpf_ksym *ksym);
#else
static inline struct bpf_trampoline *bpf_trampoline_lookup(u64 key)
{
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -651,6 +651,13 @@ static void __bpf_ksym_add(struct bpf_ksym *ksym)
latch_tree_insert(&ksym->tnode, &bpf_ksym_tree, &bpf_ksym_tree_ops);
}

+void bpf_ksym_add(struct bpf_ksym *ksym)
+{
+ spin_lock_bh(&bpf_lock);
+ __bpf_ksym_add(ksym);
+ spin_unlock_bh(&bpf_lock);
+}
+
static void __bpf_ksym_del(struct bpf_ksym *ksym)
{
if (list_empty(&ksym->lnode))
@@ -660,6 +667,13 @@ static void __bpf_ksym_del(struct bpf_ksym *ksym)
list_del_rcu(&ksym->lnode);
}

+void bpf_ksym_del(struct bpf_ksym *ksym)
+{
+ spin_lock_bh(&bpf_lock);
+ __bpf_ksym_del(ksym);
+ spin_unlock_bh(&bpf_lock);
+}
+
static bool bpf_prog_kallsyms_candidate(const struct bpf_prog *fp)
{
return fp->jited && !bpf_prog_was_classic(fp);
Adding bpf_ksym_add/del functions as locked versions of __bpf_ksym_add/del.
They will be used in following patches for bpf_trampoline and bpf_dispatcher.

Signed-off-by: Jiri Olsa <jolsa@kernel.org>
---
 include/linux/bpf.h |  3 +++
 kernel/bpf/core.c   | 14 ++++++++++++++
 2 files changed, 17 insertions(+)
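For context, the point of the locked wrappers is that the upcoming call sites
(the trampoline and dispatcher patches this message refers to) can publish or
retract a symbol without open-coding the bpf_lock acquisition. Below is a
minimal sketch of what such a call site could look like; the tr->ksym member,
the PAGE_SIZE image span, and the naming scheme are assumptions made for
illustration, not anything this patch defines.

	#include <linux/bpf.h>
	#include <linux/kallsyms.h>

	/* Hypothetical caller: publish a trampoline image through the
	 * new locked helpers. All trampoline-side names are assumed.
	 */
	static void sketch_trampoline_ksym_add(struct bpf_trampoline *tr,
					       void *image)
	{
		struct bpf_ksym *ksym = &tr->ksym; /* assumed member */

		/* Describe the address range the image occupies. */
		ksym->start = (unsigned long)image;
		ksym->end   = ksym->start + PAGE_SIZE; /* assumed size */
		snprintf(ksym->name, KSYM_NAME_LEN,
			 "bpf_trampoline_%llu", tr->key);

		/* bpf_ksym_add() takes bpf_lock itself, so the caller
		 * needs no locking of its own.
		 */
		bpf_ksym_add(ksym);
	}

	static void sketch_trampoline_ksym_del(struct bpf_trampoline *tr)
	{
		/* Symmetric teardown once the image is released. */
		bpf_ksym_del(&tr->ksym);
	}

The _bh spinlock variant matches how bpf_lock is already taken elsewhere in
kernel/bpf/core.c (e.g. bpf_prog_kallsyms_add), so the wrappers serialize
correctly against the existing kallsyms writers.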