@@ -128,4 +128,9 @@ struct sk_filter *bpf_prog_get(u32 ufd);
/* verify correctness of eBPF program */
int bpf_check(struct sk_filter *fp);
+/* in-kernel helper functions called from eBPF programs */
+u64 bpf_map_lookup_elem(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
+u64 bpf_map_update_elem(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
+u64 bpf_map_delete_elem(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
+
#endif /* _LINUX_BPF_H */
@@ -377,6 +377,9 @@ enum bpf_prog_type {
*/
enum bpf_func_id {
BPF_FUNC_unspec,
+ BPF_FUNC_map_lookup_elem, /* void *map_lookup_elem(map_id, void *key); backed by kernel bpf_map_lookup_elem() */
+ BPF_FUNC_map_update_elem, /* int map_update_elem(map_id, void *key, void *value); backed by kernel bpf_map_update_elem() */
+ BPF_FUNC_map_delete_elem, /* int map_delete_elem(map_id, void *key); backed by kernel bpf_map_delete_elem() */
__BPF_FUNC_MAX_ID,
};
@@ -741,3 +741,88 @@ SYSCALL_DEFINE5(bpf, int, cmd, unsigned long, arg2, unsigned long, arg3,
return -EINVAL;
}
}
+
+/* called from eBPF program under rcu lock
+ *
+ * if kernel subsystem is allowing eBPF programs to call this function,
+ * inside its own verifier_ops->get_func_proto() callback it should return
+ * (struct bpf_func_proto) {
+ * .ret_type = PTR_TO_MAP_CONDITIONAL,
+ * .arg1_type = CONST_ARG_MAP_ID,
+ * .arg2_type = PTR_TO_STACK_IMM_MAP_KEY,
+ * }
+ * so that eBPF verifier properly checks the arguments
+ */
+u64 bpf_map_lookup_elem(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5) /* r3-r5 unused: all helpers share the fixed 5-register eBPF prototype */
+{
+ struct bpf_map *map;
+ int map_id = r1;
+ void *key = (void *) (unsigned long) r2; /* r2 carries a pointer packed into u64 by the eBPF calling convention */
+ void *value;
+
+ WARN_ON_ONCE(!rcu_read_lock_held()); /* contract: caller (the eBPF program) runs under rcu_read_lock() */
+
+ map = idr_find(&bpf_map_id_idr, map_id);
+ /* eBPF verifier guarantees that map_id is valid for the life of
+ * the program
+ */
+ BUG_ON(!map);
+
+ value = map->ops->map_lookup_elem(map, key);
+
+ return (unsigned long) value; /* a NULL lookup result becomes 0 for the eBPF program */
+}
+
+/* called from eBPF program under rcu lock
+ *
+ * if kernel subsystem is allowing eBPF programs to call this function,
+ * inside its own verifier_ops->get_func_proto() callback it should return
+ * (struct bpf_func_proto) {
+ * .ret_type = RET_INTEGER,
+ * .arg1_type = CONST_ARG_MAP_ID,
+ * .arg2_type = PTR_TO_STACK_IMM_MAP_KEY,
+ * .arg3_type = PTR_TO_STACK_IMM_MAP_VALUE,
+ * }
+ * so that eBPF verifier properly checks the arguments
+ */
+u64 bpf_map_update_elem(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5) /* r4-r5 unused: fixed 5-register eBPF helper prototype */
+{
+ struct bpf_map *map;
+ int map_id = r1;
+ void *key = (void *) (unsigned long) r2; /* key/value pointers arrive packed into u64 registers */
+ void *value = (void *) (unsigned long) r3;
+
+ WARN_ON_ONCE(!rcu_read_lock_held()); /* contract: caller (the eBPF program) runs under rcu_read_lock() */
+
+ map = idr_find(&bpf_map_id_idr, map_id);
+ /* eBPF verifier guarantees that map_id is valid */
+ BUG_ON(!map);
+
+ return map->ops->map_update_elem(map, key, value); /* integer status from the map implementation */
+}
+
+/* called from eBPF program under rcu lock
+ *
+ * if kernel subsystem is allowing eBPF programs to call this function,
+ * inside its own verifier_ops->get_func_proto() callback it should return
+ * (struct bpf_func_proto) {
+ * .ret_type = RET_INTEGER,
+ * .arg1_type = CONST_ARG_MAP_ID,
+ * .arg2_type = PTR_TO_STACK_IMM_MAP_KEY,
+ * }
+ * so that eBPF verifier properly checks the arguments
+ */
+u64 bpf_map_delete_elem(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5) /* r3-r5 unused: fixed 5-register eBPF helper prototype */
+{
+ struct bpf_map *map;
+ int map_id = r1;
+ void *key = (void *) (unsigned long) r2; /* r2 carries a pointer packed into u64 by the eBPF calling convention */
+
+ WARN_ON_ONCE(!rcu_read_lock_held()); /* contract: caller (the eBPF program) runs under rcu_read_lock() */
+
+ map = idr_find(&bpf_map_id_idr, map_id);
+ /* eBPF verifier guarantees that map_id is valid */
+ BUG_ON(!map);
+
+ return map->ops->map_delete_elem(map, key); /* integer status from the map implementation */
+}
expose bpf_map_lookup_elem(), bpf_map_update_elem(), bpf_map_delete_elem()
map accessors to eBPF programs

Signed-off-by: Alexei Starovoitov <ast@plumgrid.com>
---
 include/linux/bpf.h      |  5 +++
 include/uapi/linux/bpf.h |  3 ++
 kernel/bpf/syscall.c     | 85 ++++++++++++++++++++++++++++++++++++++++++++++
 3 files changed, 93 insertions(+)