Message ID | 3c8b73a6ea66eb01419da4ff0c611f72c7905a5a.1524407665.git.paul.chaignon@orange.com |
---|---|
State | Changes Requested, archived |
Delegated to: | BPF Maintainers |
Headers | show |
Series | bpf: allow map helpers access to map values directly | expand |
On 04/22/2018 11:52 PM, Paul Chaignon wrote: > Helpers that expect ARG_PTR_TO_MAP_KEY and ARG_PTR_TO_MAP_VALUE can only > access stack and packet memory. Allow these helpers to directly access > map values by passing registers of type PTR_TO_MAP_VALUE. > > This change removes the need for an extra copy to the stack when using a > map value to perform a second map lookup, as in the following: > > struct bpf_map_def SEC("maps") infobyreq = { > .type = BPF_MAP_TYPE_HASHMAP, > .key_size = sizeof(struct request *), > .value_size = sizeof(struct info_t), > .max_entries = 1024, > }; > struct bpf_map_def SEC("maps") counts = { > .type = BPF_MAP_TYPE_HASHMAP, > .key_size = sizeof(struct info_t), > .value_size = sizeof(u64), > .max_entries = 1024, > }; > SEC("kprobe/blk_account_io_start") > int bpf_blk_account_io_start(struct pt_regs *ctx) > { > struct info_t *info = bpf_map_lookup_elem(&infobyreq, &ctx->di); > u64 *count = bpf_map_lookup_elem(&counts, info); > (*count)++; > } > > Signed-off-by: Paul Chaignon <paul.chaignon@orange.com> > --- > kernel/bpf/verifier.c | 9 ++++++++- > 1 file changed, 8 insertions(+), 1 deletion(-) > > diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c > index 5dd1dcb902bf..70e00beade03 100644 > --- a/kernel/bpf/verifier.c > +++ b/kernel/bpf/verifier.c > @@ -1914,7 +1914,7 @@ static int check_func_arg(struct bpf_verifier_env *env, u32 regno, > if (arg_type == ARG_PTR_TO_MAP_KEY || > arg_type == ARG_PTR_TO_MAP_VALUE) { > expected_type = PTR_TO_STACK; > - if (!type_is_pkt_pointer(type) && > + if (!type_is_pkt_pointer(type) && type != PTR_TO_MAP_VALUE && > type != expected_type) > goto err_type; > } else if (arg_type == ARG_CONST_SIZE || > @@ -1970,6 +1970,9 @@ static int check_func_arg(struct bpf_verifier_env *env, u32 regno, > err = check_packet_access(env, regno, reg->off, > meta->map_ptr->key_size, > false); > + else if (type == PTR_TO_MAP_VALUE) > + err = check_map_access(env, regno, reg->off, > + meta->map_ptr->key_size, false); > else 
> err = check_stack_boundary(env, regno, > meta->map_ptr->key_size, We should reuse check_helper_mem_access() here which covers all three cases from above already and simplifies the code a bit. > @@ -1987,6 +1990,10 @@ static int check_func_arg(struct bpf_verifier_env *env, u32 regno, > err = check_packet_access(env, regno, reg->off, > meta->map_ptr->value_size, > false); > + else if (type == PTR_TO_MAP_VALUE) > + err = check_map_access(env, regno, reg->off, > + meta->map_ptr->value_size, > + false); > else > err = check_stack_boundary(env, regno, > meta->map_ptr->value_size, > Ditto. Thanks, Daniel
On 04/23/2018 11:18 PM +0200, Daniel Borkmann wrote: > On 04/22/2018 11:52 PM, Paul Chaignon wrote: > > Helpers that expect ARG_PTR_TO_MAP_KEY and ARG_PTR_TO_MAP_VALUE can only > > access stack and packet memory. Allow these helpers to directly access > > map values by passing registers of type PTR_TO_MAP_VALUE. > > > > This change removes the need for an extra copy to the stack when using a > > map value to perform a second map lookup, as in the following: > > > > struct bpf_map_def SEC("maps") infobyreq = { > > .type = BPF_MAP_TYPE_HASHMAP, > > .key_size = sizeof(struct request *), > > .value_size = sizeof(struct info_t), > > .max_entries = 1024, > > }; > > struct bpf_map_def SEC("maps") counts = { > > .type = BPF_MAP_TYPE_HASHMAP, > > .key_size = sizeof(struct info_t), > > .value_size = sizeof(u64), > > .max_entries = 1024, > > }; > > SEC("kprobe/blk_account_io_start") > > int bpf_blk_account_io_start(struct pt_regs *ctx) > > { > > struct info_t *info = bpf_map_lookup_elem(&infobyreq, &ctx->di); > > u64 *count = bpf_map_lookup_elem(&counts, info); > > (*count)++; > > } > > > > Signed-off-by: Paul Chaignon <paul.chaignon@orange.com> > > --- > > kernel/bpf/verifier.c | 9 ++++++++- > > 1 file changed, 8 insertions(+), 1 deletion(-) > > > > diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c > > index 5dd1dcb902bf..70e00beade03 100644 > > --- a/kernel/bpf/verifier.c > > +++ b/kernel/bpf/verifier.c > > @@ -1914,7 +1914,7 @@ static int check_func_arg(struct bpf_verifier_env *env, u32 regno, > > if (arg_type == ARG_PTR_TO_MAP_KEY || > > arg_type == ARG_PTR_TO_MAP_VALUE) { > > expected_type = PTR_TO_STACK; > > - if (!type_is_pkt_pointer(type) && > > + if (!type_is_pkt_pointer(type) && type != PTR_TO_MAP_VALUE && > > type != expected_type) > > goto err_type; > > } else if (arg_type == ARG_CONST_SIZE || > > @@ -1970,6 +1970,9 @@ static int check_func_arg(struct bpf_verifier_env *env, u32 regno, > > err = check_packet_access(env, regno, reg->off, > > 
meta->map_ptr->key_size, > > false); > > + else if (type == PTR_TO_MAP_VALUE) > > + err = check_map_access(env, regno, reg->off, > > + meta->map_ptr->key_size, false); > > else > > err = check_stack_boundary(env, regno, > > meta->map_ptr->key_size, > > We should reuse check_helper_mem_access() here which covers all three cases > from above already and simplifies the code a bit. Thanks for the review. I've sent a refactored patchset that uses check_helper_mem_access(). > > > @@ -1987,6 +1990,10 @@ static int check_func_arg(struct bpf_verifier_env *env, u32 regno, > > err = check_packet_access(env, regno, reg->off, > > meta->map_ptr->value_size, > > false); > > + else if (type == PTR_TO_MAP_VALUE) > > + err = check_map_access(env, regno, reg->off, > > + meta->map_ptr->value_size, > > + false); > > else > > err = check_stack_boundary(env, regno, > > meta->map_ptr->value_size, > > > > Ditto. > > Thanks, > Daniel
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 5dd1dcb902bf..70e00beade03 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -1914,7 +1914,7 @@ static int check_func_arg(struct bpf_verifier_env *env, u32 regno, if (arg_type == ARG_PTR_TO_MAP_KEY || arg_type == ARG_PTR_TO_MAP_VALUE) { expected_type = PTR_TO_STACK; - if (!type_is_pkt_pointer(type) && + if (!type_is_pkt_pointer(type) && type != PTR_TO_MAP_VALUE && type != expected_type) goto err_type; } else if (arg_type == ARG_CONST_SIZE || @@ -1970,6 +1970,9 @@ static int check_func_arg(struct bpf_verifier_env *env, u32 regno, err = check_packet_access(env, regno, reg->off, meta->map_ptr->key_size, false); + else if (type == PTR_TO_MAP_VALUE) + err = check_map_access(env, regno, reg->off, + meta->map_ptr->key_size, false); else err = check_stack_boundary(env, regno, meta->map_ptr->key_size, @@ -1987,6 +1990,10 @@ static int check_func_arg(struct bpf_verifier_env *env, u32 regno, err = check_packet_access(env, regno, reg->off, meta->map_ptr->value_size, false); + else if (type == PTR_TO_MAP_VALUE) + err = check_map_access(env, regno, reg->off, + meta->map_ptr->value_size, + false); else err = check_stack_boundary(env, regno, meta->map_ptr->value_size,
Helpers that expect ARG_PTR_TO_MAP_KEY and ARG_PTR_TO_MAP_VALUE can only access stack and packet memory. Allow these helpers to directly access map values by passing registers of type PTR_TO_MAP_VALUE. This change removes the need for an extra copy to the stack when using a map value to perform a second map lookup, as in the following: struct bpf_map_def SEC("maps") infobyreq = { .type = BPF_MAP_TYPE_HASH, .key_size = sizeof(struct request *), .value_size = sizeof(struct info_t), .max_entries = 1024, }; struct bpf_map_def SEC("maps") counts = { .type = BPF_MAP_TYPE_HASH, .key_size = sizeof(struct info_t), .value_size = sizeof(u64), .max_entries = 1024, }; SEC("kprobe/blk_account_io_start") int bpf_blk_account_io_start(struct pt_regs *ctx) { struct info_t *info = bpf_map_lookup_elem(&infobyreq, &ctx->di); u64 *count = bpf_map_lookup_elem(&counts, info); (*count)++; } Signed-off-by: Paul Chaignon <paul.chaignon@orange.com> --- kernel/bpf/verifier.c | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-)