diff mbox series

[bpf-next,v4,2/3] bpf: implement map_gen_lookup() callback for XSKMAP

Message ID 20191031084749.14626-3-bjorn.topel@gmail.com
State Changes Requested
Delegated to: BPF Maintainers
Headers show
Series xsk: XSKMAP performance improvements | expand

Commit Message

Björn Töpel Oct. 31, 2019, 8:47 a.m. UTC
From: Maciej Fijalkowski <maciej.fijalkowski@intel.com>

Inline the xsk_map_lookup_elem() by implementing the map_gen_lookup()
callback. This results in emitting the bpf instructions in place of the
bpf_map_lookup_elem() helper call, and better performance of bpf
programs.

Acked-by: Jonathan Lemon <jonathan.lemon@gmail.com>
Signed-off-by: Maciej Fijalkowski <maciej.fijalkowski@intel.com>
---
 kernel/bpf/xskmap.c | 17 +++++++++++++++++
 1 file changed, 17 insertions(+)

Comments

Daniel Borkmann Oct. 31, 2019, 11:48 p.m. UTC | #1
On Thu, Oct 31, 2019 at 09:47:48AM +0100, Björn Töpel wrote:
> From: Maciej Fijalkowski <maciej.fijalkowski@intel.com>
> 
> Inline the xsk_map_lookup_elem() by implementing the map_gen_lookup()
> callback. This results in emitting the bpf instructions in place of the
> bpf_map_lookup_elem() helper call, and better performance of bpf
> programs.
> 
> Acked-by: Jonathan Lemon <jonathan.lemon@gmail.com>
> Signed-off-by: Maciej Fijalkowski <maciej.fijalkowski@intel.com>
> ---
>  kernel/bpf/xskmap.c | 17 +++++++++++++++++
>  1 file changed, 17 insertions(+)
> 
> diff --git a/kernel/bpf/xskmap.c b/kernel/bpf/xskmap.c
> index edcbd863650e..fa32f775b4de 100644
> --- a/kernel/bpf/xskmap.c
> +++ b/kernel/bpf/xskmap.c
> @@ -163,6 +163,22 @@ struct xdp_sock *__xsk_map_lookup_elem(struct bpf_map *map, u32 key)
>  	return xs;
>  }
>  
> +static u32 xsk_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf)
> +{
> +	const int ret = BPF_REG_0, mp = BPF_REG_1, index = BPF_REG_2;
> +	struct bpf_insn *insn = insn_buf;
> +
> +	*insn++ = BPF_LDX_MEM(BPF_W, ret, index, 0);
> +	*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 5);
> +	*insn++ = BPF_ALU64_IMM(BPF_LSH, ret, ilog2(sizeof(struct xsk_sock *)));
> +	*insn++ = BPF_ALU64_IMM(BPF_ADD, mp, offsetof(struct xsk_map, xsk_map));
> +	*insn++ = BPF_ALU64_REG(BPF_ADD, ret, mp);
> +	*insn++ = BPF_LDX_MEM(BPF_DW, ret, ret, 0);

Your map slots are always exactly sizeof(struct xdp_sock *), right? Wouldn't
this BPF_DW crash on 32 bit?

Meaning, it would have to be BPF_LDX_MEM(BPF_SIZEOF(struct xsk_sock *), ...)?

> +	*insn++ = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
> +	*insn++ = BPF_MOV64_IMM(ret, 0);
> +	return insn - insn_buf;
> +}
> +
>  int __xsk_map_redirect(struct bpf_map *map, struct xdp_buff *xdp,
>  		       struct xdp_sock *xs)
>  {
> @@ -303,6 +319,7 @@ const struct bpf_map_ops xsk_map_ops = {
>  	.map_free = xsk_map_free,
>  	.map_get_next_key = xsk_map_get_next_key,
>  	.map_lookup_elem = xsk_map_lookup_elem,
> +	.map_gen_lookup = xsk_map_gen_lookup,
>  	.map_lookup_elem_sys_only = xsk_map_lookup_elem_sys_only,
>  	.map_update_elem = xsk_map_update_elem,
>  	.map_delete_elem = xsk_map_delete_elem,
> -- 
> 2.20.1
>
Björn Töpel Nov. 1, 2019, 8:31 a.m. UTC | #2
On Fri, 1 Nov 2019 at 00:48, Daniel Borkmann <daniel@iogearbox.net> wrote:
>
> On Thu, Oct 31, 2019 at 09:47:48AM +0100, Björn Töpel wrote:
> > From: Maciej Fijalkowski <maciej.fijalkowski@intel.com>
[...]
> > +static u32 xsk_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf)
> > +{
> > +     const int ret = BPF_REG_0, mp = BPF_REG_1, index = BPF_REG_2;
> > +     struct bpf_insn *insn = insn_buf;
> > +
> > +     *insn++ = BPF_LDX_MEM(BPF_W, ret, index, 0);
> > +     *insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 5);
> > +     *insn++ = BPF_ALU64_IMM(BPF_LSH, ret, ilog2(sizeof(struct xsk_sock *)));
> > +     *insn++ = BPF_ALU64_IMM(BPF_ADD, mp, offsetof(struct xsk_map, xsk_map));
> > +     *insn++ = BPF_ALU64_REG(BPF_ADD, ret, mp);
> > +     *insn++ = BPF_LDX_MEM(BPF_DW, ret, ret, 0);
>
> Your map slots are always exactly sizeof(struct xdp_sock *), right? Wouldn't
> this BPF_DW crash on 32 bit?
>
> Meaning, it would have to be BPF_LDX_MEM(BPF_SIZEOF(struct xsk_sock *), ...)?
>

Indeed. Thanks for finding this. I'll do a respin.

Björn
diff mbox series

Patch

diff --git a/kernel/bpf/xskmap.c b/kernel/bpf/xskmap.c
index edcbd863650e..fa32f775b4de 100644
--- a/kernel/bpf/xskmap.c
+++ b/kernel/bpf/xskmap.c
@@ -163,6 +163,22 @@  struct xdp_sock *__xsk_map_lookup_elem(struct bpf_map *map, u32 key)
 	return xs;
 }
 
+static u32 xsk_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf)
+{
+	const int ret = BPF_REG_0, mp = BPF_REG_1, index = BPF_REG_2;
+	struct bpf_insn *insn = insn_buf;
+
+	*insn++ = BPF_LDX_MEM(BPF_W, ret, index, 0);
+	*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 5);
+	*insn++ = BPF_ALU64_IMM(BPF_LSH, ret, ilog2(sizeof(struct xsk_sock *)));
+	*insn++ = BPF_ALU64_IMM(BPF_ADD, mp, offsetof(struct xsk_map, xsk_map));
+	*insn++ = BPF_ALU64_REG(BPF_ADD, ret, mp);
+	*insn++ = BPF_LDX_MEM(BPF_DW, ret, ret, 0);
+	*insn++ = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
+	*insn++ = BPF_MOV64_IMM(ret, 0);
+	return insn - insn_buf;
+}
+
 int __xsk_map_redirect(struct bpf_map *map, struct xdp_buff *xdp,
 		       struct xdp_sock *xs)
 {
@@ -303,6 +319,7 @@  const struct bpf_map_ops xsk_map_ops = {
 	.map_free = xsk_map_free,
 	.map_get_next_key = xsk_map_get_next_key,
 	.map_lookup_elem = xsk_map_lookup_elem,
+	.map_gen_lookup = xsk_map_gen_lookup,
 	.map_lookup_elem_sys_only = xsk_map_lookup_elem_sys_only,
 	.map_update_elem = xsk_map_update_elem,
 	.map_delete_elem = xsk_map_delete_elem,