[04/13] bpf: move tmp variable into ax register in interpreter

Message ID: 1549862710-24224-5-git-send-email-tyhicks@canonical.com
State: New
Series: Multiple BPF security issues

Commit Message

Tyler Hicks Feb. 11, 2019, 5:25 a.m. UTC
From: Daniel Borkmann <daniel@iogearbox.net>

This change moves the on-stack 64-bit tmp variable in ___bpf_prog_run()
into the hidden ax register. The latter is currently only used in JITs
as a temporary scratch register for constant blinding, meaning the BPF
interpreter never sees ax in use. It is therefore safe to reuse ax for
the cases where tmp was used before. This is needed in order to later
allow restricted, hidden use of ax in both the interpreter and JITs.

Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>

CVE-2019-7308

(backported from commit 144cd91c4c2bced6eb8a7e25e590f6618a11e854)
[tyhicks: Backport around context changes and missing commit 1ea47e01ad6e]
Signed-off-by: Tyler Hicks <tyhicks@canonical.com>
---
 include/linux/filter.h |  3 ++-
 kernel/bpf/core.c      | 32 ++++++++++++++++----------------
 2 files changed, 18 insertions(+), 17 deletions(-)
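
For context: the interpreter keeps all BPF registers in a single on-stack
u64 array indexed by register number, so giving the hidden AX register
backing storage only requires growing that array by one slot. A minimal
userspace sketch of the layout (illustrative only, not part of the patch;
the DEMO_* constants mirror the kernel's MAX_BPF_REG/BPF_REG_AX values):

#include <stdint.h>
#include <stdio.h>

#define DEMO_MAX_BPF_REG	11			/* R0..R10                  */
#define DEMO_BPF_REG_AX		DEMO_MAX_BPF_REG	/* first index past R0..R10 */
#define DEMO_MAX_BPF_EXT_REG	(DEMO_MAX_BPF_REG + 1)

int main(void)
{
	/* Sized like the interpreter's regs[] after this patch:
	 * eleven ABI-visible registers plus the hidden AX slot.
	 */
	uint64_t regs[DEMO_MAX_BPF_EXT_REG] = { 0 };

	regs[DEMO_BPF_REG_AX] = 42;	/* scratch use, as the patch does */
	printf("AX is slot %d, value %llu\n", DEMO_BPF_REG_AX,
	       (unsigned long long)regs[DEMO_BPF_REG_AX]);
	return 0;
}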

Comments

Kleber Sacilotto de Souza March 7, 2019, 2:02 p.m. UTC | #1
Hi Tyler,

On 2/11/19 6:25 AM, Tyler Hicks wrote:
> From: Daniel Borkmann <daniel@iogearbox.net>
> 
> [...]

This code doesn't compile when CONFIG_BPF_JIT_ALWAYS_ON is not set. The
Bionic kernel doesn't have e0cea7ce988c ("bpf: implement ld_abs/ld_ind in
native bpf"), so the tmp variable is still used in other places. It seems
that we also need the following additional fixes:

diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index 3cf79f0eac6c..f20df89589ed 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -1262,7 +1262,7 @@ static unsigned int ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn,
 		 *   BPF_R0 - 8/16/32-bit skb data converted to cpu endianness
 		 */
 
-		ptr = bpf_load_pointer((struct sk_buff *) (unsigned long) CTX, off, 4, &tmp);
+		ptr = bpf_load_pointer((struct sk_buff *) (unsigned long) CTX, off, 4, &AX);
 		if (likely(ptr != NULL)) {
 			BPF_R0 = get_unaligned_be32(ptr);
 			CONT;
@@ -1272,7 +1272,7 @@ static unsigned int ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn,
 	LD_ABS_H: /* BPF_R0 = ntohs(*(u16 *) (skb->data + imm32)) */
 		off = IMM;
 load_half:
-		ptr = bpf_load_pointer((struct sk_buff *) (unsigned long) CTX, off, 2, &tmp);
+		ptr = bpf_load_pointer((struct sk_buff *) (unsigned long) CTX, off, 2, &AX);
 		if (likely(ptr != NULL)) {
 			BPF_R0 = get_unaligned_be16(ptr);
 			CONT;
@@ -1282,7 +1282,7 @@ static unsigned int ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn,
 	LD_ABS_B: /* BPF_R0 = *(u8 *) (skb->data + imm32) */
 		off = IMM;
 load_byte:
-		ptr = bpf_load_pointer((struct sk_buff *) (unsigned long) CTX, off, 1, &tmp);
+		ptr = bpf_load_pointer((struct sk_buff *) (unsigned long) CTX, off, 1, &AX);
 		if (likely(ptr != NULL)) {
 			BPF_R0 = *(u8 *)ptr;
 			CONT;
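
For reference, the reason these call sites need a scratch argument at all
is that bpf_load_pointer() may have to copy the requested bytes when they
are not directly addressable in the skb. Roughly (a simplified sketch of
the pre-4.18 helper, written from memory rather than copied verbatim):

static inline void *bpf_load_pointer(const struct sk_buff *skb, int k,
				     unsigned int size, void *buffer)
{
	if (k >= 0)
		/* skb_header_pointer() returns a pointer into the
		 * linear skb data when possible and otherwise copies
		 * the bytes into the caller-supplied buffer -- the
		 * on-stack tmp before this patch, the hidden AX
		 * register slot after it.
		 */
		return skb_header_pointer(skb, k, size, buffer);

	/* Negative offsets address special metadata (SKF_AD_OFF etc.). */
	return bpf_internal_load_pointer_neg_helper(skb, k, size);
}

So the buffer only has to be large enough for the widest load (a u32
here), which a u64 register slot comfortably is.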


Does it make sense?

If it does, could you please send a v2 of this patch? We can replace the
original backport applied to the tree so we don't break bisectability.


Thanks,
Kleber
Tyler Hicks March 7, 2019, 6:10 p.m. UTC | #2
On 2019-03-07 15:02:26, Kleber Souza wrote:
> Hi Tyler,
> 
> On 2/11/19 6:25 AM, Tyler Hicks wrote:
> > From: Daniel Borkmann <daniel@iogearbox.net>
> > 
> > [...]
> 
> This code doesn't compile when CONFIG_BPF_JIT_ALWAYS_ON is not set. The
> Bionic kernel doesn't have e0cea7ce988c ("bpf: implement ld_abs/ld_ind in
> native bpf"), so the tmp variable is still used in other places. It seems
> that we also need the following additional fixes:
> 
> [...]
> 
> Does it make sense?
> 
> If it does, could you please send a v2 of this patch? We can replace the
> original backport applied to the tree so we don't break bisectability.

Yes, it does make sense. I've applied the change and verified that the
i386 Bionic kernel builds. I'll reply with v2 of this patch.

Tyler

Patch

diff --git a/include/linux/filter.h b/include/linux/filter.h
index e85292a16467..ec944f15f1a4 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -53,7 +53,8 @@ struct bpf_prog_aux;
  * constants. See JIT pre-step in bpf_jit_blind_constants().
  */
 #define BPF_REG_AX		MAX_BPF_REG
-#define MAX_BPF_JIT_REG		(MAX_BPF_REG + 1)
+#define MAX_BPF_EXT_REG		(MAX_BPF_REG + 1)
+#define MAX_BPF_JIT_REG		MAX_BPF_EXT_REG
 
 /* unused opcode to mark special call to bpf_tail_call() helper */
 #define BPF_TAIL_CALL	0xf0
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index 7949e8b8f94e..c8b57f2afbd6 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -51,6 +51,7 @@
 #define DST	regs[insn->dst_reg]
 #define SRC	regs[insn->src_reg]
 #define FP	regs[BPF_REG_FP]
+#define AX	regs[BPF_REG_AX]
 #define ARG1	regs[BPF_REG_ARG1]
 #define CTX	regs[BPF_REG_CTX]
 #define IMM	insn->imm
@@ -778,7 +779,6 @@ EXPORT_SYMBOL_GPL(__bpf_call_base);
 static unsigned int ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn,
 				    u64 *stack)
 {
-	u64 tmp;
 	static const void *jumptable[256] = {
 		[0 ... 255] = &&default_label,
 		/* Now overwrite non-defaults ... */
@@ -952,22 +952,22 @@ static unsigned int ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn,
 	ALU64_MOD_X:
 		if (unlikely(SRC == 0))
 			return 0;
-		div64_u64_rem(DST, SRC, &tmp);
-		DST = tmp;
+		div64_u64_rem(DST, SRC, &AX);
+		DST = AX;
 		CONT;
 	ALU_MOD_X:
 		if (unlikely((u32)SRC == 0))
 			return 0;
-		tmp = (u32) DST;
-		DST = do_div(tmp, (u32) SRC);
+		AX = (u32) DST;
+		DST = do_div(AX, (u32) SRC);
 		CONT;
 	ALU64_MOD_K:
-		div64_u64_rem(DST, IMM, &tmp);
-		DST = tmp;
+		div64_u64_rem(DST, IMM, &AX);
+		DST = AX;
 		CONT;
 	ALU_MOD_K:
-		tmp = (u32) DST;
-		DST = do_div(tmp, (u32) IMM);
+		AX = (u32) DST;
+		DST = do_div(AX, (u32) IMM);
 		CONT;
 	ALU64_DIV_X:
 		if (unlikely(SRC == 0))
@@ -977,17 +977,17 @@ static unsigned int ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn,
 	ALU_DIV_X:
 		if (unlikely((u32)SRC == 0))
 			return 0;
-		tmp = (u32) DST;
-		do_div(tmp, (u32) SRC);
-		DST = (u32) tmp;
+		AX = (u32) DST;
+		do_div(AX, (u32) SRC);
+		DST = (u32) AX;
 		CONT;
 	ALU64_DIV_K:
 		DST = div64_u64(DST, IMM);
 		CONT;
 	ALU_DIV_K:
-		tmp = (u32) DST;
-		do_div(tmp, (u32) IMM);
-		DST = (u32) tmp;
+		AX = (u32) DST;
+		do_div(AX, (u32) IMM);
+		DST = (u32) AX;
 		CONT;
 	ALU_END_TO_BE:
 		switch (IMM) {
@@ -1291,7 +1291,7 @@ STACK_FRAME_NON_STANDARD(___bpf_prog_run); /* jump table */
 static unsigned int PROG_NAME(stack_size)(const void *ctx, const struct bpf_insn *insn) \
 { \
 	u64 stack[stack_size / sizeof(u64)]; \
-	u64 regs[MAX_BPF_REG]; \
+	u64 regs[MAX_BPF_EXT_REG]; \
 \
 	FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)]; \
 	ARG1 = (u64) (unsigned long) ctx; \
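
One note on the do_div() calls in the hunks above: the kernel's
do_div(n, base) divides n in place, leaving the quotient in n and
evaluating to the remainder. That is why the MOD cases assign its
return value to DST while the DIV cases assign AX itself afterwards.
A small userspace stand-in illustrating that contract (demo_do_div is
hypothetical; the real do_div is an architecture-specific macro that
takes an lvalue rather than a pointer):

#include <stdint.h>
#include <stdio.h>

/* Mirrors do_div()'s contract: the quotient replaces the dividend,
 * and the remainder comes back as the return value.
 */
static uint32_t demo_do_div(uint64_t *n, uint32_t base)
{
	uint32_t rem = (uint32_t)(*n % base);

	*n /= base;
	return rem;
}

int main(void)
{
	uint64_t ax = 17;	/* plays the role of the AX scratch slot */
	uint32_t rem = demo_do_div(&ax, 5);

	/* ALU_MOD_K: DST = do_div(AX, IMM)    -> DST = 2 (remainder) */
	/* ALU_DIV_K: do_div(AX, IMM); DST = AX -> DST = 3 (quotient) */
	printf("quotient=%llu remainder=%u\n",
	       (unsigned long long)ax, rem);
	return 0;
}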