[bpf-next,3/9] bpf: extend is_branch_taken to registers
diff mbox series

Message ID 20190613044738.3858896-4-ast@kernel.org
State Superseded
Headers show
Series
  • bpf: bounded loops and other features
Related show

Commit Message

Alexei Starovoitov June 13, 2019, 4:47 a.m. UTC
This patch extends is_branch_taken() logic from JMP+K instructions
to JMP+X instructions.
Conditional branches are often performed when the src and dst registers
contain known scalars. In such a case the verifier can follow
the branch that is going to be taken when the program executes on the CPU.
That speeds up the verification and is an essential feature for supporting
bounded loops.

Signed-off-by: Alexei Starovoitov <ast@kernel.org>
---
 kernel/bpf/verifier.c | 16 +++++++++++++---
 1 file changed, 13 insertions(+), 3 deletions(-)

Patch
diff mbox series

diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index a21bafd7d931..c79c09586a9e 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -5263,10 +5263,11 @@  static int check_cond_jmp_op(struct bpf_verifier_env *env,
 	struct bpf_verifier_state *this_branch = env->cur_state;
 	struct bpf_verifier_state *other_branch;
 	struct bpf_reg_state *regs = this_branch->frame[this_branch->curframe]->regs;
-	struct bpf_reg_state *dst_reg, *other_branch_regs;
+	struct bpf_reg_state *dst_reg, *other_branch_regs, *src_reg = NULL;
 	u8 opcode = BPF_OP(insn->code);
 	bool is_jmp32;
 	int err;
+	u64 cond_val;
 
 	/* Only conditional jumps are expected to reach here. */
 	if (opcode == BPF_JA || opcode > BPF_JSLE) {
@@ -5290,6 +5291,7 @@  static int check_cond_jmp_op(struct bpf_verifier_env *env,
 				insn->src_reg);
 			return -EACCES;
 		}
+		src_reg = &regs[insn->src_reg];
 	} else {
 		if (insn->src_reg != BPF_REG_0) {
 			verbose(env, "BPF_JMP/JMP32 uses reserved fields\n");
@@ -5306,8 +5308,11 @@  static int check_cond_jmp_op(struct bpf_verifier_env *env,
 	is_jmp32 = BPF_CLASS(insn->code) == BPF_JMP32;
 
 	if (BPF_SRC(insn->code) == BPF_K) {
-		int pred = is_branch_taken(dst_reg, insn->imm, opcode,
-					   is_jmp32);
+		int pred;
+
+		cond_val = insn->imm;
+check_taken:
+		pred = is_branch_taken(dst_reg, cond_val, opcode, is_jmp32);
 
 		if (pred == 1) {
 			 /* only follow the goto, ignore fall-through */
@@ -5319,6 +5324,11 @@  static int check_cond_jmp_op(struct bpf_verifier_env *env,
 			 */
 			return 0;
 		}
+	} else if (BPF_SRC(insn->code) == BPF_X &&
+		   src_reg->type == SCALAR_VALUE &&
+		   tnum_is_const(src_reg->var_off)) {
+		cond_val = src_reg->var_off.value;
+		goto check_taken;
 	}
 
 	other_branch = push_stack(env, *insn_idx + insn->off + 1, *insn_idx,