diff mbox series

[bpf,8/9] bpf: fix integer overflows

Message ID 20171219041201.1979983-9-ast@kernel.org
State Accepted, archived
Delegated to: BPF Maintainers
Headers show
Series bpf: verifier security fixes | expand

Commit Message

Alexei Starovoitov Dec. 19, 2017, 4:12 a.m. UTC
There were various issues related to the limited size of integers used in
the verifier:
 - `off + size` overflow in __check_map_access()
 - `off + reg->off` overflow in check_mem_access()
 - `off + reg->var_off.value` overflow or 32-bit truncation of
   `reg->var_off.value` in check_mem_access()
 - 32-bit truncation in check_stack_boundary()

Make sure that any integer math cannot overflow by not allowing
pointer math with large values.

Also reduce the scope of "scalar op scalar" tracking.

Fixes: f1174f77b50c ("bpf/verifier: rework value tracking")
Reported-by: Jann Horn <jannh@google.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
---
 include/linux/bpf_verifier.h |  4 ++--
 kernel/bpf/verifier.c        | 48 ++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 50 insertions(+), 2 deletions(-)

Comments

Edward Cree Dec. 19, 2017, 10:29 a.m. UTC | #1
On 19/12/17 04:12, Alexei Starovoitov wrote:
> Also reduce the scope of "scalar op scalar" tracking.
<snip>
> @@ -2046,6 +2088,12 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
>  	src_known = tnum_is_const(src_reg.var_off);
>  	dst_known = tnum_is_const(dst_reg->var_off);
>  
> +	if (!src_known &&
> +	    opcode != BPF_ADD && opcode != BPF_SUB && opcode != BPF_AND) {
> +		__mark_reg_unknown(dst_reg);
> +		return 0;
> +	}
> +
>  	switch (opcode) {
>  	case BPF_ADD:
>  		if (signed_add_overflows(dst_reg->smin_value, smin_val) ||
Still not seeing any explanation for why this change is necessary.
It also seems arbitrary, e.g. why is AND allowed but not OR?

Hypothetical use case: combining a series of flags based on data read from
 packet into an array index used for storing into a map value.  That sounds
 to me like something someone may be legitimately doing.

-Ed
Alexei Starovoitov Dec. 19, 2017, 7:57 p.m. UTC | #2
On 12/19/17 2:29 AM, Edward Cree wrote:
> On 19/12/17 04:12, Alexei Starovoitov wrote:
>> Also reduce the scope of "scalar op scalar" tracking.
> <snip>
>> @@ -2046,6 +2088,12 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
>>  	src_known = tnum_is_const(src_reg.var_off);
>>  	dst_known = tnum_is_const(dst_reg->var_off);
>>
>> +	if (!src_known &&
>> +	    opcode != BPF_ADD && opcode != BPF_SUB && opcode != BPF_AND) {
>> +		__mark_reg_unknown(dst_reg);
>> +		return 0;
>> +	}
>> +
>>  	switch (opcode) {
>>  	case BPF_ADD:
>>  		if (signed_add_overflows(dst_reg->smin_value, smin_val) ||
> Still not seeing any explanation for why this change is necessary.
> It also seems arbitrary, e.g. why is AND allowed but not OR?
>
> Hypothetical use case: combining a series of flags based on data read from
>  packet into an array index used for storing into a map value.  That sounds
>  to me like something someone may be legitimately doing.

When someone is trying to fetch run-time variables and OR them together
without a final bounds check, that is pretty broken.
All of our arrays (packet pointer, map_values, etc) are C style array
that go from zero to N.
The verifier is already doing an ugly dance with signed/unsigned <,>,<=,>= checks
and fighting llvm optimizations along the way. An AND alu op to bound the access
is enough in most cases, and '<' checks are rarely needed.
It is not possible to bound the access with OR or MUL, so there is no need
to track bits in such ops.
Notice the '!src_known' check above. It means the verifier still tracks
reg |= imm ops, but not reg |= reg.
Moreover, this tracking should not have been added to the verifier
in the first place, especially with zero tests covering such situations.
The kernel is not the place to add code 'just in case'.
When the real need arises (highly unlikely for the above hunk)
we can make verifier smarter step by step.
Less tracking also means less state to diverge -> better pruning ->
faster load times.
But the main goal of this hunk is to reduce attack surface.
The bug in RSH handling (patch 1) is an example that the bit tracking
is not easy to do correctly. Especially doing "scalar op scalar"
when scalars are variable registers and not a known constants.
So that is what commit log says:
'... reduce the scope of "scalar op scalar" tracking.'
diff mbox series

Patch

diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
index c561b986bab0..1632bb13ad8a 100644
--- a/include/linux/bpf_verifier.h
+++ b/include/linux/bpf_verifier.h
@@ -15,11 +15,11 @@ 
  * In practice this is far bigger than any realistic pointer offset; this limit
  * ensures that umax_value + (int)off + (int)size cannot overflow a u64.
  */
-#define BPF_MAX_VAR_OFF	(1ULL << 31)
+#define BPF_MAX_VAR_OFF	(1 << 29)
 /* Maximum variable size permitted for ARG_CONST_SIZE[_OR_ZERO].  This ensures
  * that converting umax_value to int cannot overflow.
  */
-#define BPF_MAX_VAR_SIZ	INT_MAX
+#define BPF_MAX_VAR_SIZ	(1 << 29)
 
 /* Liveness marks, used for registers and spilled-regs (in stack slots).
  * Read marks propagate upwards until they find a write mark; they record that
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 982bd9ec721a..86dfe6b5c243 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -1819,6 +1819,41 @@  static bool signed_sub_overflows(s64 a, s64 b)
 	return res > a;
 }
 
+static bool check_reg_sane_offset(struct bpf_verifier_env *env,
+				  const struct bpf_reg_state *reg,
+				  enum bpf_reg_type type)
+{
+	bool known = tnum_is_const(reg->var_off);
+	s64 val = reg->var_off.value;
+	s64 smin = reg->smin_value;
+
+	if (known && (val >= BPF_MAX_VAR_OFF || val <= -BPF_MAX_VAR_OFF)) {
+		verbose(env, "math between %s pointer and %lld is not allowed\n",
+			reg_type_str[type], val);
+		return false;
+	}
+
+	if (reg->off >= BPF_MAX_VAR_OFF || reg->off <= -BPF_MAX_VAR_OFF) {
+		verbose(env, "%s pointer offset %d is not allowed\n",
+			reg_type_str[type], reg->off);
+		return false;
+	}
+
+	if (smin == S64_MIN) {
+		verbose(env, "math between %s pointer and register with unbounded min value is not allowed\n",
+			reg_type_str[type]);
+		return false;
+	}
+
+	if (smin >= BPF_MAX_VAR_OFF || smin <= -BPF_MAX_VAR_OFF) {
+		verbose(env, "value %lld makes %s pointer be out of bounds\n",
+			smin, reg_type_str[type]);
+		return false;
+	}
+
+	return true;
+}
+
 /* Handles arithmetic on a pointer and a scalar: computes new min/max and var_off.
  * Caller should also handle BPF_MOV case separately.
  * If we return -EACCES, caller may want to try again treating pointer as a
@@ -1887,6 +1922,10 @@  static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
 	dst_reg->type = ptr_reg->type;
 	dst_reg->id = ptr_reg->id;
 
+	if (!check_reg_sane_offset(env, off_reg, ptr_reg->type) ||
+	    !check_reg_sane_offset(env, ptr_reg, ptr_reg->type))
+		return -EINVAL;
+
 	switch (opcode) {
 	case BPF_ADD:
 		/* We can take a fixed offset as long as it doesn't overflow
@@ -2017,6 +2056,9 @@  static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
 		return -EACCES;
 	}
 
+	if (!check_reg_sane_offset(env, dst_reg, ptr_reg->type))
+		return -EINVAL;
+
 	__update_reg_bounds(dst_reg);
 	__reg_deduce_bounds(dst_reg);
 	__reg_bound_offset(dst_reg);
@@ -2046,6 +2088,12 @@  static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
 	src_known = tnum_is_const(src_reg.var_off);
 	dst_known = tnum_is_const(dst_reg->var_off);
 
+	if (!src_known &&
+	    opcode != BPF_ADD && opcode != BPF_SUB && opcode != BPF_AND) {
+		__mark_reg_unknown(dst_reg);
+		return 0;
+	}
+
 	switch (opcode) {
 	case BPF_ADD:
 		if (signed_add_overflows(dst_reg->smin_value, smin_val) ||