@@ -44,10 +44,7 @@ int sysctl_tcp_mem(struct bpf_sysctl *ctx)
unsigned long tcp_mem[TCP_MEM_LOOPS] = {};
char value[MAX_VALUE_STR_LEN];
unsigned char i, off = 0;
- /* a workaround to prevent compiler from generating
- * codes verifier cannot handle yet.
- */
- volatile int ret;
+ int ret;

if (ctx->write)
return 0;
@@ -827,3 +827,79 @@
.result = ACCEPT,
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
+{
+ "jsle32, jsge32, mov : combining range",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
+ BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
+ BPF_EMIT_CALL(BPF_FUNC_get_cgroup_classid),
+ BPF_JMP32_IMM(BPF_JSLE, BPF_REG_0, 0, 4),
+ BPF_JMP32_IMM(BPF_JSGE, BPF_REG_0, 8, 3),
+ BPF_ALU32_REG(BPF_MOV, BPF_REG_1, BPF_REG_0),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_8, BPF_REG_1),
+ BPF_ST_MEM(BPF_B, BPF_REG_8, 0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .fixup_map_hash_8b = { 4 },
+ .result = ACCEPT,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "jslt32, jsgt32, add : combining range",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
+ BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
+ BPF_EMIT_CALL(BPF_FUNC_get_cgroup_classid),
+ BPF_JMP32_IMM(BPF_JSLT, BPF_REG_0, 1, 4),
+ BPF_JMP32_IMM(BPF_JSGT, BPF_REG_0, 4, 3),
+ BPF_ALU32_IMM(BPF_ADD, BPF_REG_0, 4),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_8, BPF_REG_0),
+ BPF_ST_MEM(BPF_B, BPF_REG_8, 0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .fixup_map_hash_48b = { 4 },
+ .result = ACCEPT,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "jslt32, jsgt32 : negative lower bound",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
+ BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
+ BPF_EMIT_CALL(BPF_FUNC_get_cgroup_classid),
+ BPF_JMP32_IMM(BPF_JSLT, BPF_REG_0, -2, 4),
+ BPF_JMP32_IMM(BPF_JSGT, BPF_REG_0, 4, 3),
+ BPF_ALU32_IMM(BPF_ADD, BPF_REG_0, 4),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_8, BPF_REG_0),
+ BPF_ST_MEM(BPF_B, BPF_REG_8, 0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .fixup_map_hash_48b = { 4 },
+ .result = REJECT,
+ .errstr = "R8 unbounded memory access, make sure to bounds check any array access into a map",
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
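To make the two ACCEPT tests above concrete, the first one corresponds
roughly to the restricted-C shape below. This is an illustrative sketch
only; the map name, section name and program skeleton are assumptions
and not part of the patch:

  #include <linux/bpf.h>
  #include <bpf/bpf_helpers.h>

  /* 8-byte value, matching fixup_map_hash_8b in the first test */
  struct {
          __uint(type, BPF_MAP_TYPE_HASH);
          __uint(max_entries, 1);
          __type(key, __u32);
          __type(value, __u64);
  } demo_map SEC(".maps");

  SEC("tc")
  int range_combine(struct __sk_buff *skb)
  {
          __u32 key = 0;
          char *p = bpf_map_lookup_elem(&demo_map, &key);
          int v;

          if (!p)
                  return 0;
          v = bpf_get_cgroup_classid(skb); /* 32-bit result in w0 */
          if (v <= 0)                      /* BPF_JSLE, taken => exit */
                  return 0;
          if (v >= 8)                      /* BPF_JSGE, taken => exit */
                  return 0;
          /* fall-through of both jmp32 compares: w0 in [1, 7] */
          p[v] = 0;                        /* in bounds for the 8-byte value */
          return 0;
  }

  char _license[] SEC("license") = "GPL";

The point being exercised is that the verifier combines the signed
32-bit bounds from the two jmp32 branches and carries them through the
32-bit mov (or the 32-bit add in the second test) into the pointer
arithmetic on r8, so the byte store is provably bounded.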
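The REJECT test differs only in the lower bound. Falling through
JSLT w0, -2 and JSGT w0, 4 leaves w0 in the signed 32-bit range
[-2, 4]. Because that range crosses zero, the corresponding unsigned
32-bit range wraps around (0xfffffffe..0xffffffff as well as 0..4), so
after the 32-bit add of 4 the verifier cannot derive a usable bound for
the index added to r8 and must reject the program with the
unbounded-access error above.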
Added a few verifier tests for 32-bit signed compares.

  $ ./test_verifier
  ...
  #557/p jsle32, jsge32, mov : combining range OK
  #558/p jslt32, jsgt32, add : combining range OK
  #559/p jslt32, jsgt32 : negative lower bound OK
  ...

Also reverted the workaround in test_sysctl_loop1.c since the kernel
verifier is now able to handle the case.

  $ ./test_progs
  ...
  #4/18 test_sysctl_loop1.o:OK
  ...

For the non-alu32 mode, where the llvm optimization
(https://reviews.llvm.org/D72787) also kicks in, the existing verifier
already handles the code well:

  $ ./test_progs-no-alu32
  ...
  #4/18 test_sysctl_loop1.o:OK
  ...

Signed-off-by: Yonghong Song <yhs@fb.com>
---
 .../selftests/bpf/progs/test_sysctl_loop1.c  |  5 +-
 tools/testing/selftests/bpf/verifier/jmp32.c | 76 +++++++++++++++++++
 2 files changed, 77 insertions(+), 4 deletions(-)
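For reference, the loop in test_sysctl_loop1.c that needed the
'volatile' workaround has roughly the shape below. This is a hedged
paraphrase, not the verbatim selftest (the constants and the
surrounding sysctl-value setup are simplified; see
tools/testing/selftests/bpf/progs/test_sysctl_loop1.c for the real
code). Without 'volatile', llvm lowers the bounds check on 'ret' to
32-bit signed compares, which the verifier can now combine into a
range:

  #include <linux/bpf.h>
  #include <bpf/bpf_helpers.h>

  #define MAX_ULONG_STR_LEN 7      /* assumed; check the real selftest */
  #define TCP_MEM_LOOPS 28         /* assumed; check the real selftest */

  SEC("cgroup/sysctl")
  int sysctl_demo(struct bpf_sysctl *ctx)
  {
          unsigned long tcp_mem[TCP_MEM_LOOPS] = {};
          char value[64] = {};     /* real test fills this from the sysctl */
          unsigned char i, off = 0;
          int ret;                 /* plain int: workaround no longer needed */

          if (ctx->write)
                  return 0;

          for (i = 0; i < TCP_MEM_LOOPS; i++) {
                  ret = bpf_strtoul(value + off, MAX_ULONG_STR_LEN, 0,
                                    tcp_mem + i);
                  /* jsle32/jsgt32-style compares; the verifier now deduces
                   * ret in [1, MAX_ULONG_STR_LEN] on the fall-through path */
                  if (ret <= 0 || ret > MAX_ULONG_STR_LEN)
                          return 0;
                  off += ret & MAX_ULONG_STR_LEN;
          }
          return 0;
  }

  char _license[] SEC("license") = "GPL";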