[bpf-next,v2,3/3] tools/bpf: add verifier test for s32/u32 helper return values

Message ID: 20191119195714.3692123-1-yhs@fb.com
State: Changes Requested
Delegated to: BPF Maintainers
Series: bpf: allow s32/u32 return types in verifier for bpf helpers

Commit Message

Yonghong Song Nov. 19, 2019, 7:57 p.m. UTC
Add two verifier tests for helpers returning s32/u32 values. In each
test, the helper's return value is refined with a jmp32 instruction
and the whole 64-bit r0 is then used as a map value offset. Without
the previous patch, both tests will fail.

Signed-off-by: Yonghong Song <yhs@fb.com>
---
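For reviewers, not part of the commit: in rough C terms the two test
programs look like the sketch below. The map definition, section names
and identifiers (map_hash, helper_ret_u32, helper_ret_s32, and the
libbpf BTF-style map syntax) are hypothetical stand-ins for the
fixup_map_hash_48b map and the BPF_PROG_TYPE_SCHED_CLS context; the
sketch only illustrates the pattern being tested, namely that a jmp32
bound on the 32-bit helper return value must also bound the full
64-bit r0 before it is used as a pointer offset.

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(max_entries, 1);
	__type(key, long long);
	__type(value, char[48]);	/* matches the 48-byte map value */
} map_hash SEC(".maps");

SEC("tc")
int helper_ret_u32(struct __sk_buff *skb)
{
	long long key = 0;
	char *v = bpf_map_lookup_elem(&map_hash, &key);

	if (v) {
		/* bpf_get_cgroup_classid() returns __u32 */
		__u32 classid = bpf_get_cgroup_classid(skb);

		if (classid < 7)	/* 32-bit (jmp32) bound... */
			v[classid] = 0;	/* ...full 64-bit r0 as offset */
	}
	return 0;
}

SEC("tc")
int helper_ret_s32(struct __sk_buff *skb)
{
	long long key = 0;
	char *v = bpf_map_lookup_elem(&map_hash, &key);

	if (v) {
		/* bpf_skb_pull_data() returns a signed int */
		int err = bpf_skb_pull_data(skb, 4);

		if (err >= 1 && err < 7)	/* jmp32 signed bounds */
			v[err] = 0;
	}
	return 0;
}

char _license[] SEC("license") = "GPL";

If the jmp32 bounds are not propagated to the full register, the
verifier sees an unbounded r0 at the v[...] access and rejects the
program, which is what the two tests exercise.
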
 .../selftests/bpf/verifier/helper_ret.c       | 50 +++++++++++++++++++
 1 file changed, 50 insertions(+)
 create mode 100644 tools/testing/selftests/bpf/verifier/helper_ret.c

Patch

diff --git a/tools/testing/selftests/bpf/verifier/helper_ret.c b/tools/testing/selftests/bpf/verifier/helper_ret.c
new file mode 100644
index 000000000000..7850a52645a7
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/helper_ret.c
@@ -0,0 +1,50 @@
+{
+	"helper_ret: get_cgroup_classid: __u32",
+	.insns = {
+	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+	BPF_LD_MAP_FD(BPF_REG_1, 0),
+	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
+	BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+	BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+	BPF_EMIT_CALL(BPF_FUNC_get_cgroup_classid),
+	BPF_JMP32_IMM(BPF_JGE, BPF_REG_0, 7, 2),
+	BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_0),
+	BPF_ST_MEM(BPF_B, BPF_REG_6, 0, 0),
+	BPF_MOV32_IMM(BPF_REG_0, 0),
+	BPF_EXIT_INSN(),
+	},
+	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
+	.fixup_map_hash_48b = { 4 },
+	.result = ACCEPT,
+	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+	"helper_ret: skb_pull_data: __s32",
+	.insns = {
+	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+	BPF_LD_MAP_FD(BPF_REG_1, 0),
+	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
+	BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+	BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+	BPF_MOV64_IMM(BPF_REG_2, 4),
+	BPF_EMIT_CALL(BPF_FUNC_skb_pull_data),
+	BPF_JMP32_IMM(BPF_JSLT, BPF_REG_0, 1, 3),
+	BPF_JMP32_IMM(BPF_JSGE, BPF_REG_0, 7, 2),
+	BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_0),
+	BPF_ST_MEM(BPF_B, BPF_REG_6, 0, 0),
+	BPF_MOV32_IMM(BPF_REG_0, 0),
+	BPF_EXIT_INSN(),
+	},
+	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
+	.fixup_map_hash_48b = { 4 },
+	.result = ACCEPT,
+	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},