Diffstat (limited to 'tools/testing/selftests/bpf/verifier')
 -rw-r--r--  tools/testing/selftests/bpf/verifier/basic_instr.c            |  85
 -rw-r--r--  tools/testing/selftests/bpf/verifier/calls.c                  |  22
 -rw-r--r--  tools/testing/selftests/bpf/verifier/cfg.c                    |  11
 -rw-r--r--  tools/testing/selftests/bpf/verifier/direct_packet_access.c   |   3
 -rw-r--r--  tools/testing/selftests/bpf/verifier/div_overflow.c           |  14
 -rw-r--r--  tools/testing/selftests/bpf/verifier/helper_access_var_len.c  |  28
 -rw-r--r--  tools/testing/selftests/bpf/verifier/loops1.c                 | 161
 -rw-r--r--  tools/testing/selftests/bpf/verifier/prevent_map_lookup.c     |  15
 -rw-r--r--  tools/testing/selftests/bpf/verifier/sock.c                   |  18
 -rw-r--r--  tools/testing/selftests/bpf/verifier/subreg.c                 | 533
 -rw-r--r--  tools/testing/selftests/bpf/verifier/wide_store.c             |  36
11 files changed, 880 insertions(+), 46 deletions(-)
diff --git a/tools/testing/selftests/bpf/verifier/basic_instr.c b/tools/testing/selftests/bpf/verifier/basic_instr.c
index ed91a7b9a456..071dbc889e8c 100644
--- a/tools/testing/selftests/bpf/verifier/basic_instr.c
+++ b/tools/testing/selftests/bpf/verifier/basic_instr.c
@@ -91,6 +91,91 @@
 	.result = ACCEPT,
 },
 {
+	"lsh64 by 0 imm",
+	.insns = {
+	BPF_LD_IMM64(BPF_REG_0, 1),
+	BPF_LD_IMM64(BPF_REG_1, 1),
+	BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 0),
+	BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 1),
+	BPF_MOV64_IMM(BPF_REG_0, 2),
+	BPF_EXIT_INSN(),
+	},
+	.result = ACCEPT,
+	.retval = 1,
+},
+{
+	"rsh64 by 0 imm",
+	.insns = {
+	BPF_LD_IMM64(BPF_REG_0, 1),
+	BPF_LD_IMM64(BPF_REG_1, 0x100000000LL),
+	BPF_ALU64_REG(BPF_MOV, BPF_REG_2, BPF_REG_1),
+	BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 0),
+	BPF_JMP_REG(BPF_JEQ, BPF_REG_1, BPF_REG_2, 1),
+	BPF_MOV64_IMM(BPF_REG_0, 2),
+	BPF_EXIT_INSN(),
+	},
+	.result = ACCEPT,
+	.retval = 1,
+},
+{
+	"arsh64 by 0 imm",
+	.insns = {
+	BPF_LD_IMM64(BPF_REG_0, 1),
+	BPF_LD_IMM64(BPF_REG_1, 0x100000000LL),
+	BPF_ALU64_REG(BPF_MOV, BPF_REG_2, BPF_REG_1),
+	BPF_ALU64_IMM(BPF_ARSH, BPF_REG_1, 0),
+	BPF_JMP_REG(BPF_JEQ, BPF_REG_1, BPF_REG_2, 1),
+	BPF_MOV64_IMM(BPF_REG_0, 2),
+	BPF_EXIT_INSN(),
+	},
+	.result = ACCEPT,
+	.retval = 1,
+},
+{
+	"lsh64 by 0 reg",
+	.insns = {
+	BPF_LD_IMM64(BPF_REG_0, 1),
+	BPF_LD_IMM64(BPF_REG_1, 1),
+	BPF_LD_IMM64(BPF_REG_2, 0),
+	BPF_ALU64_REG(BPF_LSH, BPF_REG_1, BPF_REG_2),
+	BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 1),
+	BPF_MOV64_IMM(BPF_REG_0, 2),
+	BPF_EXIT_INSN(),
+	},
+	.result = ACCEPT,
+	.retval = 1,
+},
+{
+	"rsh64 by 0 reg",
+	.insns = {
+	BPF_LD_IMM64(BPF_REG_0, 1),
+	BPF_LD_IMM64(BPF_REG_1, 0x100000000LL),
+	BPF_ALU64_REG(BPF_MOV, BPF_REG_2, BPF_REG_1),
+	BPF_LD_IMM64(BPF_REG_3, 0),
+	BPF_ALU64_REG(BPF_RSH, BPF_REG_1, BPF_REG_3),
+	BPF_JMP_REG(BPF_JEQ, BPF_REG_1, BPF_REG_2, 1),
+	BPF_MOV64_IMM(BPF_REG_0, 2),
+	BPF_EXIT_INSN(),
+	},
+	.result = ACCEPT,
+	.retval = 1,
+},
+{
+	"arsh64 by 0 reg",
+	.insns = {
+	BPF_LD_IMM64(BPF_REG_0, 1),
+	BPF_LD_IMM64(BPF_REG_1, 0x100000000LL),
+	BPF_ALU64_REG(BPF_MOV, BPF_REG_2, BPF_REG_1),
+	BPF_LD_IMM64(BPF_REG_3, 0),
+	BPF_ALU64_REG(BPF_ARSH, BPF_REG_1, BPF_REG_3),
+	BPF_JMP_REG(BPF_JEQ, BPF_REG_1, BPF_REG_2, 1),
+	BPF_MOV64_IMM(BPF_REG_0, 2),
+	BPF_EXIT_INSN(),
+	},
+	.result = ACCEPT,
+	.retval = 1,
+},
+{
 	"invalid 64-bit BPF_END",
 	.insns = {
 	BPF_MOV32_IMM(BPF_REG_0, 0),
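The six new basic_instr tests pin down that a 64-bit shift by zero, whether the shift amount is an immediate or a register, must behave as a no-op that preserves the full 64-bit value, including the upper half. As a plain C restatement of the property being asserted (a sketch, not part of the patch):

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
    	uint64_t v = 0x100000000ULL;

    	/* lsh64/rsh64/arsh64 by 0 must leave all 64 bits intact. */
    	assert((v << 0) == v);
    	assert((v >> 0) == v);
    	assert((uint64_t)((int64_t)v >> 0) == v);
    	return 0;
    }
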
diff --git a/tools/testing/selftests/bpf/verifier/calls.c b/tools/testing/selftests/bpf/verifier/calls.c
index 9093a8f64dc6..2d752c4f8d9d 100644
--- a/tools/testing/selftests/bpf/verifier/calls.c
+++ b/tools/testing/selftests/bpf/verifier/calls.c
@@ -215,9 +215,11 @@
 	BPF_MOV64_IMM(BPF_REG_0, 3),
 	BPF_JMP_IMM(BPF_JA, 0, 0, -6),
 	},
-	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
-	.errstr = "back-edge from insn",
-	.result = REJECT,
+	.prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
+	.errstr_unpriv = "back-edge from insn",
+	.result_unpriv = REJECT,
+	.result = ACCEPT,
+	.retval = 1,
 },
 {
 	"calls: conditional call 4",
@@ -250,22 +252,24 @@
 	BPF_MOV64_IMM(BPF_REG_0, 3),
 	BPF_EXIT_INSN(),
 	},
-	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
-	.errstr = "back-edge from insn",
-	.result = REJECT,
+	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
+	.result = ACCEPT,
+	.retval = 1,
 },
 {
 	"calls: conditional call 6",
 	.insns = {
+	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+	BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
-	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, -2),
+	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, -3),
 	BPF_EXIT_INSN(),
 	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
 		    offsetof(struct __sk_buff, mark)),
 	BPF_EXIT_INSN(),
 	},
-	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
-	.errstr = "back-edge from insn",
+	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
+	.errstr = "infinite loop detected",
 	.result = REJECT,
 },
 {
diff --git a/tools/testing/selftests/bpf/verifier/cfg.c b/tools/testing/selftests/bpf/verifier/cfg.c
index 349c0862fb4c..4eb76ed739ce 100644
--- a/tools/testing/selftests/bpf/verifier/cfg.c
+++ b/tools/testing/selftests/bpf/verifier/cfg.c
@@ -41,7 +41,8 @@
 	BPF_JMP_IMM(BPF_JA, 0, 0, -1),
 	BPF_EXIT_INSN(),
 	},
-	.errstr = "back-edge",
+	.errstr = "unreachable insn 1",
+	.errstr_unpriv = "back-edge",
 	.result = REJECT,
 },
 {
@@ -53,18 +54,20 @@
 	BPF_JMP_IMM(BPF_JA, 0, 0, -4),
 	BPF_EXIT_INSN(),
 	},
-	.errstr = "back-edge",
+	.errstr = "unreachable insn 4",
+	.errstr_unpriv = "back-edge",
 	.result = REJECT,
 },
 {
 	"conditional loop",
 	.insns = {
-	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+	BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
 	BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
 	BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
 	BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, -3),
 	BPF_EXIT_INSN(),
 	},
-	.errstr = "back-edge",
+	.errstr = "infinite loop detected",
+	.errstr_unpriv = "back-edge",
 	.result = REJECT,
 },
diff --git a/tools/testing/selftests/bpf/verifier/direct_packet_access.c b/tools/testing/selftests/bpf/verifier/direct_packet_access.c
index d5c596fdc4b9..2c5fbe7bcd27 100644
--- a/tools/testing/selftests/bpf/verifier/direct_packet_access.c
+++ b/tools/testing/selftests/bpf/verifier/direct_packet_access.c
@@ -511,7 +511,8 @@
 		    offsetof(struct __sk_buff, data)),
 	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
 		    offsetof(struct __sk_buff, data_end)),
-	BPF_MOV64_IMM(BPF_REG_0, 0xffffffff),
+	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+		    offsetof(struct __sk_buff, mark)),
 	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
 	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
 	BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 0xffff),
diff --git a/tools/testing/selftests/bpf/verifier/div_overflow.c b/tools/testing/selftests/bpf/verifier/div_overflow.c
index bd3f38dbe796..acab4f00819f 100644
--- a/tools/testing/selftests/bpf/verifier/div_overflow.c
+++ b/tools/testing/selftests/bpf/verifier/div_overflow.c
@@ -29,8 +29,11 @@
 	"DIV64 overflow, check 1",
 	.insns = {
 	BPF_MOV64_IMM(BPF_REG_1, -1),
-	BPF_LD_IMM64(BPF_REG_0, LLONG_MIN),
-	BPF_ALU64_REG(BPF_DIV, BPF_REG_0, BPF_REG_1),
+	BPF_LD_IMM64(BPF_REG_2, LLONG_MIN),
+	BPF_ALU64_REG(BPF_DIV, BPF_REG_2, BPF_REG_1),
+	BPF_MOV32_IMM(BPF_REG_0, 0),
+	BPF_JMP_REG(BPF_JEQ, BPF_REG_0, BPF_REG_2, 1),
+	BPF_MOV32_IMM(BPF_REG_0, 1),
 	BPF_EXIT_INSN(),
 	},
 	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
@@ -40,8 +43,11 @@
 {
 	"DIV64 overflow, check 2",
 	.insns = {
-	BPF_LD_IMM64(BPF_REG_0, LLONG_MIN),
-	BPF_ALU64_IMM(BPF_DIV, BPF_REG_0, -1),
+	BPF_LD_IMM64(BPF_REG_1, LLONG_MIN),
+	BPF_ALU64_IMM(BPF_DIV, BPF_REG_1, -1),
+	BPF_MOV32_IMM(BPF_REG_0, 0),
+	BPF_JMP_REG(BPF_JEQ, BPF_REG_0, BPF_REG_1, 1),
+	BPF_MOV32_IMM(BPF_REG_0, 1),
 	BPF_EXIT_INSN(),
 	},
 	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
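The reworked DIV64 tests now check the runtime quotient instead of merely verifying that the program loads. BPF_DIV is an unsigned divide, so the C signed-overflow case LLONG_MIN / -1 is well defined in BPF. A user-space sketch of the expected arithmetic, assuming both operands are reinterpreted as u64:

    #include <assert.h>
    #include <limits.h>
    #include <stdint.h>

    int main(void)
    {
    	uint64_t dividend = (uint64_t)LLONG_MIN; /* 0x8000000000000000 */
    	uint64_t divisor = (uint64_t)-1LL;       /* 0xffffffffffffffff */

    	/* The divisor is larger than the dividend, so the quotient
    	 * is 0, which is what the JEQ in the tests above compares
    	 * against at runtime. */
    	assert(dividend / divisor == 0);
    	return 0;
    }
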
diff --git a/tools/testing/selftests/bpf/verifier/helper_access_var_len.c b/tools/testing/selftests/bpf/verifier/helper_access_var_len.c
index 1f39d845c64f..67ab12410050 100644
--- a/tools/testing/selftests/bpf/verifier/helper_access_var_len.c
+++ b/tools/testing/selftests/bpf/verifier/helper_access_var_len.c
@@ -29,9 +29,9 @@
 {
 	"helper access to variable memory: stack, bitwise AND, zero included",
 	.insns = {
+	BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, 8),
 	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
 	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
-	BPF_MOV64_IMM(BPF_REG_2, 16),
 	BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
 	BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
 	BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 64),
@@ -46,9 +46,9 @@
 {
 	"helper access to variable memory: stack, bitwise AND + JMP, wrong max",
 	.insns = {
+	BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, 8),
 	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
 	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
-	BPF_MOV64_IMM(BPF_REG_2, 16),
 	BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
 	BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
 	BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 65),
@@ -122,9 +122,9 @@
 {
 	"helper access to variable memory: stack, JMP, bounds + offset",
 	.insns = {
+	BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, 8),
 	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
 	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
-	BPF_MOV64_IMM(BPF_REG_2, 16),
 	BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
 	BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
 	BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 64, 5),
@@ -143,9 +143,9 @@
 {
 	"helper access to variable memory: stack, JMP, wrong max",
 	.insns = {
+	BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, 8),
 	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
 	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
-	BPF_MOV64_IMM(BPF_REG_2, 16),
 	BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
 	BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
 	BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 65, 4),
@@ -163,9 +163,9 @@
 {
 	"helper access to variable memory: stack, JMP, no max check",
 	.insns = {
+	BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, 8),
 	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
 	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
-	BPF_MOV64_IMM(BPF_REG_2, 16),
 	BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
 	BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
 	BPF_MOV64_IMM(BPF_REG_4, 0),
@@ -183,9 +183,9 @@
 {
 	"helper access to variable memory: stack, JMP, no min check",
 	.insns = {
+	BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, 8),
 	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
 	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
-	BPF_MOV64_IMM(BPF_REG_2, 16),
 	BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
 	BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
 	BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 64, 3),
@@ -201,9 +201,9 @@
 {
 	"helper access to variable memory: stack, JMP (signed), no min check",
 	.insns = {
+	BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, 8),
 	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
 	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
-	BPF_MOV64_IMM(BPF_REG_2, 16),
 	BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
 	BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
 	BPF_JMP_IMM(BPF_JSGT, BPF_REG_2, 64, 3),
@@ -244,6 +244,7 @@
 {
 	"helper access to variable memory: map, JMP, wrong max",
 	.insns = {
+	BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 8),
 	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
 	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
 	BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
@@ -251,7 +252,7 @@
 	BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
 	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
 	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
-	BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
+	BPF_MOV64_REG(BPF_REG_2, BPF_REG_6),
 	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
 	BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
 	BPF_JMP_IMM(BPF_JSGT, BPF_REG_2, sizeof(struct test_val) + 1, 4),
@@ -262,7 +263,7 @@
 	BPF_MOV64_IMM(BPF_REG_0, 0),
 	BPF_EXIT_INSN(),
 	},
-	.fixup_map_hash_48b = { 3 },
+	.fixup_map_hash_48b = { 4 },
 	.errstr = "invalid access to map value, value_size=48 off=0 size=49",
 	.result = REJECT,
 	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
@@ -296,6 +297,7 @@
 {
 	"helper access to variable memory: map adjusted, JMP, wrong max",
 	.insns = {
+	BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 8),
 	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
 	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
 	BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
@@ -304,7 +306,7 @@
 	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
 	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
 	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 20),
-	BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
+	BPF_MOV64_REG(BPF_REG_2, BPF_REG_6),
 	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
 	BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
 	BPF_JMP_IMM(BPF_JSGT, BPF_REG_2, sizeof(struct test_val) - 19, 4),
@@ -315,7 +317,7 @@
 	BPF_MOV64_IMM(BPF_REG_0, 0),
 	BPF_EXIT_INSN(),
 	},
-	.fixup_map_hash_48b = { 3 },
+	.fixup_map_hash_48b = { 4 },
 	.errstr = "R1 min value is outside of the array range",
 	.result = REJECT,
 	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
@@ -337,8 +339,8 @@
 {
 	"helper access to variable memory: size > 0 not allowed on NULL (ARG_PTR_TO_MEM_OR_NULL)",
 	.insns = {
+	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
 	BPF_MOV64_IMM(BPF_REG_1, 0),
-	BPF_MOV64_IMM(BPF_REG_2, 1),
 	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
 	BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
 	BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 64),
@@ -562,6 +564,7 @@
 {
 	"helper access to variable memory: 8 bytes leak",
 	.insns = {
+	BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, 8),
 	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
 	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
 	BPF_MOV64_IMM(BPF_REG_0, 0),
@@ -572,7 +575,6 @@
 	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
 	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
 	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
-	BPF_MOV64_IMM(BPF_REG_2, 1),
 	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
 	BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
 	BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 63),
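The helper_access_var_len changes replace constant BPF_MOV64_IMM sizes with loads from the context, so the verifier keeps treating the size as unknown and the min/max bounds logic under test stays exercised even when constants would otherwise be tracked precisely. In restricted C the pattern looks roughly like the sketch below; the section name, the context offset, and the use of bpf_probe_read_kernel() are illustrative assumptions, not taken from the patch:

    #include <linux/bpf.h>
    #include <bpf/bpf_helpers.h>

    SEC("tracepoint/syscalls/sys_enter_read")
    int var_len_access(void *ctx)
    {
    	char buf[64];
    	/* Size loaded from the context: unknown to the verifier. */
    	__u64 size = *(__u64 *)(ctx + 8);	/* hypothetical field */

    	size &= 63;	/* clamp so the verifier sees range [0, 63] */
    	bpf_probe_read_kernel(buf, size, ctx);
    	return 0;
    }
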
diff --git a/tools/testing/selftests/bpf/verifier/loops1.c b/tools/testing/selftests/bpf/verifier/loops1.c
new file mode 100644
index 000000000000..5e980a5ab69d
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/loops1.c
@@ -0,0 +1,161 @@
+{
+	"bounded loop, count to 4",
+	.insns = {
+	BPF_MOV64_IMM(BPF_REG_0, 0),
+	BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1),
+	BPF_JMP_IMM(BPF_JLT, BPF_REG_0, 4, -2),
+	BPF_EXIT_INSN(),
+	},
+	.result = ACCEPT,
+	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
+	.retval = 4,
+},
+{
+	"bounded loop, count to 20",
+	.insns = {
+	BPF_MOV64_IMM(BPF_REG_0, 0),
+	BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 3),
+	BPF_JMP_IMM(BPF_JLT, BPF_REG_0, 20, -2),
+	BPF_EXIT_INSN(),
+	},
+	.result = ACCEPT,
+	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+	"bounded loop, count from positive unknown to 4",
+	.insns = {
+	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+	BPF_JMP_IMM(BPF_JSLT, BPF_REG_0, 0, 2),
+	BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1),
+	BPF_JMP_IMM(BPF_JLT, BPF_REG_0, 4, -2),
+	BPF_EXIT_INSN(),
+	},
+	.result = ACCEPT,
+	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
+	.retval = 4,
+},
+{
+	"bounded loop, count from totally unknown to 4",
+	.insns = {
+	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+	BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1),
+	BPF_JMP_IMM(BPF_JLT, BPF_REG_0, 4, -2),
+	BPF_EXIT_INSN(),
+	},
+	.result = ACCEPT,
+	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+	"bounded loop, count to 4 with equality",
+	.insns = {
+	BPF_MOV64_IMM(BPF_REG_0, 0),
+	BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1),
+	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 4, -2),
+	BPF_EXIT_INSN(),
+	},
+	.result = ACCEPT,
+	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+	"bounded loop, start in the middle",
+	.insns = {
+	BPF_MOV64_IMM(BPF_REG_0, 0),
+	BPF_JMP_A(1),
+	BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1),
+	BPF_JMP_IMM(BPF_JLT, BPF_REG_0, 4, -2),
+	BPF_EXIT_INSN(),
+	},
+	.result = REJECT,
+	.errstr = "back-edge",
+	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
+	.retval = 4,
+},
+{
+	"bounded loop containing a forward jump",
+	.insns = {
+	BPF_MOV64_IMM(BPF_REG_0, 0),
+	BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1),
+	BPF_JMP_REG(BPF_JEQ, BPF_REG_0, BPF_REG_0, 0),
+	BPF_JMP_IMM(BPF_JLT, BPF_REG_0, 4, -3),
+	BPF_EXIT_INSN(),
+	},
+	.result = ACCEPT,
+	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
+	.retval = 4,
+},
+{
+	"bounded loop that jumps out rather than in",
+	.insns = {
+	BPF_MOV64_IMM(BPF_REG_6, 0),
+	BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
+	BPF_JMP_IMM(BPF_JGT, BPF_REG_6, 10000, 2),
+	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+	BPF_JMP_A(-4),
+	BPF_EXIT_INSN(),
+	},
+	.result = ACCEPT,
+	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+	"infinite loop after a conditional jump",
+	.insns = {
+	BPF_MOV64_IMM(BPF_REG_0, 5),
+	BPF_JMP_IMM(BPF_JLT, BPF_REG_0, 4, 2),
+	BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1),
+	BPF_JMP_A(-2),
+	BPF_EXIT_INSN(),
+	},
+	.result = REJECT,
+	.errstr = "program is too large",
+	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+	"bounded recursion",
+	.insns = {
+	BPF_MOV64_IMM(BPF_REG_1, 0),
+	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+	BPF_EXIT_INSN(),
+	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1),
+	BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+	BPF_JMP_IMM(BPF_JLT, BPF_REG_1, 4, 1),
+	BPF_EXIT_INSN(),
+	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -5),
+	BPF_EXIT_INSN(),
+	},
+	.result = REJECT,
+	.errstr = "back-edge",
+	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+	"infinite loop in two jumps",
+	.insns = {
+	BPF_MOV64_IMM(BPF_REG_0, 0),
+	BPF_JMP_A(0),
+	BPF_JMP_IMM(BPF_JLT, BPF_REG_0, 4, -2),
+	BPF_EXIT_INSN(),
+	},
+	.result = REJECT,
+	.errstr = "loop detected",
+	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+	"infinite loop: three-jump trick",
+	.insns = {
+	BPF_MOV64_IMM(BPF_REG_0, 0),
+	BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1),
+	BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
+	BPF_JMP_IMM(BPF_JLT, BPF_REG_0, 2, 1),
+	BPF_EXIT_INSN(),
+	BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1),
+	BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
+	BPF_JMP_IMM(BPF_JLT, BPF_REG_0, 2, 1),
+	BPF_EXIT_INSN(),
+	BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1),
+	BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
+	BPF_JMP_IMM(BPF_JLT, BPF_REG_0, 2, -11),
+	BPF_EXIT_INSN(),
+	},
+	.result = REJECT,
+	.errstr = "loop detected",
+	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
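The new loops1.c file matches the verifier's bounded-loop support: back-edges are no longer rejected outright, and a loop is accepted when the verifier can walk its states to a fixed point. "bounded loop, count to 4" is, in restricted C, roughly the following sketch (the section name is illustrative):

    #include <linux/bpf.h>
    #include <bpf/bpf_helpers.h>

    SEC("tracepoint/syscalls/sys_enter_read")
    int count_to_4(void *ctx)
    {
    	int i = 0;

    	/* The verifier now explores this back-edge until the loop
    	 * state converges instead of rejecting it outright. */
    	do {
    		i++;
    	} while (i < 4);

    	return i;	/* retval = 4, as the selftest expects */
    }
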
diff --git a/tools/testing/selftests/bpf/verifier/prevent_map_lookup.c b/tools/testing/selftests/bpf/verifier/prevent_map_lookup.c
index bbdba990fefb..da7a4b37cb98 100644
--- a/tools/testing/selftests/bpf/verifier/prevent_map_lookup.c
+++ b/tools/testing/selftests/bpf/verifier/prevent_map_lookup.c
@@ -29,21 +29,6 @@
 	.prog_type = BPF_PROG_TYPE_SOCK_OPS,
 },
 {
-	"prevent map lookup in xskmap",
-	.insns = {
-	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
-	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
-	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
-	BPF_LD_MAP_FD(BPF_REG_1, 0),
-	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
-	BPF_EXIT_INSN(),
-	},
-	.fixup_map_xskmap = { 3 },
-	.result = REJECT,
-	.errstr = "cannot pass map_type 17 into func bpf_map_lookup_elem",
-	.prog_type = BPF_PROG_TYPE_XDP,
-},
-{
 	"prevent map lookup in stack trace",
 	.insns = {
 	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
diff --git a/tools/testing/selftests/bpf/verifier/sock.c b/tools/testing/selftests/bpf/verifier/sock.c
index b31cd2cf50d0..9ed192e14f5f 100644
--- a/tools/testing/selftests/bpf/verifier/sock.c
+++ b/tools/testing/selftests/bpf/verifier/sock.c
@@ -498,3 +498,21 @@
 	.result = REJECT,
 	.errstr = "cannot pass map_type 24 into func bpf_map_lookup_elem",
 },
+{
+	"bpf_map_lookup_elem(xskmap, &key); xs->queue_id",
+	.insns = {
+	BPF_ST_MEM(BPF_W, BPF_REG_10, -8, 0),
+	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+	BPF_LD_MAP_FD(BPF_REG_1, 0),
+	BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+	BPF_EXIT_INSN(),
+	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, offsetof(struct bpf_xdp_sock, queue_id)),
+	BPF_MOV64_IMM(BPF_REG_0, 0),
+	BPF_EXIT_INSN(),
+	},
+	.fixup_map_xskmap = { 3 },
+	.prog_type = BPF_PROG_TYPE_XDP,
+	.result = ACCEPT,
+},
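The new sock.c entry is the flip side of the prevent_map_lookup.c removal above: XDP programs may now call bpf_map_lookup_elem() on an XSKMAP, and after a NULL check the returned struct bpf_xdp_sock pointer exposes queue_id. A libbpf-style sketch (the map layout, names, and redirect policy are illustrative, not from the patch):

    #include <linux/bpf.h>
    #include <bpf/bpf_helpers.h>

    struct {
    	__uint(type, BPF_MAP_TYPE_XSKMAP);
    	__uint(max_entries, 64);
    	__uint(key_size, sizeof(__u32));
    	__uint(value_size, sizeof(__u32));
    } xsks_map SEC(".maps");

    SEC("xdp")
    int check_queue(struct xdp_md *ctx)
    {
    	__u32 key = ctx->rx_queue_index;
    	struct bpf_xdp_sock *xs;

    	xs = bpf_map_lookup_elem(&xsks_map, &key);
    	if (!xs)
    		return XDP_PASS;

    	/* queue_id is readable once the pointer is known non-NULL */
    	if (xs->queue_id != ctx->rx_queue_index)
    		return XDP_DROP;

    	return XDP_PASS;
    }
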
diff --git a/tools/testing/selftests/bpf/verifier/subreg.c b/tools/testing/selftests/bpf/verifier/subreg.c
new file mode 100644
index 000000000000..4c4133c80440
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/subreg.c
@@ -0,0 +1,533 @@
+/* This file contains sub-register zero extension checks for insns defining
+ * sub-registers, meaning:
+ *   - All insns under BPF_ALU class. Their BPF_ALU32 variants or narrow width
+ *     forms (BPF_END) could define sub-registers.
+ *   - Narrow direct loads, BPF_B/H/W | BPF_LDX.
+ *   - BPF_LD is not exposed to JIT back-ends, so no need for testing.
+ *
+ * "get_prandom_u32" is used to initialize low 32-bit of some registers to
+ * prevent potential optimizations done by verifier or JIT back-ends which
+ * could optimize register back into constant when range info shows one
+ * register is a constant.
+ */
+{
+	"add32 reg zero extend check",
+	.insns = {
+	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+	BPF_LD_IMM64(BPF_REG_0, 0x100000000ULL),
+	BPF_ALU32_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+	BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
+	BPF_EXIT_INSN(),
+	},
+	.result = ACCEPT,
+	.retval = 0,
+},
+{
+	"add32 imm zero extend check",
+	.insns = {
+	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+	BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL),
+	BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1),
+	/* An insn could have no effect on the low 32-bit, for example:
+	 *   a = a + 0
+	 *   a = a | 0
+	 *   a = a & -1
+	 * But, they should still zero high 32-bit.
+	 */
+	BPF_ALU32_IMM(BPF_ADD, BPF_REG_0, 0),
+	BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
+	BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+	BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL),
+	BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1),
+	BPF_ALU32_IMM(BPF_ADD, BPF_REG_0, -2),
+	BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
+	BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_6),
+	BPF_EXIT_INSN(),
+	},
+	.result = ACCEPT,
+	.retval = 0,
+},
+{
+	"sub32 reg zero extend check",
+	.insns = {
+	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+	BPF_LD_IMM64(BPF_REG_0, 0x1ffffffffULL),
+	BPF_ALU32_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
+	BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
+	BPF_EXIT_INSN(),
+	},
+	.result = ACCEPT,
+	.retval = 0,
+},
+{
+	"sub32 imm zero extend check",
+	.insns = {
+	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+	BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL),
+	BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1),
+	BPF_ALU32_IMM(BPF_SUB, BPF_REG_0, 0),
+	BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
+	BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+	BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL),
+	BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1),
+	BPF_ALU32_IMM(BPF_SUB, BPF_REG_0, 1),
+	BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
+	BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_6),
+	BPF_EXIT_INSN(),
+	},
+	.result = ACCEPT,
+	.retval = 0,
+},
+{
+	"mul32 reg zero extend check",
+	.insns = {
+	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+	BPF_LD_IMM64(BPF_REG_0, 0x100000001ULL),
+	BPF_ALU32_REG(BPF_MUL, BPF_REG_0, BPF_REG_1),
+	BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
+	BPF_EXIT_INSN(),
+	},
+	.result = ACCEPT,
+	.retval = 0,
+},
+{
+	"mul32 imm zero extend check",
+	.insns = {
+	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+	BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL),
+	BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1),
+	BPF_ALU32_IMM(BPF_MUL, BPF_REG_0, 1),
+	BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
+	BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+	BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL),
+	BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1),
+	BPF_ALU32_IMM(BPF_MUL, BPF_REG_0, -1),
+	BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
+	BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_6),
+	BPF_EXIT_INSN(),
+	},
+	.result = ACCEPT,
+	.retval = 0,
+},
+{
+	"div32 reg zero extend check",
+	.insns = {
+	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+	BPF_MOV64_IMM(BPF_REG_0, -1),
+	BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_1),
+	BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
+	BPF_EXIT_INSN(),
+	},
+	.result = ACCEPT,
+	.retval = 0,
+},
+{
+	"div32 imm zero extend check",
+	.insns = {
+	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+	BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL),
+	BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1),
+	BPF_ALU32_IMM(BPF_DIV, BPF_REG_0, 1),
+	BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
+	BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+	BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL),
+	BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1),
+	BPF_ALU32_IMM(BPF_DIV, BPF_REG_0, 2),
+	BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
+	BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_6),
+	BPF_EXIT_INSN(),
+	},
+	.result = ACCEPT,
+	.retval = 0,
+},
+{
+	"or32 reg zero extend check",
+	.insns = {
+	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+	BPF_LD_IMM64(BPF_REG_0, 0x100000001ULL),
+	BPF_ALU32_REG(BPF_OR, BPF_REG_0, BPF_REG_1),
+	BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
+	BPF_EXIT_INSN(),
+	},
+	.result = ACCEPT,
+	.retval = 0,
+},
+{
+	"or32 imm zero extend check",
+	.insns = {
+	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+	BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL),
+	BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1),
+	BPF_ALU32_IMM(BPF_OR, BPF_REG_0, 0),
+	BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
+	BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+	BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL),
+	BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1),
+	BPF_ALU32_IMM(BPF_OR, BPF_REG_0, 1),
+	BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
+	BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_6),
+	BPF_EXIT_INSN(),
+	},
+	.result = ACCEPT,
+	.retval = 0,
+},
+{
+	"and32 reg zero extend check",
+	.insns = {
+	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+	BPF_LD_IMM64(BPF_REG_1, 0x100000000ULL),
+	BPF_ALU64_REG(BPF_OR, BPF_REG_1, BPF_REG_0),
+	BPF_LD_IMM64(BPF_REG_0, 0x1ffffffffULL),
+	BPF_ALU32_REG(BPF_AND, BPF_REG_0, BPF_REG_1),
+	BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
+	BPF_EXIT_INSN(),
+	},
+	.result = ACCEPT,
+	.retval = 0,
+},
+{
+	"and32 imm zero extend check",
+	.insns = {
+	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+	BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL),
+	BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1),
+	BPF_ALU32_IMM(BPF_AND, BPF_REG_0, -1),
+	BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
+	BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+	BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL),
+	BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1),
+	BPF_ALU32_IMM(BPF_AND, BPF_REG_0, -2),
+	BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
+	BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_6),
+	BPF_EXIT_INSN(),
+	},
+	.result = ACCEPT,
+	.retval = 0,
+},
+{
+	"lsh32 reg zero extend check",
+	.insns = {
+	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+	BPF_LD_IMM64(BPF_REG_1, 0x100000000ULL),
+	BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1),
+	BPF_MOV64_IMM(BPF_REG_1, 1),
+	BPF_ALU32_REG(BPF_LSH, BPF_REG_0, BPF_REG_1),
+	BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
+	BPF_EXIT_INSN(),
+	},
+	.result = ACCEPT,
+	.retval = 0,
+},
+{
+	"lsh32 imm zero extend check",
+	.insns = {
+	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+	BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL),
+	BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1),
+	BPF_ALU32_IMM(BPF_LSH, BPF_REG_0, 0),
+	BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
+	BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+	BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL),
+	BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1),
+	BPF_ALU32_IMM(BPF_LSH, BPF_REG_0, 1),
+	BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
+	BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_6),
+	BPF_EXIT_INSN(),
+	},
+	.result = ACCEPT,
+	.retval = 0,
+},
+{
+	"rsh32 reg zero extend check",
+	.insns = {
+	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+	BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL),
+	BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1),
+	BPF_MOV64_IMM(BPF_REG_1, 1),
+	BPF_ALU32_REG(BPF_RSH, BPF_REG_0, BPF_REG_1),
+	BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
+	BPF_EXIT_INSN(),
+	},
+	.result = ACCEPT,
+	.retval = 0,
+},
+{
+	"rsh32 imm zero extend check",
+	.insns = {
+	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+	BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL),
+	BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1),
+	BPF_ALU32_IMM(BPF_RSH, BPF_REG_0, 0),
+	BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
+	BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+	BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL),
+	BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1),
+	BPF_ALU32_IMM(BPF_RSH, BPF_REG_0, 1),
+	BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
+	BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_6),
+	BPF_EXIT_INSN(),
+	},
+	.result = ACCEPT,
+	.retval = 0,
+},
+{
+	"neg32 reg zero extend check",
+	.insns = {
+	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+	BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL),
+	BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1),
+	BPF_ALU32_IMM(BPF_NEG, BPF_REG_0, 0),
+	BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
+	BPF_EXIT_INSN(),
+	},
+	.result = ACCEPT,
+	.retval = 0,
+},
+{
+	"mod32 reg zero extend check",
+	.insns = {
+	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+	BPF_MOV64_IMM(BPF_REG_0, -1),
+	BPF_ALU32_REG(BPF_MOD, BPF_REG_0, BPF_REG_1),
+	BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
+	BPF_EXIT_INSN(),
+	},
+	.result = ACCEPT,
+	.retval = 0,
+},
+{
+	"mod32 imm zero extend check",
+	.insns = {
+	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+	BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL),
+	BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1),
+	BPF_ALU32_IMM(BPF_MOD, BPF_REG_0, 1),
+	BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
+	BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+	BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL),
+	BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1),
+	BPF_ALU32_IMM(BPF_MOD, BPF_REG_0, 2),
+	BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
+	BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_6),
+	BPF_EXIT_INSN(),
+	},
+	.result = ACCEPT,
+	.retval = 0,
+},
+{
+	"xor32 reg zero extend check",
+	.insns = {
+	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+	BPF_LD_IMM64(BPF_REG_0, 0x100000000ULL),
+	BPF_ALU32_REG(BPF_XOR, BPF_REG_0, BPF_REG_1),
+	BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
+	BPF_EXIT_INSN(),
+	},
+	.result = ACCEPT,
+	.retval = 0,
+},
+{
+	"xor32 imm zero extend check",
+	.insns = {
+	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+	BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL),
+	BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1),
+	BPF_ALU32_IMM(BPF_XOR, BPF_REG_0, 1),
+	BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
+	BPF_EXIT_INSN(),
+	},
+	.result = ACCEPT,
+	.retval = 0,
+},
+{
+	"mov32 reg zero extend check",
+	.insns = {
+	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+	BPF_LD_IMM64(BPF_REG_1, 0x100000000ULL),
+	BPF_ALU64_REG(BPF_OR, BPF_REG_1, BPF_REG_0),
+	BPF_LD_IMM64(BPF_REG_0, 0x100000000ULL),
+	BPF_MOV32_REG(BPF_REG_0, BPF_REG_1),
+	BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
+	BPF_EXIT_INSN(),
+	},
+	.result = ACCEPT,
+	.retval = 0,
+},
+{
+	"mov32 imm zero extend check",
+	.insns = {
+	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+	BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL),
+	BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1),
+	BPF_MOV32_IMM(BPF_REG_0, 0),
+	BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
+	BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+	BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL),
+	BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1),
+	BPF_MOV32_IMM(BPF_REG_0, 1),
+	BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
+	BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_6),
+	BPF_EXIT_INSN(),
+	},
+	.result = ACCEPT,
+	.retval = 0,
+},
+{
+	"arsh32 reg zero extend check",
+	.insns = {
+	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+	BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL),
+	BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1),
+	BPF_MOV64_IMM(BPF_REG_1, 1),
+	BPF_ALU32_REG(BPF_ARSH, BPF_REG_0, BPF_REG_1),
+	BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
+	BPF_EXIT_INSN(),
+	},
+	.result = ACCEPT,
+	.retval = 0,
+},
+{
+	"arsh32 imm zero extend check",
+	.insns = {
+	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+	BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL),
+	BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1),
+	BPF_ALU32_IMM(BPF_ARSH, BPF_REG_0, 0),
+	BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
+	BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+	BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL),
+	BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1),
+	BPF_ALU32_IMM(BPF_ARSH, BPF_REG_0, 1),
+	BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
+	BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_6),
+	BPF_EXIT_INSN(),
+	},
+	.result = ACCEPT,
+	.retval = 0,
+},
+{
+	"end16 (to_le) reg zero extend check",
+	.insns = {
+	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+	BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+	BPF_ALU64_IMM(BPF_LSH, BPF_REG_6, 32),
+	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+	BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_6),
+	BPF_ENDIAN(BPF_TO_LE, BPF_REG_0, 16),
+	BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
+	BPF_EXIT_INSN(),
+	},
+	.result = ACCEPT,
+	.retval = 0,
+},
+{
+	"end32 (to_le) reg zero extend check",
+	.insns = {
+	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+	BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+	BPF_ALU64_IMM(BPF_LSH, BPF_REG_6, 32),
+	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+	BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_6),
+	BPF_ENDIAN(BPF_TO_LE, BPF_REG_0, 32),
+	BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
+	BPF_EXIT_INSN(),
+	},
+	.result = ACCEPT,
+	.retval = 0,
+},
+{
+	"end16 (to_be) reg zero extend check",
+	.insns = {
+	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+	BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+	BPF_ALU64_IMM(BPF_LSH, BPF_REG_6, 32),
+	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+	BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_6),
+	BPF_ENDIAN(BPF_TO_BE, BPF_REG_0, 16),
+	BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
+	BPF_EXIT_INSN(),
+	},
+	.result = ACCEPT,
+	.retval = 0,
+},
+{
+	"end32 (to_be) reg zero extend check",
+	.insns = {
+	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+	BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+	BPF_ALU64_IMM(BPF_LSH, BPF_REG_6, 32),
+	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+	BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_6),
+	BPF_ENDIAN(BPF_TO_BE, BPF_REG_0, 32),
+	BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
+	BPF_EXIT_INSN(),
+	},
+	.result = ACCEPT,
+	.retval = 0,
+},
+{
+	"ldx_b zero extend check",
+	.insns = {
+	BPF_MOV64_REG(BPF_REG_6, BPF_REG_10),
+	BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -4),
+	BPF_ST_MEM(BPF_W, BPF_REG_6, 0, 0xfaceb00c),
+	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+	BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL),
+	BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1),
+	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_6, 0),
+	BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
+	BPF_EXIT_INSN(),
+	},
+	.result = ACCEPT,
+	.retval = 0,
+},
+{
+	"ldx_h zero extend check",
+	.insns = {
+	BPF_MOV64_REG(BPF_REG_6, BPF_REG_10),
+	BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -4),
+	BPF_ST_MEM(BPF_W, BPF_REG_6, 0, 0xfaceb00c),
+	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+	BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL),
+	BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1),
+	BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_6, 0),
+	BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
+	BPF_EXIT_INSN(),
+	},
+	.result = ACCEPT,
+	.retval = 0,
+},
+{
+	"ldx_w zero extend check",
+	.insns = {
+	BPF_MOV64_REG(BPF_REG_6, BPF_REG_10),
+	BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -4),
+	BPF_ST_MEM(BPF_W, BPF_REG_6, 0, 0xfaceb00c),
+	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+	BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL),
+	BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1),
+	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_6, 0),
+	BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
+	BPF_EXIT_INSN(),
+	},
+	.result = ACCEPT,
+	.retval = 0,
+},
diff --git a/tools/testing/selftests/bpf/verifier/wide_store.c b/tools/testing/selftests/bpf/verifier/wide_store.c
new file mode 100644
index 000000000000..8fe99602ded4
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/wide_store.c
@@ -0,0 +1,36 @@
+#define BPF_SOCK_ADDR(field, off, res, err) \
+{ \
+	"wide store to bpf_sock_addr." #field "[" #off "]", \
+	.insns = { \
+	BPF_MOV64_IMM(BPF_REG_0, 1), \
+	BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, \
+		    offsetof(struct bpf_sock_addr, field[off])), \
+	BPF_EXIT_INSN(), \
+	}, \
+	.result = res, \
+	.prog_type = BPF_PROG_TYPE_CGROUP_SOCK_ADDR, \
+	.expected_attach_type = BPF_CGROUP_UDP6_SENDMSG, \
+	.errstr = err, \
+}
+
+/* user_ip6[0] is u64 aligned */
+BPF_SOCK_ADDR(user_ip6, 0, ACCEPT,
+	      NULL),
+BPF_SOCK_ADDR(user_ip6, 1, REJECT,
+	      "invalid bpf_context access off=12 size=8"),
+BPF_SOCK_ADDR(user_ip6, 2, ACCEPT,
+	      NULL),
+BPF_SOCK_ADDR(user_ip6, 3, REJECT,
+	      "invalid bpf_context access off=20 size=8"),
+
+/* msg_src_ip6[0] is _not_ u64 aligned */
+BPF_SOCK_ADDR(msg_src_ip6, 0, REJECT,
+	      "invalid bpf_context access off=44 size=8"),
+BPF_SOCK_ADDR(msg_src_ip6, 1, ACCEPT,
+	      NULL),
+BPF_SOCK_ADDR(msg_src_ip6, 2, REJECT,
+	      "invalid bpf_context access off=52 size=8"),
+BPF_SOCK_ADDR(msg_src_ip6, 3, REJECT,
+	      "invalid bpf_context access off=56 size=8"),
+
+#undef BPF_SOCK_ADDR
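The wide_store macro expands into one test per 4-byte element of the two IPv6 arrays, and a BPF_DW (8-byte) store is accepted only where the element happens to be 8-byte aligned within struct bpf_sock_addr. The error strings above imply user_ip6 starts at offset 8 (so [0] and [2] are aligned) while msg_src_ip6 starts at offset 44 (so only [1] is). That rule can be restated in plain C; the asserts below are a sketch that simply mirrors the offsets in the expected error strings:

    #include <assert.h>
    #include <stddef.h>
    #include <linux/bpf.h>

    int main(void)
    {
    	/* An 8-byte context store is allowed only at 8-byte aligned
    	 * offsets within struct bpf_sock_addr. */
    	assert(offsetof(struct bpf_sock_addr, user_ip6[0]) % 8 == 0);
    	assert(offsetof(struct bpf_sock_addr, user_ip6[1]) % 8 != 0);     /* off=12 */
    	assert(offsetof(struct bpf_sock_addr, msg_src_ip6[0]) % 8 != 0);  /* off=44 */
    	assert(offsetof(struct bpf_sock_addr, msg_src_ip6[1]) % 8 == 0);  /* off=48 */
    	return 0;
    }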