author		Alexei Starovoitov <ast@kernel.org>	2023-03-25 17:02:06 -0700
committer	Alexei Starovoitov <ast@kernel.org>	2023-03-25 17:02:06 -0700
commit		e99360762a9cbd93bf1d352e90df5df78daa2f90 (patch)
tree		4f46a712b5334598eddf869b913f7b887c07178c /tools/testing/selftests/bpf/progs/verifier_basic_stack.c
parent		496f4f1b0f8e01baea22e6573f60af8cfd84df48 (diff)
parent		ffb515c933a9e8000e50b03f76569ffb6ef4d39d (diff)
Merge branch 'First set of verifier/*.c migrated to inline assembly'
Eduard Zingerman says:

====================
This is a follow-up to RFC [1]. It migrates a first batch of 38
verifier/*.c tests to inline assembly and to ./test_progs for actual
execution. The migration is done by a python script (see [2]).

Each migrated verifier/xxx.c file is mapped to progs/verifier_xxx.c
plus an entry in prog_tests/verifier.c, one patch per file.

A few patches at the beginning of the patch-set extend test_loader
with the necessary functionality, mainly:
- support for test execution in unprivileged mode;
- support for test runs of test programs.

Migrated tests can be selected for execution using the following
filter:

    ./test_progs -a verifier_*

An example of a migrated test:

    SEC("xdp")
    __description("XDP pkt read, pkt_data' > pkt_end, corner case, good access")
    __success __retval(0) __flag(BPF_F_ANY_ALIGNMENT)
    __naked void end_corner_case_good_access_1(void)
    {
            asm volatile ("                                     \
            r2 = *(u32*)(r1 + %[xdp_md_data]);                  \
            r3 = *(u32*)(r1 + %[xdp_md_data_end]);              \
            r1 = r2;                                            \
            r1 += 8;                                            \
            if r1 > r3 goto l0_%=;                              \
            r0 = *(u64*)(r1 - 8);                               \
    l0_%=:  r0 = 0;                                             \
            exit;                                               \
    "       :
            : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
              __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end))
            : __clobber_all);
    }

Changes compared to the RFC:
- test_loader.c is extended to support test program runs;
- capabilities handling now matches the behavior of test_verifier;
- BPF_ST_MEM instructions are automatically replaced by BPF_STX_MEM
  instructions to work around current clang limitations;
- test styling was updated according to RFC feedback;
- 38 migrated files are included instead of 1.

I used the following means for testing:
- the migration tool itself has a set of self-tests;
- the migrated tests pass;
- each old/new file was compared manually, side by side.

While doing the side-by-side comparison I noted a few defects in the
original tests:
- and.c:
  - one of the jump targets is off by one;
  - BPF_ST_MEM wrong OFF/IMM ordering;
- array_access.c:
  - BPF_ST_MEM wrong OFF/IMM ordering;
- value_or_null.c:
  - BPF_ST_MEM wrong OFF/IMM ordering.

These defects will be addressed separately.

[1] RFC: https://lore.kernel.org/bpf/20230123145148.2791939-1-eddyz87@gmail.com/
[2] Migration tool: https://github.com/eddyz87/verifier-tests-migrator
====================

Signed-off-by: Alexei Starovoitov <ast@kernel.org>
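For contrast with the migrated form shown in the diff below, the pre-migration
tests were struct bpf_test initializers built from instruction macros and run
by test_verifier. A minimal sketch of how the first test below looked in the
old verifier/basic_stack.c format (reconstructed for illustration; the exact
.errstr string in the original file may have differed):

    {
            "stack out of bounds",
            .insns = {
            /* BPF_ST_MEM stores an immediate: *(u64 *)(r10 + 8) = 0.
             * The 512-byte BPF stack grows down from r10, so a write at
             * a positive offset is out of bounds and must be rejected.
             */
            BPF_ST_MEM(BPF_DW, BPF_REG_10, 8, 0),
            BPF_EXIT_INSN(),
            },
            .errstr = "invalid write to stack",
            .result = REJECT,
    },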
Diffstat (limited to 'tools/testing/selftests/bpf/progs/verifier_basic_stack.c')
-rw-r--r--	tools/testing/selftests/bpf/progs/verifier_basic_stack.c	100
1 file changed, 100 insertions, 0 deletions
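The diffstat is limited to the new progs file; per the merge message, each
migrated file is also paired with an entry in prog_tests/verifier.c so that
test_progs runs it through the generated skeleton. A minimal sketch of what
such an entry can look like, assuming the RUN_TESTS() helper exposed via
test_progs.h (the entry actually added by this series may be shaped
differently):

    // SPDX-License-Identifier: GPL-2.0-only
    #include <test_progs.h>
    #include "verifier_basic_stack.skel.h"

    /* RUN_TESTS() drives test_loader over every __description()-annotated
     * program in the skeleton, checking the expected verdict
     * (__success/__failure, privileged and unprivileged) and __msg strings.
     */
    void test_verifier_basic_stack(void)
    {
            RUN_TESTS(verifier_basic_stack);
    }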
diff --git a/tools/testing/selftests/bpf/progs/verifier_basic_stack.c b/tools/testing/selftests/bpf/progs/verifier_basic_stack.c
new file mode 100644
index 000000000000..359df865a8f3
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/verifier_basic_stack.c
@@ -0,0 +1,100 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Converted from tools/testing/selftests/bpf/verifier/basic_stack.c */
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __uint(max_entries, 1);
+ __type(key, long long);
+ __type(value, long long);
+} map_hash_8b SEC(".maps");
+
+SEC("socket")
+__description("stack out of bounds")
+__failure __msg("invalid write to stack")
+__failure_unpriv
+__naked void stack_out_of_bounds(void)
+{
+ asm volatile (" \
+ r1 = 0; \
+ *(u64*)(r10 + 8) = r1; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("socket")
+__description("uninitialized stack1")
+__failure __msg("invalid indirect read from stack")
+__failure_unpriv
+__naked void uninitialized_stack1(void)
+{
+ asm volatile (" \
+ r2 = r10; \
+ r2 += -8; \
+ r1 = %[map_hash_8b] ll; \
+ call %[bpf_map_lookup_elem]; \
+ exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm_addr(map_hash_8b)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("uninitialized stack2")
+__failure __msg("invalid read from stack")
+__failure_unpriv
+__naked void uninitialized_stack2(void)
+{
+ asm volatile (" \
+ r2 = r10; \
+ r0 = *(u64*)(r2 - 8); \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("socket")
+__description("invalid fp arithmetic")
+__failure __msg("R1 subtraction from stack pointer")
+__failure_unpriv
+__naked void invalid_fp_arithmetic(void)
+{
+ /* If this gets ever changed, make sure JITs can deal with it. */
+ asm volatile (" \
+ r0 = 0; \
+ r1 = r10; \
+ r1 -= 8; \
+ *(u64*)(r1 + 0) = r0; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("socket")
+__description("non-invalid fp arithmetic")
+__success __success_unpriv __retval(0)
+__naked void non_invalid_fp_arithmetic(void)
+{
+ asm volatile (" \
+ r0 = 0; \
+ *(u64*)(r10 - 8) = r0; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("socket")
+__description("misaligned read from stack")
+__failure __msg("misaligned stack access")
+__failure_unpriv
+__naked void misaligned_read_from_stack(void)
+{
+ asm volatile (" \
+ r2 = r10; \
+ r0 = *(u64*)(r2 - 4); \
+ exit; \
+" ::: __clobber_all);
+}
+
+char _license[] SEC("license") = "GPL";
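One migration artifact mentioned in the merge message is visible in "stack out
of bounds" and "non-invalid fp arithmetic" above: where the original tests
stored an immediate with a single BPF_ST_MEM instruction, the migrated asm
uses a register move plus a store, since current clang cannot emit the BPF_ST
form from this asm dialect. A sketch of the two equivalent sequences, assuming
the instruction macros from tools/include/linux/filter.h:

    /* Old style: one instruction, immediate written straight to the slot. */
    struct bpf_insn st_form[] = {
            BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),  /* *(u64 *)(r10 - 8) = 0 */
            BPF_MOV64_IMM(BPF_REG_0, 0),
            BPF_EXIT_INSN(),
    };

    /* Migrated style: materialize the immediate in a register, then store
     * with BPF_STX_MEM -- what "r0 = 0; *(u64*)(r10 - 8) = r0;" in the
     * inline-asm tests assembles to.
     */
    struct bpf_insn stx_form[] = {
            BPF_MOV64_IMM(BPF_REG_0, 0),                    /* r0 = 0 */
            BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8), /* store r0 */
            BPF_EXIT_INSN(),
    };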