Diffstat (limited to 'tools/testing/selftests/bpf')
-rw-r--r--  tools/testing/selftests/bpf/.gitignore                        |   1
-rw-r--r--  tools/testing/selftests/bpf/Makefile                          |   3
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/align.c                | 218
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/atomics.c              |  91
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/btf_dump.c             |  54
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/core_reloc.c           |  17
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/log_buf.c              |   4
-rw-r--r--  tools/testing/selftests/bpf/progs/atomics.c                   |  28
-rw-r--r--  tools/testing/selftests/bpf/test_cpp.cpp                      |   3
-rw-r--r--  tools/testing/selftests/bpf/verifier/atomic_invalid.c         |   6
-rw-r--r--  tools/testing/selftests/bpf/verifier/bounds.c                 |   4
-rw-r--r--  tools/testing/selftests/bpf/verifier/calls.c                  |  25
-rw-r--r--  tools/testing/selftests/bpf/verifier/ctx.c                    |   4
-rw-r--r--  tools/testing/selftests/bpf/verifier/direct_packet_access.c  |   2
-rw-r--r--  tools/testing/selftests/bpf/verifier/helper_access_var_len.c |   6
-rw-r--r--  tools/testing/selftests/bpf/verifier/jmp32.c                  |  16
-rw-r--r--  tools/testing/selftests/bpf/verifier/precise.c                |   4
-rw-r--r--  tools/testing/selftests/bpf/verifier/raw_stack.c              |   4
-rw-r--r--  tools/testing/selftests/bpf/verifier/ref_tracking.c           |   6
-rw-r--r--  tools/testing/selftests/bpf/verifier/search_pruning.c         |   2
-rw-r--r--  tools/testing/selftests/bpf/verifier/sock.c                   |   2
-rw-r--r--  tools/testing/selftests/bpf/verifier/spill_fill.c             |  38
-rw-r--r--  tools/testing/selftests/bpf/verifier/unpriv.c                 |   4
-rw-r--r--  tools/testing/selftests/bpf/verifier/value_illegal_alu.c      |   4
-rw-r--r--  tools/testing/selftests/bpf/verifier/value_ptr_arith.c        |   4
-rw-r--r--  tools/testing/selftests/bpf/verifier/var_off.c                |   2
26 files changed, 281 insertions, 271 deletions
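
Note: most of the churn below is mechanical. The verifier's register-state dump changed format, so the expected log strings in these tests are updated to match: scalar registers print as scalar(...) instead of inv(...), default fields such as id=0 are dropped (for pointer types like ctx/pkt/pkt_end too), and umin_value/umax_value, smin_value/smax_value shorten to umin/umax, smin/smax. For example, both lines taken from the align.c hunks below:

    old: R3_w=inv(id=0,umax_value=510,var_off=(0x0; 0x1fe))
    new: R3_w=scalar(umax=510,var_off=(0x0; 0x1fe))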
diff --git a/tools/testing/selftests/bpf/.gitignore b/tools/testing/selftests/bpf/.gitignore
index 1dad8d617da81..a7eead8820a03 100644
--- a/tools/testing/selftests/bpf/.gitignore
+++ b/tools/testing/selftests/bpf/.gitignore
@@ -1,4 +1,5 @@
# SPDX-License-Identifier: GPL-2.0-only
+bpftool
bpf-helpers*
bpf-syscall*
test_verifier
diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile
index 91ea729990daf..fe12b4f5fe20d 100644
--- a/tools/testing/selftests/bpf/Makefile
+++ b/tools/testing/selftests/bpf/Makefile
@@ -470,6 +470,7 @@ $(OUTPUT)/$(TRUNNER_BINARY): $(TRUNNER_TEST_OBJS) \
$$(call msg,BINARY,,$$@)
$(Q)$$(CC) $$(CFLAGS) $$(filter %.a %.o,$$^) $$(LDLIBS) -o $$@
$(Q)$(RESOLVE_BTFIDS) --btf $(TRUNNER_OUTPUT)/btf_data.o $$@
+ $(Q)ln -sf $(if $2,..,.)/tools/build/bpftool/bootstrap/bpftool $(if $2,$2/)bpftool
endef
@@ -555,7 +556,7 @@ $(OUTPUT)/bench: $(OUTPUT)/bench.o \
EXTRA_CLEAN := $(TEST_CUSTOM_PROGS) $(SCRATCH_DIR) $(HOST_SCRATCH_DIR) \
prog_tests/tests.h map_tests/tests.h verifier/tests.h \
- feature \
+ feature bpftool \
$(addprefix $(OUTPUT)/,*.o *.skel.h *.lskel.h no_alu32 bpf_gcc bpf_testmod.ko)
.PHONY: docs docs-clean
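
A note on the two Makefile hunks: the test runner now symlinks the bootstrap bpftool into each selftests output directory (and EXTRA_CLEAN removes the link), so tests can invoke it as plain ./bpftool no matter where the tree was built; the core_reloc run_btfgen change further down depends on this.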
diff --git a/tools/testing/selftests/bpf/prog_tests/align.c b/tools/testing/selftests/bpf/prog_tests/align.c
index 0ee29e11eaee2..970f09156eb46 100644
--- a/tools/testing/selftests/bpf/prog_tests/align.c
+++ b/tools/testing/selftests/bpf/prog_tests/align.c
@@ -39,13 +39,13 @@ static struct bpf_align_test tests[] = {
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.matches = {
- {0, "R1=ctx(id=0,off=0,imm=0)"},
+ {0, "R1=ctx(off=0,imm=0)"},
{0, "R10=fp0"},
- {0, "R3_w=inv2"},
- {1, "R3_w=inv4"},
- {2, "R3_w=inv8"},
- {3, "R3_w=inv16"},
- {4, "R3_w=inv32"},
+ {0, "R3_w=2"},
+ {1, "R3_w=4"},
+ {2, "R3_w=8"},
+ {3, "R3_w=16"},
+ {4, "R3_w=32"},
},
},
{
@@ -67,19 +67,19 @@ static struct bpf_align_test tests[] = {
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.matches = {
- {0, "R1=ctx(id=0,off=0,imm=0)"},
+ {0, "R1=ctx(off=0,imm=0)"},
{0, "R10=fp0"},
- {0, "R3_w=inv1"},
- {1, "R3_w=inv2"},
- {2, "R3_w=inv4"},
- {3, "R3_w=inv8"},
- {4, "R3_w=inv16"},
- {5, "R3_w=inv1"},
- {6, "R4_w=inv32"},
- {7, "R4_w=inv16"},
- {8, "R4_w=inv8"},
- {9, "R4_w=inv4"},
- {10, "R4_w=inv2"},
+ {0, "R3_w=1"},
+ {1, "R3_w=2"},
+ {2, "R3_w=4"},
+ {3, "R3_w=8"},
+ {4, "R3_w=16"},
+ {5, "R3_w=1"},
+ {6, "R4_w=32"},
+ {7, "R4_w=16"},
+ {8, "R4_w=8"},
+ {9, "R4_w=4"},
+ {10, "R4_w=2"},
},
},
{
@@ -96,14 +96,14 @@ static struct bpf_align_test tests[] = {
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.matches = {
- {0, "R1=ctx(id=0,off=0,imm=0)"},
+ {0, "R1=ctx(off=0,imm=0)"},
{0, "R10=fp0"},
- {0, "R3_w=inv4"},
- {1, "R3_w=inv8"},
- {2, "R3_w=inv10"},
- {3, "R4_w=inv8"},
- {4, "R4_w=inv12"},
- {5, "R4_w=inv14"},
+ {0, "R3_w=4"},
+ {1, "R3_w=8"},
+ {2, "R3_w=10"},
+ {3, "R4_w=8"},
+ {4, "R4_w=12"},
+ {5, "R4_w=14"},
},
},
{
@@ -118,12 +118,12 @@ static struct bpf_align_test tests[] = {
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.matches = {
- {0, "R1=ctx(id=0,off=0,imm=0)"},
+ {0, "R1=ctx(off=0,imm=0)"},
{0, "R10=fp0"},
- {0, "R3_w=inv7"},
- {1, "R3_w=inv7"},
- {2, "R3_w=inv14"},
- {3, "R3_w=inv56"},
+ {0, "R3_w=7"},
+ {1, "R3_w=7"},
+ {2, "R3_w=14"},
+ {3, "R3_w=56"},
},
},
@@ -161,19 +161,19 @@ static struct bpf_align_test tests[] = {
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.matches = {
- {6, "R0_w=pkt(id=0,off=8,r=8,imm=0)"},
- {6, "R3_w=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"},
- {7, "R3_w=inv(id=0,umax_value=510,var_off=(0x0; 0x1fe))"},
- {8, "R3_w=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
- {9, "R3_w=inv(id=0,umax_value=2040,var_off=(0x0; 0x7f8))"},
- {10, "R3_w=inv(id=0,umax_value=4080,var_off=(0x0; 0xff0))"},
- {12, "R3_w=pkt_end(id=0,off=0,imm=0)"},
- {17, "R4_w=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"},
- {18, "R4_w=inv(id=0,umax_value=8160,var_off=(0x0; 0x1fe0))"},
- {19, "R4_w=inv(id=0,umax_value=4080,var_off=(0x0; 0xff0))"},
- {20, "R4_w=inv(id=0,umax_value=2040,var_off=(0x0; 0x7f8))"},
- {21, "R4_w=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
- {22, "R4_w=inv(id=0,umax_value=510,var_off=(0x0; 0x1fe))"},
+ {6, "R0_w=pkt(off=8,r=8,imm=0)"},
+ {6, "R3_w=scalar(umax=255,var_off=(0x0; 0xff))"},
+ {7, "R3_w=scalar(umax=510,var_off=(0x0; 0x1fe))"},
+ {8, "R3_w=scalar(umax=1020,var_off=(0x0; 0x3fc))"},
+ {9, "R3_w=scalar(umax=2040,var_off=(0x0; 0x7f8))"},
+ {10, "R3_w=scalar(umax=4080,var_off=(0x0; 0xff0))"},
+ {12, "R3_w=pkt_end(off=0,imm=0)"},
+ {17, "R4_w=scalar(umax=255,var_off=(0x0; 0xff))"},
+ {18, "R4_w=scalar(umax=8160,var_off=(0x0; 0x1fe0))"},
+ {19, "R4_w=scalar(umax=4080,var_off=(0x0; 0xff0))"},
+ {20, "R4_w=scalar(umax=2040,var_off=(0x0; 0x7f8))"},
+ {21, "R4_w=scalar(umax=1020,var_off=(0x0; 0x3fc))"},
+ {22, "R4_w=scalar(umax=510,var_off=(0x0; 0x1fe))"},
},
},
{
@@ -194,16 +194,16 @@ static struct bpf_align_test tests[] = {
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.matches = {
- {6, "R3_w=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"},
- {7, "R4_w=inv(id=1,umax_value=255,var_off=(0x0; 0xff))"},
- {8, "R4_w=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"},
- {9, "R4_w=inv(id=1,umax_value=255,var_off=(0x0; 0xff))"},
- {10, "R4_w=inv(id=0,umax_value=510,var_off=(0x0; 0x1fe))"},
- {11, "R4_w=inv(id=1,umax_value=255,var_off=(0x0; 0xff))"},
- {12, "R4_w=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
- {13, "R4_w=inv(id=1,umax_value=255,var_off=(0x0; 0xff))"},
- {14, "R4_w=inv(id=0,umax_value=2040,var_off=(0x0; 0x7f8))"},
- {15, "R4_w=inv(id=0,umax_value=4080,var_off=(0x0; 0xff0))"},
+ {6, "R3_w=scalar(umax=255,var_off=(0x0; 0xff))"},
+ {7, "R4_w=scalar(id=1,umax=255,var_off=(0x0; 0xff))"},
+ {8, "R4_w=scalar(umax=255,var_off=(0x0; 0xff))"},
+ {9, "R4_w=scalar(id=1,umax=255,var_off=(0x0; 0xff))"},
+ {10, "R4_w=scalar(umax=510,var_off=(0x0; 0x1fe))"},
+ {11, "R4_w=scalar(id=1,umax=255,var_off=(0x0; 0xff))"},
+ {12, "R4_w=scalar(umax=1020,var_off=(0x0; 0x3fc))"},
+ {13, "R4_w=scalar(id=1,umax=255,var_off=(0x0; 0xff))"},
+ {14, "R4_w=scalar(umax=2040,var_off=(0x0; 0x7f8))"},
+ {15, "R4_w=scalar(umax=4080,var_off=(0x0; 0xff0))"},
},
},
{
@@ -234,14 +234,14 @@ static struct bpf_align_test tests[] = {
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.matches = {
- {2, "R5_w=pkt(id=0,off=0,r=0,imm=0)"},
- {4, "R5_w=pkt(id=0,off=14,r=0,imm=0)"},
- {5, "R4_w=pkt(id=0,off=14,r=0,imm=0)"},
- {9, "R2=pkt(id=0,off=0,r=18,imm=0)"},
- {10, "R5=pkt(id=0,off=14,r=18,imm=0)"},
- {10, "R4_w=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"},
- {13, "R4_w=inv(id=0,umax_value=65535,var_off=(0x0; 0xffff))"},
- {14, "R4_w=inv(id=0,umax_value=65535,var_off=(0x0; 0xffff))"},
+ {2, "R5_w=pkt(off=0,r=0,imm=0)"},
+ {4, "R5_w=pkt(off=14,r=0,imm=0)"},
+ {5, "R4_w=pkt(off=14,r=0,imm=0)"},
+ {9, "R2=pkt(off=0,r=18,imm=0)"},
+ {10, "R5=pkt(off=14,r=18,imm=0)"},
+ {10, "R4_w=scalar(umax=255,var_off=(0x0; 0xff))"},
+ {13, "R4_w=scalar(umax=65535,var_off=(0x0; 0xffff))"},
+ {14, "R4_w=scalar(umax=65535,var_off=(0x0; 0xffff))"},
},
},
{
@@ -296,59 +296,59 @@ static struct bpf_align_test tests[] = {
/* Calculated offset in R6 has unknown value, but known
* alignment of 4.
*/
- {6, "R2_w=pkt(id=0,off=0,r=8,imm=0)"},
- {7, "R6_w=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
+ {6, "R2_w=pkt(off=0,r=8,imm=0)"},
+ {7, "R6_w=scalar(umax=1020,var_off=(0x0; 0x3fc))"},
/* Offset is added to packet pointer R5, resulting in
* known fixed offset, and variable offset from R6.
*/
- {11, "R5_w=pkt(id=1,off=14,r=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
+ {11, "R5_w=pkt(id=1,off=14,r=0,umax=1020,var_off=(0x0; 0x3fc))"},
/* At the time the word size load is performed from R5,
* it's total offset is NET_IP_ALIGN + reg->off (0) +
* reg->aux_off (14) which is 16. Then the variable
* offset is considered using reg->aux_off_align which
* is 4 and meets the load's requirements.
*/
- {15, "R4=pkt(id=1,off=18,r=18,umax_value=1020,var_off=(0x0; 0x3fc))"},
- {15, "R5=pkt(id=1,off=14,r=18,umax_value=1020,var_off=(0x0; 0x3fc))"},
+ {15, "R4=pkt(id=1,off=18,r=18,umax=1020,var_off=(0x0; 0x3fc))"},
+ {15, "R5=pkt(id=1,off=14,r=18,umax=1020,var_off=(0x0; 0x3fc))"},
/* Variable offset is added to R5 packet pointer,
* resulting in auxiliary alignment of 4.
*/
- {17, "R5_w=pkt(id=2,off=0,r=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
+ {17, "R5_w=pkt(id=2,off=0,r=0,umax=1020,var_off=(0x0; 0x3fc))"},
/* Constant offset is added to R5, resulting in
* reg->off of 14.
*/
- {18, "R5_w=pkt(id=2,off=14,r=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
+ {18, "R5_w=pkt(id=2,off=14,r=0,umax=1020,var_off=(0x0; 0x3fc))"},
/* At the time the word size load is performed from R5,
* its total fixed offset is NET_IP_ALIGN + reg->off
* (14) which is 16. Then the variable offset is 4-byte
* aligned, so the total offset is 4-byte aligned and
* meets the load's requirements.
*/
- {23, "R4=pkt(id=2,off=18,r=18,umax_value=1020,var_off=(0x0; 0x3fc))"},
- {23, "R5=pkt(id=2,off=14,r=18,umax_value=1020,var_off=(0x0; 0x3fc))"},
+ {23, "R4=pkt(id=2,off=18,r=18,umax=1020,var_off=(0x0; 0x3fc))"},
+ {23, "R5=pkt(id=2,off=14,r=18,umax=1020,var_off=(0x0; 0x3fc))"},
/* Constant offset is added to R5 packet pointer,
* resulting in reg->off value of 14.
*/
- {25, "R5_w=pkt(id=0,off=14,r=8"},
+ {25, "R5_w=pkt(off=14,r=8"},
/* Variable offset is added to R5, resulting in a
* variable offset of (4n).
*/
- {26, "R5_w=pkt(id=3,off=14,r=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
+ {26, "R5_w=pkt(id=3,off=14,r=0,umax=1020,var_off=(0x0; 0x3fc))"},
/* Constant is added to R5 again, setting reg->off to 18. */
- {27, "R5_w=pkt(id=3,off=18,r=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
+ {27, "R5_w=pkt(id=3,off=18,r=0,umax=1020,var_off=(0x0; 0x3fc))"},
/* And once more we add a variable; resulting var_off
* is still (4n), fixed offset is not changed.
* Also, we create a new reg->id.
*/
- {28, "R5_w=pkt(id=4,off=18,r=0,umax_value=2040,var_off=(0x0; 0x7fc)"},
+ {28, "R5_w=pkt(id=4,off=18,r=0,umax=2040,var_off=(0x0; 0x7fc)"},
/* At the time the word size load is performed from R5,
* its total fixed offset is NET_IP_ALIGN + reg->off (18)
* which is 20. Then the variable offset is (4n), so
* the total offset is 4-byte aligned and meets the
* load's requirements.
*/
- {33, "R4=pkt(id=4,off=22,r=22,umax_value=2040,var_off=(0x0; 0x7fc)"},
- {33, "R5=pkt(id=4,off=18,r=22,umax_value=2040,var_off=(0x0; 0x7fc)"},
+ {33, "R4=pkt(id=4,off=22,r=22,umax=2040,var_off=(0x0; 0x7fc)"},
+ {33, "R5=pkt(id=4,off=18,r=22,umax=2040,var_off=(0x0; 0x7fc)"},
},
},
{
@@ -386,36 +386,36 @@ static struct bpf_align_test tests[] = {
/* Calculated offset in R6 has unknown value, but known
* alignment of 4.
*/
- {6, "R2_w=pkt(id=0,off=0,r=8,imm=0)"},
- {7, "R6_w=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
+ {6, "R2_w=pkt(off=0,r=8,imm=0)"},
+ {7, "R6_w=scalar(umax=1020,var_off=(0x0; 0x3fc))"},
/* Adding 14 makes R6 be (4n+2) */
- {8, "R6_w=inv(id=0,umin_value=14,umax_value=1034,var_off=(0x2; 0x7fc))"},
+ {8, "R6_w=scalar(umin=14,umax=1034,var_off=(0x2; 0x7fc))"},
/* Packet pointer has (4n+2) offset */
- {11, "R5_w=pkt(id=1,off=0,r=0,umin_value=14,umax_value=1034,var_off=(0x2; 0x7fc)"},
- {12, "R4=pkt(id=1,off=4,r=0,umin_value=14,umax_value=1034,var_off=(0x2; 0x7fc)"},
+ {11, "R5_w=pkt(id=1,off=0,r=0,umin=14,umax=1034,var_off=(0x2; 0x7fc)"},
+ {12, "R4=pkt(id=1,off=4,r=0,umin=14,umax=1034,var_off=(0x2; 0x7fc)"},
/* At the time the word size load is performed from R5,
* its total fixed offset is NET_IP_ALIGN + reg->off (0)
* which is 2. Then the variable offset is (4n+2), so
* the total offset is 4-byte aligned and meets the
* load's requirements.
*/
- {15, "R5=pkt(id=1,off=0,r=4,umin_value=14,umax_value=1034,var_off=(0x2; 0x7fc)"},
+ {15, "R5=pkt(id=1,off=0,r=4,umin=14,umax=1034,var_off=(0x2; 0x7fc)"},
/* Newly read value in R6 was shifted left by 2, so has
* known alignment of 4.
*/
- {17, "R6_w=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
+ {17, "R6_w=scalar(umax=1020,var_off=(0x0; 0x3fc))"},
/* Added (4n) to packet pointer's (4n+2) var_off, giving
* another (4n+2).
*/
- {19, "R5_w=pkt(id=2,off=0,r=0,umin_value=14,umax_value=2054,var_off=(0x2; 0xffc)"},
- {20, "R4=pkt(id=2,off=4,r=0,umin_value=14,umax_value=2054,var_off=(0x2; 0xffc)"},
+ {19, "R5_w=pkt(id=2,off=0,r=0,umin=14,umax=2054,var_off=(0x2; 0xffc)"},
+ {20, "R4=pkt(id=2,off=4,r=0,umin=14,umax=2054,var_off=(0x2; 0xffc)"},
/* At the time the word size load is performed from R5,
* its total fixed offset is NET_IP_ALIGN + reg->off (0)
* which is 2. Then the variable offset is (4n+2), so
* the total offset is 4-byte aligned and meets the
* load's requirements.
*/
- {23, "R5=pkt(id=2,off=0,r=4,umin_value=14,umax_value=2054,var_off=(0x2; 0xffc)"},
+ {23, "R5=pkt(id=2,off=0,r=4,umin=14,umax=2054,var_off=(0x2; 0xffc)"},
},
},
{
@@ -448,18 +448,18 @@ static struct bpf_align_test tests[] = {
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.result = REJECT,
.matches = {
- {3, "R5_w=pkt_end(id=0,off=0,imm=0)"},
+ {3, "R5_w=pkt_end(off=0,imm=0)"},
/* (ptr - ptr) << 2 == unknown, (4n) */
- {5, "R5_w=inv(id=0,smax_value=9223372036854775804,umax_value=18446744073709551612,var_off=(0x0; 0xfffffffffffffffc)"},
+ {5, "R5_w=scalar(smax=9223372036854775804,umax=18446744073709551612,var_off=(0x0; 0xfffffffffffffffc)"},
/* (4n) + 14 == (4n+2). We blow our bounds, because
* the add could overflow.
*/
- {6, "R5_w=inv(id=0,smin_value=-9223372036854775806,smax_value=9223372036854775806,umin_value=2,umax_value=18446744073709551614,var_off=(0x2; 0xfffffffffffffffc)"},
+ {6, "R5_w=scalar(smin=-9223372036854775806,smax=9223372036854775806,umin=2,umax=18446744073709551614,var_off=(0x2; 0xfffffffffffffffc)"},
/* Checked s>=0 */
- {9, "R5=inv(id=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc)"},
+ {9, "R5=scalar(umin=2,umax=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc)"},
/* packet pointer + nonnegative (4n+2) */
- {11, "R6_w=pkt(id=1,off=0,r=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc)"},
- {12, "R4_w=pkt(id=1,off=4,r=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc)"},
+ {11, "R6_w=pkt(id=1,off=0,r=0,umin=2,umax=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc)"},
+ {12, "R4_w=pkt(id=1,off=4,r=0,umin=2,umax=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc)"},
/* NET_IP_ALIGN + (4n+2) == (4n), alignment is fine.
* We checked the bounds, but it might have been able
* to overflow if the packet pointer started in the
@@ -467,7 +467,7 @@ static struct bpf_align_test tests[] = {
* So we did not get a 'range' on R6, and the access
* attempt will fail.
*/
- {15, "R6_w=pkt(id=1,off=0,r=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc)"},
+ {15, "R6_w=pkt(id=1,off=0,r=0,umin=2,umax=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc)"},
}
},
{
@@ -502,23 +502,23 @@ static struct bpf_align_test tests[] = {
/* Calculated offset in R6 has unknown value, but known
* alignment of 4.
*/
- {6, "R2_w=pkt(id=0,off=0,r=8,imm=0)"},
- {8, "R6_w=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
+ {6, "R2_w=pkt(off=0,r=8,imm=0)"},
+ {8, "R6_w=scalar(umax=1020,var_off=(0x0; 0x3fc))"},
/* Adding 14 makes R6 be (4n+2) */
- {9, "R6_w=inv(id=0,umin_value=14,umax_value=1034,var_off=(0x2; 0x7fc))"},
+ {9, "R6_w=scalar(umin=14,umax=1034,var_off=(0x2; 0x7fc))"},
/* New unknown value in R7 is (4n) */
- {10, "R7_w=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
+ {10, "R7_w=scalar(umax=1020,var_off=(0x0; 0x3fc))"},
/* Subtracting it from R6 blows our unsigned bounds */
- {11, "R6=inv(id=0,smin_value=-1006,smax_value=1034,umin_value=2,umax_value=18446744073709551614,var_off=(0x2; 0xfffffffffffffffc)"},
+ {11, "R6=scalar(smin=-1006,smax=1034,umin=2,umax=18446744073709551614,var_off=(0x2; 0xfffffffffffffffc)"},
/* Checked s>= 0 */
- {14, "R6=inv(id=0,umin_value=2,umax_value=1034,var_off=(0x2; 0x7fc))"},
+ {14, "R6=scalar(umin=2,umax=1034,var_off=(0x2; 0x7fc))"},
/* At the time the word size load is performed from R5,
* its total fixed offset is NET_IP_ALIGN + reg->off (0)
* which is 2. Then the variable offset is (4n+2), so
* the total offset is 4-byte aligned and meets the
* load's requirements.
*/
- {20, "R5=pkt(id=2,off=0,r=4,umin_value=2,umax_value=1034,var_off=(0x2; 0x7fc)"},
+ {20, "R5=pkt(id=2,off=0,r=4,umin=2,umax=1034,var_off=(0x2; 0x7fc)"},
},
},
@@ -556,23 +556,23 @@ static struct bpf_align_test tests[] = {
/* Calculated offset in R6 has unknown value, but known
* alignment of 4.
*/
- {6, "R2_w=pkt(id=0,off=0,r=8,imm=0)"},
- {9, "R6_w=inv(id=0,umax_value=60,var_off=(0x0; 0x3c))"},
+ {6, "R2_w=pkt(off=0,r=8,imm=0)"},
+ {9, "R6_w=scalar(umax=60,var_off=(0x0; 0x3c))"},
/* Adding 14 makes R6 be (4n+2) */
- {10, "R6_w=inv(id=0,umin_value=14,umax_value=74,var_off=(0x2; 0x7c))"},
+ {10, "R6_w=scalar(umin=14,umax=74,var_off=(0x2; 0x7c))"},
/* Subtracting from packet pointer overflows ubounds */
- {13, "R5_w=pkt(id=2,off=0,r=8,umin_value=18446744073709551542,umax_value=18446744073709551602,var_off=(0xffffffffffffff82; 0x7c)"},
+ {13, "R5_w=pkt(id=2,off=0,r=8,umin=18446744073709551542,umax=18446744073709551602,var_off=(0xffffffffffffff82; 0x7c)"},
/* New unknown value in R7 is (4n), >= 76 */
- {14, "R7_w=inv(id=0,umin_value=76,umax_value=1096,var_off=(0x0; 0x7fc))"},
+ {14, "R7_w=scalar(umin=76,umax=1096,var_off=(0x0; 0x7fc))"},
/* Adding it to packet pointer gives nice bounds again */
- {16, "R5_w=pkt(id=3,off=0,r=0,umin_value=2,umax_value=1082,var_off=(0x2; 0xfffffffc)"},
+ {16, "R5_w=pkt(id=3,off=0,r=0,umin=2,umax=1082,var_off=(0x2; 0xfffffffc)"},
/* At the time the word size load is performed from R5,
* its total fixed offset is NET_IP_ALIGN + reg->off (0)
* which is 2. Then the variable offset is (4n+2), so
* the total offset is 4-byte aligned and meets the
* load's requirements.
*/
- {20, "R5=pkt(id=3,off=0,r=4,umin_value=2,umax_value=1082,var_off=(0x2; 0xfffffffc)"},
+ {20, "R5=pkt(id=3,off=0,r=4,umin=2,umax=1082,var_off=(0x2; 0xfffffffc)"},
},
},
};
@@ -648,8 +648,8 @@ static int do_test_single(struct bpf_align_test *test)
/* Check the next line as well in case the previous line
* did not have a corresponding bpf insn. Example:
* func#0 @0
- * 0: R1=ctx(id=0,off=0,imm=0) R10=fp0
- * 0: (b7) r3 = 2 ; R3_w=inv2
+ * 0: R1=ctx(off=0,imm=0) R10=fp0
+ * 0: (b7) r3 = 2 ; R3_w=2
*/
if (!strstr(line_ptr, m.match)) {
cur_line = -1;
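
A decoding note for the var_off pairs above: the verifier prints a tnum as (value; mask), where mask bits are unknown and value bits are known-set. A hypothetical helper (not part of this patch) makes the membership rule concrete:

    #include <linux/types.h>

    /* A tnum (value; mask) admits v iff every bit outside the
     * unknown mask matches the known value bits.
     */
    static bool tnum_contains(__u64 value, __u64 mask, __u64 v)
    {
            return (v & ~mask) == value;
    }

So var_off=(0x2; 0x7fc) admits 2, 6, 10, ... which is exactly the "(4n+2)" shorthand the comments use.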
diff --git a/tools/testing/selftests/bpf/prog_tests/atomics.c b/tools/testing/selftests/bpf/prog_tests/atomics.c
index ab62aba10e2b3..13e101f370a1d 100644
--- a/tools/testing/selftests/bpf/prog_tests/atomics.c
+++ b/tools/testing/selftests/bpf/prog_tests/atomics.c
@@ -7,19 +7,15 @@
static void test_add(struct atomics_lskel *skel)
{
int err, prog_fd;
- int link_fd;
LIBBPF_OPTS(bpf_test_run_opts, topts);
- link_fd = atomics_lskel__add__attach(skel);
- if (!ASSERT_GT(link_fd, 0, "attach(add)"))
- return;
-
+ /* No need to attach it, just run it directly */
prog_fd = skel->progs.add.prog_fd;
err = bpf_prog_test_run_opts(prog_fd, &topts);
if (!ASSERT_OK(err, "test_run_opts err"))
- goto cleanup;
+ return;
if (!ASSERT_OK(topts.retval, "test_run_opts retval"))
- goto cleanup;
+ return;
ASSERT_EQ(skel->data->add64_value, 3, "add64_value");
ASSERT_EQ(skel->bss->add64_result, 1, "add64_result");
@@ -31,27 +27,20 @@ static void test_add(struct atomics_lskel *skel)
ASSERT_EQ(skel->bss->add_stack_result, 1, "add_stack_result");
ASSERT_EQ(skel->data->add_noreturn_value, 3, "add_noreturn_value");
-
-cleanup:
- close(link_fd);
}
static void test_sub(struct atomics_lskel *skel)
{
int err, prog_fd;
- int link_fd;
LIBBPF_OPTS(bpf_test_run_opts, topts);
- link_fd = atomics_lskel__sub__attach(skel);
- if (!ASSERT_GT(link_fd, 0, "attach(sub)"))
- return;
-
+ /* No need to attach it, just run it directly */
prog_fd = skel->progs.sub.prog_fd;
err = bpf_prog_test_run_opts(prog_fd, &topts);
if (!ASSERT_OK(err, "test_run_opts err"))
- goto cleanup;
+ return;
if (!ASSERT_OK(topts.retval, "test_run_opts retval"))
- goto cleanup;
+ return;
ASSERT_EQ(skel->data->sub64_value, -1, "sub64_value");
ASSERT_EQ(skel->bss->sub64_result, 1, "sub64_result");
@@ -63,27 +52,20 @@ static void test_sub(struct atomics_lskel *skel)
ASSERT_EQ(skel->bss->sub_stack_result, 1, "sub_stack_result");
ASSERT_EQ(skel->data->sub_noreturn_value, -1, "sub_noreturn_value");
-
-cleanup:
- close(link_fd);
}
static void test_and(struct atomics_lskel *skel)
{
int err, prog_fd;
- int link_fd;
LIBBPF_OPTS(bpf_test_run_opts, topts);
- link_fd = atomics_lskel__and__attach(skel);
- if (!ASSERT_GT(link_fd, 0, "attach(and)"))
- return;
-
+ /* No need to attach it, just run it directly */
prog_fd = skel->progs.and.prog_fd;
err = bpf_prog_test_run_opts(prog_fd, &topts);
if (!ASSERT_OK(err, "test_run_opts err"))
- goto cleanup;
+ return;
if (!ASSERT_OK(topts.retval, "test_run_opts retval"))
- goto cleanup;
+ return;
ASSERT_EQ(skel->data->and64_value, 0x010ull << 32, "and64_value");
ASSERT_EQ(skel->bss->and64_result, 0x110ull << 32, "and64_result");
@@ -92,26 +74,20 @@ static void test_and(struct atomics_lskel *skel)
ASSERT_EQ(skel->bss->and32_result, 0x110, "and32_result");
ASSERT_EQ(skel->data->and_noreturn_value, 0x010ull << 32, "and_noreturn_value");
-cleanup:
- close(link_fd);
}
static void test_or(struct atomics_lskel *skel)
{
int err, prog_fd;
- int link_fd;
LIBBPF_OPTS(bpf_test_run_opts, topts);
- link_fd = atomics_lskel__or__attach(skel);
- if (!ASSERT_GT(link_fd, 0, "attach(or)"))
- return;
-
+ /* No need to attach it, just run it directly */
prog_fd = skel->progs.or.prog_fd;
err = bpf_prog_test_run_opts(prog_fd, &topts);
if (!ASSERT_OK(err, "test_run_opts err"))
- goto cleanup;
+ return;
if (!ASSERT_OK(topts.retval, "test_run_opts retval"))
- goto cleanup;
+ return;
ASSERT_EQ(skel->data->or64_value, 0x111ull << 32, "or64_value");
ASSERT_EQ(skel->bss->or64_result, 0x110ull << 32, "or64_result");
@@ -120,26 +96,20 @@ static void test_or(struct atomics_lskel *skel)
ASSERT_EQ(skel->bss->or32_result, 0x110, "or32_result");
ASSERT_EQ(skel->data->or_noreturn_value, 0x111ull << 32, "or_noreturn_value");
-cleanup:
- close(link_fd);
}
static void test_xor(struct atomics_lskel *skel)
{
int err, prog_fd;
- int link_fd;
LIBBPF_OPTS(bpf_test_run_opts, topts);
- link_fd = atomics_lskel__xor__attach(skel);
- if (!ASSERT_GT(link_fd, 0, "attach(xor)"))
- return;
-
+ /* No need to attach it, just run it directly */
prog_fd = skel->progs.xor.prog_fd;
err = bpf_prog_test_run_opts(prog_fd, &topts);
if (!ASSERT_OK(err, "test_run_opts err"))
- goto cleanup;
+ return;
if (!ASSERT_OK(topts.retval, "test_run_opts retval"))
- goto cleanup;
+ return;
ASSERT_EQ(skel->data->xor64_value, 0x101ull << 32, "xor64_value");
ASSERT_EQ(skel->bss->xor64_result, 0x110ull << 32, "xor64_result");
@@ -148,26 +118,20 @@ static void test_xor(struct atomics_lskel *skel)
ASSERT_EQ(skel->bss->xor32_result, 0x110, "xor32_result");
ASSERT_EQ(skel->data->xor_noreturn_value, 0x101ull << 32, "xor_nxoreturn_value");
-cleanup:
- close(link_fd);
}
static void test_cmpxchg(struct atomics_lskel *skel)
{
int err, prog_fd;
- int link_fd;
LIBBPF_OPTS(bpf_test_run_opts, topts);
- link_fd = atomics_lskel__cmpxchg__attach(skel);
- if (!ASSERT_GT(link_fd, 0, "attach(cmpxchg)"))
- return;
-
+ /* No need to attach it, just run it directly */
prog_fd = skel->progs.cmpxchg.prog_fd;
err = bpf_prog_test_run_opts(prog_fd, &topts);
if (!ASSERT_OK(err, "test_run_opts err"))
- goto cleanup;
+ return;
if (!ASSERT_OK(topts.retval, "test_run_opts retval"))
- goto cleanup;
+ return;
ASSERT_EQ(skel->data->cmpxchg64_value, 2, "cmpxchg64_value");
ASSERT_EQ(skel->bss->cmpxchg64_result_fail, 1, "cmpxchg_result_fail");
@@ -176,45 +140,34 @@ static void test_cmpxchg(struct atomics_lskel *skel)
ASSERT_EQ(skel->data->cmpxchg32_value, 2, "lcmpxchg32_value");
ASSERT_EQ(skel->bss->cmpxchg32_result_fail, 1, "cmpxchg_result_fail");
ASSERT_EQ(skel->bss->cmpxchg32_result_succeed, 1, "cmpxchg_result_succeed");
-
-cleanup:
- close(link_fd);
}
static void test_xchg(struct atomics_lskel *skel)
{
int err, prog_fd;
- int link_fd;
LIBBPF_OPTS(bpf_test_run_opts, topts);
- link_fd = atomics_lskel__xchg__attach(skel);
- if (!ASSERT_GT(link_fd, 0, "attach(xchg)"))
- return;
-
+ /* No need to attach it, just run it directly */
prog_fd = skel->progs.xchg.prog_fd;
err = bpf_prog_test_run_opts(prog_fd, &topts);
if (!ASSERT_OK(err, "test_run_opts err"))
- goto cleanup;
+ return;
if (!ASSERT_OK(topts.retval, "test_run_opts retval"))
- goto cleanup;
+ return;
ASSERT_EQ(skel->data->xchg64_value, 2, "xchg64_value");
ASSERT_EQ(skel->bss->xchg64_result, 1, "xchg64_result");
ASSERT_EQ(skel->data->xchg32_value, 2, "xchg32_value");
ASSERT_EQ(skel->bss->xchg32_result, 1, "xchg32_result");
-
-cleanup:
- close(link_fd);
}
void test_atomics(void)
{
struct atomics_lskel *skel;
- __u32 duration = 0;
skel = atomics_lskel__open_and_load();
- if (CHECK(!skel, "skel_load", "atomics skeleton failed\n"))
+ if (!ASSERT_OK_PTR(skel, "atomics skeleton load"))
return;
if (skel->data->skip_tests) {
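
The pattern these tests switch to is worth spelling out: because the programs now live in raw_tp sections (see the progs/atomics.c hunks below), they support BPF_PROG_TEST_RUN and can be executed once with no attach step. A minimal sketch of that pattern, assuming the atomics light-skeleton header generated by bpftool and dropping the retval/ASSERT checks:

    #include <bpf/bpf.h>
    #include <bpf/libbpf.h>
    #include "atomics.lskel.h"  /* generated light skeleton, as in the test */

    static int run_add_once(struct atomics_lskel *skel)
    {
            LIBBPF_OPTS(bpf_test_run_opts, topts);

            /* No attach step: the kernel runs the raw_tp program once
             * and reports its return value in topts.retval.
             */
            return bpf_prog_test_run_opts(skel->progs.add.prog_fd, &topts);
    }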
diff --git a/tools/testing/selftests/bpf/prog_tests/btf_dump.c b/tools/testing/selftests/bpf/prog_tests/btf_dump.c
index 9e26903f9170c..5fce7008d1ff3 100644
--- a/tools/testing/selftests/bpf/prog_tests/btf_dump.c
+++ b/tools/testing/selftests/bpf/prog_tests/btf_dump.c
@@ -148,22 +148,38 @@ static void test_btf_dump_incremental(void)
/* First, generate BTF corresponding to the following C code:
*
- * enum { VAL = 1 };
+ * enum x;
+ *
+ * enum x { X = 1 };
+ *
+ * enum { Y = 1 };
+ *
+ * struct s;
*
* struct s { int x; };
*
*/
+ id = btf__add_enum(btf, "x", 4);
+ ASSERT_EQ(id, 1, "enum_declaration_id");
+ id = btf__add_enum(btf, "x", 4);
+ ASSERT_EQ(id, 2, "named_enum_id");
+ err = btf__add_enum_value(btf, "X", 1);
+ ASSERT_OK(err, "named_enum_val_ok");
+
id = btf__add_enum(btf, NULL, 4);
- ASSERT_EQ(id, 1, "enum_id");
- err = btf__add_enum_value(btf, "VAL", 1);
- ASSERT_OK(err, "enum_val_ok");
+ ASSERT_EQ(id, 3, "anon_enum_id");
+ err = btf__add_enum_value(btf, "Y", 1);
+ ASSERT_OK(err, "anon_enum_val_ok");
id = btf__add_int(btf, "int", 4, BTF_INT_SIGNED);
- ASSERT_EQ(id, 2, "int_id");
+ ASSERT_EQ(id, 4, "int_id");
+
+ id = btf__add_fwd(btf, "s", BTF_FWD_STRUCT);
+ ASSERT_EQ(id, 5, "fwd_id");
id = btf__add_struct(btf, "s", 4);
- ASSERT_EQ(id, 3, "struct_id");
- err = btf__add_field(btf, "x", 2, 0, 0);
+ ASSERT_EQ(id, 6, "struct_id");
+ err = btf__add_field(btf, "x", 4, 0, 0);
ASSERT_OK(err, "field_ok");
for (i = 1; i < btf__type_cnt(btf); i++) {
@@ -173,11 +189,20 @@ static void test_btf_dump_incremental(void)
fflush(dump_buf_file);
dump_buf[dump_buf_sz] = 0; /* some libc implementations don't do this */
+
ASSERT_STREQ(dump_buf,
+"enum x;\n"
+"\n"
+"enum x {\n"
+" X = 1,\n"
+"};\n"
+"\n"
"enum {\n"
-" VAL = 1,\n"
+" Y = 1,\n"
"};\n"
"\n"
+"struct s;\n"
+"\n"
"struct s {\n"
" int x;\n"
"};\n\n", "c_dump1");
@@ -199,10 +224,12 @@ static void test_btf_dump_incremental(void)
fseek(dump_buf_file, 0, SEEK_SET);
id = btf__add_struct(btf, "s", 4);
- ASSERT_EQ(id, 4, "struct_id");
- err = btf__add_field(btf, "x", 1, 0, 0);
+ ASSERT_EQ(id, 7, "struct_id");
+ err = btf__add_field(btf, "x", 2, 0, 0);
+ ASSERT_OK(err, "field_ok");
+ err = btf__add_field(btf, "y", 3, 32, 0);
ASSERT_OK(err, "field_ok");
- err = btf__add_field(btf, "s", 3, 32, 0);
+ err = btf__add_field(btf, "s", 6, 64, 0);
ASSERT_OK(err, "field_ok");
for (i = 1; i < btf__type_cnt(btf); i++) {
@@ -214,9 +241,10 @@ static void test_btf_dump_incremental(void)
dump_buf[dump_buf_sz] = 0; /* some libc implementations don't do this */
ASSERT_STREQ(dump_buf,
"struct s___2 {\n"
+" enum x x;\n"
" enum {\n"
-" VAL___2 = 1,\n"
-" } x;\n"
+" Y___2 = 1,\n"
+" } y;\n"
" struct s s;\n"
"};\n\n" , "c_dump1");
diff --git a/tools/testing/selftests/bpf/prog_tests/core_reloc.c b/tools/testing/selftests/bpf/prog_tests/core_reloc.c
index 8fbb40a832d57..f28f75aa91549 100644
--- a/tools/testing/selftests/bpf/prog_tests/core_reloc.c
+++ b/tools/testing/selftests/bpf/prog_tests/core_reloc.c
@@ -512,7 +512,7 @@ static int __trigger_module_test_read(const struct core_reloc_test_case *test)
}
-static struct core_reloc_test_case test_cases[] = {
+static const struct core_reloc_test_case test_cases[] = {
/* validate we can find kernel image and use its BTF for relocs */
{
.case_name = "kernel",
@@ -843,7 +843,7 @@ static int run_btfgen(const char *src_btf, const char *dst_btf, const char *objp
int n;
n = snprintf(command, sizeof(command),
- "./tools/build/bpftool/bpftool gen min_core_btf %s %s %s",
+ "./bpftool gen min_core_btf %s %s %s",
src_btf, dst_btf, objpath);
if (n < 0 || n >= sizeof(command))
return -1;
@@ -855,7 +855,7 @@ static void run_core_reloc_tests(bool use_btfgen)
{
const size_t mmap_sz = roundup_page(sizeof(struct data));
DECLARE_LIBBPF_OPTS(bpf_object_open_opts, open_opts);
- struct core_reloc_test_case *test_case;
+ struct core_reloc_test_case *test_case, test_case_copy;
const char *tp_name, *probe_name;
int err, i, equal, fd;
struct bpf_link *link = NULL;
@@ -870,7 +870,10 @@ static void run_core_reloc_tests(bool use_btfgen)
for (i = 0; i < ARRAY_SIZE(test_cases); i++) {
char btf_file[] = "/tmp/core_reloc.btf.XXXXXX";
- test_case = &test_cases[i];
+
+ test_case_copy = test_cases[i];
+ test_case = &test_case_copy;
+
if (!test__start_subtest(test_case->case_name))
continue;
@@ -881,6 +884,7 @@ static void run_core_reloc_tests(bool use_btfgen)
/* generate a "minimal" BTF file and use it as source */
if (use_btfgen) {
+
if (!test_case->btf_src_file || test_case->fails) {
test__skip();
continue;
@@ -989,7 +993,8 @@ cleanup:
CHECK_FAIL(munmap(mmap_data, mmap_sz));
mmap_data = NULL;
}
- remove(btf_file);
+ if (use_btfgen)
+ remove(test_case->btf_src_file);
bpf_link__destroy(link);
link = NULL;
bpf_object__close(obj);
@@ -1001,7 +1006,7 @@ void test_core_reloc(void)
run_core_reloc_tests(false);
}
-void test_core_btfgen(void)
+void test_core_reloc_btfgen(void)
{
run_core_reloc_tests(true);
}
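
Two behavioral fixes ride along in this file: test_cases becomes const and each subtest works on a stack copy, apparently so the btfgen path can point btf_src_file at the generated temporary BTF without mutating the shared array, and cleanup now removes that generated file only when btfgen was actually used. The entry point is also renamed to test_core_reloc_btfgen so it is grouped with the other core_reloc subtests.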
diff --git a/tools/testing/selftests/bpf/prog_tests/log_buf.c b/tools/testing/selftests/bpf/prog_tests/log_buf.c
index 1ef377a7e7319..fe9a23e65ef41 100644
--- a/tools/testing/selftests/bpf/prog_tests/log_buf.c
+++ b/tools/testing/selftests/bpf/prog_tests/log_buf.c
@@ -78,7 +78,7 @@ static void obj_load_log_buf(void)
ASSERT_OK_PTR(strstr(libbpf_log_buf, "prog 'bad_prog': BPF program load failed"),
"libbpf_log_not_empty");
ASSERT_OK_PTR(strstr(obj_log_buf, "DATASEC license"), "obj_log_not_empty");
- ASSERT_OK_PTR(strstr(good_log_buf, "0: R1=ctx(id=0,off=0,imm=0) R10=fp0"),
+ ASSERT_OK_PTR(strstr(good_log_buf, "0: R1=ctx(off=0,imm=0) R10=fp0"),
"good_log_verbose");
ASSERT_OK_PTR(strstr(bad_log_buf, "invalid access to map value, value_size=16 off=16000 size=4"),
"bad_log_not_empty");
@@ -175,7 +175,7 @@ static void bpf_prog_load_log_buf(void)
opts.log_level = 2;
fd = bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, "good_prog", "GPL",
good_prog_insns, good_prog_insn_cnt, &opts);
- ASSERT_OK_PTR(strstr(log_buf, "0: R1=ctx(id=0,off=0,imm=0) R10=fp0"), "good_log_2");
+ ASSERT_OK_PTR(strstr(log_buf, "0: R1=ctx(off=0,imm=0) R10=fp0"), "good_log_2");
ASSERT_GE(fd, 0, "good_fd2");
if (fd >= 0)
close(fd);
diff --git a/tools/testing/selftests/bpf/progs/atomics.c b/tools/testing/selftests/bpf/progs/atomics.c
index 16e57313204af..f89c7f0cc53b9 100644
--- a/tools/testing/selftests/bpf/progs/atomics.c
+++ b/tools/testing/selftests/bpf/progs/atomics.c
@@ -20,8 +20,8 @@ __u64 add_stack_value_copy = 0;
__u64 add_stack_result = 0;
__u64 add_noreturn_value = 1;
-SEC("fentry/bpf_fentry_test1")
-int BPF_PROG(add, int a)
+SEC("raw_tp/sys_enter")
+int add(const void *ctx)
{
if (pid != (bpf_get_current_pid_tgid() >> 32))
return 0;
@@ -46,8 +46,8 @@ __s64 sub_stack_value_copy = 0;
__s64 sub_stack_result = 0;
__s64 sub_noreturn_value = 1;
-SEC("fentry/bpf_fentry_test1")
-int BPF_PROG(sub, int a)
+SEC("raw_tp/sys_enter")
+int sub(const void *ctx)
{
if (pid != (bpf_get_current_pid_tgid() >> 32))
return 0;
@@ -70,8 +70,8 @@ __u32 and32_value = 0x110;
__u32 and32_result = 0;
__u64 and_noreturn_value = (0x110ull << 32);
-SEC("fentry/bpf_fentry_test1")
-int BPF_PROG(and, int a)
+SEC("raw_tp/sys_enter")
+int and(const void *ctx)
{
if (pid != (bpf_get_current_pid_tgid() >> 32))
return 0;
@@ -91,8 +91,8 @@ __u32 or32_value = 0x110;
__u32 or32_result = 0;
__u64 or_noreturn_value = (0x110ull << 32);
-SEC("fentry/bpf_fentry_test1")
-int BPF_PROG(or, int a)
+SEC("raw_tp/sys_enter")
+int or(const void *ctx)
{
if (pid != (bpf_get_current_pid_tgid() >> 32))
return 0;
@@ -111,8 +111,8 @@ __u32 xor32_value = 0x110;
__u32 xor32_result = 0;
__u64 xor_noreturn_value = (0x110ull << 32);
-SEC("fentry/bpf_fentry_test1")
-int BPF_PROG(xor, int a)
+SEC("raw_tp/sys_enter")
+int xor(const void *ctx)
{
if (pid != (bpf_get_current_pid_tgid() >> 32))
return 0;
@@ -132,8 +132,8 @@ __u32 cmpxchg32_value = 1;
__u32 cmpxchg32_result_fail = 0;
__u32 cmpxchg32_result_succeed = 0;
-SEC("fentry/bpf_fentry_test1")
-int BPF_PROG(cmpxchg, int a)
+SEC("raw_tp/sys_enter")
+int cmpxchg(const void *ctx)
{
if (pid != (bpf_get_current_pid_tgid() >> 32))
return 0;
@@ -153,8 +153,8 @@ __u64 xchg64_result = 0;
__u32 xchg32_value = 1;
__u32 xchg32_result = 0;
-SEC("fentry/bpf_fentry_test1")
-int BPF_PROG(xchg, int a)
+SEC("raw_tp/sys_enter")
+int xchg(const void *ctx)
{
if (pid != (bpf_get_current_pid_tgid() >> 32))
return 0;
diff --git a/tools/testing/selftests/bpf/test_cpp.cpp b/tools/testing/selftests/bpf/test_cpp.cpp
index 773f165c4898e..19ad172036daa 100644
--- a/tools/testing/selftests/bpf/test_cpp.cpp
+++ b/tools/testing/selftests/bpf/test_cpp.cpp
@@ -1,6 +1,9 @@
/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */
#include <iostream>
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
#include <bpf/libbpf.h>
+#pragma GCC diagnostic pop
#include <bpf/bpf.h>
#include <bpf/btf.h>
#include "test_core_extern.skel.h"
diff --git a/tools/testing/selftests/bpf/verifier/atomic_invalid.c b/tools/testing/selftests/bpf/verifier/atomic_invalid.c
index 39272720b2f6f..25f4ac1c69ab7 100644
--- a/tools/testing/selftests/bpf/verifier/atomic_invalid.c
+++ b/tools/testing/selftests/bpf/verifier/atomic_invalid.c
@@ -1,6 +1,6 @@
-#define __INVALID_ATOMIC_ACCESS_TEST(op) \
+#define __INVALID_ATOMIC_ACCESS_TEST(op) \
{ \
- "atomic " #op " access through non-pointer ", \
+ "atomic " #op " access through non-pointer ", \
.insns = { \
BPF_MOV64_IMM(BPF_REG_0, 1), \
BPF_MOV64_IMM(BPF_REG_1, 0), \
@@ -9,7 +9,7 @@
BPF_EXIT_INSN(), \
}, \
.result = REJECT, \
- .errstr = "R1 invalid mem access 'inv'" \
+ .errstr = "R1 invalid mem access 'scalar'" \
}
__INVALID_ATOMIC_ACCESS_TEST(BPF_ADD),
__INVALID_ATOMIC_ACCESS_TEST(BPF_ADD | BPF_FETCH),
diff --git a/tools/testing/selftests/bpf/verifier/bounds.c b/tools/testing/selftests/bpf/verifier/bounds.c
index e061e8799ce23..33125d5f6772e 100644
--- a/tools/testing/selftests/bpf/verifier/bounds.c
+++ b/tools/testing/selftests/bpf/verifier/bounds.c
@@ -508,7 +508,7 @@
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, -1),
BPF_EXIT_INSN(),
},
- .errstr_unpriv = "R0 invalid mem access 'inv'",
+ .errstr_unpriv = "R0 invalid mem access 'scalar'",
.result_unpriv = REJECT,
.result = ACCEPT
},
@@ -530,7 +530,7 @@
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, -1),
BPF_EXIT_INSN(),
},
- .errstr_unpriv = "R0 invalid mem access 'inv'",
+ .errstr_unpriv = "R0 invalid mem access 'scalar'",
.result_unpriv = REJECT,
.result = ACCEPT
},
diff --git a/tools/testing/selftests/bpf/verifier/calls.c b/tools/testing/selftests/bpf/verifier/calls.c
index 829be2b9e08e1..f890333259ad6 100644
--- a/tools/testing/selftests/bpf/verifier/calls.c
+++ b/tools/testing/selftests/bpf/verifier/calls.c
@@ -97,6 +97,25 @@
},
},
{
+ "calls: trigger reg2btf_ids[reg->type] for reg->type > __BPF_REG_TYPE_MAX",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = REJECT,
+ .errstr = "arg#0 pointer type STRUCT prog_test_ref_kfunc must point",
+ .fixup_kfunc_btf_id = {
+ { "bpf_kfunc_call_test_acquire", 3 },
+ { "bpf_kfunc_call_test_release", 5 },
+ },
+},
+{
"calls: basic sanity",
.insns = {
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
@@ -169,7 +188,7 @@
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.result = REJECT,
- .errstr = "R0 invalid mem access 'inv'",
+ .errstr = "R0 invalid mem access 'scalar'",
},
{
"calls: multiple ret types in subprog 2",
@@ -472,7 +491,7 @@
BPF_EXIT_INSN(),
},
.result = REJECT,
- .errstr = "R6 invalid mem access 'inv'",
+ .errstr = "R6 invalid mem access 'scalar'",
.prog_type = BPF_PROG_TYPE_XDP,
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
@@ -1678,7 +1697,7 @@
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.fixup_map_hash_8b = { 12, 22 },
.result = REJECT,
- .errstr = "R0 invalid mem access 'inv'",
+ .errstr = "R0 invalid mem access 'scalar'",
},
{
"calls: pkt_ptr spill into caller stack",
diff --git a/tools/testing/selftests/bpf/verifier/ctx.c b/tools/testing/selftests/bpf/verifier/ctx.c
index 23080862aafd2..60f6fbe03f198 100644
--- a/tools/testing/selftests/bpf/verifier/ctx.c
+++ b/tools/testing/selftests/bpf/verifier/ctx.c
@@ -127,7 +127,7 @@
.prog_type = BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
.expected_attach_type = BPF_CGROUP_UDP6_SENDMSG,
.result = REJECT,
- .errstr = "R1 type=inv expected=ctx",
+ .errstr = "R1 type=scalar expected=ctx",
},
{
"pass ctx or null check, 4: ctx - const",
@@ -193,5 +193,5 @@
.prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
.expected_attach_type = BPF_CGROUP_INET4_POST_BIND,
.result = REJECT,
- .errstr = "R1 type=inv expected=ctx",
+ .errstr = "R1 type=scalar expected=ctx",
},
diff --git a/tools/testing/selftests/bpf/verifier/direct_packet_access.c b/tools/testing/selftests/bpf/verifier/direct_packet_access.c
index ac1e19d0f5200..11acd1855acf6 100644
--- a/tools/testing/selftests/bpf/verifier/direct_packet_access.c
+++ b/tools/testing/selftests/bpf/verifier/direct_packet_access.c
@@ -339,7 +339,7 @@
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
- .errstr = "R2 invalid mem access 'inv'",
+ .errstr = "R2 invalid mem access 'scalar'",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
diff --git a/tools/testing/selftests/bpf/verifier/helper_access_var_len.c b/tools/testing/selftests/bpf/verifier/helper_access_var_len.c
index 0ab7f1dfc97ac..a6c869a7319cd 100644
--- a/tools/testing/selftests/bpf/verifier/helper_access_var_len.c
+++ b/tools/testing/selftests/bpf/verifier/helper_access_var_len.c
@@ -350,7 +350,7 @@
BPF_EMIT_CALL(BPF_FUNC_csum_diff),
BPF_EXIT_INSN(),
},
- .errstr = "R1 type=inv expected=fp",
+ .errstr = "R1 type=scalar expected=fp",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
@@ -471,7 +471,7 @@
BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel),
BPF_EXIT_INSN(),
},
- .errstr = "R1 type=inv expected=fp",
+ .errstr = "R1 type=scalar expected=fp",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
@@ -484,7 +484,7 @@
BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel),
BPF_EXIT_INSN(),
},
- .errstr = "R1 type=inv expected=fp",
+ .errstr = "R1 type=scalar expected=fp",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
diff --git a/tools/testing/selftests/bpf/verifier/jmp32.c b/tools/testing/selftests/bpf/verifier/jmp32.c
index 1c857b2fbdf0a..6ddc418fdfafa 100644
--- a/tools/testing/selftests/bpf/verifier/jmp32.c
+++ b/tools/testing/selftests/bpf/verifier/jmp32.c
@@ -286,7 +286,7 @@
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
- .errstr_unpriv = "R0 invalid mem access 'inv'",
+ .errstr_unpriv = "R0 invalid mem access 'scalar'",
.result_unpriv = REJECT,
.result = ACCEPT,
.retval = 2,
@@ -356,7 +356,7 @@
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
- .errstr_unpriv = "R0 invalid mem access 'inv'",
+ .errstr_unpriv = "R0 invalid mem access 'scalar'",
.result_unpriv = REJECT,
.result = ACCEPT,
.retval = 2,
@@ -426,7 +426,7 @@
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
- .errstr_unpriv = "R0 invalid mem access 'inv'",
+ .errstr_unpriv = "R0 invalid mem access 'scalar'",
.result_unpriv = REJECT,
.result = ACCEPT,
.retval = 2,
@@ -496,7 +496,7 @@
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
- .errstr_unpriv = "R0 invalid mem access 'inv'",
+ .errstr_unpriv = "R0 invalid mem access 'scalar'",
.result_unpriv = REJECT,
.result = ACCEPT,
.retval = 2,
@@ -566,7 +566,7 @@
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
- .errstr_unpriv = "R0 invalid mem access 'inv'",
+ .errstr_unpriv = "R0 invalid mem access 'scalar'",
.result_unpriv = REJECT,
.result = ACCEPT,
.retval = 2,
@@ -636,7 +636,7 @@
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
- .errstr_unpriv = "R0 invalid mem access 'inv'",
+ .errstr_unpriv = "R0 invalid mem access 'scalar'",
.result_unpriv = REJECT,
.result = ACCEPT,
.retval = 2,
@@ -706,7 +706,7 @@
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
- .errstr_unpriv = "R0 invalid mem access 'inv'",
+ .errstr_unpriv = "R0 invalid mem access 'scalar'",
.result_unpriv = REJECT,
.result = ACCEPT,
.retval = 2,
@@ -776,7 +776,7 @@
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
- .errstr_unpriv = "R0 invalid mem access 'inv'",
+ .errstr_unpriv = "R0 invalid mem access 'scalar'",
.result_unpriv = REJECT,
.result = ACCEPT,
.retval = 2,
diff --git a/tools/testing/selftests/bpf/verifier/precise.c b/tools/testing/selftests/bpf/verifier/precise.c
index 6dc8003ffc709..9e754423fa8b0 100644
--- a/tools/testing/selftests/bpf/verifier/precise.c
+++ b/tools/testing/selftests/bpf/verifier/precise.c
@@ -27,7 +27,7 @@
BPF_JMP_IMM(BPF_JLT, BPF_REG_2, 8, 1),
BPF_EXIT_INSN(),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1), /* R2=inv(umin=1, umax=8) */
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1), /* R2=scalar(umin=1, umax=8) */
BPF_MOV64_REG(BPF_REG_1, BPF_REG_FP),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
BPF_MOV64_IMM(BPF_REG_3, 0),
@@ -87,7 +87,7 @@
BPF_JMP_IMM(BPF_JLT, BPF_REG_2, 8, 1),
BPF_EXIT_INSN(),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1), /* R2=inv(umin=1, umax=8) */
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1), /* R2=scalar(umin=1, umax=8) */
BPF_MOV64_REG(BPF_REG_1, BPF_REG_FP),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
BPF_MOV64_IMM(BPF_REG_3, 0),
diff --git a/tools/testing/selftests/bpf/verifier/raw_stack.c b/tools/testing/selftests/bpf/verifier/raw_stack.c
index cc8e8c3cdc03d..eb5ed936580bf 100644
--- a/tools/testing/selftests/bpf/verifier/raw_stack.c
+++ b/tools/testing/selftests/bpf/verifier/raw_stack.c
@@ -132,7 +132,7 @@
BPF_EXIT_INSN(),
},
.result = REJECT,
- .errstr = "R0 invalid mem access 'inv'",
+ .errstr = "R0 invalid mem access 'scalar'",
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
@@ -162,7 +162,7 @@
BPF_EXIT_INSN(),
},
.result = REJECT,
- .errstr = "R3 invalid mem access 'inv'",
+ .errstr = "R3 invalid mem access 'scalar'",
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
diff --git a/tools/testing/selftests/bpf/verifier/ref_tracking.c b/tools/testing/selftests/bpf/verifier/ref_tracking.c
index 3b6ee009c00b6..fbd682520e475 100644
--- a/tools/testing/selftests/bpf/verifier/ref_tracking.c
+++ b/tools/testing/selftests/bpf/verifier/ref_tracking.c
@@ -162,7 +162,7 @@
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .errstr = "type=inv expected=sock",
+ .errstr = "type=scalar expected=sock",
.result = REJECT,
},
{
@@ -178,7 +178,7 @@
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .errstr = "type=inv expected=sock",
+ .errstr = "type=scalar expected=sock",
.result = REJECT,
},
{
@@ -274,7 +274,7 @@
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .errstr = "type=inv expected=sock",
+ .errstr = "type=scalar expected=sock",
.result = REJECT,
},
{
diff --git a/tools/testing/selftests/bpf/verifier/search_pruning.c b/tools/testing/selftests/bpf/verifier/search_pruning.c
index 682519769fe3c..68b14fdfebdb1 100644
--- a/tools/testing/selftests/bpf/verifier/search_pruning.c
+++ b/tools/testing/selftests/bpf/verifier/search_pruning.c
@@ -104,7 +104,7 @@
BPF_EXIT_INSN(),
},
.fixup_map_hash_8b = { 3 },
- .errstr = "R6 invalid mem access 'inv'",
+ .errstr = "R6 invalid mem access 'scalar'",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
diff --git a/tools/testing/selftests/bpf/verifier/sock.c b/tools/testing/selftests/bpf/verifier/sock.c
index 8c224eac93df7..86b24cad27a76 100644
--- a/tools/testing/selftests/bpf/verifier/sock.c
+++ b/tools/testing/selftests/bpf/verifier/sock.c
@@ -502,7 +502,7 @@
.fixup_sk_storage_map = { 11 },
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.result = REJECT,
- .errstr = "R3 type=inv expected=fp",
+ .errstr = "R3 type=scalar expected=fp",
},
{
"sk_storage_get(map, skb->sk, &stack_value, 1): stack_value",
diff --git a/tools/testing/selftests/bpf/verifier/spill_fill.c b/tools/testing/selftests/bpf/verifier/spill_fill.c
index 8cfc5349d2a84..e23f07175e1bf 100644
--- a/tools/testing/selftests/bpf/verifier/spill_fill.c
+++ b/tools/testing/selftests/bpf/verifier/spill_fill.c
@@ -102,7 +102,7 @@
BPF_EXIT_INSN(),
},
.errstr_unpriv = "attempt to corrupt spilled",
- .errstr = "R0 invalid mem access 'inv",
+ .errstr = "R0 invalid mem access 'scalar'",
.result = REJECT,
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
@@ -147,11 +147,11 @@
BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_10, -8),
/* r0 = r2 */
BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- /* r0 += r4 R0=pkt R2=pkt R3=pkt_end R4=inv20 */
+ /* r0 += r4 R0=pkt R2=pkt R3=pkt_end R4=20 */
BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_4),
- /* if (r0 > r3) R0=pkt,off=20 R2=pkt R3=pkt_end R4=inv20 */
+ /* if (r0 > r3) R0=pkt,off=20 R2=pkt R3=pkt_end R4=20 */
BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
- /* r0 = *(u32 *)r2 R0=pkt,off=20,r=20 R2=pkt,r=20 R3=pkt_end R4=inv20 */
+ /* r0 = *(u32 *)r2 R0=pkt,off=20,r=20 R2=pkt,r=20 R3=pkt_end R4=20 */
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_2, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
@@ -190,11 +190,11 @@
BPF_LDX_MEM(BPF_H, BPF_REG_4, BPF_REG_10, -8),
/* r0 = r2 */
BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- /* r0 += r4 R0=pkt R2=pkt R3=pkt_end R4=inv,umax=65535 */
+ /* r0 += r4 R0=pkt R2=pkt R3=pkt_end R4=umax=65535 */
BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_4),
- /* if (r0 > r3) R0=pkt,umax=65535 R2=pkt R3=pkt_end R4=inv,umax=65535 */
+ /* if (r0 > r3) R0=pkt,umax=65535 R2=pkt R3=pkt_end R4=umax=65535 */
BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
- /* r0 = *(u32 *)r2 R0=pkt,umax=65535 R2=pkt R3=pkt_end R4=inv20 */
+ /* r0 = *(u32 *)r2 R0=pkt,umax=65535 R2=pkt R3=pkt_end R4=20 */
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_2, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
@@ -222,11 +222,11 @@
BPF_LDX_MEM(BPF_H, BPF_REG_4, BPF_REG_10, -8),
/* r0 = r2 */
BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- /* r0 += r4 R0=pkt R2=pkt R3=pkt_end R4=inv,umax=65535 */
+ /* r0 += r4 R0=pkt R2=pkt R3=pkt_end R4=umax=65535 */
BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_4),
- /* if (r0 > r3) R0=pkt,umax=65535 R2=pkt R3=pkt_end R4=inv,umax=65535 */
+ /* if (r0 > r3) R0=pkt,umax=65535 R2=pkt R3=pkt_end R4=umax=65535 */
BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
- /* r0 = *(u32 *)r2 R0=pkt,umax=65535 R2=pkt R3=pkt_end R4=inv20 */
+ /* r0 = *(u32 *)r2 R0=pkt,umax=65535 R2=pkt R3=pkt_end R4=20 */
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_2, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
@@ -250,11 +250,11 @@
BPF_LDX_MEM(BPF_H, BPF_REG_4, BPF_REG_10, -6),
/* r0 = r2 */
BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- /* r0 += r4 R0=pkt R2=pkt R3=pkt_end R4=inv,umax=65535 */
+ /* r0 += r4 R0=pkt R2=pkt R3=pkt_end R4=umax=65535 */
BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_4),
- /* if (r0 > r3) R0=pkt,umax=65535 R2=pkt R3=pkt_end R4=inv,umax=65535 */
+ /* if (r0 > r3) R0=pkt,umax=65535 R2=pkt R3=pkt_end R4=umax=65535 */
BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
- /* r0 = *(u32 *)r2 R0=pkt,umax=65535 R2=pkt R3=pkt_end R4=inv20 */
+ /* r0 = *(u32 *)r2 R0=pkt,umax=65535 R2=pkt R3=pkt_end R4=20 */
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_2, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
@@ -280,11 +280,11 @@
BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_10, -4),
/* r0 = r2 */
BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- /* r0 += r4 R0=pkt R2=pkt R3=pkt_end R4=inv,umax=U32_MAX */
+ /* r0 += r4 R0=pkt R2=pkt R3=pkt_end R4=umax=U32_MAX */
BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_4),
- /* if (r0 > r3) R0=pkt,umax=U32_MAX R2=pkt R3=pkt_end R4=inv */
+ /* if (r0 > r3) R0=pkt,umax=U32_MAX R2=pkt R3=pkt_end R4= */
BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
- /* r0 = *(u32 *)r2 R0=pkt,umax=U32_MAX R2=pkt R3=pkt_end R4=inv */
+ /* r0 = *(u32 *)r2 R0=pkt,umax=U32_MAX R2=pkt R3=pkt_end R4= */
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_2, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
@@ -305,13 +305,13 @@
BPF_JMP_IMM(BPF_JLE, BPF_REG_4, 40, 2),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
- /* *(u32 *)(r10 -8) = r4 R4=inv,umax=40 */
+ /* *(u32 *)(r10 -8) = r4 R4=umax=40 */
BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_4, -8),
/* r4 = (*u32 *)(r10 - 8) */
BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_10, -8),
- /* r2 += r4 R2=pkt R4=inv,umax=40 */
+ /* r2 += r4 R2=pkt R4=umax=40 */
BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_4),
- /* r0 = r2 R2=pkt,umax=40 R4=inv,umax=40 */
+ /* r0 = r2 R2=pkt,umax=40 R4=umax=40 */
BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
/* r2 += 20 R0=pkt,umax=40 R2=pkt,umax=40 */
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 20),
diff --git a/tools/testing/selftests/bpf/verifier/unpriv.c b/tools/testing/selftests/bpf/verifier/unpriv.c
index 111801aea5e35..878ca26c3f0a4 100644
--- a/tools/testing/selftests/bpf/verifier/unpriv.c
+++ b/tools/testing/selftests/bpf/verifier/unpriv.c
@@ -214,7 +214,7 @@
BPF_EXIT_INSN(),
},
.result = REJECT,
- .errstr = "R1 type=inv expected=ctx",
+ .errstr = "R1 type=scalar expected=ctx",
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
@@ -420,7 +420,7 @@
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_7, 0),
BPF_EXIT_INSN(),
},
- .errstr_unpriv = "R7 invalid mem access 'inv'",
+ .errstr_unpriv = "R7 invalid mem access 'scalar'",
.result_unpriv = REJECT,
.result = ACCEPT,
.retval = 0,
diff --git a/tools/testing/selftests/bpf/verifier/value_illegal_alu.c b/tools/testing/selftests/bpf/verifier/value_illegal_alu.c
index 4890628672183..d6f29eb4bd576 100644
--- a/tools/testing/selftests/bpf/verifier/value_illegal_alu.c
+++ b/tools/testing/selftests/bpf/verifier/value_illegal_alu.c
@@ -64,7 +64,7 @@
},
.fixup_map_hash_48b = { 3 },
.errstr_unpriv = "R0 pointer arithmetic prohibited",
- .errstr = "invalid mem access 'inv'",
+ .errstr = "invalid mem access 'scalar'",
.result = REJECT,
.result_unpriv = REJECT,
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
@@ -89,7 +89,7 @@
},
.fixup_map_hash_48b = { 3 },
.errstr_unpriv = "leaking pointer from stack off -8",
- .errstr = "R0 invalid mem access 'inv'",
+ .errstr = "R0 invalid mem access 'scalar'",
.result = REJECT,
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
diff --git a/tools/testing/selftests/bpf/verifier/value_ptr_arith.c b/tools/testing/selftests/bpf/verifier/value_ptr_arith.c
index 359f3e8f8b604..249187d3c5304 100644
--- a/tools/testing/selftests/bpf/verifier/value_ptr_arith.c
+++ b/tools/testing/selftests/bpf/verifier/value_ptr_arith.c
@@ -397,7 +397,7 @@
.fixup_map_array_48b = { 1 },
.result = ACCEPT,
.result_unpriv = REJECT,
- .errstr_unpriv = "R0 invalid mem access 'inv'",
+ .errstr_unpriv = "R0 invalid mem access 'scalar'",
.retval = 0,
},
{
@@ -1074,7 +1074,7 @@
},
.fixup_map_array_48b = { 3 },
.result = REJECT,
- .errstr = "R0 invalid mem access 'inv'",
+ .errstr = "R0 invalid mem access 'scalar'",
.errstr_unpriv = "R0 pointer -= pointer prohibited",
},
{
diff --git a/tools/testing/selftests/bpf/verifier/var_off.c b/tools/testing/selftests/bpf/verifier/var_off.c
index eab1f7f56e2f0..187c6f6e32bc1 100644
--- a/tools/testing/selftests/bpf/verifier/var_off.c
+++ b/tools/testing/selftests/bpf/verifier/var_off.c
@@ -131,7 +131,7 @@
* write might have overwritten the spilled pointer (i.e. we lose track
* of the spilled register when we analyze the write).
*/
- .errstr = "R2 invalid mem access 'inv'",
+ .errstr = "R2 invalid mem access 'scalar'",
.result = REJECT,
},
{