Diffstat (limited to 'tools/testing/selftests/bpf/prog_tests')
31 files changed, 2948 insertions, 285 deletions
| diff --git a/tools/testing/selftests/bpf/prog_tests/attach_probe.c b/tools/testing/selftests/bpf/prog_tests/attach_probe.c index ec11e20d2b92..bf307bb9e446 100644 --- a/tools/testing/selftests/bpf/prog_tests/attach_probe.c +++ b/tools/testing/selftests/bpf/prog_tests/attach_probe.c @@ -2,79 +2,28 @@  #include <test_progs.h>  #include "test_attach_probe.skel.h" -#if defined(__powerpc64__) && defined(_CALL_ELF) && _CALL_ELF == 2 - -#define OP_RT_RA_MASK   0xffff0000UL -#define LIS_R2          0x3c400000UL -#define ADDIS_R2_R12    0x3c4c0000UL -#define ADDI_R2_R2      0x38420000UL - -static ssize_t get_offset(ssize_t addr, ssize_t base) -{ -	u32 *insn = (u32 *) addr; - -	/* -	 * A PPC64 ABIv2 function may have a local and a global entry -	 * point. We need to use the local entry point when patching -	 * functions, so identify and step over the global entry point -	 * sequence. -	 * -	 * The global entry point sequence is always of the form: -	 * -	 * addis r2,r12,XXXX -	 * addi  r2,r2,XXXX -	 * -	 * A linker optimisation may convert the addis to lis: -	 * -	 * lis   r2,XXXX -	 * addi  r2,r2,XXXX -	 */ -	if ((((*insn & OP_RT_RA_MASK) == ADDIS_R2_R12) || -	     ((*insn & OP_RT_RA_MASK) == LIS_R2)) && -	    ((*(insn + 1) & OP_RT_RA_MASK) == ADDI_R2_R2)) -		return (ssize_t)(insn + 2) - base; -	else -		return addr - base; -} -#else -#define get_offset(addr, base) (addr - base) -#endif - -ssize_t get_base_addr() { -	size_t start, offset; -	char buf[256]; -	FILE *f; - -	f = fopen("/proc/self/maps", "r"); -	if (!f) -		return -errno; - -	while (fscanf(f, "%zx-%*x %s %zx %*[^\n]\n", -		      &start, buf, &offset) == 3) { -		if (strcmp(buf, "r-xp") == 0) { -			fclose(f); -			return start - offset; -		} -	} - -	fclose(f); -	return -EINVAL; -} +/* this is how USDT semaphore is actually defined, except volatile modifier */ +volatile unsigned short uprobe_ref_ctr __attribute__((unused)) __attribute((section(".probes")));  void test_attach_probe(void)  { +	DECLARE_LIBBPF_OPTS(bpf_uprobe_opts, uprobe_opts);  	int duration = 0;  	struct bpf_link *kprobe_link, *kretprobe_link;  	struct bpf_link *uprobe_link, *uretprobe_link;  	struct test_attach_probe* skel;  	size_t uprobe_offset; -	ssize_t base_addr; +	ssize_t base_addr, ref_ctr_offset;  	base_addr = get_base_addr();  	if (CHECK(base_addr < 0, "get_base_addr",  		  "failed to find base addr: %zd", base_addr))  		return; -	uprobe_offset = get_offset((size_t)&get_base_addr, base_addr); +	uprobe_offset = get_uprobe_offset(&get_base_addr, base_addr); + +	ref_ctr_offset = get_rel_offset((uintptr_t)&uprobe_ref_ctr); +	if (!ASSERT_GE(ref_ctr_offset, 0, "ref_ctr_offset")) +		return;  	skel = test_attach_probe__open_and_load();  	if (CHECK(!skel, "skel_open", "failed to open skeleton\n")) @@ -96,20 +45,28 @@ void test_attach_probe(void)  		goto cleanup;  	skel->links.handle_kretprobe = kretprobe_link; -	uprobe_link = bpf_program__attach_uprobe(skel->progs.handle_uprobe, -						 false /* retprobe */, -						 0 /* self pid */, -						 "/proc/self/exe", -						 uprobe_offset); +	ASSERT_EQ(uprobe_ref_ctr, 0, "uprobe_ref_ctr_before"); + +	uprobe_opts.retprobe = false; +	uprobe_opts.ref_ctr_offset = ref_ctr_offset; +	uprobe_link = bpf_program__attach_uprobe_opts(skel->progs.handle_uprobe, +						      0 /* self pid */, +						      "/proc/self/exe", +						      uprobe_offset, +						      &uprobe_opts);  	if (!ASSERT_OK_PTR(uprobe_link, "attach_uprobe"))  		goto cleanup;  	skel->links.handle_uprobe = uprobe_link; -	uretprobe_link = 
bpf_program__attach_uprobe(skel->progs.handle_uretprobe, -						    true /* retprobe */, -						    -1 /* any pid */, -						    "/proc/self/exe", -						    uprobe_offset); +	ASSERT_GT(uprobe_ref_ctr, 0, "uprobe_ref_ctr_after"); + +	/* if uprobe uses ref_ctr, uretprobe has to use ref_ctr as well */ +	uprobe_opts.retprobe = true; +	uprobe_opts.ref_ctr_offset = ref_ctr_offset; +	uretprobe_link = bpf_program__attach_uprobe_opts(skel->progs.handle_uretprobe, +							 -1 /* any pid */, +							 "/proc/self/exe", +							 uprobe_offset, &uprobe_opts);  	if (!ASSERT_OK_PTR(uretprobe_link, "attach_uretprobe"))  		goto cleanup;  	skel->links.handle_uretprobe = uretprobe_link; @@ -136,4 +93,5 @@ void test_attach_probe(void)  cleanup:  	test_attach_probe__destroy(skel); +	ASSERT_EQ(uprobe_ref_ctr, 0, "uprobe_ref_ctr_cleanup");  } diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_cookie.c b/tools/testing/selftests/bpf/prog_tests/bpf_cookie.c new file mode 100644 index 000000000000..5eea3c3a40fe --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/bpf_cookie.c @@ -0,0 +1,254 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2021 Facebook */ +#define _GNU_SOURCE +#include <pthread.h> +#include <sched.h> +#include <sys/syscall.h> +#include <unistd.h> +#include <test_progs.h> +#include "test_bpf_cookie.skel.h" + +static void kprobe_subtest(struct test_bpf_cookie *skel) +{ +	DECLARE_LIBBPF_OPTS(bpf_kprobe_opts, opts); +	struct bpf_link *link1 = NULL, *link2 = NULL; +	struct bpf_link *retlink1 = NULL, *retlink2 = NULL; + +	/* attach two kprobes */ +	opts.bpf_cookie = 0x1; +	opts.retprobe = false; +	link1 = bpf_program__attach_kprobe_opts(skel->progs.handle_kprobe, +						 SYS_NANOSLEEP_KPROBE_NAME, &opts); +	if (!ASSERT_OK_PTR(link1, "link1")) +		goto cleanup; + +	opts.bpf_cookie = 0x2; +	opts.retprobe = false; +	link2 = bpf_program__attach_kprobe_opts(skel->progs.handle_kprobe, +						 SYS_NANOSLEEP_KPROBE_NAME, &opts); +	if (!ASSERT_OK_PTR(link2, "link2")) +		goto cleanup; + +	/* attach two kretprobes */ +	opts.bpf_cookie = 0x10; +	opts.retprobe = true; +	retlink1 = bpf_program__attach_kprobe_opts(skel->progs.handle_kretprobe, +						    SYS_NANOSLEEP_KPROBE_NAME, &opts); +	if (!ASSERT_OK_PTR(retlink1, "retlink1")) +		goto cleanup; + +	opts.bpf_cookie = 0x20; +	opts.retprobe = true; +	retlink2 = bpf_program__attach_kprobe_opts(skel->progs.handle_kretprobe, +						    SYS_NANOSLEEP_KPROBE_NAME, &opts); +	if (!ASSERT_OK_PTR(retlink2, "retlink2")) +		goto cleanup; + +	/* trigger kprobe && kretprobe */ +	usleep(1); + +	ASSERT_EQ(skel->bss->kprobe_res, 0x1 | 0x2, "kprobe_res"); +	ASSERT_EQ(skel->bss->kretprobe_res, 0x10 | 0x20, "kretprobe_res"); + +cleanup: +	bpf_link__destroy(link1); +	bpf_link__destroy(link2); +	bpf_link__destroy(retlink1); +	bpf_link__destroy(retlink2); +} + +static void uprobe_subtest(struct test_bpf_cookie *skel) +{ +	DECLARE_LIBBPF_OPTS(bpf_uprobe_opts, opts); +	struct bpf_link *link1 = NULL, *link2 = NULL; +	struct bpf_link *retlink1 = NULL, *retlink2 = NULL; +	size_t uprobe_offset; +	ssize_t base_addr; + +	base_addr = get_base_addr(); +	uprobe_offset = get_uprobe_offset(&get_base_addr, base_addr); + +	/* attach two uprobes */ +	opts.bpf_cookie = 0x100; +	opts.retprobe = false; +	link1 = bpf_program__attach_uprobe_opts(skel->progs.handle_uprobe, 0 /* self pid */, +						"/proc/self/exe", uprobe_offset, &opts); +	if (!ASSERT_OK_PTR(link1, "link1")) +		goto cleanup; + +	opts.bpf_cookie = 0x200; +	opts.retprobe = false; +	link2 = 
bpf_program__attach_uprobe_opts(skel->progs.handle_uprobe, -1 /* any pid */, +						"/proc/self/exe", uprobe_offset, &opts); +	if (!ASSERT_OK_PTR(link2, "link2")) +		goto cleanup; + +	/* attach two uretprobes */ +	opts.bpf_cookie = 0x1000; +	opts.retprobe = true; +	retlink1 = bpf_program__attach_uprobe_opts(skel->progs.handle_uretprobe, -1 /* any pid */, +						   "/proc/self/exe", uprobe_offset, &opts); +	if (!ASSERT_OK_PTR(retlink1, "retlink1")) +		goto cleanup; + +	opts.bpf_cookie = 0x2000; +	opts.retprobe = true; +	retlink2 = bpf_program__attach_uprobe_opts(skel->progs.handle_uretprobe, 0 /* self pid */, +						   "/proc/self/exe", uprobe_offset, &opts); +	if (!ASSERT_OK_PTR(retlink2, "retlink2")) +		goto cleanup; + +	/* trigger uprobe && uretprobe */ +	get_base_addr(); + +	ASSERT_EQ(skel->bss->uprobe_res, 0x100 | 0x200, "uprobe_res"); +	ASSERT_EQ(skel->bss->uretprobe_res, 0x1000 | 0x2000, "uretprobe_res"); + +cleanup: +	bpf_link__destroy(link1); +	bpf_link__destroy(link2); +	bpf_link__destroy(retlink1); +	bpf_link__destroy(retlink2); +} + +static void tp_subtest(struct test_bpf_cookie *skel) +{ +	DECLARE_LIBBPF_OPTS(bpf_tracepoint_opts, opts); +	struct bpf_link *link1 = NULL, *link2 = NULL, *link3 = NULL; + +	/* attach first tp prog */ +	opts.bpf_cookie = 0x10000; +	link1 = bpf_program__attach_tracepoint_opts(skel->progs.handle_tp1, +						    "syscalls", "sys_enter_nanosleep", &opts); +	if (!ASSERT_OK_PTR(link1, "link1")) +		goto cleanup; + +	/* attach second tp prog */ +	opts.bpf_cookie = 0x20000; +	link2 = bpf_program__attach_tracepoint_opts(skel->progs.handle_tp2, +						    "syscalls", "sys_enter_nanosleep", &opts); +	if (!ASSERT_OK_PTR(link2, "link2")) +		goto cleanup; + +	/* trigger tracepoints */ +	usleep(1); + +	ASSERT_EQ(skel->bss->tp_res, 0x10000 | 0x20000, "tp_res1"); + +	/* now we detach first prog and will attach third one, which causes +	 * two internal calls to bpf_prog_array_copy(), shuffling +	 * bpf_prog_array_items around. We test here that we don't lose track +	 * of associated bpf_cookies. 
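 *
 * On the BPF side (not shown in this hunk) each program ORs its
 * attachment-time value into the result via the bpf_get_attach_cookie()
 * helper, roughly:
 *
 *	kprobe_res |= bpf_get_attach_cookie(ctx);
 *
 * so a lost or misrouted cookie surfaces as a wrong OR-mask in the
 * ASSERT_EQ checks below.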
+	 */ +	bpf_link__destroy(link1); +	link1 = NULL; +	kern_sync_rcu(); +	skel->bss->tp_res = 0; + +	/* attach third tp prog */ +	opts.bpf_cookie = 0x40000; +	link3 = bpf_program__attach_tracepoint_opts(skel->progs.handle_tp3, +						    "syscalls", "sys_enter_nanosleep", &opts); +	if (!ASSERT_OK_PTR(link3, "link3")) +		goto cleanup; + +	/* trigger tracepoints */ +	usleep(1); + +	ASSERT_EQ(skel->bss->tp_res, 0x20000 | 0x40000, "tp_res2"); + +cleanup: +	bpf_link__destroy(link1); +	bpf_link__destroy(link2); +	bpf_link__destroy(link3); +} + +static void burn_cpu(void) +{ +	volatile int j = 0; +	cpu_set_t cpu_set; +	int i, err; + +	/* generate some branches on cpu 0 */ +	CPU_ZERO(&cpu_set); +	CPU_SET(0, &cpu_set); +	err = pthread_setaffinity_np(pthread_self(), sizeof(cpu_set), &cpu_set); +	ASSERT_OK(err, "set_thread_affinity"); + +	/* spin the loop for a while (random high number) */ +	for (i = 0; i < 1000000; ++i) +		++j; +} + +static void pe_subtest(struct test_bpf_cookie *skel) +{ +	DECLARE_LIBBPF_OPTS(bpf_perf_event_opts, opts); +	struct bpf_link *link = NULL; +	struct perf_event_attr attr; +	int pfd = -1; + +	/* create perf event */ +	memset(&attr, 0, sizeof(attr)); +	attr.size = sizeof(attr); +	attr.type = PERF_TYPE_SOFTWARE; +	attr.config = PERF_COUNT_SW_CPU_CLOCK; +	attr.freq = 1; +	attr.sample_freq = 4000; +	pfd = syscall(__NR_perf_event_open, &attr, -1, 0, -1, PERF_FLAG_FD_CLOEXEC); +	if (!ASSERT_GE(pfd, 0, "perf_fd")) +		goto cleanup; + +	opts.bpf_cookie = 0x100000; +	link = bpf_program__attach_perf_event_opts(skel->progs.handle_pe, pfd, &opts); +	if (!ASSERT_OK_PTR(link, "link1")) +		goto cleanup; + +	burn_cpu(); /* trigger BPF prog */ + +	ASSERT_EQ(skel->bss->pe_res, 0x100000, "pe_res1"); + +	/* prevent bpf_link__destroy() closing pfd itself */ +	bpf_link__disconnect(link); +	/* close BPF link's FD explicitly */ +	close(bpf_link__fd(link)); +	/* free up memory used by struct bpf_link */ +	bpf_link__destroy(link); +	link = NULL; +	kern_sync_rcu(); +	skel->bss->pe_res = 0; + +	opts.bpf_cookie = 0x200000; +	link = bpf_program__attach_perf_event_opts(skel->progs.handle_pe, pfd, &opts); +	if (!ASSERT_OK_PTR(link, "link2")) +		goto cleanup; + +	burn_cpu(); /* trigger BPF prog */ + +	ASSERT_EQ(skel->bss->pe_res, 0x200000, "pe_res2"); + +cleanup: +	close(pfd); +	bpf_link__destroy(link); +} + +void test_bpf_cookie(void) +{ +	struct test_bpf_cookie *skel; + +	skel = test_bpf_cookie__open_and_load(); +	if (!ASSERT_OK_PTR(skel, "skel_open")) +		return; + +	skel->bss->my_tid = syscall(SYS_gettid); + +	if (test__start_subtest("kprobe")) +		kprobe_subtest(skel); +	if (test__start_subtest("uprobe")) +		uprobe_subtest(skel); +	if (test__start_subtest("tracepoint")) +		tp_subtest(skel); +	if (test__start_subtest("perf_event")) +		pe_subtest(skel); + +	test_bpf_cookie__destroy(skel); +} diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_iter.c b/tools/testing/selftests/bpf/prog_tests/bpf_iter.c index 1f1aade56504..77ac24b191d4 100644 --- a/tools/testing/selftests/bpf/prog_tests/bpf_iter.c +++ b/tools/testing/selftests/bpf/prog_tests/bpf_iter.c @@ -13,6 +13,7 @@  #include "bpf_iter_tcp6.skel.h"  #include "bpf_iter_udp4.skel.h"  #include "bpf_iter_udp6.skel.h" +#include "bpf_iter_unix.skel.h"  #include "bpf_iter_test_kern1.skel.h"  #include "bpf_iter_test_kern2.skel.h"  #include "bpf_iter_test_kern3.skel.h" @@ -313,6 +314,19 @@ static void test_udp6(void)  	bpf_iter_udp6__destroy(skel);  } +static void test_unix(void) +{ +	struct bpf_iter_unix *skel; + +	skel = bpf_iter_unix__open_and_load(); +	if 
(!ASSERT_OK_PTR(skel, "bpf_iter_unix__open_and_load")) +		return; + +	do_dummy_read(skel->progs.dump_unix); + +	bpf_iter_unix__destroy(skel); +} +  /* The expected string is less than 16 bytes */  static int do_read_with_fd(int iter_fd, const char *expected,  			   bool read_one_char) @@ -1255,6 +1269,8 @@ void test_bpf_iter(void)  		test_udp4();  	if (test__start_subtest("udp6"))  		test_udp6(); +	if (test__start_subtest("unix")) +		test_unix();  	if (test__start_subtest("anon"))  		test_anon_iter(false);  	if (test__start_subtest("anon-read-one-char")) diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_iter_setsockopt.c b/tools/testing/selftests/bpf/prog_tests/bpf_iter_setsockopt.c new file mode 100644 index 000000000000..85babb0487b3 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/bpf_iter_setsockopt.c @@ -0,0 +1,226 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2021 Facebook */ +#define _GNU_SOURCE +#include <sched.h> +#include <test_progs.h> +#include "network_helpers.h" +#include "bpf_dctcp.skel.h" +#include "bpf_cubic.skel.h" +#include "bpf_iter_setsockopt.skel.h" + +static int create_netns(void) +{ +	if (!ASSERT_OK(unshare(CLONE_NEWNET), "create netns")) +		return -1; + +	if (!ASSERT_OK(system("ip link set dev lo up"), "bring up lo")) +		return -1; + +	return 0; +} + +static unsigned int set_bpf_cubic(int *fds, unsigned int nr_fds) +{ +	unsigned int i; + +	for (i = 0; i < nr_fds; i++) { +		if (setsockopt(fds[i], SOL_TCP, TCP_CONGESTION, "bpf_cubic", +			       sizeof("bpf_cubic"))) +			return i; +	} + +	return nr_fds; +} + +static unsigned int check_bpf_dctcp(int *fds, unsigned int nr_fds) +{ +	char tcp_cc[16]; +	socklen_t optlen = sizeof(tcp_cc); +	unsigned int i; + +	for (i = 0; i < nr_fds; i++) { +		if (getsockopt(fds[i], SOL_TCP, TCP_CONGESTION, +			       tcp_cc, &optlen) || +		    strcmp(tcp_cc, "bpf_dctcp")) +			return i; +	} + +	return nr_fds; +} + +static int *make_established(int listen_fd, unsigned int nr_est, +			     int **paccepted_fds) +{ +	int *est_fds, *accepted_fds; +	unsigned int i; + +	est_fds = malloc(sizeof(*est_fds) * nr_est); +	if (!est_fds) +		return NULL; + +	accepted_fds = malloc(sizeof(*accepted_fds) * nr_est); +	if (!accepted_fds) { +		free(est_fds); +		return NULL; +	} + +	for (i = 0; i < nr_est; i++) { +		est_fds[i] = connect_to_fd(listen_fd, 0); +		if (est_fds[i] == -1) +			break; +		if (set_bpf_cubic(&est_fds[i], 1) != 1) { +			close(est_fds[i]); +			break; +		} + +		accepted_fds[i] = accept(listen_fd, NULL, 0); +		if (accepted_fds[i] == -1) { +			close(est_fds[i]); +			break; +		} +	} + +	if (!ASSERT_EQ(i, nr_est, "create established fds")) { +		free_fds(accepted_fds, i); +		free_fds(est_fds, i); +		return NULL; +	} + +	*paccepted_fds = accepted_fds; +	return est_fds; +} + +static unsigned short get_local_port(int fd) +{ +	struct sockaddr_in6 addr; +	socklen_t addrlen = sizeof(addr); + +	if (!getsockname(fd, &addr, &addrlen)) +		return ntohs(addr.sin6_port); + +	return 0; +} + +static void do_bpf_iter_setsockopt(struct bpf_iter_setsockopt *iter_skel, +				   bool random_retry) +{ +	int *reuse_listen_fds = NULL, *accepted_fds = NULL, *est_fds = NULL; +	unsigned int nr_reuse_listens = 256, nr_est = 256; +	int err, iter_fd = -1, listen_fd = -1; +	char buf; + +	/* Prepare non-reuseport listen_fd */ +	listen_fd = start_server(AF_INET6, SOCK_STREAM, "::1", 0, 0); +	if (!ASSERT_GE(listen_fd, 0, "start_server")) +		return; +	if (!ASSERT_EQ(set_bpf_cubic(&listen_fd, 1), 1, +		       "set listen_fd to cubic")) +		goto done; +	
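
The set_bpf_cubic()/check_bpf_dctcp() helpers above are thin wrappers
around the TCP_CONGESTION socket option. A minimal standalone sketch of
that pattern, with a hypothetical switch_cc() helper and error handling
reduced to return codes:

	#include <netinet/in.h>
	#include <netinet/tcp.h>
	#include <string.h>
	#include <sys/socket.h>

	/* Set a socket's congestion control, then read it back to verify. */
	static int switch_cc(int fd, const char *cc)
	{
		char buf[16];
		socklen_t len = sizeof(buf);

		if (setsockopt(fd, SOL_TCP, TCP_CONGESTION, cc, strlen(cc) + 1))
			return -1;	/* e.g. ENOENT: CC not registered */
		if (getsockopt(fd, SOL_TCP, TCP_CONGESTION, buf, &len))
			return -1;
		return strcmp(buf, cc) ? -1 : 0;
	}

As in the test, setsockopt() fails with ENOENT if the named algorithm
(here the bpf_cubic/bpf_dctcp struct_ops) has not been registered.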
iter_skel->bss->listen_hport = get_local_port(listen_fd); +	if (!ASSERT_NEQ(iter_skel->bss->listen_hport, 0, +			"get_local_port(listen_fd)")) +		goto done; + +	/* Connect to non-reuseport listen_fd */ +	est_fds = make_established(listen_fd, nr_est, &accepted_fds); +	if (!ASSERT_OK_PTR(est_fds, "create established")) +		goto done; + +	/* Prepare reuseport listen fds */ +	reuse_listen_fds = start_reuseport_server(AF_INET6, SOCK_STREAM, +						  "::1", 0, 0, +						  nr_reuse_listens); +	if (!ASSERT_OK_PTR(reuse_listen_fds, "start_reuseport_server")) +		goto done; +	if (!ASSERT_EQ(set_bpf_cubic(reuse_listen_fds, nr_reuse_listens), +		       nr_reuse_listens, "set reuse_listen_fds to cubic")) +		goto done; +	iter_skel->bss->reuse_listen_hport = get_local_port(reuse_listen_fds[0]); +	if (!ASSERT_NEQ(iter_skel->bss->reuse_listen_hport, 0, +			"get_local_port(reuse_listen_fds[0])")) +		goto done; + +	/* Run bpf tcp iter to switch from bpf_cubic to bpf_dctcp */ +	iter_skel->bss->random_retry = random_retry; +	iter_fd = bpf_iter_create(bpf_link__fd(iter_skel->links.change_tcp_cc)); +	if (!ASSERT_GE(iter_fd, 0, "create iter_fd")) +		goto done; + +	while ((err = read(iter_fd, &buf, sizeof(buf))) == -1 && +	       errno == EAGAIN) +		; +	if (!ASSERT_OK(err, "read iter error")) +		goto done; + +	/* Check reuseport listen fds for dctcp */ +	ASSERT_EQ(check_bpf_dctcp(reuse_listen_fds, nr_reuse_listens), +		  nr_reuse_listens, +		  "check reuse_listen_fds dctcp"); + +	/* Check non reuseport listen fd for dctcp */ +	ASSERT_EQ(check_bpf_dctcp(&listen_fd, 1), 1, +		  "check listen_fd dctcp"); + +	/* Check established fds for dctcp */ +	ASSERT_EQ(check_bpf_dctcp(est_fds, nr_est), nr_est, +		  "check est_fds dctcp"); + +	/* Check accepted fds for dctcp */ +	ASSERT_EQ(check_bpf_dctcp(accepted_fds, nr_est), nr_est, +		  "check accepted_fds dctcp"); + +done: +	if (iter_fd != -1) +		close(iter_fd); +	if (listen_fd != -1) +		close(listen_fd); +	free_fds(reuse_listen_fds, nr_reuse_listens); +	free_fds(accepted_fds, nr_est); +	free_fds(est_fds, nr_est); +} + +void test_bpf_iter_setsockopt(void) +{ +	struct bpf_iter_setsockopt *iter_skel = NULL; +	struct bpf_cubic *cubic_skel = NULL; +	struct bpf_dctcp *dctcp_skel = NULL; +	struct bpf_link *cubic_link = NULL; +	struct bpf_link *dctcp_link = NULL; + +	if (create_netns()) +		return; + +	/* Load iter_skel */ +	iter_skel = bpf_iter_setsockopt__open_and_load(); +	if (!ASSERT_OK_PTR(iter_skel, "iter_skel")) +		return; +	iter_skel->links.change_tcp_cc = bpf_program__attach_iter(iter_skel->progs.change_tcp_cc, NULL); +	if (!ASSERT_OK_PTR(iter_skel->links.change_tcp_cc, "attach iter")) +		goto done; + +	/* Load bpf_cubic */ +	cubic_skel = bpf_cubic__open_and_load(); +	if (!ASSERT_OK_PTR(cubic_skel, "cubic_skel")) +		goto done; +	cubic_link = bpf_map__attach_struct_ops(cubic_skel->maps.cubic); +	if (!ASSERT_OK_PTR(cubic_link, "cubic_link")) +		goto done; + +	/* Load bpf_dctcp */ +	dctcp_skel = bpf_dctcp__open_and_load(); +	if (!ASSERT_OK_PTR(dctcp_skel, "dctcp_skel")) +		goto done; +	dctcp_link = bpf_map__attach_struct_ops(dctcp_skel->maps.dctcp); +	if (!ASSERT_OK_PTR(dctcp_link, "dctcp_link")) +		goto done; + +	do_bpf_iter_setsockopt(iter_skel, true); +	do_bpf_iter_setsockopt(iter_skel, false); + +done: +	bpf_link__destroy(cubic_link); +	bpf_link__destroy(dctcp_link); +	bpf_cubic__destroy(cubic_skel); +	bpf_dctcp__destroy(dctcp_skel); +	bpf_iter_setsockopt__destroy(iter_skel); +} diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_tcp_ca.c 
b/tools/testing/selftests/bpf/prog_tests/bpf_tcp_ca.c index efe1e979affb..94e03df69d71 100644 --- a/tools/testing/selftests/bpf/prog_tests/bpf_tcp_ca.c +++ b/tools/testing/selftests/bpf/prog_tests/bpf_tcp_ca.c @@ -4,37 +4,22 @@  #include <linux/err.h>  #include <netinet/tcp.h>  #include <test_progs.h> +#include "network_helpers.h"  #include "bpf_dctcp.skel.h"  #include "bpf_cubic.skel.h"  #include "bpf_tcp_nogpl.skel.h" +#include "bpf_dctcp_release.skel.h"  #define min(a, b) ((a) < (b) ? (a) : (b)) +#ifndef ENOTSUPP +#define ENOTSUPP 524 +#endif +  static const unsigned int total_bytes = 10 * 1024 * 1024; -static const struct timeval timeo_sec = { .tv_sec = 10 }; -static const size_t timeo_optlen = sizeof(timeo_sec);  static int expected_stg = 0xeB9F;  static int stop, duration; -static int settimeo(int fd) -{ -	int err; - -	err = setsockopt(fd, SOL_SOCKET, SO_RCVTIMEO, &timeo_sec, -			 timeo_optlen); -	if (CHECK(err == -1, "setsockopt(fd, SO_RCVTIMEO)", "errno:%d\n", -		  errno)) -		return -1; - -	err = setsockopt(fd, SOL_SOCKET, SO_SNDTIMEO, &timeo_sec, -			 timeo_optlen); -	if (CHECK(err == -1, "setsockopt(fd, SO_SNDTIMEO)", "errno:%d\n", -		  errno)) -		return -1; - -	return 0; -} -  static int settcpca(int fd, const char *tcp_ca)  {  	int err; @@ -61,7 +46,7 @@ static void *server(void *arg)  		goto done;  	} -	if (settimeo(fd)) { +	if (settimeo(fd, 0)) {  		err = -errno;  		goto done;  	} @@ -114,7 +99,7 @@ static void do_test(const char *tcp_ca, const struct bpf_map *sk_stg_map)  	}  	if (settcpca(lfd, tcp_ca) || settcpca(fd, tcp_ca) || -	    settimeo(lfd) || settimeo(fd)) +	    settimeo(lfd, 0) || settimeo(fd, 0))  		goto done;  	/* bind, listen and start server thread to accept */ @@ -267,6 +252,77 @@ static void test_invalid_license(void)  	libbpf_set_print(old_print_fn);  } +static void test_dctcp_fallback(void) +{ +	int err, lfd = -1, cli_fd = -1, srv_fd = -1; +	struct network_helper_opts opts = { +		.cc = "cubic", +	}; +	struct bpf_dctcp *dctcp_skel; +	struct bpf_link *link = NULL; +	char srv_cc[16]; +	socklen_t cc_len = sizeof(srv_cc); + +	dctcp_skel = bpf_dctcp__open(); +	if (!ASSERT_OK_PTR(dctcp_skel, "dctcp_skel")) +		return; +	strcpy(dctcp_skel->rodata->fallback, "cubic"); +	if (!ASSERT_OK(bpf_dctcp__load(dctcp_skel), "bpf_dctcp__load")) +		goto done; + +	link = bpf_map__attach_struct_ops(dctcp_skel->maps.dctcp); +	if (!ASSERT_OK_PTR(link, "dctcp link")) +		goto done; + +	lfd = start_server(AF_INET6, SOCK_STREAM, "::1", 0, 0); +	if (!ASSERT_GE(lfd, 0, "lfd") || +	    !ASSERT_OK(settcpca(lfd, "bpf_dctcp"), "lfd=>bpf_dctcp")) +		goto done; + +	cli_fd = connect_to_fd_opts(lfd, &opts); +	if (!ASSERT_GE(cli_fd, 0, "cli_fd")) +		goto done; + +	srv_fd = accept(lfd, NULL, 0); +	if (!ASSERT_GE(srv_fd, 0, "srv_fd")) +		goto done; +	ASSERT_STREQ(dctcp_skel->bss->cc_res, "cubic", "cc_res"); +	ASSERT_EQ(dctcp_skel->bss->tcp_cdg_res, -ENOTSUPP, "tcp_cdg_res"); + +	err = getsockopt(srv_fd, SOL_TCP, TCP_CONGESTION, srv_cc, &cc_len); +	if (!ASSERT_OK(err, "getsockopt(srv_fd, TCP_CONGESTION)")) +		goto done; +	ASSERT_STREQ(srv_cc, "cubic", "srv_fd cc"); + +done: +	bpf_link__destroy(link); +	bpf_dctcp__destroy(dctcp_skel); +	if (lfd != -1) +		close(lfd); +	if (srv_fd != -1) +		close(srv_fd); +	if (cli_fd != -1) +		close(cli_fd); +} + +static void test_rel_setsockopt(void) +{ +	struct bpf_dctcp_release *rel_skel; +	libbpf_print_fn_t old_print_fn; + +	err_str = "unknown func bpf_setsockopt"; +	found = false; + +	old_print_fn = libbpf_set_print(libbpf_debug_print); +	rel_skel = 
bpf_dctcp_release__open_and_load(); +	libbpf_set_print(old_print_fn); + +	ASSERT_ERR_PTR(rel_skel, "rel_skel"); +	ASSERT_TRUE(found, "expected_err_msg"); + +	bpf_dctcp_release__destroy(rel_skel); +} +  void test_bpf_tcp_ca(void)  {  	if (test__start_subtest("dctcp")) @@ -275,4 +331,8 @@ void test_bpf_tcp_ca(void)  		test_cubic();  	if (test__start_subtest("invalid_license"))  		test_invalid_license(); +	if (test__start_subtest("dctcp_fallback")) +		test_dctcp_fallback(); +	if (test__start_subtest("rel_setsockopt")) +		test_rel_setsockopt();  } diff --git a/tools/testing/selftests/bpf/prog_tests/btf.c b/tools/testing/selftests/bpf/prog_tests/btf.c index 857e3f26086f..649f87382c8d 100644 --- a/tools/testing/selftests/bpf/prog_tests/btf.c +++ b/tools/testing/selftests/bpf/prog_tests/btf.c @@ -4350,7 +4350,8 @@ static void do_test_file(unsigned int test_num)  		goto done;  	} -	err = btf__get_from_id(info.btf_id, &btf); +	btf = btf__load_from_kernel_by_id(info.btf_id); +	err = libbpf_get_error(btf);  	if (CHECK(err, "cannot get btf from kernel, err: %d", err))  		goto done; @@ -4386,6 +4387,7 @@ skip:  	fprintf(stderr, "OK");  done: +	btf__free(btf);  	free(func_info);  	bpf_object__close(obj);  } diff --git a/tools/testing/selftests/bpf/prog_tests/btf_dump.c b/tools/testing/selftests/bpf/prog_tests/btf_dump.c index 1b90e684ff13..52ccf0cf35e1 100644 --- a/tools/testing/selftests/bpf/prog_tests/btf_dump.c +++ b/tools/testing/selftests/bpf/prog_tests/btf_dump.c @@ -232,7 +232,593 @@ err_out:  	btf__free(btf);  } +#define STRSIZE				4096 + +static void btf_dump_snprintf(void *ctx, const char *fmt, va_list args) +{ +	char *s = ctx, new[STRSIZE]; + +	vsnprintf(new, STRSIZE, fmt, args); +	if (strlen(s) < STRSIZE) +		strncat(s, new, STRSIZE - strlen(s) - 1); +} + +static int btf_dump_data(struct btf *btf, struct btf_dump *d, +			 char *name, char *prefix, __u64 flags, void *ptr, +			 size_t ptr_sz, char *str, const char *expected_val) +{ +	DECLARE_LIBBPF_OPTS(btf_dump_type_data_opts, opts); +	size_t type_sz; +	__s32 type_id; +	int ret = 0; + +	if (flags & BTF_F_COMPACT) +		opts.compact = true; +	if (flags & BTF_F_NONAME) +		opts.skip_names = true; +	if (flags & BTF_F_ZERO) +		opts.emit_zeroes = true; +	if (prefix) { +		ASSERT_STRNEQ(name, prefix, strlen(prefix), +			      "verify prefix match"); +		name += strlen(prefix) + 1; +	} +	type_id = btf__find_by_name(btf, name); +	if (!ASSERT_GE(type_id, 0, "find type id")) +		return -ENOENT; +	type_sz = btf__resolve_size(btf, type_id); +	str[0] = '\0'; +	ret = btf_dump__dump_type_data(d, type_id, ptr, ptr_sz, &opts); +	if (type_sz <= ptr_sz) { +		if (!ASSERT_EQ(ret, type_sz, "failed/unexpected type_sz")) +			return -EINVAL; +	} else { +		if (!ASSERT_EQ(ret, -E2BIG, "failed to return -E2BIG")) +			return -EINVAL; +	} +	if (!ASSERT_STREQ(str, expected_val, "ensure expected/actual match")) +		return -EFAULT; +	return 0; +} + +#define TEST_BTF_DUMP_DATA(_b, _d, _prefix, _str, _type, _flags,	\ +			   _expected, ...)				\ +	do {								\ +		char __ptrtype[64] = #_type;				\ +		char *_ptrtype = (char *)__ptrtype;			\ +		_type _ptrdata = __VA_ARGS__;				\ +		void *_ptr = &_ptrdata;					\ +									\ +		(void) btf_dump_data(_b, _d, _ptrtype, _prefix, _flags,	\ +				     _ptr, sizeof(_type), _str,		\ +				     _expected);			\ +	} while (0) + +/* Use where expected data string matches its stringified declaration */ +#define TEST_BTF_DUMP_DATA_C(_b, _d, _prefix,  _str, _type, _flags,	\ +			     ...)					
\ +	TEST_BTF_DUMP_DATA(_b, _d, _prefix, _str, _type, _flags,	\ +			   "(" #_type ")" #__VA_ARGS__,	__VA_ARGS__) + +/* overflow test; pass typesize < expected type size, ensure E2BIG returned */ +#define TEST_BTF_DUMP_DATA_OVER(_b, _d, _prefix, _str, _type, _type_sz,	\ +				_expected, ...)				\ +	do {								\ +		char __ptrtype[64] = #_type;				\ +		char *_ptrtype = (char *)__ptrtype;			\ +		_type _ptrdata = __VA_ARGS__;				\ +		void *_ptr = &_ptrdata;					\ +									\ +		(void) btf_dump_data(_b, _d, _ptrtype, _prefix, 0,	\ +				     _ptr, _type_sz, _str, _expected);	\ +	} while (0) + +#define TEST_BTF_DUMP_VAR(_b, _d, _prefix, _str, _var, _type, _flags,	\ +			  _expected, ...)				\ +	do {								\ +		_type _ptrdata = __VA_ARGS__;				\ +		void *_ptr = &_ptrdata;					\ +									\ +		(void) btf_dump_data(_b, _d, _var, _prefix, _flags,	\ +				     _ptr, sizeof(_type), _str,		\ +				     _expected);			\ +	} while (0) + +static void test_btf_dump_int_data(struct btf *btf, struct btf_dump *d, +				   char *str) +{ +#ifdef __SIZEOF_INT128__ +	__int128 i = 0xffffffffffffffff; + +	/* this dance is required because we cannot directly initialize +	 * a 128-bit value to anything larger than a 64-bit value. +	 */ +	i = (i << 64) | (i - 1); +#endif +	/* simple int */ +	TEST_BTF_DUMP_DATA_C(btf, d, NULL, str, int, BTF_F_COMPACT, 1234); +	TEST_BTF_DUMP_DATA(btf, d, NULL, str, int, BTF_F_COMPACT | BTF_F_NONAME, +			   "1234", 1234); +	TEST_BTF_DUMP_DATA(btf, d, NULL, str, int, 0, "(int)1234", 1234); + +	/* zero value should be printed at toplevel */ +	TEST_BTF_DUMP_DATA(btf, d, NULL, str, int, BTF_F_COMPACT, "(int)0", 0); +	TEST_BTF_DUMP_DATA(btf, d, NULL, str, int, BTF_F_COMPACT | BTF_F_NONAME, +			   "0", 0); +	TEST_BTF_DUMP_DATA(btf, d, NULL, str, int, BTF_F_COMPACT | BTF_F_ZERO, +			   "(int)0", 0); +	TEST_BTF_DUMP_DATA(btf, d, NULL, str, int, +			   BTF_F_COMPACT | BTF_F_NONAME | BTF_F_ZERO, +			   "0", 0); +	TEST_BTF_DUMP_DATA_C(btf, d, NULL, str, int, BTF_F_COMPACT, -4567); +	TEST_BTF_DUMP_DATA(btf, d, NULL, str, int, BTF_F_COMPACT | BTF_F_NONAME, +			   "-4567", -4567); +	TEST_BTF_DUMP_DATA(btf, d, NULL, str, int, 0, "(int)-4567", -4567); + +	TEST_BTF_DUMP_DATA_OVER(btf, d, NULL, str, int, sizeof(int)-1, "", 1); + +#ifdef __SIZEOF_INT128__ +	TEST_BTF_DUMP_DATA(btf, d, NULL, str, __int128, BTF_F_COMPACT, +			   "(__int128)0xffffffffffffffff", +			   0xffffffffffffffff); +	ASSERT_OK(btf_dump_data(btf, d, "__int128", NULL, 0, &i, 16, str, +				"(__int128)0xfffffffffffffffffffffffffffffffe"), +		  "dump __int128"); +#endif +} + +static void test_btf_dump_float_data(struct btf *btf, struct btf_dump *d, +				     char *str) +{ +	float t1 = 1.234567; +	float t2 = -1.234567; +	float t3 = 0.0; +	double t4 = 5.678912; +	double t5 = -5.678912; +	double t6 = 0.0; +	long double t7 = 9.876543; +	long double t8 = -9.876543; +	long double t9 = 0.0; + +	/* since the kernel does not likely have any float types in its BTF, we +	 * will need to add some of various sizes. 
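 *
 * btf__add_float() is the libbpf API for appending a BTF_KIND_FLOAT
 * type of a given name and byte size to a writable struct btf, which
 * is what the calls just below rely on.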
+	 */ + +	ASSERT_GT(btf__add_float(btf, "test_float", 4), 0, "add float"); +	ASSERT_OK(btf_dump_data(btf, d, "test_float", NULL, 0, &t1, 4, str, +				"(test_float)1.234567"), "dump float"); +	ASSERT_OK(btf_dump_data(btf, d, "test_float", NULL, 0, &t2, 4, str, +				"(test_float)-1.234567"), "dump float"); +	ASSERT_OK(btf_dump_data(btf, d, "test_float", NULL, 0, &t3, 4, str, +				"(test_float)0.000000"), "dump float"); + +	ASSERT_GT(btf__add_float(btf, "test_double", 8), 0, "add_double"); +	ASSERT_OK(btf_dump_data(btf, d, "test_double", NULL, 0, &t4, 8, str, +		  "(test_double)5.678912"), "dump double"); +	ASSERT_OK(btf_dump_data(btf, d, "test_double", NULL, 0, &t5, 8, str, +		  "(test_double)-5.678912"), "dump double"); +	ASSERT_OK(btf_dump_data(btf, d, "test_double", NULL, 0, &t6, 8, str, +				"(test_double)0.000000"), "dump double"); + +	ASSERT_GT(btf__add_float(btf, "test_long_double", 16), 0, "add long double"); +	ASSERT_OK(btf_dump_data(btf, d, "test_long_double", NULL, 0, &t7, 16, +				str, "(test_long_double)9.876543"), +				"dump long_double"); +	ASSERT_OK(btf_dump_data(btf, d, "test_long_double", NULL, 0, &t8, 16, +				str, "(test_long_double)-9.876543"), +				"dump long_double"); +	ASSERT_OK(btf_dump_data(btf, d, "test_long_double", NULL, 0, &t9, 16, +				str, "(test_long_double)0.000000"), +				"dump long_double"); +} + +static void test_btf_dump_char_data(struct btf *btf, struct btf_dump *d, +				    char *str) +{ +	/* simple char */ +	TEST_BTF_DUMP_DATA_C(btf, d, NULL, str, char, BTF_F_COMPACT, 100); +	TEST_BTF_DUMP_DATA(btf, d, NULL, str, char, BTF_F_COMPACT | BTF_F_NONAME, +			   "100", 100); +	TEST_BTF_DUMP_DATA(btf, d, NULL, str, char, 0, "(char)100", 100); +	/* zero value should be printed at toplevel */ +	TEST_BTF_DUMP_DATA(btf, d, NULL, str, char, BTF_F_COMPACT, +			   "(char)0", 0); +	TEST_BTF_DUMP_DATA(btf, d, NULL, str, char, BTF_F_COMPACT | BTF_F_NONAME, +			   "0", 0); +	TEST_BTF_DUMP_DATA(btf, d, NULL, str, char, BTF_F_COMPACT | BTF_F_ZERO, +			   "(char)0", 0); +	TEST_BTF_DUMP_DATA(btf, d, NULL, str, char, BTF_F_COMPACT | BTF_F_NONAME | BTF_F_ZERO, +			   "0", 0); +	TEST_BTF_DUMP_DATA(btf, d, NULL, str, char, 0, "(char)0", 0); + +	TEST_BTF_DUMP_DATA_OVER(btf, d, NULL, str, char, sizeof(char)-1, "", 100); +} + +static void test_btf_dump_typedef_data(struct btf *btf, struct btf_dump *d, +				       char *str) +{ +	/* simple typedef */ +	TEST_BTF_DUMP_DATA_C(btf, d, NULL, str, uint64_t, BTF_F_COMPACT, 100); +	TEST_BTF_DUMP_DATA(btf, d, NULL, str, u64, BTF_F_COMPACT | BTF_F_NONAME, +			   "1", 1); +	TEST_BTF_DUMP_DATA(btf, d, NULL, str, u64, 0, "(u64)1", 1); +	/* zero value should be printed at toplevel */ +	TEST_BTF_DUMP_DATA(btf, d, NULL, str, u64, BTF_F_COMPACT, "(u64)0", 0); +	TEST_BTF_DUMP_DATA(btf, d, NULL, str, u64, BTF_F_COMPACT | BTF_F_NONAME, +			   "0", 0); +	TEST_BTF_DUMP_DATA(btf, d, NULL, str, u64, BTF_F_COMPACT | BTF_F_ZERO, +			   "(u64)0", 0); +	TEST_BTF_DUMP_DATA(btf, d, NULL, str, u64, +			   BTF_F_COMPACT | BTF_F_NONAME | BTF_F_ZERO, +			   "0", 0); +	TEST_BTF_DUMP_DATA(btf, d, NULL, str, u64, 0, "(u64)0", 0); + +	/* typedef struct */ +	TEST_BTF_DUMP_DATA_C(btf, d, NULL, str, atomic_t, BTF_F_COMPACT, +			     {.counter = (int)1,}); +	TEST_BTF_DUMP_DATA(btf, d, NULL, str, atomic_t, BTF_F_COMPACT | BTF_F_NONAME, +			   "{1,}", { .counter = 1 }); +	TEST_BTF_DUMP_DATA(btf, d, NULL, str, atomic_t, 0, +"(atomic_t){\n" +"	.counter = (int)1,\n" +"}", +			   {.counter = 1,}); +	/* typedef with 0 value should be printed at toplevel */ +	
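	/*
	 * Rule under test: zeroed data is normally skipped in the dump,
	 * but it is still emitted when it is the top-level value being
	 * dumped or when BTF_F_ZERO is set; hence "(atomic_t){}" in
	 * compact mode versus the fully spelled-out ".counter = (int)0".
	 */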
TEST_BTF_DUMP_DATA(btf, d, NULL, str, atomic_t, BTF_F_COMPACT, "(atomic_t){}", +			   {.counter = 0,}); +	TEST_BTF_DUMP_DATA(btf, d, NULL, str, atomic_t, BTF_F_COMPACT | BTF_F_NONAME, +			   "{}", {.counter = 0,}); +	TEST_BTF_DUMP_DATA(btf, d, NULL, str, atomic_t, 0, +"(atomic_t){\n" +"}", +			   {.counter = 0,}); +	TEST_BTF_DUMP_DATA(btf, d, NULL, str, atomic_t, BTF_F_COMPACT | BTF_F_ZERO, +			   "(atomic_t){.counter = (int)0,}", +			   {.counter = 0,}); +	TEST_BTF_DUMP_DATA(btf, d, NULL, str, atomic_t, +			   BTF_F_COMPACT | BTF_F_NONAME | BTF_F_ZERO, +			   "{0,}", {.counter = 0,}); +	TEST_BTF_DUMP_DATA(btf, d, NULL, str, atomic_t, BTF_F_ZERO, +"(atomic_t){\n" +"	.counter = (int)0,\n" +"}", +			   { .counter = 0,}); + +	/* overflow should show type but not value since it overflows */ +	TEST_BTF_DUMP_DATA_OVER(btf, d, NULL, str, atomic_t, sizeof(atomic_t)-1, +				"(atomic_t){\n", { .counter = 1}); +} + +static void test_btf_dump_enum_data(struct btf *btf, struct btf_dump *d, +				    char *str) +{ +	/* enum where enum value does (and does not) exist */ +	TEST_BTF_DUMP_DATA_C(btf, d, "enum", str, enum bpf_cmd, BTF_F_COMPACT, +			     BPF_MAP_CREATE); +	TEST_BTF_DUMP_DATA(btf, d, "enum", str, enum bpf_cmd, BTF_F_COMPACT, +			   "(enum bpf_cmd)BPF_MAP_CREATE", 0); +	TEST_BTF_DUMP_DATA(btf, d, "enum", str, enum bpf_cmd, +			   BTF_F_COMPACT | BTF_F_NONAME, +			   "BPF_MAP_CREATE", +			   BPF_MAP_CREATE); +	TEST_BTF_DUMP_DATA(btf, d, "enum", str, enum bpf_cmd, 0, +			   "(enum bpf_cmd)BPF_MAP_CREATE", +			   BPF_MAP_CREATE); +	TEST_BTF_DUMP_DATA(btf, d, "enum", str, enum bpf_cmd, +			   BTF_F_COMPACT | BTF_F_NONAME | BTF_F_ZERO, +			   "BPF_MAP_CREATE", 0); +	TEST_BTF_DUMP_DATA(btf, d, "enum", str, enum bpf_cmd, +			   BTF_F_COMPACT | BTF_F_ZERO, +			   "(enum bpf_cmd)BPF_MAP_CREATE", +			   BPF_MAP_CREATE); +	TEST_BTF_DUMP_DATA(btf, d, "enum", str, enum bpf_cmd, +			   BTF_F_COMPACT | BTF_F_NONAME | BTF_F_ZERO, +			   "BPF_MAP_CREATE", BPF_MAP_CREATE); +	TEST_BTF_DUMP_DATA_C(btf, d, "enum", str, enum bpf_cmd, BTF_F_COMPACT, 2000); +	TEST_BTF_DUMP_DATA(btf, d, "enum", str, enum bpf_cmd, +			   BTF_F_COMPACT | BTF_F_NONAME, +			   "2000", 2000); +	TEST_BTF_DUMP_DATA(btf, d, "enum", str, enum bpf_cmd, 0, +			   "(enum bpf_cmd)2000", 2000); + +	TEST_BTF_DUMP_DATA_OVER(btf, d, "enum", str, enum bpf_cmd, +				sizeof(enum bpf_cmd) - 1, "", BPF_MAP_CREATE); +} + +static void test_btf_dump_struct_data(struct btf *btf, struct btf_dump *d, +				      char *str) +{ +	DECLARE_LIBBPF_OPTS(btf_dump_type_data_opts, opts); +	char zero_data[512] = { }; +	char type_data[512]; +	void *fops = type_data; +	void *skb = type_data; +	size_t type_sz; +	__s32 type_id; +	char *cmpstr; +	int ret; + +	memset(type_data, 255, sizeof(type_data)); + +	/* simple struct */ +	TEST_BTF_DUMP_DATA_C(btf, d, "struct", str, struct btf_enum, BTF_F_COMPACT, +			     {.name_off = (__u32)3,.val = (__s32)-1,}); +	TEST_BTF_DUMP_DATA(btf, d, "struct", str, struct btf_enum, +			   BTF_F_COMPACT | BTF_F_NONAME, +			   "{3,-1,}", +			   { .name_off = 3, .val = -1,}); +	TEST_BTF_DUMP_DATA(btf, d, "struct", str, struct btf_enum, 0, +"(struct btf_enum){\n" +"	.name_off = (__u32)3,\n" +"	.val = (__s32)-1,\n" +"}", +			   { .name_off = 3, .val = -1,}); +	TEST_BTF_DUMP_DATA(btf, d, "struct", str, struct btf_enum, +			   BTF_F_COMPACT | BTF_F_NONAME, +			   "{-1,}", +			   { .name_off = 0, .val = -1,}); +	TEST_BTF_DUMP_DATA(btf, d, "struct", str, struct btf_enum, +			   BTF_F_COMPACT | BTF_F_NONAME | BTF_F_ZERO, +			   "{0,-1,}", +			   { .name_off 
= 0, .val = -1,}); +	/* empty struct should be printed */ +	TEST_BTF_DUMP_DATA(btf, d, "struct", str, struct btf_enum, BTF_F_COMPACT, +			   "(struct btf_enum){}", +			   { .name_off = 0, .val = 0,}); +	TEST_BTF_DUMP_DATA(btf, d, "struct", str, struct btf_enum, +			   BTF_F_COMPACT | BTF_F_NONAME, +			   "{}", +			   { .name_off = 0, .val = 0,}); +	TEST_BTF_DUMP_DATA(btf, d, "struct", str, struct btf_enum, 0, +"(struct btf_enum){\n" +"}", +			   { .name_off = 0, .val = 0,}); +	TEST_BTF_DUMP_DATA(btf, d, "struct", str, struct btf_enum, +			   BTF_F_COMPACT | BTF_F_ZERO, +			   "(struct btf_enum){.name_off = (__u32)0,.val = (__s32)0,}", +			   { .name_off = 0, .val = 0,}); +	TEST_BTF_DUMP_DATA(btf, d, "struct", str, struct btf_enum, +			   BTF_F_ZERO, +"(struct btf_enum){\n" +"	.name_off = (__u32)0,\n" +"	.val = (__s32)0,\n" +"}", +			   { .name_off = 0, .val = 0,}); + +	/* struct with pointers */ +	TEST_BTF_DUMP_DATA(btf, d, "struct", str, struct list_head, BTF_F_COMPACT, +			   "(struct list_head){.next = (struct list_head *)0x1,}", +			   { .next = (struct list_head *)1 }); +	TEST_BTF_DUMP_DATA(btf, d, "struct", str, struct list_head, 0, +"(struct list_head){\n" +"	.next = (struct list_head *)0x1,\n" +"}", +			   { .next = (struct list_head *)1 }); +	/* NULL pointer should not be displayed */ +	TEST_BTF_DUMP_DATA(btf, d, "struct", str, struct list_head, BTF_F_COMPACT, +			   "(struct list_head){}", +			   { .next = (struct list_head *)0 }); +	TEST_BTF_DUMP_DATA(btf, d, "struct", str, struct list_head, 0, +"(struct list_head){\n" +"}", +			   { .next = (struct list_head *)0 }); + +	/* struct with function pointers */ +	type_id = btf__find_by_name(btf, "file_operations"); +	if (ASSERT_GT(type_id, 0, "find type id")) { +		type_sz = btf__resolve_size(btf, type_id); +		str[0] = '\0'; + +		ret = btf_dump__dump_type_data(d, type_id, fops, type_sz, &opts); +		ASSERT_EQ(ret, type_sz, +			  "unexpected return value dumping file_operations"); +		cmpstr = +"(struct file_operations){\n" +"	.owner = (struct module *)0xffffffffffffffff,\n" +"	.llseek = (loff_t (*)(struct file *, loff_t, int))0xffffffffffffffff,"; + +		ASSERT_STRNEQ(str, cmpstr, strlen(cmpstr), "file_operations"); +	} + +	/* struct with char array */ +	TEST_BTF_DUMP_DATA(btf, d, "struct", str, struct bpf_prog_info, BTF_F_COMPACT, +			   "(struct bpf_prog_info){.name = (char[16])['f','o','o',],}", +			   { .name = "foo",}); +	TEST_BTF_DUMP_DATA(btf, d, "struct", str, struct bpf_prog_info, +			   BTF_F_COMPACT | BTF_F_NONAME, +			   "{['f','o','o',],}", +			   {.name = "foo",}); +	TEST_BTF_DUMP_DATA(btf, d, "struct", str, struct bpf_prog_info, 0, +"(struct bpf_prog_info){\n" +"	.name = (char[16])[\n" +"		'f',\n" +"		'o',\n" +"		'o',\n" +"	],\n" +"}", +			   {.name = "foo",}); +	/* leading null char means do not display string */ +	TEST_BTF_DUMP_DATA(btf, d, "struct", str, struct bpf_prog_info, BTF_F_COMPACT, +			   "(struct bpf_prog_info){}", +			   {.name = {'\0', 'f', 'o', 'o'}}); +	/* handle non-printable characters */ +	TEST_BTF_DUMP_DATA(btf, d, "struct", str, struct bpf_prog_info, BTF_F_COMPACT, +			   "(struct bpf_prog_info){.name = (char[16])[1,2,3,],}", +			   { .name = {1, 2, 3, 0}}); + +	/* struct with non-char array */ +	TEST_BTF_DUMP_DATA(btf, d, "struct", str, struct __sk_buff, BTF_F_COMPACT, +			   "(struct __sk_buff){.cb = (__u32[5])[1,2,3,4,5,],}", +			   { .cb = {1, 2, 3, 4, 5,},}); +	TEST_BTF_DUMP_DATA(btf, d, "struct", str, struct __sk_buff, +			   BTF_F_COMPACT | BTF_F_NONAME, +			   "{[1,2,3,4,5,],}", +			   { .cb = { 
1, 2, 3, 4, 5},}); +	TEST_BTF_DUMP_DATA(btf, d, "struct", str, struct __sk_buff, 0, +"(struct __sk_buff){\n" +"	.cb = (__u32[5])[\n" +"		1,\n" +"		2,\n" +"		3,\n" +"		4,\n" +"		5,\n" +"	],\n" +"}", +			   { .cb = { 1, 2, 3, 4, 5},}); +	/* For non-char, arrays, show non-zero values only */ +	TEST_BTF_DUMP_DATA(btf, d, "struct", str, struct __sk_buff, BTF_F_COMPACT, +			   "(struct __sk_buff){.cb = (__u32[5])[0,0,1,0,0,],}", +			   { .cb = { 0, 0, 1, 0, 0},}); +	TEST_BTF_DUMP_DATA(btf, d, "struct", str, struct __sk_buff, 0, +"(struct __sk_buff){\n" +"	.cb = (__u32[5])[\n" +"		0,\n" +"		0,\n" +"		1,\n" +"		0,\n" +"		0,\n" +"	],\n" +"}", +			   { .cb = { 0, 0, 1, 0, 0},}); + +	/* struct with bitfields */ +	TEST_BTF_DUMP_DATA_C(btf, d, "struct", str, struct bpf_insn, BTF_F_COMPACT, +		{.code = (__u8)1,.dst_reg = (__u8)0x2,.src_reg = (__u8)0x3,.off = (__s16)4,.imm = (__s32)5,}); +	TEST_BTF_DUMP_DATA(btf, d, "struct", str, struct bpf_insn, +			   BTF_F_COMPACT | BTF_F_NONAME, +			   "{1,0x2,0x3,4,5,}", +			   { .code = 1, .dst_reg = 0x2, .src_reg = 0x3, .off = 4, +			     .imm = 5,}); +	TEST_BTF_DUMP_DATA(btf, d, "struct", str, struct bpf_insn, 0, +"(struct bpf_insn){\n" +"	.code = (__u8)1,\n" +"	.dst_reg = (__u8)0x2,\n" +"	.src_reg = (__u8)0x3,\n" +"	.off = (__s16)4,\n" +"	.imm = (__s32)5,\n" +"}", +			   {.code = 1, .dst_reg = 2, .src_reg = 3, .off = 4, .imm = 5}); + +	/* zeroed bitfields should not be displayed */ +	TEST_BTF_DUMP_DATA(btf, d, "struct", str, struct bpf_insn, BTF_F_COMPACT, +			   "(struct bpf_insn){.dst_reg = (__u8)0x1,}", +			   { .code = 0, .dst_reg = 1}); + +	/* struct with enum bitfield */ +	type_id = btf__find_by_name(btf, "fs_context"); +	if (ASSERT_GT(type_id,  0, "find fs_context")) { +		type_sz = btf__resolve_size(btf, type_id); +		str[0] = '\0'; + +		opts.emit_zeroes = true; +		ret = btf_dump__dump_type_data(d, type_id, zero_data, type_sz, &opts); +		ASSERT_EQ(ret, type_sz, +			  "unexpected return value dumping fs_context"); + +		ASSERT_NEQ(strstr(str, "FS_CONTEXT_FOR_MOUNT"), NULL, +				  "bitfield value not present"); +	} + +	/* struct with nested anon union */ +	TEST_BTF_DUMP_DATA(btf, d, "struct", str, struct bpf_sock_ops, BTF_F_COMPACT, +			   "(struct bpf_sock_ops){.op = (__u32)1,(union){.args = (__u32[4])[1,2,3,4,],.reply = (__u32)1,.replylong = (__u32[4])[1,2,3,4,],},}", +			   { .op = 1, .args = { 1, 2, 3, 4}}); + +	/* union with nested struct */ +	TEST_BTF_DUMP_DATA(btf, d, "union", str, union bpf_iter_link_info, BTF_F_COMPACT, +			   "(union bpf_iter_link_info){.map = (struct){.map_fd = (__u32)1,},}", +			   { .map = { .map_fd = 1 }}); + +	/* struct skb with nested structs/unions; because type output is so +	 * complex, we don't do a string comparison, just verify we return +	 * the type size as the amount of data displayed. +	 */ +	type_id = btf__find_by_name(btf, "sk_buff"); +	if (ASSERT_GT(type_id, 0, "find struct sk_buff")) { +		type_sz = btf__resolve_size(btf, type_id); +		str[0] = '\0'; + +		ret = btf_dump__dump_type_data(d, type_id, skb, type_sz, &opts); +		ASSERT_EQ(ret, type_sz, +			  "unexpected return value dumping sk_buff"); +	} + +	/* overflow bpf_sock_ops struct with final element nonzero/zero. +	 * Regardless of the value of the final field, we don't have all the +	 * data we need to display it, so we should trigger an overflow. +	 * In other words oveflow checking should trump "is field zero?" +	 * checks because if we've overflowed, it shouldn't matter what the +	 * field is - we can't trust its value so shouldn't display it. 
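 *
 * Concretely, and matching the earlier int/char/enum overflow cases,
 * btf_dump__dump_type_data() returns -E2BIG whenever the supplied
 * data size is smaller than the resolved type size, and the dumped
 * string stops after the last field that was fully available.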
+	 */ +	TEST_BTF_DUMP_DATA_OVER(btf, d, "struct", str, struct bpf_sock_ops, +				sizeof(struct bpf_sock_ops) - 1, +				"(struct bpf_sock_ops){\n\t.op = (__u32)1,\n", +				{ .op = 1, .skb_tcp_flags = 2}); +	TEST_BTF_DUMP_DATA_OVER(btf, d, "struct", str, struct bpf_sock_ops, +				sizeof(struct bpf_sock_ops) - 1, +				"(struct bpf_sock_ops){\n\t.op = (__u32)1,\n", +				{ .op = 1, .skb_tcp_flags = 0}); +} + +static void test_btf_dump_var_data(struct btf *btf, struct btf_dump *d, +				   char *str) +{ +	TEST_BTF_DUMP_VAR(btf, d, NULL, str, "cpu_number", int, BTF_F_COMPACT, +			  "int cpu_number = (int)100", 100); +	TEST_BTF_DUMP_VAR(btf, d, NULL, str, "cpu_profile_flip", int, BTF_F_COMPACT, +			  "static int cpu_profile_flip = (int)2", 2); +} + +static void test_btf_datasec(struct btf *btf, struct btf_dump *d, char *str, +			     const char *name, const char *expected_val, +			     void *data, size_t data_sz) +{ +	DECLARE_LIBBPF_OPTS(btf_dump_type_data_opts, opts); +	int ret = 0, cmp; +	size_t secsize; +	__s32 type_id; + +	opts.compact = true; + +	type_id = btf__find_by_name(btf, name); +	if (!ASSERT_GT(type_id, 0, "find type id")) +		return; + +	secsize = btf__resolve_size(btf, type_id); +	ASSERT_EQ(secsize,  0, "verify section size"); + +	str[0] = '\0'; +	ret = btf_dump__dump_type_data(d, type_id, data, data_sz, &opts); +	ASSERT_EQ(ret, 0, "unexpected return value"); + +	cmp = strcmp(str, expected_val); +	ASSERT_EQ(cmp, 0, "ensure expected/actual match"); +} + +static void test_btf_dump_datasec_data(char *str) +{ +	struct btf *btf = btf__parse("xdping_kern.o", NULL); +	struct btf_dump_opts opts = { .ctx = str }; +	char license[4] = "GPL"; +	struct btf_dump *d; + +	if (!ASSERT_OK_PTR(btf, "xdping_kern.o BTF not found")) +		return; + +	d = btf_dump__new(btf, NULL, &opts, btf_dump_snprintf); +	if (!ASSERT_OK_PTR(d, "could not create BTF dump")) +		return; + +	test_btf_datasec(btf, d, str, "license", +			 "SEC(\"license\") char[4] _license = (char[4])['G','P','L',];", +			 license, sizeof(license)); +} +  void test_btf_dump() { +	char str[STRSIZE]; +	struct btf_dump_opts opts = { .ctx = str }; +	struct btf_dump *d; +	struct btf *btf;  	int i;  	for (i = 0; i < ARRAY_SIZE(btf_dump_test_cases); i++) { @@ -245,4 +831,33 @@ void test_btf_dump() {  	}  	if (test__start_subtest("btf_dump: incremental"))  		test_btf_dump_incremental(); + +	btf = libbpf_find_kernel_btf(); +	if (!ASSERT_OK_PTR(btf, "no kernel BTF found")) +		return; + +	d = btf_dump__new(btf, NULL, &opts, btf_dump_snprintf); +	if (!ASSERT_OK_PTR(d, "could not create BTF dump")) +		return; + +	/* Verify type display for various types. 
*/ +	if (test__start_subtest("btf_dump: int_data")) +		test_btf_dump_int_data(btf, d, str); +	if (test__start_subtest("btf_dump: float_data")) +		test_btf_dump_float_data(btf, d, str); +	if (test__start_subtest("btf_dump: char_data")) +		test_btf_dump_char_data(btf, d, str); +	if (test__start_subtest("btf_dump: typedef_data")) +		test_btf_dump_typedef_data(btf, d, str); +	if (test__start_subtest("btf_dump: enum_data")) +		test_btf_dump_enum_data(btf, d, str); +	if (test__start_subtest("btf_dump: struct_data")) +		test_btf_dump_struct_data(btf, d, str); +	if (test__start_subtest("btf_dump: var_data")) +		test_btf_dump_var_data(btf, d, str); +	btf_dump__free(d); +	btf__free(btf); + +	if (test__start_subtest("btf_dump: datasec_data")) +		test_btf_dump_datasec_data(str);  } diff --git a/tools/testing/selftests/bpf/prog_tests/btf_module.c b/tools/testing/selftests/bpf/prog_tests/btf_module.c new file mode 100644 index 000000000000..2239d1fe0332 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/btf_module.c @@ -0,0 +1,34 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2021 Hengqi Chen */ + +#include <test_progs.h> +#include <bpf/btf.h> + +static const char *module_name = "bpf_testmod"; +static const char *symbol_name = "bpf_testmod_test_read"; + +void test_btf_module() +{ +	struct btf *vmlinux_btf, *module_btf; +	__s32 type_id; + +	if (!env.has_testmod) { +		test__skip(); +		return; +	} + +	vmlinux_btf = btf__load_vmlinux_btf(); +	if (!ASSERT_OK_PTR(vmlinux_btf, "could not load vmlinux BTF")) +		return; + +	module_btf = btf__load_module_btf(module_name, vmlinux_btf); +	if (!ASSERT_OK_PTR(module_btf, "could not load module BTF")) +		goto cleanup; + +	type_id = btf__find_by_name(module_btf, symbol_name); +	ASSERT_GT(type_id, 0, "func not found"); + +cleanup: +	btf__free(module_btf); +	btf__free(vmlinux_btf); +} diff --git a/tools/testing/selftests/bpf/prog_tests/core_autosize.c b/tools/testing/selftests/bpf/prog_tests/core_autosize.c index 981c251453d9..3d4b2a358d47 100644 --- a/tools/testing/selftests/bpf/prog_tests/core_autosize.c +++ b/tools/testing/selftests/bpf/prog_tests/core_autosize.c @@ -53,8 +53,8 @@ void test_core_autosize(void)  	char btf_file[] = "/tmp/core_autosize.btf.XXXXXX";  	int err, fd = -1, zero = 0;  	int char_id, short_id, int_id, long_long_id, void_ptr_id, id; +	DECLARE_LIBBPF_OPTS(bpf_object_open_opts, open_opts);  	struct test_core_autosize* skel = NULL; -	struct bpf_object_load_attr load_attr = {};  	struct bpf_program *prog;  	struct bpf_map *bss_map;  	struct btf *btf = NULL; @@ -125,9 +125,10 @@ void test_core_autosize(void)  	fd = -1;  	/* open and load BPF program with custom BTF as the kernel BTF */ -	skel = test_core_autosize__open(); +	open_opts.btf_custom_path = btf_file; +	skel = test_core_autosize__open_opts(&open_opts);  	if (!ASSERT_OK_PTR(skel, "skel_open")) -		return; +		goto cleanup;  	/* disable handle_signed() for now */  	prog = bpf_object__find_program_by_name(skel->obj, "handle_signed"); @@ -135,9 +136,7 @@ void test_core_autosize(void)  		goto cleanup;  	bpf_program__set_autoload(prog, false); -	load_attr.obj = skel->obj; -	load_attr.target_btf_path = btf_file; -	err = bpf_object__load_xattr(&load_attr); +	err = bpf_object__load(skel->obj);  	if (!ASSERT_OK(err, "prog_load"))  		goto cleanup; @@ -204,14 +203,13 @@ void test_core_autosize(void)  	skel = NULL;  	/* now re-load with handle_signed() enabled, it should fail loading */ -	skel = test_core_autosize__open(); +	open_opts.btf_custom_path = btf_file; +	skel = 
test_core_autosize__open_opts(&open_opts);  	if (!ASSERT_OK_PTR(skel, "skel_open")) -		return; +		goto cleanup; -	load_attr.obj = skel->obj; -	load_attr.target_btf_path = btf_file; -	err = bpf_object__load_xattr(&load_attr); -	if (!ASSERT_ERR(err, "bad_prog_load")) +	err = test_core_autosize__load(skel); +	if (!ASSERT_ERR(err, "skel_load"))  		goto cleanup;  cleanup: diff --git a/tools/testing/selftests/bpf/prog_tests/core_reloc.c b/tools/testing/selftests/bpf/prog_tests/core_reloc.c index d02e064c535f..4739b15b2a97 100644 --- a/tools/testing/selftests/bpf/prog_tests/core_reloc.c +++ b/tools/testing/selftests/bpf/prog_tests/core_reloc.c @@ -816,7 +816,7 @@ static size_t roundup_page(size_t sz)  void test_core_reloc(void)  {  	const size_t mmap_sz = roundup_page(sizeof(struct data)); -	struct bpf_object_load_attr load_attr = {}; +	DECLARE_LIBBPF_OPTS(bpf_object_open_opts, open_opts);  	struct core_reloc_test_case *test_case;  	const char *tp_name, *probe_name;  	int err, i, equal; @@ -846,9 +846,16 @@ void test_core_reloc(void)  				continue;  		} -		obj = bpf_object__open_file(test_case->bpf_obj_file, NULL); +		if (test_case->btf_src_file) { +			err = access(test_case->btf_src_file, R_OK); +			if (!ASSERT_OK(err, "btf_src_file")) +				goto cleanup; +		} + +		open_opts.btf_custom_path = test_case->btf_src_file; +		obj = bpf_object__open_file(test_case->bpf_obj_file, &open_opts);  		if (!ASSERT_OK_PTR(obj, "obj_open")) -			continue; +			goto cleanup;  		probe_name = "raw_tracepoint/sys_enter";  		tp_name = "sys_enter"; @@ -862,17 +869,7 @@ void test_core_reloc(void)  			  "prog '%s' not found\n", probe_name))  			goto cleanup; - -		if (test_case->btf_src_file) { -			err = access(test_case->btf_src_file, R_OK); -			if (!ASSERT_OK(err, "btf_src_file")) -				goto cleanup; -		} - -		load_attr.obj = obj; -		load_attr.log_level = 0; -		load_attr.target_btf_path = test_case->btf_src_file; -		err = bpf_object__load_xattr(&load_attr); +		err = bpf_object__load(obj);  		if (err) {  			if (!test_case->fails)  				ASSERT_OK(err, "obj_load"); diff --git a/tools/testing/selftests/bpf/prog_tests/get_func_ip_test.c b/tools/testing/selftests/bpf/prog_tests/get_func_ip_test.c new file mode 100644 index 000000000000..02a465f36d59 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/get_func_ip_test.c @@ -0,0 +1,55 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <test_progs.h> +#include "get_func_ip_test.skel.h" + +void test_get_func_ip_test(void) +{ +	struct get_func_ip_test *skel = NULL; +	__u32 duration = 0, retval; +	int err, prog_fd; + +	skel = get_func_ip_test__open(); +	if (!ASSERT_OK_PTR(skel, "get_func_ip_test__open")) +		return; + +	/* test6 is x86_64 specifc because of the instruction +	 * offset, disabling it for all other archs +	 */ +#ifndef __x86_64__ +	bpf_program__set_autoload(skel->progs.test6, false); +	bpf_program__set_autoload(skel->progs.test7, false); +#endif + +	err = get_func_ip_test__load(skel); +	if (!ASSERT_OK(err, "get_func_ip_test__load")) +		goto cleanup; + +	err = get_func_ip_test__attach(skel); +	if (!ASSERT_OK(err, "get_func_ip_test__attach")) +		goto cleanup; + +	prog_fd = bpf_program__fd(skel->progs.test1); +	err = bpf_prog_test_run(prog_fd, 1, NULL, 0, +				NULL, NULL, &retval, &duration); +	ASSERT_OK(err, "test_run"); +	ASSERT_EQ(retval, 0, "test_run"); + +	prog_fd = bpf_program__fd(skel->progs.test5); +	err = bpf_prog_test_run(prog_fd, 1, NULL, 0, +				NULL, NULL, &retval, &duration); + +	ASSERT_OK(err, "test_run"); + +	ASSERT_EQ(skel->bss->test1_result, 1, 
"test1_result"); +	ASSERT_EQ(skel->bss->test2_result, 1, "test2_result"); +	ASSERT_EQ(skel->bss->test3_result, 1, "test3_result"); +	ASSERT_EQ(skel->bss->test4_result, 1, "test4_result"); +	ASSERT_EQ(skel->bss->test5_result, 1, "test5_result"); +#ifdef __x86_64__ +	ASSERT_EQ(skel->bss->test6_result, 1, "test6_result"); +	ASSERT_EQ(skel->bss->test7_result, 1, "test7_result"); +#endif + +cleanup: +	get_func_ip_test__destroy(skel); +} diff --git a/tools/testing/selftests/bpf/prog_tests/kfunc_call.c b/tools/testing/selftests/bpf/prog_tests/kfunc_call.c index 30a7b9b837bf..9611f2bc50df 100644 --- a/tools/testing/selftests/bpf/prog_tests/kfunc_call.c +++ b/tools/testing/selftests/bpf/prog_tests/kfunc_call.c @@ -44,7 +44,7 @@ static void test_subprog(void)  	ASSERT_OK(err, "bpf_prog_test_run(test1)");  	ASSERT_EQ(retval, 10, "test1-retval");  	ASSERT_NEQ(skel->data->active_res, -1, "active_res"); -	ASSERT_EQ(skel->data->sk_state, BPF_TCP_CLOSE, "sk_state"); +	ASSERT_EQ(skel->data->sk_state_res, BPF_TCP_CLOSE, "sk_state_res");  	kfunc_call_test_subprog__destroy(skel);  } diff --git a/tools/testing/selftests/bpf/prog_tests/ksyms_btf.c b/tools/testing/selftests/bpf/prog_tests/ksyms_btf.c index 67bebd324147..cf3acfa5a91d 100644 --- a/tools/testing/selftests/bpf/prog_tests/ksyms_btf.c +++ b/tools/testing/selftests/bpf/prog_tests/ksyms_btf.c @@ -6,6 +6,7 @@  #include <bpf/btf.h>  #include "test_ksyms_btf.skel.h"  #include "test_ksyms_btf_null_check.skel.h" +#include "test_ksyms_weak.skel.h"  static int duration; @@ -81,6 +82,33 @@ static void test_null_check(void)  	test_ksyms_btf_null_check__destroy(skel);  } +static void test_weak_syms(void) +{ +	struct test_ksyms_weak *skel; +	struct test_ksyms_weak__data *data; +	int err; + +	skel = test_ksyms_weak__open_and_load(); +	if (CHECK(!skel, "test_ksyms_weak__open_and_load", "failed\n")) +		return; + +	err = test_ksyms_weak__attach(skel); +	if (CHECK(err, "test_ksyms_weak__attach", "skeleton attach failed: %d\n", err)) +		goto cleanup; + +	/* trigger tracepoint */ +	usleep(1); + +	data = skel->data; +	ASSERT_EQ(data->out__existing_typed, 0, "existing typed ksym"); +	ASSERT_NEQ(data->out__existing_typeless, -1, "existing typeless ksym"); +	ASSERT_EQ(data->out__non_existent_typeless, 0, "nonexistent typeless ksym"); +	ASSERT_EQ(data->out__non_existent_typed, 0, "nonexistent typed ksym"); + +cleanup: +	test_ksyms_weak__destroy(skel); +} +  void test_ksyms_btf(void)  {  	int percpu_datasec; @@ -105,4 +133,7 @@ void test_ksyms_btf(void)  	if (test__start_subtest("null_check"))  		test_null_check(); + +	if (test__start_subtest("weak_ksyms")) +		test_weak_syms();  } diff --git a/tools/testing/selftests/bpf/prog_tests/netcnt.c b/tools/testing/selftests/bpf/prog_tests/netcnt.c new file mode 100644 index 000000000000..6ede48bde91b --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/netcnt.c @@ -0,0 +1,82 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include <sys/sysinfo.h> +#include <test_progs.h> +#include "network_helpers.h" +#include "netcnt_prog.skel.h" +#include "netcnt_common.h" + +#define CG_NAME "/netcnt" + +void test_netcnt(void) +{ +	union percpu_net_cnt *percpu_netcnt = NULL; +	struct bpf_cgroup_storage_key key; +	int map_fd, percpu_map_fd; +	struct netcnt_prog *skel; +	unsigned long packets; +	union net_cnt netcnt; +	unsigned long bytes; +	int cpu, nproc; +	int cg_fd = -1; +	char cmd[128]; + +	skel = netcnt_prog__open_and_load(); +	if (!ASSERT_OK_PTR(skel, "netcnt_prog__open_and_load")) +		return; + +	nproc = get_nprocs_conf(); +	percpu_netcnt = 
malloc(sizeof(*percpu_netcnt) * nproc); +	if (!ASSERT_OK_PTR(percpu_netcnt, "malloc(percpu_netcnt)")) +		goto err; + +	cg_fd = test__join_cgroup(CG_NAME); +	if (!ASSERT_GE(cg_fd, 0, "test__join_cgroup")) +		goto err; + +	skel->links.bpf_nextcnt = bpf_program__attach_cgroup(skel->progs.bpf_nextcnt, cg_fd); +	if (!ASSERT_OK_PTR(skel->links.bpf_nextcnt, +			   "attach_cgroup(bpf_nextcnt)")) +		goto err; + +	snprintf(cmd, sizeof(cmd), "%s ::1 -A -c 10000 -q > /dev/null", ping_command(AF_INET6)); +	ASSERT_OK(system(cmd), cmd); + +	map_fd = bpf_map__fd(skel->maps.netcnt); +	if (!ASSERT_OK(bpf_map_get_next_key(map_fd, NULL, &key), "bpf_map_get_next_key")) +		goto err; + +	if (!ASSERT_OK(bpf_map_lookup_elem(map_fd, &key, &netcnt), "bpf_map_lookup_elem(netcnt)")) +		goto err; + +	percpu_map_fd = bpf_map__fd(skel->maps.percpu_netcnt); +	if (!ASSERT_OK(bpf_map_lookup_elem(percpu_map_fd, &key, &percpu_netcnt[0]), +		       "bpf_map_lookup_elem(percpu_netcnt)")) +		goto err; + +	/* Some packets can be still in per-cpu cache, but not more than +	 * MAX_PERCPU_PACKETS. +	 */ +	packets = netcnt.packets; +	bytes = netcnt.bytes; +	for (cpu = 0; cpu < nproc; cpu++) { +		ASSERT_LE(percpu_netcnt[cpu].packets, MAX_PERCPU_PACKETS, "MAX_PERCPU_PACKETS"); + +		packets += percpu_netcnt[cpu].packets; +		bytes += percpu_netcnt[cpu].bytes; +	} + +	/* No packets should be lost */ +	ASSERT_EQ(packets, 10000, "packets"); + +	/* Let's check that bytes counter matches the number of packets +	 * multiplied by the size of ipv6 ICMP packet. +	 */ +	ASSERT_EQ(bytes, packets * 104, "bytes"); + +err: +	if (cg_fd != -1) +		close(cg_fd); +	free(percpu_netcnt); +	netcnt_prog__destroy(skel); +} diff --git a/tools/testing/selftests/bpf/prog_tests/netns_cookie.c b/tools/testing/selftests/bpf/prog_tests/netns_cookie.c new file mode 100644 index 000000000000..71d8f3ba7d6b --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/netns_cookie.c @@ -0,0 +1,80 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include <test_progs.h> +#include "netns_cookie_prog.skel.h" +#include "network_helpers.h" + +#ifndef SO_NETNS_COOKIE +#define SO_NETNS_COOKIE 71 +#endif + +static int duration; + +void test_netns_cookie(void) +{ +	int server_fd = -1, client_fd = -1, cgroup_fd = -1; +	int err, val, ret, map, verdict; +	struct netns_cookie_prog *skel; +	uint64_t cookie_expected_value; +	socklen_t vallen = sizeof(cookie_expected_value); +	static const char send_msg[] = "message"; + +	skel = netns_cookie_prog__open_and_load(); +	if (!ASSERT_OK_PTR(skel, "skel_open")) +		return; + +	cgroup_fd = test__join_cgroup("/netns_cookie"); +	if (CHECK(cgroup_fd < 0, "join_cgroup", "cgroup creation failed\n")) +		goto done; + +	skel->links.get_netns_cookie_sockops = bpf_program__attach_cgroup( +		skel->progs.get_netns_cookie_sockops, cgroup_fd); +	if (!ASSERT_OK_PTR(skel->links.get_netns_cookie_sockops, "prog_attach")) +		goto done; + +	verdict = bpf_program__fd(skel->progs.get_netns_cookie_sk_msg); +	map = bpf_map__fd(skel->maps.sock_map); +	err = bpf_prog_attach(verdict, map, BPF_SK_MSG_VERDICT, 0); +	if (!ASSERT_OK(err, "prog_attach")) +		goto done; + +	server_fd = start_server(AF_INET6, SOCK_STREAM, "::1", 0, 0); +	if (CHECK(server_fd < 0, "start_server", "errno %d\n", errno)) +		goto done; + +	client_fd = connect_to_fd(server_fd, 0); +	if (CHECK(client_fd < 0, "connect_to_fd", "errno %d\n", errno)) +		goto done; + +	ret = send(client_fd, send_msg, sizeof(send_msg), 0); +	if (CHECK(ret != sizeof(send_msg), "send(msg)", "ret:%d\n", ret)) +		goto done; + +	err = 
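/* For BPF_MAP_TYPE_PERCPU_* maps, one bpf_map_lookup_elem() from user
 * space fills one value slot per CPU, which is why the netcnt test above
 * sizes its buffer by get_nprocs_conf(). A minimal sketch for a
 * hypothetical per-CPU u64 counter map:
 */
#include <stdlib.h>
#include <sys/sysinfo.h>
#include <bpf/bpf.h>

static int sum_percpu_u64(int map_fd, __u32 key, __u64 *sum)
{
	int cpu, nproc = get_nprocs_conf();
	__u64 *vals = calloc(nproc, sizeof(*vals));

	if (!vals)
		return -1;
	if (bpf_map_lookup_elem(map_fd, &key, vals)) {	/* fills all CPUs */
		free(vals);
		return -1;
	}
	*sum = 0;
	for (cpu = 0; cpu < nproc; cpu++)
		*sum += vals[cpu];
	free(vals);
	return 0;
}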
bpf_map_lookup_elem(bpf_map__fd(skel->maps.sockops_netns_cookies), +				  &client_fd, &val); +	if (!ASSERT_OK(err, "map_lookup(sockops_netns_cookies)")) +		goto done; + +	err = getsockopt(client_fd, SOL_SOCKET, SO_NETNS_COOKIE, +			 &cookie_expected_value, &vallen); +	if (!ASSERT_OK(err, "getsockopt")) +		goto done; + +	ASSERT_EQ(val, cookie_expected_value, "cookie_value"); + +	err = bpf_map_lookup_elem(bpf_map__fd(skel->maps.sk_msg_netns_cookies), +				  &client_fd, &val); +	if (!ASSERT_OK(err, "map_lookup(sk_msg_netns_cookies)")) +		goto done; + +	ASSERT_EQ(val, cookie_expected_value, "cookie_value"); + +done: +	if (server_fd != -1) +		close(server_fd); +	if (client_fd != -1) +		close(client_fd); +	if (cgroup_fd != -1) +		close(cgroup_fd); +	netns_cookie_prog__destroy(skel); +} diff --git a/tools/testing/selftests/bpf/prog_tests/perf_link.c b/tools/testing/selftests/bpf/prog_tests/perf_link.c new file mode 100644 index 000000000000..b1abd0c46607 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/perf_link.c @@ -0,0 +1,89 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2021 Facebook */ +#define _GNU_SOURCE +#include <pthread.h> +#include <sched.h> +#include <test_progs.h> +#include "test_perf_link.skel.h" + +static void burn_cpu(void) +{ +	volatile int j = 0; +	cpu_set_t cpu_set; +	int i, err; + +	/* generate some branches on cpu 0 */ +	CPU_ZERO(&cpu_set); +	CPU_SET(0, &cpu_set); +	err = pthread_setaffinity_np(pthread_self(), sizeof(cpu_set), &cpu_set); +	ASSERT_OK(err, "set_thread_affinity"); + +	/* spin the loop for a while (random high number) */ +	for (i = 0; i < 1000000; ++i) +		++j; +} + +void test_perf_link(void) +{ +	struct test_perf_link *skel = NULL; +	struct perf_event_attr attr; +	int pfd = -1, link_fd = -1, err; +	int run_cnt_before, run_cnt_after; +	struct bpf_link_info info; +	__u32 info_len = sizeof(info); + +	/* create perf event */ +	memset(&attr, 0, sizeof(attr)); +	attr.size = sizeof(attr); +	attr.type = PERF_TYPE_SOFTWARE; +	attr.config = PERF_COUNT_SW_CPU_CLOCK; +	attr.freq = 1; +	attr.sample_freq = 4000; +	pfd = syscall(__NR_perf_event_open, &attr, -1, 0, -1, PERF_FLAG_FD_CLOEXEC); +	if (!ASSERT_GE(pfd, 0, "perf_fd")) +		goto cleanup; + +	skel = test_perf_link__open_and_load(); +	if (!ASSERT_OK_PTR(skel, "skel_load")) +		goto cleanup; + +	link_fd = bpf_link_create(bpf_program__fd(skel->progs.handler), pfd, +				  BPF_PERF_EVENT, NULL); +	if (!ASSERT_GE(link_fd, 0, "link_fd")) +		goto cleanup; + +	memset(&info, 0, sizeof(info)); +	err = bpf_obj_get_info_by_fd(link_fd, &info, &info_len); +	if (!ASSERT_OK(err, "link_get_info")) +		goto cleanup; + +	ASSERT_EQ(info.type, BPF_LINK_TYPE_PERF_EVENT, "link_type"); +	ASSERT_GT(info.id, 0, "link_id"); +	ASSERT_GT(info.prog_id, 0, "link_prog_id"); + +	/* ensure we get at least one perf_event prog execution */ +	burn_cpu(); +	ASSERT_GT(skel->bss->run_cnt, 0, "run_cnt"); + +	/* perf_event is still active, but we close link and BPF program +	 * shouldn't be executed anymore +	 */ +	close(link_fd); +	link_fd = -1; + +	/* make sure there are no stragglers */ +	kern_sync_rcu(); + +	run_cnt_before = skel->bss->run_cnt; +	burn_cpu(); +	run_cnt_after = skel->bss->run_cnt; + +	ASSERT_EQ(run_cnt_before, run_cnt_after, "run_cnt_before_after"); + +cleanup: +	if (link_fd >= 0) +		close(link_fd); +	if (pfd >= 0) +		close(pfd); +	test_perf_link__destroy(skel); +} diff --git a/tools/testing/selftests/bpf/prog_tests/pinning.c b/tools/testing/selftests/bpf/prog_tests/pinning.c index fcf54b3a1dd0..d4b953ae3407 100644 --- 
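/* The netns_cookie test above cross-checks the cookies recorded by the
 * BPF programs against what user space reads back with
 * getsockopt(SO_NETNS_COOKIE); the fallback define covers uapi headers
 * that predate the option. Standalone, the read looks like this:
 */
#include <stdint.h>
#include <stdio.h>
#include <sys/socket.h>

#ifndef SO_NETNS_COOKIE
#define SO_NETNS_COOKIE 71
#endif

static int print_netns_cookie(int fd)
{
	uint64_t cookie;
	socklen_t len = sizeof(cookie);

	/* fails with ENOPROTOOPT on kernels without netns cookie support */
	if (getsockopt(fd, SOL_SOCKET, SO_NETNS_COOKIE, &cookie, &len))
		return -1;
	printf("netns cookie: %llu\n", (unsigned long long)cookie);
	return 0;
}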
a/tools/testing/selftests/bpf/prog_tests/pinning.c
+++ b/tools/testing/selftests/bpf/prog_tests/pinning.c
@@ -125,6 +125,10 @@ void test_pinning(void)
 	if (CHECK(err, "pin maps", "err %d errno %d\n", err, errno))
 		goto out;
 
+	/* get pinning path */
+	if (!ASSERT_STREQ(bpf_map__pin_path(map), pinpath, "get pin path"))
+		goto out;
+
 	/* set pinning path of other map and re-pin all */
 	map = bpf_object__find_map_by_name(obj, "nopinmap");
 	if (CHECK(!map, "find map", "NULL map"))
@@ -134,6 +138,11 @@ void test_pinning(void)
 	if (CHECK(err, "set pin path", "err %d errno %d\n", err, errno))
 		goto out;
 
+	/* get pinning path after set */
+	if (!ASSERT_STREQ(bpf_map__pin_path(map), custpinpath,
+			  "get pin path after set"))
+		goto out;
+
 	/* should only pin the one unpinned map */
 	err = bpf_object__pin_maps(obj, NULL);
 	if (CHECK(err, "pin maps", "err %d errno %d\n", err, errno))
diff --git a/tools/testing/selftests/bpf/prog_tests/reference_tracking.c b/tools/testing/selftests/bpf/prog_tests/reference_tracking.c
index de2688166696..4e91f4d6466c 100644
--- a/tools/testing/selftests/bpf/prog_tests/reference_tracking.c
+++ b/tools/testing/selftests/bpf/prog_tests/reference_tracking.c
@@ -34,8 +34,8 @@ void test_reference_tracking(void)
 		if (!test__start_subtest(title))
 			continue;
 
-		/* Expect verifier failure if test name has 'fail' */
-		if (strstr(title, "fail") != NULL) {
+		/* Expect verifier failure if test name has 'err' */
+		if (strstr(title, "err_") != NULL) {
 			libbpf_print_fn_t old_print_fn;
 
 			old_print_fn = libbpf_set_print(NULL);
diff --git a/tools/testing/selftests/bpf/prog_tests/send_signal.c b/tools/testing/selftests/bpf/prog_tests/send_signal.c
index 023cc532992d..776916b61c40 100644
--- a/tools/testing/selftests/bpf/prog_tests/send_signal.c
+++ b/tools/testing/selftests/bpf/prog_tests/send_signal.c
@@ -1,5 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0
 #include <test_progs.h>
+#include <sys/time.h>
+#include <sys/resource.h>
 #include "test_send_signal_kern.skel.h"
 
 int sigusr1_received = 0;
@@ -10,29 +12,25 @@ static void sigusr1_handler(int signum)
 }
 
 static void test_send_signal_common(struct perf_event_attr *attr,
-				    bool signal_thread,
-				    const char *test_name)
+				    bool signal_thread)
 {
 	struct test_send_signal_kern *skel;
 	int pipe_c2p[2], pipe_p2c[2];
 	int err = -1, pmu_fd = -1;
-	__u32 duration = 0;
 	char buf[256];
 	pid_t pid;
 
-	if (CHECK(pipe(pipe_c2p), test_name,
-		  "pipe pipe_c2p error: %s\n", strerror(errno)))
+	if (!ASSERT_OK(pipe(pipe_c2p), "pipe_c2p"))
 		return;
 
-	if (CHECK(pipe(pipe_p2c), test_name,
-		  "pipe pipe_p2c error: %s\n", strerror(errno))) {
+	if (!ASSERT_OK(pipe(pipe_p2c), "pipe_p2c")) {
 		close(pipe_c2p[0]);
 		close(pipe_c2p[1]);
 		return;
 	}
 
 	pid = fork();
-	if (CHECK(pid < 0, test_name, "fork error: %s\n", strerror(errno))) {
+	if (!ASSERT_GE(pid, 0, "fork")) {
 		close(pipe_c2p[0]);
 		close(pipe_c2p[1]);
 		close(pipe_p2c[0]);
@@ -41,26 +39,40 @@ static void test_send_signal_common(struct perf_event_attr *attr,
 	}
 
 	if (pid == 0) {
+		int old_prio;
+
 		/* install signal handler and notify parent */
 		signal(SIGUSR1, sigusr1_handler);
 
 		close(pipe_c2p[0]); /* close read */
 		close(pipe_p2c[1]); /* close write */
 
+		/* boost with a high priority so we have a better chance
+		 * that if an interrupt happens, the underlying task
+		 * is this process.
+		 */ +		errno = 0; +		old_prio = getpriority(PRIO_PROCESS, 0); +		ASSERT_OK(errno, "getpriority"); +		ASSERT_OK(setpriority(PRIO_PROCESS, 0, -20), "setpriority"); +  		/* notify parent signal handler is installed */ -		CHECK(write(pipe_c2p[1], buf, 1) != 1, "pipe_write", "err %d\n", -errno); +		ASSERT_EQ(write(pipe_c2p[1], buf, 1), 1, "pipe_write");  		/* make sure parent enabled bpf program to send_signal */ -		CHECK(read(pipe_p2c[0], buf, 1) != 1, "pipe_read", "err %d\n", -errno); +		ASSERT_EQ(read(pipe_p2c[0], buf, 1), 1, "pipe_read");  		/* wait a little for signal handler */  		sleep(1);  		buf[0] = sigusr1_received ? '2' : '0'; -		CHECK(write(pipe_c2p[1], buf, 1) != 1, "pipe_write", "err %d\n", -errno); +		ASSERT_EQ(write(pipe_c2p[1], buf, 1), 1, "pipe_write");  		/* wait for parent notification and exit */ -		CHECK(read(pipe_p2c[0], buf, 1) != 1, "pipe_read", "err %d\n", -errno); +		ASSERT_EQ(read(pipe_p2c[0], buf, 1), 1, "pipe_read"); + +		/* restore the old priority */ +		ASSERT_OK(setpriority(PRIO_PROCESS, 0, old_prio), "setpriority");  		close(pipe_c2p[1]);  		close(pipe_p2c[0]); @@ -71,20 +83,19 @@ static void test_send_signal_common(struct perf_event_attr *attr,  	close(pipe_p2c[0]); /* close read */  	skel = test_send_signal_kern__open_and_load(); -	if (CHECK(!skel, "skel_open_and_load", "skeleton open_and_load failed\n")) +	if (!ASSERT_OK_PTR(skel, "skel_open_and_load"))  		goto skel_open_load_failure;  	if (!attr) {  		err = test_send_signal_kern__attach(skel); -		if (CHECK(err, "skel_attach", "skeleton attach failed\n")) { +		if (!ASSERT_OK(err, "skel_attach")) {  			err = -1;  			goto destroy_skel;  		}  	} else {  		pmu_fd = syscall(__NR_perf_event_open, attr, pid, -1,  				 -1 /* group id */, 0 /* flags */); -		if (CHECK(pmu_fd < 0, test_name, "perf_event_open error: %s\n", -			strerror(errno))) { +		if (!ASSERT_GE(pmu_fd, 0, "perf_event_open")) {  			err = -1;  			goto destroy_skel;  		} @@ -96,7 +107,7 @@ static void test_send_signal_common(struct perf_event_attr *attr,  	}  	/* wait until child signal handler installed */ -	CHECK(read(pipe_c2p[0], buf, 1) != 1, "pipe_read", "err %d\n", -errno); +	ASSERT_EQ(read(pipe_c2p[0], buf, 1), 1, "pipe_read");  	/* trigger the bpf send_signal */  	skel->bss->pid = pid; @@ -104,21 +115,21 @@ static void test_send_signal_common(struct perf_event_attr *attr,  	skel->bss->signal_thread = signal_thread;  	/* notify child that bpf program can send_signal now */ -	CHECK(write(pipe_p2c[1], buf, 1) != 1, "pipe_write", "err %d\n", -errno); +	ASSERT_EQ(write(pipe_p2c[1], buf, 1), 1, "pipe_write");  	/* wait for result */  	err = read(pipe_c2p[0], buf, 1); -	if (CHECK(err < 0, test_name, "reading pipe error: %s\n", strerror(errno))) +	if (!ASSERT_GE(err, 0, "reading pipe"))  		goto disable_pmu; -	if (CHECK(err == 0, test_name, "reading pipe error: size 0\n")) { +	if (!ASSERT_GT(err, 0, "reading pipe error: size 0")) {  		err = -1;  		goto disable_pmu;  	} -	CHECK(buf[0] != '2', test_name, "incorrect result\n"); +	ASSERT_EQ(buf[0], '2', "incorrect result");  	/* notify child safe to exit */ -	CHECK(write(pipe_p2c[1], buf, 1) != 1, "pipe_write", "err %d\n", -errno); +	ASSERT_EQ(write(pipe_p2c[1], buf, 1), 1, "pipe_write");  disable_pmu:  	close(pmu_fd); @@ -132,7 +143,7 @@ skel_open_load_failure:  static void test_send_signal_tracepoint(bool signal_thread)  { -	test_send_signal_common(NULL, signal_thread, "tracepoint"); +	test_send_signal_common(NULL, signal_thread);  }  static void test_send_signal_perf(bool signal_thread) @@ -143,7 
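/* The perf_link test earlier in this section attaches its program through
 * the new BPF_PERF_EVENT link type instead of the perf ioctl, so the
 * attachment can be introspected and torn down through an FD. A minimal
 * sketch; the event type and sampling frequency are arbitrary choices:
 */
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>
#include <bpf/bpf.h>

static int attach_to_cpu_clock(int prog_fd)
{
	struct perf_event_attr attr = {
		.size = sizeof(attr),
		.type = PERF_TYPE_SOFTWARE,
		.config = PERF_COUNT_SW_CPU_CLOCK,
		.freq = 1,
		.sample_freq = 1000,
	};
	int pfd, link_fd;

	pfd = syscall(__NR_perf_event_open, &attr, -1 /* pid */, 0 /* cpu */,
		      -1 /* group */, PERF_FLAG_FD_CLOEXEC);
	if (pfd < 0)
		return -1;

	link_fd = bpf_link_create(prog_fd, pfd, BPF_PERF_EVENT, NULL);
	close(pfd);	/* the link holds its own reference to the event */
	return link_fd;	/* closing this FD detaches the program */
}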
+154,7 @@ static void test_send_signal_perf(bool signal_thread)  		.config = PERF_COUNT_SW_CPU_CLOCK,  	}; -	test_send_signal_common(&attr, signal_thread, "perf_sw_event"); +	test_send_signal_common(&attr, signal_thread);  }  static void test_send_signal_nmi(bool signal_thread) @@ -172,7 +183,7 @@ static void test_send_signal_nmi(bool signal_thread)  		close(pmu_fd);  	} -	test_send_signal_common(&attr, signal_thread, "perf_hw_event"); +	test_send_signal_common(&attr, signal_thread);  }  void test_send_signal(void) diff --git a/tools/testing/selftests/bpf/prog_tests/snprintf.c b/tools/testing/selftests/bpf/prog_tests/snprintf.c index dffbcaa1ec98..8fd1b4b29a0e 100644 --- a/tools/testing/selftests/bpf/prog_tests/snprintf.c +++ b/tools/testing/selftests/bpf/prog_tests/snprintf.c @@ -19,7 +19,7 @@  #define EXP_ADDR_OUT "0000000000000000 ffff00000add4e55 "  #define EXP_ADDR_RET sizeof(EXP_ADDR_OUT "unknownhashedptr") -#define EXP_STR_OUT  "str1 longstr" +#define EXP_STR_OUT  "str1         a  b c      d e longstr"  #define EXP_STR_RET  sizeof(EXP_STR_OUT)  #define EXP_OVER_OUT "%over" @@ -114,6 +114,8 @@ void test_snprintf_negative(void)  	ASSERT_ERR(load_single_snprintf("%"), "invalid specifier 3");  	ASSERT_ERR(load_single_snprintf("%12345678"), "invalid specifier 4");  	ASSERT_ERR(load_single_snprintf("%--------"), "invalid specifier 5"); +	ASSERT_ERR(load_single_snprintf("%lc"), "invalid specifier 6"); +	ASSERT_ERR(load_single_snprintf("%llc"), "invalid specifier 7");  	ASSERT_ERR(load_single_snprintf("\x80"), "non ascii character");  	ASSERT_ERR(load_single_snprintf("\x1"), "non printable character");  } diff --git a/tools/testing/selftests/bpf/prog_tests/sockmap_listen.c b/tools/testing/selftests/bpf/prog_tests/sockmap_listen.c index 515229f24a93..5c5979046523 100644 --- a/tools/testing/selftests/bpf/prog_tests/sockmap_listen.c +++ b/tools/testing/selftests/bpf/prog_tests/sockmap_listen.c @@ -351,9 +351,11 @@ static void test_insert_opened(int family, int sotype, int mapfd)  	errno = 0;  	value = s;  	err = bpf_map_update_elem(mapfd, &key, &value, BPF_NOEXIST); -	if (!err || errno != EOPNOTSUPP) -		FAIL_ERRNO("map_update: expected EOPNOTSUPP"); - +	if (sotype == SOCK_STREAM) { +		if (!err || errno != EOPNOTSUPP) +			FAIL_ERRNO("map_update: expected EOPNOTSUPP"); +	} else if (err) +		FAIL_ERRNO("map_update: expected success");  	xclose(s);  } @@ -919,6 +921,23 @@ static const char *redir_mode_str(enum redir_mode mode)  	}  } +static int add_to_sockmap(int sock_mapfd, int fd1, int fd2) +{ +	u64 value; +	u32 key; +	int err; + +	key = 0; +	value = fd1; +	err = xbpf_map_update_elem(sock_mapfd, &key, &value, BPF_NOEXIST); +	if (err) +		return err; + +	key = 1; +	value = fd2; +	return xbpf_map_update_elem(sock_mapfd, &key, &value, BPF_NOEXIST); +} +  static void redir_to_connected(int family, int sotype, int sock_mapfd,  			       int verd_mapfd, enum redir_mode mode)  { @@ -928,9 +947,9 @@ static void redir_to_connected(int family, int sotype, int sock_mapfd,  	unsigned int pass;  	socklen_t len;  	int err, n; -	u64 value;  	u32 key;  	char b; +	int retries = 100;  	zero_verdict_count(verd_mapfd); @@ -965,15 +984,7 @@ static void redir_to_connected(int family, int sotype, int sock_mapfd,  	if (p1 < 0)  		goto close_cli1; -	key = 0; -	value = p0; -	err = xbpf_map_update_elem(sock_mapfd, &key, &value, BPF_NOEXIST); -	if (err) -		goto close_peer1; - -	key = 1; -	value = p1; -	err = xbpf_map_update_elem(sock_mapfd, &key, &value, BPF_NOEXIST); +	err = add_to_sockmap(sock_mapfd, p0, p1);  	if (err)  	
	goto close_peer1; @@ -991,10 +1002,15 @@ static void redir_to_connected(int family, int sotype, int sock_mapfd,  		goto close_peer1;  	if (pass != 1)  		FAIL("%s: want pass count 1, have %d", log_prefix, pass); - +again:  	n = read(c0, &b, 1); -	if (n < 0) +	if (n < 0) { +		if (errno == EAGAIN && retries--) { +			usleep(1000); +			goto again; +		}  		FAIL_ERRNO("%s: read", log_prefix); +	}  	if (n == 0)  		FAIL("%s: incomplete read", log_prefix); @@ -1061,7 +1077,6 @@ static void redir_to_listening(int family, int sotype, int sock_mapfd,  	int s, c, p, err, n;  	unsigned int drop;  	socklen_t len; -	u64 value;  	u32 key;  	zero_verdict_count(verd_mapfd); @@ -1086,15 +1101,7 @@ static void redir_to_listening(int family, int sotype, int sock_mapfd,  	if (p < 0)  		goto close_cli; -	key = 0; -	value = s; -	err = xbpf_map_update_elem(sock_mapfd, &key, &value, BPF_NOEXIST); -	if (err) -		goto close_peer; - -	key = 1; -	value = p; -	err = xbpf_map_update_elem(sock_mapfd, &key, &value, BPF_NOEXIST); +	err = add_to_sockmap(sock_mapfd, s, p);  	if (err)  		goto close_peer; @@ -1346,7 +1353,6 @@ static void test_reuseport_mixed_groups(int family, int sotype, int sock_map,  	int s1, s2, c, err;  	unsigned int drop;  	socklen_t len; -	u64 value;  	u32 key;  	zero_verdict_count(verd_map); @@ -1360,16 +1366,10 @@ static void test_reuseport_mixed_groups(int family, int sotype, int sock_map,  	if (s2 < 0)  		goto close_srv1; -	key = 0; -	value = s1; -	err = xbpf_map_update_elem(sock_map, &key, &value, BPF_NOEXIST); +	err = add_to_sockmap(sock_map, s1, s2);  	if (err)  		goto close_srv2; -	key = 1; -	value = s2; -	err = xbpf_map_update_elem(sock_map, &key, &value, BPF_NOEXIST); -  	/* Connect to s2, reuseport BPF selects s1 via sock_map[0] */  	len = sizeof(addr);  	err = xgetsockname(s2, sockaddr(&addr), &len); @@ -1441,6 +1441,8 @@ static const char *family_str(sa_family_t family)  		return "IPv4";  	case AF_INET6:  		return "IPv6"; +	case AF_UNIX: +		return "Unix";  	default:  		return "unknown";  	} @@ -1563,6 +1565,101 @@ static void test_redir(struct test_sockmap_listen *skel, struct bpf_map *map,  	}  } +static void unix_redir_to_connected(int sotype, int sock_mapfd, +			       int verd_mapfd, enum redir_mode mode) +{ +	const char *log_prefix = redir_mode_str(mode); +	int c0, c1, p0, p1; +	unsigned int pass; +	int retries = 100; +	int err, n; +	int sfd[2]; +	u32 key; +	char b; + +	zero_verdict_count(verd_mapfd); + +	if (socketpair(AF_UNIX, sotype | SOCK_NONBLOCK, 0, sfd)) +		return; +	c0 = sfd[0], p0 = sfd[1]; + +	if (socketpair(AF_UNIX, sotype | SOCK_NONBLOCK, 0, sfd)) +		goto close0; +	c1 = sfd[0], p1 = sfd[1]; + +	err = add_to_sockmap(sock_mapfd, p0, p1); +	if (err) +		goto close; + +	n = write(c1, "a", 1); +	if (n < 0) +		FAIL_ERRNO("%s: write", log_prefix); +	if (n == 0) +		FAIL("%s: incomplete write", log_prefix); +	if (n < 1) +		goto close; + +	key = SK_PASS; +	err = xbpf_map_lookup_elem(verd_mapfd, &key, &pass); +	if (err) +		goto close; +	if (pass != 1) +		FAIL("%s: want pass count 1, have %d", log_prefix, pass); + +again: +	n = read(mode == REDIR_INGRESS ? 
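/* The retry loops added in these redirect tests poll non-blocking sockets
 * for the redirected payload; the usleep(1000) added above turns a tight
 * spin into a bounded ~100ms wait. Factored out, the pattern looks like
 * this (hypothetical helper name):
 */
#include <errno.h>
#include <unistd.h>

static ssize_t read_retry(int fd, void *buf, size_t len)
{
	int retries = 100;
	ssize_t n;

	/* the redirected skb may not have been delivered yet */
	while ((n = read(fd, buf, len)) < 0 && errno == EAGAIN && retries--)
		usleep(1000);
	return n;
}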
p0 : c0, &b, 1); +	if (n < 0) { +		if (errno == EAGAIN && retries--) { +			usleep(1000); +			goto again; +		} +		FAIL_ERRNO("%s: read", log_prefix); +	} +	if (n == 0) +		FAIL("%s: incomplete read", log_prefix); + +close: +	xclose(c1); +	xclose(p1); +close0: +	xclose(c0); +	xclose(p0); +} + +static void unix_skb_redir_to_connected(struct test_sockmap_listen *skel, +					struct bpf_map *inner_map, int sotype) +{ +	int verdict = bpf_program__fd(skel->progs.prog_skb_verdict); +	int verdict_map = bpf_map__fd(skel->maps.verdict_map); +	int sock_map = bpf_map__fd(inner_map); +	int err; + +	err = xbpf_prog_attach(verdict, sock_map, BPF_SK_SKB_VERDICT, 0); +	if (err) +		return; + +	skel->bss->test_ingress = false; +	unix_redir_to_connected(sotype, sock_map, verdict_map, REDIR_EGRESS); +	skel->bss->test_ingress = true; +	unix_redir_to_connected(sotype, sock_map, verdict_map, REDIR_INGRESS); + +	xbpf_prog_detach2(verdict, sock_map, BPF_SK_SKB_VERDICT); +} + +static void test_unix_redir(struct test_sockmap_listen *skel, struct bpf_map *map, +			    int sotype) +{ +	const char *family_name, *map_name; +	char s[MAX_TEST_NAME]; + +	family_name = family_str(AF_UNIX); +	map_name = map_type_str(map); +	snprintf(s, sizeof(s), "%s %s %s", map_name, family_name, __func__); +	if (!test__start_subtest(s)) +		return; +	unix_skb_redir_to_connected(skel, map, sotype); +} +  static void test_reuseport(struct test_sockmap_listen *skel,  			   struct bpf_map *map, int family, int sotype)  { @@ -1603,33 +1700,27 @@ static void test_reuseport(struct test_sockmap_listen *skel,  	}  } -static void udp_redir_to_connected(int family, int sotype, int sock_mapfd, -				   int verd_mapfd, enum redir_mode mode) +static int inet_socketpair(int family, int type, int *s, int *c)  { -	const char *log_prefix = redir_mode_str(mode);  	struct sockaddr_storage addr; -	int c0, c1, p0, p1; -	unsigned int pass; -	int retries = 100;  	socklen_t len; -	int err, n; -	u64 value; -	u32 key; -	char b; - -	zero_verdict_count(verd_mapfd); +	int p0, c0; +	int err; -	p0 = socket_loopback(family, sotype | SOCK_NONBLOCK); +	p0 = socket_loopback(family, type | SOCK_NONBLOCK);  	if (p0 < 0) -		return; +		return p0; +  	len = sizeof(addr);  	err = xgetsockname(p0, sockaddr(&addr), &len);  	if (err)  		goto close_peer0; -	c0 = xsocket(family, sotype | SOCK_NONBLOCK, 0); -	if (c0 < 0) +	c0 = xsocket(family, type | SOCK_NONBLOCK, 0); +	if (c0 < 0) { +		err = c0;  		goto close_peer0; +	}  	err = xconnect(c0, sockaddr(&addr), len);  	if (err)  		goto close_cli0; @@ -1640,35 +1731,133 @@ static void udp_redir_to_connected(int family, int sotype, int sock_mapfd,  	if (err)  		goto close_cli0; -	p1 = socket_loopback(family, sotype | SOCK_NONBLOCK); -	if (p1 < 0) -		goto close_cli0; -	err = xgetsockname(p1, sockaddr(&addr), &len); +	*s = p0; +	*c = c0; +	return 0; + +close_cli0: +	xclose(c0); +close_peer0: +	xclose(p0); +	return err; +} + +static void udp_redir_to_connected(int family, int sock_mapfd, int verd_mapfd, +				   enum redir_mode mode) +{ +	const char *log_prefix = redir_mode_str(mode); +	int c0, c1, p0, p1; +	unsigned int pass; +	int retries = 100; +	int err, n; +	u32 key; +	char b; + +	zero_verdict_count(verd_mapfd); + +	err = inet_socketpair(family, SOCK_DGRAM, &p0, &c0); +	if (err) +		return; +	err = inet_socketpair(family, SOCK_DGRAM, &p1, &c1);  	if (err)  		goto close_cli0; -	c1 = xsocket(family, sotype | SOCK_NONBLOCK, 0); -	if (c1 < 0) -		goto close_peer1; -	err = xconnect(c1, sockaddr(&addr), len); +	err = add_to_sockmap(sock_mapfd, p0, p1);  
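/* unix_redir_to_connected() above builds its endpoints with socketpair()
 * instead of listen()/connect() and inserts the peers with the new
 * add_to_sockmap() helper. The insertion step as a sketch; sockmap values
 * are socket FDs widened to u64, and the caller owns and eventually
 * closes both FDs:
 */
#include <sys/socket.h>
#include <bpf/bpf.h>

static int unix_pair_into_sockmap(int map_fd, int sfd[2])
{
	__u32 key = 0;
	__u64 value;

	if (socketpair(AF_UNIX, SOCK_STREAM | SOCK_NONBLOCK, 0, sfd))
		return -1;
	value = sfd[1];
	return bpf_map_update_elem(map_fd, &key, &value, BPF_NOEXIST);
}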
	if (err)  		goto close_cli1; -	err = xgetsockname(c1, sockaddr(&addr), &len); -	if (err) + +	n = write(c1, "a", 1); +	if (n < 0) +		FAIL_ERRNO("%s: write", log_prefix); +	if (n == 0) +		FAIL("%s: incomplete write", log_prefix); +	if (n < 1)  		goto close_cli1; -	err = xconnect(p1, sockaddr(&addr), len); + +	key = SK_PASS; +	err = xbpf_map_lookup_elem(verd_mapfd, &key, &pass);  	if (err)  		goto close_cli1; +	if (pass != 1) +		FAIL("%s: want pass count 1, have %d", log_prefix, pass); -	key = 0; -	value = p0; -	err = xbpf_map_update_elem(sock_mapfd, &key, &value, BPF_NOEXIST); +again: +	n = read(mode == REDIR_INGRESS ? p0 : c0, &b, 1); +	if (n < 0) { +		if (errno == EAGAIN && retries--) { +			usleep(1000); +			goto again; +		} +		FAIL_ERRNO("%s: read", log_prefix); +	} +	if (n == 0) +		FAIL("%s: incomplete read", log_prefix); + +close_cli1: +	xclose(c1); +	xclose(p1); +close_cli0: +	xclose(c0); +	xclose(p0); +} + +static void udp_skb_redir_to_connected(struct test_sockmap_listen *skel, +				       struct bpf_map *inner_map, int family) +{ +	int verdict = bpf_program__fd(skel->progs.prog_skb_verdict); +	int verdict_map = bpf_map__fd(skel->maps.verdict_map); +	int sock_map = bpf_map__fd(inner_map); +	int err; + +	err = xbpf_prog_attach(verdict, sock_map, BPF_SK_SKB_VERDICT, 0);  	if (err) -		goto close_cli1; +		return; -	key = 1; -	value = p1; -	err = xbpf_map_update_elem(sock_mapfd, &key, &value, BPF_NOEXIST); +	skel->bss->test_ingress = false; +	udp_redir_to_connected(family, sock_map, verdict_map, REDIR_EGRESS); +	skel->bss->test_ingress = true; +	udp_redir_to_connected(family, sock_map, verdict_map, REDIR_INGRESS); + +	xbpf_prog_detach2(verdict, sock_map, BPF_SK_SKB_VERDICT); +} + +static void test_udp_redir(struct test_sockmap_listen *skel, struct bpf_map *map, +			   int family) +{ +	const char *family_name, *map_name; +	char s[MAX_TEST_NAME]; + +	family_name = family_str(family); +	map_name = map_type_str(map); +	snprintf(s, sizeof(s), "%s %s %s", map_name, family_name, __func__); +	if (!test__start_subtest(s)) +		return; +	udp_skb_redir_to_connected(skel, map, family); +} + +static void inet_unix_redir_to_connected(int family, int type, int sock_mapfd, +					int verd_mapfd, enum redir_mode mode) +{ +	const char *log_prefix = redir_mode_str(mode); +	int c0, c1, p0, p1; +	unsigned int pass; +	int retries = 100; +	int err, n; +	int sfd[2]; +	u32 key; +	char b; + +	zero_verdict_count(verd_mapfd); + +	if (socketpair(AF_UNIX, SOCK_DGRAM | SOCK_NONBLOCK, 0, sfd)) +		return; +	c0 = sfd[0], p0 = sfd[1]; + +	err = inet_socketpair(family, SOCK_DGRAM, &p1, &c1); +	if (err) +		goto close; + +	err = add_to_sockmap(sock_mapfd, p0, p1);  	if (err)  		goto close_cli1; @@ -1690,8 +1879,10 @@ static void udp_redir_to_connected(int family, int sotype, int sock_mapfd,  again:  	n = read(mode == REDIR_INGRESS ? 
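/* Every *_skb_redir_to_connected() helper here follows the same recipe:
 * attach the verdict program to the sockmap, run the redirect with
 * test_ingress false and then true, and detach. The attach step as a
 * sketch; the target FD of the attachment is the sockmap itself:
 */
#include <bpf/bpf.h>

static int attach_skb_verdict(int verdict_prog_fd, int sock_map_fd)
{
	return bpf_prog_attach(verdict_prog_fd, sock_map_fd,
			       BPF_SK_SKB_VERDICT, 0);
}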
p0 : c0, &b, 1);  	if (n < 0) { -		if (errno == EAGAIN && retries--) +		if (errno == EAGAIN && retries--) { +			usleep(1000);  			goto again; +		}  		FAIL_ERRNO("%s: read", log_prefix);  	}  	if (n == 0) @@ -1699,16 +1890,102 @@ again:  close_cli1:  	xclose(c1); -close_peer1: +	xclose(p1); +close: +	xclose(c0); +	xclose(p0); +} + +static void inet_unix_skb_redir_to_connected(struct test_sockmap_listen *skel, +					    struct bpf_map *inner_map, int family) +{ +	int verdict = bpf_program__fd(skel->progs.prog_skb_verdict); +	int verdict_map = bpf_map__fd(skel->maps.verdict_map); +	int sock_map = bpf_map__fd(inner_map); +	int err; + +	err = xbpf_prog_attach(verdict, sock_map, BPF_SK_SKB_VERDICT, 0); +	if (err) +		return; + +	skel->bss->test_ingress = false; +	inet_unix_redir_to_connected(family, SOCK_DGRAM, sock_map, verdict_map, +				    REDIR_EGRESS); +	inet_unix_redir_to_connected(family, SOCK_STREAM, sock_map, verdict_map, +				    REDIR_EGRESS); +	skel->bss->test_ingress = true; +	inet_unix_redir_to_connected(family, SOCK_DGRAM, sock_map, verdict_map, +				    REDIR_INGRESS); +	inet_unix_redir_to_connected(family, SOCK_STREAM, sock_map, verdict_map, +				    REDIR_INGRESS); + +	xbpf_prog_detach2(verdict, sock_map, BPF_SK_SKB_VERDICT); +} + +static void unix_inet_redir_to_connected(int family, int type, int sock_mapfd, +					int verd_mapfd, enum redir_mode mode) +{ +	const char *log_prefix = redir_mode_str(mode); +	int c0, c1, p0, p1; +	unsigned int pass; +	int err, n; +	int sfd[2]; +	u32 key; +	char b; +	int retries = 100; + +	zero_verdict_count(verd_mapfd); + +	err = inet_socketpair(family, SOCK_DGRAM, &p0, &c0); +	if (err) +		return; + +	if (socketpair(AF_UNIX, SOCK_DGRAM | SOCK_NONBLOCK, 0, sfd)) +		goto close_cli0; +	c1 = sfd[0], p1 = sfd[1]; + +	err = add_to_sockmap(sock_mapfd, p0, p1); +	if (err) +		goto close; + +	n = write(c1, "a", 1); +	if (n < 0) +		FAIL_ERRNO("%s: write", log_prefix); +	if (n == 0) +		FAIL("%s: incomplete write", log_prefix); +	if (n < 1) +		goto close; + +	key = SK_PASS; +	err = xbpf_map_lookup_elem(verd_mapfd, &key, &pass); +	if (err) +		goto close; +	if (pass != 1) +		FAIL("%s: want pass count 1, have %d", log_prefix, pass); + +again: +	n = read(mode == REDIR_INGRESS ? 
p0 : c0, &b, 1); +	if (n < 0) { +		if (errno == EAGAIN && retries--) { +			usleep(1000); +			goto again; +		} +		FAIL_ERRNO("%s: read", log_prefix); +	} +	if (n == 0) +		FAIL("%s: incomplete read", log_prefix); + +close: +	xclose(c1);  	xclose(p1);  close_cli0:  	xclose(c0); -close_peer0:  	xclose(p0); +  } -static void udp_skb_redir_to_connected(struct test_sockmap_listen *skel, -				       struct bpf_map *inner_map, int family) +static void unix_inet_skb_redir_to_connected(struct test_sockmap_listen *skel, +					    struct bpf_map *inner_map, int family)  {  	int verdict = bpf_program__fd(skel->progs.prog_skb_verdict);  	int verdict_map = bpf_map__fd(skel->maps.verdict_map); @@ -1720,17 +1997,21 @@ static void udp_skb_redir_to_connected(struct test_sockmap_listen *skel,  		return;  	skel->bss->test_ingress = false; -	udp_redir_to_connected(family, SOCK_DGRAM, sock_map, verdict_map, -			       REDIR_EGRESS); +	unix_inet_redir_to_connected(family, SOCK_DGRAM, sock_map, verdict_map, +				     REDIR_EGRESS); +	unix_inet_redir_to_connected(family, SOCK_STREAM, sock_map, verdict_map, +				     REDIR_EGRESS);  	skel->bss->test_ingress = true; -	udp_redir_to_connected(family, SOCK_DGRAM, sock_map, verdict_map, -			       REDIR_INGRESS); +	unix_inet_redir_to_connected(family, SOCK_DGRAM, sock_map, verdict_map, +				     REDIR_INGRESS); +	unix_inet_redir_to_connected(family, SOCK_STREAM, sock_map, verdict_map, +				     REDIR_INGRESS);  	xbpf_prog_detach2(verdict, sock_map, BPF_SK_SKB_VERDICT);  } -static void test_udp_redir(struct test_sockmap_listen *skel, struct bpf_map *map, -			   int family) +static void test_udp_unix_redir(struct test_sockmap_listen *skel, struct bpf_map *map, +				int family)  {  	const char *family_name, *map_name;  	char s[MAX_TEST_NAME]; @@ -1740,7 +2021,8 @@ static void test_udp_redir(struct test_sockmap_listen *skel, struct bpf_map *map  	snprintf(s, sizeof(s), "%s %s %s", map_name, family_name, __func__);  	if (!test__start_subtest(s))  		return; -	udp_skb_redir_to_connected(skel, map, family); +	inet_unix_skb_redir_to_connected(skel, map, family); +	unix_inet_skb_redir_to_connected(skel, map, family);  }  static void run_tests(struct test_sockmap_listen *skel, struct bpf_map *map, @@ -1752,6 +2034,7 @@ static void run_tests(struct test_sockmap_listen *skel, struct bpf_map *map,  	test_reuseport(skel, map, family, SOCK_STREAM);  	test_reuseport(skel, map, family, SOCK_DGRAM);  	test_udp_redir(skel, map, family); +	test_udp_unix_redir(skel, map, family);  }  void test_sockmap_listen(void) @@ -1767,10 +2050,14 @@ void test_sockmap_listen(void)  	skel->bss->test_sockmap = true;  	run_tests(skel, skel->maps.sock_map, AF_INET);  	run_tests(skel, skel->maps.sock_map, AF_INET6); +	test_unix_redir(skel, skel->maps.sock_map, SOCK_DGRAM); +	test_unix_redir(skel, skel->maps.sock_map, SOCK_STREAM);  	skel->bss->test_sockmap = false;  	run_tests(skel, skel->maps.sock_hash, AF_INET);  	run_tests(skel, skel->maps.sock_hash, AF_INET6); +	test_unix_redir(skel, skel->maps.sock_hash, SOCK_DGRAM); +	test_unix_redir(skel, skel->maps.sock_hash, SOCK_STREAM);  	test_sockmap_listen__destroy(skel);  } diff --git a/tools/testing/selftests/bpf/prog_tests/sockopt_inherit.c b/tools/testing/selftests/bpf/prog_tests/sockopt_inherit.c index ec281b0363b8..86f97681ad89 100644 --- a/tools/testing/selftests/bpf/prog_tests/sockopt_inherit.c +++ b/tools/testing/selftests/bpf/prog_tests/sockopt_inherit.c @@ -195,8 +195,10 @@ static void run_test(int cgroup_fd)  	pthread_mutex_lock(&server_started_mtx);  	
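/* The sockopt_inherit fix below releases server_started_mtx on the
 * pthread_create() error path; previously that branch jumped to cleanup
 * with the mutex still held. The corrected shape of the handshake
 * (production code would also guard pthread_cond_wait() with a predicate
 * against spurious wakeups):
 */
#include <pthread.h>

static pthread_mutex_t mtx = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;

static int start_and_wait(pthread_t *tid, void *(*fn)(void *), void *arg)
{
	pthread_mutex_lock(&mtx);
	if (pthread_create(tid, NULL, fn, arg)) {
		pthread_mutex_unlock(&mtx);	/* don't leak the lock */
		return -1;
	}
	pthread_cond_wait(&cond, &mtx);	/* fn signals cond once ready */
	pthread_mutex_unlock(&mtx);
	return 0;
}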
if (CHECK_FAIL(pthread_create(&tid, NULL, server_thread, -				      (void *)&server_fd))) +				      (void *)&server_fd))) { +		pthread_mutex_unlock(&server_started_mtx);  		goto close_server_fd; +	}  	pthread_cond_wait(&server_started, &server_started_mtx);  	pthread_mutex_unlock(&server_started_mtx); diff --git a/tools/testing/selftests/bpf/prog_tests/sockopt_qos_to_cc.c b/tools/testing/selftests/bpf/prog_tests/sockopt_qos_to_cc.c new file mode 100644 index 000000000000..6b53b3cb8dad --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/sockopt_qos_to_cc.c @@ -0,0 +1,70 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2021 Facebook */ +#include <test_progs.h> +#include <netinet/tcp.h> +#include "sockopt_qos_to_cc.skel.h" + +static void run_setsockopt_test(int cg_fd, int sock_fd) +{ +	socklen_t optlen; +	char cc[16]; /* TCP_CA_NAME_MAX */ +	int buf; +	int err = -1; + +	buf = 0x2D; +	err = setsockopt(sock_fd, SOL_IPV6, IPV6_TCLASS, &buf, sizeof(buf)); +	if (!ASSERT_OK(err, "setsockopt(sock_fd, IPV6_TCLASS)")) +		return; + +	/* Verify the setsockopt cc change */ +	optlen = sizeof(cc); +	err = getsockopt(sock_fd, SOL_TCP, TCP_CONGESTION, cc, &optlen); +	if (!ASSERT_OK(err, "getsockopt(sock_fd, TCP_CONGESTION)")) +		return; + +	if (!ASSERT_STREQ(cc, "reno", "getsockopt(sock_fd, TCP_CONGESTION)")) +		return; +} + +void test_sockopt_qos_to_cc(void) +{ +	struct sockopt_qos_to_cc *skel; +	char cc_cubic[16] = "cubic"; /* TCP_CA_NAME_MAX */ +	int cg_fd = -1; +	int sock_fd = -1; +	int err; + +	cg_fd = test__join_cgroup("/sockopt_qos_to_cc"); +	if (!ASSERT_GE(cg_fd, 0, "cg-join(sockopt_qos_to_cc)")) +		return; + +	skel = sockopt_qos_to_cc__open_and_load(); +	if (!ASSERT_OK_PTR(skel, "skel")) +		goto done; + +	sock_fd = socket(AF_INET6, SOCK_STREAM, 0); +	if (!ASSERT_GE(sock_fd, 0, "v6 socket open")) +		goto done; + +	err = setsockopt(sock_fd, SOL_TCP, TCP_CONGESTION, &cc_cubic, +			 sizeof(cc_cubic)); +	if (!ASSERT_OK(err, "setsockopt(sock_fd, TCP_CONGESTION)")) +		goto done; + +	skel->links.sockopt_qos_to_cc = +		bpf_program__attach_cgroup(skel->progs.sockopt_qos_to_cc, +					   cg_fd); +	if (!ASSERT_OK_PTR(skel->links.sockopt_qos_to_cc, +			   "prog_attach(sockopt_qos_to_cc)")) +		goto done; + +	run_setsockopt_test(cg_fd, sock_fd); + +done: +	if (sock_fd != -1) +		close(sock_fd); +	if (cg_fd != -1) +		close(cg_fd); +	/* destroy can take null and error pointer */ +	sockopt_qos_to_cc__destroy(skel); +} diff --git a/tools/testing/selftests/bpf/prog_tests/task_pt_regs.c b/tools/testing/selftests/bpf/prog_tests/task_pt_regs.c new file mode 100644 index 000000000000..53f0e0fa1a53 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/task_pt_regs.c @@ -0,0 +1,47 @@ +// SPDX-License-Identifier: GPL-2.0 +#define _GNU_SOURCE +#include <test_progs.h> +#include <linux/ptrace.h> +#include "test_task_pt_regs.skel.h" + +void test_task_pt_regs(void) +{ +	struct test_task_pt_regs *skel; +	struct bpf_link *uprobe_link; +	size_t uprobe_offset; +	ssize_t base_addr; +	bool match; + +	base_addr = get_base_addr(); +	if (!ASSERT_GT(base_addr, 0, "get_base_addr")) +		return; +	uprobe_offset = get_uprobe_offset(&get_base_addr, base_addr); + +	skel = test_task_pt_regs__open_and_load(); +	if (!ASSERT_OK_PTR(skel, "skel_open")) +		return; +	if (!ASSERT_OK_PTR(skel->bss, "check_bss")) +		goto cleanup; + +	uprobe_link = bpf_program__attach_uprobe(skel->progs.handle_uprobe, +						 false /* retprobe */, +						 0 /* self pid */, +						 "/proc/self/exe", +						 uprobe_offset); +	if (!ASSERT_OK_PTR(uprobe_link, 
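/* sockopt_qos_to_cc above verifies the BPF-driven congestion-control
 * switch by reading TCP_CONGESTION back. The plain user-space half of
 * that check, standalone:
 */
#include <string.h>
#include <netinet/tcp.h>
#include <sys/socket.h>

static int set_and_check_cc(int fd, const char *want)
{
	char cc[16];	/* TCP_CA_NAME_MAX */
	socklen_t len = sizeof(cc);

	if (setsockopt(fd, SOL_TCP, TCP_CONGESTION, want, strlen(want)))
		return -1;
	if (getsockopt(fd, SOL_TCP, TCP_CONGESTION, cc, &len))
		return -1;
	return strcmp(cc, want) ? -1 : 0;
}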
"attach_uprobe")) +		goto cleanup; +	skel->links.handle_uprobe = uprobe_link; + +	/* trigger & validate uprobe */ +	get_base_addr(); + +	if (!ASSERT_EQ(skel->bss->uprobe_res, 1, "check_uprobe_res")) +		goto cleanup; + +	match = !memcmp(&skel->bss->current_regs, &skel->bss->ctx_regs, +			sizeof(skel->bss->current_regs)); +	ASSERT_TRUE(match, "check_regs_match"); + +cleanup: +	test_task_pt_regs__destroy(skel); +} diff --git a/tools/testing/selftests/bpf/prog_tests/tc_redirect.c b/tools/testing/selftests/bpf/prog_tests/tc_redirect.c index 5703c918812b..e7201ba29ccd 100644 --- a/tools/testing/selftests/bpf/prog_tests/tc_redirect.c +++ b/tools/testing/selftests/bpf/prog_tests/tc_redirect.c @@ -13,15 +13,16 @@  #define _GNU_SOURCE  #include <arpa/inet.h> +#include <linux/if.h> +#include <linux/if_tun.h>  #include <linux/limits.h>  #include <linux/sysctl.h> -#include <linux/if_tun.h> -#include <linux/if.h>  #include <sched.h>  #include <stdbool.h>  #include <stdio.h> -#include <sys/stat.h>  #include <sys/mount.h> +#include <sys/stat.h> +#include <unistd.h>  #include "test_progs.h"  #include "network_helpers.h" @@ -391,9 +392,7 @@ done:  static int test_ping(int family, const char *addr)  { -	const char *ping = family == AF_INET6 ? "ping6" : "ping"; - -	SYS("ip netns exec " NS_SRC " %s " PING_ARGS " %s > /dev/null", ping, addr); +	SYS("ip netns exec " NS_SRC " %s " PING_ARGS " %s > /dev/null", ping_command(family), addr);  	return 0;  fail:  	return -1; diff --git a/tools/testing/selftests/bpf/prog_tests/timer.c b/tools/testing/selftests/bpf/prog_tests/timer.c new file mode 100644 index 000000000000..25f40e1b9967 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/timer.c @@ -0,0 +1,55 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2021 Facebook */ +#include <test_progs.h> +#include "timer.skel.h" + +static int timer(struct timer *timer_skel) +{ +	int err, prog_fd; +	__u32 duration = 0, retval; + +	err = timer__attach(timer_skel); +	if (!ASSERT_OK(err, "timer_attach")) +		return err; + +	ASSERT_EQ(timer_skel->data->callback_check, 52, "callback_check1"); +	ASSERT_EQ(timer_skel->data->callback2_check, 52, "callback2_check1"); + +	prog_fd = bpf_program__fd(timer_skel->progs.test1); +	err = bpf_prog_test_run(prog_fd, 1, NULL, 0, +				NULL, NULL, &retval, &duration); +	ASSERT_OK(err, "test_run"); +	ASSERT_EQ(retval, 0, "test_run"); +	timer__detach(timer_skel); + +	usleep(50); /* 10 usecs should be enough, but give it extra */ +	/* check that timer_cb1() was executed 10+10 times */ +	ASSERT_EQ(timer_skel->data->callback_check, 42, "callback_check2"); +	ASSERT_EQ(timer_skel->data->callback2_check, 42, "callback2_check2"); + +	/* check that timer_cb2() was executed twice */ +	ASSERT_EQ(timer_skel->bss->bss_data, 10, "bss_data"); + +	/* check that there were no errors in timer execution */ +	ASSERT_EQ(timer_skel->bss->err, 0, "err"); + +	/* check that code paths completed */ +	ASSERT_EQ(timer_skel->bss->ok, 1 | 2 | 4, "ok"); + +	return 0; +} + +void test_timer(void) +{ +	struct timer *timer_skel = NULL; +	int err; + +	timer_skel = timer__open_and_load(); +	if (!ASSERT_OK_PTR(timer_skel, "timer_skel_load")) +		goto cleanup; + +	err = timer(timer_skel); +	ASSERT_OK(err, "timer"); +cleanup: +	timer__destroy(timer_skel); +} diff --git a/tools/testing/selftests/bpf/prog_tests/timer_mim.c b/tools/testing/selftests/bpf/prog_tests/timer_mim.c new file mode 100644 index 000000000000..ced8f6cf347c --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/timer_mim.c @@ -0,0 +1,77 @@ +// 
SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2021 Facebook */
+#include <test_progs.h>
+#include "timer_mim.skel.h"
+#include "timer_mim_reject.skel.h"
+
+static int timer_mim(struct timer_mim *timer_skel)
+{
+	__u32 duration = 0, retval;
+	__u64 cnt1, cnt2;
+	int err, prog_fd, key1 = 1;
+
+	err = timer_mim__attach(timer_skel);
+	if (!ASSERT_OK(err, "timer_attach"))
+		return err;
+
+	prog_fd = bpf_program__fd(timer_skel->progs.test1);
+	err = bpf_prog_test_run(prog_fd, 1, NULL, 0,
+				NULL, NULL, &retval, &duration);
+	ASSERT_OK(err, "test_run");
+	ASSERT_EQ(retval, 0, "test_run");
+	timer_mim__detach(timer_skel);
+
+	/* check that timer_cb[12] are incrementing 'cnt' */
+	cnt1 = READ_ONCE(timer_skel->bss->cnt);
+	for (int i = 0; i < 100; i++) {
+		cnt2 = READ_ONCE(timer_skel->bss->cnt);
+		if (cnt2 != cnt1)
+			break;
+		usleep(200); /* 100 times more than interval */
+	}
+	ASSERT_GT(cnt2, cnt1, "cnt");
+
+	ASSERT_EQ(timer_skel->bss->err, 0, "err");
+	/* check that code paths completed */
+	ASSERT_EQ(timer_skel->bss->ok, 1 | 2, "ok");
+
+	close(bpf_map__fd(timer_skel->maps.inner_htab));
+	err = bpf_map_delete_elem(bpf_map__fd(timer_skel->maps.outer_arr), &key1);
+	ASSERT_EQ(err, 0, "delete inner map");
+
+	/* check that timer_cb[12] are no longer running */
+	cnt1 = READ_ONCE(timer_skel->bss->cnt);
+	for (int i = 0; i < 100; i++) {
+		usleep(200); /* 100 times more than interval */
+		cnt2 = READ_ONCE(timer_skel->bss->cnt);
+		if (cnt2 == cnt1)
+			break;
+	}
+	ASSERT_EQ(cnt2, cnt1, "cnt");
+
+	return 0;
+}
+
+void test_timer_mim(void)
+{
+	struct timer_mim_reject *timer_reject_skel = NULL;
+	libbpf_print_fn_t old_print_fn = NULL;
+	struct timer_mim *timer_skel = NULL;
+	int err;
+
+	old_print_fn = libbpf_set_print(NULL);
+	timer_reject_skel = timer_mim_reject__open_and_load();
+	libbpf_set_print(old_print_fn);
+	if (!ASSERT_ERR_PTR(timer_reject_skel, "timer_reject_skel_load"))
+		goto cleanup;
+
+	timer_skel = timer_mim__open_and_load();
+	if (!ASSERT_OK_PTR(timer_skel, "timer_skel_load"))
+		goto cleanup;
+
+	err = timer_mim(timer_skel);
+	ASSERT_OK(err, "timer_mim");
+cleanup:
+	timer_mim__destroy(timer_skel);
+	timer_mim_reject__destroy(timer_reject_skel);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_bonding.c b/tools/testing/selftests/bpf/prog_tests/xdp_bonding.c
new file mode 100644
index 000000000000..370d220288a6
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/xdp_bonding.c
@@ -0,0 +1,520 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/**
+ * Test XDP bonding support
+ *
+ * Sets up two bonded veth pairs between two fresh namespaces
+ * and verifies that an XDP_TX program loaded on the bond device
+ * is correctly propagated to the slave devices and that XDP_TX'd
+ * packets are balanced by the bonding driver.
+ */ + +#define _GNU_SOURCE +#include <sched.h> +#include <net/if.h> +#include <linux/if_link.h> +#include "test_progs.h" +#include "network_helpers.h" +#include <linux/if_bonding.h> +#include <linux/limits.h> +#include <linux/udp.h> + +#include "xdp_dummy.skel.h" +#include "xdp_redirect_multi_kern.skel.h" +#include "xdp_tx.skel.h" + +#define BOND1_MAC {0x00, 0x11, 0x22, 0x33, 0x44, 0x55} +#define BOND1_MAC_STR "00:11:22:33:44:55" +#define BOND2_MAC {0x00, 0x22, 0x33, 0x44, 0x55, 0x66} +#define BOND2_MAC_STR "00:22:33:44:55:66" +#define NPACKETS 100 + +static int root_netns_fd = -1; + +static void restore_root_netns(void) +{ +	ASSERT_OK(setns(root_netns_fd, CLONE_NEWNET), "restore_root_netns"); +} + +static int setns_by_name(char *name) +{ +	int nsfd, err; +	char nspath[PATH_MAX]; + +	snprintf(nspath, sizeof(nspath), "%s/%s", "/var/run/netns", name); +	nsfd = open(nspath, O_RDONLY | O_CLOEXEC); +	if (nsfd < 0) +		return -1; + +	err = setns(nsfd, CLONE_NEWNET); +	close(nsfd); +	return err; +} + +static int get_rx_packets(const char *iface) +{ +	FILE *f; +	char line[512]; +	int iface_len = strlen(iface); + +	f = fopen("/proc/net/dev", "r"); +	if (!f) +		return -1; + +	while (fgets(line, sizeof(line), f)) { +		char *p = line; + +		while (*p == ' ') +			p++; /* skip whitespace */ +		if (!strncmp(p, iface, iface_len)) { +			p += iface_len; +			if (*p++ != ':') +				continue; +			while (*p == ' ') +				p++; /* skip whitespace */ +			while (*p && *p != ' ') +				p++; /* skip rx bytes */ +			while (*p == ' ') +				p++; /* skip whitespace */ +			fclose(f); +			return atoi(p); +		} +	} +	fclose(f); +	return -1; +} + +#define MAX_BPF_LINKS 8 + +struct skeletons { +	struct xdp_dummy *xdp_dummy; +	struct xdp_tx *xdp_tx; +	struct xdp_redirect_multi_kern *xdp_redirect_multi_kern; + +	int nlinks; +	struct bpf_link *links[MAX_BPF_LINKS]; +}; + +static int xdp_attach(struct skeletons *skeletons, struct bpf_program *prog, char *iface) +{ +	struct bpf_link *link; +	int ifindex; + +	ifindex = if_nametoindex(iface); +	if (!ASSERT_GT(ifindex, 0, "get ifindex")) +		return -1; + +	if (!ASSERT_LE(skeletons->nlinks+1, MAX_BPF_LINKS, "too many XDP programs attached")) +		return -1; + +	link = bpf_program__attach_xdp(prog, ifindex); +	if (!ASSERT_OK_PTR(link, "attach xdp program")) +		return -1; + +	skeletons->links[skeletons->nlinks++] = link; +	return 0; +} + +enum { +	BOND_ONE_NO_ATTACH = 0, +	BOND_BOTH_AND_ATTACH, +}; + +static const char * const mode_names[] = { +	[BOND_MODE_ROUNDROBIN]   = "balance-rr", +	[BOND_MODE_ACTIVEBACKUP] = "active-backup", +	[BOND_MODE_XOR]          = "balance-xor", +	[BOND_MODE_BROADCAST]    = "broadcast", +	[BOND_MODE_8023AD]       = "802.3ad", +	[BOND_MODE_TLB]          = "balance-tlb", +	[BOND_MODE_ALB]          = "balance-alb", +}; + +static const char * const xmit_policy_names[] = { +	[BOND_XMIT_POLICY_LAYER2]       = "layer2", +	[BOND_XMIT_POLICY_LAYER34]      = "layer3+4", +	[BOND_XMIT_POLICY_LAYER23]      = "layer2+3", +	[BOND_XMIT_POLICY_ENCAP23]      = "encap2+3", +	[BOND_XMIT_POLICY_ENCAP34]      = "encap3+4", +}; + +static int bonding_setup(struct skeletons *skeletons, int mode, int xmit_policy, +			 int bond_both_attach) +{ +#define SYS(fmt, ...)						
\
+	({							\
+		char cmd[1024];					\
+		snprintf(cmd, sizeof(cmd), fmt, ##__VA_ARGS__);	\
+		if (!ASSERT_OK(system(cmd), cmd))		\
+			return -1;				\
+	})

+	SYS("ip netns add ns_dst");
+	SYS("ip link add veth1_1 type veth peer name veth2_1 netns ns_dst");
+	SYS("ip link add veth1_2 type veth peer name veth2_2 netns ns_dst");
+
+	SYS("ip link add bond1 type bond mode %s xmit_hash_policy %s",
+	    mode_names[mode], xmit_policy_names[xmit_policy]);
+	SYS("ip link set bond1 up address " BOND1_MAC_STR " addrgenmode none");
+	SYS("ip -netns ns_dst link add bond2 type bond mode %s xmit_hash_policy %s",
+	    mode_names[mode], xmit_policy_names[xmit_policy]);
+	SYS("ip -netns ns_dst link set bond2 up address " BOND2_MAC_STR " addrgenmode none");
+
+	SYS("ip link set veth1_1 master bond1");
+	if (bond_both_attach == BOND_BOTH_AND_ATTACH) {
+		SYS("ip link set veth1_2 master bond1");
+	} else {
+		SYS("ip link set veth1_2 up addrgenmode none");
+
+		if (xdp_attach(skeletons, skeletons->xdp_dummy->progs.xdp_dummy_prog, "veth1_2"))
+			return -1;
+	}
+
+	SYS("ip -netns ns_dst link set veth2_1 master bond2");
+
+	if (bond_both_attach == BOND_BOTH_AND_ATTACH)
+		SYS("ip -netns ns_dst link set veth2_2 master bond2");
+	else
+		SYS("ip -netns ns_dst link set veth2_2 up addrgenmode none");
+
+	/* Load a dummy program on the sending side as well: the veth peer
+	 * needs to have an XDP program loaded for XDP_TX to work.
+	 */
+	if (xdp_attach(skeletons, skeletons->xdp_dummy->progs.xdp_dummy_prog, "bond1"))
+		return -1;
+
+	if (bond_both_attach == BOND_BOTH_AND_ATTACH) {
+		if (!ASSERT_OK(setns_by_name("ns_dst"), "set netns to ns_dst"))
+			return -1;
+
+		if (xdp_attach(skeletons, skeletons->xdp_tx->progs.xdp_tx, "bond2"))
+			return -1;
+
+		restore_root_netns();
+	}
+
+	return 0;
+
+#undef SYS
+}
+
+static void bonding_cleanup(struct skeletons *skeletons)
+{
+	restore_root_netns();
+	while (skeletons->nlinks) {
+		skeletons->nlinks--;
+		bpf_link__destroy(skeletons->links[skeletons->nlinks]);
+	}
+	ASSERT_OK(system("ip link delete bond1"), "delete bond1");
+	ASSERT_OK(system("ip link delete veth1_1"), "delete veth1_1");
+	ASSERT_OK(system("ip link delete veth1_2"), "delete veth1_2");
+	ASSERT_OK(system("ip netns delete ns_dst"), "delete ns_dst");
+}
+
+static int send_udp_packets(int vary_dst_ip)
+{
+	struct ethhdr eh = {
+		.h_source = BOND1_MAC,
+		.h_dest = BOND2_MAC,
+		.h_proto = htons(ETH_P_IP),
+	};
+	uint8_t buf[128] = {};
+	struct iphdr *iph = (struct iphdr *)(buf + sizeof(eh));
+	struct udphdr *uh = (struct udphdr *)(buf + sizeof(eh) + sizeof(*iph));
+	int i, s = -1;
+	int ifindex;
+
+	s = socket(AF_PACKET, SOCK_RAW, IPPROTO_RAW);
+	if (!ASSERT_GE(s, 0, "socket"))
+		goto err;
+
+	ifindex = if_nametoindex("bond1");
+	if (!ASSERT_GT(ifindex, 0, "get bond1 ifindex"))
+		goto err;
+
+	memcpy(buf, &eh, sizeof(eh));
+	iph->ihl = 5;
+	iph->version = 4;
+	iph->tos = 16;
+	iph->id = 1;
+	iph->ttl = 64;
+	iph->protocol = IPPROTO_UDP;
+	iph->saddr = 1;
+	iph->daddr = 2;
+	iph->tot_len = htons(sizeof(buf) - ETH_HLEN);
+	iph->check = 0;
+
+	for (i = 1; i <= NPACKETS; i++) {
+		int n;
+		struct sockaddr_ll saddr_ll = {
+			.sll_ifindex = ifindex,
+			.sll_halen = ETH_ALEN,
+			.sll_addr = BOND2_MAC,
+		};
+
+		/* vary the UDP destination port for even distribution with roundrobin/xor modes */
+		uh->dest++;
+
+		if (vary_dst_ip)
+			iph->daddr++;
+
+		n = sendto(s, buf, sizeof(buf), 0, (struct sockaddr *)&saddr_ll, sizeof(saddr_ll));
+		if (!ASSERT_EQ(n, sizeof(buf), "sendto"))
+			goto err;
+	}
+
+	return 0;
+
+err:
+	if
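/* send_udp_packets() above injects prebuilt Ethernet frames through an
 * AF_PACKET socket and selects the egress device per sendto() via
 * sockaddr_ll rather than bind(). A minimal sketch of that addressing;
 * the frame is assumed to already carry its Ethernet header:
 */
#include <net/if.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/if_packet.h>
#include <linux/if_ether.h>

static int send_raw_frame(int s, const char *ifname,
			  const void *frame, size_t len)
{
	struct sockaddr_ll sll = {
		.sll_family = AF_PACKET,
		.sll_ifindex = if_nametoindex(ifname),
		.sll_halen = ETH_ALEN,	/* the test also fills sll_addr */
	};

	if (sendto(s, frame, len, 0,
		   (struct sockaddr *)&sll, sizeof(sll)) != (ssize_t)len)
		return -1;
	return 0;
}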
(s >= 0)
+		close(s);
+	return -1;
+}
+
+static void test_xdp_bonding_with_mode(struct skeletons *skeletons, int mode, int xmit_policy)
+{
+	int bond1_rx;
+
+	if (bonding_setup(skeletons, mode, xmit_policy, BOND_BOTH_AND_ATTACH))
+		goto out;
+
+	if (send_udp_packets(xmit_policy != BOND_XMIT_POLICY_LAYER34))
+		goto out;
+
+	bond1_rx = get_rx_packets("bond1");
+	ASSERT_EQ(bond1_rx, NPACKETS, "expected more received packets");
+
+	switch (mode) {
+	case BOND_MODE_ROUNDROBIN:
+	case BOND_MODE_XOR: {
+		int veth1_rx = get_rx_packets("veth1_1");
+		int veth2_rx = get_rx_packets("veth1_2");
+		int diff = abs(veth1_rx - veth2_rx);
+
+		ASSERT_GE(veth1_rx + veth2_rx, NPACKETS, "expected more packets");
+
+		switch (xmit_policy) {
+		case BOND_XMIT_POLICY_LAYER2:
+			ASSERT_GE(diff, NPACKETS,
+				  "expected packets on only one of the interfaces");
+			break;
+		case BOND_XMIT_POLICY_LAYER23:
+		case BOND_XMIT_POLICY_LAYER34:
+			ASSERT_LT(diff, NPACKETS/2,
+				  "expected even distribution of packets");
+			break;
+		default:
+			PRINT_FAIL("Unimplemented xmit_policy=%d\n", xmit_policy);
+			break;
+		}
+		break;
+	}
+	case BOND_MODE_ACTIVEBACKUP: {
+		int veth1_rx = get_rx_packets("veth1_1");
+		int veth2_rx = get_rx_packets("veth1_2");
+		int diff = abs(veth1_rx - veth2_rx);
+
+		ASSERT_GE(diff, NPACKETS,
+			  "expected packets on only one of the interfaces");
+		break;
+	}
+	default:
+		PRINT_FAIL("Unimplemented mode=%d\n", mode);
+		break;
+	}
+
+out:
+	bonding_cleanup(skeletons);
+}
+
+/* Test broadcast redirection using xdp_redirect_map_multi_prog: add all the
+ * interfaces to the map and check that broadcasting does not send the packet
+ * back to either the ingress bond device (bond2) or its slave (veth2_1).
+ */
+static void test_xdp_bonding_redirect_multi(struct skeletons *skeletons)
+{
+	static const char * const ifaces[] = {"bond2", "veth2_1", "veth2_2"};
+	int veth1_1_rx, veth1_2_rx;
+	int err;
+
+	if (bonding_setup(skeletons, BOND_MODE_ROUNDROBIN, BOND_XMIT_POLICY_LAYER23,
+			  BOND_ONE_NO_ATTACH))
+		goto out;
+
+
+	if (!ASSERT_OK(setns_by_name("ns_dst"), "could not set netns to ns_dst"))
+		goto out;
+
+	/* populate the devmap with the relevant interfaces */
+	for (int i = 0; i < ARRAY_SIZE(ifaces); i++) {
+		int ifindex = if_nametoindex(ifaces[i]);
+		int map_fd = bpf_map__fd(skeletons->xdp_redirect_multi_kern->maps.map_all);
+
+		if (!ASSERT_GT(ifindex, 0, "could not get interface index"))
+			goto out;
+
+		err = bpf_map_update_elem(map_fd, &ifindex, &ifindex, 0);
+		if (!ASSERT_OK(err, "add interface to map_all"))
+			goto out;
+	}
+
+	if (xdp_attach(skeletons,
+		       skeletons->xdp_redirect_multi_kern->progs.xdp_redirect_map_multi_prog,
+		       "bond2"))
+		goto out;
+
+	restore_root_netns();
+
+	if (send_udp_packets(BOND_MODE_ROUNDROBIN))
+		goto out;
+
+	veth1_1_rx = get_rx_packets("veth1_1");
+	veth1_2_rx = get_rx_packets("veth1_2");
+
+	ASSERT_EQ(veth1_1_rx, 0, "expected no packets on veth1_1");
+	ASSERT_GE(veth1_2_rx, NPACKETS, "expected packets on veth1_2");
+
+out:
+	restore_root_netns();
+	bonding_cleanup(skeletons);
+}
+
+/* Test that XDP programs cannot be attached to both the bond master and slaves simultaneously */
+static void test_xdp_bonding_attach(struct skeletons *skeletons)
+{
+	struct bpf_link *link = NULL;
+	struct bpf_link *link2 = NULL;
+	int veth, bond;
+	int err;
+
+	if (!ASSERT_OK(system("ip link add veth type veth"), "add veth"))
+		goto out;
+	if (!ASSERT_OK(system("ip link add bond type bond"), "add bond"))
+		goto out;
+
+	veth = if_nametoindex("veth");
+	if (!ASSERT_GT(veth, 0, "if_nametoindex veth"))
+		goto out;
+	bond = if_nametoindex("bond");
+	if (!ASSERT_GT(bond, 0, "if_nametoindex bond"))
+		goto out;
+
+	/* enslaving with an XDP program loaded fails */
+	link = bpf_program__attach_xdp(skeletons->xdp_dummy->progs.xdp_dummy_prog, veth);
+	if (!ASSERT_OK_PTR(link, "attach program to veth"))
+		goto out;
+
+	err = system("ip link set veth master bond");
+	if (!ASSERT_NEQ(err, 0, "attaching slave with xdp program expected to fail"))
+		goto out;
+
+	bpf_link__destroy(link);
+	link = NULL;
+
+	err = system("ip link set veth master bond");
+	if (!ASSERT_OK(err, "set veth master"))
+		goto out;
+
+	/* attaching to slave when master has no program is allowed */
+	link = bpf_program__attach_xdp(skeletons->xdp_dummy->progs.xdp_dummy_prog, veth);
+	if (!ASSERT_OK_PTR(link, "attach program to slave when enslaved"))
+		goto out;
+
+	/* attaching to master not allowed when slave has program loaded */
+	link2 = bpf_program__attach_xdp(skeletons->xdp_dummy->progs.xdp_dummy_prog, bond);
+	if (!ASSERT_ERR_PTR(link2, "attach program to master when slave has program"))
+		goto out;
+
+	bpf_link__destroy(link);
+	link = NULL;
+
+	/* attaching XDP program to master allowed when slave has no program */
+	link = bpf_program__attach_xdp(skeletons->xdp_dummy->progs.xdp_dummy_prog, bond);
+	if (!ASSERT_OK_PTR(link, "attach program to master"))
+		goto out;
+
+	/* attaching to slave not allowed when master has program loaded */
+	link2 = bpf_program__attach_xdp(skeletons->xdp_dummy->progs.xdp_dummy_prog, veth);
+	ASSERT_ERR_PTR(link2, "attach program to slave when master has program");
+
+out:
+	bpf_link__destroy(link);
+	bpf_link__destroy(link2);
+
+	system("ip link del veth");
+	system("ip link del bond");
+}
+
+static int libbpf_debug_print(enum libbpf_print_level level,
+			      const char *format, va_list args)
+{
+	if (level != LIBBPF_WARN)
+		vprintf(format, args);
+	return 0;
+}
+
+struct bond_test_case {
+	char *name;
+	int mode;
+	int xmit_policy;
+};
+
+static struct bond_test_case bond_test_cases[] = {
+	{ "xdp_bonding_roundrobin", BOND_MODE_ROUNDROBIN, BOND_XMIT_POLICY_LAYER23, },
+	{ "xdp_bonding_activebackup", BOND_MODE_ACTIVEBACKUP, BOND_XMIT_POLICY_LAYER23 },
+
+	{ "xdp_bonding_xor_layer2", BOND_MODE_XOR, BOND_XMIT_POLICY_LAYER2, },
+	{ "xdp_bonding_xor_layer23", BOND_MODE_XOR, BOND_XMIT_POLICY_LAYER23, },
+	{ "xdp_bonding_xor_layer34", BOND_MODE_XOR, BOND_XMIT_POLICY_LAYER34, },
+};
+
+void test_xdp_bonding(void)
+{
+	libbpf_print_fn_t old_print_fn;
+	struct skeletons skeletons = {};
+	int i;
+
+	old_print_fn = libbpf_set_print(libbpf_debug_print);
+
+	root_netns_fd = open("/proc/self/ns/net", O_RDONLY);
+	if (!ASSERT_GE(root_netns_fd, 0, "open /proc/self/ns/net"))
+		goto out;
+
+	skeletons.xdp_dummy = xdp_dummy__open_and_load();
+	if (!ASSERT_OK_PTR(skeletons.xdp_dummy, "xdp_dummy__open_and_load"))
+		goto out;
+
+	skeletons.xdp_tx = xdp_tx__open_and_load();
+	if (!ASSERT_OK_PTR(skeletons.xdp_tx, "xdp_tx__open_and_load"))
+		goto out;
+
+	skeletons.xdp_redirect_multi_kern = xdp_redirect_multi_kern__open_and_load();
+	if (!ASSERT_OK_PTR(skeletons.xdp_redirect_multi_kern,
+			   "xdp_redirect_multi_kern__open_and_load"))
+		goto out;
+
+	if (test__start_subtest("xdp_bonding_attach"))
+		test_xdp_bonding_attach(&skeletons);
+
+	for (i = 0; i < ARRAY_SIZE(bond_test_cases); i++) {
+		struct bond_test_case *test_case = &bond_test_cases[i];
+
+		if (test__start_subtest(test_case->name))
+			
test_xdp_bonding_with_mode( +				&skeletons, +				test_case->mode, +				test_case->xmit_policy); +	} + +	if (test__start_subtest("xdp_bonding_redirect_multi")) +		test_xdp_bonding_redirect_multi(&skeletons); + +out: +	xdp_dummy__destroy(skeletons.xdp_dummy); +	xdp_tx__destroy(skeletons.xdp_tx); +	xdp_redirect_multi_kern__destroy(skeletons.xdp_redirect_multi_kern); + +	libbpf_set_print(old_print_fn); +	if (root_netns_fd >= 0) +		close(root_netns_fd); +} diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_context_test_run.c b/tools/testing/selftests/bpf/prog_tests/xdp_context_test_run.c new file mode 100644 index 000000000000..ab4952b9fb1d --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/xdp_context_test_run.c @@ -0,0 +1,105 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <test_progs.h> +#include <network_helpers.h> +#include "test_xdp_context_test_run.skel.h" + +void test_xdp_context_error(int prog_fd, struct bpf_test_run_opts opts, +			    __u32 data_meta, __u32 data, __u32 data_end, +			    __u32 ingress_ifindex, __u32 rx_queue_index, +			    __u32 egress_ifindex) +{ +	struct xdp_md ctx = { +		.data = data, +		.data_end = data_end, +		.data_meta = data_meta, +		.ingress_ifindex = ingress_ifindex, +		.rx_queue_index = rx_queue_index, +		.egress_ifindex = egress_ifindex, +	}; +	int err; + +	opts.ctx_in = &ctx; +	opts.ctx_size_in = sizeof(ctx); +	err = bpf_prog_test_run_opts(prog_fd, &opts); +	ASSERT_EQ(errno, EINVAL, "errno-EINVAL"); +	ASSERT_ERR(err, "bpf_prog_test_run"); +} + +void test_xdp_context_test_run(void) +{ +	struct test_xdp_context_test_run *skel = NULL; +	char data[sizeof(pkt_v4) + sizeof(__u32)]; +	char bad_ctx[sizeof(struct xdp_md) + 1]; +	struct xdp_md ctx_in, ctx_out; +	DECLARE_LIBBPF_OPTS(bpf_test_run_opts, opts, +			    .data_in = &data, +			    .data_size_in = sizeof(data), +			    .ctx_out = &ctx_out, +			    .ctx_size_out = sizeof(ctx_out), +			    .repeat = 1, +		); +	int err, prog_fd; + +	skel = test_xdp_context_test_run__open_and_load(); +	if (!ASSERT_OK_PTR(skel, "skel")) +		return; +	prog_fd = bpf_program__fd(skel->progs.xdp_context); + +	/* Data past the end of the kernel's struct xdp_md must be 0 */ +	bad_ctx[sizeof(bad_ctx) - 1] = 1; +	opts.ctx_in = bad_ctx; +	opts.ctx_size_in = sizeof(bad_ctx); +	err = bpf_prog_test_run_opts(prog_fd, &opts); +	ASSERT_EQ(errno, E2BIG, "extradata-errno"); +	ASSERT_ERR(err, "bpf_prog_test_run(extradata)"); + +	*(__u32 *)data = XDP_PASS; +	*(struct ipv4_packet *)(data + sizeof(__u32)) = pkt_v4; +	opts.ctx_in = &ctx_in; +	opts.ctx_size_in = sizeof(ctx_in); +	memset(&ctx_in, 0, sizeof(ctx_in)); +	ctx_in.data_meta = 0; +	ctx_in.data = sizeof(__u32); +	ctx_in.data_end = ctx_in.data + sizeof(pkt_v4); +	err = bpf_prog_test_run_opts(prog_fd, &opts); +	ASSERT_OK(err, "bpf_prog_test_run(valid)"); +	ASSERT_EQ(opts.retval, XDP_PASS, "valid-retval"); +	ASSERT_EQ(opts.data_size_out, sizeof(pkt_v4), "valid-datasize"); +	ASSERT_EQ(opts.ctx_size_out, opts.ctx_size_in, "valid-ctxsize"); +	ASSERT_EQ(ctx_out.data_meta, 0, "valid-datameta"); +	ASSERT_EQ(ctx_out.data, 0, "valid-data"); +	ASSERT_EQ(ctx_out.data_end, sizeof(pkt_v4), "valid-dataend"); + +	/* Meta data's size must be a multiple of 4 */ +	test_xdp_context_error(prog_fd, opts, 0, 1, sizeof(data), 0, 0, 0); + +	/* data_meta must reference the start of data */ +	test_xdp_context_error(prog_fd, opts, 4, sizeof(__u32), sizeof(data), +			       0, 0, 0); + +	/* Meta data must be 32 bytes or smaller */ +	test_xdp_context_error(prog_fd, opts, 0, 36, sizeof(data), 0, 0, 0); + +	/* 
Total size of data must match data_end - data_meta */ +	test_xdp_context_error(prog_fd, opts, 0, sizeof(__u32), +			       sizeof(data) - 1, 0, 0, 0); +	test_xdp_context_error(prog_fd, opts, 0, sizeof(__u32), +			       sizeof(data) + 1, 0, 0, 0); + +	/* RX queue cannot be specified without specifying an ingress */ +	test_xdp_context_error(prog_fd, opts, 0, sizeof(__u32), sizeof(data), +			       0, 1, 0); + +	/* Interface 1 is always the loopback interface which always has only +	 * one RX queue (index 0). This makes index 1 an invalid rx queue index +	 * for interface 1. +	 */ +	test_xdp_context_error(prog_fd, opts, 0, sizeof(__u32), sizeof(data), +			       1, 1, 0); + +	/* The egress cannot be specified */ +	test_xdp_context_error(prog_fd, opts, 0, sizeof(__u32), sizeof(data), +			       0, 0, 1); + +	test_xdp_context_test_run__destroy(skel); +} diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_cpumap_attach.c b/tools/testing/selftests/bpf/prog_tests/xdp_cpumap_attach.c index 0176573fe4e7..8755effd80b0 100644 --- a/tools/testing/selftests/bpf/prog_tests/xdp_cpumap_attach.c +++ b/tools/testing/selftests/bpf/prog_tests/xdp_cpumap_attach.c @@ -7,64 +7,53 @@  #define IFINDEX_LO	1 -void test_xdp_with_cpumap_helpers(void) +void test_xdp_cpumap_attach(void)  {  	struct test_xdp_with_cpumap_helpers *skel;  	struct bpf_prog_info info = {}; +	__u32 len = sizeof(info);  	struct bpf_cpumap_val val = {  		.qsize = 192,  	}; -	__u32 duration = 0, idx = 0; -	__u32 len = sizeof(info);  	int err, prog_fd, map_fd; +	__u32 idx = 0;  	skel = test_xdp_with_cpumap_helpers__open_and_load(); -	if (CHECK_FAIL(!skel)) { -		perror("test_xdp_with_cpumap_helpers__open_and_load"); +	if (!ASSERT_OK_PTR(skel, "test_xdp_with_cpumap_helpers__open_and_load"))  		return; -	} -	/* can not attach program with cpumaps that allow programs -	 * as xdp generic -	 */  	prog_fd = bpf_program__fd(skel->progs.xdp_redir_prog);  	err = bpf_set_link_xdp_fd(IFINDEX_LO, prog_fd, XDP_FLAGS_SKB_MODE); -	CHECK(err == 0, "Generic attach of program with 8-byte CPUMAP", -	      "should have failed\n"); +	if (!ASSERT_OK(err, "Generic attach of program with 8-byte CPUMAP")) +		goto out_close; + +	err = bpf_set_link_xdp_fd(IFINDEX_LO, -1, XDP_FLAGS_SKB_MODE); +	ASSERT_OK(err, "XDP program detach");  	prog_fd = bpf_program__fd(skel->progs.xdp_dummy_cm);  	map_fd = bpf_map__fd(skel->maps.cpu_map);  	err = bpf_obj_get_info_by_fd(prog_fd, &info, &len); -	if (CHECK_FAIL(err)) +	if (!ASSERT_OK(err, "bpf_obj_get_info_by_fd"))  		goto out_close;  	val.bpf_prog.fd = prog_fd;  	err = bpf_map_update_elem(map_fd, &idx, &val, 0); -	CHECK(err, "Add program to cpumap entry", "err %d errno %d\n", -	      err, errno); +	ASSERT_OK(err, "Add program to cpumap entry");  	err = bpf_map_lookup_elem(map_fd, &idx, &val); -	CHECK(err, "Read cpumap entry", "err %d errno %d\n", err, errno); -	CHECK(info.id != val.bpf_prog.id, "Expected program id in cpumap entry", -	      "expected %u read %u\n", info.id, val.bpf_prog.id); +	ASSERT_OK(err, "Read cpumap entry"); +	ASSERT_EQ(info.id, val.bpf_prog.id, "Match program id to cpumap entry prog_id");  	/* can not attach BPF_XDP_CPUMAP program to a device */  	err = bpf_set_link_xdp_fd(IFINDEX_LO, prog_fd, XDP_FLAGS_SKB_MODE); -	CHECK(err == 0, "Attach of BPF_XDP_CPUMAP program", -	      "should have failed\n"); +	if (!ASSERT_NEQ(err, 0, "Attach of BPF_XDP_CPUMAP program")) +		bpf_set_link_xdp_fd(IFINDEX_LO, -1, XDP_FLAGS_SKB_MODE);  	val.qsize = 192;  	val.bpf_prog.fd = bpf_program__fd(skel->progs.xdp_dummy_prog);  	err = 
bpf_map_update_elem(map_fd, &idx, &val, 0); -	CHECK(err == 0, "Add non-BPF_XDP_CPUMAP program to cpumap entry", -	      "should have failed\n"); +	ASSERT_NEQ(err, 0, "Add non-BPF_XDP_CPUMAP program to cpumap entry");  out_close:  	test_xdp_with_cpumap_helpers__destroy(skel);  } - -void test_xdp_cpumap_attach(void) -{ -	if (test__start_subtest("cpumap_with_progs")) -		test_xdp_with_cpumap_helpers(); -} diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_devmap_attach.c b/tools/testing/selftests/bpf/prog_tests/xdp_devmap_attach.c index 88ef3ec8ac4c..c72af030ff10 100644 --- a/tools/testing/selftests/bpf/prog_tests/xdp_devmap_attach.c +++ b/tools/testing/selftests/bpf/prog_tests/xdp_devmap_attach.c @@ -16,50 +16,45 @@ void test_xdp_with_devmap_helpers(void)  		.ifindex = IFINDEX_LO,  	};  	__u32 len = sizeof(info); -	__u32 duration = 0, idx = 0;  	int err, dm_fd, map_fd; +	__u32 idx = 0;  	skel = test_xdp_with_devmap_helpers__open_and_load(); -	if (CHECK_FAIL(!skel)) { -		perror("test_xdp_with_devmap_helpers__open_and_load"); +	if (!ASSERT_OK_PTR(skel, "test_xdp_with_devmap_helpers__open_and_load"))  		return; -	} -	/* can not attach program with DEVMAPs that allow programs -	 * as xdp generic -	 */  	dm_fd = bpf_program__fd(skel->progs.xdp_redir_prog);  	err = bpf_set_link_xdp_fd(IFINDEX_LO, dm_fd, XDP_FLAGS_SKB_MODE); -	CHECK(err == 0, "Generic attach of program with 8-byte devmap", -	      "should have failed\n"); +	if (!ASSERT_OK(err, "Generic attach of program with 8-byte devmap")) +		goto out_close; + +	err = bpf_set_link_xdp_fd(IFINDEX_LO, -1, XDP_FLAGS_SKB_MODE); +	ASSERT_OK(err, "XDP program detach");  	dm_fd = bpf_program__fd(skel->progs.xdp_dummy_dm);  	map_fd = bpf_map__fd(skel->maps.dm_ports);  	err = bpf_obj_get_info_by_fd(dm_fd, &info, &len); -	if (CHECK_FAIL(err)) +	if (!ASSERT_OK(err, "bpf_obj_get_info_by_fd"))  		goto out_close;  	val.bpf_prog.fd = dm_fd;  	err = bpf_map_update_elem(map_fd, &idx, &val, 0); -	CHECK(err, "Add program to devmap entry", -	      "err %d errno %d\n", err, errno); +	ASSERT_OK(err, "Add program to devmap entry");  	err = bpf_map_lookup_elem(map_fd, &idx, &val); -	CHECK(err, "Read devmap entry", "err %d errno %d\n", err, errno); -	CHECK(info.id != val.bpf_prog.id, "Expected program id in devmap entry", -	      "expected %u read %u\n", info.id, val.bpf_prog.id); +	ASSERT_OK(err, "Read devmap entry"); +	ASSERT_EQ(info.id, val.bpf_prog.id, "Match program id to devmap entry prog_id");  	/* can not attach BPF_XDP_DEVMAP program to a device */  	err = bpf_set_link_xdp_fd(IFINDEX_LO, dm_fd, XDP_FLAGS_SKB_MODE); -	CHECK(err == 0, "Attach of BPF_XDP_DEVMAP program", -	      "should have failed\n"); +	if (!ASSERT_NEQ(err, 0, "Attach of BPF_XDP_DEVMAP program")) +		bpf_set_link_xdp_fd(IFINDEX_LO, -1, XDP_FLAGS_SKB_MODE);  	val.ifindex = 1;  	val.bpf_prog.fd = bpf_program__fd(skel->progs.xdp_dummy_prog);  	err = bpf_map_update_elem(map_fd, &idx, &val, 0); -	CHECK(err == 0, "Add non-BPF_XDP_DEVMAP program to devmap entry", -	      "should have failed\n"); +	ASSERT_NEQ(err, 0, "Add non-BPF_XDP_DEVMAP program to devmap entry");  out_close:  	test_xdp_with_devmap_helpers__destroy(skel); @@ -68,12 +63,10 @@ out_close:  void test_neg_xdp_devmap_helpers(void)  {  	struct test_xdp_devmap_helpers *skel; -	__u32 duration = 0;  	skel = test_xdp_devmap_helpers__open_and_load(); -	if (CHECK(skel, -		  "Load of XDP program accessing egress ifindex without attach type", -		  "should have failed\n")) { +	if (!ASSERT_EQ(skel, NULL, +		    "Load of XDP program accessing egress 
ifindex without attach type")) {  		test_xdp_devmap_helpers__destroy(skel);  	}  } | 
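Editor's note on the pattern the cpumap and devmap hunks above both exercise: an entry in a BPF_MAP_TYPE_CPUMAP or BPF_MAP_TYPE_DEVMAP map may carry a second XDP program in its value, and the kernel accepts the update only when that program was loaded with the matching expected_attach_type (BPF_XDP_CPUMAP or BPF_XDP_DEVMAP); the ASSERT_NEQ cases rely on a mismatched update failing. Below is a minimal user-space sketch of the devmap side, assuming map_fd and prog_fd come from an already-loaded object; install_devmap_prog is a hypothetical helper name, not part of these tests.

#include <linux/bpf.h>
#include <bpf/bpf.h>

/* Hypothetical helper: install an XDP program into devmap slot 0.
 * map_fd must be a BPF_MAP_TYPE_DEVMAP whose value_size is
 * sizeof(struct bpf_devmap_val); prog_fd must have been loaded with
 * expected_attach_type BPF_XDP_DEVMAP, or the update is rejected.
 */
static int install_devmap_prog(int map_fd, int prog_fd, __u32 ifindex)
{
	struct bpf_devmap_val val = {
		.ifindex = ifindex,		/* redirect target device */
		.bpf_prog.fd = prog_fd,		/* program run on that device */
	};
	__u32 key = 0;

	return bpf_map_update_elem(map_fd, &key, &val, BPF_ANY);
}

Looking the entry back up, as the tests do, returns the program's id in val.bpf_prog.id rather than the fd, which is why the tests compare it against the info.id obtained from bpf_obj_get_info_by_fd().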

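Similarly, for the new xdp_context_test_run.c: BPF_PROG_RUN for XDP now accepts a struct xdp_md context alongside the packet, with data_meta/data/data_end interpreted as offsets into the input buffer, metadata required to be a multiple of 4 bytes and at most 32 bytes, rx_queue_index valid only together with ingress_ifindex, and egress_ifindex required to stay zero (exactly the error cases the test enumerates). A minimal sketch of one valid invocation follows, assuming prog_fd refers to some already-loaded XDP program; run_xdp_once is a hypothetical name.

#include <linux/bpf.h>
#include <bpf/bpf.h>
#include <bpf/libbpf.h>

/* Run an XDP program once over a 64-byte payload preceded by 4 bytes
 * of metadata: ctx_in.data = 4 marks bytes [0, 4) of buf as the
 * metadata area (data_meta stays 0), and data_end - data_meta must
 * equal the size of the input buffer.
 */
static int run_xdp_once(int prog_fd)
{
	char buf[4 + 64] = {};
	struct xdp_md ctx_in = {
		.data = 4,
		.data_end = sizeof(buf),
	};
	struct xdp_md ctx_out = {};
	DECLARE_LIBBPF_OPTS(bpf_test_run_opts, opts,
			    .data_in = buf,
			    .data_size_in = sizeof(buf),
			    .ctx_in = &ctx_in,
			    .ctx_size_in = sizeof(ctx_in),
			    .ctx_out = &ctx_out,
			    .ctx_size_out = sizeof(ctx_out),
			    .repeat = 1,
	);
	int err = bpf_prog_test_run_opts(prog_fd, &opts);

	if (err)
		return err;
	return opts.retval;	/* the program's verdict: XDP_PASS, XDP_DROP, ... */
}

On success the kernel writes the possibly adjusted offsets back through ctx_out, which is what the valid-datameta, valid-data, and valid-dataend assertions in the test inspect.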