author    Andrii Nakryiko <andrii@kernel.org>  2025-03-17 13:45:12 -0700
committer Andrii Nakryiko <andrii@kernel.org>  2025-03-17 13:45:13 -0700
commit    b02f072a36560a89155a1ebcb2ca6bd881333a8b (patch)
tree      62436d27fd49365a4db2b9fa92065b6aabbca28c /tools/lib/bpf/btf.c
parent    812f7702d83d84cdf776d75e2ba5386de9e8acc0 (diff)
parent    a024843d92cca491034dac648bcfec967199a4a4 (diff)
Merge branch 'support-freplace-prog-from-user-namespace'
Mykyta Yatsenko says:

====================
Support freplace prog from user namespace

From: Mykyta Yatsenko <yatsenko@meta.com>

Freplace programs can't be loaded from a user namespace, as
bpf_program__set_attach_target() requires searching for the target prog's
BTF, which is gated behind CAP_SYS_ADMIN. This patch set enables this use
case by:

1. Relaxing the capability check in the kernel's BPF_BTF_GET_FD_BY_ID:
   check for CAP_BPF instead of CAP_SYS_ADMIN and accept a BPF token in
   the attr argument.
2. Passing the BPF token through libbpf from bpf_program__set_attach_target()
   down to the bpf syscall where the capability check happens.
3. Validating positive and negative scenarios in selftests.

This patch set is enabled by the recent libbpf change [1], which introduced
the bpf_object__prepare() API. Calling bpf_object__prepare() for the freplace
program before bpf_program__set_attach_target() initializes the BPF token,
which libbpf then passes to the bpf syscall.

[1] https://lore.kernel.org/all/20250303135752.158343-1-mykyta.yatsenko5@gmail.com/
====================

Link: https://patch.msgid.link/20250317174039.161275-1-mykyta.yatsenko5@gmail.com
Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
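For context, here is a minimal userspace sketch of the flow the cover letter
describes; it is not taken from the patch set itself. The object file name
"freplace_prog.bpf.o", the program name "freplace_fn", the target function
"target_fn", and the /sys/fs/bpf path are hypothetical placeholders, and the
mount is assumed to be a bpffs with BPF token delegation configured.

#include <errno.h>
#include <bpf/libbpf.h>

/* Sketch: load an freplace program from inside a user namespace.
 * Names below are hypothetical; /sys/fs/bpf is assumed to be a bpffs
 * mount with token delegation set up.
 */
static int load_freplace_with_token(int target_prog_fd)
{
	LIBBPF_OPTS(bpf_object_open_opts, opts,
		    .bpf_token_path = "/sys/fs/bpf");
	struct bpf_object *obj;
	struct bpf_program *prog;
	int err;

	obj = bpf_object__open_file("freplace_prog.bpf.o", &opts);
	if (!obj)
		return -errno;

	/* prepare first: this initializes the BPF token ... */
	err = bpf_object__prepare(obj);
	if (err)
		goto out;

	prog = bpf_object__find_program_by_name(obj, "freplace_fn");
	if (!prog) {
		err = -ENOENT;
		goto out;
	}

	/* ... so the target BTF lookup triggered here can pass the token
	 * to BPF_BTF_GET_FD_BY_ID and succeed with CAP_BPF only.
	 */
	err = bpf_program__set_attach_target(prog, target_prog_fd, "target_fn");
	if (err)
		goto out;

	err = bpf_object__load(obj);
out:
	bpf_object__close(obj);
	return err;
}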
Diffstat (limited to 'tools/lib/bpf/btf.c')
-rw-r--r--  tools/lib/bpf/btf.c  15
1 file changed, 13 insertions(+), 2 deletions(-)
diff --git a/tools/lib/bpf/btf.c b/tools/lib/bpf/btf.c
index eea99c766a20..38bc6b14b066 100644
--- a/tools/lib/bpf/btf.c
+++ b/tools/lib/bpf/btf.c
@@ -1619,12 +1619,18 @@ exit_free:
 	return btf;
 }
 
-struct btf *btf__load_from_kernel_by_id_split(__u32 id, struct btf *base_btf)
+struct btf *btf_load_from_kernel(__u32 id, struct btf *base_btf, int token_fd)
 {
 	struct btf *btf;
 	int btf_fd;
+	LIBBPF_OPTS(bpf_get_fd_by_id_opts, opts);
+
+	if (token_fd) {
+		opts.open_flags |= BPF_F_TOKEN_FD;
+		opts.token_fd = token_fd;
+	}
 
-	btf_fd = bpf_btf_get_fd_by_id(id);
+	btf_fd = bpf_btf_get_fd_by_id_opts(id, &opts);
 	if (btf_fd < 0)
 		return libbpf_err_ptr(-errno);
 
@@ -1634,6 +1640,11 @@ struct btf *btf__load_from_kernel_by_id_split(__u32 id, struct btf *base_btf)
 	return libbpf_ptr(btf);
 }
 
+struct btf *btf__load_from_kernel_by_id_split(__u32 id, struct btf *base_btf)
+{
+	return btf_load_from_kernel(id, base_btf, 0);
+}
+
 struct btf *btf__load_from_kernel_by_id(__u32 id)
 {
 	return btf__load_from_kernel_by_id_split(id, NULL);
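
For illustration only, a standalone sketch of the syscall-side effect of the
new internal helper: fetching a BTF fd by id under CAP_BPF by passing a BPF
token to BPF_BTF_GET_FD_BY_ID via bpf_btf_get_fd_by_id_opts(). The bpffs path
and the token creation step are assumptions, not part of this commit.

#include <errno.h>
#include <fcntl.h>
#include <unistd.h>
#include <bpf/bpf.h>

/* Sketch: fetch a BTF fd by id with CAP_BPF only, by attaching a BPF token
 * to the BPF_BTF_GET_FD_BY_ID call -- roughly what btf_load_from_kernel()
 * does when token_fd is non-zero. /sys/fs/bpf is an assumed delegated bpffs.
 */
static int btf_fd_by_id_with_token(__u32 btf_id)
{
	LIBBPF_OPTS(bpf_get_fd_by_id_opts, opts);
	int bpffs_fd, token_fd, btf_fd;

	bpffs_fd = open("/sys/fs/bpf", O_RDONLY);
	if (bpffs_fd < 0)
		return -errno;

	token_fd = bpf_token_create(bpffs_fd, NULL);
	close(bpffs_fd);
	if (token_fd < 0)
		return token_fd;

	opts.open_flags = BPF_F_TOKEN_FD;
	opts.token_fd = token_fd;

	btf_fd = bpf_btf_get_fd_by_id_opts(btf_id, &opts);
	close(token_fd);
	return btf_fd;
}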