From 912dd4b0c2a50a839301520badb241f36664ae07 Mon Sep 17 00:00:00 2001
From: Kui-Feng Lee
Date: Wed, 22 Mar 2023 20:24:03 -0700
Subject: libbpf: Update a bpf_link with another struct_ops.

Introduce bpf_link__update_map(), which allows atomically updating the
underlying struct_ops implementation for a given struct_ops BPF link.

Also add old_map_fd to struct bpf_link_update_opts to handle the
BPF_F_REPLACE feature.

Signed-off-by: Kui-Feng Lee
Link: https://lore.kernel.org/r/20230323032405.3735486-7-kuifeng@meta.com
Signed-off-by: Martin KaFai Lau
---
 tools/lib/bpf/bpf.c | 8 +++++++-
 1 file changed, 7 insertions(+), 1 deletion(-)

(limited to 'tools/lib/bpf/bpf.c')

diff --git a/tools/lib/bpf/bpf.c b/tools/lib/bpf/bpf.c
index e750b6f5fcc36..767035900354d 100644
--- a/tools/lib/bpf/bpf.c
+++ b/tools/lib/bpf/bpf.c
@@ -794,11 +794,17 @@ int bpf_link_update(int link_fd, int new_prog_fd,
 	if (!OPTS_VALID(opts, bpf_link_update_opts))
 		return libbpf_err(-EINVAL);
 
+	if (OPTS_GET(opts, old_prog_fd, 0) && OPTS_GET(opts, old_map_fd, 0))
+		return libbpf_err(-EINVAL);
+
 	memset(&attr, 0, attr_sz);
 	attr.link_update.link_fd = link_fd;
 	attr.link_update.new_prog_fd = new_prog_fd;
 	attr.link_update.flags = OPTS_GET(opts, flags, 0);
-	attr.link_update.old_prog_fd = OPTS_GET(opts, old_prog_fd, 0);
+	if (OPTS_GET(opts, old_prog_fd, 0))
+		attr.link_update.old_prog_fd = OPTS_GET(opts, old_prog_fd, 0);
+	else if (OPTS_GET(opts, old_map_fd, 0))
+		attr.link_update.old_map_fd = OPTS_GET(opts, old_map_fd, 0);
 
 	ret = sys_bpf(BPF_LINK_UPDATE, &attr, attr_sz);
 	return libbpf_err_errno(ret);
--
cgit v1.2.3

From e0aee1facccf9f12136600031be4ce21eb810a78 Mon Sep 17 00:00:00 2001
From: Andrii Nakryiko
Date: Thu, 6 Apr 2023 16:41:50 -0700
Subject: libbpf: Don't enforce unnecessary verifier log restrictions on libbpf side

Enforcing these checks on the libbpf side basically prevents any forward
compatibility; either way we just return -EINVAL, which is what the bpf()
syscall would return anyway.

Similarly, drop enforcement of a non-NULL log_buf when log_level > 0. That
requirement will soon no longer hold.

Signed-off-by: Andrii Nakryiko
Signed-off-by: Daniel Borkmann
Acked-by: Lorenz Bauer
Link: https://lore.kernel.org/bpf/20230406234205.323208-5-andrii@kernel.org
---
 tools/lib/bpf/bpf.c | 4 ----
 1 file changed, 4 deletions(-)

(limited to 'tools/lib/bpf/bpf.c')

diff --git a/tools/lib/bpf/bpf.c b/tools/lib/bpf/bpf.c
index 767035900354d..f1d04ee14d83e 100644
--- a/tools/lib/bpf/bpf.c
+++ b/tools/lib/bpf/bpf.c
@@ -290,10 +290,6 @@ int bpf_prog_load(enum bpf_prog_type prog_type,
 
 	if (!!log_buf != !!log_size)
 		return libbpf_err(-EINVAL);
-	if (log_level > (4 | 2 | 1))
-		return libbpf_err(-EINVAL);
-	if (log_level && !log_buf)
-		return libbpf_err(-EINVAL);
 
 	func_info_rec_size = OPTS_GET(opts, func_info_rec_size, 0);
 	func_info = OPTS_GET(opts, func_info, NULL);
--
cgit v1.2.3
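The struct_ops link update above is easiest to see from the caller's side. Below is a minimal editorial sketch (not part of the patches) of how a user might atomically swap the struct_ops implementation behind an existing link, first via the high-level bpf_link__update_map() wrapper and then via the low-level bpf_link_update() exercising the new old_map_fd/BPF_F_REPLACE handling. The map names "ops_v1" and "ops_v2" are made up; it assumes the bpf_object has already been opened and loaded, and that both struct_ops maps were declared under SEC(".struct_ops.link") so attaching them produces a real BPF link.

/* Editorial sketch (hypothetical names): swap struct_ops implementations. */
#include <errno.h>
#include <stdio.h>
#include <bpf/bpf.h>
#include <bpf/libbpf.h>

static int swap_struct_ops(struct bpf_object *obj)
{
	struct bpf_map *old_map, *new_map;
	struct bpf_link *link;
	int err;

	old_map = bpf_object__find_map_by_name(obj, "ops_v1");
	new_map = bpf_object__find_map_by_name(obj, "ops_v2");
	if (!old_map || !new_map)
		return -ENOENT;

	/* attaching a struct_ops map declared with the link flag creates a
	 * real BPF link whose backing map can later be replaced
	 */
	link = bpf_map__attach_struct_ops(old_map);
	if (!link)
		return -errno;

	/* high-level: atomically point the same link at the new implementation */
	err = bpf_link__update_map(link, new_map);

	/* low-level equivalent, swapping back and exercising the new
	 * old_map_fd handling: the link now points at ops_v2, so that is the
	 * "old" map from BPF_F_REPLACE's point of view
	 */
	if (!err) {
		LIBBPF_OPTS(bpf_link_update_opts, opts,
			.flags = BPF_F_REPLACE,
			.old_map_fd = bpf_map__fd(new_map));

		/* the second argument carries the new map's fd here;
		 * new_prog_fd and new_map_fd share a union in the uapi
		 */
		err = bpf_link_update(bpf_link__fd(link), bpf_map__fd(old_map), &opts);
	}

	if (err)
		fprintf(stderr, "struct_ops update failed: %d\n", err);

	bpf_link__destroy(link);
	return err;
}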
From 94e55c0fdaf4268bdda2d3b375bc61daba056e85 Mon Sep 17 00:00:00 2001
From: Andrii Nakryiko
Date: Thu, 6 Apr 2023 16:42:01 -0700
Subject: libbpf: Wire through log_true_size returned from kernel for BPF_PROG_LOAD

Add an output-only log_true_size field to bpf_prog_load_opts to return the
bpf_attr->log_true_size value back from the bpf() syscall.

Note that we have to drop the const modifier from opts in bpf_prog_load().
This could potentially cause a compilation error for some users. But the
usual practice is to define bpf_prog_load_opts as a local variable next to
the bpf_prog_load() call and pass a pointer to it, so const vs non-const
makes no difference and won't even come up in most (if not all) cases.

There are no runtime or ABI backwards/forward compatibility issues at all.
If a user provides an old struct bpf_prog_load_opts, libbpf won't set the
new field. If an old libbpf is given a new bpf_prog_load_opts, nothing
happens either, as the old libbpf doesn't yet know about this new field.

Adding a new variant of bpf_prog_load() just for this seems like
unnecessary overkill. Corroborating evidence is the fact that the entire
selftests/bpf code base required no adjustment whatsoever.

Signed-off-by: Andrii Nakryiko
Signed-off-by: Daniel Borkmann
Link: https://lore.kernel.org/bpf/20230406234205.323208-16-andrii@kernel.org
---
 tools/lib/bpf/bpf.c |  7 +++++--
 tools/lib/bpf/bpf.h | 11 +++++++++--
 2 files changed, 14 insertions(+), 4 deletions(-)

(limited to 'tools/lib/bpf/bpf.c')

diff --git a/tools/lib/bpf/bpf.c b/tools/lib/bpf/bpf.c
index f1d04ee14d83e..a031de0a707a7 100644
--- a/tools/lib/bpf/bpf.c
+++ b/tools/lib/bpf/bpf.c
@@ -230,9 +230,9 @@ alloc_zero_tailing_info(const void *orecord, __u32 cnt,
 int bpf_prog_load(enum bpf_prog_type prog_type,
 		  const char *prog_name, const char *license,
 		  const struct bpf_insn *insns, size_t insn_cnt,
-		  const struct bpf_prog_load_opts *opts)
+		  struct bpf_prog_load_opts *opts)
 {
-	const size_t attr_sz = offsetofend(union bpf_attr, fd_array);
+	const size_t attr_sz = offsetofend(union bpf_attr, log_true_size);
 	void *finfo = NULL, *linfo = NULL;
 	const char *func_info, *line_info;
 	__u32 log_size, log_level, attach_prog_fd, attach_btf_obj_fd;
@@ -312,6 +312,7 @@ int bpf_prog_load(enum bpf_prog_type prog_type,
 	}
 
 	fd = sys_bpf_prog_load(&attr, attr_sz, attempts);
+	OPTS_SET(opts, log_true_size, attr.log_true_size);
 	if (fd >= 0)
 		return fd;
 
@@ -352,6 +353,7 @@ int bpf_prog_load(enum bpf_prog_type prog_type,
 		}
 
 		fd = sys_bpf_prog_load(&attr, attr_sz, attempts);
+		OPTS_SET(opts, log_true_size, attr.log_true_size);
 		if (fd >= 0)
 			goto done;
 	}
@@ -366,6 +368,7 @@ int bpf_prog_load(enum bpf_prog_type prog_type,
 		attr.log_level = 1;
 
 		fd = sys_bpf_prog_load(&attr, attr_sz, attempts);
+		OPTS_SET(opts, log_true_size, attr.log_true_size);
 	}
 done:
 	/* free() doesn't affect errno, so we don't need to restore it */
diff --git a/tools/lib/bpf/bpf.h b/tools/lib/bpf/bpf.h
index b073e73439efd..cfa0cdfa50f82 100644
--- a/tools/lib/bpf/bpf.h
+++ b/tools/lib/bpf/bpf.h
@@ -96,13 +96,20 @@ struct bpf_prog_load_opts {
 	__u32 log_level;
 	__u32 log_size;
 	char *log_buf;
+	/* output: actual total log contents size (including terminating zero).
+	 * It could be both larger than original log_size (if log was
+	 * truncated), or smaller (if log buffer wasn't filled completely).
+	 * If kernel doesn't support this feature, log_size is left unchanged.
+	 */
+	__u32 log_true_size;
+	size_t :0;
 };
-#define bpf_prog_load_opts__last_field log_buf
+#define bpf_prog_load_opts__last_field log_true_size
 
 LIBBPF_API int bpf_prog_load(enum bpf_prog_type prog_type,
 			     const char *prog_name, const char *license,
 			     const struct bpf_insn *insns, size_t insn_cnt,
-			     const struct bpf_prog_load_opts *opts);
+			     struct bpf_prog_load_opts *opts);
 
 /* Flags to direct loading requirements */
 #define MAPS_RELAX_COMPAT	0x01
--
cgit v1.2.3
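As a usage illustration only (again not part of the patch), the new out field lets a loader size its verifier log buffer exactly on a retry instead of guessing and growing. The program type, name and license below are placeholders; on kernels that do not report log_true_size the field stays zero and the caller has to fall back to whatever retry strategy it used before.

/* Editorial sketch: retry a failed load with an exactly-sized log buffer. */
#include <stdio.h>
#include <stdlib.h>
#include <bpf/bpf.h>

static int load_with_full_log(const struct bpf_insn *insns, size_t insn_cnt)
{
	LIBBPF_OPTS(bpf_prog_load_opts, opts, .log_level = 1);
	char small[64];
	char *big;
	int fd;

	/* first attempt with a deliberately tiny buffer */
	opts.log_buf = small;
	opts.log_size = sizeof(small);

	fd = bpf_prog_load(BPF_PROG_TYPE_XDP, "sketch_prog", "GPL",
			   insns, insn_cnt, &opts);
	if (fd >= 0)
		return fd;

	/* opts.log_true_size now holds the full log size, including the
	 * terminating zero; it stays 0 on kernels without the feature
	 */
	if (!opts.log_true_size)
		return fd;

	big = malloc(opts.log_true_size);
	if (!big)
		return fd;

	/* retry with an exactly-sized buffer and print the complete log */
	opts.log_buf = big;
	opts.log_size = opts.log_true_size;
	fd = bpf_prog_load(BPF_PROG_TYPE_XDP, "sketch_prog", "GPL",
			   insns, insn_cnt, &opts);
	if (fd < 0)
		fprintf(stderr, "verifier log:\n%s\n", big);

	free(big);
	return fd;
}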
From 097d8002b754a865beef880e5c1cdc3ef7c2163d Mon Sep 17 00:00:00 2001
From: Andrii Nakryiko
Date: Thu, 6 Apr 2023 16:42:02 -0700
Subject: libbpf: Wire through log_true_size for bpf_btf_load() API

Similar to what we did for bpf_prog_load() in the previous patch, wire the
log_true_size value returned from the kernel back to the user through the
OPTS out field.

Signed-off-by: Andrii Nakryiko
Signed-off-by: Daniel Borkmann
Link: https://lore.kernel.org/bpf/20230406234205.323208-17-andrii@kernel.org
---
 tools/lib/bpf/bpf.c |  6 ++++--
 tools/lib/bpf/bpf.h | 11 +++++++++--
 2 files changed, 13 insertions(+), 4 deletions(-)

(limited to 'tools/lib/bpf/bpf.c')

diff --git a/tools/lib/bpf/bpf.c b/tools/lib/bpf/bpf.c
index a031de0a707a7..128ac723c4ea2 100644
--- a/tools/lib/bpf/bpf.c
+++ b/tools/lib/bpf/bpf.c
@@ -1083,9 +1083,9 @@ int bpf_raw_tracepoint_open(const char *name, int prog_fd)
 	return libbpf_err_errno(fd);
 }
 
-int bpf_btf_load(const void *btf_data, size_t btf_size, const struct bpf_btf_load_opts *opts)
+int bpf_btf_load(const void *btf_data, size_t btf_size, struct bpf_btf_load_opts *opts)
 {
-	const size_t attr_sz = offsetofend(union bpf_attr, btf_log_level);
+	const size_t attr_sz = offsetofend(union bpf_attr, btf_log_true_size);
 	union bpf_attr attr;
 	char *log_buf;
 	size_t log_size;
@@ -1128,6 +1128,8 @@ int bpf_btf_load(const void *btf_data, size_t btf_size, const struct bpf_btf_loa
 		attr.btf_log_level = 1;
 		fd = sys_bpf_fd(BPF_BTF_LOAD, &attr, attr_sz);
 	}
+
+	OPTS_SET(opts, log_true_size, attr.btf_log_true_size);
 	return libbpf_err_errno(fd);
 }
 
diff --git a/tools/lib/bpf/bpf.h b/tools/lib/bpf/bpf.h
index cfa0cdfa50f82..a2c091389b186 100644
--- a/tools/lib/bpf/bpf.h
+++ b/tools/lib/bpf/bpf.h
@@ -124,11 +124,18 @@ struct bpf_btf_load_opts {
 	char *log_buf;
 	__u32 log_level;
 	__u32 log_size;
+	/* output: actual total log contents size (including terminating zero).
+	 * It could be both larger than original log_size (if log was
+	 * truncated), or smaller (if log buffer wasn't filled completely).
+	 * If kernel doesn't support this feature, log_size is left unchanged.
+	 */
+	__u32 log_true_size;
+	size_t :0;
 };
-#define bpf_btf_load_opts__last_field log_size
+#define bpf_btf_load_opts__last_field log_true_size
 
 LIBBPF_API int bpf_btf_load(const void *btf_data, size_t btf_size,
-			    const struct bpf_btf_load_opts *opts);
+			    struct bpf_btf_load_opts *opts);
 
 LIBBPF_API int bpf_map_update_elem(int fd, const void *key, const void *value,
 				   __u64 flags);
--
cgit v1.2.3
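For completeness, here is a similar small editorial sketch on the BTF side. The raw_btf/raw_size arguments are assumed to point at a valid BTF blob supplied by the caller, log_size is assumed to be at least 1, and, as above, log_true_size is only filled in by kernels that support it.

/* Editorial sketch: report how big the BTF verification log really was. */
#include <stdio.h>
#include <bpf/bpf.h>

static int btf_load_verbose(const void *raw_btf, size_t raw_size,
			    char *log_buf, __u32 log_size)
{
	LIBBPF_OPTS(bpf_btf_load_opts, opts,
		.log_buf = log_buf,
		.log_size = log_size,
		.log_level = 1);
	int fd;

	/* be defensive in case the kernel writes nothing into the buffer */
	log_buf[0] = '\0';

	fd = bpf_btf_load(raw_btf, raw_size, &opts);
	if (fd < 0) {
		fprintf(stderr, "BTF load failed:\n%s\n", log_buf);
		/* log_true_size includes the terminating zero, so a value
		 * larger than our buffer means the log was truncated
		 */
		if (opts.log_true_size > log_size)
			fprintf(stderr, "log truncated: need %u bytes, have %u\n",
				opts.log_true_size, log_size);
	}
	return fd;
}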