author    Andrii Nakryiko <andrii@kernel.org>  2022-02-16 09:48:38 -0800
committer Andrii Nakryiko <andrii@kernel.org>  2022-02-16 10:15:47 -0800
commit    477bb4c1baa7983b1be14af7dbb14f0902fbddac (patch)
tree      c21e843d3c3be73c57977cde514121d3b806ec4f /tools/lib/bpf/libbpf.c
parent    8cbf062a250ed52148badf6f3ffd03657dd4a3f0 (diff)
parent    704c91e59fe045708aa3f11d0c2bf9c83e39d24c (diff)
Merge branch 'libbpf: Implement BTFGen'
Mauricio Vásquez <mauricio@kinvolk.io> says:

====================

CO-RE requires BTF information describing the kernel types in order to perform relocations. This is usually provided by the kernel itself when it's configured with CONFIG_DEBUG_INFO_BTF. However, this configuration is not enabled in all distributions and it's not available on kernels before 5.12.

It's possible to use CO-RE on kernels without CONFIG_DEBUG_INFO_BTF support by providing the BTF information from an external source. BTFHub[0] contains BTF files for each released kernel not supporting BTF, for the most popular distributions.

Providing this BTF file for a given kernel has some challenges:

1. Each BTF file is a few MBs big, so it's not possible to ship the eBPF program with all the BTF files needed to run on different kernels. (The BTF files would be in the order of GBs if you want to support a high number of kernels.)

2. Downloading the BTF file for the current kernel at runtime delays the start of the program, and it's not always possible to reach an external host to download such a file.

Providing a BTF file with information about all the data types of the kernel just to run an eBPF program is overkill in many cases. Usually eBPF programs access only a few kernel fields.

This series implements BTFGen support in bpftool. This idea was discussed during the "Towards truly portable eBPF"[1] presentation at Linux Plumbers 2021.

There is a good example[2] of how to use BTFGen and BTFHub together to generate multiple BTF files, one for each existing/supported kernel, tailored to one application. For example: a complex bpf object might support nearly 400 kernels with BTF files summing to only 1.5 MB.

[0]: https://github.com/aquasecurity/btfhub/
[1]: https://www.youtube.com/watch?v=igJLKyP1lFk&t=2418s
[2]: https://github.com/aquasecurity/btfhub/tree/main/tools

Changelog:

v6 > v7:
 - use an array instead of a hashmap to store IDs
 - use btf__add_{struct,union}() instead of memcpy()
 - don't use a fixed path for the testing BTF file
 - update example to use DECLARE_LIBBPF_OPTS()

v5 > v6:
 - use a BTF structure to store used members/types instead of hashmaps
 - remove support for input/output folders
 - remove bpf_core_{created,free}_cand_cache()
 - reorganize commits to avoid having unused static functions
 - remove usage of libbpf_get_error()
 - fix some errno propagation issues
 - do not record full types for type-based relocations
 - add support for BTF_KIND_FUNC_PROTO
 - implement tests based on the core_reloc ones

v4 > v5:
 - move some checks before invoking prog->obj->gen_loader
 - use p_info() instead of printf()
 - improve command output
 - fix issue with record_relo_core()
 - implement bash completion
 - write man page
 - implement some tests

v3 > v4:
 - parse BTF and BTF.ext sections in bpftool and use bpf_core_calc_relo_insn() directly
 - expose fewer internal details from libbpf to bpftool
 - implement support for enum-based relocations
 - split commits in a more granular way

v2 > v3:
 - expose internal libbpf APIs to bpftool instead
 - implement btfgen in bpftool
 - drop btf__raw_data() from libbpf

v1 > v2:
 - introduce bpf_object__prepare() and 'record_core_relos' to expose CO-RE relocations instead of bpf_object__reloc_info_gen()
 - rename btf__save_to_file() to btf__raw_data()

v1: https://lore.kernel.org/bpf/20211027203727.208847-1-mauricio@kinvolk.io/
v2: https://lore.kernel.org/bpf/20211116164208.164245-1-mauricio@kinvolk.io/
v3: https://lore.kernel.org/bpf/20211217185654.311609-1-mauricio@kinvolk.io/
v4: https://lore.kernel.org/bpf/20220112142709.102423-1-mauricio@kinvolk.io/
v5: https://lore.kernel.org/bpf/20220128223312.1253169-1-mauricio@kinvolk.io/
v6: https://lore.kernel.org/bpf/20220209222646.348365-1-mauricio@kinvolk.io/

Mauricio Vásquez (6):
  libbpf: split bpf_core_apply_relo()
  libbpf: Expose bpf_core_{add,free}_cands() to bpftool
  bpftool: Add gen min_core_btf command
  bpftool: Implement "gen min_core_btf" logic
  bpftool: Implement btfgen_get_btf()
  selftests/bpf: Test "bpftool gen min_core_btf"

====================

Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
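As a usage illustration of the workflow described above, here is a hedged sketch that is not part of this series: the generation step is the "bpftool gen min_core_btf INPUT OUTPUT OBJECT [OBJECT...]" command added by these patches, and the loading side uses libbpf's existing btf_custom_path open option together with DECLARE_LIBBPF_OPTS(), which the v6 > v7 changelog mentions. The function name and paths below are illustrative only.

/*
 * Hedged sketch: open and load a BPF object against a trimmed BTF file
 * produced by "bpftool gen min_core_btf", e.g. one file per kernel
 * generated from BTFHub sources. Not taken from the series itself.
 */
#include <bpf/libbpf.h>

static int load_with_min_core_btf(const char *bpf_obj_path,
				  const char *min_btf_path)
{
	DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts,
			    .btf_custom_path = min_btf_path);
	struct bpf_object *obj;
	int err;

	obj = bpf_object__open_file(bpf_obj_path, &opts);
	err = libbpf_get_error(obj);
	if (err)
		return err;

	/* CO-RE relocations are resolved against min_btf_path during load */
	err = bpf_object__load(obj);
	if (err)
		bpf_object__close(obj);
	return err;
}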
Diffstat (limited to 'tools/lib/bpf/libbpf.c')
-rw-r--r--  tools/lib/bpf/libbpf.c | 88
1 file changed, 50 insertions(+), 38 deletions(-)
diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
index 2262bcdfee921..ad43b6ce825e6 100644
--- a/tools/lib/bpf/libbpf.c
+++ b/tools/lib/bpf/libbpf.c
@@ -5192,18 +5192,21 @@ size_t bpf_core_essential_name_len(const char *name)
return n;
}
-static void bpf_core_free_cands(struct bpf_core_cand_list *cands)
+void bpf_core_free_cands(struct bpf_core_cand_list *cands)
{
+ if (!cands)
+ return;
+
free(cands->cands);
free(cands);
}
-static int bpf_core_add_cands(struct bpf_core_cand *local_cand,
- size_t local_essent_len,
- const struct btf *targ_btf,
- const char *targ_btf_name,
- int targ_start_id,
- struct bpf_core_cand_list *cands)
+int bpf_core_add_cands(struct bpf_core_cand *local_cand,
+ size_t local_essent_len,
+ const struct btf *targ_btf,
+ const char *targ_btf_name,
+ int targ_start_id,
+ struct bpf_core_cand_list *cands)
{
struct bpf_core_cand *new_cands, *cand;
const struct btf_type *t, *local_t;
@@ -5530,11 +5533,12 @@ static int record_relo_core(struct bpf_program *prog,
return 0;
}
-static int bpf_core_apply_relo(struct bpf_program *prog,
- const struct bpf_core_relo *relo,
- int relo_idx,
- const struct btf *local_btf,
- struct hashmap *cand_cache)
+static int bpf_core_resolve_relo(struct bpf_program *prog,
+ const struct bpf_core_relo *relo,
+ int relo_idx,
+ const struct btf *local_btf,
+ struct hashmap *cand_cache,
+ struct bpf_core_relo_res *targ_res)
{
struct bpf_core_spec specs_scratch[3] = {};
const void *type_key = u32_as_hash_key(relo->type_id);
@@ -5543,20 +5547,7 @@ static int bpf_core_apply_relo(struct bpf_program *prog,
const struct btf_type *local_type;
const char *local_name;
__u32 local_id = relo->type_id;
- struct bpf_insn *insn;
- int insn_idx, err;
-
- if (relo->insn_off % BPF_INSN_SZ)
- return -EINVAL;
- insn_idx = relo->insn_off / BPF_INSN_SZ;
- /* adjust insn_idx from section frame of reference to the local
- * program's frame of reference; (sub-)program code is not yet
- * relocated, so it's enough to just subtract in-section offset
- */
- insn_idx = insn_idx - prog->sec_insn_off;
- if (insn_idx >= prog->insns_cnt)
- return -EINVAL;
- insn = &prog->insns[insn_idx];
+ int err;
local_type = btf__type_by_id(local_btf, local_id);
if (!local_type)
@@ -5566,15 +5557,6 @@ static int bpf_core_apply_relo(struct bpf_program *prog,
if (!local_name)
return -EINVAL;
- if (prog->obj->gen_loader) {
- const char *spec_str = btf__name_by_offset(local_btf, relo->access_str_off);
-
- pr_debug("record_relo_core: prog %td insn[%d] %s %s %s final insn_idx %d\n",
- prog - prog->obj->programs, relo->insn_off / 8,
- btf_kind_str(local_type), local_name, spec_str, insn_idx);
- return record_relo_core(prog, relo, insn_idx);
- }
-
if (relo->kind != BPF_CORE_TYPE_ID_LOCAL &&
!hashmap__find(cand_cache, type_key, (void **)&cands)) {
cands = bpf_core_find_cands(prog->obj, local_btf, local_id);
@@ -5591,19 +5573,21 @@ static int bpf_core_apply_relo(struct bpf_program *prog,
}
}
- return bpf_core_apply_relo_insn(prog_name, insn, insn_idx, relo,
- relo_idx, local_btf, cands, specs_scratch);
+ return bpf_core_calc_relo_insn(prog_name, relo, relo_idx, local_btf, cands, specs_scratch,
+ targ_res);
}
static int
bpf_object__relocate_core(struct bpf_object *obj, const char *targ_btf_path)
{
const struct btf_ext_info_sec *sec;
+ struct bpf_core_relo_res targ_res;
const struct bpf_core_relo *rec;
const struct btf_ext_info *seg;
struct hashmap_entry *entry;
struct hashmap *cand_cache = NULL;
struct bpf_program *prog;
+ struct bpf_insn *insn;
const char *sec_name;
int i, err = 0, insn_idx, sec_idx;
@@ -5654,6 +5638,8 @@ bpf_object__relocate_core(struct bpf_object *obj, const char *targ_btf_path)
sec_name, sec->num_info);
for_each_btf_ext_rec(seg, sec, i, rec) {
+ if (rec->insn_off % BPF_INSN_SZ)
+ return -EINVAL;
insn_idx = rec->insn_off / BPF_INSN_SZ;
prog = find_prog_by_sec_insn(obj, sec_idx, insn_idx);
if (!prog) {
@@ -5668,12 +5654,38 @@ bpf_object__relocate_core(struct bpf_object *obj, const char *targ_btf_path)
if (!prog->load)
continue;
- err = bpf_core_apply_relo(prog, rec, i, obj->btf, cand_cache);
+ /* adjust insn_idx from section frame of reference to the local
+ * program's frame of reference; (sub-)program code is not yet
+ * relocated, so it's enough to just subtract in-section offset
+ */
+ insn_idx = insn_idx - prog->sec_insn_off;
+ if (insn_idx >= prog->insns_cnt)
+ return -EINVAL;
+ insn = &prog->insns[insn_idx];
+
+ if (prog->obj->gen_loader) {
+ err = record_relo_core(prog, rec, insn_idx);
+ if (err) {
+ pr_warn("prog '%s': relo #%d: failed to record relocation: %d\n",
+ prog->name, i, err);
+ goto out;
+ }
+ continue;
+ }
+
+ err = bpf_core_resolve_relo(prog, rec, i, obj->btf, cand_cache, &targ_res);
if (err) {
pr_warn("prog '%s': relo #%d: failed to relocate: %d\n",
prog->name, i, err);
goto out;
}
+
+ err = bpf_core_patch_insn(prog->name, insn, insn_idx, rec, i, &targ_res);
+ if (err) {
+ pr_warn("prog '%s': relo #%d: failed to patch insn #%u: %d\n",
+ prog->name, i, insn_idx, err);
+ goto out;
+ }
}
}
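For context, the two functions un-staticized at the top of this diff (bpf_core_add_cands() and bpf_core_free_cands()) and the calc/patch split shown in bpf_object__relocate_core() are what allow bpftool's BTFGen to resolve CO-RE relocations without patching any instruction. A rough, hedged sketch of that caller-side pattern follows; it is not the actual bpftool implementation, it assumes the declarations come from libbpf's internal headers, and "local_btf", "targ_btf", "relo" and "local_name" are assumed to be parsed from the object's .BTF/.BTF.ext sections by the caller.

/*
 * Hedged sketch: resolve a single CO-RE relocation against a target BTF
 * using the APIs exposed by this series, recording the result in
 * *targ_res without modifying any bpf_insn. The bpf_core_cand field
 * names are assumptions based on this era of libbpf.
 */
static int resolve_one_relo(const struct btf *local_btf,
			    const struct btf *targ_btf,
			    const struct bpf_core_relo *relo,
			    const char *local_name,
			    struct bpf_core_relo_res *targ_res)
{
	struct bpf_core_cand local_cand = {
		.btf = local_btf,
		.id = relo->type_id,
	};
	struct bpf_core_spec specs_scratch[3] = {};
	struct bpf_core_cand_list *cands;
	size_t essent_len;
	int err;

	/* bpf_core_free_cands() frees the list itself, so heap-allocate it */
	cands = calloc(1, sizeof(*cands));
	if (!cands)
		return -ENOMEM;

	/* ignore "___suffix" flavors when matching candidate names */
	essent_len = bpf_core_essential_name_len(local_name);

	/* collect candidate types from the target (e.g. kernel) BTF */
	err = bpf_core_add_cands(&local_cand, essent_len, targ_btf,
				 "target_btf", 1, cands);
	if (!err)
		/* compute the relocation result only; nothing is patched */
		err = bpf_core_calc_relo_insn("prog", relo, 0, local_btf,
					      cands, specs_scratch, targ_res);

	bpf_core_free_cands(cands);
	return err;
}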