Diffstat (limited to 'tools/perf/util/bpf-loader.c')
-rw-r--r--	tools/perf/util/bpf-loader.c	355
1 file changed, 267 insertions(+), 88 deletions(-)
diff --git a/tools/perf/util/bpf-loader.c b/tools/perf/util/bpf-loader.c
index 16ec605a9fe4..f8ad581ea247 100644
--- a/tools/perf/util/bpf-loader.c
+++ b/tools/perf/util/bpf-loader.c
@@ -26,6 +26,8 @@
 #include "util.h"
 #include "llvm-utils.h"
 #include "c++/clang-c.h"
+#include "hashmap.h"
+#include "asm/bug.h"
 #include <internal/xyarray.h>
@@ -49,38 +51,107 @@ struct bpf_prog_priv {
 	int *type_mapping;
 };
 
+struct bpf_perf_object {
+	struct list_head list;
+	struct bpf_object *obj;
+};
+
+static LIST_HEAD(bpf_objects_list);
+static struct hashmap *bpf_program_hash;
+static struct hashmap *bpf_map_hash;
+
+static struct bpf_perf_object *
+bpf_perf_object__next(struct bpf_perf_object *prev)
+{
+	struct bpf_perf_object *next;
+
+	if (!prev)
+		next = list_first_entry(&bpf_objects_list,
+					struct bpf_perf_object,
+					list);
+	else
+		next = list_next_entry(prev, list);
+
+	/* Empty list is noticed here so don't need checking on entry. */
+	if (&next->list == &bpf_objects_list)
+		return NULL;
+
+	return next;
+}
+
+#define bpf_perf_object__for_each(perf_obj, tmp)	\
+	for ((perf_obj) = bpf_perf_object__next(NULL),	\
+	     (tmp) = bpf_perf_object__next(perf_obj);	\
+	     (perf_obj) != NULL;			\
+	     (perf_obj) = (tmp), (tmp) = bpf_perf_object__next(tmp))
+
 static bool libbpf_initialized;
 
+static int bpf_perf_object__add(struct bpf_object *obj)
+{
+	struct bpf_perf_object *perf_obj = zalloc(sizeof(*perf_obj));
+
+	if (perf_obj) {
+		INIT_LIST_HEAD(&perf_obj->list);
+		perf_obj->obj = obj;
+		list_add_tail(&perf_obj->list, &bpf_objects_list);
+	}
+	return perf_obj ? 0 : -ENOMEM;
+}
+
+static int libbpf_init(void)
+{
+	if (libbpf_initialized)
+		return 0;
+
+	libbpf_set_print(libbpf_perf_print);
+	libbpf_initialized = true;
+	return 0;
+}
+
 struct bpf_object *
 bpf__prepare_load_buffer(void *obj_buf, size_t obj_buf_sz, const char *name)
 {
+	LIBBPF_OPTS(bpf_object_open_opts, opts, .object_name = name);
 	struct bpf_object *obj;
+	int err;
 
-	if (!libbpf_initialized) {
-		libbpf_set_print(libbpf_perf_print);
-		libbpf_initialized = true;
-	}
+	err = libbpf_init();
+	if (err)
+		return ERR_PTR(err);
 
-	obj = bpf_object__open_buffer(obj_buf, obj_buf_sz, name);
+	obj = bpf_object__open_mem(obj_buf, obj_buf_sz, &opts);
 	if (IS_ERR_OR_NULL(obj)) {
 		pr_debug("bpf: failed to load buffer\n");
 		return ERR_PTR(-EINVAL);
 	}
 
+	if (bpf_perf_object__add(obj)) {
+		bpf_object__close(obj);
+		return ERR_PTR(-ENOMEM);
+	}
+
 	return obj;
 }
 
+static void bpf_perf_object__close(struct bpf_perf_object *perf_obj)
+{
+	list_del(&perf_obj->list);
+	bpf_object__close(perf_obj->obj);
+	free(perf_obj);
+}
+
 struct bpf_object *bpf__prepare_load(const char *filename, bool source)
 {
+	LIBBPF_OPTS(bpf_object_open_opts, opts, .object_name = filename);
 	struct bpf_object *obj;
+	int err;
 
-	if (!libbpf_initialized) {
-		libbpf_set_print(libbpf_perf_print);
-		libbpf_initialized = true;
-	}
+	err = libbpf_init();
+	if (err)
+		return ERR_PTR(err);
 
 	if (source) {
-		int err;
 		void *obj_buf;
 		size_t obj_buf_sz;
 
@@ -94,35 +165,31 @@ struct bpf_object *bpf__prepare_load(const char *filename, bool source)
 			return ERR_PTR(-BPF_LOADER_ERRNO__COMPILE);
 		} else
 			pr_debug("bpf: successful builtin compilation\n");
-		obj = bpf_object__open_buffer(obj_buf, obj_buf_sz, filename);
+		obj = bpf_object__open_mem(obj_buf, obj_buf_sz, &opts);
 
 		if (!IS_ERR_OR_NULL(obj) && llvm_param.dump_obj)
 			llvm__dump_obj(filename, obj_buf, obj_buf_sz);
 
 		free(obj_buf);
-	} else
+	} else {
 		obj = bpf_object__open(filename);
+	}
 
 	if (IS_ERR_OR_NULL(obj)) {
 		pr_debug("bpf: failed to load %s\n", filename);
 		return obj;
 	}
 
-	return obj;
-}
-
-void bpf__clear(void)
-{
-	struct bpf_object *obj, *tmp;
-
-	bpf_object__for_each_safe(obj, tmp) {
-		bpf__unprobe(obj);
+	if (bpf_perf_object__add(obj)) {
 		bpf_object__close(obj);
+		return ERR_PTR(-BPF_LOADER_ERRNO__COMPILE);
 	}
+
+	return obj;
 }
 
 static void
-clear_prog_priv(struct bpf_program *prog __maybe_unused,
+clear_prog_priv(const struct bpf_program *prog __maybe_unused,
 		void *_priv)
 {
 	struct bpf_prog_priv *priv = _priv;
@@ -135,6 +202,83 @@ clear_prog_priv(struct bpf_program *prog __maybe_unused,
 	free(priv);
 }
 
+static void bpf_program_hash_free(void)
+{
+	struct hashmap_entry *cur;
+	size_t bkt;
+
+	if (IS_ERR_OR_NULL(bpf_program_hash))
+		return;
+
+	hashmap__for_each_entry(bpf_program_hash, cur, bkt)
+		clear_prog_priv(cur->key, cur->value);
+
+	hashmap__free(bpf_program_hash);
+	bpf_program_hash = NULL;
+}
+
+static void bpf_map_hash_free(void);
+
+void bpf__clear(void)
+{
+	struct bpf_perf_object *perf_obj, *tmp;
+
+	bpf_perf_object__for_each(perf_obj, tmp) {
+		bpf__unprobe(perf_obj->obj);
+		bpf_perf_object__close(perf_obj);
+	}
+
+	bpf_program_hash_free();
+	bpf_map_hash_free();
+}
+
+static size_t ptr_hash(const void *__key, void *ctx __maybe_unused)
+{
+	return (size_t) __key;
+}
+
+static bool ptr_equal(const void *key1, const void *key2,
+		      void *ctx __maybe_unused)
+{
+	return key1 == key2;
+}
+
+static void *program_priv(const struct bpf_program *prog)
+{
+	void *priv;
+
+	if (IS_ERR_OR_NULL(bpf_program_hash))
+		return NULL;
+	if (!hashmap__find(bpf_program_hash, prog, &priv))
+		return NULL;
+	return priv;
+}
+
+static int program_set_priv(struct bpf_program *prog, void *priv)
+{
+	void *old_priv;
+
+	/*
+	 * Should not happen, we warn about it in the
+	 * caller function - config_bpf_program
+	 */
+	if (IS_ERR(bpf_program_hash))
+		return PTR_ERR(bpf_program_hash);
+
+	if (!bpf_program_hash) {
+		bpf_program_hash = hashmap__new(ptr_hash, ptr_equal, NULL);
+		if (IS_ERR(bpf_program_hash))
+			return PTR_ERR(bpf_program_hash);
+	}
+
+	old_priv = program_priv(prog);
+	if (old_priv) {
+		clear_prog_priv(prog, old_priv);
+		return hashmap__set(bpf_program_hash, prog, priv, NULL, NULL);
+	}
+	return hashmap__add(bpf_program_hash, prog, priv);
+}
+
 static int
 prog_config__exec(const char *value, struct perf_probe_event *pev)
 {
@@ -376,7 +520,7 @@ config_bpf_program(struct bpf_program *prog)
 	pr_debug("bpf: config '%s' is ok\n", config_str);
 
 set_priv:
-	err = bpf_program__set_priv(prog, priv, clear_prog_priv);
+	err = program_set_priv(prog, priv);
 	if (err) {
 		pr_debug("Failed to set priv for program '%s'\n", config_str);
 		goto errout;
@@ -417,7 +561,7 @@ preproc_gen_prologue(struct bpf_program *prog, int n,
 		     struct bpf_insn *orig_insns, int orig_insns_cnt,
 		     struct bpf_prog_prep_result *res)
 {
-	struct bpf_prog_priv *priv = bpf_program__priv(prog);
+	struct bpf_prog_priv *priv = program_priv(prog);
 	struct probe_trace_event *tev;
 	struct perf_probe_event *pev;
 	struct bpf_insn *buf;
@@ -568,7 +712,7 @@ static int map_prologue(struct perf_probe_event *pev, int *mapping,
 
 static int hook_load_preprocessor(struct bpf_program *prog)
 {
-	struct bpf_prog_priv *priv = bpf_program__priv(prog);
+	struct bpf_prog_priv *priv = program_priv(prog);
 	struct perf_probe_event *pev;
 	bool need_prologue = false;
 	int err, i;
@@ -644,7 +788,7 @@ int bpf__probe(struct bpf_object *obj)
 		if (err)
 			goto out;
 
-		priv = bpf_program__priv(prog);
+		priv = program_priv(prog);
 		if (IS_ERR_OR_NULL(priv)) {
 			if (!priv)
 				err = -BPF_LOADER_ERRNO__INTERNAL;
@@ -654,11 +798,11 @@ int bpf__probe(struct bpf_object *obj)
 		}
 
 		if (priv->is_tp) {
-			bpf_program__set_tracepoint(prog);
+			bpf_program__set_type(prog, BPF_PROG_TYPE_TRACEPOINT);
 			continue;
 		}
 
-		bpf_program__set_kprobe(prog);
+		bpf_program__set_type(prog, BPF_PROG_TYPE_KPROBE);
 		pev = &priv->pev;
 
 		err = convert_perf_probe_events(pev, 1);
@@ -696,7 +840,7 @@ int bpf__unprobe(struct bpf_object *obj)
 	struct bpf_program *prog;
 
 	bpf_object__for_each_program(prog, obj) {
-		struct bpf_prog_priv *priv = bpf_program__priv(prog);
+		struct bpf_prog_priv *priv = program_priv(prog);
 		int i;
 
 		if (IS_ERR_OR_NULL(priv) || priv->is_tp)
@@ -752,7 +896,7 @@ int bpf__foreach_event(struct bpf_object *obj,
 	int err;
 
 	bpf_object__for_each_program(prog, obj) {
-		struct bpf_prog_priv *priv = bpf_program__priv(prog);
+		struct bpf_prog_priv *priv = program_priv(prog);
 		struct probe_trace_event *tev;
 		struct perf_probe_event *pev;
 		int i, fd;
@@ -848,7 +992,7 @@ bpf_map_priv__purge(struct bpf_map_priv *priv)
 }
 
 static void
-bpf_map_priv__clear(struct bpf_map *map __maybe_unused,
+bpf_map_priv__clear(const struct bpf_map *map __maybe_unused,
 		    void *_priv)
 {
 	struct bpf_map_priv *priv = _priv;
@@ -857,6 +1001,53 @@ bpf_map_priv__clear(struct bpf_map *map __maybe_unused,
 	free(priv);
 }
 
+static void *map_priv(const struct bpf_map *map)
+{
+	void *priv;
+
+	if (IS_ERR_OR_NULL(bpf_map_hash))
+		return NULL;
+	if (!hashmap__find(bpf_map_hash, map, &priv))
+		return NULL;
+	return priv;
+}
+
+static void bpf_map_hash_free(void)
+{
+	struct hashmap_entry *cur;
+	size_t bkt;
+
+	if (IS_ERR_OR_NULL(bpf_map_hash))
+		return;
+
+	hashmap__for_each_entry(bpf_map_hash, cur, bkt)
+		bpf_map_priv__clear(cur->key, cur->value);
+
+	hashmap__free(bpf_map_hash);
+	bpf_map_hash = NULL;
+}
+
+static int map_set_priv(struct bpf_map *map, void *priv)
+{
+	void *old_priv;
+
+	if (WARN_ON_ONCE(IS_ERR(bpf_map_hash)))
+		return PTR_ERR(bpf_program_hash);
+
+	if (!bpf_map_hash) {
+		bpf_map_hash = hashmap__new(ptr_hash, ptr_equal, NULL);
+		if (IS_ERR(bpf_map_hash))
+			return PTR_ERR(bpf_map_hash);
+	}
+
+	old_priv = map_priv(map);
+	if (old_priv) {
+		bpf_map_priv__clear(map, old_priv);
+		return hashmap__set(bpf_map_hash, map, priv, NULL, NULL);
+	}
+	return hashmap__add(bpf_map_hash, map, priv);
+}
+
 static int
 bpf_map_op_setkey(struct bpf_map_op *op, struct parse_events_term *term)
 {
@@ -956,7 +1147,7 @@ static int
 bpf_map__add_op(struct bpf_map *map, struct bpf_map_op *op)
 {
 	const char *map_name = bpf_map__name(map);
-	struct bpf_map_priv *priv = bpf_map__priv(map);
+	struct bpf_map_priv *priv = map_priv(map);
 
 	if (IS_ERR(priv)) {
 		pr_debug("Failed to get private from map %s\n", map_name);
@@ -971,7 +1162,7 @@ bpf_map__add_op(struct bpf_map *map, struct bpf_map_op *op)
 		}
 		INIT_LIST_HEAD(&priv->ops_list);
 
-		if (bpf_map__set_priv(map, priv, bpf_map_priv__clear)) {
+		if (map_set_priv(map, priv)) {
 			free(priv);
 			return -BPF_LOADER_ERRNO__INTERNAL;
 		}
@@ -1005,24 +1196,22 @@ __bpf_map__config_value(struct bpf_map *map,
 {
 	struct bpf_map_op *op;
 	const char *map_name = bpf_map__name(map);
-	const struct bpf_map_def *def = bpf_map__def(map);
 
-	if (IS_ERR(def)) {
-		pr_debug("Unable to get map definition from '%s'\n",
-			 map_name);
+	if (!map) {
+		pr_debug("Map '%s' is invalid\n", map_name);
 		return -BPF_LOADER_ERRNO__INTERNAL;
 	}
 
-	if (def->type != BPF_MAP_TYPE_ARRAY) {
+	if (bpf_map__type(map) != BPF_MAP_TYPE_ARRAY) {
 		pr_debug("Map %s type is not BPF_MAP_TYPE_ARRAY\n",
 			 map_name);
 		return -BPF_LOADER_ERRNO__OBJCONF_MAP_TYPE;
 	}
-	if (def->key_size < sizeof(unsigned int)) {
+	if (bpf_map__key_size(map) < sizeof(unsigned int)) {
 		pr_debug("Map %s has incorrect key size\n", map_name);
 		return -BPF_LOADER_ERRNO__OBJCONF_MAP_KEYSIZE;
 	}
 
-	switch (def->value_size) {
+	switch (bpf_map__value_size(map)) {
 	case 1:
 	case 2:
 	case 4:
@@ -1064,7 +1253,6 @@ __bpf_map__config_event(struct bpf_map *map,
 			struct parse_events_term *term,
 			struct evlist *evlist)
 {
-	const struct bpf_map_def *def;
 	struct bpf_map_op *op;
 	const char *map_name = bpf_map__name(map);
 	struct evsel *evsel = evlist__find_evsel_by_str(evlist, term->val.str);
@@ -1075,18 +1263,16 @@ __bpf_map__config_event(struct bpf_map *map,
 		return -BPF_LOADER_ERRNO__OBJCONF_MAP_NOEVT;
 	}
 
-	def = bpf_map__def(map);
-	if (IS_ERR(def)) {
-		pr_debug("Unable to get map definition from '%s'\n",
-			 map_name);
-		return PTR_ERR(def);
+	if (!map) {
+		pr_debug("Map '%s' is invalid\n", map_name);
+		return PTR_ERR(map);
 	}
 
 	/*
 	 * No need to check key_size and value_size:
 	 * kernel has already checked them.
 	 */
-	if (def->type != BPF_MAP_TYPE_PERF_EVENT_ARRAY) {
+	if (bpf_map__type(map) != BPF_MAP_TYPE_PERF_EVENT_ARRAY) {
 		pr_debug("Map %s type is not BPF_MAP_TYPE_PERF_EVENT_ARRAY\n",
 			 map_name);
 		return -BPF_LOADER_ERRNO__OBJCONF_MAP_TYPE;
@@ -1135,7 +1321,6 @@ config_map_indices_range_check(struct parse_events_term *term,
 			       const char *map_name)
 {
 	struct parse_events_array *array = &term->array;
-	const struct bpf_map_def *def;
 	unsigned int i;
 
 	if (!array->nr_ranges)
@@ -1146,10 +1331,8 @@ config_map_indices_range_check(struct parse_events_term *term,
 		return -BPF_LOADER_ERRNO__INTERNAL;
 	}
 
-	def = bpf_map__def(map);
-	if (IS_ERR(def)) {
-		pr_debug("ERROR: Unable to get map definition from '%s'\n",
-			 map_name);
+	if (!map) {
+		pr_debug("Map '%s' is invalid\n", map_name);
 		return -BPF_LOADER_ERRNO__INTERNAL;
 	}
 
@@ -1158,7 +1341,7 @@ config_map_indices_range_check(struct parse_events_term *term,
 		size_t length = array->ranges[i].length;
 		unsigned int idx = start + length - 1;
 
-		if (idx >= def->max_entries) {
+		if (idx >= bpf_map__max_entries(map)) {
 			pr_debug("ERROR: index %d too large\n", idx);
 			return -BPF_LOADER_ERRNO__OBJCONF_MAP_IDX2BIG;
 		}
@@ -1252,21 +1435,21 @@ out:
 }
 
 typedef int (*map_config_func_t)(const char *name, int map_fd,
-				 const struct bpf_map_def *pdef,
+				 const struct bpf_map *map,
 				 struct bpf_map_op *op,
 				 void *pkey, void *arg);
 
 static int
 foreach_key_array_all(map_config_func_t func,
 		      void *arg, const char *name,
-		      int map_fd, const struct bpf_map_def *pdef,
+		      int map_fd, const struct bpf_map *map,
 		      struct bpf_map_op *op)
 {
 	unsigned int i;
 	int err;
 
-	for (i = 0; i < pdef->max_entries; i++) {
-		err = func(name, map_fd, pdef, op, &i, arg);
+	for (i = 0; i < bpf_map__max_entries(map); i++) {
+		err = func(name, map_fd, map, op, &i, arg);
 		if (err) {
 			pr_debug("ERROR: failed to insert value to %s[%u]\n",
 				 name, i);
@@ -1279,7 +1462,7 @@ foreach_key_array_all(map_config_func_t func,
 static int
 foreach_key_array_ranges(map_config_func_t func, void *arg,
 			 const char *name, int map_fd,
-			 const struct bpf_map_def *pdef,
+			 const struct bpf_map *map,
 			 struct bpf_map_op *op)
 {
 	unsigned int i, j;
@@ -1292,7 +1475,7 @@ foreach_key_array_ranges(map_config_func_t func, void *arg,
 		for (j = 0; j < length; j++) {
 			unsigned int idx = start + j;
 
-			err = func(name, map_fd, pdef, op, &idx, arg);
+			err = func(name, map_fd, map, op, &idx, arg);
 			if (err) {
 				pr_debug("ERROR: failed to insert value to %s[%u]\n",
 					 name, idx);
@@ -1308,11 +1491,10 @@ bpf_map_config_foreach_key(struct bpf_map *map,
 			   map_config_func_t func,
 			   void *arg)
 {
-	int err, map_fd;
+	int err, map_fd, type;
 	struct bpf_map_op *op;
-	const struct bpf_map_def *def;
 	const char *name = bpf_map__name(map);
-	struct bpf_map_priv *priv = bpf_map__priv(map);
+	struct bpf_map_priv *priv = map_priv(map);
 
 	if (IS_ERR(priv)) {
 		pr_debug("ERROR: failed to get private from map %s\n", name);
@@ -1323,9 +1505,8 @@ bpf_map_config_foreach_key(struct bpf_map *map,
 		return 0;
 	}
 
-	def = bpf_map__def(map);
-	if (IS_ERR(def)) {
-		pr_debug("ERROR: failed to get definition from map %s\n", name);
+	if (!map) {
+		pr_debug("Map '%s' is invalid\n", name);
 		return -BPF_LOADER_ERRNO__INTERNAL;
 	}
 	map_fd = bpf_map__fd(map);
@@ -1334,19 +1515,19 @@ bpf_map_config_foreach_key(struct bpf_map *map,
 		return map_fd;
 	}
 
+	type = bpf_map__type(map);
 	list_for_each_entry(op, &priv->ops_list, list) {
-		switch (def->type) {
+		switch (type) {
 		case BPF_MAP_TYPE_ARRAY:
 		case BPF_MAP_TYPE_PERF_EVENT_ARRAY:
 			switch (op->key_type) {
 			case BPF_MAP_KEY_ALL:
 				err = foreach_key_array_all(func, arg, name,
-							    map_fd, def, op);
+							    map_fd, map, op);
 				break;
 			case BPF_MAP_KEY_RANGES:
 				err = foreach_key_array_ranges(func, arg, name,
-							       map_fd, def,
-							       op);
+							       map_fd, map, op);
 				break;
 			default:
 				pr_debug("ERROR: keytype for map '%s' invalid\n",
@@ -1455,7 +1636,7 @@ apply_config_evsel_for_key(const char *name, int map_fd, void *pkey,
 
 static int
 apply_obj_config_map_for_key(const char *name, int map_fd,
-			     const struct bpf_map_def *pdef,
+			     const struct bpf_map *map,
 			     struct bpf_map_op *op,
 			     void *pkey, void *arg __maybe_unused)
 {
@@ -1464,7 +1645,7 @@ apply_obj_config_map_for_key(const char *name, int map_fd,
 	switch (op->op_type) {
 	case BPF_MAP_OP_SET_VALUE:
 		err = apply_config_value_for_key(map_fd, pkey,
-						 pdef->value_size,
+						 bpf_map__value_size(map),
 						 op->v.value);
 		break;
 	case BPF_MAP_OP_SET_EVSEL:
@@ -1502,11 +1683,11 @@ apply_obj_config_object(struct bpf_object *obj)
 
 int bpf__apply_obj_config(void)
 {
-	struct bpf_object *obj, *tmp;
+	struct bpf_perf_object *perf_obj, *tmp;
 	int err;
 
-	bpf_object__for_each_safe(obj, tmp) {
-		err = apply_obj_config_object(obj);
+	bpf_perf_object__for_each(perf_obj, tmp) {
+		err = apply_obj_config_object(perf_obj->obj);
 		if (err)
 			return err;
 	}
@@ -1514,27 +1695,25 @@ int bpf__apply_obj_config(void)
 	return 0;
 }
 
-#define bpf__for_each_map(pos, obj, objtmp)	\
-	bpf_object__for_each_safe(obj, objtmp)	\
-		bpf_object__for_each_map(pos, obj)
+#define bpf__perf_for_each_map(map, pobj, tmp)	\
+	bpf_perf_object__for_each(pobj, tmp)	\
+		bpf_object__for_each_map(map, pobj->obj)
 
-#define bpf__for_each_map_named(pos, obj, objtmp, name)	\
-	bpf__for_each_map(pos, obj, objtmp)		\
-		if (bpf_map__name(pos) &&		\
-			(strcmp(name,			\
-				bpf_map__name(pos)) == 0))
+#define bpf__perf_for_each_map_named(map, pobj, pobjtmp, name)	\
+	bpf__perf_for_each_map(map, pobj, pobjtmp)		\
+		if (bpf_map__name(map) && (strcmp(name, bpf_map__name(map)) == 0))
 
 struct evsel *bpf__setup_output_event(struct evlist *evlist, const char *name)
 {
 	struct bpf_map_priv *tmpl_priv = NULL;
-	struct bpf_object *obj, *tmp;
+	struct bpf_perf_object *perf_obj, *tmp;
 	struct evsel *evsel = NULL;
 	struct bpf_map *map;
 	int err;
 	bool need_init = false;
 
-	bpf__for_each_map_named(map, obj, tmp, name) {
-		struct bpf_map_priv *priv = bpf_map__priv(map);
+	bpf__perf_for_each_map_named(map, perf_obj, tmp, name) {
+		struct bpf_map_priv *priv = map_priv(map);
 
 		if (IS_ERR(priv))
 			return ERR_PTR(-BPF_LOADER_ERRNO__INTERNAL);
@@ -1569,8 +1748,8 @@ struct evsel *bpf__setup_output_event(struct evlist *evlist, const char *name)
 		evsel = evlist__last(evlist);
 	}
 
-	bpf__for_each_map_named(map, obj, tmp, name) {
-		struct bpf_map_priv *priv = bpf_map__priv(map);
+	bpf__perf_for_each_map_named(map, perf_obj, tmp, name) {
+		struct bpf_map_priv *priv = map_priv(map);
 
 		if (IS_ERR(priv))
 			return ERR_PTR(-BPF_LOADER_ERRNO__INTERNAL);
@@ -1582,7 +1761,7 @@ struct evsel *bpf__setup_output_event(struct evlist *evlist, const char *name)
 		if (!priv)
 			return ERR_PTR(-ENOMEM);
 
-		err = bpf_map__set_priv(map, priv, bpf_map_priv__clear);
+		err = map_set_priv(map, priv);
 		if (err) {
 			bpf_map_priv__clear(map, priv);
 			return ERR_PTR(err);
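Not part of the patch above: a small, self-contained C sketch of the pointer-keyed "priv" pattern this diff switches to, for readers who want to see the idea outside of perf. Per-object private data lives in a table owned by the caller and keyed by the object pointer itself, replacing the removed bpf_program__set_priv()/bpf_program__priv() and bpf_map__set_priv()/bpf_map__priv() callbacks. The bucket table and the helper names here (priv_set, priv_find, NR_BUCKETS) are illustrative stand-ins, not the hashmap.h API that bpf-loader.c actually uses.

/*
 * Illustrative sketch only -- not code from the patch. Private data is
 * attached to an object by storing (object pointer -> priv pointer) in a
 * table the caller owns, instead of inside the libbpf object.
 */
#include <stdio.h>
#include <stdlib.h>

#define NR_BUCKETS 64

struct priv_entry {
	const void *key;		/* the object pointer */
	void *value;			/* its private data */
	struct priv_entry *next;
};

static struct priv_entry *buckets[NR_BUCKETS];

/* Hash the pointer value itself, in the spirit of ptr_hash() above. */
static size_t hash_ptr(const void *key)
{
	return ((size_t)key >> 4) % NR_BUCKETS;
}

/* Look up private data by object pointer (cf. program_priv()/map_priv()). */
static void *priv_find(const void *key)
{
	struct priv_entry *e;

	for (e = buckets[hash_ptr(key)]; e; e = e->next)
		if (e->key == key)	/* pointer identity, like ptr_equal() */
			return e->value;
	return NULL;
}

/* Attach private data to an object pointer (cf. program_set_priv()). */
static int priv_set(const void *key, void *value)
{
	size_t b = hash_ptr(key);
	struct priv_entry *e = calloc(1, sizeof(*e));

	if (!e)
		return -1;
	e->key = key;
	e->value = value;
	e->next = buckets[b];
	buckets[b] = e;
	return 0;
}

int main(void)
{
	int obj;			/* stands in for a struct bpf_program */
	static int obj_priv = 42;	/* stands in for struct bpf_prog_priv */

	if (priv_set(&obj, &obj_priv))
		return 1;
	printf("priv = %d\n", *(int *)priv_find(&obj));
	return 0;
}

In the patch itself the same idea is expressed with hashmap__new(ptr_hash, ptr_equal, NULL), hashmap__find(), and hashmap__add()/hashmap__set(), and the per-program and per-map tables are torn down from bpf__clear() via bpf_program_hash_free() and bpf_map_hash_free().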