Diffstat (limited to 'tools/lib/bpf/bpf.c')
-rw-r--r--   tools/lib/bpf/bpf.c   81
1 file changed, 81 insertions, 0 deletions
diff --git a/tools/lib/bpf/bpf.c b/tools/lib/bpf/bpf.c
index 1f84d706eb3e..9b64eed2b003 100644
--- a/tools/lib/bpf/bpf.c
+++ b/tools/lib/bpf/bpf.c
@@ -28,7 +28,9 @@
 #include <asm/unistd.h>
 #include <errno.h>
 #include <linux/bpf.h>
+#include <linux/filter.h>
 #include <limits.h>
+#include <sys/resource.h>
 #include "bpf.h"
 #include "libbpf.h"
 #include "libbpf_internal.h"
@@ -94,6 +96,77 @@ static inline int sys_bpf_prog_load(union bpf_attr *attr, unsigned int size, int
 	return fd;
 }
 
+/* Probe whether kernel switched from memlock-based (RLIMIT_MEMLOCK) to
+ * memcg-based memory accounting for BPF maps and progs. This was done in [0].
+ * We use the support for bpf_ktime_get_coarse_ns() helper, which was added in
+ * the same 5.11 Linux release ([1]), to detect memcg-based accounting for BPF.
+ *
+ *   [0] https://lore.kernel.org/bpf/20201201215900.3569844-1-guro@fb.com/
+ *   [1] d05512618056 ("bpf: Add bpf_ktime_get_coarse_ns helper")
+ */
+int probe_memcg_account(void)
+{
+	const size_t prog_load_attr_sz = offsetofend(union bpf_attr, attach_btf_obj_fd);
+	struct bpf_insn insns[] = {
+		BPF_EMIT_CALL(BPF_FUNC_ktime_get_coarse_ns),
+		BPF_EXIT_INSN(),
+	};
+	size_t insn_cnt = sizeof(insns) / sizeof(insns[0]);
+	union bpf_attr attr;
+	int prog_fd;
+
+	/* attempt loading freplace trying to use custom BTF */
+	memset(&attr, 0, prog_load_attr_sz);
+	attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
+	attr.insns = ptr_to_u64(insns);
+	attr.insn_cnt = insn_cnt;
+	attr.license = ptr_to_u64("GPL");
+
+	prog_fd = sys_bpf_fd(BPF_PROG_LOAD, &attr, prog_load_attr_sz);
+	if (prog_fd >= 0) {
+		close(prog_fd);
+		return 1;
+	}
+	return 0;
+}
+
+static bool memlock_bumped;
+static rlim_t memlock_rlim = RLIM_INFINITY;
+
+int libbpf_set_memlock_rlim(size_t memlock_bytes)
+{
+	if (memlock_bumped)
+		return libbpf_err(-EBUSY);
+
+	memlock_rlim = memlock_bytes;
+	return 0;
+}
+
+int bump_rlimit_memlock(void)
+{
+	struct rlimit rlim;
+
+	/* this the default in libbpf 1.0, but for now user has to opt-in explicitly */
+	if (!(libbpf_mode & LIBBPF_STRICT_AUTO_RLIMIT_MEMLOCK))
+		return 0;
+
+	/* if kernel supports memcg-based accounting, skip bumping RLIMIT_MEMLOCK */
+	if (memlock_bumped || kernel_supports(NULL, FEAT_MEMCG_ACCOUNT))
+		return 0;
+
+	memlock_bumped = true;
+
+	/* zero memlock_rlim_max disables auto-bumping RLIMIT_MEMLOCK */
+	if (memlock_rlim == 0)
+		return 0;
+
+	rlim.rlim_cur = rlim.rlim_max = memlock_rlim;
+	if (setrlimit(RLIMIT_MEMLOCK, &rlim))
+		return -errno;
+
+	return 0;
+}
+
 int bpf_map_create(enum bpf_map_type map_type,
 		   const char *map_name,
 		   __u32 key_size,
@@ -105,6 +178,8 @@ int bpf_map_create(enum bpf_map_type map_type,
 	union bpf_attr attr;
 	int fd;
 
+	bump_rlimit_memlock();
+
 	memset(&attr, 0, attr_sz);
 
 	if (!OPTS_VALID(opts, bpf_map_create_opts))
@@ -251,6 +326,8 @@ int bpf_prog_load_v0_6_0(enum bpf_prog_type prog_type,
 	union bpf_attr attr;
 	char *log_buf;
 
+	bump_rlimit_memlock();
+
 	if (!OPTS_VALID(opts, bpf_prog_load_opts))
 		return libbpf_err(-EINVAL);
@@ -456,6 +533,8 @@ int bpf_verify_program(enum bpf_prog_type type, const struct bpf_insn *insns,
 	union bpf_attr attr;
 	int fd;
 
+	bump_rlimit_memlock();
+
 	memset(&attr, 0, sizeof(attr));
 	attr.prog_type = type;
 	attr.insn_cnt = (__u32)insns_cnt;
@@ -1056,6 +1135,8 @@ int bpf_btf_load(const void *btf_data, size_t btf_size, const struct bpf_btf_loa
 	__u32 log_level;
 	int fd;
 
+	bump_rlimit_memlock();
+
 	memset(&attr, 0, attr_sz);
 
 	if (!OPTS_VALID(opts, bpf_btf_load_opts))
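
Usage sketch (not part of this commit): the diff above makes bpf_map_create(), bpf_prog_load(), bpf_verify_program() and bpf_btf_load() call bump_rlimit_memlock() before talking to the kernel, which bumps RLIMIT_MEMLOCK only when the application has opted in via the strict-mode flag and the kernel still uses memlock-based accounting. The program below shows how an application might opt in and optionally cap the bump. It assumes libbpf headers from the same series are installed (libbpf_legacy.h providing libbpf_set_strict_mode() and the LIBBPF_STRICT_AUTO_RLIMIT_MEMLOCK flag, and bpf.h exporting libbpf_set_memlock_rlim(), neither visible in this file-limited diff); the map name and sizes are illustrative only.

/* Hypothetical caller-side example; not taken from the kernel tree. */
#include <stdio.h>
#include <unistd.h>
#include <bpf/bpf.h>
#include <bpf/libbpf.h>
#include <bpf/libbpf_legacy.h>

int main(void)
{
	int map_fd;

	/* Opt in to automatic RLIMIT_MEMLOCK handling. On kernels with
	 * memcg-based accounting (5.11+), bump_rlimit_memlock() detects
	 * FEAT_MEMCG_ACCOUNT and does nothing.
	 */
	libbpf_set_strict_mode(LIBBPF_STRICT_AUTO_RLIMIT_MEMLOCK);

	/* Optionally cap the bump instead of the default RLIM_INFINITY;
	 * passing 0 disables the auto-bump entirely.
	 */
	libbpf_set_memlock_rlim(128 * 1024 * 1024 /* 128 MiB, arbitrary */);

	/* The first map/prog/BTF load triggers bump_rlimit_memlock(). */
	map_fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, "example_map",
				sizeof(int), sizeof(long long), 16, NULL);
	if (map_fd < 0) {
		fprintf(stderr, "bpf_map_create failed: %d\n", map_fd);
		return 1;
	}
	printf("map fd: %d\n", map_fd);
	close(map_fd);
	return 0;
}

Note that libbpf_set_memlock_rlim() must be called before the first load: once bump_rlimit_memlock() has run, memlock_bumped is set and further attempts return -EBUSY, as seen in the diff.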
