diff options
author | Alexei Starovoitov <ast@kernel.org> | 2020-05-01 10:36:32 -0700 |
---|---|---|
committer | Alexei Starovoitov <ast@kernel.org> | 2020-05-01 10:37:34 -0700 |
commit | 3dbb5b5040c396274a253b607289748f39a680a0 (patch) | |
tree | 6b010c99c159d08a48680e709ca37e3fe3e17971 /kernel/bpf/syscall.c | |
parent | c321022244708aec4675de4f032ef1ba9ff0c640 (diff) | |
parent | 31a9f7fe93378ab587d758d5b2e96a237caa7b8c (diff) |
Merge branch 'bpf_enable_stats'
Song Liu says:
====================
run_time_ns is a useful statistic for BPF programs. However, it is gated by
sysctl kernel.bpf_stats_enabled. When multiple user space tools are
toggling kernel.bpf_stats_enabled at the same time, they may confuse each
other.
Solve this problem with a new BPF command BPF_ENABLE_STATS.
Changes v8 => v9:
1. Clean up in selftest (Andrii).
2. Not using static variable in test program (Andrii).
Changes v7 => v8:
1. Change name BPF_STATS_RUNTIME_CNT => BPF_STATS_RUN_TIME (Alexei).
2. Add CHECK_ATTR to bpf_enable_stats() (Alexei).
3. Rebase (Andrii).
4. Simplify the selftest (Alexei).
Changes v6 => v7:
1. Add test to verify run_cnt matches count measured by the program.
Changes v5 => v6:
1. Simplify test program (Yonghong).
2. Rebase (with some conflicts).
Changes v4 => v5:
1. Use memset to zero bpf_attr in bpf_enable_stats() (Andrii).
Changes v3 => v4:
1. Add libbpf support and selftest;
2. Avoid cleaning trailing space.
Changes v2 => v3:
1. Rename the command to BPF_ENABLE_STATS, and make it extendible.
2. fix commit log;
3. remove unnecessary headers.
====================
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Diffstat (limited to 'kernel/bpf/syscall.c')
-rw-r--r-- | kernel/bpf/syscall.c | 57 |
1 files changed, 57 insertions, 0 deletions
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c index c75b2dd2459c..4f34eecec9ce 100644 --- a/kernel/bpf/syscall.c +++ b/kernel/bpf/syscall.c @@ -3872,6 +3872,60 @@ static int bpf_link_get_fd_by_id(const union bpf_attr *attr) return fd; } +DEFINE_MUTEX(bpf_stats_enabled_mutex); + +static int bpf_stats_release(struct inode *inode, struct file *file) +{ + mutex_lock(&bpf_stats_enabled_mutex); + static_key_slow_dec(&bpf_stats_enabled_key.key); + mutex_unlock(&bpf_stats_enabled_mutex); + return 0; +} + +static const struct file_operations bpf_stats_fops = { + .release = bpf_stats_release, +}; + +static int bpf_enable_runtime_stats(void) +{ + int fd; + + mutex_lock(&bpf_stats_enabled_mutex); + + /* Set a very high limit to avoid overflow */ + if (static_key_count(&bpf_stats_enabled_key.key) > INT_MAX / 2) { + mutex_unlock(&bpf_stats_enabled_mutex); + return -EBUSY; + } + + fd = anon_inode_getfd("bpf-stats", &bpf_stats_fops, NULL, O_CLOEXEC); + if (fd >= 0) + static_key_slow_inc(&bpf_stats_enabled_key.key); + + mutex_unlock(&bpf_stats_enabled_mutex); + return fd; +} + +#define BPF_ENABLE_STATS_LAST_FIELD enable_stats.type + +static int bpf_enable_stats(union bpf_attr *attr) +{ + + if (CHECK_ATTR(BPF_ENABLE_STATS)) + return -EINVAL; + + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + + switch (attr->enable_stats.type) { + case BPF_STATS_RUN_TIME: + return bpf_enable_runtime_stats(); + default: + break; + } + return -EINVAL; +} + SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, size) { union bpf_attr attr; @@ -3996,6 +4050,9 @@ SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, siz err = bpf_obj_get_next_id(&attr, uattr, &link_idr, &link_idr_lock); break; + case BPF_ENABLE_STATS: + err = bpf_enable_stats(&attr); + break; default: err = -EINVAL; break; |