| author    | Emil Tsalapatis <emil@etsalapatis.com>           | 2025-01-04 15:25:27 -0500 |
|-----------|--------------------------------------------------|---------------------------|
| committer | Alexei Starovoitov <ast@kernel.org>              | 2025-01-06 10:59:49 -0800 |
| commit    | 512816403ece6cbb67de3af359643384111a9647 (patch) |                           |
| tree      | dfa79743b38112ebd8ed921fc0bf863a03fe8c52         |                           |
| parent    | 2532608530eab68207e384053fae7db7f35256ee (diff)  |                           |
bpf: Allow bpf_for/bpf_repeat calls while holding a spinlock
Add the bpf_iter_num_* kfuncs called by bpf_for to special_kfunc_list,
and allow these calls even while holding a spin lock.
Signed-off-by: Emil Tsalapatis (Meta) <emil@etsalapatis.com>
Reviewed-by: Eduard Zingerman <eddyz87@gmail.com>
Link: https://lore.kernel.org/r/20250104202528.882482-2-emil@etsalapatis.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
-rw-r--r-- | kernel/bpf/verifier.c | 20
1 file changed, 19 insertions(+), 1 deletion(-)
```diff
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index d77abb87ffb1..b8ca227c78af 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -11690,6 +11690,9 @@ enum special_kfunc_type {
 	KF_bpf_get_kmem_cache,
 	KF_bpf_local_irq_save,
 	KF_bpf_local_irq_restore,
+	KF_bpf_iter_num_new,
+	KF_bpf_iter_num_next,
+	KF_bpf_iter_num_destroy,
 };
 
 BTF_SET_START(special_kfunc_set)
@@ -11765,6 +11768,9 @@ BTF_ID_UNUSED
 BTF_ID(func, bpf_get_kmem_cache)
 BTF_ID(func, bpf_local_irq_save)
 BTF_ID(func, bpf_local_irq_restore)
+BTF_ID(func, bpf_iter_num_new)
+BTF_ID(func, bpf_iter_num_next)
+BTF_ID(func, bpf_iter_num_destroy)
 
 static bool is_kfunc_ret_null(struct bpf_kfunc_call_arg_meta *meta)
 {
@@ -12151,12 +12157,24 @@ static bool is_bpf_rbtree_api_kfunc(u32 btf_id)
 	       btf_id == special_kfunc_list[KF_bpf_rbtree_first];
 }
 
+static bool is_bpf_iter_num_api_kfunc(u32 btf_id)
+{
+	return btf_id == special_kfunc_list[KF_bpf_iter_num_new] ||
+	       btf_id == special_kfunc_list[KF_bpf_iter_num_next] ||
+	       btf_id == special_kfunc_list[KF_bpf_iter_num_destroy];
+}
+
 static bool is_bpf_graph_api_kfunc(u32 btf_id)
 {
 	return is_bpf_list_api_kfunc(btf_id) || is_bpf_rbtree_api_kfunc(btf_id) ||
 	       btf_id == special_kfunc_list[KF_bpf_refcount_acquire_impl];
 }
 
+static bool kfunc_spin_allowed(u32 btf_id)
+{
+	return is_bpf_graph_api_kfunc(btf_id) || is_bpf_iter_num_api_kfunc(btf_id);
+}
+
 static bool is_sync_callback_calling_kfunc(u32 btf_id)
 {
 	return btf_id == special_kfunc_list[KF_bpf_rbtree_add_impl];
@@ -19048,7 +19066,7 @@ static int do_check(struct bpf_verifier_env *env)
 			if (env->cur_state->active_locks) {
 				if ((insn->src_reg == BPF_REG_0 && insn->imm != BPF_FUNC_spin_unlock) ||
 				    (insn->src_reg == BPF_PSEUDO_KFUNC_CALL &&
-				     (insn->off != 0 || !is_bpf_graph_api_kfunc(insn->imm)))) {
+				     (insn->off != 0 || !kfunc_spin_allowed(insn->imm)))) {
 					verbose(env, "function calls are not allowed while holding a lock\n");
 					return -EINVAL;
 				}
```
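
For context, the pattern this change lets the verifier accept looks roughly like the BPF program below. This is a minimal sketch, not part of the patch: it assumes a libbpf recent enough to provide the bpf_for() macro in bpf_helpers.h (which expands into the bpf_iter_num_new/next/destroy kfuncs), and the map, struct, and field names are hypothetical.

```c
/* Sketch: bpf_for() inside a bpf_spin_lock critical section. All names
 * (locked_state, state, drain_tokens, MAX_TOKENS) are made up for
 * illustration; only the bpf_for()/bpf_spin_lock() pattern reflects the patch.
 */
#include <vmlinux.h>
#include <bpf/bpf_helpers.h>

#define MAX_TOKENS 16

struct locked_state {
	struct bpf_spin_lock lock;
	u64 budget;
	u64 used;
};

struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 1);
	__type(key, u32);
	__type(value, struct locked_state);
} state SEC(".maps");

SEC("tc")
int drain_tokens(struct __sk_buff *skb)
{
	struct locked_state *s;
	u32 key = 0;
	int i;

	s = bpf_map_lookup_elem(&state, &key);
	if (!s)
		return 0;

	bpf_spin_lock(&s->lock);
	/* Before this patch, the hidden bpf_iter_num_* kfunc calls emitted by
	 * bpf_for() here were rejected with "function calls are not allowed
	 * while holding a lock".
	 */
	bpf_for(i, 0, MAX_TOKENS) {
		if (s->used >= s->budget)
			break;
		s->used++;
	}
	bpf_spin_unlock(&s->lock);

	return 0;
}

char _license[] SEC("license") = "GPL";
```

Note that the relaxation is deliberately narrow: under an active lock the verifier now accepts the bpf_iter_num_* kfuncs (via kfunc_spin_allowed()) in addition to the graph API kfuncs, while any other helper or kfunc call in the loop body is still rejected.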