summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorBrandon Kammerdiener <brandon.kammerdiener@intel.com>2025-04-24 11:32:51 -0400
committerGreg Kroah-Hartman <gregkh@linuxfoundation.org>2025-05-29 11:02:01 +0200
commit8014d3e56ec0c662a1bc059b3e6b46b6103879d4 (patch)
treec0d211b45907de0ed5374af09aa0a7effb1e8312
parent218c838d0356a4305a5705fa4599b682d6312144 (diff)
bpf: fix possible endless loop in BPF map iteration
[ Upstream commit 75673fda0c557ae26078177dd14d4857afbf128d ] The _safe variant used here gets the next element before running the callback, avoiding the endless loop condition. Signed-off-by: Brandon Kammerdiener <brandon.kammerdiener@intel.com> Link: https://lore.kernel.org/r/20250424153246.141677-2-brandon.kammerdiener@intel.com Signed-off-by: Alexei Starovoitov <ast@kernel.org> Acked-by: Hou Tao <houtao1@huawei.com> Signed-off-by: Sasha Levin <sashal@kernel.org>
-rw-r--r--kernel/bpf/hashtab.c2
1 file changed, 1 insertion, 1 deletion
diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
index bb3ba8ebaf3d..570e2f723144 100644
--- a/kernel/bpf/hashtab.c
+++ b/kernel/bpf/hashtab.c
@@ -2223,7 +2223,7 @@ static long bpf_for_each_hash_elem(struct bpf_map *map, bpf_callback_t callback_
b = &htab->buckets[i];
rcu_read_lock();
head = &b->head;
- hlist_nulls_for_each_entry_rcu(elem, n, head, hash_node) {
+ hlist_nulls_for_each_entry_safe(elem, n, head, hash_node) {
key = elem->key;
if (is_percpu) {
/* current cpu value for percpu map */