author     Jakub Kicinski <kuba@kernel.org>              2025-02-06 14:56:37 -0800
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>  2025-05-29 11:02:59 +0200
commit     247b420fea7988dca0e1287f1d4eb1e0b16b20f1 (patch)
tree       0165f088b99099656fdef66aefdc94545f45ae27
parent     b063f36a929a39e17fb5622e10cecf2c5d94c6c3 (diff)
net: page_pool: avoid false positive warning if NAPI was never added
[ Upstream commit c1e00bc4be06cacee6307cedb9b55bbaddb5044d ]

We expect NAPI to be in disabled state when page pool is torn down.
But it is also legal if the NAPI is completely uninitialized.

Reviewed-by: Mina Almasry <almasrymina@google.com>
Link: https://patch.msgid.link/20250206225638.1387810-4-kuba@kernel.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Signed-off-by: Sasha Levin <sashal@kernel.org>
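For illustration only (not part of the patch), a minimal sketch of the teardown path this change tolerates: a hypothetical driver that links a page pool to a NAPI instance which, on an error path, is never registered via netif_napi_add(). All driver names, fields, and parameters below are invented for the example.

/* Hypothetical driver teardown path -- sketch only, names invented */
#include <linux/err.h>
#include <linux/netdevice.h>
#include <net/page_pool/helpers.h>

struct foo_ring {
	struct napi_struct napi;	/* lives in zeroed driver memory */
	struct page_pool *pool;
};

static int foo_ring_alloc(struct foo_ring *ring, struct device *dev)
{
	struct page_pool_params pp = {
		.pool_size	= 256,
		.dev		= dev,
		.napi		= &ring->napi,	/* pool linked to NAPI up front */
	};

	ring->pool = page_pool_create(&pp);
	return PTR_ERR_OR_ZERO(ring->pool);
}

static void foo_ring_free(struct foo_ring *ring)
{
	/* If probe failed before netif_napi_add(), ring->napi.state is 0:
	 * NAPI_STATE_SCHED is not set, so the old WARN_ON() in
	 * page_pool_disable_direct_recycling() (reached via
	 * page_pool_destroy()) fired spuriously here.
	 */
	page_pool_destroy(ring->pool);
}

The NULL poll_list.next sentinel used by the new helper works because netif_napi_add() is what initializes poll_list (and sets NAPI_STATE_SCHED and list_owner = -1); a napi_struct in zeroed memory that was never added therefore has poll_list.next == NULL and is skipped by the checks.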
-rw-r--r--net/core/dev.h12
-rw-r--r--net/core/page_pool.c7
2 files changed, 14 insertions, 5 deletions
diff --git a/net/core/dev.h b/net/core/dev.h
index 2e3bb7669984..764e0097ccf2 100644
--- a/net/core/dev.h
+++ b/net/core/dev.h
@@ -148,6 +148,18 @@ void xdp_do_check_flushed(struct napi_struct *napi);
static inline void xdp_do_check_flushed(struct napi_struct *napi) { }
#endif
+/* Best effort check that NAPI is not idle (can't be scheduled to run) */
+static inline void napi_assert_will_not_race(const struct napi_struct *napi)
+{
+ /* uninitialized instance, can't race */
+ if (!napi->poll_list.next)
+ return;
+
+ /* SCHED bit is set on disabled instances */
+ WARN_ON(!test_bit(NAPI_STATE_SCHED, &napi->state));
+ WARN_ON(READ_ONCE(napi->list_owner) != -1);
+}
+
void kick_defer_list_purge(struct softnet_data *sd, unsigned int cpu);
#define XMIT_RECURSION_LIMIT 8
diff --git a/net/core/page_pool.c b/net/core/page_pool.c
index 7b20f6fcb82c..c8ce069605c4 100644
--- a/net/core/page_pool.c
+++ b/net/core/page_pool.c
@@ -25,6 +25,7 @@
#include <trace/events/page_pool.h>
+#include "dev.h"
#include "mp_dmabuf_devmem.h"
#include "netmem_priv.h"
#include "page_pool_priv.h"
@@ -1108,11 +1109,7 @@ void page_pool_disable_direct_recycling(struct page_pool *pool)
if (!pool->p.napi)
return;
- /* To avoid races with recycling and additional barriers make sure
- * pool and NAPI are unlinked when NAPI is disabled.
- */
- WARN_ON(!test_bit(NAPI_STATE_SCHED, &pool->p.napi->state));
- WARN_ON(READ_ONCE(pool->p.napi->list_owner) != -1);
+ napi_assert_will_not_race(pool->p.napi);
WRITE_ONCE(pool->p.napi, NULL);
}