author     Zhenhua Huang <quic_zhenhuah@quicinc.com>          2025-04-21 15:52:32 +0800
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>    2025-05-09 09:50:49 +0200
commit     dab2a13059a475b6392550f882276e170fe2fcff (patch)
tree       bfc1fbdb3ccab75e5129c42d64fffe6b04602f51
parent     e10ec6e32b003c9841505af3e82c73eb8e6d97af (diff)
mm, slab: clean up slab->obj_exts always
commit be8250786ca94952a19ce87f98ad9906448bc9ef upstream.

When memory allocation profiling is disabled at runtime or due to an
error, shutdown_mem_profiling() is called: a slab->obj_exts vector that
was allocated earlier remains. It won't be cleared by unaccount_slab()
because mem_alloc_profiling_enabled() is no longer true. This is
incorrect; slab->obj_exts should always be cleaned up in
unaccount_slab() to avoid the following error:

[...]BUG: Bad page state in process...
..
[...]page dumped because: page still charged to cgroup

[andriy.shevchenko@linux.intel.com: fold need_slab_obj_ext() into its only user]
Fixes: 21c690a349ba ("mm: introduce slabobj_ext to support slab object extensions")
Cc: stable@vger.kernel.org
Signed-off-by: Zhenhua Huang <quic_zhenhuah@quicinc.com>
Acked-by: David Rientjes <rientjes@google.com>
Acked-by: Harry Yoo <harry.yoo@oracle.com>
Tested-by: Harry Yoo <harry.yoo@oracle.com>
Acked-by: Suren Baghdasaryan <surenb@google.com>
Link: https://patch.msgid.link/20250421075232.2165527-1-quic_zhenhuah@quicinc.com
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
[surenb: fixed trivial merge conflict in alloc_tagging_slab_alloc_hook(),
 skipped inlining free_slab_obj_exts() as it's already inline in 6.12]
Signed-off-by: Suren Baghdasaryan <surenb@google.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
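The leak described above can be modeled outside the kernel. The following
stand-alone C program is only an illustration of the sequence in the commit
message, not kernel code: profiling_enabled, old_unaccount_slab() and the
toy struct slab are hypothetical stand-ins for mem_alloc_profiling_enabled(),
the pre-fix unaccount_slab() and the real slab metadata.

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

static bool profiling_enabled = true;   /* stand-in for mem_alloc_profiling_enabled() */

struct slab { void *obj_exts; };        /* toy stand-in for the kernel's struct slab */

/* Pre-fix logic: cleanup only happens while profiling still looks enabled. */
static void old_unaccount_slab(struct slab *s)
{
        if (profiling_enabled) {
                free(s->obj_exts);
                s->obj_exts = NULL;
        }
}

int main(void)
{
        /* 1. Extension vector attached while profiling is on. */
        struct slab s = { .obj_exts = malloc(64) };

        /* 2. Profiling disabled at runtime (shutdown_mem_profiling()). */
        profiling_enabled = false;

        /* 3. Slab torn down: the old check skips the free -> the vector leaks,
         *    later reported as "page still charged to cgroup". */
        old_unaccount_slab(&s);
        printf("obj_exts leaked: %s\n", s.obj_exts ? "yes" : "no");

        /* The fix frees unconditionally, which is what this cleanup does. */
        free(s.obj_exts);
        return 0;
}

Running the model prints "obj_exts leaked: yes", which is the situation the
patch removes by calling free_slab_obj_exts() unconditionally.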
-rw-r--r--  mm/slub.c  27
1 file changed, 7 insertions(+), 20 deletions(-)
diff --git a/mm/slub.c b/mm/slub.c
index c26d9cd107cc..66f86e532818 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2035,18 +2035,6 @@ static inline void free_slab_obj_exts(struct slab *slab)
slab->obj_exts = 0;
}
-static inline bool need_slab_obj_ext(void)
-{
- if (mem_alloc_profiling_enabled())
- return true;
-
- /*
- * CONFIG_MEMCG creates vector of obj_cgroup objects conditionally
- * inside memcg_slab_post_alloc_hook. No other users for now.
- */
- return false;
-}
-
#else /* CONFIG_SLAB_OBJ_EXT */
static inline void init_slab_obj_exts(struct slab *slab)
@@ -2063,11 +2051,6 @@ static inline void free_slab_obj_exts(struct slab *slab)
{
}
-static inline bool need_slab_obj_ext(void)
-{
- return false;
-}
-
#endif /* CONFIG_SLAB_OBJ_EXT */
#ifdef CONFIG_MEM_ALLOC_PROFILING
@@ -2099,7 +2082,7 @@ prepare_slab_obj_exts_hook(struct kmem_cache *s, gfp_t flags, void *p)
static inline void
alloc_tagging_slab_alloc_hook(struct kmem_cache *s, void *object, gfp_t flags)
{
- if (need_slab_obj_ext()) {
+ if (mem_alloc_profiling_enabled()) {
struct slabobj_ext *obj_exts;
obj_exts = prepare_slab_obj_exts_hook(s, flags, object);
@@ -2577,8 +2560,12 @@ static __always_inline void account_slab(struct slab *slab, int order,
static __always_inline void unaccount_slab(struct slab *slab, int order,
struct kmem_cache *s)
{
- if (memcg_kmem_online() || need_slab_obj_ext())
- free_slab_obj_exts(slab);
+ /*
+ * The slab object extensions should now be freed regardless of
+ * whether mem_alloc_profiling_enabled() or not because profiling
+ * might have been disabled after slab->obj_exts got allocated.
+ */
+ free_slab_obj_exts(slab);
mod_node_page_state(slab_pgdat(slab), cache_vmstat_idx(s),
-(PAGE_SIZE << order));
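The unconditional call in unaccount_slab() stays cheap when no extension
vector was ever attached: free_slab_obj_exts() bails out before the
kfree()/reset shown in the first hunk when slab->obj_exts is 0. A minimal
sketch of that shape, assuming the usual early return on an empty field
(simplified; the real function in mm/slub.c also handles the MEMCG
empty-vector marking and is not compilable stand-alone):

static inline void free_slab_obj_exts(struct slab *slab)
{
        struct slabobj_ext *obj_exts = slab_obj_exts(slab);

        if (!obj_exts)
                return;         /* nothing attached: the unconditional call is a no-op */

        kfree(obj_exts);
        slab->obj_exts = 0;     /* matches the tail shown in the first hunk above */
}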