author     SeongJae Park <sj@kernel.org>  2023-12-08 17:50:18 +0000
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>  2024-01-01 12:39:08 +0000
commit     ec7b81b0abcde34ac75a4b02239aa88306feab67 (patch)
tree       139ebf25bd9d1859dd0f489f6498c8df3820035f
parent     41f4ff9fe22c4b83e8d6978118c3d89767b3438d (diff)
mm/damon/core: make damon_start() wait until kdamond_fn() starts
commit 6376a824595607e99d032a39ba3394988b4fce96 upstream.

The cleanup tasks of kdamond threads, including the reset of the corresponding DAMON context's ->kdamond field and the decrease of the global nr_running_ctxs counter, are supposed to be executed by kdamond_fn(). However, commit 0f91d13366a4 ("mm/damon: simplify stop mechanism") made neither damon_start() nor damon_stop() ensure that the corresponding kdamond has started executing kdamond_fn().

As a result, the cleanup can be skipped if damon_stop() is called fast enough after the preceding damon_start(). In particular, the skipped reset of ->kdamond could cause a use-after-free.

Fix it by making damon_start() wait for kdamond_fn() to start executing.

Link: https://lkml.kernel.org/r/20231208175018.63880-1-sj@kernel.org
Fixes: 0f91d13366a4 ("mm/damon: simplify stop mechanism")
Signed-off-by: SeongJae Park <sj@kernel.org>
Reported-by: Jakub Acs <acsjakub@amazon.de>
Cc: Changbin Du <changbin.du@intel.com>
Cc: Jakub Acs <acsjakub@amazon.de>
Cc: <stable@vger.kernel.org> # 5.15.x
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: SeongJae Park <sj@kernel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
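For illustration only, below is a minimal sketch of the synchronization pattern the fix relies on, written as a hypothetical demo module; worker_fn, demo_worker and the other names are invented for this example and are not part of DAMON or of this patch. The starter blocks on a completion that the kthread signals as its very first action, so code running right after the start can never observe a thread that has not yet entered its thread function.

/* Hypothetical demo module, not part of the patch: start a kthread and do
 * not return from init until the thread has provably begun executing. */
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kthread.h>
#include <linux/module.h>

static struct task_struct *worker;
static DECLARE_COMPLETION(worker_started);

static int worker_fn(void *data)
{
	/* Signal the starter before doing any real work, mirroring the
	 * complete() call added at the top of kdamond_fn(). */
	complete(&worker_started);

	while (!kthread_should_stop())
		msleep_interruptible(100);
	return 0;
}

static int __init demo_init(void)
{
	worker = kthread_run(worker_fn, NULL, "demo_worker");
	if (IS_ERR(worker))
		return PTR_ERR(worker);

	/* Without this wait, a stop issued immediately after start could
	 * race with a thread that has not been scheduled in yet, so its
	 * startup/cleanup logic would never run. */
	wait_for_completion(&worker_started);
	return 0;
}

static void __exit demo_exit(void)
{
	kthread_stop(worker);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

The point is that kthread_run() only guarantees the thread has been created and woken, not that its thread function has begun running; waiting on the completion upgrades that guarantee before the starter returns, which is the property the skipped-cleanup race in DAMON required.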
-rw-r--r--  include/linux/damon.h   3
-rw-r--r--  mm/damon/core.c         7
2 files changed, 10 insertions, 0 deletions
diff --git a/include/linux/damon.h b/include/linux/damon.h
index b13be7ae2275..e6941b239f44 100644
--- a/include/linux/damon.h
+++ b/include/linux/damon.h
@@ -8,6 +8,7 @@
 #ifndef _DAMON_H_
 #define _DAMON_H_
 
+#include <linux/completion.h>
 #include <linux/mutex.h>
 #include <linux/time64.h>
 #include <linux/types.h>
@@ -452,6 +453,8 @@ struct damon_ctx {
 	/* private: internal use only */
 	struct timespec64 last_aggregation;
 	struct timespec64 last_ops_update;
+	/* for waiting until the execution of the kdamond_fn is started */
+	struct completion kdamond_started;
 
 	/* public: */
 	struct task_struct *kdamond;
diff --git a/mm/damon/core.c b/mm/damon/core.c
index 36d098d06c55..5db9bec8ae67 100644
--- a/mm/damon/core.c
+++ b/mm/damon/core.c
@@ -383,6 +383,8 @@ struct damon_ctx *damon_new_ctx(void)
 	if (!ctx)
 		return NULL;
 
+	init_completion(&ctx->kdamond_started);
+
 	ctx->attrs.sample_interval = 5 * 1000;
 	ctx->attrs.aggr_interval = 100 * 1000;
 	ctx->attrs.ops_update_interval = 60 * 1000 * 1000;
@@ -519,11 +521,14 @@ static int __damon_start(struct damon_ctx *ctx)
 	mutex_lock(&ctx->kdamond_lock);
 	if (!ctx->kdamond) {
 		err = 0;
+		reinit_completion(&ctx->kdamond_started);
 		ctx->kdamond = kthread_run(kdamond_fn, ctx, "kdamond.%d",
 				nr_running_ctxs);
 		if (IS_ERR(ctx->kdamond)) {
 			err = PTR_ERR(ctx->kdamond);
 			ctx->kdamond = NULL;
+		} else {
+			wait_for_completion(&ctx->kdamond_started);
 		}
 	}
 	mutex_unlock(&ctx->kdamond_lock);
@@ -1147,6 +1152,8 @@ static int kdamond_fn(void *data)
 
 	pr_debug("kdamond (%d) starts\n", current->pid);
 
+	complete(&ctx->kdamond_started);
+
 	if (ctx->ops.init)
 		ctx->ops.init(ctx);
 	if (ctx->callback.before_start && ctx->callback.before_start(ctx))