Diffstat (limited to 'samples/damon')
 samples/damon/Kconfig  |  13 +
 samples/damon/Makefile |   1 +
 samples/damon/mtier.c  | 234 +
 samples/damon/prcl.c   |  59 +-
 samples/damon/wsse.c   |  57 +-
 5 files changed, 333 insertions(+), 31 deletions(-)
diff --git a/samples/damon/Kconfig b/samples/damon/Kconfig
index 564c49ed69a2d..cbf96fd8a8bf9 100644
--- a/samples/damon/Kconfig
+++ b/samples/damon/Kconfig
@@ -27,4 +27,17 @@ config SAMPLE_DAMON_PRCL
If unsure, say N.
+config SAMPLE_DAMON_MTIER
+ bool "DAMON sample module for memory tiering"
+ depends on DAMON && DAMON_PADDR
+ help
+	  This builds the DAMON sample module for memory tiering.
+
+ The module assumes the system is constructed with two NUMA nodes,
+	  which are seen as local and remote nodes to all CPUs. For example,
+ node0 is for DDR5 DRAMs connected via DIMM, while node1 is for DDR4
+ DRAMs connected via CXL.
+
+ If unsure, say N.
+
endmenu
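
Because SAMPLE_DAMON_MTIER is a bool symbol, the sample is always built into
the kernel image, and its knobs go through the "damon_sample_mtier." prefix
defined in mtier.c below: for instance damon_sample_mtier.enabled=Y on the
kernel command line, or the file
/sys/module/damon_sample_mtier/parameters/enabled at runtime. This is the
standard module-parameter plumbing, not something this patch adds.
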
diff --git a/samples/damon/Makefile b/samples/damon/Makefile
index 7f155143f2371..72f68cbf422aa 100644
--- a/samples/damon/Makefile
+++ b/samples/damon/Makefile
@@ -2,3 +2,4 @@
obj-$(CONFIG_SAMPLE_DAMON_WSSE) += wsse.o
obj-$(CONFIG_SAMPLE_DAMON_PRCL) += prcl.o
+obj-$(CONFIG_SAMPLE_DAMON_MTIER) += mtier.o
diff --git a/samples/damon/mtier.c b/samples/damon/mtier.c
new file mode 100644
index 0000000000000..7ebd352138e4f
--- /dev/null
+++ b/samples/damon/mtier.c
@@ -0,0 +1,234 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * memory tiering: migrate cold pages in node 0 and hot pages in node 1 to node
+ * 1 and node 0, respectively. Adjust the hotness/coldness threshold aiming
+ * at a resulting 99.6% node 0 utilization ratio.
+ */
+
+#define pr_fmt(fmt) "damon_sample_mtier: " fmt
+
+#include <linux/damon.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+
+#ifdef MODULE_PARAM_PREFIX
+#undef MODULE_PARAM_PREFIX
+#endif
+#define MODULE_PARAM_PREFIX "damon_sample_mtier."
+
+static unsigned long node0_start_addr __read_mostly;
+module_param(node0_start_addr, ulong, 0600);
+
+static unsigned long node0_end_addr __read_mostly;
+module_param(node0_end_addr, ulong, 0600);
+
+static unsigned long node1_start_addr __read_mostly;
+module_param(node1_start_addr, ulong, 0600);
+
+static unsigned long node1_end_addr __read_mostly;
+module_param(node1_end_addr, ulong, 0600);
+
+static unsigned long node0_mem_used_bp __read_mostly = 9970;
+module_param(node0_mem_used_bp, ulong, 0600);
+
+static unsigned long node0_mem_free_bp __read_mostly = 50;
+module_param(node0_mem_free_bp, ulong, 0600);
+
+static int damon_sample_mtier_enable_store(
+ const char *val, const struct kernel_param *kp);
+
+static const struct kernel_param_ops enabled_param_ops = {
+ .set = damon_sample_mtier_enable_store,
+ .get = param_get_bool,
+};
+
+static bool enabled __read_mostly;
+module_param_cb(enabled, &enabled_param_ops, &enabled, 0600);
+MODULE_PARM_DESC(enabled, "Enable or disable DAMON_SAMPLE_MTIER");
+
+static bool detect_node_addresses __read_mostly;
+module_param(detect_node_addresses, bool, 0600);
+
+static struct damon_ctx *ctxs[2];
+
+struct region_range {
+ phys_addr_t start;
+ phys_addr_t end;
+};
+
+static int nid_to_phys(int target_node, struct region_range *range)
+{
+ if (!node_online(target_node)) {
+ pr_err("NUMA node %d is not online\n", target_node);
+ return -EINVAL;
+ }
+
+ range->start = PFN_PHYS(node_start_pfn(target_node));
+ range->end = PFN_PHYS(node_end_pfn(target_node));
+
+ return 0;
+}
+
+static struct damon_ctx *damon_sample_mtier_build_ctx(bool promote)
+{
+ struct damon_ctx *ctx;
+ struct damon_attrs attrs;
+ struct damon_target *target;
+ struct damon_region *region;
+ struct damos *scheme;
+ struct damos_quota_goal *quota_goal;
+ struct damos_filter *filter;
+ struct region_range addr;
+ int ret;
+
+ ctx = damon_new_ctx();
+ if (!ctx)
+ return NULL;
+ attrs = (struct damon_attrs) {
+ .sample_interval = 5 * USEC_PER_MSEC,
+ .aggr_interval = 100 * USEC_PER_MSEC,
+ .ops_update_interval = 60 * USEC_PER_MSEC * MSEC_PER_SEC,
+ .min_nr_regions = 10,
+ .max_nr_regions = 1000,
+ };
+
+ /*
+	 * auto-tune the sampling and aggregation intervals, aiming at a 4%
+	 * DAMON-observed access ratio with the sampling interval in [5ms, 10s].
+ */
+ attrs.intervals_goal = (struct damon_intervals_goal) {
+ .access_bp = 400, .aggrs = 3,
+ .min_sample_us = 5000, .max_sample_us = 10000000,
+ };
+ if (damon_set_attrs(ctx, &attrs))
+ goto free_out;
+ if (damon_select_ops(ctx, DAMON_OPS_PADDR))
+ goto free_out;
+
+ target = damon_new_target();
+ if (!target)
+ goto free_out;
+ damon_add_target(ctx, target);
+
+ if (detect_node_addresses) {
+ ret = promote ? nid_to_phys(1, &addr) : nid_to_phys(0, &addr);
+ if (ret)
+ goto free_out;
+ } else {
+ addr.start = promote ? node1_start_addr : node0_start_addr;
+ addr.end = promote ? node1_end_addr : node0_end_addr;
+ }
+
+ region = damon_new_region(addr.start, addr.end);
+ if (!region)
+ goto free_out;
+ damon_add_region(region, target);
+
+ scheme = damon_new_scheme(
+ /* access pattern */
+ &(struct damos_access_pattern) {
+ .min_sz_region = PAGE_SIZE,
+ .max_sz_region = ULONG_MAX,
+ .min_nr_accesses = promote ? 1 : 0,
+ .max_nr_accesses = promote ? UINT_MAX : 0,
+ .min_age_region = 0,
+ .max_age_region = UINT_MAX},
+ /* action */
+ promote ? DAMOS_MIGRATE_HOT : DAMOS_MIGRATE_COLD,
+ 1000000, /* apply interval (1s) */
+ &(struct damos_quota){
+			/* 200 MiB per sec at most */
+ .reset_interval = 1000,
+ .sz = 200 * 1024 * 1024,
+ /* ignore size of region when prioritizing */
+ .weight_sz = 0,
+ .weight_nr_accesses = 100,
+ .weight_age = 100,
+ },
+ &(struct damos_watermarks){},
+ promote ? 0 : 1); /* migrate target node id */
+ if (!scheme)
+ goto free_out;
+ damon_set_schemes(ctx, &scheme, 1);
+ quota_goal = damos_new_quota_goal(
+ promote ? DAMOS_QUOTA_NODE_MEM_USED_BP :
+ DAMOS_QUOTA_NODE_MEM_FREE_BP,
+ promote ? node0_mem_used_bp : node0_mem_free_bp);
+ if (!quota_goal)
+ goto free_out;
+ quota_goal->nid = 0;
+ damos_add_quota_goal(&scheme->quota, quota_goal);
+ filter = damos_new_filter(DAMOS_FILTER_TYPE_YOUNG, true, promote);
+ if (!filter)
+ goto free_out;
+ damos_add_filter(scheme, filter);
+ return ctx;
+free_out:
+ damon_destroy_ctx(ctx);
+ return NULL;
+}
+
+static int damon_sample_mtier_start(void)
+{
+ struct damon_ctx *ctx;
+
+ ctx = damon_sample_mtier_build_ctx(true);
+ if (!ctx)
+ return -ENOMEM;
+ ctxs[0] = ctx;
+ ctx = damon_sample_mtier_build_ctx(false);
+ if (!ctx) {
+ damon_destroy_ctx(ctxs[0]);
+ return -ENOMEM;
+ }
+ ctxs[1] = ctx;
+ return damon_start(ctxs, 2, true);
+}
+
+static void damon_sample_mtier_stop(void)
+{
+ damon_stop(ctxs, 2);
+ damon_destroy_ctx(ctxs[0]);
+ damon_destroy_ctx(ctxs[1]);
+}
+
+static bool init_called;
+
+static int damon_sample_mtier_enable_store(
+ const char *val, const struct kernel_param *kp)
+{
+ bool is_enabled = enabled;
+ int err;
+
+ err = kstrtobool(val, &enabled);
+ if (err)
+ return err;
+
+ if (enabled == is_enabled)
+ return 0;
+
+ if (enabled) {
+ err = damon_sample_mtier_start();
+ if (err)
+ enabled = false;
+ return err;
+ }
+ damon_sample_mtier_stop();
+ return 0;
+}
+
+static int __init damon_sample_mtier_init(void)
+{
+ int err = 0;
+
+ init_called = true;
+ if (enabled) {
+ err = damon_sample_mtier_start();
+ if (err)
+ enabled = false;
+ }
+	return err;
+}
+
+module_init(damon_sample_mtier_init);
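
The helper above is instantiated twice: the promote context monitors node 1
and applies DAMOS_MIGRATE_HOT toward node 0, while the demote context monitors
node 0 and applies DAMOS_MIGRATE_COLD toward node 1; both quota goals are
expressed against node 0 in basis points (1 bp = 0.01%). A minimal userspace C
sketch, illustration only and not part of the patch, of how the two default
goals bracket the utilization target:

#include <stdio.h>

int main(void)
{
	/* defaults from mtier.c above */
	unsigned long node0_mem_used_bp = 9970;	/* promote-side goal */
	unsigned long node0_mem_free_bp = 50;	/* demote-side goal */

	/* promotion (node 1 -> node 0) is throttled down once node 0
	 * reaches this utilization */
	printf("promote until node 0 is %.2f%% used\n",
			node0_mem_used_bp / 100.0);
	/* demotion (node 0 -> node 1) aims to keep this much of node 0 free */
	printf("demote to keep %.2f%% of node 0 free\n",
			node0_mem_free_bp / 100.0);
	return 0;
}

The two goals, 99.70% used and 0.50% free (i.e. 99.50% used), straddle the
~99.6% node 0 utilization named in the file's header comment.
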
diff --git a/samples/damon/prcl.c b/samples/damon/prcl.c
index c3acbdab7a620..1b839c06a612f 100644
--- a/samples/damon/prcl.c
+++ b/samples/damon/prcl.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/*
* proactive reclamation: monitor access pattern of a given process, find
- * regiosn that seems not accessed, and proactively page out the regions.
+ * regions that seem not accessed, and proactively page out the regions.
*/
#define pr_fmt(fmt) "damon_sample_prcl: " fmt
@@ -11,26 +11,32 @@
#include <linux/kernel.h>
#include <linux/module.h>
+#ifdef MODULE_PARAM_PREFIX
+#undef MODULE_PARAM_PREFIX
+#endif
+#define MODULE_PARAM_PREFIX "damon_sample_prcl."
+
static int target_pid __read_mostly;
module_param(target_pid, int, 0600);
static int damon_sample_prcl_enable_store(
const char *val, const struct kernel_param *kp);
-static const struct kernel_param_ops enable_param_ops = {
+static const struct kernel_param_ops enabled_param_ops = {
.set = damon_sample_prcl_enable_store,
.get = param_get_bool,
};
-static bool enable __read_mostly;
-module_param_cb(enable, &enable_param_ops, &enable, 0600);
-MODULE_PARM_DESC(enable, "Enable of disable DAMON_SAMPLE_WSSE");
+static bool enabled __read_mostly;
+module_param_cb(enabled, &enabled_param_ops, &enabled, 0600);
+MODULE_PARM_DESC(enabled, "Enable or disable DAMON_SAMPLE_PRCL");
static struct damon_ctx *ctx;
static struct pid *target_pidp;
-static int damon_sample_prcl_after_aggregate(struct damon_ctx *c)
+static int damon_sample_prcl_repeat_call_fn(void *data)
{
+ struct damon_ctx *c = data;
struct damon_target *t;
damon_for_each_target(t, c) {
@@ -46,10 +52,16 @@ static int damon_sample_prcl_after_aggregate(struct damon_ctx *c)
return 0;
}
+static struct damon_call_control repeat_call_control = {
+ .fn = damon_sample_prcl_repeat_call_fn,
+ .repeat = true,
+};
+
static int damon_sample_prcl_start(void)
{
struct damon_target *target;
struct damos *scheme;
+ int err;
pr_info("start\n");
@@ -74,8 +86,6 @@ static int damon_sample_prcl_start(void)
}
target->pid = target_pidp;
- ctx->callback.after_aggregation = damon_sample_prcl_after_aggregate;
-
scheme = damon_new_scheme(
&(struct damos_access_pattern) {
.min_sz_region = PAGE_SIZE,
@@ -95,7 +105,12 @@ static int damon_sample_prcl_start(void)
}
damon_set_schemes(ctx, &scheme, 1);
- return damon_start(&ctx, 1, true);
+ err = damon_start(&ctx, 1, true);
+ if (err)
+ return err;
+
+ repeat_call_control.data = ctx;
+ return damon_call(ctx, &repeat_call_control);
}
static void damon_sample_prcl_stop(void)
@@ -105,31 +120,43 @@ static void damon_sample_prcl_stop(void)
damon_stop(&ctx, 1);
damon_destroy_ctx(ctx);
}
- if (target_pidp)
- put_pid(target_pidp);
}
+static bool init_called;
+
static int damon_sample_prcl_enable_store(
const char *val, const struct kernel_param *kp)
{
- bool enabled = enable;
+ bool is_enabled = enabled;
int err;
- err = kstrtobool(val, &enable);
+ err = kstrtobool(val, &enabled);
if (err)
return err;
- if (enable == enabled)
+ if (enabled == is_enabled)
return 0;
- if (enable)
- return damon_sample_prcl_start();
+ if (enabled) {
+ err = damon_sample_prcl_start();
+ if (err)
+ enabled = false;
+ return err;
+ }
damon_sample_prcl_stop();
return 0;
}
static int __init damon_sample_prcl_init(void)
{
+ int err = 0;
+
+ init_called = true;
+ if (enabled) {
+ err = damon_sample_prcl_start();
+ if (err)
+ enabled = false;
+ }
-	return 0;
+	return err;
}
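
Condensed from the hunks above (the same conversion recurs in wsse.c below):
the removed ctx->callback.after_aggregation hook becomes a repeating
damon_call(). Only damon_call() and struct damon_call_control are API taken
from this patch; my_repeat_fn is a placeholder name:

static int my_repeat_fn(void *data)	/* body of the old callback */
{
	struct damon_ctx *c = data;

	/* walk c's targets and regions, as the samples do */
	return 0;
}

static struct damon_call_control repeat_call_control = {
	.fn = my_repeat_fn,
	.repeat = true,	/* invoked repeatedly, not one-shot */
};

/* in the start path, only after damon_start() succeeds: */
repeat_call_control.data = ctx;
return damon_call(ctx, &repeat_call_control);
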
diff --git a/samples/damon/wsse.c b/samples/damon/wsse.c
index 11be258032744..da052023b0990 100644
--- a/samples/damon/wsse.c
+++ b/samples/damon/wsse.c
@@ -12,26 +12,32 @@
#include <linux/kernel.h>
#include <linux/module.h>
+#ifdef MODULE_PARAM_PREFIX
+#undef MODULE_PARAM_PREFIX
+#endif
+#define MODULE_PARAM_PREFIX "damon_sample_wsse."
+
static int target_pid __read_mostly;
module_param(target_pid, int, 0600);
static int damon_sample_wsse_enable_store(
const char *val, const struct kernel_param *kp);
-static const struct kernel_param_ops enable_param_ops = {
+static const struct kernel_param_ops enabled_param_ops = {
.set = damon_sample_wsse_enable_store,
.get = param_get_bool,
};
-static bool enable __read_mostly;
-module_param_cb(enable, &enable_param_ops, &enable, 0600);
-MODULE_PARM_DESC(enable, "Enable or disable DAMON_SAMPLE_WSSE");
+static bool enabled __read_mostly;
+module_param_cb(enabled, &enabled_param_ops, &enabled, 0600);
+MODULE_PARM_DESC(enabled, "Enable or disable DAMON_SAMPLE_WSSE");
static struct damon_ctx *ctx;
static struct pid *target_pidp;
-static int damon_sample_wsse_after_aggregate(struct damon_ctx *c)
+static int damon_sample_wsse_repeat_call_fn(void *data)
{
+ struct damon_ctx *c = data;
struct damon_target *t;
damon_for_each_target(t, c) {
@@ -47,9 +53,15 @@ static int damon_sample_wsse_after_aggregate(struct damon_ctx *c)
return 0;
}
+static struct damon_call_control repeat_call_control = {
+ .fn = damon_sample_wsse_repeat_call_fn,
+ .repeat = true,
+};
+
static int damon_sample_wsse_start(void)
{
struct damon_target *target;
+ int err;
pr_info("start\n");
@@ -74,8 +86,11 @@ static int damon_sample_wsse_start(void)
}
target->pid = target_pidp;
- ctx->callback.after_aggregation = damon_sample_wsse_after_aggregate;
- return damon_start(&ctx, 1, true);
+ err = damon_start(&ctx, 1, true);
+ if (err)
+ return err;
+ repeat_call_control.data = ctx;
+ return damon_call(ctx, &repeat_call_control);
}
static void damon_sample_wsse_stop(void)
@@ -85,32 +100,44 @@ static void damon_sample_wsse_stop(void)
damon_stop(&ctx, 1);
damon_destroy_ctx(ctx);
}
- if (target_pidp)
- put_pid(target_pidp);
}
+static bool init_called;
+
static int damon_sample_wsse_enable_store(
const char *val, const struct kernel_param *kp)
{
- bool enabled = enable;
+ bool is_enabled = enabled;
int err;
- err = kstrtobool(val, &enable);
+ err = kstrtobool(val, &enabled);
if (err)
return err;
- if (enable == enabled)
+ if (enabled == is_enabled)
return 0;
- if (enable)
- return damon_sample_wsse_start();
+ if (enabled) {
+ err = damon_sample_wsse_start();
+ if (err)
+ enabled = false;
+ return err;
+ }
damon_sample_wsse_stop();
return 0;
}
static int __init damon_sample_wsse_init(void)
{
- return 0;
+ int err = 0;
+
+ init_called = true;
+ if (enabled) {
+ err = damon_sample_wsse_start();
+ if (err)
+ enabled = false;
+ }
+ return err;
}
module_init(damon_sample_wsse_init);
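
The wsse.c hunks show only the frame of damon_sample_wsse_repeat_call_fn();
assuming the elided loop body is unchanged from the pre-patch sample, the
working-set-size estimate it prints is simply the total size of the regions
DAMON observed as accessed, along these lines:

	damon_for_each_target(t, c) {
		struct damon_region *r;
		unsigned long wss = 0;

		damon_for_each_region(r, t) {
			if (r->nr_accesses > 0)
				wss += r->ar.end - r->ar.start;
		}
		pr_info("wss: %lu\n", wss);
	}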