-rw-r--r--  mm/damon/stat.c | 72 ++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 72 insertions(+), 0 deletions(-)
diff --git a/mm/damon/stat.c b/mm/damon/stat.c
index 3be86ca57c8b..b75af871627e 100644
--- a/mm/damon/stat.c
+++ b/mm/damon/stat.c
@@ -34,6 +34,11 @@ module_param(estimated_memory_bandwidth, ulong, 0400);
 MODULE_PARM_DESC(estimated_memory_bandwidth,
 		"Estimated memory bandwidth usage in bytes per second");
 
+static unsigned long memory_idle_ms_percentiles[101] __read_mostly = {0,};
+module_param_array(memory_idle_ms_percentiles, ulong, NULL, 0400);
+MODULE_PARM_DESC(memory_idle_ms_percentiles,
+		"Memory idle time percentiles in milliseconds");
+
 static struct damon_ctx *damon_stat_context;
 
 static void damon_stat_set_estimated_memory_bandwidth(struct damon_ctx *c)
@@ -51,6 +56,72 @@ static void damon_stat_set_estimated_memory_bandwidth(struct damon_ctx *c)
 		MSEC_PER_SEC / c->attrs.aggr_interval;
 }
 
+static unsigned int damon_stat_idletime(const struct damon_region *r)
+{
+	if (r->nr_accesses)
+		return 0;
+	return r->age + 1;
+}
+
+static int damon_stat_cmp_regions(const void *a, const void *b)
+{
+	const struct damon_region *ra = *(const struct damon_region **)a;
+	const struct damon_region *rb = *(const struct damon_region **)b;
+
+	return damon_stat_idletime(ra) - damon_stat_idletime(rb);
+}
+
+static int damon_stat_sort_regions(struct damon_ctx *c,
+		struct damon_region ***sorted_ptr, int *nr_regions_ptr,
+		unsigned long *total_sz_ptr)
+{
+	struct damon_target *t;
+	struct damon_region *r;
+	struct damon_region **region_pointers;
+	unsigned int nr_regions = 0;
+	unsigned long total_sz = 0;
+
+	damon_for_each_target(t, c) {
+		/* there is only one target */
+		region_pointers = kmalloc_array(damon_nr_regions(t),
+				sizeof(*region_pointers), GFP_KERNEL);
+		if (!region_pointers)
+			return -ENOMEM;
+		damon_for_each_region(r, t) {
+			region_pointers[nr_regions++] = r;
+			total_sz += r->ar.end - r->ar.start;
+		}
+	}
+	sort(region_pointers, nr_regions, sizeof(*region_pointers),
+			damon_stat_cmp_regions, NULL);
+	*sorted_ptr = region_pointers;
+	*nr_regions_ptr = nr_regions;
+	*total_sz_ptr = total_sz;
+	return 0;
+}
+
+static void damon_stat_set_idletime_percentiles(struct damon_ctx *c)
+{
+	struct damon_region **sorted_regions, *region;
+	int nr_regions;
+	unsigned long total_sz, accounted_bytes = 0;
+	int err, i, next_percentile = 0;
+
+	err = damon_stat_sort_regions(c, &sorted_regions, &nr_regions,
+			&total_sz);
+	if (err)
+		return;
+	for (i = 0; i < nr_regions; i++) {
+		region = sorted_regions[i];
+		accounted_bytes += region->ar.end - region->ar.start;
+		while (next_percentile <= accounted_bytes * 100 / total_sz)
+			memory_idle_ms_percentiles[next_percentile++] =
+				damon_stat_idletime(region) *
+				c->attrs.aggr_interval / USEC_PER_MSEC;
+	}
+	kfree(sorted_regions);
+}
+
 static int damon_stat_after_aggregation(struct damon_ctx *c)
 {
 	static unsigned long last_refresh_jiffies;
@@ -62,6 +133,7 @@ static int damon_stat_after_aggregation(struct damon_ctx *c)
 	last_refresh_jiffies = jiffies;
 
 	damon_stat_set_estimated_memory_bandwidth(c);
+	damon_stat_set_idletime_percentiles(c);
 	return 0;
 }
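
As a reading aid (not part of the patch), the following stand-alone user-space sketch reproduces the weighted-percentile bookkeeping that damon_stat_set_idletime_percentiles() performs: regions are sorted by idle time, and each percentile slot is filled with the idle time of the region whose bytes push the running total past that fraction of the monitored address space. The struct region type and the sample sizes and idle times below are invented for illustration only.

/*
 * Minimal illustration of the weighted-percentile scheme used by the
 * patch above.  The region sizes and idle times are made-up sample data;
 * total_sz, accounted_bytes and next_percentile mirror the variables in
 * damon_stat_set_idletime_percentiles().
 */
#include <stdio.h>
#include <stdlib.h>

struct region {
	unsigned long sz;	/* bytes covered by the region */
	unsigned long idle_ms;	/* idle time of the region, in ms */
};

/* sort regions by idle time, hottest (least idle) first */
static int cmp_idle(const void *a, const void *b)
{
	const struct region *ra = a, *rb = b;

	return (ra->idle_ms > rb->idle_ms) - (ra->idle_ms < rb->idle_ms);
}

int main(void)
{
	/* hypothetical monitoring snapshot: three regions */
	struct region regions[] = {
		{ .sz = 4096, .idle_ms = 0 },
		{ .sz = 8192, .idle_ms = 200 },
		{ .sz = 4096, .idle_ms = 1000 },
	};
	int nr_regions = sizeof(regions) / sizeof(regions[0]);
	unsigned long percentiles[101] = { 0 };
	unsigned long total_sz = 0, accounted_bytes = 0;
	int i, next_percentile = 0;

	for (i = 0; i < nr_regions; i++)
		total_sz += regions[i].sz;
	qsort(regions, nr_regions, sizeof(regions[0]), cmp_idle);

	for (i = 0; i < nr_regions; i++) {
		accounted_bytes += regions[i].sz;
		/*
		 * Every percentile whose byte threshold is now covered
		 * gets the idle time of the current region.
		 */
		while (next_percentile <= accounted_bytes * 100 / total_sz)
			percentiles[next_percentile++] = regions[i].idle_ms;
	}

	for (i = 0; i <= 100; i += 25)
		printf("p%d: %lu ms\n", i, percentiles[i]);
	return 0;
}

With the patch applied, the computed array should be readable (by root, given the 0400 permission) from the module's parameters directory, presumably /sys/module/damon_stat/parameters/memory_idle_ms_percentiles; the exact module name is an assumption here, not stated in the diff.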