// SPDX-License-Identifier: GPL-2.0
/*
 * Shows data access monitoring results in simple metrics.
 */

#define pr_fmt(fmt) "damon-stat: " fmt

#include <linux/damon.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sort.h>

#ifdef MODULE_PARAM_PREFIX
#undef MODULE_PARAM_PREFIX
#endif
#define MODULE_PARAM_PREFIX "damon_stat."

static int damon_stat_enabled_store(
		const char *val, const struct kernel_param *kp);

static const struct kernel_param_ops enabled_param_ops = {
	.set = damon_stat_enabled_store,
	.get = param_get_bool,
};

static bool enabled __read_mostly = IS_ENABLED(
	CONFIG_DAMON_STAT_ENABLED_DEFAULT);
module_param_cb(enabled, &enabled_param_ops, &enabled, 0600);
MODULE_PARM_DESC(enabled, "Enable or disable DAMON_STAT");

static unsigned long estimated_memory_bandwidth __read_mostly;
module_param(estimated_memory_bandwidth, ulong, 0400);
MODULE_PARM_DESC(estimated_memory_bandwidth,
		"Estimated memory bandwidth usage in bytes per second");

static unsigned long memory_idle_ms_percentiles[101] __read_mostly = {0,};
module_param_array(memory_idle_ms_percentiles, ulong, NULL, 0400);
MODULE_PARM_DESC(memory_idle_ms_percentiles,
		"Memory idle time percentiles in milliseconds");

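/*
 * Note: with the "damon_stat." MODULE_PARAM_PREFIX above, the parameters are
 * exposed under /sys/module/damon_stat/parameters/.  For example, the current
 * bandwidth estimate could be read with something like:
 *
 *	cat /sys/module/damon_stat/parameters/estimated_memory_bandwidth
 *
 * and DAMON_STAT can be enabled at boot by passing "damon_stat.enabled=Y" on
 * the kernel command line.
 */
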
static struct damon_ctx *damon_stat_context;

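/*
 * Update estimated_memory_bandwidth from the latest monitoring results.  The
 * per-aggregation interval access volume (the sum of each region's size
 * times its nr_accesses) is scaled to bytes per second: ->attrs.aggr_interval
 * is in microseconds, and USEC_PER_MSEC * MSEC_PER_SEC is one second in
 * microseconds.
 */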
static void damon_stat_set_estimated_memory_bandwidth(struct damon_ctx *c)
{
	struct damon_target *t;
	struct damon_region *r;
	unsigned long access_bytes = 0;

	damon_for_each_target(t, c) {
		damon_for_each_region(r, t)
			access_bytes += (r->ar.end - r->ar.start) *
				r->nr_accesses;
	}
	estimated_memory_bandwidth = access_bytes * USEC_PER_MSEC *
		MSEC_PER_SEC / c->attrs.aggr_interval;
}

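/*
 * Return how long the given region has been idle, in aggregation intervals.
 * A region that was accessed within the last aggregation interval is not
 * idle at all (zero); otherwise the region's age plus one is used, so a
 * just-turned-idle region is still distinguishable from an accessed one.
 */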
static unsigned int damon_stat_idletime(const struct damon_region *r)
{
	if (r->nr_accesses)
		return 0;
	return r->age + 1;
}

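/* sort() comparator: order region pointers by ascending idle time */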
static int damon_stat_cmp_regions(const void *a, const void *b)
{
	const struct damon_region *ra = *(const struct damon_region **)a;
	const struct damon_region *rb = *(const struct damon_region **)b;

	return damon_stat_idletime(ra) - damon_stat_idletime(rb);
}

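/*
 * Build an array of pointers to every monitoring region, sorted by idle time
 * in ascending order, and report the number of regions and their total size.
 * The caller is responsible for kfree()-ing *sorted_ptr on success.
 */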
static int damon_stat_sort_regions(struct damon_ctx *c,
		struct damon_region ***sorted_ptr, int *nr_regions_ptr,
		unsigned long *total_sz_ptr)
{
	struct damon_target *t;
	struct damon_region *r;
	struct damon_region **region_pointers;
	unsigned int nr_regions = 0;
	unsigned long total_sz = 0;

	damon_for_each_target(t, c) {
		/* there is only one target */
		region_pointers = kmalloc_array(damon_nr_regions(t),
				sizeof(*region_pointers), GFP_KERNEL);
		if (!region_pointers)
			return -ENOMEM;
		damon_for_each_region(r, t) {
			region_pointers[nr_regions++] = r;
			total_sz += r->ar.end - r->ar.start;
		}
	}
	sort(region_pointers, nr_regions, sizeof(*region_pointers),
			damon_stat_cmp_regions, NULL);
	*sorted_ptr = region_pointers;
	*nr_regions_ptr = nr_regions;
	*total_sz_ptr = total_sz;
	return 0;
}

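/*
 * Update memory_idle_ms_percentiles[] from the current monitoring results.
 * Regions are walked from the least to the most idle while accumulating
 * their sizes, and percentile slot p receives the idle time, in
 * milliseconds, of the region at which p percent of the total monitored
 * bytes has been covered.  For example, memory_idle_ms_percentiles[50] holds
 * an idle time that half of the monitored bytes do not exceed.
 */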
static void damon_stat_set_idletime_percentiles(struct damon_ctx *c)
{
	struct damon_region **sorted_regions, *region;
	int nr_regions;
	unsigned long total_sz, accounted_bytes = 0;
	int err, i, next_percentile = 0;

	err = damon_stat_sort_regions(c, &sorted_regions, &nr_regions,
			&total_sz);
	if (err)
		return;
	for (i = 0; i < nr_regions; i++) {
		region = sorted_regions[i];
		accounted_bytes += region->ar.end - region->ar.start;
		while (next_percentile <= accounted_bytes * 100 / total_sz)
			memory_idle_ms_percentiles[next_percentile++] =
				damon_stat_idletime(region) *
				c->attrs.aggr_interval / USEC_PER_MSEC;
	}
	kfree(sorted_regions);
}

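/*
 * DAMON after_aggregation callback.  Refresh the exported statistics, but at
 * most once every five seconds to keep the overhead low.
 */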
static int damon_stat_after_aggregation(struct damon_ctx *c)
{
	static unsigned long last_refresh_jiffies;

	/* avoid unnecessarily frequent stat update */
	if (time_before_eq(jiffies, last_refresh_jiffies +
				msecs_to_jiffies(5 * MSEC_PER_SEC)))
		return 0;
	last_refresh_jiffies = jiffies;

	damon_stat_set_estimated_memory_bandwidth(c);
	damon_stat_set_idletime_percentiles(c);
	return 0;
}

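/*
 * Build the DAMON context for DAMON_STAT: monitor the physical address space
 * (DAMON_OPS_PADDR) of the biggest contiguous System RAM region, auto-tune
 * the sampling and aggregation intervals toward a 4% observed access ratio,
 * and install the after_aggregation callback above.
 */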
static struct damon_ctx *damon_stat_build_ctx(void)
{
	struct damon_ctx *ctx;
	struct damon_attrs attrs;
	struct damon_target *target;
	unsigned long start = 0, end = 0;

	ctx = damon_new_ctx();
	if (!ctx)
		return NULL;
	attrs = (struct damon_attrs) {
		.sample_interval = 5 * USEC_PER_MSEC,
		.aggr_interval = 100 * USEC_PER_MSEC,
		.ops_update_interval = 60 * USEC_PER_MSEC * MSEC_PER_SEC,
		.min_nr_regions = 10,
		.max_nr_regions = 1000,
	};
	/*
	 * Auto-tune the sampling and aggregation intervals, aiming for a 4%
	 * DAMON-observed access ratio while keeping the sampling interval in
	 * the [5ms, 10s] range.
	 */
	attrs.intervals_goal = (struct damon_intervals_goal) {
		.access_bp = 400, .aggrs = 3,
		.min_sample_us = 5000, .max_sample_us = 10000000,
	};
	if (damon_set_attrs(ctx, &attrs))
		goto free_out;

	if (damon_select_ops(ctx, DAMON_OPS_PADDR))
		goto free_out;

	target = damon_new_target();
	if (!target)
		goto free_out;
	damon_add_target(ctx, target);
	if (damon_set_region_biggest_system_ram_default(target, &start, &end))
		goto free_out;
	ctx->callback.after_aggregation = damon_stat_after_aggregation;
	return ctx;
free_out:
	damon_destroy_ctx(ctx);
	return NULL;
}

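/*
 * Build a context and start a DAMON worker (kdamond) for it.  Returns
 * -ENOMEM if the context cannot be built, or the return value of
 * damon_start() otherwise.
 */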
static int damon_stat_start(void)
{
	damon_stat_context = damon_stat_build_ctx();
	if (!damon_stat_context)
		return -ENOMEM;
	return damon_start(&damon_stat_context, 1, true);
}

static void damon_stat_stop(void)
{
	damon_stop(&damon_stat_context, 1);
	damon_destroy_ctx(damon_stat_context);
}

static bool damon_stat_init_called;

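/*
 * "enabled" parameter setter.  Start or stop DAMON_STAT when the value
 * actually changes.  If called before damon_stat_init() (e.g., while boot
 * parameters are being parsed), only record the value and let
 * damon_stat_init() do the real work.
 */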
static int damon_stat_enabled_store(
		const char *val, const struct kernel_param *kp)
{
	bool is_enabled = enabled;
	int err;

	err = kstrtobool(val, &enabled);
	if (err)
		return err;

	if (is_enabled == enabled)
		return 0;

	if (!damon_stat_init_called)
		/*
		 * Probably called from command line parsing (parse_args()).
		 * Cannot call damon_new_ctx().  Let damon_stat_init() handle
		 * it.
		 */
		return 0;

	if (enabled) {
		err = damon_stat_start();
		if (err)
			enabled = false;
		return err;
	}
	damon_stat_stop();
	return 0;
}

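/*
 * If "damon_stat.enabled" was set on the kernel command line,
 * damon_stat_enabled_store() only recorded the value (see above), so start
 * the monitoring here.
 */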
static int __init damon_stat_init(void)
{
	int err = 0;

	damon_stat_init_called = true;

	/* probably set via command line */
	if (enabled)
		err = damon_stat_start();

	if (err && enabled)
		enabled = false;
	return err;
}

module_init(damon_stat_init);