xref: /linux/mm/damon/stat.c (revision 06bc7ff0a1e0f2b0102e1314e3527a7ec0997851)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Shows data access monitoring results in simple metrics.
4  */
5 
6 #define pr_fmt(fmt) "damon-stat: " fmt
7 
8 #include <linux/damon.h>
9 #include <linux/init.h>
10 #include <linux/kernel.h>
11 #include <linux/module.h>
12 #include <linux/sort.h>
13 
14 #ifdef MODULE_PARAM_PREFIX
15 #undef MODULE_PARAM_PREFIX
16 #endif
17 #define MODULE_PARAM_PREFIX "damon_stat."
18 
static int damon_stat_enabled_store(
		const char *val, const struct kernel_param *kp);

static int damon_stat_enabled_load(char *buffer,
		const struct kernel_param *kp);

/*
 * Custom param ops for "enabled", so that writes start/stop the monitoring
 * and reads report the actual running state rather than the raw variable.
 */
static const struct kernel_param_ops enabled_param_ops = {
	.set = damon_stat_enabled_store,
	.get = damon_stat_enabled_load,
};
29 
30 static bool enabled __read_mostly = IS_ENABLED(
31 	CONFIG_DAMON_STAT_ENABLED_DEFAULT);
32 module_param_cb(enabled, &enabled_param_ops, NULL, 0600);
33 MODULE_PARM_DESC(enabled, "Enable of disable DAMON_STAT");
34 
/* Read-only output: last computed whole-system bandwidth estimate (B/s). */
static unsigned long estimated_memory_bandwidth __read_mostly;
module_param(estimated_memory_bandwidth, ulong, 0400);
MODULE_PARM_DESC(estimated_memory_bandwidth,
		"Estimated memory bandwidth usage in bytes per second");
39 
/*
 * Read-only output: idle time of the [0..100]th percentile of monitored
 * bytes, in milliseconds.  Negative values mean the memory is being accessed
 * (see damon_stat_idletime()).
 */
static long memory_idle_ms_percentiles[101] = {0,};
module_param_array(memory_idle_ms_percentiles, long, NULL, 0400);
MODULE_PARM_DESC(memory_idle_ms_percentiles,
		"Memory idle time percentiles in milliseconds");
44 
/* Read-only output: current (auto-tuned) aggregation interval, in us. */
static unsigned long aggr_interval_us;
module_param(aggr_interval_us, ulong, 0400);
MODULE_PARM_DESC(aggr_interval_us,
		"Current tuned aggregation interval in microseconds");
49 
/* The DAMON context doing the monitoring; NULL if never built. */
static struct damon_ctx *damon_stat_context;

/* jiffies of the last stats refresh, for rate-limiting the updates. */
static unsigned long damon_stat_last_refresh_jiffies;
53 
damon_stat_set_estimated_memory_bandwidth(struct damon_ctx * c)54 static void damon_stat_set_estimated_memory_bandwidth(struct damon_ctx *c)
55 {
56 	struct damon_target *t;
57 	struct damon_region *r;
58 	unsigned long access_bytes = 0;
59 
60 	damon_for_each_target(t, c) {
61 		damon_for_each_region(r, t)
62 			access_bytes += (r->ar.end - r->ar.start) *
63 				r->nr_accesses;
64 	}
65 	estimated_memory_bandwidth = access_bytes * USEC_PER_MSEC *
66 		MSEC_PER_SEC / c->attrs.aggr_interval;
67 }
68 
damon_stat_idletime(const struct damon_region * r)69 static int damon_stat_idletime(const struct damon_region *r)
70 {
71 	if (r->nr_accesses)
72 		return -1 * (r->age + 1);
73 	return r->age + 1;
74 }
75 
/* sort() comparator: order region pointers from hottest to most idle. */
static int damon_stat_cmp_regions(const void *a, const void *b)
{
	const struct damon_region *left = *(const struct damon_region **)a;
	const struct damon_region *right = *(const struct damon_region **)b;
	int left_idletime = damon_stat_idletime(left);
	int right_idletime = damon_stat_idletime(right);

	return left_idletime - right_idletime;
}
83 
damon_stat_sort_regions(struct damon_ctx * c,struct damon_region *** sorted_ptr,int * nr_regions_ptr,unsigned long * total_sz_ptr)84 static int damon_stat_sort_regions(struct damon_ctx *c,
85 		struct damon_region ***sorted_ptr, int *nr_regions_ptr,
86 		unsigned long *total_sz_ptr)
87 {
88 	struct damon_target *t;
89 	struct damon_region *r;
90 	struct damon_region **region_pointers;
91 	unsigned int nr_regions = 0;
92 	unsigned long total_sz = 0;
93 
94 	damon_for_each_target(t, c) {
95 		/* there is only one target */
96 		region_pointers = kmalloc_objs(*region_pointers,
97 					       damon_nr_regions(t));
98 		if (!region_pointers)
99 			return -ENOMEM;
100 		damon_for_each_region(r, t) {
101 			region_pointers[nr_regions++] = r;
102 			total_sz += r->ar.end - r->ar.start;
103 		}
104 	}
105 	sort(region_pointers, nr_regions, sizeof(*region_pointers),
106 			damon_stat_cmp_regions, NULL);
107 	*sorted_ptr = region_pointers;
108 	*nr_regions_ptr = nr_regions;
109 	*total_sz_ptr = total_sz;
110 	return 0;
111 }
112 
/*
 * Update the memory_idle_ms_percentiles[] output array from the current
 * monitoring results of @c.
 *
 * Walks the regions from hottest to most idle and, for each percentile of
 * total monitored bytes the walk crosses, records the current region's
 * idle time converted to milliseconds.  Keeps the previous (stale) values if
 * sorting the regions fails.
 */
static void damon_stat_set_idletime_percentiles(struct damon_ctx *c)
{
	struct damon_region **sorted_regions, *region;
	int nr_regions;
	unsigned long total_sz, accounted_bytes = 0;
	int err, i, next_percentile = 0;

	err = damon_stat_sort_regions(c, &sorted_regions, &nr_regions,
			&total_sz);
	if (err)
		return;
	for (i = 0; i < nr_regions; i++) {
		region = sorted_regions[i];
		accounted_bytes += region->ar.end - region->ar.start;
		/*
		 * Fill every percentile slot covered by the bytes accounted so
		 * far; slot 100 is reached once all bytes are accounted.
		 * NOTE(review): accounted_bytes * 100 could wrap unsigned long
		 * on 32-bit for totals above ~40TiB -- presumably irrelevant
		 * there, but confirm.
		 */
		while (next_percentile <= accounted_bytes * 100 / total_sz)
			memory_idle_ms_percentiles[next_percentile++] =
				damon_stat_idletime(region) *
				(long)c->attrs.aggr_interval / USEC_PER_MSEC;
	}
	kfree(sorted_regions);
}
134 
damon_stat_damon_call_fn(void * data)135 static int damon_stat_damon_call_fn(void *data)
136 {
137 	struct damon_ctx *c = data;
138 
139 	/* avoid unnecessarily frequent stat update */
140 	if (time_before_eq(jiffies, damon_stat_last_refresh_jiffies +
141 				msecs_to_jiffies(5 * MSEC_PER_SEC)))
142 		return 0;
143 	damon_stat_last_refresh_jiffies = jiffies;
144 
145 	aggr_interval_us = c->attrs.aggr_interval;
146 	damon_stat_set_estimated_memory_bandwidth(c);
147 	damon_stat_set_idletime_percentiles(c);
148 	return 0;
149 }
150 
/* Book-keeping for the walk_system_ram_res() walk over "System RAM". */
struct damon_stat_system_ram_range_walk_arg {
	bool walked;		/* whether at least one resource was visited */
	struct resource res;	/* start of first and end of last resource */
};
155 
damon_stat_system_ram_walk_fn(struct resource * res,void * arg)156 static int damon_stat_system_ram_walk_fn(struct resource *res, void *arg)
157 {
158 	struct damon_stat_system_ram_range_walk_arg *a = arg;
159 
160 	if (!a->walked) {
161 		a->walked = true;
162 		a->res.start = res->start;
163 	}
164 	a->res.end = res->end;
165 	return 0;
166 }
167 
/*
 * Translate a physical (resource) address into the DAMON core address space
 * by dividing it with @addr_unit.
 */
static unsigned long damon_stat_res_to_core_addr(resource_size_t ra,
		unsigned long addr_unit)
{
	/*
	 * Use div_u64() for avoiding linking errors related with __udivdi3,
	 * __aeabi_uldivmod, or similar problems.  This should also improve the
	 * performance optimization (read div_u64() comment for the detail).
	 */
	if (sizeof(ra) == 8 && sizeof(addr_unit) == 4)
		return div_u64(ra, addr_unit);
	return ra / addr_unit;
}
180 
damon_stat_set_monitoring_region(struct damon_target * t,unsigned long addr_unit,unsigned long min_region_sz)181 static int damon_stat_set_monitoring_region(struct damon_target *t,
182 		unsigned long addr_unit, unsigned long min_region_sz)
183 {
184 	struct damon_addr_range addr_range;
185 	struct damon_stat_system_ram_range_walk_arg arg = {};
186 
187 	walk_system_ram_res(0, -1, &arg, damon_stat_system_ram_walk_fn);
188 	if (!arg.walked)
189 		return -EINVAL;
190 	addr_range.start = damon_stat_res_to_core_addr(
191 			arg.res.start, addr_unit);
192 	addr_range.end = damon_stat_res_to_core_addr(
193 			arg.res.end + 1, addr_unit);
194 	if (addr_range.end <= addr_range.start)
195 		return -EINVAL;
196 	return damon_set_regions(t, &addr_range, 1, min_region_sz);
197 }
198 
/*
 * Allocate and set up a DAMON context for DAMON_STAT: physical address space
 * monitoring of the whole system RAM, with interval auto-tuning.
 *
 * Returns the new context, or NULL on any failure.  A partially constructed
 * context is destroyed before returning NULL.
 */
static struct damon_ctx *damon_stat_build_ctx(void)
{
	struct damon_ctx *ctx;
	struct damon_attrs attrs;
	struct damon_target *target;

	ctx = damon_new_ctx();
	if (!ctx)
		return NULL;
	/* 5 ms sampling, 100 ms aggregation, 60 s ops update (values in us) */
	attrs = (struct damon_attrs) {
		.sample_interval = 5 * USEC_PER_MSEC,
		.aggr_interval = 100 * USEC_PER_MSEC,
		.ops_update_interval = 60 * USEC_PER_MSEC * MSEC_PER_SEC,
		.min_nr_regions = 10,
		.max_nr_regions = 1000,
	};
	/*
	 * auto-tune sampling and aggregation interval aiming 4% DAMON-observed
	 * accesses ratio, keeping sampling interval in [5ms, 10s] range.
	 */
	attrs.intervals_goal = (struct damon_intervals_goal) {
		.access_bp = 400, .aggrs = 3,
		.min_sample_us = 5000, .max_sample_us = 10000000,
	};
	if (damon_set_attrs(ctx, &attrs))
		goto free_out;

	if (damon_select_ops(ctx, DAMON_OPS_PADDR))
		goto free_out;

	target = damon_new_target();
	if (!target)
		goto free_out;
	/* ctx owns the target from here; destroying ctx frees it too */
	damon_add_target(ctx, target);
	if (damon_stat_set_monitoring_region(target, ctx->addr_unit,
				ctx->min_region_sz))
		goto free_out;
	return ctx;
free_out:
	damon_destroy_ctx(ctx);
	return NULL;
}
241 
/* Repeating damon_call() request that keeps the exported stats fresh. */
static struct damon_call_control call_control = {
	.fn = damon_stat_damon_call_fn,
	.repeat = true,
};
246 
/*
 * Build a fresh context, start the monitoring, and register the repeating
 * stats-refresh callback.
 *
 * Returns 0 on success, -EAGAIN if the previous context is still running,
 * -ENOMEM if context construction fails, or the error from damon_start() or
 * damon_call().
 */
static int damon_stat_start(void)
{
	int err;

	if (damon_stat_context) {
		/* a still-running context cannot be replaced */
		if (damon_is_running(damon_stat_context))
			return -EAGAIN;
		damon_destroy_ctx(damon_stat_context);
	}

	damon_stat_context = damon_stat_build_ctx();
	if (!damon_stat_context)
		return -ENOMEM;
	err = damon_start(&damon_stat_context, 1, true);
	if (err) {
		damon_destroy_ctx(damon_stat_context);
		damon_stat_context = NULL;
		return err;
	}

	damon_stat_last_refresh_jiffies = jiffies;
	call_control.data = damon_stat_context;
	return damon_call(damon_stat_context, &call_control);
}
271 
/* Stop the monitoring and release the context. */
static void damon_stat_stop(void)
{
	damon_stop(&damon_stat_context, 1);
	damon_destroy_ctx(damon_stat_context);
	damon_stat_context = NULL;
}
278 
damon_stat_enabled(void)279 static bool damon_stat_enabled(void)
280 {
281 	if (!damon_stat_context)
282 		return false;
283 	return damon_is_running(damon_stat_context);
284 }
285 
/*
 * "enabled" parameter setter: parse the requested state and start or stop
 * the monitoring accordingly.
 *
 * Note that kstrtobool() updates the global 'enabled' before the transition
 * is attempted; damon_stat_init() clears it again if starting fails.
 */
static int damon_stat_enabled_store(
		const char *val, const struct kernel_param *kp)
{
	int err;

	err = kstrtobool(val, &enabled);
	if (err)
		return err;

	/* nothing to do if already in the requested state */
	if (damon_stat_enabled() == enabled)
		return 0;

	if (!damon_initialized())
		/*
		 * probably called from command line parsing (parse_args()).
		 * Cannot call damon_new_ctx().  Let damon_stat_init() handle.
		 */
		return 0;

	if (enabled)
		return damon_stat_start();
	damon_stat_stop();
	return 0;
}
310 
/* "enabled" parameter getter: report the actual monitoring state. */
static int damon_stat_enabled_load(char *buffer, const struct kernel_param *kp)
{
	char state = damon_stat_enabled() ? 'Y' : 'N';

	return sprintf(buffer, "%c\n", state);
}
315 
damon_stat_init(void)316 static int __init damon_stat_init(void)
317 {
318 	int err = 0;
319 
320 	if (!damon_initialized()) {
321 		err = -ENOMEM;
322 		goto out;
323 	}
324 
325 	/* probably set via command line */
326 	if (enabled)
327 		err = damon_stat_start();
328 
329 out:
330 	if (err && enabled)
331 		enabled = false;
332 	return err;
333 }
334 
335 module_init(damon_stat_init);
336