xref: /linux/mm/damon/stat.c (revision 537d196186e0a0ce28e494ca1881885accc35a12)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
 * Shows data access monitoring results in simple metrics.
4  */
5 
6 #define pr_fmt(fmt) "damon-stat: " fmt
7 
8 #include <linux/damon.h>
9 #include <linux/init.h>
10 #include <linux/kernel.h>
11 #include <linux/module.h>
12 #include <linux/sort.h>
13 
14 #ifdef MODULE_PARAM_PREFIX
15 #undef MODULE_PARAM_PREFIX
16 #endif
17 #define MODULE_PARAM_PREFIX "damon_stat."
18 
/* Setter for the "enabled" parameter; defined below the helpers it uses. */
static int damon_stat_enabled_store(
		const char *val, const struct kernel_param *kp);

/*
 * "enabled" needs a custom setter so that writing it at runtime can start or
 * stop the monitoring context; reading it is plain boolean formatting.
 */
static const struct kernel_param_ops enabled_param_ops = {
	.set = damon_stat_enabled_store,
	.get = param_get_bool,
};
26 
27 static bool enabled __read_mostly = IS_ENABLED(
28 	CONFIG_DAMON_STAT_ENABLED_DEFAULT);
29 module_param_cb(enabled, &enabled_param_ops, &enabled, 0600);
30 MODULE_PARM_DESC(enabled, "Enable of disable DAMON_STAT");
31 
/* Read-only output metric: see damon_stat_set_estimated_memory_bandwidth(). */
static unsigned long estimated_memory_bandwidth __read_mostly;
module_param(estimated_memory_bandwidth, ulong, 0400);
MODULE_PARM_DESC(estimated_memory_bandwidth,
		"Estimated memory bandwidth usage in bytes per second");

/*
 * Read-only output metric: idle time of the coldest X percent of memory, for
 * X in [0, 100].  Negative values mean the memory was accessed for that long.
 * Filled by damon_stat_set_idletime_percentiles().
 */
static long memory_idle_ms_percentiles[101] __read_mostly = {0,};
module_param_array(memory_idle_ms_percentiles, long, NULL, 0400);
MODULE_PARM_DESC(memory_idle_ms_percentiles,
		"Memory idle time percentiles in milliseconds");

/* Read-only: aggregation interval after DAMON's interval auto-tuning. */
static unsigned long aggr_interval_us;
module_param(aggr_interval_us, ulong, 0400);
MODULE_PARM_DESC(aggr_interval_us,
		"Current tuned aggregation interval in microseconds");

/* The single DAMON context this module drives; NULL while disabled. */
static struct damon_ctx *damon_stat_context;

/* jiffies timestamp of the last metrics refresh, for rate limiting. */
static unsigned long damon_stat_last_refresh_jiffies;
50 
damon_stat_set_estimated_memory_bandwidth(struct damon_ctx * c)51 static void damon_stat_set_estimated_memory_bandwidth(struct damon_ctx *c)
52 {
53 	struct damon_target *t;
54 	struct damon_region *r;
55 	unsigned long access_bytes = 0;
56 
57 	damon_for_each_target(t, c) {
58 		damon_for_each_region(r, t)
59 			access_bytes += (r->ar.end - r->ar.start) *
60 				r->nr_accesses;
61 	}
62 	estimated_memory_bandwidth = access_bytes * USEC_PER_MSEC *
63 		MSEC_PER_SEC / c->attrs.aggr_interval;
64 }
65 
damon_stat_idletime(const struct damon_region * r)66 static int damon_stat_idletime(const struct damon_region *r)
67 {
68 	if (r->nr_accesses)
69 		return -1 * (r->age + 1);
70 	return r->age + 1;
71 }
72 
/*
 * sort() comparator ordering regions by idle time, most-accessed (most
 * negative idletime) first.  @a and @b point at struct damon_region pointers.
 *
 * Use an explicit three-way compare rather than subtraction: subtracting two
 * ints can overflow (UB) when the idletimes have opposite signs and large
 * magnitudes.
 */
static int damon_stat_cmp_regions(const void *a, const void *b)
{
	const struct damon_region *ra = *(const struct damon_region **)a;
	const struct damon_region *rb = *(const struct damon_region **)b;
	int idle_a = damon_stat_idletime(ra);
	int idle_b = damon_stat_idletime(rb);

	if (idle_a < idle_b)
		return -1;
	if (idle_a > idle_b)
		return 1;
	return 0;
}
80 
damon_stat_sort_regions(struct damon_ctx * c,struct damon_region *** sorted_ptr,int * nr_regions_ptr,unsigned long * total_sz_ptr)81 static int damon_stat_sort_regions(struct damon_ctx *c,
82 		struct damon_region ***sorted_ptr, int *nr_regions_ptr,
83 		unsigned long *total_sz_ptr)
84 {
85 	struct damon_target *t;
86 	struct damon_region *r;
87 	struct damon_region **region_pointers;
88 	unsigned int nr_regions = 0;
89 	unsigned long total_sz = 0;
90 
91 	damon_for_each_target(t, c) {
92 		/* there is only one target */
93 		region_pointers = kmalloc_array(damon_nr_regions(t),
94 				sizeof(*region_pointers), GFP_KERNEL);
95 		if (!region_pointers)
96 			return -ENOMEM;
97 		damon_for_each_region(r, t) {
98 			region_pointers[nr_regions++] = r;
99 			total_sz += r->ar.end - r->ar.start;
100 		}
101 	}
102 	sort(region_pointers, nr_regions, sizeof(*region_pointers),
103 			damon_stat_cmp_regions, NULL);
104 	*sorted_ptr = region_pointers;
105 	*nr_regions_ptr = nr_regions;
106 	*total_sz_ptr = total_sz;
107 	return 0;
108 }
109 
/*
 * Refresh memory_idle_ms_percentiles[] from the current regions of @c.
 *
 * Walks the regions from most-accessed to most-idle (the sort order of
 * damon_stat_sort_regions()) while tracking how many bytes have been covered
 * so far.  Whenever the covered fraction passes the next whole percent, that
 * percentile slot is filled with the current region's idle time, converted
 * from aggregation intervals to milliseconds.  Silently keeps the previous
 * values if sorting fails.
 */
static void damon_stat_set_idletime_percentiles(struct damon_ctx *c)
{
	struct damon_region **sorted_regions, *region;
	int nr_regions;
	unsigned long total_sz, accounted_bytes = 0;
	int err, i, next_percentile = 0;

	err = damon_stat_sort_regions(c, &sorted_regions, &nr_regions,
			&total_sz);
	if (err)
		return;
	for (i = 0; i < nr_regions; i++) {
		region = sorted_regions[i];
		accounted_bytes += region->ar.end - region->ar.start;
		/* fill every percentile the covered fraction has now reached */
		while (next_percentile <= accounted_bytes * 100 / total_sz)
			memory_idle_ms_percentiles[next_percentile++] =
				damon_stat_idletime(region) *
				(long)c->attrs.aggr_interval / USEC_PER_MSEC;
	}
	kfree(sorted_regions);
}
131 
damon_stat_damon_call_fn(void * data)132 static int damon_stat_damon_call_fn(void *data)
133 {
134 	struct damon_ctx *c = data;
135 
136 	/* avoid unnecessarily frequent stat update */
137 	if (time_before_eq(jiffies, damon_stat_last_refresh_jiffies +
138 				msecs_to_jiffies(5 * MSEC_PER_SEC)))
139 		return 0;
140 	damon_stat_last_refresh_jiffies = jiffies;
141 
142 	aggr_interval_us = c->attrs.aggr_interval;
143 	damon_stat_set_estimated_memory_bandwidth(c);
144 	damon_stat_set_idletime_percentiles(c);
145 	return 0;
146 }
147 
damon_stat_build_ctx(void)148 static struct damon_ctx *damon_stat_build_ctx(void)
149 {
150 	struct damon_ctx *ctx;
151 	struct damon_attrs attrs;
152 	struct damon_target *target;
153 	unsigned long start = 0, end = 0;
154 
155 	ctx = damon_new_ctx();
156 	if (!ctx)
157 		return NULL;
158 	attrs = (struct damon_attrs) {
159 		.sample_interval = 5 * USEC_PER_MSEC,
160 		.aggr_interval = 100 * USEC_PER_MSEC,
161 		.ops_update_interval = 60 * USEC_PER_MSEC * MSEC_PER_SEC,
162 		.min_nr_regions = 10,
163 		.max_nr_regions = 1000,
164 	};
165 	/*
166 	 * auto-tune sampling and aggregation interval aiming 4% DAMON-observed
167 	 * accesses ratio, keeping sampling interval in [5ms, 10s] range.
168 	 */
169 	attrs.intervals_goal = (struct damon_intervals_goal) {
170 		.access_bp = 400, .aggrs = 3,
171 		.min_sample_us = 5000, .max_sample_us = 10000000,
172 	};
173 	if (damon_set_attrs(ctx, &attrs))
174 		goto free_out;
175 
176 	/*
177 	 * auto-tune sampling and aggregation interval aiming 4% DAMON-observed
178 	 * accesses ratio, keeping sampling interval in [5ms, 10s] range.
179 	 */
180 	ctx->attrs.intervals_goal = (struct damon_intervals_goal) {
181 		.access_bp = 400, .aggrs = 3,
182 		.min_sample_us = 5000, .max_sample_us = 10000000,
183 	};
184 	if (damon_select_ops(ctx, DAMON_OPS_PADDR))
185 		goto free_out;
186 
187 	target = damon_new_target();
188 	if (!target)
189 		goto free_out;
190 	damon_add_target(ctx, target);
191 	if (damon_set_region_biggest_system_ram_default(target, &start, &end))
192 		goto free_out;
193 	return ctx;
194 free_out:
195 	damon_destroy_ctx(ctx);
196 	return NULL;
197 }
198 
/* Repeatedly invoke damon_stat_damon_call_fn() from the kdamond context. */
static struct damon_call_control call_control = {
	.fn = damon_stat_damon_call_fn,
	.repeat = true,
};
203 
damon_stat_start(void)204 static int damon_stat_start(void)
205 {
206 	int err;
207 
208 	damon_stat_context = damon_stat_build_ctx();
209 	if (!damon_stat_context)
210 		return -ENOMEM;
211 	err = damon_start(&damon_stat_context, 1, true);
212 	if (err)
213 		return err;
214 
215 	damon_stat_last_refresh_jiffies = jiffies;
216 	call_control.data = damon_stat_context;
217 	return damon_call(damon_stat_context, &call_control);
218 }
219 
/* Stop monitoring and release the context built by damon_stat_start(). */
static void damon_stat_stop(void)
{
	damon_stop(&damon_stat_context, 1);
	damon_destroy_ctx(damon_stat_context);
}
225 
/*
 * Setter for the "enabled" module parameter: parse @val as a boolean and
 * start or stop the monitoring accordingly.  Returns 0 on success or a
 * negative error code; clears "enabled" again if starting fails.
 *
 * NOTE(review): there appears to be no serialization against concurrent
 * writers of this parameter here — confirm the param core or a caller-side
 * lock covers that.
 */
static int damon_stat_enabled_store(
		const char *val, const struct kernel_param *kp)
{
	bool is_enabled = enabled;
	int err;

	err = kstrtobool(val, &enabled);
	if (err)
		return err;

	/* no state change requested */
	if (is_enabled == enabled)
		return 0;

	if (!damon_initialized())
		/*
		 * probably called from command line parsing (parse_args()).
		 * Cannot call damon_new_ctx().  Let damon_stat_init() handle.
		 */
		return 0;

	if (enabled) {
		err = damon_stat_start();
		if (err)
			enabled = false;
		return err;
	}
	damon_stat_stop();
	return 0;
}
255 
damon_stat_init(void)256 static int __init damon_stat_init(void)
257 {
258 	int err = 0;
259 
260 	if (!damon_initialized()) {
261 		err = -ENOMEM;
262 		goto out;
263 	}
264 
265 	/* probably set via command line */
266 	if (enabled)
267 		err = damon_stat_start();
268 
269 out:
270 	if (err && enabled)
271 		enabled = false;
272 	return err;
273 }
274 
275 module_init(damon_stat_init);
276