xref: /linux/mm/damon/stat.c (revision 1a80ff0f8896750156f22dbf2d4591d79bb2a155)
// SPDX-License-Identifier: GPL-2.0
/*
 * Shows data access monitoring results in simple metrics.
 */

#define pr_fmt(fmt) "damon-stat: " fmt

#include <linux/damon.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sort.h>

#ifdef MODULE_PARAM_PREFIX
#undef MODULE_PARAM_PREFIX
#endif
#define MODULE_PARAM_PREFIX "damon_stat."

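/*
 * With the "damon_stat." prefix, the parameters below are exposed as, e.g.,
 * damon_stat.enabled on the kernel command line and under
 * /sys/module/damon_stat/parameters/ at runtime.  For example (assuming a
 * kernel built with this file):
 *
 *	# echo Y > /sys/module/damon_stat/parameters/enabled
 *	# cat /sys/module/damon_stat/parameters/estimated_memory_bandwidth
 */
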
static int damon_stat_enabled_store(
		const char *val, const struct kernel_param *kp);

static const struct kernel_param_ops enabled_param_ops = {
	.set = damon_stat_enabled_store,
	.get = param_get_bool,
};

static bool enabled __read_mostly = IS_ENABLED(
	CONFIG_DAMON_STAT_ENABLED_DEFAULT);
module_param_cb(enabled, &enabled_param_ops, &enabled, 0600);
MODULE_PARM_DESC(enabled, "Enable or disable DAMON_STAT");

static unsigned long estimated_memory_bandwidth __read_mostly;
module_param(estimated_memory_bandwidth, ulong, 0400);
MODULE_PARM_DESC(estimated_memory_bandwidth,
		"Estimated memory bandwidth usage in bytes per second");

static unsigned long memory_idle_ms_percentiles[101] __read_mostly = {0,};
module_param_array(memory_idle_ms_percentiles, ulong, NULL, 0400);
MODULE_PARM_DESC(memory_idle_ms_percentiles,
		"Memory idle time percentiles in milliseconds");

static struct damon_ctx *damon_stat_context;

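/*
 * Estimate memory bandwidth usage from the last aggregation window.  Each
 * region contributes its size times nr_accesses, the number of sampling
 * intervals in which an access to the region was observed.  Since
 * ->aggr_interval is in microseconds, the sum is scaled by
 * USEC_PER_MSEC * MSEC_PER_SEC (microseconds per second) to get bytes per
 * second.  For example, a 4 MiB region with nr_accesses == 10 over a 100 ms
 * aggregation interval contributes about 400 MiB/s.
 */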
static void damon_stat_set_estimated_memory_bandwidth(struct damon_ctx *c)
{
	struct damon_target *t;
	struct damon_region *r;
	unsigned long access_bytes = 0;

	damon_for_each_target(t, c) {
		damon_for_each_region(r, t)
			access_bytes += (r->ar.end - r->ar.start) *
				r->nr_accesses;
	}
	estimated_memory_bandwidth = access_bytes * USEC_PER_MSEC *
		MSEC_PER_SEC / c->attrs.aggr_interval;
}

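/*
 * Return the idle time of a region, in aggregation intervals.  A region with
 * any observed access is not idle at all (0).  Otherwise its age, which DAMON
 * resets whenever the access frequency of the region changes, plus one is
 * used, so that a region that just became idle is distinguishable from an
 * accessed one.
 */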
static unsigned int damon_stat_idletime(const struct damon_region *r)
{
	if (r->nr_accesses)
		return 0;
	return r->age + 1;
}

static int damon_stat_cmp_regions(const void *a, const void *b)
{
	const struct damon_region *ra = *(const struct damon_region **)a;
	const struct damon_region *rb = *(const struct damon_region **)b;

	return damon_stat_idletime(ra) - damon_stat_idletime(rb);
}

static int damon_stat_sort_regions(struct damon_ctx *c,
		struct damon_region ***sorted_ptr, int *nr_regions_ptr,
		unsigned long *total_sz_ptr)
{
	struct damon_target *t;
	struct damon_region *r;
	struct damon_region **region_pointers;
	unsigned int nr_regions = 0;
	unsigned long total_sz = 0;

	damon_for_each_target(t, c) {
		/* there is only one target */
		region_pointers = kmalloc_array(damon_nr_regions(t),
				sizeof(*region_pointers), GFP_KERNEL);
		if (!region_pointers)
			return -ENOMEM;
		damon_for_each_region(r, t) {
			region_pointers[nr_regions++] = r;
			total_sz += r->ar.end - r->ar.start;
		}
	}
	sort(region_pointers, nr_regions, sizeof(*region_pointers),
			damon_stat_cmp_regions, NULL);
	*sorted_ptr = region_pointers;
	*nr_regions_ptr = nr_regions;
	*total_sz_ptr = total_sz;
	return 0;
}

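/*
 * Fill memory_idle_ms_percentiles[] from the sorted regions.  The regions are
 * ordered from hottest to most idle, so walking them while accumulating their
 * sizes finds, for each percentile p, the idle time of the region covering
 * the p-th percent of the monitored bytes.  Idle times are converted from
 * aggregation intervals (microseconds) to milliseconds.
 */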
static void damon_stat_set_idletime_percentiles(struct damon_ctx *c)
{
	struct damon_region **sorted_regions, *region;
	int nr_regions;
	unsigned long total_sz, accounted_bytes = 0;
	int err, i, next_percentile = 0;

	err = damon_stat_sort_regions(c, &sorted_regions, &nr_regions,
			&total_sz);
	if (err)
		return;
	for (i = 0; i < nr_regions; i++) {
		region = sorted_regions[i];
		accounted_bytes += region->ar.end - region->ar.start;
		while (next_percentile <= accounted_bytes * 100 / total_sz)
			memory_idle_ms_percentiles[next_percentile++] =
				damon_stat_idletime(region) *
				c->attrs.aggr_interval / USEC_PER_MSEC;
	}
	kfree(sorted_regions);
}

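/*
 * DAMON callback, invoked after every aggregation.  Refreshing the exported
 * metrics more often than every five seconds would be wasted work, so the
 * update is rate-limited using jiffies.
 */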
static int damon_stat_after_aggregation(struct damon_ctx *c)
{
	static unsigned long last_refresh_jiffies;

	/* avoid unnecessarily frequent stat update */
	if (time_before_eq(jiffies, last_refresh_jiffies +
				msecs_to_jiffies(5 * MSEC_PER_SEC)))
		return 0;
	last_refresh_jiffies = jiffies;

	damon_stat_set_estimated_memory_bandwidth(c);
	damon_stat_set_idletime_percentiles(c);
	return 0;
}

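/*
 * Build the monitoring context: physical address space (paddr) operations on
 * the biggest contiguous system RAM block, with sampling and aggregation
 * intervals auto-tuned as described in the comment below.
 */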
static struct damon_ctx *damon_stat_build_ctx(void)
{
	struct damon_ctx *ctx;
	struct damon_attrs attrs;
	struct damon_target *target;
	unsigned long start = 0, end = 0;

	ctx = damon_new_ctx();
	if (!ctx)
		return NULL;
	attrs = (struct damon_attrs) {
		.sample_interval = 5 * USEC_PER_MSEC,
		.aggr_interval = 100 * USEC_PER_MSEC,
		.ops_update_interval = 60 * USEC_PER_MSEC * MSEC_PER_SEC,
		.min_nr_regions = 10,
		.max_nr_regions = 1000,
	};
	/*
	 * auto-tune sampling and aggregation interval aiming 4% DAMON-observed
	 * accesses ratio, keeping sampling interval in [5ms, 10s] range.
	 */
	attrs.intervals_goal = (struct damon_intervals_goal) {
		.access_bp = 400, .aggrs = 3,
		.min_sample_us = 5000, .max_sample_us = 10000000,
	};
	if (damon_set_attrs(ctx, &attrs))
		goto free_out;

	if (damon_select_ops(ctx, DAMON_OPS_PADDR))
		goto free_out;

	target = damon_new_target();
	if (!target)
		goto free_out;
	damon_add_target(ctx, target);
	if (damon_set_region_biggest_system_ram_default(target, &start, &end))
		goto free_out;
	ctx->callback.after_aggregation = damon_stat_after_aggregation;
	return ctx;
free_out:
	damon_destroy_ctx(ctx);
	return NULL;
}

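/*
 * Build the context and start monitoring.  The third argument of
 * damon_start() requests exclusive execution, so no other exclusive DAMON
 * context runs concurrently with this one.
 */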
static int damon_stat_start(void)
{
	damon_stat_context = damon_stat_build_ctx();
	if (!damon_stat_context)
		return -ENOMEM;
	return damon_start(&damon_stat_context, 1, true);
}

static void damon_stat_stop(void)
{
	damon_stop(&damon_stat_context, 1);
	damon_destroy_ctx(damon_stat_context);
}

static bool damon_stat_init_called;

static int damon_stat_enabled_store(
		const char *val, const struct kernel_param *kp)
{
	bool is_enabled = enabled;
	int err;

	err = kstrtobool(val, &enabled);
	if (err)
		return err;

	if (is_enabled == enabled)
		return 0;

	if (!damon_stat_init_called)
		/*
		 * probably called from command line parsing (parse_args()).
		 * Cannot call damon_new_ctx().  Let damon_stat_init() handle.
		 */
		return 0;

	if (enabled) {
		err = damon_stat_start();
		if (err)
			enabled = false;
		return err;
	}
	damon_stat_stop();
	return 0;
}

static int __init damon_stat_init(void)
{
	int err = 0;

	damon_stat_init_called = true;

	/* probably set via command line */
	if (enabled)
		err = damon_stat_start();

	if (err && enabled)
		enabled = false;
	return err;
}

module_init(damon_stat_init);