// SPDX-License-Identifier: GPL-2.0
/*
 * DAMON-based LRU-lists Sorting
 *
 * Author: SeongJae Park <sj@kernel.org>
 */

#define pr_fmt(fmt) "damon-lru-sort: " fmt

#include <linux/damon.h>
#include <linux/kstrtox.h>
#include <linux/module.h>

#include "modules-common.h"

#ifdef MODULE_PARAM_PREFIX
#undef MODULE_PARAM_PREFIX
#endif
#define MODULE_PARAM_PREFIX "damon_lru_sort."

/*
 * Enable or disable DAMON_LRU_SORT.
 *
 * You can enable DAMON_LRU_SORT by setting the value of this parameter as
 * ``Y``.  Setting it as ``N`` disables DAMON_LRU_SORT.  Note that
 * DAMON_LRU_SORT could do no real monitoring and LRU-lists sorting due to the
 * watermarks-based activation condition.  Refer to the below descriptions of
 * the watermarks parameters for this.
 */
static bool enabled __read_mostly;
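
/*
 * For example, DAMON_LRU_SORT can be turned on at runtime via the module
 * parameters interface of sysfs:
 *
 *	# echo Y > /sys/module/damon_lru_sort/parameters/enabled
 */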

/*
 * Make DAMON_LRU_SORT read the input parameters again, except ``enabled``.
 *
 * Input parameters that are updated while DAMON_LRU_SORT is running are not
 * applied by default.  Once this parameter is set as ``Y``, DAMON_LRU_SORT
 * reads values of parameters except ``enabled`` again.  Once the re-reading
 * is done, this parameter is set as ``N``.  If invalid parameters are found
 * during the re-reading, DAMON_LRU_SORT will be disabled.
 */
static bool commit_inputs __read_mostly;
module_param(commit_inputs, bool, 0600);
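
/*
 * For example, to apply an updated parameter to an already running
 * DAMON_LRU_SORT:
 *
 *	# echo 750 > /sys/module/damon_lru_sort/parameters/hot_thres_access_freq
 *	# echo Y > /sys/module/damon_lru_sort/parameters/commit_inputs
 */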

/*
 * Desired active to [in]active memory ratio in bp (1/10,000).
 *
 * While keeping the caps that are set by other quotas, DAMON_LRU_SORT
 * automatically increases and decreases the effective level of the quota
 * aiming for LRU [de]prioritizations of the hot and cold memory that result
 * in this active to [in]active memory ratio.  Value zero means disabling
 * this auto-tuning feature.
 *
 * Disabled by default.
 */
static unsigned long active_mem_bp __read_mostly;
module_param(active_mem_bp, ulong, 0600);
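
/*
 * For example, setting this as 6000 asks DAMON_LRU_SORT to auto-tune its
 * quotas aiming for about 60% of the active plus inactive (LRU-listed)
 * memory to be kept active:
 *
 *	# echo 6000 > /sys/module/damon_lru_sort/parameters/active_mem_bp
 */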

/*
 * Auto-tune monitoring intervals.
 *
 * If this parameter is set as ``Y``, DAMON_LRU_SORT automatically tunes
 * DAMON's sampling and aggregation intervals.  The auto-tuning aims to
 * capture a meaningful amount of access events in each DAMON snapshot, while
 * keeping the sampling interval between 5 milliseconds and 10 seconds.
 * Setting this as ``N`` disables the auto-tuning.
 *
 * Disabled by default.
 */
static bool autotune_monitoring_intervals __read_mostly;
module_param(autotune_monitoring_intervals, bool, 0600);

/*
 * Filter [non-]young pages accordingly for LRU [de]prioritizations.
 *
 * If this is set, check page level access (youngness) once again before each
 * LRU [de]prioritization operation.  The LRU prioritization operation is
 * skipped if the page has not been accessed since the last check (not
 * young).  The LRU deprioritization operation is skipped if the page has
 * been accessed since the last check (young).  The feature is enabled or
 * disabled if this parameter is set as ``Y`` or ``N``, respectively.
 *
 * Disabled by default.
 */
static bool filter_young_pages __read_mostly;
module_param(filter_young_pages, bool, 0600);

/*
 * Access frequency threshold for hot memory regions identification in permil.
 *
 * If a memory region is accessed at a frequency of this or higher,
 * DAMON_LRU_SORT identifies the region as hot, and marks it as accessed on
 * the LRU list, so that it would be less likely reclaimed under memory
 * pressure.  50% by default.
 */
static unsigned long hot_thres_access_freq = 500;
module_param(hot_thres_access_freq, ulong, 0600);
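
/*
 * For example, with the default 5 ms sampling and 100 ms aggregation
 * intervals, a region can be found accessed at most 20 times per aggregation
 * interval, so the default 500 permil threshold means regions accessed 10 or
 * more times per aggregation interval are identified as hot.
 */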

/*
 * Time threshold for cold memory regions identification in microseconds.
 *
 * If a memory region is not accessed for this or a longer time,
 * DAMON_LRU_SORT identifies the region as cold, and marks it as unaccessed
 * on the LRU list, so that it could be reclaimed first under memory
 * pressure.  120 seconds by default.
 */
static unsigned long cold_min_age __read_mostly = 120000000;
module_param(cold_min_age, ulong, 0600);
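
/*
 * For example, the default 120000000 (120 seconds) with the default 100 ms
 * aggregation interval means regions that have been kept unaccessed for
 * 1,200 or more consecutive aggregation intervals are identified as cold.
 */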

static struct damos_quota damon_lru_sort_quota = {
	/* Use up to 10 ms per 1 sec, by default */
	.ms = 10,
	.sz = 0,
	.reset_interval = 1000,
	/* Within the quota, mark hotter regions accessed first. */
	.weight_sz = 0,
	.weight_nr_accesses = 1,
	.weight_age = 1,
};
DEFINE_DAMON_MODULES_DAMOS_TIME_QUOTA(damon_lru_sort_quota);
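
/*
 * Note that the hot and cold schemes that DAMON_LRU_SORT creates share this
 * quota; each of the two schemes gets half of the time quota (see
 * damon_lru_sort_new_scheme()).
 */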

static struct damos_watermarks damon_lru_sort_wmarks = {
	.metric = DAMOS_WMARK_FREE_MEM_RATE,
	.interval = 5000000,	/* 5 seconds */
	.high = 200,		/* 20 percent */
	.mid = 150,		/* 15 percent */
	.low = 50,		/* 5 percent */
};
DEFINE_DAMON_MODULES_WMARKS_PARAMS(damon_lru_sort_wmarks);
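
/*
 * In other words, following the usual DAMOS watermarks semantic,
 * DAMON_LRU_SORT is activated once the free memory rate falls below 15%
 * while staying above 5%, and deactivated again once the rate goes above
 * 20% or below 5%, checking the rate every 5 seconds.
 */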

static struct damon_attrs damon_lru_sort_mon_attrs = {
	.sample_interval = 5000,	/* 5 ms */
	.aggr_interval = 100000,	/* 100 ms */
	.ops_update_interval = 0,
	.min_nr_regions = 10,
	.max_nr_regions = 1000,
};
DEFINE_DAMON_MODULES_MON_ATTRS_PARAMS(damon_lru_sort_mon_attrs);

/*
 * Start of the target memory region in physical address.
 *
 * The start physical address of the memory region that DAMON_LRU_SORT will
 * do work against.  By default, the biggest System RAM is used as the
 * region.
 */
static unsigned long monitor_region_start __read_mostly;
module_param(monitor_region_start, ulong, 0600);

/*
 * End of the target memory region in physical address.
 *
 * The end physical address of the memory region that DAMON_LRU_SORT will do
 * work against.  By default, the biggest System RAM is used as the region.
 */
static unsigned long monitor_region_end __read_mostly;
module_param(monitor_region_end, ulong, 0600);

/*
 * Scale factor for DAMON_LRU_SORT to ops address conversion.
 *
 * This parameter must not be set to 0.
 */
static unsigned long addr_unit __read_mostly = 1;
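
/*
 * Roughly speaking, a core-layer address X is translated to the ops-layer
 * (physical) address of X * addr_unit.  A value larger than 1 can therefore
 * be useful when the target physical address space is too large to represent
 * in core-layer addresses, e.g., on 32-bit systems with large physical
 * memory.
 */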

/*
 * PID of the DAMON thread.
 *
 * If DAMON_LRU_SORT is enabled, this becomes the PID of the worker thread.
 * Else, -1.
 */
static int kdamond_pid __read_mostly = -1;
module_param(kdamond_pid, int, 0400);

static struct damos_stat damon_lru_sort_hot_stat;
DEFINE_DAMON_MODULES_DAMOS_STATS_PARAMS(damon_lru_sort_hot_stat,
		lru_sort_tried_hot_regions, lru_sorted_hot_regions,
		hot_quota_exceeds);

static struct damos_stat damon_lru_sort_cold_stat;
DEFINE_DAMON_MODULES_DAMOS_STATS_PARAMS(damon_lru_sort_cold_stat,
		lru_sort_tried_cold_regions, lru_sorted_cold_regions,
		cold_quota_exceeds);

static struct damos_access_pattern damon_lru_sort_stub_pattern = {
	/* Find regions having PAGE_SIZE or larger size */
	.min_sz_region = PAGE_SIZE,
	.max_sz_region = ULONG_MAX,
	/* no matter its access frequency */
	.min_nr_accesses = 0,
	.max_nr_accesses = UINT_MAX,
	/* no matter its age */
	.min_age_region = 0,
	.max_age_region = UINT_MAX,
};

static struct damon_ctx *ctx;
static struct damon_target *target;

static struct damos *damon_lru_sort_new_scheme(
		struct damos_access_pattern *pattern, enum damos_action action)
{
	struct damos_quota quota = damon_lru_sort_quota;

	/* Use half of total quota for hot/cold pages sorting */
	quota.ms = quota.ms / 2;

	return damon_new_scheme(
			/* find the pattern, and */
			pattern,
			/* (de)prioritize on LRU-lists */
			action,
			/* for each aggregation interval */
			0,
			/* under the quota. */
			&quota,
			/* (De)activate this according to the watermarks. */
			&damon_lru_sort_wmarks,
			NUMA_NO_NODE);
}

/* Create a DAMON-based operation scheme for hot memory regions */
static struct damos *damon_lru_sort_new_hot_scheme(unsigned int hot_thres)
{
	struct damos_access_pattern pattern = damon_lru_sort_stub_pattern;

	pattern.min_nr_accesses = hot_thres;
	return damon_lru_sort_new_scheme(&pattern, DAMOS_LRU_PRIO);
}

/* Create a DAMON-based operation scheme for cold memory regions */
static struct damos *damon_lru_sort_new_cold_scheme(unsigned int cold_thres)
{
	struct damos_access_pattern pattern = damon_lru_sort_stub_pattern;

	pattern.max_nr_accesses = 0;
	pattern.min_age_region = cold_thres;
	return damon_lru_sort_new_scheme(&pattern, DAMOS_LRU_DEPRIO);
}

static int damon_lru_sort_add_quota_goals(struct damos *hot_scheme,
		struct damos *cold_scheme)
{
	struct damos_quota_goal *goal;

	if (!active_mem_bp)
		return 0;
	goal = damos_new_quota_goal(DAMOS_QUOTA_ACTIVE_MEM_BP, active_mem_bp);
	if (!goal)
		return -ENOMEM;
	damos_add_quota_goal(&hot_scheme->quota, goal);
	/* aim 0.02% (2 bp) of goals overlap, to keep the ping pong small */
	goal = damos_new_quota_goal(DAMOS_QUOTA_INACTIVE_MEM_BP,
			10000 - active_mem_bp + 2);
	if (!goal)
		return -ENOMEM;
	damos_add_quota_goal(&cold_scheme->quota, goal);
	return 0;
}
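
/*
 * For example, if active_mem_bp is 6000, the hot scheme's quota is tuned
 * aiming 60% active memory, while the cold scheme's quota is tuned aiming
 * 40.02% inactive memory.  The two goals intentionally overlap by 0.02% so
 * that the tuners do not ping-pong around exactly complementary targets.
 */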

static int damon_lru_sort_add_filters(struct damos *hot_scheme,
		struct damos *cold_scheme)
{
	struct damos_filter *filter;

	if (!filter_young_pages)
		return 0;

	/* disallow prioritizing not-young pages */
	filter = damos_new_filter(DAMOS_FILTER_TYPE_YOUNG, false, false);
	if (!filter)
		return -ENOMEM;
	damos_add_filter(hot_scheme, filter);

	/* disallow de-prioritizing young pages */
	filter = damos_new_filter(DAMOS_FILTER_TYPE_YOUNG, true, false);
	if (!filter)
		return -ENOMEM;
	damos_add_filter(cold_scheme, filter);
	return 0;
}

static int damon_lru_sort_apply_parameters(void)
{
	struct damon_ctx *param_ctx;
	struct damon_target *param_target;
	struct damon_attrs attrs;
	struct damos *hot_scheme, *cold_scheme;
	unsigned int hot_thres, cold_thres;
	int err;

	err = damon_modules_new_paddr_ctx_target(&param_ctx, &param_target);
	if (err)
		return err;

	/*
	 * If monitor_region_start/end are unset, always silently
	 * reset addr_unit to 1.
	 */
	if (!monitor_region_start && !monitor_region_end)
		addr_unit = 1;
	param_ctx->addr_unit = addr_unit;
	param_ctx->min_region_sz = max(DAMON_MIN_REGION_SZ / addr_unit, 1);

	if (!damon_lru_sort_mon_attrs.sample_interval) {
		err = -EINVAL;
		goto out;
	}

	attrs = damon_lru_sort_mon_attrs;
	if (autotune_monitoring_intervals) {
		attrs.sample_interval = 5000;
		attrs.aggr_interval = 100000;
		attrs.intervals_goal.access_bp = 40;
		attrs.intervals_goal.aggrs = 3;
		attrs.intervals_goal.min_sample_us = 5000;
		attrs.intervals_goal.max_sample_us = 10 * 1000 * 1000;
	}
	err = damon_set_attrs(param_ctx, &attrs);
	if (err)
		goto out;

	err = -ENOMEM;
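	/*
	 * Convert the access frequency threshold from permil to a
	 * nr_accesses value, e.g., the default 500 permil becomes 10 with
	 * the default 5 ms sampling and 100 ms aggregation intervals, which
	 * let a region be found accessed at most 20 times per aggregation
	 * interval.
	 */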
	hot_thres = damon_max_nr_accesses(&attrs) *
		hot_thres_access_freq / 1000;
	hot_scheme = damon_lru_sort_new_hot_scheme(hot_thres);
	if (!hot_scheme)
		goto out;

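	/*
	 * Convert the cold time threshold from microseconds to the number of
	 * aggregation intervals, e.g., the default 120 seconds becomes 1,200
	 * with the default 100 ms aggregation interval.
	 */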
	cold_thres = cold_min_age / attrs.aggr_interval;
	cold_scheme = damon_lru_sort_new_cold_scheme(cold_thres);
	if (!cold_scheme) {
		damon_destroy_scheme(hot_scheme);
		goto out;
	}

	damon_set_schemes(param_ctx, &hot_scheme, 1);
	damon_add_scheme(param_ctx, cold_scheme);

	err = damon_lru_sort_add_quota_goals(hot_scheme, cold_scheme);
	if (err)
		goto out;
	err = damon_lru_sort_add_filters(hot_scheme, cold_scheme);
	if (err)
		goto out;

	err = damon_set_region_biggest_system_ram_default(param_target,
					&monitor_region_start,
					&monitor_region_end,
					param_ctx->min_region_sz);
	if (err)
		goto out;
	err = damon_commit_ctx(ctx, param_ctx);
out:
	damon_destroy_ctx(param_ctx);
	return err;
}

static int damon_lru_sort_handle_commit_inputs(void)
{
	int err;

	if (!commit_inputs)
		return 0;

	err = damon_lru_sort_apply_parameters();
	commit_inputs = false;
	return err;
}

static int damon_lru_sort_damon_call_fn(void *arg)
{
	struct damon_ctx *c = arg;
	struct damos *s;

	/* update the stats parameters */
	damon_for_each_scheme(s, c) {
		if (s->action == DAMOS_LRU_PRIO)
			damon_lru_sort_hot_stat = s->stat;
		else if (s->action == DAMOS_LRU_DEPRIO)
			damon_lru_sort_cold_stat = s->stat;
	}

	return damon_lru_sort_handle_commit_inputs();
}

static struct damon_call_control call_control = {
	.fn = damon_lru_sort_damon_call_fn,
	.repeat = true,
};

static int damon_lru_sort_turn(bool on)
{
	int err;

	if (!on) {
		err = damon_stop(&ctx, 1);
		if (!err)
			kdamond_pid = -1;
		return err;
	}

	err = damon_lru_sort_apply_parameters();
	if (err)
		return err;

	err = damon_start(&ctx, 1, true);
	if (err)
		return err;
	kdamond_pid = damon_kdamond_pid(ctx);
	if (kdamond_pid < 0)
		return kdamond_pid;
	return damon_call(ctx, &call_control);
}

static int damon_lru_sort_addr_unit_store(const char *val,
		const struct kernel_param *kp)
{
	unsigned long input_addr_unit;
	int err = kstrtoul(val, 0, &input_addr_unit);

	if (err)
		return err;
	if (!input_addr_unit)
		return -EINVAL;

	addr_unit = input_addr_unit;
	return 0;
}

static const struct kernel_param_ops addr_unit_param_ops = {
	.set = damon_lru_sort_addr_unit_store,
	.get = param_get_ulong,
};

module_param_cb(addr_unit, &addr_unit_param_ops, &addr_unit, 0600);
MODULE_PARM_DESC(addr_unit,
	"Scale factor for DAMON_LRU_SORT to ops address conversion (default: 1)");

static int damon_lru_sort_enabled_store(const char *val,
		const struct kernel_param *kp)
{
	bool is_enabled = enabled;
	bool enable;
	int err;

	err = kstrtobool(val, &enable);
	if (err)
		return err;

	if (is_enabled == enable)
		return 0;

	/* Called before the init function.  The init function will handle this. */
	if (!damon_initialized())
		goto set_param_out;

	err = damon_lru_sort_turn(enable);
	if (err)
		return err;

set_param_out:
	enabled = enable;
	return err;
}

static const struct kernel_param_ops enabled_param_ops = {
	.set = damon_lru_sort_enabled_store,
	.get = param_get_bool,
};

module_param_cb(enabled, &enabled_param_ops, &enabled, 0600);
MODULE_PARM_DESC(enabled,
	"Enable or disable DAMON_LRU_SORT (default: disabled)");

static int __init damon_lru_sort_init(void)
{
	int err;

	if (!damon_initialized()) {
		err = -ENOMEM;
		goto out;
	}
	err = damon_modules_new_paddr_ctx_target(&ctx, &target);
	if (err)
		goto out;

	call_control.data = ctx;

	/* 'enabled' may have been set before this function, probably via command line */
	if (enabled)
		err = damon_lru_sort_turn(true);

out:
	if (err && enabled)
		enabled = false;
	return err;
}

module_init(damon_lru_sort_init);
499