xref: /linux/mm/damon/core.c (revision 2c1ed907520c50326b8f604907a8478b27881a2e)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Data Access Monitor
4  *
5  * Author: SeongJae Park <sj@kernel.org>
6  */
7 
8 #define pr_fmt(fmt) "damon: " fmt
9 
10 #include <linux/damon.h>
11 #include <linux/delay.h>
12 #include <linux/kthread.h>
13 #include <linux/mm.h>
14 #include <linux/psi.h>
15 #include <linux/slab.h>
16 #include <linux/string.h>
17 #include <linux/string_choices.h>
18 
19 #define CREATE_TRACE_POINTS
20 #include <trace/events/damon.h>
21 
22 #ifdef CONFIG_DAMON_KUNIT_TEST
23 #undef DAMON_MIN_REGION
24 #define DAMON_MIN_REGION 1
25 #endif
26 
27 static DEFINE_MUTEX(damon_lock);
28 static int nr_running_ctxs;
29 static bool running_exclusive_ctxs;
30 
31 static DEFINE_MUTEX(damon_ops_lock);
32 static struct damon_operations damon_registered_ops[NR_DAMON_OPS];
33 
34 static struct kmem_cache *damon_region_cache __ro_after_init;
35 
36 /* Should be called under damon_ops_lock with id smaller than NR_DAMON_OPS */
__damon_is_registered_ops(enum damon_ops_id id)37 static bool __damon_is_registered_ops(enum damon_ops_id id)
38 {
39 	struct damon_operations empty_ops = {};
40 
41 	if (!memcmp(&empty_ops, &damon_registered_ops[id], sizeof(empty_ops)))
42 		return false;
43 	return true;
44 }
45 
46 /**
47  * damon_is_registered_ops() - Check if a given damon_operations is registered.
48  * @id:	Id of the damon_operations to check if registered.
49  *
50  * Return: true if the ops is set, false otherwise.
51  */
damon_is_registered_ops(enum damon_ops_id id)52 bool damon_is_registered_ops(enum damon_ops_id id)
53 {
54 	bool registered;
55 
56 	if (id >= NR_DAMON_OPS)
57 		return false;
58 	mutex_lock(&damon_ops_lock);
59 	registered = __damon_is_registered_ops(id);
60 	mutex_unlock(&damon_ops_lock);
61 	return registered;
62 }
63 
64 /**
65  * damon_register_ops() - Register a monitoring operations set to DAMON.
66  * @ops:	monitoring operations set to register.
67  *
68  * This function registers a monitoring operations set of valid &struct
69  * damon_operations->id so that others can find and use them later.
70  *
71  * Return: 0 on success, negative error code otherwise.
72  */
damon_register_ops(struct damon_operations * ops)73 int damon_register_ops(struct damon_operations *ops)
74 {
75 	int err = 0;
76 
77 	if (ops->id >= NR_DAMON_OPS)
78 		return -EINVAL;
79 	mutex_lock(&damon_ops_lock);
80 	/* Fail for already registered ops */
81 	if (__damon_is_registered_ops(ops->id)) {
82 		err = -EINVAL;
83 		goto out;
84 	}
85 	damon_registered_ops[ops->id] = *ops;
86 out:
87 	mutex_unlock(&damon_ops_lock);
88 	return err;
89 }
90 
91 /**
92  * damon_select_ops() - Select a monitoring operations to use with the context.
93  * @ctx:	monitoring context to use the operations.
94  * @id:		id of the registered monitoring operations to select.
95  *
96  * This function finds registered monitoring operations set of @id and make
97  * @ctx to use it.
98  *
99  * Return: 0 on success, negative error code otherwise.
100  */
damon_select_ops(struct damon_ctx * ctx,enum damon_ops_id id)101 int damon_select_ops(struct damon_ctx *ctx, enum damon_ops_id id)
102 {
103 	int err = 0;
104 
105 	if (id >= NR_DAMON_OPS)
106 		return -EINVAL;
107 
108 	mutex_lock(&damon_ops_lock);
109 	if (!__damon_is_registered_ops(id))
110 		err = -EINVAL;
111 	else
112 		ctx->ops = damon_registered_ops[id];
113 	mutex_unlock(&damon_ops_lock);
114 	return err;
115 }
116 
117 /*
118  * Construct a damon_region struct
119  *
120  * Returns the pointer to the new struct if success, or NULL otherwise
121  */
damon_new_region(unsigned long start,unsigned long end)122 struct damon_region *damon_new_region(unsigned long start, unsigned long end)
123 {
124 	struct damon_region *region;
125 
126 	region = kmem_cache_alloc(damon_region_cache, GFP_KERNEL);
127 	if (!region)
128 		return NULL;
129 
130 	region->ar.start = start;
131 	region->ar.end = end;
132 	region->nr_accesses = 0;
133 	region->nr_accesses_bp = 0;
134 	INIT_LIST_HEAD(&region->list);
135 
136 	region->age = 0;
137 	region->last_nr_accesses = 0;
138 
139 	return region;
140 }
141 
damon_add_region(struct damon_region * r,struct damon_target * t)142 void damon_add_region(struct damon_region *r, struct damon_target *t)
143 {
144 	list_add_tail(&r->list, &t->regions_list);
145 	t->nr_regions++;
146 }
147 
damon_del_region(struct damon_region * r,struct damon_target * t)148 static void damon_del_region(struct damon_region *r, struct damon_target *t)
149 {
150 	list_del(&r->list);
151 	t->nr_regions--;
152 }
153 
damon_free_region(struct damon_region * r)154 static void damon_free_region(struct damon_region *r)
155 {
156 	kmem_cache_free(damon_region_cache, r);
157 }
158 
damon_destroy_region(struct damon_region * r,struct damon_target * t)159 void damon_destroy_region(struct damon_region *r, struct damon_target *t)
160 {
161 	damon_del_region(r, t);
162 	damon_free_region(r);
163 }
164 
165 /*
166  * Check whether a region is intersecting an address range
167  *
168  * Returns true if it is.
169  */
damon_intersect(struct damon_region * r,struct damon_addr_range * re)170 static bool damon_intersect(struct damon_region *r,
171 		struct damon_addr_range *re)
172 {
173 	return !(r->ar.end <= re->start || re->end <= r->ar.start);
174 }
175 
176 /*
177  * Fill holes in regions with new regions.
178  */
damon_fill_regions_holes(struct damon_region * first,struct damon_region * last,struct damon_target * t)179 static int damon_fill_regions_holes(struct damon_region *first,
180 		struct damon_region *last, struct damon_target *t)
181 {
182 	struct damon_region *r = first;
183 
184 	damon_for_each_region_from(r, t) {
185 		struct damon_region *next, *newr;
186 
187 		if (r == last)
188 			break;
189 		next = damon_next_region(r);
190 		if (r->ar.end != next->ar.start) {
191 			newr = damon_new_region(r->ar.end, next->ar.start);
192 			if (!newr)
193 				return -ENOMEM;
194 			damon_insert_region(newr, r, next, t);
195 		}
196 	}
197 	return 0;
198 }
199 
200 /*
201  * damon_set_regions() - Set regions of a target for given address ranges.
202  * @t:		the given target.
203  * @ranges:	array of new monitoring target ranges.
204  * @nr_ranges:	length of @ranges.
205  *
206  * This function adds new regions to, or modify existing regions of a
207  * monitoring target to fit in specific ranges.
208  *
209  * Return: 0 if success, or negative error code otherwise.
210  */
damon_set_regions(struct damon_target * t,struct damon_addr_range * ranges,unsigned int nr_ranges)211 int damon_set_regions(struct damon_target *t, struct damon_addr_range *ranges,
212 		unsigned int nr_ranges)
213 {
214 	struct damon_region *r, *next;
215 	unsigned int i;
216 	int err;
217 
218 	/* Remove regions which are not in the new ranges */
219 	damon_for_each_region_safe(r, next, t) {
220 		for (i = 0; i < nr_ranges; i++) {
221 			if (damon_intersect(r, &ranges[i]))
222 				break;
223 		}
224 		if (i == nr_ranges)
225 			damon_destroy_region(r, t);
226 	}
227 
228 	r = damon_first_region(t);
229 	/* Add new regions or resize existing regions to fit in the ranges */
230 	for (i = 0; i < nr_ranges; i++) {
231 		struct damon_region *first = NULL, *last, *newr;
232 		struct damon_addr_range *range;
233 
234 		range = &ranges[i];
235 		/* Get the first/last regions intersecting with the range */
236 		damon_for_each_region_from(r, t) {
237 			if (damon_intersect(r, range)) {
238 				if (!first)
239 					first = r;
240 				last = r;
241 			}
242 			if (r->ar.start >= range->end)
243 				break;
244 		}
245 		if (!first) {
246 			/* no region intersects with this range */
247 			newr = damon_new_region(
248 					ALIGN_DOWN(range->start,
249 						DAMON_MIN_REGION),
250 					ALIGN(range->end, DAMON_MIN_REGION));
251 			if (!newr)
252 				return -ENOMEM;
253 			damon_insert_region(newr, damon_prev_region(r), r, t);
254 		} else {
255 			/* resize intersecting regions to fit in this range */
256 			first->ar.start = ALIGN_DOWN(range->start,
257 					DAMON_MIN_REGION);
258 			last->ar.end = ALIGN(range->end, DAMON_MIN_REGION);
259 
260 			/* fill possible holes in the range */
261 			err = damon_fill_regions_holes(first, last, t);
262 			if (err)
263 				return err;
264 		}
265 	}
266 	return 0;
267 }
268 
damos_new_filter(enum damos_filter_type type,bool matching,bool allow)269 struct damos_filter *damos_new_filter(enum damos_filter_type type,
270 		bool matching, bool allow)
271 {
272 	struct damos_filter *filter;
273 
274 	filter = kmalloc(sizeof(*filter), GFP_KERNEL);
275 	if (!filter)
276 		return NULL;
277 	filter->type = type;
278 	filter->matching = matching;
279 	filter->allow = allow;
280 	INIT_LIST_HEAD(&filter->list);
281 	return filter;
282 }
283 
damos_add_filter(struct damos * s,struct damos_filter * f)284 void damos_add_filter(struct damos *s, struct damos_filter *f)
285 {
286 	list_add_tail(&f->list, &s->filters);
287 }
288 
damos_del_filter(struct damos_filter * f)289 static void damos_del_filter(struct damos_filter *f)
290 {
291 	list_del(&f->list);
292 }
293 
damos_free_filter(struct damos_filter * f)294 static void damos_free_filter(struct damos_filter *f)
295 {
296 	kfree(f);
297 }
298 
damos_destroy_filter(struct damos_filter * f)299 void damos_destroy_filter(struct damos_filter *f)
300 {
301 	damos_del_filter(f);
302 	damos_free_filter(f);
303 }
304 
damos_new_quota_goal(enum damos_quota_goal_metric metric,unsigned long target_value)305 struct damos_quota_goal *damos_new_quota_goal(
306 		enum damos_quota_goal_metric metric,
307 		unsigned long target_value)
308 {
309 	struct damos_quota_goal *goal;
310 
311 	goal = kmalloc(sizeof(*goal), GFP_KERNEL);
312 	if (!goal)
313 		return NULL;
314 	goal->metric = metric;
315 	goal->target_value = target_value;
316 	INIT_LIST_HEAD(&goal->list);
317 	return goal;
318 }
319 
damos_add_quota_goal(struct damos_quota * q,struct damos_quota_goal * g)320 void damos_add_quota_goal(struct damos_quota *q, struct damos_quota_goal *g)
321 {
322 	list_add_tail(&g->list, &q->goals);
323 }
324 
damos_del_quota_goal(struct damos_quota_goal * g)325 static void damos_del_quota_goal(struct damos_quota_goal *g)
326 {
327 	list_del(&g->list);
328 }
329 
damos_free_quota_goal(struct damos_quota_goal * g)330 static void damos_free_quota_goal(struct damos_quota_goal *g)
331 {
332 	kfree(g);
333 }
334 
damos_destroy_quota_goal(struct damos_quota_goal * g)335 void damos_destroy_quota_goal(struct damos_quota_goal *g)
336 {
337 	damos_del_quota_goal(g);
338 	damos_free_quota_goal(g);
339 }
340 
341 /* initialize fields of @quota that normally API users wouldn't set */
damos_quota_init(struct damos_quota * quota)342 static struct damos_quota *damos_quota_init(struct damos_quota *quota)
343 {
344 	quota->esz = 0;
345 	quota->total_charged_sz = 0;
346 	quota->total_charged_ns = 0;
347 	quota->charged_sz = 0;
348 	quota->charged_from = 0;
349 	quota->charge_target_from = NULL;
350 	quota->charge_addr_from = 0;
351 	quota->esz_bp = 0;
352 	return quota;
353 }
354 
damon_new_scheme(struct damos_access_pattern * pattern,enum damos_action action,unsigned long apply_interval_us,struct damos_quota * quota,struct damos_watermarks * wmarks,int target_nid)355 struct damos *damon_new_scheme(struct damos_access_pattern *pattern,
356 			enum damos_action action,
357 			unsigned long apply_interval_us,
358 			struct damos_quota *quota,
359 			struct damos_watermarks *wmarks,
360 			int target_nid)
361 {
362 	struct damos *scheme;
363 
364 	scheme = kmalloc(sizeof(*scheme), GFP_KERNEL);
365 	if (!scheme)
366 		return NULL;
367 	scheme->pattern = *pattern;
368 	scheme->action = action;
369 	scheme->apply_interval_us = apply_interval_us;
370 	/*
371 	 * next_apply_sis will be set when kdamond starts.  While kdamond is
372 	 * running, it will also updated when it is added to the DAMON context,
373 	 * or damon_attrs are updated.
374 	 */
375 	scheme->next_apply_sis = 0;
376 	INIT_LIST_HEAD(&scheme->filters);
377 	scheme->stat = (struct damos_stat){};
378 	INIT_LIST_HEAD(&scheme->list);
379 
380 	scheme->quota = *(damos_quota_init(quota));
381 	/* quota.goals should be separately set by caller */
382 	INIT_LIST_HEAD(&scheme->quota.goals);
383 
384 	scheme->wmarks = *wmarks;
385 	scheme->wmarks.activated = true;
386 
387 	scheme->target_nid = target_nid;
388 
389 	return scheme;
390 }
391 
damos_set_next_apply_sis(struct damos * s,struct damon_ctx * ctx)392 static void damos_set_next_apply_sis(struct damos *s, struct damon_ctx *ctx)
393 {
394 	unsigned long sample_interval = ctx->attrs.sample_interval ?
395 		ctx->attrs.sample_interval : 1;
396 	unsigned long apply_interval = s->apply_interval_us ?
397 		s->apply_interval_us : ctx->attrs.aggr_interval;
398 
399 	s->next_apply_sis = ctx->passed_sample_intervals +
400 		apply_interval / sample_interval;
401 }
402 
damon_add_scheme(struct damon_ctx * ctx,struct damos * s)403 void damon_add_scheme(struct damon_ctx *ctx, struct damos *s)
404 {
405 	list_add_tail(&s->list, &ctx->schemes);
406 	damos_set_next_apply_sis(s, ctx);
407 }
408 
damon_del_scheme(struct damos * s)409 static void damon_del_scheme(struct damos *s)
410 {
411 	list_del(&s->list);
412 }
413 
damon_free_scheme(struct damos * s)414 static void damon_free_scheme(struct damos *s)
415 {
416 	kfree(s);
417 }
418 
damon_destroy_scheme(struct damos * s)419 void damon_destroy_scheme(struct damos *s)
420 {
421 	struct damos_quota_goal *g, *g_next;
422 	struct damos_filter *f, *next;
423 
424 	damos_for_each_quota_goal_safe(g, g_next, &s->quota)
425 		damos_destroy_quota_goal(g);
426 
427 	damos_for_each_filter_safe(f, next, s)
428 		damos_destroy_filter(f);
429 	damon_del_scheme(s);
430 	damon_free_scheme(s);
431 }
432 
433 /*
434  * Construct a damon_target struct
435  *
436  * Returns the pointer to the new struct if success, or NULL otherwise
437  */
damon_new_target(void)438 struct damon_target *damon_new_target(void)
439 {
440 	struct damon_target *t;
441 
442 	t = kmalloc(sizeof(*t), GFP_KERNEL);
443 	if (!t)
444 		return NULL;
445 
446 	t->pid = NULL;
447 	t->nr_regions = 0;
448 	INIT_LIST_HEAD(&t->regions_list);
449 	INIT_LIST_HEAD(&t->list);
450 
451 	return t;
452 }
453 
damon_add_target(struct damon_ctx * ctx,struct damon_target * t)454 void damon_add_target(struct damon_ctx *ctx, struct damon_target *t)
455 {
456 	list_add_tail(&t->list, &ctx->adaptive_targets);
457 }
458 
damon_targets_empty(struct damon_ctx * ctx)459 bool damon_targets_empty(struct damon_ctx *ctx)
460 {
461 	return list_empty(&ctx->adaptive_targets);
462 }
463 
damon_del_target(struct damon_target * t)464 static void damon_del_target(struct damon_target *t)
465 {
466 	list_del(&t->list);
467 }
468 
damon_free_target(struct damon_target * t)469 void damon_free_target(struct damon_target *t)
470 {
471 	struct damon_region *r, *next;
472 
473 	damon_for_each_region_safe(r, next, t)
474 		damon_free_region(r);
475 	kfree(t);
476 }
477 
damon_destroy_target(struct damon_target * t)478 void damon_destroy_target(struct damon_target *t)
479 {
480 	damon_del_target(t);
481 	damon_free_target(t);
482 }
483 
damon_nr_regions(struct damon_target * t)484 unsigned int damon_nr_regions(struct damon_target *t)
485 {
486 	return t->nr_regions;
487 }
488 
damon_new_ctx(void)489 struct damon_ctx *damon_new_ctx(void)
490 {
491 	struct damon_ctx *ctx;
492 
493 	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
494 	if (!ctx)
495 		return NULL;
496 
497 	init_completion(&ctx->kdamond_started);
498 
499 	ctx->attrs.sample_interval = 5 * 1000;
500 	ctx->attrs.aggr_interval = 100 * 1000;
501 	ctx->attrs.ops_update_interval = 60 * 1000 * 1000;
502 
503 	ctx->passed_sample_intervals = 0;
504 	/* These will be set from kdamond_init_intervals_sis() */
505 	ctx->next_aggregation_sis = 0;
506 	ctx->next_ops_update_sis = 0;
507 
508 	mutex_init(&ctx->kdamond_lock);
509 	mutex_init(&ctx->call_control_lock);
510 	mutex_init(&ctx->walk_control_lock);
511 
512 	ctx->attrs.min_nr_regions = 10;
513 	ctx->attrs.max_nr_regions = 1000;
514 
515 	INIT_LIST_HEAD(&ctx->adaptive_targets);
516 	INIT_LIST_HEAD(&ctx->schemes);
517 
518 	return ctx;
519 }
520 
damon_destroy_targets(struct damon_ctx * ctx)521 static void damon_destroy_targets(struct damon_ctx *ctx)
522 {
523 	struct damon_target *t, *next_t;
524 
525 	if (ctx->ops.cleanup) {
526 		ctx->ops.cleanup(ctx);
527 		return;
528 	}
529 
530 	damon_for_each_target_safe(t, next_t, ctx)
531 		damon_destroy_target(t);
532 }
533 
damon_destroy_ctx(struct damon_ctx * ctx)534 void damon_destroy_ctx(struct damon_ctx *ctx)
535 {
536 	struct damos *s, *next_s;
537 
538 	damon_destroy_targets(ctx);
539 
540 	damon_for_each_scheme_safe(s, next_s, ctx)
541 		damon_destroy_scheme(s);
542 
543 	kfree(ctx);
544 }
545 
damon_age_for_new_attrs(unsigned int age,struct damon_attrs * old_attrs,struct damon_attrs * new_attrs)546 static unsigned int damon_age_for_new_attrs(unsigned int age,
547 		struct damon_attrs *old_attrs, struct damon_attrs *new_attrs)
548 {
549 	return age * old_attrs->aggr_interval / new_attrs->aggr_interval;
550 }
551 
552 /* convert access ratio in bp (per 10,000) to nr_accesses */
damon_accesses_bp_to_nr_accesses(unsigned int accesses_bp,struct damon_attrs * attrs)553 static unsigned int damon_accesses_bp_to_nr_accesses(
554 		unsigned int accesses_bp, struct damon_attrs *attrs)
555 {
556 	return accesses_bp * damon_max_nr_accesses(attrs) / 10000;
557 }
558 
559 /*
560  * Convert nr_accesses to access ratio in bp (per 10,000).
561  *
562  * Callers should ensure attrs.aggr_interval is not zero, like
563  * damon_update_monitoring_results() does .  Otherwise, divide-by-zero would
564  * happen.
565  */
damon_nr_accesses_to_accesses_bp(unsigned int nr_accesses,struct damon_attrs * attrs)566 static unsigned int damon_nr_accesses_to_accesses_bp(
567 		unsigned int nr_accesses, struct damon_attrs *attrs)
568 {
569 	return nr_accesses * 10000 / damon_max_nr_accesses(attrs);
570 }
571 
damon_nr_accesses_for_new_attrs(unsigned int nr_accesses,struct damon_attrs * old_attrs,struct damon_attrs * new_attrs)572 static unsigned int damon_nr_accesses_for_new_attrs(unsigned int nr_accesses,
573 		struct damon_attrs *old_attrs, struct damon_attrs *new_attrs)
574 {
575 	return damon_accesses_bp_to_nr_accesses(
576 			damon_nr_accesses_to_accesses_bp(
577 				nr_accesses, old_attrs),
578 			new_attrs);
579 }
580 
damon_update_monitoring_result(struct damon_region * r,struct damon_attrs * old_attrs,struct damon_attrs * new_attrs)581 static void damon_update_monitoring_result(struct damon_region *r,
582 		struct damon_attrs *old_attrs, struct damon_attrs *new_attrs)
583 {
584 	r->nr_accesses = damon_nr_accesses_for_new_attrs(r->nr_accesses,
585 			old_attrs, new_attrs);
586 	r->nr_accesses_bp = r->nr_accesses * 10000;
587 	r->age = damon_age_for_new_attrs(r->age, old_attrs, new_attrs);
588 }
589 
590 /*
591  * region->nr_accesses is the number of sampling intervals in the last
592  * aggregation interval that access to the region has found, and region->age is
593  * the number of aggregation intervals that its access pattern has maintained.
594  * For the reason, the real meaning of the two fields depend on current
595  * sampling interval and aggregation interval.  This function updates
596  * ->nr_accesses and ->age of given damon_ctx's regions for new damon_attrs.
597  */
damon_update_monitoring_results(struct damon_ctx * ctx,struct damon_attrs * new_attrs)598 static void damon_update_monitoring_results(struct damon_ctx *ctx,
599 		struct damon_attrs *new_attrs)
600 {
601 	struct damon_attrs *old_attrs = &ctx->attrs;
602 	struct damon_target *t;
603 	struct damon_region *r;
604 
605 	/* if any interval is zero, simply forgive conversion */
606 	if (!old_attrs->sample_interval || !old_attrs->aggr_interval ||
607 			!new_attrs->sample_interval ||
608 			!new_attrs->aggr_interval)
609 		return;
610 
611 	damon_for_each_target(t, ctx)
612 		damon_for_each_region(r, t)
613 			damon_update_monitoring_result(
614 					r, old_attrs, new_attrs);
615 }
616 
617 /**
618  * damon_set_attrs() - Set attributes for the monitoring.
619  * @ctx:		monitoring context
620  * @attrs:		monitoring attributes
621  *
622  * This function should be called while the kdamond is not running, or an
623  * access check results aggregation is not ongoing (e.g., from
624  * &struct damon_callback->after_aggregation or
625  * &struct damon_callback->after_wmarks_check callbacks).
626  *
627  * Every time interval is in micro-seconds.
628  *
629  * Return: 0 on success, negative error code otherwise.
630  */
damon_set_attrs(struct damon_ctx * ctx,struct damon_attrs * attrs)631 int damon_set_attrs(struct damon_ctx *ctx, struct damon_attrs *attrs)
632 {
633 	unsigned long sample_interval = attrs->sample_interval ?
634 		attrs->sample_interval : 1;
635 	struct damos *s;
636 
637 	if (attrs->min_nr_regions < 3)
638 		return -EINVAL;
639 	if (attrs->min_nr_regions > attrs->max_nr_regions)
640 		return -EINVAL;
641 	if (attrs->sample_interval > attrs->aggr_interval)
642 		return -EINVAL;
643 
644 	ctx->next_aggregation_sis = ctx->passed_sample_intervals +
645 		attrs->aggr_interval / sample_interval;
646 	ctx->next_ops_update_sis = ctx->passed_sample_intervals +
647 		attrs->ops_update_interval / sample_interval;
648 
649 	damon_update_monitoring_results(ctx, attrs);
650 	ctx->attrs = *attrs;
651 
652 	damon_for_each_scheme(s, ctx)
653 		damos_set_next_apply_sis(s, ctx);
654 
655 	return 0;
656 }
657 
658 /**
659  * damon_set_schemes() - Set data access monitoring based operation schemes.
660  * @ctx:	monitoring context
661  * @schemes:	array of the schemes
662  * @nr_schemes:	number of entries in @schemes
663  *
664  * This function should not be called while the kdamond of the context is
665  * running.
666  */
damon_set_schemes(struct damon_ctx * ctx,struct damos ** schemes,ssize_t nr_schemes)667 void damon_set_schemes(struct damon_ctx *ctx, struct damos **schemes,
668 			ssize_t nr_schemes)
669 {
670 	struct damos *s, *next;
671 	ssize_t i;
672 
673 	damon_for_each_scheme_safe(s, next, ctx)
674 		damon_destroy_scheme(s);
675 	for (i = 0; i < nr_schemes; i++)
676 		damon_add_scheme(ctx, schemes[i]);
677 }
678 
damos_nth_quota_goal(int n,struct damos_quota * q)679 static struct damos_quota_goal *damos_nth_quota_goal(
680 		int n, struct damos_quota *q)
681 {
682 	struct damos_quota_goal *goal;
683 	int i = 0;
684 
685 	damos_for_each_quota_goal(goal, q) {
686 		if (i++ == n)
687 			return goal;
688 	}
689 	return NULL;
690 }
691 
damos_commit_quota_goal(struct damos_quota_goal * dst,struct damos_quota_goal * src)692 static void damos_commit_quota_goal(
693 		struct damos_quota_goal *dst, struct damos_quota_goal *src)
694 {
695 	dst->metric = src->metric;
696 	dst->target_value = src->target_value;
697 	if (dst->metric == DAMOS_QUOTA_USER_INPUT)
698 		dst->current_value = src->current_value;
699 	/* keep last_psi_total as is, since it will be updated in next cycle */
700 }
701 
702 /**
703  * damos_commit_quota_goals() - Commit DAMOS quota goals to another quota.
704  * @dst:	The commit destination DAMOS quota.
705  * @src:	The commit source DAMOS quota.
706  *
707  * Copies user-specified parameters for quota goals from @src to @dst.  Users
708  * should use this function for quota goals-level parameters update of running
709  * DAMON contexts, instead of manual in-place updates.
710  *
711  * This function should be called from parameters-update safe context, like
712  * DAMON callbacks.
713  */
damos_commit_quota_goals(struct damos_quota * dst,struct damos_quota * src)714 int damos_commit_quota_goals(struct damos_quota *dst, struct damos_quota *src)
715 {
716 	struct damos_quota_goal *dst_goal, *next, *src_goal, *new_goal;
717 	int i = 0, j = 0;
718 
719 	damos_for_each_quota_goal_safe(dst_goal, next, dst) {
720 		src_goal = damos_nth_quota_goal(i++, src);
721 		if (src_goal)
722 			damos_commit_quota_goal(dst_goal, src_goal);
723 		else
724 			damos_destroy_quota_goal(dst_goal);
725 	}
726 	damos_for_each_quota_goal_safe(src_goal, next, src) {
727 		if (j++ < i)
728 			continue;
729 		new_goal = damos_new_quota_goal(
730 				src_goal->metric, src_goal->target_value);
731 		if (!new_goal)
732 			return -ENOMEM;
733 		damos_add_quota_goal(dst, new_goal);
734 	}
735 	return 0;
736 }
737 
damos_commit_quota(struct damos_quota * dst,struct damos_quota * src)738 static int damos_commit_quota(struct damos_quota *dst, struct damos_quota *src)
739 {
740 	int err;
741 
742 	dst->reset_interval = src->reset_interval;
743 	dst->ms = src->ms;
744 	dst->sz = src->sz;
745 	err = damos_commit_quota_goals(dst, src);
746 	if (err)
747 		return err;
748 	dst->weight_sz = src->weight_sz;
749 	dst->weight_nr_accesses = src->weight_nr_accesses;
750 	dst->weight_age = src->weight_age;
751 	return 0;
752 }
753 
damos_nth_filter(int n,struct damos * s)754 static struct damos_filter *damos_nth_filter(int n, struct damos *s)
755 {
756 	struct damos_filter *filter;
757 	int i = 0;
758 
759 	damos_for_each_filter(filter, s) {
760 		if (i++ == n)
761 			return filter;
762 	}
763 	return NULL;
764 }
765 
damos_commit_filter_arg(struct damos_filter * dst,struct damos_filter * src)766 static void damos_commit_filter_arg(
767 		struct damos_filter *dst, struct damos_filter *src)
768 {
769 	switch (dst->type) {
770 	case DAMOS_FILTER_TYPE_MEMCG:
771 		dst->memcg_id = src->memcg_id;
772 		break;
773 	case DAMOS_FILTER_TYPE_ADDR:
774 		dst->addr_range = src->addr_range;
775 		break;
776 	case DAMOS_FILTER_TYPE_TARGET:
777 		dst->target_idx = src->target_idx;
778 		break;
779 	default:
780 		break;
781 	}
782 }
783 
damos_commit_filter(struct damos_filter * dst,struct damos_filter * src)784 static void damos_commit_filter(
785 		struct damos_filter *dst, struct damos_filter *src)
786 {
787 	dst->type = src->type;
788 	dst->matching = src->matching;
789 	damos_commit_filter_arg(dst, src);
790 }
791 
damos_commit_filters(struct damos * dst,struct damos * src)792 static int damos_commit_filters(struct damos *dst, struct damos *src)
793 {
794 	struct damos_filter *dst_filter, *next, *src_filter, *new_filter;
795 	int i = 0, j = 0;
796 
797 	damos_for_each_filter_safe(dst_filter, next, dst) {
798 		src_filter = damos_nth_filter(i++, src);
799 		if (src_filter)
800 			damos_commit_filter(dst_filter, src_filter);
801 		else
802 			damos_destroy_filter(dst_filter);
803 	}
804 
805 	damos_for_each_filter_safe(src_filter, next, src) {
806 		if (j++ < i)
807 			continue;
808 
809 		new_filter = damos_new_filter(
810 				src_filter->type, src_filter->matching,
811 				src_filter->allow);
812 		if (!new_filter)
813 			return -ENOMEM;
814 		damos_commit_filter_arg(new_filter, src_filter);
815 		damos_add_filter(dst, new_filter);
816 	}
817 	return 0;
818 }
819 
damon_nth_scheme(int n,struct damon_ctx * ctx)820 static struct damos *damon_nth_scheme(int n, struct damon_ctx *ctx)
821 {
822 	struct damos *s;
823 	int i = 0;
824 
825 	damon_for_each_scheme(s, ctx) {
826 		if (i++ == n)
827 			return s;
828 	}
829 	return NULL;
830 }
831 
damos_commit(struct damos * dst,struct damos * src)832 static int damos_commit(struct damos *dst, struct damos *src)
833 {
834 	int err;
835 
836 	dst->pattern = src->pattern;
837 	dst->action = src->action;
838 	dst->apply_interval_us = src->apply_interval_us;
839 
840 	err = damos_commit_quota(&dst->quota, &src->quota);
841 	if (err)
842 		return err;
843 
844 	dst->wmarks = src->wmarks;
845 
846 	err = damos_commit_filters(dst, src);
847 	return err;
848 }
849 
damon_commit_schemes(struct damon_ctx * dst,struct damon_ctx * src)850 static int damon_commit_schemes(struct damon_ctx *dst, struct damon_ctx *src)
851 {
852 	struct damos *dst_scheme, *next, *src_scheme, *new_scheme;
853 	int i = 0, j = 0, err;
854 
855 	damon_for_each_scheme_safe(dst_scheme, next, dst) {
856 		src_scheme = damon_nth_scheme(i++, src);
857 		if (src_scheme) {
858 			err = damos_commit(dst_scheme, src_scheme);
859 			if (err)
860 				return err;
861 		} else {
862 			damon_destroy_scheme(dst_scheme);
863 		}
864 	}
865 
866 	damon_for_each_scheme_safe(src_scheme, next, src) {
867 		if (j++ < i)
868 			continue;
869 		new_scheme = damon_new_scheme(&src_scheme->pattern,
870 				src_scheme->action,
871 				src_scheme->apply_interval_us,
872 				&src_scheme->quota, &src_scheme->wmarks,
873 				NUMA_NO_NODE);
874 		if (!new_scheme)
875 			return -ENOMEM;
876 		err = damos_commit(new_scheme, src_scheme);
877 		if (err) {
878 			damon_destroy_scheme(new_scheme);
879 			return err;
880 		}
881 		damon_add_scheme(dst, new_scheme);
882 	}
883 	return 0;
884 }
885 
damon_nth_target(int n,struct damon_ctx * ctx)886 static struct damon_target *damon_nth_target(int n, struct damon_ctx *ctx)
887 {
888 	struct damon_target *t;
889 	int i = 0;
890 
891 	damon_for_each_target(t, ctx) {
892 		if (i++ == n)
893 			return t;
894 	}
895 	return NULL;
896 }
897 
898 /*
899  * The caller should ensure the regions of @src are
900  * 1. valid (end >= src) and
901  * 2. sorted by starting address.
902  *
903  * If @src has no region, @dst keeps current regions.
904  */
damon_commit_target_regions(struct damon_target * dst,struct damon_target * src)905 static int damon_commit_target_regions(
906 		struct damon_target *dst, struct damon_target *src)
907 {
908 	struct damon_region *src_region;
909 	struct damon_addr_range *ranges;
910 	int i = 0, err;
911 
912 	damon_for_each_region(src_region, src)
913 		i++;
914 	if (!i)
915 		return 0;
916 
917 	ranges = kmalloc_array(i, sizeof(*ranges), GFP_KERNEL | __GFP_NOWARN);
918 	if (!ranges)
919 		return -ENOMEM;
920 	i = 0;
921 	damon_for_each_region(src_region, src)
922 		ranges[i++] = src_region->ar;
923 	err = damon_set_regions(dst, ranges, i);
924 	kfree(ranges);
925 	return err;
926 }
927 
damon_commit_target(struct damon_target * dst,bool dst_has_pid,struct damon_target * src,bool src_has_pid)928 static int damon_commit_target(
929 		struct damon_target *dst, bool dst_has_pid,
930 		struct damon_target *src, bool src_has_pid)
931 {
932 	int err;
933 
934 	err = damon_commit_target_regions(dst, src);
935 	if (err)
936 		return err;
937 	if (dst_has_pid)
938 		put_pid(dst->pid);
939 	if (src_has_pid)
940 		get_pid(src->pid);
941 	dst->pid = src->pid;
942 	return 0;
943 }
944 
damon_commit_targets(struct damon_ctx * dst,struct damon_ctx * src)945 static int damon_commit_targets(
946 		struct damon_ctx *dst, struct damon_ctx *src)
947 {
948 	struct damon_target *dst_target, *next, *src_target, *new_target;
949 	int i = 0, j = 0, err;
950 
951 	damon_for_each_target_safe(dst_target, next, dst) {
952 		src_target = damon_nth_target(i++, src);
953 		if (src_target) {
954 			err = damon_commit_target(
955 					dst_target, damon_target_has_pid(dst),
956 					src_target, damon_target_has_pid(src));
957 			if (err)
958 				return err;
959 		} else {
960 			if (damon_target_has_pid(dst))
961 				put_pid(dst_target->pid);
962 			damon_destroy_target(dst_target);
963 		}
964 	}
965 
966 	damon_for_each_target_safe(src_target, next, src) {
967 		if (j++ < i)
968 			continue;
969 		new_target = damon_new_target();
970 		if (!new_target)
971 			return -ENOMEM;
972 		err = damon_commit_target(new_target, false,
973 				src_target, damon_target_has_pid(src));
974 		if (err) {
975 			damon_destroy_target(new_target);
976 			return err;
977 		}
978 		damon_add_target(dst, new_target);
979 	}
980 	return 0;
981 }
982 
983 /**
984  * damon_commit_ctx() - Commit parameters of a DAMON context to another.
985  * @dst:	The commit destination DAMON context.
986  * @src:	The commit source DAMON context.
987  *
988  * This function copies user-specified parameters from @src to @dst and update
989  * the internal status and results accordingly.  Users should use this function
990  * for context-level parameters update of running context, instead of manual
991  * in-place updates.
992  *
993  * This function should be called from parameters-update safe context, like
994  * DAMON callbacks.
995  */
damon_commit_ctx(struct damon_ctx * dst,struct damon_ctx * src)996 int damon_commit_ctx(struct damon_ctx *dst, struct damon_ctx *src)
997 {
998 	int err;
999 
1000 	err = damon_commit_schemes(dst, src);
1001 	if (err)
1002 		return err;
1003 	err = damon_commit_targets(dst, src);
1004 	if (err)
1005 		return err;
1006 	/*
1007 	 * schemes and targets should be updated first, since
1008 	 * 1. damon_set_attrs() updates monitoring results of targets and
1009 	 * next_apply_sis of schemes, and
1010 	 * 2. ops update should be done after pid handling is done (target
1011 	 *    committing require putting pids).
1012 	 */
1013 	err = damon_set_attrs(dst, &src->attrs);
1014 	if (err)
1015 		return err;
1016 	dst->ops = src->ops;
1017 
1018 	return 0;
1019 }
1020 
1021 /**
1022  * damon_nr_running_ctxs() - Return number of currently running contexts.
1023  */
damon_nr_running_ctxs(void)1024 int damon_nr_running_ctxs(void)
1025 {
1026 	int nr_ctxs;
1027 
1028 	mutex_lock(&damon_lock);
1029 	nr_ctxs = nr_running_ctxs;
1030 	mutex_unlock(&damon_lock);
1031 
1032 	return nr_ctxs;
1033 }
1034 
1035 /* Returns the size upper limit for each monitoring region */
damon_region_sz_limit(struct damon_ctx * ctx)1036 static unsigned long damon_region_sz_limit(struct damon_ctx *ctx)
1037 {
1038 	struct damon_target *t;
1039 	struct damon_region *r;
1040 	unsigned long sz = 0;
1041 
1042 	damon_for_each_target(t, ctx) {
1043 		damon_for_each_region(r, t)
1044 			sz += damon_sz_region(r);
1045 	}
1046 
1047 	if (ctx->attrs.min_nr_regions)
1048 		sz /= ctx->attrs.min_nr_regions;
1049 	if (sz < DAMON_MIN_REGION)
1050 		sz = DAMON_MIN_REGION;
1051 
1052 	return sz;
1053 }
1054 
1055 static int kdamond_fn(void *data);
1056 
1057 /*
1058  * __damon_start() - Starts monitoring with given context.
1059  * @ctx:	monitoring context
1060  *
1061  * This function should be called while damon_lock is hold.
1062  *
1063  * Return: 0 on success, negative error code otherwise.
1064  */
__damon_start(struct damon_ctx * ctx)1065 static int __damon_start(struct damon_ctx *ctx)
1066 {
1067 	int err = -EBUSY;
1068 
1069 	mutex_lock(&ctx->kdamond_lock);
1070 	if (!ctx->kdamond) {
1071 		err = 0;
1072 		reinit_completion(&ctx->kdamond_started);
1073 		ctx->kdamond = kthread_run(kdamond_fn, ctx, "kdamond.%d",
1074 				nr_running_ctxs);
1075 		if (IS_ERR(ctx->kdamond)) {
1076 			err = PTR_ERR(ctx->kdamond);
1077 			ctx->kdamond = NULL;
1078 		} else {
1079 			wait_for_completion(&ctx->kdamond_started);
1080 		}
1081 	}
1082 	mutex_unlock(&ctx->kdamond_lock);
1083 
1084 	return err;
1085 }
1086 
1087 /**
1088  * damon_start() - Starts the monitorings for a given group of contexts.
1089  * @ctxs:	an array of the pointers for contexts to start monitoring
1090  * @nr_ctxs:	size of @ctxs
1091  * @exclusive:	exclusiveness of this contexts group
1092  *
1093  * This function starts a group of monitoring threads for a group of monitoring
1094  * contexts.  One thread per each context is created and run in parallel.  The
1095  * caller should handle synchronization between the threads by itself.  If
1096  * @exclusive is true and a group of threads that created by other
1097  * 'damon_start()' call is currently running, this function does nothing but
1098  * returns -EBUSY.
1099  *
1100  * Return: 0 on success, negative error code otherwise.
1101  */
damon_start(struct damon_ctx ** ctxs,int nr_ctxs,bool exclusive)1102 int damon_start(struct damon_ctx **ctxs, int nr_ctxs, bool exclusive)
1103 {
1104 	int i;
1105 	int err = 0;
1106 
1107 	mutex_lock(&damon_lock);
1108 	if ((exclusive && nr_running_ctxs) ||
1109 			(!exclusive && running_exclusive_ctxs)) {
1110 		mutex_unlock(&damon_lock);
1111 		return -EBUSY;
1112 	}
1113 
1114 	for (i = 0; i < nr_ctxs; i++) {
1115 		err = __damon_start(ctxs[i]);
1116 		if (err)
1117 			break;
1118 		nr_running_ctxs++;
1119 	}
1120 	if (exclusive && nr_running_ctxs)
1121 		running_exclusive_ctxs = true;
1122 	mutex_unlock(&damon_lock);
1123 
1124 	return err;
1125 }
1126 
1127 /*
1128  * __damon_stop() - Stops monitoring of a given context.
1129  * @ctx:	monitoring context
1130  *
1131  * Return: 0 on success, negative error code otherwise.
1132  */
__damon_stop(struct damon_ctx * ctx)1133 static int __damon_stop(struct damon_ctx *ctx)
1134 {
1135 	struct task_struct *tsk;
1136 
1137 	mutex_lock(&ctx->kdamond_lock);
1138 	tsk = ctx->kdamond;
1139 	if (tsk) {
1140 		get_task_struct(tsk);
1141 		mutex_unlock(&ctx->kdamond_lock);
1142 		kthread_stop_put(tsk);
1143 		return 0;
1144 	}
1145 	mutex_unlock(&ctx->kdamond_lock);
1146 
1147 	return -EPERM;
1148 }
1149 
1150 /**
1151  * damon_stop() - Stops the monitorings for a given group of contexts.
1152  * @ctxs:	an array of the pointers for contexts to stop monitoring
1153  * @nr_ctxs:	size of @ctxs
1154  *
1155  * Return: 0 on success, negative error code otherwise.
1156  */
damon_stop(struct damon_ctx ** ctxs,int nr_ctxs)1157 int damon_stop(struct damon_ctx **ctxs, int nr_ctxs)
1158 {
1159 	int i, err = 0;
1160 
1161 	for (i = 0; i < nr_ctxs; i++) {
1162 		/* nr_running_ctxs is decremented in kdamond_fn */
1163 		err = __damon_stop(ctxs[i]);
1164 		if (err)
1165 			break;
1166 	}
1167 	return err;
1168 }
1169 
damon_is_running(struct damon_ctx * ctx)1170 static bool damon_is_running(struct damon_ctx *ctx)
1171 {
1172 	bool running;
1173 
1174 	mutex_lock(&ctx->kdamond_lock);
1175 	running = ctx->kdamond != NULL;
1176 	mutex_unlock(&ctx->kdamond_lock);
1177 	return running;
1178 }
1179 
1180 /**
1181  * damon_call() - Invoke a given function on DAMON worker thread (kdamond).
1182  * @ctx:	DAMON context to call the function for.
1183  * @control:	Control variable of the call request.
1184  *
1185  * Ask DAMON worker thread (kdamond) of @ctx to call a function with an
1186  * argument data that respectively passed via &damon_call_control->fn and
1187  * &damon_call_control->data of @control, and wait until the kdamond finishes
1188  * handling of the request.
1189  *
1190  * The kdamond executes the function with the argument in the main loop, just
1191  * after a sampling of the iteration is finished.  The function can hence
1192  * safely access the internal data of the &struct damon_ctx without additional
1193  * synchronization.  The return value of the function will be saved in
1194  * &damon_call_control->return_code.
1195  *
1196  * Return: 0 on success, negative error code otherwise.
1197  */
damon_call(struct damon_ctx * ctx,struct damon_call_control * control)1198 int damon_call(struct damon_ctx *ctx, struct damon_call_control *control)
1199 {
1200 	init_completion(&control->completion);
1201 	control->canceled = false;
1202 
1203 	mutex_lock(&ctx->call_control_lock);
1204 	if (ctx->call_control) {
1205 		mutex_unlock(&ctx->call_control_lock);
1206 		return -EBUSY;
1207 	}
1208 	ctx->call_control = control;
1209 	mutex_unlock(&ctx->call_control_lock);
1210 	if (!damon_is_running(ctx))
1211 		return -EINVAL;
1212 	wait_for_completion(&control->completion);
1213 	if (control->canceled)
1214 		return -ECANCELED;
1215 	return 0;
1216 }
1217 
1218 /**
1219  * damos_walk() - Invoke a given functions while DAMOS walk regions.
1220  * @ctx:	DAMON context to call the functions for.
1221  * @control:	Control variable of the walk request.
1222  *
1223  * Ask DAMON worker thread (kdamond) of @ctx to call a function for each region
1224  * that the kdamond will apply DAMOS action to, and wait until the kdamond
1225  * finishes handling of the request.
1226  *
1227  * The kdamond executes the given function in the main loop, for each region
1228  * just after it applied any DAMOS actions of @ctx to it.  The invocation is
1229  * made only within one &damos->apply_interval_us since damos_walk()
1230  * invocation, for each scheme.  The given callback function can hence safely
1231  * access the internal data of &struct damon_ctx and &struct damon_region that
1232  * each of the scheme will apply the action for next interval, without
1233  * additional synchronizations against the kdamond.  If every scheme of @ctx
1234  * passed at least one &damos->apply_interval_us, kdamond marks the request as
1235  * completed so that damos_walk() can wakeup and return.
1236  *
1237  * Return: 0 on success, negative error code otherwise.
1238  */
damos_walk(struct damon_ctx * ctx,struct damos_walk_control * control)1239 int damos_walk(struct damon_ctx *ctx, struct damos_walk_control *control)
1240 {
1241 	init_completion(&control->completion);
1242 	control->canceled = false;
1243 	mutex_lock(&ctx->walk_control_lock);
1244 	if (ctx->walk_control) {
1245 		mutex_unlock(&ctx->walk_control_lock);
1246 		return -EBUSY;
1247 	}
1248 	ctx->walk_control = control;
1249 	mutex_unlock(&ctx->walk_control_lock);
1250 	if (!damon_is_running(ctx))
1251 		return -EINVAL;
1252 	wait_for_completion(&control->completion);
1253 	if (control->canceled)
1254 		return -ECANCELED;
1255 	return 0;
1256 }
1257 
1258 /*
1259  * Reset the aggregated monitoring results ('nr_accesses' of each region).
1260  */
kdamond_reset_aggregated(struct damon_ctx * c)1261 static void kdamond_reset_aggregated(struct damon_ctx *c)
1262 {
1263 	struct damon_target *t;
1264 	unsigned int ti = 0;	/* target's index */
1265 
1266 	damon_for_each_target(t, c) {
1267 		struct damon_region *r;
1268 
1269 		damon_for_each_region(r, t) {
1270 			trace_damon_aggregated(ti, r, damon_nr_regions(t));
1271 			r->last_nr_accesses = r->nr_accesses;
1272 			r->nr_accesses = 0;
1273 		}
1274 		ti++;
1275 	}
1276 }
1277 
1278 static void damon_split_region_at(struct damon_target *t,
1279 				  struct damon_region *r, unsigned long sz_r);
1280 
__damos_valid_target(struct damon_region * r,struct damos * s)1281 static bool __damos_valid_target(struct damon_region *r, struct damos *s)
1282 {
1283 	unsigned long sz;
1284 	unsigned int nr_accesses = r->nr_accesses_bp / 10000;
1285 
1286 	sz = damon_sz_region(r);
1287 	return s->pattern.min_sz_region <= sz &&
1288 		sz <= s->pattern.max_sz_region &&
1289 		s->pattern.min_nr_accesses <= nr_accesses &&
1290 		nr_accesses <= s->pattern.max_nr_accesses &&
1291 		s->pattern.min_age_region <= r->age &&
1292 		r->age <= s->pattern.max_age_region;
1293 }
1294 
damos_valid_target(struct damon_ctx * c,struct damon_target * t,struct damon_region * r,struct damos * s)1295 static bool damos_valid_target(struct damon_ctx *c, struct damon_target *t,
1296 		struct damon_region *r, struct damos *s)
1297 {
1298 	bool ret = __damos_valid_target(r, s);
1299 
1300 	if (!ret || !s->quota.esz || !c->ops.get_scheme_score)
1301 		return ret;
1302 
1303 	return c->ops.get_scheme_score(c, t, r, s) >= s->quota.min_score;
1304 }
1305 
1306 /*
1307  * damos_skip_charged_region() - Check if the given region or starting part of
1308  * it is already charged for the DAMOS quota.
1309  * @t:	The target of the region.
1310  * @rp:	The pointer to the region.
1311  * @s:	The scheme to be applied.
1312  *
1313  * If a quota of a scheme has exceeded in a quota charge window, the scheme's
1314  * action would applied to only a part of the target access pattern fulfilling
1315  * regions.  To avoid applying the scheme action to only already applied
1316  * regions, DAMON skips applying the scheme action to the regions that charged
1317  * in the previous charge window.
1318  *
1319  * This function checks if a given region should be skipped or not for the
1320  * reason.  If only the starting part of the region has previously charged,
1321  * this function splits the region into two so that the second one covers the
1322  * area that not charged in the previous charge widnow and saves the second
1323  * region in *rp and returns false, so that the caller can apply DAMON action
1324  * to the second one.
1325  *
1326  * Return: true if the region should be entirely skipped, false otherwise.
1327  */
damos_skip_charged_region(struct damon_target * t,struct damon_region ** rp,struct damos * s)1328 static bool damos_skip_charged_region(struct damon_target *t,
1329 		struct damon_region **rp, struct damos *s)
1330 {
1331 	struct damon_region *r = *rp;
1332 	struct damos_quota *quota = &s->quota;
1333 	unsigned long sz_to_skip;
1334 
1335 	/* Skip previously charged regions */
1336 	if (quota->charge_target_from) {
1337 		if (t != quota->charge_target_from)
1338 			return true;
1339 		if (r == damon_last_region(t)) {
1340 			quota->charge_target_from = NULL;
1341 			quota->charge_addr_from = 0;
1342 			return true;
1343 		}
1344 		if (quota->charge_addr_from &&
1345 				r->ar.end <= quota->charge_addr_from)
1346 			return true;
1347 
1348 		if (quota->charge_addr_from && r->ar.start <
1349 				quota->charge_addr_from) {
1350 			sz_to_skip = ALIGN_DOWN(quota->charge_addr_from -
1351 					r->ar.start, DAMON_MIN_REGION);
1352 			if (!sz_to_skip) {
1353 				if (damon_sz_region(r) <= DAMON_MIN_REGION)
1354 					return true;
1355 				sz_to_skip = DAMON_MIN_REGION;
1356 			}
1357 			damon_split_region_at(t, r, sz_to_skip);
1358 			r = damon_next_region(r);
1359 			*rp = r;
1360 		}
1361 		quota->charge_target_from = NULL;
1362 		quota->charge_addr_from = 0;
1363 	}
1364 	return false;
1365 }
1366 
damos_update_stat(struct damos * s,unsigned long sz_tried,unsigned long sz_applied,unsigned long sz_ops_filter_passed)1367 static void damos_update_stat(struct damos *s,
1368 		unsigned long sz_tried, unsigned long sz_applied,
1369 		unsigned long sz_ops_filter_passed)
1370 {
1371 	s->stat.nr_tried++;
1372 	s->stat.sz_tried += sz_tried;
1373 	if (sz_applied)
1374 		s->stat.nr_applied++;
1375 	s->stat.sz_applied += sz_applied;
1376 	s->stat.sz_ops_filter_passed += sz_ops_filter_passed;
1377 }
1378 
damos_filter_match(struct damon_ctx * ctx,struct damon_target * t,struct damon_region * r,struct damos_filter * filter)1379 static bool damos_filter_match(struct damon_ctx *ctx, struct damon_target *t,
1380 		struct damon_region *r, struct damos_filter *filter)
1381 {
1382 	bool matched = false;
1383 	struct damon_target *ti;
1384 	int target_idx = 0;
1385 	unsigned long start, end;
1386 
1387 	switch (filter->type) {
1388 	case DAMOS_FILTER_TYPE_TARGET:
1389 		damon_for_each_target(ti, ctx) {
1390 			if (ti == t)
1391 				break;
1392 			target_idx++;
1393 		}
1394 		matched = target_idx == filter->target_idx;
1395 		break;
1396 	case DAMOS_FILTER_TYPE_ADDR:
1397 		start = ALIGN_DOWN(filter->addr_range.start, DAMON_MIN_REGION);
1398 		end = ALIGN_DOWN(filter->addr_range.end, DAMON_MIN_REGION);
1399 
1400 		/* inside the range */
1401 		if (start <= r->ar.start && r->ar.end <= end) {
1402 			matched = true;
1403 			break;
1404 		}
1405 		/* outside of the range */
1406 		if (r->ar.end <= start || end <= r->ar.start) {
1407 			matched = false;
1408 			break;
1409 		}
1410 		/* start before the range and overlap */
1411 		if (r->ar.start < start) {
1412 			damon_split_region_at(t, r, start - r->ar.start);
1413 			matched = false;
1414 			break;
1415 		}
1416 		/* start inside the range */
1417 		damon_split_region_at(t, r, end - r->ar.start);
1418 		matched = true;
1419 		break;
1420 	default:
1421 		return false;
1422 	}
1423 
1424 	return matched == filter->matching;
1425 }
1426 
damos_filter_out(struct damon_ctx * ctx,struct damon_target * t,struct damon_region * r,struct damos * s)1427 static bool damos_filter_out(struct damon_ctx *ctx, struct damon_target *t,
1428 		struct damon_region *r, struct damos *s)
1429 {
1430 	struct damos_filter *filter;
1431 
1432 	damos_for_each_filter(filter, s) {
1433 		if (damos_filter_match(ctx, t, r, filter))
1434 			return !filter->allow;
1435 	}
1436 	return false;
1437 }
1438 
1439 /*
1440  * damos_walk_call_walk() - Call &damos_walk_control->walk_fn.
1441  * @ctx:	The context of &damon_ctx->walk_control.
1442  * @t:		The monitoring target of @r that @s will be applied.
1443  * @r:		The region of @t that @s will be applied.
1444  * @s:		The scheme of @ctx that will be applied to @r.
1445  *
1446  * This function is called from kdamond whenever it asked the operation set to
1447  * apply a DAMOS scheme action to a region.  If a DAMOS walk request is
1448  * installed by damos_walk() and not yet uninstalled, invoke it.
1449  */
damos_walk_call_walk(struct damon_ctx * ctx,struct damon_target * t,struct damon_region * r,struct damos * s,unsigned long sz_filter_passed)1450 static void damos_walk_call_walk(struct damon_ctx *ctx, struct damon_target *t,
1451 		struct damon_region *r, struct damos *s,
1452 		unsigned long sz_filter_passed)
1453 {
1454 	struct damos_walk_control *control;
1455 
1456 	mutex_lock(&ctx->walk_control_lock);
1457 	control = ctx->walk_control;
1458 	mutex_unlock(&ctx->walk_control_lock);
1459 	if (!control)
1460 		return;
1461 	control->walk_fn(control->data, ctx, t, r, s, sz_filter_passed);
1462 }
1463 
1464 /*
1465  * damos_walk_complete() - Complete DAMOS walk request if all walks are done.
1466  * @ctx:	The context of &damon_ctx->walk_control.
1467  * @s:		A scheme of @ctx that all walks are now done.
1468  *
1469  * This function is called when kdamond finished applying the action of a DAMOS
1470  * scheme to all regions that eligible for the given &damos->apply_interval_us.
1471  * If every scheme of @ctx including @s now finished walking for at least one
1472  * &damos->apply_interval_us, this function makrs the handling of the given
1473  * DAMOS walk request is done, so that damos_walk() can wake up and return.
1474  */
damos_walk_complete(struct damon_ctx * ctx,struct damos * s)1475 static void damos_walk_complete(struct damon_ctx *ctx, struct damos *s)
1476 {
1477 	struct damos *siter;
1478 	struct damos_walk_control *control;
1479 
1480 	mutex_lock(&ctx->walk_control_lock);
1481 	control = ctx->walk_control;
1482 	mutex_unlock(&ctx->walk_control_lock);
1483 	if (!control)
1484 		return;
1485 
1486 	s->walk_completed = true;
1487 	/* if all schemes completed, signal completion to walker */
1488 	damon_for_each_scheme(siter, ctx) {
1489 		if (!siter->walk_completed)
1490 			return;
1491 	}
1492 	complete(&control->completion);
1493 	mutex_lock(&ctx->walk_control_lock);
1494 	ctx->walk_control = NULL;
1495 	mutex_unlock(&ctx->walk_control_lock);
1496 }
1497 
1498 /*
1499  * damos_walk_cancel() - Cancel the current DAMOS walk request.
1500  * @ctx:	The context of &damon_ctx->walk_control.
1501  *
1502  * This function is called when @ctx is deactivated by DAMOS watermarks, DAMOS
1503  * walk is requested but there is no DAMOS scheme to walk for, or the kdamond
1504  * is already out of the main loop and therefore gonna be terminated, and hence
1505  * cannot continue the walks.  This function therefore marks the walk request
1506  * as canceled, so that damos_walk() can wake up and return.
1507  */
damos_walk_cancel(struct damon_ctx * ctx)1508 static void damos_walk_cancel(struct damon_ctx *ctx)
1509 {
1510 	struct damos_walk_control *control;
1511 
1512 	mutex_lock(&ctx->walk_control_lock);
1513 	control = ctx->walk_control;
1514 	mutex_unlock(&ctx->walk_control_lock);
1515 
1516 	if (!control)
1517 		return;
1518 	control->canceled = true;
1519 	complete(&control->completion);
1520 	mutex_lock(&ctx->walk_control_lock);
1521 	ctx->walk_control = NULL;
1522 	mutex_unlock(&ctx->walk_control_lock);
1523 }
1524 
damos_apply_scheme(struct damon_ctx * c,struct damon_target * t,struct damon_region * r,struct damos * s)1525 static void damos_apply_scheme(struct damon_ctx *c, struct damon_target *t,
1526 		struct damon_region *r, struct damos *s)
1527 {
1528 	struct damos_quota *quota = &s->quota;
1529 	unsigned long sz = damon_sz_region(r);
1530 	struct timespec64 begin, end;
1531 	unsigned long sz_applied = 0;
1532 	unsigned long sz_ops_filter_passed = 0;
1533 	int err = 0;
1534 	/*
1535 	 * We plan to support multiple context per kdamond, as DAMON sysfs
1536 	 * implies with 'nr_contexts' file.  Nevertheless, only single context
1537 	 * per kdamond is supported for now.  So, we can simply use '0' context
1538 	 * index here.
1539 	 */
1540 	unsigned int cidx = 0;
1541 	struct damos *siter;		/* schemes iterator */
1542 	unsigned int sidx = 0;
1543 	struct damon_target *titer;	/* targets iterator */
1544 	unsigned int tidx = 0;
1545 	bool do_trace = false;
1546 
1547 	/* get indices for trace_damos_before_apply() */
1548 	if (trace_damos_before_apply_enabled()) {
1549 		damon_for_each_scheme(siter, c) {
1550 			if (siter == s)
1551 				break;
1552 			sidx++;
1553 		}
1554 		damon_for_each_target(titer, c) {
1555 			if (titer == t)
1556 				break;
1557 			tidx++;
1558 		}
1559 		do_trace = true;
1560 	}
1561 
1562 	if (c->ops.apply_scheme) {
1563 		if (quota->esz && quota->charged_sz + sz > quota->esz) {
1564 			sz = ALIGN_DOWN(quota->esz - quota->charged_sz,
1565 					DAMON_MIN_REGION);
1566 			if (!sz)
1567 				goto update_stat;
1568 			damon_split_region_at(t, r, sz);
1569 		}
1570 		if (damos_filter_out(c, t, r, s))
1571 			return;
1572 		ktime_get_coarse_ts64(&begin);
1573 		if (c->callback.before_damos_apply)
1574 			err = c->callback.before_damos_apply(c, t, r, s);
1575 		if (!err) {
1576 			trace_damos_before_apply(cidx, sidx, tidx, r,
1577 					damon_nr_regions(t), do_trace);
1578 			sz_applied = c->ops.apply_scheme(c, t, r, s,
1579 					&sz_ops_filter_passed);
1580 		}
1581 		damos_walk_call_walk(c, t, r, s, sz_ops_filter_passed);
1582 		ktime_get_coarse_ts64(&end);
1583 		quota->total_charged_ns += timespec64_to_ns(&end) -
1584 			timespec64_to_ns(&begin);
1585 		quota->charged_sz += sz;
1586 		if (quota->esz && quota->charged_sz >= quota->esz) {
1587 			quota->charge_target_from = t;
1588 			quota->charge_addr_from = r->ar.end + 1;
1589 		}
1590 	}
1591 	if (s->action != DAMOS_STAT)
1592 		r->age = 0;
1593 
1594 update_stat:
1595 	damos_update_stat(s, sz, sz_applied, sz_ops_filter_passed);
1596 }
1597 
damon_do_apply_schemes(struct damon_ctx * c,struct damon_target * t,struct damon_region * r)1598 static void damon_do_apply_schemes(struct damon_ctx *c,
1599 				   struct damon_target *t,
1600 				   struct damon_region *r)
1601 {
1602 	struct damos *s;
1603 
1604 	damon_for_each_scheme(s, c) {
1605 		struct damos_quota *quota = &s->quota;
1606 
1607 		if (c->passed_sample_intervals < s->next_apply_sis)
1608 			continue;
1609 
1610 		if (!s->wmarks.activated)
1611 			continue;
1612 
1613 		/* Check the quota */
1614 		if (quota->esz && quota->charged_sz >= quota->esz)
1615 			continue;
1616 
1617 		if (damos_skip_charged_region(t, &r, s))
1618 			continue;
1619 
1620 		if (!damos_valid_target(c, t, r, s))
1621 			continue;
1622 
1623 		damos_apply_scheme(c, t, r, s);
1624 	}
1625 }
1626 
1627 /*
1628  * damon_feed_loop_next_input() - get next input to achieve a target score.
1629  * @last_input	The last input.
1630  * @score	Current score that made with @last_input.
1631  *
1632  * Calculate next input to achieve the target score, based on the last input
1633  * and current score.  Assuming the input and the score are positively
1634  * proportional, calculate how much compensation should be added to or
1635  * subtracted from the last input as a proportion of the last input.  Avoid
1636  * next input always being zero by setting it non-zero always.  In short form
1637  * (assuming support of float and signed calculations), the algorithm is as
1638  * below.
1639  *
1640  * next_input = max(last_input * ((goal - current) / goal + 1), 1)
1641  *
1642  * For simple implementation, we assume the target score is always 10,000.  The
1643  * caller should adjust @score for this.
1644  *
1645  * Returns next input that assumed to achieve the target score.
1646  */
damon_feed_loop_next_input(unsigned long last_input,unsigned long score)1647 static unsigned long damon_feed_loop_next_input(unsigned long last_input,
1648 		unsigned long score)
1649 {
1650 	const unsigned long goal = 10000;
1651 	/* Set minimum input as 10000 to avoid compensation be zero */
1652 	const unsigned long min_input = 10000;
1653 	unsigned long score_goal_diff, compensation;
1654 	bool over_achieving = score > goal;
1655 
1656 	if (score == goal)
1657 		return last_input;
1658 	if (score >= goal * 2)
1659 		return min_input;
1660 
1661 	if (over_achieving)
1662 		score_goal_diff = score - goal;
1663 	else
1664 		score_goal_diff = goal - score;
1665 
1666 	if (last_input < ULONG_MAX / score_goal_diff)
1667 		compensation = last_input * score_goal_diff / goal;
1668 	else
1669 		compensation = last_input / goal * score_goal_diff;
1670 
1671 	if (over_achieving)
1672 		return max(last_input - compensation, min_input);
1673 	if (last_input < ULONG_MAX - compensation)
1674 		return last_input + compensation;
1675 	return ULONG_MAX;
1676 }
1677 
1678 #ifdef CONFIG_PSI
1679 
damos_get_some_mem_psi_total(void)1680 static u64 damos_get_some_mem_psi_total(void)
1681 {
1682 	if (static_branch_likely(&psi_disabled))
1683 		return 0;
1684 	return div_u64(psi_system.total[PSI_AVGS][PSI_MEM * 2],
1685 			NSEC_PER_USEC);
1686 }
1687 
1688 #else	/* CONFIG_PSI */
1689 
damos_get_some_mem_psi_total(void)1690 static inline u64 damos_get_some_mem_psi_total(void)
1691 {
1692 	return 0;
1693 };
1694 
1695 #endif	/* CONFIG_PSI */
1696 
damos_set_quota_goal_current_value(struct damos_quota_goal * goal)1697 static void damos_set_quota_goal_current_value(struct damos_quota_goal *goal)
1698 {
1699 	u64 now_psi_total;
1700 
1701 	switch (goal->metric) {
1702 	case DAMOS_QUOTA_USER_INPUT:
1703 		/* User should already set goal->current_value */
1704 		break;
1705 	case DAMOS_QUOTA_SOME_MEM_PSI_US:
1706 		now_psi_total = damos_get_some_mem_psi_total();
1707 		goal->current_value = now_psi_total - goal->last_psi_total;
1708 		goal->last_psi_total = now_psi_total;
1709 		break;
1710 	default:
1711 		break;
1712 	}
1713 }
1714 
1715 /* Return the highest score since it makes schemes least aggressive */
damos_quota_score(struct damos_quota * quota)1716 static unsigned long damos_quota_score(struct damos_quota *quota)
1717 {
1718 	struct damos_quota_goal *goal;
1719 	unsigned long highest_score = 0;
1720 
1721 	damos_for_each_quota_goal(goal, quota) {
1722 		damos_set_quota_goal_current_value(goal);
1723 		highest_score = max(highest_score,
1724 				goal->current_value * 10000 /
1725 				goal->target_value);
1726 	}
1727 
1728 	return highest_score;
1729 }
1730 
1731 /*
1732  * Called only if quota->ms, or quota->sz are set, or quota->goals is not empty
1733  */
damos_set_effective_quota(struct damos_quota * quota)1734 static void damos_set_effective_quota(struct damos_quota *quota)
1735 {
1736 	unsigned long throughput;
1737 	unsigned long esz = ULONG_MAX;
1738 
1739 	if (!quota->ms && list_empty(&quota->goals)) {
1740 		quota->esz = quota->sz;
1741 		return;
1742 	}
1743 
1744 	if (!list_empty(&quota->goals)) {
1745 		unsigned long score = damos_quota_score(quota);
1746 
1747 		quota->esz_bp = damon_feed_loop_next_input(
1748 				max(quota->esz_bp, 10000UL),
1749 				score);
1750 		esz = quota->esz_bp / 10000;
1751 	}
1752 
1753 	if (quota->ms) {
1754 		if (quota->total_charged_ns)
1755 			throughput = quota->total_charged_sz * 1000000 /
1756 				quota->total_charged_ns;
1757 		else
1758 			throughput = PAGE_SIZE * 1024;
1759 		esz = min(throughput * quota->ms, esz);
1760 	}
1761 
1762 	if (quota->sz && quota->sz < esz)
1763 		esz = quota->sz;
1764 
1765 	quota->esz = esz;
1766 }
1767 
damos_adjust_quota(struct damon_ctx * c,struct damos * s)1768 static void damos_adjust_quota(struct damon_ctx *c, struct damos *s)
1769 {
1770 	struct damos_quota *quota = &s->quota;
1771 	struct damon_target *t;
1772 	struct damon_region *r;
1773 	unsigned long cumulated_sz;
1774 	unsigned int score, max_score = 0;
1775 
1776 	if (!quota->ms && !quota->sz && list_empty(&quota->goals))
1777 		return;
1778 
1779 	/* New charge window starts */
1780 	if (time_after_eq(jiffies, quota->charged_from +
1781 				msecs_to_jiffies(quota->reset_interval))) {
1782 		if (quota->esz && quota->charged_sz >= quota->esz)
1783 			s->stat.qt_exceeds++;
1784 		quota->total_charged_sz += quota->charged_sz;
1785 		quota->charged_from = jiffies;
1786 		quota->charged_sz = 0;
1787 		damos_set_effective_quota(quota);
1788 	}
1789 
1790 	if (!c->ops.get_scheme_score)
1791 		return;
1792 
1793 	/* Fill up the score histogram */
1794 	memset(c->regions_score_histogram, 0,
1795 			sizeof(*c->regions_score_histogram) *
1796 			(DAMOS_MAX_SCORE + 1));
1797 	damon_for_each_target(t, c) {
1798 		damon_for_each_region(r, t) {
1799 			if (!__damos_valid_target(r, s))
1800 				continue;
1801 			score = c->ops.get_scheme_score(c, t, r, s);
1802 			c->regions_score_histogram[score] +=
1803 				damon_sz_region(r);
1804 			if (score > max_score)
1805 				max_score = score;
1806 		}
1807 	}
1808 
1809 	/* Set the min score limit */
1810 	for (cumulated_sz = 0, score = max_score; ; score--) {
1811 		cumulated_sz += c->regions_score_histogram[score];
1812 		if (cumulated_sz >= quota->esz || !score)
1813 			break;
1814 	}
1815 	quota->min_score = score;
1816 }
1817 
kdamond_apply_schemes(struct damon_ctx * c)1818 static void kdamond_apply_schemes(struct damon_ctx *c)
1819 {
1820 	struct damon_target *t;
1821 	struct damon_region *r, *next_r;
1822 	struct damos *s;
1823 	unsigned long sample_interval = c->attrs.sample_interval ?
1824 		c->attrs.sample_interval : 1;
1825 	bool has_schemes_to_apply = false;
1826 
1827 	damon_for_each_scheme(s, c) {
1828 		if (c->passed_sample_intervals < s->next_apply_sis)
1829 			continue;
1830 
1831 		if (!s->wmarks.activated)
1832 			continue;
1833 
1834 		has_schemes_to_apply = true;
1835 
1836 		damos_adjust_quota(c, s);
1837 	}
1838 
1839 	if (!has_schemes_to_apply)
1840 		return;
1841 
1842 	damon_for_each_target(t, c) {
1843 		damon_for_each_region_safe(r, next_r, t)
1844 			damon_do_apply_schemes(c, t, r);
1845 	}
1846 
1847 	damon_for_each_scheme(s, c) {
1848 		if (c->passed_sample_intervals < s->next_apply_sis)
1849 			continue;
1850 		damos_walk_complete(c, s);
1851 		s->next_apply_sis = c->passed_sample_intervals +
1852 			(s->apply_interval_us ? s->apply_interval_us :
1853 			 c->attrs.aggr_interval) / sample_interval;
1854 	}
1855 }
1856 
1857 /*
1858  * Merge two adjacent regions into one region
1859  */
damon_merge_two_regions(struct damon_target * t,struct damon_region * l,struct damon_region * r)1860 static void damon_merge_two_regions(struct damon_target *t,
1861 		struct damon_region *l, struct damon_region *r)
1862 {
1863 	unsigned long sz_l = damon_sz_region(l), sz_r = damon_sz_region(r);
1864 
1865 	l->nr_accesses = (l->nr_accesses * sz_l + r->nr_accesses * sz_r) /
1866 			(sz_l + sz_r);
1867 	l->nr_accesses_bp = l->nr_accesses * 10000;
1868 	l->age = (l->age * sz_l + r->age * sz_r) / (sz_l + sz_r);
1869 	l->ar.end = r->ar.end;
1870 	damon_destroy_region(r, t);
1871 }
1872 
1873 /*
1874  * Merge adjacent regions having similar access frequencies
1875  *
1876  * t		target affected by this merge operation
1877  * thres	'->nr_accesses' diff threshold for the merge
1878  * sz_limit	size upper limit of each region
1879  */
damon_merge_regions_of(struct damon_target * t,unsigned int thres,unsigned long sz_limit)1880 static void damon_merge_regions_of(struct damon_target *t, unsigned int thres,
1881 				   unsigned long sz_limit)
1882 {
1883 	struct damon_region *r, *prev = NULL, *next;
1884 
1885 	damon_for_each_region_safe(r, next, t) {
1886 		if (abs(r->nr_accesses - r->last_nr_accesses) > thres)
1887 			r->age = 0;
1888 		else
1889 			r->age++;
1890 
1891 		if (prev && prev->ar.end == r->ar.start &&
1892 		    abs(prev->nr_accesses - r->nr_accesses) <= thres &&
1893 		    damon_sz_region(prev) + damon_sz_region(r) <= sz_limit)
1894 			damon_merge_two_regions(t, prev, r);
1895 		else
1896 			prev = r;
1897 	}
1898 }
1899 
1900 /*
1901  * Merge adjacent regions having similar access frequencies
1902  *
1903  * threshold	'->nr_accesses' diff threshold for the merge
1904  * sz_limit	size upper limit of each region
1905  *
1906  * This function merges monitoring target regions which are adjacent and their
1907  * access frequencies are similar.  This is for minimizing the monitoring
1908  * overhead under the dynamically changeable access pattern.  If a merge was
1909  * unnecessarily made, later 'kdamond_split_regions()' will revert it.
1910  *
1911  * The total number of regions could be higher than the user-defined limit,
1912  * max_nr_regions for some cases.  For example, the user can update
1913  * max_nr_regions to a number that lower than the current number of regions
1914  * while DAMON is running.  For such a case, repeat merging until the limit is
1915  * met while increasing @threshold up to possible maximum level.
1916  */
kdamond_merge_regions(struct damon_ctx * c,unsigned int threshold,unsigned long sz_limit)1917 static void kdamond_merge_regions(struct damon_ctx *c, unsigned int threshold,
1918 				  unsigned long sz_limit)
1919 {
1920 	struct damon_target *t;
1921 	unsigned int nr_regions;
1922 	unsigned int max_thres;
1923 
1924 	max_thres = c->attrs.aggr_interval /
1925 		(c->attrs.sample_interval ?  c->attrs.sample_interval : 1);
1926 	do {
1927 		nr_regions = 0;
1928 		damon_for_each_target(t, c) {
1929 			damon_merge_regions_of(t, threshold, sz_limit);
1930 			nr_regions += damon_nr_regions(t);
1931 		}
1932 		threshold = max(1, threshold * 2);
1933 	} while (nr_regions > c->attrs.max_nr_regions &&
1934 			threshold / 2 < max_thres);
1935 }
1936 
1937 /*
1938  * Split a region in two
1939  *
1940  * r		the region to be split
1941  * sz_r		size of the first sub-region that will be made
1942  */
damon_split_region_at(struct damon_target * t,struct damon_region * r,unsigned long sz_r)1943 static void damon_split_region_at(struct damon_target *t,
1944 				  struct damon_region *r, unsigned long sz_r)
1945 {
1946 	struct damon_region *new;
1947 
1948 	new = damon_new_region(r->ar.start + sz_r, r->ar.end);
1949 	if (!new)
1950 		return;
1951 
1952 	r->ar.end = new->ar.start;
1953 
1954 	new->age = r->age;
1955 	new->last_nr_accesses = r->last_nr_accesses;
1956 	new->nr_accesses_bp = r->nr_accesses_bp;
1957 	new->nr_accesses = r->nr_accesses;
1958 
1959 	damon_insert_region(new, r, damon_next_region(r), t);
1960 }
1961 
1962 /* Split every region in the given target into 'nr_subs' regions */
damon_split_regions_of(struct damon_target * t,int nr_subs)1963 static void damon_split_regions_of(struct damon_target *t, int nr_subs)
1964 {
1965 	struct damon_region *r, *next;
1966 	unsigned long sz_region, sz_sub = 0;
1967 	int i;
1968 
1969 	damon_for_each_region_safe(r, next, t) {
1970 		sz_region = damon_sz_region(r);
1971 
1972 		for (i = 0; i < nr_subs - 1 &&
1973 				sz_region > 2 * DAMON_MIN_REGION; i++) {
1974 			/*
1975 			 * Randomly select size of left sub-region to be at
1976 			 * least 10 percent and at most 90% of original region
1977 			 */
1978 			sz_sub = ALIGN_DOWN(damon_rand(1, 10) *
1979 					sz_region / 10, DAMON_MIN_REGION);
1980 			/* Do not allow blank region */
1981 			if (sz_sub == 0 || sz_sub >= sz_region)
1982 				continue;
1983 
1984 			damon_split_region_at(t, r, sz_sub);
1985 			sz_region = sz_sub;
1986 		}
1987 	}
1988 }
1989 
1990 /*
1991  * Split every target region into randomly-sized small regions
1992  *
1993  * This function splits every target region into random-sized small regions if
1994  * current total number of the regions is equal or smaller than half of the
1995  * user-specified maximum number of regions.  This is for maximizing the
1996  * monitoring accuracy under the dynamically changeable access patterns.  If a
1997  * split was unnecessarily made, later 'kdamond_merge_regions()' will revert
1998  * it.
1999  */
kdamond_split_regions(struct damon_ctx * ctx)2000 static void kdamond_split_regions(struct damon_ctx *ctx)
2001 {
2002 	struct damon_target *t;
2003 	unsigned int nr_regions = 0;
2004 	static unsigned int last_nr_regions;
2005 	int nr_subregions = 2;
2006 
2007 	damon_for_each_target(t, ctx)
2008 		nr_regions += damon_nr_regions(t);
2009 
2010 	if (nr_regions > ctx->attrs.max_nr_regions / 2)
2011 		return;
2012 
2013 	/* Maybe the middle of the region has different access frequency */
2014 	if (last_nr_regions == nr_regions &&
2015 			nr_regions < ctx->attrs.max_nr_regions / 3)
2016 		nr_subregions = 3;
2017 
2018 	damon_for_each_target(t, ctx)
2019 		damon_split_regions_of(t, nr_subregions);
2020 
2021 	last_nr_regions = nr_regions;
2022 }
2023 
2024 /*
2025  * Check whether current monitoring should be stopped
2026  *
2027  * The monitoring is stopped when either the user requested to stop, or all
2028  * monitoring targets are invalid.
2029  *
2030  * Returns true if need to stop current monitoring.
2031  */
kdamond_need_stop(struct damon_ctx * ctx)2032 static bool kdamond_need_stop(struct damon_ctx *ctx)
2033 {
2034 	struct damon_target *t;
2035 
2036 	if (kthread_should_stop())
2037 		return true;
2038 
2039 	if (!ctx->ops.target_valid)
2040 		return false;
2041 
2042 	damon_for_each_target(t, ctx) {
2043 		if (ctx->ops.target_valid(t))
2044 			return false;
2045 	}
2046 
2047 	return true;
2048 }
2049 
damos_get_wmark_metric_value(enum damos_wmark_metric metric,unsigned long * metric_value)2050 static int damos_get_wmark_metric_value(enum damos_wmark_metric metric,
2051 					unsigned long *metric_value)
2052 {
2053 	switch (metric) {
2054 	case DAMOS_WMARK_FREE_MEM_RATE:
2055 		*metric_value = global_zone_page_state(NR_FREE_PAGES) * 1000 /
2056 		       totalram_pages();
2057 		return 0;
2058 	default:
2059 		break;
2060 	}
2061 	return -EINVAL;
2062 }
2063 
2064 /*
2065  * Returns zero if the scheme is active.  Else, returns time to wait for next
2066  * watermark check in micro-seconds.
2067  */
damos_wmark_wait_us(struct damos * scheme)2068 static unsigned long damos_wmark_wait_us(struct damos *scheme)
2069 {
2070 	unsigned long metric;
2071 
2072 	if (damos_get_wmark_metric_value(scheme->wmarks.metric, &metric))
2073 		return 0;
2074 
2075 	/* higher than high watermark or lower than low watermark */
2076 	if (metric > scheme->wmarks.high || scheme->wmarks.low > metric) {
2077 		if (scheme->wmarks.activated)
2078 			pr_debug("deactivate a scheme (%d) for %s wmark\n",
2079 				 scheme->action,
2080 				 str_high_low(metric > scheme->wmarks.high));
2081 		scheme->wmarks.activated = false;
2082 		return scheme->wmarks.interval;
2083 	}
2084 
2085 	/* inactive and higher than middle watermark */
2086 	if ((scheme->wmarks.high >= metric && metric >= scheme->wmarks.mid) &&
2087 			!scheme->wmarks.activated)
2088 		return scheme->wmarks.interval;
2089 
2090 	if (!scheme->wmarks.activated)
2091 		pr_debug("activate a scheme (%d)\n", scheme->action);
2092 	scheme->wmarks.activated = true;
2093 	return 0;
2094 }
2095 
kdamond_usleep(unsigned long usecs)2096 static void kdamond_usleep(unsigned long usecs)
2097 {
2098 	if (usecs >= USLEEP_RANGE_UPPER_BOUND)
2099 		schedule_timeout_idle(usecs_to_jiffies(usecs));
2100 	else
2101 		usleep_range_idle(usecs, usecs + 1);
2102 }
2103 
2104 /*
2105  * kdamond_call() - handle damon_call_control.
2106  * @ctx:	The &struct damon_ctx of the kdamond.
2107  * @cancel:	Whether to cancel the invocation of the function.
2108  *
2109  * If there is a &struct damon_call_control request that registered via
2110  * &damon_call() on @ctx, do or cancel the invocation of the function depending
2111  * on @cancel.  @cancel is set when the kdamond is deactivated by DAMOS
2112  * watermarks, or the kdamond is already out of the main loop and therefore
2113  * will be terminated.
2114  */
kdamond_call(struct damon_ctx * ctx,bool cancel)2115 static void kdamond_call(struct damon_ctx *ctx, bool cancel)
2116 {
2117 	struct damon_call_control *control;
2118 	int ret = 0;
2119 
2120 	mutex_lock(&ctx->call_control_lock);
2121 	control = ctx->call_control;
2122 	mutex_unlock(&ctx->call_control_lock);
2123 	if (!control)
2124 		return;
2125 	if (cancel) {
2126 		control->canceled = true;
2127 	} else {
2128 		ret = control->fn(control->data);
2129 		control->return_code = ret;
2130 	}
2131 	complete(&control->completion);
2132 	mutex_lock(&ctx->call_control_lock);
2133 	ctx->call_control = NULL;
2134 	mutex_unlock(&ctx->call_control_lock);
2135 }
2136 
2137 /* Returns negative error code if it's not activated but should return */
kdamond_wait_activation(struct damon_ctx * ctx)2138 static int kdamond_wait_activation(struct damon_ctx *ctx)
2139 {
2140 	struct damos *s;
2141 	unsigned long wait_time;
2142 	unsigned long min_wait_time = 0;
2143 	bool init_wait_time = false;
2144 
2145 	while (!kdamond_need_stop(ctx)) {
2146 		damon_for_each_scheme(s, ctx) {
2147 			wait_time = damos_wmark_wait_us(s);
2148 			if (!init_wait_time || wait_time < min_wait_time) {
2149 				init_wait_time = true;
2150 				min_wait_time = wait_time;
2151 			}
2152 		}
2153 		if (!min_wait_time)
2154 			return 0;
2155 
2156 		kdamond_usleep(min_wait_time);
2157 
2158 		if (ctx->callback.after_wmarks_check &&
2159 				ctx->callback.after_wmarks_check(ctx))
2160 			break;
2161 		kdamond_call(ctx, true);
2162 		damos_walk_cancel(ctx);
2163 	}
2164 	return -EBUSY;
2165 }
2166 
kdamond_init_intervals_sis(struct damon_ctx * ctx)2167 static void kdamond_init_intervals_sis(struct damon_ctx *ctx)
2168 {
2169 	unsigned long sample_interval = ctx->attrs.sample_interval ?
2170 		ctx->attrs.sample_interval : 1;
2171 	unsigned long apply_interval;
2172 	struct damos *scheme;
2173 
2174 	ctx->passed_sample_intervals = 0;
2175 	ctx->next_aggregation_sis = ctx->attrs.aggr_interval / sample_interval;
2176 	ctx->next_ops_update_sis = ctx->attrs.ops_update_interval /
2177 		sample_interval;
2178 
2179 	damon_for_each_scheme(scheme, ctx) {
2180 		apply_interval = scheme->apply_interval_us ?
2181 			scheme->apply_interval_us : ctx->attrs.aggr_interval;
2182 		scheme->next_apply_sis = apply_interval / sample_interval;
2183 	}
2184 }
2185 
2186 /*
2187  * The monitoring daemon that runs as a kernel thread
2188  */
kdamond_fn(void * data)2189 static int kdamond_fn(void *data)
2190 {
2191 	struct damon_ctx *ctx = data;
2192 	struct damon_target *t;
2193 	struct damon_region *r, *next;
2194 	unsigned int max_nr_accesses = 0;
2195 	unsigned long sz_limit = 0;
2196 
2197 	pr_debug("kdamond (%d) starts\n", current->pid);
2198 
2199 	complete(&ctx->kdamond_started);
2200 	kdamond_init_intervals_sis(ctx);
2201 
2202 	if (ctx->ops.init)
2203 		ctx->ops.init(ctx);
2204 	if (ctx->callback.before_start && ctx->callback.before_start(ctx))
2205 		goto done;
2206 	ctx->regions_score_histogram = kmalloc_array(DAMOS_MAX_SCORE + 1,
2207 			sizeof(*ctx->regions_score_histogram), GFP_KERNEL);
2208 	if (!ctx->regions_score_histogram)
2209 		goto done;
2210 
2211 	sz_limit = damon_region_sz_limit(ctx);
2212 
2213 	while (!kdamond_need_stop(ctx)) {
2214 		/*
2215 		 * ctx->attrs and ctx->next_{aggregation,ops_update}_sis could
2216 		 * be changed from after_wmarks_check() or after_aggregation()
2217 		 * callbacks.  Read the values here, and use those for this
2218 		 * iteration.  That is, damon_set_attrs() updated new values
2219 		 * are respected from next iteration.
2220 		 */
2221 		unsigned long next_aggregation_sis = ctx->next_aggregation_sis;
2222 		unsigned long next_ops_update_sis = ctx->next_ops_update_sis;
2223 		unsigned long sample_interval = ctx->attrs.sample_interval;
2224 
2225 		if (kdamond_wait_activation(ctx))
2226 			break;
2227 
2228 		if (ctx->ops.prepare_access_checks)
2229 			ctx->ops.prepare_access_checks(ctx);
2230 		if (ctx->callback.after_sampling &&
2231 				ctx->callback.after_sampling(ctx))
2232 			break;
2233 		kdamond_call(ctx, false);
2234 
2235 		kdamond_usleep(sample_interval);
2236 		ctx->passed_sample_intervals++;
2237 
2238 		if (ctx->ops.check_accesses)
2239 			max_nr_accesses = ctx->ops.check_accesses(ctx);
2240 
2241 		if (ctx->passed_sample_intervals >= next_aggregation_sis) {
2242 			kdamond_merge_regions(ctx,
2243 					max_nr_accesses / 10,
2244 					sz_limit);
2245 			if (ctx->callback.after_aggregation &&
2246 					ctx->callback.after_aggregation(ctx))
2247 				break;
2248 		}
2249 
2250 		/*
2251 		 * do kdamond_apply_schemes() after kdamond_merge_regions() if
2252 		 * possible, to reduce overhead
2253 		 */
2254 		if (!list_empty(&ctx->schemes))
2255 			kdamond_apply_schemes(ctx);
2256 		else
2257 			damos_walk_cancel(ctx);
2258 
2259 		sample_interval = ctx->attrs.sample_interval ?
2260 			ctx->attrs.sample_interval : 1;
2261 		if (ctx->passed_sample_intervals >= next_aggregation_sis) {
2262 			ctx->next_aggregation_sis = next_aggregation_sis +
2263 				ctx->attrs.aggr_interval / sample_interval;
2264 
2265 			kdamond_reset_aggregated(ctx);
2266 			kdamond_split_regions(ctx);
2267 			if (ctx->ops.reset_aggregated)
2268 				ctx->ops.reset_aggregated(ctx);
2269 		}
2270 
2271 		if (ctx->passed_sample_intervals >= next_ops_update_sis) {
2272 			ctx->next_ops_update_sis = next_ops_update_sis +
2273 				ctx->attrs.ops_update_interval /
2274 				sample_interval;
2275 			if (ctx->ops.update)
2276 				ctx->ops.update(ctx);
2277 			sz_limit = damon_region_sz_limit(ctx);
2278 		}
2279 	}
2280 done:
2281 	damon_for_each_target(t, ctx) {
2282 		damon_for_each_region_safe(r, next, t)
2283 			damon_destroy_region(r, t);
2284 	}
2285 
2286 	if (ctx->callback.before_terminate)
2287 		ctx->callback.before_terminate(ctx);
2288 	if (ctx->ops.cleanup)
2289 		ctx->ops.cleanup(ctx);
2290 	kfree(ctx->regions_score_histogram);
2291 
2292 	pr_debug("kdamond (%d) finishes\n", current->pid);
2293 	mutex_lock(&ctx->kdamond_lock);
2294 	ctx->kdamond = NULL;
2295 	mutex_unlock(&ctx->kdamond_lock);
2296 
2297 	kdamond_call(ctx, true);
2298 	damos_walk_cancel(ctx);
2299 
2300 	mutex_lock(&damon_lock);
2301 	nr_running_ctxs--;
2302 	if (!nr_running_ctxs && running_exclusive_ctxs)
2303 		running_exclusive_ctxs = false;
2304 	mutex_unlock(&damon_lock);
2305 
2306 	return 0;
2307 }
2308 
2309 /*
2310  * struct damon_system_ram_region - System RAM resource address region of
2311  *				    [@start, @end).
2312  * @start:	Start address of the region (inclusive).
2313  * @end:	End address of the region (exclusive).
2314  */
2315 struct damon_system_ram_region {
2316 	unsigned long start;
2317 	unsigned long end;
2318 };
2319 
walk_system_ram(struct resource * res,void * arg)2320 static int walk_system_ram(struct resource *res, void *arg)
2321 {
2322 	struct damon_system_ram_region *a = arg;
2323 
2324 	if (a->end - a->start < resource_size(res)) {
2325 		a->start = res->start;
2326 		a->end = res->end;
2327 	}
2328 	return 0;
2329 }
2330 
2331 /*
2332  * Find biggest 'System RAM' resource and store its start and end address in
2333  * @start and @end, respectively.  If no System RAM is found, returns false.
2334  */
damon_find_biggest_system_ram(unsigned long * start,unsigned long * end)2335 static bool damon_find_biggest_system_ram(unsigned long *start,
2336 						unsigned long *end)
2337 
2338 {
2339 	struct damon_system_ram_region arg = {};
2340 
2341 	walk_system_ram_res(0, ULONG_MAX, &arg, walk_system_ram);
2342 	if (arg.end <= arg.start)
2343 		return false;
2344 
2345 	*start = arg.start;
2346 	*end = arg.end;
2347 	return true;
2348 }
2349 
2350 /**
2351  * damon_set_region_biggest_system_ram_default() - Set the region of the given
2352  * monitoring target as requested, or biggest 'System RAM'.
2353  * @t:		The monitoring target to set the region.
2354  * @start:	The pointer to the start address of the region.
2355  * @end:	The pointer to the end address of the region.
2356  *
2357  * This function sets the region of @t as requested by @start and @end.  If the
2358  * values of @start and @end are zero, however, this function finds the biggest
2359  * 'System RAM' resource and sets the region to cover the resource.  In the
2360  * latter case, this function saves the start and end addresses of the resource
2361  * in @start and @end, respectively.
2362  *
2363  * Return: 0 on success, negative error code otherwise.
2364  */
damon_set_region_biggest_system_ram_default(struct damon_target * t,unsigned long * start,unsigned long * end)2365 int damon_set_region_biggest_system_ram_default(struct damon_target *t,
2366 			unsigned long *start, unsigned long *end)
2367 {
2368 	struct damon_addr_range addr_range;
2369 
2370 	if (*start > *end)
2371 		return -EINVAL;
2372 
2373 	if (!*start && !*end &&
2374 		!damon_find_biggest_system_ram(start, end))
2375 		return -EINVAL;
2376 
2377 	addr_range.start = *start;
2378 	addr_range.end = *end;
2379 	return damon_set_regions(t, &addr_range, 1);
2380 }
2381 
2382 /*
2383  * damon_moving_sum() - Calculate an inferred moving sum value.
2384  * @mvsum:	Inferred sum of the last @len_window values.
2385  * @nomvsum:	Non-moving sum of the last discrete @len_window window values.
2386  * @len_window:	The number of last values to take care of.
2387  * @new_value:	New value that will be added to the pseudo moving sum.
2388  *
2389  * Moving sum (moving average * window size) is good for handling noise, but
2390  * the cost of keeping past values can be high for arbitrary window size.  This
2391  * function implements a lightweight pseudo moving sum function that doesn't
2392  * keep the past window values.
2393  *
2394  * It simply assumes there was no noise in the past, and get the no-noise
2395  * assumed past value to drop from @nomvsum and @len_window.  @nomvsum is a
2396  * non-moving sum of the last window.  For example, if @len_window is 10 and we
2397  * have 25 values, @nomvsum is the sum of the 11th to 20th values of the 25
2398  * values.  Hence, this function simply drops @nomvsum / @len_window from
2399  * given @mvsum and add @new_value.
2400  *
2401  * For example, if @len_window is 10 and @nomvsum is 50, the last 10 values for
2402  * the last window could be vary, e.g., 0, 10, 0, 10, 0, 10, 0, 0, 0, 20.  For
2403  * calculating next moving sum with a new value, we should drop 0 from 50 and
2404  * add the new value.  However, this function assumes it got value 5 for each
2405  * of the last ten times.  Based on the assumption, when the next value is
2406  * measured, it drops the assumed past value, 5 from the current sum, and add
2407  * the new value to get the updated pseduo-moving average.
2408  *
2409  * This means the value could have errors, but the errors will be disappeared
2410  * for every @len_window aligned calls.  For example, if @len_window is 10, the
2411  * pseudo moving sum with 11th value to 19th value would have an error.  But
2412  * the sum with 20th value will not have the error.
2413  *
2414  * Return: Pseudo-moving average after getting the @new_value.
2415  */
damon_moving_sum(unsigned int mvsum,unsigned int nomvsum,unsigned int len_window,unsigned int new_value)2416 static unsigned int damon_moving_sum(unsigned int mvsum, unsigned int nomvsum,
2417 		unsigned int len_window, unsigned int new_value)
2418 {
2419 	return mvsum - nomvsum / len_window + new_value;
2420 }
2421 
2422 /**
2423  * damon_update_region_access_rate() - Update the access rate of a region.
2424  * @r:		The DAMON region to update for its access check result.
2425  * @accessed:	Whether the region has accessed during last sampling interval.
2426  * @attrs:	The damon_attrs of the DAMON context.
2427  *
2428  * Update the access rate of a region with the region's last sampling interval
2429  * access check result.
2430  *
2431  * Usually this will be called by &damon_operations->check_accesses callback.
2432  */
damon_update_region_access_rate(struct damon_region * r,bool accessed,struct damon_attrs * attrs)2433 void damon_update_region_access_rate(struct damon_region *r, bool accessed,
2434 		struct damon_attrs *attrs)
2435 {
2436 	unsigned int len_window = 1;
2437 
2438 	/*
2439 	 * sample_interval can be zero, but cannot be larger than
2440 	 * aggr_interval, owing to validation of damon_set_attrs().
2441 	 */
2442 	if (attrs->sample_interval)
2443 		len_window = damon_max_nr_accesses(attrs);
2444 	r->nr_accesses_bp = damon_moving_sum(r->nr_accesses_bp,
2445 			r->last_nr_accesses * 10000, len_window,
2446 			accessed ? 10000 : 0);
2447 
2448 	if (accessed)
2449 		r->nr_accesses++;
2450 }
2451 
damon_init(void)2452 static int __init damon_init(void)
2453 {
2454 	damon_region_cache = KMEM_CACHE(damon_region, 0);
2455 	if (unlikely(!damon_region_cache)) {
2456 		pr_err("creating damon_region_cache fails\n");
2457 		return -ENOMEM;
2458 	}
2459 
2460 	return 0;
2461 }
2462 
2463 subsys_initcall(damon_init);
2464 
2465 #include "tests/core-kunit.h"
2466