// SPDX-License-Identifier: GPL-2.0
/*
 * Data Access Monitor
 *
 * Author: SeongJae Park <sj@kernel.org>
 */

#define pr_fmt(fmt) "damon: " fmt

#include <linux/damon.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/memcontrol.h>
#include <linux/mm.h>
#include <linux/psi.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/string_choices.h>

#define CREATE_TRACE_POINTS
#include <trace/events/damon.h>

static DEFINE_MUTEX(damon_lock);
static int nr_running_ctxs;
static bool running_exclusive_ctxs;

static DEFINE_MUTEX(damon_ops_lock);
static struct damon_operations damon_registered_ops[NR_DAMON_OPS];

static struct kmem_cache *damon_region_cache __ro_after_init;

/* Should be called under damon_ops_lock with id smaller than NR_DAMON_OPS */
static bool __damon_is_registered_ops(enum damon_ops_id id)
{
	struct damon_operations empty_ops = {};

	if (!memcmp(&empty_ops, &damon_registered_ops[id], sizeof(empty_ops)))
		return false;
	return true;
}

/**
 * damon_is_registered_ops() - Check if a given damon_operations is registered.
 * @id:	Id of the damon_operations to check if registered.
 *
 * Return: true if the ops is set, false otherwise.
 */
bool damon_is_registered_ops(enum damon_ops_id id)
{
	bool registered;

	if (id >= NR_DAMON_OPS)
		return false;
	mutex_lock(&damon_ops_lock);
	registered = __damon_is_registered_ops(id);
	mutex_unlock(&damon_ops_lock);
	return registered;
}

/**
 * damon_register_ops() - Register a monitoring operations set to DAMON.
 * @ops:	monitoring operations set to register.
 *
 * This function registers a monitoring operations set having a valid &struct
 * damon_operations->id, so that others can find and use it later.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int damon_register_ops(struct damon_operations *ops)
{
	int err = 0;

	if (ops->id >= NR_DAMON_OPS)
		return -EINVAL;

	mutex_lock(&damon_ops_lock);
	/* Fail for already registered ops */
	if (__damon_is_registered_ops(ops->id))
		err = -EINVAL;
	else
		damon_registered_ops[ops->id] = *ops;
	mutex_unlock(&damon_ops_lock);
	return err;
}

/**
 * damon_select_ops() - Select a monitoring operations to use with the context.
 * @ctx:	monitoring context to use the operations.
 * @id:		id of the registered monitoring operations to select.
 *
 * This function finds the registered monitoring operations set of @id and
 * makes @ctx use it.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int damon_select_ops(struct damon_ctx *ctx, enum damon_ops_id id)
{
	int err = 0;

	if (id >= NR_DAMON_OPS)
		return -EINVAL;

	mutex_lock(&damon_ops_lock);
	if (!__damon_is_registered_ops(id))
		err = -EINVAL;
	else
		ctx->ops = damon_registered_ops[id];
	mutex_unlock(&damon_ops_lock);
	return err;
}
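
/*
 * Example (editorial sketch, not part of this file): a minimal flow for
 * registering an operations set and binding it to a context.  The callback
 * set below is a hypothetical stand-in for a real implementation such as
 * mm/damon/vaddr.c.
 *
 *	static struct damon_operations my_ops = {
 *		.id = DAMON_OPS_VADDR,
 *		// .init, .update, .prepare_access_checks, ... as needed
 *	};
 *
 *	static int my_setup(struct damon_ctx *ctx)
 *	{
 *		int err = damon_register_ops(&my_ops);
 *
 *		if (err)	// -EINVAL if the id is invalid or taken
 *			return err;
 *		return damon_select_ops(ctx, DAMON_OPS_VADDR);
 *	}
 */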

#ifdef CONFIG_DAMON_DEBUG_SANITY
static void damon_verify_new_region(unsigned long start, unsigned long end)
{
	WARN_ONCE(start >= end, "start %lu >= end %lu\n", start, end);
}
#else
static void damon_verify_new_region(unsigned long start, unsigned long end)
{
}
#endif

/*
 * Construct a damon_region struct
 *
 * Returns the pointer to the new struct on success, or NULL otherwise
 */
struct damon_region *damon_new_region(unsigned long start, unsigned long end)
{
	struct damon_region *region;

	damon_verify_new_region(start, end);
	region = kmem_cache_alloc(damon_region_cache, GFP_KERNEL);
	if (!region)
		return NULL;

	region->ar.start = start;
	region->ar.end = end;
	region->nr_accesses = 0;
	region->nr_accesses_bp = 0;
	INIT_LIST_HEAD(&region->list);

	region->age = 0;
	region->last_nr_accesses = 0;

	return region;
}

void damon_add_region(struct damon_region *r, struct damon_target *t)
{
	list_add_tail(&r->list, &t->regions_list);
	t->nr_regions++;
}

#ifdef CONFIG_DAMON_DEBUG_SANITY
static void damon_verify_del_region(struct damon_target *t)
{
	WARN_ONCE(t->nr_regions == 0, "t->nr_regions == 0\n");
}
#else
static void damon_verify_del_region(struct damon_target *t)
{
}
#endif

static void damon_del_region(struct damon_region *r, struct damon_target *t)
{
	damon_verify_del_region(t);

	list_del(&r->list);
	t->nr_regions--;
}

static void damon_free_region(struct damon_region *r)
{
	kmem_cache_free(damon_region_cache, r);
}

void damon_destroy_region(struct damon_region *r, struct damon_target *t)
{
	damon_del_region(r, t);
	damon_free_region(r);
}

static bool damon_is_last_region(struct damon_region *r,
		struct damon_target *t)
{
	return list_is_last(&r->list, &t->regions_list);
}

/*
 * Check whether a region intersects an address range
 *
 * Returns true if it does.
 */
static bool damon_intersect(struct damon_region *r,
		struct damon_addr_range *re)
{
	return !(r->ar.end <= re->start || re->end <= r->ar.start);
}

/*
 * Fill holes in regions with new regions.
 */
static int damon_fill_regions_holes(struct damon_region *first,
		struct damon_region *last, struct damon_target *t)
{
	struct damon_region *r = first;

	damon_for_each_region_from(r, t) {
		struct damon_region *next, *newr;

		if (r == last)
			break;
		next = damon_next_region(r);
		if (r->ar.end != next->ar.start) {
			newr = damon_new_region(r->ar.end, next->ar.start);
			if (!newr)
				return -ENOMEM;
			damon_insert_region(newr, r, next, t);
		}
	}
	return 0;
}

/*
 * damon_set_regions() - Set regions of a target for given address ranges.
 * @t:		the given target.
 * @ranges:	array of new monitoring target ranges.
 * @nr_ranges:	length of @ranges.
 * @min_region_sz:	minimum region size.
 *
 * This function adds new regions to, or modifies existing regions of, a
 * monitoring target so that they fit in the given ranges.
 *
 * Return: 0 on success, or negative error code otherwise.
 */
int damon_set_regions(struct damon_target *t, struct damon_addr_range *ranges,
		unsigned int nr_ranges, unsigned long min_region_sz)
{
	struct damon_region *r, *next;
	unsigned int i;
	int err;

	/* Remove regions which are not in the new ranges */
	damon_for_each_region_safe(r, next, t) {
		for (i = 0; i < nr_ranges; i++) {
			if (damon_intersect(r, &ranges[i]))
				break;
		}
		if (i == nr_ranges)
			damon_destroy_region(r, t);
	}

	r = damon_first_region(t);
	/* Add new regions or resize existing regions to fit in the ranges */
	for (i = 0; i < nr_ranges; i++) {
		struct damon_region *first = NULL, *last, *newr;
		struct damon_addr_range *range;

		range = &ranges[i];
		/* Get the first/last regions intersecting with the range */
		damon_for_each_region_from(r, t) {
			if (damon_intersect(r, range)) {
				if (!first)
					first = r;
				last = r;
			}
			if (r->ar.start >= range->end)
				break;
		}
		if (!first) {
			/* no region intersects with this range */
			newr = damon_new_region(
					ALIGN_DOWN(range->start,
						min_region_sz),
					ALIGN(range->end, min_region_sz));
			if (!newr)
				return -ENOMEM;
			damon_insert_region(newr, damon_prev_region(r), r, t);
		} else {
			/* resize intersecting regions to fit in this range */
			first->ar.start = ALIGN_DOWN(range->start,
					min_region_sz);
			last->ar.end = ALIGN(range->end, min_region_sz);

			/* fill possible holes in the range */
			err = damon_fill_regions_holes(first, last, t);
			if (err)
				return err;
		}
	}
	return 0;
}
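
/*
 * Example (editorial sketch, not part of this file): asking a target to
 * monitor two address ranges.  The addresses are arbitrary illustrations,
 * and DAMON_MIN_REGION_SZ is used as the minimum region size, matching the
 * damon_new_ctx() default below.
 *
 *	struct damon_addr_range ranges[] = {
 *		{ .start = 0x100000, .end = 0x200000 },
 *		{ .start = 0x400000, .end = 0x500000 },
 *	};
 *	int err = damon_set_regions(t, ranges, ARRAY_SIZE(ranges),
 *			DAMON_MIN_REGION_SZ);
 */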

struct damos_filter *damos_new_filter(enum damos_filter_type type,
		bool matching, bool allow)
{
	struct damos_filter *filter;

	filter = kmalloc_obj(*filter);
	if (!filter)
		return NULL;
	filter->type = type;
	filter->matching = matching;
	filter->allow = allow;
	INIT_LIST_HEAD(&filter->list);
	return filter;
}

/**
 * damos_filter_for_ops() - Return if the filter is an ops-handled one.
 * @type:	type of the filter.
 *
 * Return: true if the filter of @type needs to be handled by ops layer, false
 * otherwise.
 */
bool damos_filter_for_ops(enum damos_filter_type type)
{
	switch (type) {
	case DAMOS_FILTER_TYPE_ADDR:
	case DAMOS_FILTER_TYPE_TARGET:
		return false;
	default:
		break;
	}
	return true;
}

void damos_add_filter(struct damos *s, struct damos_filter *f)
{
	if (damos_filter_for_ops(f->type))
		list_add_tail(&f->list, &s->ops_filters);
	else
		list_add_tail(&f->list, &s->core_filters);
}
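
/*
 * Example (editorial sketch, not part of this file): an address-range filter
 * is core-handled (damos_filter_for_ops() returns false for
 * DAMOS_FILTER_TYPE_ADDR), so damos_add_filter() puts it on
 * &damos->core_filters.  The range values are arbitrary.
 *
 *	struct damos_filter *f = damos_new_filter(DAMOS_FILTER_TYPE_ADDR,
 *			true, false);	// reject matching regions
 *	if (f) {
 *		f->addr_range = (struct damon_addr_range){
 *			.start = 0x100000, .end = 0x200000 };
 *		damos_add_filter(scheme, f);
 *	}
 */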

static void damos_del_filter(struct damos_filter *f)
{
	list_del(&f->list);
}

static void damos_free_filter(struct damos_filter *f)
{
	kfree(f);
}

void damos_destroy_filter(struct damos_filter *f)
{
	damos_del_filter(f);
	damos_free_filter(f);
}

struct damos_quota_goal *damos_new_quota_goal(
		enum damos_quota_goal_metric metric,
		unsigned long target_value)
{
	struct damos_quota_goal *goal;

	goal = kmalloc_obj(*goal);
	if (!goal)
		return NULL;
	goal->metric = metric;
	goal->target_value = target_value;
	INIT_LIST_HEAD(&goal->list);
	return goal;
}

void damos_add_quota_goal(struct damos_quota *q, struct damos_quota_goal *g)
{
	list_add_tail(&g->list, &q->goals);
}

static void damos_del_quota_goal(struct damos_quota_goal *g)
{
	list_del(&g->list);
}

static void damos_free_quota_goal(struct damos_quota_goal *g)
{
	kfree(g);
}

void damos_destroy_quota_goal(struct damos_quota_goal *g)
{
	damos_del_quota_goal(g);
	damos_free_quota_goal(g);
}

/* initialize fields of @quota that normally API users wouldn't set */
static struct damos_quota *damos_quota_init(struct damos_quota *quota)
{
	quota->esz = 0;
	quota->total_charged_sz = 0;
	quota->total_charged_ns = 0;
	quota->charged_sz = 0;
	quota->charged_from = 0;
	quota->charge_target_from = NULL;
	quota->charge_addr_from = 0;
	quota->esz_bp = 0;
	return quota;
}

struct damos *damon_new_scheme(struct damos_access_pattern *pattern,
			enum damos_action action,
			unsigned long apply_interval_us,
			struct damos_quota *quota,
			struct damos_watermarks *wmarks,
			int target_nid)
{
	struct damos *scheme;

	scheme = kmalloc_obj(*scheme);
	if (!scheme)
		return NULL;
	scheme->pattern = *pattern;
	scheme->action = action;
	scheme->apply_interval_us = apply_interval_us;
	/*
	 * next_apply_sis will be set when kdamond starts.  While kdamond is
	 * running, it will also be updated when the scheme is added to the
	 * DAMON context, or when damon_attrs are updated.
	 */
	scheme->next_apply_sis = 0;
	scheme->walk_completed = false;
	INIT_LIST_HEAD(&scheme->core_filters);
	INIT_LIST_HEAD(&scheme->ops_filters);
	scheme->stat = (struct damos_stat){};
	scheme->max_nr_snapshots = 0;
	INIT_LIST_HEAD(&scheme->list);

	scheme->quota = *(damos_quota_init(quota));
	/* quota.goals should be separately set by caller */
	INIT_LIST_HEAD(&scheme->quota.goals);

	scheme->wmarks = *wmarks;
	scheme->wmarks.activated = true;

	scheme->migrate_dests = (struct damos_migrate_dests){};
	scheme->target_nid = target_nid;

	return scheme;
}
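
/*
 * Example (editorial sketch, not part of this file): building a scheme that
 * would pageout cold regions.  The pattern, quota, and watermark numbers are
 * arbitrary illustrations, not recommendations.
 *
 *	struct damos_access_pattern pattern = {
 *		.min_sz_region = 0, .max_sz_region = ULONG_MAX,
 *		.min_nr_accesses = 0, .max_nr_accesses = 0,
 *		.min_age_region = 10, .max_age_region = UINT_MAX,
 *	};
 *	struct damos_quota quota = { .reset_interval = 1000, .sz = SZ_256M };
 *	struct damos_watermarks wmarks = { .metric = DAMOS_WMARK_NONE };
 *	struct damos *s = damon_new_scheme(&pattern, DAMOS_PAGEOUT, 0,
 *			&quota, &wmarks, NUMA_NO_NODE);
 *
 * Note that quota.goals would still need damos_add_quota_goal() calls, as
 * the comment in damon_new_scheme() above says.
 */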

static void damos_set_next_apply_sis(struct damos *s, struct damon_ctx *ctx)
{
	unsigned long sample_interval = ctx->attrs.sample_interval ?
		ctx->attrs.sample_interval : 1;
	unsigned long apply_interval = s->apply_interval_us ?
		s->apply_interval_us : ctx->attrs.aggr_interval;

	s->next_apply_sis = ctx->passed_sample_intervals +
		apply_interval / sample_interval;
}

void damon_add_scheme(struct damon_ctx *ctx, struct damos *s)
{
	list_add_tail(&s->list, &ctx->schemes);
	damos_set_next_apply_sis(s, ctx);
}

static void damon_del_scheme(struct damos *s)
{
	list_del(&s->list);
}

static void damon_free_scheme(struct damos *s)
{
	kfree(s);
}

void damon_destroy_scheme(struct damos *s)
{
	struct damos_quota_goal *g, *g_next;
	struct damos_filter *f, *next;

	damos_for_each_quota_goal_safe(g, g_next, &s->quota)
		damos_destroy_quota_goal(g);

	damos_for_each_core_filter_safe(f, next, s)
		damos_destroy_filter(f);

	damos_for_each_ops_filter_safe(f, next, s)
		damos_destroy_filter(f);

	kfree(s->migrate_dests.node_id_arr);
	kfree(s->migrate_dests.weight_arr);
	damon_del_scheme(s);
	damon_free_scheme(s);
}

/*
 * Construct a damon_target struct
 *
 * Returns the pointer to the new struct on success, or NULL otherwise
 */
struct damon_target *damon_new_target(void)
{
	struct damon_target *t;

	t = kmalloc_obj(*t);
	if (!t)
		return NULL;

	t->pid = NULL;
	t->nr_regions = 0;
	INIT_LIST_HEAD(&t->regions_list);
	INIT_LIST_HEAD(&t->list);
	t->obsolete = false;

	return t;
}

void damon_add_target(struct damon_ctx *ctx, struct damon_target *t)
{
	list_add_tail(&t->list, &ctx->adaptive_targets);
}

bool damon_targets_empty(struct damon_ctx *ctx)
{
	return list_empty(&ctx->adaptive_targets);
}

static void damon_del_target(struct damon_target *t)
{
	list_del(&t->list);
}

void damon_free_target(struct damon_target *t)
{
	struct damon_region *r, *next;

	damon_for_each_region_safe(r, next, t)
		damon_free_region(r);
	kfree(t);
}

void damon_destroy_target(struct damon_target *t, struct damon_ctx *ctx)
{
	if (ctx && ctx->ops.cleanup_target)
		ctx->ops.cleanup_target(t);

	damon_del_target(t);
	damon_free_target(t);
}

#ifdef CONFIG_DAMON_DEBUG_SANITY
static void damon_verify_nr_regions(struct damon_target *t)
{
	struct damon_region *r;
	unsigned int count = 0;

	damon_for_each_region(r, t)
		count++;
	WARN_ONCE(count != t->nr_regions, "t->nr_regions (%u) != count (%u)\n",
			t->nr_regions, count);
}
#else
static void damon_verify_nr_regions(struct damon_target *t)
{
}
#endif

unsigned int damon_nr_regions(struct damon_target *t)
{
	damon_verify_nr_regions(t);

	return t->nr_regions;
}

struct damon_ctx *damon_new_ctx(void)
{
	struct damon_ctx *ctx;

	ctx = kzalloc_obj(*ctx);
	if (!ctx)
		return NULL;

	init_completion(&ctx->kdamond_started);

	ctx->attrs.sample_interval = 5 * 1000;
	ctx->attrs.aggr_interval = 100 * 1000;
	ctx->attrs.ops_update_interval = 60 * 1000 * 1000;

	ctx->passed_sample_intervals = 0;
	/* These will be set from kdamond_init_ctx() */
	ctx->next_aggregation_sis = 0;
	ctx->next_ops_update_sis = 0;

	mutex_init(&ctx->kdamond_lock);
	INIT_LIST_HEAD(&ctx->call_controls);
	mutex_init(&ctx->call_controls_lock);
	mutex_init(&ctx->walk_control_lock);

	ctx->attrs.min_nr_regions = 10;
	ctx->attrs.max_nr_regions = 1000;

	ctx->addr_unit = 1;
	ctx->min_region_sz = DAMON_MIN_REGION_SZ;

	INIT_LIST_HEAD(&ctx->adaptive_targets);
	INIT_LIST_HEAD(&ctx->schemes);

	return ctx;
}

static void damon_destroy_targets(struct damon_ctx *ctx)
{
	struct damon_target *t, *next_t;

	damon_for_each_target_safe(t, next_t, ctx)
		damon_destroy_target(t, ctx);
}

void damon_destroy_ctx(struct damon_ctx *ctx)
{
	struct damos *s, *next_s;

	damon_destroy_targets(ctx);

	damon_for_each_scheme_safe(s, next_s, ctx)
		damon_destroy_scheme(s);

	kfree(ctx);
}

static bool damon_attrs_equals(const struct damon_attrs *attrs1,
		const struct damon_attrs *attrs2)
{
	const struct damon_intervals_goal *ig1 = &attrs1->intervals_goal;
	const struct damon_intervals_goal *ig2 = &attrs2->intervals_goal;

	return attrs1->sample_interval == attrs2->sample_interval &&
		attrs1->aggr_interval == attrs2->aggr_interval &&
		attrs1->ops_update_interval == attrs2->ops_update_interval &&
		attrs1->min_nr_regions == attrs2->min_nr_regions &&
		attrs1->max_nr_regions == attrs2->max_nr_regions &&
		ig1->access_bp == ig2->access_bp &&
		ig1->aggrs == ig2->aggrs &&
		ig1->min_sample_us == ig2->min_sample_us &&
		ig1->max_sample_us == ig2->max_sample_us;
}

static unsigned int damon_age_for_new_attrs(unsigned int age,
		struct damon_attrs *old_attrs, struct damon_attrs *new_attrs)
{
	return age * old_attrs->aggr_interval / new_attrs->aggr_interval;
}

/* convert access ratio in bp (per 10,000) to nr_accesses */
static unsigned int damon_accesses_bp_to_nr_accesses(
		unsigned int accesses_bp, struct damon_attrs *attrs)
{
	return accesses_bp * damon_max_nr_accesses(attrs) / 10000;
}

/*
 * Convert nr_accesses to access ratio in bp (per 10,000).
 *
 * Callers should ensure attrs.aggr_interval is not zero, like
 * damon_update_monitoring_results() does.  Otherwise, a divide-by-zero could
 * happen.
 */
static unsigned int damon_nr_accesses_to_accesses_bp(
		unsigned int nr_accesses, struct damon_attrs *attrs)
{
	return nr_accesses * 10000 / damon_max_nr_accesses(attrs);
}

static unsigned int damon_nr_accesses_for_new_attrs(unsigned int nr_accesses,
		struct damon_attrs *old_attrs, struct damon_attrs *new_attrs)
{
	return damon_accesses_bp_to_nr_accesses(
			damon_nr_accesses_to_accesses_bp(
				nr_accesses, old_attrs),
			new_attrs);
}
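
/*
 * Worked example (editorial): suppose the old attrs give a maximum of 20
 * accesses per aggregation interval (e.g., 100ms aggregation with 5ms
 * sampling) and the new attrs give a maximum of 10 (e.g., 100ms aggregation
 * with 10ms sampling).  An nr_accesses of 10 then converts as
 *
 *	10 * 10000 / 20 = 5000 bp  ->  5000 * 10 / 10000 = 5 accesses,
 *
 * i.e., the access ratio (50% of the maximum) is preserved across the
 * attrs change.
 */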

static void damon_update_monitoring_result(struct damon_region *r,
		struct damon_attrs *old_attrs, struct damon_attrs *new_attrs,
		bool aggregating)
{
	if (!aggregating) {
		r->nr_accesses = damon_nr_accesses_for_new_attrs(
				r->nr_accesses, old_attrs, new_attrs);
		r->nr_accesses_bp = r->nr_accesses * 10000;
	} else {
		/*
		 * If this is called in the middle of an aggregation, reset
		 * the aggregations we made so far for this aggregation
		 * interval.  In other words, make the status as if
		 * kdamond_reset_aggregated() was just called.
		 */
		r->last_nr_accesses = damon_nr_accesses_for_new_attrs(
				r->last_nr_accesses, old_attrs, new_attrs);
		r->nr_accesses_bp = r->last_nr_accesses * 10000;
		r->nr_accesses = 0;
	}
	r->age = damon_age_for_new_attrs(r->age, old_attrs, new_attrs);
}

/*
 * region->nr_accesses is the number of sampling intervals in the last
 * aggregation interval in which accesses to the region were found, and
 * region->age is the number of aggregation intervals for which its access
 * pattern has been maintained.  For this reason, the real meaning of the two
 * fields depends on the current sampling interval and aggregation interval.
 * This function updates ->nr_accesses and ->age of the given damon_ctx's
 * regions for new damon_attrs.
 */
static void damon_update_monitoring_results(struct damon_ctx *ctx,
		struct damon_attrs *new_attrs, bool aggregating)
{
	struct damon_attrs *old_attrs = &ctx->attrs;
	struct damon_target *t;
	struct damon_region *r;

	/* if any interval is zero, simply skip the conversion */
	if (!old_attrs->sample_interval || !old_attrs->aggr_interval ||
			!new_attrs->sample_interval ||
			!new_attrs->aggr_interval)
		return;

	damon_for_each_target(t, ctx)
		damon_for_each_region(r, t)
			damon_update_monitoring_result(
					r, old_attrs, new_attrs, aggregating);
}

/*
 * damon_valid_intervals_goal() - return whether the intervals goal of @attrs
 * is valid.
 */
static bool damon_valid_intervals_goal(struct damon_attrs *attrs)
{
	struct damon_intervals_goal *goal = &attrs->intervals_goal;

	/* tuning is disabled */
	if (!goal->aggrs)
		return true;
	if (goal->min_sample_us > goal->max_sample_us)
		return false;
	if (attrs->sample_interval < goal->min_sample_us ||
			goal->max_sample_us < attrs->sample_interval)
		return false;
	return true;
}

/**
 * damon_set_attrs() - Set attributes for the monitoring.
 * @ctx:		monitoring context
 * @attrs:		monitoring attributes
 *
 * This function should be called while the kdamond is not running, or while
 * an access check results aggregation is not ongoing (e.g., from
 * damon_call()).
 *
 * Every time interval is in micro-seconds.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int damon_set_attrs(struct damon_ctx *ctx, struct damon_attrs *attrs)
{
	unsigned long sample_interval = attrs->sample_interval ?
		attrs->sample_interval : 1;
	struct damos *s;
	bool aggregating = ctx->passed_sample_intervals <
		ctx->next_aggregation_sis;

	if (!damon_valid_intervals_goal(attrs))
		return -EINVAL;

	if (attrs->min_nr_regions < 3)
		return -EINVAL;
	if (attrs->min_nr_regions > attrs->max_nr_regions)
		return -EINVAL;
	if (attrs->sample_interval > attrs->aggr_interval)
		return -EINVAL;

	/* calls from outside of the core don't set this. */
	if (!attrs->aggr_samples)
		attrs->aggr_samples = attrs->aggr_interval / sample_interval;

	ctx->next_aggregation_sis = ctx->passed_sample_intervals +
		attrs->aggr_interval / sample_interval;
	ctx->next_ops_update_sis = ctx->passed_sample_intervals +
		attrs->ops_update_interval / sample_interval;

	damon_update_monitoring_results(ctx, attrs, aggregating);
	ctx->attrs = *attrs;

	damon_for_each_scheme(s, ctx)
		damos_set_next_apply_sis(s, ctx);

	return 0;
}
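
/*
 * Example (editorial sketch, not part of this file): switching a context to
 * 10ms sampling and 200ms aggregation.  Remember every interval is in
 * micro-seconds, and this must run from a parameters-update safe context
 * such as a damon_call() callback.  Copying ctx->attrs first keeps the
 * already-validated region limits.
 *
 *	struct damon_attrs attrs = ctx->attrs;
 *
 *	attrs.sample_interval = 10 * 1000;
 *	attrs.aggr_interval = 200 * 1000;
 *	err = damon_set_attrs(ctx, &attrs);	// -EINVAL on bad combination
 */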

/**
 * damon_set_schemes() - Set data access monitoring based operation schemes.
 * @ctx:	monitoring context
 * @schemes:	array of the schemes
 * @nr_schemes:	number of entries in @schemes
 *
 * This function should not be called while the kdamond of the context is
 * running.
 */
void damon_set_schemes(struct damon_ctx *ctx, struct damos **schemes,
			ssize_t nr_schemes)
{
	struct damos *s, *next;
	ssize_t i;

	damon_for_each_scheme_safe(s, next, ctx)
		damon_destroy_scheme(s);
	for (i = 0; i < nr_schemes; i++)
		damon_add_scheme(ctx, schemes[i]);
}

static struct damos_quota_goal *damos_nth_quota_goal(
		int n, struct damos_quota *q)
{
	struct damos_quota_goal *goal;
	int i = 0;

	damos_for_each_quota_goal(goal, q) {
		if (i++ == n)
			return goal;
	}
	return NULL;
}

static void damos_commit_quota_goal_union(
		struct damos_quota_goal *dst, struct damos_quota_goal *src)
{
	switch (dst->metric) {
	case DAMOS_QUOTA_NODE_MEM_USED_BP:
	case DAMOS_QUOTA_NODE_MEM_FREE_BP:
		dst->nid = src->nid;
		break;
	case DAMOS_QUOTA_NODE_MEMCG_USED_BP:
	case DAMOS_QUOTA_NODE_MEMCG_FREE_BP:
		dst->nid = src->nid;
		dst->memcg_id = src->memcg_id;
		break;
	default:
		break;
	}
}

static void damos_commit_quota_goal(
		struct damos_quota_goal *dst, struct damos_quota_goal *src)
{
	dst->metric = src->metric;
	dst->target_value = src->target_value;
	if (dst->metric == DAMOS_QUOTA_USER_INPUT)
		dst->current_value = src->current_value;
	/* keep last_psi_total as is, since it will be updated in next cycle */
	damos_commit_quota_goal_union(dst, src);
}

/**
 * damos_commit_quota_goals() - Commit DAMOS quota goals to another quota.
 * @dst:	The commit destination DAMOS quota.
 * @src:	The commit source DAMOS quota.
 *
 * Copies user-specified parameters for quota goals from @src to @dst.  Users
 * should use this function for updating quota goals-level parameters of a
 * running DAMON context, instead of manual in-place updates.
 *
 * This function should be called from parameters-update safe context, like
 * damon_call().
 *
 * Return: 0 on success, negative error code otherwise.
 */
int damos_commit_quota_goals(struct damos_quota *dst, struct damos_quota *src)
{
	struct damos_quota_goal *dst_goal, *next, *src_goal, *new_goal;
	int i = 0, j = 0;

	damos_for_each_quota_goal_safe(dst_goal, next, dst) {
		src_goal = damos_nth_quota_goal(i++, src);
		if (src_goal)
			damos_commit_quota_goal(dst_goal, src_goal);
		else
			damos_destroy_quota_goal(dst_goal);
	}
	damos_for_each_quota_goal_safe(src_goal, next, src) {
		if (j++ < i)
			continue;
		new_goal = damos_new_quota_goal(
				src_goal->metric, src_goal->target_value);
		if (!new_goal)
			return -ENOMEM;
		damos_commit_quota_goal(new_goal, src_goal);
		damos_add_quota_goal(dst, new_goal);
	}
	return 0;
}

static int damos_commit_quota(struct damos_quota *dst, struct damos_quota *src)
{
	int err;

	dst->reset_interval = src->reset_interval;
	dst->ms = src->ms;
	dst->sz = src->sz;
	err = damos_commit_quota_goals(dst, src);
	if (err)
		return err;
	dst->weight_sz = src->weight_sz;
	dst->weight_nr_accesses = src->weight_nr_accesses;
	dst->weight_age = src->weight_age;
	return 0;
}

static struct damos_filter *damos_nth_core_filter(int n, struct damos *s)
{
	struct damos_filter *filter;
	int i = 0;

	damos_for_each_core_filter(filter, s) {
		if (i++ == n)
			return filter;
	}
	return NULL;
}

static struct damos_filter *damos_nth_ops_filter(int n, struct damos *s)
{
	struct damos_filter *filter;
	int i = 0;

	damos_for_each_ops_filter(filter, s) {
		if (i++ == n)
			return filter;
	}
	return NULL;
}

static void damos_commit_filter_arg(
		struct damos_filter *dst, struct damos_filter *src)
{
	switch (dst->type) {
	case DAMOS_FILTER_TYPE_MEMCG:
		dst->memcg_id = src->memcg_id;
		break;
	case DAMOS_FILTER_TYPE_ADDR:
		dst->addr_range = src->addr_range;
		break;
	case DAMOS_FILTER_TYPE_TARGET:
		dst->target_idx = src->target_idx;
		break;
	case DAMOS_FILTER_TYPE_HUGEPAGE_SIZE:
		dst->sz_range = src->sz_range;
		break;
	default:
		break;
	}
}

static void damos_commit_filter(
		struct damos_filter *dst, struct damos_filter *src)
{
	dst->type = src->type;
	dst->matching = src->matching;
	dst->allow = src->allow;
	damos_commit_filter_arg(dst, src);
}

static int damos_commit_core_filters(struct damos *dst, struct damos *src)
{
	struct damos_filter *dst_filter, *next, *src_filter, *new_filter;
	int i = 0, j = 0;

	damos_for_each_core_filter_safe(dst_filter, next, dst) {
		src_filter = damos_nth_core_filter(i++, src);
		if (src_filter)
			damos_commit_filter(dst_filter, src_filter);
		else
			damos_destroy_filter(dst_filter);
	}

	damos_for_each_core_filter_safe(src_filter, next, src) {
		if (j++ < i)
			continue;

		new_filter = damos_new_filter(
				src_filter->type, src_filter->matching,
				src_filter->allow);
		if (!new_filter)
			return -ENOMEM;
		damos_commit_filter_arg(new_filter, src_filter);
		damos_add_filter(dst, new_filter);
	}
	return 0;
}

static int damos_commit_ops_filters(struct damos *dst, struct damos *src)
{
	struct damos_filter *dst_filter, *next, *src_filter, *new_filter;
	int i = 0, j = 0;

	damos_for_each_ops_filter_safe(dst_filter, next, dst) {
		src_filter = damos_nth_ops_filter(i++, src);
		if (src_filter)
			damos_commit_filter(dst_filter, src_filter);
		else
			damos_destroy_filter(dst_filter);
	}

	damos_for_each_ops_filter_safe(src_filter, next, src) {
		if (j++ < i)
			continue;

		new_filter = damos_new_filter(
				src_filter->type, src_filter->matching,
				src_filter->allow);
		if (!new_filter)
			return -ENOMEM;
		damos_commit_filter_arg(new_filter, src_filter);
		damos_add_filter(dst, new_filter);
	}
	return 0;
}

/**
 * damos_filters_default_reject() - decide whether to reject memory that did
 *				    not match any given filter.
 * @filters:	Given DAMOS filters of a group.
 */
static bool damos_filters_default_reject(struct list_head *filters)
{
	struct damos_filter *last_filter;

	if (list_empty(filters))
		return false;
	last_filter = list_last_entry(filters, struct damos_filter, list);
	return last_filter->allow;
}

static void damos_set_filters_default_reject(struct damos *s)
{
	if (!list_empty(&s->ops_filters))
		s->core_filters_default_reject = false;
	else
		s->core_filters_default_reject =
			damos_filters_default_reject(&s->core_filters);
	s->ops_filters_default_reject =
		damos_filters_default_reject(&s->ops_filters);
}

static int damos_commit_dests(struct damos_migrate_dests *dst,
		struct damos_migrate_dests *src)
{
	if (dst->nr_dests != src->nr_dests) {
		kfree(dst->node_id_arr);
		kfree(dst->weight_arr);

		dst->node_id_arr = kmalloc_array(src->nr_dests,
			sizeof(*dst->node_id_arr), GFP_KERNEL);
		if (!dst->node_id_arr) {
			dst->weight_arr = NULL;
			return -ENOMEM;
		}

		dst->weight_arr = kmalloc_array(src->nr_dests,
			sizeof(*dst->weight_arr), GFP_KERNEL);
		if (!dst->weight_arr) {
			/* ->node_id_arr will be freed by scheme destruction */
			return -ENOMEM;
		}
	}

	dst->nr_dests = src->nr_dests;
	for (int i = 0; i < src->nr_dests; i++) {
		dst->node_id_arr[i] = src->node_id_arr[i];
		dst->weight_arr[i] = src->weight_arr[i];
	}

	return 0;
}

static int damos_commit_filters(struct damos *dst, struct damos *src)
{
	int err;

	err = damos_commit_core_filters(dst, src);
	if (err)
		return err;
	err = damos_commit_ops_filters(dst, src);
	if (err)
		return err;
	damos_set_filters_default_reject(dst);
	return 0;
}

static struct damos *damon_nth_scheme(int n, struct damon_ctx *ctx)
{
	struct damos *s;
	int i = 0;

	damon_for_each_scheme(s, ctx) {
		if (i++ == n)
			return s;
	}
	return NULL;
}

static int damos_commit(struct damos *dst, struct damos *src)
{
	int err;

	dst->pattern = src->pattern;
	dst->action = src->action;
	dst->apply_interval_us = src->apply_interval_us;

	err = damos_commit_quota(&dst->quota, &src->quota);
	if (err)
		return err;

	dst->wmarks = src->wmarks;
	dst->target_nid = src->target_nid;

	err = damos_commit_dests(&dst->migrate_dests, &src->migrate_dests);
	if (err)
		return err;

	err = damos_commit_filters(dst, src);
	if (err)
		return err;

	dst->max_nr_snapshots = src->max_nr_snapshots;
	return 0;
}

static int damon_commit_schemes(struct damon_ctx *dst, struct damon_ctx *src)
{
	struct damos *dst_scheme, *next, *src_scheme, *new_scheme;
	int i = 0, j = 0, err;

	damon_for_each_scheme_safe(dst_scheme, next, dst) {
		src_scheme = damon_nth_scheme(i++, src);
		if (src_scheme) {
			err = damos_commit(dst_scheme, src_scheme);
			if (err)
				return err;
		} else {
			damon_destroy_scheme(dst_scheme);
		}
	}

	damon_for_each_scheme_safe(src_scheme, next, src) {
		if (j++ < i)
			continue;
		new_scheme = damon_new_scheme(&src_scheme->pattern,
				src_scheme->action,
				src_scheme->apply_interval_us,
				&src_scheme->quota, &src_scheme->wmarks,
				NUMA_NO_NODE);
		if (!new_scheme)
			return -ENOMEM;
		err = damos_commit(new_scheme, src_scheme);
		if (err) {
			damon_destroy_scheme(new_scheme);
			return err;
		}
		damon_add_scheme(dst, new_scheme);
	}
	return 0;
}

static struct damon_target *damon_nth_target(int n, struct damon_ctx *ctx)
{
	struct damon_target *t;
	int i = 0;

	damon_for_each_target(t, ctx) {
		if (i++ == n)
			return t;
	}
	return NULL;
}

/*
 * The caller should ensure the regions of @src are
 * 1. valid (end >= start) and
 * 2. sorted by starting address.
 *
 * If @src has no region, @dst keeps current regions.
 */
static int damon_commit_target_regions(struct damon_target *dst,
		struct damon_target *src, unsigned long src_min_region_sz)
{
	struct damon_region *src_region;
	struct damon_addr_range *ranges;
	int i = 0, err;

	damon_for_each_region(src_region, src)
		i++;
	if (!i)
		return 0;

	ranges = kmalloc_objs(*ranges, i, GFP_KERNEL | __GFP_NOWARN);
	if (!ranges)
		return -ENOMEM;
	i = 0;
	damon_for_each_region(src_region, src)
		ranges[i++] = src_region->ar;
	err = damon_set_regions(dst, ranges, i, src_min_region_sz);
	kfree(ranges);
	return err;
}

static int damon_commit_target(
		struct damon_target *dst, bool dst_has_pid,
		struct damon_target *src, bool src_has_pid,
		unsigned long src_min_region_sz)
{
	int err;

	err = damon_commit_target_regions(dst, src, src_min_region_sz);
	if (err)
		return err;
	if (dst_has_pid)
		put_pid(dst->pid);
	if (src_has_pid)
		get_pid(src->pid);
	dst->pid = src->pid;
	return 0;
}

static int damon_commit_targets(
		struct damon_ctx *dst, struct damon_ctx *src)
{
	struct damon_target *dst_target, *next, *src_target, *new_target;
	int i = 0, j = 0, err;

	damon_for_each_target_safe(dst_target, next, dst) {
		src_target = damon_nth_target(i++, src);
		/*
		 * If src target is obsolete, do not commit the parameters to
		 * the dst target, and further remove the dst target.
		 */
		if (src_target && !src_target->obsolete) {
			err = damon_commit_target(
					dst_target, damon_target_has_pid(dst),
					src_target, damon_target_has_pid(src),
					src->min_region_sz);
			if (err)
				return err;
		} else {
			struct damos *s;

			damon_destroy_target(dst_target, dst);
			damon_for_each_scheme(s, dst) {
				if (s->quota.charge_target_from == dst_target) {
					s->quota.charge_target_from = NULL;
					s->quota.charge_addr_from = 0;
				}
			}
		}
	}

	damon_for_each_target_safe(src_target, next, src) {
		if (j++ < i)
			continue;
		/* target to remove has no matching dst */
		if (src_target->obsolete)
			return -EINVAL;
		new_target = damon_new_target();
		if (!new_target)
			return -ENOMEM;
		err = damon_commit_target(new_target, false,
				src_target, damon_target_has_pid(src),
				src->min_region_sz);
		if (err) {
			damon_destroy_target(new_target, NULL);
			return err;
		}
		damon_add_target(dst, new_target);
	}
	return 0;
}

/**
 * damon_commit_ctx() - Commit parameters of a DAMON context to another.
 * @dst:	The commit destination DAMON context.
 * @src:	The commit source DAMON context.
 *
 * This function copies user-specified parameters from @src to @dst and
 * updates the internal status and results accordingly.  Users should use this
 * function for context-level parameters update of a running context, instead
 * of manual in-place updates.
 *
 * This function should be called from parameters-update safe context, like
 * damon_call().
 *
 * Return: 0 on success, negative error code otherwise.
 */
int damon_commit_ctx(struct damon_ctx *dst, struct damon_ctx *src)
{
	int err;

	dst->maybe_corrupted = true;
	if (!is_power_of_2(src->min_region_sz))
		return -EINVAL;

	err = damon_commit_schemes(dst, src);
	if (err)
		return err;
	err = damon_commit_targets(dst, src);
	if (err)
		return err;
	/*
	 * schemes and targets should be updated first, since
	 * 1. damon_set_attrs() updates monitoring results of targets and
	 * next_apply_sis of schemes, and
	 * 2. ops update should be done after pid handling is done (target
	 *    committing requires putting pids).
	 */
	if (!damon_attrs_equals(&dst->attrs, &src->attrs)) {
		err = damon_set_attrs(dst, &src->attrs);
		if (err)
			return err;
	}
	dst->ops = src->ops;
	dst->addr_unit = src->addr_unit;
	dst->min_region_sz = src->min_region_sz;

	dst->maybe_corrupted = false;
	return 0;
}
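
/*
 * Example (editorial sketch, not part of this file): the common online
 * update flow is building a throw-away @src context carrying the new
 * parameters and committing it to the running one from a damon_call()
 * callback.  The callback and running_ctx names are hypothetical, and the
 * callback signature assumed here follows the &damon_call_control->fn/->data
 * description in the damon_call() kernel-doc below.
 *
 *	static int commit_fn(void *data)
 *	{
 *		struct damon_ctx *src = data;
 *
 *		// runs on the kdamond, so in-place update is safe here
 *		return damon_commit_ctx(running_ctx, src);
 *	}
 */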

/**
 * damon_nr_running_ctxs() - Return number of currently running contexts.
 */
int damon_nr_running_ctxs(void)
{
	int nr_ctxs;

	mutex_lock(&damon_lock);
	nr_ctxs = nr_running_ctxs;
	mutex_unlock(&damon_lock);

	return nr_ctxs;
}

/* Returns the size upper limit for each monitoring region */
static unsigned long damon_region_sz_limit(struct damon_ctx *ctx)
{
	struct damon_target *t;
	struct damon_region *r;
	unsigned long sz = 0;

	damon_for_each_target(t, ctx) {
		damon_for_each_region(r, t)
			sz += damon_sz_region(r);
	}

	if (ctx->attrs.min_nr_regions)
		sz /= ctx->attrs.min_nr_regions;
	if (sz < ctx->min_region_sz)
		sz = ctx->min_region_sz;

	return sz;
}

static void damon_split_region_at(struct damon_target *t,
				  struct damon_region *r, unsigned long sz_r);

/*
 * damon_apply_min_nr_regions() - Make effect of the min_nr_regions parameter.
 * @ctx:	monitoring context.
 *
 * This function implements the min_nr_regions (minimum number of damon_region
 * objects in the given monitoring context) behavior.  It first calculates the
 * maximum size of each region for enforcing min_nr_regions, as the total size
 * of the regions divided by min_nr_regions.  After that, this function splits
 * regions to ensure all regions are equal to or smaller than the size limit.
 * Finally, this function returns the maximum size limit.
 *
 * Returns: maximum size of each region for enforcing min_nr_regions.
 */
static unsigned long damon_apply_min_nr_regions(struct damon_ctx *ctx)
{
	unsigned long max_region_sz = damon_region_sz_limit(ctx);
	struct damon_target *t;
	struct damon_region *r, *next;

	max_region_sz = ALIGN(max_region_sz, ctx->min_region_sz);
	damon_for_each_target(t, ctx) {
		damon_for_each_region_safe(r, next, t) {
			while (damon_sz_region(r) > max_region_sz) {
				damon_split_region_at(t, r, max_region_sz);
				r = damon_next_region(r);
			}
		}
	}
	return max_region_sz;
}

static int kdamond_fn(void *data);

/*
 * __damon_start() - Starts monitoring with given context.
 * @ctx:	monitoring context
 *
 * This function should be called while damon_lock is held.
 *
 * Return: 0 on success, negative error code otherwise.
 */
static int __damon_start(struct damon_ctx *ctx)
{
	int err = -EBUSY;

	mutex_lock(&ctx->kdamond_lock);
	if (!ctx->kdamond) {
		err = 0;
		reinit_completion(&ctx->kdamond_started);
		ctx->kdamond = kthread_run(kdamond_fn, ctx, "kdamond.%d",
				nr_running_ctxs);
		if (IS_ERR(ctx->kdamond)) {
			err = PTR_ERR(ctx->kdamond);
			ctx->kdamond = NULL;
		} else {
			wait_for_completion(&ctx->kdamond_started);
		}
	}
	mutex_unlock(&ctx->kdamond_lock);

	return err;
}

/**
 * damon_start() - Starts monitoring for a given group of contexts.
 * @ctxs:	an array of the pointers for contexts to start monitoring
 * @nr_ctxs:	size of @ctxs
 * @exclusive:	exclusiveness of this contexts group
 *
 * This function starts a group of monitoring threads for a group of
 * monitoring contexts.  One thread per context is created and run in
 * parallel.  The caller should handle synchronization between the threads by
 * itself.  If @exclusive is true and a group of threads created by another
 * 'damon_start()' call is currently running, this function does nothing but
 * returns -EBUSY.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int damon_start(struct damon_ctx **ctxs, int nr_ctxs, bool exclusive)
{
	int i;
	int err = 0;

	mutex_lock(&damon_lock);
	if ((exclusive && nr_running_ctxs) ||
			(!exclusive && running_exclusive_ctxs)) {
		mutex_unlock(&damon_lock);
		return -EBUSY;
	}

	for (i = 0; i < nr_ctxs; i++) {
		err = __damon_start(ctxs[i]);
		if (err)
			break;
		nr_running_ctxs++;
	}
	if (exclusive && nr_running_ctxs)
		running_exclusive_ctxs = true;
	mutex_unlock(&damon_lock);

	return err;
}
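
/*
 * Example (editorial sketch, not part of this file): running a single
 * context exclusively and stopping it later.
 *
 *	struct damon_ctx *ctxs[] = { ctx };
 *
 *	err = damon_start(ctxs, 1, true);	// -EBUSY if others are running
 *	...
 *	err = damon_stop(ctxs, 1);	// see damon_stop() below
 */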

/*
 * __damon_stop() - Stops monitoring of a given context.
 * @ctx:	monitoring context
 *
 * Return: 0 on success, negative error code otherwise.
 */
static int __damon_stop(struct damon_ctx *ctx)
{
	struct task_struct *tsk;

	mutex_lock(&ctx->kdamond_lock);
	tsk = ctx->kdamond;
	if (tsk) {
		get_task_struct(tsk);
		mutex_unlock(&ctx->kdamond_lock);
		kthread_stop_put(tsk);
		return 0;
	}
	mutex_unlock(&ctx->kdamond_lock);

	return -EPERM;
}

/**
 * damon_stop() - Stops monitoring for a given group of contexts.
 * @ctxs:	an array of the pointers for contexts to stop monitoring
 * @nr_ctxs:	size of @ctxs
 *
 * Return: 0 on success, negative error code otherwise.
 */
int damon_stop(struct damon_ctx **ctxs, int nr_ctxs)
{
	int i, err = 0;

	for (i = 0; i < nr_ctxs; i++) {
		/* nr_running_ctxs is decremented in kdamond_fn */
		err = __damon_stop(ctxs[i]);
		if (err)
			break;
	}
	return err;
}

/**
 * damon_is_running() - Returns whether a given DAMON context is running.
 * @ctx:	The DAMON context to see if running.
 *
 * Return: true if @ctx is running, false otherwise.
 */
bool damon_is_running(struct damon_ctx *ctx)
{
	bool running;

	mutex_lock(&ctx->kdamond_lock);
	running = ctx->kdamond != NULL;
	mutex_unlock(&ctx->kdamond_lock);
	return running;
}

/**
 * damon_kdamond_pid() - Return pid of a given DAMON context's worker thread.
 * @ctx:	The DAMON context in question.
 *
 * Return: pid if @ctx is running, negative error code otherwise.
 */
int damon_kdamond_pid(struct damon_ctx *ctx)
{
	int pid = -EINVAL;

	mutex_lock(&ctx->kdamond_lock);
	if (ctx->kdamond)
		pid = ctx->kdamond->pid;
	mutex_unlock(&ctx->kdamond_lock);
	return pid;
}

/*
 * damon_call_handle_inactive_ctx() - handle a DAMON call request that was
 *				      added to an inactive context.
 * @ctx:	The inactive DAMON context.
 * @control:	Control variable of the call request.
 *
 * This function is called in the case that @control was added to @ctx but
 * @ctx is not running (inactive).  It checks whether @ctx handled @control,
 * and cleans up @control if it was not handled.
 *
 * Returns 0 if @control was handled by @ctx, negative error code otherwise.
 */
static int damon_call_handle_inactive_ctx(
		struct damon_ctx *ctx, struct damon_call_control *control)
{
	struct damon_call_control *c;

	mutex_lock(&ctx->call_controls_lock);
	list_for_each_entry(c, &ctx->call_controls, list) {
		if (c == control) {
			list_del(&control->list);
			mutex_unlock(&ctx->call_controls_lock);
			return -EINVAL;
		}
	}
	mutex_unlock(&ctx->call_controls_lock);
	return 0;
}

/**
 * damon_call() - Invoke a given function on DAMON worker thread (kdamond).
 * @ctx:	DAMON context to call the function for.
 * @control:	Control variable of the call request.
 *
 * Ask DAMON worker thread (kdamond) of @ctx to call a function with an
 * argument data that are respectively passed via &damon_call_control->fn and
 * &damon_call_control->data of @control.  If &damon_call_control->repeat of
 * @control is unset, further wait until the kdamond finishes handling of the
 * request.  Otherwise, return as soon as the request is made.
 *
 * The kdamond executes the function with the argument in the main loop, just
 * after a sampling of the iteration is finished.  The function can hence
 * safely access the internal data of the &struct damon_ctx without additional
 * synchronization.  The return value of the function will be saved in
 * &damon_call_control->return_code.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int damon_call(struct damon_ctx *ctx, struct damon_call_control *control)
{
	if (!control->repeat)
		init_completion(&control->completion);
	control->canceled = false;
	INIT_LIST_HEAD(&control->list);

	mutex_lock(&ctx->call_controls_lock);
	list_add_tail(&control->list, &ctx->call_controls);
	mutex_unlock(&ctx->call_controls_lock);
	if (!damon_is_running(ctx))
		return damon_call_handle_inactive_ctx(ctx, control);
	if (control->repeat)
		return 0;
	wait_for_completion(&control->completion);
	if (control->canceled)
		return -ECANCELED;
	return 0;
}
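
/*
 * Example (editorial sketch, not part of this file): synchronously reading
 * kdamond-private state.  The callback name is hypothetical, and its
 * signature assumed here follows the ->fn/->data description in the
 * damon_call() kernel-doc above.
 *
 *	static int read_state(void *data)
 *	{
 *		// runs on the kdamond; safe to walk targets and regions
 *		return 0;
 *	}
 *
 *	struct damon_call_control control = {
 *		.fn = read_state, .data = ctx, .repeat = false,
 *	};
 *	err = damon_call(ctx, &control);	// waits for completion
 */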

/**
 * damos_walk() - Invoke a given function for regions that DAMOS walks over.
 * @ctx:	DAMON context to call the function for.
 * @control:	Control variable of the walk request.
 *
 * Ask DAMON worker thread (kdamond) of @ctx to call a function for each
 * region that the kdamond will apply DAMOS action to, and wait until the
 * kdamond finishes handling of the request.
 *
 * The kdamond executes the given function in the main loop, for each region
 * just after it applied any DAMOS actions of @ctx to it.  The invocation is
 * made only within one &damos->apply_interval_us since damos_walk()
 * invocation, for each scheme.  The given callback function can hence safely
 * access the internal data of &struct damon_ctx and &struct damon_region that
 * each of the schemes will apply the action to in the next interval, without
 * additional synchronization against the kdamond.  Once every scheme of @ctx
 * has passed at least one &damos->apply_interval_us, kdamond marks the
 * request as completed so that damos_walk() can wake up and return.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int damos_walk(struct damon_ctx *ctx, struct damos_walk_control *control)
{
	init_completion(&control->completion);
	control->canceled = false;
	mutex_lock(&ctx->walk_control_lock);
	if (ctx->walk_control) {
		mutex_unlock(&ctx->walk_control_lock);
		return -EBUSY;
	}
	ctx->walk_control = control;
	mutex_unlock(&ctx->walk_control_lock);
	if (!damon_is_running(ctx)) {
		mutex_lock(&ctx->walk_control_lock);
		if (ctx->walk_control == control)
			ctx->walk_control = NULL;
		mutex_unlock(&ctx->walk_control_lock);
		return -EINVAL;
	}
	wait_for_completion(&control->completion);
	if (control->canceled)
		return -ECANCELED;
	return 0;
}
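
/*
 * Example (editorial sketch, not part of this file): visiting each region
 * that the schemes apply their actions to.  The callback name is
 * hypothetical; its parameters follow the &damos_walk_control->walk_fn
 * invocation in damos_walk_call_walk() below.
 *
 *	static void walk_fn(void *data, struct damon_ctx *ctx,
 *			struct damon_target *t, struct damon_region *r,
 *			struct damos *s, unsigned long sz_filter_passed)
 *	{
 *		// e.g., accumulate per-scheme statistics into data
 *	}
 *
 *	struct damos_walk_control control = { .walk_fn = walk_fn };
 *	err = damos_walk(ctx, &control);	// -EBUSY if a walk is ongoing
 */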

/*
 * Warn about and fix a corrupted ->nr_accesses[_bp], to aid investigation
 * and to prevent the problem from propagating.
 */
static void damon_warn_fix_nr_accesses_corruption(struct damon_region *r)
{
	if (r->nr_accesses_bp == r->nr_accesses * 10000)
		return;
	WARN_ONCE(true, "invalid nr_accesses_bp at reset: %u %u\n",
			r->nr_accesses_bp, r->nr_accesses);
	r->nr_accesses_bp = r->nr_accesses * 10000;
}

#ifdef CONFIG_DAMON_DEBUG_SANITY
static void damon_verify_reset_aggregated(struct damon_region *r,
		struct damon_ctx *c)
{
	WARN_ONCE(r->nr_accesses_bp != r->last_nr_accesses * 10000,
			"nr_accesses_bp %u last_nr_accesses %u sis %lu %lu\n",
			r->nr_accesses_bp, r->last_nr_accesses,
			c->passed_sample_intervals, c->next_aggregation_sis);
}
#else
static void damon_verify_reset_aggregated(struct damon_region *r,
		struct damon_ctx *c)
{
}
#endif

/*
 * Reset the aggregated monitoring results ('nr_accesses' of each region).
 */
static void kdamond_reset_aggregated(struct damon_ctx *c)
{
	struct damon_target *t;
	unsigned int ti = 0;	/* target's index */

	damon_for_each_target(t, c) {
		struct damon_region *r;

		damon_for_each_region(r, t) {
			trace_damon_aggregated(ti, r, damon_nr_regions(t));
			damon_warn_fix_nr_accesses_corruption(r);
			r->last_nr_accesses = r->nr_accesses;
			r->nr_accesses = 0;
			damon_verify_reset_aggregated(r, c);
		}
		ti++;
	}
}

static unsigned long damon_get_intervals_score(struct damon_ctx *c)
{
	struct damon_target *t;
	struct damon_region *r;
	unsigned long sz_region, max_access_events = 0, access_events = 0;
	unsigned long target_access_events;
	unsigned long goal_bp = c->attrs.intervals_goal.access_bp;

	damon_for_each_target(t, c) {
		damon_for_each_region(r, t) {
			sz_region = damon_sz_region(r);
			max_access_events += sz_region * c->attrs.aggr_samples;
			access_events += sz_region * r->nr_accesses;
		}
	}
	target_access_events = max_access_events * goal_bp / 10000;
	target_access_events = target_access_events ? : 1;
	return access_events * 10000 / target_access_events;
}
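
/*
 * Worked example (editorial): a single 1 MiB region with nr_accesses of 2,
 * aggr_samples of 20, and an access_bp goal of 1,000 (10%):
 *
 *	max_access_events = 1M * 20
 *	access_events = 1M * 2
 *	target_access_events = (1M * 20) * 1000 / 10000 = 1M * 2
 *	score = (1M * 2) * 10000 / (1M * 2) = 10000
 *
 * i.e., a score of 10,000 means the observed access events exactly meet the
 * goal; smaller or larger scores mean under- or over-shooting it.
 */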

static unsigned long damon_feed_loop_next_input(unsigned long last_input,
		unsigned long score);

static unsigned long damon_get_intervals_adaptation_bp(struct damon_ctx *c)
{
	unsigned long score_bp, adaptation_bp;

	score_bp = damon_get_intervals_score(c);
	adaptation_bp = damon_feed_loop_next_input(100000000, score_bp) /
		10000;
	/*
	 * adaptation_bp ranges from 1 to 20,000.  Avoid too rapid reduction
	 * of the intervals by rescaling [1, 10,000] to [5,000, 10,000].
	 */
	if (adaptation_bp <= 10000)
		adaptation_bp = 5000 + adaptation_bp / 2;
	return adaptation_bp;
}

static void kdamond_tune_intervals(struct damon_ctx *c)
{
	unsigned long adaptation_bp;
	struct damon_attrs new_attrs;
	struct damon_intervals_goal *goal;

	adaptation_bp = damon_get_intervals_adaptation_bp(c);
	if (adaptation_bp == 10000)
		return;

	new_attrs = c->attrs;
	goal = &c->attrs.intervals_goal;
	new_attrs.sample_interval = min(goal->max_sample_us,
			c->attrs.sample_interval * adaptation_bp / 10000);
	new_attrs.sample_interval = max(goal->min_sample_us,
			new_attrs.sample_interval);
	new_attrs.aggr_interval = new_attrs.sample_interval *
		c->attrs.aggr_samples;
	trace_damon_monitor_intervals_tune(new_attrs.sample_interval);
	damon_set_attrs(c, &new_attrs);
}

static bool __damos_valid_target(struct damon_region *r, struct damos *s)
{
	unsigned long sz;
	unsigned int nr_accesses = r->nr_accesses_bp / 10000;

	sz = damon_sz_region(r);
	return s->pattern.min_sz_region <= sz &&
		sz <= s->pattern.max_sz_region &&
		s->pattern.min_nr_accesses <= nr_accesses &&
		nr_accesses <= s->pattern.max_nr_accesses &&
		s->pattern.min_age_region <= r->age &&
		r->age <= s->pattern.max_age_region;
}

static bool damos_valid_target(struct damon_ctx *c, struct damon_region *r,
		struct damos *s)
{
	bool ret = __damos_valid_target(r, s);

	if (!ret || !s->quota.esz || !c->ops.get_scheme_score)
		return ret;

	return c->ops.get_scheme_score(c, r, s) >= s->quota.min_score;
}
1795 
1796 /*
1797  * damos_skip_charged_region() - Check if the given region or starting part of
1798  * it is already charged for the DAMOS quota.
1799  * @t:	The target of the region.
1800  * @rp:	The pointer to the region.
1801  * @s:	The scheme to be applied.
1802  * @min_region_sz:	minimum region size.
1803  *
1804  * If a quota of a scheme has exceeded in a quota charge window, the scheme's
1805  * action would applied to only a part of the target access pattern fulfilling
1806  * regions.  To avoid applying the scheme action to only already applied
1807  * regions, DAMON skips applying the scheme action to the regions that charged
1808  * in the previous charge window.
1809  *
1810  * This function checks if a given region should be skipped or not for the
1811  * reason.  If only the starting part of the region has previously charged,
1812  * this function splits the region into two so that the second one covers the
1813  * area that not charged in the previous charge widnow, and return true.  The
1814  * caller can see the second one on the next iteration of the region walk.
1815  * Note that this means the caller should use damon_for_each_region() instead
1816  * of damon_for_each_region_safe().  If damon_for_each_region_safe() is used,
1817  * the second region will just be ignored.
1818  *
1819  * Return: true if the region should be skipped, false otherwise.
1820  */
1821 static bool damos_skip_charged_region(struct damon_target *t,
1822 		struct damon_region *r, struct damos *s,
1823 		unsigned long min_region_sz)
1824 {
1825 	struct damos_quota *quota = &s->quota;
1826 	unsigned long sz_to_skip;
1827 
1828 	/* Skip previously charged regions */
1829 	if (quota->charge_target_from) {
1830 		if (t != quota->charge_target_from)
1831 			return true;
1832 		if (r == damon_last_region(t)) {
1833 			quota->charge_target_from = NULL;
1834 			quota->charge_addr_from = 0;
1835 			return true;
1836 		}
1837 		if (quota->charge_addr_from &&
1838 				r->ar.end <= quota->charge_addr_from)
1839 			return true;
1840 
1841 		if (quota->charge_addr_from && r->ar.start <
1842 				quota->charge_addr_from) {
1843 			sz_to_skip = ALIGN_DOWN(quota->charge_addr_from -
1844 					r->ar.start, min_region_sz);
1845 			if (!sz_to_skip) {
1846 				if (damon_sz_region(r) <= min_region_sz)
1847 					return true;
1848 				sz_to_skip = min_region_sz;
1849 			}
1850 			damon_split_region_at(t, r, sz_to_skip);
1851 			return true;
1852 		}
1853 		quota->charge_target_from = NULL;
1854 		quota->charge_addr_from = 0;
1855 	}
1856 	return false;
1857 }
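
/*
 * A minimal usage sketch (illustrative only, not part of DAMON): it mirrors
 * how damon_do_apply_schemes() below walks regions.  Note the use of
 * damon_for_each_region() rather than the _safe variant, so that the second
 * half of a region split by damos_skip_charged_region() is visited on the
 * next iteration, as the comment above requires.
 */
#if 0	/* example only; not compiled */
static void example_skip_charged_walk(struct damon_ctx *c,
		struct damon_target *t, struct damos *s)
{
	struct damon_region *r;

	damon_for_each_region(r, t) {
		if (damos_skip_charged_region(t, r, s, c->min_region_sz))
			continue;
		/* ... apply the scheme action to r ... */
	}
}
#endif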
1858 
1859 static void damos_update_stat(struct damos *s,
1860 		unsigned long sz_tried, unsigned long sz_applied,
1861 		unsigned long sz_ops_filter_passed)
1862 {
1863 	s->stat.nr_tried++;
1864 	s->stat.sz_tried += sz_tried;
1865 	if (sz_applied)
1866 		s->stat.nr_applied++;
1867 	s->stat.sz_applied += sz_applied;
1868 	s->stat.sz_ops_filter_passed += sz_ops_filter_passed;
1869 }
1870 
1871 static bool damos_filter_match(struct damon_ctx *ctx, struct damon_target *t,
1872 		struct damon_region *r, struct damos_filter *filter,
1873 		unsigned long min_region_sz)
1874 {
1875 	bool matched = false;
1876 	struct damon_target *ti;
1877 	int target_idx = 0;
1878 	unsigned long start, end;
1879 
1880 	switch (filter->type) {
1881 	case DAMOS_FILTER_TYPE_TARGET:
1882 		damon_for_each_target(ti, ctx) {
1883 			if (ti == t)
1884 				break;
1885 			target_idx++;
1886 		}
1887 		matched = target_idx == filter->target_idx;
1888 		break;
1889 	case DAMOS_FILTER_TYPE_ADDR:
1890 		start = ALIGN_DOWN(filter->addr_range.start, min_region_sz);
1891 		end = ALIGN_DOWN(filter->addr_range.end, min_region_sz);
1892 
1893 		/* inside the range */
1894 		if (start <= r->ar.start && r->ar.end <= end) {
1895 			matched = true;
1896 			break;
1897 		}
1898 		/* outside of the range */
1899 		if (r->ar.end <= start || end <= r->ar.start) {
1900 			matched = false;
1901 			break;
1902 		}
1903 		/* start before the range and overlap */
1904 		if (r->ar.start < start) {
1905 			damon_split_region_at(t, r, start - r->ar.start);
1906 			matched = false;
1907 			break;
1908 		}
1909 		/* start inside the range */
1910 		damon_split_region_at(t, r, end - r->ar.start);
1911 		matched = true;
1912 		break;
1913 	default:
1914 		return false;
1915 	}
1916 
1917 	return matched == filter->matching;
1918 }
1919 
1920 static bool damos_core_filter_out(struct damon_ctx *ctx, struct damon_target *t,
1921 		struct damon_region *r, struct damos *s)
1922 {
1923 	struct damos_filter *filter;
1924 
1925 	s->core_filters_allowed = false;
1926 	damos_for_each_core_filter(filter, s) {
1927 		if (damos_filter_match(ctx, t, r, filter, ctx->min_region_sz)) {
1928 			if (filter->allow)
1929 				s->core_filters_allowed = true;
1930 			return !filter->allow;
1931 		}
1932 	}
1933 	return s->core_filters_default_reject;
1934 }
1935 
1936 /*
1937  * damos_walk_call_walk() - Call &damos_walk_control->walk_fn.
1938  * @ctx:	The context of &damon_ctx->walk_control.
1939  * @t:		The monitoring target of @r that @s will be applied.
1940  * @r:		The region of @t that @s will be applied.
1941  * @s:		The scheme of @ctx that will be applied to @r.
1942  *
1943  * This function is called from kdamond whenever it asks the operations set to
1944  * apply a DAMOS scheme action to a region.  If a DAMOS walk request has been
1945  * installed by damos_walk() and not yet uninstalled, this function invokes it.
1946  */
1947 static void damos_walk_call_walk(struct damon_ctx *ctx, struct damon_target *t,
1948 		struct damon_region *r, struct damos *s,
1949 		unsigned long sz_filter_passed)
1950 {
1951 	struct damos_walk_control *control;
1952 
1953 	if (s->walk_completed)
1954 		return;
1955 
1956 	control = ctx->walk_control;
1957 	if (!control)
1958 		return;
1959 
1960 	control->walk_fn(control->data, ctx, t, r, s, sz_filter_passed);
1961 }
1962 
1963 /*
1964  * damos_walk_complete() - Complete DAMOS walk request if all walks are done.
1965  * @ctx:	The context of &damon_ctx->walk_control.
1966  * @s:		A scheme of @ctx that all walks are now done.
1967  *
1968  * This function is called when kdamond has finished applying the action of a
1969  * DAMOS scheme to all regions eligible for the given &damos->apply_interval_us.
1970  * If every scheme of @ctx including @s has now finished walking for at least
1971  * one &damos->apply_interval_us, this function marks the handling of the given
1972  * DAMOS walk request as done, so that damos_walk() can wake up and return.
1973  */
1974 static void damos_walk_complete(struct damon_ctx *ctx, struct damos *s)
1975 {
1976 	struct damos *siter;
1977 	struct damos_walk_control *control;
1978 
1979 	control = ctx->walk_control;
1980 	if (!control)
1981 		return;
1982 
1983 	s->walk_completed = true;
1984 	/* if all schemes completed, signal completion to walker */
1985 	damon_for_each_scheme(siter, ctx) {
1986 		if (!siter->walk_completed)
1987 			return;
1988 	}
1989 	damon_for_each_scheme(siter, ctx)
1990 		siter->walk_completed = false;
1991 
1992 	complete(&control->completion);
1993 	ctx->walk_control = NULL;
1994 }
1995 
1996 /*
1997  * damos_walk_cancel() - Cancel the current DAMOS walk request.
1998  * @ctx:	The context of &damon_ctx->walk_control.
1999  *
2000  * This function is called when @ctx is deactivated by DAMOS watermarks, DAMOS
2001  * walk is requested but there is no DAMOS scheme to walk for, or the kdamond
2002  * is already out of the main loop and therefore going to be terminated, and
2003  * hence cannot continue the walks.  This function thus marks the walk request
2004  * as canceled, so that damos_walk() can wake up and return.
2005  */
2006 static void damos_walk_cancel(struct damon_ctx *ctx)
2007 {
2008 	struct damos_walk_control *control;
2009 
2010 	mutex_lock(&ctx->walk_control_lock);
2011 	control = ctx->walk_control;
2012 	mutex_unlock(&ctx->walk_control_lock);
2013 
2014 	if (!control)
2015 		return;
2016 	control->canceled = true;
2017 	complete(&control->completion);
2018 	mutex_lock(&ctx->walk_control_lock);
2019 	ctx->walk_control = NULL;
2020 	mutex_unlock(&ctx->walk_control_lock);
2021 }
2022 
2023 static void damos_apply_scheme(struct damon_ctx *c, struct damon_target *t,
2024 		struct damon_region *r, struct damos *s)
2025 {
2026 	struct damos_quota *quota = &s->quota;
2027 	unsigned long sz = damon_sz_region(r);
2028 	struct timespec64 begin, end;
2029 	unsigned long sz_applied = 0;
2030 	unsigned long sz_ops_filter_passed = 0;
2031 	/*
2032 	 * We plan to support multiple contexts per kdamond, as the DAMON sysfs
2033 	 * interface implies with its 'nr_contexts' file.  Nevertheless, only a
2034 	 * single context per kdamond is supported for now.  So, we can simply
2035 	 * use the '0' context index here.
2036 	 */
2037 	unsigned int cidx = 0;
2038 	struct damos *siter;		/* schemes iterator */
2039 	unsigned int sidx = 0;
2040 	struct damon_target *titer;	/* targets iterator */
2041 	unsigned int tidx = 0;
2042 	bool do_trace = false;
2043 
2044 	/* get indices for trace_damos_before_apply() */
2045 	if (trace_damos_before_apply_enabled()) {
2046 		damon_for_each_scheme(siter, c) {
2047 			if (siter == s)
2048 				break;
2049 			sidx++;
2050 		}
2051 		damon_for_each_target(titer, c) {
2052 			if (titer == t)
2053 				break;
2054 			tidx++;
2055 		}
2056 		do_trace = true;
2057 	}
2058 
2059 	if (c->ops.apply_scheme) {
2060 		if (quota->esz && quota->charged_sz + sz > quota->esz) {
2061 			sz = ALIGN_DOWN(quota->esz - quota->charged_sz,
2062 					c->min_region_sz);
2063 			if (!sz)
2064 				goto update_stat;
2065 			damon_split_region_at(t, r, sz);
2066 		}
2067 		if (damos_core_filter_out(c, t, r, s))
2068 			return;
2069 		ktime_get_coarse_ts64(&begin);
2070 		trace_damos_before_apply(cidx, sidx, tidx, r,
2071 				damon_nr_regions(t), do_trace);
2072 		sz_applied = c->ops.apply_scheme(c, t, r, s,
2073 				&sz_ops_filter_passed);
2074 		damos_walk_call_walk(c, t, r, s, sz_ops_filter_passed);
2075 		ktime_get_coarse_ts64(&end);
2076 		quota->total_charged_ns += timespec64_to_ns(&end) -
2077 			timespec64_to_ns(&begin);
2078 		quota->charged_sz += sz;
2079 		if (quota->esz && quota->charged_sz >= quota->esz) {
2080 			quota->charge_target_from = t;
2081 			quota->charge_addr_from = r->ar.end + 1;
2082 		}
2083 	}
2084 	if (s->action != DAMOS_STAT)
2085 		r->age = 0;
2086 
2087 update_stat:
2088 	damos_update_stat(s, sz, sz_applied, sz_ops_filter_passed);
2089 }
2090 
2091 static void damon_do_apply_schemes(struct damon_ctx *c,
2092 				   struct damon_target *t,
2093 				   struct damon_region *r)
2094 {
2095 	struct damos *s;
2096 
2097 	damon_for_each_scheme(s, c) {
2098 		struct damos_quota *quota = &s->quota;
2099 
2100 		if (c->passed_sample_intervals < s->next_apply_sis)
2101 			continue;
2102 
2103 		if (!s->wmarks.activated)
2104 			continue;
2105 
2106 		/* Check the quota */
2107 		if (quota->esz && quota->charged_sz >= quota->esz)
2108 			continue;
2109 
2110 		if (damos_skip_charged_region(t, r, s, c->min_region_sz))
2111 			continue;
2112 
2113 		if (s->max_nr_snapshots &&
2114 				s->max_nr_snapshots <= s->stat.nr_snapshots)
2115 			continue;
2116 
2117 		if (damos_valid_target(c, r, s))
2118 			damos_apply_scheme(c, t, r, s);
2119 
2120 		if (damon_is_last_region(r, t))
2121 			s->stat.nr_snapshots++;
2122 	}
2123 }
2124 
2125 /*
2126  * damon_feed_loop_next_input() - get next input to achieve a target score.
2127  * @last_input:	The last input.
2128  * @score:	Current score that was made with @last_input.
2129  *
2130  * Calculate next input to achieve the target score, based on the last input
2131  * and current score.  Assuming the input and the score are positively
2132  * proportional, calculate how much compensation should be added to or
2133  * subtracted from the last input as a proportion of the last input.  To avoid
2134  * the next input converging to zero, keep it always non-zero.  In short form
2135  * (assuming support of float and signed calculations), the algorithm is as
2136  * below.
2137  *
2138  * next_input = max(last_input * ((goal - current) / goal + 1), 1)
2139  *
2140  * For a simple implementation, we assume the target score is always 10,000.  The
2141  * caller should adjust @score for this.
2142  *
2143  * Returns the next input that is assumed to achieve the target score.
2144  */
2145 static unsigned long damon_feed_loop_next_input(unsigned long last_input,
2146 		unsigned long score)
2147 {
2148 	const unsigned long goal = 10000;
2149 	/* Set the minimum input as 10000 to avoid the compensation being zero */
2150 	const unsigned long min_input = 10000;
2151 	unsigned long score_goal_diff, compensation;
2152 	bool over_achieving = score > goal;
2153 
2154 	if (score == goal)
2155 		return last_input;
2156 	if (score >= goal * 2)
2157 		return min_input;
2158 
2159 	if (over_achieving)
2160 		score_goal_diff = score - goal;
2161 	else
2162 		score_goal_diff = goal - score;
2163 
2164 	if (last_input < ULONG_MAX / score_goal_diff)
2165 		compensation = last_input * score_goal_diff / goal;
2166 	else
2167 		compensation = last_input / goal * score_goal_diff;
2168 
2169 	if (over_achieving)
2170 		return max(last_input - compensation, min_input);
2171 	if (last_input < ULONG_MAX - compensation)
2172 		return last_input + compensation;
2173 	return ULONG_MAX;
2174 }
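
/*
 * A worked sketch of the feedback loop above (illustrative only; the input
 * values are arbitrary assumptions, not DAMON defaults).
 */
#if 0	/* example only; not compiled */
static void example_feed_loop(void)
{
	unsigned long input = 1000000;

	/* Score at half of the goal: grow by 50% -> 1500000. */
	input = damon_feed_loop_next_input(input, 5000);
	/* Score at 1.5x the goal: shrink by 50% -> 750000. */
	input = damon_feed_loop_next_input(input, 15000);
	/* Score at or above 2x the goal: clamp to min_input -> 10000. */
	input = damon_feed_loop_next_input(input, 20000);
}
#endif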
2175 
2176 #ifdef CONFIG_PSI
2177 
2178 static u64 damos_get_some_mem_psi_total(void)
2179 {
2180 	if (static_branch_likely(&psi_disabled))
2181 		return 0;
2182 	return div_u64(psi_system.total[PSI_AVGS][PSI_MEM * 2],
2183 			NSEC_PER_USEC);
2184 }
2185 
2186 #else	/* CONFIG_PSI */
2187 
2188 static inline u64 damos_get_some_mem_psi_total(void)
2189 {
2190 	return 0;
2191 }
2192 
2193 #endif	/* CONFIG_PSI */
2194 
2195 #ifdef CONFIG_NUMA
2196 static __kernel_ulong_t damos_get_node_mem_bp(
2197 		struct damos_quota_goal *goal)
2198 {
2199 	struct sysinfo i;
2200 	__kernel_ulong_t numerator;
2201 
2202 	si_meminfo_node(&i, goal->nid);
2203 	if (goal->metric == DAMOS_QUOTA_NODE_MEM_USED_BP)
2204 		numerator = i.totalram - i.freeram;
2205 	else	/* DAMOS_QUOTA_NODE_MEM_FREE_BP */
2206 		numerator = i.freeram;
2207 	return numerator * 10000 / i.totalram;
2208 }
2209 
2210 static unsigned long damos_get_node_memcg_used_bp(
2211 		struct damos_quota_goal *goal)
2212 {
2213 	struct mem_cgroup *memcg;
2214 	struct lruvec *lruvec;
2215 	unsigned long used_pages, numerator;
2216 	struct sysinfo i;
2217 
2218 	memcg = mem_cgroup_get_from_id(goal->memcg_id);
2219 	if (!memcg) {
2220 		if (goal->metric == DAMOS_QUOTA_NODE_MEMCG_USED_BP)
2221 			return 0;
2222 		else	/* DAMOS_QUOTA_NODE_MEMCG_FREE_BP */
2223 			return 10000;
2224 	}
2225 
2226 	mem_cgroup_flush_stats(memcg);
2227 	lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(goal->nid));
2228 	used_pages = lruvec_page_state(lruvec, NR_ACTIVE_ANON);
2229 	used_pages += lruvec_page_state(lruvec, NR_INACTIVE_ANON);
2230 	used_pages += lruvec_page_state(lruvec, NR_ACTIVE_FILE);
2231 	used_pages += lruvec_page_state(lruvec, NR_INACTIVE_FILE);
2232 
2233 	mem_cgroup_put(memcg);
2234 
2235 	si_meminfo_node(&i, goal->nid);
2236 	if (goal->metric == DAMOS_QUOTA_NODE_MEMCG_USED_BP)
2237 		numerator = used_pages;
2238 	else	/* DAMOS_QUOTA_NODE_MEMCG_FREE_BP */
2239 		numerator = i.totalram - used_pages;
2240 	return numerator * 10000 / i.totalram;
2241 }
2242 #else
2243 static __kernel_ulong_t damos_get_node_mem_bp(
2244 		struct damos_quota_goal *goal)
2245 {
2246 	return 0;
2247 }
2248 
2249 static unsigned long damos_get_node_memcg_used_bp(
2250 		struct damos_quota_goal *goal)
2251 {
2252 	return 0;
2253 }
2254 #endif
2255 
2256 /*
2257  * Returns the LRU-active or inactive to total LRU memory size ratio in bp.
2258  */
2259 static unsigned int damos_get_in_active_mem_bp(bool active_ratio)
2260 {
2261 	unsigned long active, inactive, total;
2262 
2263 	/* This should align with /proc/meminfo output */
2264 	active = global_node_page_state(NR_LRU_BASE + LRU_ACTIVE_ANON) +
2265 		global_node_page_state(NR_LRU_BASE + LRU_ACTIVE_FILE);
2266 	inactive = global_node_page_state(NR_LRU_BASE + LRU_INACTIVE_ANON) +
2267 		global_node_page_state(NR_LRU_BASE + LRU_INACTIVE_FILE);
2268 	total = active + inactive;
2269 	if (active_ratio)
2270 		return active * 10000 / total;
2271 	return inactive * 10000 / total;
2272 }
2273 
2274 static void damos_set_quota_goal_current_value(struct damos_quota_goal *goal)
2275 {
2276 	u64 now_psi_total;
2277 
2278 	switch (goal->metric) {
2279 	case DAMOS_QUOTA_USER_INPUT:
2280 		/* User should already set goal->current_value */
2281 		break;
2282 	case DAMOS_QUOTA_SOME_MEM_PSI_US:
2283 		now_psi_total = damos_get_some_mem_psi_total();
2284 		goal->current_value = now_psi_total - goal->last_psi_total;
2285 		goal->last_psi_total = now_psi_total;
2286 		break;
2287 	case DAMOS_QUOTA_NODE_MEM_USED_BP:
2288 	case DAMOS_QUOTA_NODE_MEM_FREE_BP:
2289 		goal->current_value = damos_get_node_mem_bp(goal);
2290 		break;
2291 	case DAMOS_QUOTA_NODE_MEMCG_USED_BP:
2292 	case DAMOS_QUOTA_NODE_MEMCG_FREE_BP:
2293 		goal->current_value = damos_get_node_memcg_used_bp(goal);
2294 		break;
2295 	case DAMOS_QUOTA_ACTIVE_MEM_BP:
2296 	case DAMOS_QUOTA_INACTIVE_MEM_BP:
2297 		goal->current_value = damos_get_in_active_mem_bp(
2298 				goal->metric == DAMOS_QUOTA_ACTIVE_MEM_BP);
2299 		break;
2300 	default:
2301 		break;
2302 	}
2303 }
2304 
2305 /* Return the highest score since it makes schemes least aggressive */
2306 static unsigned long damos_quota_score(struct damos_quota *quota)
2307 {
2308 	struct damos_quota_goal *goal;
2309 	unsigned long highest_score = 0;
2310 
2311 	damos_for_each_quota_goal(goal, quota) {
2312 		damos_set_quota_goal_current_value(goal);
2313 		highest_score = max(highest_score,
2314 				goal->current_value * 10000 /
2315 				goal->target_value);
2316 	}
2317 
2318 	return highest_score;
2319 }
2320 
2321 /*
2322  * Called only if quota->ms or quota->sz is set, or quota->goals is not empty
2323  */
2324 static void damos_set_effective_quota(struct damos_quota *quota)
2325 {
2326 	unsigned long throughput;
2327 	unsigned long esz = ULONG_MAX;
2328 
2329 	if (!quota->ms && list_empty(&quota->goals)) {
2330 		quota->esz = quota->sz;
2331 		return;
2332 	}
2333 
2334 	if (!list_empty(&quota->goals)) {
2335 		unsigned long score = damos_quota_score(quota);
2336 
2337 		quota->esz_bp = damon_feed_loop_next_input(
2338 				max(quota->esz_bp, 10000UL),
2339 				score);
2340 		esz = quota->esz_bp / 10000;
2341 	}
2342 
2343 	if (quota->ms) {
2344 		if (quota->total_charged_ns)
2345 			throughput = mult_frac(quota->total_charged_sz, 1000000,
2346 							quota->total_charged_ns);
2347 		else
2348 			throughput = PAGE_SIZE * 1024;
2349 		esz = min(throughput * quota->ms, esz);
2350 	}
2351 
2352 	if (quota->sz && quota->sz < esz)
2353 		esz = quota->sz;
2354 
2355 	quota->esz = esz;
2356 }
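
/*
 * A worked example of the esz calculation above (illustrative only; all
 * numbers are assumptions).  4 MiB charged over one second gives a throughput
 * of about 4194 bytes per millisecond, so a 100 ms time quota alone would
 * allow ~419400 bytes; a smaller 256 KiB size quota wins.
 */
#if 0	/* example only; not compiled */
static unsigned long example_effective_quota(void)
{
	unsigned long throughput = mult_frac(4UL << 20, 1000000,
			1000UL * NSEC_PER_MSEC);	/* ~4194 bytes/ms */
	unsigned long esz = throughput * 100;		/* quota->ms == 100 */

	return min(esz, 256UL << 10);			/* == 262144 */
}
#endif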
2357 
2358 static void damos_trace_esz(struct damon_ctx *c, struct damos *s,
2359 		struct damos_quota *quota)
2360 {
2361 	unsigned int cidx = 0, sidx = 0;
2362 	struct damos *siter;
2363 
2364 	damon_for_each_scheme(siter, c) {
2365 		if (siter == s)
2366 			break;
2367 		sidx++;
2368 	}
2369 	trace_damos_esz(cidx, sidx, quota->esz);
2370 }
2371 
2372 static void damos_adjust_quota(struct damon_ctx *c, struct damos *s)
2373 {
2374 	struct damos_quota *quota = &s->quota;
2375 	struct damon_target *t;
2376 	struct damon_region *r;
2377 	unsigned long cumulated_sz, cached_esz;
2378 	unsigned int score, max_score = 0;
2379 
2380 	if (!quota->ms && !quota->sz && list_empty(&quota->goals))
2381 		return;
2382 
2383 	/* First charge window */
2384 	if (!quota->total_charged_sz && !quota->charged_from) {
2385 		quota->charged_from = jiffies;
2386 		damos_set_effective_quota(quota);
2387 	}
2388 
2389 	/* New charge window starts */
2390 	if (time_after_eq(jiffies, quota->charged_from +
2391 				msecs_to_jiffies(quota->reset_interval))) {
2392 		if (quota->esz && quota->charged_sz >= quota->esz)
2393 			s->stat.qt_exceeds++;
2394 		quota->total_charged_sz += quota->charged_sz;
2395 		quota->charged_from = jiffies;
2396 		quota->charged_sz = 0;
2397 		if (trace_damos_esz_enabled())
2398 			cached_esz = quota->esz;
2399 		damos_set_effective_quota(quota);
2400 		if (trace_damos_esz_enabled() && quota->esz != cached_esz)
2401 			damos_trace_esz(c, s, quota);
2402 	}
2403 
2404 	if (!c->ops.get_scheme_score)
2405 		return;
2406 
2407 	/* Fill up the score histogram */
2408 	memset(c->regions_score_histogram, 0,
2409 			sizeof(*c->regions_score_histogram) *
2410 			(DAMOS_MAX_SCORE + 1));
2411 	damon_for_each_target(t, c) {
2412 		damon_for_each_region(r, t) {
2413 			if (!__damos_valid_target(r, s))
2414 				continue;
2415 			if (damos_core_filter_out(c, t, r, s))
2416 				continue;
2417 			score = c->ops.get_scheme_score(c, r, s);
2418 			c->regions_score_histogram[score] +=
2419 				damon_sz_region(r);
2420 			if (score > max_score)
2421 				max_score = score;
2422 		}
2423 	}
2424 
2425 	/* Set the min score limit */
2426 	for (cumulated_sz = 0, score = max_score; ; score--) {
2427 		cumulated_sz += c->regions_score_histogram[score];
2428 		if (cumulated_sz >= quota->esz || !score)
2429 			break;
2430 	}
2431 	quota->min_score = score;
2432 }
2433 
2434 static void damos_trace_stat(struct damon_ctx *c, struct damos *s)
2435 {
2436 	unsigned int cidx = 0, sidx = 0;
2437 	struct damos *siter;
2438 
2439 	if (!trace_damos_stat_after_apply_interval_enabled())
2440 		return;
2441 
2442 	damon_for_each_scheme(siter, c) {
2443 		if (siter == s)
2444 			break;
2445 		sidx++;
2446 	}
2447 	trace_damos_stat_after_apply_interval(cidx, sidx, &s->stat);
2448 }
2449 
2450 static void kdamond_apply_schemes(struct damon_ctx *c)
2451 {
2452 	struct damon_target *t;
2453 	struct damon_region *r;
2454 	struct damos *s;
2455 	unsigned long sample_interval = c->attrs.sample_interval ?
2456 		c->attrs.sample_interval : 1;
2457 	bool has_schemes_to_apply = false;
2458 
2459 	damon_for_each_scheme(s, c) {
2460 		if (c->passed_sample_intervals < s->next_apply_sis)
2461 			continue;
2462 
2463 		if (!s->wmarks.activated)
2464 			continue;
2465 
2466 		has_schemes_to_apply = true;
2467 
2468 		damos_adjust_quota(c, s);
2469 	}
2470 
2471 	if (!has_schemes_to_apply)
2472 		return;
2473 
2474 	mutex_lock(&c->walk_control_lock);
2475 	damon_for_each_target(t, c) {
2476 		if (c->ops.target_valid && c->ops.target_valid(t) == false)
2477 			continue;
2478 
2479 		damon_for_each_region(r, t)
2480 			damon_do_apply_schemes(c, t, r);
2481 	}
2482 
2483 	damon_for_each_scheme(s, c) {
2484 		if (c->passed_sample_intervals < s->next_apply_sis)
2485 			continue;
2486 		damos_walk_complete(c, s);
2487 		s->next_apply_sis = c->passed_sample_intervals +
2488 			(s->apply_interval_us ? s->apply_interval_us :
2489 			 c->attrs.aggr_interval) / sample_interval;
2490 		s->last_applied = NULL;
2491 		damos_trace_stat(c, s);
2492 	}
2493 	mutex_unlock(&c->walk_control_lock);
2494 }
2495 
2496 #ifdef CONFIG_DAMON_DEBUG_SANITY
2497 static void damon_verify_merge_two_regions(
2498 		struct damon_region *l, struct damon_region *r)
2499 {
2500 	/* damon_merge_two_regions() may have created an incorrect left region */
2501 	WARN_ONCE(l->ar.start >= l->ar.end, "l: %lu-%lu, r: %lu-%lu\n",
2502 			l->ar.start, l->ar.end, r->ar.start, r->ar.end);
2503 }
2504 #else
2505 static void damon_verify_merge_two_regions(
2506 		struct damon_region *l, struct damon_region *r)
2507 {
2508 }
2509 #endif
2510 
2511 /*
2512  * Merge two adjacent regions into one region
2513  */
2514 static void damon_merge_two_regions(struct damon_target *t,
2515 		struct damon_region *l, struct damon_region *r)
2516 {
2517 	unsigned long sz_l = damon_sz_region(l), sz_r = damon_sz_region(r);
2518 
2519 	l->nr_accesses = (l->nr_accesses * sz_l + r->nr_accesses * sz_r) /
2520 			(sz_l + sz_r);
2521 	l->nr_accesses_bp = l->nr_accesses * 10000;
2522 	l->age = (l->age * sz_l + r->age * sz_r) / (sz_l + sz_r);
2523 	l->ar.end = r->ar.end;
2524 	damon_verify_merge_two_regions(l, r);
2525 	damon_destroy_region(r, t);
2526 }
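
/*
 * A worked example of the size-weighted averaging above (illustrative only;
 * numbers are assumptions).  Merging a 3-page region with nr_accesses 4 and a
 * 1-page region with nr_accesses 8 yields (4 * 3 + 8 * 1) / (3 + 1) == 5,
 * and nr_accesses_bp of the merged region becomes 50000.
 */
#if 0	/* example only; not compiled */
static unsigned int example_merged_nr_accesses(void)
{
	return (4 * 3 + 8 * 1) / (3 + 1);	/* == 5 */
}
#endif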
2527 
2528 #ifdef CONFIG_DAMON_DEBUG_SANITY
2529 static void damon_verify_merge_regions_of(struct damon_region *r)
2530 {
2531 	WARN_ONCE(r->nr_accesses != r->nr_accesses_bp / 10000,
2532 			"nr_accesses (%u) != nr_accesses_bp (%u)\n",
2533 			r->nr_accesses, r->nr_accesses_bp);
2534 }
2535 #else
2536 static void damon_verify_merge_regions_of(struct damon_region *r)
2537 {
2538 }
2539 #endif
2540 
2542 /*
2543  * Merge adjacent regions having similar access frequencies
2544  *
2545  * t		target affected by this merge operation
2546  * thres	'->nr_accesses' diff threshold for the merge
2547  * sz_limit	size upper limit of each region
2548  */
2549 static void damon_merge_regions_of(struct damon_target *t, unsigned int thres,
2550 				   unsigned long sz_limit)
2551 {
2552 	struct damon_region *r, *prev = NULL, *next;
2553 
2554 	damon_for_each_region_safe(r, next, t) {
2555 		damon_verify_merge_regions_of(r);
2556 		if (abs(r->nr_accesses - r->last_nr_accesses) > thres)
2557 			r->age = 0;
2558 		else if ((r->nr_accesses == 0) != (r->last_nr_accesses == 0))
2559 			r->age = 0;
2560 		else
2561 			r->age++;
2562 
2563 		if (prev && prev->ar.end == r->ar.start &&
2564 		    abs(prev->nr_accesses - r->nr_accesses) <= thres &&
2565 		    damon_sz_region(prev) + damon_sz_region(r) <= sz_limit)
2566 			damon_merge_two_regions(t, prev, r);
2567 		else
2568 			prev = r;
2569 	}
2570 }
2571 
2572 /*
2573  * Merge adjacent regions having similar access frequencies
2574  *
2575  * threshold	'->nr_accesses' diff threshold for the merge
2576  * sz_limit	size upper limit of each region
2577  *
2578  * This function merges monitoring target regions which are adjacent and have
2579  * similar access frequencies.  This is for minimizing the monitoring
2580  * overhead under the dynamically changeable access pattern.  If a merge was
2581  * unnecessarily made, later 'kdamond_split_regions()' will revert it.
2582  *
2583  * The total number of regions could be higher than the user-defined limit,
2584  * max_nr_regions, in some cases.  For example, the user can update
2585  * max_nr_regions to a number lower than the current number of regions
2586  * while DAMON is running.  For such a case, repeat merging until the limit is
2587  * met while increasing @threshold up to possible maximum level.
2588  */
2589 static void kdamond_merge_regions(struct damon_ctx *c, unsigned int threshold,
2590 				  unsigned long sz_limit)
2591 {
2592 	struct damon_target *t;
2593 	unsigned int nr_regions;
2594 	unsigned int max_thres;
2595 
2596 	max_thres = c->attrs.aggr_interval /
2597 		(c->attrs.sample_interval ?  c->attrs.sample_interval : 1);
2598 	do {
2599 		nr_regions = 0;
2600 		damon_for_each_target(t, c) {
2601 			damon_merge_regions_of(t, threshold, sz_limit);
2602 			nr_regions += damon_nr_regions(t);
2603 		}
2604 		threshold = max(1, threshold * 2);
2605 	} while (nr_regions > c->attrs.max_nr_regions &&
2606 			threshold / 2 < max_thres);
2607 }
2608 
2609 #ifdef CONFIG_DAMON_DEBUG_SANITY
2610 static void damon_verify_split_region_at(struct damon_region *r,
2611 		unsigned long sz_r)
2612 {
2613 	WARN_ONCE(sz_r == 0 || sz_r >= damon_sz_region(r),
2614 			"sz_r: %lu r: %lu-%lu (%lu)\n",
2615 			sz_r, r->ar.start, r->ar.end, damon_sz_region(r));
2616 }
2617 #else
2618 static void damon_verify_split_region_at(struct damon_region *r,
2619 		unsigned long sz_r)
2620 {
2621 }
2622 #endif
2623 
2624 /*
2625  * Split a region in two
2626  *
2627  * r		the region to be split
2628  * sz_r		size of the first sub-region that will be made
2629  */
2630 static void damon_split_region_at(struct damon_target *t,
2631 				  struct damon_region *r, unsigned long sz_r)
2632 {
2633 	struct damon_region *new;
2634 
2635 	damon_verify_split_region_at(r, sz_r);
2636 	new = damon_new_region(r->ar.start + sz_r, r->ar.end);
2637 	if (!new)
2638 		return;
2639 
2640 	r->ar.end = new->ar.start;
2641 
2642 	new->age = r->age;
2643 	new->last_nr_accesses = r->last_nr_accesses;
2644 	new->nr_accesses_bp = r->nr_accesses_bp;
2645 	new->nr_accesses = r->nr_accesses;
2646 
2647 	damon_insert_region(new, r, damon_next_region(r), t);
2648 }
2649 
2650 /* Split every region in the given target into 'nr_subs' regions */
2651 static void damon_split_regions_of(struct damon_target *t, int nr_subs,
2652 				  unsigned long min_region_sz)
2653 {
2654 	struct damon_region *r, *next;
2655 	unsigned long sz_region, sz_sub = 0;
2656 	int i;
2657 
2658 	damon_for_each_region_safe(r, next, t) {
2659 		sz_region = damon_sz_region(r);
2660 
2661 		for (i = 0; i < nr_subs - 1 &&
2662 				sz_region > 2 * min_region_sz; i++) {
2663 			/*
2664 			 * Randomly select the size of the left sub-region to
2665 			 * be at least 10% and at most 90% of the original region
2666 			 */
2667 			sz_sub = ALIGN_DOWN(damon_rand(1, 10) *
2668 					sz_region / 10, min_region_sz);
2669 			/* Do not allow blank region */
2670 			if (sz_sub == 0 || sz_sub >= sz_region)
2671 				continue;
2672 
2673 			damon_split_region_at(t, r, sz_sub);
2674 			sz_region = sz_sub;
2675 		}
2676 	}
2677 }
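
/*
 * A worked example of the sub-region sizing above (illustrative only; numbers
 * are assumptions).  For a 1 MiB region, a 4 KiB minimum region size, and a
 * random draw of 3, the left sub-region gets
 * ALIGN_DOWN(3 * 1 MiB / 10, 4096) == 311296 bytes.
 */
#if 0	/* example only; not compiled */
static unsigned long example_split_size(void)
{
	return ALIGN_DOWN(3 * (1UL << 20) / 10, 4096);	/* == 311296 */
}
#endif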
2678 
2679 /*
2680  * Split every target region into randomly-sized small regions
2681  *
2682  * This function splits every target region into random-sized small regions if
2683  * current total number of regions is equal to or smaller than half of the
2684  * user-specified maximum number of regions.  This is for maximizing the
2685  * monitoring accuracy under the dynamically changeable access patterns.  If a
2686  * split was unnecessarily made, later 'kdamond_merge_regions()' will revert
2687  * it.
2688  */
2689 static void kdamond_split_regions(struct damon_ctx *ctx)
2690 {
2691 	struct damon_target *t;
2692 	unsigned int nr_regions = 0;
2693 	static unsigned int last_nr_regions;
2694 	int nr_subregions = 2;
2695 
2696 	damon_for_each_target(t, ctx)
2697 		nr_regions += damon_nr_regions(t);
2698 
2699 	if (nr_regions > ctx->attrs.max_nr_regions / 2)
2700 		return;
2701 
2702 	/* Maybe the middle of the region has different access frequency */
2703 	if (last_nr_regions == nr_regions &&
2704 			nr_regions < ctx->attrs.max_nr_regions / 3)
2705 		nr_subregions = 3;
2706 
2707 	damon_for_each_target(t, ctx)
2708 		damon_split_regions_of(t, nr_subregions, ctx->min_region_sz);
2709 
2710 	last_nr_regions = nr_regions;
2711 }
2712 
2713 /*
2714  * Check whether current monitoring should be stopped
2715  *
2716  * The monitoring is stopped when either the user requested to stop, or all
2717  * monitoring targets are invalid.
2718  *
2719  * Returns true if the current monitoring needs to be stopped.
2720  */
2721 static bool kdamond_need_stop(struct damon_ctx *ctx)
2722 {
2723 	struct damon_target *t;
2724 
2725 	if (kthread_should_stop())
2726 		return true;
2727 
2728 	if (!ctx->ops.target_valid)
2729 		return false;
2730 
2731 	damon_for_each_target(t, ctx) {
2732 		if (ctx->ops.target_valid(t))
2733 			return false;
2734 	}
2735 
2736 	return true;
2737 }
2738 
2739 static int damos_get_wmark_metric_value(enum damos_wmark_metric metric,
2740 					unsigned long *metric_value)
2741 {
2742 	switch (metric) {
2743 	case DAMOS_WMARK_FREE_MEM_RATE:
2744 		*metric_value = global_zone_page_state(NR_FREE_PAGES) * 1000 /
2745 		       totalram_pages();
2746 		return 0;
2747 	default:
2748 		break;
2749 	}
2750 	return -EINVAL;
2751 }
2752 
2753 /*
2754  * Returns zero if the scheme is active.  Else, returns time to wait for next
2755  * watermark check in microseconds.
2756  */
2757 static unsigned long damos_wmark_wait_us(struct damos *scheme)
2758 {
2759 	unsigned long metric;
2760 
2761 	if (damos_get_wmark_metric_value(scheme->wmarks.metric, &metric))
2762 		return 0;
2763 
2764 	/* higher than high watermark or lower than low watermark */
2765 	if (metric > scheme->wmarks.high || scheme->wmarks.low > metric) {
2766 		if (scheme->wmarks.activated)
2767 			pr_debug("deactivate a scheme (%d) for %s wmark\n",
2768 				 scheme->action,
2769 				 str_high_low(metric > scheme->wmarks.high));
2770 		scheme->wmarks.activated = false;
2771 		return scheme->wmarks.interval;
2772 	}
2773 
2774 	/* inactive and higher than middle watermark */
2775 	if ((scheme->wmarks.high >= metric && metric >= scheme->wmarks.mid) &&
2776 			!scheme->wmarks.activated)
2777 		return scheme->wmarks.interval;
2778 
2779 	if (!scheme->wmarks.activated)
2780 		pr_debug("activate a scheme (%d)\n", scheme->action);
2781 	scheme->wmarks.activated = true;
2782 	return 0;
2783 }
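
/*
 * A minimal watermarks setup sketch (illustrative only; the thresholds are
 * arbitrary assumptions).  The metric values are permil of total memory, per
 * damos_get_wmark_metric_value() above.
 */
#if 0	/* example only; not compiled */
static struct damos_watermarks example_wmarks = {
	.metric = DAMOS_WMARK_FREE_MEM_RATE,
	.interval = 5 * 1000 * 1000,	/* re-check every 5 seconds */
	.high = 500,	/* deactivate when more than 50.0% of memory is free */
	.mid = 400,	/* activate when free memory falls to 40.0% or below */
	.low = 50,	/* deactivate again below 5.0% free memory */
};
#endif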
2784 
2785 static void kdamond_usleep(unsigned long usecs)
2786 {
2787 	if (usecs >= USLEEP_RANGE_UPPER_BOUND)
2788 		schedule_timeout_idle(usecs_to_jiffies(usecs));
2789 	else
2790 		usleep_range_idle(usecs, usecs + 1);
2791 }
2792 
2793 /*
2794  * kdamond_call() - handle damon_call_control objects.
2795  * @ctx:	The &struct damon_ctx of the kdamond.
2796  * @cancel:	Whether to cancel the invocation of the function.
2797  *
2798  * If there are &struct damon_call_control requests that registered via
2799  * &damon_call() on @ctx, do or cancel the invocation of the function depending
2800  * on @cancel.  @cancel is set when the kdamond is already out of the main loop
2801  * and therefore will be terminated.
2802  */
2803 static void kdamond_call(struct damon_ctx *ctx, bool cancel)
2804 {
2805 	struct damon_call_control *control, *next;
2806 	LIST_HEAD(controls);
2807 
2808 	mutex_lock(&ctx->call_controls_lock);
2809 	list_splice_tail_init(&ctx->call_controls, &controls);
2810 	mutex_unlock(&ctx->call_controls_lock);
2811 
2812 	list_for_each_entry_safe(control, next, &controls, list) {
2813 		if (!control->repeat || cancel)
2814 			list_del(&control->list);
2815 
2816 		if (cancel)
2817 			control->canceled = true;
2818 		else
2819 			control->return_code = control->fn(control->data);
2820 
2821 		if (!control->repeat)
2822 			complete(&control->completion);
2823 		else if (control->canceled && control->dealloc_on_cancel)
2824 			kfree(control);
2825 		if (!cancel && ctx->maybe_corrupted)
2826 			break;
2827 	}
2828 
2829 	mutex_lock(&ctx->call_controls_lock);
2830 	list_splice_tail(&controls, &ctx->call_controls);
2831 	mutex_unlock(&ctx->call_controls_lock);
2832 }
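
/*
 * A minimal damon_call() usage sketch (illustrative only; a non-authoritative
 * example of how a request serviced by kdamond_call() above could be set up).
 */
#if 0	/* example only; not compiled */
static int example_fn(void *data)
{
	/* runs in kdamond context; safe to read the passed damon_ctx */
	return 0;
}

static int example_invoke(struct damon_ctx *ctx)
{
	struct damon_call_control control = {
		.fn = example_fn,
		.data = ctx,
	};

	/* for non-repeat requests, damon_call() waits for the completion */
	return damon_call(ctx, &control);
}
#endif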
2833 
2834 /* Returns negative error code if it's not activated but should return */
2835 static int kdamond_wait_activation(struct damon_ctx *ctx)
2836 {
2837 	struct damos *s;
2838 	unsigned long wait_time;
2839 	unsigned long min_wait_time = 0;
2840 	bool init_wait_time = false;
2841 
2842 	while (!kdamond_need_stop(ctx)) {
2843 		damon_for_each_scheme(s, ctx) {
2844 			wait_time = damos_wmark_wait_us(s);
2845 			if (!init_wait_time || wait_time < min_wait_time) {
2846 				init_wait_time = true;
2847 				min_wait_time = wait_time;
2848 			}
2849 		}
2850 		if (!min_wait_time)
2851 			return 0;
2852 
2853 		kdamond_usleep(min_wait_time);
2854 
2855 		kdamond_call(ctx, false);
2856 		if (ctx->maybe_corrupted)
2857 			return -EINVAL;
2858 		damos_walk_cancel(ctx);
2859 	}
2860 	return -EBUSY;
2861 }
2862 
2863 static void kdamond_init_ctx(struct damon_ctx *ctx)
2864 {
2865 	unsigned long sample_interval = ctx->attrs.sample_interval ?
2866 		ctx->attrs.sample_interval : 1;
2867 	unsigned long apply_interval;
2868 	struct damos *scheme;
2869 
2870 	ctx->passed_sample_intervals = 0;
2871 	ctx->next_aggregation_sis = ctx->attrs.aggr_interval / sample_interval;
2872 	ctx->next_ops_update_sis = ctx->attrs.ops_update_interval /
2873 		sample_interval;
2874 	ctx->next_intervals_tune_sis = ctx->next_aggregation_sis *
2875 		ctx->attrs.intervals_goal.aggrs;
2876 
2877 	damon_for_each_scheme(scheme, ctx) {
2878 		apply_interval = scheme->apply_interval_us ?
2879 			scheme->apply_interval_us : ctx->attrs.aggr_interval;
2880 		scheme->next_apply_sis = apply_interval / sample_interval;
2881 		damos_set_filters_default_reject(scheme);
2882 	}
2883 }
2884 
2885 /*
2886  * The monitoring daemon that runs as a kernel thread
2887  */
2888 static int kdamond_fn(void *data)
2889 {
2890 	struct damon_ctx *ctx = data;
2891 	unsigned int max_nr_accesses = 0;
2892 	unsigned long sz_limit = 0;
2893 
2894 	pr_debug("kdamond (%d) starts\n", current->pid);
2895 
2896 	complete(&ctx->kdamond_started);
2897 	kdamond_init_ctx(ctx);
2898 
2899 	if (ctx->ops.init)
2900 		ctx->ops.init(ctx);
2901 	ctx->regions_score_histogram = kmalloc_array(DAMOS_MAX_SCORE + 1,
2902 			sizeof(*ctx->regions_score_histogram), GFP_KERNEL);
2903 	if (!ctx->regions_score_histogram)
2904 		goto done;
2905 
2906 	sz_limit = damon_apply_min_nr_regions(ctx);
2907 
2908 	while (!kdamond_need_stop(ctx)) {
2909 		/*
2910 		 * ctx->attrs and ctx->next_{aggregation,ops_update}_sis could
2911 		 * be changed from kdamond_call().  Read the values here, and
2912 		 * use those for this iteration.  That is, new values updated
2913 		 * by damon_set_attrs() are respected from the next iteration.
2914 		 */
2915 		unsigned long next_aggregation_sis = ctx->next_aggregation_sis;
2916 		unsigned long next_ops_update_sis = ctx->next_ops_update_sis;
2917 		unsigned long sample_interval = ctx->attrs.sample_interval;
2918 
2919 		if (kdamond_wait_activation(ctx))
2920 			break;
2921 
2922 		if (ctx->ops.prepare_access_checks)
2923 			ctx->ops.prepare_access_checks(ctx);
2924 
2925 		kdamond_usleep(sample_interval);
2926 		ctx->passed_sample_intervals++;
2927 
2928 		if (ctx->ops.check_accesses)
2929 			max_nr_accesses = ctx->ops.check_accesses(ctx);
2930 
2931 		if (ctx->passed_sample_intervals >= next_aggregation_sis) {
2932 			kdamond_merge_regions(ctx,
2933 					max_nr_accesses / 10,
2934 					sz_limit);
2935 			/* online updates might be made */
2936 			sz_limit = damon_apply_min_nr_regions(ctx);
2937 		}
2938 
2939 		/*
2940 		 * do kdamond_call() and kdamond_apply_schemes() after
2941 		 * kdamond_merge_regions() if possible, to reduce overhead
2942 		 */
2943 		kdamond_call(ctx, false);
2944 		if (ctx->maybe_corrupted)
2945 			break;
2946 		if (!list_empty(&ctx->schemes))
2947 			kdamond_apply_schemes(ctx);
2948 		else
2949 			damos_walk_cancel(ctx);
2950 
2951 		sample_interval = ctx->attrs.sample_interval ?
2952 			ctx->attrs.sample_interval : 1;
2953 		if (ctx->passed_sample_intervals >= next_aggregation_sis) {
2954 			if (ctx->attrs.intervals_goal.aggrs &&
2955 					ctx->passed_sample_intervals >=
2956 					ctx->next_intervals_tune_sis) {
2957 				/*
2958 				 * ctx->next_aggregation_sis might be updated
2959 				 * from kdamond_call().  In that case,
2960 				 * damon_set_attrs(), which will be called from
2961 				 * kdamond_tune_intervals(), may wrongly think
2962 				 * this is in the middle of the current
2963 				 * aggregation, and reset the aggregation
2964 				 * information for all regions.  Then, the
2965 				 * following kdamond_reset_aggregated() call
2966 				 * will make the region information invalid,
2967 				 * particularly for ->nr_accesses_bp.
2968 				 *
2969 				 * Reset ->next_aggregation_sis to avoid that.
2970 				 * It will anyway be correctly updated after
2971 				 * this if clause.
2972 				 */
2973 				ctx->next_aggregation_sis =
2974 					next_aggregation_sis;
2975 				ctx->next_intervals_tune_sis +=
2976 					ctx->attrs.aggr_samples *
2977 					ctx->attrs.intervals_goal.aggrs;
2978 				kdamond_tune_intervals(ctx);
2979 				sample_interval = ctx->attrs.sample_interval ?
2980 					ctx->attrs.sample_interval : 1;
2981 
2982 			}
2983 			ctx->next_aggregation_sis = next_aggregation_sis +
2984 				ctx->attrs.aggr_interval / sample_interval;
2985 
2986 			kdamond_reset_aggregated(ctx);
2987 			kdamond_split_regions(ctx);
2988 		}
2989 
2990 		if (ctx->passed_sample_intervals >= next_ops_update_sis) {
2991 			ctx->next_ops_update_sis = next_ops_update_sis +
2992 				ctx->attrs.ops_update_interval /
2993 				sample_interval;
2994 			if (ctx->ops.update)
2995 				ctx->ops.update(ctx);
2996 		}
2997 	}
2998 done:
2999 	damon_destroy_targets(ctx);
3000 
3001 	kfree(ctx->regions_score_histogram);
3002 	kdamond_call(ctx, true);
3003 	damos_walk_cancel(ctx);
3004 
3005 	pr_debug("kdamond (%d) finishes\n", current->pid);
3006 	mutex_lock(&ctx->kdamond_lock);
3007 	ctx->kdamond = NULL;
3008 	mutex_unlock(&ctx->kdamond_lock);
3009 
3010 	mutex_lock(&damon_lock);
3011 	nr_running_ctxs--;
3012 	if (!nr_running_ctxs && running_exclusive_ctxs)
3013 		running_exclusive_ctxs = false;
3014 	mutex_unlock(&damon_lock);
3015 
3016 	return 0;
3017 }
3018 
3019 static int walk_system_ram(struct resource *res, void *arg)
3020 {
3021 	struct damon_addr_range *a = arg;
3022 
3023 	if (a->end - a->start < resource_size(res)) {
3024 		a->start = res->start;
3025 		a->end = res->end;
3026 	}
3027 	return 0;
3028 }
3029 
3030 /*
3031  * Find the biggest 'System RAM' resource and store its start and end address in
3032  * @start and @end, respectively.  If no System RAM is found, returns false.
3033  */
3034 static bool damon_find_biggest_system_ram(unsigned long *start,
3035 						unsigned long *end)
3036 
3038 	struct damon_addr_range arg = {};
3039 
3040 	walk_system_ram_res(0, ULONG_MAX, &arg, walk_system_ram);
3041 	if (arg.end <= arg.start)
3042 		return false;
3043 
3044 	*start = arg.start;
3045 	*end = arg.end;
3046 	return true;
3047 }
3048 
3049 /**
3050  * damon_set_region_biggest_system_ram_default() - Set the region of the given
3051  * monitoring target as requested, or biggest 'System RAM'.
3052  * @t:		The monitoring target to set the region.
3053  * @start:	The pointer to the start address of the region.
3054  * @end:	The pointer to the end address of the region.
3055  * @min_region_sz:	Minimum region size.
3056  *
3057  * This function sets the region of @t as requested by @start and @end.  If the
3058  * values of @start and @end are zero, however, this function finds the biggest
3059  * 'System RAM' resource and sets the region to cover the resource.  In the
3060  * latter case, this function saves the start and end addresses of the resource
3061  * in @start and @end, respectively.
3062  *
3063  * Return: 0 on success, negative error code otherwise.
3064  */
3065 int damon_set_region_biggest_system_ram_default(struct damon_target *t,
3066 			unsigned long *start, unsigned long *end,
3067 			unsigned long min_region_sz)
3068 {
3069 	struct damon_addr_range addr_range;
3070 
3071 	if (*start > *end)
3072 		return -EINVAL;
3073 
3074 	if (!*start && !*end &&
3075 		!damon_find_biggest_system_ram(start, end))
3076 		return -EINVAL;
3077 
3078 	addr_range.start = *start;
3079 	addr_range.end = *end;
3080 	return damon_set_regions(t, &addr_range, 1, min_region_sz);
3081 }
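
/*
 * A minimal usage sketch (illustrative only; using PAGE_SIZE as the minimum
 * region size is an assumption).  With both addresses zero, the biggest
 * 'System RAM' resource is discovered and written back to start/end.
 */
#if 0	/* example only; not compiled */
static int example_default_region(struct damon_target *t)
{
	unsigned long start = 0, end = 0;

	return damon_set_region_biggest_system_ram_default(t, &start, &end,
			PAGE_SIZE);
}
#endif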
3082 
3083 /*
3084  * damon_moving_sum() - Calculate an inferred moving sum value.
3085  * @mvsum:	Inferred sum of the last @len_window values.
3086  * @nomvsum:	Non-moving sum of the last discrete @len_window window values.
3087  * @len_window:	The number of last values to take care of.
3088  * @new_value:	New value that will be added to the pseudo moving sum.
3089  *
3090  * Moving sum (moving average * window size) is good for handling noise, but
3091  * the cost of keeping past values can be high for arbitrary window size.  This
3092  * function implements a lightweight pseudo moving sum function that doesn't
3093  * keep the past window values.
3094  *
3095  * It simply assumes there was no noise in the past, and gets the no-noise
3096  * assumed past value to drop from @nomvsum and @len_window.  @nomvsum is a
3097  * non-moving sum of the last window.  For example, if @len_window is 10 and we
3098  * have 25 values, @nomvsum is the sum of the 11th to 20th values of the 25
3099  * values.  Hence, this function simply drops @nomvsum / @len_window from
3100  * the given @mvsum and adds @new_value.
3101  *
3102  * For example, if @len_window is 10 and @nomvsum is 50, the last 10 values for
3103  * the last window could vary, e.g., 0, 10, 0, 10, 0, 10, 0, 0, 0, 20.  For
3104  * calculating next moving sum with a new value, we should drop 0 from 50 and
3105  * add the new value.  However, this function assumes it got value 5 for each
3106  * of the last ten times.  Based on the assumption, when the next value is
3107  * measured, it drops the assumed past value, 5, from the current sum, and adds
3108  * the new value to get the updated pseudo-moving average.
3109  *
3110  * This means the value could have errors, but the errors disappear at every
3111  * @len_window-aligned call.  For example, if @len_window is 10, the
3112  * pseudo moving sum with the 11th to 19th values would have an error.  But
3113  * the sum with the 20th value will not have the error.
3114  *
3115  * Return: Pseudo-moving average after getting the @new_value.
3116  */
3117 static unsigned int damon_moving_sum(unsigned int mvsum, unsigned int nomvsum,
3118 		unsigned int len_window, unsigned int new_value)
3119 {
3120 	return mvsum - nomvsum / len_window + new_value;
3121 }
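
/*
 * A worked example of the pseudo moving sum above (illustrative only; numbers
 * are assumptions).  With @len_window 10 and @nomvsum 50, the assumed
 * per-sample past value is 5, so a new value of 10 updates a moving sum of 50
 * to 50 - 5 + 10 == 55.
 */
#if 0	/* example only; not compiled */
static unsigned int example_moving_sum(void)
{
	return damon_moving_sum(50, 50, 10, 10);	/* == 55 */
}
#endif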
3122 
3123 /**
3124  * damon_update_region_access_rate() - Update the access rate of a region.
3125  * @r:		The DAMON region to update for its access check result.
3126  * @accessed:	Whether the region has accessed during last sampling interval.
3127  * @attrs:	The damon_attrs of the DAMON context.
3128  *
3129  * Update the access rate of a region with the region's last sampling interval
3130  * access check result.
3131  *
3132  * Usually this will be called by &damon_operations->check_accesses callback.
3133  */
3134 void damon_update_region_access_rate(struct damon_region *r, bool accessed,
3135 		struct damon_attrs *attrs)
3136 {
3137 	unsigned int len_window = 1;
3138 
3139 	/*
3140 	 * sample_interval can be zero, but cannot be larger than
3141 	 * aggr_interval, owing to validation of damon_set_attrs().
3142 	 */
3143 	if (attrs->sample_interval)
3144 		len_window = damon_max_nr_accesses(attrs);
3145 	r->nr_accesses_bp = damon_moving_sum(r->nr_accesses_bp,
3146 			r->last_nr_accesses * 10000, len_window,
3147 			accessed ? 10000 : 0);
3148 
3149 	if (accessed)
3150 		r->nr_accesses++;
3151 }
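
/*
 * A minimal sketch of the update above (illustrative only; the attrs values
 * are assumptions).  With a 5 ms sampling and a 100 ms aggregation interval,
 * len_window is 20; an observed access adds 10000 to the pseudo moving sum
 * while last_nr_accesses * 10000 / 20 is dropped from it.
 */
#if 0	/* example only; not compiled */
static void example_update_access_rate(struct damon_region *r)
{
	struct damon_attrs attrs = {
		.sample_interval = 5000,	/* 5 ms, in microseconds */
		.aggr_interval = 100000,	/* 100 ms */
	};

	damon_update_region_access_rate(r, true, &attrs);
}
#endif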
3152 
3153 /**
3154  * damon_initialized() - Return if DAMON is ready to be used.
3155  *
3156  * Return: true if DAMON is ready to be used, false otherwise.
3157  */
3158 bool damon_initialized(void)
3159 {
3160 	return damon_region_cache != NULL;
3161 }
3162 
3163 static int __init damon_init(void)
3164 {
3165 	damon_region_cache = KMEM_CACHE(damon_region, 0);
3166 	if (unlikely(!damon_region_cache)) {
3167 		pr_err("creating damon_region_cache fails\n");
3168 		return -ENOMEM;
3169 	}
3170 
3171 	return 0;
3172 }
3173 
3174 subsys_initcall(damon_init);
3175 
3176 #include "tests/core-kunit.h"
3177