xref: /linux/mm/damon/core.c (revision aec2f682d47c54ef434b2d440992626d80b1ebdc)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Data Access Monitor
4  *
5  * Author: SeongJae Park <sj@kernel.org>
6  */
7 
8 #define pr_fmt(fmt) "damon: " fmt
9 
10 #include <linux/damon.h>
11 #include <linux/delay.h>
12 #include <linux/kthread.h>
13 #include <linux/memcontrol.h>
14 #include <linux/mm.h>
15 #include <linux/psi.h>
16 #include <linux/slab.h>
17 #include <linux/string.h>
18 #include <linux/string_choices.h>
19 
20 #define CREATE_TRACE_POINTS
21 #include <trace/events/damon.h>
22 
23 static DEFINE_MUTEX(damon_lock);
24 static int nr_running_ctxs;
25 static bool running_exclusive_ctxs;
26 
27 static DEFINE_MUTEX(damon_ops_lock);
28 static struct damon_operations damon_registered_ops[NR_DAMON_OPS];
29 
30 static struct kmem_cache *damon_region_cache __ro_after_init;
31 
32 /* Should be called under damon_ops_lock with id smaller than NR_DAMON_OPS */
33 static bool __damon_is_registered_ops(enum damon_ops_id id)
34 {
35 	struct damon_operations empty_ops = {};
36 
37 	if (!memcmp(&empty_ops, &damon_registered_ops[id], sizeof(empty_ops)))
38 		return false;
39 	return true;
40 }
41 
42 /**
43  * damon_is_registered_ops() - Check if a given damon_operations is registered.
44  * @id:	Id of the damon_operations to check if registered.
45  *
46  * Return: true if the ops is set, false otherwise.
47  */
48 bool damon_is_registered_ops(enum damon_ops_id id)
49 {
50 	bool registered;
51 
52 	if (id >= NR_DAMON_OPS)
53 		return false;
54 	mutex_lock(&damon_ops_lock);
55 	registered = __damon_is_registered_ops(id);
56 	mutex_unlock(&damon_ops_lock);
57 	return registered;
58 }
59 
60 /**
61  * damon_register_ops() - Register a monitoring operations set to DAMON.
62  * @ops:	monitoring operations set to register.
63  *
64  * This function registers a monitoring operations set of valid &struct
65  * damon_operations->id so that others can find and use them later.
66  *
67  * Return: 0 on success, negative error code otherwise.
68  */
69 int damon_register_ops(struct damon_operations *ops)
70 {
71 	int err = 0;
72 
73 	if (ops->id >= NR_DAMON_OPS)
74 		return -EINVAL;
75 
76 	mutex_lock(&damon_ops_lock);
77 	/* Fail for already registered ops */
78 	if (__damon_is_registered_ops(ops->id))
79 		err = -EINVAL;
80 	else
81 		damon_registered_ops[ops->id] = *ops;
82 	mutex_unlock(&damon_ops_lock);
83 	return err;
84 }
85 
86 /**
87  * damon_select_ops() - Select a monitoring operations to use with the context.
88  * @ctx:	monitoring context to use the operations.
89  * @id:		id of the registered monitoring operations to select.
90  *
91  * This function finds registered monitoring operations set of @id and make
92  * @ctx to use it.
93  *
94  * Return: 0 on success, negative error code otherwise.
95  */
96 int damon_select_ops(struct damon_ctx *ctx, enum damon_ops_id id)
97 {
98 	int err = 0;
99 
100 	if (id >= NR_DAMON_OPS)
101 		return -EINVAL;
102 
103 	mutex_lock(&damon_ops_lock);
104 	if (!__damon_is_registered_ops(id))
105 		err = -EINVAL;
106 	else
107 		ctx->ops = damon_registered_ops[id];
108 	mutex_unlock(&damon_ops_lock);
109 	return err;
110 }
111 
112 #ifdef CONFIG_DAMON_DEBUG_SANITY
113 static void damon_verify_new_region(unsigned long start, unsigned long end)
114 {
115 	WARN_ONCE(start >= end, "start %lu >= end %lu\n", start, end);
116 }
117 #else
118 static void damon_verify_new_region(unsigned long start, unsigned long end)
119 {
120 }
121 #endif
122 
123 /*
124  * Construct a damon_region struct
125  *
126  * Returns the pointer to the new struct if success, or NULL otherwise
127  */
128 struct damon_region *damon_new_region(unsigned long start, unsigned long end)
129 {
130 	struct damon_region *region;
131 
132 	damon_verify_new_region(start, end);
133 	region = kmem_cache_alloc(damon_region_cache, GFP_KERNEL);
134 	if (!region)
135 		return NULL;
136 
137 	region->ar.start = start;
138 	region->ar.end = end;
139 	region->nr_accesses = 0;
140 	region->nr_accesses_bp = 0;
141 	INIT_LIST_HEAD(&region->list);
142 
143 	region->age = 0;
144 	region->last_nr_accesses = 0;
145 
146 	return region;
147 }
148 
149 void damon_add_region(struct damon_region *r, struct damon_target *t)
150 {
151 	list_add_tail(&r->list, &t->regions_list);
152 	t->nr_regions++;
153 }
154 
155 #ifdef CONFIG_DAMON_DEBUG_SANITY
156 static void damon_verify_del_region(struct damon_target *t)
157 {
158 	WARN_ONCE(t->nr_regions == 0, "t->nr_regions == 0\n");
159 }
160 #else
161 static void damon_verify_del_region(struct damon_target *t)
162 {
163 }
164 #endif
165 
166 static void damon_del_region(struct damon_region *r, struct damon_target *t)
167 {
168 	damon_verify_del_region(t);
169 
170 	list_del(&r->list);
171 	t->nr_regions--;
172 }
173 
174 static void damon_free_region(struct damon_region *r)
175 {
176 	kmem_cache_free(damon_region_cache, r);
177 }
178 
179 void damon_destroy_region(struct damon_region *r, struct damon_target *t)
180 {
181 	damon_del_region(r, t);
182 	damon_free_region(r);
183 }
184 
185 static bool damon_is_last_region(struct damon_region *r,
186 		struct damon_target *t)
187 {
188 	return list_is_last(&r->list, &t->regions_list);
189 }
190 
191 /*
192  * Check whether a region is intersecting an address range
193  *
194  * Returns true if it is.
195  */
196 static bool damon_intersect(struct damon_region *r,
197 		struct damon_addr_range *re)
198 {
199 	return !(r->ar.end <= re->start || re->end <= r->ar.start);
200 }
201 
202 /*
203  * Fill holes in regions with new regions.
204  */
205 static int damon_fill_regions_holes(struct damon_region *first,
206 		struct damon_region *last, struct damon_target *t)
207 {
208 	struct damon_region *r = first;
209 
210 	damon_for_each_region_from(r, t) {
211 		struct damon_region *next, *newr;
212 
213 		if (r == last)
214 			break;
215 		next = damon_next_region(r);
216 		if (r->ar.end != next->ar.start) {
217 			newr = damon_new_region(r->ar.end, next->ar.start);
218 			if (!newr)
219 				return -ENOMEM;
220 			damon_insert_region(newr, r, next, t);
221 		}
222 	}
223 	return 0;
224 }
225 
226 /*
227  * damon_set_regions() - Set regions of a target for given address ranges.
228  * @t:		the given target.
229  * @ranges:	array of new monitoring target ranges.
230  * @nr_ranges:	length of @ranges.
231  * @min_region_sz:	minimum region size.
232  *
233  * This function adds new regions to, or modify existing regions of a
234  * monitoring target to fit in specific ranges.
235  *
236  * Return: 0 if success, or negative error code otherwise.
237  */
238 int damon_set_regions(struct damon_target *t, struct damon_addr_range *ranges,
239 		unsigned int nr_ranges, unsigned long min_region_sz)
240 {
241 	struct damon_region *r, *next;
242 	unsigned int i;
243 	int err;
244 
245 	/* Remove regions which are not in the new ranges */
246 	damon_for_each_region_safe(r, next, t) {
247 		for (i = 0; i < nr_ranges; i++) {
248 			if (damon_intersect(r, &ranges[i]))
249 				break;
250 		}
251 		if (i == nr_ranges)
252 			damon_destroy_region(r, t);
253 	}
254 
255 	r = damon_first_region(t);
256 	/* Add new regions or resize existing regions to fit in the ranges */
257 	for (i = 0; i < nr_ranges; i++) {
258 		struct damon_region *first = NULL, *last, *newr;
259 		struct damon_addr_range *range;
260 
261 		range = &ranges[i];
262 		/* Get the first/last regions intersecting with the range */
263 		damon_for_each_region_from(r, t) {
264 			if (damon_intersect(r, range)) {
265 				if (!first)
266 					first = r;
267 				last = r;
268 			}
269 			if (r->ar.start >= range->end)
270 				break;
271 		}
272 		if (!first) {
273 			/* no region intersects with this range */
274 			newr = damon_new_region(
275 					ALIGN_DOWN(range->start,
276 						min_region_sz),
277 					ALIGN(range->end, min_region_sz));
278 			if (!newr)
279 				return -ENOMEM;
280 			damon_insert_region(newr, damon_prev_region(r), r, t);
281 		} else {
282 			/* resize intersecting regions to fit in this range */
283 			first->ar.start = ALIGN_DOWN(range->start,
284 					min_region_sz);
285 			last->ar.end = ALIGN(range->end, min_region_sz);
286 
287 			/* fill possible holes in the range */
288 			err = damon_fill_regions_holes(first, last, t);
289 			if (err)
290 				return err;
291 		}
292 	}
293 	return 0;
294 }
295 
296 struct damos_filter *damos_new_filter(enum damos_filter_type type,
297 		bool matching, bool allow)
298 {
299 	struct damos_filter *filter;
300 
301 	filter = kmalloc_obj(*filter);
302 	if (!filter)
303 		return NULL;
304 	filter->type = type;
305 	filter->matching = matching;
306 	filter->allow = allow;
307 	INIT_LIST_HEAD(&filter->list);
308 	return filter;
309 }
310 
311 /**
312  * damos_filter_for_ops() - Return if the filter is ops-handled one.
313  * @type:	type of the filter.
314  *
315  * Return: true if the filter of @type needs to be handled by ops layer, false
316  * otherwise.
317  */
318 bool damos_filter_for_ops(enum damos_filter_type type)
319 {
320 	switch (type) {
321 	case DAMOS_FILTER_TYPE_ADDR:
322 	case DAMOS_FILTER_TYPE_TARGET:
323 		return false;
324 	default:
325 		break;
326 	}
327 	return true;
328 }
329 
330 void damos_add_filter(struct damos *s, struct damos_filter *f)
331 {
332 	if (damos_filter_for_ops(f->type))
333 		list_add_tail(&f->list, &s->ops_filters);
334 	else
335 		list_add_tail(&f->list, &s->core_filters);
336 }
337 
338 static void damos_del_filter(struct damos_filter *f)
339 {
340 	list_del(&f->list);
341 }
342 
343 static void damos_free_filter(struct damos_filter *f)
344 {
345 	kfree(f);
346 }
347 
348 void damos_destroy_filter(struct damos_filter *f)
349 {
350 	damos_del_filter(f);
351 	damos_free_filter(f);
352 }
353 
354 struct damos_quota_goal *damos_new_quota_goal(
355 		enum damos_quota_goal_metric metric,
356 		unsigned long target_value)
357 {
358 	struct damos_quota_goal *goal;
359 
360 	goal = kmalloc_obj(*goal);
361 	if (!goal)
362 		return NULL;
363 	goal->metric = metric;
364 	goal->target_value = target_value;
365 	INIT_LIST_HEAD(&goal->list);
366 	return goal;
367 }
368 
369 void damos_add_quota_goal(struct damos_quota *q, struct damos_quota_goal *g)
370 {
371 	list_add_tail(&g->list, &q->goals);
372 }
373 
374 static void damos_del_quota_goal(struct damos_quota_goal *g)
375 {
376 	list_del(&g->list);
377 }
378 
379 static void damos_free_quota_goal(struct damos_quota_goal *g)
380 {
381 	kfree(g);
382 }
383 
384 void damos_destroy_quota_goal(struct damos_quota_goal *g)
385 {
386 	damos_del_quota_goal(g);
387 	damos_free_quota_goal(g);
388 }
389 
390 static bool damos_quota_goals_empty(struct damos_quota *q)
391 {
392 	return list_empty(&q->goals);
393 }
394 
395 /* initialize fields of @quota that normally API users wouldn't set */
396 static struct damos_quota *damos_quota_init(struct damos_quota *quota)
397 {
398 	quota->esz = 0;
399 	quota->total_charged_sz = 0;
400 	quota->total_charged_ns = 0;
401 	quota->charged_sz = 0;
402 	quota->charged_from = 0;
403 	quota->charge_target_from = NULL;
404 	quota->charge_addr_from = 0;
405 	quota->esz_bp = 0;
406 	return quota;
407 }
408 
409 struct damos *damon_new_scheme(struct damos_access_pattern *pattern,
410 			enum damos_action action,
411 			unsigned long apply_interval_us,
412 			struct damos_quota *quota,
413 			struct damos_watermarks *wmarks,
414 			int target_nid)
415 {
416 	struct damos *scheme;
417 
418 	scheme = kmalloc_obj(*scheme);
419 	if (!scheme)
420 		return NULL;
421 	scheme->pattern = *pattern;
422 	scheme->action = action;
423 	scheme->apply_interval_us = apply_interval_us;
424 	/*
425 	 * next_apply_sis will be set when kdamond starts.  While kdamond is
426 	 * running, it will also updated when it is added to the DAMON context,
427 	 * or damon_attrs are updated.
428 	 */
429 	scheme->next_apply_sis = 0;
430 	scheme->walk_completed = false;
431 	INIT_LIST_HEAD(&scheme->core_filters);
432 	INIT_LIST_HEAD(&scheme->ops_filters);
433 	scheme->stat = (struct damos_stat){};
434 	scheme->max_nr_snapshots = 0;
435 	INIT_LIST_HEAD(&scheme->list);
436 
437 	scheme->quota = *(damos_quota_init(quota));
438 	/* quota.goals should be separately set by caller */
439 	INIT_LIST_HEAD(&scheme->quota.goals);
440 
441 	scheme->wmarks = *wmarks;
442 	scheme->wmarks.activated = true;
443 
444 	scheme->migrate_dests = (struct damos_migrate_dests){};
445 	scheme->target_nid = target_nid;
446 
447 	return scheme;
448 }
449 
450 static void damos_set_next_apply_sis(struct damos *s, struct damon_ctx *ctx)
451 {
452 	unsigned long sample_interval = ctx->attrs.sample_interval ?
453 		ctx->attrs.sample_interval : 1;
454 	unsigned long apply_interval = s->apply_interval_us ?
455 		s->apply_interval_us : ctx->attrs.aggr_interval;
456 
457 	s->next_apply_sis = ctx->passed_sample_intervals +
458 		apply_interval / sample_interval;
459 }
460 
461 void damon_add_scheme(struct damon_ctx *ctx, struct damos *s)
462 {
463 	list_add_tail(&s->list, &ctx->schemes);
464 	damos_set_next_apply_sis(s, ctx);
465 }
466 
467 static void damon_del_scheme(struct damos *s)
468 {
469 	list_del(&s->list);
470 }
471 
472 static void damon_free_scheme(struct damos *s)
473 {
474 	kfree(s);
475 }
476 
477 void damon_destroy_scheme(struct damos *s)
478 {
479 	struct damos_quota_goal *g, *g_next;
480 	struct damos_filter *f, *next;
481 
482 	damos_for_each_quota_goal_safe(g, g_next, &s->quota)
483 		damos_destroy_quota_goal(g);
484 
485 	damos_for_each_core_filter_safe(f, next, s)
486 		damos_destroy_filter(f);
487 
488 	damos_for_each_ops_filter_safe(f, next, s)
489 		damos_destroy_filter(f);
490 
491 	kfree(s->migrate_dests.node_id_arr);
492 	kfree(s->migrate_dests.weight_arr);
493 	damon_del_scheme(s);
494 	damon_free_scheme(s);
495 }
496 
497 /*
498  * Construct a damon_target struct
499  *
500  * Returns the pointer to the new struct if success, or NULL otherwise
501  */
502 struct damon_target *damon_new_target(void)
503 {
504 	struct damon_target *t;
505 
506 	t = kmalloc_obj(*t);
507 	if (!t)
508 		return NULL;
509 
510 	t->pid = NULL;
511 	t->nr_regions = 0;
512 	INIT_LIST_HEAD(&t->regions_list);
513 	INIT_LIST_HEAD(&t->list);
514 	t->obsolete = false;
515 
516 	return t;
517 }
518 
519 void damon_add_target(struct damon_ctx *ctx, struct damon_target *t)
520 {
521 	list_add_tail(&t->list, &ctx->adaptive_targets);
522 }
523 
524 bool damon_targets_empty(struct damon_ctx *ctx)
525 {
526 	return list_empty(&ctx->adaptive_targets);
527 }
528 
529 static void damon_del_target(struct damon_target *t)
530 {
531 	list_del(&t->list);
532 }
533 
534 void damon_free_target(struct damon_target *t)
535 {
536 	struct damon_region *r, *next;
537 
538 	damon_for_each_region_safe(r, next, t)
539 		damon_free_region(r);
540 	kfree(t);
541 }
542 
543 void damon_destroy_target(struct damon_target *t, struct damon_ctx *ctx)
544 {
545 
546 	if (ctx && ctx->ops.cleanup_target)
547 		ctx->ops.cleanup_target(t);
548 
549 	damon_del_target(t);
550 	damon_free_target(t);
551 }
552 
553 #ifdef CONFIG_DAMON_DEBUG_SANITY
554 static void damon_verify_nr_regions(struct damon_target *t)
555 {
556 	struct damon_region *r;
557 	unsigned int count = 0;
558 
559 	damon_for_each_region(r, t)
560 		count++;
561 	WARN_ONCE(count != t->nr_regions, "t->nr_regions (%u) != count (%u)\n",
562 			t->nr_regions, count);
563 }
564 #else
565 static void damon_verify_nr_regions(struct damon_target *t)
566 {
567 }
568 #endif
569 
570 unsigned int damon_nr_regions(struct damon_target *t)
571 {
572 	damon_verify_nr_regions(t);
573 
574 	return t->nr_regions;
575 }
576 
577 struct damon_ctx *damon_new_ctx(void)
578 {
579 	struct damon_ctx *ctx;
580 
581 	ctx = kzalloc_obj(*ctx);
582 	if (!ctx)
583 		return NULL;
584 
585 	init_completion(&ctx->kdamond_started);
586 
587 	ctx->attrs.sample_interval = 5 * 1000;
588 	ctx->attrs.aggr_interval = 100 * 1000;
589 	ctx->attrs.ops_update_interval = 60 * 1000 * 1000;
590 
591 	ctx->passed_sample_intervals = 0;
592 	/* These will be set from kdamond_init_ctx() */
593 	ctx->next_aggregation_sis = 0;
594 	ctx->next_ops_update_sis = 0;
595 
596 	mutex_init(&ctx->kdamond_lock);
597 	INIT_LIST_HEAD(&ctx->call_controls);
598 	mutex_init(&ctx->call_controls_lock);
599 	mutex_init(&ctx->walk_control_lock);
600 
601 	ctx->attrs.min_nr_regions = 10;
602 	ctx->attrs.max_nr_regions = 1000;
603 
604 	ctx->addr_unit = 1;
605 	ctx->min_region_sz = DAMON_MIN_REGION_SZ;
606 
607 	INIT_LIST_HEAD(&ctx->adaptive_targets);
608 	INIT_LIST_HEAD(&ctx->schemes);
609 
610 	return ctx;
611 }
612 
613 static void damon_destroy_targets(struct damon_ctx *ctx)
614 {
615 	struct damon_target *t, *next_t;
616 
617 	damon_for_each_target_safe(t, next_t, ctx)
618 		damon_destroy_target(t, ctx);
619 }
620 
621 void damon_destroy_ctx(struct damon_ctx *ctx)
622 {
623 	struct damos *s, *next_s;
624 
625 	damon_destroy_targets(ctx);
626 
627 	damon_for_each_scheme_safe(s, next_s, ctx)
628 		damon_destroy_scheme(s);
629 
630 	kfree(ctx);
631 }
632 
633 static bool damon_attrs_equals(const struct damon_attrs *attrs1,
634 		const struct damon_attrs *attrs2)
635 {
636 	const struct damon_intervals_goal *ig1 = &attrs1->intervals_goal;
637 	const struct damon_intervals_goal *ig2 = &attrs2->intervals_goal;
638 
639 	return attrs1->sample_interval == attrs2->sample_interval &&
640 		attrs1->aggr_interval == attrs2->aggr_interval &&
641 		attrs1->ops_update_interval == attrs2->ops_update_interval &&
642 		attrs1->min_nr_regions == attrs2->min_nr_regions &&
643 		attrs1->max_nr_regions == attrs2->max_nr_regions &&
644 		ig1->access_bp == ig2->access_bp &&
645 		ig1->aggrs == ig2->aggrs &&
646 		ig1->min_sample_us == ig2->min_sample_us &&
647 		ig1->max_sample_us == ig2->max_sample_us;
648 }
649 
650 static unsigned int damon_age_for_new_attrs(unsigned int age,
651 		struct damon_attrs *old_attrs, struct damon_attrs *new_attrs)
652 {
653 	return age * old_attrs->aggr_interval / new_attrs->aggr_interval;
654 }
655 
656 /* convert access ratio in bp (per 10,000) to nr_accesses */
657 static unsigned int damon_accesses_bp_to_nr_accesses(
658 		unsigned int accesses_bp, struct damon_attrs *attrs)
659 {
660 	return accesses_bp * damon_max_nr_accesses(attrs) / 10000;
661 }
662 
663 /*
664  * Convert nr_accesses to access ratio in bp (per 10,000).
665  *
666  * Callers should ensure attrs.aggr_interval is not zero, like
667  * damon_update_monitoring_results() does .  Otherwise, divide-by-zero would
668  * happen.
669  */
670 static unsigned int damon_nr_accesses_to_accesses_bp(
671 		unsigned int nr_accesses, struct damon_attrs *attrs)
672 {
673 	return mult_frac(nr_accesses, 10000, damon_max_nr_accesses(attrs));
674 }
675 
676 static unsigned int damon_nr_accesses_for_new_attrs(unsigned int nr_accesses,
677 		struct damon_attrs *old_attrs, struct damon_attrs *new_attrs)
678 {
679 	return damon_accesses_bp_to_nr_accesses(
680 			damon_nr_accesses_to_accesses_bp(
681 				nr_accesses, old_attrs),
682 			new_attrs);
683 }
684 
685 static void damon_update_monitoring_result(struct damon_region *r,
686 		struct damon_attrs *old_attrs, struct damon_attrs *new_attrs,
687 		bool aggregating)
688 {
689 	if (!aggregating) {
690 		r->nr_accesses = damon_nr_accesses_for_new_attrs(
691 				r->nr_accesses, old_attrs, new_attrs);
692 		r->nr_accesses_bp = r->nr_accesses * 10000;
693 	} else {
694 		/*
695 		 * if this is called in the middle of the aggregation, reset
696 		 * the aggregations we made so far for this aggregation
697 		 * interval.  In other words, make the status like
698 		 * kdamond_reset_aggregated() is called.
699 		 */
700 		r->last_nr_accesses = damon_nr_accesses_for_new_attrs(
701 				r->last_nr_accesses, old_attrs, new_attrs);
702 		r->nr_accesses_bp = r->last_nr_accesses * 10000;
703 		r->nr_accesses = 0;
704 	}
705 	r->age = damon_age_for_new_attrs(r->age, old_attrs, new_attrs);
706 }
707 
708 /*
709  * region->nr_accesses is the number of sampling intervals in the last
710  * aggregation interval that access to the region has found, and region->age is
711  * the number of aggregation intervals that its access pattern has maintained.
712  * For the reason, the real meaning of the two fields depend on current
713  * sampling interval and aggregation interval.  This function updates
714  * ->nr_accesses and ->age of given damon_ctx's regions for new damon_attrs.
715  */
716 static void damon_update_monitoring_results(struct damon_ctx *ctx,
717 		struct damon_attrs *new_attrs, bool aggregating)
718 {
719 	struct damon_attrs *old_attrs = &ctx->attrs;
720 	struct damon_target *t;
721 	struct damon_region *r;
722 
723 	/* if any interval is zero, simply forgive conversion */
724 	if (!old_attrs->sample_interval || !old_attrs->aggr_interval ||
725 			!new_attrs->sample_interval ||
726 			!new_attrs->aggr_interval)
727 		return;
728 
729 	damon_for_each_target(t, ctx)
730 		damon_for_each_region(r, t)
731 			damon_update_monitoring_result(
732 					r, old_attrs, new_attrs, aggregating);
733 }
734 
735 /*
736  * damon_valid_intervals_goal() - return if the intervals goal of @attrs is
737  * valid.
738  */
739 static bool damon_valid_intervals_goal(struct damon_attrs *attrs)
740 {
741 	struct damon_intervals_goal *goal = &attrs->intervals_goal;
742 
743 	/* tuning is disabled */
744 	if (!goal->aggrs)
745 		return true;
746 	if (goal->min_sample_us > goal->max_sample_us)
747 		return false;
748 	if (attrs->sample_interval < goal->min_sample_us ||
749 			goal->max_sample_us < attrs->sample_interval)
750 		return false;
751 	return true;
752 }
753 
754 /**
755  * damon_set_attrs() - Set attributes for the monitoring.
756  * @ctx:		monitoring context
757  * @attrs:		monitoring attributes
758  *
759  * This function updates monitoring results and next monitoring/damos operation
760  * schedules.  Because those are periodically updated by kdamond, this should
761  * be called from a safe contexts.  Such contexts include damon_ctx setup time
762  * while the kdamond is not yet started, and inside of kdamond_fn().
763  *
764  * In detail, all DAMON API callers directly call this function for initial
765  * setup of damon_ctx before calling damon_start().  Some of the API callers
766  * also indirectly call this function via damon_call() -> damon_commit() for
767  * online parameters updates.  Finally, kdamond_fn() itself use this for
768  * applying auto-tuned monitoring intervals.
769  *
770  * Every time interval is in micro-seconds.
771  *
772  * Return: 0 on success, negative error code otherwise.
773  */
774 int damon_set_attrs(struct damon_ctx *ctx, struct damon_attrs *attrs)
775 {
776 	unsigned long sample_interval = attrs->sample_interval ?
777 		attrs->sample_interval : 1;
778 	struct damos *s;
779 	bool aggregating = ctx->passed_sample_intervals <
780 		ctx->next_aggregation_sis;
781 
782 	if (!damon_valid_intervals_goal(attrs))
783 		return -EINVAL;
784 
785 	if (attrs->min_nr_regions < 3)
786 		return -EINVAL;
787 	if (attrs->min_nr_regions > attrs->max_nr_regions)
788 		return -EINVAL;
789 	if (attrs->sample_interval > attrs->aggr_interval)
790 		return -EINVAL;
791 
792 	/* calls from core-external doesn't set this. */
793 	if (!attrs->aggr_samples)
794 		attrs->aggr_samples = attrs->aggr_interval / sample_interval;
795 
796 	ctx->next_aggregation_sis = ctx->passed_sample_intervals +
797 		attrs->aggr_interval / sample_interval;
798 	ctx->next_ops_update_sis = ctx->passed_sample_intervals +
799 		attrs->ops_update_interval / sample_interval;
800 
801 	damon_update_monitoring_results(ctx, attrs, aggregating);
802 	ctx->attrs = *attrs;
803 
804 	damon_for_each_scheme(s, ctx)
805 		damos_set_next_apply_sis(s, ctx);
806 
807 	return 0;
808 }
809 
810 /**
811  * damon_set_schemes() - Set data access monitoring based operation schemes.
812  * @ctx:	monitoring context
813  * @schemes:	array of the schemes
814  * @nr_schemes:	number of entries in @schemes
815  *
816  * This function should not be called while the kdamond of the context is
817  * running.
818  */
819 void damon_set_schemes(struct damon_ctx *ctx, struct damos **schemes,
820 			ssize_t nr_schemes)
821 {
822 	struct damos *s, *next;
823 	ssize_t i;
824 
825 	damon_for_each_scheme_safe(s, next, ctx)
826 		damon_destroy_scheme(s);
827 	for (i = 0; i < nr_schemes; i++)
828 		damon_add_scheme(ctx, schemes[i]);
829 }
830 
831 static struct damos_quota_goal *damos_nth_quota_goal(
832 		int n, struct damos_quota *q)
833 {
834 	struct damos_quota_goal *goal;
835 	int i = 0;
836 
837 	damos_for_each_quota_goal(goal, q) {
838 		if (i++ == n)
839 			return goal;
840 	}
841 	return NULL;
842 }
843 
844 static void damos_commit_quota_goal_union(
845 		struct damos_quota_goal *dst, struct damos_quota_goal *src)
846 {
847 	switch (dst->metric) {
848 	case DAMOS_QUOTA_NODE_MEM_USED_BP:
849 	case DAMOS_QUOTA_NODE_MEM_FREE_BP:
850 		dst->nid = src->nid;
851 		break;
852 	case DAMOS_QUOTA_NODE_MEMCG_USED_BP:
853 	case DAMOS_QUOTA_NODE_MEMCG_FREE_BP:
854 		dst->nid = src->nid;
855 		dst->memcg_id = src->memcg_id;
856 		break;
857 	default:
858 		break;
859 	}
860 }
861 
862 static void damos_commit_quota_goal(
863 		struct damos_quota_goal *dst, struct damos_quota_goal *src)
864 {
865 	dst->metric = src->metric;
866 	dst->target_value = src->target_value;
867 	if (dst->metric == DAMOS_QUOTA_USER_INPUT)
868 		dst->current_value = src->current_value;
869 	/* keep last_psi_total as is, since it will be updated in next cycle */
870 	damos_commit_quota_goal_union(dst, src);
871 }
872 
873 /**
874  * damos_commit_quota_goals() - Commit DAMOS quota goals to another quota.
875  * @dst:	The commit destination DAMOS quota.
876  * @src:	The commit source DAMOS quota.
877  *
878  * Copies user-specified parameters for quota goals from @src to @dst.  Users
879  * should use this function for quota goals-level parameters update of running
880  * DAMON contexts, instead of manual in-place updates.
881  *
882  * This function should be called from parameters-update safe context, like
883  * damon_call().
884  */
885 int damos_commit_quota_goals(struct damos_quota *dst, struct damos_quota *src)
886 {
887 	struct damos_quota_goal *dst_goal, *next, *src_goal, *new_goal;
888 	int i = 0, j = 0;
889 
890 	damos_for_each_quota_goal_safe(dst_goal, next, dst) {
891 		src_goal = damos_nth_quota_goal(i++, src);
892 		if (src_goal)
893 			damos_commit_quota_goal(dst_goal, src_goal);
894 		else
895 			damos_destroy_quota_goal(dst_goal);
896 	}
897 	damos_for_each_quota_goal_safe(src_goal, next, src) {
898 		if (j++ < i)
899 			continue;
900 		new_goal = damos_new_quota_goal(
901 				src_goal->metric, src_goal->target_value);
902 		if (!new_goal)
903 			return -ENOMEM;
904 		damos_commit_quota_goal(new_goal, src_goal);
905 		damos_add_quota_goal(dst, new_goal);
906 	}
907 	return 0;
908 }
909 
910 static int damos_commit_quota(struct damos_quota *dst, struct damos_quota *src)
911 {
912 	int err;
913 
914 	dst->reset_interval = src->reset_interval;
915 	dst->ms = src->ms;
916 	dst->sz = src->sz;
917 	err = damos_commit_quota_goals(dst, src);
918 	if (err)
919 		return err;
920 	dst->goal_tuner = src->goal_tuner;
921 	dst->weight_sz = src->weight_sz;
922 	dst->weight_nr_accesses = src->weight_nr_accesses;
923 	dst->weight_age = src->weight_age;
924 	return 0;
925 }
926 
927 static struct damos_filter *damos_nth_core_filter(int n, struct damos *s)
928 {
929 	struct damos_filter *filter;
930 	int i = 0;
931 
932 	damos_for_each_core_filter(filter, s) {
933 		if (i++ == n)
934 			return filter;
935 	}
936 	return NULL;
937 }
938 
939 static struct damos_filter *damos_nth_ops_filter(int n, struct damos *s)
940 {
941 	struct damos_filter *filter;
942 	int i = 0;
943 
944 	damos_for_each_ops_filter(filter, s) {
945 		if (i++ == n)
946 			return filter;
947 	}
948 	return NULL;
949 }
950 
951 static void damos_commit_filter_arg(
952 		struct damos_filter *dst, struct damos_filter *src)
953 {
954 	switch (dst->type) {
955 	case DAMOS_FILTER_TYPE_MEMCG:
956 		dst->memcg_id = src->memcg_id;
957 		break;
958 	case DAMOS_FILTER_TYPE_ADDR:
959 		dst->addr_range = src->addr_range;
960 		break;
961 	case DAMOS_FILTER_TYPE_TARGET:
962 		dst->target_idx = src->target_idx;
963 		break;
964 	case DAMOS_FILTER_TYPE_HUGEPAGE_SIZE:
965 		dst->sz_range = src->sz_range;
966 		break;
967 	default:
968 		break;
969 	}
970 }
971 
972 static void damos_commit_filter(
973 		struct damos_filter *dst, struct damos_filter *src)
974 {
975 	dst->type = src->type;
976 	dst->matching = src->matching;
977 	dst->allow = src->allow;
978 	damos_commit_filter_arg(dst, src);
979 }
980 
981 static int damos_commit_core_filters(struct damos *dst, struct damos *src)
982 {
983 	struct damos_filter *dst_filter, *next, *src_filter, *new_filter;
984 	int i = 0, j = 0;
985 
986 	damos_for_each_core_filter_safe(dst_filter, next, dst) {
987 		src_filter = damos_nth_core_filter(i++, src);
988 		if (src_filter)
989 			damos_commit_filter(dst_filter, src_filter);
990 		else
991 			damos_destroy_filter(dst_filter);
992 	}
993 
994 	damos_for_each_core_filter_safe(src_filter, next, src) {
995 		if (j++ < i)
996 			continue;
997 
998 		new_filter = damos_new_filter(
999 				src_filter->type, src_filter->matching,
1000 				src_filter->allow);
1001 		if (!new_filter)
1002 			return -ENOMEM;
1003 		damos_commit_filter_arg(new_filter, src_filter);
1004 		damos_add_filter(dst, new_filter);
1005 	}
1006 	return 0;
1007 }
1008 
1009 static int damos_commit_ops_filters(struct damos *dst, struct damos *src)
1010 {
1011 	struct damos_filter *dst_filter, *next, *src_filter, *new_filter;
1012 	int i = 0, j = 0;
1013 
1014 	damos_for_each_ops_filter_safe(dst_filter, next, dst) {
1015 		src_filter = damos_nth_ops_filter(i++, src);
1016 		if (src_filter)
1017 			damos_commit_filter(dst_filter, src_filter);
1018 		else
1019 			damos_destroy_filter(dst_filter);
1020 	}
1021 
1022 	damos_for_each_ops_filter_safe(src_filter, next, src) {
1023 		if (j++ < i)
1024 			continue;
1025 
1026 		new_filter = damos_new_filter(
1027 				src_filter->type, src_filter->matching,
1028 				src_filter->allow);
1029 		if (!new_filter)
1030 			return -ENOMEM;
1031 		damos_commit_filter_arg(new_filter, src_filter);
1032 		damos_add_filter(dst, new_filter);
1033 	}
1034 	return 0;
1035 }
1036 
1037 /**
1038  * damos_filters_default_reject() - decide whether to reject memory that didn't
1039  *				    match with any given filter.
1040  * @filters:	Given DAMOS filters of a group.
1041  */
1042 static bool damos_filters_default_reject(struct list_head *filters)
1043 {
1044 	struct damos_filter *last_filter;
1045 
1046 	if (list_empty(filters))
1047 		return false;
1048 	last_filter = list_last_entry(filters, struct damos_filter, list);
1049 	return last_filter->allow;
1050 }
1051 
1052 static void damos_set_filters_default_reject(struct damos *s)
1053 {
1054 	if (!list_empty(&s->ops_filters))
1055 		s->core_filters_default_reject = false;
1056 	else
1057 		s->core_filters_default_reject =
1058 			damos_filters_default_reject(&s->core_filters);
1059 	s->ops_filters_default_reject =
1060 		damos_filters_default_reject(&s->ops_filters);
1061 }
1062 
1063 /*
1064  * damos_commit_dests() - Copy migration destinations from @src to @dst.
1065  * @dst:	Destination structure to update.
1066  * @src:	Source structure to copy from.
1067  *
1068  * If the number of destinations has changed, the old arrays in @dst are freed
1069  * and new ones are allocated.  On success, @dst contains a full copy of
1070  * @src's arrays and count.
1071  *
1072  * On allocation failure, @dst is left in a partially torn-down state: its
1073  * arrays may be NULL and @nr_dests may not reflect the actual allocation
1074  * sizes.  The structure remains safe to deallocate via damon_destroy_scheme(),
1075  * but callers must not reuse @dst for further commits — it should be
1076  * discarded.
1077  *
1078  * Return: 0 on success, -ENOMEM on allocation failure.
1079  */
1080 static int damos_commit_dests(struct damos_migrate_dests *dst,
1081 		struct damos_migrate_dests *src)
1082 {
1083 	if (dst->nr_dests != src->nr_dests) {
1084 		kfree(dst->node_id_arr);
1085 		kfree(dst->weight_arr);
1086 
1087 		dst->node_id_arr = kmalloc_array(src->nr_dests,
1088 			sizeof(*dst->node_id_arr), GFP_KERNEL);
1089 		if (!dst->node_id_arr) {
1090 			dst->weight_arr = NULL;
1091 			return -ENOMEM;
1092 		}
1093 
1094 		dst->weight_arr = kmalloc_array(src->nr_dests,
1095 			sizeof(*dst->weight_arr), GFP_KERNEL);
1096 		if (!dst->weight_arr) {
1097 			/* ->node_id_arr will be freed by scheme destruction */
1098 			return -ENOMEM;
1099 		}
1100 	}
1101 
1102 	dst->nr_dests = src->nr_dests;
1103 	for (int i = 0; i < src->nr_dests; i++) {
1104 		dst->node_id_arr[i] = src->node_id_arr[i];
1105 		dst->weight_arr[i] = src->weight_arr[i];
1106 	}
1107 
1108 	return 0;
1109 }
1110 
1111 static int damos_commit_filters(struct damos *dst, struct damos *src)
1112 {
1113 	int err;
1114 
1115 	err = damos_commit_core_filters(dst, src);
1116 	if (err)
1117 		return err;
1118 	err = damos_commit_ops_filters(dst, src);
1119 	if (err)
1120 		return err;
1121 	damos_set_filters_default_reject(dst);
1122 	return 0;
1123 }
1124 
1125 static struct damos *damon_nth_scheme(int n, struct damon_ctx *ctx)
1126 {
1127 	struct damos *s;
1128 	int i = 0;
1129 
1130 	damon_for_each_scheme(s, ctx) {
1131 		if (i++ == n)
1132 			return s;
1133 	}
1134 	return NULL;
1135 }
1136 
1137 static int damos_commit(struct damos *dst, struct damos *src)
1138 {
1139 	int err;
1140 
1141 	dst->pattern = src->pattern;
1142 	dst->action = src->action;
1143 	dst->apply_interval_us = src->apply_interval_us;
1144 
1145 	err = damos_commit_quota(&dst->quota, &src->quota);
1146 	if (err)
1147 		return err;
1148 
1149 	dst->wmarks = src->wmarks;
1150 	dst->target_nid = src->target_nid;
1151 
1152 	err = damos_commit_dests(&dst->migrate_dests, &src->migrate_dests);
1153 	if (err)
1154 		return err;
1155 
1156 	err = damos_commit_filters(dst, src);
1157 	if (err)
1158 		return err;
1159 
1160 	dst->max_nr_snapshots = src->max_nr_snapshots;
1161 	return 0;
1162 }
1163 
1164 static int damon_commit_schemes(struct damon_ctx *dst, struct damon_ctx *src)
1165 {
1166 	struct damos *dst_scheme, *next, *src_scheme, *new_scheme;
1167 	int i = 0, j = 0, err;
1168 
1169 	damon_for_each_scheme_safe(dst_scheme, next, dst) {
1170 		src_scheme = damon_nth_scheme(i++, src);
1171 		if (src_scheme) {
1172 			err = damos_commit(dst_scheme, src_scheme);
1173 			if (err)
1174 				return err;
1175 		} else {
1176 			damon_destroy_scheme(dst_scheme);
1177 		}
1178 	}
1179 
1180 	damon_for_each_scheme_safe(src_scheme, next, src) {
1181 		if (j++ < i)
1182 			continue;
1183 		new_scheme = damon_new_scheme(&src_scheme->pattern,
1184 				src_scheme->action,
1185 				src_scheme->apply_interval_us,
1186 				&src_scheme->quota, &src_scheme->wmarks,
1187 				NUMA_NO_NODE);
1188 		if (!new_scheme)
1189 			return -ENOMEM;
1190 		err = damos_commit(new_scheme, src_scheme);
1191 		if (err) {
1192 			damon_destroy_scheme(new_scheme);
1193 			return err;
1194 		}
1195 		damon_add_scheme(dst, new_scheme);
1196 	}
1197 	return 0;
1198 }
1199 
1200 static struct damon_target *damon_nth_target(int n, struct damon_ctx *ctx)
1201 {
1202 	struct damon_target *t;
1203 	int i = 0;
1204 
1205 	damon_for_each_target(t, ctx) {
1206 		if (i++ == n)
1207 			return t;
1208 	}
1209 	return NULL;
1210 }
1211 
1212 /*
1213  * The caller should ensure the regions of @src are
1214  * 1. valid (end >= src) and
1215  * 2. sorted by starting address.
1216  *
1217  * If @src has no region, @dst keeps current regions.
1218  */
1219 static int damon_commit_target_regions(struct damon_target *dst,
1220 		struct damon_target *src, unsigned long src_min_region_sz)
1221 {
1222 	struct damon_region *src_region;
1223 	struct damon_addr_range *ranges;
1224 	int i = 0, err;
1225 
1226 	damon_for_each_region(src_region, src)
1227 		i++;
1228 	if (!i)
1229 		return 0;
1230 
1231 	ranges = kmalloc_objs(*ranges, i, GFP_KERNEL | __GFP_NOWARN);
1232 	if (!ranges)
1233 		return -ENOMEM;
1234 	i = 0;
1235 	damon_for_each_region(src_region, src)
1236 		ranges[i++] = src_region->ar;
1237 	err = damon_set_regions(dst, ranges, i, src_min_region_sz);
1238 	kfree(ranges);
1239 	return err;
1240 }
1241 
1242 static int damon_commit_target(
1243 		struct damon_target *dst, bool dst_has_pid,
1244 		struct damon_target *src, bool src_has_pid,
1245 		unsigned long src_min_region_sz)
1246 {
1247 	int err;
1248 
1249 	err = damon_commit_target_regions(dst, src, src_min_region_sz);
1250 	if (err)
1251 		return err;
1252 	if (dst_has_pid)
1253 		put_pid(dst->pid);
1254 	if (src_has_pid)
1255 		get_pid(src->pid);
1256 	dst->pid = src->pid;
1257 	return 0;
1258 }
1259 
1260 static int damon_commit_targets(
1261 		struct damon_ctx *dst, struct damon_ctx *src)
1262 {
1263 	struct damon_target *dst_target, *next, *src_target, *new_target;
1264 	int i = 0, j = 0, err;
1265 
1266 	damon_for_each_target_safe(dst_target, next, dst) {
1267 		src_target = damon_nth_target(i++, src);
1268 		/*
1269 		 * If src target is obsolete, do not commit the parameters to
1270 		 * the dst target, and further remove the dst target.
1271 		 */
1272 		if (src_target && !src_target->obsolete) {
1273 			err = damon_commit_target(
1274 					dst_target, damon_target_has_pid(dst),
1275 					src_target, damon_target_has_pid(src),
1276 					src->min_region_sz);
1277 			if (err)
1278 				return err;
1279 		} else {
1280 			struct damos *s;
1281 
1282 			damon_destroy_target(dst_target, dst);
1283 			damon_for_each_scheme(s, dst) {
1284 				if (s->quota.charge_target_from == dst_target) {
1285 					s->quota.charge_target_from = NULL;
1286 					s->quota.charge_addr_from = 0;
1287 				}
1288 			}
1289 		}
1290 	}
1291 
1292 	damon_for_each_target_safe(src_target, next, src) {
1293 		if (j++ < i)
1294 			continue;
1295 		/* target to remove has no matching dst */
1296 		if (src_target->obsolete)
1297 			return -EINVAL;
1298 		new_target = damon_new_target();
1299 		if (!new_target)
1300 			return -ENOMEM;
1301 		err = damon_commit_target(new_target, false,
1302 				src_target, damon_target_has_pid(src),
1303 				src->min_region_sz);
1304 		if (err) {
1305 			damon_destroy_target(new_target, NULL);
1306 			return err;
1307 		}
1308 		damon_add_target(dst, new_target);
1309 	}
1310 	return 0;
1311 }
1312 
1313 /**
1314  * damon_commit_ctx() - Commit parameters of a DAMON context to another.
1315  * @dst:	The commit destination DAMON context.
1316  * @src:	The commit source DAMON context.
1317  *
1318  * This function copies user-specified parameters from @src to @dst and update
1319  * the internal status and results accordingly.  Users should use this function
1320  * for context-level parameters update of running context, instead of manual
1321  * in-place updates.
1322  *
1323  * This function should be called from parameters-update safe context, like
1324  * damon_call().
1325  */
1326 int damon_commit_ctx(struct damon_ctx *dst, struct damon_ctx *src)
1327 {
1328 	int err;
1329 
1330 	dst->maybe_corrupted = true;
1331 	if (!is_power_of_2(src->min_region_sz))
1332 		return -EINVAL;
1333 
1334 	err = damon_commit_schemes(dst, src);
1335 	if (err)
1336 		return err;
1337 	err = damon_commit_targets(dst, src);
1338 	if (err)
1339 		return err;
1340 	/*
1341 	 * schemes and targets should be updated first, since
1342 	 * 1. damon_set_attrs() updates monitoring results of targets and
1343 	 * next_apply_sis of schemes, and
1344 	 * 2. ops update should be done after pid handling is done (target
1345 	 *    committing require putting pids).
1346 	 */
1347 	if (!damon_attrs_equals(&dst->attrs, &src->attrs)) {
1348 		err = damon_set_attrs(dst, &src->attrs);
1349 		if (err)
1350 			return err;
1351 	}
1352 	dst->ops = src->ops;
1353 	dst->addr_unit = src->addr_unit;
1354 	dst->min_region_sz = src->min_region_sz;
1355 
1356 	dst->maybe_corrupted = false;
1357 	return 0;
1358 }
1359 
1360 /**
1361  * damon_nr_running_ctxs() - Return number of currently running contexts.
1362  */
1363 int damon_nr_running_ctxs(void)
1364 {
1365 	int nr_ctxs;
1366 
1367 	mutex_lock(&damon_lock);
1368 	nr_ctxs = nr_running_ctxs;
1369 	mutex_unlock(&damon_lock);
1370 
1371 	return nr_ctxs;
1372 }
1373 
1374 /* Returns the size upper limit for each monitoring region */
1375 static unsigned long damon_region_sz_limit(struct damon_ctx *ctx)
1376 {
1377 	struct damon_target *t;
1378 	struct damon_region *r;
1379 	unsigned long sz = 0;
1380 
1381 	damon_for_each_target(t, ctx) {
1382 		damon_for_each_region(r, t)
1383 			sz += damon_sz_region(r);
1384 	}
1385 
1386 	if (ctx->attrs.min_nr_regions)
1387 		sz /= ctx->attrs.min_nr_regions;
1388 	if (sz < ctx->min_region_sz)
1389 		sz = ctx->min_region_sz;
1390 
1391 	return sz;
1392 }
1393 
1394 static void damon_split_region_at(struct damon_target *t,
1395 				  struct damon_region *r, unsigned long sz_r);
1396 
1397 /*
1398  * damon_apply_min_nr_regions() - Make effect of min_nr_regions parameter.
1399  * @ctx:	monitoring context.
1400  *
1401  * This function implement min_nr_regions (minimum number of damon_region
1402  * objects in the given monitoring context) behavior.  It first calculates
1403  * maximum size of each region for enforcing the min_nr_regions as total size
1404  * of the regions divided by the min_nr_regions.  After that, this function
1405  * splits regions to ensure all regions are equal to or smaller than the size
1406  * limit.  Finally, this function returns the maximum size limit.
1407  *
1408  * Returns: maximum size of each region for convincing min_nr_regions.
1409  */
1410 static unsigned long damon_apply_min_nr_regions(struct damon_ctx *ctx)
1411 {
1412 	unsigned long max_region_sz = damon_region_sz_limit(ctx);
1413 	struct damon_target *t;
1414 	struct damon_region *r, *next;
1415 
1416 	max_region_sz = ALIGN(max_region_sz, ctx->min_region_sz);
1417 	damon_for_each_target(t, ctx) {
1418 		damon_for_each_region_safe(r, next, t) {
1419 			while (damon_sz_region(r) > max_region_sz) {
1420 				damon_split_region_at(t, r, max_region_sz);
1421 				r = damon_next_region(r);
1422 			}
1423 		}
1424 	}
1425 	return max_region_sz;
1426 }
1427 
1428 static int kdamond_fn(void *data);
1429 
1430 /*
1431  * __damon_start() - Starts monitoring with given context.
1432  * @ctx:	monitoring context
1433  *
1434  * This function should be called while damon_lock is hold.
1435  *
1436  * Return: 0 on success, negative error code otherwise.
1437  */
1438 static int __damon_start(struct damon_ctx *ctx)
1439 {
1440 	int err = -EBUSY;
1441 
1442 	mutex_lock(&ctx->kdamond_lock);
1443 	if (!ctx->kdamond) {
1444 		err = 0;
1445 		reinit_completion(&ctx->kdamond_started);
1446 		ctx->kdamond = kthread_run(kdamond_fn, ctx, "kdamond.%d",
1447 				nr_running_ctxs);
1448 		if (IS_ERR(ctx->kdamond)) {
1449 			err = PTR_ERR(ctx->kdamond);
1450 			ctx->kdamond = NULL;
1451 		} else {
1452 			wait_for_completion(&ctx->kdamond_started);
1453 		}
1454 	}
1455 	mutex_unlock(&ctx->kdamond_lock);
1456 
1457 	return err;
1458 }
1459 
1460 /**
1461  * damon_start() - Starts the monitorings for a given group of contexts.
1462  * @ctxs:	an array of the pointers for contexts to start monitoring
1463  * @nr_ctxs:	size of @ctxs
1464  * @exclusive:	exclusiveness of this contexts group
1465  *
1466  * This function starts a group of monitoring threads for a group of monitoring
1467  * contexts.  One thread per each context is created and run in parallel.  The
1468  * caller should handle synchronization between the threads by itself.  If
1469  * @exclusive is true and a group of threads that created by other
1470  * 'damon_start()' call is currently running, this function does nothing but
1471  * returns -EBUSY.
1472  *
1473  * Return: 0 on success, negative error code otherwise.
1474  */
1475 int damon_start(struct damon_ctx **ctxs, int nr_ctxs, bool exclusive)
1476 {
1477 	int i;
1478 	int err = 0;
1479 
1480 	mutex_lock(&damon_lock);
1481 	if ((exclusive && nr_running_ctxs) ||
1482 			(!exclusive && running_exclusive_ctxs)) {
1483 		mutex_unlock(&damon_lock);
1484 		return -EBUSY;
1485 	}
1486 
1487 	for (i = 0; i < nr_ctxs; i++) {
1488 		err = __damon_start(ctxs[i]);
1489 		if (err)
1490 			break;
1491 		nr_running_ctxs++;
1492 	}
1493 	if (exclusive && nr_running_ctxs)
1494 		running_exclusive_ctxs = true;
1495 	mutex_unlock(&damon_lock);
1496 
1497 	return err;
1498 }
1499 
1500 /*
1501  * __damon_stop() - Stops monitoring of a given context.
1502  * @ctx:	monitoring context
1503  *
1504  * Return: 0 on success, negative error code otherwise.
1505  */
1506 static int __damon_stop(struct damon_ctx *ctx)
1507 {
1508 	struct task_struct *tsk;
1509 
1510 	mutex_lock(&ctx->kdamond_lock);
1511 	tsk = ctx->kdamond;
1512 	if (tsk) {
1513 		get_task_struct(tsk);
1514 		mutex_unlock(&ctx->kdamond_lock);
1515 		kthread_stop_put(tsk);
1516 		return 0;
1517 	}
1518 	mutex_unlock(&ctx->kdamond_lock);
1519 
1520 	return -EPERM;
1521 }
1522 
1523 /**
1524  * damon_stop() - Stops the monitorings for a given group of contexts.
1525  * @ctxs:	an array of the pointers for contexts to stop monitoring
1526  * @nr_ctxs:	size of @ctxs
1527  *
1528  * Return: 0 on success, negative error code otherwise.
1529  */
1530 int damon_stop(struct damon_ctx **ctxs, int nr_ctxs)
1531 {
1532 	int i, err = 0;
1533 
1534 	for (i = 0; i < nr_ctxs; i++) {
1535 		/* nr_running_ctxs is decremented in kdamond_fn */
1536 		err = __damon_stop(ctxs[i]);
1537 		if (err)
1538 			break;
1539 	}
1540 	return err;
1541 }
1542 
1543 /**
1544  * damon_is_running() - Returns if a given DAMON context is running.
1545  * @ctx:	The DAMON context to see if running.
1546  *
1547  * Return: true if @ctx is running, false otherwise.
1548  */
1549 bool damon_is_running(struct damon_ctx *ctx)
1550 {
1551 	bool running;
1552 
1553 	mutex_lock(&ctx->kdamond_lock);
1554 	running = ctx->kdamond != NULL;
1555 	mutex_unlock(&ctx->kdamond_lock);
1556 	return running;
1557 }
1558 
1559 /**
1560  * damon_kdamond_pid() - Return pid of a given DAMON context's worker thread.
1561  * @ctx:	The DAMON context of the question.
1562  *
1563  * Return: pid if @ctx is running, negative error code otherwise.
1564  */
1565 int damon_kdamond_pid(struct damon_ctx *ctx)
1566 {
1567 	int pid = -EINVAL;
1568 
1569 	mutex_lock(&ctx->kdamond_lock);
1570 	if (ctx->kdamond)
1571 		pid = ctx->kdamond->pid;
1572 	mutex_unlock(&ctx->kdamond_lock);
1573 	return pid;
1574 }
1575 
1576 /*
1577  * damon_call_handle_inactive_ctx() - handle DAMON call request that added to
1578  *				      an inactive context.
1579  * @ctx:	The inactive DAMON context.
1580  * @control:	Control variable of the call request.
1581  *
1582  * This function is called in a case that @control is added to @ctx but @ctx is
1583  * not running (inactive).  See if @ctx handled @control or not, and cleanup
1584  * @control if it was not handled.
1585  *
1586  * Returns 0 if @control was handled by @ctx, negative error code otherwise.
1587  */
1588 static int damon_call_handle_inactive_ctx(
1589 		struct damon_ctx *ctx, struct damon_call_control *control)
1590 {
1591 	struct damon_call_control *c;
1592 
1593 	mutex_lock(&ctx->call_controls_lock);
1594 	list_for_each_entry(c, &ctx->call_controls, list) {
1595 		if (c == control) {
1596 			list_del(&control->list);
1597 			mutex_unlock(&ctx->call_controls_lock);
1598 			return -EINVAL;
1599 		}
1600 	}
1601 	mutex_unlock(&ctx->call_controls_lock);
1602 	return 0;
1603 }
1604 
1605 /**
1606  * damon_call() - Invoke a given function on DAMON worker thread (kdamond).
1607  * @ctx:	DAMON context to call the function for.
1608  * @control:	Control variable of the call request.
1609  *
1610  * Ask DAMON worker thread (kdamond) of @ctx to call a function with an
1611  * argument data that respectively passed via &damon_call_control->fn and
1612  * &damon_call_control->data of @control.  If &damon_call_control->repeat of
1613  * @control is unset, further wait until the kdamond finishes handling of the
1614  * request.  Otherwise, return as soon as the request is made.
1615  *
1616  * The kdamond executes the function with the argument in the main loop, just
1617  * after a sampling of the iteration is finished.  The function can hence
1618  * safely access the internal data of the &struct damon_ctx without additional
1619  * synchronization.  The return value of the function will be saved in
1620  * &damon_call_control->return_code.
1621  *
1622  * Return: 0 on success, negative error code otherwise.
1623  */
1624 int damon_call(struct damon_ctx *ctx, struct damon_call_control *control)
1625 {
1626 	if (!control->repeat)
1627 		init_completion(&control->completion);
1628 	control->canceled = false;
1629 	INIT_LIST_HEAD(&control->list);
1630 
1631 	mutex_lock(&ctx->call_controls_lock);
1632 	list_add_tail(&control->list, &ctx->call_controls);
1633 	mutex_unlock(&ctx->call_controls_lock);
1634 	if (!damon_is_running(ctx))
1635 		return damon_call_handle_inactive_ctx(ctx, control);
1636 	if (control->repeat)
1637 		return 0;
1638 	wait_for_completion(&control->completion);
1639 	if (control->canceled)
1640 		return -ECANCELED;
1641 	return 0;
1642 }
1643 
1644 /**
1645  * damos_walk() - Invoke a given functions while DAMOS walk regions.
1646  * @ctx:	DAMON context to call the functions for.
1647  * @control:	Control variable of the walk request.
1648  *
1649  * Ask DAMON worker thread (kdamond) of @ctx to call a function for each region
1650  * that the kdamond will apply DAMOS action to, and wait until the kdamond
1651  * finishes handling of the request.
1652  *
1653  * The kdamond executes the given function in the main loop, for each region
1654  * just after it applied any DAMOS actions of @ctx to it.  The invocation is
1655  * made only within one &damos->apply_interval_us since damos_walk()
1656  * invocation, for each scheme.  The given callback function can hence safely
1657  * access the internal data of &struct damon_ctx and &struct damon_region that
1658  * each of the scheme will apply the action for next interval, without
1659  * additional synchronizations against the kdamond.  If every scheme of @ctx
1660  * passed at least one &damos->apply_interval_us, kdamond marks the request as
1661  * completed so that damos_walk() can wakeup and return.
1662  *
1663  * Return: 0 on success, negative error code otherwise.
1664  */
1665 int damos_walk(struct damon_ctx *ctx, struct damos_walk_control *control)
1666 {
1667 	init_completion(&control->completion);
1668 	control->canceled = false;
1669 	mutex_lock(&ctx->walk_control_lock);
1670 	if (ctx->walk_control) {
1671 		mutex_unlock(&ctx->walk_control_lock);
1672 		return -EBUSY;
1673 	}
1674 	ctx->walk_control = control;
1675 	mutex_unlock(&ctx->walk_control_lock);
1676 	if (!damon_is_running(ctx)) {
1677 		mutex_lock(&ctx->walk_control_lock);
1678 		if (ctx->walk_control == control)
1679 			ctx->walk_control = NULL;
1680 		mutex_unlock(&ctx->walk_control_lock);
1681 		return -EINVAL;
1682 	}
1683 	wait_for_completion(&control->completion);
1684 	if (control->canceled)
1685 		return -ECANCELED;
1686 	return 0;
1687 }
1688 
1689 /*
1690  * Warn and fix corrupted ->nr_accesses[_bp] for investigations and preventing
1691  * the problem being propagated.
1692  */
1693 static void damon_warn_fix_nr_accesses_corruption(struct damon_region *r)
1694 {
1695 	if (r->nr_accesses_bp == r->nr_accesses * 10000)
1696 		return;
1697 	WARN_ONCE(true, "invalid nr_accesses_bp at reset: %u %u\n",
1698 			r->nr_accesses_bp, r->nr_accesses);
1699 	r->nr_accesses_bp = r->nr_accesses * 10000;
1700 }
1701 
1702 #ifdef CONFIG_DAMON_DEBUG_SANITY
1703 static void damon_verify_reset_aggregated(struct damon_region *r,
1704 		struct damon_ctx *c)
1705 {
1706 	WARN_ONCE(r->nr_accesses_bp != r->last_nr_accesses * 10000,
1707 			"nr_accesses_bp %u last_nr_accesses %u sis %lu %lu\n",
1708 			r->nr_accesses_bp, r->last_nr_accesses,
1709 			c->passed_sample_intervals, c->next_aggregation_sis);
1710 }
1711 #else
1712 static void damon_verify_reset_aggregated(struct damon_region *r,
1713 		struct damon_ctx *c)
1714 {
1715 }
1716 #endif
1717 
1718 
1719 /*
1720  * Reset the aggregated monitoring results ('nr_accesses' of each region).
1721  */
1722 static void kdamond_reset_aggregated(struct damon_ctx *c)
1723 {
1724 	struct damon_target *t;
1725 	unsigned int ti = 0;	/* target's index */
1726 
1727 	damon_for_each_target(t, c) {
1728 		struct damon_region *r;
1729 
1730 		damon_for_each_region(r, t) {
1731 			trace_damon_aggregated(ti, r, damon_nr_regions(t));
1732 			damon_warn_fix_nr_accesses_corruption(r);
1733 			r->last_nr_accesses = r->nr_accesses;
1734 			r->nr_accesses = 0;
1735 			damon_verify_reset_aggregated(r, c);
1736 		}
1737 		ti++;
1738 	}
1739 }
1740 
1741 static unsigned long damon_get_intervals_score(struct damon_ctx *c)
1742 {
1743 	struct damon_target *t;
1744 	struct damon_region *r;
1745 	unsigned long sz_region, max_access_events = 0, access_events = 0;
1746 	unsigned long target_access_events;
1747 	unsigned long goal_bp = c->attrs.intervals_goal.access_bp;
1748 
1749 	damon_for_each_target(t, c) {
1750 		damon_for_each_region(r, t) {
1751 			sz_region = damon_sz_region(r);
1752 			max_access_events += sz_region * c->attrs.aggr_samples;
1753 			access_events += sz_region * r->nr_accesses;
1754 		}
1755 	}
1756 	target_access_events = max_access_events * goal_bp / 10000;
1757 	target_access_events = target_access_events ? : 1;
1758 	return mult_frac(access_events, 10000, target_access_events);
1759 }
1760 
1761 static unsigned long damon_feed_loop_next_input(unsigned long last_input,
1762 		unsigned long score);
1763 
1764 static unsigned long damon_get_intervals_adaptation_bp(struct damon_ctx *c)
1765 {
1766 	unsigned long score_bp, adaptation_bp;
1767 
1768 	score_bp = damon_get_intervals_score(c);
1769 	adaptation_bp = damon_feed_loop_next_input(100000000, score_bp) /
1770 		10000;
1771 	/*
1772 	 * adaptation_bp ranges from 1 to 20,000.  Avoid too rapid reduction of
1773 	 * the intervals by rescaling [1,10,000] to [5000, 10,000].
1774 	 */
1775 	if (adaptation_bp <= 10000)
1776 		adaptation_bp = 5000 + adaptation_bp / 2;
1777 	return adaptation_bp;
1778 }
1779 
1780 static void kdamond_tune_intervals(struct damon_ctx *c)
1781 {
1782 	unsigned long adaptation_bp;
1783 	struct damon_attrs new_attrs;
1784 	struct damon_intervals_goal *goal;
1785 
1786 	adaptation_bp = damon_get_intervals_adaptation_bp(c);
1787 	if (adaptation_bp == 10000)
1788 		return;
1789 
1790 	new_attrs = c->attrs;
1791 	goal = &c->attrs.intervals_goal;
1792 	new_attrs.sample_interval = min(goal->max_sample_us,
1793 			c->attrs.sample_interval * adaptation_bp / 10000);
1794 	new_attrs.sample_interval = max(goal->min_sample_us,
1795 			new_attrs.sample_interval);
1796 	new_attrs.aggr_interval = new_attrs.sample_interval *
1797 		c->attrs.aggr_samples;
1798 	trace_damon_monitor_intervals_tune(new_attrs.sample_interval);
1799 	damon_set_attrs(c, &new_attrs);
1800 }
1801 
1802 static bool __damos_valid_target(struct damon_region *r, struct damos *s)
1803 {
1804 	unsigned long sz;
1805 	unsigned int nr_accesses = r->nr_accesses_bp / 10000;
1806 
1807 	sz = damon_sz_region(r);
1808 	return s->pattern.min_sz_region <= sz &&
1809 		sz <= s->pattern.max_sz_region &&
1810 		s->pattern.min_nr_accesses <= nr_accesses &&
1811 		nr_accesses <= s->pattern.max_nr_accesses &&
1812 		s->pattern.min_age_region <= r->age &&
1813 		r->age <= s->pattern.max_age_region;
1814 }
1815 
1816 /*
1817  * damos_quota_is_set() - Return if the given quota is actually set.
1818  * @quota:	The quota to check.
1819  *
1820  * Returns true if the quota is set, false otherwise.
1821  */
1822 static bool damos_quota_is_set(struct damos_quota *quota)
1823 {
1824 	return quota->esz || quota->sz || quota->ms ||
1825 		!damos_quota_goals_empty(quota);
1826 }
1827 
1828 static bool damos_valid_target(struct damon_ctx *c, struct damon_region *r,
1829 		struct damos *s)
1830 {
1831 	bool ret = __damos_valid_target(r, s);
1832 
1833 	if (!ret || !damos_quota_is_set(&s->quota) || !c->ops.get_scheme_score)
1834 		return ret;
1835 
1836 	return c->ops.get_scheme_score(c, r, s) >= s->quota.min_score;
1837 }
1838 
1839 /*
1840  * damos_skip_charged_region() - Check if the given region or starting part of
1841  * it is already charged for the DAMOS quota.
1842  * @t:	The target of the region.
1843  * @rp:	The pointer to the region.
1844  * @s:	The scheme to be applied.
1845  * @min_region_sz:	minimum region size.
1846  *
1847  * If a quota of a scheme has exceeded in a quota charge window, the scheme's
1848  * action would applied to only a part of the target access pattern fulfilling
1849  * regions.  To avoid applying the scheme action to only already applied
1850  * regions, DAMON skips applying the scheme action to the regions that charged
1851  * in the previous charge window.
1852  *
1853  * This function checks if a given region should be skipped or not for the
1854  * reason.  If only the starting part of the region has previously charged,
1855  * this function splits the region into two so that the second one covers the
1856  * area that not charged in the previous charge widnow, and return true.  The
1857  * caller can see the second one on the next iteration of the region walk.
1858  * Note that this means the caller should use damon_for_each_region() instead
1859  * of damon_for_each_region_safe().  If damon_for_each_region_safe() is used,
1860  * the second region will just be ignored.
1861  *
1862  * Return: true if the region should be skipped, false otherwise.
1863  */
1864 static bool damos_skip_charged_region(struct damon_target *t,
1865 		struct damon_region *r, struct damos *s,
1866 		unsigned long min_region_sz)
1867 {
1868 	struct damos_quota *quota = &s->quota;
1869 	unsigned long sz_to_skip;
1870 
1871 	/* Skip previously charged regions */
1872 	if (quota->charge_target_from) {
1873 		if (t != quota->charge_target_from)
1874 			return true;
1875 		if (r == damon_last_region(t)) {
1876 			quota->charge_target_from = NULL;
1877 			quota->charge_addr_from = 0;
1878 			return true;
1879 		}
1880 		if (quota->charge_addr_from &&
1881 				r->ar.end <= quota->charge_addr_from)
1882 			return true;
1883 
1884 		if (quota->charge_addr_from && r->ar.start <
1885 				quota->charge_addr_from) {
1886 			sz_to_skip = ALIGN_DOWN(quota->charge_addr_from -
1887 					r->ar.start, min_region_sz);
1888 			if (!sz_to_skip) {
1889 				if (damon_sz_region(r) <= min_region_sz)
1890 					return true;
1891 				sz_to_skip = min_region_sz;
1892 			}
1893 			damon_split_region_at(t, r, sz_to_skip);
1894 			return true;
1895 		}
1896 		quota->charge_target_from = NULL;
1897 		quota->charge_addr_from = 0;
1898 	}
1899 	return false;
1900 }
1901 
1902 static void damos_update_stat(struct damos *s,
1903 		unsigned long sz_tried, unsigned long sz_applied,
1904 		unsigned long sz_ops_filter_passed)
1905 {
1906 	s->stat.nr_tried++;
1907 	s->stat.sz_tried += sz_tried;
1908 	if (sz_applied)
1909 		s->stat.nr_applied++;
1910 	s->stat.sz_applied += sz_applied;
1911 	s->stat.sz_ops_filter_passed += sz_ops_filter_passed;
1912 }
1913 
1914 static bool damos_filter_match(struct damon_ctx *ctx, struct damon_target *t,
1915 		struct damon_region *r, struct damos_filter *filter,
1916 		unsigned long min_region_sz)
1917 {
1918 	bool matched = false;
1919 	struct damon_target *ti;
1920 	int target_idx = 0;
1921 	unsigned long start, end;
1922 
1923 	switch (filter->type) {
1924 	case DAMOS_FILTER_TYPE_TARGET:
1925 		damon_for_each_target(ti, ctx) {
1926 			if (ti == t)
1927 				break;
1928 			target_idx++;
1929 		}
1930 		matched = target_idx == filter->target_idx;
1931 		break;
1932 	case DAMOS_FILTER_TYPE_ADDR:
1933 		start = ALIGN_DOWN(filter->addr_range.start, min_region_sz);
1934 		end = ALIGN_DOWN(filter->addr_range.end, min_region_sz);
1935 
1936 		/* inside the range */
1937 		if (start <= r->ar.start && r->ar.end <= end) {
1938 			matched = true;
1939 			break;
1940 		}
1941 		/* outside of the range */
1942 		if (r->ar.end <= start || end <= r->ar.start) {
1943 			matched = false;
1944 			break;
1945 		}
1946 		/* start before the range and overlap */
1947 		if (r->ar.start < start) {
1948 			damon_split_region_at(t, r, start - r->ar.start);
1949 			matched = false;
1950 			break;
1951 		}
1952 		/* start inside the range */
1953 		damon_split_region_at(t, r, end - r->ar.start);
1954 		matched = true;
1955 		break;
1956 	default:
1957 		return false;
1958 	}
1959 
1960 	return matched == filter->matching;
1961 }
1962 
1963 static bool damos_core_filter_out(struct damon_ctx *ctx, struct damon_target *t,
1964 		struct damon_region *r, struct damos *s)
1965 {
1966 	struct damos_filter *filter;
1967 
1968 	s->core_filters_allowed = false;
1969 	damos_for_each_core_filter(filter, s) {
1970 		if (damos_filter_match(ctx, t, r, filter, ctx->min_region_sz)) {
1971 			if (filter->allow)
1972 				s->core_filters_allowed = true;
1973 			return !filter->allow;
1974 		}
1975 	}
1976 	return s->core_filters_default_reject;
1977 }
1978 
1979 /*
1980  * damos_walk_call_walk() - Call &damos_walk_control->walk_fn.
1981  * @ctx:	The context of &damon_ctx->walk_control.
1982  * @t:		The monitoring target of @r that @s will be applied.
1983  * @r:		The region of @t that @s will be applied.
1984  * @s:		The scheme of @ctx that will be applied to @r.
1985  *
1986  * This function is called from kdamond whenever it asked the operation set to
1987  * apply a DAMOS scheme action to a region.  If a DAMOS walk request is
1988  * installed by damos_walk() and not yet uninstalled, invoke it.
1989  */
1990 static void damos_walk_call_walk(struct damon_ctx *ctx, struct damon_target *t,
1991 		struct damon_region *r, struct damos *s,
1992 		unsigned long sz_filter_passed)
1993 {
1994 	struct damos_walk_control *control;
1995 
1996 	if (s->walk_completed)
1997 		return;
1998 
1999 	control = ctx->walk_control;
2000 	if (!control)
2001 		return;
2002 
2003 	control->walk_fn(control->data, ctx, t, r, s, sz_filter_passed);
2004 }
2005 
2006 /*
2007  * damos_walk_complete() - Complete DAMOS walk request if all walks are done.
2008  * @ctx:	The context of &damon_ctx->walk_control.
2009  * @s:		A scheme of @ctx that all walks are now done.
2010  *
2011  * This function is called when kdamond finished applying the action of a DAMOS
2012  * scheme to all regions that eligible for the given &damos->apply_interval_us.
2013  * If every scheme of @ctx including @s now finished walking for at least one
2014  * &damos->apply_interval_us, this function makrs the handling of the given
2015  * DAMOS walk request is done, so that damos_walk() can wake up and return.
2016  */
2017 static void damos_walk_complete(struct damon_ctx *ctx, struct damos *s)
2018 {
2019 	struct damos *siter;
2020 	struct damos_walk_control *control;
2021 
2022 	control = ctx->walk_control;
2023 	if (!control)
2024 		return;
2025 
2026 	s->walk_completed = true;
2027 	/* if all schemes completed, signal completion to walker */
2028 	damon_for_each_scheme(siter, ctx) {
2029 		if (!siter->walk_completed)
2030 			return;
2031 	}
2032 	damon_for_each_scheme(siter, ctx)
2033 		siter->walk_completed = false;
2034 
2035 	complete(&control->completion);
2036 	ctx->walk_control = NULL;
2037 }
2038 
2039 /*
2040  * damos_walk_cancel() - Cancel the current DAMOS walk request.
2041  * @ctx:	The context of &damon_ctx->walk_control.
2042  *
2043  * This function is called when @ctx is deactivated by DAMOS watermarks, DAMOS
2044  * walk is requested but there is no DAMOS scheme to walk for, or the kdamond
2045  * is already out of the main loop and therefore gonna be terminated, and hence
2046  * cannot continue the walks.  This function therefore marks the walk request
2047  * as canceled, so that damos_walk() can wake up and return.
2048  */
2049 static void damos_walk_cancel(struct damon_ctx *ctx)
2050 {
2051 	struct damos_walk_control *control;
2052 
2053 	mutex_lock(&ctx->walk_control_lock);
2054 	control = ctx->walk_control;
2055 	mutex_unlock(&ctx->walk_control_lock);
2056 
2057 	if (!control)
2058 		return;
2059 	control->canceled = true;
2060 	complete(&control->completion);
2061 	mutex_lock(&ctx->walk_control_lock);
2062 	ctx->walk_control = NULL;
2063 	mutex_unlock(&ctx->walk_control_lock);
2064 }
2065 
2066 static void damos_apply_scheme(struct damon_ctx *c, struct damon_target *t,
2067 		struct damon_region *r, struct damos *s)
2068 {
2069 	struct damos_quota *quota = &s->quota;
2070 	unsigned long sz = damon_sz_region(r);
2071 	struct timespec64 begin, end;
2072 	unsigned long sz_applied = 0;
2073 	unsigned long sz_ops_filter_passed = 0;
2074 	/*
2075 	 * We plan to support multiple context per kdamond, as DAMON sysfs
2076 	 * implies with 'nr_contexts' file.  Nevertheless, only single context
2077 	 * per kdamond is supported for now.  So, we can simply use '0' context
2078 	 * index here.
2079 	 */
2080 	unsigned int cidx = 0;
2081 	struct damos *siter;		/* schemes iterator */
2082 	unsigned int sidx = 0;
2083 	struct damon_target *titer;	/* targets iterator */
2084 	unsigned int tidx = 0;
2085 	bool do_trace = false;
2086 
2087 	/* get indices for trace_damos_before_apply() */
2088 	if (trace_damos_before_apply_enabled()) {
2089 		damon_for_each_scheme(siter, c) {
2090 			if (siter == s)
2091 				break;
2092 			sidx++;
2093 		}
2094 		damon_for_each_target(titer, c) {
2095 			if (titer == t)
2096 				break;
2097 			tidx++;
2098 		}
2099 		do_trace = true;
2100 	}
2101 
2102 	if (c->ops.apply_scheme) {
2103 		if (damos_quota_is_set(quota) &&
2104 				quota->charged_sz + sz > quota->esz) {
2105 			sz = ALIGN_DOWN(quota->esz - quota->charged_sz,
2106 					c->min_region_sz);
2107 			if (!sz)
2108 				goto update_stat;
2109 			damon_split_region_at(t, r, sz);
2110 		}
2111 		if (damos_core_filter_out(c, t, r, s))
2112 			return;
2113 		ktime_get_coarse_ts64(&begin);
2114 		trace_damos_before_apply(cidx, sidx, tidx, r,
2115 				damon_nr_regions(t), do_trace);
2116 		sz_applied = c->ops.apply_scheme(c, t, r, s,
2117 				&sz_ops_filter_passed);
2118 		damos_walk_call_walk(c, t, r, s, sz_ops_filter_passed);
2119 		ktime_get_coarse_ts64(&end);
2120 		quota->total_charged_ns += timespec64_to_ns(&end) -
2121 			timespec64_to_ns(&begin);
2122 		quota->charged_sz += sz;
2123 		if (damos_quota_is_set(quota) &&
2124 				quota->charged_sz >= quota->esz) {
2125 			quota->charge_target_from = t;
2126 			quota->charge_addr_from = r->ar.end + 1;
2127 		}
2128 	}
2129 	if (s->action != DAMOS_STAT)
2130 		r->age = 0;
2131 
2132 update_stat:
2133 	damos_update_stat(s, sz, sz_applied, sz_ops_filter_passed);
2134 }
2135 
2136 static void damon_do_apply_schemes(struct damon_ctx *c,
2137 				   struct damon_target *t,
2138 				   struct damon_region *r)
2139 {
2140 	struct damos *s;
2141 
2142 	damon_for_each_scheme(s, c) {
2143 		struct damos_quota *quota = &s->quota;
2144 
2145 		if (time_before(c->passed_sample_intervals, s->next_apply_sis))
2146 			continue;
2147 
2148 		if (!s->wmarks.activated)
2149 			continue;
2150 
2151 		/* Check the quota */
2152 		if (damos_quota_is_set(quota) &&
2153 				quota->charged_sz >= quota->esz)
2154 			continue;
2155 
2156 		if (damos_skip_charged_region(t, r, s, c->min_region_sz))
2157 			continue;
2158 
2159 		if (s->max_nr_snapshots &&
2160 				s->max_nr_snapshots <= s->stat.nr_snapshots)
2161 			continue;
2162 
2163 		if (damos_valid_target(c, r, s))
2164 			damos_apply_scheme(c, t, r, s);
2165 
2166 		if (damon_is_last_region(r, t))
2167 			s->stat.nr_snapshots++;
2168 	}
2169 }
2170 
2171 /*
2172  * damon_feed_loop_next_input() - get next input to achieve a target score.
2173  * @last_input	The last input.
2174  * @score	Current score that made with @last_input.
2175  *
2176  * Calculate next input to achieve the target score, based on the last input
2177  * and current score.  Assuming the input and the score are positively
2178  * proportional, calculate how much compensation should be added to or
2179  * subtracted from the last input as a proportion of the last input.  Avoid
2180  * next input always being zero by setting it non-zero always.  In short form
2181  * (assuming support of float and signed calculations), the algorithm is as
2182  * below.
2183  *
2184  * next_input = max(last_input * ((goal - current) / goal + 1), 1)
2185  *
2186  * For simple implementation, we assume the target score is always 10,000.  The
2187  * caller should adjust @score for this.
2188  *
2189  * Returns next input that assumed to achieve the target score.
2190  */
2191 static unsigned long damon_feed_loop_next_input(unsigned long last_input,
2192 		unsigned long score)
2193 {
2194 	const unsigned long goal = 10000;
2195 	/* Set minimum input as 10000 to avoid compensation be zero */
2196 	const unsigned long min_input = 10000;
2197 	unsigned long score_goal_diff, compensation;
2198 	bool over_achieving = score > goal;
2199 
2200 	if (score == goal)
2201 		return last_input;
2202 	if (score >= goal * 2)
2203 		return min_input;
2204 
2205 	if (over_achieving)
2206 		score_goal_diff = score - goal;
2207 	else
2208 		score_goal_diff = goal - score;
2209 
2210 	if (last_input < ULONG_MAX / score_goal_diff)
2211 		compensation = last_input * score_goal_diff / goal;
2212 	else
2213 		compensation = last_input / goal * score_goal_diff;
2214 
2215 	if (over_achieving)
2216 		return max(last_input - compensation, min_input);
2217 	if (last_input < ULONG_MAX - compensation)
2218 		return last_input + compensation;
2219 	return ULONG_MAX;
2220 }
2221 
2222 #ifdef CONFIG_PSI
2223 
2224 static u64 damos_get_some_mem_psi_total(void)
2225 {
2226 	if (static_branch_likely(&psi_disabled))
2227 		return 0;
2228 	return div_u64(psi_system.total[PSI_AVGS][PSI_MEM * 2],
2229 			NSEC_PER_USEC);
2230 }
2231 
2232 #else	/* CONFIG_PSI */
2233 
2234 static inline u64 damos_get_some_mem_psi_total(void)
2235 {
2236 	return 0;
2237 };
2238 
2239 #endif	/* CONFIG_PSI */
2240 
2241 #ifdef CONFIG_NUMA
2242 static __kernel_ulong_t damos_get_node_mem_bp(
2243 		struct damos_quota_goal *goal)
2244 {
2245 	struct sysinfo i;
2246 	__kernel_ulong_t numerator;
2247 
2248 	si_meminfo_node(&i, goal->nid);
2249 	if (goal->metric == DAMOS_QUOTA_NODE_MEM_USED_BP)
2250 		numerator = i.totalram - i.freeram;
2251 	else	/* DAMOS_QUOTA_NODE_MEM_FREE_BP */
2252 		numerator = i.freeram;
2253 	return mult_frac(numerator, 10000, i.totalram);
2254 }
2255 
2256 static unsigned long damos_get_node_memcg_used_bp(
2257 		struct damos_quota_goal *goal)
2258 {
2259 	struct mem_cgroup *memcg;
2260 	struct lruvec *lruvec;
2261 	unsigned long used_pages, numerator;
2262 	struct sysinfo i;
2263 
2264 	memcg = mem_cgroup_get_from_id(goal->memcg_id);
2265 	if (!memcg) {
2266 		if (goal->metric == DAMOS_QUOTA_NODE_MEMCG_USED_BP)
2267 			return 0;
2268 		else	/* DAMOS_QUOTA_NODE_MEMCG_FREE_BP */
2269 			return 10000;
2270 	}
2271 
2272 	mem_cgroup_flush_stats(memcg);
2273 	lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(goal->nid));
2274 	used_pages = lruvec_page_state(lruvec, NR_ACTIVE_ANON);
2275 	used_pages += lruvec_page_state(lruvec, NR_INACTIVE_ANON);
2276 	used_pages += lruvec_page_state(lruvec, NR_ACTIVE_FILE);
2277 	used_pages += lruvec_page_state(lruvec, NR_INACTIVE_FILE);
2278 
2279 	mem_cgroup_put(memcg);
2280 
2281 	si_meminfo_node(&i, goal->nid);
2282 	if (goal->metric == DAMOS_QUOTA_NODE_MEMCG_USED_BP)
2283 		numerator = used_pages;
2284 	else	/* DAMOS_QUOTA_NODE_MEMCG_FREE_BP */
2285 		numerator = i.totalram - used_pages;
2286 	return mult_frac(numerator, 10000, i.totalram);
2287 }
2288 #else
2289 static __kernel_ulong_t damos_get_node_mem_bp(
2290 		struct damos_quota_goal *goal)
2291 {
2292 	return 0;
2293 }
2294 
2295 static unsigned long damos_get_node_memcg_used_bp(
2296 		struct damos_quota_goal *goal)
2297 {
2298 	return 0;
2299 }
2300 #endif
2301 
2302 /*
2303  * Returns LRU-active or inactive memory to total LRU memory size ratio.
2304  */
2305 static unsigned int damos_get_in_active_mem_bp(bool active_ratio)
2306 {
2307 	unsigned long active, inactive, total;
2308 
2309 	/* This should align with /proc/meminfo output */
2310 	active = global_node_page_state(NR_LRU_BASE + LRU_ACTIVE_ANON) +
2311 		global_node_page_state(NR_LRU_BASE + LRU_ACTIVE_FILE);
2312 	inactive = global_node_page_state(NR_LRU_BASE + LRU_INACTIVE_ANON) +
2313 		global_node_page_state(NR_LRU_BASE + LRU_INACTIVE_FILE);
2314 	total = active + inactive;
2315 	if (active_ratio)
2316 		return mult_frac(active, 10000, total);
2317 	return mult_frac(inactive, 10000, total);
2318 }
2319 
2320 static void damos_set_quota_goal_current_value(struct damos_quota_goal *goal)
2321 {
2322 	u64 now_psi_total;
2323 
2324 	switch (goal->metric) {
2325 	case DAMOS_QUOTA_USER_INPUT:
2326 		/* User should already set goal->current_value */
2327 		break;
2328 	case DAMOS_QUOTA_SOME_MEM_PSI_US:
2329 		now_psi_total = damos_get_some_mem_psi_total();
2330 		goal->current_value = now_psi_total - goal->last_psi_total;
2331 		goal->last_psi_total = now_psi_total;
2332 		break;
2333 	case DAMOS_QUOTA_NODE_MEM_USED_BP:
2334 	case DAMOS_QUOTA_NODE_MEM_FREE_BP:
2335 		goal->current_value = damos_get_node_mem_bp(goal);
2336 		break;
2337 	case DAMOS_QUOTA_NODE_MEMCG_USED_BP:
2338 	case DAMOS_QUOTA_NODE_MEMCG_FREE_BP:
2339 		goal->current_value = damos_get_node_memcg_used_bp(goal);
2340 		break;
2341 	case DAMOS_QUOTA_ACTIVE_MEM_BP:
2342 	case DAMOS_QUOTA_INACTIVE_MEM_BP:
2343 		goal->current_value = damos_get_in_active_mem_bp(
2344 				goal->metric == DAMOS_QUOTA_ACTIVE_MEM_BP);
2345 		break;
2346 	default:
2347 		break;
2348 	}
2349 }
2350 
2351 /* Return the highest score since it makes schemes least aggressive */
2352 static unsigned long damos_quota_score(struct damos_quota *quota)
2353 {
2354 	struct damos_quota_goal *goal;
2355 	unsigned long highest_score = 0;
2356 
2357 	damos_for_each_quota_goal(goal, quota) {
2358 		damos_set_quota_goal_current_value(goal);
2359 		highest_score = max(highest_score,
2360 				mult_frac(goal->current_value, 10000,
2361 					goal->target_value));
2362 	}
2363 
2364 	return highest_score;
2365 }
2366 
2367 static void damos_goal_tune_esz_bp_consist(struct damos_quota *quota)
2368 {
2369 	unsigned long score = damos_quota_score(quota);
2370 
2371 	quota->esz_bp = damon_feed_loop_next_input(
2372 			max(quota->esz_bp, 10000UL), score);
2373 }
2374 
2375 static void damos_goal_tune_esz_bp_temporal(struct damos_quota *quota)
2376 {
2377 	unsigned long score = damos_quota_score(quota);
2378 
2379 	if (score >= 10000)
2380 		quota->esz_bp = 0;
2381 	else if (quota->sz)
2382 		quota->esz_bp = quota->sz * 10000;
2383 	else
2384 		quota->esz_bp = ULONG_MAX;
2385 }
2386 
2387 /*
2388  * Called only if quota->ms, or quota->sz are set, or quota->goals is not empty
2389  */
2390 static void damos_set_effective_quota(struct damos_quota *quota)
2391 {
2392 	unsigned long throughput;
2393 	unsigned long esz = ULONG_MAX;
2394 
2395 	if (!quota->ms && list_empty(&quota->goals)) {
2396 		quota->esz = quota->sz;
2397 		return;
2398 	}
2399 
2400 	if (!list_empty(&quota->goals)) {
2401 		if (quota->goal_tuner == DAMOS_QUOTA_GOAL_TUNER_CONSIST)
2402 			damos_goal_tune_esz_bp_consist(quota);
2403 		else if (quota->goal_tuner == DAMOS_QUOTA_GOAL_TUNER_TEMPORAL)
2404 			damos_goal_tune_esz_bp_temporal(quota);
2405 		esz = quota->esz_bp / 10000;
2406 	}
2407 
2408 	if (quota->ms) {
2409 		if (quota->total_charged_ns)
2410 			throughput = mult_frac(quota->total_charged_sz,
2411 					1000000, quota->total_charged_ns);
2412 		else
2413 			throughput = PAGE_SIZE * 1024;
2414 		esz = min(throughput * quota->ms, esz);
2415 	}
2416 
2417 	if (quota->sz && quota->sz < esz)
2418 		esz = quota->sz;
2419 
2420 	quota->esz = esz;
2421 }
2422 
2423 static void damos_trace_esz(struct damon_ctx *c, struct damos *s,
2424 		struct damos_quota *quota)
2425 {
2426 	unsigned int cidx = 0, sidx = 0;
2427 	struct damos *siter;
2428 
2429 	damon_for_each_scheme(siter, c) {
2430 		if (siter == s)
2431 			break;
2432 		sidx++;
2433 	}
2434 	trace_damos_esz(cidx, sidx, quota->esz);
2435 }
2436 
2437 static void damos_adjust_quota(struct damon_ctx *c, struct damos *s)
2438 {
2439 	struct damos_quota *quota = &s->quota;
2440 	struct damon_target *t;
2441 	struct damon_region *r;
2442 	unsigned long cumulated_sz, cached_esz;
2443 	unsigned int score, max_score = 0;
2444 
2445 	if (!quota->ms && !quota->sz && list_empty(&quota->goals))
2446 		return;
2447 
2448 	/* First charge window */
2449 	if (!quota->total_charged_sz && !quota->charged_from) {
2450 		quota->charged_from = jiffies;
2451 		damos_set_effective_quota(quota);
2452 	}
2453 
2454 	/* New charge window starts */
2455 	if (time_after_eq(jiffies, quota->charged_from +
2456 				msecs_to_jiffies(quota->reset_interval))) {
2457 		if (damos_quota_is_set(quota) &&
2458 				quota->charged_sz >= quota->esz)
2459 			s->stat.qt_exceeds++;
2460 		quota->total_charged_sz += quota->charged_sz;
2461 		quota->charged_from = jiffies;
2462 		quota->charged_sz = 0;
2463 		if (trace_damos_esz_enabled())
2464 			cached_esz = quota->esz;
2465 		damos_set_effective_quota(quota);
2466 		if (trace_damos_esz_enabled() && quota->esz != cached_esz)
2467 			damos_trace_esz(c, s, quota);
2468 	}
2469 
2470 	if (!c->ops.get_scheme_score)
2471 		return;
2472 
2473 	/* Fill up the score histogram */
2474 	memset(c->regions_score_histogram, 0,
2475 			sizeof(*c->regions_score_histogram) *
2476 			(DAMOS_MAX_SCORE + 1));
2477 	damon_for_each_target(t, c) {
2478 		damon_for_each_region(r, t) {
2479 			if (!__damos_valid_target(r, s))
2480 				continue;
2481 			if (damos_core_filter_out(c, t, r, s))
2482 				continue;
2483 			score = c->ops.get_scheme_score(c, r, s);
2484 			c->regions_score_histogram[score] +=
2485 				damon_sz_region(r);
2486 			if (score > max_score)
2487 				max_score = score;
2488 		}
2489 	}
2490 
2491 	/* Set the min score limit */
2492 	for (cumulated_sz = 0, score = max_score; ; score--) {
2493 		cumulated_sz += c->regions_score_histogram[score];
2494 		if (cumulated_sz >= quota->esz || !score)
2495 			break;
2496 	}
2497 	quota->min_score = score;
2498 }
2499 
2500 static void damos_trace_stat(struct damon_ctx *c, struct damos *s)
2501 {
2502 	unsigned int cidx = 0, sidx = 0;
2503 	struct damos *siter;
2504 
2505 	if (!trace_damos_stat_after_apply_interval_enabled())
2506 		return;
2507 
2508 	damon_for_each_scheme(siter, c) {
2509 		if (siter == s)
2510 			break;
2511 		sidx++;
2512 	}
2513 	trace_damos_stat_after_apply_interval(cidx, sidx, &s->stat);
2514 }
2515 
2516 static void kdamond_apply_schemes(struct damon_ctx *c)
2517 {
2518 	struct damon_target *t;
2519 	struct damon_region *r;
2520 	struct damos *s;
2521 	bool has_schemes_to_apply = false;
2522 
2523 	damon_for_each_scheme(s, c) {
2524 		if (time_before(c->passed_sample_intervals, s->next_apply_sis))
2525 			continue;
2526 
2527 		if (!s->wmarks.activated)
2528 			continue;
2529 
2530 		has_schemes_to_apply = true;
2531 
2532 		damos_adjust_quota(c, s);
2533 	}
2534 
2535 	if (!has_schemes_to_apply)
2536 		return;
2537 
2538 	mutex_lock(&c->walk_control_lock);
2539 	damon_for_each_target(t, c) {
2540 		if (c->ops.target_valid && c->ops.target_valid(t) == false)
2541 			continue;
2542 
2543 		damon_for_each_region(r, t)
2544 			damon_do_apply_schemes(c, t, r);
2545 	}
2546 
2547 	damon_for_each_scheme(s, c) {
2548 		if (time_before(c->passed_sample_intervals, s->next_apply_sis))
2549 			continue;
2550 		damos_walk_complete(c, s);
2551 		damos_set_next_apply_sis(s, c);
2552 		s->last_applied = NULL;
2553 		damos_trace_stat(c, s);
2554 	}
2555 	mutex_unlock(&c->walk_control_lock);
2556 }
2557 
2558 #ifdef CONFIG_DAMON_DEBUG_SANITY
2559 static void damon_verify_merge_two_regions(
2560 		struct damon_region *l, struct damon_region *r)
2561 {
2562 	/* damon_merge_two_regions() may created incorrect left region */
2563 	WARN_ONCE(l->ar.start >= l->ar.end, "l: %lu-%lu, r: %lu-%lu\n",
2564 			l->ar.start, l->ar.end, r->ar.start, r->ar.end);
2565 }
2566 #else
2567 static void damon_verify_merge_two_regions(
2568 		struct damon_region *l, struct damon_region *r)
2569 {
2570 }
2571 #endif
2572 
2573 /*
2574  * Merge two adjacent regions into one region
2575  */
2576 static void damon_merge_two_regions(struct damon_target *t,
2577 		struct damon_region *l, struct damon_region *r)
2578 {
2579 	unsigned long sz_l = damon_sz_region(l), sz_r = damon_sz_region(r);
2580 
2581 	l->nr_accesses = (l->nr_accesses * sz_l + r->nr_accesses * sz_r) /
2582 			(sz_l + sz_r);
2583 	l->nr_accesses_bp = l->nr_accesses * 10000;
2584 	l->age = (l->age * sz_l + r->age * sz_r) / (sz_l + sz_r);
2585 	l->ar.end = r->ar.end;
2586 	damon_verify_merge_two_regions(l, r);
2587 	damon_destroy_region(r, t);
2588 }
2589 
2590 #ifdef CONFIG_DAMON_DEBUG_SANITY
2591 static void damon_verify_merge_regions_of(struct damon_region *r)
2592 {
2593 	WARN_ONCE(r->nr_accesses != r->nr_accesses_bp / 10000,
2594 			"nr_accesses (%u) != nr_accesses_bp (%u)\n",
2595 			r->nr_accesses, r->nr_accesses_bp);
2596 }
2597 #else
2598 static void damon_verify_merge_regions_of(struct damon_region *r)
2599 {
2600 }
2601 #endif
2602 
2603 
2604 /*
2605  * Merge adjacent regions having similar access frequencies
2606  *
2607  * t		target affected by this merge operation
2608  * thres	'->nr_accesses' diff threshold for the merge
2609  * sz_limit	size upper limit of each region
2610  */
2611 static void damon_merge_regions_of(struct damon_target *t, unsigned int thres,
2612 				   unsigned long sz_limit)
2613 {
2614 	struct damon_region *r, *prev = NULL, *next;
2615 
2616 	damon_for_each_region_safe(r, next, t) {
2617 		damon_verify_merge_regions_of(r);
2618 		if (abs(r->nr_accesses - r->last_nr_accesses) > thres)
2619 			r->age = 0;
2620 		else if ((r->nr_accesses == 0) != (r->last_nr_accesses == 0))
2621 			r->age = 0;
2622 		else
2623 			r->age++;
2624 
2625 		if (prev && prev->ar.end == r->ar.start &&
2626 		    abs(prev->nr_accesses - r->nr_accesses) <= thres &&
2627 		    damon_sz_region(prev) + damon_sz_region(r) <= sz_limit)
2628 			damon_merge_two_regions(t, prev, r);
2629 		else
2630 			prev = r;
2631 	}
2632 }
2633 
2634 /*
2635  * Merge adjacent regions having similar access frequencies
2636  *
2637  * threshold	'->nr_accesses' diff threshold for the merge
2638  * sz_limit	size upper limit of each region
2639  *
2640  * This function merges monitoring target regions which are adjacent and their
2641  * access frequencies are similar.  This is for minimizing the monitoring
2642  * overhead under the dynamically changeable access pattern.  If a merge was
2643  * unnecessarily made, later 'kdamond_split_regions()' will revert it.
2644  *
2645  * The total number of regions could be higher than the user-defined limit,
2646  * max_nr_regions for some cases.  For example, the user can update
2647  * max_nr_regions to a number that lower than the current number of regions
2648  * while DAMON is running.  For such a case, repeat merging until the limit is
2649  * met while increasing @threshold up to possible maximum level.
2650  */
2651 static void kdamond_merge_regions(struct damon_ctx *c, unsigned int threshold,
2652 				  unsigned long sz_limit)
2653 {
2654 	struct damon_target *t;
2655 	unsigned int nr_regions;
2656 	unsigned int max_thres;
2657 
2658 	max_thres = c->attrs.aggr_interval /
2659 		(c->attrs.sample_interval ?  c->attrs.sample_interval : 1);
2660 	do {
2661 		nr_regions = 0;
2662 		damon_for_each_target(t, c) {
2663 			damon_merge_regions_of(t, threshold, sz_limit);
2664 			nr_regions += damon_nr_regions(t);
2665 		}
2666 		threshold = max(1, threshold * 2);
2667 	} while (nr_regions > c->attrs.max_nr_regions &&
2668 			threshold / 2 < max_thres);
2669 }
2670 
2671 #ifdef CONFIG_DAMON_DEBUG_SANITY
2672 static void damon_verify_split_region_at(struct damon_region *r,
2673 		unsigned long sz_r)
2674 {
2675 	WARN_ONCE(sz_r == 0 || sz_r >= damon_sz_region(r),
2676 			"sz_r: %lu r: %lu-%lu (%lu)\n",
2677 			sz_r, r->ar.start, r->ar.end, damon_sz_region(r));
2678 }
2679 #else
2680 static void damon_verify_split_region_at(struct damon_region *r,
2681 		unsigned long sz_r)
2682 {
2683 }
2684 #endif
2685 
2686 /*
2687  * Split a region in two
2688  *
2689  * r		the region to be split
2690  * sz_r		size of the first sub-region that will be made
2691  */
2692 static void damon_split_region_at(struct damon_target *t,
2693 				  struct damon_region *r, unsigned long sz_r)
2694 {
2695 	struct damon_region *new;
2696 
2697 	damon_verify_split_region_at(r, sz_r);
2698 	new = damon_new_region(r->ar.start + sz_r, r->ar.end);
2699 	if (!new)
2700 		return;
2701 
2702 	r->ar.end = new->ar.start;
2703 
2704 	new->age = r->age;
2705 	new->last_nr_accesses = r->last_nr_accesses;
2706 	new->nr_accesses_bp = r->nr_accesses_bp;
2707 	new->nr_accesses = r->nr_accesses;
2708 
2709 	damon_insert_region(new, r, damon_next_region(r), t);
2710 }
2711 
2712 /* Split every region in the given target into 'nr_subs' regions */
2713 static void damon_split_regions_of(struct damon_target *t, int nr_subs,
2714 				  unsigned long min_region_sz)
2715 {
2716 	struct damon_region *r, *next;
2717 	unsigned long sz_region, sz_sub = 0;
2718 	int i;
2719 
2720 	damon_for_each_region_safe(r, next, t) {
2721 		sz_region = damon_sz_region(r);
2722 
2723 		for (i = 0; i < nr_subs - 1 &&
2724 				sz_region > 2 * min_region_sz; i++) {
2725 			/*
2726 			 * Randomly select size of left sub-region to be at
2727 			 * least 10 percent and at most 90% of original region
2728 			 */
2729 			sz_sub = ALIGN_DOWN(damon_rand(1, 10) *
2730 					sz_region / 10, min_region_sz);
2731 			/* Do not allow blank region */
2732 			if (sz_sub == 0 || sz_sub >= sz_region)
2733 				continue;
2734 
2735 			damon_split_region_at(t, r, sz_sub);
2736 			sz_region = sz_sub;
2737 		}
2738 	}
2739 }
2740 
2741 /*
2742  * Split every target region into randomly-sized small regions
2743  *
2744  * This function splits every target region into random-sized small regions if
2745  * current total number of the regions is equal or smaller than half of the
2746  * user-specified maximum number of regions.  This is for maximizing the
2747  * monitoring accuracy under the dynamically changeable access patterns.  If a
2748  * split was unnecessarily made, later 'kdamond_merge_regions()' will revert
2749  * it.
2750  */
2751 static void kdamond_split_regions(struct damon_ctx *ctx)
2752 {
2753 	struct damon_target *t;
2754 	unsigned int nr_regions = 0;
2755 	static unsigned int last_nr_regions;
2756 	int nr_subregions = 2;
2757 
2758 	damon_for_each_target(t, ctx)
2759 		nr_regions += damon_nr_regions(t);
2760 
2761 	if (nr_regions > ctx->attrs.max_nr_regions / 2)
2762 		return;
2763 
2764 	/* Maybe the middle of the region has different access frequency */
2765 	if (last_nr_regions == nr_regions &&
2766 			nr_regions < ctx->attrs.max_nr_regions / 3)
2767 		nr_subregions = 3;
2768 
2769 	damon_for_each_target(t, ctx)
2770 		damon_split_regions_of(t, nr_subregions, ctx->min_region_sz);
2771 
2772 	last_nr_regions = nr_regions;
2773 }
2774 
2775 /*
2776  * Check whether current monitoring should be stopped
2777  *
2778  * The monitoring is stopped when either the user requested to stop, or all
2779  * monitoring targets are invalid.
2780  *
2781  * Returns true if need to stop current monitoring.
2782  */
2783 static bool kdamond_need_stop(struct damon_ctx *ctx)
2784 {
2785 	struct damon_target *t;
2786 
2787 	if (kthread_should_stop())
2788 		return true;
2789 
2790 	if (!ctx->ops.target_valid)
2791 		return false;
2792 
2793 	damon_for_each_target(t, ctx) {
2794 		if (ctx->ops.target_valid(t))
2795 			return false;
2796 	}
2797 
2798 	return true;
2799 }
2800 
2801 static int damos_get_wmark_metric_value(enum damos_wmark_metric metric,
2802 					unsigned long *metric_value)
2803 {
2804 	switch (metric) {
2805 	case DAMOS_WMARK_FREE_MEM_RATE:
2806 		*metric_value = global_zone_page_state(NR_FREE_PAGES) * 1000 /
2807 		       totalram_pages();
2808 		return 0;
2809 	default:
2810 		break;
2811 	}
2812 	return -EINVAL;
2813 }
2814 
2815 /*
2816  * Returns zero if the scheme is active.  Else, returns time to wait for next
2817  * watermark check in micro-seconds.
2818  */
2819 static unsigned long damos_wmark_wait_us(struct damos *scheme)
2820 {
2821 	unsigned long metric;
2822 
2823 	if (damos_get_wmark_metric_value(scheme->wmarks.metric, &metric))
2824 		return 0;
2825 
2826 	/* higher than high watermark or lower than low watermark */
2827 	if (metric > scheme->wmarks.high || scheme->wmarks.low > metric) {
2828 		if (scheme->wmarks.activated)
2829 			pr_debug("deactivate a scheme (%d) for %s wmark\n",
2830 				 scheme->action,
2831 				 str_high_low(metric > scheme->wmarks.high));
2832 		scheme->wmarks.activated = false;
2833 		return scheme->wmarks.interval;
2834 	}
2835 
2836 	/* inactive and higher than middle watermark */
2837 	if ((scheme->wmarks.high >= metric && metric >= scheme->wmarks.mid) &&
2838 			!scheme->wmarks.activated)
2839 		return scheme->wmarks.interval;
2840 
2841 	if (!scheme->wmarks.activated)
2842 		pr_debug("activate a scheme (%d)\n", scheme->action);
2843 	scheme->wmarks.activated = true;
2844 	return 0;
2845 }
2846 
2847 static void kdamond_usleep(unsigned long usecs)
2848 {
2849 	if (usecs >= USLEEP_RANGE_UPPER_BOUND)
2850 		schedule_timeout_idle(usecs_to_jiffies(usecs));
2851 	else
2852 		usleep_range_idle(usecs, usecs + 1);
2853 }
2854 
2855 /*
2856  * kdamond_call() - handle damon_call_control objects.
2857  * @ctx:	The &struct damon_ctx of the kdamond.
2858  * @cancel:	Whether to cancel the invocation of the function.
2859  *
2860  * If there are &struct damon_call_control requests that registered via
2861  * &damon_call() on @ctx, do or cancel the invocation of the function depending
2862  * on @cancel.  @cancel is set when the kdamond is already out of the main loop
2863  * and therefore will be terminated.
2864  */
2865 static void kdamond_call(struct damon_ctx *ctx, bool cancel)
2866 {
2867 	struct damon_call_control *control, *next;
2868 	LIST_HEAD(controls);
2869 
2870 	mutex_lock(&ctx->call_controls_lock);
2871 	list_splice_tail_init(&ctx->call_controls, &controls);
2872 	mutex_unlock(&ctx->call_controls_lock);
2873 
2874 	list_for_each_entry_safe(control, next, &controls, list) {
2875 		if (!control->repeat || cancel)
2876 			list_del(&control->list);
2877 
2878 		if (cancel)
2879 			control->canceled = true;
2880 		else
2881 			control->return_code = control->fn(control->data);
2882 
2883 		if (!control->repeat)
2884 			complete(&control->completion);
2885 		else if (control->canceled && control->dealloc_on_cancel)
2886 			kfree(control);
2887 		if (!cancel && ctx->maybe_corrupted)
2888 			break;
2889 	}
2890 
2891 	mutex_lock(&ctx->call_controls_lock);
2892 	list_splice_tail(&controls, &ctx->call_controls);
2893 	mutex_unlock(&ctx->call_controls_lock);
2894 }
2895 
2896 /* Returns negative error code if it's not activated but should return */
2897 static int kdamond_wait_activation(struct damon_ctx *ctx)
2898 {
2899 	struct damos *s;
2900 	unsigned long wait_time;
2901 	unsigned long min_wait_time = 0;
2902 	bool init_wait_time = false;
2903 
2904 	while (!kdamond_need_stop(ctx)) {
2905 		damon_for_each_scheme(s, ctx) {
2906 			wait_time = damos_wmark_wait_us(s);
2907 			if (!init_wait_time || wait_time < min_wait_time) {
2908 				init_wait_time = true;
2909 				min_wait_time = wait_time;
2910 			}
2911 		}
2912 		if (!min_wait_time)
2913 			return 0;
2914 
2915 		kdamond_usleep(min_wait_time);
2916 
2917 		kdamond_call(ctx, false);
2918 		if (ctx->maybe_corrupted)
2919 			return -EINVAL;
2920 		damos_walk_cancel(ctx);
2921 	}
2922 	return -EBUSY;
2923 }
2924 
2925 static void kdamond_init_ctx(struct damon_ctx *ctx)
2926 {
2927 	unsigned long sample_interval = ctx->attrs.sample_interval ?
2928 		ctx->attrs.sample_interval : 1;
2929 	struct damos *scheme;
2930 
2931 	ctx->passed_sample_intervals = 0;
2932 	ctx->next_aggregation_sis = ctx->attrs.aggr_interval / sample_interval;
2933 	ctx->next_ops_update_sis = ctx->attrs.ops_update_interval /
2934 		sample_interval;
2935 	ctx->next_intervals_tune_sis = ctx->next_aggregation_sis *
2936 		ctx->attrs.intervals_goal.aggrs;
2937 
2938 	damon_for_each_scheme(scheme, ctx) {
2939 		damos_set_next_apply_sis(scheme, ctx);
2940 		damos_set_filters_default_reject(scheme);
2941 	}
2942 }
2943 
2944 /*
2945  * The monitoring daemon that runs as a kernel thread
2946  */
2947 static int kdamond_fn(void *data)
2948 {
2949 	struct damon_ctx *ctx = data;
2950 	unsigned int max_nr_accesses = 0;
2951 	unsigned long sz_limit = 0;
2952 
2953 	pr_debug("kdamond (%d) starts\n", current->pid);
2954 
2955 	complete(&ctx->kdamond_started);
2956 	kdamond_init_ctx(ctx);
2957 
2958 	if (ctx->ops.init)
2959 		ctx->ops.init(ctx);
2960 	ctx->regions_score_histogram = kmalloc_array(DAMOS_MAX_SCORE + 1,
2961 			sizeof(*ctx->regions_score_histogram), GFP_KERNEL);
2962 	if (!ctx->regions_score_histogram)
2963 		goto done;
2964 
2965 	sz_limit = damon_apply_min_nr_regions(ctx);
2966 
2967 	while (!kdamond_need_stop(ctx)) {
2968 		/*
2969 		 * ctx->attrs and ctx->next_{aggregation,ops_update}_sis could
2970 		 * be changed from kdamond_call().  Read the values here, and
2971 		 * use those for this iteration.  That is, damon_set_attrs()
2972 		 * updated new values are respected from next iteration.
2973 		 */
2974 		unsigned long next_aggregation_sis = ctx->next_aggregation_sis;
2975 		unsigned long next_ops_update_sis = ctx->next_ops_update_sis;
2976 		unsigned long sample_interval = ctx->attrs.sample_interval;
2977 
2978 		if (kdamond_wait_activation(ctx))
2979 			break;
2980 
2981 		if (ctx->ops.prepare_access_checks)
2982 			ctx->ops.prepare_access_checks(ctx);
2983 
2984 		kdamond_usleep(sample_interval);
2985 		ctx->passed_sample_intervals++;
2986 
2987 		if (ctx->ops.check_accesses)
2988 			max_nr_accesses = ctx->ops.check_accesses(ctx);
2989 
2990 		if (time_after_eq(ctx->passed_sample_intervals,
2991 					next_aggregation_sis)) {
2992 			kdamond_merge_regions(ctx,
2993 					max_nr_accesses / 10,
2994 					sz_limit);
2995 			/* online updates might be made */
2996 			sz_limit = damon_apply_min_nr_regions(ctx);
2997 		}
2998 
2999 		/*
3000 		 * do kdamond_call() and kdamond_apply_schemes() after
3001 		 * kdamond_merge_regions() if possible, to reduce overhead
3002 		 */
3003 		kdamond_call(ctx, false);
3004 		if (ctx->maybe_corrupted)
3005 			break;
3006 		if (!list_empty(&ctx->schemes))
3007 			kdamond_apply_schemes(ctx);
3008 		else
3009 			damos_walk_cancel(ctx);
3010 
3011 		sample_interval = ctx->attrs.sample_interval ?
3012 			ctx->attrs.sample_interval : 1;
3013 		if (time_after_eq(ctx->passed_sample_intervals,
3014 					next_aggregation_sis)) {
3015 			if (ctx->attrs.intervals_goal.aggrs &&
3016 					time_after_eq(
3017 						ctx->passed_sample_intervals,
3018 						ctx->next_intervals_tune_sis)) {
3019 				/*
3020 				 * ctx->next_aggregation_sis might be updated
3021 				 * from kdamond_call().  In the case,
3022 				 * damon_set_attrs() which will be called from
3023 				 * kdamond_tune_interval() may wrongly think
3024 				 * this is in the middle of the current
3025 				 * aggregation, and make aggregation
3026 				 * information reset for all regions.  Then,
3027 				 * following kdamond_reset_aggregated() call
3028 				 * will make the region information invalid,
3029 				 * particularly for ->nr_accesses_bp.
3030 				 *
3031 				 * Reset ->next_aggregation_sis to avoid that.
3032 				 * It will anyway correctly updated after this
3033 				 * if clause.
3034 				 */
3035 				ctx->next_aggregation_sis =
3036 					next_aggregation_sis;
3037 				ctx->next_intervals_tune_sis +=
3038 					ctx->attrs.aggr_samples *
3039 					ctx->attrs.intervals_goal.aggrs;
3040 				kdamond_tune_intervals(ctx);
3041 				sample_interval = ctx->attrs.sample_interval ?
3042 					ctx->attrs.sample_interval : 1;
3043 
3044 			}
3045 			ctx->next_aggregation_sis = next_aggregation_sis +
3046 				ctx->attrs.aggr_interval / sample_interval;
3047 
3048 			kdamond_reset_aggregated(ctx);
3049 			kdamond_split_regions(ctx);
3050 		}
3051 
3052 		if (time_after_eq(ctx->passed_sample_intervals,
3053 					next_ops_update_sis)) {
3054 			ctx->next_ops_update_sis = next_ops_update_sis +
3055 				ctx->attrs.ops_update_interval /
3056 				sample_interval;
3057 			if (ctx->ops.update)
3058 				ctx->ops.update(ctx);
3059 		}
3060 	}
3061 done:
3062 	damon_destroy_targets(ctx);
3063 
3064 	kfree(ctx->regions_score_histogram);
3065 	kdamond_call(ctx, true);
3066 	damos_walk_cancel(ctx);
3067 
3068 	pr_debug("kdamond (%d) finishes\n", current->pid);
3069 	mutex_lock(&ctx->kdamond_lock);
3070 	ctx->kdamond = NULL;
3071 	mutex_unlock(&ctx->kdamond_lock);
3072 
3073 	mutex_lock(&damon_lock);
3074 	nr_running_ctxs--;
3075 	if (!nr_running_ctxs && running_exclusive_ctxs)
3076 		running_exclusive_ctxs = false;
3077 	mutex_unlock(&damon_lock);
3078 
3079 	return 0;
3080 }
3081 
3082 static int walk_system_ram(struct resource *res, void *arg)
3083 {
3084 	struct resource *a = arg;
3085 
3086 	if (resource_size(a) < resource_size(res)) {
3087 		a->start = res->start;
3088 		a->end = res->end;
3089 	}
3090 	return 0;
3091 }
3092 
3093 static unsigned long damon_res_to_core_addr(resource_size_t ra,
3094 		unsigned long addr_unit)
3095 {
3096 	/*
3097 	 * Use div_u64() for avoiding linking errors related with __udivdi3,
3098 	 * __aeabi_uldivmod, or similar problems.  This should also improve the
3099 	 * performance optimization (read div_u64() comment for the detail).
3100 	 */
3101 	if (sizeof(ra) == 8 && sizeof(addr_unit) == 4)
3102 		return div_u64(ra, addr_unit);
3103 	return ra / addr_unit;
3104 }
3105 
3106 /*
3107  * Find biggest 'System RAM' resource and store its start and end address in
3108  * @start and @end, respectively.  If no System RAM is found, returns false.
3109  */
3110 static bool damon_find_biggest_system_ram(unsigned long *start,
3111 		unsigned long *end, unsigned long addr_unit)
3112 
3113 {
3114 	struct resource res = {};
3115 
3116 	walk_system_ram_res(0, -1, &res, walk_system_ram);
3117 	*start = damon_res_to_core_addr(res.start, addr_unit);
3118 	*end = damon_res_to_core_addr(res.end + 1, addr_unit);
3119 	if (*end <= *start)
3120 		return false;
3121 	return true;
3122 }
3123 
3124 /**
3125  * damon_set_region_biggest_system_ram_default() - Set the region of the given
3126  * monitoring target as requested, or biggest 'System RAM'.
3127  * @t:		The monitoring target to set the region.
3128  * @start:	The pointer to the start address of the region.
3129  * @end:	The pointer to the end address of the region.
3130  * @addr_unit:	The address unit for the damon_ctx of @t.
3131  * @min_region_sz:	Minimum region size.
3132  *
3133  * This function sets the region of @t as requested by @start and @end.  If the
3134  * values of @start and @end are zero, however, this function finds the biggest
3135  * 'System RAM' resource and sets the region to cover the resource.  In the
3136  * latter case, this function saves the start and end addresses of the resource
3137  * in @start and @end, respectively.
3138  *
3139  * Return: 0 on success, negative error code otherwise.
3140  */
3141 int damon_set_region_biggest_system_ram_default(struct damon_target *t,
3142 			unsigned long *start, unsigned long *end,
3143 			unsigned long addr_unit, unsigned long min_region_sz)
3144 {
3145 	struct damon_addr_range addr_range;
3146 
3147 	if (*start > *end)
3148 		return -EINVAL;
3149 
3150 	if (!*start && !*end &&
3151 			!damon_find_biggest_system_ram(start, end, addr_unit))
3152 		return -EINVAL;
3153 
3154 	addr_range.start = *start;
3155 	addr_range.end = *end;
3156 	return damon_set_regions(t, &addr_range, 1, min_region_sz);
3157 }
3158 
3159 /*
3160  * damon_moving_sum() - Calculate an inferred moving sum value.
3161  * @mvsum:	Inferred sum of the last @len_window values.
3162  * @nomvsum:	Non-moving sum of the last discrete @len_window window values.
3163  * @len_window:	The number of last values to take care of.
3164  * @new_value:	New value that will be added to the pseudo moving sum.
3165  *
3166  * Moving sum (moving average * window size) is good for handling noise, but
3167  * the cost of keeping past values can be high for arbitrary window size.  This
3168  * function implements a lightweight pseudo moving sum function that doesn't
3169  * keep the past window values.
3170  *
3171  * It simply assumes there was no noise in the past, and get the no-noise
3172  * assumed past value to drop from @nomvsum and @len_window.  @nomvsum is a
3173  * non-moving sum of the last window.  For example, if @len_window is 10 and we
3174  * have 25 values, @nomvsum is the sum of the 11th to 20th values of the 25
3175  * values.  Hence, this function simply drops @nomvsum / @len_window from
3176  * given @mvsum and add @new_value.
3177  *
3178  * For example, if @len_window is 10 and @nomvsum is 50, the last 10 values for
3179  * the last window could be vary, e.g., 0, 10, 0, 10, 0, 10, 0, 0, 0, 20.  For
3180  * calculating next moving sum with a new value, we should drop 0 from 50 and
3181  * add the new value.  However, this function assumes it got value 5 for each
3182  * of the last ten times.  Based on the assumption, when the next value is
3183  * measured, it drops the assumed past value, 5 from the current sum, and add
3184  * the new value to get the updated pseduo-moving average.
3185  *
3186  * This means the value could have errors, but the errors will be disappeared
3187  * for every @len_window aligned calls.  For example, if @len_window is 10, the
3188  * pseudo moving sum with 11th value to 19th value would have an error.  But
3189  * the sum with 20th value will not have the error.
3190  *
3191  * Return: Pseudo-moving average after getting the @new_value.
3192  */
3193 static unsigned int damon_moving_sum(unsigned int mvsum, unsigned int nomvsum,
3194 		unsigned int len_window, unsigned int new_value)
3195 {
3196 	return mvsum - nomvsum / len_window + new_value;
3197 }
3198 
3199 /**
3200  * damon_update_region_access_rate() - Update the access rate of a region.
3201  * @r:		The DAMON region to update for its access check result.
3202  * @accessed:	Whether the region has accessed during last sampling interval.
3203  * @attrs:	The damon_attrs of the DAMON context.
3204  *
3205  * Update the access rate of a region with the region's last sampling interval
3206  * access check result.
3207  *
3208  * Usually this will be called by &damon_operations->check_accesses callback.
3209  */
3210 void damon_update_region_access_rate(struct damon_region *r, bool accessed,
3211 		struct damon_attrs *attrs)
3212 {
3213 	unsigned int len_window = 1;
3214 
3215 	/*
3216 	 * sample_interval can be zero, but cannot be larger than
3217 	 * aggr_interval, owing to validation of damon_set_attrs().
3218 	 */
3219 	if (attrs->sample_interval)
3220 		len_window = damon_max_nr_accesses(attrs);
3221 	r->nr_accesses_bp = damon_moving_sum(r->nr_accesses_bp,
3222 			r->last_nr_accesses * 10000, len_window,
3223 			accessed ? 10000 : 0);
3224 
3225 	if (accessed)
3226 		r->nr_accesses++;
3227 }
3228 
3229 /**
3230  * damon_initialized() - Return if DAMON is ready to be used.
3231  *
3232  * Return: true if DAMON is ready to be used, false otherwise.
3233  */
3234 bool damon_initialized(void)
3235 {
3236 	return damon_region_cache != NULL;
3237 }
3238 
3239 static int __init damon_init(void)
3240 {
3241 	damon_region_cache = KMEM_CACHE(damon_region, 0);
3242 	if (unlikely(!damon_region_cache)) {
3243 		pr_err("creating damon_region_cache fails\n");
3244 		return -ENOMEM;
3245 	}
3246 
3247 	return 0;
3248 }
3249 
3250 subsys_initcall(damon_init);
3251 
3252 #include "tests/core-kunit.h"
3253