xref: /linux/mm/damon/core.c (revision 73519ded992fc9dda2807450d6931002bb93cb16)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Data Access Monitor
4  *
5  * Author: SeongJae Park <sj@kernel.org>
6  */
7 
8 #define pr_fmt(fmt) "damon: " fmt
9 
10 #include <linux/damon.h>
11 #include <linux/delay.h>
12 #include <linux/kthread.h>
13 #include <linux/mm.h>
14 #include <linux/psi.h>
15 #include <linux/slab.h>
16 #include <linux/string.h>
17 
18 #define CREATE_TRACE_POINTS
19 #include <trace/events/damon.h>
20 
21 #ifdef CONFIG_DAMON_KUNIT_TEST
22 #undef DAMON_MIN_REGION
23 #define DAMON_MIN_REGION 1
24 #endif
25 
26 static DEFINE_MUTEX(damon_lock);
27 static int nr_running_ctxs;
28 static bool running_exclusive_ctxs;
29 
30 static DEFINE_MUTEX(damon_ops_lock);
31 static struct damon_operations damon_registered_ops[NR_DAMON_OPS];
32 
33 static struct kmem_cache *damon_region_cache __ro_after_init;
34 
35 /* Should be called under damon_ops_lock with id smaller than NR_DAMON_OPS */
36 static bool __damon_is_registered_ops(enum damon_ops_id id)
37 {
38 	struct damon_operations empty_ops = {};
39 
40 	if (!memcmp(&empty_ops, &damon_registered_ops[id], sizeof(empty_ops)))
41 		return false;
42 	return true;
43 }
44 
45 /**
46  * damon_is_registered_ops() - Check if a given damon_operations is registered.
47  * @id:	Id of the damon_operations to check if registered.
48  *
49  * Return: true if the ops is set, false otherwise.
50  */
51 bool damon_is_registered_ops(enum damon_ops_id id)
52 {
53 	bool registered;
54 
55 	if (id >= NR_DAMON_OPS)
56 		return false;
57 	mutex_lock(&damon_ops_lock);
58 	registered = __damon_is_registered_ops(id);
59 	mutex_unlock(&damon_ops_lock);
60 	return registered;
61 }
62 
63 /**
64  * damon_register_ops() - Register a monitoring operations set to DAMON.
65  * @ops:	monitoring operations set to register.
66  *
67  * This function registers a monitoring operations set having a valid
68  * &struct damon_operations->id so that others can find and use it later.
69  *
70  * Return: 0 on success, negative error code otherwise.
71  */
72 int damon_register_ops(struct damon_operations *ops)
73 {
74 	int err = 0;
75 
76 	if (ops->id >= NR_DAMON_OPS)
77 		return -EINVAL;
78 	mutex_lock(&damon_ops_lock);
79 	/* Fail for already registered ops */
80 	if (__damon_is_registered_ops(ops->id)) {
81 		err = -EINVAL;
82 		goto out;
83 	}
84 	damon_registered_ops[ops->id] = *ops;
85 out:
86 	mutex_unlock(&damon_ops_lock);
87 	return err;
88 }
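
/*
 * Example (editorial sketch, not part of core.c): registering a monitoring
 * operations set, typically from a module init routine.  Only fields that are
 * visible in this file (.id and .cleanup) are set here, and the
 * DAMON_OPS_PADDR id is reused purely for illustration; a real operations set
 * provides its own id and the access-check callbacks as well.
 */
static void example_cleanup(struct damon_ctx *ctx)
{
	/* release per-target resources here */
}

static int __init example_register_ops(void)
{
	static struct damon_operations example_ops = {
		.id = DAMON_OPS_PADDR,
		.cleanup = example_cleanup,
	};

	/* returns -EINVAL if another set already registered the same id */
	return damon_register_ops(&example_ops);
}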
89 
90 /**
91  * damon_select_ops() - Select a monitoring operations to use with the context.
92  * @ctx:	monitoring context to use the operations.
93  * @id:		id of the registered monitoring operations to select.
94  *
95  * This function finds the registered monitoring operations set of @id and
96  * makes @ctx use it.
97  *
98  * Return: 0 on success, negative error code otherwise.
99  */
100 int damon_select_ops(struct damon_ctx *ctx, enum damon_ops_id id)
101 {
102 	int err = 0;
103 
104 	if (id >= NR_DAMON_OPS)
105 		return -EINVAL;
106 
107 	mutex_lock(&damon_ops_lock);
108 	if (!__damon_is_registered_ops(id))
109 		err = -EINVAL;
110 	else
111 		ctx->ops = damon_registered_ops[id];
112 	mutex_unlock(&damon_ops_lock);
113 	return err;
114 }
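
/*
 * Example (editorial sketch, not part of core.c): a typical module-side
 * sequence for creating a context that uses a registered operations set.
 * DAMON_OPS_PADDR is assumed to have been registered by the physical address
 * space operations module.
 */
static struct damon_ctx *example_new_paddr_ctx(void)
{
	struct damon_ctx *ctx = damon_new_ctx();

	if (!ctx)
		return NULL;
	/* fails with -EINVAL if the operations set is not registered yet */
	if (damon_select_ops(ctx, DAMON_OPS_PADDR)) {
		damon_destroy_ctx(ctx);
		return NULL;
	}
	return ctx;
}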
115 
116 /*
117  * Construct a damon_region struct
118  *
119  * Returns the pointer to the new struct on success, or NULL otherwise
120  */
121 struct damon_region *damon_new_region(unsigned long start, unsigned long end)
122 {
123 	struct damon_region *region;
124 
125 	region = kmem_cache_alloc(damon_region_cache, GFP_KERNEL);
126 	if (!region)
127 		return NULL;
128 
129 	region->ar.start = start;
130 	region->ar.end = end;
131 	region->nr_accesses = 0;
132 	region->nr_accesses_bp = 0;
133 	INIT_LIST_HEAD(&region->list);
134 
135 	region->age = 0;
136 	region->last_nr_accesses = 0;
137 
138 	return region;
139 }
140 
141 void damon_add_region(struct damon_region *r, struct damon_target *t)
142 {
143 	list_add_tail(&r->list, &t->regions_list);
144 	t->nr_regions++;
145 }
146 
147 static void damon_del_region(struct damon_region *r, struct damon_target *t)
148 {
149 	list_del(&r->list);
150 	t->nr_regions--;
151 }
152 
153 static void damon_free_region(struct damon_region *r)
154 {
155 	kmem_cache_free(damon_region_cache, r);
156 }
157 
158 void damon_destroy_region(struct damon_region *r, struct damon_target *t)
159 {
160 	damon_del_region(r, t);
161 	damon_free_region(r);
162 }
163 
164 /*
165  * Check whether a region is intersecting an address range
166  *
167  * Returns true if it is.
168  */
169 static bool damon_intersect(struct damon_region *r,
170 		struct damon_addr_range *re)
171 {
172 	return !(r->ar.end <= re->start || re->end <= r->ar.start);
173 }
174 
175 /*
176  * Fill holes in regions with new regions.
177  */
178 static int damon_fill_regions_holes(struct damon_region *first,
179 		struct damon_region *last, struct damon_target *t)
180 {
181 	struct damon_region *r = first;
182 
183 	damon_for_each_region_from(r, t) {
184 		struct damon_region *next, *newr;
185 
186 		if (r == last)
187 			break;
188 		next = damon_next_region(r);
189 		if (r->ar.end != next->ar.start) {
190 			newr = damon_new_region(r->ar.end, next->ar.start);
191 			if (!newr)
192 				return -ENOMEM;
193 			damon_insert_region(newr, r, next, t);
194 		}
195 	}
196 	return 0;
197 }
198 
199 /*
200  * damon_set_regions() - Set regions of a target for given address ranges.
201  * @t:		the given target.
202  * @ranges:	array of new monitoring target ranges.
203  * @nr_ranges:	length of @ranges.
204  *
205  * This function adds new regions to, or modifies existing regions of, a
206  * monitoring target to fit in the given ranges.
207  *
208  * Return: 0 on success, or negative error code otherwise.
209  */
210 int damon_set_regions(struct damon_target *t, struct damon_addr_range *ranges,
211 		unsigned int nr_ranges)
212 {
213 	struct damon_region *r, *next;
214 	unsigned int i;
215 	int err;
216 
217 	/* Remove regions which are not in the new ranges */
218 	damon_for_each_region_safe(r, next, t) {
219 		for (i = 0; i < nr_ranges; i++) {
220 			if (damon_intersect(r, &ranges[i]))
221 				break;
222 		}
223 		if (i == nr_ranges)
224 			damon_destroy_region(r, t);
225 	}
226 
227 	r = damon_first_region(t);
228 	/* Add new regions or resize existing regions to fit in the ranges */
229 	for (i = 0; i < nr_ranges; i++) {
230 		struct damon_region *first = NULL, *last, *newr;
231 		struct damon_addr_range *range;
232 
233 		range = &ranges[i];
234 		/* Get the first/last regions intersecting with the range */
235 		damon_for_each_region_from(r, t) {
236 			if (damon_intersect(r, range)) {
237 				if (!first)
238 					first = r;
239 				last = r;
240 			}
241 			if (r->ar.start >= range->end)
242 				break;
243 		}
244 		if (!first) {
245 			/* no region intersects with this range */
246 			newr = damon_new_region(
247 					ALIGN_DOWN(range->start,
248 						DAMON_MIN_REGION),
249 					ALIGN(range->end, DAMON_MIN_REGION));
250 			if (!newr)
251 				return -ENOMEM;
252 			damon_insert_region(newr, damon_prev_region(r), r, t);
253 		} else {
254 			/* resize intersecting regions to fit in this range */
255 			first->ar.start = ALIGN_DOWN(range->start,
256 					DAMON_MIN_REGION);
257 			last->ar.end = ALIGN(range->end, DAMON_MIN_REGION);
258 
259 			/* fill possible holes in the range */
260 			err = damon_fill_regions_holes(first, last, t);
261 			if (err)
262 				return err;
263 		}
264 	}
265 	return 0;
266 }
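
/*
 * Example (editorial sketch, not part of core.c): restricting a target to two
 * hypothetical address ranges.  The addresses are arbitrary; note that
 * damon_set_regions() aligns the range boundaries to DAMON_MIN_REGION.
 */
static int example_set_two_ranges(struct damon_target *t)
{
	struct damon_addr_range ranges[] = {
		{ .start = 0x100000000UL, .end = 0x180000000UL },
		{ .start = 0x200000000UL, .end = 0x240000000UL },
	};

	/* existing regions that fall outside of the ranges are destroyed */
	return damon_set_regions(t, ranges, ARRAY_SIZE(ranges));
}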
267 
268 struct damos_filter *damos_new_filter(enum damos_filter_type type,
269 		bool matching, bool allow)
270 {
271 	struct damos_filter *filter;
272 
273 	filter = kmalloc(sizeof(*filter), GFP_KERNEL);
274 	if (!filter)
275 		return NULL;
276 	filter->type = type;
277 	filter->matching = matching;
278 	filter->allow = allow;
279 	INIT_LIST_HEAD(&filter->list);
280 	return filter;
281 }
282 
283 void damos_add_filter(struct damos *s, struct damos_filter *f)
284 {
285 	list_add_tail(&f->list, &s->filters);
286 }
287 
288 static void damos_del_filter(struct damos_filter *f)
289 {
290 	list_del(&f->list);
291 }
292 
293 static void damos_free_filter(struct damos_filter *f)
294 {
295 	kfree(f);
296 }
297 
298 void damos_destroy_filter(struct damos_filter *f)
299 {
300 	damos_del_filter(f);
301 	damos_free_filter(f);
302 }
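
/*
 * Example (editorial sketch, not part of core.c): making a scheme reject its
 * action for a hypothetical address range.  With matching == true and
 * allow == false, regions inside the range are filtered out before the action
 * is applied.
 */
static int example_add_addr_reject_filter(struct damos *scheme)
{
	struct damos_filter *filter;

	filter = damos_new_filter(DAMOS_FILTER_TYPE_ADDR, true, false);
	if (!filter)
		return -ENOMEM;
	filter->addr_range.start = 0x100000000UL;
	filter->addr_range.end = 0x140000000UL;
	damos_add_filter(scheme, filter);
	return 0;
}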
303 
304 struct damos_quota_goal *damos_new_quota_goal(
305 		enum damos_quota_goal_metric metric,
306 		unsigned long target_value)
307 {
308 	struct damos_quota_goal *goal;
309 
310 	goal = kmalloc(sizeof(*goal), GFP_KERNEL);
311 	if (!goal)
312 		return NULL;
313 	goal->metric = metric;
314 	goal->target_value = target_value;
315 	INIT_LIST_HEAD(&goal->list);
316 	return goal;
317 }
318 
319 void damos_add_quota_goal(struct damos_quota *q, struct damos_quota_goal *g)
320 {
321 	list_add_tail(&g->list, &q->goals);
322 }
323 
324 static void damos_del_quota_goal(struct damos_quota_goal *g)
325 {
326 	list_del(&g->list);
327 }
328 
329 static void damos_free_quota_goal(struct damos_quota_goal *g)
330 {
331 	kfree(g);
332 }
333 
334 void damos_destroy_quota_goal(struct damos_quota_goal *g)
335 {
336 	damos_del_quota_goal(g);
337 	damos_free_quota_goal(g);
338 }
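
/*
 * Example (editorial sketch, not part of core.c): adding a feedback goal so
 * that the quota auto-tunes against system memory pressure.  The target value
 * is in the DAMOS_QUOTA_SOME_MEM_PSI_US metric's unit (microseconds of PSI
 * "some" memory stall); 5000 is an arbitrary illustrative value.
 */
static int example_add_psi_goal(struct damos *scheme)
{
	struct damos_quota_goal *goal;

	goal = damos_new_quota_goal(DAMOS_QUOTA_SOME_MEM_PSI_US, 5000);
	if (!goal)
		return -ENOMEM;
	damos_add_quota_goal(&scheme->quota, goal);
	return 0;
}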
339 
340 /* initialize fields of @quota that normally API users wouldn't set */
341 static struct damos_quota *damos_quota_init(struct damos_quota *quota)
342 {
343 	quota->esz = 0;
344 	quota->total_charged_sz = 0;
345 	quota->total_charged_ns = 0;
346 	quota->charged_sz = 0;
347 	quota->charged_from = 0;
348 	quota->charge_target_from = NULL;
349 	quota->charge_addr_from = 0;
350 	quota->esz_bp = 0;
351 	return quota;
352 }
353 
354 struct damos *damon_new_scheme(struct damos_access_pattern *pattern,
355 			enum damos_action action,
356 			unsigned long apply_interval_us,
357 			struct damos_quota *quota,
358 			struct damos_watermarks *wmarks,
359 			int target_nid)
360 {
361 	struct damos *scheme;
362 
363 	scheme = kmalloc(sizeof(*scheme), GFP_KERNEL);
364 	if (!scheme)
365 		return NULL;
366 	scheme->pattern = *pattern;
367 	scheme->action = action;
368 	scheme->apply_interval_us = apply_interval_us;
369 	/*
370 	 * next_apply_sis will be set when kdamond starts.  While kdamond is
371 	 * running, it will also be updated when the scheme is added to the
372 	 * DAMON context or damon_attrs are updated.
373 	 */
374 	scheme->next_apply_sis = 0;
375 	INIT_LIST_HEAD(&scheme->filters);
376 	scheme->stat = (struct damos_stat){};
377 	INIT_LIST_HEAD(&scheme->list);
378 
379 	scheme->quota = *(damos_quota_init(quota));
380 	/* quota.goals should be separately set by caller */
381 	INIT_LIST_HEAD(&scheme->quota.goals);
382 
383 	scheme->wmarks = *wmarks;
384 	scheme->wmarks.activated = true;
385 
386 	scheme->target_nid = target_nid;
387 
388 	return scheme;
389 }
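
/*
 * Example (editorial sketch, not part of core.c): building a DAMOS_STAT
 * scheme that only tracks regions which stayed completely idle for at least
 * ten aggregation intervals.  Zeroed quota and watermarks are assumed here to
 * mean no quota and no watermark-based deactivation, and an apply_interval_us
 * of zero means "use the aggregation interval".
 */
static struct damos *example_new_stat_scheme(void)
{
	struct damos_access_pattern pattern = {
		.min_sz_region = DAMON_MIN_REGION,
		.max_sz_region = ULONG_MAX,
		.min_nr_accesses = 0,
		.max_nr_accesses = 0,
		.min_age_region = 10,
		.max_age_region = UINT_MAX,
	};
	struct damos_quota quota = {};
	struct damos_watermarks wmarks = {};

	return damon_new_scheme(&pattern, DAMOS_STAT, 0, &quota, &wmarks,
			NUMA_NO_NODE);
}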
390 
391 static void damos_set_next_apply_sis(struct damos *s, struct damon_ctx *ctx)
392 {
393 	unsigned long sample_interval = ctx->attrs.sample_interval ?
394 		ctx->attrs.sample_interval : 1;
395 	unsigned long apply_interval = s->apply_interval_us ?
396 		s->apply_interval_us : ctx->attrs.aggr_interval;
397 
398 	s->next_apply_sis = ctx->passed_sample_intervals +
399 		apply_interval / sample_interval;
400 }
401 
402 void damon_add_scheme(struct damon_ctx *ctx, struct damos *s)
403 {
404 	list_add_tail(&s->list, &ctx->schemes);
405 	damos_set_next_apply_sis(s, ctx);
406 }
407 
408 static void damon_del_scheme(struct damos *s)
409 {
410 	list_del(&s->list);
411 }
412 
413 static void damon_free_scheme(struct damos *s)
414 {
415 	kfree(s);
416 }
417 
418 void damon_destroy_scheme(struct damos *s)
419 {
420 	struct damos_quota_goal *g, *g_next;
421 	struct damos_filter *f, *next;
422 
423 	damos_for_each_quota_goal_safe(g, g_next, &s->quota)
424 		damos_destroy_quota_goal(g);
425 
426 	damos_for_each_filter_safe(f, next, s)
427 		damos_destroy_filter(f);
428 	damon_del_scheme(s);
429 	damon_free_scheme(s);
430 }
431 
432 /*
433  * Construct a damon_target struct
434  *
435  * Returns the pointer to the new struct on success, or NULL otherwise
436  */
437 struct damon_target *damon_new_target(void)
438 {
439 	struct damon_target *t;
440 
441 	t = kmalloc(sizeof(*t), GFP_KERNEL);
442 	if (!t)
443 		return NULL;
444 
445 	t->pid = NULL;
446 	t->nr_regions = 0;
447 	INIT_LIST_HEAD(&t->regions_list);
448 	INIT_LIST_HEAD(&t->list);
449 
450 	return t;
451 }
452 
453 void damon_add_target(struct damon_ctx *ctx, struct damon_target *t)
454 {
455 	list_add_tail(&t->list, &ctx->adaptive_targets);
456 }
457 
458 bool damon_targets_empty(struct damon_ctx *ctx)
459 {
460 	return list_empty(&ctx->adaptive_targets);
461 }
462 
463 static void damon_del_target(struct damon_target *t)
464 {
465 	list_del(&t->list);
466 }
467 
468 void damon_free_target(struct damon_target *t)
469 {
470 	struct damon_region *r, *next;
471 
472 	damon_for_each_region_safe(r, next, t)
473 		damon_free_region(r);
474 	kfree(t);
475 }
476 
477 void damon_destroy_target(struct damon_target *t)
478 {
479 	damon_del_target(t);
480 	damon_free_target(t);
481 }
482 
483 unsigned int damon_nr_regions(struct damon_target *t)
484 {
485 	return t->nr_regions;
486 }
487 
488 struct damon_ctx *damon_new_ctx(void)
489 {
490 	struct damon_ctx *ctx;
491 
492 	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
493 	if (!ctx)
494 		return NULL;
495 
496 	init_completion(&ctx->kdamond_started);
497 
498 	ctx->attrs.sample_interval = 5 * 1000;
499 	ctx->attrs.aggr_interval = 100 * 1000;
500 	ctx->attrs.ops_update_interval = 60 * 1000 * 1000;
501 
502 	ctx->passed_sample_intervals = 0;
503 	/* These will be set from kdamond_init_intervals_sis() */
504 	ctx->next_aggregation_sis = 0;
505 	ctx->next_ops_update_sis = 0;
506 
507 	mutex_init(&ctx->kdamond_lock);
508 	mutex_init(&ctx->call_control_lock);
509 	mutex_init(&ctx->walk_control_lock);
510 
511 	ctx->attrs.min_nr_regions = 10;
512 	ctx->attrs.max_nr_regions = 1000;
513 
514 	INIT_LIST_HEAD(&ctx->adaptive_targets);
515 	INIT_LIST_HEAD(&ctx->schemes);
516 
517 	return ctx;
518 }
519 
520 static void damon_destroy_targets(struct damon_ctx *ctx)
521 {
522 	struct damon_target *t, *next_t;
523 
524 	if (ctx->ops.cleanup) {
525 		ctx->ops.cleanup(ctx);
526 		return;
527 	}
528 
529 	damon_for_each_target_safe(t, next_t, ctx)
530 		damon_destroy_target(t);
531 }
532 
533 void damon_destroy_ctx(struct damon_ctx *ctx)
534 {
535 	struct damos *s, *next_s;
536 
537 	damon_destroy_targets(ctx);
538 
539 	damon_for_each_scheme_safe(s, next_s, ctx)
540 		damon_destroy_scheme(s);
541 
542 	kfree(ctx);
543 }
544 
545 static unsigned int damon_age_for_new_attrs(unsigned int age,
546 		struct damon_attrs *old_attrs, struct damon_attrs *new_attrs)
547 {
548 	return age * old_attrs->aggr_interval / new_attrs->aggr_interval;
549 }
550 
551 /* convert access ratio in bp (per 10,000) to nr_accesses */
552 static unsigned int damon_accesses_bp_to_nr_accesses(
553 		unsigned int accesses_bp, struct damon_attrs *attrs)
554 {
555 	return accesses_bp * damon_max_nr_accesses(attrs) / 10000;
556 }
557 
558 /*
559  * Convert nr_accesses to access ratio in bp (per 10,000).
560  *
561  * Callers should ensure attrs.aggr_interval is not zero, like
562  * damon_update_monitoring_results() does.  Otherwise, divide-by-zero would
563  * happen.
564  */
565 static unsigned int damon_nr_accesses_to_accesses_bp(
566 		unsigned int nr_accesses, struct damon_attrs *attrs)
567 {
568 	return nr_accesses * 10000 / damon_max_nr_accesses(attrs);
569 }
570 
571 static unsigned int damon_nr_accesses_for_new_attrs(unsigned int nr_accesses,
572 		struct damon_attrs *old_attrs, struct damon_attrs *new_attrs)
573 {
574 	return damon_accesses_bp_to_nr_accesses(
575 			damon_nr_accesses_to_accesses_bp(
576 				nr_accesses, old_attrs),
577 			new_attrs);
578 }
579 
580 static void damon_update_monitoring_result(struct damon_region *r,
581 		struct damon_attrs *old_attrs, struct damon_attrs *new_attrs)
582 {
583 	r->nr_accesses = damon_nr_accesses_for_new_attrs(r->nr_accesses,
584 			old_attrs, new_attrs);
585 	r->nr_accesses_bp = r->nr_accesses * 10000;
586 	r->age = damon_age_for_new_attrs(r->age, old_attrs, new_attrs);
587 }
588 
589 /*
590  * region->nr_accesses is the number of sampling intervals in the last
591  * aggregation interval in which an access to the region was found, and
592  * region->age is the number of aggregation intervals its access pattern has
593  * been maintained for.  The real meaning of the two fields hence depends on
594  * the current sampling and aggregation intervals.  This function updates
595  * ->nr_accesses and ->age of given damon_ctx's regions for new damon_attrs.
596  */
597 static void damon_update_monitoring_results(struct damon_ctx *ctx,
598 		struct damon_attrs *new_attrs)
599 {
600 	struct damon_attrs *old_attrs = &ctx->attrs;
601 	struct damon_target *t;
602 	struct damon_region *r;
603 
604 	/* if any interval is zero, simply skip the conversion */
605 	if (!old_attrs->sample_interval || !old_attrs->aggr_interval ||
606 			!new_attrs->sample_interval ||
607 			!new_attrs->aggr_interval)
608 		return;
609 
610 	damon_for_each_target(t, ctx)
611 		damon_for_each_region(r, t)
612 			damon_update_monitoring_result(
613 					r, old_attrs, new_attrs);
614 }
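
/*
 * Worked example (editorial, assuming damon_max_nr_accesses() is
 * aggr_interval / sample_interval): with old attrs of 5ms sampling and 100ms
 * aggregation, a region with nr_accesses == 10 was accessed in 10 of 20
 * samples, i.e. 5,000 bp.  After switching to 10ms sampling and 200ms
 * aggregation (still 20 samples per aggregation), the converted nr_accesses
 * stays 5000 * 20 / 10000 == 10, while the age is halved because each
 * aggregation interval is now twice as long.
 */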
615 
616 /**
617  * damon_set_attrs() - Set attributes for the monitoring.
618  * @ctx:		monitoring context
619  * @attrs:		monitoring attributes
620  *
621  * This function should be called while the kdamond is not running, or while
622  * no aggregation of access check results is ongoing (e.g., from
623  * &struct damon_callback->after_aggregation or
624  * &struct damon_callback->after_wmarks_check callbacks).
625  *
626  * Every time interval is in micro-seconds.
627  *
628  * Return: 0 on success, negative error code otherwise.
629  */
630 int damon_set_attrs(struct damon_ctx *ctx, struct damon_attrs *attrs)
631 {
632 	unsigned long sample_interval = attrs->sample_interval ?
633 		attrs->sample_interval : 1;
634 	struct damos *s;
635 
636 	if (attrs->min_nr_regions < 3)
637 		return -EINVAL;
638 	if (attrs->min_nr_regions > attrs->max_nr_regions)
639 		return -EINVAL;
640 	if (attrs->sample_interval > attrs->aggr_interval)
641 		return -EINVAL;
642 
643 	ctx->next_aggregation_sis = ctx->passed_sample_intervals +
644 		attrs->aggr_interval / sample_interval;
645 	ctx->next_ops_update_sis = ctx->passed_sample_intervals +
646 		attrs->ops_update_interval / sample_interval;
647 
648 	damon_update_monitoring_results(ctx, attrs);
649 	ctx->attrs = *attrs;
650 
651 	damon_for_each_scheme(s, ctx)
652 		damos_set_next_apply_sis(s, ctx);
653 
654 	return 0;
655 }
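
/*
 * Example (editorial sketch, not part of core.c): tuning a context to 10ms
 * sampling, 200ms aggregation and 10s operations update.  As the kernel-doc
 * above notes, every interval is in microseconds.
 */
static int example_tune_attrs(struct damon_ctx *ctx)
{
	struct damon_attrs attrs = {
		.sample_interval = 10 * 1000,
		.aggr_interval = 200 * 1000,
		.ops_update_interval = 10 * 1000 * 1000,
		.min_nr_regions = 10,
		.max_nr_regions = 1000,
	};

	/* rejected if min_nr_regions < 3 or the intervals are inconsistent */
	return damon_set_attrs(ctx, &attrs);
}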
656 
657 /**
658  * damon_set_schemes() - Set data access monitoring based operation schemes.
659  * @ctx:	monitoring context
660  * @schemes:	array of the schemes
661  * @nr_schemes:	number of entries in @schemes
662  *
663  * This function should not be called while the kdamond of the context is
664  * running.
665  */
666 void damon_set_schemes(struct damon_ctx *ctx, struct damos **schemes,
667 			ssize_t nr_schemes)
668 {
669 	struct damos *s, *next;
670 	ssize_t i;
671 
672 	damon_for_each_scheme_safe(s, next, ctx)
673 		damon_destroy_scheme(s);
674 	for (i = 0; i < nr_schemes; i++)
675 		damon_add_scheme(ctx, schemes[i]);
676 }
677 
678 static struct damos_quota_goal *damos_nth_quota_goal(
679 		int n, struct damos_quota *q)
680 {
681 	struct damos_quota_goal *goal;
682 	int i = 0;
683 
684 	damos_for_each_quota_goal(goal, q) {
685 		if (i++ == n)
686 			return goal;
687 	}
688 	return NULL;
689 }
690 
691 static void damos_commit_quota_goal(
692 		struct damos_quota_goal *dst, struct damos_quota_goal *src)
693 {
694 	dst->metric = src->metric;
695 	dst->target_value = src->target_value;
696 	if (dst->metric == DAMOS_QUOTA_USER_INPUT)
697 		dst->current_value = src->current_value;
698 	/* keep last_psi_total as is, since it will be updated in next cycle */
699 }
700 
701 /**
702  * damos_commit_quota_goals() - Commit DAMOS quota goals to another quota.
703  * @dst:	The commit destination DAMOS quota.
704  * @src:	The commit source DAMOS quota.
705  *
706  * Copies user-specified parameters for quota goals from @src to @dst.  Users
707  * should use this function for updating quota goal-level parameters of
708  * running DAMON contexts, instead of manual in-place updates.
709  *
710  * This function should be called from parameters-update safe context, like
711  * DAMON callbacks.
712  */
713 int damos_commit_quota_goals(struct damos_quota *dst, struct damos_quota *src)
714 {
715 	struct damos_quota_goal *dst_goal, *next, *src_goal, *new_goal;
716 	int i = 0, j = 0;
717 
718 	damos_for_each_quota_goal_safe(dst_goal, next, dst) {
719 		src_goal = damos_nth_quota_goal(i++, src);
720 		if (src_goal)
721 			damos_commit_quota_goal(dst_goal, src_goal);
722 		else
723 			damos_destroy_quota_goal(dst_goal);
724 	}
725 	damos_for_each_quota_goal_safe(src_goal, next, src) {
726 		if (j++ < i)
727 			continue;
728 		new_goal = damos_new_quota_goal(
729 				src_goal->metric, src_goal->target_value);
730 		if (!new_goal)
731 			return -ENOMEM;
732 		damos_add_quota_goal(dst, new_goal);
733 	}
734 	return 0;
735 }
736 
737 static int damos_commit_quota(struct damos_quota *dst, struct damos_quota *src)
738 {
739 	int err;
740 
741 	dst->reset_interval = src->reset_interval;
742 	dst->ms = src->ms;
743 	dst->sz = src->sz;
744 	err = damos_commit_quota_goals(dst, src);
745 	if (err)
746 		return err;
747 	dst->weight_sz = src->weight_sz;
748 	dst->weight_nr_accesses = src->weight_nr_accesses;
749 	dst->weight_age = src->weight_age;
750 	return 0;
751 }
752 
753 static struct damos_filter *damos_nth_filter(int n, struct damos *s)
754 {
755 	struct damos_filter *filter;
756 	int i = 0;
757 
758 	damos_for_each_filter(filter, s) {
759 		if (i++ == n)
760 			return filter;
761 	}
762 	return NULL;
763 }
764 
765 static void damos_commit_filter_arg(
766 		struct damos_filter *dst, struct damos_filter *src)
767 {
768 	switch (dst->type) {
769 	case DAMOS_FILTER_TYPE_MEMCG:
770 		dst->memcg_id = src->memcg_id;
771 		break;
772 	case DAMOS_FILTER_TYPE_ADDR:
773 		dst->addr_range = src->addr_range;
774 		break;
775 	case DAMOS_FILTER_TYPE_TARGET:
776 		dst->target_idx = src->target_idx;
777 		break;
778 	default:
779 		break;
780 	}
781 }
782 
783 static void damos_commit_filter(
784 		struct damos_filter *dst, struct damos_filter *src)
785 {
786 	dst->type = src->type;
787 	dst->matching = src->matching;
788 	dst->matching = src->matching;
	dst->allow = src->allow;
789 }
790 
791 static int damos_commit_filters(struct damos *dst, struct damos *src)
792 {
793 	struct damos_filter *dst_filter, *next, *src_filter, *new_filter;
794 	int i = 0, j = 0;
795 
796 	damos_for_each_filter_safe(dst_filter, next, dst) {
797 		src_filter = damos_nth_filter(i++, src);
798 		if (src_filter)
799 			damos_commit_filter(dst_filter, src_filter);
800 		else
801 			damos_destroy_filter(dst_filter);
802 	}
803 
804 	damos_for_each_filter_safe(src_filter, next, src) {
805 		if (j++ < i)
806 			continue;
807 
808 		new_filter = damos_new_filter(
809 				src_filter->type, src_filter->matching,
810 				src_filter->allow);
811 		if (!new_filter)
812 			return -ENOMEM;
813 		damos_commit_filter_arg(new_filter, src_filter);
814 		damos_add_filter(dst, new_filter);
815 	}
816 	return 0;
817 }
818 
819 static struct damos *damon_nth_scheme(int n, struct damon_ctx *ctx)
820 {
821 	struct damos *s;
822 	int i = 0;
823 
824 	damon_for_each_scheme(s, ctx) {
825 		if (i++ == n)
826 			return s;
827 	}
828 	return NULL;
829 }
830 
831 static int damos_commit(struct damos *dst, struct damos *src)
832 {
833 	int err;
834 
835 	dst->pattern = src->pattern;
836 	dst->action = src->action;
837 	dst->apply_interval_us = src->apply_interval_us;
838 
839 	err = damos_commit_quota(&dst->quota, &src->quota);
840 	if (err)
841 		return err;
842 
843 	dst->wmarks = src->wmarks;
844 
845 	err = damos_commit_filters(dst, src);
846 	return err;
847 }
848 
849 static int damon_commit_schemes(struct damon_ctx *dst, struct damon_ctx *src)
850 {
851 	struct damos *dst_scheme, *next, *src_scheme, *new_scheme;
852 	int i = 0, j = 0, err;
853 
854 	damon_for_each_scheme_safe(dst_scheme, next, dst) {
855 		src_scheme = damon_nth_scheme(i++, src);
856 		if (src_scheme) {
857 			err = damos_commit(dst_scheme, src_scheme);
858 			if (err)
859 				return err;
860 		} else {
861 			damon_destroy_scheme(dst_scheme);
862 		}
863 	}
864 
865 	damon_for_each_scheme_safe(src_scheme, next, src) {
866 		if (j++ < i)
867 			continue;
868 		new_scheme = damon_new_scheme(&src_scheme->pattern,
869 				src_scheme->action,
870 				src_scheme->apply_interval_us,
871 				&src_scheme->quota, &src_scheme->wmarks,
872 				NUMA_NO_NODE);
873 		if (!new_scheme)
874 			return -ENOMEM;
875 		err = damos_commit(new_scheme, src_scheme);
876 		if (err) {
877 			damon_destroy_scheme(new_scheme);
878 			return err;
879 		}
880 		damon_add_scheme(dst, new_scheme);
881 	}
882 	return 0;
883 }
884 
885 static struct damon_target *damon_nth_target(int n, struct damon_ctx *ctx)
886 {
887 	struct damon_target *t;
888 	int i = 0;
889 
890 	damon_for_each_target(t, ctx) {
891 		if (i++ == n)
892 			return t;
893 	}
894 	return NULL;
895 }
896 
897 /*
898  * The caller should ensure the regions of @src are
899  * 1. valid (end >= start) and
900  * 2. sorted by starting address.
901  *
902  * If @src has no region, @dst keeps current regions.
903  */
904 static int damon_commit_target_regions(
905 		struct damon_target *dst, struct damon_target *src)
906 {
907 	struct damon_region *src_region;
908 	struct damon_addr_range *ranges;
909 	int i = 0, err;
910 
911 	damon_for_each_region(src_region, src)
912 		i++;
913 	if (!i)
914 		return 0;
915 
916 	ranges = kmalloc_array(i, sizeof(*ranges), GFP_KERNEL | __GFP_NOWARN);
917 	if (!ranges)
918 		return -ENOMEM;
919 	i = 0;
920 	damon_for_each_region(src_region, src)
921 		ranges[i++] = src_region->ar;
922 	err = damon_set_regions(dst, ranges, i);
923 	kfree(ranges);
924 	return err;
925 }
926 
927 static int damon_commit_target(
928 		struct damon_target *dst, bool dst_has_pid,
929 		struct damon_target *src, bool src_has_pid)
930 {
931 	int err;
932 
933 	err = damon_commit_target_regions(dst, src);
934 	if (err)
935 		return err;
936 	if (dst_has_pid)
937 		put_pid(dst->pid);
938 	if (src_has_pid)
939 		get_pid(src->pid);
940 	dst->pid = src->pid;
941 	return 0;
942 }
943 
944 static int damon_commit_targets(
945 		struct damon_ctx *dst, struct damon_ctx *src)
946 {
947 	struct damon_target *dst_target, *next, *src_target, *new_target;
948 	int i = 0, j = 0, err;
949 
950 	damon_for_each_target_safe(dst_target, next, dst) {
951 		src_target = damon_nth_target(i++, src);
952 		if (src_target) {
953 			err = damon_commit_target(
954 					dst_target, damon_target_has_pid(dst),
955 					src_target, damon_target_has_pid(src));
956 			if (err)
957 				return err;
958 		} else {
959 			if (damon_target_has_pid(dst))
960 				put_pid(dst_target->pid);
961 			damon_destroy_target(dst_target);
962 		}
963 	}
964 
965 	damon_for_each_target_safe(src_target, next, src) {
966 		if (j++ < i)
967 			continue;
968 		new_target = damon_new_target();
969 		if (!new_target)
970 			return -ENOMEM;
971 		err = damon_commit_target(new_target, false,
972 				src_target, damon_target_has_pid(src));
973 		if (err) {
974 			damon_destroy_target(new_target);
975 			return err;
976 		}
977 		damon_add_target(dst, new_target);
978 	}
979 	return 0;
980 }
981 
982 /**
983  * damon_commit_ctx() - Commit parameters of a DAMON context to another.
984  * @dst:	The commit destination DAMON context.
985  * @src:	The commit source DAMON context.
986  *
987  * This function copies user-specified parameters from @src to @dst and updates
988  * the internal status and results accordingly.  Users should use this function
989  * for context-level parameters update of running context, instead of manual
990  * in-place updates.
991  *
992  * This function should be called from parameters-update safe context, like
993  * DAMON callbacks.
994  */
995 int damon_commit_ctx(struct damon_ctx *dst, struct damon_ctx *src)
996 {
997 	int err;
998 
999 	err = damon_commit_schemes(dst, src);
1000 	if (err)
1001 		return err;
1002 	err = damon_commit_targets(dst, src);
1003 	if (err)
1004 		return err;
1005 	/*
1006 	 * schemes and targets should be updated first, since
1007 	 * 1. damon_set_attrs() updates monitoring results of targets and
1008 	 * next_apply_sis of schemes, and
1009 	 * 2. ops update should be done after pid handling is done (target
1010 	 *    committing requires putting pids).
1011 	 */
1012 	err = damon_set_attrs(dst, &src->attrs);
1013 	if (err)
1014 		return err;
1015 	dst->ops = src->ops;
1016 
1017 	return 0;
1018 }
1019 
1020 /**
1021  * damon_nr_running_ctxs() - Return number of currently running contexts.
1022  */
1023 int damon_nr_running_ctxs(void)
1024 {
1025 	int nr_ctxs;
1026 
1027 	mutex_lock(&damon_lock);
1028 	nr_ctxs = nr_running_ctxs;
1029 	mutex_unlock(&damon_lock);
1030 
1031 	return nr_ctxs;
1032 }
1033 
1034 /* Returns the size upper limit for each monitoring region */
1035 static unsigned long damon_region_sz_limit(struct damon_ctx *ctx)
1036 {
1037 	struct damon_target *t;
1038 	struct damon_region *r;
1039 	unsigned long sz = 0;
1040 
1041 	damon_for_each_target(t, ctx) {
1042 		damon_for_each_region(r, t)
1043 			sz += damon_sz_region(r);
1044 	}
1045 
1046 	if (ctx->attrs.min_nr_regions)
1047 		sz /= ctx->attrs.min_nr_regions;
1048 	if (sz < DAMON_MIN_REGION)
1049 		sz = DAMON_MIN_REGION;
1050 
1051 	return sz;
1052 }
1053 
1054 static int kdamond_fn(void *data);
1055 
1056 /*
1057  * __damon_start() - Starts monitoring with given context.
1058  * @ctx:	monitoring context
1059  *
1060  * This function should be called while damon_lock is held.
1061  *
1062  * Return: 0 on success, negative error code otherwise.
1063  */
1064 static int __damon_start(struct damon_ctx *ctx)
1065 {
1066 	int err = -EBUSY;
1067 
1068 	mutex_lock(&ctx->kdamond_lock);
1069 	if (!ctx->kdamond) {
1070 		err = 0;
1071 		reinit_completion(&ctx->kdamond_started);
1072 		ctx->kdamond = kthread_run(kdamond_fn, ctx, "kdamond.%d",
1073 				nr_running_ctxs);
1074 		if (IS_ERR(ctx->kdamond)) {
1075 			err = PTR_ERR(ctx->kdamond);
1076 			ctx->kdamond = NULL;
1077 		} else {
1078 			wait_for_completion(&ctx->kdamond_started);
1079 		}
1080 	}
1081 	mutex_unlock(&ctx->kdamond_lock);
1082 
1083 	return err;
1084 }
1085 
1086 /**
1087  * damon_start() - Starts monitoring for a given group of contexts.
1088  * @ctxs:	an array of the pointers for contexts to start monitoring
1089  * @nr_ctxs:	size of @ctxs
1090  * @exclusive:	exclusiveness of this contexts group
1091  *
1092  * This function starts a group of monitoring threads for a group of monitoring
1093  * contexts.  One thread per context is created and run in parallel.  The
1094  * caller should handle synchronization between the threads by itself.  If
1095  * @exclusive is true and a group of threads created by another
1096  * 'damon_start()' call is currently running, this function does nothing but
1097  * returns -EBUSY.
1098  *
1099  * Return: 0 on success, negative error code otherwise.
1100  */
1101 int damon_start(struct damon_ctx **ctxs, int nr_ctxs, bool exclusive)
1102 {
1103 	int i;
1104 	int err = 0;
1105 
1106 	mutex_lock(&damon_lock);
1107 	if ((exclusive && nr_running_ctxs) ||
1108 			(!exclusive && running_exclusive_ctxs)) {
1109 		mutex_unlock(&damon_lock);
1110 		return -EBUSY;
1111 	}
1112 
1113 	for (i = 0; i < nr_ctxs; i++) {
1114 		err = __damon_start(ctxs[i]);
1115 		if (err)
1116 			break;
1117 		nr_running_ctxs++;
1118 	}
1119 	if (exclusive && nr_running_ctxs)
1120 		running_exclusive_ctxs = true;
1121 	mutex_unlock(&damon_lock);
1122 
1123 	return err;
1124 }
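
/*
 * Example (editorial sketch, not part of core.c): running a single context
 * exclusively and stopping it later.  @ctx is assumed to be fully set up
 * (operations set, targets and optionally schemes) beforehand.
 */
static int example_run_once(struct damon_ctx *ctx)
{
	int err = damon_start(&ctx, 1, true);

	if (err)
		return err;
	/* ... let the kdamond monitor for a while ... */
	return damon_stop(&ctx, 1);
}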
1125 
1126 /*
1127  * __damon_stop() - Stops monitoring of a given context.
1128  * @ctx:	monitoring context
1129  *
1130  * Return: 0 on success, negative error code otherwise.
1131  */
1132 static int __damon_stop(struct damon_ctx *ctx)
1133 {
1134 	struct task_struct *tsk;
1135 
1136 	mutex_lock(&ctx->kdamond_lock);
1137 	tsk = ctx->kdamond;
1138 	if (tsk) {
1139 		get_task_struct(tsk);
1140 		mutex_unlock(&ctx->kdamond_lock);
1141 		kthread_stop_put(tsk);
1142 		return 0;
1143 	}
1144 	mutex_unlock(&ctx->kdamond_lock);
1145 
1146 	return -EPERM;
1147 }
1148 
1149 /**
1150  * damon_stop() - Stops monitoring for a given group of contexts.
1151  * @ctxs:	an array of the pointers for contexts to stop monitoring
1152  * @nr_ctxs:	size of @ctxs
1153  *
1154  * Return: 0 on success, negative error code otherwise.
1155  */
1156 int damon_stop(struct damon_ctx **ctxs, int nr_ctxs)
1157 {
1158 	int i, err = 0;
1159 
1160 	for (i = 0; i < nr_ctxs; i++) {
1161 		/* nr_running_ctxs is decremented in kdamond_fn */
1162 		err = __damon_stop(ctxs[i]);
1163 		if (err)
1164 			break;
1165 	}
1166 	return err;
1167 }
1168 
1169 static bool damon_is_running(struct damon_ctx *ctx)
1170 {
1171 	bool running;
1172 
1173 	mutex_lock(&ctx->kdamond_lock);
1174 	running = ctx->kdamond != NULL;
1175 	mutex_unlock(&ctx->kdamond_lock);
1176 	return running;
1177 }
1178 
1179 /**
1180  * damon_call() - Invoke a given function on DAMON worker thread (kdamond).
1181  * @ctx:	DAMON context to call the function for.
1182  * @control:	Control variable of the call request.
1183  *
1184  * Ask DAMON worker thread (kdamond) of @ctx to call a function with an
1185  * argument data that respectively passed via &damon_call_control->fn and
1186  * &damon_call_control->data of @control, and wait until the kdamond finishes
1187  * handling of the request.
1188  *
1189  * The kdamond executes the function with the argument in the main loop, just
1190  * after a sampling of the iteration is finished.  The function can hence
1191  * safely access the internal data of the &struct damon_ctx without additional
1192  * synchronization.  The return value of the function will be saved in
1193  * &damon_call_control->return_code.
1194  *
1195  * Return: 0 on success, negative error code otherwise.
1196  */
1197 int damon_call(struct damon_ctx *ctx, struct damon_call_control *control)
1198 {
1199 	init_completion(&control->completion);
1200 	control->canceled = false;
1201 
1202 	mutex_lock(&ctx->call_control_lock);
1203 	if (ctx->call_control) {
1204 		mutex_unlock(&ctx->call_control_lock);
1205 		return -EBUSY;
1206 	}
1207 	ctx->call_control = control;
1208 	mutex_unlock(&ctx->call_control_lock);
1209 	if (!damon_is_running(ctx))
1210 		return -EINVAL;
1211 	wait_for_completion(&control->completion);
1212 	if (control->canceled)
1213 		return -ECANCELED;
1214 	return 0;
1215 }
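
/*
 * Example (editorial sketch, not part of core.c): reading the number of
 * regions of the first target from the kdamond's own context.  The
 * int (*fn)(void *data) callback prototype and the field names of struct
 * damon_call_control are assumptions based on include/linux/damon.h.
 */
static int example_count_first_target_regions(void *data)
{
	struct damon_ctx *ctx = data;
	struct damon_target *t;

	damon_for_each_target(t, ctx)
		return damon_nr_regions(t);
	return 0;
}

static int example_read_nr_regions(struct damon_ctx *ctx)
{
	struct damon_call_control control = {
		.fn = example_count_first_target_regions,
		.data = ctx,
	};
	int err = damon_call(ctx, &control);

	return err ? err : control.return_code;
}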
1216 
1217 /**
1218  * damos_walk() - Invoke a given function while DAMOS walks regions.
1219  * @ctx:	DAMON context to call the functions for.
1220  * @control:	Control variable of the walk request.
1221  *
1222  * Ask DAMON worker thread (kdamond) of @ctx to call a function for each region
1223  * that the kdamond will apply DAMOS action to, and wait until the kdamond
1224  * finishes handling of the request.
1225  *
1226  * The kdamond executes the given function in the main loop, for each region
1227  * just after it applied any DAMOS actions of @ctx to it.  The invocation is
1228  * made only within one &damos->apply_interval_us since damos_walk()
1229  * invocation, for each scheme.  The given callback function can hence safely
1230  * access the internal data of &struct damon_ctx and &struct damon_region that
1231  * each of the scheme will apply the action for next interval, without
1232  * additional synchronizations against the kdamond.  If every scheme of @ctx
1233  * passed at least one &damos->apply_interval_us, kdamond marks the request as
1234  * completed so that damos_walk() can wake up and return.
1235  *
1236  * Return: 0 on success, negative error code otherwise.
1237  */
1238 int damos_walk(struct damon_ctx *ctx, struct damos_walk_control *control)
1239 {
1240 	init_completion(&control->completion);
1241 	control->canceled = false;
1242 	mutex_lock(&ctx->walk_control_lock);
1243 	if (ctx->walk_control) {
1244 		mutex_unlock(&ctx->walk_control_lock);
1245 		return -EBUSY;
1246 	}
1247 	ctx->walk_control = control;
1248 	mutex_unlock(&ctx->walk_control_lock);
1249 	if (!damon_is_running(ctx))
1250 		return -EINVAL;
1251 	wait_for_completion(&control->completion);
1252 	if (control->canceled)
1253 		return -ECANCELED;
1254 	return 0;
1255 }
1256 
1257 /*
1258  * Reset the aggregated monitoring results ('nr_accesses' of each region).
1259  */
1260 static void kdamond_reset_aggregated(struct damon_ctx *c)
1261 {
1262 	struct damon_target *t;
1263 	unsigned int ti = 0;	/* target's index */
1264 
1265 	damon_for_each_target(t, c) {
1266 		struct damon_region *r;
1267 
1268 		damon_for_each_region(r, t) {
1269 			trace_damon_aggregated(ti, r, damon_nr_regions(t));
1270 			r->last_nr_accesses = r->nr_accesses;
1271 			r->nr_accesses = 0;
1272 		}
1273 		ti++;
1274 	}
1275 }
1276 
1277 static void damon_split_region_at(struct damon_target *t,
1278 				  struct damon_region *r, unsigned long sz_r);
1279 
1280 static bool __damos_valid_target(struct damon_region *r, struct damos *s)
1281 {
1282 	unsigned long sz;
1283 	unsigned int nr_accesses = r->nr_accesses_bp / 10000;
1284 
1285 	sz = damon_sz_region(r);
1286 	return s->pattern.min_sz_region <= sz &&
1287 		sz <= s->pattern.max_sz_region &&
1288 		s->pattern.min_nr_accesses <= nr_accesses &&
1289 		nr_accesses <= s->pattern.max_nr_accesses &&
1290 		s->pattern.min_age_region <= r->age &&
1291 		r->age <= s->pattern.max_age_region;
1292 }
1293 
1294 static bool damos_valid_target(struct damon_ctx *c, struct damon_target *t,
1295 		struct damon_region *r, struct damos *s)
1296 {
1297 	bool ret = __damos_valid_target(r, s);
1298 
1299 	if (!ret || !s->quota.esz || !c->ops.get_scheme_score)
1300 		return ret;
1301 
1302 	return c->ops.get_scheme_score(c, t, r, s) >= s->quota.min_score;
1303 }
1304 
1305 /*
1306  * damos_skip_charged_region() - Check if the given region or starting part of
1307  * it is already charged for the DAMOS quota.
1308  * @t:	The target of the region.
1309  * @rp:	The pointer to the region.
1310  * @s:	The scheme to be applied.
1311  *
1312  * If the quota of a scheme has been exceeded in a quota charge window, the
1313  * scheme's action would be applied to only a part of the regions fulfilling
1314  * the target access pattern.  To avoid applying the scheme action only to
1315  * already-applied regions, DAMON skips applying the scheme action to the
1316  * regions that were charged in the previous charge window.
1317  *
1318  * This function checks if a given region should be skipped or not for that
1319  * reason.  If only the starting part of the region has previously been
1320  * charged, this function splits the region into two so that the second one
1321  * covers the area that was not charged in the previous charge window, saves
1322  * the second region in *rp, and returns false, so that the caller can apply
1323  * the DAMON action to the second one.
1324  *
1325  * Return: true if the region should be entirely skipped, false otherwise.
1326  */
1327 static bool damos_skip_charged_region(struct damon_target *t,
1328 		struct damon_region **rp, struct damos *s)
1329 {
1330 	struct damon_region *r = *rp;
1331 	struct damos_quota *quota = &s->quota;
1332 	unsigned long sz_to_skip;
1333 
1334 	/* Skip previously charged regions */
1335 	if (quota->charge_target_from) {
1336 		if (t != quota->charge_target_from)
1337 			return true;
1338 		if (r == damon_last_region(t)) {
1339 			quota->charge_target_from = NULL;
1340 			quota->charge_addr_from = 0;
1341 			return true;
1342 		}
1343 		if (quota->charge_addr_from &&
1344 				r->ar.end <= quota->charge_addr_from)
1345 			return true;
1346 
1347 		if (quota->charge_addr_from && r->ar.start <
1348 				quota->charge_addr_from) {
1349 			sz_to_skip = ALIGN_DOWN(quota->charge_addr_from -
1350 					r->ar.start, DAMON_MIN_REGION);
1351 			if (!sz_to_skip) {
1352 				if (damon_sz_region(r) <= DAMON_MIN_REGION)
1353 					return true;
1354 				sz_to_skip = DAMON_MIN_REGION;
1355 			}
1356 			damon_split_region_at(t, r, sz_to_skip);
1357 			r = damon_next_region(r);
1358 			*rp = r;
1359 		}
1360 		quota->charge_target_from = NULL;
1361 		quota->charge_addr_from = 0;
1362 	}
1363 	return false;
1364 }
1365 
1366 static void damos_update_stat(struct damos *s,
1367 		unsigned long sz_tried, unsigned long sz_applied,
1368 		unsigned long sz_ops_filter_passed)
1369 {
1370 	s->stat.nr_tried++;
1371 	s->stat.sz_tried += sz_tried;
1372 	if (sz_applied)
1373 		s->stat.nr_applied++;
1374 	s->stat.sz_applied += sz_applied;
1375 	s->stat.sz_ops_filter_passed += sz_ops_filter_passed;
1376 }
1377 
1378 static bool damos_filter_match(struct damon_ctx *ctx, struct damon_target *t,
1379 		struct damon_region *r, struct damos_filter *filter)
1380 {
1381 	bool matched = false;
1382 	struct damon_target *ti;
1383 	int target_idx = 0;
1384 	unsigned long start, end;
1385 
1386 	switch (filter->type) {
1387 	case DAMOS_FILTER_TYPE_TARGET:
1388 		damon_for_each_target(ti, ctx) {
1389 			if (ti == t)
1390 				break;
1391 			target_idx++;
1392 		}
1393 		matched = target_idx == filter->target_idx;
1394 		break;
1395 	case DAMOS_FILTER_TYPE_ADDR:
1396 		start = ALIGN_DOWN(filter->addr_range.start, DAMON_MIN_REGION);
1397 		end = ALIGN_DOWN(filter->addr_range.end, DAMON_MIN_REGION);
1398 
1399 		/* inside the range */
1400 		if (start <= r->ar.start && r->ar.end <= end) {
1401 			matched = true;
1402 			break;
1403 		}
1404 		/* outside of the range */
1405 		if (r->ar.end <= start || end <= r->ar.start) {
1406 			matched = false;
1407 			break;
1408 		}
1409 		/* start before the range and overlap */
1410 		if (r->ar.start < start) {
1411 			damon_split_region_at(t, r, start - r->ar.start);
1412 			matched = false;
1413 			break;
1414 		}
1415 		/* start inside the range */
1416 		damon_split_region_at(t, r, end - r->ar.start);
1417 		matched = true;
1418 		break;
1419 	default:
1420 		return false;
1421 	}
1422 
1423 	return matched == filter->matching;
1424 }
1425 
1426 static bool damos_filter_out(struct damon_ctx *ctx, struct damon_target *t,
1427 		struct damon_region *r, struct damos *s)
1428 {
1429 	struct damos_filter *filter;
1430 
1431 	damos_for_each_filter(filter, s) {
1432 		if (damos_filter_match(ctx, t, r, filter))
1433 			return !filter->allow;
1434 	}
1435 	return false;
1436 }
1437 
1438 /*
1439  * damos_walk_call_walk() - Call &damos_walk_control->walk_fn.
1440  * @ctx:	The context of &damon_ctx->walk_control.
1441  * @t:		The monitoring target of @r that @s will be applied.
1442  * @r:		The region of @t that @s will be applied.
1443  * @s:		The scheme of @ctx that will be applied to @r.
1444  *
1445  * This function is called from kdamond whenever it asked the operation set to
1446  * apply a DAMOS scheme action to a region.  If a DAMOS walk request is
1447  * installed by damos_walk() and not yet uninstalled, invoke it.
1448  */
1449 static void damos_walk_call_walk(struct damon_ctx *ctx, struct damon_target *t,
1450 		struct damon_region *r, struct damos *s,
1451 		unsigned long sz_filter_passed)
1452 {
1453 	struct damos_walk_control *control;
1454 
1455 	mutex_lock(&ctx->walk_control_lock);
1456 	control = ctx->walk_control;
1457 	mutex_unlock(&ctx->walk_control_lock);
1458 	if (!control)
1459 		return;
1460 	control->walk_fn(control->data, ctx, t, r, s, sz_filter_passed);
1461 }
1462 
1463 /*
1464  * damos_walk_complete() - Complete DAMOS walk request if all walks are done.
1465  * @ctx:	The context of &damon_ctx->walk_control.
1466  * @s:		A scheme of @ctx that all walks are now done.
1467  *
1468  * This function is called when kdamond finished applying the action of a DAMOS
1469  * scheme to all regions that eligible for the given &damos->apply_interval_us.
1470  * If every scheme of @ctx including @s now finished walking for at least one
1471  * &damos->apply_interval_us, this function makrs the handling of the given
1472  * DAMOS walk request is done, so that damos_walk() can wake up and return.
1473  */
1474 static void damos_walk_complete(struct damon_ctx *ctx, struct damos *s)
1475 {
1476 	struct damos *siter;
1477 	struct damos_walk_control *control;
1478 
1479 	mutex_lock(&ctx->walk_control_lock);
1480 	control = ctx->walk_control;
1481 	mutex_unlock(&ctx->walk_control_lock);
1482 	if (!control)
1483 		return;
1484 
1485 	s->walk_completed = true;
1486 	/* if all schemes completed, signal completion to walker */
1487 	damon_for_each_scheme(siter, ctx) {
1488 		if (!siter->walk_completed)
1489 			return;
1490 	}
1491 	complete(&control->completion);
1492 	mutex_lock(&ctx->walk_control_lock);
1493 	ctx->walk_control = NULL;
1494 	mutex_unlock(&ctx->walk_control_lock);
1495 }
1496 
1497 /*
1498  * damos_walk_cancel() - Cancel the current DAMOS walk request.
1499  * @ctx:	The context of &damon_ctx->walk_control.
1500  *
1501  * This function is called when @ctx is deactivated by DAMOS watermarks, DAMOS
1502  * walk is requested but there is no DAMOS scheme to walk for, or the kdamond
1503  * is already out of the main loop and therefore going to be terminated, and
1504  * hence cannot continue the walks.  This function therefore marks the walk
1505  * request as canceled, so that damos_walk() can wake up and return.
1506  */
1507 static void damos_walk_cancel(struct damon_ctx *ctx)
1508 {
1509 	struct damos_walk_control *control;
1510 
1511 	mutex_lock(&ctx->walk_control_lock);
1512 	control = ctx->walk_control;
1513 	mutex_unlock(&ctx->walk_control_lock);
1514 
1515 	if (!control)
1516 		return;
1517 	control->canceled = true;
1518 	complete(&control->completion);
1519 	mutex_lock(&ctx->walk_control_lock);
1520 	ctx->walk_control = NULL;
1521 	mutex_unlock(&ctx->walk_control_lock);
1522 }
1523 
1524 static void damos_apply_scheme(struct damon_ctx *c, struct damon_target *t,
1525 		struct damon_region *r, struct damos *s)
1526 {
1527 	struct damos_quota *quota = &s->quota;
1528 	unsigned long sz = damon_sz_region(r);
1529 	struct timespec64 begin, end;
1530 	unsigned long sz_applied = 0;
1531 	unsigned long sz_ops_filter_passed = 0;
1532 	int err = 0;
1533 	/*
1534 	 * We plan to support multiple context per kdamond, as DAMON sysfs
1535 	 * We plan to support multiple contexts per kdamond, as the DAMON sysfs
1536 	 * per kdamond is supported for now.  So, we can simply use '0' context
1537 	 * index here.
1538 	 */
1539 	unsigned int cidx = 0;
1540 	struct damos *siter;		/* schemes iterator */
1541 	unsigned int sidx = 0;
1542 	struct damon_target *titer;	/* targets iterator */
1543 	unsigned int tidx = 0;
1544 	bool do_trace = false;
1545 
1546 	/* get indices for trace_damos_before_apply() */
1547 	if (trace_damos_before_apply_enabled()) {
1548 		damon_for_each_scheme(siter, c) {
1549 			if (siter == s)
1550 				break;
1551 			sidx++;
1552 		}
1553 		damon_for_each_target(titer, c) {
1554 			if (titer == t)
1555 				break;
1556 			tidx++;
1557 		}
1558 		do_trace = true;
1559 	}
1560 
1561 	if (c->ops.apply_scheme) {
1562 		if (quota->esz && quota->charged_sz + sz > quota->esz) {
1563 			sz = ALIGN_DOWN(quota->esz - quota->charged_sz,
1564 					DAMON_MIN_REGION);
1565 			if (!sz)
1566 				goto update_stat;
1567 			damon_split_region_at(t, r, sz);
1568 		}
1569 		if (damos_filter_out(c, t, r, s))
1570 			return;
1571 		ktime_get_coarse_ts64(&begin);
1572 		if (c->callback.before_damos_apply)
1573 			err = c->callback.before_damos_apply(c, t, r, s);
1574 		if (!err) {
1575 			trace_damos_before_apply(cidx, sidx, tidx, r,
1576 					damon_nr_regions(t), do_trace);
1577 			sz_applied = c->ops.apply_scheme(c, t, r, s,
1578 					&sz_ops_filter_passed);
1579 		}
1580 		damos_walk_call_walk(c, t, r, s, sz_ops_filter_passed);
1581 		ktime_get_coarse_ts64(&end);
1582 		quota->total_charged_ns += timespec64_to_ns(&end) -
1583 			timespec64_to_ns(&begin);
1584 		quota->charged_sz += sz;
1585 		if (quota->esz && quota->charged_sz >= quota->esz) {
1586 			quota->charge_target_from = t;
1587 			quota->charge_addr_from = r->ar.end + 1;
1588 		}
1589 	}
1590 	if (s->action != DAMOS_STAT)
1591 		r->age = 0;
1592 
1593 update_stat:
1594 	damos_update_stat(s, sz, sz_applied, sz_ops_filter_passed);
1595 }
1596 
1597 static void damon_do_apply_schemes(struct damon_ctx *c,
1598 				   struct damon_target *t,
1599 				   struct damon_region *r)
1600 {
1601 	struct damos *s;
1602 
1603 	damon_for_each_scheme(s, c) {
1604 		struct damos_quota *quota = &s->quota;
1605 
1606 		if (c->passed_sample_intervals < s->next_apply_sis)
1607 			continue;
1608 
1609 		if (!s->wmarks.activated)
1610 			continue;
1611 
1612 		/* Check the quota */
1613 		if (quota->esz && quota->charged_sz >= quota->esz)
1614 			continue;
1615 
1616 		if (damos_skip_charged_region(t, &r, s))
1617 			continue;
1618 
1619 		if (!damos_valid_target(c, t, r, s))
1620 			continue;
1621 
1622 		damos_apply_scheme(c, t, r, s);
1623 	}
1624 }
1625 
1626 /*
1627  * damon_feed_loop_next_input() - get next input to achieve a target score.
1628  * @last_input	The last input.
1629  * @score	Current score achieved with @last_input.
1630  *
1631  * Calculate next input to achieve the target score, based on the last input
1632  * and current score.  Assuming the input and the score are positively
1633  * proportional, calculate how much compensation should be added to or
1634  * subtracted from the last input as a proportion of the last input.  Avoid
1635  * next input always being zero by setting it non-zero always.  In short form
1636  * (assuming support of float and signed calculations), the algorithm is as
1637  * below.
1638  *
1639  * next_input = max(last_input * ((goal - current) / goal + 1), 1)
1640  *
1641  * For simple implementation, we assume the target score is always 10,000.  The
1642  * caller should adjust @score for this.
1643  *
1644  * Returns the next input that is assumed to achieve the target score.
1645  */
1646 static unsigned long damon_feed_loop_next_input(unsigned long last_input,
1647 		unsigned long score)
1648 {
1649 	const unsigned long goal = 10000;
1650 	/* Set minimum input as 10000 to avoid compensation be zero */
1651 	/* Set the minimum input as 10000 to keep the compensation from being zero */
1652 	unsigned long score_goal_diff, compensation;
1653 	bool over_achieving = score > goal;
1654 
1655 	if (score == goal)
1656 		return last_input;
1657 	if (score >= goal * 2)
1658 		return min_input;
1659 
1660 	if (over_achieving)
1661 		score_goal_diff = score - goal;
1662 	else
1663 		score_goal_diff = goal - score;
1664 
1665 	if (last_input < ULONG_MAX / score_goal_diff)
1666 		compensation = last_input * score_goal_diff / goal;
1667 	else
1668 		compensation = last_input / goal * score_goal_diff;
1669 
1670 	if (over_achieving)
1671 		return max(last_input - compensation, min_input);
1672 	if (last_input < ULONG_MAX - compensation)
1673 		return last_input + compensation;
1674 	return ULONG_MAX;
1675 }
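
/*
 * Worked example (editorial): if the last input was 1,000,000 and the current
 * score is 5,000 (half of the 10,000 goal), score_goal_diff is 5,000 and the
 * compensation is 1,000,000 * 5,000 / 10,000 == 500,000, so the next input
 * becomes 1,500,000.  If the score were 15,000 instead, the same compensation
 * would be subtracted, giving 500,000.  A score of 20,000 or more returns the
 * 10,000 minimum input immediately.
 */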
1676 
1677 #ifdef CONFIG_PSI
1678 
1679 static u64 damos_get_some_mem_psi_total(void)
1680 {
1681 	if (static_branch_likely(&psi_disabled))
1682 		return 0;
1683 	return div_u64(psi_system.total[PSI_AVGS][PSI_MEM * 2],
1684 			NSEC_PER_USEC);
1685 }
1686 
1687 #else	/* CONFIG_PSI */
1688 
1689 static inline u64 damos_get_some_mem_psi_total(void)
1690 {
1691 	return 0;
1692 };
1693 
1694 #endif	/* CONFIG_PSI */
1695 
1696 static void damos_set_quota_goal_current_value(struct damos_quota_goal *goal)
1697 {
1698 	u64 now_psi_total;
1699 
1700 	switch (goal->metric) {
1701 	case DAMOS_QUOTA_USER_INPUT:
1702 		/* User should already set goal->current_value */
1703 		break;
1704 	case DAMOS_QUOTA_SOME_MEM_PSI_US:
1705 		now_psi_total = damos_get_some_mem_psi_total();
1706 		goal->current_value = now_psi_total - goal->last_psi_total;
1707 		goal->last_psi_total = now_psi_total;
1708 		break;
1709 	default:
1710 		break;
1711 	}
1712 }
1713 
1714 /* Return the highest score since it makes schemes least aggressive */
1715 static unsigned long damos_quota_score(struct damos_quota *quota)
1716 {
1717 	struct damos_quota_goal *goal;
1718 	unsigned long highest_score = 0;
1719 
1720 	damos_for_each_quota_goal(goal, quota) {
1721 		damos_set_quota_goal_current_value(goal);
1722 		highest_score = max(highest_score,
1723 				goal->current_value * 10000 /
1724 				goal->target_value);
1725 	}
1726 
1727 	return highest_score;
1728 }
1729 
1730 /*
1731  * Called only if quota->ms or quota->sz is set, or quota->goals is not empty
1732  */
1733 static void damos_set_effective_quota(struct damos_quota *quota)
1734 {
1735 	unsigned long throughput;
1736 	unsigned long esz = ULONG_MAX;
1737 
1738 	if (!quota->ms && list_empty(&quota->goals)) {
1739 		quota->esz = quota->sz;
1740 		return;
1741 	}
1742 
1743 	if (!list_empty(&quota->goals)) {
1744 		unsigned long score = damos_quota_score(quota);
1745 
1746 		quota->esz_bp = damon_feed_loop_next_input(
1747 				max(quota->esz_bp, 10000UL),
1748 				score);
1749 		esz = quota->esz_bp / 10000;
1750 	}
1751 
1752 	if (quota->ms) {
1753 		if (quota->total_charged_ns)
1754 			throughput = quota->total_charged_sz * 1000000 /
1755 				quota->total_charged_ns;
1756 		else
1757 			throughput = PAGE_SIZE * 1024;
1758 		esz = min(throughput * quota->ms, esz);
1759 	}
1760 
1761 	if (quota->sz && quota->sz < esz)
1762 		esz = quota->sz;
1763 
1764 	quota->esz = esz;
1765 }
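
/*
 * Worked example (editorial): with quota->ms == 10, quota->sz == 64 MiB and
 * no goals, if the previous windows charged 200 MiB of regions in 100ms
 * (100,000,000ns) of apply time, the measured throughput is
 * 209715200 * 1000000 / 100000000 == 2 MiB per millisecond.  The time quota
 * therefore caps esz at 2 MiB * 10 == 20 MiB, which is below the 64 MiB size
 * quota, so the effective quota becomes 20 MiB.
 */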
1766 
1767 static void damos_adjust_quota(struct damon_ctx *c, struct damos *s)
1768 {
1769 	struct damos_quota *quota = &s->quota;
1770 	struct damon_target *t;
1771 	struct damon_region *r;
1772 	unsigned long cumulated_sz;
1773 	unsigned int score, max_score = 0;
1774 
1775 	if (!quota->ms && !quota->sz && list_empty(&quota->goals))
1776 		return;
1777 
1778 	/* New charge window starts */
1779 	if (time_after_eq(jiffies, quota->charged_from +
1780 				msecs_to_jiffies(quota->reset_interval))) {
1781 		if (quota->esz && quota->charged_sz >= quota->esz)
1782 			s->stat.qt_exceeds++;
1783 		quota->total_charged_sz += quota->charged_sz;
1784 		quota->charged_from = jiffies;
1785 		quota->charged_sz = 0;
1786 		damos_set_effective_quota(quota);
1787 	}
1788 
1789 	if (!c->ops.get_scheme_score)
1790 		return;
1791 
1792 	/* Fill up the score histogram */
1793 	memset(c->regions_score_histogram, 0,
1794 			sizeof(*c->regions_score_histogram) *
1795 			(DAMOS_MAX_SCORE + 1));
1796 	damon_for_each_target(t, c) {
1797 		damon_for_each_region(r, t) {
1798 			if (!__damos_valid_target(r, s))
1799 				continue;
1800 			score = c->ops.get_scheme_score(c, t, r, s);
1801 			c->regions_score_histogram[score] +=
1802 				damon_sz_region(r);
1803 			if (score > max_score)
1804 				max_score = score;
1805 		}
1806 	}
1807 
1808 	/* Set the min score limit */
1809 	for (cumulated_sz = 0, score = max_score; ; score--) {
1810 		cumulated_sz += c->regions_score_histogram[score];
1811 		if (cumulated_sz >= quota->esz || !score)
1812 			break;
1813 	}
1814 	quota->min_score = score;
1815 }
1816 
1817 static void kdamond_apply_schemes(struct damon_ctx *c)
1818 {
1819 	struct damon_target *t;
1820 	struct damon_region *r, *next_r;
1821 	struct damos *s;
1822 	unsigned long sample_interval = c->attrs.sample_interval ?
1823 		c->attrs.sample_interval : 1;
1824 	bool has_schemes_to_apply = false;
1825 
1826 	damon_for_each_scheme(s, c) {
1827 		if (c->passed_sample_intervals < s->next_apply_sis)
1828 			continue;
1829 
1830 		if (!s->wmarks.activated)
1831 			continue;
1832 
1833 		has_schemes_to_apply = true;
1834 
1835 		damos_adjust_quota(c, s);
1836 	}
1837 
1838 	if (!has_schemes_to_apply)
1839 		return;
1840 
1841 	damon_for_each_target(t, c) {
1842 		damon_for_each_region_safe(r, next_r, t)
1843 			damon_do_apply_schemes(c, t, r);
1844 	}
1845 
1846 	damon_for_each_scheme(s, c) {
1847 		if (c->passed_sample_intervals < s->next_apply_sis)
1848 			continue;
1849 		damos_walk_complete(c, s);
1850 		s->next_apply_sis = c->passed_sample_intervals +
1851 			(s->apply_interval_us ? s->apply_interval_us :
1852 			 c->attrs.aggr_interval) / sample_interval;
1853 	}
1854 }
1855 
1856 /*
1857  * Merge two adjacent regions into one region
1858  */
1859 static void damon_merge_two_regions(struct damon_target *t,
1860 		struct damon_region *l, struct damon_region *r)
1861 {
1862 	unsigned long sz_l = damon_sz_region(l), sz_r = damon_sz_region(r);
1863 
1864 	l->nr_accesses = (l->nr_accesses * sz_l + r->nr_accesses * sz_r) /
1865 			(sz_l + sz_r);
1866 	l->nr_accesses_bp = l->nr_accesses * 10000;
1867 	l->age = (l->age * sz_l + r->age * sz_r) / (sz_l + sz_r);
1868 	l->ar.end = r->ar.end;
1869 	damon_destroy_region(r, t);
1870 }
1871 
1872 /*
1873  * Merge adjacent regions having similar access frequencies
1874  *
1875  * t		target affected by this merge operation
1876  * thres	'->nr_accesses' diff threshold for the merge
1877  * sz_limit	size upper limit of each region
1878  */
1879 static void damon_merge_regions_of(struct damon_target *t, unsigned int thres,
1880 				   unsigned long sz_limit)
1881 {
1882 	struct damon_region *r, *prev = NULL, *next;
1883 
1884 	damon_for_each_region_safe(r, next, t) {
1885 		if (abs(r->nr_accesses - r->last_nr_accesses) > thres)
1886 			r->age = 0;
1887 		else
1888 			r->age++;
1889 
1890 		if (prev && prev->ar.end == r->ar.start &&
1891 		    abs(prev->nr_accesses - r->nr_accesses) <= thres &&
1892 		    damon_sz_region(prev) + damon_sz_region(r) <= sz_limit)
1893 			damon_merge_two_regions(t, prev, r);
1894 		else
1895 			prev = r;
1896 	}
1897 }
1898 
1899 /*
1900  * Merge adjacent regions having similar access frequencies
1901  *
1902  * threshold	'->nr_accesses' diff threshold for the merge
1903  * sz_limit	size upper limit of each region
1904  *
1905  * This function merges adjacent monitoring target regions whose access
1906  * frequencies are similar.  This is for minimizing the monitoring
1907  * overhead under the dynamically changeable access pattern.  If a merge was
1908  * unnecessarily made, later 'kdamond_split_regions()' will revert it.
1909  *
1910  * The total number of regions could be higher than the user-defined limit,
1911  * max_nr_regions, in some cases.  For example, the user can update
1912  * max_nr_regions to a number lower than the current number of regions
1913  * while DAMON is running.  For such a case, repeat merging until the limit is
1914  * met while increasing @threshold up to the possible maximum level.
1915  */
1916 static void kdamond_merge_regions(struct damon_ctx *c, unsigned int threshold,
1917 				  unsigned long sz_limit)
1918 {
1919 	struct damon_target *t;
1920 	unsigned int nr_regions;
1921 	unsigned int max_thres;
1922 
1923 	max_thres = c->attrs.aggr_interval /
1924 		(c->attrs.sample_interval ?  c->attrs.sample_interval : 1);
1925 	do {
1926 		nr_regions = 0;
1927 		damon_for_each_target(t, c) {
1928 			damon_merge_regions_of(t, threshold, sz_limit);
1929 			nr_regions += damon_nr_regions(t);
1930 		}
1931 		threshold = max(1, threshold * 2);
1932 	} while (nr_regions > c->attrs.max_nr_regions &&
1933 			threshold / 2 < max_thres);
1934 }
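
/*
 * Worked example for the repeated merging above (assumed numbers): with a
 * 100 ms aggregation interval and a 5 ms sampling interval, max_thres is 20.
 * If the threshold starts at 2 and the number of regions stays above
 * max_nr_regions, the merging is retried with thresholds 4, 8, 16 and 32,
 * and then stops since threshold / 2 would no longer be below max_thres.
 */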
1935 
1936 /*
1937  * Split a region in two
1938  *
1939  * r		the region to be split
1940  * sz_r		size of the first sub-region that will be made
1941  */
1942 static void damon_split_region_at(struct damon_target *t,
1943 				  struct damon_region *r, unsigned long sz_r)
1944 {
1945 	struct damon_region *new;
1946 
1947 	new = damon_new_region(r->ar.start + sz_r, r->ar.end);
1948 	if (!new)
1949 		return;
1950 
1951 	r->ar.end = new->ar.start;
1952 
1953 	new->age = r->age;
1954 	new->last_nr_accesses = r->last_nr_accesses;
1955 	new->nr_accesses_bp = r->nr_accesses_bp;
1956 	new->nr_accesses = r->nr_accesses;
1957 
1958 	damon_insert_region(new, r, damon_next_region(r), t);
1959 }
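
/*
 * Illustrative split (assumed addresses): for a region covering
 * [0, 100 << 20) and sz_r of 40 MiB, the original region shrinks to
 * [0, 40 << 20) and a new region covering [40 << 20, 100 << 20) is inserted
 * right after it, inheriting the age and the access statistics.
 */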
1960 
1961 /* Split every region in the given target into 'nr_subs' regions */
1962 static void damon_split_regions_of(struct damon_target *t, int nr_subs)
1963 {
1964 	struct damon_region *r, *next;
1965 	unsigned long sz_region, sz_sub = 0;
1966 	int i;
1967 
1968 	damon_for_each_region_safe(r, next, t) {
1969 		sz_region = damon_sz_region(r);
1970 
1971 		for (i = 0; i < nr_subs - 1 &&
1972 				sz_region > 2 * DAMON_MIN_REGION; i++) {
1973 			/*
1974 			 * Randomly select the size of the left sub-region to be
1975 			 * at least 10% and at most 90% of the original region
1976 			 */
1977 			sz_sub = ALIGN_DOWN(damon_rand(1, 10) *
1978 					sz_region / 10, DAMON_MIN_REGION);
1979 			/* Do not allow blank region */
1980 			if (sz_sub == 0 || sz_sub >= sz_region)
1981 				continue;
1982 
1983 			damon_split_region_at(t, r, sz_sub);
1984 			sz_region = sz_sub;
1985 		}
1986 	}
1987 }
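
/*
 * Worked example for the random splitting above (assumed numbers): for a
 * 100 MiB region and nr_subs of 3, a first damon_rand() result of 3 splits
 * off a 30 MiB left piece and a 70 MiB right piece, and a second result of 6
 * further splits the 30 MiB piece into 18 MiB and 12 MiB, each size aligned
 * to DAMON_MIN_REGION.
 */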
1988 
1989 /*
1990  * Split every target region into randomly-sized small regions
1991  *
1992  * This function splits every target region into random-sized small regions if
1993  * the current total number of regions is equal to or smaller than half of the
1994  * user-specified maximum number of regions.  This is for maximizing the
1995  * monitoring accuracy under the dynamically changeable access patterns.  If a
1996  * split was unnecessarily made, later 'kdamond_merge_regions()' will revert
1997  * it.
1998  */
1999 static void kdamond_split_regions(struct damon_ctx *ctx)
2000 {
2001 	struct damon_target *t;
2002 	unsigned int nr_regions = 0;
2003 	static unsigned int last_nr_regions;
2004 	int nr_subregions = 2;
2005 
2006 	damon_for_each_target(t, ctx)
2007 		nr_regions += damon_nr_regions(t);
2008 
2009 	if (nr_regions > ctx->attrs.max_nr_regions / 2)
2010 		return;
2011 
2012 	/* Maybe the middle of the region has different access frequency */
2013 	if (last_nr_regions == nr_regions &&
2014 			nr_regions < ctx->attrs.max_nr_regions / 3)
2015 		nr_subregions = 3;
2016 
2017 	damon_for_each_target(t, ctx)
2018 		damon_split_regions_of(t, nr_subregions);
2019 
2020 	last_nr_regions = nr_regions;
2021 }
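
/*
 * Worked example for the split decision above (assumed numbers): with
 * max_nr_regions of 1000 and 400 current regions, the targets are split into
 * two sub-regions each.  Three-way splits are tried only if the region count
 * did not change since the last aggregation and is below 1000 / 3, which is
 * not the case here.
 */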
2022 
2023 /*
2024  * Check whether current monitoring should be stopped
2025  *
2026  * The monitoring is stopped when either the user requested to stop, or all
2027  * monitoring targets are invalid.
2028  *
2029  * Returns true if the current monitoring should be stopped.
2030  */
2031 static bool kdamond_need_stop(struct damon_ctx *ctx)
2032 {
2033 	struct damon_target *t;
2034 
2035 	if (kthread_should_stop())
2036 		return true;
2037 
2038 	if (!ctx->ops.target_valid)
2039 		return false;
2040 
2041 	damon_for_each_target(t, ctx) {
2042 		if (ctx->ops.target_valid(t))
2043 			return false;
2044 	}
2045 
2046 	return true;
2047 }
2048 
2049 static int damos_get_wmark_metric_value(enum damos_wmark_metric metric,
2050 					unsigned long *metric_value)
2051 {
2052 	switch (metric) {
2053 	case DAMOS_WMARK_FREE_MEM_RATE:
2054 		*metric_value = global_zone_page_state(NR_FREE_PAGES) * 1000 /
2055 		       totalram_pages();
2056 		return 0;
2057 	default:
2058 		break;
2059 	}
2060 	return -EINVAL;
2061 }
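
/*
 * Illustrative value of the metric above (assumed numbers): with 2 GiB of
 * free memory out of 16 GiB of system RAM, DAMOS_WMARK_FREE_MEM_RATE is
 * 2 * 1000 / 16 == 125, i.e., the free memory rate in per-thousand.
 */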
2062 
2063 /*
2064  * Returns zero if the scheme is active.  Else, returns the time to wait for
2065  * the next watermark check, in microseconds.
2066  */
2067 static unsigned long damos_wmark_wait_us(struct damos *scheme)
2068 {
2069 	unsigned long metric;
2070 
2071 	if (damos_get_wmark_metric_value(scheme->wmarks.metric, &metric))
2072 		return 0;
2073 
2074 	/* higher than high watermark or lower than low watermark */
2075 	if (metric > scheme->wmarks.high || scheme->wmarks.low > metric) {
2076 		if (scheme->wmarks.activated)
2077 			pr_debug("deactivate a scheme (%d) for %s wmark\n",
2078 					scheme->action,
2079 					metric > scheme->wmarks.high ?
2080 					"high" : "low");
2081 		scheme->wmarks.activated = false;
2082 		return scheme->wmarks.interval;
2083 	}
2084 
2085 	/* inactive and higher than middle watermark */
2086 	if ((scheme->wmarks.high >= metric && metric >= scheme->wmarks.mid) &&
2087 			!scheme->wmarks.activated)
2088 		return scheme->wmarks.interval;
2089 
2090 	if (!scheme->wmarks.activated)
2091 		pr_debug("activate a scheme (%d)\n", scheme->action);
2092 	scheme->wmarks.activated = true;
2093 	return 0;
2094 }
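
/*
 * Worked example for the watermark checks above (assumed numbers): with
 * high/mid/low watermarks of 500/400/200, a metric of 600 or 150 deactivates
 * the scheme and returns wmarks.interval; 450 keeps an inactive scheme
 * waiting but lets an already active one keep running; 300 activates the
 * scheme (or keeps it active) and returns 0.
 */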
2095 
2096 static void kdamond_usleep(unsigned long usecs)
2097 {
2098 	if (usecs >= USLEEP_RANGE_UPPER_BOUND)
2099 		schedule_timeout_idle(usecs_to_jiffies(usecs));
2100 	else
2101 		usleep_range_idle(usecs, usecs + 1);
2102 }
2103 
2104 /*
2105  * kdamond_call() - handle damon_call_control.
2106  * @ctx:	The &struct damon_ctx of the kdamond.
2107  * @cancel:	Whether to cancel the invocation of the function.
2108  *
2109  * If there is a &struct damon_call_control request that was registered via
2110  * &damon_call() on @ctx, do or cancel the invocation of the function depending
2111  * on @cancel.  @cancel is set when the kdamond is deactivated by DAMOS
2112  * watermarks, or the kdamond is already out of the main loop and therefore
2113  * will be terminated.
2114  */
2115 static void kdamond_call(struct damon_ctx *ctx, bool cancel)
2116 {
2117 	struct damon_call_control *control;
2118 	int ret = 0;
2119 
2120 	mutex_lock(&ctx->call_control_lock);
2121 	control = ctx->call_control;
2122 	mutex_unlock(&ctx->call_control_lock);
2123 	if (!control)
2124 		return;
2125 	if (cancel) {
2126 		control->canceled = true;
2127 	} else {
2128 		ret = control->fn(control->data);
2129 		control->return_code = ret;
2130 	}
2131 	complete(&control->completion);
2132 	mutex_lock(&ctx->call_control_lock);
2133 	ctx->call_control = NULL;
2134 	mutex_unlock(&ctx->call_control_lock);
2135 }
2136 
2137 /* Returns zero if a scheme got activated, or a negative error code if monitoring should stop instead */
2138 static int kdamond_wait_activation(struct damon_ctx *ctx)
2139 {
2140 	struct damos *s;
2141 	unsigned long wait_time;
2142 	unsigned long min_wait_time = 0;
2143 	bool init_wait_time = false;
2144 
2145 	while (!kdamond_need_stop(ctx)) {
2146 		damon_for_each_scheme(s, ctx) {
2147 			wait_time = damos_wmark_wait_us(s);
2148 			if (!init_wait_time || wait_time < min_wait_time) {
2149 				init_wait_time = true;
2150 				min_wait_time = wait_time;
2151 			}
2152 		}
2153 		if (!min_wait_time)
2154 			return 0;
2155 
2156 		kdamond_usleep(min_wait_time);
2157 
2158 		if (ctx->callback.after_wmarks_check &&
2159 				ctx->callback.after_wmarks_check(ctx))
2160 			break;
2161 		kdamond_call(ctx, true);
2162 		damos_walk_cancel(ctx);
2163 	}
2164 	return -EBUSY;
2165 }
2166 
2167 static void kdamond_init_intervals_sis(struct damon_ctx *ctx)
2168 {
2169 	unsigned long sample_interval = ctx->attrs.sample_interval ?
2170 		ctx->attrs.sample_interval : 1;
2171 	unsigned long apply_interval;
2172 	struct damos *scheme;
2173 
2174 	ctx->passed_sample_intervals = 0;
2175 	ctx->next_aggregation_sis = ctx->attrs.aggr_interval / sample_interval;
2176 	ctx->next_ops_update_sis = ctx->attrs.ops_update_interval /
2177 		sample_interval;
2178 
2179 	damon_for_each_scheme(scheme, ctx) {
2180 		apply_interval = scheme->apply_interval_us ?
2181 			scheme->apply_interval_us : ctx->attrs.aggr_interval;
2182 		scheme->next_apply_sis = apply_interval / sample_interval;
2183 	}
2184 }
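
/*
 * Worked example for the bookkeeping above (assumed numbers): with a 5 ms
 * sampling interval, a 100 ms aggregation interval and a 1 s ops update
 * interval, next_aggregation_sis starts at 20 and next_ops_update_sis at
 * 200, i.e., the number of sampling intervals after which each event is due.
 * A scheme without its own apply_interval_us also gets next_apply_sis of 20.
 */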
2185 
2186 /*
2187  * The monitoring daemon that runs as a kernel thread
2188  */
2189 static int kdamond_fn(void *data)
2190 {
2191 	struct damon_ctx *ctx = data;
2192 	struct damon_target *t;
2193 	struct damon_region *r, *next;
2194 	unsigned int max_nr_accesses = 0;
2195 	unsigned long sz_limit = 0;
2196 
2197 	pr_debug("kdamond (%d) starts\n", current->pid);
2198 
2199 	complete(&ctx->kdamond_started);
2200 	kdamond_init_intervals_sis(ctx);
2201 
2202 	if (ctx->ops.init)
2203 		ctx->ops.init(ctx);
2204 	if (ctx->callback.before_start && ctx->callback.before_start(ctx))
2205 		goto done;
2206 	ctx->regions_score_histogram = kmalloc_array(DAMOS_MAX_SCORE + 1,
2207 			sizeof(*ctx->regions_score_histogram), GFP_KERNEL);
2208 	if (!ctx->regions_score_histogram)
2209 		goto done;
2210 
2211 	sz_limit = damon_region_sz_limit(ctx);
2212 
2213 	while (!kdamond_need_stop(ctx)) {
2214 		/*
2215 		 * ctx->attrs and ctx->next_{aggregation,ops_update}_sis could
2216 		 * be changed from after_wmarks_check() or after_aggregation()
2217 		 * callbacks.  Read the values here, and use those for this
2218 		 * iteration.  That is, new values updated via damon_set_attrs()
2219 		 * are respected from the next iteration.
2220 		 */
2221 		unsigned long next_aggregation_sis = ctx->next_aggregation_sis;
2222 		unsigned long next_ops_update_sis = ctx->next_ops_update_sis;
2223 		unsigned long sample_interval = ctx->attrs.sample_interval;
2224 
2225 		if (kdamond_wait_activation(ctx))
2226 			break;
2227 
2228 		if (ctx->ops.prepare_access_checks)
2229 			ctx->ops.prepare_access_checks(ctx);
2230 		if (ctx->callback.after_sampling &&
2231 				ctx->callback.after_sampling(ctx))
2232 			break;
2233 		kdamond_call(ctx, false);
2234 
2235 		kdamond_usleep(sample_interval);
2236 		ctx->passed_sample_intervals++;
2237 
2238 		if (ctx->ops.check_accesses)
2239 			max_nr_accesses = ctx->ops.check_accesses(ctx);
2240 
2241 		if (ctx->passed_sample_intervals >= next_aggregation_sis) {
2242 			kdamond_merge_regions(ctx,
2243 					max_nr_accesses / 10,
2244 					sz_limit);
2245 			if (ctx->callback.after_aggregation &&
2246 					ctx->callback.after_aggregation(ctx))
2247 				break;
2248 		}
2249 
2250 		/*
2251 		 * do kdamond_apply_schemes() after kdamond_merge_regions() if
2252 		 * possible, to reduce overhead
2253 		 */
2254 		if (!list_empty(&ctx->schemes))
2255 			kdamond_apply_schemes(ctx);
2256 		else
2257 			damos_walk_cancel(ctx);
2258 
2259 		sample_interval = ctx->attrs.sample_interval ?
2260 			ctx->attrs.sample_interval : 1;
2261 		if (ctx->passed_sample_intervals >= next_aggregation_sis) {
2262 			ctx->next_aggregation_sis = next_aggregation_sis +
2263 				ctx->attrs.aggr_interval / sample_interval;
2264 
2265 			kdamond_reset_aggregated(ctx);
2266 			kdamond_split_regions(ctx);
2267 			if (ctx->ops.reset_aggregated)
2268 				ctx->ops.reset_aggregated(ctx);
2269 		}
2270 
2271 		if (ctx->passed_sample_intervals >= next_ops_update_sis) {
2272 			ctx->next_ops_update_sis = next_ops_update_sis +
2273 				ctx->attrs.ops_update_interval /
2274 				sample_interval;
2275 			if (ctx->ops.update)
2276 				ctx->ops.update(ctx);
2277 			sz_limit = damon_region_sz_limit(ctx);
2278 		}
2279 	}
2280 done:
2281 	damon_for_each_target(t, ctx) {
2282 		damon_for_each_region_safe(r, next, t)
2283 			damon_destroy_region(r, t);
2284 	}
2285 
2286 	if (ctx->callback.before_terminate)
2287 		ctx->callback.before_terminate(ctx);
2288 	if (ctx->ops.cleanup)
2289 		ctx->ops.cleanup(ctx);
2290 	kfree(ctx->regions_score_histogram);
2291 
2292 	pr_debug("kdamond (%d) finishes\n", current->pid);
2293 	mutex_lock(&ctx->kdamond_lock);
2294 	ctx->kdamond = NULL;
2295 	mutex_unlock(&ctx->kdamond_lock);
2296 
2297 	kdamond_call(ctx, true);
2298 	damos_walk_cancel(ctx);
2299 
2300 	mutex_lock(&damon_lock);
2301 	nr_running_ctxs--;
2302 	if (!nr_running_ctxs && running_exclusive_ctxs)
2303 		running_exclusive_ctxs = false;
2304 	mutex_unlock(&damon_lock);
2305 
2306 	return 0;
2307 }
2308 
2309 /*
2310  * struct damon_system_ram_region - System RAM resource address region of
2311  *				    [@start, @end).
2312  * @start:	Start address of the region (inclusive).
2313  * @end:	End address of the region (exclusive).
2314  */
2315 struct damon_system_ram_region {
2316 	unsigned long start;
2317 	unsigned long end;
2318 };
2319 
2320 static int walk_system_ram(struct resource *res, void *arg)
2321 {
2322 	struct damon_system_ram_region *a = arg;
2323 
2324 	if (a->end - a->start < resource_size(res)) {
2325 		a->start = res->start;
2326 		a->end = res->end;
2327 	}
2328 	return 0;
2329 }
2330 
2331 /*
2332  * Find the biggest 'System RAM' resource and store its start and end address in
2333  * @start and @end, respectively.  If no System RAM is found, returns false.
2334  */
2335 static bool damon_find_biggest_system_ram(unsigned long *start,
2336 						unsigned long *end)
2337 
2338 {
2339 	struct damon_system_ram_region arg = {};
2340 
2341 	walk_system_ram_res(0, ULONG_MAX, &arg, walk_system_ram);
2342 	if (arg.end <= arg.start)
2343 		return false;
2344 
2345 	*start = arg.start;
2346 	*end = arg.end;
2347 	return true;
2348 }
2349 
2350 /**
2351  * damon_set_region_biggest_system_ram_default() - Set the region of the given
2352  * monitoring target as requested, or the biggest 'System RAM'.
2353  * @t:		The monitoring target to set the region.
2354  * @start:	The pointer to the start address of the region.
2355  * @end:	The pointer to the end address of the region.
2356  *
2357  * This function sets the region of @t as requested by @start and @end.  If the
2358  * values of @start and @end are zero, however, this function finds the biggest
2359  * 'System RAM' resource and sets the region to cover the resource.  In the
2360  * latter case, this function saves the start and end addresses of the resource
2361  * in @start and @end, respectively.
2362  *
2363  * Return: 0 on success, negative error code otherwise.
2364  */
2365 int damon_set_region_biggest_system_ram_default(struct damon_target *t,
2366 			unsigned long *start, unsigned long *end)
2367 {
2368 	struct damon_addr_range addr_range;
2369 
2370 	if (*start > *end)
2371 		return -EINVAL;
2372 
2373 	if (!*start && !*end &&
2374 		!damon_find_biggest_system_ram(start, end))
2375 		return -EINVAL;
2376 
2377 	addr_range.start = *start;
2378 	addr_range.end = *end;
2379 	return damon_set_regions(t, &addr_range, 1);
2380 }
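
/*
 * Minimal usage sketch for the helper above (hypothetical caller-side code,
 * not part of this file):
 *
 *	unsigned long start = 0, end = 0;
 *
 *	if (!damon_set_region_biggest_system_ram_default(t, &start, &end))
 *		pr_debug("monitoring [%lu, %lu)\n", start, end);
 */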
2381 
2382 /*
2383  * damon_moving_sum() - Calculate an inferred moving sum value.
2384  * @mvsum:	Inferred sum of the last @len_window values.
2385  * @nomvsum:	Non-moving sum of the last discrete @len_window window values.
2386  * @len_window:	The number of last values to take care of.
2387  * @new_value:	New value that will be added to the pseudo moving sum.
2388  *
2389  * Moving sum (moving average * window size) is good for handling noise, but
2390  * the cost of keeping past values can be high for arbitrary window size.  This
2391  * function implements a lightweight pseudo moving sum function that doesn't
2392  * keep the past window values.
2393  *
2394  * It simply assumes there was no noise in the past, and gets the no-noise
2395  * assumed past value to drop from @nomvsum and @len_window.  @nomvsum is a
2396  * non-moving sum of the last window.  For example, if @len_window is 10 and we
2397  * have 25 values, @nomvsum is the sum of the 11th to 20th values of the 25
2398  * values.  Hence, this function simply drops @nomvsum / @len_window from the
2399  * given @mvsum and adds @new_value.
2400  *
2401  * For example, if @len_window is 10 and @nomvsum is 50, the last 10 values of
2402  * the last window could vary, e.g., 0, 10, 0, 10, 0, 10, 0, 0, 0, 20.  For
2403  * calculating the next moving sum with a new value, we should drop 0 from 50
2404  * and add the new value.  However, this function assumes it got the value 5
2405  * for each of the last ten times.  Based on that assumption, when the next
2406  * value is measured, it drops the assumed past value, 5, from the current sum,
2407  * and adds the new value to get the updated pseudo-moving sum.
2408  *
2409  * This means the value could have errors, but the errors disappear at every
2410  * @len_window-aligned call.  For example, if @len_window is 10, the pseudo
2411  * moving sum with the 11th to 19th values would have an error, but the sum
2412  * with the 20th value will not.
2413  *
2414  * Return: Pseudo-moving sum after getting the @new_value.
2415  */
2416 static unsigned int damon_moving_sum(unsigned int mvsum, unsigned int nomvsum,
2417 		unsigned int len_window, unsigned int new_value)
2418 {
2419 	return mvsum - nomvsum / len_window + new_value;
2420 }
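
/*
 * Worked instance of the pseudo moving sum above, reusing the numbers from
 * the comment: damon_moving_sum(50, 50, 10, 20) returns 50 - 50 / 10 + 20,
 * i.e., 65, without having to remember all ten past values that an exact
 * moving sum would require.
 */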
2421 
2422 /**
2423  * damon_update_region_access_rate() - Update the access rate of a region.
2424  * @r:		The DAMON region to update for its access check result.
2425  * @accessed:	Whether the region was accessed during the last sampling interval.
2426  * @attrs:	The damon_attrs of the DAMON context.
2427  *
2428  * Update the access rate of a region with the region's last sampling interval
2429  * access check result.
2430  *
2431  * Usually this will be called by &damon_operations->check_accesses callback.
2432  */
2433 void damon_update_region_access_rate(struct damon_region *r, bool accessed,
2434 		struct damon_attrs *attrs)
2435 {
2436 	unsigned int len_window = 1;
2437 
2438 	/*
2439 	 * sample_interval can be zero, but cannot be larger than
2440 	 * aggr_interval, owing to validation of damon_set_attrs().
2441 	 */
2442 	if (attrs->sample_interval)
2443 		len_window = damon_max_nr_accesses(attrs);
2444 	r->nr_accesses_bp = damon_moving_sum(r->nr_accesses_bp,
2445 			r->last_nr_accesses * 10000, len_window,
2446 			accessed ? 10000 : 0);
2447 
2448 	if (accessed)
2449 		r->nr_accesses++;
2450 }
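
/*
 * Worked example for the update above (assumed numbers): with a 5 ms sampling
 * interval and a 100 ms aggregation interval, len_window is 20.  For a region
 * with nr_accesses_bp of 40000 and last_nr_accesses of 4, an accessed sample
 * updates nr_accesses_bp to 40000 - 4 * 10000 / 20 + 10000, i.e., 48000, and
 * also increments the raw nr_accesses counter.
 */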
2451 
2452 static int __init damon_init(void)
2453 {
2454 	damon_region_cache = KMEM_CACHE(damon_region, 0);
2455 	if (unlikely(!damon_region_cache)) {
2456 		pr_err("creating damon_region_cache fails\n");
2457 		return -ENOMEM;
2458 	}
2459 
2460 	return 0;
2461 }
2462 
2463 subsys_initcall(damon_init);
2464 
2465 #include "tests/core-kunit.h"
2466