xref: /linux/mm/damon/core.c (revision 9907e1df31c0f4bdcebe16de809121baa754e5b5)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Data Access Monitor
4  *
5  * Author: SeongJae Park <sj@kernel.org>
6  */
7 
8 #define pr_fmt(fmt) "damon: " fmt
9 
10 #include <linux/damon.h>
11 #include <linux/delay.h>
12 #include <linux/kthread.h>
13 #include <linux/mm.h>
14 #include <linux/psi.h>
15 #include <linux/slab.h>
16 #include <linux/string.h>
17 #include <linux/string_choices.h>
18 
19 #define CREATE_TRACE_POINTS
20 #include <trace/events/damon.h>
21 
22 #ifdef CONFIG_DAMON_KUNIT_TEST
23 #undef DAMON_MIN_REGION
24 #define DAMON_MIN_REGION 1
25 #endif
26 
27 static DEFINE_MUTEX(damon_lock);
28 static int nr_running_ctxs;
29 static bool running_exclusive_ctxs;
30 
31 static DEFINE_MUTEX(damon_ops_lock);
32 static struct damon_operations damon_registered_ops[NR_DAMON_OPS];
33 
34 static struct kmem_cache *damon_region_cache __ro_after_init;
35 
36 /* Should be called under damon_ops_lock with id smaller than NR_DAMON_OPS */
37 static bool __damon_is_registered_ops(enum damon_ops_id id)
38 {
39 	struct damon_operations empty_ops = {};
40 
41 	if (!memcmp(&empty_ops, &damon_registered_ops[id], sizeof(empty_ops)))
42 		return false;
43 	return true;
44 }
45 
46 /**
47  * damon_is_registered_ops() - Check if a given damon_operations is registered.
48  * @id:	Id of the damon_operations to check if registered.
49  *
50  * Return: true if the ops is set, false otherwise.
51  */
52 bool damon_is_registered_ops(enum damon_ops_id id)
53 {
54 	bool registered;
55 
56 	if (id >= NR_DAMON_OPS)
57 		return false;
58 	mutex_lock(&damon_ops_lock);
59 	registered = __damon_is_registered_ops(id);
60 	mutex_unlock(&damon_ops_lock);
61 	return registered;
62 }
63 
64 /**
65  * damon_register_ops() - Register a monitoring operations set to DAMON.
66  * @ops:	monitoring operations set to register.
67  *
68  * This function registers a monitoring operations set of valid &struct
69  * damon_operations->id so that others can find and use them later.
70  *
71  * Return: 0 on success, negative error code otherwise.
72  */
73 int damon_register_ops(struct damon_operations *ops)
74 {
75 	int err = 0;
76 
77 	if (ops->id >= NR_DAMON_OPS)
78 		return -EINVAL;
79 
80 	mutex_lock(&damon_ops_lock);
81 	/* Fail for already registered ops */
82 	if (__damon_is_registered_ops(ops->id))
83 		err = -EINVAL;
84 	else
85 		damon_registered_ops[ops->id] = *ops;
86 	mutex_unlock(&damon_ops_lock);
87 	return err;
88 }
89 
90 /**
91  * damon_select_ops() - Select a monitoring operations to use with the context.
92  * @ctx:	monitoring context to use the operations.
93  * @id:		id of the registered monitoring operations to select.
94  *
95  * This function finds the registered monitoring operations set of @id and
96  * makes @ctx use it.
97  *
98  * Return: 0 on success, negative error code otherwise.
99  */
100 int damon_select_ops(struct damon_ctx *ctx, enum damon_ops_id id)
101 {
102 	int err = 0;
103 
104 	if (id >= NR_DAMON_OPS)
105 		return -EINVAL;
106 
107 	mutex_lock(&damon_ops_lock);
108 	if (!__damon_is_registered_ops(id))
109 		err = -EINVAL;
110 	else
111 		ctx->ops = damon_registered_ops[id];
112 	mutex_unlock(&damon_ops_lock);
113 	return err;
114 }
115 
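/*
 * Example: an illustrative sketch (not part of the upstream sources) of a
 * caller that binds a new context to the physical address space monitoring
 * operations.  The function name is hypothetical.
 *
 *	static struct damon_ctx *example_new_paddr_ctx(void)
 *	{
 *		struct damon_ctx *ctx = damon_new_ctx();
 *
 *		if (!ctx)
 *			return NULL;
 *		if (damon_select_ops(ctx, DAMON_OPS_PADDR)) {
 *			damon_destroy_ctx(ctx);
 *			return NULL;
 *		}
 *		return ctx;
 *	}
 */
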
116 /*
117  * Construct a damon_region struct
118  *
119  * Returns the pointer to the new struct if success, or NULL otherwise
120  */
121 struct damon_region *damon_new_region(unsigned long start, unsigned long end)
122 {
123 	struct damon_region *region;
124 
125 	region = kmem_cache_alloc(damon_region_cache, GFP_KERNEL);
126 	if (!region)
127 		return NULL;
128 
129 	region->ar.start = start;
130 	region->ar.end = end;
131 	region->nr_accesses = 0;
132 	region->nr_accesses_bp = 0;
133 	INIT_LIST_HEAD(&region->list);
134 
135 	region->age = 0;
136 	region->last_nr_accesses = 0;
137 
138 	return region;
139 }
140 
141 void damon_add_region(struct damon_region *r, struct damon_target *t)
142 {
143 	list_add_tail(&r->list, &t->regions_list);
144 	t->nr_regions++;
145 }
146 
147 static void damon_del_region(struct damon_region *r, struct damon_target *t)
148 {
149 	list_del(&r->list);
150 	t->nr_regions--;
151 }
152 
153 static void damon_free_region(struct damon_region *r)
154 {
155 	kmem_cache_free(damon_region_cache, r);
156 }
157 
158 void damon_destroy_region(struct damon_region *r, struct damon_target *t)
159 {
160 	damon_del_region(r, t);
161 	damon_free_region(r);
162 }
163 
164 /*
165  * Check whether a region is intersecting an address range
166  *
167  * Returns true if it is.
168  */
169 static bool damon_intersect(struct damon_region *r,
170 		struct damon_addr_range *re)
171 {
172 	return !(r->ar.end <= re->start || re->end <= r->ar.start);
173 }
174 
175 /*
176  * Fill holes in regions with new regions.
177  */
178 static int damon_fill_regions_holes(struct damon_region *first,
179 		struct damon_region *last, struct damon_target *t)
180 {
181 	struct damon_region *r = first;
182 
183 	damon_for_each_region_from(r, t) {
184 		struct damon_region *next, *newr;
185 
186 		if (r == last)
187 			break;
188 		next = damon_next_region(r);
189 		if (r->ar.end != next->ar.start) {
190 			newr = damon_new_region(r->ar.end, next->ar.start);
191 			if (!newr)
192 				return -ENOMEM;
193 			damon_insert_region(newr, r, next, t);
194 		}
195 	}
196 	return 0;
197 }
198 
199 /*
200  * damon_set_regions() - Set regions of a target for given address ranges.
201  * @t:		the given target.
202  * @ranges:	array of new monitoring target ranges.
203  * @nr_ranges:	length of @ranges.
204  *
205  * This function adds new regions to, or modifies existing regions of, a
206  * monitoring target to fit in the given ranges.
207  *
208  * Return: 0 if success, or negative error code otherwise.
209  */
210 int damon_set_regions(struct damon_target *t, struct damon_addr_range *ranges,
211 		unsigned int nr_ranges)
212 {
213 	struct damon_region *r, *next;
214 	unsigned int i;
215 	int err;
216 
217 	/* Remove regions which are not in the new ranges */
218 	damon_for_each_region_safe(r, next, t) {
219 		for (i = 0; i < nr_ranges; i++) {
220 			if (damon_intersect(r, &ranges[i]))
221 				break;
222 		}
223 		if (i == nr_ranges)
224 			damon_destroy_region(r, t);
225 	}
226 
227 	r = damon_first_region(t);
228 	/* Add new regions or resize existing regions to fit in the ranges */
229 	for (i = 0; i < nr_ranges; i++) {
230 		struct damon_region *first = NULL, *last, *newr;
231 		struct damon_addr_range *range;
232 
233 		range = &ranges[i];
234 		/* Get the first/last regions intersecting with the range */
235 		damon_for_each_region_from(r, t) {
236 			if (damon_intersect(r, range)) {
237 				if (!first)
238 					first = r;
239 				last = r;
240 			}
241 			if (r->ar.start >= range->end)
242 				break;
243 		}
244 		if (!first) {
245 			/* no region intersects with this range */
246 			newr = damon_new_region(
247 					ALIGN_DOWN(range->start,
248 						DAMON_MIN_REGION),
249 					ALIGN(range->end, DAMON_MIN_REGION));
250 			if (!newr)
251 				return -ENOMEM;
252 			damon_insert_region(newr, damon_prev_region(r), r, t);
253 		} else {
254 			/* resize intersecting regions to fit in this range */
255 			first->ar.start = ALIGN_DOWN(range->start,
256 					DAMON_MIN_REGION);
257 			last->ar.end = ALIGN(range->end, DAMON_MIN_REGION);
258 
259 			/* fill possible holes in the range */
260 			err = damon_fill_regions_holes(first, last, t);
261 			if (err)
262 				return err;
263 		}
264 	}
265 	return 0;
266 }
267 
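/*
 * Example: an illustrative sketch (not upstream code) of setting two
 * monitoring target address ranges with damon_set_regions().  The addresses
 * are arbitrary; DAMON aligns them to DAMON_MIN_REGION as shown above.
 *
 *	static int example_set_two_ranges(struct damon_target *t)
 *	{
 *		struct damon_addr_range ranges[] = {
 *			{ .start = 0x100000000UL, .end = 0x140000000UL },
 *			{ .start = 0x200000000UL, .end = 0x280000000UL },
 *		};
 *
 *		return damon_set_regions(t, ranges, ARRAY_SIZE(ranges));
 *	}
 */
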
268 struct damos_filter *damos_new_filter(enum damos_filter_type type,
269 		bool matching, bool allow)
270 {
271 	struct damos_filter *filter;
272 
273 	filter = kmalloc(sizeof(*filter), GFP_KERNEL);
274 	if (!filter)
275 		return NULL;
276 	filter->type = type;
277 	filter->matching = matching;
278 	filter->allow = allow;
279 	INIT_LIST_HEAD(&filter->list);
280 	return filter;
281 }
282 
283 /**
284  * damos_filter_for_ops() - Return whether the filter is an ops-handled one.
285  * @type:	type of the filter.
286  *
287  * Return: true if the filter of @type needs to be handled by the ops layer,
288  * false otherwise.
289  */
290 bool damos_filter_for_ops(enum damos_filter_type type)
291 {
292 	switch (type) {
293 	case DAMOS_FILTER_TYPE_ADDR:
294 	case DAMOS_FILTER_TYPE_TARGET:
295 		return false;
296 	default:
297 		break;
298 	}
299 	return true;
300 }
301 
302 void damos_add_filter(struct damos *s, struct damos_filter *f)
303 {
304 	if (damos_filter_for_ops(f->type))
305 		list_add_tail(&f->list, &s->ops_filters);
306 	else
307 		list_add_tail(&f->list, &s->filters);
308 }
309 
310 static void damos_del_filter(struct damos_filter *f)
311 {
312 	list_del(&f->list);
313 }
314 
315 static void damos_free_filter(struct damos_filter *f)
316 {
317 	kfree(f);
318 }
319 
320 void damos_destroy_filter(struct damos_filter *f)
321 {
322 	damos_del_filter(f);
323 	damos_free_filter(f);
324 }
325 
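/*
 * Example: an illustrative sketch (not upstream code) of allowing only
 * anonymous pages for a scheme.  DAMOS_FILTER_TYPE_ANON is an ops-handled
 * type, so damos_add_filter() places it on &damos->ops_filters.  Because the
 * last (here, only) filter is an allow-filter, memory that matches no filter
 * is rejected by default (see damos_filters_default_reject() below).
 *
 *	static int example_allow_only_anon(struct damos *s)
 *	{
 *		struct damos_filter *f;
 *
 *		f = damos_new_filter(DAMOS_FILTER_TYPE_ANON, true, true);
 *		if (!f)
 *			return -ENOMEM;
 *		damos_add_filter(s, f);
 *		return 0;
 *	}
 */
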
326 struct damos_quota_goal *damos_new_quota_goal(
327 		enum damos_quota_goal_metric metric,
328 		unsigned long target_value)
329 {
330 	struct damos_quota_goal *goal;
331 
332 	goal = kmalloc(sizeof(*goal), GFP_KERNEL);
333 	if (!goal)
334 		return NULL;
335 	goal->metric = metric;
336 	goal->target_value = target_value;
337 	INIT_LIST_HEAD(&goal->list);
338 	return goal;
339 }
340 
341 void damos_add_quota_goal(struct damos_quota *q, struct damos_quota_goal *g)
342 {
343 	list_add_tail(&g->list, &q->goals);
344 }
345 
346 static void damos_del_quota_goal(struct damos_quota_goal *g)
347 {
348 	list_del(&g->list);
349 }
350 
351 static void damos_free_quota_goal(struct damos_quota_goal *g)
352 {
353 	kfree(g);
354 }
355 
356 void damos_destroy_quota_goal(struct damos_quota_goal *g)
357 {
358 	damos_del_quota_goal(g);
359 	damos_free_quota_goal(g);
360 }
361 
362 /* initialize fields of @quota that normally API users wouldn't set */
363 static struct damos_quota *damos_quota_init(struct damos_quota *quota)
364 {
365 	quota->esz = 0;
366 	quota->total_charged_sz = 0;
367 	quota->total_charged_ns = 0;
368 	quota->charged_sz = 0;
369 	quota->charged_from = 0;
370 	quota->charge_target_from = NULL;
371 	quota->charge_addr_from = 0;
372 	quota->esz_bp = 0;
373 	return quota;
374 }
375 
376 struct damos *damon_new_scheme(struct damos_access_pattern *pattern,
377 			enum damos_action action,
378 			unsigned long apply_interval_us,
379 			struct damos_quota *quota,
380 			struct damos_watermarks *wmarks,
381 			int target_nid)
382 {
383 	struct damos *scheme;
384 
385 	scheme = kmalloc(sizeof(*scheme), GFP_KERNEL);
386 	if (!scheme)
387 		return NULL;
388 	scheme->pattern = *pattern;
389 	scheme->action = action;
390 	scheme->apply_interval_us = apply_interval_us;
391 	/*
392 	 * next_apply_sis will be set when kdamond starts.  While kdamond is
393 	 * running, it will also be updated when the scheme is added to the
394 	 * DAMON context, or damon_attrs are updated.
395 	 */
396 	scheme->next_apply_sis = 0;
397 	scheme->walk_completed = false;
398 	INIT_LIST_HEAD(&scheme->filters);
399 	INIT_LIST_HEAD(&scheme->ops_filters);
400 	scheme->stat = (struct damos_stat){};
401 	INIT_LIST_HEAD(&scheme->list);
402 
403 	scheme->quota = *(damos_quota_init(quota));
404 	/* quota.goals should be separately set by caller */
405 	INIT_LIST_HEAD(&scheme->quota.goals);
406 
407 	scheme->wmarks = *wmarks;
408 	scheme->wmarks.activated = true;
409 
410 	scheme->migrate_dests = (struct damos_migrate_dests){};
411 	scheme->target_nid = target_nid;
412 
413 	return scheme;
414 }
415 
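/*
 * Example: an illustrative sketch (not upstream code) of constructing a
 * scheme that pages out regions not accessed for at least 100 aggregation
 * intervals.  The quota limits the action to 100ms of CPU time per 1000ms
 * reset interval (quota time fields are in milliseconds), and watermarks are
 * disabled.  All numbers are arbitrary.
 *
 *	static struct damos *example_pageout_scheme(void)
 *	{
 *		struct damos_access_pattern pattern = {
 *			.min_sz_region = PAGE_SIZE,
 *			.max_sz_region = ULONG_MAX,
 *			.min_nr_accesses = 0,
 *			.max_nr_accesses = 0,
 *			.min_age_region = 100,
 *			.max_age_region = UINT_MAX,
 *		};
 *		struct damos_quota quota = {
 *			.reset_interval = 1000,
 *			.ms = 100,
 *		};
 *		struct damos_watermarks wmarks = {
 *			.metric = DAMOS_WMARK_NONE,
 *		};
 *
 *		return damon_new_scheme(&pattern, DAMOS_PAGEOUT, 0, &quota,
 *				&wmarks, NUMA_NO_NODE);
 *	}
 */
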
416 static void damos_set_next_apply_sis(struct damos *s, struct damon_ctx *ctx)
417 {
418 	unsigned long sample_interval = ctx->attrs.sample_interval ?
419 		ctx->attrs.sample_interval : 1;
420 	unsigned long apply_interval = s->apply_interval_us ?
421 		s->apply_interval_us : ctx->attrs.aggr_interval;
422 
423 	s->next_apply_sis = ctx->passed_sample_intervals +
424 		apply_interval / sample_interval;
425 }
426 
427 void damon_add_scheme(struct damon_ctx *ctx, struct damos *s)
428 {
429 	list_add_tail(&s->list, &ctx->schemes);
430 	damos_set_next_apply_sis(s, ctx);
431 }
432 
433 static void damon_del_scheme(struct damos *s)
434 {
435 	list_del(&s->list);
436 }
437 
438 static void damon_free_scheme(struct damos *s)
439 {
440 	kfree(s);
441 }
442 
443 void damon_destroy_scheme(struct damos *s)
444 {
445 	struct damos_quota_goal *g, *g_next;
446 	struct damos_filter *f, *next;
447 
448 	damos_for_each_quota_goal_safe(g, g_next, &s->quota)
449 		damos_destroy_quota_goal(g);
450 
451 	damos_for_each_filter_safe(f, next, s)
452 		damos_destroy_filter(f);
453 
454 	kfree(s->migrate_dests.node_id_arr);
455 	kfree(s->migrate_dests.weight_arr);
456 	damon_del_scheme(s);
457 	damon_free_scheme(s);
458 }
459 
460 /*
461  * Construct a damon_target struct
462  *
463  * Returns the pointer to the new struct on success, or NULL otherwise
464  */
465 struct damon_target *damon_new_target(void)
466 {
467 	struct damon_target *t;
468 
469 	t = kmalloc(sizeof(*t), GFP_KERNEL);
470 	if (!t)
471 		return NULL;
472 
473 	t->pid = NULL;
474 	t->nr_regions = 0;
475 	INIT_LIST_HEAD(&t->regions_list);
476 	INIT_LIST_HEAD(&t->list);
477 
478 	return t;
479 }
480 
481 void damon_add_target(struct damon_ctx *ctx, struct damon_target *t)
482 {
483 	list_add_tail(&t->list, &ctx->adaptive_targets);
484 }
485 
486 bool damon_targets_empty(struct damon_ctx *ctx)
487 {
488 	return list_empty(&ctx->adaptive_targets);
489 }
490 
491 static void damon_del_target(struct damon_target *t)
492 {
493 	list_del(&t->list);
494 }
495 
496 void damon_free_target(struct damon_target *t)
497 {
498 	struct damon_region *r, *next;
499 
500 	damon_for_each_region_safe(r, next, t)
501 		damon_free_region(r);
502 	kfree(t);
503 }
504 
505 void damon_destroy_target(struct damon_target *t, struct damon_ctx *ctx)
506 {
508 	if (ctx && ctx->ops.cleanup_target)
509 		ctx->ops.cleanup_target(t);
510 
511 	damon_del_target(t);
512 	damon_free_target(t);
513 }
514 
515 unsigned int damon_nr_regions(struct damon_target *t)
516 {
517 	return t->nr_regions;
518 }
519 
520 struct damon_ctx *damon_new_ctx(void)
521 {
522 	struct damon_ctx *ctx;
523 
524 	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
525 	if (!ctx)
526 		return NULL;
527 
528 	init_completion(&ctx->kdamond_started);
529 
530 	ctx->attrs.sample_interval = 5 * 1000;
531 	ctx->attrs.aggr_interval = 100 * 1000;
532 	ctx->attrs.ops_update_interval = 60 * 1000 * 1000;
533 
534 	ctx->passed_sample_intervals = 0;
535 	/* These will be set from kdamond_init_ctx() */
536 	ctx->next_aggregation_sis = 0;
537 	ctx->next_ops_update_sis = 0;
538 
539 	mutex_init(&ctx->kdamond_lock);
540 	INIT_LIST_HEAD(&ctx->call_controls);
541 	mutex_init(&ctx->call_controls_lock);
542 	mutex_init(&ctx->walk_control_lock);
543 
544 	ctx->attrs.min_nr_regions = 10;
545 	ctx->attrs.max_nr_regions = 1000;
546 
547 	INIT_LIST_HEAD(&ctx->adaptive_targets);
548 	INIT_LIST_HEAD(&ctx->schemes);
549 
550 	return ctx;
551 }
552 
553 static void damon_destroy_targets(struct damon_ctx *ctx)
554 {
555 	struct damon_target *t, *next_t;
556 
557 	damon_for_each_target_safe(t, next_t, ctx)
558 		damon_destroy_target(t, ctx);
559 }
560 
561 void damon_destroy_ctx(struct damon_ctx *ctx)
562 {
563 	struct damos *s, *next_s;
564 
565 	damon_destroy_targets(ctx);
566 
567 	damon_for_each_scheme_safe(s, next_s, ctx)
568 		damon_destroy_scheme(s);
569 
570 	kfree(ctx);
571 }
572 
573 static bool damon_attrs_equals(const struct damon_attrs *attrs1,
574 		const struct damon_attrs *attrs2)
575 {
576 	const struct damon_intervals_goal *ig1 = &attrs1->intervals_goal;
577 	const struct damon_intervals_goal *ig2 = &attrs2->intervals_goal;
578 
579 	return attrs1->sample_interval == attrs2->sample_interval &&
580 		attrs1->aggr_interval == attrs2->aggr_interval &&
581 		attrs1->ops_update_interval == attrs2->ops_update_interval &&
582 		attrs1->min_nr_regions == attrs2->min_nr_regions &&
583 		attrs1->max_nr_regions == attrs2->max_nr_regions &&
584 		ig1->access_bp == ig2->access_bp &&
585 		ig1->aggrs == ig2->aggrs &&
586 		ig1->min_sample_us == ig2->min_sample_us &&
587 		ig1->max_sample_us == ig2->max_sample_us;
588 }
589 
590 static unsigned int damon_age_for_new_attrs(unsigned int age,
591 		struct damon_attrs *old_attrs, struct damon_attrs *new_attrs)
592 {
593 	return age * old_attrs->aggr_interval / new_attrs->aggr_interval;
594 }
595 
596 /* convert access ratio in bp (per 10,000) to nr_accesses */
597 static unsigned int damon_accesses_bp_to_nr_accesses(
598 		unsigned int accesses_bp, struct damon_attrs *attrs)
599 {
600 	return accesses_bp * damon_max_nr_accesses(attrs) / 10000;
601 }
602 
603 /*
604  * Convert nr_accesses to access ratio in bp (per 10,000).
605  *
606  * Callers should ensure attrs.aggr_interval is not zero, like
607  * damon_update_monitoring_results() does.  Otherwise, divide-by-zero would
608  * happen.
609  */
610 static unsigned int damon_nr_accesses_to_accesses_bp(
611 		unsigned int nr_accesses, struct damon_attrs *attrs)
612 {
613 	return nr_accesses * 10000 / damon_max_nr_accesses(attrs);
614 }
615 
616 static unsigned int damon_nr_accesses_for_new_attrs(unsigned int nr_accesses,
617 		struct damon_attrs *old_attrs, struct damon_attrs *new_attrs)
618 {
619 	return damon_accesses_bp_to_nr_accesses(
620 			damon_nr_accesses_to_accesses_bp(
621 				nr_accesses, old_attrs),
622 			new_attrs);
623 }
624 
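/*
 * Worked example of the two-step conversion above, assuming
 * damon_max_nr_accesses() is the aggregation interval divided by the sampling
 * interval: with old attrs of 5ms sampling and 100ms aggregation, the maximum
 * nr_accesses is 20, so a nr_accesses of 10 converts to
 * 10 * 10000 / 20 = 5000 bp.  With new attrs of 1ms sampling and 50ms
 * aggregation (maximum 50), the 5000 bp converts back to
 * 5000 * 50 / 10000 = 25 nr_accesses.
 */
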
625 static void damon_update_monitoring_result(struct damon_region *r,
626 		struct damon_attrs *old_attrs, struct damon_attrs *new_attrs,
627 		bool aggregating)
628 {
629 	if (!aggregating) {
630 		r->nr_accesses = damon_nr_accesses_for_new_attrs(
631 				r->nr_accesses, old_attrs, new_attrs);
632 		r->nr_accesses_bp = r->nr_accesses * 10000;
633 	} else {
634 		/*
635 		 * if this is called in the middle of the aggregation, reset
636 		 * the aggregations we made so far for this aggregation
637 		 * interval.  In other words, make the status like
638 		 * kdamond_reset_aggregated() is called.
639 		 */
640 		r->last_nr_accesses = damon_nr_accesses_for_new_attrs(
641 				r->last_nr_accesses, old_attrs, new_attrs);
642 		r->nr_accesses_bp = r->last_nr_accesses * 10000;
643 		r->nr_accesses = 0;
644 	}
645 	r->age = damon_age_for_new_attrs(r->age, old_attrs, new_attrs);
646 }
647 
648 /*
649  * region->nr_accesses is the number of sampling intervals in the last
650  * aggregation interval in which access to the region has been found, and
651  * region->age is the number of aggregation intervals for which its access
652  * pattern has been maintained.  For this reason, the real meaning of the two
653  * fields depends on the current sampling and aggregation intervals.  This
654  * function updates ->nr_accesses and ->age of the regions for new damon_attrs.
655  */
656 static void damon_update_monitoring_results(struct damon_ctx *ctx,
657 		struct damon_attrs *new_attrs, bool aggregating)
658 {
659 	struct damon_attrs *old_attrs = &ctx->attrs;
660 	struct damon_target *t;
661 	struct damon_region *r;
662 
663 	/* if any interval is zero, simply forgive conversion */
664 	if (!old_attrs->sample_interval || !old_attrs->aggr_interval ||
665 			!new_attrs->sample_interval ||
666 			!new_attrs->aggr_interval)
667 		return;
668 
669 	damon_for_each_target(t, ctx)
670 		damon_for_each_region(r, t)
671 			damon_update_monitoring_result(
672 					r, old_attrs, new_attrs, aggregating);
673 }
674 
675 /*
676  * damon_valid_intervals_goal() - return if the intervals goal of @attrs is
677  * valid.
678  */
679 static bool damon_valid_intervals_goal(struct damon_attrs *attrs)
680 {
681 	struct damon_intervals_goal *goal = &attrs->intervals_goal;
682 
683 	/* tuning is disabled */
684 	if (!goal->aggrs)
685 		return true;
686 	if (goal->min_sample_us > goal->max_sample_us)
687 		return false;
688 	if (attrs->sample_interval < goal->min_sample_us ||
689 			goal->max_sample_us < attrs->sample_interval)
690 		return false;
691 	return true;
692 }
693 
694 /**
695  * damon_set_attrs() - Set attributes for the monitoring.
696  * @ctx:		monitoring context
697  * @attrs:		monitoring attributes
698  *
699  * This function should be called while the kdamond is not running, or while an
700  * access check results aggregation is not ongoing (e.g., from damon_call()).
701  *
702  * Every time interval is in micro-seconds.
703  *
704  * Return: 0 on success, negative error code otherwise.
705  */
706 int damon_set_attrs(struct damon_ctx *ctx, struct damon_attrs *attrs)
707 {
708 	unsigned long sample_interval = attrs->sample_interval ?
709 		attrs->sample_interval : 1;
710 	struct damos *s;
711 	bool aggregating = ctx->passed_sample_intervals <
712 		ctx->next_aggregation_sis;
713 
714 	if (!damon_valid_intervals_goal(attrs))
715 		return -EINVAL;
716 
717 	if (attrs->min_nr_regions < 3)
718 		return -EINVAL;
719 	if (attrs->min_nr_regions > attrs->max_nr_regions)
720 		return -EINVAL;
721 	if (attrs->sample_interval > attrs->aggr_interval)
722 		return -EINVAL;
723 
724 	/* Calls from outside of the core logic don't set this. */
725 	if (!attrs->aggr_samples)
726 		attrs->aggr_samples = attrs->aggr_interval / sample_interval;
727 
728 	ctx->next_aggregation_sis = ctx->passed_sample_intervals +
729 		attrs->aggr_interval / sample_interval;
730 	ctx->next_ops_update_sis = ctx->passed_sample_intervals +
731 		attrs->ops_update_interval / sample_interval;
732 
733 	damon_update_monitoring_results(ctx, attrs, aggregating);
734 	ctx->attrs = *attrs;
735 
736 	damon_for_each_scheme(s, ctx)
737 		damos_set_next_apply_sis(s, ctx);
738 
739 	return 0;
740 }
741 
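/*
 * Example: an illustrative sketch (not upstream code) of updating only the
 * sampling and aggregation intervals of a context, in microseconds.  Starting
 * from the current attrs keeps the other fields (e.g., min/max_nr_regions)
 * valid, and zeroing aggr_samples lets damon_set_attrs() recompute it, as the
 * code above does for core-external callers.
 *
 *	static int example_update_intervals(struct damon_ctx *ctx)
 *	{
 *		struct damon_attrs attrs = ctx->attrs;
 *
 *		attrs.sample_interval = 10 * 1000;
 *		attrs.aggr_interval = 200 * 1000;
 *		attrs.aggr_samples = 0;
 *		return damon_set_attrs(ctx, &attrs);
 *	}
 */
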
742 /**
743  * damon_set_schemes() - Set data access monitoring based operation schemes.
744  * @ctx:	monitoring context
745  * @schemes:	array of the schemes
746  * @nr_schemes:	number of entries in @schemes
747  *
748  * This function should not be called while the kdamond of the context is
749  * running.
750  */
751 void damon_set_schemes(struct damon_ctx *ctx, struct damos **schemes,
752 			ssize_t nr_schemes)
753 {
754 	struct damos *s, *next;
755 	ssize_t i;
756 
757 	damon_for_each_scheme_safe(s, next, ctx)
758 		damon_destroy_scheme(s);
759 	for (i = 0; i < nr_schemes; i++)
760 		damon_add_scheme(ctx, schemes[i]);
761 }
762 
763 static struct damos_quota_goal *damos_nth_quota_goal(
764 		int n, struct damos_quota *q)
765 {
766 	struct damos_quota_goal *goal;
767 	int i = 0;
768 
769 	damos_for_each_quota_goal(goal, q) {
770 		if (i++ == n)
771 			return goal;
772 	}
773 	return NULL;
774 }
775 
776 static void damos_commit_quota_goal_union(
777 		struct damos_quota_goal *dst, struct damos_quota_goal *src)
778 {
779 	switch (dst->metric) {
780 	case DAMOS_QUOTA_NODE_MEM_USED_BP:
781 	case DAMOS_QUOTA_NODE_MEM_FREE_BP:
782 		dst->nid = src->nid;
783 		break;
784 	default:
785 		break;
786 	}
787 }
788 
789 static void damos_commit_quota_goal(
790 		struct damos_quota_goal *dst, struct damos_quota_goal *src)
791 {
792 	dst->metric = src->metric;
793 	dst->target_value = src->target_value;
794 	if (dst->metric == DAMOS_QUOTA_USER_INPUT)
795 		dst->current_value = src->current_value;
796 	/* keep last_psi_total as is, since it will be updated in next cycle */
797 	damos_commit_quota_goal_union(dst, src);
798 }
799 
800 /**
801  * damos_commit_quota_goals() - Commit DAMOS quota goals to another quota.
802  * @dst:	The commit destination DAMOS quota.
803  * @src:	The commit source DAMOS quota.
804  *
805  * Copies user-specified parameters for quota goals from @src to @dst.  Users
806  * should use this function for quota goals-level parameters update of a
807  * running DAMON context, instead of manual in-place updates.
808  *
809  * This function should be called from parameters-update safe context, like
810  * damon_call().
811  */
812 int damos_commit_quota_goals(struct damos_quota *dst, struct damos_quota *src)
813 {
814 	struct damos_quota_goal *dst_goal, *next, *src_goal, *new_goal;
815 	int i = 0, j = 0;
816 
817 	damos_for_each_quota_goal_safe(dst_goal, next, dst) {
818 		src_goal = damos_nth_quota_goal(i++, src);
819 		if (src_goal)
820 			damos_commit_quota_goal(dst_goal, src_goal);
821 		else
822 			damos_destroy_quota_goal(dst_goal);
823 	}
824 	damos_for_each_quota_goal_safe(src_goal, next, src) {
825 		if (j++ < i)
826 			continue;
827 		new_goal = damos_new_quota_goal(
828 				src_goal->metric, src_goal->target_value);
829 		if (!new_goal)
830 			return -ENOMEM;
831 		damos_commit_quota_goal_union(new_goal, src_goal);
832 		damos_add_quota_goal(dst, new_goal);
833 	}
834 	return 0;
835 }
836 
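/*
 * Example: an illustrative sketch (not upstream code) of committing a single
 * user-feedback goal to the quota of a running scheme.  The target value of
 * 10000 and the function name are hypothetical; this should run in a
 * parameters-update safe context such as damon_call(), as noted above.
 * current_value is set explicitly since damos_new_quota_goal() does not zero
 * the allocation.
 *
 *	static int example_commit_one_goal(struct damos_quota *running_quota)
 *	{
 *		struct damos_quota src = {};
 *		struct damos_quota_goal *g;
 *		int err;
 *
 *		INIT_LIST_HEAD(&src.goals);
 *		g = damos_new_quota_goal(DAMOS_QUOTA_USER_INPUT, 10000);
 *		if (!g)
 *			return -ENOMEM;
 *		g->current_value = 0;
 *		damos_add_quota_goal(&src, g);
 *		err = damos_commit_quota_goals(running_quota, &src);
 *		damos_destroy_quota_goal(g);
 *		return err;
 *	}
 */
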
837 static int damos_commit_quota(struct damos_quota *dst, struct damos_quota *src)
838 {
839 	int err;
840 
841 	dst->reset_interval = src->reset_interval;
842 	dst->ms = src->ms;
843 	dst->sz = src->sz;
844 	err = damos_commit_quota_goals(dst, src);
845 	if (err)
846 		return err;
847 	dst->weight_sz = src->weight_sz;
848 	dst->weight_nr_accesses = src->weight_nr_accesses;
849 	dst->weight_age = src->weight_age;
850 	return 0;
851 }
852 
853 static struct damos_filter *damos_nth_filter(int n, struct damos *s)
854 {
855 	struct damos_filter *filter;
856 	int i = 0;
857 
858 	damos_for_each_filter(filter, s) {
859 		if (i++ == n)
860 			return filter;
861 	}
862 	return NULL;
863 }
864 
865 static struct damos_filter *damos_nth_ops_filter(int n, struct damos *s)
866 {
867 	struct damos_filter *filter;
868 	int i = 0;
869 
870 	damos_for_each_ops_filter(filter, s) {
871 		if (i++ == n)
872 			return filter;
873 	}
874 	return NULL;
875 }
876 
877 static void damos_commit_filter_arg(
878 		struct damos_filter *dst, struct damos_filter *src)
879 {
880 	switch (dst->type) {
881 	case DAMOS_FILTER_TYPE_MEMCG:
882 		dst->memcg_id = src->memcg_id;
883 		break;
884 	case DAMOS_FILTER_TYPE_ADDR:
885 		dst->addr_range = src->addr_range;
886 		break;
887 	case DAMOS_FILTER_TYPE_TARGET:
888 		dst->target_idx = src->target_idx;
889 		break;
890 	case DAMOS_FILTER_TYPE_HUGEPAGE_SIZE:
891 		dst->sz_range = src->sz_range;
892 		break;
893 	default:
894 		break;
895 	}
896 }
897 
898 static void damos_commit_filter(
899 		struct damos_filter *dst, struct damos_filter *src)
900 {
901 	dst->type = src->type;
902 	dst->matching = src->matching;
903 	dst->allow = src->allow;
904 	damos_commit_filter_arg(dst, src);
905 }
906 
907 static int damos_commit_core_filters(struct damos *dst, struct damos *src)
908 {
909 	struct damos_filter *dst_filter, *next, *src_filter, *new_filter;
910 	int i = 0, j = 0;
911 
912 	damos_for_each_filter_safe(dst_filter, next, dst) {
913 		src_filter = damos_nth_filter(i++, src);
914 		if (src_filter)
915 			damos_commit_filter(dst_filter, src_filter);
916 		else
917 			damos_destroy_filter(dst_filter);
918 	}
919 
920 	damos_for_each_filter_safe(src_filter, next, src) {
921 		if (j++ < i)
922 			continue;
923 
924 		new_filter = damos_new_filter(
925 				src_filter->type, src_filter->matching,
926 				src_filter->allow);
927 		if (!new_filter)
928 			return -ENOMEM;
929 		damos_commit_filter_arg(new_filter, src_filter);
930 		damos_add_filter(dst, new_filter);
931 	}
932 	return 0;
933 }
934 
935 static int damos_commit_ops_filters(struct damos *dst, struct damos *src)
936 {
937 	struct damos_filter *dst_filter, *next, *src_filter, *new_filter;
938 	int i = 0, j = 0;
939 
940 	damos_for_each_ops_filter_safe(dst_filter, next, dst) {
941 		src_filter = damos_nth_ops_filter(i++, src);
942 		if (src_filter)
943 			damos_commit_filter(dst_filter, src_filter);
944 		else
945 			damos_destroy_filter(dst_filter);
946 	}
947 
948 	damos_for_each_ops_filter_safe(src_filter, next, src) {
949 		if (j++ < i)
950 			continue;
951 
952 		new_filter = damos_new_filter(
953 				src_filter->type, src_filter->matching,
954 				src_filter->allow);
955 		if (!new_filter)
956 			return -ENOMEM;
957 		damos_commit_filter_arg(new_filter, src_filter);
958 		damos_add_filter(dst, new_filter);
959 	}
960 	return 0;
961 }
962 
963 /**
964  * damos_filters_default_reject() - decide whether to reject memory that didn't
965  *				    match any given filter.
966  * @filters:	Given DAMOS filters of a group.
967  */
968 static bool damos_filters_default_reject(struct list_head *filters)
969 {
970 	struct damos_filter *last_filter;
971 
972 	if (list_empty(filters))
973 		return false;
974 	last_filter = list_last_entry(filters, struct damos_filter, list);
975 	return last_filter->allow;
976 }
977 
978 static void damos_set_filters_default_reject(struct damos *s)
979 {
980 	if (!list_empty(&s->ops_filters))
981 		s->core_filters_default_reject = false;
982 	else
983 		s->core_filters_default_reject =
984 			damos_filters_default_reject(&s->filters);
985 	s->ops_filters_default_reject =
986 		damos_filters_default_reject(&s->ops_filters);
987 }
988 
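/*
 * Worked example for the above: if a scheme has ops_filters of
 * [allow anon, reject memcg A] in that order, the last filter is a
 * reject-filter, so memory matching neither filter is allowed by default.
 * If the order is [reject memcg A, allow anon], the last filter is an
 * allow-filter, so unmatched memory is rejected by default.
 */
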
989 static int damos_commit_dests(struct damos *dst, struct damos *src)
990 {
991 	struct damos_migrate_dests *dst_dests, *src_dests;
992 
993 	dst_dests = &dst->migrate_dests;
994 	src_dests = &src->migrate_dests;
995 
996 	if (dst_dests->nr_dests != src_dests->nr_dests) {
997 		kfree(dst_dests->node_id_arr);
998 		kfree(dst_dests->weight_arr);
999 
1000 		dst_dests->node_id_arr = kmalloc_array(src_dests->nr_dests,
1001 			sizeof(*dst_dests->node_id_arr), GFP_KERNEL);
1002 		if (!dst_dests->node_id_arr) {
1003 			dst_dests->weight_arr = NULL;
1004 			return -ENOMEM;
1005 		}
1006 
1007 		dst_dests->weight_arr = kmalloc_array(src_dests->nr_dests,
1008 			sizeof(*dst_dests->weight_arr), GFP_KERNEL);
1009 		if (!dst_dests->weight_arr) {
1010 			/* ->node_id_arr will be freed by scheme destruction */
1011 			return -ENOMEM;
1012 		}
1013 	}
1014 
1015 	dst_dests->nr_dests = src_dests->nr_dests;
1016 	for (int i = 0; i < src_dests->nr_dests; i++) {
1017 		dst_dests->node_id_arr[i] = src_dests->node_id_arr[i];
1018 		dst_dests->weight_arr[i] = src_dests->weight_arr[i];
1019 	}
1020 
1021 	return 0;
1022 }
1023 
1024 static int damos_commit_filters(struct damos *dst, struct damos *src)
1025 {
1026 	int err;
1027 
1028 	err = damos_commit_core_filters(dst, src);
1029 	if (err)
1030 		return err;
1031 	err = damos_commit_ops_filters(dst, src);
1032 	if (err)
1033 		return err;
1034 	damos_set_filters_default_reject(dst);
1035 	return 0;
1036 }
1037 
1038 static struct damos *damon_nth_scheme(int n, struct damon_ctx *ctx)
1039 {
1040 	struct damos *s;
1041 	int i = 0;
1042 
1043 	damon_for_each_scheme(s, ctx) {
1044 		if (i++ == n)
1045 			return s;
1046 	}
1047 	return NULL;
1048 }
1049 
1050 static int damos_commit(struct damos *dst, struct damos *src)
1051 {
1052 	int err;
1053 
1054 	dst->pattern = src->pattern;
1055 	dst->action = src->action;
1056 	dst->apply_interval_us = src->apply_interval_us;
1057 
1058 	err = damos_commit_quota(&dst->quota, &src->quota);
1059 	if (err)
1060 		return err;
1061 
1062 	dst->wmarks = src->wmarks;
1063 	dst->target_nid = src->target_nid;
1064 
1065 	err = damos_commit_dests(dst, src);
1066 	if (err)
1067 		return err;
1068 
1069 	err = damos_commit_filters(dst, src);
1070 	return err;
1071 }
1072 
1073 static int damon_commit_schemes(struct damon_ctx *dst, struct damon_ctx *src)
1074 {
1075 	struct damos *dst_scheme, *next, *src_scheme, *new_scheme;
1076 	int i = 0, j = 0, err;
1077 
1078 	damon_for_each_scheme_safe(dst_scheme, next, dst) {
1079 		src_scheme = damon_nth_scheme(i++, src);
1080 		if (src_scheme) {
1081 			err = damos_commit(dst_scheme, src_scheme);
1082 			if (err)
1083 				return err;
1084 		} else {
1085 			damon_destroy_scheme(dst_scheme);
1086 		}
1087 	}
1088 
1089 	damon_for_each_scheme_safe(src_scheme, next, src) {
1090 		if (j++ < i)
1091 			continue;
1092 		new_scheme = damon_new_scheme(&src_scheme->pattern,
1093 				src_scheme->action,
1094 				src_scheme->apply_interval_us,
1095 				&src_scheme->quota, &src_scheme->wmarks,
1096 				NUMA_NO_NODE);
1097 		if (!new_scheme)
1098 			return -ENOMEM;
1099 		err = damos_commit(new_scheme, src_scheme);
1100 		if (err) {
1101 			damon_destroy_scheme(new_scheme);
1102 			return err;
1103 		}
1104 		damon_add_scheme(dst, new_scheme);
1105 	}
1106 	return 0;
1107 }
1108 
1109 static struct damon_target *damon_nth_target(int n, struct damon_ctx *ctx)
1110 {
1111 	struct damon_target *t;
1112 	int i = 0;
1113 
1114 	damon_for_each_target(t, ctx) {
1115 		if (i++ == n)
1116 			return t;
1117 	}
1118 	return NULL;
1119 }
1120 
1121 /*
1122  * The caller should ensure the regions of @src are
1123  * 1. valid (end >= start) and
1124  * 2. sorted by starting address.
1125  *
1126  * If @src has no region, @dst keeps current regions.
1127  */
1128 static int damon_commit_target_regions(
1129 		struct damon_target *dst, struct damon_target *src)
1130 {
1131 	struct damon_region *src_region;
1132 	struct damon_addr_range *ranges;
1133 	int i = 0, err;
1134 
1135 	damon_for_each_region(src_region, src)
1136 		i++;
1137 	if (!i)
1138 		return 0;
1139 
1140 	ranges = kmalloc_array(i, sizeof(*ranges), GFP_KERNEL | __GFP_NOWARN);
1141 	if (!ranges)
1142 		return -ENOMEM;
1143 	i = 0;
1144 	damon_for_each_region(src_region, src)
1145 		ranges[i++] = src_region->ar;
1146 	err = damon_set_regions(dst, ranges, i);
1147 	kfree(ranges);
1148 	return err;
1149 }
1150 
1151 static int damon_commit_target(
1152 		struct damon_target *dst, bool dst_has_pid,
1153 		struct damon_target *src, bool src_has_pid)
1154 {
1155 	int err;
1156 
1157 	err = damon_commit_target_regions(dst, src);
1158 	if (err)
1159 		return err;
1160 	if (dst_has_pid)
1161 		put_pid(dst->pid);
1162 	if (src_has_pid)
1163 		get_pid(src->pid);
1164 	dst->pid = src->pid;
1165 	return 0;
1166 }
1167 
1168 static int damon_commit_targets(
1169 		struct damon_ctx *dst, struct damon_ctx *src)
1170 {
1171 	struct damon_target *dst_target, *next, *src_target, *new_target;
1172 	int i = 0, j = 0, err;
1173 
1174 	damon_for_each_target_safe(dst_target, next, dst) {
1175 		src_target = damon_nth_target(i++, src);
1176 		if (src_target) {
1177 			err = damon_commit_target(
1178 					dst_target, damon_target_has_pid(dst),
1179 					src_target, damon_target_has_pid(src));
1180 			if (err)
1181 				return err;
1182 		} else {
1183 			struct damos *s;
1184 
1185 			damon_destroy_target(dst_target, dst);
1186 			damon_for_each_scheme(s, dst) {
1187 				if (s->quota.charge_target_from == dst_target) {
1188 					s->quota.charge_target_from = NULL;
1189 					s->quota.charge_addr_from = 0;
1190 				}
1191 			}
1192 		}
1193 	}
1194 
1195 	damon_for_each_target_safe(src_target, next, src) {
1196 		if (j++ < i)
1197 			continue;
1198 		new_target = damon_new_target();
1199 		if (!new_target)
1200 			return -ENOMEM;
1201 		err = damon_commit_target(new_target, false,
1202 				src_target, damon_target_has_pid(src));
1203 		if (err) {
1204 			damon_destroy_target(new_target, NULL);
1205 			return err;
1206 		}
1207 		damon_add_target(dst, new_target);
1208 	}
1209 	return 0;
1210 }
1211 
1212 /**
1213  * damon_commit_ctx() - Commit parameters of a DAMON context to another.
1214  * @dst:	The commit destination DAMON context.
1215  * @src:	The commit source DAMON context.
1216  *
1217  * This function copies user-specified parameters from @src to @dst and updates
1218  * the internal status and results accordingly.  Users should use this function
1219  * for context-level parameters update of a running context, instead of manual
1220  * in-place updates.
1221  *
1222  * This function should be called from parameters-update safe context, like
1223  * damon_call().
1224  */
1225 int damon_commit_ctx(struct damon_ctx *dst, struct damon_ctx *src)
1226 {
1227 	int err;
1228 
1229 	err = damon_commit_schemes(dst, src);
1230 	if (err)
1231 		return err;
1232 	err = damon_commit_targets(dst, src);
1233 	if (err)
1234 		return err;
1235 	/*
1236 	 * schemes and targets should be updated first, since
1237 	 * 1. damon_set_attrs() updates monitoring results of targets and
1238 	 * next_apply_sis of schemes, and
1239 	 * 2. ops update should be done after pid handling is done (target
1240 	 *    committing require putting pids).
1241 	 */
1242 	if (!damon_attrs_equals(&dst->attrs, &src->attrs)) {
1243 		err = damon_set_attrs(dst, &src->attrs);
1244 		if (err)
1245 			return err;
1246 	}
1247 	dst->ops = src->ops;
1248 
1249 	return 0;
1250 }
1251 
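/*
 * Example: an illustrative sketch (not upstream code) of using
 * damon_commit_ctx() from a damon_call()-safe context.  The wrapper struct
 * and function names are hypothetical.
 *
 *	struct example_commit_args {
 *		struct damon_ctx *dst;
 *		struct damon_ctx *src;
 *	};
 *
 *	static int example_commit_fn(void *data)
 *	{
 *		struct example_commit_args *args = data;
 *
 *		return damon_commit_ctx(args->dst, args->src);
 *	}
 *
 * A caller would set &damon_call_control->fn to example_commit_fn(),
 * &damon_call_control->data to a struct example_commit_args instance, and
 * invoke damon_call() against the running (destination) context.
 */
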
1252 /**
1253  * damon_nr_running_ctxs() - Return number of currently running contexts.
1254  */
1255 int damon_nr_running_ctxs(void)
1256 {
1257 	int nr_ctxs;
1258 
1259 	mutex_lock(&damon_lock);
1260 	nr_ctxs = nr_running_ctxs;
1261 	mutex_unlock(&damon_lock);
1262 
1263 	return nr_ctxs;
1264 }
1265 
1266 /* Returns the size upper limit for each monitoring region */
1267 static unsigned long damon_region_sz_limit(struct damon_ctx *ctx)
1268 {
1269 	struct damon_target *t;
1270 	struct damon_region *r;
1271 	unsigned long sz = 0;
1272 
1273 	damon_for_each_target(t, ctx) {
1274 		damon_for_each_region(r, t)
1275 			sz += damon_sz_region(r);
1276 	}
1277 
1278 	if (ctx->attrs.min_nr_regions)
1279 		sz /= ctx->attrs.min_nr_regions;
1280 	if (sz < DAMON_MIN_REGION)
1281 		sz = DAMON_MIN_REGION;
1282 
1283 	return sz;
1284 }
1285 
1286 static int kdamond_fn(void *data);
1287 
1288 /*
1289  * __damon_start() - Starts monitoring with given context.
1290  * @ctx:	monitoring context
1291  *
1292  * This function should be called while damon_lock is held.
1293  *
1294  * Return: 0 on success, negative error code otherwise.
1295  */
1296 static int __damon_start(struct damon_ctx *ctx)
1297 {
1298 	int err = -EBUSY;
1299 
1300 	mutex_lock(&ctx->kdamond_lock);
1301 	if (!ctx->kdamond) {
1302 		err = 0;
1303 		reinit_completion(&ctx->kdamond_started);
1304 		ctx->kdamond = kthread_run(kdamond_fn, ctx, "kdamond.%d",
1305 				nr_running_ctxs);
1306 		if (IS_ERR(ctx->kdamond)) {
1307 			err = PTR_ERR(ctx->kdamond);
1308 			ctx->kdamond = NULL;
1309 		} else {
1310 			wait_for_completion(&ctx->kdamond_started);
1311 		}
1312 	}
1313 	mutex_unlock(&ctx->kdamond_lock);
1314 
1315 	return err;
1316 }
1317 
1318 /**
1319  * damon_start() - Starts monitoring for a given group of contexts.
1320  * @ctxs:	an array of the pointers for contexts to start monitoring
1321  * @nr_ctxs:	size of @ctxs
1322  * @exclusive:	exclusiveness of this contexts group
1323  *
1324  * This function starts a group of monitoring threads for a group of monitoring
1325  * contexts.  One thread per each context is created and run in parallel.  The
1326  * caller should handle synchronization between the threads by itself.  If
1327  * @exclusive is true and a group of threads that created by other
1328  * 'damon_start()' call is currently running, this function does nothing but
1329  * returns -EBUSY.
1330  *
1331  * Return: 0 on success, negative error code otherwise.
1332  */
1333 int damon_start(struct damon_ctx **ctxs, int nr_ctxs, bool exclusive)
1334 {
1335 	int i;
1336 	int err = 0;
1337 
1338 	mutex_lock(&damon_lock);
1339 	if ((exclusive && nr_running_ctxs) ||
1340 			(!exclusive && running_exclusive_ctxs)) {
1341 		mutex_unlock(&damon_lock);
1342 		return -EBUSY;
1343 	}
1344 
1345 	for (i = 0; i < nr_ctxs; i++) {
1346 		err = __damon_start(ctxs[i]);
1347 		if (err)
1348 			break;
1349 		nr_running_ctxs++;
1350 	}
1351 	if (exclusive && nr_running_ctxs)
1352 		running_exclusive_ctxs = true;
1353 	mutex_unlock(&damon_lock);
1354 
1355 	return err;
1356 }
1357 
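/*
 * Example: an illustrative sketch (not upstream code) of building and
 * exclusively starting a single physical address space monitoring context.
 * Monitoring target regions would further be set with damon_set_regions().
 *
 *	static struct damon_ctx *example_start_paddr(void)
 *	{
 *		struct damon_ctx *ctx = damon_new_ctx();
 *		struct damon_target *t;
 *
 *		if (!ctx)
 *			return NULL;
 *		if (damon_select_ops(ctx, DAMON_OPS_PADDR))
 *			goto out_err;
 *		t = damon_new_target();
 *		if (!t)
 *			goto out_err;
 *		damon_add_target(ctx, t);
 *		if (damon_start(&ctx, 1, true))
 *			goto out_err;
 *		return ctx;
 *	out_err:
 *		damon_destroy_ctx(ctx);
 *		return NULL;
 *	}
 */
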
1358 /*
1359  * __damon_stop() - Stops monitoring of a given context.
1360  * @ctx:	monitoring context
1361  *
1362  * Return: 0 on success, negative error code otherwise.
1363  */
1364 static int __damon_stop(struct damon_ctx *ctx)
1365 {
1366 	struct task_struct *tsk;
1367 
1368 	mutex_lock(&ctx->kdamond_lock);
1369 	tsk = ctx->kdamond;
1370 	if (tsk) {
1371 		get_task_struct(tsk);
1372 		mutex_unlock(&ctx->kdamond_lock);
1373 		kthread_stop_put(tsk);
1374 		return 0;
1375 	}
1376 	mutex_unlock(&ctx->kdamond_lock);
1377 
1378 	return -EPERM;
1379 }
1380 
1381 /**
1382  * damon_stop() - Stops monitoring for a given group of contexts.
1383  * @ctxs:	an array of the pointers for contexts to stop monitoring
1384  * @nr_ctxs:	size of @ctxs
1385  *
1386  * Return: 0 on success, negative error code otherwise.
1387  */
1388 int damon_stop(struct damon_ctx **ctxs, int nr_ctxs)
1389 {
1390 	int i, err = 0;
1391 
1392 	for (i = 0; i < nr_ctxs; i++) {
1393 		/* nr_running_ctxs is decremented in kdamond_fn */
1394 		err = __damon_stop(ctxs[i]);
1395 		if (err)
1396 			break;
1397 	}
1398 	return err;
1399 }
1400 
1401 /**
1402  * damon_is_running() - Returns whether a given DAMON context is running.
1403  * @ctx:	The DAMON context to see if running.
1404  *
1405  * Return: true if @ctx is running, false otherwise.
1406  */
1407 bool damon_is_running(struct damon_ctx *ctx)
1408 {
1409 	bool running;
1410 
1411 	mutex_lock(&ctx->kdamond_lock);
1412 	running = ctx->kdamond != NULL;
1413 	mutex_unlock(&ctx->kdamond_lock);
1414 	return running;
1415 }
1416 
1417 /**
1418  * damon_call() - Invoke a given function on DAMON worker thread (kdamond).
1419  * @ctx:	DAMON context to call the function for.
1420  * @control:	Control variable of the call request.
1421  *
1422  * Ask DAMON worker thread (kdamond) of @ctx to call a function with an
1423  * argument data that are respectively passed via &damon_call_control->fn and
1424  * &damon_call_control->data of @control.  Unless &damon_call_control->repeat
1425  * of @control is set, this function waits until the kdamond finishes handling
1426  * the request.  Otherwise, it returns as soon as the request is made.
1427  *
1428  * The kdamond executes the function with the argument in the main loop, just
1429  * after a sampling of the iteration is finished.  The function can hence
1430  * safely access the internal data of the &struct damon_ctx without additional
1431  * synchronization.  The return value of the function will be saved in
1432  * &damon_call_control->return_code.
1433  *
1434  * Return: 0 on success, negative error code otherwise.
1435  */
1436 int damon_call(struct damon_ctx *ctx, struct damon_call_control *control)
1437 {
1438 	if (!control->repeat)
1439 		init_completion(&control->completion);
1440 	control->canceled = false;
1441 	INIT_LIST_HEAD(&control->list);
1442 
1443 	mutex_lock(&ctx->call_controls_lock);
1444 	list_add_tail(&control->list, &ctx->call_controls);
1445 	mutex_unlock(&ctx->call_controls_lock);
1446 	if (!damon_is_running(ctx))
1447 		return -EINVAL;
1448 	if (control->repeat)
1449 		return 0;
1450 	wait_for_completion(&control->completion);
1451 	if (control->canceled)
1452 		return -ECANCELED;
1453 	return 0;
1454 }
1455 
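/*
 * Example: an illustrative sketch (not upstream code) of synchronously
 * counting the schemes of a running context from its kdamond, without
 * additional locking.  The struct and function names are hypothetical.
 *
 *	struct example_call_args {
 *		struct damon_ctx *ctx;
 *		int nr_schemes;
 *	};
 *
 *	static int example_nr_schemes_fn(void *data)
 *	{
 *		struct example_call_args *args = data;
 *		struct damos *s;
 *
 *		damon_for_each_scheme(s, args->ctx)
 *			args->nr_schemes++;
 *		return 0;
 *	}
 *
 *	static int example_nr_schemes(struct damon_ctx *ctx)
 *	{
 *		struct example_call_args args = { .ctx = ctx };
 *		struct damon_call_control control = {
 *			.fn = example_nr_schemes_fn,
 *			.data = &args,
 *		};
 *		int err = damon_call(ctx, &control);
 *
 *		return err ? err : args.nr_schemes;
 *	}
 */
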
1456 /**
1457  * damos_walk() - Invoke a given function while DAMOS walks regions.
1458  * @ctx:	DAMON context to call the functions for.
1459  * @control:	Control variable of the walk request.
1460  *
1461  * Ask DAMON worker thread (kdamond) of @ctx to call a function for each region
1462  * that the kdamond will apply DAMOS action to, and wait until the kdamond
1463  * finishes handling of the request.
1464  *
1465  * The kdamond executes the given function in the main loop, for each region
1466  * just after it applied any DAMOS actions of @ctx to it.  The invocation is
1467  * made only within one &damos->apply_interval_us since damos_walk()
1468  * invocation, for each scheme.  The given callback function can hence safely
1469  * access the internal data of &struct damon_ctx and &struct damon_region that
1470  * each of the schemes will apply the action to in the next interval, without
1471  * additional synchronizations against the kdamond.  If every scheme of @ctx
1472  * passed at least one &damos->apply_interval_us, kdamond marks the request as
1473  * completed so that damos_walk() can wakeup and return.
1474  *
1475  * Return: 0 on success, negative error code otherwise.
1476  */
1477 int damos_walk(struct damon_ctx *ctx, struct damos_walk_control *control)
1478 {
1479 	init_completion(&control->completion);
1480 	control->canceled = false;
1481 	mutex_lock(&ctx->walk_control_lock);
1482 	if (ctx->walk_control) {
1483 		mutex_unlock(&ctx->walk_control_lock);
1484 		return -EBUSY;
1485 	}
1486 	ctx->walk_control = control;
1487 	mutex_unlock(&ctx->walk_control_lock);
1488 	if (!damon_is_running(ctx))
1489 		return -EINVAL;
1490 	wait_for_completion(&control->completion);
1491 	if (control->canceled)
1492 		return -ECANCELED;
1493 	return 0;
1494 }
1495 
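/*
 * Example: an illustrative sketch (not upstream code) of walking the regions
 * that the schemes of a running context will apply their actions to.  The
 * walk_fn signature follows the invocation in damos_walk_call_walk() below;
 * a void return type is assumed.
 *
 *	static void example_walk_fn(void *data, struct damon_ctx *ctx,
 *			struct damon_target *t, struct damon_region *r,
 *			struct damos *s, unsigned long sz_filter_passed)
 *	{
 *		pr_info("apply to [%lu, %lu)\n", r->ar.start, r->ar.end);
 *	}
 *
 *	static int example_walk(struct damon_ctx *ctx)
 *	{
 *		struct damos_walk_control control = {
 *			.walk_fn = example_walk_fn,
 *		};
 *
 *		return damos_walk(ctx, &control);
 *	}
 */
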
1496 /*
1497  * Warn about and fix corrupted ->nr_accesses[_bp] to aid investigations and
1498  * prevent the problem from being propagated.
1499  */
1500 static void damon_warn_fix_nr_accesses_corruption(struct damon_region *r)
1501 {
1502 	if (r->nr_accesses_bp == r->nr_accesses * 10000)
1503 		return;
1504 	WARN_ONCE(true, "invalid nr_accesses_bp at reset: %u %u\n",
1505 			r->nr_accesses_bp, r->nr_accesses);
1506 	r->nr_accesses_bp = r->nr_accesses * 10000;
1507 }
1508 
1509 /*
1510  * Reset the aggregated monitoring results ('nr_accesses' of each region).
1511  */
1512 static void kdamond_reset_aggregated(struct damon_ctx *c)
1513 {
1514 	struct damon_target *t;
1515 	unsigned int ti = 0;	/* target's index */
1516 
1517 	damon_for_each_target(t, c) {
1518 		struct damon_region *r;
1519 
1520 		damon_for_each_region(r, t) {
1521 			trace_damon_aggregated(ti, r, damon_nr_regions(t));
1522 			damon_warn_fix_nr_accesses_corruption(r);
1523 			r->last_nr_accesses = r->nr_accesses;
1524 			r->nr_accesses = 0;
1525 		}
1526 		ti++;
1527 	}
1528 }
1529 
1530 static unsigned long damon_get_intervals_score(struct damon_ctx *c)
1531 {
1532 	struct damon_target *t;
1533 	struct damon_region *r;
1534 	unsigned long sz_region, max_access_events = 0, access_events = 0;
1535 	unsigned long target_access_events;
1536 	unsigned long goal_bp = c->attrs.intervals_goal.access_bp;
1537 
1538 	damon_for_each_target(t, c) {
1539 		damon_for_each_region(r, t) {
1540 			sz_region = damon_sz_region(r);
1541 			max_access_events += sz_region * c->attrs.aggr_samples;
1542 			access_events += sz_region * r->nr_accesses;
1543 		}
1544 	}
1545 	target_access_events = max_access_events * goal_bp / 10000;
1546 	target_access_events = target_access_events ? : 1;
1547 	return access_events * 10000 / target_access_events;
1548 }
1549 
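/*
 * Worked example of the score above: for a single 100 MiB region with
 * aggr_samples of 20 and nr_accesses of 2, and an access_bp goal of 400 (4%),
 * max_access_events is sz * 20, access_events is sz * 2, and
 * target_access_events is sz * 20 * 400 / 10000, i.e., sz * 0.8.  The
 * returned score is therefore sz * 2 * 10000 / (sz * 0.8) = 25000, i.e.,
 * 2.5 times the targeted access events.
 */
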
1550 static unsigned long damon_feed_loop_next_input(unsigned long last_input,
1551 		unsigned long score);
1552 
1553 static unsigned long damon_get_intervals_adaptation_bp(struct damon_ctx *c)
1554 {
1555 	unsigned long score_bp, adaptation_bp;
1556 
1557 	score_bp = damon_get_intervals_score(c);
1558 	adaptation_bp = damon_feed_loop_next_input(100000000, score_bp) /
1559 		10000;
1560 	/*
1561 	 * adaptation_bp ranges from 1 to 20,000.  Avoid too rapid reduction of
1562 	 * the intervals by rescaling [1, 10,000] to [5,000, 10,000].
1563 	 */
1564 	if (adaptation_bp <= 10000)
1565 		adaptation_bp = 5000 + adaptation_bp / 2;
1566 	return adaptation_bp;
1567 }
1568 
1569 static void kdamond_tune_intervals(struct damon_ctx *c)
1570 {
1571 	unsigned long adaptation_bp;
1572 	struct damon_attrs new_attrs;
1573 	struct damon_intervals_goal *goal;
1574 
1575 	adaptation_bp = damon_get_intervals_adaptation_bp(c);
1576 	if (adaptation_bp == 10000)
1577 		return;
1578 
1579 	new_attrs = c->attrs;
1580 	goal = &c->attrs.intervals_goal;
1581 	new_attrs.sample_interval = min(goal->max_sample_us,
1582 			c->attrs.sample_interval * adaptation_bp / 10000);
1583 	new_attrs.sample_interval = max(goal->min_sample_us,
1584 			new_attrs.sample_interval);
1585 	new_attrs.aggr_interval = new_attrs.sample_interval *
1586 		c->attrs.aggr_samples;
1587 	trace_damon_monitor_intervals_tune(new_attrs.sample_interval);
1588 	damon_set_attrs(c, &new_attrs);
1589 }
1590 
1591 static void damon_split_region_at(struct damon_target *t,
1592 				  struct damon_region *r, unsigned long sz_r);
1593 
1594 static bool __damos_valid_target(struct damon_region *r, struct damos *s)
1595 {
1596 	unsigned long sz;
1597 	unsigned int nr_accesses = r->nr_accesses_bp / 10000;
1598 
1599 	sz = damon_sz_region(r);
1600 	return s->pattern.min_sz_region <= sz &&
1601 		sz <= s->pattern.max_sz_region &&
1602 		s->pattern.min_nr_accesses <= nr_accesses &&
1603 		nr_accesses <= s->pattern.max_nr_accesses &&
1604 		s->pattern.min_age_region <= r->age &&
1605 		r->age <= s->pattern.max_age_region;
1606 }
1607 
1608 static bool damos_valid_target(struct damon_ctx *c, struct damon_target *t,
1609 		struct damon_region *r, struct damos *s)
1610 {
1611 	bool ret = __damos_valid_target(r, s);
1612 
1613 	if (!ret || !s->quota.esz || !c->ops.get_scheme_score)
1614 		return ret;
1615 
1616 	return c->ops.get_scheme_score(c, t, r, s) >= s->quota.min_score;
1617 }
1618 
1619 /*
1620  * damos_skip_charged_region() - Check if the given region or starting part of
1621  * it is already charged for the DAMOS quota.
1622  * @t:	The target of the region.
1623  * @rp:	The pointer to the region.
1624  * @s:	The scheme to be applied.
1625  *
1626  * If a quota of a scheme has been exceeded in a quota charge window, the
1627  * scheme's action would be applied to only a part of the target access
1628  * pattern fulfilling regions.  To avoid applying the scheme action only to
1629  * already applied regions, DAMON skips applying the scheme action to the
1630  * regions that were charged in the previous charge window.
1631  *
1632  * This function checks if a given region should be skipped or not for the
1633  * reason.  If only the starting part of the region has previously been
1634  * charged, this function splits the region into two so that the second one
1635  * covers the area that was not charged in the previous charge window, saves
1636  * the second region in *rp, and returns false, so that the caller can apply
1637  * the DAMON action to the second one.
1638  *
1639  * Return: true if the region should be entirely skipped, false otherwise.
1640  */
1641 static bool damos_skip_charged_region(struct damon_target *t,
1642 		struct damon_region **rp, struct damos *s)
1643 {
1644 	struct damon_region *r = *rp;
1645 	struct damos_quota *quota = &s->quota;
1646 	unsigned long sz_to_skip;
1647 
1648 	/* Skip previously charged regions */
1649 	if (quota->charge_target_from) {
1650 		if (t != quota->charge_target_from)
1651 			return true;
1652 		if (r == damon_last_region(t)) {
1653 			quota->charge_target_from = NULL;
1654 			quota->charge_addr_from = 0;
1655 			return true;
1656 		}
1657 		if (quota->charge_addr_from &&
1658 				r->ar.end <= quota->charge_addr_from)
1659 			return true;
1660 
1661 		if (quota->charge_addr_from && r->ar.start <
1662 				quota->charge_addr_from) {
1663 			sz_to_skip = ALIGN_DOWN(quota->charge_addr_from -
1664 					r->ar.start, DAMON_MIN_REGION);
1665 			if (!sz_to_skip) {
1666 				if (damon_sz_region(r) <= DAMON_MIN_REGION)
1667 					return true;
1668 				sz_to_skip = DAMON_MIN_REGION;
1669 			}
1670 			damon_split_region_at(t, r, sz_to_skip);
1671 			r = damon_next_region(r);
1672 			*rp = r;
1673 		}
1674 		quota->charge_target_from = NULL;
1675 		quota->charge_addr_from = 0;
1676 	}
1677 	return false;
1678 }
1679 
1680 static void damos_update_stat(struct damos *s,
1681 		unsigned long sz_tried, unsigned long sz_applied,
1682 		unsigned long sz_ops_filter_passed)
1683 {
1684 	s->stat.nr_tried++;
1685 	s->stat.sz_tried += sz_tried;
1686 	if (sz_applied)
1687 		s->stat.nr_applied++;
1688 	s->stat.sz_applied += sz_applied;
1689 	s->stat.sz_ops_filter_passed += sz_ops_filter_passed;
1690 }
1691 
1692 static bool damos_filter_match(struct damon_ctx *ctx, struct damon_target *t,
1693 		struct damon_region *r, struct damos_filter *filter)
1694 {
1695 	bool matched = false;
1696 	struct damon_target *ti;
1697 	int target_idx = 0;
1698 	unsigned long start, end;
1699 
1700 	switch (filter->type) {
1701 	case DAMOS_FILTER_TYPE_TARGET:
1702 		damon_for_each_target(ti, ctx) {
1703 			if (ti == t)
1704 				break;
1705 			target_idx++;
1706 		}
1707 		matched = target_idx == filter->target_idx;
1708 		break;
1709 	case DAMOS_FILTER_TYPE_ADDR:
1710 		start = ALIGN_DOWN(filter->addr_range.start, DAMON_MIN_REGION);
1711 		end = ALIGN_DOWN(filter->addr_range.end, DAMON_MIN_REGION);
1712 
1713 		/* inside the range */
1714 		if (start <= r->ar.start && r->ar.end <= end) {
1715 			matched = true;
1716 			break;
1717 		}
1718 		/* outside of the range */
1719 		if (r->ar.end <= start || end <= r->ar.start) {
1720 			matched = false;
1721 			break;
1722 		}
1723 		/* start before the range and overlap */
1724 		if (r->ar.start < start) {
1725 			damon_split_region_at(t, r, start - r->ar.start);
1726 			matched = false;
1727 			break;
1728 		}
1729 		/* start inside the range */
1730 		damon_split_region_at(t, r, end - r->ar.start);
1731 		matched = true;
1732 		break;
1733 	default:
1734 		return false;
1735 	}
1736 
1737 	return matched == filter->matching;
1738 }
1739 
1740 static bool damos_filter_out(struct damon_ctx *ctx, struct damon_target *t,
1741 		struct damon_region *r, struct damos *s)
1742 {
1743 	struct damos_filter *filter;
1744 
1745 	s->core_filters_allowed = false;
1746 	damos_for_each_filter(filter, s) {
1747 		if (damos_filter_match(ctx, t, r, filter)) {
1748 			if (filter->allow)
1749 				s->core_filters_allowed = true;
1750 			return !filter->allow;
1751 		}
1752 	}
1753 	return s->core_filters_default_reject;
1754 }
1755 
1756 /*
1757  * damos_walk_call_walk() - Call &damos_walk_control->walk_fn.
1758  * @ctx:	The context of &damon_ctx->walk_control.
1759  * @t:		The monitoring target of @r that @s will be applied.
1760  * @r:		The region of @t that @s will be applied.
1761  * @s:		The scheme of @ctx that will be applied to @r.
1762  *
1763  * This function is called from kdamond whenever it asked the operation set to
1764  * apply a DAMOS scheme action to a region.  If a DAMOS walk request is
1765  * installed by damos_walk() and not yet uninstalled, invoke it.
1766  */
1767 static void damos_walk_call_walk(struct damon_ctx *ctx, struct damon_target *t,
1768 		struct damon_region *r, struct damos *s,
1769 		unsigned long sz_filter_passed)
1770 {
1771 	struct damos_walk_control *control;
1772 
1773 	if (s->walk_completed)
1774 		return;
1775 
1776 	control = ctx->walk_control;
1777 	if (!control)
1778 		return;
1779 
1780 	control->walk_fn(control->data, ctx, t, r, s, sz_filter_passed);
1781 }
1782 
1783 /*
1784  * damos_walk_complete() - Complete DAMOS walk request if all walks are done.
1785  * @ctx:	The context of &damon_ctx->walk_control.
1786  * @s:		A scheme of @ctx for which all walks are now done.
1787  *
1788  * This function is called when kdamond finished applying the action of a DAMOS
1789  * scheme to all regions eligible for the given &damos->apply_interval_us.  If
1790  * every scheme of @ctx, including @s, has now finished walking for at least
1791  * one &damos->apply_interval_us, this function marks the handling of the given
1792  * DAMOS walk request as done, so that damos_walk() can wake up and return.
1793  */
1794 static void damos_walk_complete(struct damon_ctx *ctx, struct damos *s)
1795 {
1796 	struct damos *siter;
1797 	struct damos_walk_control *control;
1798 
1799 	control = ctx->walk_control;
1800 	if (!control)
1801 		return;
1802 
1803 	s->walk_completed = true;
1804 	/* if all schemes completed, signal completion to walker */
1805 	damon_for_each_scheme(siter, ctx) {
1806 		if (!siter->walk_completed)
1807 			return;
1808 	}
1809 	damon_for_each_scheme(siter, ctx)
1810 		siter->walk_completed = false;
1811 
1812 	complete(&control->completion);
1813 	ctx->walk_control = NULL;
1814 }
1815 
1816 /*
1817  * damos_walk_cancel() - Cancel the current DAMOS walk request.
1818  * @ctx:	The context of &damon_ctx->walk_control.
1819  *
1820  * This function is called when @ctx is deactivated by DAMOS watermarks, DAMOS
1821  * walk is requested but there is no DAMOS scheme to walk for, or the kdamond
1822  * is already out of the main loop and therefore going to be terminated, and hence
1823  * cannot continue the walks.  This function therefore marks the walk request
1824  * as canceled, so that damos_walk() can wake up and return.
1825  */
1826 static void damos_walk_cancel(struct damon_ctx *ctx)
1827 {
1828 	struct damos_walk_control *control;
1829 
1830 	mutex_lock(&ctx->walk_control_lock);
1831 	control = ctx->walk_control;
1832 	mutex_unlock(&ctx->walk_control_lock);
1833 
1834 	if (!control)
1835 		return;
1836 	control->canceled = true;
1837 	complete(&control->completion);
1838 	mutex_lock(&ctx->walk_control_lock);
1839 	ctx->walk_control = NULL;
1840 	mutex_unlock(&ctx->walk_control_lock);
1841 }
1842 
1843 static void damos_apply_scheme(struct damon_ctx *c, struct damon_target *t,
1844 		struct damon_region *r, struct damos *s)
1845 {
1846 	struct damos_quota *quota = &s->quota;
1847 	unsigned long sz = damon_sz_region(r);
1848 	struct timespec64 begin, end;
1849 	unsigned long sz_applied = 0;
1850 	unsigned long sz_ops_filter_passed = 0;
1851 	/*
1852 	 * We plan to support multiple contexts per kdamond, as the DAMON sysfs
1853 	 * 'nr_contexts' file implies.  Nevertheless, only a single context per
1854 	 * kdamond is supported for now.  So, we can simply use context index
1855 	 * '0' here.
1856 	 */
1857 	unsigned int cidx = 0;
1858 	struct damos *siter;		/* schemes iterator */
1859 	unsigned int sidx = 0;
1860 	struct damon_target *titer;	/* targets iterator */
1861 	unsigned int tidx = 0;
1862 	bool do_trace = false;
1863 
1864 	/* get indices for trace_damos_before_apply() */
1865 	if (trace_damos_before_apply_enabled()) {
1866 		damon_for_each_scheme(siter, c) {
1867 			if (siter == s)
1868 				break;
1869 			sidx++;
1870 		}
1871 		damon_for_each_target(titer, c) {
1872 			if (titer == t)
1873 				break;
1874 			tidx++;
1875 		}
1876 		do_trace = true;
1877 	}
1878 
1879 	if (c->ops.apply_scheme) {
1880 		if (quota->esz && quota->charged_sz + sz > quota->esz) {
1881 			sz = ALIGN_DOWN(quota->esz - quota->charged_sz,
1882 					DAMON_MIN_REGION);
1883 			if (!sz)
1884 				goto update_stat;
1885 			damon_split_region_at(t, r, sz);
1886 		}
1887 		if (damos_filter_out(c, t, r, s))
1888 			return;
1889 		ktime_get_coarse_ts64(&begin);
1890 		trace_damos_before_apply(cidx, sidx, tidx, r,
1891 				damon_nr_regions(t), do_trace);
1892 		sz_applied = c->ops.apply_scheme(c, t, r, s,
1893 				&sz_ops_filter_passed);
1894 		damos_walk_call_walk(c, t, r, s, sz_ops_filter_passed);
1895 		ktime_get_coarse_ts64(&end);
1896 		quota->total_charged_ns += timespec64_to_ns(&end) -
1897 			timespec64_to_ns(&begin);
1898 		quota->charged_sz += sz;
1899 		if (quota->esz && quota->charged_sz >= quota->esz) {
1900 			quota->charge_target_from = t;
1901 			quota->charge_addr_from = r->ar.end + 1;
1902 		}
1903 	}
1904 	if (s->action != DAMOS_STAT)
1905 		r->age = 0;
1906 
1907 update_stat:
1908 	damos_update_stat(s, sz, sz_applied, sz_ops_filter_passed);
1909 }
1910 
1911 static void damon_do_apply_schemes(struct damon_ctx *c,
1912 				   struct damon_target *t,
1913 				   struct damon_region *r)
1914 {
1915 	struct damos *s;
1916 
1917 	damon_for_each_scheme(s, c) {
1918 		struct damos_quota *quota = &s->quota;
1919 
1920 		if (c->passed_sample_intervals < s->next_apply_sis)
1921 			continue;
1922 
1923 		if (!s->wmarks.activated)
1924 			continue;
1925 
1926 		/* Check the quota */
1927 		if (quota->esz && quota->charged_sz >= quota->esz)
1928 			continue;
1929 
1930 		if (damos_skip_charged_region(t, &r, s))
1931 			continue;
1932 
1933 		if (!damos_valid_target(c, t, r, s))
1934 			continue;
1935 
1936 		damos_apply_scheme(c, t, r, s);
1937 	}
1938 }
1939 
1940 /*
1941  * damon_feed_loop_next_input() - get next input to achieve a target score.
1942  * @last_input:	The last input.
1943  * @score:	Current score that was made with @last_input.
1944  *
1945  * Calculate the next input to achieve the target score, based on the last
1946  * input and the current score.  Assuming the input and the score are
1947  * positively proportional, calculate how much compensation should be added
1948  * to or subtracted from the last input, as a proportion of the last input.
1949  * To keep the feedback loop running, the next input is kept non-zero.  In
1950  * short form (assuming support of float and signed calculations), the
1951  * algorithm is as below.
1952  *
1953  * next_input = max(last_input * ((goal - current) / goal + 1), 1)
1954  *
1955  * For a simple implementation, we assume the target score is always 10,000.
1956  * The caller should adjust @score for this.
1957  *
1958  * Returns the next input that is assumed to achieve the target score.
1959  */
1960 static unsigned long damon_feed_loop_next_input(unsigned long last_input,
1961 		unsigned long score)
1962 {
1963 	const unsigned long goal = 10000;
1964 	/* Set the minimum input as 10000 to keep the compensation non-zero */
1965 	const unsigned long min_input = 10000;
1966 	unsigned long score_goal_diff, compensation;
1967 	bool over_achieving = score > goal;
1968 
1969 	if (score == goal)
1970 		return last_input;
1971 	if (score >= goal * 2)
1972 		return min_input;
1973 
1974 	if (over_achieving)
1975 		score_goal_diff = score - goal;
1976 	else
1977 		score_goal_diff = goal - score;
1978 
1979 	if (last_input < ULONG_MAX / score_goal_diff)
1980 		compensation = last_input * score_goal_diff / goal;
1981 	else
1982 		compensation = last_input / goal * score_goal_diff;
1983 
1984 	if (over_achieving)
1985 		return max(last_input - compensation, min_input);
1986 	if (last_input < ULONG_MAX - compensation)
1987 		return last_input + compensation;
1988 	return ULONG_MAX;
1989 }
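
/*
 * A worked example of the feedback loop above, with the goal score fixed at
 * 10,000: if @last_input is 1,000,000 and @score is 5,000 (under-achieving by
 * half), score_goal_diff is 5,000 and the compensation is
 * 1,000,000 * 5,000 / 10,000 = 500,000, so the next input becomes 1,500,000.
 * If @score were 15,000 instead (over-achieving), the next input would be
 * max(1,000,000 - 500,000, 10,000) = 500,000.
 */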
1990 
1991 #ifdef CONFIG_PSI
1992 
1993 static u64 damos_get_some_mem_psi_total(void)
1994 {
1995 	if (static_branch_likely(&psi_disabled))
1996 		return 0;
1997 	return div_u64(psi_system.total[PSI_AVGS][PSI_MEM * 2],
1998 			NSEC_PER_USEC);
1999 }
2000 
2001 #else	/* CONFIG_PSI */
2002 
2003 static inline u64 damos_get_some_mem_psi_total(void)
2004 {
2005 	return 0;
2006 }
2007 
2008 #endif	/* CONFIG_PSI */
2009 
2010 #ifdef CONFIG_NUMA
2011 static __kernel_ulong_t damos_get_node_mem_bp(
2012 		struct damos_quota_goal *goal)
2013 {
2014 	struct sysinfo i;
2015 	__kernel_ulong_t numerator;
2016 
2017 	si_meminfo_node(&i, goal->nid);
2018 	if (goal->metric == DAMOS_QUOTA_NODE_MEM_USED_BP)
2019 		numerator = i.totalram - i.freeram;
2020 	else	/* DAMOS_QUOTA_NODE_MEM_FREE_BP */
2021 		numerator = i.freeram;
2022 	return numerator * 10000 / i.totalram;
2023 }
2024 #else
2025 static __kernel_ulong_t damos_get_node_mem_bp(
2026 		struct damos_quota_goal *goal)
2027 {
2028 	return 0;
2029 }
2030 #endif
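
/*
 * For illustration: on a hypothetical node where i.totalram is 1,048,576
 * pages and i.freeram is 262,144 pages, DAMOS_QUOTA_NODE_MEM_USED_BP
 * evaluates to (1,048,576 - 262,144) * 10000 / 1,048,576 = 7,500 (75%), and
 * DAMOS_QUOTA_NODE_MEM_FREE_BP to 2,500 (25%).
 */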
2031
2033 static void damos_set_quota_goal_current_value(struct damos_quota_goal *goal)
2034 {
2035 	u64 now_psi_total;
2036 
2037 	switch (goal->metric) {
2038 	case DAMOS_QUOTA_USER_INPUT:
2039 		/* User should already set goal->current_value */
2040 		break;
2041 	case DAMOS_QUOTA_SOME_MEM_PSI_US:
2042 		now_psi_total = damos_get_some_mem_psi_total();
2043 		goal->current_value = now_psi_total - goal->last_psi_total;
2044 		goal->last_psi_total = now_psi_total;
2045 		break;
2046 	case DAMOS_QUOTA_NODE_MEM_USED_BP:
2047 	case DAMOS_QUOTA_NODE_MEM_FREE_BP:
2048 		goal->current_value = damos_get_node_mem_bp(goal);
2049 		break;
2050 	default:
2051 		break;
2052 	}
2053 }
2054 
2055 /* Return the highest score since it makes schemes least aggressive */
2056 static unsigned long damos_quota_score(struct damos_quota *quota)
2057 {
2058 	struct damos_quota_goal *goal;
2059 	unsigned long highest_score = 0;
2060 
2061 	damos_for_each_quota_goal(goal, quota) {
2062 		damos_set_quota_goal_current_value(goal);
2063 		highest_score = max(highest_score,
2064 				goal->current_value * 10000 /
2065 				goal->target_value);
2066 	}
2067 
2068 	return highest_score;
2069 }
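
/*
 * For example, if a quota has two goals, one at 150% of its target value
 * (score 15,000) and another at 80% (score 8,000), the returned score is
 * 15,000, so the feedback loop shrinks the effective quota based on the most
 * over-achieved goal.
 */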
2070 
2071 /*
2072  * Called only if quota->ms or quota->sz is set, or quota->goals is not empty
2073  */
2074 static void damos_set_effective_quota(struct damos_quota *quota)
2075 {
2076 	unsigned long throughput;
2077 	unsigned long esz = ULONG_MAX;
2078 
2079 	if (!quota->ms && list_empty(&quota->goals)) {
2080 		quota->esz = quota->sz;
2081 		return;
2082 	}
2083 
2084 	if (!list_empty(&quota->goals)) {
2085 		unsigned long score = damos_quota_score(quota);
2086 
2087 		quota->esz_bp = damon_feed_loop_next_input(
2088 				max(quota->esz_bp, 10000UL),
2089 				score);
2090 		esz = quota->esz_bp / 10000;
2091 	}
2092 
2093 	if (quota->ms) {
2094 		if (quota->total_charged_ns)
2095 			throughput = mult_frac(quota->total_charged_sz, 1000000,
2096 							quota->total_charged_ns);
2097 		else
2098 			throughput = PAGE_SIZE * 1024;
2099 		esz = min(throughput * quota->ms, esz);
2100 	}
2101 
2102 	if (quota->sz && quota->sz < esz)
2103 		esz = quota->sz;
2104 
2105 	quota->esz = esz;
2106 }
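
/*
 * A hypothetical walk-through of the above: suppose quota->ms is 100 and
 * 10 MiB (10,485,760 bytes) were charged over one second (1,000,000,000 ns)
 * in total so far.  The estimated throughput is then
 * mult_frac(10485760, 1000000, 1000000000) = 10,485 bytes per millisecond,
 * so the time quota alone caps esz at 10,485 * 100 = 1,048,500 bytes, about
 * 1 MiB per charge window.  If quota->sz is additionally set to 524,288 bytes
 * (512 KiB), esz is clamped further down to that value.
 */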
2107 
2108 static void damos_trace_esz(struct damon_ctx *c, struct damos *s,
2109 		struct damos_quota *quota)
2110 {
2111 	unsigned int cidx = 0, sidx = 0;
2112 	struct damos *siter;
2113 
2114 	damon_for_each_scheme(siter, c) {
2115 		if (siter == s)
2116 			break;
2117 		sidx++;
2118 	}
2119 	trace_damos_esz(cidx, sidx, quota->esz);
2120 }
2121 
2122 static void damos_adjust_quota(struct damon_ctx *c, struct damos *s)
2123 {
2124 	struct damos_quota *quota = &s->quota;
2125 	struct damon_target *t;
2126 	struct damon_region *r;
2127 	unsigned long cumulated_sz, cached_esz;
2128 	unsigned int score, max_score = 0;
2129 
2130 	if (!quota->ms && !quota->sz && list_empty(&quota->goals))
2131 		return;
2132 
2133 	/* New charge window starts */
2134 	if (time_after_eq(jiffies, quota->charged_from +
2135 				msecs_to_jiffies(quota->reset_interval))) {
2136 		if (quota->esz && quota->charged_sz >= quota->esz)
2137 			s->stat.qt_exceeds++;
2138 		quota->total_charged_sz += quota->charged_sz;
2139 		quota->charged_from = jiffies;
2140 		quota->charged_sz = 0;
2141 		if (trace_damos_esz_enabled())
2142 			cached_esz = quota->esz;
2143 		damos_set_effective_quota(quota);
2144 		if (trace_damos_esz_enabled() && quota->esz != cached_esz)
2145 			damos_trace_esz(c, s, quota);
2146 	}
2147 
2148 	if (!c->ops.get_scheme_score)
2149 		return;
2150 
2151 	/* Fill up the score histogram */
2152 	memset(c->regions_score_histogram, 0,
2153 			sizeof(*c->regions_score_histogram) *
2154 			(DAMOS_MAX_SCORE + 1));
2155 	damon_for_each_target(t, c) {
2156 		damon_for_each_region(r, t) {
2157 			if (!__damos_valid_target(r, s))
2158 				continue;
2159 			score = c->ops.get_scheme_score(c, t, r, s);
2160 			c->regions_score_histogram[score] +=
2161 				damon_sz_region(r);
2162 			if (score > max_score)
2163 				max_score = score;
2164 		}
2165 	}
2166 
2167 	/* Set the min score limit */
2168 	for (cumulated_sz = 0, score = max_score; ; score--) {
2169 		cumulated_sz += c->regions_score_histogram[score];
2170 		if (cumulated_sz >= quota->esz || !score)
2171 			break;
2172 	}
2173 	quota->min_score = score;
2174 }
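
/*
 * For example, suppose quota->esz came out as 1 MiB and the valid regions got
 * prioritization scores of 90 (600 KiB in total), 80 (600 KiB in total), and
 * 50 (2 MiB in total).  Walking down from max_score, the cumulated size
 * reaches 1,200 KiB (>= 1 MiB) at score 80, so quota->min_score becomes 80
 * and only regions scoring 80 or higher receive the scheme's action in this
 * charge window.
 */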
2175 
2176 static void kdamond_apply_schemes(struct damon_ctx *c)
2177 {
2178 	struct damon_target *t;
2179 	struct damon_region *r, *next_r;
2180 	struct damos *s;
2181 	unsigned long sample_interval = c->attrs.sample_interval ?
2182 		c->attrs.sample_interval : 1;
2183 	bool has_schemes_to_apply = false;
2184 
2185 	damon_for_each_scheme(s, c) {
2186 		if (c->passed_sample_intervals < s->next_apply_sis)
2187 			continue;
2188 
2189 		if (!s->wmarks.activated)
2190 			continue;
2191 
2192 		has_schemes_to_apply = true;
2193 
2194 		damos_adjust_quota(c, s);
2195 	}
2196 
2197 	if (!has_schemes_to_apply)
2198 		return;
2199 
2200 	mutex_lock(&c->walk_control_lock);
2201 	damon_for_each_target(t, c) {
2202 		damon_for_each_region_safe(r, next_r, t)
2203 			damon_do_apply_schemes(c, t, r);
2204 	}
2205 
2206 	damon_for_each_scheme(s, c) {
2207 		if (c->passed_sample_intervals < s->next_apply_sis)
2208 			continue;
2209 		damos_walk_complete(c, s);
2210 		s->next_apply_sis = c->passed_sample_intervals +
2211 			(s->apply_interval_us ? s->apply_interval_us :
2212 			 c->attrs.aggr_interval) / sample_interval;
2213 		s->last_applied = NULL;
2214 	}
2215 	mutex_unlock(&c->walk_control_lock);
2216 }
2217 
2218 /*
2219  * Merge two adjacent regions into one region
2220  */
2221 static void damon_merge_two_regions(struct damon_target *t,
2222 		struct damon_region *l, struct damon_region *r)
2223 {
2224 	unsigned long sz_l = damon_sz_region(l), sz_r = damon_sz_region(r);
2225 
2226 	l->nr_accesses = (l->nr_accesses * sz_l + r->nr_accesses * sz_r) /
2227 			(sz_l + sz_r);
2228 	l->nr_accesses_bp = l->nr_accesses * 10000;
2229 	l->age = (l->age * sz_l + r->age * sz_r) / (sz_l + sz_r);
2230 	l->ar.end = r->ar.end;
2231 	damon_destroy_region(r, t);
2232 }
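
/*
 * For example, merging a 4 KiB region with nr_accesses of 10 into an adjacent
 * 12 KiB region with nr_accesses of 2 yields a 16 KiB region with nr_accesses
 * of (10 * 4096 + 2 * 12288) / 16384 = 4, i.e., the size-weighted average,
 * and nr_accesses_bp of 40,000.
 */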
2233 
2234 /*
2235  * Merge adjacent regions having similar access frequencies
2236  *
2237  * t		target affected by this merge operation
2238  * thres	'->nr_accesses' diff threshold for the merge
2239  * sz_limit	size upper limit of each region
2240  */
2241 static void damon_merge_regions_of(struct damon_target *t, unsigned int thres,
2242 				   unsigned long sz_limit)
2243 {
2244 	struct damon_region *r, *prev = NULL, *next;
2245 
2246 	damon_for_each_region_safe(r, next, t) {
2247 		if (abs(r->nr_accesses - r->last_nr_accesses) > thres)
2248 			r->age = 0;
2249 		else
2250 			r->age++;
2251 
2252 		if (prev && prev->ar.end == r->ar.start &&
2253 		    abs(prev->nr_accesses - r->nr_accesses) <= thres &&
2254 		    damon_sz_region(prev) + damon_sz_region(r) <= sz_limit)
2255 			damon_merge_two_regions(t, prev, r);
2256 		else
2257 			prev = r;
2258 	}
2259 }
2260 
2261 /*
2262  * Merge adjacent regions having similar access frequencies
2263  *
2264  * threshold	'->nr_accesses' diff threshold for the merge
2265  * sz_limit	size upper limit of each region
2266  *
2267  * This function merges monitoring target regions which are adjacent and their
2268  * access frequencies are similar.  This is for minimizing the monitoring
2269  * overhead under the dynamically changeable access pattern.  If a merge was
2270  * unnecessarily made, later 'kdamond_split_regions()' will revert it.
2271  *
2272  * The total number of regions could be higher than the user-defined limit,
2273  * max_nr_regions, in some cases.  For example, the user can update
2274  * max_nr_regions to a number lower than the current number of regions while
2275  * DAMON is running.  For such a case, repeat merging until the limit is met
2276  * while increasing @threshold up to the possible maximum level.
2277  */
2278 static void kdamond_merge_regions(struct damon_ctx *c, unsigned int threshold,
2279 				  unsigned long sz_limit)
2280 {
2281 	struct damon_target *t;
2282 	unsigned int nr_regions;
2283 	unsigned int max_thres;
2284 
2285 	max_thres = c->attrs.aggr_interval /
2286 		(c->attrs.sample_interval ?  c->attrs.sample_interval : 1);
2287 	do {
2288 		nr_regions = 0;
2289 		damon_for_each_target(t, c) {
2290 			damon_merge_regions_of(t, threshold, sz_limit);
2291 			nr_regions += damon_nr_regions(t);
2292 		}
2293 		threshold = max(1, threshold * 2);
2294 	} while (nr_regions > c->attrs.max_nr_regions &&
2295 			threshold / 2 < max_thres);
2296 }
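
/*
 * For instance, with a 100 ms aggregation interval and a 5 ms sampling
 * interval, max_thres is 20.  If the number of regions still exceeds
 * max_nr_regions after a pass, the loop retries with the threshold doubled
 * (1, 2, 4, ...), making merges increasingly permissive until the limit is
 * met or the threshold cannot grow further.
 */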
2297 
2298 /*
2299  * Split a region in two
2300  *
2301  * r		the region to be split
2302  * sz_r		size of the first sub-region that will be made
2303  */
2304 static void damon_split_region_at(struct damon_target *t,
2305 				  struct damon_region *r, unsigned long sz_r)
2306 {
2307 	struct damon_region *new;
2308 
2309 	new = damon_new_region(r->ar.start + sz_r, r->ar.end);
2310 	if (!new)
2311 		return;
2312 
2313 	r->ar.end = new->ar.start;
2314 
2315 	new->age = r->age;
2316 	new->last_nr_accesses = r->last_nr_accesses;
2317 	new->nr_accesses_bp = r->nr_accesses_bp;
2318 	new->nr_accesses = r->nr_accesses;
2319 
2320 	damon_insert_region(new, r, damon_next_region(r), t);
2321 }
2322 
2323 /* Split every region in the given target into 'nr_subs' regions */
2324 static void damon_split_regions_of(struct damon_target *t, int nr_subs)
2325 {
2326 	struct damon_region *r, *next;
2327 	unsigned long sz_region, sz_sub = 0;
2328 	int i;
2329 
2330 	damon_for_each_region_safe(r, next, t) {
2331 		sz_region = damon_sz_region(r);
2332 
2333 		for (i = 0; i < nr_subs - 1 &&
2334 				sz_region > 2 * DAMON_MIN_REGION; i++) {
2335 			/*
2336 			 * Randomly select the left sub-region's size to be at
2337 			 * least 10% and at most 90% of the original region
2338 			 */
2339 			sz_sub = ALIGN_DOWN(damon_rand(1, 10) *
2340 					sz_region / 10, DAMON_MIN_REGION);
2341 			/* Do not allow blank region */
2342 			if (sz_sub == 0 || sz_sub >= sz_region)
2343 				continue;
2344 
2345 			damon_split_region_at(t, r, sz_sub);
2346 			sz_region = sz_sub;
2347 		}
2348 	}
2349 }
2350 
2351 /*
2352  * Split every target region into randomly-sized small regions
2353  *
2354  * This function splits every target region into randomly-sized small regions
2355  * if the current total number of regions is equal to or smaller than half of
2356  * the user-specified maximum number of regions.  This is for maximizing the
2357  * monitoring accuracy under the dynamically changeable access patterns.  If a
2358  * split was unnecessarily made, later 'kdamond_merge_regions()' will revert
2359  * it.
2360  */
2361 static void kdamond_split_regions(struct damon_ctx *ctx)
2362 {
2363 	struct damon_target *t;
2364 	unsigned int nr_regions = 0;
2365 	static unsigned int last_nr_regions;
2366 	int nr_subregions = 2;
2367 
2368 	damon_for_each_target(t, ctx)
2369 		nr_regions += damon_nr_regions(t);
2370 
2371 	if (nr_regions > ctx->attrs.max_nr_regions / 2)
2372 		return;
2373 
2374 	/* Maybe the middle of the region has different access frequency */
2375 	if (last_nr_regions == nr_regions &&
2376 			nr_regions < ctx->attrs.max_nr_regions / 3)
2377 		nr_subregions = 3;
2378 
2379 	damon_for_each_target(t, ctx)
2380 		damon_split_regions_of(t, nr_subregions);
2381 
2382 	last_nr_regions = nr_regions;
2383 }
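
/*
 * For example, with max_nr_regions of 1,000, splitting is skipped while more
 * than 500 regions exist.  If the region count stayed the same since the last
 * aggregation and is below 333 (a third of the limit), each region is split
 * in three instead of two, to probe differing access frequencies in the
 * middle of large regions.
 */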
2384 
2385 /*
2386  * Check whether current monitoring should be stopped
2387  *
2388  * The monitoring is stopped when either the user requested to stop, or all
2389  * monitoring targets are invalid.
2390  *
2391  * Returns true if need to stop current monitoring.
2392  */
2393 static bool kdamond_need_stop(struct damon_ctx *ctx)
2394 {
2395 	struct damon_target *t;
2396 
2397 	if (kthread_should_stop())
2398 		return true;
2399 
2400 	if (!ctx->ops.target_valid)
2401 		return false;
2402 
2403 	damon_for_each_target(t, ctx) {
2404 		if (ctx->ops.target_valid(t))
2405 			return false;
2406 	}
2407 
2408 	return true;
2409 }
2410 
2411 static int damos_get_wmark_metric_value(enum damos_wmark_metric metric,
2412 					unsigned long *metric_value)
2413 {
2414 	switch (metric) {
2415 	case DAMOS_WMARK_FREE_MEM_RATE:
2416 		*metric_value = global_zone_page_state(NR_FREE_PAGES) * 1000 /
2417 		       totalram_pages();
2418 		return 0;
2419 	default:
2420 		break;
2421 	}
2422 	return -EINVAL;
2423 }
2424 
2425 /*
2426  * Returns zero if the scheme is active.  Else, returns the time to wait for
2427  * the next watermark check, in microseconds.
2428  */
2429 static unsigned long damos_wmark_wait_us(struct damos *scheme)
2430 {
2431 	unsigned long metric;
2432 
2433 	if (damos_get_wmark_metric_value(scheme->wmarks.metric, &metric))
2434 		return 0;
2435 
2436 	/* higher than high watermark or lower than low watermark */
2437 	if (metric > scheme->wmarks.high || scheme->wmarks.low > metric) {
2438 		if (scheme->wmarks.activated)
2439 			pr_debug("deactivate a scheme (%d) for %s wmark\n",
2440 				 scheme->action,
2441 				 str_high_low(metric > scheme->wmarks.high));
2442 		scheme->wmarks.activated = false;
2443 		return scheme->wmarks.interval;
2444 	}
2445 
2446 	/* inactive and higher than middle watermark */
2447 	if ((scheme->wmarks.high >= metric && metric >= scheme->wmarks.mid) &&
2448 			!scheme->wmarks.activated)
2449 		return scheme->wmarks.interval;
2450 
2451 	if (!scheme->wmarks.activated)
2452 		pr_debug("activate a scheme (%d)\n", scheme->action);
2453 	scheme->wmarks.activated = true;
2454 	return 0;
2455 }
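
/*
 * To illustrate the hysteresis with hypothetical watermarks of high=500,
 * mid=300 and low=200 against the free memory rate metric (per-thousand): a
 * metric of 600 or of 150 deactivates the scheme, a metric of 400 leaves an
 * inactive scheme inactive, and a metric of 250 (below @mid but not below
 * @low) activates the scheme or keeps it active.
 */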
2456 
2457 static void kdamond_usleep(unsigned long usecs)
2458 {
2459 	if (usecs >= USLEEP_RANGE_UPPER_BOUND)
2460 		schedule_timeout_idle(usecs_to_jiffies(usecs));
2461 	else
2462 		usleep_range_idle(usecs, usecs + 1);
2463 }
2464 
2465 /*
2466  * kdamond_call() - handle damon_call_control objects.
2467  * @ctx:	The &struct damon_ctx of the kdamond.
2468  * @cancel:	Whether to cancel the invocation of the function.
2469  *
2470  * If there are &struct damon_call_control requests that were registered via
2471  * damon_call() on @ctx, do or cancel the invocation of the function depending
2472  * on @cancel.  @cancel is set when the kdamond is already out of the main loop
2473  * and therefore will be terminated.
2474  */
2475 static void kdamond_call(struct damon_ctx *ctx, bool cancel)
2476 {
2477 	struct damon_call_control *control;
2478 	LIST_HEAD(repeat_controls);
2479 	int ret = 0;
2480 
2481 	while (true) {
2482 		mutex_lock(&ctx->call_controls_lock);
2483 		control = list_first_entry_or_null(&ctx->call_controls,
2484 				struct damon_call_control, list);
2485 		mutex_unlock(&ctx->call_controls_lock);
2486 		if (!control)
2487 			break;
2488 		if (cancel) {
2489 			control->canceled = true;
2490 		} else {
2491 			ret = control->fn(control->data);
2492 			control->return_code = ret;
2493 		}
2494 		mutex_lock(&ctx->call_controls_lock);
2495 		list_del(&control->list);
2496 		mutex_unlock(&ctx->call_controls_lock);
2497 		if (!control->repeat)
2498 			complete(&control->completion);
2499 		else
2500 			list_add(&control->list, &repeat_controls);
2501 	}
2502 	control = list_first_entry_or_null(&repeat_controls,
2503 			struct damon_call_control, list);
2504 	if (!control || cancel)
2505 		return;
2506 	mutex_lock(&ctx->call_controls_lock);
2507 	list_add_tail(&control->list, &ctx->call_controls);
2508 	mutex_unlock(&ctx->call_controls_lock);
2509 }
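
/*
 * A minimal usage sketch of the damon_call() mechanism that this function
 * serves, assuming a caller-defined hypothetical callback read_stats() that
 * should run in the kdamond context:
 *
 *	static int read_stats(void *data)
 *	{
 *		// runs from the kdamond, so ctx internals are stable here
 *		return 0;
 *	}
 *
 *	struct damon_call_control control = {
 *		.fn = read_stats,
 *		.data = ctx,
 *	};
 *	damon_call(ctx, &control);	// waits for completion unless .repeat
 */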
2510 
2511 /* Returns 0 once a scheme is activated; negative error code if kdamond should stop */
2512 static int kdamond_wait_activation(struct damon_ctx *ctx)
2513 {
2514 	struct damos *s;
2515 	unsigned long wait_time;
2516 	unsigned long min_wait_time = 0;
2517 	bool init_wait_time = false;
2518 
2519 	while (!kdamond_need_stop(ctx)) {
2520 		damon_for_each_scheme(s, ctx) {
2521 			wait_time = damos_wmark_wait_us(s);
2522 			if (!init_wait_time || wait_time < min_wait_time) {
2523 				init_wait_time = true;
2524 				min_wait_time = wait_time;
2525 			}
2526 		}
2527 		if (!min_wait_time)
2528 			return 0;
2529 
2530 		kdamond_usleep(min_wait_time);
2531 
2532 		kdamond_call(ctx, false);
2533 		damos_walk_cancel(ctx);
2534 	}
2535 	return -EBUSY;
2536 }
2537 
2538 static void kdamond_init_ctx(struct damon_ctx *ctx)
2539 {
2540 	unsigned long sample_interval = ctx->attrs.sample_interval ?
2541 		ctx->attrs.sample_interval : 1;
2542 	unsigned long apply_interval;
2543 	struct damos *scheme;
2544 
2545 	ctx->passed_sample_intervals = 0;
2546 	ctx->next_aggregation_sis = ctx->attrs.aggr_interval / sample_interval;
2547 	ctx->next_ops_update_sis = ctx->attrs.ops_update_interval /
2548 		sample_interval;
2549 	ctx->next_intervals_tune_sis = ctx->next_aggregation_sis *
2550 		ctx->attrs.intervals_goal.aggrs;
2551 
2552 	damon_for_each_scheme(scheme, ctx) {
2553 		apply_interval = scheme->apply_interval_us ?
2554 			scheme->apply_interval_us : ctx->attrs.aggr_interval;
2555 		scheme->next_apply_sis = apply_interval / sample_interval;
2556 		damos_set_filters_default_reject(scheme);
2557 	}
2558 }
2559 
2560 /*
2561  * The monitoring daemon that runs as a kernel thread
2562  */
2563 static int kdamond_fn(void *data)
2564 {
2565 	struct damon_ctx *ctx = data;
2566 	struct damon_target *t;
2567 	struct damon_region *r, *next;
2568 	unsigned int max_nr_accesses = 0;
2569 	unsigned long sz_limit = 0;
2570 
2571 	pr_debug("kdamond (%d) starts\n", current->pid);
2572 
2573 	complete(&ctx->kdamond_started);
2574 	kdamond_init_ctx(ctx);
2575 
2576 	if (ctx->ops.init)
2577 		ctx->ops.init(ctx);
2578 	ctx->regions_score_histogram = kmalloc_array(DAMOS_MAX_SCORE + 1,
2579 			sizeof(*ctx->regions_score_histogram), GFP_KERNEL);
2580 	if (!ctx->regions_score_histogram)
2581 		goto done;
2582 
2583 	sz_limit = damon_region_sz_limit(ctx);
2584 
2585 	while (!kdamond_need_stop(ctx)) {
2586 		/*
2587 		 * ctx->attrs and ctx->next_{aggregation,ops_update}_sis could
2588 		 * be changed from kdamond_call().  Read the values here, and
2589 		 * use those for this iteration.  That is, values updated via
2590 		 * damon_set_attrs() are respected from the next iteration.
2591 		 */
2592 		unsigned long next_aggregation_sis = ctx->next_aggregation_sis;
2593 		unsigned long next_ops_update_sis = ctx->next_ops_update_sis;
2594 		unsigned long sample_interval = ctx->attrs.sample_interval;
2595 
2596 		if (kdamond_wait_activation(ctx))
2597 			break;
2598 
2599 		if (ctx->ops.prepare_access_checks)
2600 			ctx->ops.prepare_access_checks(ctx);
2601 
2602 		kdamond_usleep(sample_interval);
2603 		ctx->passed_sample_intervals++;
2604 
2605 		if (ctx->ops.check_accesses)
2606 			max_nr_accesses = ctx->ops.check_accesses(ctx);
2607 
2608 		if (ctx->passed_sample_intervals >= next_aggregation_sis)
2609 			kdamond_merge_regions(ctx,
2610 					max_nr_accesses / 10,
2611 					sz_limit);
2612 
2613 		/*
2614 		 * do kdamond_call() and kdamond_apply_schemes() after
2615 		 * kdamond_merge_regions() if possible, to reduce overhead
2616 		 */
2617 		kdamond_call(ctx, false);
2618 		if (!list_empty(&ctx->schemes))
2619 			kdamond_apply_schemes(ctx);
2620 		else
2621 			damos_walk_cancel(ctx);
2622 
2623 		sample_interval = ctx->attrs.sample_interval ?
2624 			ctx->attrs.sample_interval : 1;
2625 		if (ctx->passed_sample_intervals >= next_aggregation_sis) {
2626 			if (ctx->attrs.intervals_goal.aggrs &&
2627 					ctx->passed_sample_intervals >=
2628 					ctx->next_intervals_tune_sis) {
2629 				/*
2630 				 * ctx->next_aggregation_sis might be updated
2631 				 * from kdamond_call().  In that case,
2632 				 * damon_set_attrs(), which will be called from
2633 				 * kdamond_tune_intervals(), may wrongly think
2634 				 * this is in the middle of the current
2635 				 * aggregation, and reset the aggregation
2636 				 * information for all regions.  Then, the
2637 				 * following kdamond_reset_aggregated() call
2638 				 * will make the region information invalid,
2639 				 * particularly for ->nr_accesses_bp.
2640 				 *
2641 				 * Reset ->next_aggregation_sis to avoid that.
2642 				 * It will anyway be correctly updated after
2643 				 * this if clause.
2644 				 */
2645 				ctx->next_aggregation_sis =
2646 					next_aggregation_sis;
2647 				ctx->next_intervals_tune_sis +=
2648 					ctx->attrs.aggr_samples *
2649 					ctx->attrs.intervals_goal.aggrs;
2650 				kdamond_tune_intervals(ctx);
2651 				sample_interval = ctx->attrs.sample_interval ?
2652 					ctx->attrs.sample_interval : 1;
2653 
2654 			}
2655 			ctx->next_aggregation_sis = next_aggregation_sis +
2656 				ctx->attrs.aggr_interval / sample_interval;
2657 
2658 			kdamond_reset_aggregated(ctx);
2659 			kdamond_split_regions(ctx);
2660 		}
2661 
2662 		if (ctx->passed_sample_intervals >= next_ops_update_sis) {
2663 			ctx->next_ops_update_sis = next_ops_update_sis +
2664 				ctx->attrs.ops_update_interval /
2665 				sample_interval;
2666 			if (ctx->ops.update)
2667 				ctx->ops.update(ctx);
2668 			sz_limit = damon_region_sz_limit(ctx);
2669 		}
2670 	}
2671 done:
2672 	damon_for_each_target(t, ctx) {
2673 		damon_for_each_region_safe(r, next, t)
2674 			damon_destroy_region(r, t);
2675 	}
2676 
2677 	if (ctx->ops.cleanup)
2678 		ctx->ops.cleanup(ctx);
2679 	kfree(ctx->regions_score_histogram);
2680 
2681 	pr_debug("kdamond (%d) finishes\n", current->pid);
2682 	mutex_lock(&ctx->kdamond_lock);
2683 	ctx->kdamond = NULL;
2684 	mutex_unlock(&ctx->kdamond_lock);
2685 
2686 	kdamond_call(ctx, true);
2687 	damos_walk_cancel(ctx);
2688 
2689 	mutex_lock(&damon_lock);
2690 	nr_running_ctxs--;
2691 	if (!nr_running_ctxs && running_exclusive_ctxs)
2692 		running_exclusive_ctxs = false;
2693 	mutex_unlock(&damon_lock);
2694 
2695 	damon_destroy_targets(ctx);
2696 	return 0;
2697 }
2698 
2699 /*
2700  * struct damon_system_ram_region - System RAM resource address region of
2701  *				    [@start, @end).
2702  * @start:	Start address of the region (inclusive).
2703  * @end:	End address of the region (exclusive).
2704  */
2705 struct damon_system_ram_region {
2706 	unsigned long start;
2707 	unsigned long end;
2708 };
2709 
2710 static int walk_system_ram(struct resource *res, void *arg)
2711 {
2712 	struct damon_system_ram_region *a = arg;
2713 
2714 	if (a->end - a->start < resource_size(res)) {
2715 		a->start = res->start;
2716 		a->end = res->end;
2717 	}
2718 	return 0;
2719 }
2720 
2721 /*
2722  * Find the biggest 'System RAM' resource and store its start and end address
2723  * in @start and @end, respectively.  Returns false if no System RAM is found.
2724  */
2725 static bool damon_find_biggest_system_ram(unsigned long *start,
2726 						unsigned long *end)
2728 {
2729 	struct damon_system_ram_region arg = {};
2730 
2731 	walk_system_ram_res(0, ULONG_MAX, &arg, walk_system_ram);
2732 	if (arg.end <= arg.start)
2733 		return false;
2734 
2735 	*start = arg.start;
2736 	*end = arg.end;
2737 	return true;
2738 }
2739 
2740 /**
2741  * damon_set_region_biggest_system_ram_default() - Set the region of the given
2742  * monitoring target as requested, or biggest 'System RAM'.
2743  * @t:		The monitoring target to set the region.
2744  * @start:	The pointer to the start address of the region.
2745  * @end:	The pointer to the end address of the region.
2746  *
2747  * This function sets the region of @t as requested by @start and @end.  If the
2748  * values of @start and @end are zero, however, this function finds the biggest
2749  * 'System RAM' resource and sets the region to cover the resource.  In the
2750  * latter case, this function saves the start and end addresses of the resource
2751  * in @start and @end, respectively.
2752  *
2753  * Return: 0 on success, negative error code otherwise.
2754  */
2755 int damon_set_region_biggest_system_ram_default(struct damon_target *t,
2756 			unsigned long *start, unsigned long *end)
2757 {
2758 	struct damon_addr_range addr_range;
2759 
2760 	if (*start > *end)
2761 		return -EINVAL;
2762 
2763 	if (!*start && !*end &&
2764 		!damon_find_biggest_system_ram(start, end))
2765 		return -EINVAL;
2766 
2767 	addr_range.start = *start;
2768 	addr_range.end = *end;
2769 	return damon_set_regions(t, &addr_range, 1);
2770 }
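
/*
 * A minimal usage sketch: passing zeroed @start and @end makes the biggest
 * 'System RAM' resource be discovered and used (error handling is the
 * hypothetical caller's policy):
 *
 *	unsigned long start = 0, end = 0;
 *	struct damon_target *t = damon_new_target();
 *
 *	if (!t)
 *		return -ENOMEM;
 *	if (damon_set_region_biggest_system_ram_default(t, &start, &end))
 *		return -EINVAL;
 */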
2771 
2772 /*
2773  * damon_moving_sum() - Calculate an inferred moving sum value.
2774  * @mvsum:	Inferred sum of the last @len_window values.
2775  * @nomvsum:	Non-moving sum of the last discrete @len_window window values.
2776  * @len_window:	The number of last values to take care of.
2777  * @new_value:	New value that will be added to the pseudo moving sum.
2778  *
2779  * Moving sum (moving average * window size) is good for handling noise, but
2780  * the cost of keeping past values can be high for arbitrary window size.  This
2781  * function implements a lightweight pseudo moving sum function that doesn't
2782  * keep the past window values.
2783  *
2784  * It simply assumes there was no noise in the past, and derives the no-noise
2785  * assumed past value to drop from @nomvsum and @len_window.  @nomvsum is a
2786  * non-moving sum of the last window.  For example, if @len_window is 10 and
2787  * we have 25 values, @nomvsum is the sum of the 11th to 20th values of the
2788  * 25 values.  Hence, this function simply drops @nomvsum / @len_window from
2789  * the given @mvsum and adds @new_value.
2790  *
2791  * For example, if @len_window is 10 and @nomvsum is 50, the last 10 values
2792  * of the last window could vary, e.g., 0, 10, 0, 10, 0, 10, 0, 0, 0, 20.
2793  * For calculating the next moving sum with a new value, we should drop 0
2794  * from 50 and add the new value.  However, this function assumes it got the
2795  * value 5 for each of the last ten times.  Based on that assumption, when
2796  * the next value is measured, it drops the assumed past value, 5, from the
2797  * current sum and adds the new value to get the updated pseudo moving sum.
2798  *
2799  * This means the value could have errors, but the errors disappear on every
2800  * @len_window-aligned call.  For example, if @len_window is 10, the pseudo
2801  * moving sum with the 11th to 19th values would have an error, but the sum
2802  * with the 20th value will not.
2803  *
2804  * Return: Pseudo moving sum after getting the @new_value.
2805  */
2806 static unsigned int damon_moving_sum(unsigned int mvsum, unsigned int nomvsum,
2807 		unsigned int len_window, unsigned int new_value)
2808 {
2809 	return mvsum - nomvsum / len_window + new_value;
2810 }
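
/*
 * Continuing the example above: with @len_window of 10, @mvsum of 50 and
 * @nomvsum of 50, feeding a @new_value of 10 returns 50 - 50 / 10 + 10 = 55,
 * as if the oldest of the assumed past values (5) were dropped from the
 * window and the new value appended.
 */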
2811 
2812 /**
2813  * damon_update_region_access_rate() - Update the access rate of a region.
2814  * @r:		The DAMON region to update for its access check result.
2815  * @accessed:	Whether the region was accessed during the last sampling interval.
2816  * @attrs:	The damon_attrs of the DAMON context.
2817  *
2818  * Update the access rate of a region with the region's last sampling interval
2819  * access check result.
2820  *
2821  * Usually this will be called by &damon_operations->check_accesses callback.
2822  */
2823 void damon_update_region_access_rate(struct damon_region *r, bool accessed,
2824 		struct damon_attrs *attrs)
2825 {
2826 	unsigned int len_window = 1;
2827 
2828 	/*
2829 	 * sample_interval can be zero, but cannot be larger than
2830 	 * aggr_interval, owing to the validation in damon_set_attrs().
2831 	 */
2832 	if (attrs->sample_interval)
2833 		len_window = damon_max_nr_accesses(attrs);
2834 	r->nr_accesses_bp = damon_moving_sum(r->nr_accesses_bp,
2835 			r->last_nr_accesses * 10000, len_window,
2836 			accessed ? 10000 : 0);
2837 
2838 	if (accessed)
2839 		r->nr_accesses++;
2840 }
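
/*
 * For example, with a 5 ms sampling interval and a 100 ms aggregation
 * interval, len_window is damon_max_nr_accesses(), i.e., 20.  If
 * r->nr_accesses_bp is 40,000 (r->last_nr_accesses of 4), an accessed sample
 * updates nr_accesses_bp to 40,000 - 40,000 / 20 + 10,000 = 48,000.
 */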
2841 
2842 static int __init damon_init(void)
2843 {
2844 	damon_region_cache = KMEM_CACHE(damon_region, 0);
2845 	if (unlikely(!damon_region_cache)) {
2846 		pr_err("creating damon_region_cache failed\n");
2847 		return -ENOMEM;
2848 	}
2849 
2850 	return 0;
2851 }
2852 
2853 subsys_initcall(damon_init);
2854 
2855 #include "tests/core-kunit.h"
2856