xref: /linux/mm/damon/core.c (revision 8c7c1b5506e593ce00c42214b4fcafd640ceeb42)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Data Access Monitor
4  *
5  * Author: SeongJae Park <sj@kernel.org>
6  */
7 
8 #define pr_fmt(fmt) "damon: " fmt
9 
10 #include <linux/damon.h>
11 #include <linux/delay.h>
12 #include <linux/kthread.h>
13 #include <linux/mm.h>
14 #include <linux/psi.h>
15 #include <linux/slab.h>
16 #include <linux/string.h>
17 #include <linux/string_choices.h>
18 
19 #define CREATE_TRACE_POINTS
20 #include <trace/events/damon.h>
21 
22 #ifdef CONFIG_DAMON_KUNIT_TEST
23 #undef DAMON_MIN_REGION
24 #define DAMON_MIN_REGION 1
25 #endif
26 
27 static DEFINE_MUTEX(damon_lock);
28 static int nr_running_ctxs;
29 static bool running_exclusive_ctxs;
30 
31 static DEFINE_MUTEX(damon_ops_lock);
32 static struct damon_operations damon_registered_ops[NR_DAMON_OPS];
33 
34 static struct kmem_cache *damon_region_cache __ro_after_init;
35 
36 /* Should be called under damon_ops_lock with id smaller than NR_DAMON_OPS */
37 static bool __damon_is_registered_ops(enum damon_ops_id id)
38 {
39 	struct damon_operations empty_ops = {};
40 
41 	if (!memcmp(&empty_ops, &damon_registered_ops[id], sizeof(empty_ops)))
42 		return false;
43 	return true;
44 }
45 
46 /**
47  * damon_is_registered_ops() - Check if a given damon_operations is registered.
48  * @id:	Id of the damon_operations to check if registered.
49  *
50  * Return: true if the ops is set, false otherwise.
51  */
52 bool damon_is_registered_ops(enum damon_ops_id id)
53 {
54 	bool registered;
55 
56 	if (id >= NR_DAMON_OPS)
57 		return false;
58 	mutex_lock(&damon_ops_lock);
59 	registered = __damon_is_registered_ops(id);
60 	mutex_unlock(&damon_ops_lock);
61 	return registered;
62 }
63 
64 /**
65  * damon_register_ops() - Register a monitoring operations set to DAMON.
66  * @ops:	monitoring operations set to register.
67  *
68  * This function registers a monitoring operations set having a valid &struct
69  * damon_operations->id, so that others can find and use it later.
70  *
71  * Return: 0 on success, negative error code otherwise.
72  */
73 int damon_register_ops(struct damon_operations *ops)
74 {
75 	int err = 0;
76 
77 	if (ops->id >= NR_DAMON_OPS)
78 		return -EINVAL;
79 
80 	mutex_lock(&damon_ops_lock);
81 	/* Fail for already registered ops */
82 	if (__damon_is_registered_ops(ops->id))
83 		err = -EINVAL;
84 	else
85 		damon_registered_ops[ops->id] = *ops;
86 	mutex_unlock(&damon_ops_lock);
87 	return err;
88 }
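
/*
 * Example (an illustrative sketch, not part of this file): a caller could
 * register an operations set like below.  The callback names and the init
 * function are hypothetical; only the DAMON symbols are real.
 *
 *	static struct damon_operations my_ops = {
 *		.id = DAMON_OPS_VADDR,
 *		.prepare_access_checks = my_prepare_access_checks,
 *		.check_accesses = my_check_accesses,
 *	};
 *
 *	static int __init my_init(void)
 *	{
 *		return damon_register_ops(&my_ops);
 *	}
 *
 * The call fails with -EINVAL if DAMON_OPS_VADDR was already taken.
 */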
89 
90 /**
91  * damon_select_ops() - Select a monitoring operations to use with the context.
92  * @ctx:	monitoring context to use the operations.
93  * @id:		id of the registered monitoring operations to select.
94  *
95  * This function finds the registered monitoring operations set of @id and
96  * makes @ctx use it.
97  *
98  * Return: 0 on success, negative error code otherwise.
99  */
100 int damon_select_ops(struct damon_ctx *ctx, enum damon_ops_id id)
101 {
102 	int err = 0;
103 
104 	if (id >= NR_DAMON_OPS)
105 		return -EINVAL;
106 
107 	mutex_lock(&damon_ops_lock);
108 	if (!__damon_is_registered_ops(id))
109 		err = -EINVAL;
110 	else
111 		ctx->ops = damon_registered_ops[id];
112 	mutex_unlock(&damon_ops_lock);
113 	return err;
114 }
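
/*
 * Example (an illustrative sketch): selecting the already-registered
 * physical address space operations set for a new context.
 *
 *	struct damon_ctx *ctx = damon_new_ctx();
 *
 *	if (ctx && damon_select_ops(ctx, DAMON_OPS_PADDR))
 *		pr_err("paddr ops not registered\n");
 */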
115 
116 /*
117  * Construct a damon_region struct
118  *
119  * Returns the pointer to the new struct on success, or NULL otherwise
120  */
121 struct damon_region *damon_new_region(unsigned long start, unsigned long end)
122 {
123 	struct damon_region *region;
124 
125 	region = kmem_cache_alloc(damon_region_cache, GFP_KERNEL);
126 	if (!region)
127 		return NULL;
128 
129 	region->ar.start = start;
130 	region->ar.end = end;
131 	region->nr_accesses = 0;
132 	region->nr_accesses_bp = 0;
133 	INIT_LIST_HEAD(&region->list);
134 
135 	region->age = 0;
136 	region->last_nr_accesses = 0;
137 
138 	return region;
139 }
140 
141 void damon_add_region(struct damon_region *r, struct damon_target *t)
142 {
143 	list_add_tail(&r->list, &t->regions_list);
144 	t->nr_regions++;
145 }
146 
147 static void damon_del_region(struct damon_region *r, struct damon_target *t)
148 {
149 	list_del(&r->list);
150 	t->nr_regions--;
151 }
152 
153 static void damon_free_region(struct damon_region *r)
154 {
155 	kmem_cache_free(damon_region_cache, r);
156 }
157 
158 void damon_destroy_region(struct damon_region *r, struct damon_target *t)
159 {
160 	damon_del_region(r, t);
161 	damon_free_region(r);
162 }
163 
164 /*
165  * Check whether a region is intersecting an address range
166  *
167  * Returns true if it is.
168  */
169 static bool damon_intersect(struct damon_region *r,
170 		struct damon_addr_range *re)
171 {
172 	return !(r->ar.end <= re->start || re->end <= r->ar.start);
173 }
174 
175 /*
176  * Fill holes in regions with new regions.
177  */
178 static int damon_fill_regions_holes(struct damon_region *first,
179 		struct damon_region *last, struct damon_target *t)
180 {
181 	struct damon_region *r = first;
182 
183 	damon_for_each_region_from(r, t) {
184 		struct damon_region *next, *newr;
185 
186 		if (r == last)
187 			break;
188 		next = damon_next_region(r);
189 		if (r->ar.end != next->ar.start) {
190 			newr = damon_new_region(r->ar.end, next->ar.start);
191 			if (!newr)
192 				return -ENOMEM;
193 			damon_insert_region(newr, r, next, t);
194 		}
195 	}
196 	return 0;
197 }
198 
199 /*
200  * damon_set_regions() - Set regions of a target for given address ranges.
201  * @t:		the given target.
202  * @ranges:	array of new monitoring target ranges.
203  * @nr_ranges:	length of @ranges.
204  *
205  * This function adds new regions to, or modifies existing regions of, a
206  * monitoring target to fit in the given ranges.
207  *
208  * Return: 0 on success, or negative error code otherwise.
209  */
210 int damon_set_regions(struct damon_target *t, struct damon_addr_range *ranges,
211 		unsigned int nr_ranges)
212 {
213 	struct damon_region *r, *next;
214 	unsigned int i;
215 	int err;
216 
217 	/* Remove regions which are not in the new ranges */
218 	damon_for_each_region_safe(r, next, t) {
219 		for (i = 0; i < nr_ranges; i++) {
220 			if (damon_intersect(r, &ranges[i]))
221 				break;
222 		}
223 		if (i == nr_ranges)
224 			damon_destroy_region(r, t);
225 	}
226 
227 	r = damon_first_region(t);
228 	/* Add new regions or resize existing regions to fit in the ranges */
229 	for (i = 0; i < nr_ranges; i++) {
230 		struct damon_region *first = NULL, *last, *newr;
231 		struct damon_addr_range *range;
232 
233 		range = &ranges[i];
234 		/* Get the first/last regions intersecting with the range */
235 		damon_for_each_region_from(r, t) {
236 			if (damon_intersect(r, range)) {
237 				if (!first)
238 					first = r;
239 				last = r;
240 			}
241 			if (r->ar.start >= range->end)
242 				break;
243 		}
244 		if (!first) {
245 			/* no region intersects with this range */
246 			newr = damon_new_region(
247 					ALIGN_DOWN(range->start,
248 						DAMON_MIN_REGION),
249 					ALIGN(range->end, DAMON_MIN_REGION));
250 			if (!newr)
251 				return -ENOMEM;
252 			damon_insert_region(newr, damon_prev_region(r), r, t);
253 		} else {
254 			/* resize intersecting regions to fit in this range */
255 			first->ar.start = ALIGN_DOWN(range->start,
256 					DAMON_MIN_REGION);
257 			last->ar.end = ALIGN(range->end, DAMON_MIN_REGION);
258 
259 			/* fill possible holes in the range */
260 			err = damon_fill_regions_holes(first, last, t);
261 			if (err)
262 				return err;
263 		}
264 	}
265 	return 0;
266 }
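
/*
 * Example (an illustrative sketch): setting a single monitoring target
 * range.  The target pointer 't' and the addresses are arbitrary
 * assumptions for illustration.
 *
 *	struct damon_addr_range range = {
 *		.start = 0x100000000,
 *		.end = 0x140000000,
 *	};
 *	int err = damon_set_regions(t, &range, 1);
 *
 * Regions of t outside the range are destroyed, and intersecting ones are
 * resized to fit in it.
 */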
267 
268 struct damos_filter *damos_new_filter(enum damos_filter_type type,
269 		bool matching, bool allow)
270 {
271 	struct damos_filter *filter;
272 
273 	filter = kmalloc(sizeof(*filter), GFP_KERNEL);
274 	if (!filter)
275 		return NULL;
276 	filter->type = type;
277 	filter->matching = matching;
278 	filter->allow = allow;
279 	INIT_LIST_HEAD(&filter->list);
280 	return filter;
281 }
282 
283 /**
284  * damos_filter_for_ops() - Return whether the filter is an ops-handled one.
285  * @type:	type of the filter.
286  *
287  * Return: true if the filter of @type needs to be handled by ops layer, false
288  * otherwise.
289  */
290 bool damos_filter_for_ops(enum damos_filter_type type)
291 {
292 	switch (type) {
293 	case DAMOS_FILTER_TYPE_ADDR:
294 	case DAMOS_FILTER_TYPE_TARGET:
295 		return false;
296 	default:
297 		break;
298 	}
299 	return true;
300 }
301 
302 void damos_add_filter(struct damos *s, struct damos_filter *f)
303 {
304 	if (damos_filter_for_ops(f->type))
305 		list_add_tail(&f->list, &s->ops_filters);
306 	else
307 		list_add_tail(&f->list, &s->filters);
308 }
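
/*
 * Example (an illustrative sketch): making a scheme skip anonymous pages.
 * DAMOS_FILTER_TYPE_ANON is neither ADDR nor TARGET, so
 * damos_filter_for_ops() returns true and damos_add_filter() places the
 * filter on s->ops_filters.
 *
 *	struct damos_filter *f;
 *
 *	f = damos_new_filter(DAMOS_FILTER_TYPE_ANON, true, false);
 *	if (f)
 *		damos_add_filter(s, f);
 */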
309 
310 static void damos_del_filter(struct damos_filter *f)
311 {
312 	list_del(&f->list);
313 }
314 
315 static void damos_free_filter(struct damos_filter *f)
316 {
317 	kfree(f);
318 }
319 
320 void damos_destroy_filter(struct damos_filter *f)
321 {
322 	damos_del_filter(f);
323 	damos_free_filter(f);
324 }
325 
326 struct damos_quota_goal *damos_new_quota_goal(
327 		enum damos_quota_goal_metric metric,
328 		unsigned long target_value)
329 {
330 	struct damos_quota_goal *goal;
331 
332 	goal = kmalloc(sizeof(*goal), GFP_KERNEL);
333 	if (!goal)
334 		return NULL;
335 	goal->metric = metric;
336 	goal->target_value = target_value;
337 	INIT_LIST_HEAD(&goal->list);
338 	return goal;
339 }
340 
341 void damos_add_quota_goal(struct damos_quota *q, struct damos_quota_goal *g)
342 {
343 	list_add_tail(&g->list, &q->goals);
344 }
345 
346 static void damos_del_quota_goal(struct damos_quota_goal *g)
347 {
348 	list_del(&g->list);
349 }
350 
351 static void damos_free_quota_goal(struct damos_quota_goal *g)
352 {
353 	kfree(g);
354 }
355 
356 void damos_destroy_quota_goal(struct damos_quota_goal *g)
357 {
358 	damos_del_quota_goal(g);
359 	damos_free_quota_goal(g);
360 }
361 
362 /* initialize fields of @quota that normally API users wouldn't set */
363 static struct damos_quota *damos_quota_init(struct damos_quota *quota)
364 {
365 	quota->esz = 0;
366 	quota->total_charged_sz = 0;
367 	quota->total_charged_ns = 0;
368 	quota->charged_sz = 0;
369 	quota->charged_from = 0;
370 	quota->charge_target_from = NULL;
371 	quota->charge_addr_from = 0;
372 	quota->esz_bp = 0;
373 	return quota;
374 }
375 
376 struct damos *damon_new_scheme(struct damos_access_pattern *pattern,
377 			enum damos_action action,
378 			unsigned long apply_interval_us,
379 			struct damos_quota *quota,
380 			struct damos_watermarks *wmarks,
381 			int target_nid)
382 {
383 	struct damos *scheme;
384 
385 	scheme = kmalloc(sizeof(*scheme), GFP_KERNEL);
386 	if (!scheme)
387 		return NULL;
388 	scheme->pattern = *pattern;
389 	scheme->action = action;
390 	scheme->apply_interval_us = apply_interval_us;
391 	/*
392 	 * next_apply_sis will be set when kdamond starts.  While kdamond is
393 	 * running, it will also be updated when the scheme is added to the DAMON
394 	 * context, or when damon_attrs are updated.
395 	 */
396 	scheme->next_apply_sis = 0;
397 	scheme->walk_completed = false;
398 	INIT_LIST_HEAD(&scheme->filters);
399 	INIT_LIST_HEAD(&scheme->ops_filters);
400 	scheme->stat = (struct damos_stat){};
401 	INIT_LIST_HEAD(&scheme->list);
402 
403 	scheme->quota = *(damos_quota_init(quota));
404 	/* quota.goals should be separately set by caller */
405 	INIT_LIST_HEAD(&scheme->quota.goals);
406 
407 	scheme->wmarks = *wmarks;
408 	scheme->wmarks.activated = true;
409 
410 	scheme->target_nid = target_nid;
411 
412 	return scheme;
413 }
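
/*
 * Example (an illustrative sketch): a pageout scheme for regions that were
 * not accessed for at least one aggregation interval.  All numbers are
 * arbitrary assumptions for illustration.
 *
 *	struct damos_access_pattern pattern = {
 *		.min_sz_region = 0, .max_sz_region = ULONG_MAX,
 *		.min_nr_accesses = 0, .max_nr_accesses = 0,
 *		.min_age_region = 1, .max_age_region = UINT_MAX,
 *	};
 *	struct damos_quota quota = { .reset_interval = 1000, .sz = SZ_128M };
 *	struct damos_watermarks wmarks = { .metric = DAMOS_WMARK_NONE };
 *	struct damos *s = damon_new_scheme(&pattern, DAMOS_PAGEOUT, 0,
 *			&quota, &wmarks, NUMA_NO_NODE);
 */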
414 
415 static void damos_set_next_apply_sis(struct damos *s, struct damon_ctx *ctx)
416 {
417 	unsigned long sample_interval = ctx->attrs.sample_interval ?
418 		ctx->attrs.sample_interval : 1;
419 	unsigned long apply_interval = s->apply_interval_us ?
420 		s->apply_interval_us : ctx->attrs.aggr_interval;
421 
422 	s->next_apply_sis = ctx->passed_sample_intervals +
423 		apply_interval / sample_interval;
424 }
425 
426 void damon_add_scheme(struct damon_ctx *ctx, struct damos *s)
427 {
428 	list_add_tail(&s->list, &ctx->schemes);
429 	damos_set_next_apply_sis(s, ctx);
430 }
431 
432 static void damon_del_scheme(struct damos *s)
433 {
434 	list_del(&s->list);
435 }
436 
437 static void damon_free_scheme(struct damos *s)
438 {
439 	kfree(s);
440 }
441 
442 void damon_destroy_scheme(struct damos *s)
443 {
444 	struct damos_quota_goal *g, *g_next;
445 	struct damos_filter *f, *next;
446 
447 	damos_for_each_quota_goal_safe(g, g_next, &s->quota)
448 		damos_destroy_quota_goal(g);
449 
450 	damos_for_each_filter_safe(f, next, s)
451 		damos_destroy_filter(f);
452 	damon_del_scheme(s);
453 	damon_free_scheme(s);
454 }
455 
456 /*
457  * Construct a damon_target struct
458  *
459  * Returns the pointer to the new struct on success, or NULL otherwise
460  */
461 struct damon_target *damon_new_target(void)
462 {
463 	struct damon_target *t;
464 
465 	t = kmalloc(sizeof(*t), GFP_KERNEL);
466 	if (!t)
467 		return NULL;
468 
469 	t->pid = NULL;
470 	t->nr_regions = 0;
471 	INIT_LIST_HEAD(&t->regions_list);
472 	INIT_LIST_HEAD(&t->list);
473 
474 	return t;
475 }
476 
477 void damon_add_target(struct damon_ctx *ctx, struct damon_target *t)
478 {
479 	list_add_tail(&t->list, &ctx->adaptive_targets);
480 }
481 
482 bool damon_targets_empty(struct damon_ctx *ctx)
483 {
484 	return list_empty(&ctx->adaptive_targets);
485 }
486 
487 static void damon_del_target(struct damon_target *t)
488 {
489 	list_del(&t->list);
490 }
491 
492 void damon_free_target(struct damon_target *t)
493 {
494 	struct damon_region *r, *next;
495 
496 	damon_for_each_region_safe(r, next, t)
497 		damon_free_region(r);
498 	kfree(t);
499 }
500 
501 void damon_destroy_target(struct damon_target *t)
502 {
503 	damon_del_target(t);
504 	damon_free_target(t);
505 }
506 
507 unsigned int damon_nr_regions(struct damon_target *t)
508 {
509 	return t->nr_regions;
510 }
511 
512 struct damon_ctx *damon_new_ctx(void)
513 {
514 	struct damon_ctx *ctx;
515 
516 	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
517 	if (!ctx)
518 		return NULL;
519 
520 	init_completion(&ctx->kdamond_started);
521 
522 	ctx->attrs.sample_interval = 5 * 1000;
523 	ctx->attrs.aggr_interval = 100 * 1000;
524 	ctx->attrs.ops_update_interval = 60 * 1000 * 1000;
525 
526 	ctx->passed_sample_intervals = 0;
527 	/* These will be set from kdamond_init_ctx() */
528 	ctx->next_aggregation_sis = 0;
529 	ctx->next_ops_update_sis = 0;
530 
531 	mutex_init(&ctx->kdamond_lock);
532 	mutex_init(&ctx->call_control_lock);
533 	mutex_init(&ctx->walk_control_lock);
534 
535 	ctx->attrs.min_nr_regions = 10;
536 	ctx->attrs.max_nr_regions = 1000;
537 
538 	INIT_LIST_HEAD(&ctx->adaptive_targets);
539 	INIT_LIST_HEAD(&ctx->schemes);
540 
541 	return ctx;
542 }
543 
544 static void damon_destroy_targets(struct damon_ctx *ctx)
545 {
546 	struct damon_target *t, *next_t;
547 
548 	if (ctx->ops.cleanup) {
549 		ctx->ops.cleanup(ctx);
550 		return;
551 	}
552 
553 	damon_for_each_target_safe(t, next_t, ctx)
554 		damon_destroy_target(t);
555 }
556 
557 void damon_destroy_ctx(struct damon_ctx *ctx)
558 {
559 	struct damos *s, *next_s;
560 
561 	damon_destroy_targets(ctx);
562 
563 	damon_for_each_scheme_safe(s, next_s, ctx)
564 		damon_destroy_scheme(s);
565 
566 	kfree(ctx);
567 }
568 
569 static unsigned int damon_age_for_new_attrs(unsigned int age,
570 		struct damon_attrs *old_attrs, struct damon_attrs *new_attrs)
571 {
572 	return age * old_attrs->aggr_interval / new_attrs->aggr_interval;
573 }
574 
575 /* convert access ratio in bp (per 10,000) to nr_accesses */
576 static unsigned int damon_accesses_bp_to_nr_accesses(
577 		unsigned int accesses_bp, struct damon_attrs *attrs)
578 {
579 	return accesses_bp * damon_max_nr_accesses(attrs) / 10000;
580 }
581 
582 /*
583  * Convert nr_accesses to access ratio in bp (per 10,000).
584  *
585  * Callers should ensure attrs.aggr_interval is not zero, like
586  * damon_update_monitoring_results() does.  Otherwise, divide-by-zero would
587  * happen.
588  */
589 static unsigned int damon_nr_accesses_to_accesses_bp(
590 		unsigned int nr_accesses, struct damon_attrs *attrs)
591 {
592 	return nr_accesses * 10000 / damon_max_nr_accesses(attrs);
593 }
594 
595 static unsigned int damon_nr_accesses_for_new_attrs(unsigned int nr_accesses,
596 		struct damon_attrs *old_attrs, struct damon_attrs *new_attrs)
597 {
598 	return damon_accesses_bp_to_nr_accesses(
599 			damon_nr_accesses_to_accesses_bp(
600 				nr_accesses, old_attrs),
601 			new_attrs);
602 }
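
/*
 * Worked example of the conversions above: with a 5 ms sampling and a
 * 100 ms aggregation interval, damon_max_nr_accesses() is 20, so
 * nr_accesses of 10 converts to 10 * 10000 / 20 == 5000 bp (50%).  Under
 * new attrs with a 10 ms sampling and the same aggregation interval, the
 * maximum becomes 10, and the 5000 bp converts back to
 * 5000 * 10 / 10000 == 5, preserving the 50% access ratio.
 */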
603 
604 static void damon_update_monitoring_result(struct damon_region *r,
605 		struct damon_attrs *old_attrs, struct damon_attrs *new_attrs,
606 		bool aggregating)
607 {
608 	if (!aggregating) {
609 		r->nr_accesses = damon_nr_accesses_for_new_attrs(
610 				r->nr_accesses, old_attrs, new_attrs);
611 		r->nr_accesses_bp = r->nr_accesses * 10000;
612 	} else {
613 		/*
614 		 * if this is called in the middle of the aggregation, reset
615 		 * the aggregations we made so far for this aggregation
616 		 * interval.  In other words, make the status like
617 		 * kdamond_reset_aggregated() is called.
618 		 */
619 		r->last_nr_accesses = damon_nr_accesses_for_new_attrs(
620 				r->last_nr_accesses, old_attrs, new_attrs);
621 		r->nr_accesses_bp = r->last_nr_accesses * 10000;
622 		r->nr_accesses = 0;
623 	}
624 	r->age = damon_age_for_new_attrs(r->age, old_attrs, new_attrs);
625 }
626 
627 /*
628  * region->nr_accesses is the number of sampling intervals in the last
629  * aggregation interval in which access to the region has been found, and
630  * region->age is the number of aggregation intervals for which its access
631  * pattern has been maintained.  The real meaning of the two fields therefore
632  * depends on the current sampling and aggregation intervals.  This function
633  * updates ->nr_accesses and ->age of given damon_ctx's regions for new damon_attrs.
634  */
635 static void damon_update_monitoring_results(struct damon_ctx *ctx,
636 		struct damon_attrs *new_attrs, bool aggregating)
637 {
638 	struct damon_attrs *old_attrs = &ctx->attrs;
639 	struct damon_target *t;
640 	struct damon_region *r;
641 
642 	/* if any interval is zero, simply skip the conversion */
643 	if (!old_attrs->sample_interval || !old_attrs->aggr_interval ||
644 			!new_attrs->sample_interval ||
645 			!new_attrs->aggr_interval)
646 		return;
647 
648 	damon_for_each_target(t, ctx)
649 		damon_for_each_region(r, t)
650 			damon_update_monitoring_result(
651 					r, old_attrs, new_attrs, aggregating);
652 }
653 
654 /*
655  * damon_valid_intervals_goal() - return if the intervals goal of @attrs is
656  * valid.
657  */
658 static bool damon_valid_intervals_goal(struct damon_attrs *attrs)
659 {
660 	struct damon_intervals_goal *goal = &attrs->intervals_goal;
661 
662 	/* tuning is disabled */
663 	if (!goal->aggrs)
664 		return true;
665 	if (goal->min_sample_us > goal->max_sample_us)
666 		return false;
667 	if (attrs->sample_interval < goal->min_sample_us ||
668 			goal->max_sample_us < attrs->sample_interval)
669 		return false;
670 	return true;
671 }
672 
673 /**
674  * damon_set_attrs() - Set attributes for the monitoring.
675  * @ctx:		monitoring context
676  * @attrs:		monitoring attributes
677  *
678  * This function should be called while the kdamond is not running, while an
679  * access check results aggregation is not ongoing (e.g., from &struct
680  * damon_callback->after_aggregation or &struct
681  * damon_callback->after_wmarks_check callbacks), or from damon_call().
682  *
683  * Every time interval is in micro-seconds.
684  *
685  * Return: 0 on success, negative error code otherwise.
686  */
687 int damon_set_attrs(struct damon_ctx *ctx, struct damon_attrs *attrs)
688 {
689 	unsigned long sample_interval = attrs->sample_interval ?
690 		attrs->sample_interval : 1;
691 	struct damos *s;
692 	bool aggregating = ctx->passed_sample_intervals <
693 		ctx->next_aggregation_sis;
694 
695 	if (!damon_valid_intervals_goal(attrs))
696 		return -EINVAL;
697 
698 	if (attrs->min_nr_regions < 3)
699 		return -EINVAL;
700 	if (attrs->min_nr_regions > attrs->max_nr_regions)
701 		return -EINVAL;
702 	if (attrs->sample_interval > attrs->aggr_interval)
703 		return -EINVAL;
704 
705 	/* calls from outside of the core layer don't set this. */
706 	if (!attrs->aggr_samples)
707 		attrs->aggr_samples = attrs->aggr_interval / sample_interval;
708 
709 	ctx->next_aggregation_sis = ctx->passed_sample_intervals +
710 		attrs->aggr_interval / sample_interval;
711 	ctx->next_ops_update_sis = ctx->passed_sample_intervals +
712 		attrs->ops_update_interval / sample_interval;
713 
714 	damon_update_monitoring_results(ctx, attrs, aggregating);
715 	ctx->attrs = *attrs;
716 
717 	damon_for_each_scheme(s, ctx)
718 		damos_set_next_apply_sis(s, ctx);
719 
720 	return 0;
721 }
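
/*
 * Example (an illustrative sketch): a parameters update that passes the
 * validation above.  Every interval is in microseconds; the numbers are
 * arbitrary.
 *
 *	struct damon_attrs attrs = {
 *		.sample_interval = 10 * 1000,
 *		.aggr_interval = 200 * 1000,
 *		.ops_update_interval = 60 * 1000 * 1000,
 *		.min_nr_regions = 10,
 *		.max_nr_regions = 1000,
 *	};
 *	int err = damon_set_attrs(ctx, &attrs);
 */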
722 
723 /**
724  * damon_set_schemes() - Set data access monitoring based operation schemes.
725  * @ctx:	monitoring context
726  * @schemes:	array of the schemes
727  * @nr_schemes:	number of entries in @schemes
728  *
729  * This function should not be called while the kdamond of the context is
730  * running.
731  */
732 void damon_set_schemes(struct damon_ctx *ctx, struct damos **schemes,
733 			ssize_t nr_schemes)
734 {
735 	struct damos *s, *next;
736 	ssize_t i;
737 
738 	damon_for_each_scheme_safe(s, next, ctx)
739 		damon_destroy_scheme(s);
740 	for (i = 0; i < nr_schemes; i++)
741 		damon_add_scheme(ctx, schemes[i]);
742 }
743 
744 static struct damos_quota_goal *damos_nth_quota_goal(
745 		int n, struct damos_quota *q)
746 {
747 	struct damos_quota_goal *goal;
748 	int i = 0;
749 
750 	damos_for_each_quota_goal(goal, q) {
751 		if (i++ == n)
752 			return goal;
753 	}
754 	return NULL;
755 }
756 
757 static void damos_commit_quota_goal(
758 		struct damos_quota_goal *dst, struct damos_quota_goal *src)
759 {
760 	dst->metric = src->metric;
761 	dst->target_value = src->target_value;
762 	if (dst->metric == DAMOS_QUOTA_USER_INPUT)
763 		dst->current_value = src->current_value;
764 	/* keep last_psi_total as is, since it will be updated in next cycle */
765 }
766 
767 /**
768  * damos_commit_quota_goals() - Commit DAMOS quota goals to another quota.
769  * @dst:	The commit destination DAMOS quota.
770  * @src:	The commit source DAMOS quota.
771  *
772  * Copies user-specified parameters for quota goals from @src to @dst.  Users
773  * should use this function for quota goals-level parameters update of running
774  * DAMON contexts, instead of manual in-place updates.
775  *
776  * This function should be called from parameters-update safe context, like
777  * DAMON callbacks.
778  */
779 int damos_commit_quota_goals(struct damos_quota *dst, struct damos_quota *src)
780 {
781 	struct damos_quota_goal *dst_goal, *next, *src_goal, *new_goal;
782 	int i = 0, j = 0;
783 
784 	damos_for_each_quota_goal_safe(dst_goal, next, dst) {
785 		src_goal = damos_nth_quota_goal(i++, src);
786 		if (src_goal)
787 			damos_commit_quota_goal(dst_goal, src_goal);
788 		else
789 			damos_destroy_quota_goal(dst_goal);
790 	}
791 	damos_for_each_quota_goal_safe(src_goal, next, src) {
792 		if (j++ < i)
793 			continue;
794 		new_goal = damos_new_quota_goal(
795 				src_goal->metric, src_goal->target_value);
796 		if (!new_goal)
797 			return -ENOMEM;
798 		damos_add_quota_goal(dst, new_goal);
799 	}
800 	return 0;
801 }
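
/*
 * Example (an illustrative sketch): committing a single user-input goal to
 * the quota of a running scheme.  'running_quota' is a hypothetical
 * pointer to the destination quota.
 *
 *	struct damos_quota src = {};
 *	struct damos_quota_goal *g;
 *	int err = -ENOMEM;
 *
 *	INIT_LIST_HEAD(&src.goals);
 *	g = damos_new_quota_goal(DAMOS_QUOTA_USER_INPUT, 10000);
 *	if (g) {
 *		g->current_value = 5000;
 *		damos_add_quota_goal(&src, g);
 *		err = damos_commit_quota_goals(running_quota, &src);
 *	}
 */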
802 
803 static int damos_commit_quota(struct damos_quota *dst, struct damos_quota *src)
804 {
805 	int err;
806 
807 	dst->reset_interval = src->reset_interval;
808 	dst->ms = src->ms;
809 	dst->sz = src->sz;
810 	err = damos_commit_quota_goals(dst, src);
811 	if (err)
812 		return err;
813 	dst->weight_sz = src->weight_sz;
814 	dst->weight_nr_accesses = src->weight_nr_accesses;
815 	dst->weight_age = src->weight_age;
816 	return 0;
817 }
818 
819 static struct damos_filter *damos_nth_filter(int n, struct damos *s)
820 {
821 	struct damos_filter *filter;
822 	int i = 0;
823 
824 	damos_for_each_filter(filter, s) {
825 		if (i++ == n)
826 			return filter;
827 	}
828 	return NULL;
829 }
830 
831 static void damos_commit_filter_arg(
832 		struct damos_filter *dst, struct damos_filter *src)
833 {
834 	switch (dst->type) {
835 	case DAMOS_FILTER_TYPE_MEMCG:
836 		dst->memcg_id = src->memcg_id;
837 		break;
838 	case DAMOS_FILTER_TYPE_ADDR:
839 		dst->addr_range = src->addr_range;
840 		break;
841 	case DAMOS_FILTER_TYPE_TARGET:
842 		dst->target_idx = src->target_idx;
843 		break;
844 	case DAMOS_FILTER_TYPE_HUGEPAGE_SIZE:
845 		dst->sz_range = src->sz_range;
846 		break;
847 	default:
848 		break;
849 	}
850 }
851 
852 static void damos_commit_filter(
853 		struct damos_filter *dst, struct damos_filter *src)
854 {
855 	dst->type = src->type;
856 	dst->matching = src->matching;
857 	damos_commit_filter_arg(dst, src);
858 }
859 
860 static int damos_commit_core_filters(struct damos *dst, struct damos *src)
861 {
862 	struct damos_filter *dst_filter, *next, *src_filter, *new_filter;
863 	int i = 0, j = 0;
864 
865 	damos_for_each_filter_safe(dst_filter, next, dst) {
866 		src_filter = damos_nth_filter(i++, src);
867 		if (src_filter)
868 			damos_commit_filter(dst_filter, src_filter);
869 		else
870 			damos_destroy_filter(dst_filter);
871 	}
872 
873 	damos_for_each_filter_safe(src_filter, next, src) {
874 		if (j++ < i)
875 			continue;
876 
877 		new_filter = damos_new_filter(
878 				src_filter->type, src_filter->matching,
879 				src_filter->allow);
880 		if (!new_filter)
881 			return -ENOMEM;
882 		damos_commit_filter_arg(new_filter, src_filter);
883 		damos_add_filter(dst, new_filter);
884 	}
885 	return 0;
886 }
887 
888 static int damos_commit_ops_filters(struct damos *dst, struct damos *src)
889 {
890 	struct damos_filter *dst_filter, *next, *src_filter, *new_filter;
891 	int i = 0, j = 0;
892 
893 	damos_for_each_ops_filter_safe(dst_filter, next, dst) {
894 		src_filter = damos_nth_filter(i++, src);
895 		if (src_filter)
896 			damos_commit_filter(dst_filter, src_filter);
897 		else
898 			damos_destroy_filter(dst_filter);
899 	}
900 
901 	damos_for_each_ops_filter_safe(src_filter, next, src) {
902 		if (j++ < i)
903 			continue;
904 
905 		new_filter = damos_new_filter(
906 				src_filter->type, src_filter->matching,
907 				src_filter->allow);
908 		if (!new_filter)
909 			return -ENOMEM;
910 		damos_commit_filter_arg(new_filter, src_filter);
911 		damos_add_filter(dst, new_filter);
912 	}
913 	return 0;
914 }
915 
916 /**
917  * damos_filters_default_reject() - decide whether to reject memory that didn't
918  *				    match any given filter.
919  * @filters:	Given DAMOS filters of a group.
920  */
921 static bool damos_filters_default_reject(struct list_head *filters)
922 {
923 	struct damos_filter *last_filter;
924 
925 	if (list_empty(filters))
926 		return false;
927 	last_filter = list_last_entry(filters, struct damos_filter, list);
928 	return last_filter->allow;
929 }
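
/*
 * For example, if the last filter of a group is an "allow" filter, memory
 * that matched none of the group's filters is rejected by default.
 * Conversely, if the last filter is a "reject" one, unmatched memory is
 * allowed by default.
 */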
930 
931 static void damos_set_filters_default_reject(struct damos *s)
932 {
933 	if (!list_empty(&s->ops_filters))
934 		s->core_filters_default_reject = false;
935 	else
936 		s->core_filters_default_reject =
937 			damos_filters_default_reject(&s->filters);
938 	s->ops_filters_default_reject =
939 		damos_filters_default_reject(&s->ops_filters);
940 }
941 
942 static int damos_commit_filters(struct damos *dst, struct damos *src)
943 {
944 	int err;
945 
946 	err = damos_commit_core_filters(dst, src);
947 	if (err)
948 		return err;
949 	err = damos_commit_ops_filters(dst, src);
950 	if (err)
951 		return err;
952 	damos_set_filters_default_reject(dst);
953 	return 0;
954 }
955 
956 static struct damos *damon_nth_scheme(int n, struct damon_ctx *ctx)
957 {
958 	struct damos *s;
959 	int i = 0;
960 
961 	damon_for_each_scheme(s, ctx) {
962 		if (i++ == n)
963 			return s;
964 	}
965 	return NULL;
966 }
967 
968 static int damos_commit(struct damos *dst, struct damos *src)
969 {
970 	int err;
971 
972 	dst->pattern = src->pattern;
973 	dst->action = src->action;
974 	dst->apply_interval_us = src->apply_interval_us;
975 
976 	err = damos_commit_quota(&dst->quota, &src->quota);
977 	if (err)
978 		return err;
979 
980 	dst->wmarks = src->wmarks;
981 
982 	err = damos_commit_filters(dst, src);
983 	return err;
984 }
985 
986 static int damon_commit_schemes(struct damon_ctx *dst, struct damon_ctx *src)
987 {
988 	struct damos *dst_scheme, *next, *src_scheme, *new_scheme;
989 	int i = 0, j = 0, err;
990 
991 	damon_for_each_scheme_safe(dst_scheme, next, dst) {
992 		src_scheme = damon_nth_scheme(i++, src);
993 		if (src_scheme) {
994 			err = damos_commit(dst_scheme, src_scheme);
995 			if (err)
996 				return err;
997 		} else {
998 			damon_destroy_scheme(dst_scheme);
999 		}
1000 	}
1001 
1002 	damon_for_each_scheme_safe(src_scheme, next, src) {
1003 		if (j++ < i)
1004 			continue;
1005 		new_scheme = damon_new_scheme(&src_scheme->pattern,
1006 				src_scheme->action,
1007 				src_scheme->apply_interval_us,
1008 				&src_scheme->quota, &src_scheme->wmarks,
1009 				NUMA_NO_NODE);
1010 		if (!new_scheme)
1011 			return -ENOMEM;
1012 		err = damos_commit(new_scheme, src_scheme);
1013 		if (err) {
1014 			damon_destroy_scheme(new_scheme);
1015 			return err;
1016 		}
1017 		damon_add_scheme(dst, new_scheme);
1018 	}
1019 	return 0;
1020 }
1021 
1022 static struct damon_target *damon_nth_target(int n, struct damon_ctx *ctx)
1023 {
1024 	struct damon_target *t;
1025 	int i = 0;
1026 
1027 	damon_for_each_target(t, ctx) {
1028 		if (i++ == n)
1029 			return t;
1030 	}
1031 	return NULL;
1032 }
1033 
1034 /*
1035  * The caller should ensure the regions of @src are
1036  * 1. valid (end >= start) and
1037  * 2. sorted by starting address.
1038  *
1039  * If @src has no region, @dst keeps current regions.
1040  */
1041 static int damon_commit_target_regions(
1042 		struct damon_target *dst, struct damon_target *src)
1043 {
1044 	struct damon_region *src_region;
1045 	struct damon_addr_range *ranges;
1046 	int i = 0, err;
1047 
1048 	damon_for_each_region(src_region, src)
1049 		i++;
1050 	if (!i)
1051 		return 0;
1052 
1053 	ranges = kmalloc_array(i, sizeof(*ranges), GFP_KERNEL | __GFP_NOWARN);
1054 	if (!ranges)
1055 		return -ENOMEM;
1056 	i = 0;
1057 	damon_for_each_region(src_region, src)
1058 		ranges[i++] = src_region->ar;
1059 	err = damon_set_regions(dst, ranges, i);
1060 	kfree(ranges);
1061 	return err;
1062 }
1063 
1064 static int damon_commit_target(
1065 		struct damon_target *dst, bool dst_has_pid,
1066 		struct damon_target *src, bool src_has_pid)
1067 {
1068 	int err;
1069 
1070 	err = damon_commit_target_regions(dst, src);
1071 	if (err)
1072 		return err;
1073 	if (dst_has_pid)
1074 		put_pid(dst->pid);
1075 	if (src_has_pid)
1076 		get_pid(src->pid);
1077 	dst->pid = src->pid;
1078 	return 0;
1079 }
1080 
1081 static int damon_commit_targets(
1082 		struct damon_ctx *dst, struct damon_ctx *src)
1083 {
1084 	struct damon_target *dst_target, *next, *src_target, *new_target;
1085 	int i = 0, j = 0, err;
1086 
1087 	damon_for_each_target_safe(dst_target, next, dst) {
1088 		src_target = damon_nth_target(i++, src);
1089 		if (src_target) {
1090 			err = damon_commit_target(
1091 					dst_target, damon_target_has_pid(dst),
1092 					src_target, damon_target_has_pid(src));
1093 			if (err)
1094 				return err;
1095 		} else {
1096 			if (damon_target_has_pid(dst))
1097 				put_pid(dst_target->pid);
1098 			damon_destroy_target(dst_target);
1099 		}
1100 	}
1101 
1102 	damon_for_each_target_safe(src_target, next, src) {
1103 		if (j++ < i)
1104 			continue;
1105 		new_target = damon_new_target();
1106 		if (!new_target)
1107 			return -ENOMEM;
1108 		err = damon_commit_target(new_target, false,
1109 				src_target, damon_target_has_pid(src));
1110 		if (err) {
1111 			damon_destroy_target(new_target);
1112 			return err;
1113 		}
1114 		damon_add_target(dst, new_target);
1115 	}
1116 	return 0;
1117 }
1118 
1119 /**
1120  * damon_commit_ctx() - Commit parameters of a DAMON context to another.
1121  * @dst:	The commit destination DAMON context.
1122  * @src:	The commit source DAMON context.
1123  *
1124  * This function copies user-specified parameters from @src to @dst and updates
1125  * the internal status and results accordingly.  Users should use this function
1126  * for context-level parameters update of a running context, instead of manual
1127  * in-place updates.
1128  *
1129  * This function should be called from parameters-update safe context, like
1130  * DAMON callbacks.
1131  */
1132 int damon_commit_ctx(struct damon_ctx *dst, struct damon_ctx *src)
1133 {
1134 	int err;
1135 
1136 	err = damon_commit_schemes(dst, src);
1137 	if (err)
1138 		return err;
1139 	err = damon_commit_targets(dst, src);
1140 	if (err)
1141 		return err;
1142 	/*
1143 	 * schemes and targets should be updated first, since
1144 	 * 1. damon_set_attrs() updates monitoring results of targets and
1145 	 * next_apply_sis of schemes, and
1146 	 * 2. ops update should be done after pid handling is done (target
1147 	 *    committing requires putting pids).
1148 	 */
1149 	err = damon_set_attrs(dst, &src->attrs);
1150 	if (err)
1151 		return err;
1152 	dst->ops = src->ops;
1153 
1154 	return 0;
1155 }
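
/*
 * Example (an illustrative sketch) of the intended online tuning flow:
 * build a temporary context that carries only the new parameters, commit
 * it to the running one, and discard it.  'running_ctx' is a hypothetical
 * pointer to the already-running context.
 *
 *	struct damon_ctx *params = damon_new_ctx();
 *	int err;
 *
 *	... set attrs, targets and schemes of params as wanted ...
 *
 *	err = damon_commit_ctx(running_ctx, params);
 *	damon_destroy_ctx(params);
 */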
1156 
1157 /**
1158  * damon_nr_running_ctxs() - Return number of currently running contexts.
1159  */
1160 int damon_nr_running_ctxs(void)
1161 {
1162 	int nr_ctxs;
1163 
1164 	mutex_lock(&damon_lock);
1165 	nr_ctxs = nr_running_ctxs;
1166 	mutex_unlock(&damon_lock);
1167 
1168 	return nr_ctxs;
1169 }
1170 
1171 /* Returns the size upper limit for each monitoring region */
1172 static unsigned long damon_region_sz_limit(struct damon_ctx *ctx)
1173 {
1174 	struct damon_target *t;
1175 	struct damon_region *r;
1176 	unsigned long sz = 0;
1177 
1178 	damon_for_each_target(t, ctx) {
1179 		damon_for_each_region(r, t)
1180 			sz += damon_sz_region(r);
1181 	}
1182 
1183 	if (ctx->attrs.min_nr_regions)
1184 		sz /= ctx->attrs.min_nr_regions;
1185 	if (sz < DAMON_MIN_REGION)
1186 		sz = DAMON_MIN_REGION;
1187 
1188 	return sz;
1189 }
1190 
1191 static int kdamond_fn(void *data);
1192 
1193 /*
1194  * __damon_start() - Starts monitoring with given context.
1195  * @ctx:	monitoring context
1196  *
1197  * This function should be called while damon_lock is held.
1198  *
1199  * Return: 0 on success, negative error code otherwise.
1200  */
1201 static int __damon_start(struct damon_ctx *ctx)
1202 {
1203 	int err = -EBUSY;
1204 
1205 	mutex_lock(&ctx->kdamond_lock);
1206 	if (!ctx->kdamond) {
1207 		err = 0;
1208 		reinit_completion(&ctx->kdamond_started);
1209 		ctx->kdamond = kthread_run(kdamond_fn, ctx, "kdamond.%d",
1210 				nr_running_ctxs);
1211 		if (IS_ERR(ctx->kdamond)) {
1212 			err = PTR_ERR(ctx->kdamond);
1213 			ctx->kdamond = NULL;
1214 		} else {
1215 			wait_for_completion(&ctx->kdamond_started);
1216 		}
1217 	}
1218 	mutex_unlock(&ctx->kdamond_lock);
1219 
1220 	return err;
1221 }
1222 
1223 /**
1224  * damon_start() - Starts monitoring for a given group of contexts.
1225  * @ctxs:	an array of the pointers for contexts to start monitoring
1226  * @nr_ctxs:	size of @ctxs
1227  * @exclusive:	exclusiveness of this contexts group
1228  *
1229  * This function starts a group of monitoring threads for a group of monitoring
1230  * contexts.  One thread per context is created and run in parallel.  The
1231  * caller should handle synchronization between the threads by itself.  If
1232  * @exclusive is true and a group of threads created by another
1233  * 'damon_start()' call is currently running, this function does nothing but
1234  * returns -EBUSY.
1235  *
1236  * Return: 0 on success, negative error code otherwise.
1237  */
1238 int damon_start(struct damon_ctx **ctxs, int nr_ctxs, bool exclusive)
1239 {
1240 	int i;
1241 	int err = 0;
1242 
1243 	mutex_lock(&damon_lock);
1244 	if ((exclusive && nr_running_ctxs) ||
1245 			(!exclusive && running_exclusive_ctxs)) {
1246 		mutex_unlock(&damon_lock);
1247 		return -EBUSY;
1248 	}
1249 
1250 	for (i = 0; i < nr_ctxs; i++) {
1251 		err = __damon_start(ctxs[i]);
1252 		if (err)
1253 			break;
1254 		nr_running_ctxs++;
1255 	}
1256 	if (exclusive && nr_running_ctxs)
1257 		running_exclusive_ctxs = true;
1258 	mutex_unlock(&damon_lock);
1259 
1260 	return err;
1261 }
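
/*
 * Example (an illustrative sketch): starting a single context as an
 * exclusive group, assuming damon_new_ctx() succeeded.
 *
 *	struct damon_ctx *ctx = damon_new_ctx();
 *	int err;
 *
 *	err = damon_select_ops(ctx, DAMON_OPS_PADDR);
 *	if (!err)
 *		err = damon_start(&ctx, 1, true);
 */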
1262 
1263 /*
1264  * __damon_stop() - Stops monitoring of a given context.
1265  * @ctx:	monitoring context
1266  *
1267  * Return: 0 on success, negative error code otherwise.
1268  */
1269 static int __damon_stop(struct damon_ctx *ctx)
1270 {
1271 	struct task_struct *tsk;
1272 
1273 	mutex_lock(&ctx->kdamond_lock);
1274 	tsk = ctx->kdamond;
1275 	if (tsk) {
1276 		get_task_struct(tsk);
1277 		mutex_unlock(&ctx->kdamond_lock);
1278 		kthread_stop_put(tsk);
1279 		return 0;
1280 	}
1281 	mutex_unlock(&ctx->kdamond_lock);
1282 
1283 	return -EPERM;
1284 }
1285 
1286 /**
1287  * damon_stop() - Stops monitoring for a given group of contexts.
1288  * @ctxs:	an array of the pointers for contexts to stop monitoring
1289  * @nr_ctxs:	size of @ctxs
1290  *
1291  * Return: 0 on success, negative error code otherwise.
1292  */
1293 int damon_stop(struct damon_ctx **ctxs, int nr_ctxs)
1294 {
1295 	int i, err = 0;
1296 
1297 	for (i = 0; i < nr_ctxs; i++) {
1298 		/* nr_running_ctxs is decremented in kdamond_fn */
1299 		err = __damon_stop(ctxs[i]);
1300 		if (err)
1301 			break;
1302 	}
1303 	return err;
1304 }
1305 
1306 static bool damon_is_running(struct damon_ctx *ctx)
1307 {
1308 	bool running;
1309 
1310 	mutex_lock(&ctx->kdamond_lock);
1311 	running = ctx->kdamond != NULL;
1312 	mutex_unlock(&ctx->kdamond_lock);
1313 	return running;
1314 }
1315 
1316 /**
1317  * damon_call() - Invoke a given function on DAMON worker thread (kdamond).
1318  * @ctx:	DAMON context to call the function for.
1319  * @control:	Control variable of the call request.
1320  *
1321  * Ask DAMON worker thread (kdamond) of @ctx to call a function with an
1322  * argument data that respectively passed via &damon_call_control->fn and
1323  * &damon_call_control->data of @control, and wait until the kdamond finishes
1324  * handling of the request.
1325  *
1326  * The kdamond executes the function with the argument in the main loop, just
1327  * after a sampling of the iteration is finished.  The function can hence
1328  * safely access the internal data of the &struct damon_ctx without additional
1329  * synchronization.  The return value of the function will be saved in
1330  * &damon_call_control->return_code.
1331  *
1332  * Return: 0 on success, negative error code otherwise.
1333  */
1334 int damon_call(struct damon_ctx *ctx, struct damon_call_control *control)
1335 {
1336 	init_completion(&control->completion);
1337 	control->canceled = false;
1338 
1339 	mutex_lock(&ctx->call_control_lock);
1340 	if (ctx->call_control) {
1341 		mutex_unlock(&ctx->call_control_lock);
1342 		return -EBUSY;
1343 	}
1344 	ctx->call_control = control;
1345 	mutex_unlock(&ctx->call_control_lock);
1346 	if (!damon_is_running(ctx))
1347 		return -EINVAL;
1348 	wait_for_completion(&control->completion);
1349 	if (control->canceled)
1350 		return -ECANCELED;
1351 	return 0;
1352 }
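
/*
 * Example (an illustrative sketch): counting the targets of a running
 * context from the kdamond context.  The callback name is hypothetical,
 * and the int (*)(void *) shape of &damon_call_control->fn is assumed
 * from the kernel-doc above.
 *
 *	static int count_targets(void *data)
 *	{
 *		struct damon_ctx *ctx = data;
 *		struct damon_target *t;
 *		int nr = 0;
 *
 *		damon_for_each_target(t, ctx)
 *			nr++;
 *		pr_info("%d targets\n", nr);
 *		return 0;
 *	}
 *
 *	struct damon_call_control control = {
 *		.fn = count_targets, .data = ctx,
 *	};
 *	int err = damon_call(ctx, &control);
 */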
1353 
1354 /**
1355  * damos_walk() - Invoke a given function while DAMOS walks regions.
1356  * @ctx:	DAMON context to call the functions for.
1357  * @control:	Control variable of the walk request.
1358  *
1359  * Ask DAMON worker thread (kdamond) of @ctx to call a function for each region
1360  * that the kdamond will apply DAMOS action to, and wait until the kdamond
1361  * finishes handling of the request.
1362  *
1363  * The kdamond executes the given function in the main loop, for each region
1364  * just after it applied any DAMOS actions of @ctx to it.  The invocation is
1365  * made only within one &damos->apply_interval_us since damos_walk()
1366  * invocation, for each scheme.  The given callback function can hence safely
1367  * access the internal data of &struct damon_ctx and &struct damon_region that
1368  * each of the scheme will apply the action for next interval, without
1369  * additional synchronizations against the kdamond.  If every scheme of @ctx
1370  * passed at least one &damos->apply_interval_us, kdamond marks the request as
1371  * completed so that damos_walk() can wake up and return.
1372  *
1373  * Return: 0 on success, negative error code otherwise.
1374  */
1375 int damos_walk(struct damon_ctx *ctx, struct damos_walk_control *control)
1376 {
1377 	init_completion(&control->completion);
1378 	control->canceled = false;
1379 	mutex_lock(&ctx->walk_control_lock);
1380 	if (ctx->walk_control) {
1381 		mutex_unlock(&ctx->walk_control_lock);
1382 		return -EBUSY;
1383 	}
1384 	ctx->walk_control = control;
1385 	mutex_unlock(&ctx->walk_control_lock);
1386 	if (!damon_is_running(ctx))
1387 		return -EINVAL;
1388 	wait_for_completion(&control->completion);
1389 	if (control->canceled)
1390 		return -ECANCELED;
1391 	return 0;
1392 }
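
/*
 * Example (an illustrative sketch): summing the size of all regions that
 * the schemes apply their actions to.  The callback name is hypothetical;
 * the walk_fn signature follows the invocation in damos_walk_call_walk()
 * below.
 *
 *	static void sum_sz(void *data, struct damon_ctx *ctx,
 *			struct damon_target *t, struct damon_region *r,
 *			struct damos *s, unsigned long sz_filter_passed)
 *	{
 *		*(unsigned long *)data += damon_sz_region(r);
 *	}
 *
 *	unsigned long total = 0;
 *	struct damos_walk_control control = {
 *		.walk_fn = sum_sz, .data = &total,
 *	};
 *	int err = damos_walk(ctx, &control);
 */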
1393 
1394 /*
1395  * Reset the aggregated monitoring results ('nr_accesses' of each region).
1396  */
1397 static void kdamond_reset_aggregated(struct damon_ctx *c)
1398 {
1399 	struct damon_target *t;
1400 	unsigned int ti = 0;	/* target's index */
1401 
1402 	damon_for_each_target(t, c) {
1403 		struct damon_region *r;
1404 
1405 		damon_for_each_region(r, t) {
1406 			trace_damon_aggregated(ti, r, damon_nr_regions(t));
1407 			r->last_nr_accesses = r->nr_accesses;
1408 			r->nr_accesses = 0;
1409 		}
1410 		ti++;
1411 	}
1412 }
1413 
1414 static unsigned long damon_get_intervals_score(struct damon_ctx *c)
1415 {
1416 	struct damon_target *t;
1417 	struct damon_region *r;
1418 	unsigned long sz_region, max_access_events = 0, access_events = 0;
1419 	unsigned long target_access_events;
1420 	unsigned long goal_bp = c->attrs.intervals_goal.access_bp;
1421 
1422 	damon_for_each_target(t, c) {
1423 		damon_for_each_region(r, t) {
1424 			sz_region = damon_sz_region(r);
1425 			max_access_events += sz_region * c->attrs.aggr_samples;
1426 			access_events += sz_region * r->nr_accesses;
1427 		}
1428 	}
1429 	target_access_events = max_access_events * goal_bp / 10000;
1430 	return access_events * 10000 / target_access_events;
1431 }
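
/*
 * Worked example of the score (ignoring integer truncation): with
 * aggr_samples of 20 and an access_bp goal of 400 (4%), a single region
 * that was found accessed in one sampling interval (nr_accesses == 1)
 * yields max_access_events == 20 * sz and access_events == sz, hence
 * target_access_events == 0.8 * sz and a score of 10000 / 0.8 == 12500,
 * i.e., 25% more observed events than the goal.
 */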
1432 
1433 static unsigned long damon_feed_loop_next_input(unsigned long last_input,
1434 		unsigned long score);
1435 
1436 static unsigned long damon_get_intervals_adaptation_bp(struct damon_ctx *c)
1437 {
1438 	unsigned long score_bp, adaptation_bp;
1439 
1440 	score_bp = damon_get_intervals_score(c);
1441 	adaptation_bp = damon_feed_loop_next_input(100000000, score_bp) /
1442 		10000;
1443 	/*
1444 	 * adaptation_bp ranges from 1 to 20,000.  Avoid too rapid reduction of
1445 	 * the intervals by rescaling [1,10,000] to [5000, 10,000].
1446 	 */
1447 	if (adaptation_bp <= 10000)
1448 		adaptation_bp = 5000 + adaptation_bp / 2;
1449 	return adaptation_bp;
1450 }
1451 
1452 static void kdamond_tune_intervals(struct damon_ctx *c)
1453 {
1454 	unsigned long adaptation_bp;
1455 	struct damon_attrs new_attrs;
1456 	struct damon_intervals_goal *goal;
1457 
1458 	adaptation_bp = damon_get_intervals_adaptation_bp(c);
1459 	if (adaptation_bp == 10000)
1460 		return;
1461 
1462 	new_attrs = c->attrs;
1463 	goal = &c->attrs.intervals_goal;
1464 	new_attrs.sample_interval = min(goal->max_sample_us,
1465 			c->attrs.sample_interval * adaptation_bp / 10000);
1466 	new_attrs.sample_interval = max(goal->min_sample_us,
1467 			new_attrs.sample_interval);
1468 	new_attrs.aggr_interval = new_attrs.sample_interval *
1469 		c->attrs.aggr_samples;
1470 	damon_set_attrs(c, &new_attrs);
1471 }
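
/*
 * For example, if damon_get_intervals_adaptation_bp() returns 5000, the
 * sampling interval is halved, clamped into
 * [min_sample_us, max_sample_us], and the aggregation interval follows so
 * that attrs.aggr_samples stays the same.
 */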
1472 
1473 static void damon_split_region_at(struct damon_target *t,
1474 				  struct damon_region *r, unsigned long sz_r);
1475 
1476 static bool __damos_valid_target(struct damon_region *r, struct damos *s)
1477 {
1478 	unsigned long sz;
1479 	unsigned int nr_accesses = r->nr_accesses_bp / 10000;
1480 
1481 	sz = damon_sz_region(r);
1482 	return s->pattern.min_sz_region <= sz &&
1483 		sz <= s->pattern.max_sz_region &&
1484 		s->pattern.min_nr_accesses <= nr_accesses &&
1485 		nr_accesses <= s->pattern.max_nr_accesses &&
1486 		s->pattern.min_age_region <= r->age &&
1487 		r->age <= s->pattern.max_age_region;
1488 }
1489 
1490 static bool damos_valid_target(struct damon_ctx *c, struct damon_target *t,
1491 		struct damon_region *r, struct damos *s)
1492 {
1493 	bool ret = __damos_valid_target(r, s);
1494 
1495 	if (!ret || !s->quota.esz || !c->ops.get_scheme_score)
1496 		return ret;
1497 
1498 	return c->ops.get_scheme_score(c, t, r, s) >= s->quota.min_score;
1499 }
1500 
1501 /*
1502  * damos_skip_charged_region() - Check if the given region or starting part of
1503  * it is already charged for the DAMOS quota.
1504  * @t:	The target of the region.
1505  * @rp:	The pointer to the region.
1506  * @s:	The scheme to be applied.
1507  *
1508  * If a quota of a scheme has been exceeded in a quota charge window, the
1509  * scheme's action would be applied to only a part of the target access
1510  * pattern fulfilling regions.  To avoid applying the scheme action to only
1511  * already applied regions, DAMON skips applying the scheme action to the
1512  * regions that were charged in the previous charge window.
1513  *
1514  * This function checks if a given region should be skipped or not for that
1515  * reason.  If only the starting part of the region has previously been
1516  * charged, this function splits the region into two so that the second one
1517  * covers the area that was not charged in the previous charge window, saves
1518  * the second region in *rp, and returns false, so that the caller can apply
1519  * the DAMON action to the second one.
1520  *
1521  * Return: true if the region should be entirely skipped, false otherwise.
1522  */
1523 static bool damos_skip_charged_region(struct damon_target *t,
1524 		struct damon_region **rp, struct damos *s)
1525 {
1526 	struct damon_region *r = *rp;
1527 	struct damos_quota *quota = &s->quota;
1528 	unsigned long sz_to_skip;
1529 
1530 	/* Skip previously charged regions */
1531 	if (quota->charge_target_from) {
1532 		if (t != quota->charge_target_from)
1533 			return true;
1534 		if (r == damon_last_region(t)) {
1535 			quota->charge_target_from = NULL;
1536 			quota->charge_addr_from = 0;
1537 			return true;
1538 		}
1539 		if (quota->charge_addr_from &&
1540 				r->ar.end <= quota->charge_addr_from)
1541 			return true;
1542 
1543 		if (quota->charge_addr_from && r->ar.start <
1544 				quota->charge_addr_from) {
1545 			sz_to_skip = ALIGN_DOWN(quota->charge_addr_from -
1546 					r->ar.start, DAMON_MIN_REGION);
1547 			if (!sz_to_skip) {
1548 				if (damon_sz_region(r) <= DAMON_MIN_REGION)
1549 					return true;
1550 				sz_to_skip = DAMON_MIN_REGION;
1551 			}
1552 			damon_split_region_at(t, r, sz_to_skip);
1553 			r = damon_next_region(r);
1554 			*rp = r;
1555 		}
1556 		quota->charge_target_from = NULL;
1557 		quota->charge_addr_from = 0;
1558 	}
1559 	return false;
1560 }
1561 
1562 static void damos_update_stat(struct damos *s,
1563 		unsigned long sz_tried, unsigned long sz_applied,
1564 		unsigned long sz_ops_filter_passed)
1565 {
1566 	s->stat.nr_tried++;
1567 	s->stat.sz_tried += sz_tried;
1568 	if (sz_applied)
1569 		s->stat.nr_applied++;
1570 	s->stat.sz_applied += sz_applied;
1571 	s->stat.sz_ops_filter_passed += sz_ops_filter_passed;
1572 }
1573 
1574 static bool damos_filter_match(struct damon_ctx *ctx, struct damon_target *t,
1575 		struct damon_region *r, struct damos_filter *filter)
1576 {
1577 	bool matched = false;
1578 	struct damon_target *ti;
1579 	int target_idx = 0;
1580 	unsigned long start, end;
1581 
1582 	switch (filter->type) {
1583 	case DAMOS_FILTER_TYPE_TARGET:
1584 		damon_for_each_target(ti, ctx) {
1585 			if (ti == t)
1586 				break;
1587 			target_idx++;
1588 		}
1589 		matched = target_idx == filter->target_idx;
1590 		break;
1591 	case DAMOS_FILTER_TYPE_ADDR:
1592 		start = ALIGN_DOWN(filter->addr_range.start, DAMON_MIN_REGION);
1593 		end = ALIGN_DOWN(filter->addr_range.end, DAMON_MIN_REGION);
1594 
1595 		/* inside the range */
1596 		if (start <= r->ar.start && r->ar.end <= end) {
1597 			matched = true;
1598 			break;
1599 		}
1600 		/* outside of the range */
1601 		if (r->ar.end <= start || end <= r->ar.start) {
1602 			matched = false;
1603 			break;
1604 		}
1605 		/* start before the range and overlap */
1606 		if (r->ar.start < start) {
1607 			damon_split_region_at(t, r, start - r->ar.start);
1608 			matched = false;
1609 			break;
1610 		}
1611 		/* start inside the range */
1612 		damon_split_region_at(t, r, end - r->ar.start);
1613 		matched = true;
1614 		break;
1615 	default:
1616 		return false;
1617 	}
1618 
1619 	return matched == filter->matching;
1620 }
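
/*
 * Example of the DAMOS_FILTER_TYPE_ADDR handling above: for a region
 * [10 MiB, 30 MiB) and a filter range of [20 MiB, 40 MiB), the region
 * starts before the range, so it is split at 20 MiB and the first half is
 * reported as not matching; the remaining [20 MiB, 30 MiB) region is
 * evaluated separately and matches.
 */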
1621 
1622 static bool damos_filter_out(struct damon_ctx *ctx, struct damon_target *t,
1623 		struct damon_region *r, struct damos *s)
1624 {
1625 	struct damos_filter *filter;
1626 
1627 	s->core_filters_allowed = false;
1628 	damos_for_each_filter(filter, s) {
1629 		if (damos_filter_match(ctx, t, r, filter)) {
1630 			if (filter->allow)
1631 				s->core_filters_allowed = true;
1632 			return !filter->allow;
1633 		}
1634 	}
1635 	return s->core_filters_default_reject;
1636 }
1637 
1638 /*
1639  * damos_walk_call_walk() - Call &damos_walk_control->walk_fn.
1640  * @ctx:	The context of &damon_ctx->walk_control.
1641  * @t:		The monitoring target of @r that @s will be applied.
1642  * @r:		The region of @t that @s will be applied.
1643  * @s:		The scheme of @ctx that will be applied to @r.
1644  *
1645  * This function is called from kdamond whenever it asked the operation set to
1646  * apply a DAMOS scheme action to a region.  If a DAMOS walk request is
1647  * installed by damos_walk() and not yet uninstalled, invoke it.
1648  */
1649 static void damos_walk_call_walk(struct damon_ctx *ctx, struct damon_target *t,
1650 		struct damon_region *r, struct damos *s,
1651 		unsigned long sz_filter_passed)
1652 {
1653 	struct damos_walk_control *control;
1654 
1655 	if (s->walk_completed)
1656 		return;
1657 
1658 	control = ctx->walk_control;
1659 	if (!control)
1660 		return;
1661 
1662 	control->walk_fn(control->data, ctx, t, r, s, sz_filter_passed);
1663 }
1664 
1665 /*
1666  * damos_walk_complete() - Complete DAMOS walk request if all walks are done.
1667  * @ctx:	The context of &damon_ctx->walk_control.
1668  * @s:		A scheme of @ctx that all walks are now done.
1669  *
1670  * This function is called when kdamond has finished applying the action of a
1671  * DAMOS scheme to all regions eligible for the given &damos->apply_interval_us.
1672  * If every scheme of @ctx including @s has now finished walking for at least
1673  * one &damos->apply_interval_us, this function marks the handling of the given
1674  * DAMOS walk request as done, so that damos_walk() can wake up and return.
1675  */
1676 static void damos_walk_complete(struct damon_ctx *ctx, struct damos *s)
1677 {
1678 	struct damos *siter;
1679 	struct damos_walk_control *control;
1680 
1681 	control = ctx->walk_control;
1682 	if (!control)
1683 		return;
1684 
1685 	s->walk_completed = true;
1686 	/* if all schemes completed, signal completion to walker */
1687 	damon_for_each_scheme(siter, ctx) {
1688 		if (!siter->walk_completed)
1689 			return;
1690 	}
1691 	damon_for_each_scheme(siter, ctx)
1692 		siter->walk_completed = false;
1693 
1694 	complete(&control->completion);
1695 	ctx->walk_control = NULL;
1696 }

/*
 * damos_walk_cancel() - Cancel the current DAMOS walk request.
 * @ctx:	The context of &damon_ctx->walk_control.
 *
 * This function is called when @ctx is deactivated by DAMOS watermarks, when
 * a DAMOS walk is requested but there is no DAMOS scheme to walk for, or
 * when the kdamond is already out of the main loop and therefore going to be
 * terminated, and hence cannot continue the walks.  This function therefore
 * marks the walk request as canceled, so that damos_walk() can wake up and
 * return.
 */
static void damos_walk_cancel(struct damon_ctx *ctx)
{
	struct damos_walk_control *control;

	mutex_lock(&ctx->walk_control_lock);
	control = ctx->walk_control;
	mutex_unlock(&ctx->walk_control_lock);

	if (!control)
		return;
	control->canceled = true;
	complete(&control->completion);
	mutex_lock(&ctx->walk_control_lock);
	ctx->walk_control = NULL;
	mutex_unlock(&ctx->walk_control_lock);
}

static void damos_apply_scheme(struct damon_ctx *c, struct damon_target *t,
		struct damon_region *r, struct damos *s)
{
	struct damos_quota *quota = &s->quota;
	unsigned long sz = damon_sz_region(r);
	struct timespec64 begin, end;
	unsigned long sz_applied = 0;
	unsigned long sz_ops_filter_passed = 0;
	/*
	 * We plan to support multiple contexts per kdamond, as the DAMON
	 * sysfs interface implies with its 'nr_contexts' file.  Nevertheless,
	 * only a single context per kdamond is supported for now.  So, we can
	 * simply use the '0' context index here.
	 */
	unsigned int cidx = 0;
	struct damos *siter;		/* schemes iterator */
	unsigned int sidx = 0;
	struct damon_target *titer;	/* targets iterator */
	unsigned int tidx = 0;
	bool do_trace = false;

	/* get indices for trace_damos_before_apply() */
	if (trace_damos_before_apply_enabled()) {
		damon_for_each_scheme(siter, c) {
			if (siter == s)
				break;
			sidx++;
		}
		damon_for_each_target(titer, c) {
			if (titer == t)
				break;
			tidx++;
		}
		do_trace = true;
	}

	if (c->ops.apply_scheme) {
		if (quota->esz && quota->charged_sz + sz > quota->esz) {
			sz = ALIGN_DOWN(quota->esz - quota->charged_sz,
					DAMON_MIN_REGION);
			if (!sz)
				goto update_stat;
			damon_split_region_at(t, r, sz);
		}
		if (damos_filter_out(c, t, r, s))
			return;
		ktime_get_coarse_ts64(&begin);
		trace_damos_before_apply(cidx, sidx, tidx, r,
				damon_nr_regions(t), do_trace);
		sz_applied = c->ops.apply_scheme(c, t, r, s,
				&sz_ops_filter_passed);
		damos_walk_call_walk(c, t, r, s, sz_ops_filter_passed);
		ktime_get_coarse_ts64(&end);
		quota->total_charged_ns += timespec64_to_ns(&end) -
			timespec64_to_ns(&begin);
		quota->charged_sz += sz;
		if (quota->esz && quota->charged_sz >= quota->esz) {
			quota->charge_target_from = t;
			quota->charge_addr_from = r->ar.end + 1;
		}
	}
	if (s->action != DAMOS_STAT)
		r->age = 0;

update_stat:
	damos_update_stat(s, sz, sz_applied, sz_ops_filter_passed);
}
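
/*
 * Worked example of the quota charging above, with illustrative numbers:
 * with an effective quota (->esz) of 1 MiB, 768 KiB already charged, and a
 * 512 KiB region at hand, only ALIGN_DOWN(1 MiB - 768 KiB, DAMON_MIN_REGION)
 * = 256 KiB (assuming DAMON_MIN_REGION divides it evenly) may still be
 * applied.  The region is therefore split at the 256 KiB offset and only the
 * first half is charged and acted on in this charge window.
 */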

static void damon_do_apply_schemes(struct damon_ctx *c,
				   struct damon_target *t,
				   struct damon_region *r)
{
	struct damos *s;

	damon_for_each_scheme(s, c) {
		struct damos_quota *quota = &s->quota;

		if (c->passed_sample_intervals < s->next_apply_sis)
			continue;

		if (!s->wmarks.activated)
			continue;

		/* Check the quota */
		if (quota->esz && quota->charged_sz >= quota->esz)
			continue;

		if (damos_skip_charged_region(t, &r, s))
			continue;

		if (!damos_valid_target(c, t, r, s))
			continue;

		damos_apply_scheme(c, t, r, s);
	}
}

/*
 * damon_feed_loop_next_input() - get next input to achieve a target score.
 * @last_input:	The last input.
 * @score:	Current score that was made with @last_input.
 *
 * Calculate the next input to achieve the target score, based on the last
 * input and the current score.  Assuming the input and the score are
 * positively proportional, calculate how much compensation should be added
 * to or subtracted from the last input, as a proportion of the last input.
 * Avoid the next input being zero by keeping it always non-zero.  In short
 * form (assuming support of float and signed calculations), the algorithm
 * is as below.
 *
 * next_input = max(last_input * ((goal - current) / goal + 1), 1)
 *
 * For simple implementation, we assume the target score is always 10,000.
 * The caller should adjust @score for this.
 *
 * Returns the next input, which is assumed to achieve the target score.
 */
static unsigned long damon_feed_loop_next_input(unsigned long last_input,
		unsigned long score)
{
	const unsigned long goal = 10000;
	/* Set the minimum input as 10000 to avoid the compensation being zero */
	const unsigned long min_input = 10000;
	unsigned long score_goal_diff, compensation;
	bool over_achieving = score > goal;

	if (score == goal)
		return last_input;
	if (score >= goal * 2)
		return min_input;

	if (over_achieving)
		score_goal_diff = score - goal;
	else
		score_goal_diff = goal - score;

	if (last_input < ULONG_MAX / score_goal_diff)
		compensation = last_input * score_goal_diff / goal;
	else
		compensation = last_input / goal * score_goal_diff;

	if (over_achieving)
		return max(last_input - compensation, min_input);
	if (last_input < ULONG_MAX - compensation)
		return last_input + compensation;
	return ULONG_MAX;
}
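
/*
 * Worked example of the feedback loop above, with illustrative numbers: for
 * last_input = 1,000,000 and score = 5,000 (half of the 10,000 goal),
 * score_goal_diff is 5,000 and compensation is
 * 1,000,000 * 5,000 / 10,000 = 500,000.  Since the goal is under-achieved,
 * the next input is 1,000,000 + 500,000 = 1,500,000.  Conversely, a score
 * of 15,000 would subtract the same compensation and return 500,000.
 */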

#ifdef CONFIG_PSI

static u64 damos_get_some_mem_psi_total(void)
{
	if (static_branch_likely(&psi_disabled))
		return 0;
	return div_u64(psi_system.total[PSI_AVGS][PSI_MEM * 2],
			NSEC_PER_USEC);
}

#else	/* CONFIG_PSI */

static inline u64 damos_get_some_mem_psi_total(void)
{
	return 0;
}

#endif	/* CONFIG_PSI */

static void damos_set_quota_goal_current_value(struct damos_quota_goal *goal)
{
	u64 now_psi_total;

	switch (goal->metric) {
	case DAMOS_QUOTA_USER_INPUT:
		/* User should already set goal->current_value */
		break;
	case DAMOS_QUOTA_SOME_MEM_PSI_US:
		now_psi_total = damos_get_some_mem_psi_total();
		goal->current_value = now_psi_total - goal->last_psi_total;
		goal->last_psi_total = now_psi_total;
		break;
	default:
		break;
	}
}

/* Return the highest score since it makes schemes least aggressive */
static unsigned long damos_quota_score(struct damos_quota *quota)
{
	struct damos_quota_goal *goal;
	unsigned long highest_score = 0;

	damos_for_each_quota_goal(goal, quota) {
		damos_set_quota_goal_current_value(goal);
		highest_score = max(highest_score,
				goal->current_value * 10000 /
				goal->target_value);
	}

	return highest_score;
}
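
/*
 * For example, with two goals, one at 150% of its target
 * (current_value * 10000 / target_value == 15000) and another at 50%
 * (score 5000), the returned score is 15000.  Feeding the highest score into
 * the feedback loop shrinks the effective quota the most, keeping the scheme
 * conservative.  (Illustrative numbers.)
 */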

/*
 * Called only if quota->ms or quota->sz is set, or quota->goals is not empty
 */
static void damos_set_effective_quota(struct damos_quota *quota)
{
	unsigned long throughput;
	unsigned long esz = ULONG_MAX;

	if (!quota->ms && list_empty(&quota->goals)) {
		quota->esz = quota->sz;
		return;
	}

	if (!list_empty(&quota->goals)) {
		unsigned long score = damos_quota_score(quota);

		quota->esz_bp = damon_feed_loop_next_input(
				max(quota->esz_bp, 10000UL),
				score);
		esz = quota->esz_bp / 10000;
	}

	if (quota->ms) {
		if (quota->total_charged_ns)
			throughput = quota->total_charged_sz * 1000000 /
				quota->total_charged_ns;
		else
			throughput = PAGE_SIZE * 1024;
		esz = min(throughput * quota->ms, esz);
	}

	if (quota->sz && quota->sz < esz)
		esz = quota->sz;

	quota->esz = esz;
}
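
/*
 * Worked example with illustrative numbers: if the feedback loop sets
 * ->esz_bp to 10^9, the goal-based limit is 10^9 / 10000 = 100,000 bytes.
 * If ->ms is 10 and the measured throughput is 1,000 bytes per millisecond,
 * the time-based limit of 10 * 1,000 = 10,000 bytes is smaller and wins.
 * Finally, a hard size quota of ->sz = 5,000 bytes caps the result, so
 * ->esz becomes 5,000 bytes.
 */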

static void damos_adjust_quota(struct damon_ctx *c, struct damos *s)
{
	struct damos_quota *quota = &s->quota;
	struct damon_target *t;
	struct damon_region *r;
	unsigned long cumulated_sz;
	unsigned int score, max_score = 0;

	if (!quota->ms && !quota->sz && list_empty(&quota->goals))
		return;

	/* New charge window starts */
	if (time_after_eq(jiffies, quota->charged_from +
				msecs_to_jiffies(quota->reset_interval))) {
		if (quota->esz && quota->charged_sz >= quota->esz)
			s->stat.qt_exceeds++;
		quota->total_charged_sz += quota->charged_sz;
		quota->charged_from = jiffies;
		quota->charged_sz = 0;
		damos_set_effective_quota(quota);
	}

	if (!c->ops.get_scheme_score)
		return;

	/* Fill up the score histogram */
	memset(c->regions_score_histogram, 0,
			sizeof(*c->regions_score_histogram) *
			(DAMOS_MAX_SCORE + 1));
	damon_for_each_target(t, c) {
		damon_for_each_region(r, t) {
			if (!__damos_valid_target(r, s))
				continue;
			score = c->ops.get_scheme_score(c, t, r, s);
			c->regions_score_histogram[score] +=
				damon_sz_region(r);
			if (score > max_score)
				max_score = score;
		}
	}

	/* Set the min score limit */
	for (cumulated_sz = 0, score = max_score; ; score--) {
		cumulated_sz += c->regions_score_histogram[score];
		if (cumulated_sz >= quota->esz || !score)
			break;
	}
	quota->min_score = score;
}
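
/*
 * For example (illustrative numbers): if regions of 80 KiB total size have
 * score 90 and regions of 200 KiB total size have score 70 while ->esz is
 * 100 KiB, walking the histogram downward accumulates 80 KiB at score 90 and
 * exceeds the quota at score 70, so ->min_score becomes 70: only regions
 * scored 70 or higher remain candidates under the quota.
 */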

static void kdamond_apply_schemes(struct damon_ctx *c)
{
	struct damon_target *t;
	struct damon_region *r, *next_r;
	struct damos *s;
	unsigned long sample_interval = c->attrs.sample_interval ?
		c->attrs.sample_interval : 1;
	bool has_schemes_to_apply = false;

	damon_for_each_scheme(s, c) {
		if (c->passed_sample_intervals < s->next_apply_sis)
			continue;

		if (!s->wmarks.activated)
			continue;

		has_schemes_to_apply = true;

		damos_adjust_quota(c, s);
	}

	if (!has_schemes_to_apply)
		return;

	mutex_lock(&c->walk_control_lock);
	damon_for_each_target(t, c) {
		damon_for_each_region_safe(r, next_r, t)
			damon_do_apply_schemes(c, t, r);
	}

	damon_for_each_scheme(s, c) {
		if (c->passed_sample_intervals < s->next_apply_sis)
			continue;
		damos_walk_complete(c, s);
		s->next_apply_sis = c->passed_sample_intervals +
			(s->apply_interval_us ? s->apply_interval_us :
			 c->attrs.aggr_interval) / sample_interval;
		s->last_applied = NULL;
	}
	mutex_unlock(&c->walk_control_lock);
}

/*
 * Merge two adjacent regions into one region
 */
static void damon_merge_two_regions(struct damon_target *t,
		struct damon_region *l, struct damon_region *r)
{
	unsigned long sz_l = damon_sz_region(l), sz_r = damon_sz_region(r);

	l->nr_accesses = (l->nr_accesses * sz_l + r->nr_accesses * sz_r) /
			(sz_l + sz_r);
	l->nr_accesses_bp = l->nr_accesses * 10000;
	l->age = (l->age * sz_l + r->age * sz_r) / (sz_l + sz_r);
	l->ar.end = r->ar.end;
	damon_destroy_region(r, t);
}
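
/*
 * For example, merging a 12 KiB region with nr_accesses 4 into an adjacent
 * 4 KiB region with nr_accesses 8 yields a 16 KiB region with nr_accesses
 * (4 * 12K + 8 * 4K) / 16K = 5, a size-weighted average; ->age is averaged
 * the same way.  (Illustrative numbers.)
 */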

/*
 * Merge adjacent regions having similar access frequencies
 *
 * t		target affected by this merge operation
 * thres	'->nr_accesses' diff threshold for the merge
 * sz_limit	size upper limit of each region
 */
static void damon_merge_regions_of(struct damon_target *t, unsigned int thres,
				   unsigned long sz_limit)
{
	struct damon_region *r, *prev = NULL, *next;

	damon_for_each_region_safe(r, next, t) {
		if (abs(r->nr_accesses - r->last_nr_accesses) > thres)
			r->age = 0;
		else
			r->age++;

		if (prev && prev->ar.end == r->ar.start &&
		    abs(prev->nr_accesses - r->nr_accesses) <= thres &&
		    damon_sz_region(prev) + damon_sz_region(r) <= sz_limit)
			damon_merge_two_regions(t, prev, r);
		else
			prev = r;
	}
}

/*
 * Merge adjacent regions having similar access frequencies
 *
 * threshold	'->nr_accesses' diff threshold for the merge
 * sz_limit	size upper limit of each region
 *
 * This function merges monitoring target regions which are adjacent and
 * whose access frequencies are similar.  This is for minimizing the
 * monitoring overhead under the dynamically changeable access pattern.  If a
 * merge was unnecessarily made, later 'kdamond_split_regions()' will revert
 * it.
 *
 * The total number of regions could be higher than the user-defined limit,
 * max_nr_regions, in some cases.  For example, the user can update
 * max_nr_regions to a number lower than the current number of regions while
 * DAMON is running.  For such a case, repeat merging until the limit is met
 * while increasing @threshold up to the possible maximum level.
 */
static void kdamond_merge_regions(struct damon_ctx *c, unsigned int threshold,
				  unsigned long sz_limit)
{
	struct damon_target *t;
	unsigned int nr_regions;
	unsigned int max_thres;

	max_thres = c->attrs.aggr_interval /
		(c->attrs.sample_interval ? c->attrs.sample_interval : 1);
	do {
		nr_regions = 0;
		damon_for_each_target(t, c) {
			damon_merge_regions_of(t, threshold, sz_limit);
			nr_regions += damon_nr_regions(t);
		}
		threshold = max(1, threshold * 2);
	} while (nr_regions > c->attrs.max_nr_regions &&
			threshold / 2 < max_thres);
}
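
/*
 * For example (illustrative numbers): if max_nr_regions was lowered to 100
 * while 1,000 regions exist, a first pass with the given threshold may
 * leave, say, 400 regions.  The threshold is then doubled and the merging
 * repeated, each pass tolerating larger nr_accesses differences, until at
 * most 100 regions remain or the threshold reaches the maximum possible
 * nr_accesses (aggr_interval / sample_interval).
 */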

/*
 * Split a region in two
 *
 * r		the region to be split
 * sz_r		size of the first sub-region that will be made
 */
static void damon_split_region_at(struct damon_target *t,
				  struct damon_region *r, unsigned long sz_r)
{
	struct damon_region *new;

	new = damon_new_region(r->ar.start + sz_r, r->ar.end);
	if (!new)
		return;

	r->ar.end = new->ar.start;

	new->age = r->age;
	new->last_nr_accesses = r->last_nr_accesses;
	new->nr_accesses_bp = r->nr_accesses_bp;
	new->nr_accesses = r->nr_accesses;

	damon_insert_region(new, r, damon_next_region(r), t);
}

/* Split every region in the given target into 'nr_subs' regions */
static void damon_split_regions_of(struct damon_target *t, int nr_subs)
{
	struct damon_region *r, *next;
	unsigned long sz_region, sz_sub = 0;
	int i;

	damon_for_each_region_safe(r, next, t) {
		sz_region = damon_sz_region(r);

		for (i = 0; i < nr_subs - 1 &&
				sz_region > 2 * DAMON_MIN_REGION; i++) {
			/*
			 * Randomly select the size of the left sub-region to
			 * be at least 10% and at most 90% of the original
			 * region
			 */
			sz_sub = ALIGN_DOWN(damon_rand(1, 10) *
					sz_region / 10, DAMON_MIN_REGION);
			/* Do not allow blank region */
			if (sz_sub == 0 || sz_sub >= sz_region)
				continue;

			damon_split_region_at(t, r, sz_sub);
			sz_region = sz_sub;
		}
	}
}
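
/*
 * For example, for a 100 MiB region, damon_rand(1, 10) picks a multiplier
 * between 1 and 10, so the first sub-region gets a DAMON_MIN_REGION-aligned
 * size between roughly 10 MiB and 90 MiB (a result equal to the whole region
 * is skipped), and the remainder forms the second sub-region.  (Illustrative
 * numbers.)
 */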

/*
 * Split every target region into randomly-sized small regions
 *
 * This function splits every target region into random-sized small regions
 * if the current total number of the regions is equal to or smaller than
 * half of the user-specified maximum number of regions.  This is for
 * maximizing the monitoring accuracy under the dynamically changeable access
 * patterns.  If a split was unnecessarily made, later
 * 'kdamond_merge_regions()' will revert it.
 */
static void kdamond_split_regions(struct damon_ctx *ctx)
{
	struct damon_target *t;
	unsigned int nr_regions = 0;
	static unsigned int last_nr_regions;
	int nr_subregions = 2;

	damon_for_each_target(t, ctx)
		nr_regions += damon_nr_regions(t);

	if (nr_regions > ctx->attrs.max_nr_regions / 2)
		return;

	/* Maybe the middle of the region has different access frequency */
	if (last_nr_regions == nr_regions &&
			nr_regions < ctx->attrs.max_nr_regions / 3)
		nr_subregions = 3;

	damon_for_each_target(t, ctx)
		damon_split_regions_of(t, nr_subregions);

	last_nr_regions = nr_regions;
}

/*
 * Check whether current monitoring should be stopped
 *
 * The monitoring is stopped when either the user requested to stop, or all
 * monitoring targets are invalid.
 *
 * Returns true if the current monitoring needs to stop.
 */
static bool kdamond_need_stop(struct damon_ctx *ctx)
{
	struct damon_target *t;

	if (kthread_should_stop())
		return true;

	if (!ctx->ops.target_valid)
		return false;

	damon_for_each_target(t, ctx) {
		if (ctx->ops.target_valid(t))
			return false;
	}

	return true;
}

static int damos_get_wmark_metric_value(enum damos_wmark_metric metric,
					unsigned long *metric_value)
{
	switch (metric) {
	case DAMOS_WMARK_FREE_MEM_RATE:
		*metric_value = global_zone_page_state(NR_FREE_PAGES) * 1000 /
			totalram_pages();
		return 0;
	default:
		break;
	}
	return -EINVAL;
}

/*
 * Returns zero if the scheme is active.  Else, returns time to wait for next
 * watermark check in micro-seconds.
 */
static unsigned long damos_wmark_wait_us(struct damos *scheme)
{
	unsigned long metric;

	if (damos_get_wmark_metric_value(scheme->wmarks.metric, &metric))
		return 0;

	/* higher than high watermark or lower than low watermark */
	if (metric > scheme->wmarks.high || scheme->wmarks.low > metric) {
		if (scheme->wmarks.activated)
			pr_debug("deactivate a scheme (%d) for %s wmark\n",
				 scheme->action,
				 str_high_low(metric > scheme->wmarks.high));
		scheme->wmarks.activated = false;
		return scheme->wmarks.interval;
	}

	/* inactive and higher than middle watermark */
	if ((scheme->wmarks.high >= metric && metric >= scheme->wmarks.mid) &&
			!scheme->wmarks.activated)
		return scheme->wmarks.interval;

	if (!scheme->wmarks.activated)
		pr_debug("activate a scheme (%d)\n", scheme->action);
	scheme->wmarks.activated = true;
	return 0;
}
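
/*
 * For example, with DAMOS_WMARK_FREE_MEM_RATE and high/mid/low watermarks of
 * 500/400/200 (per-thousand of total RAM), the scheme is deactivated while
 * the free memory rate is above 50% or below 20%, stays inactive between 40%
 * and 50%, and (re)activates once the rate falls into the [20%, 40%) band.
 * (Illustrative thresholds.)
 */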

static void kdamond_usleep(unsigned long usecs)
{
	if (usecs >= USLEEP_RANGE_UPPER_BOUND)
		schedule_timeout_idle(usecs_to_jiffies(usecs));
	else
		usleep_range_idle(usecs, usecs + 1);
}

/*
 * kdamond_call() - handle damon_call_control.
 * @ctx:	The &struct damon_ctx of the kdamond.
 * @cancel:	Whether to cancel the invocation of the function.
 *
 * If there is a &struct damon_call_control request that was registered via
 * damon_call() on @ctx, do or cancel the invocation of the function
 * depending on @cancel.  @cancel is set when the kdamond is deactivated by
 * DAMOS watermarks, or the kdamond is already out of the main loop and
 * therefore will be terminated.
 */
static void kdamond_call(struct damon_ctx *ctx, bool cancel)
{
	struct damon_call_control *control;
	int ret = 0;

	mutex_lock(&ctx->call_control_lock);
	control = ctx->call_control;
	mutex_unlock(&ctx->call_control_lock);
	if (!control)
		return;
	if (cancel) {
		control->canceled = true;
	} else {
		ret = control->fn(control->data);
		control->return_code = ret;
	}
	complete(&control->completion);
	mutex_lock(&ctx->call_control_lock);
	ctx->call_control = NULL;
	mutex_unlock(&ctx->call_control_lock);
}

/* Returns negative error code if it's not activated but should return */
static int kdamond_wait_activation(struct damon_ctx *ctx)
{
	struct damos *s;
	unsigned long wait_time;
	unsigned long min_wait_time = 0;
	bool init_wait_time = false;

	while (!kdamond_need_stop(ctx)) {
		damon_for_each_scheme(s, ctx) {
			wait_time = damos_wmark_wait_us(s);
			if (!init_wait_time || wait_time < min_wait_time) {
				init_wait_time = true;
				min_wait_time = wait_time;
			}
		}
		if (!min_wait_time)
			return 0;

		kdamond_usleep(min_wait_time);

		if (ctx->callback.after_wmarks_check &&
				ctx->callback.after_wmarks_check(ctx))
			break;
		kdamond_call(ctx, true);
		damos_walk_cancel(ctx);
	}
	return -EBUSY;
}

static void kdamond_init_ctx(struct damon_ctx *ctx)
{
	unsigned long sample_interval = ctx->attrs.sample_interval ?
		ctx->attrs.sample_interval : 1;
	unsigned long apply_interval;
	struct damos *scheme;

	ctx->passed_sample_intervals = 0;
	ctx->next_aggregation_sis = ctx->attrs.aggr_interval / sample_interval;
	ctx->next_ops_update_sis = ctx->attrs.ops_update_interval /
		sample_interval;
	ctx->next_intervals_tune_sis = ctx->next_aggregation_sis *
		ctx->attrs.intervals_goal.aggrs;

	damon_for_each_scheme(scheme, ctx) {
		apply_interval = scheme->apply_interval_us ?
			scheme->apply_interval_us : ctx->attrs.aggr_interval;
		scheme->next_apply_sis = apply_interval / sample_interval;
		damos_set_filters_default_reject(scheme);
	}
}

/*
 * The monitoring daemon that runs as a kernel thread
 */
static int kdamond_fn(void *data)
{
	struct damon_ctx *ctx = data;
	struct damon_target *t;
	struct damon_region *r, *next;
	unsigned int max_nr_accesses = 0;
	unsigned long sz_limit = 0;

	pr_debug("kdamond (%d) starts\n", current->pid);

	complete(&ctx->kdamond_started);
	kdamond_init_ctx(ctx);

	if (ctx->ops.init)
		ctx->ops.init(ctx);
	ctx->regions_score_histogram = kmalloc_array(DAMOS_MAX_SCORE + 1,
			sizeof(*ctx->regions_score_histogram), GFP_KERNEL);
	if (!ctx->regions_score_histogram)
		goto done;

	sz_limit = damon_region_sz_limit(ctx);

	while (!kdamond_need_stop(ctx)) {
		/*
		 * ctx->attrs and ctx->next_{aggregation,ops_update}_sis could
		 * be changed from the after_wmarks_check() or
		 * after_aggregation() callbacks.  Read the values here, and
		 * use those for this iteration.  That is, new values that
		 * damon_set_attrs() sets are respected from the next
		 * iteration.
		 */
		unsigned long next_aggregation_sis = ctx->next_aggregation_sis;
		unsigned long next_ops_update_sis = ctx->next_ops_update_sis;
		unsigned long sample_interval = ctx->attrs.sample_interval;

		if (kdamond_wait_activation(ctx))
			break;

		if (ctx->ops.prepare_access_checks)
			ctx->ops.prepare_access_checks(ctx);

		kdamond_usleep(sample_interval);
		ctx->passed_sample_intervals++;

		if (ctx->ops.check_accesses)
			max_nr_accesses = ctx->ops.check_accesses(ctx);

		if (ctx->passed_sample_intervals >= next_aggregation_sis) {
			kdamond_merge_regions(ctx,
					max_nr_accesses / 10,
					sz_limit);
			if (ctx->callback.after_aggregation &&
					ctx->callback.after_aggregation(ctx))
				break;
		}

		/*
		 * do kdamond_call() and kdamond_apply_schemes() after
		 * kdamond_merge_regions() if possible, to reduce overhead
		 */
		kdamond_call(ctx, false);
		if (!list_empty(&ctx->schemes))
			kdamond_apply_schemes(ctx);
		else
			damos_walk_cancel(ctx);

		sample_interval = ctx->attrs.sample_interval ?
			ctx->attrs.sample_interval : 1;
		if (ctx->passed_sample_intervals >= next_aggregation_sis) {
			if (ctx->attrs.intervals_goal.aggrs &&
					ctx->passed_sample_intervals >=
					ctx->next_intervals_tune_sis) {
				/*
				 * ctx->next_aggregation_sis might be updated
				 * from kdamond_call().  In that case,
				 * damon_set_attrs(), which will be called
				 * from kdamond_tune_intervals(), may wrongly
				 * think this is in the middle of the current
				 * aggregation, and reset the aggregation
				 * information for all regions.  Then, the
				 * following kdamond_reset_aggregated() call
				 * would make the region information invalid,
				 * particularly for ->nr_accesses_bp.
				 *
				 * Reset ->next_aggregation_sis to avoid that.
				 * It will anyway be correctly updated after
				 * this if clause.
				 */
				ctx->next_aggregation_sis =
					next_aggregation_sis;
				ctx->next_intervals_tune_sis +=
					ctx->attrs.aggr_samples *
					ctx->attrs.intervals_goal.aggrs;
				kdamond_tune_intervals(ctx);
				sample_interval = ctx->attrs.sample_interval ?
					ctx->attrs.sample_interval : 1;
			}
			ctx->next_aggregation_sis = next_aggregation_sis +
				ctx->attrs.aggr_interval / sample_interval;

			kdamond_reset_aggregated(ctx);
			kdamond_split_regions(ctx);
		}

		if (ctx->passed_sample_intervals >= next_ops_update_sis) {
			ctx->next_ops_update_sis = next_ops_update_sis +
				ctx->attrs.ops_update_interval /
				sample_interval;
			if (ctx->ops.update)
				ctx->ops.update(ctx);
			sz_limit = damon_region_sz_limit(ctx);
		}
	}
done:
	damon_for_each_target(t, ctx) {
		damon_for_each_region_safe(r, next, t)
			damon_destroy_region(r, t);
	}

	if (ctx->callback.before_terminate)
		ctx->callback.before_terminate(ctx);
	if (ctx->ops.cleanup)
		ctx->ops.cleanup(ctx);
	kfree(ctx->regions_score_histogram);

	pr_debug("kdamond (%d) finishes\n", current->pid);
	mutex_lock(&ctx->kdamond_lock);
	ctx->kdamond = NULL;
	mutex_unlock(&ctx->kdamond_lock);

	kdamond_call(ctx, true);
	damos_walk_cancel(ctx);

	mutex_lock(&damon_lock);
	nr_running_ctxs--;
	if (!nr_running_ctxs && running_exclusive_ctxs)
		running_exclusive_ctxs = false;
	mutex_unlock(&damon_lock);

	return 0;
}

/*
 * struct damon_system_ram_region - System RAM resource address region of
 *				    [@start, @end).
 * @start:	Start address of the region (inclusive).
 * @end:	End address of the region (exclusive).
 */
struct damon_system_ram_region {
	unsigned long start;
	unsigned long end;
};

static int walk_system_ram(struct resource *res, void *arg)
{
	struct damon_system_ram_region *a = arg;

	if (a->end - a->start < resource_size(res)) {
		a->start = res->start;
		a->end = res->end;
	}
	return 0;
}

/*
 * Find biggest 'System RAM' resource and store its start and end address in
 * @start and @end, respectively.  If no System RAM is found, returns false.
 */
static bool damon_find_biggest_system_ram(unsigned long *start,
						unsigned long *end)
{
	struct damon_system_ram_region arg = {};

	walk_system_ram_res(0, ULONG_MAX, &arg, walk_system_ram);
	if (arg.end <= arg.start)
		return false;

	*start = arg.start;
	*end = arg.end;
	return true;
}

/**
 * damon_set_region_biggest_system_ram_default() - Set the region of the given
 * monitoring target as requested, or biggest 'System RAM'.
 * @t:		The monitoring target to set the region.
 * @start:	The pointer to the start address of the region.
 * @end:	The pointer to the end address of the region.
 *
 * This function sets the region of @t as requested by @start and @end.  If
 * the values of @start and @end are zero, however, this function finds the
 * biggest 'System RAM' resource and sets the region to cover the resource.
 * In the latter case, this function saves the start and end addresses of the
 * resource in @start and @end, respectively.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int damon_set_region_biggest_system_ram_default(struct damon_target *t,
			unsigned long *start, unsigned long *end)
{
	struct damon_addr_range addr_range;

	if (*start > *end)
		return -EINVAL;

	if (!*start && !*end &&
		!damon_find_biggest_system_ram(start, end))
		return -EINVAL;

	addr_range.start = *start;
	addr_range.end = *end;
	return damon_set_regions(t, &addr_range, 1);
}

/*
 * damon_moving_sum() - Calculate an inferred moving sum value.
 * @mvsum:	Inferred sum of the last @len_window values.
 * @nomvsum:	Non-moving sum of the last discrete @len_window window values.
 * @len_window:	The number of last values to take care of.
 * @new_value:	New value that will be added to the pseudo moving sum.
 *
 * Moving sum (moving average * window size) is good for handling noise, but
 * the cost of keeping past values can be high for arbitrary window size.
 * This function implements a lightweight pseudo moving sum function that
 * doesn't keep the past window values.
 *
 * It simply assumes there was no noise in the past, and derives the assumed
 * no-noise past value to drop from @nomvsum and @len_window.  @nomvsum is a
 * non-moving sum of the last window.  For example, if @len_window is 10 and
 * we have 25 values, @nomvsum is the sum of the 11th to 20th values of the
 * 25 values.  Hence, this function simply drops @nomvsum / @len_window from
 * the given @mvsum and adds @new_value.
 *
 * For example, if @len_window is 10 and @nomvsum is 50, the last 10 values
 * of the last window could vary, e.g., 0, 10, 0, 10, 0, 10, 0, 0, 0, 20.
 * For calculating the next moving sum with a new value, we should drop 0
 * from 50 and add the new value.  However, this function assumes it got
 * value 5 for each of the last ten times.  Based on the assumption, when
 * the next value is measured, it drops the assumed past value, 5, from the
 * current sum, and adds the new value to get the updated pseudo-moving sum.
 *
 * This means the value could have errors, but the errors disappear at every
 * @len_window-aligned call.  For example, if @len_window is 10, the pseudo
 * moving sum with the 11th to 19th values would have an error, but the sum
 * with the 20th value will not have the error.
 *
 * Return: Pseudo-moving sum after getting the @new_value.
 */
static unsigned int damon_moving_sum(unsigned int mvsum, unsigned int nomvsum,
		unsigned int len_window, unsigned int new_value)
{
	return mvsum - nomvsum / len_window + new_value;
}
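
/*
 * Putting the above together with illustrative numbers: for @mvsum 60,
 * @nomvsum 50, @len_window 10, and @new_value 20, the assumed per-slot past
 * value is 50 / 10 = 5, so the updated pseudo moving sum is
 * 60 - 5 + 20 = 75.
 */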

/**
 * damon_update_region_access_rate() - Update the access rate of a region.
 * @r:		The DAMON region to update for its access check result.
 * @accessed:	Whether the region was accessed during the last sampling
 *		interval.
 * @attrs:	The damon_attrs of the DAMON context.
 *
 * Update the access rate of a region with the region's last sampling
 * interval access check result.
 *
 * Usually this will be called by &damon_operations->check_accesses callback.
 */
void damon_update_region_access_rate(struct damon_region *r, bool accessed,
		struct damon_attrs *attrs)
{
	unsigned int len_window = 1;

	/*
	 * sample_interval can be zero, but cannot be larger than
	 * aggr_interval, owing to validation of damon_set_attrs().
	 */
	if (attrs->sample_interval)
		len_window = damon_max_nr_accesses(attrs);
	r->nr_accesses_bp = damon_moving_sum(r->nr_accesses_bp,
			r->last_nr_accesses * 10000, len_window,
			accessed ? 10000 : 0);

	if (accessed)
		r->nr_accesses++;
}
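
/*
 * For example, with a 100 ms sampling interval and a 1 s aggregation
 * interval, len_window is 10.  Each accessed sample contributes 10000 to the
 * pseudo moving sum, so if every sample in the window observed an access,
 * ->nr_accesses_bp converges to 10 * 10000, i.e., an nr_accesses of 10 in
 * basis points.  (Illustrative intervals.)
 */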

static int __init damon_init(void)
{
	damon_region_cache = KMEM_CACHE(damon_region, 0);
	if (unlikely(!damon_region_cache)) {
		pr_err("creating damon_region_cache fails\n");
		return -ENOMEM;
	}

	return 0;
}

subsys_initcall(damon_init);

#include "tests/core-kunit.h"