xref: /linux/mm/damon/core.c (revision 2942242dde896ea8544f321617c86f941899c544)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Data Access Monitor
4  *
5  * Author: SeongJae Park <sj@kernel.org>
6  */
7 
8 #define pr_fmt(fmt) "damon: " fmt
9 
10 #include <linux/damon.h>
11 #include <linux/delay.h>
12 #include <linux/kthread.h>
13 #include <linux/mm.h>
14 #include <linux/psi.h>
15 #include <linux/slab.h>
16 #include <linux/string.h>
17 #include <linux/string_choices.h>
18 
19 #define CREATE_TRACE_POINTS
20 #include <trace/events/damon.h>
21 
22 #ifdef CONFIG_DAMON_KUNIT_TEST
23 #undef DAMON_MIN_REGION
24 #define DAMON_MIN_REGION 1
25 #endif
26 
27 static DEFINE_MUTEX(damon_lock);
28 static int nr_running_ctxs;
29 static bool running_exclusive_ctxs;
30 
31 static DEFINE_MUTEX(damon_ops_lock);
32 static struct damon_operations damon_registered_ops[NR_DAMON_OPS];
33 
34 static struct kmem_cache *damon_region_cache __ro_after_init;
35 
36 /* Should be called under damon_ops_lock with id smaller than NR_DAMON_OPS */
37 static bool __damon_is_registered_ops(enum damon_ops_id id)
38 {
39 	struct damon_operations empty_ops = {};
40 
41 	if (!memcmp(&empty_ops, &damon_registered_ops[id], sizeof(empty_ops)))
42 		return false;
43 	return true;
44 }
45 
46 /**
47  * damon_is_registered_ops() - Check if a given damon_operations is registered.
48  * @id:	Id of the damon_operations to check if registered.
49  *
50  * Return: true if the ops is set, false otherwise.
51  */
52 bool damon_is_registered_ops(enum damon_ops_id id)
53 {
54 	bool registered;
55 
56 	if (id >= NR_DAMON_OPS)
57 		return false;
58 	mutex_lock(&damon_ops_lock);
59 	registered = __damon_is_registered_ops(id);
60 	mutex_unlock(&damon_ops_lock);
61 	return registered;
62 }
63 
64 /**
65  * damon_register_ops() - Register a monitoring operations set to DAMON.
66  * @ops:	monitoring operations set to register.
67  *
68  * This function registers a monitoring operations set of valid &struct
69  * damon_operations->id so that others can find and use it later.
70  *
71  * Return: 0 on success, negative error code otherwise.
72  */
73 int damon_register_ops(struct damon_operations *ops)
74 {
75 	int err = 0;
76 
77 	if (ops->id >= NR_DAMON_OPS)
78 		return -EINVAL;
79 
80 	mutex_lock(&damon_ops_lock);
81 	/* Fail for already registered ops */
82 	if (__damon_is_registered_ops(ops->id))
83 		err = -EINVAL;
84 	else
85 		damon_registered_ops[ops->id] = *ops;
86 	mutex_unlock(&damon_ops_lock);
87 	return err;
88 }
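
/*
 * Illustrative sketch, not part of the original file: how a monitoring
 * operations provider would typically register itself.  The 'example_ops'
 * name and the omitted callbacks are hypothetical; real providers (e.g. the
 * vaddr and paddr operation sets) fill in their own implementations before
 * calling damon_register_ops().
 */
#if 0
static int example_register_ops(void)
{
	static struct damon_operations example_ops = {
		.id = DAMON_OPS_PADDR,
		/* the access-check and scheme-apply callbacks go here */
	};

	/* Returns -EINVAL if the id is out of range or already registered. */
	return damon_register_ops(&example_ops);
}
#endif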
89 
90 /**
91  * damon_select_ops() - Select a monitoring operations to use with the context.
92  * @ctx:	monitoring context to use the operations.
93  * @id:		id of the registered monitoring operations to select.
94  *
95  * This function finds the registered monitoring operations set of @id and
96  * makes @ctx use it.
97  *
98  * Return: 0 on success, negative error code otherwise.
99  */
100 int damon_select_ops(struct damon_ctx *ctx, enum damon_ops_id id)
101 {
102 	int err = 0;
103 
104 	if (id >= NR_DAMON_OPS)
105 		return -EINVAL;
106 
107 	mutex_lock(&damon_ops_lock);
108 	if (!__damon_is_registered_ops(id))
109 		err = -EINVAL;
110 	else
111 		ctx->ops = damon_registered_ops[id];
112 	mutex_unlock(&damon_ops_lock);
113 	return err;
114 }
115 
116 /*
117  * Construct a damon_region struct
118  *
119  * Returns the pointer to the new struct if success, or NULL otherwise
120  */
121 struct damon_region *damon_new_region(unsigned long start, unsigned long end)
122 {
123 	struct damon_region *region;
124 
125 	region = kmem_cache_alloc(damon_region_cache, GFP_KERNEL);
126 	if (!region)
127 		return NULL;
128 
129 	region->ar.start = start;
130 	region->ar.end = end;
131 	region->nr_accesses = 0;
132 	region->nr_accesses_bp = 0;
133 	INIT_LIST_HEAD(&region->list);
134 
135 	region->age = 0;
136 	region->last_nr_accesses = 0;
137 
138 	return region;
139 }
140 
141 void damon_add_region(struct damon_region *r, struct damon_target *t)
142 {
143 	list_add_tail(&r->list, &t->regions_list);
144 	t->nr_regions++;
145 }
146 
147 static void damon_del_region(struct damon_region *r, struct damon_target *t)
148 {
149 	list_del(&r->list);
150 	t->nr_regions--;
151 }
152 
153 static void damon_free_region(struct damon_region *r)
154 {
155 	kmem_cache_free(damon_region_cache, r);
156 }
157 
158 void damon_destroy_region(struct damon_region *r, struct damon_target *t)
159 {
160 	damon_del_region(r, t);
161 	damon_free_region(r);
162 }
163 
164 /*
165  * Check whether a region is intersecting an address range
166  *
167  * Returns true if it is.
168  */
169 static bool damon_intersect(struct damon_region *r,
170 		struct damon_addr_range *re)
171 {
172 	return !(r->ar.end <= re->start || re->end <= r->ar.start);
173 }
174 
175 /*
176  * Fill holes in regions with new regions.
177  */
178 static int damon_fill_regions_holes(struct damon_region *first,
179 		struct damon_region *last, struct damon_target *t)
180 {
181 	struct damon_region *r = first;
182 
183 	damon_for_each_region_from(r, t) {
184 		struct damon_region *next, *newr;
185 
186 		if (r == last)
187 			break;
188 		next = damon_next_region(r);
189 		if (r->ar.end != next->ar.start) {
190 			newr = damon_new_region(r->ar.end, next->ar.start);
191 			if (!newr)
192 				return -ENOMEM;
193 			damon_insert_region(newr, r, next, t);
194 		}
195 	}
196 	return 0;
197 }
198 
199 /*
200  * damon_set_regions() - Set regions of a target for given address ranges.
201  * @t:		the given target.
202  * @ranges:	array of new monitoring target ranges.
203  * @nr_ranges:	length of @ranges.
204  *
205  * This function adds new regions to, or modifies existing regions of, a
206  * monitoring target to fit in the given ranges.
207  *
208  * Return: 0 if success, or negative error code otherwise.
209  */
210 int damon_set_regions(struct damon_target *t, struct damon_addr_range *ranges,
211 		unsigned int nr_ranges)
212 {
213 	struct damon_region *r, *next;
214 	unsigned int i;
215 	int err;
216 
217 	/* Remove regions which are not in the new ranges */
218 	damon_for_each_region_safe(r, next, t) {
219 		for (i = 0; i < nr_ranges; i++) {
220 			if (damon_intersect(r, &ranges[i]))
221 				break;
222 		}
223 		if (i == nr_ranges)
224 			damon_destroy_region(r, t);
225 	}
226 
227 	r = damon_first_region(t);
228 	/* Add new regions or resize existing regions to fit in the ranges */
229 	for (i = 0; i < nr_ranges; i++) {
230 		struct damon_region *first = NULL, *last, *newr;
231 		struct damon_addr_range *range;
232 
233 		range = &ranges[i];
234 		/* Get the first/last regions intersecting with the range */
235 		damon_for_each_region_from(r, t) {
236 			if (damon_intersect(r, range)) {
237 				if (!first)
238 					first = r;
239 				last = r;
240 			}
241 			if (r->ar.start >= range->end)
242 				break;
243 		}
244 		if (!first) {
245 			/* no region intersects with this range */
246 			newr = damon_new_region(
247 					ALIGN_DOWN(range->start,
248 						DAMON_MIN_REGION),
249 					ALIGN(range->end, DAMON_MIN_REGION));
250 			if (!newr)
251 				return -ENOMEM;
252 			damon_insert_region(newr, damon_prev_region(r), r, t);
253 		} else {
254 			/* resize intersecting regions to fit in this range */
255 			first->ar.start = ALIGN_DOWN(range->start,
256 					DAMON_MIN_REGION);
257 			last->ar.end = ALIGN(range->end, DAMON_MIN_REGION);
258 
259 			/* fill possible holes in the range */
260 			err = damon_fill_regions_holes(first, last, t);
261 			if (err)
262 				return err;
263 		}
264 	}
265 	return 0;
266 }
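
/*
 * Illustrative sketch, not part of the original file: setting a single
 * address range as the monitoring target regions.  The addresses and the
 * 'example_' name are hypothetical; existing regions of @t are trimmed,
 * resized, or filled with new regions so that they exactly cover the range.
 */
#if 0
static int example_set_one_range(struct damon_target *t)
{
	struct damon_addr_range range = {
		.start = 0x100000000UL,
		.end = 0x140000000UL,
	};

	return damon_set_regions(t, &range, 1);
}
#endif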
267 
268 struct damos_filter *damos_new_filter(enum damos_filter_type type,
269 		bool matching, bool allow)
270 {
271 	struct damos_filter *filter;
272 
273 	filter = kmalloc(sizeof(*filter), GFP_KERNEL);
274 	if (!filter)
275 		return NULL;
276 	filter->type = type;
277 	filter->matching = matching;
278 	filter->allow = allow;
279 	INIT_LIST_HEAD(&filter->list);
280 	return filter;
281 }
282 
283 /**
284  * damos_filter_for_ops() - Return if the filter is an ops-handled one.
285  * @type:	type of the filter.
286  *
287  * Return: true if the filter of @type needs to be handled by ops layer, false
288  * otherwise.
289  */
290 bool damos_filter_for_ops(enum damos_filter_type type)
291 {
292 	switch (type) {
293 	case DAMOS_FILTER_TYPE_ADDR:
294 	case DAMOS_FILTER_TYPE_TARGET:
295 		return false;
296 	default:
297 		break;
298 	}
299 	return true;
300 }
301 
302 void damos_add_filter(struct damos *s, struct damos_filter *f)
303 {
304 	if (damos_filter_for_ops(f->type))
305 		list_add_tail(&f->list, &s->ops_filters);
306 	else
307 		list_add_tail(&f->list, &s->filters);
308 }
309 
310 static void damos_del_filter(struct damos_filter *f)
311 {
312 	list_del(&f->list);
313 }
314 
315 static void damos_free_filter(struct damos_filter *f)
316 {
317 	kfree(f);
318 }
319 
320 void damos_destroy_filter(struct damos_filter *f)
321 {
322 	damos_del_filter(f);
323 	damos_free_filter(f);
324 }
325 
326 struct damos_quota_goal *damos_new_quota_goal(
327 		enum damos_quota_goal_metric metric,
328 		unsigned long target_value)
329 {
330 	struct damos_quota_goal *goal;
331 
332 	goal = kmalloc(sizeof(*goal), GFP_KERNEL);
333 	if (!goal)
334 		return NULL;
335 	goal->metric = metric;
336 	goal->target_value = target_value;
337 	INIT_LIST_HEAD(&goal->list);
338 	return goal;
339 }
340 
341 void damos_add_quota_goal(struct damos_quota *q, struct damos_quota_goal *g)
342 {
343 	list_add_tail(&g->list, &q->goals);
344 }
345 
346 static void damos_del_quota_goal(struct damos_quota_goal *g)
347 {
348 	list_del(&g->list);
349 }
350 
351 static void damos_free_quota_goal(struct damos_quota_goal *g)
352 {
353 	kfree(g);
354 }
355 
356 void damos_destroy_quota_goal(struct damos_quota_goal *g)
357 {
358 	damos_del_quota_goal(g);
359 	damos_free_quota_goal(g);
360 }
361 
362 /* initialize fields of @quota that normally API users wouldn't set */
363 static struct damos_quota *damos_quota_init(struct damos_quota *quota)
364 {
365 	quota->esz = 0;
366 	quota->total_charged_sz = 0;
367 	quota->total_charged_ns = 0;
368 	quota->charged_sz = 0;
369 	quota->charged_from = 0;
370 	quota->charge_target_from = NULL;
371 	quota->charge_addr_from = 0;
372 	quota->esz_bp = 0;
373 	return quota;
374 }
375 
376 struct damos *damon_new_scheme(struct damos_access_pattern *pattern,
377 			enum damos_action action,
378 			unsigned long apply_interval_us,
379 			struct damos_quota *quota,
380 			struct damos_watermarks *wmarks,
381 			int target_nid)
382 {
383 	struct damos *scheme;
384 
385 	scheme = kmalloc(sizeof(*scheme), GFP_KERNEL);
386 	if (!scheme)
387 		return NULL;
388 	scheme->pattern = *pattern;
389 	scheme->action = action;
390 	scheme->apply_interval_us = apply_interval_us;
391 	/*
392 	 * next_apply_sis will be set when kdamond starts.  While kdamond is
393 	 * running, it will also be updated when it is added to the DAMON context,
394 	 * or damon_attrs are updated.
395 	 */
396 	scheme->next_apply_sis = 0;
397 	scheme->walk_completed = false;
398 	INIT_LIST_HEAD(&scheme->filters);
399 	INIT_LIST_HEAD(&scheme->ops_filters);
400 	scheme->stat = (struct damos_stat){};
401 	INIT_LIST_HEAD(&scheme->list);
402 
403 	scheme->quota = *(damos_quota_init(quota));
404 	/* quota.goals should be separately set by caller */
405 	INIT_LIST_HEAD(&scheme->quota.goals);
406 
407 	scheme->wmarks = *wmarks;
408 	scheme->wmarks.activated = true;
409 
410 	scheme->target_nid = target_nid;
411 
412 	return scheme;
413 }
414 
415 static void damos_set_next_apply_sis(struct damos *s, struct damon_ctx *ctx)
416 {
417 	unsigned long sample_interval = ctx->attrs.sample_interval ?
418 		ctx->attrs.sample_interval : 1;
419 	unsigned long apply_interval = s->apply_interval_us ?
420 		s->apply_interval_us : ctx->attrs.aggr_interval;
421 
422 	s->next_apply_sis = ctx->passed_sample_intervals +
423 		apply_interval / sample_interval;
424 }
425 
426 void damon_add_scheme(struct damon_ctx *ctx, struct damos *s)
427 {
428 	list_add_tail(&s->list, &ctx->schemes);
429 	damos_set_next_apply_sis(s, ctx);
430 }
431 
432 static void damon_del_scheme(struct damos *s)
433 {
434 	list_del(&s->list);
435 }
436 
437 static void damon_free_scheme(struct damos *s)
438 {
439 	kfree(s);
440 }
441 
442 void damon_destroy_scheme(struct damos *s)
443 {
444 	struct damos_quota_goal *g, *g_next;
445 	struct damos_filter *f, *next;
446 
447 	damos_for_each_quota_goal_safe(g, g_next, &s->quota)
448 		damos_destroy_quota_goal(g);
449 
450 	damos_for_each_filter_safe(f, next, s)
451 		damos_destroy_filter(f);
452 	damon_del_scheme(s);
453 	damon_free_scheme(s);
454 }
455 
456 /*
457  * Construct a damon_target struct
458  *
459  * Returns the pointer to the new struct if success, or NULL otherwise
460  */
461 struct damon_target *damon_new_target(void)
462 {
463 	struct damon_target *t;
464 
465 	t = kmalloc(sizeof(*t), GFP_KERNEL);
466 	if (!t)
467 		return NULL;
468 
469 	t->pid = NULL;
470 	t->nr_regions = 0;
471 	INIT_LIST_HEAD(&t->regions_list);
472 	INIT_LIST_HEAD(&t->list);
473 
474 	return t;
475 }
476 
477 void damon_add_target(struct damon_ctx *ctx, struct damon_target *t)
478 {
479 	list_add_tail(&t->list, &ctx->adaptive_targets);
480 }
481 
482 bool damon_targets_empty(struct damon_ctx *ctx)
483 {
484 	return list_empty(&ctx->adaptive_targets);
485 }
486 
487 static void damon_del_target(struct damon_target *t)
488 {
489 	list_del(&t->list);
490 }
491 
492 void damon_free_target(struct damon_target *t)
493 {
494 	struct damon_region *r, *next;
495 
496 	damon_for_each_region_safe(r, next, t)
497 		damon_free_region(r);
498 	kfree(t);
499 }
500 
501 void damon_destroy_target(struct damon_target *t)
502 {
503 	damon_del_target(t);
504 	damon_free_target(t);
505 }
506 
507 unsigned int damon_nr_regions(struct damon_target *t)
508 {
509 	return t->nr_regions;
510 }
511 
512 struct damon_ctx *damon_new_ctx(void)
513 {
514 	struct damon_ctx *ctx;
515 
516 	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
517 	if (!ctx)
518 		return NULL;
519 
520 	init_completion(&ctx->kdamond_started);
521 
522 	ctx->attrs.sample_interval = 5 * 1000;
523 	ctx->attrs.aggr_interval = 100 * 1000;
524 	ctx->attrs.ops_update_interval = 60 * 1000 * 1000;
525 
526 	ctx->passed_sample_intervals = 0;
527 	/* These will be set from kdamond_init_ctx() */
528 	ctx->next_aggregation_sis = 0;
529 	ctx->next_ops_update_sis = 0;
530 
531 	mutex_init(&ctx->kdamond_lock);
532 	mutex_init(&ctx->call_control_lock);
533 	mutex_init(&ctx->walk_control_lock);
534 
535 	ctx->attrs.min_nr_regions = 10;
536 	ctx->attrs.max_nr_regions = 1000;
537 
538 	INIT_LIST_HEAD(&ctx->adaptive_targets);
539 	INIT_LIST_HEAD(&ctx->schemes);
540 
541 	return ctx;
542 }
543 
544 static void damon_destroy_targets(struct damon_ctx *ctx)
545 {
546 	struct damon_target *t, *next_t;
547 
548 	if (ctx->ops.cleanup) {
549 		ctx->ops.cleanup(ctx);
550 		return;
551 	}
552 
553 	damon_for_each_target_safe(t, next_t, ctx)
554 		damon_destroy_target(t);
555 }
556 
557 void damon_destroy_ctx(struct damon_ctx *ctx)
558 {
559 	struct damos *s, *next_s;
560 
561 	damon_destroy_targets(ctx);
562 
563 	damon_for_each_scheme_safe(s, next_s, ctx)
564 		damon_destroy_scheme(s);
565 
566 	kfree(ctx);
567 }
568 
569 static unsigned int damon_age_for_new_attrs(unsigned int age,
570 		struct damon_attrs *old_attrs, struct damon_attrs *new_attrs)
571 {
572 	return age * old_attrs->aggr_interval / new_attrs->aggr_interval;
573 }
574 
575 /* convert access ratio in bp (per 10,000) to nr_accesses */
576 static unsigned int damon_accesses_bp_to_nr_accesses(
577 		unsigned int accesses_bp, struct damon_attrs *attrs)
578 {
579 	return accesses_bp * damon_max_nr_accesses(attrs) / 10000;
580 }
581 
582 /*
583  * Convert nr_accesses to access ratio in bp (per 10,000).
584  *
585  * Callers should ensure attrs.aggr_interval is not zero, like
586  * damon_update_monitoring_results() does.  Otherwise, divide-by-zero would
587  * happen.
588  */
589 static unsigned int damon_nr_accesses_to_accesses_bp(
590 		unsigned int nr_accesses, struct damon_attrs *attrs)
591 {
592 	return nr_accesses * 10000 / damon_max_nr_accesses(attrs);
593 }
594 
595 static unsigned int damon_nr_accesses_for_new_attrs(unsigned int nr_accesses,
596 		struct damon_attrs *old_attrs, struct damon_attrs *new_attrs)
597 {
598 	return damon_accesses_bp_to_nr_accesses(
599 			damon_nr_accesses_to_accesses_bp(
600 				nr_accesses, old_attrs),
601 			new_attrs);
602 }
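
/*
 * Worked example (illustrative, not from the original file): with old attrs
 * of {sample_interval=5ms, aggr_interval=100ms}, damon_max_nr_accesses() is
 * 100 / 5 = 20, so nr_accesses=10 converts to 10 * 10000 / 20 = 5000 bp.
 * With new attrs of {sample_interval=5ms, aggr_interval=200ms} the maximum
 * becomes 40, and 5000 bp converts back to 5000 * 40 / 10000 = 20.  Note the
 * integer divisions, which can lose precision for small nr_accesses values.
 */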
603 
604 static void damon_update_monitoring_result(struct damon_region *r,
605 		struct damon_attrs *old_attrs, struct damon_attrs *new_attrs,
606 		bool aggregating)
607 {
608 	if (!aggregating) {
609 		r->nr_accesses = damon_nr_accesses_for_new_attrs(
610 				r->nr_accesses, old_attrs, new_attrs);
611 		r->nr_accesses_bp = r->nr_accesses * 10000;
612 	} else {
613 		/*
614 		 * if this is called in the middle of the aggregation, reset
615 		 * the aggregations we made so far for this aggregation
616 		 * interval.  In other words, make the status like
617 		 * kdamond_reset_aggregated() is called.
618 		 */
619 		r->last_nr_accesses = damon_nr_accesses_for_new_attrs(
620 				r->last_nr_accesses, old_attrs, new_attrs);
621 		r->nr_accesses_bp = r->last_nr_accesses * 10000;
622 		r->nr_accesses = 0;
623 	}
624 	r->age = damon_age_for_new_attrs(r->age, old_attrs, new_attrs);
625 }
626 
627 /*
628  * region->nr_accesses is the number of sampling intervals in the last
629  * aggregation interval in which access to the region has been found, and region->age is
630  * the number of aggregation intervals for which its access pattern has been maintained.
631  * For this reason, the real meaning of the two fields depends on the current
632  * sampling interval and aggregation interval.  This function updates
633  * ->nr_accesses and ->age of given damon_ctx's regions for new damon_attrs.
634  */
635 static void damon_update_monitoring_results(struct damon_ctx *ctx,
636 		struct damon_attrs *new_attrs, bool aggregating)
637 {
638 	struct damon_attrs *old_attrs = &ctx->attrs;
639 	struct damon_target *t;
640 	struct damon_region *r;
641 
642 	/* if any interval is zero, simply skip the conversion */
643 	if (!old_attrs->sample_interval || !old_attrs->aggr_interval ||
644 			!new_attrs->sample_interval ||
645 			!new_attrs->aggr_interval)
646 		return;
647 
648 	damon_for_each_target(t, ctx)
649 		damon_for_each_region(r, t)
650 			damon_update_monitoring_result(
651 					r, old_attrs, new_attrs, aggregating);
652 }
653 
654 /*
655  * damon_valid_intervals_goal() - return if the intervals goal of @attrs is
656  * valid.
657  */
658 static bool damon_valid_intervals_goal(struct damon_attrs *attrs)
659 {
660 	struct damon_intervals_goal *goal = &attrs->intervals_goal;
661 
662 	/* tuning is disabled */
663 	if (!goal->aggrs)
664 		return true;
665 	if (goal->min_sample_us > goal->max_sample_us)
666 		return false;
667 	if (attrs->sample_interval < goal->min_sample_us ||
668 			goal->max_sample_us < attrs->sample_interval)
669 		return false;
670 	return true;
671 }
672 
673 /**
674  * damon_set_attrs() - Set attributes for the monitoring.
675  * @ctx:		monitoring context
676  * @attrs:		monitoring attributes
677  *
678  * This function should be called while the kdamond is not running, while an
679  * aggregation of access check results is not ongoing (e.g., from &struct
680  * damon_callback->after_aggregation or &struct
681  * damon_callback->after_wmarks_check callbacks), or from damon_call().
682  *
683  * Every time interval is in micro-seconds.
684  *
685  * Return: 0 on success, negative error code otherwise.
686  */
687 int damon_set_attrs(struct damon_ctx *ctx, struct damon_attrs *attrs)
688 {
689 	unsigned long sample_interval = attrs->sample_interval ?
690 		attrs->sample_interval : 1;
691 	struct damos *s;
692 	bool aggregating = ctx->passed_sample_intervals <
693 		ctx->next_aggregation_sis;
694 
695 	if (!damon_valid_intervals_goal(attrs))
696 		return -EINVAL;
697 
698 	if (attrs->min_nr_regions < 3)
699 		return -EINVAL;
700 	if (attrs->min_nr_regions > attrs->max_nr_regions)
701 		return -EINVAL;
702 	if (attrs->sample_interval > attrs->aggr_interval)
703 		return -EINVAL;
704 
705 	/* callers from outside of the core don't set this. */
706 	if (!attrs->aggr_samples)
707 		attrs->aggr_samples = attrs->aggr_interval / sample_interval;
708 
709 	ctx->next_aggregation_sis = ctx->passed_sample_intervals +
710 		attrs->aggr_interval / sample_interval;
711 	ctx->next_ops_update_sis = ctx->passed_sample_intervals +
712 		attrs->ops_update_interval / sample_interval;
713 
714 	damon_update_monitoring_results(ctx, attrs, aggregating);
715 	ctx->attrs = *attrs;
716 
717 	damon_for_each_scheme(s, ctx)
718 		damos_set_next_apply_sis(s, ctx);
719 
720 	return 0;
721 }
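
/*
 * Illustrative sketch, not part of the original file: updating the intervals
 * and region limits of a context.  Starting from the current attrs keeps the
 * fields this caller does not care about (e.g. intervals_goal) intact.  The
 * 'example_' name and the concrete values are hypothetical.
 */
#if 0
static int example_set_attrs(struct damon_ctx *ctx)
{
	struct damon_attrs attrs = ctx->attrs;

	/* every interval is in microseconds, per the kernel-doc above */
	attrs.sample_interval = 5000;		/* 5 ms */
	attrs.aggr_interval = 100000;		/* 100 ms */
	attrs.ops_update_interval = 1000000;	/* 1 s */
	attrs.min_nr_regions = 10;
	attrs.max_nr_regions = 1000;

	return damon_set_attrs(ctx, &attrs);
}
#endif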
722 
723 /**
724  * damon_set_schemes() - Set data access monitoring based operation schemes.
725  * @ctx:	monitoring context
726  * @schemes:	array of the schemes
727  * @nr_schemes:	number of entries in @schemes
728  *
729  * This function should not be called while the kdamond of the context is
730  * running.
731  */
732 void damon_set_schemes(struct damon_ctx *ctx, struct damos **schemes,
733 			ssize_t nr_schemes)
734 {
735 	struct damos *s, *next;
736 	ssize_t i;
737 
738 	damon_for_each_scheme_safe(s, next, ctx)
739 		damon_destroy_scheme(s);
740 	for (i = 0; i < nr_schemes; i++)
741 		damon_add_scheme(ctx, schemes[i]);
742 }
743 
744 static struct damos_quota_goal *damos_nth_quota_goal(
745 		int n, struct damos_quota *q)
746 {
747 	struct damos_quota_goal *goal;
748 	int i = 0;
749 
750 	damos_for_each_quota_goal(goal, q) {
751 		if (i++ == n)
752 			return goal;
753 	}
754 	return NULL;
755 }
756 
757 static void damos_commit_quota_goal_union(
758 		struct damos_quota_goal *dst, struct damos_quota_goal *src)
759 {
760 	switch (dst->metric) {
761 	case DAMOS_QUOTA_NODE_MEM_USED_BP:
762 	case DAMOS_QUOTA_NODE_MEM_FREE_BP:
763 		dst->nid = src->nid;
764 		break;
765 	default:
766 		break;
767 	}
768 }
769 
770 static void damos_commit_quota_goal(
771 		struct damos_quota_goal *dst, struct damos_quota_goal *src)
772 {
773 	dst->metric = src->metric;
774 	dst->target_value = src->target_value;
775 	if (dst->metric == DAMOS_QUOTA_USER_INPUT)
776 		dst->current_value = src->current_value;
777 	/* keep last_psi_total as is, since it will be updated in next cycle */
778 	damos_commit_quota_goal_union(dst, src);
779 }
780 
781 /**
782  * damos_commit_quota_goals() - Commit DAMOS quota goals to another quota.
783  * @dst:	The commit destination DAMOS quota.
784  * @src:	The commit source DAMOS quota.
785  *
786  * Copies user-specified parameters for quota goals from @src to @dst.  Users
787  * should use this function for quota goals-level parameters update of running
788  * DAMON contexts, instead of manual in-place updates.
789  *
790  * This function should be called from parameters-update safe context, like
791  * DAMON callbacks.
792  */
793 int damos_commit_quota_goals(struct damos_quota *dst, struct damos_quota *src)
794 {
795 	struct damos_quota_goal *dst_goal, *next, *src_goal, *new_goal;
796 	int i = 0, j = 0;
797 
798 	damos_for_each_quota_goal_safe(dst_goal, next, dst) {
799 		src_goal = damos_nth_quota_goal(i++, src);
800 		if (src_goal)
801 			damos_commit_quota_goal(dst_goal, src_goal);
802 		else
803 			damos_destroy_quota_goal(dst_goal);
804 	}
805 	damos_for_each_quota_goal_safe(src_goal, next, src) {
806 		if (j++ < i)
807 			continue;
808 		new_goal = damos_new_quota_goal(
809 				src_goal->metric, src_goal->target_value);
810 		if (!new_goal)
811 			return -ENOMEM;
812 		damos_commit_quota_goal_union(new_goal, src_goal);
813 		damos_add_quota_goal(dst, new_goal);
814 	}
815 	return 0;
816 }
817 
818 static int damos_commit_quota(struct damos_quota *dst, struct damos_quota *src)
819 {
820 	int err;
821 
822 	dst->reset_interval = src->reset_interval;
823 	dst->ms = src->ms;
824 	dst->sz = src->sz;
825 	err = damos_commit_quota_goals(dst, src);
826 	if (err)
827 		return err;
828 	dst->weight_sz = src->weight_sz;
829 	dst->weight_nr_accesses = src->weight_nr_accesses;
830 	dst->weight_age = src->weight_age;
831 	return 0;
832 }
833 
834 static struct damos_filter *damos_nth_filter(int n, struct damos *s)
835 {
836 	struct damos_filter *filter;
837 	int i = 0;
838 
839 	damos_for_each_filter(filter, s) {
840 		if (i++ == n)
841 			return filter;
842 	}
843 	return NULL;
844 }
845 
846 static void damos_commit_filter_arg(
847 		struct damos_filter *dst, struct damos_filter *src)
848 {
849 	switch (dst->type) {
850 	case DAMOS_FILTER_TYPE_MEMCG:
851 		dst->memcg_id = src->memcg_id;
852 		break;
853 	case DAMOS_FILTER_TYPE_ADDR:
854 		dst->addr_range = src->addr_range;
855 		break;
856 	case DAMOS_FILTER_TYPE_TARGET:
857 		dst->target_idx = src->target_idx;
858 		break;
859 	case DAMOS_FILTER_TYPE_HUGEPAGE_SIZE:
860 		dst->sz_range = src->sz_range;
861 		break;
862 	default:
863 		break;
864 	}
865 }
866 
867 static void damos_commit_filter(
868 		struct damos_filter *dst, struct damos_filter *src)
869 {
870 	dst->type = src->type;
871 	dst->matching = src->matching;
872 	damos_commit_filter_arg(dst, src);
873 }
874 
875 static int damos_commit_core_filters(struct damos *dst, struct damos *src)
876 {
877 	struct damos_filter *dst_filter, *next, *src_filter, *new_filter;
878 	int i = 0, j = 0;
879 
880 	damos_for_each_filter_safe(dst_filter, next, dst) {
881 		src_filter = damos_nth_filter(i++, src);
882 		if (src_filter)
883 			damos_commit_filter(dst_filter, src_filter);
884 		else
885 			damos_destroy_filter(dst_filter);
886 	}
887 
888 	damos_for_each_filter_safe(src_filter, next, src) {
889 		if (j++ < i)
890 			continue;
891 
892 		new_filter = damos_new_filter(
893 				src_filter->type, src_filter->matching,
894 				src_filter->allow);
895 		if (!new_filter)
896 			return -ENOMEM;
897 		damos_commit_filter_arg(new_filter, src_filter);
898 		damos_add_filter(dst, new_filter);
899 	}
900 	return 0;
901 }
902 
903 static int damos_commit_ops_filters(struct damos *dst, struct damos *src)
904 {
905 	struct damos_filter *dst_filter, *next, *src_filter, *new_filter;
906 	int i = 0, j = 0;
907 
908 	damos_for_each_ops_filter_safe(dst_filter, next, dst) {
909 		src_filter = damos_nth_filter(i++, src);
910 		if (src_filter)
911 			damos_commit_filter(dst_filter, src_filter);
912 		else
913 			damos_destroy_filter(dst_filter);
914 	}
915 
916 	damos_for_each_ops_filter_safe(src_filter, next, src) {
917 		if (j++ < i)
918 			continue;
919 
920 		new_filter = damos_new_filter(
921 				src_filter->type, src_filter->matching,
922 				src_filter->allow);
923 		if (!new_filter)
924 			return -ENOMEM;
925 		damos_commit_filter_arg(new_filter, src_filter);
926 		damos_add_filter(dst, new_filter);
927 	}
928 	return 0;
929 }
930 
931 /**
932  * damos_filters_default_reject() - decide whether to reject memory that didn't
933  *				    match with any given filter.
934  * @filters:	Given DAMOS filters of a group.
935  */
936 static bool damos_filters_default_reject(struct list_head *filters)
937 {
938 	struct damos_filter *last_filter;
939 
940 	if (list_empty(filters))
941 		return false;
942 	last_filter = list_last_entry(filters, struct damos_filter, list);
943 	return last_filter->allow;
944 }
945 
946 static void damos_set_filters_default_reject(struct damos *s)
947 {
948 	if (!list_empty(&s->ops_filters))
949 		s->core_filters_default_reject = false;
950 	else
951 		s->core_filters_default_reject =
952 			damos_filters_default_reject(&s->filters);
953 	s->ops_filters_default_reject =
954 		damos_filters_default_reject(&s->ops_filters);
955 }
956 
957 static int damos_commit_filters(struct damos *dst, struct damos *src)
958 {
959 	int err;
960 
961 	err = damos_commit_core_filters(dst, src);
962 	if (err)
963 		return err;
964 	err = damos_commit_ops_filters(dst, src);
965 	if (err)
966 		return err;
967 	damos_set_filters_default_reject(dst);
968 	return 0;
969 }
970 
971 static struct damos *damon_nth_scheme(int n, struct damon_ctx *ctx)
972 {
973 	struct damos *s;
974 	int i = 0;
975 
976 	damon_for_each_scheme(s, ctx) {
977 		if (i++ == n)
978 			return s;
979 	}
980 	return NULL;
981 }
982 
983 static int damos_commit(struct damos *dst, struct damos *src)
984 {
985 	int err;
986 
987 	dst->pattern = src->pattern;
988 	dst->action = src->action;
989 	dst->apply_interval_us = src->apply_interval_us;
990 
991 	err = damos_commit_quota(&dst->quota, &src->quota);
992 	if (err)
993 		return err;
994 
995 	dst->wmarks = src->wmarks;
996 
997 	err = damos_commit_filters(dst, src);
998 	return err;
999 }
1000 
1001 static int damon_commit_schemes(struct damon_ctx *dst, struct damon_ctx *src)
1002 {
1003 	struct damos *dst_scheme, *next, *src_scheme, *new_scheme;
1004 	int i = 0, j = 0, err;
1005 
1006 	damon_for_each_scheme_safe(dst_scheme, next, dst) {
1007 		src_scheme = damon_nth_scheme(i++, src);
1008 		if (src_scheme) {
1009 			err = damos_commit(dst_scheme, src_scheme);
1010 			if (err)
1011 				return err;
1012 		} else {
1013 			damon_destroy_scheme(dst_scheme);
1014 		}
1015 	}
1016 
1017 	damon_for_each_scheme_safe(src_scheme, next, src) {
1018 		if (j++ < i)
1019 			continue;
1020 		new_scheme = damon_new_scheme(&src_scheme->pattern,
1021 				src_scheme->action,
1022 				src_scheme->apply_interval_us,
1023 				&src_scheme->quota, &src_scheme->wmarks,
1024 				NUMA_NO_NODE);
1025 		if (!new_scheme)
1026 			return -ENOMEM;
1027 		err = damos_commit(new_scheme, src_scheme);
1028 		if (err) {
1029 			damon_destroy_scheme(new_scheme);
1030 			return err;
1031 		}
1032 		damon_add_scheme(dst, new_scheme);
1033 	}
1034 	return 0;
1035 }
1036 
1037 static struct damon_target *damon_nth_target(int n, struct damon_ctx *ctx)
1038 {
1039 	struct damon_target *t;
1040 	int i = 0;
1041 
1042 	damon_for_each_target(t, ctx) {
1043 		if (i++ == n)
1044 			return t;
1045 	}
1046 	return NULL;
1047 }
1048 
1049 /*
1050  * The caller should ensure the regions of @src are
1051  * 1. valid (end >= start) and
1052  * 2. sorted by starting address.
1053  *
1054  * If @src has no region, @dst keeps current regions.
1055  */
1056 static int damon_commit_target_regions(
1057 		struct damon_target *dst, struct damon_target *src)
1058 {
1059 	struct damon_region *src_region;
1060 	struct damon_addr_range *ranges;
1061 	int i = 0, err;
1062 
1063 	damon_for_each_region(src_region, src)
1064 		i++;
1065 	if (!i)
1066 		return 0;
1067 
1068 	ranges = kmalloc_array(i, sizeof(*ranges), GFP_KERNEL | __GFP_NOWARN);
1069 	if (!ranges)
1070 		return -ENOMEM;
1071 	i = 0;
1072 	damon_for_each_region(src_region, src)
1073 		ranges[i++] = src_region->ar;
1074 	err = damon_set_regions(dst, ranges, i);
1075 	kfree(ranges);
1076 	return err;
1077 }
1078 
1079 static int damon_commit_target(
1080 		struct damon_target *dst, bool dst_has_pid,
1081 		struct damon_target *src, bool src_has_pid)
1082 {
1083 	int err;
1084 
1085 	err = damon_commit_target_regions(dst, src);
1086 	if (err)
1087 		return err;
1088 	if (dst_has_pid)
1089 		put_pid(dst->pid);
1090 	if (src_has_pid)
1091 		get_pid(src->pid);
1092 	dst->pid = src->pid;
1093 	return 0;
1094 }
1095 
1096 static int damon_commit_targets(
1097 		struct damon_ctx *dst, struct damon_ctx *src)
1098 {
1099 	struct damon_target *dst_target, *next, *src_target, *new_target;
1100 	int i = 0, j = 0, err;
1101 
1102 	damon_for_each_target_safe(dst_target, next, dst) {
1103 		src_target = damon_nth_target(i++, src);
1104 		if (src_target) {
1105 			err = damon_commit_target(
1106 					dst_target, damon_target_has_pid(dst),
1107 					src_target, damon_target_has_pid(src));
1108 			if (err)
1109 				return err;
1110 		} else {
1111 			struct damos *s;
1112 
1113 			if (damon_target_has_pid(dst))
1114 				put_pid(dst_target->pid);
1115 			damon_destroy_target(dst_target);
1116 			damon_for_each_scheme(s, dst) {
1117 				if (s->quota.charge_target_from == dst_target) {
1118 					s->quota.charge_target_from = NULL;
1119 					s->quota.charge_addr_from = 0;
1120 				}
1121 			}
1122 		}
1123 	}
1124 
1125 	damon_for_each_target_safe(src_target, next, src) {
1126 		if (j++ < i)
1127 			continue;
1128 		new_target = damon_new_target();
1129 		if (!new_target)
1130 			return -ENOMEM;
1131 		err = damon_commit_target(new_target, false,
1132 				src_target, damon_target_has_pid(src));
1133 		if (err) {
1134 			damon_destroy_target(new_target);
1135 			return err;
1136 		}
1137 		damon_add_target(dst, new_target);
1138 	}
1139 	return 0;
1140 }
1141 
1142 /**
1143  * damon_commit_ctx() - Commit parameters of a DAMON context to another.
1144  * @dst:	The commit destination DAMON context.
1145  * @src:	The commit source DAMON context.
1146  *
1147  * This function copies user-specified parameters from @src to @dst and updates
1148  * the internal status and results accordingly.  Users should use this function
1149  * for context-level parameters update of a running context, instead of manual
1150  * in-place updates.
1151  *
1152  * This function should be called from parameters-update safe context, like
1153  * DAMON callbacks.
1154  */
1155 int damon_commit_ctx(struct damon_ctx *dst, struct damon_ctx *src)
1156 {
1157 	int err;
1158 
1159 	err = damon_commit_schemes(dst, src);
1160 	if (err)
1161 		return err;
1162 	err = damon_commit_targets(dst, src);
1163 	if (err)
1164 		return err;
1165 	/*
1166 	 * schemes and targets should be updated first, since
1167 	 * 1. damon_set_attrs() updates monitoring results of targets and
1168 	 * next_apply_sis of schemes, and
1169 	 * 2. ops update should be done after pid handling is done (target
1170 	 *    committing requires putting pids).
1171 	 */
1172 	err = damon_set_attrs(dst, &src->attrs);
1173 	if (err)
1174 		return err;
1175 	dst->ops = src->ops;
1176 
1177 	return 0;
1178 }
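
/*
 * Illustrative sketch, not part of the original file: committing new
 * parameters to a running context from a damon_call()-invoked function, so
 * that the update happens in a parameters-update safe context as the
 * kernel-doc above requires.  The 'example_' names are hypothetical.
 */
#if 0
struct example_commit_args {
	struct damon_ctx *running_ctx;	/* context whose kdamond is running */
	struct damon_ctx *staging_ctx;	/* carries the new user parameters */
};

static int example_do_commit(void *data)
{
	struct example_commit_args *args = data;

	/* executed by the kdamond, so no extra synchronization is needed */
	return damon_commit_ctx(args->running_ctx, args->staging_ctx);
}
#endif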
1179 
1180 /**
1181  * damon_nr_running_ctxs() - Return number of currently running contexts.
1182  */
1183 int damon_nr_running_ctxs(void)
1184 {
1185 	int nr_ctxs;
1186 
1187 	mutex_lock(&damon_lock);
1188 	nr_ctxs = nr_running_ctxs;
1189 	mutex_unlock(&damon_lock);
1190 
1191 	return nr_ctxs;
1192 }
1193 
1194 /* Returns the size upper limit for each monitoring region */
1195 static unsigned long damon_region_sz_limit(struct damon_ctx *ctx)
1196 {
1197 	struct damon_target *t;
1198 	struct damon_region *r;
1199 	unsigned long sz = 0;
1200 
1201 	damon_for_each_target(t, ctx) {
1202 		damon_for_each_region(r, t)
1203 			sz += damon_sz_region(r);
1204 	}
1205 
1206 	if (ctx->attrs.min_nr_regions)
1207 		sz /= ctx->attrs.min_nr_regions;
1208 	if (sz < DAMON_MIN_REGION)
1209 		sz = DAMON_MIN_REGION;
1210 
1211 	return sz;
1212 }
1213 
1214 static int kdamond_fn(void *data);
1215 
1216 /*
1217  * __damon_start() - Starts monitoring with given context.
1218  * @ctx:	monitoring context
1219  *
1220  * This function should be called while damon_lock is held.
1221  *
1222  * Return: 0 on success, negative error code otherwise.
1223  */
1224 static int __damon_start(struct damon_ctx *ctx)
1225 {
1226 	int err = -EBUSY;
1227 
1228 	mutex_lock(&ctx->kdamond_lock);
1229 	if (!ctx->kdamond) {
1230 		err = 0;
1231 		reinit_completion(&ctx->kdamond_started);
1232 		ctx->kdamond = kthread_run(kdamond_fn, ctx, "kdamond.%d",
1233 				nr_running_ctxs);
1234 		if (IS_ERR(ctx->kdamond)) {
1235 			err = PTR_ERR(ctx->kdamond);
1236 			ctx->kdamond = NULL;
1237 		} else {
1238 			wait_for_completion(&ctx->kdamond_started);
1239 		}
1240 	}
1241 	mutex_unlock(&ctx->kdamond_lock);
1242 
1243 	return err;
1244 }
1245 
1246 /**
1247  * damon_start() - Starts monitoring for a given group of contexts.
1248  * @ctxs:	an array of the pointers for contexts to start monitoring
1249  * @nr_ctxs:	size of @ctxs
1250  * @exclusive:	exclusiveness of this contexts group
1251  *
1252  * This function starts a group of monitoring threads for a group of monitoring
1253  * contexts.  One thread per context is created and run in parallel.  The
1254  * caller should handle synchronization between the threads by itself.  If
1255  * @exclusive is true and a group of threads created by another
1256  * 'damon_start()' call is currently running, this function does nothing but
1257  * returns -EBUSY.
1258  *
1259  * Return: 0 on success, negative error code otherwise.
1260  */
1261 int damon_start(struct damon_ctx **ctxs, int nr_ctxs, bool exclusive)
1262 {
1263 	int i;
1264 	int err = 0;
1265 
1266 	mutex_lock(&damon_lock);
1267 	if ((exclusive && nr_running_ctxs) ||
1268 			(!exclusive && running_exclusive_ctxs)) {
1269 		mutex_unlock(&damon_lock);
1270 		return -EBUSY;
1271 	}
1272 
1273 	for (i = 0; i < nr_ctxs; i++) {
1274 		err = __damon_start(ctxs[i]);
1275 		if (err)
1276 			break;
1277 		nr_running_ctxs++;
1278 	}
1279 	if (exclusive && nr_running_ctxs)
1280 		running_exclusive_ctxs = true;
1281 	mutex_unlock(&damon_lock);
1282 
1283 	return err;
1284 }
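
/*
 * Illustrative sketch, not part of the original file: starting a single
 * context as an exclusive group.  The 'example_' name is hypothetical.
 */
#if 0
static int example_start_one(struct damon_ctx *ctx)
{
	struct damon_ctx *ctxs[] = { ctx };

	return damon_start(ctxs, 1, true);
}
#endif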
1285 
1286 /*
1287  * __damon_stop() - Stops monitoring of a given context.
1288  * @ctx:	monitoring context
1289  *
1290  * Return: 0 on success, negative error code otherwise.
1291  */
1292 static int __damon_stop(struct damon_ctx *ctx)
1293 {
1294 	struct task_struct *tsk;
1295 
1296 	mutex_lock(&ctx->kdamond_lock);
1297 	tsk = ctx->kdamond;
1298 	if (tsk) {
1299 		get_task_struct(tsk);
1300 		mutex_unlock(&ctx->kdamond_lock);
1301 		kthread_stop_put(tsk);
1302 		return 0;
1303 	}
1304 	mutex_unlock(&ctx->kdamond_lock);
1305 
1306 	return -EPERM;
1307 }
1308 
1309 /**
1310  * damon_stop() - Stops monitoring for a given group of contexts.
1311  * @ctxs:	an array of the pointers for contexts to stop monitoring
1312  * @nr_ctxs:	size of @ctxs
1313  *
1314  * Return: 0 on success, negative error code otherwise.
1315  */
1316 int damon_stop(struct damon_ctx **ctxs, int nr_ctxs)
1317 {
1318 	int i, err = 0;
1319 
1320 	for (i = 0; i < nr_ctxs; i++) {
1321 		/* nr_running_ctxs is decremented in kdamond_fn */
1322 		err = __damon_stop(ctxs[i]);
1323 		if (err)
1324 			break;
1325 	}
1326 	return err;
1327 }
1328 
1329 static bool damon_is_running(struct damon_ctx *ctx)
1330 {
1331 	bool running;
1332 
1333 	mutex_lock(&ctx->kdamond_lock);
1334 	running = ctx->kdamond != NULL;
1335 	mutex_unlock(&ctx->kdamond_lock);
1336 	return running;
1337 }
1338 
1339 /**
1340  * damon_call() - Invoke a given function on DAMON worker thread (kdamond).
1341  * @ctx:	DAMON context to call the function for.
1342  * @control:	Control variable of the call request.
1343  *
1344  * Ask DAMON worker thread (kdamond) of @ctx to call a function with an
1345  * argument data that respectively passed via &damon_call_control->fn and
1346  * &damon_call_control->data of @control, and wait until the kdamond finishes
1347  * handling of the request.
1348  *
1349  * The kdamond executes the function with the argument in the main loop, just
1350  * after a sampling of the iteration is finished.  The function can hence
1351  * safely access the internal data of the &struct damon_ctx without additional
1352  * synchronization.  The return value of the function will be saved in
1353  * &damon_call_control->return_code.
1354  *
1355  * Return: 0 on success, negative error code otherwise.
1356  */
1357 int damon_call(struct damon_ctx *ctx, struct damon_call_control *control)
1358 {
1359 	init_completion(&control->completion);
1360 	control->canceled = false;
1361 
1362 	mutex_lock(&ctx->call_control_lock);
1363 	if (ctx->call_control) {
1364 		mutex_unlock(&ctx->call_control_lock);
1365 		return -EBUSY;
1366 	}
1367 	ctx->call_control = control;
1368 	mutex_unlock(&ctx->call_control_lock);
1369 	if (!damon_is_running(ctx))
1370 		return -EINVAL;
1371 	wait_for_completion(&control->completion);
1372 	if (control->canceled)
1373 		return -ECANCELED;
1374 	return 0;
1375 }
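
/*
 * Illustrative sketch, not part of the original file: asking the kdamond of
 * a context to run a function synchronously.  Only the fn, data and
 * return_code fields of damon_call_control that this file itself documents
 * are used; the 'example_' names are hypothetical.
 */
#if 0
struct example_call_args {
	struct damon_ctx *ctx;
	unsigned int nr_regions;
};

static int example_count_regions(void *data)
{
	struct example_call_args *args = data;
	struct damon_target *t;

	/* runs on the kdamond thread, between two sampling iterations */
	args->nr_regions = 0;
	damon_for_each_target(t, args->ctx)
		args->nr_regions += damon_nr_regions(t);
	return 0;
}

static int example_issue_call(struct damon_ctx *ctx)
{
	struct example_call_args args = { .ctx = ctx };
	struct damon_call_control control = {
		.fn = example_count_regions,
		.data = &args,
	};
	int err;

	err = damon_call(ctx, &control);
	if (err)
		return err;
	return control.return_code;
}
#endif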
1376 
1377 /**
1378  * damos_walk() - Invoke a given function while DAMOS walks regions.
1379  * @ctx:	DAMON context to call the functions for.
1380  * @control:	Control variable of the walk request.
1381  *
1382  * Ask DAMON worker thread (kdamond) of @ctx to call a function for each region
1383  * that the kdamond will apply DAMOS action to, and wait until the kdamond
1384  * finishes handling of the request.
1385  *
1386  * The kdamond executes the given function in the main loop, for each region
1387  * just after it applied any DAMOS actions of @ctx to it.  The invocation is
1388  * made only within one &damos->apply_interval_us since damos_walk()
1389  * invocation, for each scheme.  The given callback function can hence safely
1390  * access the internal data of &struct damon_ctx and &struct damon_region that
1391  * each of the scheme will apply the action for next interval, without
1392  * additional synchronizations against the kdamond.  If every scheme of @ctx
1393  * passed at least one &damos->apply_interval_us, kdamond marks the request as
1394  * completed so that damos_walk() can wakeup and return.
1395  *
1396  * Return: 0 on success, negative error code otherwise.
1397  */
1398 int damos_walk(struct damon_ctx *ctx, struct damos_walk_control *control)
1399 {
1400 	init_completion(&control->completion);
1401 	control->canceled = false;
1402 	mutex_lock(&ctx->walk_control_lock);
1403 	if (ctx->walk_control) {
1404 		mutex_unlock(&ctx->walk_control_lock);
1405 		return -EBUSY;
1406 	}
1407 	ctx->walk_control = control;
1408 	mutex_unlock(&ctx->walk_control_lock);
1409 	if (!damon_is_running(ctx))
1410 		return -EINVAL;
1411 	wait_for_completion(&control->completion);
1412 	if (control->canceled)
1413 		return -ECANCELED;
1414 	return 0;
1415 }
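
/*
 * Illustrative sketch, not part of the original file: walking the regions
 * that the schemes of a context will apply their actions to.  The walk_fn
 * prototype follows the invocation in damos_walk_call_walk() below; the
 * 'example_' names are hypothetical.
 */
#if 0
static void example_walk_fn(void *data, struct damon_ctx *ctx,
		struct damon_target *t, struct damon_region *r,
		struct damos *s, unsigned long sz_filter_passed)
{
	unsigned long *total_sz = data;

	*total_sz += damon_sz_region(r);
}

static int example_walk(struct damon_ctx *ctx)
{
	unsigned long total_sz = 0;
	struct damos_walk_control control = {
		.walk_fn = example_walk_fn,
		.data = &total_sz,
	};

	return damos_walk(ctx, &control);
}
#endif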
1416 
1417 /*
1418  * Warn about and fix corrupted ->nr_accesses[_bp], to aid investigation and to
1419  * keep the problem from being propagated.
1420  */
1421 static void damon_warn_fix_nr_accesses_corruption(struct damon_region *r)
1422 {
1423 	if (r->nr_accesses_bp == r->nr_accesses * 10000)
1424 		return;
1425 	WARN_ONCE(true, "invalid nr_accesses_bp at reset: %u %u\n",
1426 			r->nr_accesses_bp, r->nr_accesses);
1427 	r->nr_accesses_bp = r->nr_accesses * 10000;
1428 }
1429 
1430 /*
1431  * Reset the aggregated monitoring results ('nr_accesses' of each region).
1432  */
1433 static void kdamond_reset_aggregated(struct damon_ctx *c)
1434 {
1435 	struct damon_target *t;
1436 	unsigned int ti = 0;	/* target's index */
1437 
1438 	damon_for_each_target(t, c) {
1439 		struct damon_region *r;
1440 
1441 		damon_for_each_region(r, t) {
1442 			trace_damon_aggregated(ti, r, damon_nr_regions(t));
1443 			damon_warn_fix_nr_accesses_corruption(r);
1444 			r->last_nr_accesses = r->nr_accesses;
1445 			r->nr_accesses = 0;
1446 		}
1447 		ti++;
1448 	}
1449 }
1450 
1451 static unsigned long damon_get_intervals_score(struct damon_ctx *c)
1452 {
1453 	struct damon_target *t;
1454 	struct damon_region *r;
1455 	unsigned long sz_region, max_access_events = 0, access_events = 0;
1456 	unsigned long target_access_events;
1457 	unsigned long goal_bp = c->attrs.intervals_goal.access_bp;
1458 
1459 	damon_for_each_target(t, c) {
1460 		damon_for_each_region(r, t) {
1461 			sz_region = damon_sz_region(r);
1462 			max_access_events += sz_region * c->attrs.aggr_samples;
1463 			access_events += sz_region * r->nr_accesses;
1464 		}
1465 	}
1466 	target_access_events = max_access_events * goal_bp / 10000;
1467 	target_access_events = target_access_events ? : 1;
1468 	return access_events * 10000 / target_access_events;
1469 }
1470 
1471 static unsigned long damon_feed_loop_next_input(unsigned long last_input,
1472 		unsigned long score);
1473 
1474 static unsigned long damon_get_intervals_adaptation_bp(struct damon_ctx *c)
1475 {
1476 	unsigned long score_bp, adaptation_bp;
1477 
1478 	score_bp = damon_get_intervals_score(c);
1479 	adaptation_bp = damon_feed_loop_next_input(100000000, score_bp) /
1480 		10000;
1481 	/*
1482 	 * adaptation_bp ranges from 1 to 20,000.  Avoid too rapid reduction of
1483 	 * the intervals by rescaling [1, 10,000] to [5,000, 10,000].
1484 	 */
1485 	if (adaptation_bp <= 10000)
1486 		adaptation_bp = 5000 + adaptation_bp / 2;
1487 	return adaptation_bp;
1488 }
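
/*
 * Example (illustrative, not from the original file): if the feedback loop
 * above yields an adaptation_bp of 2,000 (i.e., asking for intervals 20% of
 * the current ones), the rescaling turns it into 5,000 + 2,000 / 2 = 6,000,
 * so kdamond_tune_intervals() below will shrink the sampling interval to 60%
 * of its current value instead of 20%.
 */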
1489 
1490 static void kdamond_tune_intervals(struct damon_ctx *c)
1491 {
1492 	unsigned long adaptation_bp;
1493 	struct damon_attrs new_attrs;
1494 	struct damon_intervals_goal *goal;
1495 
1496 	adaptation_bp = damon_get_intervals_adaptation_bp(c);
1497 	if (adaptation_bp == 10000)
1498 		return;
1499 
1500 	new_attrs = c->attrs;
1501 	goal = &c->attrs.intervals_goal;
1502 	new_attrs.sample_interval = min(goal->max_sample_us,
1503 			c->attrs.sample_interval * adaptation_bp / 10000);
1504 	new_attrs.sample_interval = max(goal->min_sample_us,
1505 			new_attrs.sample_interval);
1506 	new_attrs.aggr_interval = new_attrs.sample_interval *
1507 		c->attrs.aggr_samples;
1508 	damon_set_attrs(c, &new_attrs);
1509 }
1510 
1511 static void damon_split_region_at(struct damon_target *t,
1512 				  struct damon_region *r, unsigned long sz_r);
1513 
1514 static bool __damos_valid_target(struct damon_region *r, struct damos *s)
1515 {
1516 	unsigned long sz;
1517 	unsigned int nr_accesses = r->nr_accesses_bp / 10000;
1518 
1519 	sz = damon_sz_region(r);
1520 	return s->pattern.min_sz_region <= sz &&
1521 		sz <= s->pattern.max_sz_region &&
1522 		s->pattern.min_nr_accesses <= nr_accesses &&
1523 		nr_accesses <= s->pattern.max_nr_accesses &&
1524 		s->pattern.min_age_region <= r->age &&
1525 		r->age <= s->pattern.max_age_region;
1526 }
1527 
1528 static bool damos_valid_target(struct damon_ctx *c, struct damon_target *t,
1529 		struct damon_region *r, struct damos *s)
1530 {
1531 	bool ret = __damos_valid_target(r, s);
1532 
1533 	if (!ret || !s->quota.esz || !c->ops.get_scheme_score)
1534 		return ret;
1535 
1536 	return c->ops.get_scheme_score(c, t, r, s) >= s->quota.min_score;
1537 }
1538 
1539 /*
1540  * damos_skip_charged_region() - Check if the given region or starting part of
1541  * it is already charged for the DAMOS quota.
1542  * @t:	The target of the region.
1543  * @rp:	The pointer to the region.
1544  * @s:	The scheme to be applied.
1545  *
1546  * If a quota of a scheme has been exceeded in a quota charge window, the scheme's
1547  * action would be applied to only a part of the target access pattern fulfilling
1548  * regions.  To avoid applying the scheme action to only already applied
1549  * regions, DAMON skips applying the scheme action to the regions that were charged
1550  * in the previous charge window.
1551  *
1552  * This function checks if a given region should be skipped or not for the
1553  * reason.  If only the starting part of the region has previously been charged,
1554  * this function splits the region into two so that the second one covers the
1555  * area that was not charged in the previous charge window and saves the second
1556  * region in *rp and returns false, so that the caller can apply the DAMON action
1557  * to the second one.
1558  *
1559  * Return: true if the region should be entirely skipped, false otherwise.
1560  */
1561 static bool damos_skip_charged_region(struct damon_target *t,
1562 		struct damon_region **rp, struct damos *s)
1563 {
1564 	struct damon_region *r = *rp;
1565 	struct damos_quota *quota = &s->quota;
1566 	unsigned long sz_to_skip;
1567 
1568 	/* Skip previously charged regions */
1569 	if (quota->charge_target_from) {
1570 		if (t != quota->charge_target_from)
1571 			return true;
1572 		if (r == damon_last_region(t)) {
1573 			quota->charge_target_from = NULL;
1574 			quota->charge_addr_from = 0;
1575 			return true;
1576 		}
1577 		if (quota->charge_addr_from &&
1578 				r->ar.end <= quota->charge_addr_from)
1579 			return true;
1580 
1581 		if (quota->charge_addr_from && r->ar.start <
1582 				quota->charge_addr_from) {
1583 			sz_to_skip = ALIGN_DOWN(quota->charge_addr_from -
1584 					r->ar.start, DAMON_MIN_REGION);
1585 			if (!sz_to_skip) {
1586 				if (damon_sz_region(r) <= DAMON_MIN_REGION)
1587 					return true;
1588 				sz_to_skip = DAMON_MIN_REGION;
1589 			}
1590 			damon_split_region_at(t, r, sz_to_skip);
1591 			r = damon_next_region(r);
1592 			*rp = r;
1593 		}
1594 		quota->charge_target_from = NULL;
1595 		quota->charge_addr_from = 0;
1596 	}
1597 	return false;
1598 }
1599 
1600 static void damos_update_stat(struct damos *s,
1601 		unsigned long sz_tried, unsigned long sz_applied,
1602 		unsigned long sz_ops_filter_passed)
1603 {
1604 	s->stat.nr_tried++;
1605 	s->stat.sz_tried += sz_tried;
1606 	if (sz_applied)
1607 		s->stat.nr_applied++;
1608 	s->stat.sz_applied += sz_applied;
1609 	s->stat.sz_ops_filter_passed += sz_ops_filter_passed;
1610 }
1611 
1612 static bool damos_filter_match(struct damon_ctx *ctx, struct damon_target *t,
1613 		struct damon_region *r, struct damos_filter *filter)
1614 {
1615 	bool matched = false;
1616 	struct damon_target *ti;
1617 	int target_idx = 0;
1618 	unsigned long start, end;
1619 
1620 	switch (filter->type) {
1621 	case DAMOS_FILTER_TYPE_TARGET:
1622 		damon_for_each_target(ti, ctx) {
1623 			if (ti == t)
1624 				break;
1625 			target_idx++;
1626 		}
1627 		matched = target_idx == filter->target_idx;
1628 		break;
1629 	case DAMOS_FILTER_TYPE_ADDR:
1630 		start = ALIGN_DOWN(filter->addr_range.start, DAMON_MIN_REGION);
1631 		end = ALIGN_DOWN(filter->addr_range.end, DAMON_MIN_REGION);
1632 
1633 		/* inside the range */
1634 		if (start <= r->ar.start && r->ar.end <= end) {
1635 			matched = true;
1636 			break;
1637 		}
1638 		/* outside of the range */
1639 		if (r->ar.end <= start || end <= r->ar.start) {
1640 			matched = false;
1641 			break;
1642 		}
1643 		/* start before the range and overlap */
1644 		if (r->ar.start < start) {
1645 			damon_split_region_at(t, r, start - r->ar.start);
1646 			matched = false;
1647 			break;
1648 		}
1649 		/* start inside the range */
1650 		damon_split_region_at(t, r, end - r->ar.start);
1651 		matched = true;
1652 		break;
1653 	default:
1654 		return false;
1655 	}
1656 
1657 	return matched == filter->matching;
1658 }
1659 
1660 static bool damos_filter_out(struct damon_ctx *ctx, struct damon_target *t,
1661 		struct damon_region *r, struct damos *s)
1662 {
1663 	struct damos_filter *filter;
1664 
1665 	s->core_filters_allowed = false;
1666 	damos_for_each_filter(filter, s) {
1667 		if (damos_filter_match(ctx, t, r, filter)) {
1668 			if (filter->allow)
1669 				s->core_filters_allowed = true;
1670 			return !filter->allow;
1671 		}
1672 	}
1673 	return s->core_filters_default_reject;
1674 }
1675 
1676 /*
1677  * damos_walk_call_walk() - Call &damos_walk_control->walk_fn.
1678  * @ctx:	The context of &damon_ctx->walk_control.
1679  * @t:		The monitoring target of @r that @s will be applied.
1680  * @r:		The region of @t that @s will be applied.
1681  * @s:		The scheme of @ctx that will be applied to @r.
1682  *
1683  * This function is called from kdamond whenever it asked the operation set to
1684  * apply a DAMOS scheme action to a region.  If a DAMOS walk request is
1685  * installed by damos_walk() and not yet uninstalled, invoke it.
1686  */
1687 static void damos_walk_call_walk(struct damon_ctx *ctx, struct damon_target *t,
1688 		struct damon_region *r, struct damos *s,
1689 		unsigned long sz_filter_passed)
1690 {
1691 	struct damos_walk_control *control;
1692 
1693 	if (s->walk_completed)
1694 		return;
1695 
1696 	control = ctx->walk_control;
1697 	if (!control)
1698 		return;
1699 
1700 	control->walk_fn(control->data, ctx, t, r, s, sz_filter_passed);
1701 }
1702 
1703 /*
1704  * damos_walk_complete() - Complete DAMOS walk request if all walks are done.
1705  * @ctx:	The context of &damon_ctx->walk_control.
1706  * @s:		A scheme of @ctx that all walks are now done.
1707  *
1708  * This function is called when kdamond has finished applying the action of a
1709  * DAMOS scheme to all regions eligible for the given &damos->apply_interval_us.
1710  * If every scheme of @ctx including @s has now finished walking for at least
1711  * one &damos->apply_interval_us, this function marks the handling of the given
1712  * DAMOS walk request as done, so that damos_walk() can wake up and return.
1713  */
damos_walk_complete(struct damon_ctx * ctx,struct damos * s)1714 static void damos_walk_complete(struct damon_ctx *ctx, struct damos *s)
1715 {
1716 	struct damos *siter;
1717 	struct damos_walk_control *control;
1718 
1719 	control = ctx->walk_control;
1720 	if (!control)
1721 		return;
1722 
1723 	s->walk_completed = true;
1724 	/* if all schemes completed, signal completion to walker */
1725 	damon_for_each_scheme(siter, ctx) {
1726 		if (!siter->walk_completed)
1727 			return;
1728 	}
1729 	damon_for_each_scheme(siter, ctx)
1730 		siter->walk_completed = false;
1731 
1732 	complete(&control->completion);
1733 	ctx->walk_control = NULL;
1734 }
1735 
1736 /*
1737  * damos_walk_cancel() - Cancel the current DAMOS walk request.
1738  * @ctx:	The context of &damon_ctx->walk_control.
1739  *
1740  * This function is called when @ctx is deactivated by DAMOS watermarks, a DAMOS
1741  * walk is requested but there is no DAMOS scheme to walk for, or the kdamond
1742  * is already out of the main loop and therefore going to be terminated, and
1743  * hence cannot continue the walks.  This function therefore marks the walk
1744  * request as canceled, so that damos_walk() can wake up and return.
1745  */
damos_walk_cancel(struct damon_ctx * ctx)1746 static void damos_walk_cancel(struct damon_ctx *ctx)
1747 {
1748 	struct damos_walk_control *control;
1749 
1750 	mutex_lock(&ctx->walk_control_lock);
1751 	control = ctx->walk_control;
1752 	mutex_unlock(&ctx->walk_control_lock);
1753 
1754 	if (!control)
1755 		return;
1756 	control->canceled = true;
1757 	complete(&control->completion);
1758 	mutex_lock(&ctx->walk_control_lock);
1759 	ctx->walk_control = NULL;
1760 	mutex_unlock(&ctx->walk_control_lock);
1761 }
1762 
damos_apply_scheme(struct damon_ctx * c,struct damon_target * t,struct damon_region * r,struct damos * s)1763 static void damos_apply_scheme(struct damon_ctx *c, struct damon_target *t,
1764 		struct damon_region *r, struct damos *s)
1765 {
1766 	struct damos_quota *quota = &s->quota;
1767 	unsigned long sz = damon_sz_region(r);
1768 	struct timespec64 begin, end;
1769 	unsigned long sz_applied = 0;
1770 	unsigned long sz_ops_filter_passed = 0;
1771 	/*
1772 	 * We plan to support multiple contexts per kdamond, as the DAMON sysfs
1773 	 * interface implies with its 'nr_contexts' file.  Nevertheless, only a
1774 	 * single context per kdamond is supported for now.  So, we can simply
1775 	 * use context index '0' here.
1776 	 */
1777 	unsigned int cidx = 0;
1778 	struct damos *siter;		/* schemes iterator */
1779 	unsigned int sidx = 0;
1780 	struct damon_target *titer;	/* targets iterator */
1781 	unsigned int tidx = 0;
1782 	bool do_trace = false;
1783 
1784 	/* get indices for trace_damos_before_apply() */
1785 	if (trace_damos_before_apply_enabled()) {
1786 		damon_for_each_scheme(siter, c) {
1787 			if (siter == s)
1788 				break;
1789 			sidx++;
1790 		}
1791 		damon_for_each_target(titer, c) {
1792 			if (titer == t)
1793 				break;
1794 			tidx++;
1795 		}
1796 		do_trace = true;
1797 	}
1798 
1799 	if (c->ops.apply_scheme) {
1800 		if (quota->esz && quota->charged_sz + sz > quota->esz) {
1801 			sz = ALIGN_DOWN(quota->esz - quota->charged_sz,
1802 					DAMON_MIN_REGION);
1803 			if (!sz)
1804 				goto update_stat;
1805 			damon_split_region_at(t, r, sz);
1806 		}
1807 		if (damos_filter_out(c, t, r, s))
1808 			return;
1809 		ktime_get_coarse_ts64(&begin);
1810 		trace_damos_before_apply(cidx, sidx, tidx, r,
1811 				damon_nr_regions(t), do_trace);
1812 		sz_applied = c->ops.apply_scheme(c, t, r, s,
1813 				&sz_ops_filter_passed);
1814 		damos_walk_call_walk(c, t, r, s, sz_ops_filter_passed);
1815 		ktime_get_coarse_ts64(&end);
1816 		quota->total_charged_ns += timespec64_to_ns(&end) -
1817 			timespec64_to_ns(&begin);
1818 		quota->charged_sz += sz;
1819 		if (quota->esz && quota->charged_sz >= quota->esz) {
1820 			quota->charge_target_from = t;
1821 			quota->charge_addr_from = r->ar.end + 1;
1822 		}
1823 	}
1824 	if (s->action != DAMOS_STAT)
1825 		r->age = 0;
1826 
1827 update_stat:
1828 	damos_update_stat(s, sz, sz_applied, sz_ops_filter_passed);
1829 }
1830 
damon_do_apply_schemes(struct damon_ctx * c,struct damon_target * t,struct damon_region * r)1831 static void damon_do_apply_schemes(struct damon_ctx *c,
1832 				   struct damon_target *t,
1833 				   struct damon_region *r)
1834 {
1835 	struct damos *s;
1836 
1837 	damon_for_each_scheme(s, c) {
1838 		struct damos_quota *quota = &s->quota;
1839 
1840 		if (c->passed_sample_intervals < s->next_apply_sis)
1841 			continue;
1842 
1843 		if (!s->wmarks.activated)
1844 			continue;
1845 
1846 		/* Check the quota */
1847 		if (quota->esz && quota->charged_sz >= quota->esz)
1848 			continue;
1849 
1850 		if (damos_skip_charged_region(t, &r, s))
1851 			continue;
1852 
1853 		if (!damos_valid_target(c, t, r, s))
1854 			continue;
1855 
1856 		damos_apply_scheme(c, t, r, s);
1857 	}
1858 }
1859 
1860 /*
1861  * damon_feed_loop_next_input() - get next input to achieve a target score.
1862  * @last_input:	The last input.
1863  * @score:	Current score made with @last_input.
1864  *
1865  * Calculate next input to achieve the target score, based on the last input
1866  * and current score.  Assuming the input and the score are positively
1867  * proportional, calculate how much compensation should be added to or
1868  * subtracted from the last input as a proportion of the last input.  Avoid
1869  * next input always being zero by setting it non-zero always.  In short form
1870  * the next input becoming zero by always keeping it non-zero.  In short form
1871  * below.
1872  *
1873  * next_input = max(last_input * ((goal - current) / goal + 1), 1)
1874  *
1875  * For simple implementation, we assume the target score is always 10,000.  The
1876  * caller should adjust @score for this.
1877  *
1878  * Return: Next input that is assumed to achieve the target score.
1879  */
damon_feed_loop_next_input(unsigned long last_input,unsigned long score)1880 static unsigned long damon_feed_loop_next_input(unsigned long last_input,
1881 		unsigned long score)
1882 {
1883 	const unsigned long goal = 10000;
1884 	/* Set minimum input as 10000 to avoid compensation be zero */
1885 	/* Set the minimum input as 10000 to avoid the compensation becoming zero */
1886 	unsigned long score_goal_diff, compensation;
1887 	bool over_achieving = score > goal;
1888 
1889 	if (score == goal)
1890 		return last_input;
1891 	if (score >= goal * 2)
1892 		return min_input;
1893 
1894 	if (over_achieving)
1895 		score_goal_diff = score - goal;
1896 	else
1897 		score_goal_diff = goal - score;
1898 
1899 	if (last_input < ULONG_MAX / score_goal_diff)
1900 		compensation = last_input * score_goal_diff / goal;
1901 	else
1902 		compensation = last_input / goal * score_goal_diff;
1903 
1904 	if (over_achieving)
1905 		return max(last_input - compensation, min_input);
1906 	if (last_input < ULONG_MAX - compensation)
1907 		return last_input + compensation;
1908 	return ULONG_MAX;
1909 }
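
/*
 * Worked example of the feedback loop above: with a last_input of 1,000,000
 * and a score of 8,000 (goal is 10,000), score_goal_diff is 2,000, the
 * compensation is 1,000,000 * 2,000 / 10,000 = 200,000, and the next input
 * becomes 1,200,000.  With a score of 15,000 the compensation is 500,000 and
 * the next input shrinks to 500,000.  A score of 20,000 or more immediately
 * returns min_input (10,000).
 */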
1910 
1911 #ifdef CONFIG_PSI
1912 
damos_get_some_mem_psi_total(void)1913 static u64 damos_get_some_mem_psi_total(void)
1914 {
1915 	if (static_branch_likely(&psi_disabled))
1916 		return 0;
1917 	return div_u64(psi_system.total[PSI_AVGS][PSI_MEM * 2],
1918 			NSEC_PER_USEC);
1919 }
1920 
1921 #else	/* CONFIG_PSI */
1922 
damos_get_some_mem_psi_total(void)1923 static inline u64 damos_get_some_mem_psi_total(void)
1924 {
1925 	return 0;
1926 };
1927 
1928 #endif	/* CONFIG_PSI */
1929 
1930 #ifdef CONFIG_NUMA
damos_get_node_mem_bp(struct damos_quota_goal * goal)1931 static __kernel_ulong_t damos_get_node_mem_bp(
1932 		struct damos_quota_goal *goal)
1933 {
1934 	struct sysinfo i;
1935 	__kernel_ulong_t numerator;
1936 
1937 	si_meminfo_node(&i, goal->nid);
1938 	if (goal->metric == DAMOS_QUOTA_NODE_MEM_USED_BP)
1939 		numerator = i.totalram - i.freeram;
1940 	else	/* DAMOS_QUOTA_NODE_MEM_FREE_BP */
1941 		numerator = i.freeram;
1942 	return numerator * 10000 / i.totalram;
1943 }
1944 #else
damos_get_node_mem_bp(struct damos_quota_goal * goal)1945 static __kernel_ulong_t damos_get_node_mem_bp(
1946 		struct damos_quota_goal *goal)
1947 {
1948 	return 0;
1949 }
1950 #endif
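
/*
 * For example, on a NUMA node with 16 GiB of RAM of which 4 GiB is free,
 * DAMOS_QUOTA_NODE_MEM_FREE_BP evaluates to 4 / 16 * 10000 = 2,500 bp and
 * DAMOS_QUOTA_NODE_MEM_USED_BP to 7,500 bp (totalram and freeram are in
 * pages, but the ratio is the same).
 */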
1951 
1952 
damos_set_quota_goal_current_value(struct damos_quota_goal * goal)1953 static void damos_set_quota_goal_current_value(struct damos_quota_goal *goal)
1954 {
1955 	u64 now_psi_total;
1956 
1957 	switch (goal->metric) {
1958 	case DAMOS_QUOTA_USER_INPUT:
1959 		/* User should already set goal->current_value */
1960 		break;
1961 	case DAMOS_QUOTA_SOME_MEM_PSI_US:
1962 		now_psi_total = damos_get_some_mem_psi_total();
1963 		goal->current_value = now_psi_total - goal->last_psi_total;
1964 		goal->last_psi_total = now_psi_total;
1965 		break;
1966 	case DAMOS_QUOTA_NODE_MEM_USED_BP:
1967 	case DAMOS_QUOTA_NODE_MEM_FREE_BP:
1968 		goal->current_value = damos_get_node_mem_bp(goal);
1969 		break;
1970 	default:
1971 		break;
1972 	}
1973 }
1974 
1975 /* Return the highest score since it makes schemes least aggressive */
damos_quota_score(struct damos_quota * quota)1976 static unsigned long damos_quota_score(struct damos_quota *quota)
1977 {
1978 	struct damos_quota_goal *goal;
1979 	unsigned long highest_score = 0;
1980 
1981 	damos_for_each_quota_goal(goal, quota) {
1982 		damos_set_quota_goal_current_value(goal);
1983 		highest_score = max(highest_score,
1984 				goal->current_value * 10000 /
1985 				goal->target_value);
1986 	}
1987 
1988 	return highest_score;
1989 }
1990 
1991 /*
1992  * Called only if quota->ms or quota->sz is set, or quota->goals is not empty
1993  */
damos_set_effective_quota(struct damos_quota * quota)1994 static void damos_set_effective_quota(struct damos_quota *quota)
1995 {
1996 	unsigned long throughput;
1997 	unsigned long esz = ULONG_MAX;
1998 
1999 	if (!quota->ms && list_empty(&quota->goals)) {
2000 		quota->esz = quota->sz;
2001 		return;
2002 	}
2003 
2004 	if (!list_empty(&quota->goals)) {
2005 		unsigned long score = damos_quota_score(quota);
2006 
2007 		quota->esz_bp = damon_feed_loop_next_input(
2008 				max(quota->esz_bp, 10000UL),
2009 				score);
2010 		esz = quota->esz_bp / 10000;
2011 	}
2012 
2013 	if (quota->ms) {
2014 		if (quota->total_charged_ns)
2015 			throughput = quota->total_charged_sz * 1000000 /
2016 				quota->total_charged_ns;
2017 		else
2018 			throughput = PAGE_SIZE * 1024;
2019 		esz = min(throughput * quota->ms, esz);
2020 	}
2021 
2022 	if (quota->sz && quota->sz < esz)
2023 		esz = quota->sz;
2024 
2025 	quota->esz = esz;
2026 }
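
/*
 * Worked example of the effective quota calculation above (numbers are only
 * illustrative): if the quota goals report a score of 5,000 (half of the
 * 10,000 target), the feedback loop scales quota->esz_bp up by 1.5x, so an
 * esz_bp of 1,000,000,000 becomes 1,500,000,000, i.e., an esz of 150,000
 * bytes.  If quota->ms is also set, esz is further capped by the estimated
 * throughput (bytes per millisecond of apply time) times quota->ms, and
 * finally by quota->sz if that is smaller.
 */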
2027 
damos_adjust_quota(struct damon_ctx * c,struct damos * s)2028 static void damos_adjust_quota(struct damon_ctx *c, struct damos *s)
2029 {
2030 	struct damos_quota *quota = &s->quota;
2031 	struct damon_target *t;
2032 	struct damon_region *r;
2033 	unsigned long cumulated_sz;
2034 	unsigned int score, max_score = 0;
2035 
2036 	if (!quota->ms && !quota->sz && list_empty(&quota->goals))
2037 		return;
2038 
2039 	/* New charge window starts */
2040 	if (time_after_eq(jiffies, quota->charged_from +
2041 				msecs_to_jiffies(quota->reset_interval))) {
2042 		if (quota->esz && quota->charged_sz >= quota->esz)
2043 			s->stat.qt_exceeds++;
2044 		quota->total_charged_sz += quota->charged_sz;
2045 		quota->charged_from = jiffies;
2046 		quota->charged_sz = 0;
2047 		damos_set_effective_quota(quota);
2048 	}
2049 
2050 	if (!c->ops.get_scheme_score)
2051 		return;
2052 
2053 	/* Fill up the score histogram */
2054 	memset(c->regions_score_histogram, 0,
2055 			sizeof(*c->regions_score_histogram) *
2056 			(DAMOS_MAX_SCORE + 1));
2057 	damon_for_each_target(t, c) {
2058 		damon_for_each_region(r, t) {
2059 			if (!__damos_valid_target(r, s))
2060 				continue;
2061 			score = c->ops.get_scheme_score(c, t, r, s);
2062 			c->regions_score_histogram[score] +=
2063 				damon_sz_region(r);
2064 			if (score > max_score)
2065 				max_score = score;
2066 		}
2067 	}
2068 
2069 	/* Set the min score limit */
2070 	for (cumulated_sz = 0, score = max_score; ; score--) {
2071 		cumulated_sz += c->regions_score_histogram[score];
2072 		if (cumulated_sz >= quota->esz || !score)
2073 			break;
2074 	}
2075 	quota->min_score = score;
2076 }
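
/*
 * Example of the min score limit selection above: with an esz of 8 MiB and
 * a histogram of {score 3: 4 MiB, score 2: 3 MiB, score 1: 5 MiB}, the loop
 * accumulates 4 MiB at score 3, 7 MiB at score 2, and reaches 12 MiB (>=
 * esz) at score 1, so quota->min_score becomes 1.  Regions scoring below
 * that are then excluded when the scheme is applied, keeping the charged
 * size close to the effective quota.
 */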
2077 
kdamond_apply_schemes(struct damon_ctx * c)2078 static void kdamond_apply_schemes(struct damon_ctx *c)
2079 {
2080 	struct damon_target *t;
2081 	struct damon_region *r, *next_r;
2082 	struct damos *s;
2083 	unsigned long sample_interval = c->attrs.sample_interval ?
2084 		c->attrs.sample_interval : 1;
2085 	bool has_schemes_to_apply = false;
2086 
2087 	damon_for_each_scheme(s, c) {
2088 		if (c->passed_sample_intervals < s->next_apply_sis)
2089 			continue;
2090 
2091 		if (!s->wmarks.activated)
2092 			continue;
2093 
2094 		has_schemes_to_apply = true;
2095 
2096 		damos_adjust_quota(c, s);
2097 	}
2098 
2099 	if (!has_schemes_to_apply)
2100 		return;
2101 
2102 	mutex_lock(&c->walk_control_lock);
2103 	damon_for_each_target(t, c) {
2104 		damon_for_each_region_safe(r, next_r, t)
2105 			damon_do_apply_schemes(c, t, r);
2106 	}
2107 
2108 	damon_for_each_scheme(s, c) {
2109 		if (c->passed_sample_intervals < s->next_apply_sis)
2110 			continue;
2111 		damos_walk_complete(c, s);
2112 		s->next_apply_sis = c->passed_sample_intervals +
2113 			(s->apply_interval_us ? s->apply_interval_us :
2114 			 c->attrs.aggr_interval) / sample_interval;
2115 		s->last_applied = NULL;
2116 	}
2117 	mutex_unlock(&c->walk_control_lock);
2118 }
2119 
2120 /*
2121  * Merge two adjacent regions into one region
2122  */
damon_merge_two_regions(struct damon_target * t,struct damon_region * l,struct damon_region * r)2123 static void damon_merge_two_regions(struct damon_target *t,
2124 		struct damon_region *l, struct damon_region *r)
2125 {
2126 	unsigned long sz_l = damon_sz_region(l), sz_r = damon_sz_region(r);
2127 
2128 	l->nr_accesses = (l->nr_accesses * sz_l + r->nr_accesses * sz_r) /
2129 			(sz_l + sz_r);
2130 	l->nr_accesses_bp = l->nr_accesses * 10000;
2131 	l->age = (l->age * sz_l + r->age * sz_r) / (sz_l + sz_r);
2132 	l->ar.end = r->ar.end;
2133 	damon_destroy_region(r, t);
2134 }
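
/*
 * For example, merging a 4 KiB region with nr_accesses of 10 into an
 * adjacent 12 KiB region with nr_accesses of 2 yields
 * (10 * 4096 + 2 * 12288) / 16384 = 4 accesses for the merged 16 KiB
 * region, and an nr_accesses_bp of 40,000.  The age is averaged with the
 * same size-based weights.
 */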
2135 
2136 /*
2137  * Merge adjacent regions having similar access frequencies
2138  *
2139  * t		target affected by this merge operation
2140  * thres	'->nr_accesses' diff threshold for the merge
2141  * sz_limit	size upper limit of each region
2142  */
damon_merge_regions_of(struct damon_target * t,unsigned int thres,unsigned long sz_limit)2143 static void damon_merge_regions_of(struct damon_target *t, unsigned int thres,
2144 				   unsigned long sz_limit)
2145 {
2146 	struct damon_region *r, *prev = NULL, *next;
2147 
2148 	damon_for_each_region_safe(r, next, t) {
2149 		if (abs(r->nr_accesses - r->last_nr_accesses) > thres)
2150 			r->age = 0;
2151 		else
2152 			r->age++;
2153 
2154 		if (prev && prev->ar.end == r->ar.start &&
2155 		    abs(prev->nr_accesses - r->nr_accesses) <= thres &&
2156 		    damon_sz_region(prev) + damon_sz_region(r) <= sz_limit)
2157 			damon_merge_two_regions(t, prev, r);
2158 		else
2159 			prev = r;
2160 	}
2161 }
2162 
2163 /*
2164  * Merge adjacent regions having similar access frequencies
2165  *
2166  * threshold	'->nr_accesses' diff threshold for the merge
2167  * sz_limit	size upper limit of each region
2168  *
2169  * This function merges adjacent monitoring target regions that have similar
2170  * access frequencies.  This is for minimizing the monitoring
2171  * overhead under the dynamically changeable access pattern.  If a merge was
2172  * unnecessarily made, later 'kdamond_split_regions()' will revert it.
2173  *
2174  * The total number of regions could be higher than the user-defined limit,
2175  * max_nr_regions, in some cases.  For example, the user can update
2176  * max_nr_regions to a number lower than the current number of regions
2177  * while DAMON is running.  For such a case, repeat merging until the limit is
2178  * met while increasing @threshold up to possible maximum level.
2179  */
kdamond_merge_regions(struct damon_ctx * c,unsigned int threshold,unsigned long sz_limit)2180 static void kdamond_merge_regions(struct damon_ctx *c, unsigned int threshold,
2181 				  unsigned long sz_limit)
2182 {
2183 	struct damon_target *t;
2184 	unsigned int nr_regions;
2185 	unsigned int max_thres;
2186 
2187 	max_thres = c->attrs.aggr_interval /
2188 		(c->attrs.sample_interval ?  c->attrs.sample_interval : 1);
2189 	do {
2190 		nr_regions = 0;
2191 		damon_for_each_target(t, c) {
2192 			damon_merge_regions_of(t, threshold, sz_limit);
2193 			nr_regions += damon_nr_regions(t);
2194 		}
2195 		threshold = max(1, threshold * 2);
2196 	} while (nr_regions > c->attrs.max_nr_regions &&
2197 			threshold / 2 < max_thres);
2198 }
2199 
2200 /*
2201  * Split a region in two
2202  *
2203  * r		the region to be split
2204  * sz_r		size of the first sub-region that will be made
2205  */
damon_split_region_at(struct damon_target * t,struct damon_region * r,unsigned long sz_r)2206 static void damon_split_region_at(struct damon_target *t,
2207 				  struct damon_region *r, unsigned long sz_r)
2208 {
2209 	struct damon_region *new;
2210 
2211 	new = damon_new_region(r->ar.start + sz_r, r->ar.end);
2212 	if (!new)
2213 		return;
2214 
2215 	r->ar.end = new->ar.start;
2216 
2217 	new->age = r->age;
2218 	new->last_nr_accesses = r->last_nr_accesses;
2219 	new->nr_accesses_bp = r->nr_accesses_bp;
2220 	new->nr_accesses = r->nr_accesses;
2221 
2222 	damon_insert_region(new, r, damon_next_region(r), t);
2223 }
2224 
2225 /* Split every region in the given target into 'nr_subs' regions */
damon_split_regions_of(struct damon_target * t,int nr_subs)2226 static void damon_split_regions_of(struct damon_target *t, int nr_subs)
2227 {
2228 	struct damon_region *r, *next;
2229 	unsigned long sz_region, sz_sub = 0;
2230 	int i;
2231 
2232 	damon_for_each_region_safe(r, next, t) {
2233 		sz_region = damon_sz_region(r);
2234 
2235 		for (i = 0; i < nr_subs - 1 &&
2236 				sz_region > 2 * DAMON_MIN_REGION; i++) {
2237 			/*
2238 			 * Randomly select the size of the left sub-region to
2239 			 * be at least 10% and at most 90% of the original region
2240 			 */
2241 			sz_sub = ALIGN_DOWN(damon_rand(1, 10) *
2242 					sz_region / 10, DAMON_MIN_REGION);
2243 			/* Do not allow blank region */
2244 			if (sz_sub == 0 || sz_sub >= sz_region)
2245 				continue;
2246 
2247 			damon_split_region_at(t, r, sz_sub);
2248 			sz_region = sz_sub;
2249 		}
2250 	}
2251 }
2252 
2253 /*
2254  * Split every target region into randomly-sized small regions
2255  *
2256  * This function splits every target region into random-sized small regions if
2257  * the current total number of regions is equal to or smaller than half of the
2258  * user-specified maximum number of regions.  This is for maximizing the
2259  * monitoring accuracy under the dynamically changeable access patterns.  If a
2260  * split was unnecessarily made, later 'kdamond_merge_regions()' will revert
2261  * it.
2262  */
kdamond_split_regions(struct damon_ctx * ctx)2263 static void kdamond_split_regions(struct damon_ctx *ctx)
2264 {
2265 	struct damon_target *t;
2266 	unsigned int nr_regions = 0;
2267 	static unsigned int last_nr_regions;
2268 	int nr_subregions = 2;
2269 
2270 	damon_for_each_target(t, ctx)
2271 		nr_regions += damon_nr_regions(t);
2272 
2273 	if (nr_regions > ctx->attrs.max_nr_regions / 2)
2274 		return;
2275 
2276 	/* Maybe the middle of the region has different access frequency */
2277 	if (last_nr_regions == nr_regions &&
2278 			nr_regions < ctx->attrs.max_nr_regions / 3)
2279 		nr_subregions = 3;
2280 
2281 	damon_for_each_target(t, ctx)
2282 		damon_split_regions_of(t, nr_subregions);
2283 
2284 	last_nr_regions = nr_regions;
2285 }
2286 
2287 /*
2288  * Check whether current monitoring should be stopped
2289  *
2290  * The monitoring is stopped when either the user requested to stop, or all
2291  * monitoring targets are invalid.
2292  *
2293  * Returns true if the current monitoring needs to be stopped.
2294  */
kdamond_need_stop(struct damon_ctx * ctx)2295 static bool kdamond_need_stop(struct damon_ctx *ctx)
2296 {
2297 	struct damon_target *t;
2298 
2299 	if (kthread_should_stop())
2300 		return true;
2301 
2302 	if (!ctx->ops.target_valid)
2303 		return false;
2304 
2305 	damon_for_each_target(t, ctx) {
2306 		if (ctx->ops.target_valid(t))
2307 			return false;
2308 	}
2309 
2310 	return true;
2311 }
2312 
damos_get_wmark_metric_value(enum damos_wmark_metric metric,unsigned long * metric_value)2313 static int damos_get_wmark_metric_value(enum damos_wmark_metric metric,
2314 					unsigned long *metric_value)
2315 {
2316 	switch (metric) {
2317 	case DAMOS_WMARK_FREE_MEM_RATE:
2318 		*metric_value = global_zone_page_state(NR_FREE_PAGES) * 1000 /
2319 		       totalram_pages();
2320 		return 0;
2321 	default:
2322 		break;
2323 	}
2324 	return -EINVAL;
2325 }
2326 
2327 /*
2328  * Returns zero if the scheme is active.  Else, returns the time to wait for
2329  * the next watermark check in microseconds.
2330  */
damos_wmark_wait_us(struct damos * scheme)2331 static unsigned long damos_wmark_wait_us(struct damos *scheme)
2332 {
2333 	unsigned long metric;
2334 
2335 	if (damos_get_wmark_metric_value(scheme->wmarks.metric, &metric))
2336 		return 0;
2337 
2338 	/* higher than high watermark or lower than low watermark */
2339 	if (metric > scheme->wmarks.high || scheme->wmarks.low > metric) {
2340 		if (scheme->wmarks.activated)
2341 			pr_debug("deactivate a scheme (%d) for %s wmark\n",
2342 				 scheme->action,
2343 				 str_high_low(metric > scheme->wmarks.high));
2344 		scheme->wmarks.activated = false;
2345 		return scheme->wmarks.interval;
2346 	}
2347 
2348 	/* inactive and higher than middle watermark */
2349 	if ((scheme->wmarks.high >= metric && metric >= scheme->wmarks.mid) &&
2350 			!scheme->wmarks.activated)
2351 		return scheme->wmarks.interval;
2352 
2353 	if (!scheme->wmarks.activated)
2354 		pr_debug("activate a scheme (%d)\n", scheme->action);
2355 	scheme->wmarks.activated = true;
2356 	return 0;
2357 }
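
/*
 * Example with DAMOS_WMARK_FREE_MEM_RATE (measured in per-thousand of total
 * RAM) and high/mid/low watermarks of 500/300/200: a metric of 600 or 150
 * deactivates the scheme and returns wmarks.interval, a metric of 400 keeps
 * an inactive scheme waiting while an already active one keeps running, and
 * a metric of 250 (below mid but not below low) activates the scheme.
 */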
2358 
kdamond_usleep(unsigned long usecs)2359 static void kdamond_usleep(unsigned long usecs)
2360 {
2361 	if (usecs >= USLEEP_RANGE_UPPER_BOUND)
2362 		schedule_timeout_idle(usecs_to_jiffies(usecs));
2363 	else
2364 		usleep_range_idle(usecs, usecs + 1);
2365 }
2366 
2367 /*
2368  * kdamond_call() - handle damon_call_control.
2369  * @ctx:	The &struct damon_ctx of the kdamond.
2370  * @cancel:	Whether to cancel the invocation of the function.
2371  *
2372  * If there is a &struct damon_call_control request that is registered via
2373  * damon_call() on @ctx, do or cancel the invocation of the function depending
2374  * on @cancel.  @cancel is set when the kdamond is already out of the main loop
2375  * and therefore will be terminated.
2376  */
kdamond_call(struct damon_ctx * ctx,bool cancel)2377 static void kdamond_call(struct damon_ctx *ctx, bool cancel)
2378 {
2379 	struct damon_call_control *control;
2380 	int ret = 0;
2381 
2382 	mutex_lock(&ctx->call_control_lock);
2383 	control = ctx->call_control;
2384 	mutex_unlock(&ctx->call_control_lock);
2385 	if (!control)
2386 		return;
2387 	if (cancel) {
2388 		control->canceled = true;
2389 	} else {
2390 		ret = control->fn(control->data);
2391 		control->return_code = ret;
2392 	}
2393 	complete(&control->completion);
2394 	mutex_lock(&ctx->call_control_lock);
2395 	ctx->call_control = NULL;
2396 	mutex_unlock(&ctx->call_control_lock);
2397 }
2398 
2399 /* Returns a negative error code if waiting should stop while no scheme is activated */
kdamond_wait_activation(struct damon_ctx * ctx)2400 static int kdamond_wait_activation(struct damon_ctx *ctx)
2401 {
2402 	struct damos *s;
2403 	unsigned long wait_time;
2404 	unsigned long min_wait_time = 0;
2405 	bool init_wait_time = false;
2406 
2407 	while (!kdamond_need_stop(ctx)) {
2408 		damon_for_each_scheme(s, ctx) {
2409 			wait_time = damos_wmark_wait_us(s);
2410 			if (!init_wait_time || wait_time < min_wait_time) {
2411 				init_wait_time = true;
2412 				min_wait_time = wait_time;
2413 			}
2414 		}
2415 		if (!min_wait_time)
2416 			return 0;
2417 
2418 		kdamond_usleep(min_wait_time);
2419 
2420 		if (ctx->callback.after_wmarks_check &&
2421 				ctx->callback.after_wmarks_check(ctx))
2422 			break;
2423 		kdamond_call(ctx, false);
2424 		damos_walk_cancel(ctx);
2425 	}
2426 	return -EBUSY;
2427 }
2428 
kdamond_init_ctx(struct damon_ctx * ctx)2429 static void kdamond_init_ctx(struct damon_ctx *ctx)
2430 {
2431 	unsigned long sample_interval = ctx->attrs.sample_interval ?
2432 		ctx->attrs.sample_interval : 1;
2433 	unsigned long apply_interval;
2434 	struct damos *scheme;
2435 
2436 	ctx->passed_sample_intervals = 0;
2437 	ctx->next_aggregation_sis = ctx->attrs.aggr_interval / sample_interval;
2438 	ctx->next_ops_update_sis = ctx->attrs.ops_update_interval /
2439 		sample_interval;
2440 	ctx->next_intervals_tune_sis = ctx->next_aggregation_sis *
2441 		ctx->attrs.intervals_goal.aggrs;
2442 
2443 	damon_for_each_scheme(scheme, ctx) {
2444 		apply_interval = scheme->apply_interval_us ?
2445 			scheme->apply_interval_us : ctx->attrs.aggr_interval;
2446 		scheme->next_apply_sis = apply_interval / sample_interval;
2447 		damos_set_filters_default_reject(scheme);
2448 	}
2449 }
2450 
2451 /*
2452  * The monitoring daemon that runs as a kernel thread
2453  */
kdamond_fn(void * data)2454 static int kdamond_fn(void *data)
2455 {
2456 	struct damon_ctx *ctx = data;
2457 	struct damon_target *t;
2458 	struct damon_region *r, *next;
2459 	unsigned int max_nr_accesses = 0;
2460 	unsigned long sz_limit = 0;
2461 
2462 	pr_debug("kdamond (%d) starts\n", current->pid);
2463 
2464 	complete(&ctx->kdamond_started);
2465 	kdamond_init_ctx(ctx);
2466 
2467 	if (ctx->ops.init)
2468 		ctx->ops.init(ctx);
2469 	ctx->regions_score_histogram = kmalloc_array(DAMOS_MAX_SCORE + 1,
2470 			sizeof(*ctx->regions_score_histogram), GFP_KERNEL);
2471 	if (!ctx->regions_score_histogram)
2472 		goto done;
2473 
2474 	sz_limit = damon_region_sz_limit(ctx);
2475 
2476 	while (!kdamond_need_stop(ctx)) {
2477 		/*
2478 		 * ctx->attrs and ctx->next_{aggregation,ops_update}_sis could
2479 		 * be changed from after_wmarks_check() or after_aggregation()
2480 		 * callbacks.  Read the values here, and use those for this
2481 		 * iteration.  That is, new values updated by damon_set_attrs()
2482 		 * are respected from the next iteration.
2483 		 */
2484 		unsigned long next_aggregation_sis = ctx->next_aggregation_sis;
2485 		unsigned long next_ops_update_sis = ctx->next_ops_update_sis;
2486 		unsigned long sample_interval = ctx->attrs.sample_interval;
2487 
2488 		if (kdamond_wait_activation(ctx))
2489 			break;
2490 
2491 		if (ctx->ops.prepare_access_checks)
2492 			ctx->ops.prepare_access_checks(ctx);
2493 
2494 		kdamond_usleep(sample_interval);
2495 		ctx->passed_sample_intervals++;
2496 
2497 		if (ctx->ops.check_accesses)
2498 			max_nr_accesses = ctx->ops.check_accesses(ctx);
2499 
2500 		if (ctx->passed_sample_intervals >= next_aggregation_sis) {
2501 			kdamond_merge_regions(ctx,
2502 					max_nr_accesses / 10,
2503 					sz_limit);
2504 			if (ctx->callback.after_aggregation &&
2505 					ctx->callback.after_aggregation(ctx))
2506 				break;
2507 		}
2508 
2509 		/*
2510 		 * do kdamond_call() and kdamond_apply_schemes() after
2511 		 * kdamond_merge_regions() if possible, to reduce overhead
2512 		 */
2513 		kdamond_call(ctx, false);
2514 		if (!list_empty(&ctx->schemes))
2515 			kdamond_apply_schemes(ctx);
2516 		else
2517 			damos_walk_cancel(ctx);
2518 
2519 		sample_interval = ctx->attrs.sample_interval ?
2520 			ctx->attrs.sample_interval : 1;
2521 		if (ctx->passed_sample_intervals >= next_aggregation_sis) {
2522 			if (ctx->attrs.intervals_goal.aggrs &&
2523 					ctx->passed_sample_intervals >=
2524 					ctx->next_intervals_tune_sis) {
2525 				/*
2526 				 * ctx->next_aggregation_sis might be updated
2527 				 * from kdamond_call().  In that case,
2528 				 * damon_set_attrs(), which will be called from
2529 				 * kdamond_tune_intervals(), may wrongly think
2530 				 * this is in the middle of the current
2531 				 * aggregation, and reset the aggregation
2532 				 * information for all regions.  Then, the
2533 				 * following kdamond_reset_aggregated() call
2534 				 * will make the region information invalid,
2535 				 * particularly for ->nr_accesses_bp.
2536 				 *
2537 				 * Reset ->next_aggregation_sis to avoid that.
2538 				 * It will be correctly updated after this 'if'
2539 				 * clause anyway.
2540 				 */
2541 				ctx->next_aggregation_sis =
2542 					next_aggregation_sis;
2543 				ctx->next_intervals_tune_sis +=
2544 					ctx->attrs.aggr_samples *
2545 					ctx->attrs.intervals_goal.aggrs;
2546 				kdamond_tune_intervals(ctx);
2547 				sample_interval = ctx->attrs.sample_interval ?
2548 					ctx->attrs.sample_interval : 1;
2549 
2550 			}
2551 			ctx->next_aggregation_sis = next_aggregation_sis +
2552 				ctx->attrs.aggr_interval / sample_interval;
2553 
2554 			kdamond_reset_aggregated(ctx);
2555 			kdamond_split_regions(ctx);
2556 		}
2557 
2558 		if (ctx->passed_sample_intervals >= next_ops_update_sis) {
2559 			ctx->next_ops_update_sis = next_ops_update_sis +
2560 				ctx->attrs.ops_update_interval /
2561 				sample_interval;
2562 			if (ctx->ops.update)
2563 				ctx->ops.update(ctx);
2564 			sz_limit = damon_region_sz_limit(ctx);
2565 		}
2566 	}
2567 done:
2568 	damon_for_each_target(t, ctx) {
2569 		damon_for_each_region_safe(r, next, t)
2570 			damon_destroy_region(r, t);
2571 	}
2572 
2573 	if (ctx->callback.before_terminate)
2574 		ctx->callback.before_terminate(ctx);
2575 	if (ctx->ops.cleanup)
2576 		ctx->ops.cleanup(ctx);
2577 	kfree(ctx->regions_score_histogram);
2578 
2579 	pr_debug("kdamond (%d) finishes\n", current->pid);
2580 	mutex_lock(&ctx->kdamond_lock);
2581 	ctx->kdamond = NULL;
2582 	mutex_unlock(&ctx->kdamond_lock);
2583 
2584 	kdamond_call(ctx, true);
2585 	damos_walk_cancel(ctx);
2586 
2587 	mutex_lock(&damon_lock);
2588 	nr_running_ctxs--;
2589 	if (!nr_running_ctxs && running_exclusive_ctxs)
2590 		running_exclusive_ctxs = false;
2591 	mutex_unlock(&damon_lock);
2592 
2593 	return 0;
2594 }
2595 
2596 /*
2597  * struct damon_system_ram_region - System RAM resource address region of
2598  *				    [@start, @end).
2599  * @start:	Start address of the region (inclusive).
2600  * @end:	End address of the region (exclusive).
2601  */
2602 struct damon_system_ram_region {
2603 	unsigned long start;
2604 	unsigned long end;
2605 };
2606 
walk_system_ram(struct resource * res,void * arg)2607 static int walk_system_ram(struct resource *res, void *arg)
2608 {
2609 	struct damon_system_ram_region *a = arg;
2610 
2611 	if (a->end - a->start < resource_size(res)) {
2612 		a->start = res->start;
2613 		a->end = res->end;
2614 	}
2615 	return 0;
2616 }
2617 
2618 /*
2619  * Find the biggest 'System RAM' resource and store its start and end addresses
2620  * in @start and @end, respectively.  If no System RAM is found, returns false.
2621  */
damon_find_biggest_system_ram(unsigned long * start,unsigned long * end)2622 static bool damon_find_biggest_system_ram(unsigned long *start,
2623 						unsigned long *end)
2624 
2625 {
2626 	struct damon_system_ram_region arg = {};
2627 
2628 	walk_system_ram_res(0, ULONG_MAX, &arg, walk_system_ram);
2629 	if (arg.end <= arg.start)
2630 		return false;
2631 
2632 	*start = arg.start;
2633 	*end = arg.end;
2634 	return true;
2635 }
2636 
2637 /**
2638  * damon_set_region_biggest_system_ram_default() - Set the region of the given
2639  * monitoring target as requested, or biggest 'System RAM'.
2640  * @t:		The monitoring target to set the region.
2641  * @start:	The pointer to the start address of the region.
2642  * @end:	The pointer to the end address of the region.
2643  *
2644  * This function sets the region of @t as requested by @start and @end.  If the
2645  * values of @start and @end are zero, however, this function finds the biggest
2646  * 'System RAM' resource and sets the region to cover the resource.  In the
2647  * latter case, this function saves the start and end addresses of the resource
2648  * in @start and @end, respectively.
2649  *
2650  * Return: 0 on success, negative error code otherwise.
2651  */
damon_set_region_biggest_system_ram_default(struct damon_target * t,unsigned long * start,unsigned long * end)2652 int damon_set_region_biggest_system_ram_default(struct damon_target *t,
2653 			unsigned long *start, unsigned long *end)
2654 {
2655 	struct damon_addr_range addr_range;
2656 
2657 	if (*start > *end)
2658 		return -EINVAL;
2659 
2660 	if (!*start && !*end &&
2661 		!damon_find_biggest_system_ram(start, end))
2662 		return -EINVAL;
2663 
2664 	addr_range.start = *start;
2665 	addr_range.end = *end;
2666 	return damon_set_regions(t, &addr_range, 1);
2667 }
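
/*
 * A minimal usage sketch of the above (the target 't' is assumed to be
 * already allocated by the caller):
 *
 *	unsigned long start = 0, end = 0;
 *	int err;
 *
 *	err = damon_set_region_biggest_system_ram_default(t, &start, &end);
 *	if (err)
 *		return err;
 *
 * On success, 'start' and 'end' hold the boundaries of the biggest 'System
 * RAM' resource, and the monitoring region of 't' is set to cover it.
 */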
2668 
2669 /*
2670  * damon_moving_sum() - Calculate an inferred moving sum value.
2671  * @mvsum:	Inferred sum of the last @len_window values.
2672  * @nomvsum:	Non-moving sum of the last discrete @len_window window values.
2673  * @len_window:	The number of last values to take care of.
2674  * @new_value:	New value that will be added to the pseudo moving sum.
2675  *
2676  * Moving sum (moving average * window size) is good for handling noise, but
2677  * the cost of keeping past values can be high for arbitrary window size.  This
2678  * function implements a lightweight pseudo moving sum function that doesn't
2679  * keep the past window values.
2680  *
2681  * It simply assumes there was no noise in the past, and gets the no-noise
2682  * assumed past value to drop from @nomvsum and @len_window.  @nomvsum is a
2683  * non-moving sum of the last window.  For example, if @len_window is 10 and we
2684  * have 25 values, @nomvsum is the sum of the 11th to 20th values of the 25
2685  * values.  Hence, this function simply drops @nomvsum / @len_window from
2686  * given @mvsum and add @new_value.
2687  *
2688  * For example, if @len_window is 10 and @nomvsum is 50, the last 10 values for
2689  * the last window could vary, e.g., 0, 10, 0, 10, 0, 10, 0, 0, 0, 20.  For
2690  * calculating next moving sum with a new value, we should drop 0 from 50 and
2691  * add the new value.  However, this function assumes it got value 5 for each
2692  * of the last ten times.  Based on the assumption, when the next value is
2693  * measured, it drops the assumed past value, 5, from the current sum, and adds
2694  * the new value to get the updated pseudo-moving sum.
2695  *
2696  * This means the value could have errors, but the errors disappear at every
2697  * @len_window-aligned call.  For example, if @len_window is 10, the
2698  * pseudo moving sum with 11th value to 19th value would have an error.  But
2699  * the sum with 20th value will not have the error.
2700  *
2701  * Return: Pseudo-moving sum after getting the @new_value.
2702  */
damon_moving_sum(unsigned int mvsum,unsigned int nomvsum,unsigned int len_window,unsigned int new_value)2703 static unsigned int damon_moving_sum(unsigned int mvsum, unsigned int nomvsum,
2704 		unsigned int len_window, unsigned int new_value)
2705 {
2706 	return mvsum - nomvsum / len_window + new_value;
2707 }
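
/*
 * Continuing the example above: with an mvsum of 50, a nomvsum of 50, a
 * len_window of 10, and a new value of 20, the updated pseudo moving sum is
 * 50 - 50 / 10 + 20 = 65.
 */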
2708 
2709 /**
2710  * damon_update_region_access_rate() - Update the access rate of a region.
2711  * @r:		The DAMON region to update for its access check result.
2712  * @accessed:	Whether the region was accessed during the last sampling interval.
2713  * @attrs:	The damon_attrs of the DAMON context.
2714  *
2715  * Update the access rate of a region with the region's last sampling interval
2716  * access check result.
2717  *
2718  * Usually this will be called by &damon_operations->check_accesses callback.
2719  */
damon_update_region_access_rate(struct damon_region * r,bool accessed,struct damon_attrs * attrs)2720 void damon_update_region_access_rate(struct damon_region *r, bool accessed,
2721 		struct damon_attrs *attrs)
2722 {
2723 	unsigned int len_window = 1;
2724 
2725 	/*
2726 	 * sample_interval can be zero, but cannot be larger than
2727 	 * aggr_interval, owing to validation of damon_set_attrs().
2728 	 */
2729 	if (attrs->sample_interval)
2730 		len_window = damon_max_nr_accesses(attrs);
2731 	r->nr_accesses_bp = damon_moving_sum(r->nr_accesses_bp,
2732 			r->last_nr_accesses * 10000, len_window,
2733 			accessed ? 10000 : 0);
2734 
2735 	if (accessed)
2736 		r->nr_accesses++;
2737 }
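
/*
 * For example, assuming a len_window of 20 (e.g., 5 ms sampling and 100 ms
 * aggregation intervals), a region with last_nr_accesses of 3 and an
 * nr_accesses_bp of 30,000 that is found accessed gets its nr_accesses_bp
 * updated to 30,000 - 30,000 / 20 + 10,000 = 38,500, while nr_accesses
 * itself is simply incremented.
 */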
2738 
damon_init(void)2739 static int __init damon_init(void)
2740 {
2741 	damon_region_cache = KMEM_CACHE(damon_region, 0);
2742 	if (unlikely(!damon_region_cache)) {
2743 		pr_err("creating damon_region_cache fails\n");
2744 		return -ENOMEM;
2745 	}
2746 
2747 	return 0;
2748 }
2749 
2750 subsys_initcall(damon_init);
2751 
2752 #include "tests/core-kunit.h"
2753