// SPDX-License-Identifier: GPL-2.0
/*
 * Data Access Monitor
 *
 * Author: SeongJae Park <sjpark@amazon.de>
 */

#define pr_fmt(fmt) "damon: " fmt

#include <linux/damon.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/string.h>

#define CREATE_TRACE_POINTS
#include <trace/events/damon.h>

#ifdef CONFIG_DAMON_KUNIT_TEST
#undef DAMON_MIN_REGION
#define DAMON_MIN_REGION 1
#endif

static DEFINE_MUTEX(damon_lock);
static int nr_running_ctxs;
static bool running_exclusive_ctxs;

static DEFINE_MUTEX(damon_ops_lock);
static struct damon_operations damon_registered_ops[NR_DAMON_OPS];

static struct kmem_cache *damon_region_cache __ro_after_init;

/* Should be called under damon_ops_lock with id smaller than NR_DAMON_OPS */
static bool __damon_is_registered_ops(enum damon_ops_id id)
{
	struct damon_operations empty_ops = {};

	if (!memcmp(&empty_ops, &damon_registered_ops[id], sizeof(empty_ops)))
		return false;
	return true;
}

/**
 * damon_is_registered_ops() - Check if a given damon_operations is registered.
 * @id:	Id of the damon_operations to check if registered.
 *
 * Return: true if the ops is set, false otherwise.
 */
bool damon_is_registered_ops(enum damon_ops_id id)
{
	bool registered;

	if (id >= NR_DAMON_OPS)
		return false;
	mutex_lock(&damon_ops_lock);
	registered = __damon_is_registered_ops(id);
	mutex_unlock(&damon_ops_lock);
	return registered;
}

/**
 * damon_register_ops() - Register a monitoring operations set to DAMON.
 * @ops:	monitoring operations set to register.
 *
 * This function registers a monitoring operations set having a valid &struct
 * damon_operations->id so that others can find and use it later.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int damon_register_ops(struct damon_operations *ops)
{
	int err = 0;

	if (ops->id >= NR_DAMON_OPS)
		return -EINVAL;
	mutex_lock(&damon_ops_lock);
	/* Fail for already registered ops */
	if (__damon_is_registered_ops(ops->id)) {
		err = -EINVAL;
		goto out;
	}
	damon_registered_ops[ops->id] = *ops;
out:
	mutex_unlock(&damon_ops_lock);
	return err;
}
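
/*
 * Usage sketch (illustrative only, not part of the kernel source): a
 * monitoring operations provider would typically register its set from an
 * initcall.  The 'damon_foo_*' names below are hypothetical, and a real
 * provider would use an id slot that is not registered yet.
 *
 *	static int __init damon_foo_ops_init(void)
 *	{
 *		struct damon_operations ops = {
 *			.id = DAMON_OPS_VADDR,
 *			.init = damon_foo_init,
 *			.prepare_access_checks = damon_foo_prepare,
 *			.check_accesses = damon_foo_check_accesses,
 *		};
 *
 *		return damon_register_ops(&ops);
 *	}
 *	subsys_initcall(damon_foo_ops_init);
 */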

/**
 * damon_select_ops() - Select a monitoring operations to use with the context.
 * @ctx:	monitoring context to use the operations.
 * @id:		id of the registered monitoring operations to select.
 *
 * This function finds the registered monitoring operations set of @id and
 * makes @ctx use it.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int damon_select_ops(struct damon_ctx *ctx, enum damon_ops_id id)
{
	int err = 0;

	if (id >= NR_DAMON_OPS)
		return -EINVAL;

	mutex_lock(&damon_ops_lock);
	if (!__damon_is_registered_ops(id))
		err = -EINVAL;
	else
		ctx->ops = damon_registered_ops[id];
	mutex_unlock(&damon_ops_lock);
	return err;
}
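
/*
 * Usage sketch (illustrative): selecting the virtual address spaces
 * monitoring operations for a freshly created context.
 *
 *	struct damon_ctx *ctx = damon_new_ctx();
 *
 *	if (ctx && damon_select_ops(ctx, DAMON_OPS_VADDR))
 *		pr_err("vaddr monitoring operations not registered\n");
 */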

/*
 * Construct a damon_region struct
 *
 * Returns the pointer to the new struct on success, or NULL otherwise
 */
struct damon_region *damon_new_region(unsigned long start, unsigned long end)
{
	struct damon_region *region;

	region = kmem_cache_alloc(damon_region_cache, GFP_KERNEL);
	if (!region)
		return NULL;

	region->ar.start = start;
	region->ar.end = end;
	region->nr_accesses = 0;
	INIT_LIST_HEAD(&region->list);

	region->age = 0;
	region->last_nr_accesses = 0;

	return region;
}

void damon_add_region(struct damon_region *r, struct damon_target *t)
{
	list_add_tail(&r->list, &t->regions_list);
	t->nr_regions++;
}

static void damon_del_region(struct damon_region *r, struct damon_target *t)
{
	list_del(&r->list);
	t->nr_regions--;
}

static void damon_free_region(struct damon_region *r)
{
	kmem_cache_free(damon_region_cache, r);
}

void damon_destroy_region(struct damon_region *r, struct damon_target *t)
{
	damon_del_region(r, t);
	damon_free_region(r);
}

/*
 * Check whether a region is intersecting an address range
 *
 * Returns true if it is.
 */
static bool damon_intersect(struct damon_region *r,
		struct damon_addr_range *re)
{
	return !(r->ar.end <= re->start || re->end <= r->ar.start);
}

/*
 * Fill holes in regions with new regions.
 */
static int damon_fill_regions_holes(struct damon_region *first,
		struct damon_region *last, struct damon_target *t)
{
	struct damon_region *r = first;

	damon_for_each_region_from(r, t) {
		struct damon_region *next, *newr;

		if (r == last)
			break;
		next = damon_next_region(r);
		if (r->ar.end != next->ar.start) {
			newr = damon_new_region(r->ar.end, next->ar.start);
			if (!newr)
				return -ENOMEM;
			damon_insert_region(newr, r, next, t);
		}
	}
	return 0;
}

/*
 * damon_set_regions() - Set regions of a target for given address ranges.
 * @t:		the given target.
 * @ranges:	array of new monitoring target ranges.
 * @nr_ranges:	length of @ranges.
 *
 * This function adds new regions to, or modifies existing regions of, a
 * monitoring target so that they fit in the given ranges.
 *
 * Return: 0 on success, or negative error code otherwise.
 */
int damon_set_regions(struct damon_target *t, struct damon_addr_range *ranges,
		unsigned int nr_ranges)
{
	struct damon_region *r, *next;
	unsigned int i;
	int err;

	/* Remove regions which are not in the new ranges */
	damon_for_each_region_safe(r, next, t) {
		for (i = 0; i < nr_ranges; i++) {
			if (damon_intersect(r, &ranges[i]))
				break;
		}
		if (i == nr_ranges)
			damon_destroy_region(r, t);
	}

	r = damon_first_region(t);
	/* Add new regions or resize existing regions to fit in the ranges */
	for (i = 0; i < nr_ranges; i++) {
		struct damon_region *first = NULL, *last, *newr;
		struct damon_addr_range *range;

		range = &ranges[i];
		/* Get the first/last regions intersecting with the range */
		damon_for_each_region_from(r, t) {
			if (damon_intersect(r, range)) {
				if (!first)
					first = r;
				last = r;
			}
			if (r->ar.start >= range->end)
				break;
		}
		if (!first) {
			/* no region intersects with this range */
			newr = damon_new_region(
					ALIGN_DOWN(range->start,
						DAMON_MIN_REGION),
					ALIGN(range->end, DAMON_MIN_REGION));
			if (!newr)
				return -ENOMEM;
			damon_insert_region(newr, damon_prev_region(r), r, t);
		} else {
			/* resize intersecting regions to fit in this range */
			first->ar.start = ALIGN_DOWN(range->start,
					DAMON_MIN_REGION);
			last->ar.end = ALIGN(range->end, DAMON_MIN_REGION);

			/* fill possible holes in the range */
			err = damon_fill_regions_holes(first, last, t);
			if (err)
				return err;
		}
	}
	return 0;
}
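
/*
 * Usage sketch (illustrative): making target 't' monitor two address ranges.
 * The addresses are arbitrary example values; damon_set_regions() aligns
 * them to DAMON_MIN_REGION.
 *
 *	struct damon_addr_range ranges[] = {
 *		{ .start = 0x7f0000000000, .end = 0x7f0040000000 },
 *		{ .start = 0x7f8000000000, .end = 0x7f8040000000 },
 *	};
 *
 *	int err = damon_set_regions(t, ranges, ARRAY_SIZE(ranges));
 */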

/* initialize private fields of damos_quota and return the pointer */
static struct damos_quota *damos_quota_init_priv(struct damos_quota *quota)
{
	quota->total_charged_sz = 0;
	quota->total_charged_ns = 0;
	quota->esz = 0;
	quota->charged_sz = 0;
	quota->charged_from = 0;
	quota->charge_target_from = NULL;
	quota->charge_addr_from = 0;
	return quota;
}

struct damos *damon_new_scheme(struct damos_access_pattern *pattern,
			enum damos_action action, struct damos_quota *quota,
			struct damos_watermarks *wmarks)
{
	struct damos *scheme;

	scheme = kmalloc(sizeof(*scheme), GFP_KERNEL);
	if (!scheme)
		return NULL;
	scheme->pattern = *pattern;
	scheme->action = action;
	scheme->stat = (struct damos_stat){};
	INIT_LIST_HEAD(&scheme->list);

	scheme->quota = *(damos_quota_init_priv(quota));

	scheme->wmarks = *wmarks;
	scheme->wmarks.activated = true;

	return scheme;
}
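
/*
 * Construction sketch (illustrative): a scheme that pages out regions of any
 * size which showed no access for at least five aggregation intervals, with
 * no quota (zeroed 'ms' and 'sz' mean unlimited) and no watermarks.  The
 * concrete values are example assumptions only.
 *
 *	struct damos_access_pattern pattern = {
 *		.min_sz_region = 0, .max_sz_region = ULONG_MAX,
 *		.min_nr_accesses = 0, .max_nr_accesses = 0,
 *		.min_age_region = 5, .max_age_region = UINT_MAX,
 *	};
 *	struct damos_quota quota = {};
 *	struct damos_watermarks wmarks = { .metric = DAMOS_WMARK_NONE };
 *
 *	struct damos *s = damon_new_scheme(&pattern, DAMOS_PAGEOUT, &quota,
 *			&wmarks);
 */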

void damon_add_scheme(struct damon_ctx *ctx, struct damos *s)
{
	list_add_tail(&s->list, &ctx->schemes);
}

static void damon_del_scheme(struct damos *s)
{
	list_del(&s->list);
}

static void damon_free_scheme(struct damos *s)
{
	kfree(s);
}

void damon_destroy_scheme(struct damos *s)
{
	damon_del_scheme(s);
	damon_free_scheme(s);
}

/*
 * Construct a damon_target struct
 *
 * Returns the pointer to the new struct on success, or NULL otherwise
 */
struct damon_target *damon_new_target(void)
{
	struct damon_target *t;

	t = kmalloc(sizeof(*t), GFP_KERNEL);
	if (!t)
		return NULL;

	t->pid = NULL;
	t->nr_regions = 0;
	INIT_LIST_HEAD(&t->regions_list);
	INIT_LIST_HEAD(&t->list);

	return t;
}

void damon_add_target(struct damon_ctx *ctx, struct damon_target *t)
{
	list_add_tail(&t->list, &ctx->adaptive_targets);
}

bool damon_targets_empty(struct damon_ctx *ctx)
{
	return list_empty(&ctx->adaptive_targets);
}

static void damon_del_target(struct damon_target *t)
{
	list_del(&t->list);
}

void damon_free_target(struct damon_target *t)
{
	struct damon_region *r, *next;

	damon_for_each_region_safe(r, next, t)
		damon_free_region(r);
	kfree(t);
}

void damon_destroy_target(struct damon_target *t)
{
	damon_del_target(t);
	damon_free_target(t);
}

unsigned int damon_nr_regions(struct damon_target *t)
{
	return t->nr_regions;
}

struct damon_ctx *damon_new_ctx(void)
{
	struct damon_ctx *ctx;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return NULL;

	ctx->attrs.sample_interval = 5 * 1000;
	ctx->attrs.aggr_interval = 100 * 1000;
	ctx->attrs.ops_update_interval = 60 * 1000 * 1000;

	ktime_get_coarse_ts64(&ctx->last_aggregation);
	ctx->last_ops_update = ctx->last_aggregation;

	mutex_init(&ctx->kdamond_lock);

	ctx->attrs.min_nr_regions = 10;
	ctx->attrs.max_nr_regions = 1000;

	INIT_LIST_HEAD(&ctx->adaptive_targets);
	INIT_LIST_HEAD(&ctx->schemes);

	return ctx;
}

static void damon_destroy_targets(struct damon_ctx *ctx)
{
	struct damon_target *t, *next_t;

	if (ctx->ops.cleanup) {
		ctx->ops.cleanup(ctx);
		return;
	}

	damon_for_each_target_safe(t, next_t, ctx)
		damon_destroy_target(t);
}

void damon_destroy_ctx(struct damon_ctx *ctx)
{
	struct damos *s, *next_s;

	damon_destroy_targets(ctx);

	damon_for_each_scheme_safe(s, next_s, ctx)
		damon_destroy_scheme(s);

	kfree(ctx);
}

/**
 * damon_set_attrs() - Set attributes for the monitoring.
 * @ctx:		monitoring context
 * @attrs:		monitoring attributes
 *
 * This function should not be called while the kdamond is running.
 * Every time interval is in microseconds.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int damon_set_attrs(struct damon_ctx *ctx, struct damon_attrs *attrs)
{
	if (attrs->min_nr_regions < 3)
		return -EINVAL;
	if (attrs->min_nr_regions > attrs->max_nr_regions)
		return -EINVAL;

	ctx->attrs = *attrs;
	return 0;
}
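
/*
 * Usage sketch (illustrative): 5 ms sampling and 100 ms aggregation with the
 * default region count bounds.  All intervals below are in microseconds.
 *
 *	struct damon_attrs attrs = {
 *		.sample_interval = 5000,
 *		.aggr_interval = 100000,
 *		.ops_update_interval = 60000000,
 *		.min_nr_regions = 10,
 *		.max_nr_regions = 1000,
 *	};
 *
 *	int err = damon_set_attrs(ctx, &attrs);
 */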

/**
 * damon_set_schemes() - Set data access monitoring based operation schemes.
 * @ctx:	monitoring context
 * @schemes:	array of the schemes
 * @nr_schemes:	number of entries in @schemes
 *
 * This function should not be called while the kdamond of the context is
 * running.
 */
void damon_set_schemes(struct damon_ctx *ctx, struct damos **schemes,
			ssize_t nr_schemes)
{
	struct damos *s, *next;
	ssize_t i;

	damon_for_each_scheme_safe(s, next, ctx)
		damon_destroy_scheme(s);
	for (i = 0; i < nr_schemes; i++)
		damon_add_scheme(ctx, schemes[i]);
}

/**
 * damon_nr_running_ctxs() - Return number of currently running contexts.
 */
int damon_nr_running_ctxs(void)
{
	int nr_ctxs;

	mutex_lock(&damon_lock);
	nr_ctxs = nr_running_ctxs;
	mutex_unlock(&damon_lock);

	return nr_ctxs;
}

/* Returns the size upper limit for each monitoring region */
static unsigned long damon_region_sz_limit(struct damon_ctx *ctx)
{
	struct damon_target *t;
	struct damon_region *r;
	unsigned long sz = 0;

	damon_for_each_target(t, ctx) {
		damon_for_each_region(r, t)
			sz += damon_sz_region(r);
	}

	if (ctx->attrs.min_nr_regions)
		sz /= ctx->attrs.min_nr_regions;
	if (sz < DAMON_MIN_REGION)
		sz = DAMON_MIN_REGION;

	return sz;
}
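
/*
 * Worked example for the limit above: if the regions of the targets cover
 * 1 GiB in total and 'min_nr_regions' is 10, each region may grow up to
 * 100 MiB, so merging alone can never shrink the number of regions below
 * the user-specified minimum.
 */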

static int kdamond_fn(void *data);

/*
 * __damon_start() - Starts monitoring with given context.
 * @ctx:	monitoring context
 *
 * This function should be called while damon_lock is held.
 *
 * Return: 0 on success, negative error code otherwise.
 */
static int __damon_start(struct damon_ctx *ctx)
{
	int err = -EBUSY;

	mutex_lock(&ctx->kdamond_lock);
	if (!ctx->kdamond) {
		err = 0;
		ctx->kdamond = kthread_run(kdamond_fn, ctx, "kdamond.%d",
				nr_running_ctxs);
		if (IS_ERR(ctx->kdamond)) {
			err = PTR_ERR(ctx->kdamond);
			ctx->kdamond = NULL;
		}
	}
	mutex_unlock(&ctx->kdamond_lock);

	return err;
}

/**
 * damon_start() - Starts monitoring for a given group of contexts.
 * @ctxs:	an array of the pointers for contexts to start monitoring
 * @nr_ctxs:	size of @ctxs
 * @exclusive:	exclusiveness of this contexts group
 *
 * This function starts a group of monitoring threads for a group of monitoring
 * contexts.  One thread per each context is created and run in parallel.  The
 * caller should handle synchronization between the threads by itself.  If
 * @exclusive is true and a group of threads that was created by another
 * 'damon_start()' call is currently running, this function does nothing but
 * returns -EBUSY.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int damon_start(struct damon_ctx **ctxs, int nr_ctxs, bool exclusive)
{
	int i;
	int err = 0;

	mutex_lock(&damon_lock);
	if ((exclusive && nr_running_ctxs) ||
			(!exclusive && running_exclusive_ctxs)) {
		mutex_unlock(&damon_lock);
		return -EBUSY;
	}

	for (i = 0; i < nr_ctxs; i++) {
		err = __damon_start(ctxs[i]);
		if (err)
			break;
		nr_running_ctxs++;
	}
	if (exclusive && nr_running_ctxs)
		running_exclusive_ctxs = true;
	mutex_unlock(&damon_lock);

	return err;
}
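
/*
 * Usage sketch (illustrative): starting a single context exclusively,
 * similar to what DAMON's user interface modules do.
 *
 *	struct damon_ctx *ctxs[1] = { ctx };
 *	int err = damon_start(ctxs, 1, true);
 *
 *	if (err)
 *		pr_err("cannot start the monitoring (%d)\n", err);
 */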

/*
 * __damon_stop() - Stops monitoring of a given context.
 * @ctx:	monitoring context
 *
 * Return: 0 on success, negative error code otherwise.
 */
static int __damon_stop(struct damon_ctx *ctx)
{
	struct task_struct *tsk;

	mutex_lock(&ctx->kdamond_lock);
	tsk = ctx->kdamond;
	if (tsk) {
		get_task_struct(tsk);
		mutex_unlock(&ctx->kdamond_lock);
		kthread_stop(tsk);
		put_task_struct(tsk);
		return 0;
	}
	mutex_unlock(&ctx->kdamond_lock);

	return -EPERM;
}

/**
 * damon_stop() - Stops monitoring for a given group of contexts.
 * @ctxs:	an array of the pointers for contexts to stop monitoring
 * @nr_ctxs:	size of @ctxs
 *
 * Return: 0 on success, negative error code otherwise.
 */
int damon_stop(struct damon_ctx **ctxs, int nr_ctxs)
{
	int i, err = 0;

	for (i = 0; i < nr_ctxs; i++) {
		/* nr_running_ctxs is decremented in kdamond_fn */
		err = __damon_stop(ctxs[i]);
		if (err)
			break;
	}
	return err;
}
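
/*
 * Usage sketch (illustrative), stopping the context group started above.
 * Note that if a kdamond has already terminated by itself, __damon_stop()
 * finds no thread for it and this returns -EPERM.
 *
 *	err = damon_stop(ctxs, 1);
 */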

/*
 * damon_check_reset_time_interval() - Check if a time interval has elapsed.
 * @baseline:	the time to check whether the interval has elapsed since
 * @interval:	the time interval (microseconds)
 *
 * See whether the given time interval has passed since the given baseline
 * time.  If so, it also updates the baseline to the current time for the next
 * check.
 *
 * Return:	true if the time interval has passed, or false otherwise.
 */
static bool damon_check_reset_time_interval(struct timespec64 *baseline,
		unsigned long interval)
{
	struct timespec64 now;

	ktime_get_coarse_ts64(&now);
	if ((timespec64_to_ns(&now) - timespec64_to_ns(baseline)) <
			interval * 1000)
		return false;
	*baseline = now;
	return true;
}
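
/*
 * Worked example for the check above: with a 100,000 us (100 ms) aggregation
 * interval, the comparison is against 100,000 * 1000 = 100,000,000 ns of
 * elapsed wall clock time; once that much has passed, @baseline is reset so
 * the next window is measured from the current check.
 */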

/*
 * Check whether it is time to flush the aggregated information
 */
static bool kdamond_aggregate_interval_passed(struct damon_ctx *ctx)
{
	return damon_check_reset_time_interval(&ctx->last_aggregation,
			ctx->attrs.aggr_interval);
}

/*
 * Reset the aggregated monitoring results ('nr_accesses' of each region).
 */
static void kdamond_reset_aggregated(struct damon_ctx *c)
{
	struct damon_target *t;
	unsigned int ti = 0;	/* target's index */

	damon_for_each_target(t, c) {
		struct damon_region *r;

		damon_for_each_region(r, t) {
			trace_damon_aggregated(t, ti, r, damon_nr_regions(t));
			r->last_nr_accesses = r->nr_accesses;
			r->nr_accesses = 0;
		}
		ti++;
	}
}

static void damon_split_region_at(struct damon_target *t,
				  struct damon_region *r, unsigned long sz_r);

static bool __damos_valid_target(struct damon_region *r, struct damos *s)
{
	unsigned long sz;

	sz = damon_sz_region(r);
	return s->pattern.min_sz_region <= sz &&
		sz <= s->pattern.max_sz_region &&
		s->pattern.min_nr_accesses <= r->nr_accesses &&
		r->nr_accesses <= s->pattern.max_nr_accesses &&
		s->pattern.min_age_region <= r->age &&
		r->age <= s->pattern.max_age_region;
}

static bool damos_valid_target(struct damon_ctx *c, struct damon_target *t,
		struct damon_region *r, struct damos *s)
{
	bool ret = __damos_valid_target(r, s);

	if (!ret || !s->quota.esz || !c->ops.get_scheme_score)
		return ret;

	return c->ops.get_scheme_score(c, t, r, s) >= s->quota.min_score;
}

/*
 * damos_skip_charged_region() - Check if the given region or starting part of
 * it is already charged for the DAMOS quota.
 * @t:	The target of the region.
 * @rp:	The pointer to the region.
 * @s:	The scheme to be applied.
 *
 * If the quota of a scheme has been exceeded in a quota charge window, the
 * scheme's action would be applied to only a part of the target access
 * pattern fulfilling regions.  To avoid applying the scheme action to only
 * already applied regions, DAMON skips applying the scheme action to the
 * regions that were charged in the previous charge window.
 *
 * This function checks if a given region should be skipped or not for the
 * reason.  If only the starting part of the region has previously been
 * charged, this function splits the region into two so that the second one
 * covers the area that was not charged in the previous charge window, saves
 * the second region in *rp, and returns false, so that the caller can apply
 * the DAMON action to the second one.
 *
 * Return: true if the region should be entirely skipped, false otherwise.
 */
static bool damos_skip_charged_region(struct damon_target *t,
		struct damon_region **rp, struct damos *s)
{
	struct damon_region *r = *rp;
	struct damos_quota *quota = &s->quota;
	unsigned long sz_to_skip;

	/* Skip previously charged regions */
	if (quota->charge_target_from) {
		if (t != quota->charge_target_from)
			return true;
		if (r == damon_last_region(t)) {
			quota->charge_target_from = NULL;
			quota->charge_addr_from = 0;
			return true;
		}
		if (quota->charge_addr_from &&
				r->ar.end <= quota->charge_addr_from)
			return true;

		if (quota->charge_addr_from && r->ar.start <
				quota->charge_addr_from) {
			sz_to_skip = ALIGN_DOWN(quota->charge_addr_from -
					r->ar.start, DAMON_MIN_REGION);
			if (!sz_to_skip) {
				if (damon_sz_region(r) <= DAMON_MIN_REGION)
					return true;
				sz_to_skip = DAMON_MIN_REGION;
			}
			damon_split_region_at(t, r, sz_to_skip);
			r = damon_next_region(r);
			*rp = r;
		}
		quota->charge_target_from = NULL;
		quota->charge_addr_from = 0;
	}
	return false;
}

static void damos_update_stat(struct damos *s,
		unsigned long sz_tried, unsigned long sz_applied)
{
	s->stat.nr_tried++;
	s->stat.sz_tried += sz_tried;
	if (sz_applied)
		s->stat.nr_applied++;
	s->stat.sz_applied += sz_applied;
}

static void damos_apply_scheme(struct damon_ctx *c, struct damon_target *t,
		struct damon_region *r, struct damos *s)
{
	struct damos_quota *quota = &s->quota;
	unsigned long sz = damon_sz_region(r);
	struct timespec64 begin, end;
	unsigned long sz_applied = 0;
	int err = 0;

	if (c->ops.apply_scheme) {
		if (quota->esz && quota->charged_sz + sz > quota->esz) {
			sz = ALIGN_DOWN(quota->esz - quota->charged_sz,
					DAMON_MIN_REGION);
			if (!sz)
				goto update_stat;
			damon_split_region_at(t, r, sz);
		}
		ktime_get_coarse_ts64(&begin);
		if (c->callback.before_damos_apply)
			err = c->callback.before_damos_apply(c, t, r, s);
		if (!err)
			sz_applied = c->ops.apply_scheme(c, t, r, s);
		ktime_get_coarse_ts64(&end);
		quota->total_charged_ns += timespec64_to_ns(&end) -
			timespec64_to_ns(&begin);
		quota->charged_sz += sz;
		if (quota->esz && quota->charged_sz >= quota->esz) {
			quota->charge_target_from = t;
			quota->charge_addr_from = r->ar.end + 1;
		}
	}
	if (s->action != DAMOS_STAT)
		r->age = 0;

update_stat:
	damos_update_stat(s, sz, sz_applied);
}

static void damon_do_apply_schemes(struct damon_ctx *c,
				   struct damon_target *t,
				   struct damon_region *r)
{
	struct damos *s;

	damon_for_each_scheme(s, c) {
		struct damos_quota *quota = &s->quota;

		if (!s->wmarks.activated)
			continue;

		/* Check the quota */
		if (quota->esz && quota->charged_sz >= quota->esz)
			continue;

		if (damos_skip_charged_region(t, &r, s))
			continue;

		if (!damos_valid_target(c, t, r, s))
			continue;

		damos_apply_scheme(c, t, r, s);
	}
}

/* Shouldn't be called if quota->ms and quota->sz are zero */
static void damos_set_effective_quota(struct damos_quota *quota)
{
	unsigned long throughput;
	unsigned long esz;

	if (!quota->ms) {
		quota->esz = quota->sz;
		return;
	}

	if (quota->total_charged_ns)
		throughput = quota->total_charged_sz * 1000000 /
			quota->total_charged_ns;
	else
		throughput = PAGE_SIZE * 1024;
	esz = throughput * quota->ms;

	if (quota->sz && quota->sz < esz)
		esz = quota->sz;
	quota->esz = esz;
}
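
/*
 * Worked example for the conversion above: 'throughput' is in bytes per
 * millisecond.  If 4 MiB were charged over 8,000,000 ns (8 ms) of apply
 * time, the throughput is 4 MiB * 1,000,000 / 8,000,000 = 0.5 MiB/ms, so a
 * 10 ms time quota becomes a 5 MiB effective size quota (esz), further
 * capped by the size quota if that is smaller.  Before any charging has
 * happened, PAGE_SIZE * 1024 bytes/ms (4 MiB/ms with 4 KiB pages) is
 * assumed.
 */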

static void damos_adjust_quota(struct damon_ctx *c, struct damos *s)
{
	struct damos_quota *quota = &s->quota;
	struct damon_target *t;
	struct damon_region *r;
	unsigned long cumulated_sz;
	unsigned int score, max_score = 0;

	if (!quota->ms && !quota->sz)
		return;

	/* New charge window starts */
	if (time_after_eq(jiffies, quota->charged_from +
				msecs_to_jiffies(quota->reset_interval))) {
		if (quota->esz && quota->charged_sz >= quota->esz)
			s->stat.qt_exceeds++;
		quota->total_charged_sz += quota->charged_sz;
		quota->charged_from = jiffies;
		quota->charged_sz = 0;
		damos_set_effective_quota(quota);
	}

	if (!c->ops.get_scheme_score)
		return;

	/* Fill up the score histogram */
	memset(quota->histogram, 0, sizeof(quota->histogram));
	damon_for_each_target(t, c) {
		damon_for_each_region(r, t) {
			if (!__damos_valid_target(r, s))
				continue;
			score = c->ops.get_scheme_score(c, t, r, s);
			quota->histogram[score] += damon_sz_region(r);
			if (score > max_score)
				max_score = score;
		}
	}

	/* Set the min score limit */
	for (cumulated_sz = 0, score = max_score; ; score--) {
		cumulated_sz += quota->histogram[score];
		if (cumulated_sz >= quota->esz || !score)
			break;
	}
	quota->min_score = score;
}

static void kdamond_apply_schemes(struct damon_ctx *c)
{
	struct damon_target *t;
	struct damon_region *r, *next_r;
	struct damos *s;

	damon_for_each_scheme(s, c) {
		if (!s->wmarks.activated)
			continue;

		damos_adjust_quota(c, s);
	}

	damon_for_each_target(t, c) {
		damon_for_each_region_safe(r, next_r, t)
			damon_do_apply_schemes(c, t, r);
	}
}

/*
 * Merge two adjacent regions into one region
 */
static void damon_merge_two_regions(struct damon_target *t,
		struct damon_region *l, struct damon_region *r)
{
	unsigned long sz_l = damon_sz_region(l), sz_r = damon_sz_region(r);

	l->nr_accesses = (l->nr_accesses * sz_l + r->nr_accesses * sz_r) /
			(sz_l + sz_r);
	l->age = (l->age * sz_l + r->age * sz_r) / (sz_l + sz_r);
	l->ar.end = r->ar.end;
	damon_destroy_region(r, t);
}
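
/*
 * Worked example for the size-weighted averaging above: merging a 30 MiB
 * region with 'nr_accesses' 4 and an adjacent 10 MiB region with
 * 'nr_accesses' 8 yields (4 * 30 + 8 * 10) / (30 + 10) = 5 for the merged
 * region; 'age' is averaged in the same way.
 */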

/*
 * Merge adjacent regions having similar access frequencies
 *
 * t		target affected by this merge operation
 * thres	'->nr_accesses' diff threshold for the merge
 * sz_limit	size upper limit of each region
 */
static void damon_merge_regions_of(struct damon_target *t, unsigned int thres,
				   unsigned long sz_limit)
{
	struct damon_region *r, *prev = NULL, *next;

	damon_for_each_region_safe(r, next, t) {
		if (abs(r->nr_accesses - r->last_nr_accesses) > thres)
			r->age = 0;
		else
			r->age++;

		if (prev && prev->ar.end == r->ar.start &&
		    abs(prev->nr_accesses - r->nr_accesses) <= thres &&
		    damon_sz_region(prev) + damon_sz_region(r) <= sz_limit)
			damon_merge_two_regions(t, prev, r);
		else
			prev = r;
	}
}

/*
 * Merge adjacent regions having similar access frequencies
 *
 * threshold	'->nr_accesses' diff threshold for the merge
 * sz_limit	size upper limit of each region
 *
 * This function merges monitoring target regions which are adjacent and their
 * access frequencies are similar.  This is for minimizing the monitoring
 * overhead under the dynamically changeable access pattern.  If a merge was
 * unnecessarily made, later 'kdamond_split_regions()' will revert it.
 */
static void kdamond_merge_regions(struct damon_ctx *c, unsigned int threshold,
				  unsigned long sz_limit)
{
	struct damon_target *t;

	damon_for_each_target(t, c)
		damon_merge_regions_of(t, threshold, sz_limit);
}

/*
 * Split a region in two
 *
 * r		the region to be split
 * sz_r		size of the first sub-region that will be made
 */
static void damon_split_region_at(struct damon_target *t,
				  struct damon_region *r, unsigned long sz_r)
{
	struct damon_region *new;

	new = damon_new_region(r->ar.start + sz_r, r->ar.end);
	if (!new)
		return;

	r->ar.end = new->ar.start;

	new->age = r->age;
	new->last_nr_accesses = r->last_nr_accesses;

	damon_insert_region(new, r, damon_next_region(r), t);
}

/* Split every region in the given target into 'nr_subs' regions */
static void damon_split_regions_of(struct damon_target *t, int nr_subs)
{
	struct damon_region *r, *next;
	unsigned long sz_region, sz_sub = 0;
	int i;

	damon_for_each_region_safe(r, next, t) {
		sz_region = damon_sz_region(r);

		for (i = 0; i < nr_subs - 1 &&
				sz_region > 2 * DAMON_MIN_REGION; i++) {
			/*
			 * Randomly select size of left sub-region to be at
			 * least 10% and at most 90% of the original region
			 */
			sz_sub = ALIGN_DOWN(damon_rand(1, 10) *
					sz_region / 10, DAMON_MIN_REGION);
			/* Do not allow blank region */
			if (sz_sub == 0 || sz_sub >= sz_region)
				continue;

			damon_split_region_at(t, r, sz_sub);
			sz_region = sz_sub;
		}
	}
}

/*
 * Split every target region into randomly-sized small regions
 *
 * This function splits every target region into random-sized small regions if
 * the current total number of the regions is equal to or smaller than half of
 * the user-specified maximum number of regions.  This is for maximizing the
 * monitoring accuracy under the dynamically changeable access patterns.  If a
 * split was unnecessarily made, later 'kdamond_merge_regions()' will revert
 * it.
 */
static void kdamond_split_regions(struct damon_ctx *ctx)
{
	struct damon_target *t;
	unsigned int nr_regions = 0;
	static unsigned int last_nr_regions;
	int nr_subregions = 2;

	damon_for_each_target(t, ctx)
		nr_regions += damon_nr_regions(t);

	if (nr_regions > ctx->attrs.max_nr_regions / 2)
		return;

	/* Maybe the middle of the region has different access frequency */
	if (last_nr_regions == nr_regions &&
			nr_regions < ctx->attrs.max_nr_regions / 3)
		nr_subregions = 3;

	damon_for_each_target(t, ctx)
		damon_split_regions_of(t, nr_subregions);

	last_nr_regions = nr_regions;
}

/*
 * Check whether it is time to update the operations-related data structures.
 *
 * Returns true if it is.
 */
static bool kdamond_need_update_operations(struct damon_ctx *ctx)
{
	return damon_check_reset_time_interval(&ctx->last_ops_update,
			ctx->attrs.ops_update_interval);
}

/*
 * Check whether current monitoring should be stopped
 *
 * The monitoring is stopped when either the user requested to stop, or all
 * monitoring targets are invalid.
 *
 * Returns true if the current monitoring should be stopped.
 */
static bool kdamond_need_stop(struct damon_ctx *ctx)
{
	struct damon_target *t;

	if (kthread_should_stop())
		return true;

	if (!ctx->ops.target_valid)
		return false;

	damon_for_each_target(t, ctx) {
		if (ctx->ops.target_valid(t))
			return false;
	}

	return true;
}

static unsigned long damos_wmark_metric_value(enum damos_wmark_metric metric)
{
	struct sysinfo i;

	switch (metric) {
	case DAMOS_WMARK_FREE_MEM_RATE:
		si_meminfo(&i);
		return i.freeram * 1000 / i.totalram;
	default:
		break;
	}
	return -EINVAL;
}
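
/*
 * Worked example for the metric above: the free memory rate is in permille,
 * so a system with 2 GiB free out of 16 GiB total yields
 * 2 * 1000 / 16 = 125.
 */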

/*
 * Returns zero if the scheme is active.  Else, returns time to wait for next
 * watermark check in microseconds.
 */
static unsigned long damos_wmark_wait_us(struct damos *scheme)
{
	unsigned long metric;

	if (scheme->wmarks.metric == DAMOS_WMARK_NONE)
		return 0;

	metric = damos_wmark_metric_value(scheme->wmarks.metric);
	/* higher than high watermark or lower than low watermark */
	if (metric > scheme->wmarks.high || scheme->wmarks.low > metric) {
		if (scheme->wmarks.activated)
			pr_debug("deactivate a scheme (%d) for %s wmark\n",
					scheme->action,
					metric > scheme->wmarks.high ?
					"high" : "low");
		scheme->wmarks.activated = false;
		return scheme->wmarks.interval;
	}

	/* inactive and higher than middle watermark */
	if ((scheme->wmarks.high >= metric && metric >= scheme->wmarks.mid) &&
			!scheme->wmarks.activated)
		return scheme->wmarks.interval;

	if (!scheme->wmarks.activated)
		pr_debug("activate a scheme (%d)\n", scheme->action);
	scheme->wmarks.activated = true;
	return 0;
}

static void kdamond_usleep(unsigned long usecs)
{
	/* See Documentation/timers/timers-howto.rst for the thresholds */
	if (usecs > 20 * USEC_PER_MSEC)
		schedule_timeout_idle(usecs_to_jiffies(usecs));
	else
		usleep_idle_range(usecs, usecs + 1);
}

/* Returns negative error code if the kdamond should stop without activation */
static int kdamond_wait_activation(struct damon_ctx *ctx)
{
	struct damos *s;
	unsigned long wait_time;
	unsigned long min_wait_time = 0;
	bool init_wait_time = false;

	while (!kdamond_need_stop(ctx)) {
		damon_for_each_scheme(s, ctx) {
			wait_time = damos_wmark_wait_us(s);
			if (!init_wait_time || wait_time < min_wait_time) {
				init_wait_time = true;
				min_wait_time = wait_time;
			}
		}
		if (!min_wait_time)
			return 0;

		kdamond_usleep(min_wait_time);

		if (ctx->callback.after_wmarks_check &&
				ctx->callback.after_wmarks_check(ctx))
			break;
	}
	return -EBUSY;
}

/*
 * The monitoring daemon that runs as a kernel thread
 */
static int kdamond_fn(void *data)
{
	struct damon_ctx *ctx = data;
	struct damon_target *t;
	struct damon_region *r, *next;
	unsigned int max_nr_accesses = 0;
	unsigned long sz_limit = 0;

	pr_debug("kdamond (%d) starts\n", current->pid);

	if (ctx->ops.init)
		ctx->ops.init(ctx);
	if (ctx->callback.before_start && ctx->callback.before_start(ctx))
		goto done;

	sz_limit = damon_region_sz_limit(ctx);

	while (!kdamond_need_stop(ctx)) {
		if (kdamond_wait_activation(ctx))
			break;

		if (ctx->ops.prepare_access_checks)
			ctx->ops.prepare_access_checks(ctx);
		if (ctx->callback.after_sampling &&
				ctx->callback.after_sampling(ctx))
			break;

		kdamond_usleep(ctx->attrs.sample_interval);

		if (ctx->ops.check_accesses)
			max_nr_accesses = ctx->ops.check_accesses(ctx);

		if (kdamond_aggregate_interval_passed(ctx)) {
			kdamond_merge_regions(ctx,
					max_nr_accesses / 10,
					sz_limit);
			if (ctx->callback.after_aggregation &&
					ctx->callback.after_aggregation(ctx))
				break;
			kdamond_apply_schemes(ctx);
			kdamond_reset_aggregated(ctx);
			kdamond_split_regions(ctx);
			if (ctx->ops.reset_aggregated)
				ctx->ops.reset_aggregated(ctx);
		}

		if (kdamond_need_update_operations(ctx)) {
			if (ctx->ops.update)
				ctx->ops.update(ctx);
			sz_limit = damon_region_sz_limit(ctx);
		}
	}
done:
	damon_for_each_target(t, ctx) {
		damon_for_each_region_safe(r, next, t)
			damon_destroy_region(r, t);
	}

	if (ctx->callback.before_terminate)
		ctx->callback.before_terminate(ctx);
	if (ctx->ops.cleanup)
		ctx->ops.cleanup(ctx);

	pr_debug("kdamond (%d) finishes\n", current->pid);
	mutex_lock(&ctx->kdamond_lock);
	ctx->kdamond = NULL;
	mutex_unlock(&ctx->kdamond_lock);

	mutex_lock(&damon_lock);
	nr_running_ctxs--;
	if (!nr_running_ctxs && running_exclusive_ctxs)
		running_exclusive_ctxs = false;
	mutex_unlock(&damon_lock);

	return 0;
}

/*
 * struct damon_system_ram_region - System RAM resource address region of
 *				    [@start, @end).
 * @start:	Start address of the region (inclusive).
 * @end:	End address of the region (exclusive).
 */
struct damon_system_ram_region {
	unsigned long start;
	unsigned long end;
};

static int walk_system_ram(struct resource *res, void *arg)
{
	struct damon_system_ram_region *a = arg;

	if (a->end - a->start < resource_size(res)) {
		a->start = res->start;
		a->end = res->end;
	}
	return 0;
}

/*
 * Find the biggest 'System RAM' resource and store its start and end address
 * in @start and @end, respectively.  If no System RAM is found, returns
 * false.
 */
static bool damon_find_biggest_system_ram(unsigned long *start,
						unsigned long *end)
{
	struct damon_system_ram_region arg = {};

	walk_system_ram_res(0, ULONG_MAX, &arg, walk_system_ram);
	if (arg.end <= arg.start)
		return false;

	*start = arg.start;
	*end = arg.end;
	return true;
}

/**
 * damon_set_region_biggest_system_ram_default() - Set the region of the given
 * monitoring target as requested, or biggest 'System RAM'.
 * @t:		The monitoring target to set the region.
 * @start:	The pointer to the start address of the region.
 * @end:	The pointer to the end address of the region.
 *
 * This function sets the region of @t as requested by @start and @end.  If the
 * values of @start and @end are zero, however, this function finds the biggest
 * 'System RAM' resource and sets the region to cover the resource.  In the
 * latter case, this function saves the start and end addresses of the resource
 * in @start and @end, respectively.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int damon_set_region_biggest_system_ram_default(struct damon_target *t,
			unsigned long *start, unsigned long *end)
{
	struct damon_addr_range addr_range;

	if (*start > *end)
		return -EINVAL;

	if (!*start && !*end &&
		!damon_find_biggest_system_ram(start, end))
		return -EINVAL;

	addr_range.start = *start;
	addr_range.end = *end;
	return damon_set_regions(t, &addr_range, 1);
}
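
/*
 * Usage sketch (illustrative): letting the function pick the biggest
 * 'System RAM' resource by passing zeroed addresses, e.g. for a physical
 * address space monitoring target.
 *
 *	unsigned long start = 0, end = 0;
 *	int err = damon_set_region_biggest_system_ram_default(t, &start,
 *			&end);
 */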

static int __init damon_init(void)
{
	damon_region_cache = KMEM_CACHE(damon_region, 0);
	if (unlikely(!damon_region_cache)) {
		pr_err("creating damon_region_cache fails\n");
		return -ENOMEM;
	}

	return 0;
}

subsys_initcall(damon_init);

#include "core-test.h"