1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Data Access Monitor
4 *
5 * Author: SeongJae Park <sj@kernel.org>
6 */
7
8 #define pr_fmt(fmt) "damon: " fmt
9
10 #include <linux/damon.h>
11 #include <linux/delay.h>
12 #include <linux/kthread.h>
13 #include <linux/mm.h>
14 #include <linux/psi.h>
15 #include <linux/slab.h>
16 #include <linux/string.h>
17
18 #define CREATE_TRACE_POINTS
19 #include <trace/events/damon.h>
20
21 #ifdef CONFIG_DAMON_KUNIT_TEST
22 #undef DAMON_MIN_REGION
23 #define DAMON_MIN_REGION 1
24 #endif
25
26 static DEFINE_MUTEX(damon_lock);
27 static int nr_running_ctxs;
28 static bool running_exclusive_ctxs;
29
30 static DEFINE_MUTEX(damon_ops_lock);
31 static struct damon_operations damon_registered_ops[NR_DAMON_OPS];
32
33 static struct kmem_cache *damon_region_cache __ro_after_init;
34
35 /* Should be called under damon_ops_lock with id smaller than NR_DAMON_OPS */
36 static bool __damon_is_registered_ops(enum damon_ops_id id)
37 {
38 struct damon_operations empty_ops = {};
39
40 if (!memcmp(&empty_ops, &damon_registered_ops[id], sizeof(empty_ops)))
41 return false;
42 return true;
43 }
44
45 /**
46 * damon_is_registered_ops() - Check if a given damon_operations is registered.
47 * @id: Id of the damon_operations to check if registered.
48 *
49 * Return: true if the ops is set, false otherwise.
50 */
51 bool damon_is_registered_ops(enum damon_ops_id id)
52 {
53 bool registered;
54
55 if (id >= NR_DAMON_OPS)
56 return false;
57 mutex_lock(&damon_ops_lock);
58 registered = __damon_is_registered_ops(id);
59 mutex_unlock(&damon_ops_lock);
60 return registered;
61 }
62
63 /**
64 * damon_register_ops() - Register a monitoring operations set to DAMON.
65 * @ops: monitoring operations set to register.
66 *
67 * This function registers a monitoring operations set of valid &struct
68 * damon_operations->id so that others can find and use them later.
69 *
70 * Return: 0 on success, negative error code otherwise.
71 */
72 int damon_register_ops(struct damon_operations *ops)
73 {
74 int err = 0;
75
76 if (ops->id >= NR_DAMON_OPS)
77 return -EINVAL;
78 mutex_lock(&damon_ops_lock);
79 /* Fail for already registered ops */
80 if (__damon_is_registered_ops(ops->id)) {
81 err = -EINVAL;
82 goto out;
83 }
84 damon_registered_ops[ops->id] = *ops;
85 out:
86 mutex_unlock(&damon_ops_lock);
87 return err;
88 }
89
90 /**
91 * damon_select_ops() - Select a monitoring operations to use with the context.
92 * @ctx: monitoring context to use the operations.
93 * @id: id of the registered monitoring operations to select.
94 *
95 * This function finds the registered monitoring operations set of @id and
96 * makes @ctx use it.
97 *
98 * Return: 0 on success, negative error code otherwise.
99 */
100 int damon_select_ops(struct damon_ctx *ctx, enum damon_ops_id id)
101 {
102 int err = 0;
103
104 if (id >= NR_DAMON_OPS)
105 return -EINVAL;
106
107 mutex_lock(&damon_ops_lock);
108 if (!__damon_is_registered_ops(id))
109 err = -EINVAL;
110 else
111 ctx->ops = damon_registered_ops[id];
112 mutex_unlock(&damon_ops_lock);
113 return err;
114 }
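
/*
 * Example (illustrative sketch, not part of the kernel build): a typical
 * user of this API creates a context and selects one of the registered
 * operations sets for it, e.g. the physical address space operations:
 *
 *	struct damon_ctx *ctx = damon_new_ctx();
 *
 *	if (!ctx)
 *		return -ENOMEM;
 *	if (damon_select_ops(ctx, DAMON_OPS_PADDR))
 *		pr_err("paddr ops not registered\n");
 *
 * DAMON_OPS_PADDR is usable only if the corresponding operations set has
 * been built in and registered (CONFIG_DAMON_PADDR).
 */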
115
116 /*
117 * Construct a damon_region struct
118 *
119 * Returns the pointer to the new struct if success, or NULL otherwise
120 */
121 struct damon_region *damon_new_region(unsigned long start, unsigned long end)
122 {
123 struct damon_region *region;
124
125 region = kmem_cache_alloc(damon_region_cache, GFP_KERNEL);
126 if (!region)
127 return NULL;
128
129 region->ar.start = start;
130 region->ar.end = end;
131 region->nr_accesses = 0;
132 region->nr_accesses_bp = 0;
133 INIT_LIST_HEAD(&region->list);
134
135 region->age = 0;
136 region->last_nr_accesses = 0;
137
138 return region;
139 }
140
141 void damon_add_region(struct damon_region *r, struct damon_target *t)
142 {
143 list_add_tail(&r->list, &t->regions_list);
144 t->nr_regions++;
145 }
146
147 static void damon_del_region(struct damon_region *r, struct damon_target *t)
148 {
149 list_del(&r->list);
150 t->nr_regions--;
151 }
152
153 static void damon_free_region(struct damon_region *r)
154 {
155 kmem_cache_free(damon_region_cache, r);
156 }
157
158 void damon_destroy_region(struct damon_region *r, struct damon_target *t)
159 {
160 damon_del_region(r, t);
161 damon_free_region(r);
162 }
163
164 /*
165 * Check whether a region is intersecting an address range
166 *
167 * Returns true if it is.
168 */
169 static bool damon_intersect(struct damon_region *r,
170 struct damon_addr_range *re)
171 {
172 return !(r->ar.end <= re->start || re->end <= r->ar.start);
173 }
174
175 /*
176 * Fill holes in regions with new regions.
177 */
178 static int damon_fill_regions_holes(struct damon_region *first,
179 struct damon_region *last, struct damon_target *t)
180 {
181 struct damon_region *r = first;
182
183 damon_for_each_region_from(r, t) {
184 struct damon_region *next, *newr;
185
186 if (r == last)
187 break;
188 next = damon_next_region(r);
189 if (r->ar.end != next->ar.start) {
190 newr = damon_new_region(r->ar.end, next->ar.start);
191 if (!newr)
192 return -ENOMEM;
193 damon_insert_region(newr, r, next, t);
194 }
195 }
196 return 0;
197 }
198
199 /*
200 * damon_set_regions() - Set regions of a target for given address ranges.
201 * @t: the given target.
202 * @ranges: array of new monitoring target ranges.
203 * @nr_ranges: length of @ranges.
204 *
205 * This function adds new regions to, or modifies existing regions of, a
206 * monitoring target to fit in the given ranges.
207 *
208 * Return: 0 if success, or negative error code otherwise.
209 */
210 int damon_set_regions(struct damon_target *t, struct damon_addr_range *ranges,
211 unsigned int nr_ranges)
212 {
213 struct damon_region *r, *next;
214 unsigned int i;
215 int err;
216
217 /* Remove regions which are not in the new ranges */
218 damon_for_each_region_safe(r, next, t) {
219 for (i = 0; i < nr_ranges; i++) {
220 if (damon_intersect(r, &ranges[i]))
221 break;
222 }
223 if (i == nr_ranges)
224 damon_destroy_region(r, t);
225 }
226
227 r = damon_first_region(t);
228 /* Add new regions or resize existing regions to fit in the ranges */
229 for (i = 0; i < nr_ranges; i++) {
230 struct damon_region *first = NULL, *last, *newr;
231 struct damon_addr_range *range;
232
233 range = &ranges[i];
234 /* Get the first/last regions intersecting with the range */
235 damon_for_each_region_from(r, t) {
236 if (damon_intersect(r, range)) {
237 if (!first)
238 first = r;
239 last = r;
240 }
241 if (r->ar.start >= range->end)
242 break;
243 }
244 if (!first) {
245 /* no region intersects with this range */
246 newr = damon_new_region(
247 ALIGN_DOWN(range->start,
248 DAMON_MIN_REGION),
249 ALIGN(range->end, DAMON_MIN_REGION));
250 if (!newr)
251 return -ENOMEM;
252 damon_insert_region(newr, damon_prev_region(r), r, t);
253 } else {
254 /* resize intersecting regions to fit in this range */
255 first->ar.start = ALIGN_DOWN(range->start,
256 DAMON_MIN_REGION);
257 last->ar.end = ALIGN(range->end, DAMON_MIN_REGION);
258
259 /* fill possible holes in the range */
260 err = damon_fill_regions_holes(first, last, t);
261 if (err)
262 return err;
263 }
264 }
265 return 0;
266 }
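
/*
 * Example (illustrative sketch): make a target monitor a single address
 * range.  damon_set_regions() takes care of aligning the boundaries to
 * DAMON_MIN_REGION and of reusing intersecting existing regions.
 *
 *	struct damon_addr_range range = {
 *		.start = 0x100000000UL,
 *		.end = 0x140000000UL,
 *	};
 *
 *	err = damon_set_regions(t, &range, 1);
 */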
267
268 struct damos_filter *damos_new_filter(enum damos_filter_type type,
269 bool matching)
270 {
271 struct damos_filter *filter;
272
273 filter = kmalloc(sizeof(*filter), GFP_KERNEL);
274 if (!filter)
275 return NULL;
276 filter->type = type;
277 filter->matching = matching;
278 INIT_LIST_HEAD(&filter->list);
279 return filter;
280 }
281
282 void damos_add_filter(struct damos *s, struct damos_filter *f)
283 {
284 list_add_tail(&f->list, &s->filters);
285 }
286
287 static void damos_del_filter(struct damos_filter *f)
288 {
289 list_del(&f->list);
290 }
291
292 static void damos_free_filter(struct damos_filter *f)
293 {
294 kfree(f);
295 }
296
297 void damos_destroy_filter(struct damos_filter *f)
298 {
299 damos_del_filter(f);
300 damos_free_filter(f);
301 }
302
303 struct damos_quota_goal *damos_new_quota_goal(
304 enum damos_quota_goal_metric metric,
305 unsigned long target_value)
306 {
307 struct damos_quota_goal *goal;
308
309 goal = kmalloc(sizeof(*goal), GFP_KERNEL);
310 if (!goal)
311 return NULL;
312 goal->metric = metric;
313 goal->target_value = target_value;
314 INIT_LIST_HEAD(&goal->list);
315 return goal;
316 }
317
318 void damos_add_quota_goal(struct damos_quota *q, struct damos_quota_goal *g)
319 {
320 list_add_tail(&g->list, &q->goals);
321 }
322
323 static void damos_del_quota_goal(struct damos_quota_goal *g)
324 {
325 list_del(&g->list);
326 }
327
328 static void damos_free_quota_goal(struct damos_quota_goal *g)
329 {
330 kfree(g);
331 }
332
333 void damos_destroy_quota_goal(struct damos_quota_goal *g)
334 {
335 damos_del_quota_goal(g);
336 damos_free_quota_goal(g);
337 }
338
339 /* initialize fields of @quota that normally API users wouldn't set */
340 static struct damos_quota *damos_quota_init(struct damos_quota *quota)
341 {
342 quota->esz = 0;
343 quota->total_charged_sz = 0;
344 quota->total_charged_ns = 0;
345 quota->charged_sz = 0;
346 quota->charged_from = 0;
347 quota->charge_target_from = NULL;
348 quota->charge_addr_from = 0;
349 quota->esz_bp = 0;
350 return quota;
351 }
352
353 struct damos *damon_new_scheme(struct damos_access_pattern *pattern,
354 enum damos_action action,
355 unsigned long apply_interval_us,
356 struct damos_quota *quota,
357 struct damos_watermarks *wmarks,
358 int target_nid)
359 {
360 struct damos *scheme;
361
362 scheme = kmalloc(sizeof(*scheme), GFP_KERNEL);
363 if (!scheme)
364 return NULL;
365 scheme->pattern = *pattern;
366 scheme->action = action;
367 scheme->apply_interval_us = apply_interval_us;
368 /*
369 * next_apply_sis will be set when kdamond starts. While kdamond is
370 * running, it will also be updated when it is added to the DAMON context,
371 * or damon_attrs are updated.
372 */
373 scheme->next_apply_sis = 0;
374 INIT_LIST_HEAD(&scheme->filters);
375 scheme->stat = (struct damos_stat){};
376 INIT_LIST_HEAD(&scheme->list);
377
378 scheme->quota = *(damos_quota_init(quota));
379 /* quota.goals should be separately set by caller */
380 INIT_LIST_HEAD(&scheme->quota.goals);
381
382 scheme->wmarks = *wmarks;
383 scheme->wmarks.activated = true;
384
385 scheme->target_nid = target_nid;
386
387 return scheme;
388 }
389
390 static void damos_set_next_apply_sis(struct damos *s, struct damon_ctx *ctx)
391 {
392 unsigned long sample_interval = ctx->attrs.sample_interval ?
393 ctx->attrs.sample_interval : 1;
394 unsigned long apply_interval = s->apply_interval_us ?
395 s->apply_interval_us : ctx->attrs.aggr_interval;
396
397 s->next_apply_sis = ctx->passed_sample_intervals +
398 apply_interval / sample_interval;
399 }
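
/*
 * For example (numbers are illustrative only): with a sampling interval of
 * 5000 us and an apply interval of 100000 us, the scheme is next applied
 * 100000 / 5000 = 20 sample intervals after the current value of
 * ctx->passed_sample_intervals.  If apply_interval_us is zero, the
 * aggregation interval is used in its place.
 */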
400
401 void damon_add_scheme(struct damon_ctx *ctx, struct damos *s)
402 {
403 list_add_tail(&s->list, &ctx->schemes);
404 damos_set_next_apply_sis(s, ctx);
405 }
406
407 static void damon_del_scheme(struct damos *s)
408 {
409 list_del(&s->list);
410 }
411
412 static void damon_free_scheme(struct damos *s)
413 {
414 kfree(s);
415 }
416
417 void damon_destroy_scheme(struct damos *s)
418 {
419 struct damos_quota_goal *g, *g_next;
420 struct damos_filter *f, *next;
421
422 damos_for_each_quota_goal_safe(g, g_next, &s->quota)
423 damos_destroy_quota_goal(g);
424
425 damos_for_each_filter_safe(f, next, s)
426 damos_destroy_filter(f);
427 damon_del_scheme(s);
428 damon_free_scheme(s);
429 }
430
431 /*
432 * Construct a damon_target struct
433 *
434 * Returns the pointer to the new struct if success, or NULL otherwise
435 */
436 struct damon_target *damon_new_target(void)
437 {
438 struct damon_target *t;
439
440 t = kmalloc(sizeof(*t), GFP_KERNEL);
441 if (!t)
442 return NULL;
443
444 t->pid = NULL;
445 t->nr_regions = 0;
446 INIT_LIST_HEAD(&t->regions_list);
447 INIT_LIST_HEAD(&t->list);
448
449 return t;
450 }
451
452 void damon_add_target(struct damon_ctx *ctx, struct damon_target *t)
453 {
454 list_add_tail(&t->list, &ctx->adaptive_targets);
455 }
456
457 bool damon_targets_empty(struct damon_ctx *ctx)
458 {
459 return list_empty(&ctx->adaptive_targets);
460 }
461
462 static void damon_del_target(struct damon_target *t)
463 {
464 list_del(&t->list);
465 }
466
467 void damon_free_target(struct damon_target *t)
468 {
469 struct damon_region *r, *next;
470
471 damon_for_each_region_safe(r, next, t)
472 damon_free_region(r);
473 kfree(t);
474 }
475
476 void damon_destroy_target(struct damon_target *t)
477 {
478 damon_del_target(t);
479 damon_free_target(t);
480 }
481
482 unsigned int damon_nr_regions(struct damon_target *t)
483 {
484 return t->nr_regions;
485 }
486
487 struct damon_ctx *damon_new_ctx(void)
488 {
489 struct damon_ctx *ctx;
490
491 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
492 if (!ctx)
493 return NULL;
494
495 init_completion(&ctx->kdamond_started);
496
497 ctx->attrs.sample_interval = 5 * 1000;
498 ctx->attrs.aggr_interval = 100 * 1000;
499 ctx->attrs.ops_update_interval = 60 * 1000 * 1000;
500
501 ctx->passed_sample_intervals = 0;
502 /* These will be set from kdamond_init_intervals_sis() */
503 ctx->next_aggregation_sis = 0;
504 ctx->next_ops_update_sis = 0;
505
506 mutex_init(&ctx->kdamond_lock);
507
508 ctx->attrs.min_nr_regions = 10;
509 ctx->attrs.max_nr_regions = 1000;
510
511 INIT_LIST_HEAD(&ctx->adaptive_targets);
512 INIT_LIST_HEAD(&ctx->schemes);
513
514 return ctx;
515 }
516
517 static void damon_destroy_targets(struct damon_ctx *ctx)
518 {
519 struct damon_target *t, *next_t;
520
521 if (ctx->ops.cleanup) {
522 ctx->ops.cleanup(ctx);
523 return;
524 }
525
526 damon_for_each_target_safe(t, next_t, ctx)
527 damon_destroy_target(t);
528 }
529
530 void damon_destroy_ctx(struct damon_ctx *ctx)
531 {
532 struct damos *s, *next_s;
533
534 damon_destroy_targets(ctx);
535
536 damon_for_each_scheme_safe(s, next_s, ctx)
537 damon_destroy_scheme(s);
538
539 kfree(ctx);
540 }
541
542 static unsigned int damon_age_for_new_attrs(unsigned int age,
543 struct damon_attrs *old_attrs, struct damon_attrs *new_attrs)
544 {
545 return age * old_attrs->aggr_interval / new_attrs->aggr_interval;
546 }
547
548 /* convert access ratio in bp (per 10,000) to nr_accesses */
549 static unsigned int damon_accesses_bp_to_nr_accesses(
550 unsigned int accesses_bp, struct damon_attrs *attrs)
551 {
552 return accesses_bp * damon_max_nr_accesses(attrs) / 10000;
553 }
554
555 /* convert nr_accesses to access ratio in bp (per 10,000) */
556 static unsigned int damon_nr_accesses_to_accesses_bp(
557 unsigned int nr_accesses, struct damon_attrs *attrs)
558 {
559 return nr_accesses * 10000 / damon_max_nr_accesses(attrs);
560 }
561
562 static unsigned int damon_nr_accesses_for_new_attrs(unsigned int nr_accesses,
563 struct damon_attrs *old_attrs, struct damon_attrs *new_attrs)
564 {
565 return damon_accesses_bp_to_nr_accesses(
566 damon_nr_accesses_to_accesses_bp(
567 nr_accesses, old_attrs),
568 new_attrs);
569 }
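
/*
 * Worked example (illustrative, assuming damon_max_nr_accesses() is the
 * number of samples per aggregation interval): with old attrs of 5 ms
 * sampling and 100 ms aggregation, the maximum nr_accesses is 20, so
 * nr_accesses == 10 converts to 10 * 10000 / 20 = 5000 bp.  With new attrs
 * of 5 ms sampling and 200 ms aggregation (maximum 40), the result is
 * 5000 * 40 / 10000 = 20, i.e. the same access ratio in the new scale.
 */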
570
571 static void damon_update_monitoring_result(struct damon_region *r,
572 struct damon_attrs *old_attrs, struct damon_attrs *new_attrs)
573 {
574 r->nr_accesses = damon_nr_accesses_for_new_attrs(r->nr_accesses,
575 old_attrs, new_attrs);
576 r->nr_accesses_bp = r->nr_accesses * 10000;
577 r->age = damon_age_for_new_attrs(r->age, old_attrs, new_attrs);
578 }
579
580 /*
581 * region->nr_accesses is the number of sampling intervals in the last
582 * aggregation interval in which access to the region was found, and
583 * region->age is the number of aggregation intervals for which its access
584 * pattern has been maintained.  For that reason, the real meaning of the two
585 * fields depends on the current sampling and aggregation intervals.  This
586 * function updates ->nr_accesses and ->age of the given damon_ctx's regions for new damon_attrs.
587 */
588 static void damon_update_monitoring_results(struct damon_ctx *ctx,
589 struct damon_attrs *new_attrs)
590 {
591 struct damon_attrs *old_attrs = &ctx->attrs;
592 struct damon_target *t;
593 struct damon_region *r;
594
595 /* if any interval is zero, simply forgo the conversion */
596 if (!old_attrs->sample_interval || !old_attrs->aggr_interval ||
597 !new_attrs->sample_interval ||
598 !new_attrs->aggr_interval)
599 return;
600
601 damon_for_each_target(t, ctx)
602 damon_for_each_region(r, t)
603 damon_update_monitoring_result(
604 r, old_attrs, new_attrs);
605 }
606
607 /**
608 * damon_set_attrs() - Set attributes for the monitoring.
609 * @ctx: monitoring context
610 * @attrs: monitoring attributes
611 *
612 * This function should be called while the kdamond is not running, or while
613 * no access check results aggregation is ongoing (e.g., from
614 * &struct damon_callback->after_aggregation or
615 * &struct damon_callback->after_wmarks_check callbacks).
616 *
617 * Every time interval is in micro-seconds.
618 *
619 * Return: 0 on success, negative error code otherwise.
620 */
621 int damon_set_attrs(struct damon_ctx *ctx, struct damon_attrs *attrs)
622 {
623 unsigned long sample_interval = attrs->sample_interval ?
624 attrs->sample_interval : 1;
625 struct damos *s;
626
627 if (attrs->min_nr_regions < 3)
628 return -EINVAL;
629 if (attrs->min_nr_regions > attrs->max_nr_regions)
630 return -EINVAL;
631 if (attrs->sample_interval > attrs->aggr_interval)
632 return -EINVAL;
633
634 ctx->next_aggregation_sis = ctx->passed_sample_intervals +
635 attrs->aggr_interval / sample_interval;
636 ctx->next_ops_update_sis = ctx->passed_sample_intervals +
637 attrs->ops_update_interval / sample_interval;
638
639 damon_update_monitoring_results(ctx, attrs);
640 ctx->attrs = *attrs;
641
642 damon_for_each_scheme(s, ctx)
643 damos_set_next_apply_sis(s, ctx);
644
645 return 0;
646 }
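
/*
 * Example (illustrative sketch): update the intervals of a running context
 * from a parameters-update safe context such as the after_aggregation
 * callback.
 *
 *	struct damon_attrs attrs = ctx->attrs;
 *
 *	attrs.sample_interval = 10 * 1000;
 *	attrs.aggr_interval = 200 * 1000;
 *	if (damon_set_attrs(ctx, &attrs))
 *		pr_warn("invalid monitoring attributes\n");
 */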
647
648 /**
649 * damon_set_schemes() - Set data access monitoring based operation schemes.
650 * @ctx: monitoring context
651 * @schemes: array of the schemes
652 * @nr_schemes: number of entries in @schemes
653 *
654 * This function should not be called while the kdamond of the context is
655 * running.
656 */
657 void damon_set_schemes(struct damon_ctx *ctx, struct damos **schemes,
658 ssize_t nr_schemes)
659 {
660 struct damos *s, *next;
661 ssize_t i;
662
663 damon_for_each_scheme_safe(s, next, ctx)
664 damon_destroy_scheme(s);
665 for (i = 0; i < nr_schemes; i++)
666 damon_add_scheme(ctx, schemes[i]);
667 }
668
669 static struct damos_quota_goal *damos_nth_quota_goal(
670 int n, struct damos_quota *q)
671 {
672 struct damos_quota_goal *goal;
673 int i = 0;
674
675 damos_for_each_quota_goal(goal, q) {
676 if (i++ == n)
677 return goal;
678 }
679 return NULL;
680 }
681
682 static void damos_commit_quota_goal(
683 struct damos_quota_goal *dst, struct damos_quota_goal *src)
684 {
685 dst->metric = src->metric;
686 dst->target_value = src->target_value;
687 if (dst->metric == DAMOS_QUOTA_USER_INPUT)
688 dst->current_value = src->current_value;
689 /* keep last_psi_total as is, since it will be updated in next cycle */
690 }
691
692 /**
693 * damos_commit_quota_goals() - Commit DAMOS quota goals to another quota.
694 * @dst: The commit destination DAMOS quota.
695 * @src: The commit source DAMOS quota.
696 *
697 * Copies user-specified parameters for quota goals from @src to @dst. Users
698 * should use this function for quota goals-level parameters update of running
699 * DAMON contexts, instead of manual in-place updates.
700 *
701 * This function should be called from parameters-update safe context, like
702 * DAMON callbacks.
703 */
704 int damos_commit_quota_goals(struct damos_quota *dst, struct damos_quota *src)
705 {
706 struct damos_quota_goal *dst_goal, *next, *src_goal, *new_goal;
707 int i = 0, j = 0;
708
709 damos_for_each_quota_goal_safe(dst_goal, next, dst) {
710 src_goal = damos_nth_quota_goal(i++, src);
711 if (src_goal)
712 damos_commit_quota_goal(dst_goal, src_goal);
713 else
714 damos_destroy_quota_goal(dst_goal);
715 }
716 damos_for_each_quota_goal_safe(src_goal, next, src) {
717 if (j++ < i)
718 continue;
719 new_goal = damos_new_quota_goal(
720 src_goal->metric, src_goal->target_value);
721 if (!new_goal)
722 return -ENOMEM;
723 damos_add_quota_goal(dst, new_goal);
724 }
725 return 0;
726 }
727
728 static int damos_commit_quota(struct damos_quota *dst, struct damos_quota *src)
729 {
730 int err;
731
732 dst->reset_interval = src->reset_interval;
733 dst->ms = src->ms;
734 dst->sz = src->sz;
735 err = damos_commit_quota_goals(dst, src);
736 if (err)
737 return err;
738 dst->weight_sz = src->weight_sz;
739 dst->weight_nr_accesses = src->weight_nr_accesses;
740 dst->weight_age = src->weight_age;
741 return 0;
742 }
743
744 static struct damos_filter *damos_nth_filter(int n, struct damos *s)
745 {
746 struct damos_filter *filter;
747 int i = 0;
748
749 damos_for_each_filter(filter, s) {
750 if (i++ == n)
751 return filter;
752 }
753 return NULL;
754 }
755
756 static void damos_commit_filter_arg(
757 struct damos_filter *dst, struct damos_filter *src)
758 {
759 switch (dst->type) {
760 case DAMOS_FILTER_TYPE_MEMCG:
761 dst->memcg_id = src->memcg_id;
762 break;
763 case DAMOS_FILTER_TYPE_ADDR:
764 dst->addr_range = src->addr_range;
765 break;
766 case DAMOS_FILTER_TYPE_TARGET:
767 dst->target_idx = src->target_idx;
768 break;
769 default:
770 break;
771 }
772 }
773
774 static void damos_commit_filter(
775 struct damos_filter *dst, struct damos_filter *src)
776 {
777 dst->type = src->type;
778 dst->matching = src->matching;
779 damos_commit_filter_arg(dst, src);
780 }
781
782 static int damos_commit_filters(struct damos *dst, struct damos *src)
783 {
784 struct damos_filter *dst_filter, *next, *src_filter, *new_filter;
785 int i = 0, j = 0;
786
787 damos_for_each_filter_safe(dst_filter, next, dst) {
788 src_filter = damos_nth_filter(i++, src);
789 if (src_filter)
790 damos_commit_filter(dst_filter, src_filter);
791 else
792 damos_destroy_filter(dst_filter);
793 }
794
795 damos_for_each_filter_safe(src_filter, next, src) {
796 if (j++ < i)
797 continue;
798
799 new_filter = damos_new_filter(
800 src_filter->type, src_filter->matching);
801 if (!new_filter)
802 return -ENOMEM;
803 damos_commit_filter_arg(new_filter, src_filter);
804 damos_add_filter(dst, new_filter);
805 }
806 return 0;
807 }
808
809 static struct damos *damon_nth_scheme(int n, struct damon_ctx *ctx)
810 {
811 struct damos *s;
812 int i = 0;
813
814 damon_for_each_scheme(s, ctx) {
815 if (i++ == n)
816 return s;
817 }
818 return NULL;
819 }
820
821 static int damos_commit(struct damos *dst, struct damos *src)
822 {
823 int err;
824
825 dst->pattern = src->pattern;
826 dst->action = src->action;
827 dst->apply_interval_us = src->apply_interval_us;
828
829 err = damos_commit_quota(&dst->quota, &src->quota);
830 if (err)
831 return err;
832
833 dst->wmarks = src->wmarks;
834
835 err = damos_commit_filters(dst, src);
836 return err;
837 }
838
839 static int damon_commit_schemes(struct damon_ctx *dst, struct damon_ctx *src)
840 {
841 struct damos *dst_scheme, *next, *src_scheme, *new_scheme;
842 int i = 0, j = 0, err;
843
844 damon_for_each_scheme_safe(dst_scheme, next, dst) {
845 src_scheme = damon_nth_scheme(i++, src);
846 if (src_scheme) {
847 err = damos_commit(dst_scheme, src_scheme);
848 if (err)
849 return err;
850 } else {
851 damon_destroy_scheme(dst_scheme);
852 }
853 }
854
855 damon_for_each_scheme_safe(src_scheme, next, src) {
856 if (j++ < i)
857 continue;
858 new_scheme = damon_new_scheme(&src_scheme->pattern,
859 src_scheme->action,
860 src_scheme->apply_interval_us,
861 &src_scheme->quota, &src_scheme->wmarks,
862 NUMA_NO_NODE);
863 if (!new_scheme)
864 return -ENOMEM;
865 damon_add_scheme(dst, new_scheme);
866 }
867 return 0;
868 }
869
870 static struct damon_target *damon_nth_target(int n, struct damon_ctx *ctx)
871 {
872 struct damon_target *t;
873 int i = 0;
874
875 damon_for_each_target(t, ctx) {
876 if (i++ == n)
877 return t;
878 }
879 return NULL;
880 }
881
882 /*
883 * The caller should ensure the regions of @src are
884 * 1. valid (end >= start) and
885 * 2. sorted by starting address.
886 *
887 * If @src has no region, @dst keeps current regions.
888 */
889 static int damon_commit_target_regions(
890 struct damon_target *dst, struct damon_target *src)
891 {
892 struct damon_region *src_region;
893 struct damon_addr_range *ranges;
894 int i = 0, err;
895
896 damon_for_each_region(src_region, src)
897 i++;
898 if (!i)
899 return 0;
900
901 ranges = kmalloc_array(i, sizeof(*ranges), GFP_KERNEL | __GFP_NOWARN);
902 if (!ranges)
903 return -ENOMEM;
904 i = 0;
905 damon_for_each_region(src_region, src)
906 ranges[i++] = src_region->ar;
907 err = damon_set_regions(dst, ranges, i);
908 kfree(ranges);
909 return err;
910 }
911
912 static int damon_commit_target(
913 struct damon_target *dst, bool dst_has_pid,
914 struct damon_target *src, bool src_has_pid)
915 {
916 int err;
917
918 err = damon_commit_target_regions(dst, src);
919 if (err)
920 return err;
921 if (dst_has_pid)
922 put_pid(dst->pid);
923 if (src_has_pid)
924 get_pid(src->pid);
925 dst->pid = src->pid;
926 return 0;
927 }
928
929 static int damon_commit_targets(
930 struct damon_ctx *dst, struct damon_ctx *src)
931 {
932 struct damon_target *dst_target, *next, *src_target, *new_target;
933 int i = 0, j = 0, err;
934
935 damon_for_each_target_safe(dst_target, next, dst) {
936 src_target = damon_nth_target(i++, src);
937 if (src_target) {
938 err = damon_commit_target(
939 dst_target, damon_target_has_pid(dst),
940 src_target, damon_target_has_pid(src));
941 if (err)
942 return err;
943 } else {
944 if (damon_target_has_pid(dst))
945 put_pid(dst_target->pid);
946 damon_destroy_target(dst_target);
947 }
948 }
949
950 damon_for_each_target_safe(src_target, next, src) {
951 if (j++ < i)
952 continue;
953 new_target = damon_new_target();
954 if (!new_target)
955 return -ENOMEM;
956 err = damon_commit_target(new_target, false,
957 src_target, damon_target_has_pid(src));
958 if (err)
959 return err;
960 }
961 return 0;
962 }
963
964 /**
965 * damon_commit_ctx() - Commit parameters of a DAMON context to another.
966 * @dst: The commit destination DAMON context.
967 * @src: The commit source DAMON context.
968 *
969 * This function copies user-specified parameters from @src to @dst and updates
970 * the internal status and results accordingly. Users should use this function
971 * for context-level parameters update of a running context, instead of manual
972 * in-place updates.
973 *
974 * This function should be called from parameters-update safe context, like
975 * DAMON callbacks.
976 */
977 int damon_commit_ctx(struct damon_ctx *dst, struct damon_ctx *src)
978 {
979 int err;
980
981 err = damon_commit_schemes(dst, src);
982 if (err)
983 return err;
984 err = damon_commit_targets(dst, src);
985 if (err)
986 return err;
987 /*
988 * schemes and targets should be updated first, since
989 * 1. damon_set_attrs() updates monitoring results of targets and
990 * next_apply_sis of schemes, and
991 * 2. ops update should be done after pid handling is done (target
992 * committing requires putting pids).
993 */
994 err = damon_set_attrs(dst, &src->attrs);
995 if (err)
996 return err;
997 dst->ops = src->ops;
998
999 return 0;
1000 }
1001
1002 /**
1003 * damon_nr_running_ctxs() - Return number of currently running contexts.
1004 */
1005 int damon_nr_running_ctxs(void)
1006 {
1007 int nr_ctxs;
1008
1009 mutex_lock(&damon_lock);
1010 nr_ctxs = nr_running_ctxs;
1011 mutex_unlock(&damon_lock);
1012
1013 return nr_ctxs;
1014 }
1015
1016 /* Returns the size upper limit for each monitoring region */
1017 static unsigned long damon_region_sz_limit(struct damon_ctx *ctx)
1018 {
1019 struct damon_target *t;
1020 struct damon_region *r;
1021 unsigned long sz = 0;
1022
1023 damon_for_each_target(t, ctx) {
1024 damon_for_each_region(r, t)
1025 sz += damon_sz_region(r);
1026 }
1027
1028 if (ctx->attrs.min_nr_regions)
1029 sz /= ctx->attrs.min_nr_regions;
1030 if (sz < DAMON_MIN_REGION)
1031 sz = DAMON_MIN_REGION;
1032
1033 return sz;
1034 }
1035
1036 static int kdamond_fn(void *data);
1037
1038 /*
1039 * __damon_start() - Starts monitoring with given context.
1040 * @ctx: monitoring context
1041 *
1042 * This function should be called while damon_lock is held.
1043 *
1044 * Return: 0 on success, negative error code otherwise.
1045 */
1046 static int __damon_start(struct damon_ctx *ctx)
1047 {
1048 int err = -EBUSY;
1049
1050 mutex_lock(&ctx->kdamond_lock);
1051 if (!ctx->kdamond) {
1052 err = 0;
1053 reinit_completion(&ctx->kdamond_started);
1054 ctx->kdamond = kthread_run(kdamond_fn, ctx, "kdamond.%d",
1055 nr_running_ctxs);
1056 if (IS_ERR(ctx->kdamond)) {
1057 err = PTR_ERR(ctx->kdamond);
1058 ctx->kdamond = NULL;
1059 } else {
1060 wait_for_completion(&ctx->kdamond_started);
1061 }
1062 }
1063 mutex_unlock(&ctx->kdamond_lock);
1064
1065 return err;
1066 }
1067
1068 /**
1069 * damon_start() - Starts the monitoring for a given group of contexts.
1070 * @ctxs: an array of the pointers for contexts to start monitoring
1071 * @nr_ctxs: size of @ctxs
1072 * @exclusive: exclusiveness of this contexts group
1073 *
1074 * This function starts a group of monitoring threads for a group of monitoring
1075 * contexts. One thread per context is created and run in parallel. The
1076 * caller should handle synchronization between the threads by itself. If
1077 * @exclusive is true and a group of threads created by another
1078 * 'damon_start()' call is currently running, this function does nothing but
1079 * returns -EBUSY.
1080 *
1081 * Return: 0 on success, negative error code otherwise.
1082 */
1083 int damon_start(struct damon_ctx **ctxs, int nr_ctxs, bool exclusive)
1084 {
1085 int i;
1086 int err = 0;
1087
1088 mutex_lock(&damon_lock);
1089 if ((exclusive && nr_running_ctxs) ||
1090 (!exclusive && running_exclusive_ctxs)) {
1091 mutex_unlock(&damon_lock);
1092 return -EBUSY;
1093 }
1094
1095 for (i = 0; i < nr_ctxs; i++) {
1096 err = __damon_start(ctxs[i]);
1097 if (err)
1098 break;
1099 nr_running_ctxs++;
1100 }
1101 if (exclusive && nr_running_ctxs)
1102 running_exclusive_ctxs = true;
1103 mutex_unlock(&damon_lock);
1104
1105 return err;
1106 }
1107
1108 /*
1109 * __damon_stop() - Stops monitoring of a given context.
1110 * @ctx: monitoring context
1111 *
1112 * Return: 0 on success, negative error code otherwise.
1113 */
1114 static int __damon_stop(struct damon_ctx *ctx)
1115 {
1116 struct task_struct *tsk;
1117
1118 mutex_lock(&ctx->kdamond_lock);
1119 tsk = ctx->kdamond;
1120 if (tsk) {
1121 get_task_struct(tsk);
1122 mutex_unlock(&ctx->kdamond_lock);
1123 kthread_stop_put(tsk);
1124 return 0;
1125 }
1126 mutex_unlock(&ctx->kdamond_lock);
1127
1128 return -EPERM;
1129 }
1130
1131 /**
1132 * damon_stop() - Stops the monitoring for a given group of contexts.
1133 * @ctxs: an array of the pointers for contexts to stop monitoring
1134 * @nr_ctxs: size of @ctxs
1135 *
1136 * Return: 0 on success, negative error code otherwise.
1137 */
1138 int damon_stop(struct damon_ctx **ctxs, int nr_ctxs)
1139 {
1140 int i, err = 0;
1141
1142 for (i = 0; i < nr_ctxs; i++) {
1143 /* nr_running_ctxs is decremented in kdamond_fn */
1144 err = __damon_stop(ctxs[i]);
1145 if (err)
1146 break;
1147 }
1148 return err;
1149 }
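
/*
 * Example (illustrative sketch): start and later stop a single,
 * non-exclusive context.
 *
 *	struct damon_ctx *ctxs[1] = { ctx };
 *
 *	err = damon_start(ctxs, 1, false);
 *	...
 *	err = damon_stop(ctxs, 1);
 *
 * Note that a kdamond also terminates by itself when all of its monitoring
 * targets become invalid, so damon_stop() is needed only for explicit
 * termination.
 */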
1150
1151 /*
1152 * Reset the aggregated monitoring results ('nr_accesses' of each region).
1153 */
1154 static void kdamond_reset_aggregated(struct damon_ctx *c)
1155 {
1156 struct damon_target *t;
1157 unsigned int ti = 0; /* target's index */
1158
1159 damon_for_each_target(t, c) {
1160 struct damon_region *r;
1161
1162 damon_for_each_region(r, t) {
1163 trace_damon_aggregated(ti, r, damon_nr_regions(t));
1164 r->last_nr_accesses = r->nr_accesses;
1165 r->nr_accesses = 0;
1166 }
1167 ti++;
1168 }
1169 }
1170
1171 static void damon_split_region_at(struct damon_target *t,
1172 struct damon_region *r, unsigned long sz_r);
1173
1174 static bool __damos_valid_target(struct damon_region *r, struct damos *s)
1175 {
1176 unsigned long sz;
1177 unsigned int nr_accesses = r->nr_accesses_bp / 10000;
1178
1179 sz = damon_sz_region(r);
1180 return s->pattern.min_sz_region <= sz &&
1181 sz <= s->pattern.max_sz_region &&
1182 s->pattern.min_nr_accesses <= nr_accesses &&
1183 nr_accesses <= s->pattern.max_nr_accesses &&
1184 s->pattern.min_age_region <= r->age &&
1185 r->age <= s->pattern.max_age_region;
1186 }
1187
1188 static bool damos_valid_target(struct damon_ctx *c, struct damon_target *t,
1189 struct damon_region *r, struct damos *s)
1190 {
1191 bool ret = __damos_valid_target(r, s);
1192
1193 if (!ret || !s->quota.esz || !c->ops.get_scheme_score)
1194 return ret;
1195
1196 return c->ops.get_scheme_score(c, t, r, s) >= s->quota.min_score;
1197 }
1198
1199 /*
1200 * damos_skip_charged_region() - Check if the given region or starting part of
1201 * it is already charged for the DAMOS quota.
1202 * @t: The target of the region.
1203 * @rp: The pointer to the region.
1204 * @s: The scheme to be applied.
1205 *
1206 * If a quota of a scheme has been exceeded in a quota charge window, the
1207 * scheme's action would be applied to only a part of the target access
1208 * pattern fulfilling regions. To avoid applying the scheme action only to the
1209 * already applied regions, DAMON skips applying the scheme action to the
1210 * regions that were charged in the previous charge window.
1211 *
1212 * This function checks if a given region should be skipped or not for that
1213 * reason. If only the starting part of the region has previously been
1214 * charged, this function splits the region into two so that the second one
1215 * covers the area that was not charged in the previous charge window, saves
1216 * the second region in *rp, and returns false, so that the caller can apply
1217 * the DAMON action to the second one.
1218 *
1219 * Return: true if the region should be entirely skipped, false otherwise.
1220 */
1221 static bool damos_skip_charged_region(struct damon_target *t,
1222 struct damon_region **rp, struct damos *s)
1223 {
1224 struct damon_region *r = *rp;
1225 struct damos_quota *quota = &s->quota;
1226 unsigned long sz_to_skip;
1227
1228 /* Skip previously charged regions */
1229 if (quota->charge_target_from) {
1230 if (t != quota->charge_target_from)
1231 return true;
1232 if (r == damon_last_region(t)) {
1233 quota->charge_target_from = NULL;
1234 quota->charge_addr_from = 0;
1235 return true;
1236 }
1237 if (quota->charge_addr_from &&
1238 r->ar.end <= quota->charge_addr_from)
1239 return true;
1240
1241 if (quota->charge_addr_from && r->ar.start <
1242 quota->charge_addr_from) {
1243 sz_to_skip = ALIGN_DOWN(quota->charge_addr_from -
1244 r->ar.start, DAMON_MIN_REGION);
1245 if (!sz_to_skip) {
1246 if (damon_sz_region(r) <= DAMON_MIN_REGION)
1247 return true;
1248 sz_to_skip = DAMON_MIN_REGION;
1249 }
1250 damon_split_region_at(t, r, sz_to_skip);
1251 r = damon_next_region(r);
1252 *rp = r;
1253 }
1254 quota->charge_target_from = NULL;
1255 quota->charge_addr_from = 0;
1256 }
1257 return false;
1258 }
1259
1260 static void damos_update_stat(struct damos *s,
1261 unsigned long sz_tried, unsigned long sz_applied)
1262 {
1263 s->stat.nr_tried++;
1264 s->stat.sz_tried += sz_tried;
1265 if (sz_applied)
1266 s->stat.nr_applied++;
1267 s->stat.sz_applied += sz_applied;
1268 }
1269
1270 static bool __damos_filter_out(struct damon_ctx *ctx, struct damon_target *t,
1271 struct damon_region *r, struct damos_filter *filter)
1272 {
1273 bool matched = false;
1274 struct damon_target *ti;
1275 int target_idx = 0;
1276 unsigned long start, end;
1277
1278 switch (filter->type) {
1279 case DAMOS_FILTER_TYPE_TARGET:
1280 damon_for_each_target(ti, ctx) {
1281 if (ti == t)
1282 break;
1283 target_idx++;
1284 }
1285 matched = target_idx == filter->target_idx;
1286 break;
1287 case DAMOS_FILTER_TYPE_ADDR:
1288 start = ALIGN_DOWN(filter->addr_range.start, DAMON_MIN_REGION);
1289 end = ALIGN_DOWN(filter->addr_range.end, DAMON_MIN_REGION);
1290
1291 /* inside the range */
1292 if (start <= r->ar.start && r->ar.end <= end) {
1293 matched = true;
1294 break;
1295 }
1296 /* outside of the range */
1297 if (r->ar.end <= start || end <= r->ar.start) {
1298 matched = false;
1299 break;
1300 }
1301 /* start before the range and overlap */
1302 if (r->ar.start < start) {
1303 damon_split_region_at(t, r, start - r->ar.start);
1304 matched = false;
1305 break;
1306 }
1307 /* start inside the range */
1308 damon_split_region_at(t, r, end - r->ar.start);
1309 matched = true;
1310 break;
1311 default:
1312 return false;
1313 }
1314
1315 return matched == filter->matching;
1316 }
1317
1318 static bool damos_filter_out(struct damon_ctx *ctx, struct damon_target *t,
1319 struct damon_region *r, struct damos *s)
1320 {
1321 struct damos_filter *filter;
1322
1323 damos_for_each_filter(filter, s) {
1324 if (__damos_filter_out(ctx, t, r, filter))
1325 return true;
1326 }
1327 return false;
1328 }
1329
1330 static void damos_apply_scheme(struct damon_ctx *c, struct damon_target *t,
1331 struct damon_region *r, struct damos *s)
1332 {
1333 struct damos_quota *quota = &s->quota;
1334 unsigned long sz = damon_sz_region(r);
1335 struct timespec64 begin, end;
1336 unsigned long sz_applied = 0;
1337 int err = 0;
1338 /*
1339 * We plan to support multiple contexts per kdamond, as DAMON sysfs
1340 * implies with 'nr_contexts' file. Nevertheless, only single context
1341 * per kdamond is supported for now. So, we can simply use '0' context
1342 * index here.
1343 */
1344 unsigned int cidx = 0;
1345 struct damos *siter; /* schemes iterator */
1346 unsigned int sidx = 0;
1347 struct damon_target *titer; /* targets iterator */
1348 unsigned int tidx = 0;
1349 bool do_trace = false;
1350
1351 /* get indices for trace_damos_before_apply() */
1352 if (trace_damos_before_apply_enabled()) {
1353 damon_for_each_scheme(siter, c) {
1354 if (siter == s)
1355 break;
1356 sidx++;
1357 }
1358 damon_for_each_target(titer, c) {
1359 if (titer == t)
1360 break;
1361 tidx++;
1362 }
1363 do_trace = true;
1364 }
1365
1366 if (c->ops.apply_scheme) {
1367 if (quota->esz && quota->charged_sz + sz > quota->esz) {
1368 sz = ALIGN_DOWN(quota->esz - quota->charged_sz,
1369 DAMON_MIN_REGION);
1370 if (!sz)
1371 goto update_stat;
1372 damon_split_region_at(t, r, sz);
1373 }
1374 if (damos_filter_out(c, t, r, s))
1375 return;
1376 ktime_get_coarse_ts64(&begin);
1377 if (c->callback.before_damos_apply)
1378 err = c->callback.before_damos_apply(c, t, r, s);
1379 if (!err) {
1380 trace_damos_before_apply(cidx, sidx, tidx, r,
1381 damon_nr_regions(t), do_trace);
1382 sz_applied = c->ops.apply_scheme(c, t, r, s);
1383 }
1384 ktime_get_coarse_ts64(&end);
1385 quota->total_charged_ns += timespec64_to_ns(&end) -
1386 timespec64_to_ns(&begin);
1387 quota->charged_sz += sz;
1388 if (quota->esz && quota->charged_sz >= quota->esz) {
1389 quota->charge_target_from = t;
1390 quota->charge_addr_from = r->ar.end + 1;
1391 }
1392 }
1393 if (s->action != DAMOS_STAT)
1394 r->age = 0;
1395
1396 update_stat:
1397 damos_update_stat(s, sz, sz_applied);
1398 }
1399
1400 static void damon_do_apply_schemes(struct damon_ctx *c,
1401 struct damon_target *t,
1402 struct damon_region *r)
1403 {
1404 struct damos *s;
1405
1406 damon_for_each_scheme(s, c) {
1407 struct damos_quota *quota = &s->quota;
1408
1409 if (c->passed_sample_intervals != s->next_apply_sis)
1410 continue;
1411
1412 if (!s->wmarks.activated)
1413 continue;
1414
1415 /* Check the quota */
1416 if (quota->esz && quota->charged_sz >= quota->esz)
1417 continue;
1418
1419 if (damos_skip_charged_region(t, &r, s))
1420 continue;
1421
1422 if (!damos_valid_target(c, t, r, s))
1423 continue;
1424
1425 damos_apply_scheme(c, t, r, s);
1426 }
1427 }
1428
1429 /*
1430 * damon_feed_loop_next_input() - get next input to achieve a target score.
1431 * @last_input:	The last input.
1432 * @score:	Current score that was made with @last_input.
1433 *
1434 * Calculate next input to achieve the target score, based on the last input
1435 * and current score. Assuming the input and the score are positively
1436 * proportional, calculate how much compensation should be added to or
1437 * subtracted from the last input as a proportion of the last input. Avoid
1438 * next input always being zero by setting it non-zero always. In short form
1439 * (assuming support of float and signed calculations), the algorithm is as
1440 * below.
1441 *
1442 * next_input = max(last_input * ((goal - current) / goal + 1), 1)
1443 *
1444 * For simple implementation, we assume the target score is always 10,000. The
1445 * caller should adjust @score for this.
1446 *
1447 * Returns next input that is assumed to achieve the target score.
1448 */
1449 static unsigned long damon_feed_loop_next_input(unsigned long last_input,
1450 unsigned long score)
1451 {
1452 const unsigned long goal = 10000;
1453 unsigned long score_goal_diff = max(goal, score) - min(goal, score);
1454 unsigned long score_goal_diff_bp = score_goal_diff * 10000 / goal;
1455 unsigned long compensation = last_input * score_goal_diff_bp / 10000;
1456 /* Set minimum input as 10000 to avoid compensation being zero */
1457 const unsigned long min_input = 10000;
1458
1459 if (goal > score)
1460 return last_input + compensation;
1461 if (last_input > compensation + min_input)
1462 return last_input - compensation;
1463 return min_input;
1464 }
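
/*
 * Worked example (numbers are illustrative): with last_input == 1000000 and
 * a current score of 8000 against the fixed goal of 10000,
 * score_goal_diff_bp is 2000 and the compensation is
 * 1000000 * 2000 / 10000 = 200000.  Since the score is below the goal, the
 * next input becomes 1000000 + 200000 = 1200000.  For a score of 12000, the
 * same compensation would instead be subtracted, but never below the 10000
 * minimum.
 */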
1465
1466 #ifdef CONFIG_PSI
1467
1468 static u64 damos_get_some_mem_psi_total(void)
1469 {
1470 if (static_branch_likely(&psi_disabled))
1471 return 0;
1472 return div_u64(psi_system.total[PSI_AVGS][PSI_MEM * 2],
1473 NSEC_PER_USEC);
1474 }
1475
1476 #else /* CONFIG_PSI */
1477
1478 static inline u64 damos_get_some_mem_psi_total(void)
1479 {
1480 return 0;
1481 };
1482
1483 #endif /* CONFIG_PSI */
1484
1485 static void damos_set_quota_goal_current_value(struct damos_quota_goal *goal)
1486 {
1487 u64 now_psi_total;
1488
1489 switch (goal->metric) {
1490 case DAMOS_QUOTA_USER_INPUT:
1491 /* User should already set goal->current_value */
1492 break;
1493 case DAMOS_QUOTA_SOME_MEM_PSI_US:
1494 now_psi_total = damos_get_some_mem_psi_total();
1495 goal->current_value = now_psi_total - goal->last_psi_total;
1496 goal->last_psi_total = now_psi_total;
1497 break;
1498 default:
1499 break;
1500 }
1501 }
1502
1503 /* Return the highest score since it makes schemes least aggressive */
1504 static unsigned long damos_quota_score(struct damos_quota *quota)
1505 {
1506 struct damos_quota_goal *goal;
1507 unsigned long highest_score = 0;
1508
1509 damos_for_each_quota_goal(goal, quota) {
1510 damos_set_quota_goal_current_value(goal);
1511 highest_score = max(highest_score,
1512 goal->current_value * 10000 /
1513 goal->target_value);
1514 }
1515
1516 return highest_score;
1517 }
1518
1519 /*
1520 * Called only if quota->ms or quota->sz is set, or quota->goals is not empty
1521 */
1522 static void damos_set_effective_quota(struct damos_quota *quota)
1523 {
1524 unsigned long throughput;
1525 unsigned long esz;
1526
1527 if (!quota->ms && list_empty(&quota->goals)) {
1528 quota->esz = quota->sz;
1529 return;
1530 }
1531
1532 if (!list_empty(&quota->goals)) {
1533 unsigned long score = damos_quota_score(quota);
1534
1535 quota->esz_bp = damon_feed_loop_next_input(
1536 max(quota->esz_bp, 10000UL),
1537 score);
1538 esz = quota->esz_bp / 10000;
1539 }
1540
1541 if (quota->ms) {
1542 if (quota->total_charged_ns)
1543 throughput = quota->total_charged_sz * 1000000 /
1544 quota->total_charged_ns;
1545 else
1546 throughput = PAGE_SIZE * 1024;
1547 if (!list_empty(&quota->goals))
1548 esz = min(throughput * quota->ms, esz);
1549 else
1550 esz = throughput * quota->ms;
1551 }
1552
1553 if (quota->sz && quota->sz < esz)
1554 esz = quota->sz;
1555
1556 quota->esz = esz;
1557 }
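
/*
 * Worked example (numbers are illustrative): suppose only the time quota is
 * set with quota->ms == 10, and the scheme has so far spent
 * total_charged_ns == 2e9 ns applying total_charged_sz == 1 GiB of regions.
 * The measured throughput is 1 GiB * 1000000 / 2e9, roughly half a MiB per
 * millisecond, so esz for the next charge window becomes roughly 5 MiB.  If
 * quota->sz were additionally set to 4 MiB, esz would be capped at 4 MiB.
 */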
1558
1559 static void damos_adjust_quota(struct damon_ctx *c, struct damos *s)
1560 {
1561 struct damos_quota *quota = &s->quota;
1562 struct damon_target *t;
1563 struct damon_region *r;
1564 unsigned long cumulated_sz;
1565 unsigned int score, max_score = 0;
1566
1567 if (!quota->ms && !quota->sz && list_empty(&quota->goals))
1568 return;
1569
1570 /* New charge window starts */
1571 if (time_after_eq(jiffies, quota->charged_from +
1572 msecs_to_jiffies(quota->reset_interval))) {
1573 if (quota->esz && quota->charged_sz >= quota->esz)
1574 s->stat.qt_exceeds++;
1575 quota->total_charged_sz += quota->charged_sz;
1576 quota->charged_from = jiffies;
1577 quota->charged_sz = 0;
1578 damos_set_effective_quota(quota);
1579 }
1580
1581 if (!c->ops.get_scheme_score)
1582 return;
1583
1584 /* Fill up the score histogram */
1585 memset(quota->histogram, 0, sizeof(quota->histogram));
1586 damon_for_each_target(t, c) {
1587 damon_for_each_region(r, t) {
1588 if (!__damos_valid_target(r, s))
1589 continue;
1590 score = c->ops.get_scheme_score(c, t, r, s);
1591 quota->histogram[score] += damon_sz_region(r);
1592 if (score > max_score)
1593 max_score = score;
1594 }
1595 }
1596
1597 /* Set the min score limit */
1598 for (cumulated_sz = 0, score = max_score; ; score--) {
1599 cumulated_sz += quota->histogram[score];
1600 if (cumulated_sz >= quota->esz || !score)
1601 break;
1602 }
1603 quota->min_score = score;
1604 }
1605
1606 static void kdamond_apply_schemes(struct damon_ctx *c)
1607 {
1608 struct damon_target *t;
1609 struct damon_region *r, *next_r;
1610 struct damos *s;
1611 unsigned long sample_interval = c->attrs.sample_interval ?
1612 c->attrs.sample_interval : 1;
1613 bool has_schemes_to_apply = false;
1614
1615 damon_for_each_scheme(s, c) {
1616 if (c->passed_sample_intervals != s->next_apply_sis)
1617 continue;
1618
1619 if (!s->wmarks.activated)
1620 continue;
1621
1622 has_schemes_to_apply = true;
1623
1624 damos_adjust_quota(c, s);
1625 }
1626
1627 if (!has_schemes_to_apply)
1628 return;
1629
1630 damon_for_each_target(t, c) {
1631 damon_for_each_region_safe(r, next_r, t)
1632 damon_do_apply_schemes(c, t, r);
1633 }
1634
1635 damon_for_each_scheme(s, c) {
1636 if (c->passed_sample_intervals != s->next_apply_sis)
1637 continue;
1638 s->next_apply_sis +=
1639 (s->apply_interval_us ? s->apply_interval_us :
1640 c->attrs.aggr_interval) / sample_interval;
1641 }
1642 }
1643
1644 /*
1645 * Merge two adjacent regions into one region
1646 */
1647 static void damon_merge_two_regions(struct damon_target *t,
1648 struct damon_region *l, struct damon_region *r)
1649 {
1650 unsigned long sz_l = damon_sz_region(l), sz_r = damon_sz_region(r);
1651
1652 l->nr_accesses = (l->nr_accesses * sz_l + r->nr_accesses * sz_r) /
1653 (sz_l + sz_r);
1654 l->nr_accesses_bp = l->nr_accesses * 10000;
1655 l->age = (l->age * sz_l + r->age * sz_r) / (sz_l + sz_r);
1656 l->ar.end = r->ar.end;
1657 damon_destroy_region(r, t);
1658 }
1659
1660 /*
1661 * Merge adjacent regions having similar access frequencies
1662 *
1663 * t target affected by this merge operation
1664 * thres '->nr_accesses' diff threshold for the merge
1665 * sz_limit size upper limit of each region
1666 */
1667 static void damon_merge_regions_of(struct damon_target *t, unsigned int thres,
1668 unsigned long sz_limit)
1669 {
1670 struct damon_region *r, *prev = NULL, *next;
1671
1672 damon_for_each_region_safe(r, next, t) {
1673 if (abs(r->nr_accesses - r->last_nr_accesses) > thres)
1674 r->age = 0;
1675 else
1676 r->age++;
1677
1678 if (prev && prev->ar.end == r->ar.start &&
1679 abs(prev->nr_accesses - r->nr_accesses) <= thres &&
1680 damon_sz_region(prev) + damon_sz_region(r) <= sz_limit)
1681 damon_merge_two_regions(t, prev, r);
1682 else
1683 prev = r;
1684 }
1685 }
1686
1687 /*
1688 * Merge adjacent regions having similar access frequencies
1689 *
1690 * threshold '->nr_accesses' diff threshold for the merge
1691 * sz_limit size upper limit of each region
1692 *
1693 * This function merges monitoring target regions which are adjacent and their
1694 * access frequencies are similar. This is for minimizing the monitoring
1695 * overhead under the dynamically changeable access pattern. If a merge was
1696 * unnecessarily made, later 'kdamond_split_regions()' will revert it.
1697 *
1698 * The total number of regions could be higher than the user-defined limit,
1699 * max_nr_regions for some cases. For example, the user can update
1700 * max_nr_regions to a number that is lower than the current number of regions
1701 * while DAMON is running. For such a case, repeat merging until the limit is
1702 * met while increasing @threshold up to possible maximum level.
1703 */
1704 static void kdamond_merge_regions(struct damon_ctx *c, unsigned int threshold,
1705 unsigned long sz_limit)
1706 {
1707 struct damon_target *t;
1708 unsigned int nr_regions;
1709 unsigned int max_thres;
1710
1711 max_thres = c->attrs.aggr_interval /
1712 (c->attrs.sample_interval ? c->attrs.sample_interval : 1);
1713 do {
1714 nr_regions = 0;
1715 damon_for_each_target(t, c) {
1716 damon_merge_regions_of(t, threshold, sz_limit);
1717 nr_regions += damon_nr_regions(t);
1718 }
1719 threshold = max(1, threshold * 2);
1720 } while (nr_regions > c->attrs.max_nr_regions &&
1721 threshold / 2 < max_thres);
1722 }
1723
1724 /*
1725 * Split a region in two
1726 *
1727 * r the region to be split
1728 * sz_r size of the first sub-region that will be made
1729 */
1730 static void damon_split_region_at(struct damon_target *t,
1731 struct damon_region *r, unsigned long sz_r)
1732 {
1733 struct damon_region *new;
1734
1735 new = damon_new_region(r->ar.start + sz_r, r->ar.end);
1736 if (!new)
1737 return;
1738
1739 r->ar.end = new->ar.start;
1740
1741 new->age = r->age;
1742 new->last_nr_accesses = r->last_nr_accesses;
1743 new->nr_accesses_bp = r->nr_accesses_bp;
1744 new->nr_accesses = r->nr_accesses;
1745
1746 damon_insert_region(new, r, damon_next_region(r), t);
1747 }
1748
1749 /* Split every region in the given target into 'nr_subs' regions */
1750 static void damon_split_regions_of(struct damon_target *t, int nr_subs)
1751 {
1752 struct damon_region *r, *next;
1753 unsigned long sz_region, sz_sub = 0;
1754 int i;
1755
1756 damon_for_each_region_safe(r, next, t) {
1757 sz_region = damon_sz_region(r);
1758
1759 for (i = 0; i < nr_subs - 1 &&
1760 sz_region > 2 * DAMON_MIN_REGION; i++) {
1761 /*
1762 * Randomly select size of left sub-region to be at
1763 * least 10% and at most 90% of the original region
1764 */
1765 sz_sub = ALIGN_DOWN(damon_rand(1, 10) *
1766 sz_region / 10, DAMON_MIN_REGION);
1767 /* Do not allow blank region */
1768 if (sz_sub == 0 || sz_sub >= sz_region)
1769 continue;
1770
1771 damon_split_region_at(t, r, sz_sub);
1772 sz_region = sz_sub;
1773 }
1774 }
1775 }
1776
1777 /*
1778 * Split every target region into randomly-sized small regions
1779 *
1780 * This function splits every target region into random-sized small regions if
1781 * current total number of the regions is equal to or smaller than half of the
1782 * user-specified maximum number of regions. This is for maximizing the
1783 * monitoring accuracy under the dynamically changeable access patterns. If a
1784 * split was unnecessarily made, later 'kdamond_merge_regions()' will revert
1785 * it.
1786 */
1787 static void kdamond_split_regions(struct damon_ctx *ctx)
1788 {
1789 struct damon_target *t;
1790 unsigned int nr_regions = 0;
1791 static unsigned int last_nr_regions;
1792 int nr_subregions = 2;
1793
1794 damon_for_each_target(t, ctx)
1795 nr_regions += damon_nr_regions(t);
1796
1797 if (nr_regions > ctx->attrs.max_nr_regions / 2)
1798 return;
1799
1800 /* Maybe the middle of the region has different access frequency */
1801 if (last_nr_regions == nr_regions &&
1802 nr_regions < ctx->attrs.max_nr_regions / 3)
1803 nr_subregions = 3;
1804
1805 damon_for_each_target(t, ctx)
1806 damon_split_regions_of(t, nr_subregions);
1807
1808 last_nr_regions = nr_regions;
1809 }
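
/*
 * For example (hypothetical numbers), with max_nr_regions of 1000: splitting
 * happens only while the current number of regions is at most 500.  If the
 * number of regions stayed the same since the last aggregation (e.g., stuck
 * at 200, which is below 1000 / 3), each region is split into three
 * sub-regions instead of two, to probe whether the middle of the regions has
 * a different access frequency.
 */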
1810
1811 /*
1812 * Check whether current monitoring should be stopped
1813 *
1814 * The monitoring is stopped when either the user has requested to stop, or all
1815 * monitoring targets are invalid.
1816 *
1817 * Returns true if the current monitoring should be stopped.
1818 */
1819 static bool kdamond_need_stop(struct damon_ctx *ctx)
1820 {
1821 struct damon_target *t;
1822
1823 if (kthread_should_stop())
1824 return true;
1825
1826 if (!ctx->ops.target_valid)
1827 return false;
1828
1829 damon_for_each_target(t, ctx) {
1830 if (ctx->ops.target_valid(t))
1831 return false;
1832 }
1833
1834 return true;
1835 }
1836
1837 static int damos_get_wmark_metric_value(enum damos_wmark_metric metric,
1838 unsigned long *metric_value)
1839 {
1840 switch (metric) {
1841 case DAMOS_WMARK_FREE_MEM_RATE:
1842 *metric_value = global_zone_page_state(NR_FREE_PAGES) * 1000 /
1843 totalram_pages();
1844 return 0;
1845 default:
1846 break;
1847 }
1848 return -EINVAL;
1849 }
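
/*
 * The free memory rate above is in per-thousand of total RAM.  For example
 * (hypothetical numbers), with 16 GiB of total RAM and 4 GiB worth of free
 * pages, the metric value would be about 250.
 */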
1850
1851 /*
1852 * Returns zero if the scheme is active. Otherwise, returns the time to wait
1853 * for the next watermark check, in microseconds.
1854 */
1855 static unsigned long damos_wmark_wait_us(struct damos *scheme)
1856 {
1857 unsigned long metric;
1858
1859 if (damos_get_wmark_metric_value(scheme->wmarks.metric, &metric))
1860 return 0;
1861
1862 /* higher than high watermark or lower than low watermark */
1863 if (metric > scheme->wmarks.high || scheme->wmarks.low > metric) {
1864 if (scheme->wmarks.activated)
1865 pr_debug("deactivate a scheme (%d) for %s wmark\n",
1866 scheme->action,
1867 metric > scheme->wmarks.high ?
1868 "high" : "low");
1869 scheme->wmarks.activated = false;
1870 return scheme->wmarks.interval;
1871 }
1872
1873 /* inactive and higher than middle watermark */
1874 if ((scheme->wmarks.high >= metric && metric >= scheme->wmarks.mid) &&
1875 !scheme->wmarks.activated)
1876 return scheme->wmarks.interval;
1877
1878 if (!scheme->wmarks.activated)
1879 pr_debug("activate a scheme (%d)\n", scheme->action);
1880 scheme->wmarks.activated = true;
1881 return 0;
1882 }
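
/*
 * A walk-through of the checks above with hypothetical watermarks: suppose a
 * scheme uses DAMOS_WMARK_FREE_MEM_RATE with high=500, mid=400, and low=200
 * (per-thousand).  If the measured free memory rate is 600 or 100, the scheme
 * is deactivated and the caller waits wmarks.interval microseconds.  If the
 * rate is 450 and the scheme is currently inactive, it stays inactive, since
 * the rate has not fallen to the middle watermark yet.  Once the rate drops
 * to 350, the scheme activates and this function returns zero.
 */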
1883
1884 static void kdamond_usleep(unsigned long usecs)
1885 {
1886 /* See Documentation/timers/timers-howto.rst for the thresholds */
1887 if (usecs > 20 * USEC_PER_MSEC)
1888 schedule_timeout_idle(usecs_to_jiffies(usecs));
1889 else
1890 usleep_idle_range(usecs, usecs + 1);
1891 }
1892
1893 /* Returns zero if a scheme got activated, or a negative error code otherwise */
1894 static int kdamond_wait_activation(struct damon_ctx *ctx)
1895 {
1896 struct damos *s;
1897 unsigned long wait_time;
1898 unsigned long min_wait_time = 0;
1899 bool init_wait_time = false;
1900
1901 while (!kdamond_need_stop(ctx)) {
1902 damon_for_each_scheme(s, ctx) {
1903 wait_time = damos_wmark_wait_us(s);
1904 if (!init_wait_time || wait_time < min_wait_time) {
1905 init_wait_time = true;
1906 min_wait_time = wait_time;
1907 }
1908 }
1909 if (!min_wait_time)
1910 return 0;
1911
1912 kdamond_usleep(min_wait_time);
1913
1914 if (ctx->callback.after_wmarks_check &&
1915 ctx->callback.after_wmarks_check(ctx))
1916 break;
1917 }
1918 return -EBUSY;
1919 }
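
/*
 * For example (hypothetical schemes), if one scheme reports a 1000000 us wait
 * and another reports zero (i.e., it is active), min_wait_time becomes zero
 * and this function returns zero immediately.  If every scheme reports a
 * positive wait time, the kdamond sleeps for the shortest of them and then
 * re-evaluates the watermarks, until a scheme activates or the monitoring
 * should stop.
 */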
1920
1921 static void kdamond_init_intervals_sis(struct damon_ctx *ctx)
1922 {
1923 unsigned long sample_interval = ctx->attrs.sample_interval ?
1924 ctx->attrs.sample_interval : 1;
1925 unsigned long apply_interval;
1926 struct damos *scheme;
1927
1928 ctx->passed_sample_intervals = 0;
1929 ctx->next_aggregation_sis = ctx->attrs.aggr_interval / sample_interval;
1930 ctx->next_ops_update_sis = ctx->attrs.ops_update_interval /
1931 sample_interval;
1932
1933 damon_for_each_scheme(scheme, ctx) {
1934 apply_interval = scheme->apply_interval_us ?
1935 scheme->apply_interval_us : ctx->attrs.aggr_interval;
1936 scheme->next_apply_sis = apply_interval / sample_interval;
1937 }
1938 }
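
/*
 * Example of the sampling-interval-based bookkeeping above, with hypothetical
 * attribute values: with a 5000 us sample_interval, a 100000 us aggr_interval,
 * and a 1000000 us ops_update_interval, next_aggregation_sis becomes 20 and
 * next_ops_update_sis becomes 200.  A scheme with apply_interval_us of zero
 * falls back to the aggregation interval, so its next_apply_sis is also 20.
 */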
1939
1940 /*
1941 * The monitoring daemon that runs as a kernel thread
1942 */
1943 static int kdamond_fn(void *data)
1944 {
1945 struct damon_ctx *ctx = data;
1946 struct damon_target *t;
1947 struct damon_region *r, *next;
1948 unsigned int max_nr_accesses = 0;
1949 unsigned long sz_limit = 0;
1950
1951 pr_debug("kdamond (%d) starts\n", current->pid);
1952
1953 complete(&ctx->kdamond_started);
1954 kdamond_init_intervals_sis(ctx);
1955
1956 if (ctx->ops.init)
1957 ctx->ops.init(ctx);
1958 if (ctx->callback.before_start && ctx->callback.before_start(ctx))
1959 goto done;
1960
1961 sz_limit = damon_region_sz_limit(ctx);
1962
1963 while (!kdamond_need_stop(ctx)) {
1964 /*
1965 * ctx->attrs and ctx->next_{aggregation,ops_update}_sis could
1966 * be changed from after_wmarks_check() or after_aggregation()
1967 * callbacks. Read the values here, and use those for this
1968 * iteration. That is, values newly updated via damon_set_attrs() are
1969 * respected from the next iteration.
1970 */
1971 unsigned long next_aggregation_sis = ctx->next_aggregation_sis;
1972 unsigned long next_ops_update_sis = ctx->next_ops_update_sis;
1973 unsigned long sample_interval = ctx->attrs.sample_interval;
1974
1975 if (kdamond_wait_activation(ctx))
1976 break;
1977
1978 if (ctx->ops.prepare_access_checks)
1979 ctx->ops.prepare_access_checks(ctx);
1980 if (ctx->callback.after_sampling &&
1981 ctx->callback.after_sampling(ctx))
1982 break;
1983
1984 kdamond_usleep(sample_interval);
1985 ctx->passed_sample_intervals++;
1986
1987 if (ctx->ops.check_accesses)
1988 max_nr_accesses = ctx->ops.check_accesses(ctx);
1989
1990 if (ctx->passed_sample_intervals == next_aggregation_sis) {
1991 kdamond_merge_regions(ctx,
1992 max_nr_accesses / 10,
1993 sz_limit);
1994 if (ctx->callback.after_aggregation &&
1995 ctx->callback.after_aggregation(ctx))
1996 break;
1997 }
1998
1999 /*
2000 * do kdamond_apply_schemes() after kdamond_merge_regions() if
2001 * possible, to reduce overhead
2002 */
2003 if (!list_empty(&ctx->schemes))
2004 kdamond_apply_schemes(ctx);
2005
2006 sample_interval = ctx->attrs.sample_interval ?
2007 ctx->attrs.sample_interval : 1;
2008 if (ctx->passed_sample_intervals == next_aggregation_sis) {
2009 ctx->next_aggregation_sis = next_aggregation_sis +
2010 ctx->attrs.aggr_interval / sample_interval;
2011
2012 kdamond_reset_aggregated(ctx);
2013 kdamond_split_regions(ctx);
2014 if (ctx->ops.reset_aggregated)
2015 ctx->ops.reset_aggregated(ctx);
2016 }
2017
2018 if (ctx->passed_sample_intervals == next_ops_update_sis) {
2019 ctx->next_ops_update_sis = next_ops_update_sis +
2020 ctx->attrs.ops_update_interval /
2021 sample_interval;
2022 if (ctx->ops.update)
2023 ctx->ops.update(ctx);
2024 sz_limit = damon_region_sz_limit(ctx);
2025 }
2026 }
2027 done:
2028 damon_for_each_target(t, ctx) {
2029 damon_for_each_region_safe(r, next, t)
2030 damon_destroy_region(r, t);
2031 }
2032
2033 if (ctx->callback.before_terminate)
2034 ctx->callback.before_terminate(ctx);
2035 if (ctx->ops.cleanup)
2036 ctx->ops.cleanup(ctx);
2037
2038 pr_debug("kdamond (%d) finishes\n", current->pid);
2039 mutex_lock(&ctx->kdamond_lock);
2040 ctx->kdamond = NULL;
2041 mutex_unlock(&ctx->kdamond_lock);
2042
2043 mutex_lock(&damon_lock);
2044 nr_running_ctxs--;
2045 if (!nr_running_ctxs && running_exclusive_ctxs)
2046 running_exclusive_ctxs = false;
2047 mutex_unlock(&damon_lock);
2048
2049 return 0;
2050 }
2051
2052 /*
2053 * struct damon_system_ram_region - System RAM resource address region of
2054 * [@start, @end).
2055 * @start: Start address of the region (inclusive).
2056 * @end: End address of the region (exclusive).
2057 */
2058 struct damon_system_ram_region {
2059 unsigned long start;
2060 unsigned long end;
2061 };
2062
2063 static int walk_system_ram(struct resource *res, void *arg)
2064 {
2065 struct damon_system_ram_region *a = arg;
2066
2067 if (a->end - a->start < resource_size(res)) {
2068 a->start = res->start;
2069 a->end = res->end;
2070 }
2071 return 0;
2072 }
2073
2074 /*
2075 * Find the biggest 'System RAM' resource and store its start and end addresses
2076 * in @start and @end, respectively. If no System RAM is found, returns false.
2077 */
2078 static bool damon_find_biggest_system_ram(unsigned long *start,
2079 unsigned long *end)
2080
2081 {
2082 struct damon_system_ram_region arg = {};
2083
2084 walk_system_ram_res(0, ULONG_MAX, &arg, walk_system_ram);
2085 if (arg.end <= arg.start)
2086 return false;
2087
2088 *start = arg.start;
2089 *end = arg.end;
2090 return true;
2091 }
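
/*
 * For example, on a machine exposing two 'System RAM' resources of 2 GiB and
 * 14 GiB, walk_system_ram() keeps the larger one, so @start and @end end up
 * describing the 14 GiB range.
 */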
2092
2093 /**
2094 * damon_set_region_biggest_system_ram_default() - Set the region of the given
2095 * monitoring target as requested, or the biggest 'System RAM'.
2096 * @t: The monitoring target to set the region.
2097 * @start: The pointer to the start address of the region.
2098 * @end: The pointer to the end address of the region.
2099 *
2100 * This function sets the region of @t as requested by @start and @end. If the
2101 * values of @start and @end are zero, however, this function finds the biggest
2102 * 'System RAM' resource and sets the region to cover the resource. In the
2103 * latter case, this function saves the start and end addresses of the resource
2104 * in @start and @end, respectively.
2105 *
2106 * Return: 0 on success, negative error code otherwise.
2107 */
2108 int damon_set_region_biggest_system_ram_default(struct damon_target *t,
2109 unsigned long *start, unsigned long *end)
2110 {
2111 struct damon_addr_range addr_range;
2112
2113 if (*start > *end)
2114 return -EINVAL;
2115
2116 if (!*start && !*end &&
2117 !damon_find_biggest_system_ram(start, end))
2118 return -EINVAL;
2119
2120 addr_range.start = *start;
2121 addr_range.end = *end;
2122 return damon_set_regions(t, &addr_range, 1);
2123 }
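
/*
 * A minimal usage sketch ('t' below is a hypothetical, already-constructed
 * monitoring target):
 *
 *	unsigned long start = 0, end = 0;
 *	int err;
 *
 *	err = damon_set_region_biggest_system_ram_default(t, &start, &end);
 *	if (!err)
 *		pr_debug("monitoring [%lu, %lu)\n", start, end);
 *
 * Passing zero for both addresses asks for auto-detection of the biggest
 * 'System RAM' resource; non-zero values are used as given.
 */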
2124
2125 /**
2126 * damon_moving_sum() - Calculate an inferred moving sum value.
2127 * @mvsum: Inferred sum of the last @len_window values.
2128 * @nomvsum: Non-moving sum of the last discrete @len_window window values.
2129 * @len_window: The number of last values to take care of.
2130 * @new_value: New value that will be added to the pseudo moving sum.
2131 *
2132 * Moving sum (moving average * window size) is good for handling noise, but
2133 * the cost of keeping past values can be high for arbitrary window size. This
2134 * function implements a lightweight pseudo moving sum function that doesn't
2135 * keep the past window values.
2136 *
2137 * It simply assumes there was no noise in the past, and derives the no-noise
2138 * assumed past value to drop from @nomvsum and @len_window. @nomvsum is a
2139 * non-moving sum of the last window. For example, if @len_window is 10 and we
2140 * have 25 values, @nomvsum is the sum of the 11th to 20th of the 25 values.
2141 * Hence, this function simply drops @nomvsum / @len_window from the given
2142 * @mvsum and adds @new_value.
2143 *
2144 * For example, if @len_window is 10 and @nomvsum is 50, the last 10 values of
2145 * the last window could vary, e.g., 0, 10, 0, 10, 0, 10, 0, 0, 0, 20. For
2146 * calculating the next moving sum with a new value, we should drop 0 from 50
2147 * and add the new value. However, this function assumes it got the value 5
2148 * for each of the last ten times. Based on that assumption, when the next
2149 * value is measured, it drops the assumed past value, 5, from the current sum,
2150 * and adds the new value to get the updated pseudo-moving sum.
2151 *
2152 * This means the value could have errors, but the errors disappear on every
2153 * @len_window-aligned call. For example, if @len_window is 10, the pseudo
2154 * moving sum with the 11th to 19th values could have an error, but the sum
2155 * with the 20th value will not.
2156 *
2157 * Return: Pseudo-moving sum after getting the @new_value.
2158 */
2159 static unsigned int damon_moving_sum(unsigned int mvsum, unsigned int nomvsum,
2160 unsigned int len_window, unsigned int new_value)
2161 {
2162 return mvsum - nomvsum / len_window + new_value;
2163 }
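
/*
 * Reusing the example from the comment above: with @len_window of 10,
 * @nomvsum of 50, a current @mvsum of 50, and a @new_value of 20, the updated
 * pseudo-moving sum is 50 - 50 / 10 + 20 = 65.
 */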
2164
2165 /**
2166 * damon_update_region_access_rate() - Update the access rate of a region.
2167 * @r: The DAMON region to update for its access check result.
2168 * @accessed: Whether the region was accessed during the last sampling interval.
2169 * @attrs: The damon_attrs of the DAMON context.
2170 *
2171 * Update the access rate of a region with the region's last sampling interval
2172 * access check result.
2173 *
2174 * Usually this will be called by &damon_operations->check_accesses callback.
2175 */
2176 void damon_update_region_access_rate(struct damon_region *r, bool accessed,
2177 struct damon_attrs *attrs)
2178 {
2179 unsigned int len_window = 1;
2180
2181 /*
2182 * sample_interval can be zero, but cannot be larger than
2183 * aggr_interval, owing to validation of damon_set_attrs().
2184 */
2185 if (attrs->sample_interval)
2186 len_window = damon_max_nr_accesses(attrs);
2187 r->nr_accesses_bp = damon_moving_sum(r->nr_accesses_bp,
2188 r->last_nr_accesses * 10000, len_window,
2189 accessed ? 10000 : 0);
2190
2191 if (accessed)
2192 r->nr_accesses++;
2193 }
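
/*
 * For example (hypothetical attrs), with a 5000 us sample_interval and a
 * 100000 us aggr_interval, len_window is 20.  Each call then subtracts
 * last_nr_accesses * 10000 / 20 from nr_accesses_bp and adds 10000 if the
 * region was accessed in the last sampling interval, or 0 otherwise.
 */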
2194
2195 static int __init damon_init(void)
2196 {
2197 damon_region_cache = KMEM_CACHE(damon_region, 0);
2198 if (unlikely(!damon_region_cache)) {
2199 pr_err("creating damon_region_cache fails\n");
2200 return -ENOMEM;
2201 }
2202
2203 return 0;
2204 }
2205
2206 subsys_initcall(damon_init);
2207
2208 #include "core-test.h"
2209