// SPDX-License-Identifier: GPL-2.0
/*
 * Data Access Monitor
 *
 * Author: SeongJae Park <sj@kernel.org>
 */

#define pr_fmt(fmt) "damon: " fmt

#include <linux/damon.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/mm.h>
#include <linux/psi.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/string_choices.h>

#define CREATE_TRACE_POINTS
#include <trace/events/damon.h>

#ifdef CONFIG_DAMON_KUNIT_TEST
#undef DAMON_MIN_REGION
#define DAMON_MIN_REGION 1
#endif

static DEFINE_MUTEX(damon_lock);
static int nr_running_ctxs;
static bool running_exclusive_ctxs;

static DEFINE_MUTEX(damon_ops_lock);
static struct damon_operations damon_registered_ops[NR_DAMON_OPS];

static struct kmem_cache *damon_region_cache __ro_after_init;

/* Should be called under damon_ops_lock with id smaller than NR_DAMON_OPS */
static bool __damon_is_registered_ops(enum damon_ops_id id)
{
	struct damon_operations empty_ops = {};

	if (!memcmp(&empty_ops, &damon_registered_ops[id], sizeof(empty_ops)))
		return false;
	return true;
}

/**
 * damon_is_registered_ops() - Check if a given damon_operations is registered.
 * @id: Id of the damon_operations to check if registered.
 *
 * Return: true if the ops is set, false otherwise.
 */
bool damon_is_registered_ops(enum damon_ops_id id)
{
	bool registered;

	if (id >= NR_DAMON_OPS)
		return false;
	mutex_lock(&damon_ops_lock);
	registered = __damon_is_registered_ops(id);
	mutex_unlock(&damon_ops_lock);
	return registered;
}

/**
 * damon_register_ops() - Register a monitoring operations set to DAMON.
 * @ops: monitoring operations set to register.
 *
 * This function registers a monitoring operations set of valid &struct
 * damon_operations->id so that others can find and use them later.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int damon_register_ops(struct damon_operations *ops)
{
	int err = 0;

	if (ops->id >= NR_DAMON_OPS)
		return -EINVAL;

	mutex_lock(&damon_ops_lock);
	/* Fail for already registered ops */
	if (__damon_is_registered_ops(ops->id))
		err = -EINVAL;
	else
		damon_registered_ops[ops->id] = *ops;
	mutex_unlock(&damon_ops_lock);
	return err;
}

/**
 * damon_select_ops() - Select a monitoring operations set to use with the
 * context.
 * @ctx: monitoring context to use the operations.
 * @id: id of the registered monitoring operations to select.
 *
 * This function finds the registered monitoring operations set of @id and
 * makes @ctx use it.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int damon_select_ops(struct damon_ctx *ctx, enum damon_ops_id id)
{
	int err = 0;

	if (id >= NR_DAMON_OPS)
		return -EINVAL;

	mutex_lock(&damon_ops_lock);
	if (!__damon_is_registered_ops(id))
		err = -EINVAL;
	else
		ctx->ops = damon_registered_ops[id];
	mutex_unlock(&damon_ops_lock);
	return err;
}
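
/*
 * Example (an illustrative sketch, not part of this file's build): how an
 * operations set provider and a user would typically use the two functions
 * above.  The 'my_ops' structure and its callbacks are hypothetical;
 * DAMON_OPS_VADDR is assumed to be one of the enum damon_ops_id values.
 *
 *	static int __init my_ops_init(void)
 *	{
 *		static struct damon_operations my_ops = {
 *			.id = DAMON_OPS_VADDR,
 *			// ... callback pointers ...
 *		};
 *
 *		return damon_register_ops(&my_ops);
 *	}
 *
 *	// Later, a context can be bound to the registered set:
 *	//	if (damon_is_registered_ops(DAMON_OPS_VADDR))
 *	//		err = damon_select_ops(ctx, DAMON_OPS_VADDR);
 */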

/*
 * Construct a damon_region struct
 *
 * Returns the pointer to the new struct if success, or NULL otherwise
 */
struct damon_region *damon_new_region(unsigned long start, unsigned long end)
{
	struct damon_region *region;

	region = kmem_cache_alloc(damon_region_cache, GFP_KERNEL);
	if (!region)
		return NULL;

	region->ar.start = start;
	region->ar.end = end;
	region->nr_accesses = 0;
	region->nr_accesses_bp = 0;
	INIT_LIST_HEAD(&region->list);

	region->age = 0;
	region->last_nr_accesses = 0;

	return region;
}

void damon_add_region(struct damon_region *r, struct damon_target *t)
{
	list_add_tail(&r->list, &t->regions_list);
	t->nr_regions++;
}

static void damon_del_region(struct damon_region *r, struct damon_target *t)
{
	list_del(&r->list);
	t->nr_regions--;
}

static void damon_free_region(struct damon_region *r)
{
	kmem_cache_free(damon_region_cache, r);
}

void damon_destroy_region(struct damon_region *r, struct damon_target *t)
{
	damon_del_region(r, t);
	damon_free_region(r);
}
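
/*
 * Example (an illustrative sketch, not part of this file's build): the basic
 * region lifecycle against a target.  The address values are hypothetical.
 *
 *	struct damon_region *r;
 *
 *	r = damon_new_region(0x100000, 0x200000);
 *	if (!r)
 *		return -ENOMEM;
 *	damon_add_region(r, t);		// t is a constructed damon_target
 *	// ... monitoring uses the region ...
 *	damon_destroy_region(r, t);	// unlinks from t and frees r
 */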

/*
 * Check whether a region is intersecting an address range
 *
 * Returns true if it is.
 */
static bool damon_intersect(struct damon_region *r,
		struct damon_addr_range *re)
{
	return !(r->ar.end <= re->start || re->end <= r->ar.start);
}

/*
 * Fill holes in regions with new regions.
 */
static int damon_fill_regions_holes(struct damon_region *first,
		struct damon_region *last, struct damon_target *t)
{
	struct damon_region *r = first;

	damon_for_each_region_from(r, t) {
		struct damon_region *next, *newr;

		if (r == last)
			break;
		next = damon_next_region(r);
		if (r->ar.end != next->ar.start) {
			newr = damon_new_region(r->ar.end, next->ar.start);
			if (!newr)
				return -ENOMEM;
			damon_insert_region(newr, r, next, t);
		}
	}
	return 0;
}

/*
 * damon_set_regions() - Set regions of a target for given address ranges.
 * @t: the given target.
 * @ranges: array of new monitoring target ranges.
 * @nr_ranges: length of @ranges.
 * @min_sz_region: minimum region size.
 *
 * This function adds new regions to, or modifies existing regions of, a
 * monitoring target to fit in specific ranges.
 *
 * Return: 0 if success, or negative error code otherwise.
 */
int damon_set_regions(struct damon_target *t, struct damon_addr_range *ranges,
		unsigned int nr_ranges, unsigned long min_sz_region)
{
	struct damon_region *r, *next;
	unsigned int i;
	int err;

	/* Remove regions which are not in the new ranges */
	damon_for_each_region_safe(r, next, t) {
		for (i = 0; i < nr_ranges; i++) {
			if (damon_intersect(r, &ranges[i]))
				break;
		}
		if (i == nr_ranges)
			damon_destroy_region(r, t);
	}

	r = damon_first_region(t);
	/* Add new regions or resize existing regions to fit in the ranges */
	for (i = 0; i < nr_ranges; i++) {
		struct damon_region *first = NULL, *last, *newr;
		struct damon_addr_range *range;

		range = &ranges[i];
		/* Get the first/last regions intersecting with the range */
		damon_for_each_region_from(r, t) {
			if (damon_intersect(r, range)) {
				if (!first)
					first = r;
				last = r;
			}
			if (r->ar.start >= range->end)
				break;
		}
		if (!first) {
			/* no region intersects with this range */
			newr = damon_new_region(
					ALIGN_DOWN(range->start,
						min_sz_region),
					ALIGN(range->end, min_sz_region));
			if (!newr)
				return -ENOMEM;
			damon_insert_region(newr, damon_prev_region(r), r, t);
		} else {
			/* resize intersecting regions to fit in this range */
			first->ar.start = ALIGN_DOWN(range->start,
					min_sz_region);
			last->ar.end = ALIGN(range->end, min_sz_region);

			/* fill possible holes in the range */
			err = damon_fill_regions_holes(first, last, t);
			if (err)
				return err;
		}
	}
	return 0;
}
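
/*
 * Example (an illustrative sketch, not part of this file's build): replacing
 * a target's regions with two monitoring ranges.  The addresses are
 * hypothetical; DAMON_MIN_REGION is the usual minimum region size.
 *
 *	struct damon_addr_range ranges[] = {
 *		{ .start = 0x100000, .end = 0x300000 },
 *		{ .start = 0x500000, .end = 0x600000 },
 *	};
 *	int err;
 *
 *	err = damon_set_regions(t, ranges, ARRAY_SIZE(ranges),
 *			DAMON_MIN_REGION);
 *	// On success, t's regions now exactly cover the two (aligned) ranges,
 *	// with pre-existing intersecting regions resized rather than replaced.
 */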

struct damos_filter *damos_new_filter(enum damos_filter_type type,
		bool matching, bool allow)
{
	struct damos_filter *filter;

	filter = kmalloc(sizeof(*filter), GFP_KERNEL);
	if (!filter)
		return NULL;
	filter->type = type;
	filter->matching = matching;
	filter->allow = allow;
	INIT_LIST_HEAD(&filter->list);
	return filter;
}

/**
 * damos_filter_for_ops() - Return if the filter is an ops-handled one.
 * @type: type of the filter.
 *
 * Return: true if the filter of @type needs to be handled by ops layer, false
 * otherwise.
 */
bool damos_filter_for_ops(enum damos_filter_type type)
{
	switch (type) {
	case DAMOS_FILTER_TYPE_ADDR:
	case DAMOS_FILTER_TYPE_TARGET:
		return false;
	default:
		break;
	}
	return true;
}

void damos_add_filter(struct damos *s, struct damos_filter *f)
{
	if (damos_filter_for_ops(f->type))
		list_add_tail(&f->list, &s->ops_filters);
	else
		list_add_tail(&f->list, &s->filters);
}

static void damos_del_filter(struct damos_filter *f)
{
	list_del(&f->list);
}

static void damos_free_filter(struct damos_filter *f)
{
	kfree(f);
}

void damos_destroy_filter(struct damos_filter *f)
{
	damos_del_filter(f);
	damos_free_filter(f);
}
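
/*
 * Example (an illustrative sketch, not part of this file's build): creating
 * an address-range filter that allows only a given range.  Note that
 * damos_add_filter() routes the filter to s->filters or s->ops_filters
 * depending on damos_filter_for_ops(); DAMOS_FILTER_TYPE_ADDR is
 * core-handled.  The range values are hypothetical.
 *
 *	struct damos_filter *f;
 *
 *	f = damos_new_filter(DAMOS_FILTER_TYPE_ADDR, true, true);
 *	if (!f)
 *		return -ENOMEM;
 *	f->addr_range = (struct damon_addr_range){
 *		.start = 0x100000, .end = 0x200000 };
 *	damos_add_filter(s, f);		// s is a constructed struct damos
 */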

struct damos_quota_goal *damos_new_quota_goal(
		enum damos_quota_goal_metric metric,
		unsigned long target_value)
{
	struct damos_quota_goal *goal;

	goal = kmalloc(sizeof(*goal), GFP_KERNEL);
	if (!goal)
		return NULL;
	goal->metric = metric;
	goal->target_value = target_value;
	INIT_LIST_HEAD(&goal->list);
	return goal;
}

void damos_add_quota_goal(struct damos_quota *q, struct damos_quota_goal *g)
{
	list_add_tail(&g->list, &q->goals);
}

static void damos_del_quota_goal(struct damos_quota_goal *g)
{
	list_del(&g->list);
}

static void damos_free_quota_goal(struct damos_quota_goal *g)
{
	kfree(g);
}

void damos_destroy_quota_goal(struct damos_quota_goal *g)
{
	damos_del_quota_goal(g);
	damos_free_quota_goal(g);
}

/* initialize fields of @quota that normally API users wouldn't set */
static struct damos_quota *damos_quota_init(struct damos_quota *quota)
{
	quota->esz = 0;
	quota->total_charged_sz = 0;
	quota->total_charged_ns = 0;
	quota->charged_sz = 0;
	quota->charged_from = 0;
	quota->charge_target_from = NULL;
	quota->charge_addr_from = 0;
	quota->esz_bp = 0;
	return quota;
}

struct damos *damon_new_scheme(struct damos_access_pattern *pattern,
		enum damos_action action,
		unsigned long apply_interval_us,
		struct damos_quota *quota,
		struct damos_watermarks *wmarks,
		int target_nid)
{
	struct damos *scheme;

	scheme = kmalloc(sizeof(*scheme), GFP_KERNEL);
	if (!scheme)
		return NULL;
	scheme->pattern = *pattern;
	scheme->action = action;
	scheme->apply_interval_us = apply_interval_us;
	/*
	 * next_apply_sis will be set when kdamond starts.  While kdamond is
	 * running, it will also be updated when the scheme is added to the
	 * DAMON context, or damon_attrs are updated.
	 */
	scheme->next_apply_sis = 0;
	scheme->walk_completed = false;
	INIT_LIST_HEAD(&scheme->filters);
	INIT_LIST_HEAD(&scheme->ops_filters);
	scheme->stat = (struct damos_stat){};
	INIT_LIST_HEAD(&scheme->list);

	scheme->quota = *(damos_quota_init(quota));
	/* quota.goals should be separately set by caller */
	INIT_LIST_HEAD(&scheme->quota.goals);

	scheme->wmarks = *wmarks;
	scheme->wmarks.activated = true;

	scheme->migrate_dests = (struct damos_migrate_dests){};
	scheme->target_nid = target_nid;

	return scheme;
}
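
/*
 * Example (an illustrative sketch, not part of this file's build):
 * constructing a scheme for regions that have not been accessed for a
 * while.  The pattern numbers are hypothetical, the zero apply interval
 * makes the scheme use the aggregation interval, the zero-initialized quota
 * means no limits, and a zeroed wmarks disables watermark-based
 * deactivation.  DAMOS_PAGEOUT is assumed to be an available damos_action.
 *
 *	struct damos_access_pattern pattern = {
 *		.min_sz_region = 0, .max_sz_region = ULONG_MAX,
 *		.min_nr_accesses = 0, .max_nr_accesses = 0,
 *		.min_age_region = 10, .max_age_region = UINT_MAX,
 *	};
 *	struct damos_quota quota = {};
 *	struct damos_watermarks wmarks = {};
 *	struct damos *s;
 *
 *	s = damon_new_scheme(&pattern, DAMOS_PAGEOUT, 0, &quota, &wmarks,
 *			NUMA_NO_NODE);
 *	if (!s)
 *		return -ENOMEM;
 *	damon_add_scheme(ctx, s);	// ctx is a constructed damon_ctx
 */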

static void damos_set_next_apply_sis(struct damos *s, struct damon_ctx *ctx)
{
	unsigned long sample_interval = ctx->attrs.sample_interval ?
		ctx->attrs.sample_interval : 1;
	unsigned long apply_interval = s->apply_interval_us ?
		s->apply_interval_us : ctx->attrs.aggr_interval;

	s->next_apply_sis = ctx->passed_sample_intervals +
		apply_interval / sample_interval;
}

void damon_add_scheme(struct damon_ctx *ctx, struct damos *s)
{
	list_add_tail(&s->list, &ctx->schemes);
	damos_set_next_apply_sis(s, ctx);
}

static void damon_del_scheme(struct damos *s)
{
	list_del(&s->list);
}

static void damon_free_scheme(struct damos *s)
{
	kfree(s);
}

void damon_destroy_scheme(struct damos *s)
{
	struct damos_quota_goal *g, *g_next;
	struct damos_filter *f, *next;

	damos_for_each_quota_goal_safe(g, g_next, &s->quota)
		damos_destroy_quota_goal(g);

	damos_for_each_filter_safe(f, next, s)
		damos_destroy_filter(f);

	damos_for_each_ops_filter_safe(f, next, s)
		damos_destroy_filter(f);

	kfree(s->migrate_dests.node_id_arr);
	kfree(s->migrate_dests.weight_arr);
	damon_del_scheme(s);
	damon_free_scheme(s);
}

/*
 * Construct a damon_target struct
 *
 * Returns the pointer to the new struct if success, or NULL otherwise
 */
struct damon_target *damon_new_target(void)
{
	struct damon_target *t;

	t = kmalloc(sizeof(*t), GFP_KERNEL);
	if (!t)
		return NULL;

	t->pid = NULL;
	t->nr_regions = 0;
	INIT_LIST_HEAD(&t->regions_list);
	INIT_LIST_HEAD(&t->list);

	return t;
}

void damon_add_target(struct damon_ctx *ctx, struct damon_target *t)
{
	list_add_tail(&t->list, &ctx->adaptive_targets);
}

bool damon_targets_empty(struct damon_ctx *ctx)
{
	return list_empty(&ctx->adaptive_targets);
}

static void damon_del_target(struct damon_target *t)
{
	list_del(&t->list);
}

void damon_free_target(struct damon_target *t)
{
	struct damon_region *r, *next;

	damon_for_each_region_safe(r, next, t)
		damon_free_region(r);
	kfree(t);
}

void damon_destroy_target(struct damon_target *t, struct damon_ctx *ctx)
{
	if (ctx && ctx->ops.cleanup_target)
		ctx->ops.cleanup_target(t);

	damon_del_target(t);
	damon_free_target(t);
}

unsigned int damon_nr_regions(struct damon_target *t)
{
	return t->nr_regions;
}

struct damon_ctx *damon_new_ctx(void)
{
	struct damon_ctx *ctx;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return NULL;

	init_completion(&ctx->kdamond_started);

	ctx->attrs.sample_interval = 5 * 1000;
	ctx->attrs.aggr_interval = 100 * 1000;
	ctx->attrs.ops_update_interval = 60 * 1000 * 1000;

	ctx->passed_sample_intervals = 0;
	/* These will be set from kdamond_init_ctx() */
	ctx->next_aggregation_sis = 0;
	ctx->next_ops_update_sis = 0;

	mutex_init(&ctx->kdamond_lock);
	INIT_LIST_HEAD(&ctx->call_controls);
	mutex_init(&ctx->call_controls_lock);
	mutex_init(&ctx->walk_control_lock);

	ctx->attrs.min_nr_regions = 10;
	ctx->attrs.max_nr_regions = 1000;

	ctx->addr_unit = 1;
	ctx->min_sz_region = DAMON_MIN_REGION;

	INIT_LIST_HEAD(&ctx->adaptive_targets);
	INIT_LIST_HEAD(&ctx->schemes);

	return ctx;
}
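
/*
 * Example (an illustrative sketch, not part of this file's build): the
 * minimal construction flow for a monitoring context with a single target.
 * DAMON_OPS_VADDR is assumed to be registered, and 'pid_nr' is a
 * hypothetical pid number of the process to monitor.
 *
 *	struct damon_ctx *ctx;
 *	struct damon_target *t;
 *
 *	ctx = damon_new_ctx();
 *	if (!ctx)
 *		return -ENOMEM;
 *	if (damon_select_ops(ctx, DAMON_OPS_VADDR)) {
 *		damon_destroy_ctx(ctx);
 *		return -EINVAL;
 *	}
 *	t = damon_new_target();
 *	if (!t) {
 *		damon_destroy_ctx(ctx);
 *		return -ENOMEM;
 *	}
 *	t->pid = find_get_pid(pid_nr);
 *	damon_add_target(ctx, t);
 */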

static void damon_destroy_targets(struct damon_ctx *ctx)
{
	struct damon_target *t, *next_t;

	damon_for_each_target_safe(t, next_t, ctx)
		damon_destroy_target(t, ctx);
}

void damon_destroy_ctx(struct damon_ctx *ctx)
{
	struct damos *s, *next_s;

	damon_destroy_targets(ctx);

	damon_for_each_scheme_safe(s, next_s, ctx)
		damon_destroy_scheme(s);

	kfree(ctx);
}

static bool damon_attrs_equals(const struct damon_attrs *attrs1,
		const struct damon_attrs *attrs2)
{
	const struct damon_intervals_goal *ig1 = &attrs1->intervals_goal;
	const struct damon_intervals_goal *ig2 = &attrs2->intervals_goal;

	return attrs1->sample_interval == attrs2->sample_interval &&
		attrs1->aggr_interval == attrs2->aggr_interval &&
		attrs1->ops_update_interval == attrs2->ops_update_interval &&
		attrs1->min_nr_regions == attrs2->min_nr_regions &&
		attrs1->max_nr_regions == attrs2->max_nr_regions &&
		ig1->access_bp == ig2->access_bp &&
		ig1->aggrs == ig2->aggrs &&
		ig1->min_sample_us == ig2->min_sample_us &&
		ig1->max_sample_us == ig2->max_sample_us;
}

static unsigned int damon_age_for_new_attrs(unsigned int age,
		struct damon_attrs *old_attrs, struct damon_attrs *new_attrs)
{
	return age * old_attrs->aggr_interval / new_attrs->aggr_interval;
}

/* convert access ratio in bp (per 10,000) to nr_accesses */
static unsigned int damon_accesses_bp_to_nr_accesses(
		unsigned int accesses_bp, struct damon_attrs *attrs)
{
	return accesses_bp * damon_max_nr_accesses(attrs) / 10000;
}

/*
 * Convert nr_accesses to access ratio in bp (per 10,000).
 *
 * Callers should ensure attrs.aggr_interval is not zero, like
 * damon_update_monitoring_results() does.  Otherwise, divide-by-zero would
 * happen.
 */
static unsigned int damon_nr_accesses_to_accesses_bp(
		unsigned int nr_accesses, struct damon_attrs *attrs)
{
	return nr_accesses * 10000 / damon_max_nr_accesses(attrs);
}

static unsigned int damon_nr_accesses_for_new_attrs(unsigned int nr_accesses,
		struct damon_attrs *old_attrs, struct damon_attrs *new_attrs)
{
	return damon_accesses_bp_to_nr_accesses(
			damon_nr_accesses_to_accesses_bp(
				nr_accesses, old_attrs),
			new_attrs);
}
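
/*
 * Worked example of the conversion above, assuming damon_max_nr_accesses()
 * computes aggr_interval / sample_interval: with old attrs of 5ms sampling
 * and 100ms aggregation, the maximum nr_accesses is 20, so nr_accesses == 10
 * converts to 10 * 10000 / 20 == 5000bp (half of the samplings saw an
 * access).  If the new attrs use 10ms sampling and 100ms aggregation, the
 * maximum becomes 10, and the same 5000bp ratio maps back to
 * 5000 * 10 / 10000 == 5 as the rescaled nr_accesses.
 */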

static void damon_update_monitoring_result(struct damon_region *r,
		struct damon_attrs *old_attrs, struct damon_attrs *new_attrs,
		bool aggregating)
{
	if (!aggregating) {
		r->nr_accesses = damon_nr_accesses_for_new_attrs(
				r->nr_accesses, old_attrs, new_attrs);
		r->nr_accesses_bp = r->nr_accesses * 10000;
	} else {
		/*
		 * if this is called in the middle of the aggregation, reset
		 * the aggregations we made so far for this aggregation
		 * interval.  In other words, make the status like
		 * kdamond_reset_aggregated() is called.
		 */
		r->last_nr_accesses = damon_nr_accesses_for_new_attrs(
				r->last_nr_accesses, old_attrs, new_attrs);
		r->nr_accesses_bp = r->last_nr_accesses * 10000;
		r->nr_accesses = 0;
	}
	r->age = damon_age_for_new_attrs(r->age, old_attrs, new_attrs);
}

/*
 * region->nr_accesses is the number of sampling intervals in the last
 * aggregation interval in which access to the region has been found, and
 * region->age is the number of aggregation intervals for which its access
 * pattern has been maintained.  For this reason, the real meaning of the two
 * fields depends on the current sampling interval and aggregation interval.
 * This function updates ->nr_accesses and ->age of the given damon_ctx's
 * regions for new damon_attrs.
 */
static void damon_update_monitoring_results(struct damon_ctx *ctx,
		struct damon_attrs *new_attrs, bool aggregating)
{
	struct damon_attrs *old_attrs = &ctx->attrs;
	struct damon_target *t;
	struct damon_region *r;

	/* if any interval is zero, simply skip the conversion */
	if (!old_attrs->sample_interval || !old_attrs->aggr_interval ||
			!new_attrs->sample_interval ||
			!new_attrs->aggr_interval)
		return;

	damon_for_each_target(t, ctx)
		damon_for_each_region(r, t)
			damon_update_monitoring_result(
					r, old_attrs, new_attrs, aggregating);
}

/*
 * damon_valid_intervals_goal() - return if the intervals goal of @attrs is
 * valid.
 */
static bool damon_valid_intervals_goal(struct damon_attrs *attrs)
{
	struct damon_intervals_goal *goal = &attrs->intervals_goal;

	/* tuning is disabled */
	if (!goal->aggrs)
		return true;
	if (goal->min_sample_us > goal->max_sample_us)
		return false;
	if (attrs->sample_interval < goal->min_sample_us ||
			goal->max_sample_us < attrs->sample_interval)
		return false;
	return true;
}

/**
 * damon_set_attrs() - Set attributes for the monitoring.
 * @ctx: monitoring context
 * @attrs: monitoring attributes
 *
 * This function should be called while the kdamond is not running, or from a
 * context where an access check results aggregation is not ongoing (e.g.,
 * from damon_call()).
 *
 * Every time interval is in micro-seconds.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int damon_set_attrs(struct damon_ctx *ctx, struct damon_attrs *attrs)
{
	unsigned long sample_interval = attrs->sample_interval ?
		attrs->sample_interval : 1;
	struct damos *s;
	bool aggregating = ctx->passed_sample_intervals <
		ctx->next_aggregation_sis;

	if (!damon_valid_intervals_goal(attrs))
		return -EINVAL;

	if (attrs->min_nr_regions < 3)
		return -EINVAL;
	if (attrs->min_nr_regions > attrs->max_nr_regions)
		return -EINVAL;
	if (attrs->sample_interval > attrs->aggr_interval)
		return -EINVAL;

	/* callers from outside of the core don't set this */
	if (!attrs->aggr_samples)
		attrs->aggr_samples = attrs->aggr_interval / sample_interval;

	ctx->next_aggregation_sis = ctx->passed_sample_intervals +
		attrs->aggr_interval / sample_interval;
	ctx->next_ops_update_sis = ctx->passed_sample_intervals +
		attrs->ops_update_interval / sample_interval;

	damon_update_monitoring_results(ctx, attrs, aggregating);
	ctx->attrs = *attrs;

	damon_for_each_scheme(s, ctx)
		damos_set_next_apply_sis(s, ctx);

	return 0;
}
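
/*
 * Example (an illustrative sketch, not part of this file's build): setting
 * custom monitoring attributes on a context before starting it.  All
 * intervals are in microseconds, per the kernel-doc above.
 *
 *	struct damon_attrs attrs = {
 *		.sample_interval = 5000,		// 5ms
 *		.aggr_interval = 100000,		// 100ms
 *		.ops_update_interval = 60000000,	// 60s
 *		.min_nr_regions = 10,
 *		.max_nr_regions = 1000,
 *	};
 *	int err;
 *
 *	err = damon_set_attrs(ctx, &attrs);	// -EINVAL on bad combinations
 */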

/**
 * damon_set_schemes() - Set data access monitoring based operation schemes.
 * @ctx: monitoring context
 * @schemes: array of the schemes
 * @nr_schemes: number of entries in @schemes
 *
 * This function should not be called while the kdamond of the context is
 * running.
 */
void damon_set_schemes(struct damon_ctx *ctx, struct damos **schemes,
		ssize_t nr_schemes)
{
	struct damos *s, *next;
	ssize_t i;

	damon_for_each_scheme_safe(s, next, ctx)
		damon_destroy_scheme(s);
	for (i = 0; i < nr_schemes; i++)
		damon_add_scheme(ctx, schemes[i]);
}

static struct damos_quota_goal *damos_nth_quota_goal(
		int n, struct damos_quota *q)
{
	struct damos_quota_goal *goal;
	int i = 0;

	damos_for_each_quota_goal(goal, q) {
		if (i++ == n)
			return goal;
	}
	return NULL;
}

static void damos_commit_quota_goal_union(
		struct damos_quota_goal *dst, struct damos_quota_goal *src)
{
	switch (dst->metric) {
	case DAMOS_QUOTA_NODE_MEM_USED_BP:
	case DAMOS_QUOTA_NODE_MEM_FREE_BP:
		dst->nid = src->nid;
		break;
	default:
		break;
	}
}

static void damos_commit_quota_goal(
		struct damos_quota_goal *dst, struct damos_quota_goal *src)
{
	dst->metric = src->metric;
	dst->target_value = src->target_value;
	if (dst->metric == DAMOS_QUOTA_USER_INPUT)
		dst->current_value = src->current_value;
	/* keep last_psi_total as is, since it will be updated in next cycle */
	damos_commit_quota_goal_union(dst, src);
}

/**
 * damos_commit_quota_goals() - Commit DAMOS quota goals to another quota.
 * @dst: The commit destination DAMOS quota.
 * @src: The commit source DAMOS quota.
 *
 * Copies user-specified parameters for quota goals from @src to @dst.  Users
 * should use this function for updating quota goal-level parameters of a
 * running DAMON context, instead of manual in-place updates.
 *
 * This function should be called from parameters-update safe context, like
 * damon_call().
 *
 * Return: 0 on success, negative error code otherwise.
 */
int damos_commit_quota_goals(struct damos_quota *dst, struct damos_quota *src)
{
	struct damos_quota_goal *dst_goal, *next, *src_goal, *new_goal;
	int i = 0, j = 0;

	damos_for_each_quota_goal_safe(dst_goal, next, dst) {
		src_goal = damos_nth_quota_goal(i++, src);
		if (src_goal)
			damos_commit_quota_goal(dst_goal, src_goal);
		else
			damos_destroy_quota_goal(dst_goal);
	}
	damos_for_each_quota_goal_safe(src_goal, next, src) {
		if (j++ < i)
			continue;
		new_goal = damos_new_quota_goal(
				src_goal->metric, src_goal->target_value);
		if (!new_goal)
			return -ENOMEM;
		damos_commit_quota_goal(new_goal, src_goal);
		damos_add_quota_goal(dst, new_goal);
	}
	return 0;
}

static int damos_commit_quota(struct damos_quota *dst, struct damos_quota *src)
{
	int err;

	dst->reset_interval = src->reset_interval;
	dst->ms = src->ms;
	dst->sz = src->sz;
	err = damos_commit_quota_goals(dst, src);
	if (err)
		return err;
	dst->weight_sz = src->weight_sz;
	dst->weight_nr_accesses = src->weight_nr_accesses;
	dst->weight_age = src->weight_age;
	return 0;
}

static struct damos_filter *damos_nth_filter(int n, struct damos *s)
{
	struct damos_filter *filter;
	int i = 0;

	damos_for_each_filter(filter, s) {
		if (i++ == n)
			return filter;
	}
	return NULL;
}

static struct damos_filter *damos_nth_ops_filter(int n, struct damos *s)
{
	struct damos_filter *filter;
	int i = 0;

	damos_for_each_ops_filter(filter, s) {
		if (i++ == n)
			return filter;
	}
	return NULL;
}

static void damos_commit_filter_arg(
		struct damos_filter *dst, struct damos_filter *src)
{
	switch (dst->type) {
	case DAMOS_FILTER_TYPE_MEMCG:
		dst->memcg_id = src->memcg_id;
		break;
	case DAMOS_FILTER_TYPE_ADDR:
		dst->addr_range = src->addr_range;
		break;
	case DAMOS_FILTER_TYPE_TARGET:
		dst->target_idx = src->target_idx;
		break;
	case DAMOS_FILTER_TYPE_HUGEPAGE_SIZE:
		dst->sz_range = src->sz_range;
		break;
	default:
		break;
	}
}

static void damos_commit_filter(
		struct damos_filter *dst, struct damos_filter *src)
{
	dst->type = src->type;
	dst->matching = src->matching;
	dst->allow = src->allow;
	damos_commit_filter_arg(dst, src);
}

static int damos_commit_core_filters(struct damos *dst, struct damos *src)
{
	struct damos_filter *dst_filter, *next, *src_filter, *new_filter;
	int i = 0, j = 0;

	damos_for_each_filter_safe(dst_filter, next, dst) {
		src_filter = damos_nth_filter(i++, src);
		if (src_filter)
			damos_commit_filter(dst_filter, src_filter);
		else
			damos_destroy_filter(dst_filter);
	}

	damos_for_each_filter_safe(src_filter, next, src) {
		if (j++ < i)
			continue;

		new_filter = damos_new_filter(
				src_filter->type, src_filter->matching,
				src_filter->allow);
		if (!new_filter)
			return -ENOMEM;
		damos_commit_filter_arg(new_filter, src_filter);
		damos_add_filter(dst, new_filter);
	}
	return 0;
}

static int damos_commit_ops_filters(struct damos *dst, struct damos *src)
{
	struct damos_filter *dst_filter, *next, *src_filter, *new_filter;
	int i = 0, j = 0;

	damos_for_each_ops_filter_safe(dst_filter, next, dst) {
		src_filter = damos_nth_ops_filter(i++, src);
		if (src_filter)
			damos_commit_filter(dst_filter, src_filter);
		else
			damos_destroy_filter(dst_filter);
	}

	damos_for_each_ops_filter_safe(src_filter, next, src) {
		if (j++ < i)
			continue;

		new_filter = damos_new_filter(
				src_filter->type, src_filter->matching,
				src_filter->allow);
		if (!new_filter)
			return -ENOMEM;
		damos_commit_filter_arg(new_filter, src_filter);
		damos_add_filter(dst, new_filter);
	}
	return 0;
}

/**
 * damos_filters_default_reject() - decide whether to reject memory that didn't
 * match any given filter.
 * @filters: Given DAMOS filters of a group.
 */
static bool damos_filters_default_reject(struct list_head *filters)
{
	struct damos_filter *last_filter;

	if (list_empty(filters))
		return false;
	last_filter = list_last_entry(filters, struct damos_filter, list);
	return last_filter->allow;
}

static void damos_set_filters_default_reject(struct damos *s)
{
	if (!list_empty(&s->ops_filters))
		s->core_filters_default_reject = false;
	else
		s->core_filters_default_reject =
			damos_filters_default_reject(&s->filters);
	s->ops_filters_default_reject =
		damos_filters_default_reject(&s->ops_filters);
}
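
/*
 * Worked example of the default-reject rule above: if a group's last filter
 * is an "allow" filter, memory that matched no filter is rejected (the list
 * reads as an allow-list), while if the last filter is a "reject" one,
 * unmatched memory is allowed (a deny-list).  An empty group rejects
 * nothing.  Note also the core/ops interaction: when ops filters exist, the
 * core layer never default-rejects, deferring the decision to the ops layer.
 */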

static int damos_commit_dests(struct damos *dst, struct damos *src)
{
	struct damos_migrate_dests *dst_dests, *src_dests;

	dst_dests = &dst->migrate_dests;
	src_dests = &src->migrate_dests;

	if (dst_dests->nr_dests != src_dests->nr_dests) {
		kfree(dst_dests->node_id_arr);
		kfree(dst_dests->weight_arr);

		dst_dests->node_id_arr = kmalloc_array(src_dests->nr_dests,
				sizeof(*dst_dests->node_id_arr), GFP_KERNEL);
		if (!dst_dests->node_id_arr) {
			dst_dests->weight_arr = NULL;
			return -ENOMEM;
		}

		dst_dests->weight_arr = kmalloc_array(src_dests->nr_dests,
				sizeof(*dst_dests->weight_arr), GFP_KERNEL);
		if (!dst_dests->weight_arr) {
			/* ->node_id_arr will be freed by scheme destruction */
			return -ENOMEM;
		}
	}

	dst_dests->nr_dests = src_dests->nr_dests;
	for (int i = 0; i < src_dests->nr_dests; i++) {
		dst_dests->node_id_arr[i] = src_dests->node_id_arr[i];
		dst_dests->weight_arr[i] = src_dests->weight_arr[i];
	}

	return 0;
}

static int damos_commit_filters(struct damos *dst, struct damos *src)
{
	int err;

	err = damos_commit_core_filters(dst, src);
	if (err)
		return err;
	err = damos_commit_ops_filters(dst, src);
	if (err)
		return err;
	damos_set_filters_default_reject(dst);
	return 0;
}

static struct damos *damon_nth_scheme(int n, struct damon_ctx *ctx)
{
	struct damos *s;
	int i = 0;

	damon_for_each_scheme(s, ctx) {
		if (i++ == n)
			return s;
	}
	return NULL;
}

static int damos_commit(struct damos *dst, struct damos *src)
{
	int err;

	dst->pattern = src->pattern;
	dst->action = src->action;
	dst->apply_interval_us = src->apply_interval_us;

	err = damos_commit_quota(&dst->quota, &src->quota);
	if (err)
		return err;

	dst->wmarks = src->wmarks;
	dst->target_nid = src->target_nid;

	err = damos_commit_dests(dst, src);
	if (err)
		return err;

	err = damos_commit_filters(dst, src);
	return err;
}

static int damon_commit_schemes(struct damon_ctx *dst, struct damon_ctx *src)
{
	struct damos *dst_scheme, *next, *src_scheme, *new_scheme;
	int i = 0, j = 0, err;

	damon_for_each_scheme_safe(dst_scheme, next, dst) {
		src_scheme = damon_nth_scheme(i++, src);
		if (src_scheme) {
			err = damos_commit(dst_scheme, src_scheme);
			if (err)
				return err;
		} else {
			damon_destroy_scheme(dst_scheme);
		}
	}

	damon_for_each_scheme_safe(src_scheme, next, src) {
		if (j++ < i)
			continue;
		new_scheme = damon_new_scheme(&src_scheme->pattern,
				src_scheme->action,
				src_scheme->apply_interval_us,
				&src_scheme->quota, &src_scheme->wmarks,
				NUMA_NO_NODE);
		if (!new_scheme)
			return -ENOMEM;
		err = damos_commit(new_scheme, src_scheme);
		if (err) {
			damon_destroy_scheme(new_scheme);
			return err;
		}
		damon_add_scheme(dst, new_scheme);
	}
	return 0;
}

static struct damon_target *damon_nth_target(int n, struct damon_ctx *ctx)
{
	struct damon_target *t;
	int i = 0;

	damon_for_each_target(t, ctx) {
		if (i++ == n)
			return t;
	}
	return NULL;
}

/*
 * The caller should ensure the regions of @src are
 * 1. valid (end >= start) and
 * 2. sorted by starting address.
 *
 * If @src has no region, @dst keeps current regions.
 */
static int damon_commit_target_regions(struct damon_target *dst,
		struct damon_target *src, unsigned long src_min_sz_region)
{
	struct damon_region *src_region;
	struct damon_addr_range *ranges;
	int i = 0, err;

	damon_for_each_region(src_region, src)
		i++;
	if (!i)
		return 0;

	ranges = kmalloc_array(i, sizeof(*ranges), GFP_KERNEL | __GFP_NOWARN);
	if (!ranges)
		return -ENOMEM;
	i = 0;
	damon_for_each_region(src_region, src)
		ranges[i++] = src_region->ar;
	err = damon_set_regions(dst, ranges, i, src_min_sz_region);
	kfree(ranges);
	return err;
}

static int damon_commit_target(
		struct damon_target *dst, bool dst_has_pid,
		struct damon_target *src, bool src_has_pid,
		unsigned long src_min_sz_region)
{
	int err;

	err = damon_commit_target_regions(dst, src, src_min_sz_region);
	if (err)
		return err;
	if (dst_has_pid)
		put_pid(dst->pid);
	if (src_has_pid)
		get_pid(src->pid);
	dst->pid = src->pid;
	return 0;
}

static int damon_commit_targets(
		struct damon_ctx *dst, struct damon_ctx *src)
{
	struct damon_target *dst_target, *next, *src_target, *new_target;
	int i = 0, j = 0, err;

	damon_for_each_target_safe(dst_target, next, dst) {
		src_target = damon_nth_target(i++, src);
		if (src_target) {
			err = damon_commit_target(
					dst_target, damon_target_has_pid(dst),
					src_target, damon_target_has_pid(src),
					src->min_sz_region);
			if (err)
				return err;
		} else {
			struct damos *s;

			damon_destroy_target(dst_target, dst);
			damon_for_each_scheme(s, dst) {
				if (s->quota.charge_target_from == dst_target) {
					s->quota.charge_target_from = NULL;
					s->quota.charge_addr_from = 0;
				}
			}
		}
	}

	damon_for_each_target_safe(src_target, next, src) {
		if (j++ < i)
			continue;
		new_target = damon_new_target();
		if (!new_target)
			return -ENOMEM;
		err = damon_commit_target(new_target, false,
				src_target, damon_target_has_pid(src),
				src->min_sz_region);
		if (err) {
			damon_destroy_target(new_target, NULL);
			return err;
		}
		damon_add_target(dst, new_target);
	}
	return 0;
}

/**
 * damon_commit_ctx() - Commit parameters of a DAMON context to another.
 * @dst: The commit destination DAMON context.
 * @src: The commit source DAMON context.
 *
 * This function copies user-specified parameters from @src to @dst and
 * updates the internal status and results accordingly.  Users should use this
 * function for context-level parameters update of a running context, instead
 * of manual in-place updates.
 *
 * This function should be called from parameters-update safe context, like
 * damon_call().
 *
 * Return: 0 on success, negative error code otherwise.
 */
int damon_commit_ctx(struct damon_ctx *dst, struct damon_ctx *src)
{
	int err;

	err = damon_commit_schemes(dst, src);
	if (err)
		return err;
	err = damon_commit_targets(dst, src);
	if (err)
		return err;
	/*
	 * schemes and targets should be updated first, since
	 * 1. damon_set_attrs() updates monitoring results of targets and
	 *    next_apply_sis of schemes, and
	 * 2. ops update should be done after pid handling is done (target
	 *    committing requires putting pids).
	 */
	if (!damon_attrs_equals(&dst->attrs, &src->attrs)) {
		err = damon_set_attrs(dst, &src->attrs);
		if (err)
			return err;
	}
	dst->ops = src->ops;
	dst->addr_unit = src->addr_unit;
	dst->min_sz_region = src->min_sz_region;

	return 0;
}
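
/*
 * Example (an illustrative sketch, not part of this file's build): updating
 * a running context's parameters by building a throw-away source context and
 * committing it.  'running_ctx' is hypothetical, and real users typically do
 * the commit from a damon_call()-ed function so that it is synchronized with
 * the kdamond; that wiring is elided here.
 *
 *	struct damon_ctx *src;
 *	int err;
 *
 *	src = damon_new_ctx();
 *	if (!src)
 *		return -ENOMEM;
 *	// ... set up src's attrs, targets and schemes as desired ...
 *	err = damon_commit_ctx(running_ctx, src);
 *	damon_destroy_ctx(src);
 */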

/**
 * damon_nr_running_ctxs() - Return number of currently running contexts.
 */
int damon_nr_running_ctxs(void)
{
	int nr_ctxs;

	mutex_lock(&damon_lock);
	nr_ctxs = nr_running_ctxs;
	mutex_unlock(&damon_lock);

	return nr_ctxs;
}

/* Returns the size upper limit for each monitoring region */
static unsigned long damon_region_sz_limit(struct damon_ctx *ctx)
{
	struct damon_target *t;
	struct damon_region *r;
	unsigned long sz = 0;

	damon_for_each_target(t, ctx) {
		damon_for_each_region(r, t)
			sz += damon_sz_region(r);
	}

	if (ctx->attrs.min_nr_regions)
		sz /= ctx->attrs.min_nr_regions;
	if (sz < ctx->min_sz_region)
		sz = ctx->min_sz_region;

	return sz;
}

static int kdamond_fn(void *data);

/*
 * __damon_start() - Starts monitoring with given context.
 * @ctx: monitoring context
 *
 * This function should be called while damon_lock is held.
 *
 * Return: 0 on success, negative error code otherwise.
 */
static int __damon_start(struct damon_ctx *ctx)
{
	int err = -EBUSY;

	mutex_lock(&ctx->kdamond_lock);
	if (!ctx->kdamond) {
		err = 0;
		reinit_completion(&ctx->kdamond_started);
		ctx->kdamond = kthread_run(kdamond_fn, ctx, "kdamond.%d",
				nr_running_ctxs);
		if (IS_ERR(ctx->kdamond)) {
			err = PTR_ERR(ctx->kdamond);
			ctx->kdamond = NULL;
		} else {
			wait_for_completion(&ctx->kdamond_started);
		}
	}
	mutex_unlock(&ctx->kdamond_lock);

	return err;
}

/**
 * damon_start() - Starts the monitoring for a given group of contexts.
 * @ctxs: an array of the pointers for contexts to start monitoring
 * @nr_ctxs: size of @ctxs
 * @exclusive: exclusiveness of this contexts group
 *
 * This function starts a group of monitoring threads for a group of
 * monitoring contexts.  One thread per context is created and run in
 * parallel.  The caller should handle synchronization between the threads by
 * itself.  If @exclusive is true and a group of threads that were created by
 * another 'damon_start()' call is currently running, this function does
 * nothing but returns -EBUSY.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int damon_start(struct damon_ctx **ctxs, int nr_ctxs, bool exclusive)
{
	int i;
	int err = 0;

	mutex_lock(&damon_lock);
	if ((exclusive && nr_running_ctxs) ||
			(!exclusive && running_exclusive_ctxs)) {
		mutex_unlock(&damon_lock);
		return -EBUSY;
	}

	for (i = 0; i < nr_ctxs; i++) {
		err = __damon_start(ctxs[i]);
		if (err)
			break;
		nr_running_ctxs++;
	}
	if (exclusive && nr_running_ctxs)
		running_exclusive_ctxs = true;
	mutex_unlock(&damon_lock);

	return err;
}
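
/*
 * Example (an illustrative sketch, not part of this file's build): starting
 * and later stopping a single fully-constructed context as an exclusive
 * group of one.
 *
 *	struct damon_ctx *ctxs[] = { ctx };
 *	int err;
 *
 *	err = damon_start(ctxs, 1, true);	// -EBUSY if others are running
 *	if (err)
 *		return err;
 *	// ... monitoring runs in the "kdamond.N" kthread ...
 *	err = damon_stop(ctxs, 1);
 */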

/*
 * __damon_stop() - Stops monitoring of a given context.
 * @ctx: monitoring context
 *
 * Return: 0 on success, negative error code otherwise.
 */
static int __damon_stop(struct damon_ctx *ctx)
{
	struct task_struct *tsk;

	mutex_lock(&ctx->kdamond_lock);
	tsk = ctx->kdamond;
	if (tsk) {
		get_task_struct(tsk);
		mutex_unlock(&ctx->kdamond_lock);
		kthread_stop_put(tsk);
		return 0;
	}
	mutex_unlock(&ctx->kdamond_lock);

	return -EPERM;
}

/**
 * damon_stop() - Stops the monitoring for a given group of contexts.
 * @ctxs: an array of the pointers for contexts to stop monitoring
 * @nr_ctxs: size of @ctxs
 *
 * Return: 0 on success, negative error code otherwise.
 */
int damon_stop(struct damon_ctx **ctxs, int nr_ctxs)
{
	int i, err = 0;

	for (i = 0; i < nr_ctxs; i++) {
		/* nr_running_ctxs is decremented in kdamond_fn */
		err = __damon_stop(ctxs[i]);
		if (err)
			break;
	}
	return err;
}

/**
 * damon_is_running() - Returns if a given DAMON context is running.
 * @ctx: The DAMON context to see if running.
 *
 * Return: true if @ctx is running, false otherwise.
 */
bool damon_is_running(struct damon_ctx *ctx)
{
	bool running;

	mutex_lock(&ctx->kdamond_lock);
	running = ctx->kdamond != NULL;
	mutex_unlock(&ctx->kdamond_lock);
	return running;
}

/**
 * damon_call() - Invoke a given function on DAMON worker thread (kdamond).
 * @ctx: DAMON context to call the function for.
 * @control: Control variable of the call request.
 *
 * Ask DAMON worker thread (kdamond) of @ctx to call a function with an
 * argument data that are respectively passed via &damon_call_control->fn and
 * &damon_call_control->data of @control.  Unless &damon_call_control->repeat
 * of @control is set, further wait until the kdamond finishes handling of the
 * request.  Otherwise, return as soon as the request is made.
 *
 * The kdamond executes the function with the argument in the main loop, just
 * after a sampling of the iteration is finished.  The function can hence
 * safely access the internal data of the &struct damon_ctx without additional
 * synchronization.  The return value of the function will be saved in
 * &damon_call_control->return_code.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int damon_call(struct damon_ctx *ctx, struct damon_call_control *control)
{
	if (!control->repeat)
		init_completion(&control->completion);
	control->canceled = false;
	INIT_LIST_HEAD(&control->list);

	mutex_lock(&ctx->call_controls_lock);
	list_add_tail(&control->list, &ctx->call_controls);
	mutex_unlock(&ctx->call_controls_lock);
	if (!damon_is_running(ctx))
		return -EINVAL;
	if (control->repeat)
		return 0;
	wait_for_completion(&control->completion);
	if (control->canceled)
		return -ECANCELED;
	return 0;
}
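
/*
 * Example (an illustrative sketch, not part of this file's build): running a
 * function on the kdamond and waiting for its completion.  The callback is
 * hypothetical and assumes &damon_call_control->fn takes only the ->data
 * pointer; since it runs on the kdamond, it may safely walk ctx internals.
 *
 *	static int count_regions_fn(void *data)
 *	{
 *		struct damon_ctx *ctx = data;
 *		struct damon_target *t;
 *		unsigned int nr = 0;
 *
 *		damon_for_each_target(t, ctx)
 *			nr += damon_nr_regions(t);
 *		pr_info("nr_regions: %u\n", nr);
 *		return 0;	// saved in control->return_code
 *	}
 *
 *	struct damon_call_control control = {
 *		.fn = count_regions_fn,
 *		.data = ctx,
 *	};
 *	int err = damon_call(ctx, &control);	// waits; -EINVAL if not running
 */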

/**
 * damos_walk() - Invoke a given function while DAMOS walks regions.
 * @ctx: DAMON context to call the function for.
 * @control: Control variable of the walk request.
 *
 * Ask DAMON worker thread (kdamond) of @ctx to call a function for each region
 * that the kdamond will apply DAMOS action to, and wait until the kdamond
 * finishes handling of the request.
 *
 * The kdamond executes the given function in the main loop, for each region
 * just after it applied any DAMOS actions of @ctx to it.  The invocation is
 * made only within one &damos->apply_interval_us since damos_walk()
 * invocation, for each scheme.  The given callback function can hence safely
 * access the internal data of &struct damon_ctx and &struct damon_region that
 * each of the schemes will apply the action to for the next interval, without
 * additional synchronizations against the kdamond.  If every scheme of @ctx
 * passed at least one &damos->apply_interval_us, kdamond marks the request as
 * completed so that damos_walk() can wake up and return.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int damos_walk(struct damon_ctx *ctx, struct damos_walk_control *control)
{
	init_completion(&control->completion);
	control->canceled = false;
	mutex_lock(&ctx->walk_control_lock);
	if (ctx->walk_control) {
		mutex_unlock(&ctx->walk_control_lock);
		return -EBUSY;
	}
	ctx->walk_control = control;
	mutex_unlock(&ctx->walk_control_lock);
	if (!damon_is_running(ctx))
		return -EINVAL;
	wait_for_completion(&control->completion);
	if (control->canceled)
		return -ECANCELED;
	return 0;
}
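
/*
 * Example (an illustrative sketch, not part of this file's build):
 * accumulating the total size of regions that schemes are about to apply
 * their actions to.  The walk_fn parameters follow the invocation in
 * damos_walk_call_walk() below; its return type is assumed to be void.
 *
 *	static void sum_sz_fn(void *data, struct damon_ctx *ctx,
 *			struct damon_target *t, struct damon_region *r,
 *			struct damos *s, unsigned long sz_filter_passed)
 *	{
 *		unsigned long *total = data;
 *
 *		*total += damon_sz_region(r);
 *	}
 *
 *	unsigned long total = 0;
 *	struct damos_walk_control control = {
 *		.walk_fn = sum_sz_fn,
 *		.data = &total,
 *	};
 *	int err = damos_walk(ctx, &control);	// -EBUSY if a walk is ongoing
 */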

/*
 * Warn about and fix corrupted ->nr_accesses[_bp] to aid investigation and to
 * prevent the problem from being propagated.
 */
static void damon_warn_fix_nr_accesses_corruption(struct damon_region *r)
{
	if (r->nr_accesses_bp == r->nr_accesses * 10000)
		return;
	WARN_ONCE(true, "invalid nr_accesses_bp at reset: %u %u\n",
			r->nr_accesses_bp, r->nr_accesses);
	r->nr_accesses_bp = r->nr_accesses * 10000;
}

/*
 * Reset the aggregated monitoring results ('nr_accesses' of each region).
 */
static void kdamond_reset_aggregated(struct damon_ctx *c)
{
	struct damon_target *t;
	unsigned int ti = 0;	/* target's index */

	damon_for_each_target(t, c) {
		struct damon_region *r;

		damon_for_each_region(r, t) {
			trace_damon_aggregated(ti, r, damon_nr_regions(t));
			damon_warn_fix_nr_accesses_corruption(r);
			r->last_nr_accesses = r->nr_accesses;
			r->nr_accesses = 0;
		}
		ti++;
	}
}

static unsigned long damon_get_intervals_score(struct damon_ctx *c)
{
	struct damon_target *t;
	struct damon_region *r;
	unsigned long sz_region, max_access_events = 0, access_events = 0;
	unsigned long target_access_events;
	unsigned long goal_bp = c->attrs.intervals_goal.access_bp;

	damon_for_each_target(t, c) {
		damon_for_each_region(r, t) {
			sz_region = damon_sz_region(r);
			max_access_events += sz_region * c->attrs.aggr_samples;
			access_events += sz_region * r->nr_accesses;
		}
	}
	target_access_events = max_access_events * goal_bp / 10000;
	target_access_events = target_access_events ? : 1;
	return access_events * 10000 / target_access_events;
}

static unsigned long damon_feed_loop_next_input(unsigned long last_input,
		unsigned long score);

static unsigned long damon_get_intervals_adaptation_bp(struct damon_ctx *c)
{
	unsigned long score_bp, adaptation_bp;

	score_bp = damon_get_intervals_score(c);
	adaptation_bp = damon_feed_loop_next_input(100000000, score_bp) /
		10000;
	/*
	 * adaptation_bp ranges from 1 to 20,000.  Avoid too rapid reduction
	 * of the intervals by rescaling [1,10,000] to [5,000,10,000].
	 */
	if (adaptation_bp <= 10000)
		adaptation_bp = 5000 + adaptation_bp / 2;
	return adaptation_bp;
}
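
/*
 * Worked example of the rescaling above: a raw feedback of 2,000bp (i.e.,
 * "shrink the intervals to a fifth") becomes 5000 + 2000 / 2 == 6,000bp, so
 * one tuning step multiplies the sampling interval by 0.6 rather than 0.2.
 * Raw values above 10,000bp (interval growth) pass through unchanged.
 */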

static void kdamond_tune_intervals(struct damon_ctx *c)
{
	unsigned long adaptation_bp;
	struct damon_attrs new_attrs;
	struct damon_intervals_goal *goal;

	adaptation_bp = damon_get_intervals_adaptation_bp(c);
	if (adaptation_bp == 10000)
		return;

	new_attrs = c->attrs;
	goal = &c->attrs.intervals_goal;
	new_attrs.sample_interval = min(goal->max_sample_us,
			c->attrs.sample_interval * adaptation_bp / 10000);
	new_attrs.sample_interval = max(goal->min_sample_us,
			new_attrs.sample_interval);
	new_attrs.aggr_interval = new_attrs.sample_interval *
		c->attrs.aggr_samples;
	trace_damon_monitor_intervals_tune(new_attrs.sample_interval);
	damon_set_attrs(c, &new_attrs);
}

static void damon_split_region_at(struct damon_target *t,
		struct damon_region *r, unsigned long sz_r);

static bool __damos_valid_target(struct damon_region *r, struct damos *s)
{
	unsigned long sz;
	unsigned int nr_accesses = r->nr_accesses_bp / 10000;

	sz = damon_sz_region(r);
	return s->pattern.min_sz_region <= sz &&
		sz <= s->pattern.max_sz_region &&
		s->pattern.min_nr_accesses <= nr_accesses &&
		nr_accesses <= s->pattern.max_nr_accesses &&
		s->pattern.min_age_region <= r->age &&
		r->age <= s->pattern.max_age_region;
}

static bool damos_valid_target(struct damon_ctx *c, struct damon_target *t,
		struct damon_region *r, struct damos *s)
{
	bool ret = __damos_valid_target(r, s);

	if (!ret || !s->quota.esz || !c->ops.get_scheme_score)
		return ret;

	return c->ops.get_scheme_score(c, t, r, s) >= s->quota.min_score;
}

/*
 * damos_skip_charged_region() - Check if the given region or starting part of
 * it is already charged for the DAMOS quota.
 * @t: The target of the region.
 * @rp: The pointer to the region.
 * @s: The scheme to be applied.
 * @min_sz_region: minimum region size.
 *
 * If a quota of a scheme has exceeded in a quota charge window, the scheme's
 * action would be applied to only a part of the target access pattern
 * fulfilling regions.  To avoid applying the scheme action to only already
 * applied regions, DAMON skips applying the scheme action to the regions that
 * were charged in the previous charge window.
 *
 * This function checks if a given region should be skipped or not for the
 * reason.  If only the starting part of the region has previously been
 * charged, this function splits the region into two so that the second one
 * covers the area that was not charged in the previous charge window, saves
 * the second region in *rp, and returns false, so that the caller can apply
 * DAMON action to the second one.
 *
 * Return: true if the region should be entirely skipped, false otherwise.
 */
static bool damos_skip_charged_region(struct damon_target *t,
		struct damon_region **rp, struct damos *s,
		unsigned long min_sz_region)
{
	struct damon_region *r = *rp;
	struct damos_quota *quota = &s->quota;
	unsigned long sz_to_skip;

	/* Skip previously charged regions */
	if (quota->charge_target_from) {
		if (t != quota->charge_target_from)
			return true;
		if (r == damon_last_region(t)) {
			quota->charge_target_from = NULL;
			quota->charge_addr_from = 0;
			return true;
		}
		if (quota->charge_addr_from &&
				r->ar.end <= quota->charge_addr_from)
			return true;

		if (quota->charge_addr_from && r->ar.start <
				quota->charge_addr_from) {
			sz_to_skip = ALIGN_DOWN(quota->charge_addr_from -
					r->ar.start, min_sz_region);
			if (!sz_to_skip) {
				if (damon_sz_region(r) <= min_sz_region)
					return true;
				sz_to_skip = min_sz_region;
			}
			damon_split_region_at(t, r, sz_to_skip);
			r = damon_next_region(r);
			*rp = r;
		}
		quota->charge_target_from = NULL;
		quota->charge_addr_from = 0;
	}
	return false;
}
1692
damos_update_stat(struct damos * s,unsigned long sz_tried,unsigned long sz_applied,unsigned long sz_ops_filter_passed)1693 static void damos_update_stat(struct damos *s,
1694 unsigned long sz_tried, unsigned long sz_applied,
1695 unsigned long sz_ops_filter_passed)
1696 {
1697 s->stat.nr_tried++;
1698 s->stat.sz_tried += sz_tried;
1699 if (sz_applied)
1700 s->stat.nr_applied++;
1701 s->stat.sz_applied += sz_applied;
1702 s->stat.sz_ops_filter_passed += sz_ops_filter_passed;
1703 }
1704
damos_filter_match(struct damon_ctx * ctx,struct damon_target * t,struct damon_region * r,struct damos_filter * filter,unsigned long min_sz_region)1705 static bool damos_filter_match(struct damon_ctx *ctx, struct damon_target *t,
1706 struct damon_region *r, struct damos_filter *filter,
1707 unsigned long min_sz_region)
1708 {
1709 bool matched = false;
1710 struct damon_target *ti;
1711 int target_idx = 0;
1712 unsigned long start, end;
1713
1714 switch (filter->type) {
1715 case DAMOS_FILTER_TYPE_TARGET:
1716 damon_for_each_target(ti, ctx) {
1717 if (ti == t)
1718 break;
1719 target_idx++;
1720 }
1721 matched = target_idx == filter->target_idx;
1722 break;
1723 case DAMOS_FILTER_TYPE_ADDR:
1724 start = ALIGN_DOWN(filter->addr_range.start, min_sz_region);
1725 end = ALIGN_DOWN(filter->addr_range.end, min_sz_region);
1726
1727 /* inside the range */
1728 if (start <= r->ar.start && r->ar.end <= end) {
1729 matched = true;
1730 break;
1731 }
1732 /* outside of the range */
1733 if (r->ar.end <= start || end <= r->ar.start) {
1734 matched = false;
1735 break;
1736 }
1737 /* start before the range and overlap */
1738 if (r->ar.start < start) {
1739 damon_split_region_at(t, r, start - r->ar.start);
1740 matched = false;
1741 break;
1742 }
1743 /* start inside the range */
1744 damon_split_region_at(t, r, end - r->ar.start);
1745 matched = true;
1746 break;
1747 default:
1748 return false;
1749 }
1750
1751 return matched == filter->matching;
1752 }
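/*
 * Editor-added illustration of DAMOS_FILTER_TYPE_ADDR (hypothetical
 * numbers): for a filter range of [0x2000, 0x4000) and a region of
 * [0x1000, 0x3000), the region starts before the range but overlaps it.
 * The region is hence split at 0x2000; the first half, [0x1000, 0x2000),
 * is reported as not matched here, and the remaining [0x2000, 0x3000) is
 * left as the next region, to be evaluated on its own later.
 */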
1753
1754 static bool damos_filter_out(struct damon_ctx *ctx, struct damon_target *t,
1755 struct damon_region *r, struct damos *s)
1756 {
1757 struct damos_filter *filter;
1758
1759 s->core_filters_allowed = false;
1760 damos_for_each_filter(filter, s) {
1761 if (damos_filter_match(ctx, t, r, filter, ctx->min_sz_region)) {
1762 if (filter->allow)
1763 s->core_filters_allowed = true;
1764 return !filter->allow;
1765 }
1766 }
1767 return s->core_filters_default_reject;
1768 }
1769
1770 /*
1771 * damos_walk_call_walk() - Call &damos_walk_control->walk_fn.
1772 * @ctx: The context of &damon_ctx->walk_control.
1773 * @t: The monitoring target of @r that @s will be applied.
1774 * @r: The region of @t that @s will be applied.
1775 * @s: The scheme of @ctx that will be applied to @r.
1776 *
1777 * This function is called from kdamond whenever it asks the operations set to
1778 * apply a DAMOS scheme action to a region. If a DAMOS walk request is
1779 * installed by damos_walk() and not yet uninstalled, invoke it.
1780 */
1781 static void damos_walk_call_walk(struct damon_ctx *ctx, struct damon_target *t,
1782 struct damon_region *r, struct damos *s,
1783 unsigned long sz_filter_passed)
1784 {
1785 struct damos_walk_control *control;
1786
1787 if (s->walk_completed)
1788 return;
1789
1790 control = ctx->walk_control;
1791 if (!control)
1792 return;
1793
1794 control->walk_fn(control->data, ctx, t, r, s, sz_filter_passed);
1795 }
1796
1797 /*
1798 * damos_walk_complete() - Complete DAMOS walk request if all walks are done.
1799 * @ctx: The context of &damon_ctx->walk_control.
1800 * @s: A scheme of @ctx that all walks are now done.
1801 *
1802 * This function is called when kdamond has finished applying the action of a
1803 * DAMOS scheme to all regions eligible for the given &damos->apply_interval_us.
1804 * If every scheme of @ctx, including @s, has now finished walking for at least
1805 * one &damos->apply_interval_us, this function marks the handling of the given
1806 * DAMOS walk request as done, so that damos_walk() can wake up and return.
1807 */
1808 static void damos_walk_complete(struct damon_ctx *ctx, struct damos *s)
1809 {
1810 struct damos *siter;
1811 struct damos_walk_control *control;
1812
1813 control = ctx->walk_control;
1814 if (!control)
1815 return;
1816
1817 s->walk_completed = true;
1818 /* if all schemes completed, signal completion to walker */
1819 damon_for_each_scheme(siter, ctx) {
1820 if (!siter->walk_completed)
1821 return;
1822 }
1823 damon_for_each_scheme(siter, ctx)
1824 siter->walk_completed = false;
1825
1826 complete(&control->completion);
1827 ctx->walk_control = NULL;
1828 }
1829
1830 /*
1831 * damos_walk_cancel() - Cancel the current DAMOS walk request.
1832 * @ctx: The context of &damon_ctx->walk_control.
1833 *
1834 * This function is called when @ctx is deactivated by DAMOS watermarks, DAMOS
1835 * walk is requested but there is no DAMOS scheme to walk for, or the kdamond
1836 * is already out of the main loop and therefore going to be terminated, and
1837 * hence cannot continue the walks. This function therefore marks the walk
1838 * request as canceled, so that damos_walk() can wake up and return.
1839 */
1840 static void damos_walk_cancel(struct damon_ctx *ctx)
1841 {
1842 struct damos_walk_control *control;
1843
1844 mutex_lock(&ctx->walk_control_lock);
1845 control = ctx->walk_control;
1846 mutex_unlock(&ctx->walk_control_lock);
1847
1848 if (!control)
1849 return;
1850 control->canceled = true;
1851 complete(&control->completion);
1852 mutex_lock(&ctx->walk_control_lock);
1853 ctx->walk_control = NULL;
1854 mutex_unlock(&ctx->walk_control_lock);
1855 }
1856
1857 static void damos_apply_scheme(struct damon_ctx *c, struct damon_target *t,
1858 struct damon_region *r, struct damos *s)
1859 {
1860 struct damos_quota *quota = &s->quota;
1861 unsigned long sz = damon_sz_region(r);
1862 struct timespec64 begin, end;
1863 unsigned long sz_applied = 0;
1864 unsigned long sz_ops_filter_passed = 0;
1865 /*
1866 * We plan to support multiple contexts per kdamond, as the DAMON sysfs
1867 * interface implies with its 'nr_contexts' file. Nevertheless, only a
1868 * single context per kdamond is supported for now. So, we can simply use
1869 * the '0' context index here.
1870 */
1871 unsigned int cidx = 0;
1872 struct damos *siter; /* schemes iterator */
1873 unsigned int sidx = 0;
1874 struct damon_target *titer; /* targets iterator */
1875 unsigned int tidx = 0;
1876 bool do_trace = false;
1877
1878 /* get indices for trace_damos_before_apply() */
1879 if (trace_damos_before_apply_enabled()) {
1880 damon_for_each_scheme(siter, c) {
1881 if (siter == s)
1882 break;
1883 sidx++;
1884 }
1885 damon_for_each_target(titer, c) {
1886 if (titer == t)
1887 break;
1888 tidx++;
1889 }
1890 do_trace = true;
1891 }
1892
1893 if (c->ops.apply_scheme) {
1894 if (quota->esz && quota->charged_sz + sz > quota->esz) {
1895 sz = ALIGN_DOWN(quota->esz - quota->charged_sz,
1896 c->min_sz_region);
1897 if (!sz)
1898 goto update_stat;
1899 damon_split_region_at(t, r, sz);
1900 }
1901 if (damos_filter_out(c, t, r, s))
1902 return;
1903 ktime_get_coarse_ts64(&begin);
1904 trace_damos_before_apply(cidx, sidx, tidx, r,
1905 damon_nr_regions(t), do_trace);
1906 sz_applied = c->ops.apply_scheme(c, t, r, s,
1907 &sz_ops_filter_passed);
1908 damos_walk_call_walk(c, t, r, s, sz_ops_filter_passed);
1909 ktime_get_coarse_ts64(&end);
1910 quota->total_charged_ns += timespec64_to_ns(&end) -
1911 timespec64_to_ns(&begin);
1912 quota->charged_sz += sz;
1913 if (quota->esz && quota->charged_sz >= quota->esz) {
1914 quota->charge_target_from = t;
1915 quota->charge_addr_from = r->ar.end + 1;
1916 }
1917 }
1918 if (s->action != DAMOS_STAT)
1919 r->age = 0;
1920
1921 update_stat:
1922 damos_update_stat(s, sz, sz_applied, sz_ops_filter_passed);
1923 }
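/*
 * Editor-added illustration of the quota charging above (hypothetical
 * numbers): if quota->esz is 4 MiB, quota->charged_sz is 3 MiB, and the
 * region is 2 MiB, applying the whole region would exceed the quota.  sz is
 * hence clamped to ALIGN_DOWN(4 MiB - 3 MiB, min_sz_region) == 1 MiB and the
 * region is split so only the first 1 MiB is applied.  Since charged_sz then
 * reaches esz, charge_target_from and charge_addr_from record where the next
 * charge window should resume.
 */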
1924
1925 static void damon_do_apply_schemes(struct damon_ctx *c,
1926 struct damon_target *t,
1927 struct damon_region *r)
1928 {
1929 struct damos *s;
1930
1931 damon_for_each_scheme(s, c) {
1932 struct damos_quota *quota = &s->quota;
1933
1934 if (c->passed_sample_intervals < s->next_apply_sis)
1935 continue;
1936
1937 if (!s->wmarks.activated)
1938 continue;
1939
1940 /* Check the quota */
1941 if (quota->esz && quota->charged_sz >= quota->esz)
1942 continue;
1943
1944 if (damos_skip_charged_region(t, &r, s, c->min_sz_region))
1945 continue;
1946
1947 if (!damos_valid_target(c, t, r, s))
1948 continue;
1949
1950 damos_apply_scheme(c, t, r, s);
1951 }
1952 }
1953
1954 /*
1955 * damon_feed_loop_next_input() - get next input to achieve a target score.
1956 * @last_input:	The last input.
1957 * @score:	Current score that was made with @last_input.
1958 *
1959 * Calculate next input to achieve the target score, based on the last input
1960 * and current score. Assuming the input and the score are positively
1961 * proportional, calculate how much compensation should be added to or
1962 * subtracted from the last input as a proportion of the last input. Avoid
1963 * the next input always being zero by keeping it non-zero. In short form
1964 * (assuming support of float and signed calculations), the algorithm is as
1965 * below.
1966 *
1967 * next_input = max(last_input * ((goal - current) / goal + 1), 1)
1968 *
1969 * For simple implementation, we assume the target score is always 10,000. The
1970 * caller should adjust @score for this.
1971 *
1972 * Returns the next input that is assumed to achieve the target score.
1973 */
1974 static unsigned long damon_feed_loop_next_input(unsigned long last_input,
1975 unsigned long score)
1976 {
1977 const unsigned long goal = 10000;
1978 /* Set the minimum input to 10000 to avoid the compensation being zero */
1979 const unsigned long min_input = 10000;
1980 unsigned long score_goal_diff, compensation;
1981 bool over_achieving = score > goal;
1982
1983 if (score == goal)
1984 return last_input;
1985 if (score >= goal * 2)
1986 return min_input;
1987
1988 if (over_achieving)
1989 score_goal_diff = score - goal;
1990 else
1991 score_goal_diff = goal - score;
1992
1993 if (last_input < ULONG_MAX / score_goal_diff)
1994 compensation = last_input * score_goal_diff / goal;
1995 else
1996 compensation = last_input / goal * score_goal_diff;
1997
1998 if (over_achieving)
1999 return max(last_input - compensation, min_input);
2000 if (last_input < ULONG_MAX - compensation)
2001 return last_input + compensation;
2002 return ULONG_MAX;
2003 }
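/*
 * Editor-added feedback examples (hypothetical numbers).  Under-achieving:
 * for last_input == 10000 and score == 5000, score_goal_diff is 5000 and
 * compensation is 10000 * 5000 / 10000 == 5000, so the next input is
 * 10000 + 5000 == 15000 (a 1.5x scale up).  Over-achieving: for
 * last_input == 10000 and score == 15000, compensation is again 5000, so
 * the next input is max(10000 - 5000, min_input) == 10000.
 */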
2004
2005 #ifdef CONFIG_PSI
2006
2007 static u64 damos_get_some_mem_psi_total(void)
2008 {
2009 if (static_branch_likely(&psi_disabled))
2010 return 0;
2011 return div_u64(psi_system.total[PSI_AVGS][PSI_MEM * 2],
2012 NSEC_PER_USEC);
2013 }
2014
2015 #else /* CONFIG_PSI */
2016
2017 static inline u64 damos_get_some_mem_psi_total(void)
2018 {
2019 return 0;
2020 }
2021
2022 #endif /* CONFIG_PSI */
2023
2024 #ifdef CONFIG_NUMA
2025 static __kernel_ulong_t damos_get_node_mem_bp(
2026 struct damos_quota_goal *goal)
2027 {
2028 struct sysinfo i;
2029 __kernel_ulong_t numerator;
2030
2031 si_meminfo_node(&i, goal->nid);
2032 if (goal->metric == DAMOS_QUOTA_NODE_MEM_USED_BP)
2033 numerator = i.totalram - i.freeram;
2034 else /* DAMOS_QUOTA_NODE_MEM_FREE_BP */
2035 numerator = i.freeram;
2036 return numerator * 10000 / i.totalram;
2037 }
2038 #else
2039 static __kernel_ulong_t damos_get_node_mem_bp(
2040 struct damos_quota_goal *goal)
2041 {
2042 return 0;
2043 }
2044 #endif
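/*
 * Editor-added illustration (hypothetical numbers): on a node with
 * i.totalram == 1000 pages and i.freeram == 250 pages,
 * DAMOS_QUOTA_NODE_MEM_FREE_BP evaluates to 250 * 10000 / 1000 == 2500
 * (25%), and DAMOS_QUOTA_NODE_MEM_USED_BP to 7500 (75%).
 */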
2045
2046
2047 static void damos_set_quota_goal_current_value(struct damos_quota_goal *goal)
2048 {
2049 u64 now_psi_total;
2050
2051 switch (goal->metric) {
2052 case DAMOS_QUOTA_USER_INPUT:
2053 /* User should already set goal->current_value */
2054 break;
2055 case DAMOS_QUOTA_SOME_MEM_PSI_US:
2056 now_psi_total = damos_get_some_mem_psi_total();
2057 goal->current_value = now_psi_total - goal->last_psi_total;
2058 goal->last_psi_total = now_psi_total;
2059 break;
2060 case DAMOS_QUOTA_NODE_MEM_USED_BP:
2061 case DAMOS_QUOTA_NODE_MEM_FREE_BP:
2062 goal->current_value = damos_get_node_mem_bp(goal);
2063 break;
2064 default:
2065 break;
2066 }
2067 }
2068
2069 /* Return the highest score since it makes schemes least aggressive */
2070 static unsigned long damos_quota_score(struct damos_quota *quota)
2071 {
2072 struct damos_quota_goal *goal;
2073 unsigned long highest_score = 0;
2074
2075 damos_for_each_quota_goal(goal, quota) {
2076 damos_set_quota_goal_current_value(goal);
2077 highest_score = max(highest_score,
2078 goal->current_value * 10000 /
2079 goal->target_value);
2080 }
2081
2082 return highest_score;
2083 }
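/*
 * Editor-added illustration (hypothetical numbers): for two goals having
 * current_value/target_value of 3000/10000 (score 3000) and 6000/5000
 * (score 12000), this returns 12000.  Using the highest score lets the
 * feedback loop shrink the effective quota the most, keeping the scheme
 * least aggressive across its goals.
 */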
2084
2085 /*
2086 * Called only if quota->ms or quota->sz is set, or quota->goals is not empty
2087 */
2088 static void damos_set_effective_quota(struct damos_quota *quota)
2089 {
2090 unsigned long throughput;
2091 unsigned long esz = ULONG_MAX;
2092
2093 	if (!quota->ms && list_empty(&quota->goals)) {
2094 quota->esz = quota->sz;
2095 return;
2096 }
2097
2098 	if (!list_empty(&quota->goals)) {
2099 unsigned long score = damos_quota_score(quota);
2100
2101 quota->esz_bp = damon_feed_loop_next_input(
2102 max(quota->esz_bp, 10000UL),
2103 score);
2104 esz = quota->esz_bp / 10000;
2105 }
2106
2107 if (quota->ms) {
2108 if (quota->total_charged_ns)
2109 throughput = mult_frac(quota->total_charged_sz, 1000000,
2110 quota->total_charged_ns);
2111 else
2112 throughput = PAGE_SIZE * 1024;
2113 esz = min(throughput * quota->ms, esz);
2114 }
2115
2116 if (quota->sz && quota->sz < esz)
2117 esz = quota->sz;
2118
2119 quota->esz = esz;
2120 }
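/*
 * Editor-added illustration of the time quota conversion (hypothetical
 * numbers): if total_charged_sz is 1 MiB and total_charged_ns is
 * 10,000,000 (10 ms), throughput is mult_frac(1 MiB, 1000000, 10000000)
 * ~= 104857 bytes per millisecond.  With quota->ms == 100, the time quota
 * alone allows an esz of ~10 MiB; if quota->sz is 4 MiB, the final esz is
 * the smaller of the two, 4 MiB.
 */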
2121
2122 static void damos_trace_esz(struct damon_ctx *c, struct damos *s,
2123 struct damos_quota *quota)
2124 {
2125 unsigned int cidx = 0, sidx = 0;
2126 struct damos *siter;
2127
2128 damon_for_each_scheme(siter, c) {
2129 if (siter == s)
2130 break;
2131 sidx++;
2132 }
2133 trace_damos_esz(cidx, sidx, quota->esz);
2134 }
2135
2136 static void damos_adjust_quota(struct damon_ctx *c, struct damos *s)
2137 {
2138 struct damos_quota *quota = &s->quota;
2139 struct damon_target *t;
2140 struct damon_region *r;
2141 unsigned long cumulated_sz, cached_esz;
2142 unsigned int score, max_score = 0;
2143
2144 	if (!quota->ms && !quota->sz && list_empty(&quota->goals))
2145 return;
2146
2147 /* First charge window */
2148 if (!quota->total_charged_sz && !quota->charged_from) {
2149 quota->charged_from = jiffies;
2150 damos_set_effective_quota(quota);
2151 }
2152
2153 /* New charge window starts */
2154 if (time_after_eq(jiffies, quota->charged_from +
2155 msecs_to_jiffies(quota->reset_interval))) {
2156 if (quota->esz && quota->charged_sz >= quota->esz)
2157 s->stat.qt_exceeds++;
2158 quota->total_charged_sz += quota->charged_sz;
2159 quota->charged_from = jiffies;
2160 quota->charged_sz = 0;
2161 if (trace_damos_esz_enabled())
2162 cached_esz = quota->esz;
2163 damos_set_effective_quota(quota);
2164 if (trace_damos_esz_enabled() && quota->esz != cached_esz)
2165 damos_trace_esz(c, s, quota);
2166 }
2167
2168 if (!c->ops.get_scheme_score)
2169 return;
2170
2171 /* Fill up the score histogram */
2172 memset(c->regions_score_histogram, 0,
2173 sizeof(*c->regions_score_histogram) *
2174 (DAMOS_MAX_SCORE + 1));
2175 damon_for_each_target(t, c) {
2176 damon_for_each_region(r, t) {
2177 if (!__damos_valid_target(r, s))
2178 continue;
2179 score = c->ops.get_scheme_score(c, t, r, s);
2180 c->regions_score_histogram[score] +=
2181 damon_sz_region(r);
2182 if (score > max_score)
2183 max_score = score;
2184 }
2185 }
2186
2187 /* Set the min score limit */
2188 for (cumulated_sz = 0, score = max_score; ; score--) {
2189 cumulated_sz += c->regions_score_histogram[score];
2190 if (cumulated_sz >= quota->esz || !score)
2191 break;
2192 }
2193 quota->min_score = score;
2194 }
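/*
 * Editor-added illustration of the min score limit (hypothetical numbers):
 * suppose quota->esz is 8 MiB and the histogram holds 2 MiB of regions at
 * score 90, 4 MiB at score 70, and 8 MiB at score 50.  Walking down from
 * max_score, cumulated_sz is 6 MiB after score 70 and 14 MiB after score
 * 50, where it first reaches esz.  quota->min_score is hence set to 50,
 * and only regions of score 50 or higher are applied under the quota.
 */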
2195
2196 static void kdamond_apply_schemes(struct damon_ctx *c)
2197 {
2198 struct damon_target *t;
2199 struct damon_region *r, *next_r;
2200 struct damos *s;
2201 unsigned long sample_interval = c->attrs.sample_interval ?
2202 c->attrs.sample_interval : 1;
2203 bool has_schemes_to_apply = false;
2204
2205 damon_for_each_scheme(s, c) {
2206 if (c->passed_sample_intervals < s->next_apply_sis)
2207 continue;
2208
2209 if (!s->wmarks.activated)
2210 continue;
2211
2212 has_schemes_to_apply = true;
2213
2214 damos_adjust_quota(c, s);
2215 }
2216
2217 if (!has_schemes_to_apply)
2218 return;
2219
2220 mutex_lock(&c->walk_control_lock);
2221 damon_for_each_target(t, c) {
2222 damon_for_each_region_safe(r, next_r, t)
2223 damon_do_apply_schemes(c, t, r);
2224 }
2225
2226 damon_for_each_scheme(s, c) {
2227 if (c->passed_sample_intervals < s->next_apply_sis)
2228 continue;
2229 damos_walk_complete(c, s);
2230 s->next_apply_sis = c->passed_sample_intervals +
2231 (s->apply_interval_us ? s->apply_interval_us :
2232 c->attrs.aggr_interval) / sample_interval;
2233 s->last_applied = NULL;
2234 }
2235 mutex_unlock(&c->walk_control_lock);
2236 }
2237
2238 /*
2239 * Merge two adjacent regions into one region
2240 */
2241 static void damon_merge_two_regions(struct damon_target *t,
2242 struct damon_region *l, struct damon_region *r)
2243 {
2244 unsigned long sz_l = damon_sz_region(l), sz_r = damon_sz_region(r);
2245
2246 l->nr_accesses = (l->nr_accesses * sz_l + r->nr_accesses * sz_r) /
2247 (sz_l + sz_r);
2248 l->nr_accesses_bp = l->nr_accesses * 10000;
2249 l->age = (l->age * sz_l + r->age * sz_r) / (sz_l + sz_r);
2250 l->ar.end = r->ar.end;
2251 damon_destroy_region(r, t);
2252 }
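/*
 * Editor-added illustration of the size-weighted merge (hypothetical
 * numbers): merging l of 4 KiB with nr_accesses 10 and r of 12 KiB with
 * nr_accesses 2 results in (10 * 4096 + 2 * 12288) / 16384 == 4, so each
 * part contributes to the merged nr_accesses (and age) in proportion to
 * its size, and nr_accesses_bp becomes 40000.
 */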
2253
2254 /*
2255 * Merge adjacent regions having similar access frequencies
2256 *
2257 * t target affected by this merge operation
2258 * thres '->nr_accesses' diff threshold for the merge
2259 * sz_limit size upper limit of each region
2260 */
2261 static void damon_merge_regions_of(struct damon_target *t, unsigned int thres,
2262 unsigned long sz_limit)
2263 {
2264 struct damon_region *r, *prev = NULL, *next;
2265
2266 damon_for_each_region_safe(r, next, t) {
2267 if (abs(r->nr_accesses - r->last_nr_accesses) > thres)
2268 r->age = 0;
2269 else if ((r->nr_accesses == 0) != (r->last_nr_accesses == 0))
2270 r->age = 0;
2271 else
2272 r->age++;
2273
2274 if (prev && prev->ar.end == r->ar.start &&
2275 abs(prev->nr_accesses - r->nr_accesses) <= thres &&
2276 damon_sz_region(prev) + damon_sz_region(r) <= sz_limit)
2277 damon_merge_two_regions(t, prev, r);
2278 else
2279 prev = r;
2280 }
2281 }
2282
2283 /*
2284 * Merge adjacent regions having similar access frequencies
2285 *
2286 * threshold '->nr_accesses' diff threshold for the merge
2287 * sz_limit size upper limit of each region
2288 *
2289 * This function merges monitoring target regions which are adjacent and their
2290 * access frequencies are similar. This is for minimizing the monitoring
2291 * overhead under the dynamically changeable access pattern. If a merge was
2292 * unnecessarily made, later 'kdamond_split_regions()' will revert it.
2293 *
2294 * The total number of regions could be higher than the user-defined limit,
2295 * max_nr_regions for some cases. For example, the user can update
2296 * max_nr_regions to a number that is lower than the current number of regions
2297 * while DAMON is running. For such a case, repeat merging until the limit is
2298 * met while increasing @threshold up to the possible maximum level.
2299 */
2300 static void kdamond_merge_regions(struct damon_ctx *c, unsigned int threshold,
2301 unsigned long sz_limit)
2302 {
2303 struct damon_target *t;
2304 unsigned int nr_regions;
2305 unsigned int max_thres;
2306
2307 max_thres = c->attrs.aggr_interval /
2308 (c->attrs.sample_interval ? c->attrs.sample_interval : 1);
2309 do {
2310 nr_regions = 0;
2311 damon_for_each_target(t, c) {
2312 damon_merge_regions_of(t, threshold, sz_limit);
2313 nr_regions += damon_nr_regions(t);
2314 }
2315 threshold = max(1, threshold * 2);
2316 } while (nr_regions > c->attrs.max_nr_regions &&
2317 threshold / 2 < max_thres);
2318 }
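/*
 * Editor-added illustration (hypothetical numbers): with max_nr_regions of
 * 100 but 250 current regions and an initial threshold of 2, the merge pass
 * repeats with thresholds 2, 4, 8, ..., merging progressively less similar
 * neighbors, until the number of regions drops to 100 or the threshold
 * reaches max_thres (aggr_interval / sample_interval, the maximum possible
 * nr_accesses).
 */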
2319
2320 /*
2321 * Split a region in two
2322 *
2323 * r the region to be split
2324 * sz_r size of the first sub-region that will be made
2325 */
2326 static void damon_split_region_at(struct damon_target *t,
2327 struct damon_region *r, unsigned long sz_r)
2328 {
2329 struct damon_region *new;
2330
2331 new = damon_new_region(r->ar.start + sz_r, r->ar.end);
2332 if (!new)
2333 return;
2334
2335 r->ar.end = new->ar.start;
2336
2337 new->age = r->age;
2338 new->last_nr_accesses = r->last_nr_accesses;
2339 new->nr_accesses_bp = r->nr_accesses_bp;
2340 new->nr_accesses = r->nr_accesses;
2341
2342 damon_insert_region(new, r, damon_next_region(r), t);
2343 }
2344
2345 /* Split every region in the given target into 'nr_subs' regions */
2346 static void damon_split_regions_of(struct damon_target *t, int nr_subs,
2347 unsigned long min_sz_region)
2348 {
2349 struct damon_region *r, *next;
2350 unsigned long sz_region, sz_sub = 0;
2351 int i;
2352
2353 damon_for_each_region_safe(r, next, t) {
2354 sz_region = damon_sz_region(r);
2355
2356 for (i = 0; i < nr_subs - 1 &&
2357 sz_region > 2 * min_sz_region; i++) {
2358 /*
2359 * Randomly select the size of the left sub-region to be
2360 * at least 10% and at most 90% of the original region
2361 */
2362 sz_sub = ALIGN_DOWN(damon_rand(1, 10) *
2363 sz_region / 10, min_sz_region);
2364 /* Do not allow blank region */
2365 if (sz_sub == 0 || sz_sub >= sz_region)
2366 continue;
2367
2368 damon_split_region_at(t, r, sz_sub);
2369 sz_region = sz_sub;
2370 }
2371 }
2372 }
2373
2374 /*
2375 * Split every target region into randomly-sized small regions
2376 *
2377 * This function splits every target region into random-sized small regions if
2378 * the current total number of regions is equal to or smaller than half of the
2379 * user-specified maximum number of regions. This is for maximizing the
2380 * monitoring accuracy under the dynamically changeable access patterns. If a
2381 * split was unnecessarily made, later 'kdamond_merge_regions()' will revert
2382 * it.
2383 */
2384 static void kdamond_split_regions(struct damon_ctx *ctx)
2385 {
2386 struct damon_target *t;
2387 unsigned int nr_regions = 0;
2388 static unsigned int last_nr_regions;
2389 int nr_subregions = 2;
2390
2391 damon_for_each_target(t, ctx)
2392 nr_regions += damon_nr_regions(t);
2393
2394 if (nr_regions > ctx->attrs.max_nr_regions / 2)
2395 return;
2396
2397 /* Maybe the middle of the region has different access frequency */
2398 if (last_nr_regions == nr_regions &&
2399 nr_regions < ctx->attrs.max_nr_regions / 3)
2400 nr_subregions = 3;
2401
2402 damon_for_each_target(t, ctx)
2403 damon_split_regions_of(t, nr_subregions, ctx->min_sz_region);
2404
2405 last_nr_regions = nr_regions;
2406 }
2407
2408 /*
2409 * Check whether current monitoring should be stopped
2410 *
2411 * The monitoring is stopped when either the user requested to stop, or all
2412 * monitoring targets are invalid.
2413 *
2414 * Returns true if the current monitoring should be stopped.
2415 */
2416 static bool kdamond_need_stop(struct damon_ctx *ctx)
2417 {
2418 struct damon_target *t;
2419
2420 if (kthread_should_stop())
2421 return true;
2422
2423 if (!ctx->ops.target_valid)
2424 return false;
2425
2426 damon_for_each_target(t, ctx) {
2427 if (ctx->ops.target_valid(t))
2428 return false;
2429 }
2430
2431 return true;
2432 }
2433
2434 static int damos_get_wmark_metric_value(enum damos_wmark_metric metric,
2435 unsigned long *metric_value)
2436 {
2437 switch (metric) {
2438 case DAMOS_WMARK_FREE_MEM_RATE:
2439 *metric_value = global_zone_page_state(NR_FREE_PAGES) * 1000 /
2440 totalram_pages();
2441 return 0;
2442 default:
2443 break;
2444 }
2445 return -EINVAL;
2446 }
2447
2448 /*
2449 * Returns zero if the scheme is active. Else, returns the time to wait for
2450 * the next watermark check, in microseconds.
2451 */
2452 static unsigned long damos_wmark_wait_us(struct damos *scheme)
2453 {
2454 unsigned long metric;
2455
2456 if (damos_get_wmark_metric_value(scheme->wmarks.metric, &metric))
2457 return 0;
2458
2459 /* higher than high watermark or lower than low watermark */
2460 if (metric > scheme->wmarks.high || scheme->wmarks.low > metric) {
2461 if (scheme->wmarks.activated)
2462 pr_debug("deactivate a scheme (%d) for %s wmark\n",
2463 scheme->action,
2464 str_high_low(metric > scheme->wmarks.high));
2465 scheme->wmarks.activated = false;
2466 return scheme->wmarks.interval;
2467 }
2468
2469 /* inactive and higher than middle watermark */
2470 if ((scheme->wmarks.high >= metric && metric >= scheme->wmarks.mid) &&
2471 !scheme->wmarks.activated)
2472 return scheme->wmarks.interval;
2473
2474 if (!scheme->wmarks.activated)
2475 pr_debug("activate a scheme (%d)\n", scheme->action);
2476 scheme->wmarks.activated = true;
2477 return 0;
2478 }
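/*
 * Editor-added illustration (hypothetical numbers): for
 * DAMOS_WMARK_FREE_MEM_RATE watermarks of high == 500, mid == 400, and
 * low == 200 (per-thousand), a metric of 450 keeps an inactive scheme
 * waiting (between high and mid), 350 activates it, and 550 or 150
 * deactivates it until the next check after wmarks.interval microseconds.
 */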
2479
2480 static void kdamond_usleep(unsigned long usecs)
2481 {
2482 if (usecs >= USLEEP_RANGE_UPPER_BOUND)
2483 schedule_timeout_idle(usecs_to_jiffies(usecs));
2484 else
2485 usleep_range_idle(usecs, usecs + 1);
2486 }
2487
2488 /*
2489 * kdamond_call() - handle damon_call_control objects.
2490 * @ctx: The &struct damon_ctx of the kdamond.
2491 * @cancel: Whether to cancel the invocation of the function.
2492 *
2493 * If there are &struct damon_call_control requests that are registered via
2494 * &damon_call() on @ctx, do or cancel the invocation of the function depending
2495 * on @cancel. @cancel is set when the kdamond is already out of the main loop
2496 * and therefore will be terminated.
2497 */
2498 static void kdamond_call(struct damon_ctx *ctx, bool cancel)
2499 {
2500 struct damon_call_control *control;
2501 LIST_HEAD(repeat_controls);
2502 int ret = 0;
2503
2504 while (true) {
2505 mutex_lock(&ctx->call_controls_lock);
2506 control = list_first_entry_or_null(&ctx->call_controls,
2507 struct damon_call_control, list);
2508 mutex_unlock(&ctx->call_controls_lock);
2509 if (!control)
2510 break;
2511 if (cancel) {
2512 control->canceled = true;
2513 } else {
2514 ret = control->fn(control->data);
2515 control->return_code = ret;
2516 }
2517 mutex_lock(&ctx->call_controls_lock);
2518 list_del(&control->list);
2519 mutex_unlock(&ctx->call_controls_lock);
2520 if (!control->repeat) {
2521 complete(&control->completion);
2522 } else if (control->canceled && control->dealloc_on_cancel) {
2523 kfree(control);
2524 continue;
2525 } else {
2526 list_add(&control->list, &repeat_controls);
2527 }
2528 }
2529 control = list_first_entry_or_null(&repeat_controls,
2530 struct damon_call_control, list);
2531 if (!control || cancel)
2532 return;
2533 mutex_lock(&ctx->call_controls_lock);
2534 list_add_tail(&control->list, &ctx->call_controls);
2535 mutex_unlock(&ctx->call_controls_lock);
2536 }
2537
2538 /* Returns negative error code if it's not activated but should return */
2539 static int kdamond_wait_activation(struct damon_ctx *ctx)
2540 {
2541 struct damos *s;
2542 unsigned long wait_time;
2543 unsigned long min_wait_time = 0;
2544 bool init_wait_time = false;
2545
2546 while (!kdamond_need_stop(ctx)) {
2547 damon_for_each_scheme(s, ctx) {
2548 wait_time = damos_wmark_wait_us(s);
2549 if (!init_wait_time || wait_time < min_wait_time) {
2550 init_wait_time = true;
2551 min_wait_time = wait_time;
2552 }
2553 }
2554 if (!min_wait_time)
2555 return 0;
2556
2557 kdamond_usleep(min_wait_time);
2558
2559 kdamond_call(ctx, false);
2560 damos_walk_cancel(ctx);
2561 }
2562 return -EBUSY;
2563 }
2564
2565 static void kdamond_init_ctx(struct damon_ctx *ctx)
2566 {
2567 unsigned long sample_interval = ctx->attrs.sample_interval ?
2568 ctx->attrs.sample_interval : 1;
2569 unsigned long apply_interval;
2570 struct damos *scheme;
2571
2572 ctx->passed_sample_intervals = 0;
2573 ctx->next_aggregation_sis = ctx->attrs.aggr_interval / sample_interval;
2574 ctx->next_ops_update_sis = ctx->attrs.ops_update_interval /
2575 sample_interval;
2576 ctx->next_intervals_tune_sis = ctx->next_aggregation_sis *
2577 ctx->attrs.intervals_goal.aggrs;
2578
2579 damon_for_each_scheme(scheme, ctx) {
2580 apply_interval = scheme->apply_interval_us ?
2581 scheme->apply_interval_us : ctx->attrs.aggr_interval;
2582 scheme->next_apply_sis = apply_interval / sample_interval;
2583 damos_set_filters_default_reject(scheme);
2584 }
2585 }
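/*
 * Editor-added illustration of the sampling-interval bookkeeping
 * (hypothetical numbers): with sample_interval == 5000 us,
 * aggr_interval == 100000 us, and a scheme's apply_interval_us == 200000,
 * next_aggregation_sis starts at 20 and the scheme's next_apply_sis at 40,
 * i.e., the events fire after that many passed sampling intervals.
 */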
2586
2587 /*
2588 * The monitoring daemon that runs as a kernel thread
2589 */
2590 static int kdamond_fn(void *data)
2591 {
2592 struct damon_ctx *ctx = data;
2593 struct damon_target *t;
2594 struct damon_region *r, *next;
2595 unsigned int max_nr_accesses = 0;
2596 unsigned long sz_limit = 0;
2597
2598 pr_debug("kdamond (%d) starts\n", current->pid);
2599
2600 complete(&ctx->kdamond_started);
2601 kdamond_init_ctx(ctx);
2602
2603 if (ctx->ops.init)
2604 ctx->ops.init(ctx);
2605 ctx->regions_score_histogram = kmalloc_array(DAMOS_MAX_SCORE + 1,
2606 sizeof(*ctx->regions_score_histogram), GFP_KERNEL);
2607 if (!ctx->regions_score_histogram)
2608 goto done;
2609
2610 sz_limit = damon_region_sz_limit(ctx);
2611
2612 while (!kdamond_need_stop(ctx)) {
2613 /*
2614 * ctx->attrs and ctx->next_{aggregation,ops_update}_sis could
2615 * be changed from kdamond_call(). Read the values here, and
2616 * use those for this iteration. That is, new values that
2617 * damon_set_attrs() updated are respected from the next iteration.
2618 */
2619 unsigned long next_aggregation_sis = ctx->next_aggregation_sis;
2620 unsigned long next_ops_update_sis = ctx->next_ops_update_sis;
2621 unsigned long sample_interval = ctx->attrs.sample_interval;
2622
2623 if (kdamond_wait_activation(ctx))
2624 break;
2625
2626 if (ctx->ops.prepare_access_checks)
2627 ctx->ops.prepare_access_checks(ctx);
2628
2629 kdamond_usleep(sample_interval);
2630 ctx->passed_sample_intervals++;
2631
2632 if (ctx->ops.check_accesses)
2633 max_nr_accesses = ctx->ops.check_accesses(ctx);
2634
2635 if (ctx->passed_sample_intervals >= next_aggregation_sis)
2636 kdamond_merge_regions(ctx,
2637 max_nr_accesses / 10,
2638 sz_limit);
2639
2640 /*
2641 * do kdamond_call() and kdamond_apply_schemes() after
2642 * kdamond_merge_regions() if possible, to reduce overhead
2643 */
2644 kdamond_call(ctx, false);
2645 if (!list_empty(&ctx->schemes))
2646 kdamond_apply_schemes(ctx);
2647 else
2648 damos_walk_cancel(ctx);
2649
2650 sample_interval = ctx->attrs.sample_interval ?
2651 ctx->attrs.sample_interval : 1;
2652 if (ctx->passed_sample_intervals >= next_aggregation_sis) {
2653 if (ctx->attrs.intervals_goal.aggrs &&
2654 ctx->passed_sample_intervals >=
2655 ctx->next_intervals_tune_sis) {
2656 /*
2657 * ctx->next_aggregation_sis might be updated
2658 * from kdamond_call(). In that case,
2659 * damon_set_attrs() which will be called from
2660 * kdamond_tune_interval() may wrongly think
2661 * this is in the middle of the current
2662 * aggregation, and make aggregation
2663 * information reset for all regions. Then,
2664 * following kdamond_reset_aggregated() call
2665 * will make the region information invalid,
2666 * particularly for ->nr_accesses_bp.
2667 *
2668 * Reset ->next_aggregation_sis to avoid that.
2669 * It will anyway be correctly updated
2670 * after this if clause.
2671 */
2672 ctx->next_aggregation_sis =
2673 next_aggregation_sis;
2674 ctx->next_intervals_tune_sis +=
2675 ctx->attrs.aggr_samples *
2676 ctx->attrs.intervals_goal.aggrs;
2677 kdamond_tune_intervals(ctx);
2678 sample_interval = ctx->attrs.sample_interval ?
2679 ctx->attrs.sample_interval : 1;
2680
2681 }
2682 ctx->next_aggregation_sis = next_aggregation_sis +
2683 ctx->attrs.aggr_interval / sample_interval;
2684
2685 kdamond_reset_aggregated(ctx);
2686 kdamond_split_regions(ctx);
2687 }
2688
2689 if (ctx->passed_sample_intervals >= next_ops_update_sis) {
2690 ctx->next_ops_update_sis = next_ops_update_sis +
2691 ctx->attrs.ops_update_interval /
2692 sample_interval;
2693 if (ctx->ops.update)
2694 ctx->ops.update(ctx);
2695 sz_limit = damon_region_sz_limit(ctx);
2696 }
2697 }
2698 done:
2699 damon_for_each_target(t, ctx) {
2700 damon_for_each_region_safe(r, next, t)
2701 damon_destroy_region(r, t);
2702 }
2703
2704 if (ctx->ops.cleanup)
2705 ctx->ops.cleanup(ctx);
2706 kfree(ctx->regions_score_histogram);
2707
2708 pr_debug("kdamond (%d) finishes\n", current->pid);
2709 mutex_lock(&ctx->kdamond_lock);
2710 ctx->kdamond = NULL;
2711 mutex_unlock(&ctx->kdamond_lock);
2712
2713 kdamond_call(ctx, true);
2714 damos_walk_cancel(ctx);
2715
2716 mutex_lock(&damon_lock);
2717 nr_running_ctxs--;
2718 if (!nr_running_ctxs && running_exclusive_ctxs)
2719 running_exclusive_ctxs = false;
2720 mutex_unlock(&damon_lock);
2721
2722 damon_destroy_targets(ctx);
2723 return 0;
2724 }
2725
2726 /*
2727 * struct damon_system_ram_region - System RAM resource address region of
2728 * [@start, @end).
2729 * @start: Start address of the region (inclusive).
2730 * @end: End address of the region (exclusive).
2731 */
2732 struct damon_system_ram_region {
2733 unsigned long start;
2734 unsigned long end;
2735 };
2736
2737 static int walk_system_ram(struct resource *res, void *arg)
2738 {
2739 struct damon_system_ram_region *a = arg;
2740
2741 if (a->end - a->start < resource_size(res)) {
2742 a->start = res->start;
2743 a->end = res->end;
2744 }
2745 return 0;
2746 }
2747
2748 /*
2749 * Find biggest 'System RAM' resource and store its start and end address in
2750 * @start and @end, respectively. If no System RAM is found, returns false.
2751 */
2752 static bool damon_find_biggest_system_ram(unsigned long *start,
2753 unsigned long *end)
2755 {
2756 struct damon_system_ram_region arg = {};
2757
2758 walk_system_ram_res(0, ULONG_MAX, &arg, walk_system_ram);
2759 if (arg.end <= arg.start)
2760 return false;
2761
2762 *start = arg.start;
2763 *end = arg.end;
2764 return true;
2765 }
2766
2767 /**
2768 * damon_set_region_biggest_system_ram_default() - Set the region of the given
2769 * monitoring target as requested, or biggest 'System RAM'.
2770 * @t: The monitoring target to set the region.
2771 * @start: The pointer to the start address of the region.
2772 * @end: The pointer to the end address of the region.
2773 *
2774 * This function sets the region of @t as requested by @start and @end. If the
2775 * values of @start and @end are zero, however, this function finds the biggest
2776 * 'System RAM' resource and sets the region to cover the resource. In the
2777 * latter case, this function saves the start and end addresses of the resource
2778 * in @start and @end, respectively.
2779 *
2780 * Return: 0 on success, negative error code otherwise.
2781 */
2782 int damon_set_region_biggest_system_ram_default(struct damon_target *t,
2783 unsigned long *start, unsigned long *end)
2784 {
2785 struct damon_addr_range addr_range;
2786
2787 if (*start > *end)
2788 return -EINVAL;
2789
2790 if (!*start && !*end &&
2791 !damon_find_biggest_system_ram(start, end))
2792 return -EINVAL;
2793
2794 addr_range.start = *start;
2795 addr_range.end = *end;
2796 return damon_set_regions(t, &addr_range, 1, DAMON_MIN_REGION);
2797 }
2798
2799 /*
2800 * damon_moving_sum() - Calculate an inferred moving sum value.
2801 * @mvsum: Inferred sum of the last @len_window values.
2802 * @nomvsum: Non-moving sum of the last discrete @len_window window values.
2803 * @len_window: The number of last values to take care of.
2804 * @new_value: New value that will be added to the pseudo moving sum.
2805 *
2806 * Moving sum (moving average * window size) is good for handling noise, but
2807 * the cost of keeping past values can be high for arbitrary window size. This
2808 * function implements a lightweight pseudo moving sum function that doesn't
2809 * keep the past window values.
2810 *
2811 * It simply assumes there was no noise in the past, and gets the no-noise
2812 * assumed past value to drop from @nomvsum and @len_window. @nomvsum is a
2813 * non-moving sum of the last window. For example, if @len_window is 10 and we
2814 * have 25 values, @nomvsum is the sum of the 11th to 20th values of the 25
2815 * values. Hence, this function simply drops @nomvsum / @len_window from the
2816 * given @mvsum and adds @new_value.
2817 *
2818 * For example, if @len_window is 10 and @nomvsum is 50, the last 10 values of
2819 * the last window could vary, e.g., 0, 10, 0, 10, 0, 10, 0, 0, 0, 20. For
2820 * calculating the next moving sum with a new value, we should drop 0 from 50
2821 * and add the new value. However, this function assumes it got value 5 for
2822 * each of the last ten times. Based on the assumption, when the next value is
2823 * measured, it drops the assumed past value, 5, from the current sum, and adds
2824 * the new value to get the updated pseudo-moving sum.
2825 *
2826 * This means the value could have errors, but the errors will disappear on
2827 * every @len_window-aligned call. For example, if @len_window is 10, the
2828 * pseudo moving sum with 11th value to 19th value would have an error. But
2829 * the sum with 20th value will not have the error.
2830 *
2831 * Return: Pseudo-moving sum after getting the @new_value.
2832 */
2833 static unsigned int damon_moving_sum(unsigned int mvsum, unsigned int nomvsum,
2834 unsigned int len_window, unsigned int new_value)
2835 {
2836 return mvsum - nomvsum / len_window + new_value;
2837 }
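/*
 * Editor-added continuation of the example above: with @len_window 10,
 * @nomvsum 50, a current @mvsum of 50, and a @new_value of 10, the function
 * assumes a past value of 50 / 10 == 5 drops out of the window and returns
 * 50 - 5 + 10 == 55.
 */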
2838
2839 /**
2840 * damon_update_region_access_rate() - Update the access rate of a region.
2841 * @r: The DAMON region to update for its access check result.
2842 * @accessed: Whether the region has been accessed during the last sampling interval.
2843 * @attrs: The damon_attrs of the DAMON context.
2844 *
2845 * Update the access rate of a region with the region's last sampling interval
2846 * access check result.
2847 *
2848 * Usually this will be called by &damon_operations->check_accesses callback.
2849 */
2850 void damon_update_region_access_rate(struct damon_region *r, bool accessed,
2851 struct damon_attrs *attrs)
2852 {
2853 unsigned int len_window = 1;
2854
2855 /*
2856 * sample_interval can be zero, but cannot be larger than
2857 * aggr_interval, owing to validation of damon_set_attrs().
2858 */
2859 if (attrs->sample_interval)
2860 len_window = damon_max_nr_accesses(attrs);
2861 r->nr_accesses_bp = damon_moving_sum(r->nr_accesses_bp,
2862 r->last_nr_accesses * 10000, len_window,
2863 accessed ? 10000 : 0);
2864
2865 if (accessed)
2866 r->nr_accesses++;
2867 }
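/*
 * Editor-added illustration (hypothetical numbers): with
 * sample_interval == 5000 us and aggr_interval == 100000 us, len_window is
 * 20.  For a region with nr_accesses_bp == 40000 and last_nr_accesses == 4,
 * an accessed sample yields 40000 - (4 * 10000 / 20) + 10000 == 48000, and
 * a not-accessed sample yields 38000.
 */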
2868
2869 /**
2870 * damon_initialized() - Return if DAMON is ready to be used.
2871 *
2872 * Return: true if DAMON is ready to be used, false otherwise.
2873 */
2874 bool damon_initialized(void)
2875 {
2876 return damon_region_cache != NULL;
2877 }
2878
2879 static int __init damon_init(void)
2880 {
2881 damon_region_cache = KMEM_CACHE(damon_region, 0);
2882 if (unlikely(!damon_region_cache)) {
2883 		pr_err("failed to create damon_region_cache\n");
2884 return -ENOMEM;
2885 }
2886
2887 return 0;
2888 }
2889
2890 subsys_initcall(damon_init);
2891
2892 #include "tests/core-kunit.h"
2893