1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Data Access Monitor 4 * 5 * Author: SeongJae Park <sj@kernel.org> 6 */ 7 8 #define pr_fmt(fmt) "damon: " fmt 9 10 #include <linux/damon.h> 11 #include <linux/delay.h> 12 #include <linux/kthread.h> 13 #include <linux/mm.h> 14 #include <linux/psi.h> 15 #include <linux/slab.h> 16 #include <linux/string.h> 17 #include <linux/string_choices.h> 18 19 #define CREATE_TRACE_POINTS 20 #include <trace/events/damon.h> 21 22 #ifdef CONFIG_DAMON_KUNIT_TEST 23 #undef DAMON_MIN_REGION 24 #define DAMON_MIN_REGION 1 25 #endif 26 27 static DEFINE_MUTEX(damon_lock); 28 static int nr_running_ctxs; 29 static bool running_exclusive_ctxs; 30 31 static DEFINE_MUTEX(damon_ops_lock); 32 static struct damon_operations damon_registered_ops[NR_DAMON_OPS]; 33 34 static struct kmem_cache *damon_region_cache __ro_after_init; 35 36 /* Should be called under damon_ops_lock with id smaller than NR_DAMON_OPS */ 37 static bool __damon_is_registered_ops(enum damon_ops_id id) 38 { 39 struct damon_operations empty_ops = {}; 40 41 if (!memcmp(&empty_ops, &damon_registered_ops[id], sizeof(empty_ops))) 42 return false; 43 return true; 44 } 45 46 /** 47 * damon_is_registered_ops() - Check if a given damon_operations is registered. 48 * @id: Id of the damon_operations to check if registered. 49 * 50 * Return: true if the ops is set, false otherwise. 51 */ 52 bool damon_is_registered_ops(enum damon_ops_id id) 53 { 54 bool registered; 55 56 if (id >= NR_DAMON_OPS) 57 return false; 58 mutex_lock(&damon_ops_lock); 59 registered = __damon_is_registered_ops(id); 60 mutex_unlock(&damon_ops_lock); 61 return registered; 62 } 63 64 /** 65 * damon_register_ops() - Register a monitoring operations set to DAMON. 66 * @ops: monitoring operations set to register. 67 * 68 * This function registers a monitoring operations set of valid &struct 69 * damon_operations->id so that others can find and use them later. 70 * 71 * Return: 0 on success, negative error code otherwise. 72 */ 73 int damon_register_ops(struct damon_operations *ops) 74 { 75 int err = 0; 76 77 if (ops->id >= NR_DAMON_OPS) 78 return -EINVAL; 79 80 mutex_lock(&damon_ops_lock); 81 /* Fail for already registered ops */ 82 if (__damon_is_registered_ops(ops->id)) 83 err = -EINVAL; 84 else 85 damon_registered_ops[ops->id] = *ops; 86 mutex_unlock(&damon_ops_lock); 87 return err; 88 } 89 90 /** 91 * damon_select_ops() - Select a monitoring operations to use with the context. 92 * @ctx: monitoring context to use the operations. 93 * @id: id of the registered monitoring operations to select. 94 * 95 * This function finds registered monitoring operations set of @id and make 96 * @ctx to use it. 97 * 98 * Return: 0 on success, negative error code otherwise. 
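 *
 * Illustrative usage sketch (not part of this file; it assumes the physical
 * address space operations set, DAMON_OPS_PADDR, has been registered):
 *
 *	struct damon_ctx *ctx = damon_new_ctx();
 *
 *	if (ctx && damon_select_ops(ctx, DAMON_OPS_PADDR))
 *		pr_err("DAMON_OPS_PADDR is not registered\n");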
99 */ 100 int damon_select_ops(struct damon_ctx *ctx, enum damon_ops_id id) 101 { 102 int err = 0; 103 104 if (id >= NR_DAMON_OPS) 105 return -EINVAL; 106 107 mutex_lock(&damon_ops_lock); 108 if (!__damon_is_registered_ops(id)) 109 err = -EINVAL; 110 else 111 ctx->ops = damon_registered_ops[id]; 112 mutex_unlock(&damon_ops_lock); 113 return err; 114 } 115 116 /* 117 * Construct a damon_region struct 118 * 119 * Returns the pointer to the new struct if success, or NULL otherwise 120 */ 121 struct damon_region *damon_new_region(unsigned long start, unsigned long end) 122 { 123 struct damon_region *region; 124 125 region = kmem_cache_alloc(damon_region_cache, GFP_KERNEL); 126 if (!region) 127 return NULL; 128 129 region->ar.start = start; 130 region->ar.end = end; 131 region->nr_accesses = 0; 132 region->nr_accesses_bp = 0; 133 INIT_LIST_HEAD(®ion->list); 134 135 region->age = 0; 136 region->last_nr_accesses = 0; 137 138 return region; 139 } 140 141 void damon_add_region(struct damon_region *r, struct damon_target *t) 142 { 143 list_add_tail(&r->list, &t->regions_list); 144 t->nr_regions++; 145 } 146 147 static void damon_del_region(struct damon_region *r, struct damon_target *t) 148 { 149 list_del(&r->list); 150 t->nr_regions--; 151 } 152 153 static void damon_free_region(struct damon_region *r) 154 { 155 kmem_cache_free(damon_region_cache, r); 156 } 157 158 void damon_destroy_region(struct damon_region *r, struct damon_target *t) 159 { 160 damon_del_region(r, t); 161 damon_free_region(r); 162 } 163 164 /* 165 * Check whether a region is intersecting an address range 166 * 167 * Returns true if it is. 168 */ 169 static bool damon_intersect(struct damon_region *r, 170 struct damon_addr_range *re) 171 { 172 return !(r->ar.end <= re->start || re->end <= r->ar.start); 173 } 174 175 /* 176 * Fill holes in regions with new regions. 177 */ 178 static int damon_fill_regions_holes(struct damon_region *first, 179 struct damon_region *last, struct damon_target *t) 180 { 181 struct damon_region *r = first; 182 183 damon_for_each_region_from(r, t) { 184 struct damon_region *next, *newr; 185 186 if (r == last) 187 break; 188 next = damon_next_region(r); 189 if (r->ar.end != next->ar.start) { 190 newr = damon_new_region(r->ar.end, next->ar.start); 191 if (!newr) 192 return -ENOMEM; 193 damon_insert_region(newr, r, next, t); 194 } 195 } 196 return 0; 197 } 198 199 /* 200 * damon_set_regions() - Set regions of a target for given address ranges. 201 * @t: the given target. 202 * @ranges: array of new monitoring target ranges. 203 * @nr_ranges: length of @ranges. 204 * @min_sz_region: minimum region size. 205 * 206 * This function adds new regions to, or modify existing regions of a 207 * monitoring target to fit in specific ranges. 208 * 209 * Return: 0 if success, or negative error code otherwise. 
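 *
 * Illustrative usage sketch (the address values below are hypothetical and
 * not from this file):
 *
 *	struct damon_addr_range range = {
 *		.start = 0,
 *		.end = 1UL << 30,
 *	};
 *
 *	err = damon_set_regions(t, &range, 1, DAMON_MIN_REGION);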
 */
int damon_set_regions(struct damon_target *t, struct damon_addr_range *ranges,
		unsigned int nr_ranges, unsigned long min_sz_region)
{
	struct damon_region *r, *next;
	unsigned int i;
	int err;

	/* Remove regions which are not in the new ranges */
	damon_for_each_region_safe(r, next, t) {
		for (i = 0; i < nr_ranges; i++) {
			if (damon_intersect(r, &ranges[i]))
				break;
		}
		if (i == nr_ranges)
			damon_destroy_region(r, t);
	}

	r = damon_first_region(t);
	/* Add new regions or resize existing regions to fit in the ranges */
	for (i = 0; i < nr_ranges; i++) {
		struct damon_region *first = NULL, *last, *newr;
		struct damon_addr_range *range;

		range = &ranges[i];
		/* Get the first/last regions intersecting with the range */
		damon_for_each_region_from(r, t) {
			if (damon_intersect(r, range)) {
				if (!first)
					first = r;
				last = r;
			}
			if (r->ar.start >= range->end)
				break;
		}
		if (!first) {
			/* no region intersects with this range */
			newr = damon_new_region(
					ALIGN_DOWN(range->start,
						min_sz_region),
					ALIGN(range->end, min_sz_region));
			if (!newr)
				return -ENOMEM;
			damon_insert_region(newr, damon_prev_region(r), r, t);
		} else {
			/* resize intersecting regions to fit in this range */
			first->ar.start = ALIGN_DOWN(range->start,
					min_sz_region);
			last->ar.end = ALIGN(range->end, min_sz_region);

			/* fill possible holes in the range */
			err = damon_fill_regions_holes(first, last, t);
			if (err)
				return err;
		}
	}
	return 0;
}

struct damos_filter *damos_new_filter(enum damos_filter_type type,
		bool matching, bool allow)
{
	struct damos_filter *filter;

	filter = kmalloc(sizeof(*filter), GFP_KERNEL);
	if (!filter)
		return NULL;
	filter->type = type;
	filter->matching = matching;
	filter->allow = allow;
	INIT_LIST_HEAD(&filter->list);
	return filter;
}

/**
 * damos_filter_for_ops() - Return if the filter is an ops-handled one.
 * @type:	type of the filter.
 *
 * Return: true if the filter of @type needs to be handled by ops layer, false
 * otherwise.
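 *
 * For example, DAMOS_FILTER_TYPE_ADDR and DAMOS_FILTER_TYPE_TARGET are
 * handled by the core layer, while DAMOS_FILTER_TYPE_MEMCG and
 * DAMOS_FILTER_TYPE_HUGEPAGE_SIZE are handled by the operations set layer.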
290 */ 291 bool damos_filter_for_ops(enum damos_filter_type type) 292 { 293 switch (type) { 294 case DAMOS_FILTER_TYPE_ADDR: 295 case DAMOS_FILTER_TYPE_TARGET: 296 return false; 297 default: 298 break; 299 } 300 return true; 301 } 302 303 void damos_add_filter(struct damos *s, struct damos_filter *f) 304 { 305 if (damos_filter_for_ops(f->type)) 306 list_add_tail(&f->list, &s->ops_filters); 307 else 308 list_add_tail(&f->list, &s->filters); 309 } 310 311 static void damos_del_filter(struct damos_filter *f) 312 { 313 list_del(&f->list); 314 } 315 316 static void damos_free_filter(struct damos_filter *f) 317 { 318 kfree(f); 319 } 320 321 void damos_destroy_filter(struct damos_filter *f) 322 { 323 damos_del_filter(f); 324 damos_free_filter(f); 325 } 326 327 struct damos_quota_goal *damos_new_quota_goal( 328 enum damos_quota_goal_metric metric, 329 unsigned long target_value) 330 { 331 struct damos_quota_goal *goal; 332 333 goal = kmalloc(sizeof(*goal), GFP_KERNEL); 334 if (!goal) 335 return NULL; 336 goal->metric = metric; 337 goal->target_value = target_value; 338 INIT_LIST_HEAD(&goal->list); 339 return goal; 340 } 341 342 void damos_add_quota_goal(struct damos_quota *q, struct damos_quota_goal *g) 343 { 344 list_add_tail(&g->list, &q->goals); 345 } 346 347 static void damos_del_quota_goal(struct damos_quota_goal *g) 348 { 349 list_del(&g->list); 350 } 351 352 static void damos_free_quota_goal(struct damos_quota_goal *g) 353 { 354 kfree(g); 355 } 356 357 void damos_destroy_quota_goal(struct damos_quota_goal *g) 358 { 359 damos_del_quota_goal(g); 360 damos_free_quota_goal(g); 361 } 362 363 /* initialize fields of @quota that normally API users wouldn't set */ 364 static struct damos_quota *damos_quota_init(struct damos_quota *quota) 365 { 366 quota->esz = 0; 367 quota->total_charged_sz = 0; 368 quota->total_charged_ns = 0; 369 quota->charged_sz = 0; 370 quota->charged_from = 0; 371 quota->charge_target_from = NULL; 372 quota->charge_addr_from = 0; 373 quota->esz_bp = 0; 374 return quota; 375 } 376 377 struct damos *damon_new_scheme(struct damos_access_pattern *pattern, 378 enum damos_action action, 379 unsigned long apply_interval_us, 380 struct damos_quota *quota, 381 struct damos_watermarks *wmarks, 382 int target_nid) 383 { 384 struct damos *scheme; 385 386 scheme = kmalloc(sizeof(*scheme), GFP_KERNEL); 387 if (!scheme) 388 return NULL; 389 scheme->pattern = *pattern; 390 scheme->action = action; 391 scheme->apply_interval_us = apply_interval_us; 392 /* 393 * next_apply_sis will be set when kdamond starts. While kdamond is 394 * running, it will also updated when it is added to the DAMON context, 395 * or damon_attrs are updated. 396 */ 397 scheme->next_apply_sis = 0; 398 scheme->walk_completed = false; 399 INIT_LIST_HEAD(&scheme->filters); 400 INIT_LIST_HEAD(&scheme->ops_filters); 401 scheme->stat = (struct damos_stat){}; 402 INIT_LIST_HEAD(&scheme->list); 403 404 scheme->quota = *(damos_quota_init(quota)); 405 /* quota.goals should be separately set by caller */ 406 INIT_LIST_HEAD(&scheme->quota.goals); 407 408 scheme->wmarks = *wmarks; 409 scheme->wmarks.activated = true; 410 411 scheme->migrate_dests = (struct damos_migrate_dests){}; 412 scheme->target_nid = target_nid; 413 414 return scheme; 415 } 416 417 static void damos_set_next_apply_sis(struct damos *s, struct damon_ctx *ctx) 418 { 419 unsigned long sample_interval = ctx->attrs.sample_interval ? 420 ctx->attrs.sample_interval : 1; 421 unsigned long apply_interval = s->apply_interval_us ? 
422 s->apply_interval_us : ctx->attrs.aggr_interval; 423 424 s->next_apply_sis = ctx->passed_sample_intervals + 425 apply_interval / sample_interval; 426 } 427 428 void damon_add_scheme(struct damon_ctx *ctx, struct damos *s) 429 { 430 list_add_tail(&s->list, &ctx->schemes); 431 damos_set_next_apply_sis(s, ctx); 432 } 433 434 static void damon_del_scheme(struct damos *s) 435 { 436 list_del(&s->list); 437 } 438 439 static void damon_free_scheme(struct damos *s) 440 { 441 kfree(s); 442 } 443 444 void damon_destroy_scheme(struct damos *s) 445 { 446 struct damos_quota_goal *g, *g_next; 447 struct damos_filter *f, *next; 448 449 damos_for_each_quota_goal_safe(g, g_next, &s->quota) 450 damos_destroy_quota_goal(g); 451 452 damos_for_each_filter_safe(f, next, s) 453 damos_destroy_filter(f); 454 455 kfree(s->migrate_dests.node_id_arr); 456 kfree(s->migrate_dests.weight_arr); 457 damon_del_scheme(s); 458 damon_free_scheme(s); 459 } 460 461 /* 462 * Construct a damon_target struct 463 * 464 * Returns the pointer to the new struct if success, or NULL otherwise 465 */ 466 struct damon_target *damon_new_target(void) 467 { 468 struct damon_target *t; 469 470 t = kmalloc(sizeof(*t), GFP_KERNEL); 471 if (!t) 472 return NULL; 473 474 t->pid = NULL; 475 t->nr_regions = 0; 476 INIT_LIST_HEAD(&t->regions_list); 477 INIT_LIST_HEAD(&t->list); 478 479 return t; 480 } 481 482 void damon_add_target(struct damon_ctx *ctx, struct damon_target *t) 483 { 484 list_add_tail(&t->list, &ctx->adaptive_targets); 485 } 486 487 bool damon_targets_empty(struct damon_ctx *ctx) 488 { 489 return list_empty(&ctx->adaptive_targets); 490 } 491 492 static void damon_del_target(struct damon_target *t) 493 { 494 list_del(&t->list); 495 } 496 497 void damon_free_target(struct damon_target *t) 498 { 499 struct damon_region *r, *next; 500 501 damon_for_each_region_safe(r, next, t) 502 damon_free_region(r); 503 kfree(t); 504 } 505 506 void damon_destroy_target(struct damon_target *t, struct damon_ctx *ctx) 507 { 508 509 if (ctx && ctx->ops.cleanup_target) 510 ctx->ops.cleanup_target(t); 511 512 damon_del_target(t); 513 damon_free_target(t); 514 } 515 516 unsigned int damon_nr_regions(struct damon_target *t) 517 { 518 return t->nr_regions; 519 } 520 521 struct damon_ctx *damon_new_ctx(void) 522 { 523 struct damon_ctx *ctx; 524 525 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); 526 if (!ctx) 527 return NULL; 528 529 init_completion(&ctx->kdamond_started); 530 531 ctx->attrs.sample_interval = 5 * 1000; 532 ctx->attrs.aggr_interval = 100 * 1000; 533 ctx->attrs.ops_update_interval = 60 * 1000 * 1000; 534 535 ctx->passed_sample_intervals = 0; 536 /* These will be set from kdamond_init_ctx() */ 537 ctx->next_aggregation_sis = 0; 538 ctx->next_ops_update_sis = 0; 539 540 mutex_init(&ctx->kdamond_lock); 541 INIT_LIST_HEAD(&ctx->call_controls); 542 mutex_init(&ctx->call_controls_lock); 543 mutex_init(&ctx->walk_control_lock); 544 545 ctx->attrs.min_nr_regions = 10; 546 ctx->attrs.max_nr_regions = 1000; 547 548 ctx->addr_unit = 1; 549 ctx->min_sz_region = DAMON_MIN_REGION; 550 551 INIT_LIST_HEAD(&ctx->adaptive_targets); 552 INIT_LIST_HEAD(&ctx->schemes); 553 554 return ctx; 555 } 556 557 static void damon_destroy_targets(struct damon_ctx *ctx) 558 { 559 struct damon_target *t, *next_t; 560 561 damon_for_each_target_safe(t, next_t, ctx) 562 damon_destroy_target(t, ctx); 563 } 564 565 void damon_destroy_ctx(struct damon_ctx *ctx) 566 { 567 struct damos *s, *next_s; 568 569 damon_destroy_targets(ctx); 570 571 damon_for_each_scheme_safe(s, next_s, ctx) 572 
		damon_destroy_scheme(s);

	kfree(ctx);
}

static bool damon_attrs_equals(const struct damon_attrs *attrs1,
		const struct damon_attrs *attrs2)
{
	const struct damon_intervals_goal *ig1 = &attrs1->intervals_goal;
	const struct damon_intervals_goal *ig2 = &attrs2->intervals_goal;

	return attrs1->sample_interval == attrs2->sample_interval &&
		attrs1->aggr_interval == attrs2->aggr_interval &&
		attrs1->ops_update_interval == attrs2->ops_update_interval &&
		attrs1->min_nr_regions == attrs2->min_nr_regions &&
		attrs1->max_nr_regions == attrs2->max_nr_regions &&
		ig1->access_bp == ig2->access_bp &&
		ig1->aggrs == ig2->aggrs &&
		ig1->min_sample_us == ig2->min_sample_us &&
		ig1->max_sample_us == ig2->max_sample_us;
}

static unsigned int damon_age_for_new_attrs(unsigned int age,
		struct damon_attrs *old_attrs, struct damon_attrs *new_attrs)
{
	return age * old_attrs->aggr_interval / new_attrs->aggr_interval;
}

/* convert access ratio in bp (per 10,000) to nr_accesses */
static unsigned int damon_accesses_bp_to_nr_accesses(
		unsigned int accesses_bp, struct damon_attrs *attrs)
{
	return accesses_bp * damon_max_nr_accesses(attrs) / 10000;
}

/*
 * Convert nr_accesses to access ratio in bp (per 10,000).
 *
 * Callers should ensure attrs.aggr_interval is not zero, like
 * damon_update_monitoring_results() does.  Otherwise, divide-by-zero would
 * happen.
 */
static unsigned int damon_nr_accesses_to_accesses_bp(
		unsigned int nr_accesses, struct damon_attrs *attrs)
{
	return nr_accesses * 10000 / damon_max_nr_accesses(attrs);
}

static unsigned int damon_nr_accesses_for_new_attrs(unsigned int nr_accesses,
		struct damon_attrs *old_attrs, struct damon_attrs *new_attrs)
{
	return damon_accesses_bp_to_nr_accesses(
			damon_nr_accesses_to_accesses_bp(
				nr_accesses, old_attrs),
			new_attrs);
}

static void damon_update_monitoring_result(struct damon_region *r,
		struct damon_attrs *old_attrs, struct damon_attrs *new_attrs,
		bool aggregating)
{
	if (!aggregating) {
		r->nr_accesses = damon_nr_accesses_for_new_attrs(
				r->nr_accesses, old_attrs, new_attrs);
		r->nr_accesses_bp = r->nr_accesses * 10000;
	} else {
		/*
		 * If this is called in the middle of the aggregation, reset
		 * the aggregations we made so far for this aggregation
		 * interval.  In other words, make the status like
		 * kdamond_reset_aggregated() is called.
		 */
		r->last_nr_accesses = damon_nr_accesses_for_new_attrs(
				r->last_nr_accesses, old_attrs, new_attrs);
		r->nr_accesses_bp = r->last_nr_accesses * 10000;
		r->nr_accesses = 0;
	}
	r->age = damon_age_for_new_attrs(r->age, old_attrs, new_attrs);
}

/*
 * region->nr_accesses is the number of sampling intervals in the last
 * aggregation interval in which an access to the region was found, and
 * region->age is the number of aggregation intervals for which its access
 * pattern has been maintained.  For this reason, the real meaning of the two
 * fields depends on the current sampling interval and aggregation interval.
 * This function updates ->nr_accesses and ->age of the given damon_ctx's
 * regions for new damon_attrs.
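 *
 * For example (an illustration, not from this file): if the aggregation
 * interval grows from 100ms to 200ms while the access pattern stays the same,
 * a region of age 10 is converted to age 5, since each new aggregation
 * interval is twice as long as before.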
 */
static void damon_update_monitoring_results(struct damon_ctx *ctx,
		struct damon_attrs *new_attrs, bool aggregating)
{
	struct damon_attrs *old_attrs = &ctx->attrs;
	struct damon_target *t;
	struct damon_region *r;

	/* if any interval is zero, simply skip the conversion */
	if (!old_attrs->sample_interval || !old_attrs->aggr_interval ||
			!new_attrs->sample_interval ||
			!new_attrs->aggr_interval)
		return;

	damon_for_each_target(t, ctx)
		damon_for_each_region(r, t)
			damon_update_monitoring_result(
					r, old_attrs, new_attrs, aggregating);
}

/*
 * damon_valid_intervals_goal() - return if the intervals goal of @attrs is
 * valid.
 */
static bool damon_valid_intervals_goal(struct damon_attrs *attrs)
{
	struct damon_intervals_goal *goal = &attrs->intervals_goal;

	/* tuning is disabled */
	if (!goal->aggrs)
		return true;
	if (goal->min_sample_us > goal->max_sample_us)
		return false;
	if (attrs->sample_interval < goal->min_sample_us ||
			goal->max_sample_us < attrs->sample_interval)
		return false;
	return true;
}

/**
 * damon_set_attrs() - Set attributes for the monitoring.
 * @ctx:	monitoring context
 * @attrs:	monitoring attributes
 *
 * This function should be called while the kdamond is not running, or while an
 * access check results aggregation is not ongoing (e.g., from damon_call()).
 *
 * Every time interval is in micro-seconds.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int damon_set_attrs(struct damon_ctx *ctx, struct damon_attrs *attrs)
{
	unsigned long sample_interval = attrs->sample_interval ?
		attrs->sample_interval : 1;
	struct damos *s;
	bool aggregating = ctx->passed_sample_intervals <
		ctx->next_aggregation_sis;

	if (!damon_valid_intervals_goal(attrs))
		return -EINVAL;

	if (attrs->min_nr_regions < 3)
		return -EINVAL;
	if (attrs->min_nr_regions > attrs->max_nr_regions)
		return -EINVAL;
	if (attrs->sample_interval > attrs->aggr_interval)
		return -EINVAL;

	/* callers from outside of the core layer don't set this */
	if (!attrs->aggr_samples)
		attrs->aggr_samples = attrs->aggr_interval / sample_interval;

	ctx->next_aggregation_sis = ctx->passed_sample_intervals +
		attrs->aggr_interval / sample_interval;
	ctx->next_ops_update_sis = ctx->passed_sample_intervals +
		attrs->ops_update_interval / sample_interval;

	damon_update_monitoring_results(ctx, attrs, aggregating);
	ctx->attrs = *attrs;

	damon_for_each_scheme(s, ctx)
		damos_set_next_apply_sis(s, ctx);

	return 0;
}

/**
 * damon_set_schemes() - Set data access monitoring based operation schemes.
 * @ctx:	monitoring context
 * @schemes:	array of the schemes
 * @nr_schemes:	number of entries in @schemes
 *
 * This function should not be called while the kdamond of the context is
 * running.
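 *
 * Note that, as the code below shows, the existing schemes of @ctx are
 * destroyed first and the entries of @schemes are then linked to @ctx as-is,
 * so the caller should not separately free those entries afterwards.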
754 */ 755 void damon_set_schemes(struct damon_ctx *ctx, struct damos **schemes, 756 ssize_t nr_schemes) 757 { 758 struct damos *s, *next; 759 ssize_t i; 760 761 damon_for_each_scheme_safe(s, next, ctx) 762 damon_destroy_scheme(s); 763 for (i = 0; i < nr_schemes; i++) 764 damon_add_scheme(ctx, schemes[i]); 765 } 766 767 static struct damos_quota_goal *damos_nth_quota_goal( 768 int n, struct damos_quota *q) 769 { 770 struct damos_quota_goal *goal; 771 int i = 0; 772 773 damos_for_each_quota_goal(goal, q) { 774 if (i++ == n) 775 return goal; 776 } 777 return NULL; 778 } 779 780 static void damos_commit_quota_goal_union( 781 struct damos_quota_goal *dst, struct damos_quota_goal *src) 782 { 783 switch (dst->metric) { 784 case DAMOS_QUOTA_NODE_MEM_USED_BP: 785 case DAMOS_QUOTA_NODE_MEM_FREE_BP: 786 dst->nid = src->nid; 787 break; 788 default: 789 break; 790 } 791 } 792 793 static void damos_commit_quota_goal( 794 struct damos_quota_goal *dst, struct damos_quota_goal *src) 795 { 796 dst->metric = src->metric; 797 dst->target_value = src->target_value; 798 if (dst->metric == DAMOS_QUOTA_USER_INPUT) 799 dst->current_value = src->current_value; 800 /* keep last_psi_total as is, since it will be updated in next cycle */ 801 damos_commit_quota_goal_union(dst, src); 802 } 803 804 /** 805 * damos_commit_quota_goals() - Commit DAMOS quota goals to another quota. 806 * @dst: The commit destination DAMOS quota. 807 * @src: The commit source DAMOS quota. 808 * 809 * Copies user-specified parameters for quota goals from @src to @dst. Users 810 * should use this function for quota goals-level parameters update of running 811 * DAMON contexts, instead of manual in-place updates. 812 * 813 * This function should be called from parameters-update safe context, like 814 * damon_call(). 
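 *
 * Goals are matched by their position in the two lists: the n-th goal of @src
 * is committed onto the n-th goal of @dst, surplus goals of @dst are
 * destroyed, and surplus goals of @src are duplicated onto @dst.
 *
 * Return: 0 on success, -ENOMEM if allocation of a new goal fails.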
815 */ 816 int damos_commit_quota_goals(struct damos_quota *dst, struct damos_quota *src) 817 { 818 struct damos_quota_goal *dst_goal, *next, *src_goal, *new_goal; 819 int i = 0, j = 0; 820 821 damos_for_each_quota_goal_safe(dst_goal, next, dst) { 822 src_goal = damos_nth_quota_goal(i++, src); 823 if (src_goal) 824 damos_commit_quota_goal(dst_goal, src_goal); 825 else 826 damos_destroy_quota_goal(dst_goal); 827 } 828 damos_for_each_quota_goal_safe(src_goal, next, src) { 829 if (j++ < i) 830 continue; 831 new_goal = damos_new_quota_goal( 832 src_goal->metric, src_goal->target_value); 833 if (!new_goal) 834 return -ENOMEM; 835 damos_commit_quota_goal_union(new_goal, src_goal); 836 damos_add_quota_goal(dst, new_goal); 837 } 838 return 0; 839 } 840 841 static int damos_commit_quota(struct damos_quota *dst, struct damos_quota *src) 842 { 843 int err; 844 845 dst->reset_interval = src->reset_interval; 846 dst->ms = src->ms; 847 dst->sz = src->sz; 848 err = damos_commit_quota_goals(dst, src); 849 if (err) 850 return err; 851 dst->weight_sz = src->weight_sz; 852 dst->weight_nr_accesses = src->weight_nr_accesses; 853 dst->weight_age = src->weight_age; 854 return 0; 855 } 856 857 static struct damos_filter *damos_nth_filter(int n, struct damos *s) 858 { 859 struct damos_filter *filter; 860 int i = 0; 861 862 damos_for_each_filter(filter, s) { 863 if (i++ == n) 864 return filter; 865 } 866 return NULL; 867 } 868 869 static struct damos_filter *damos_nth_ops_filter(int n, struct damos *s) 870 { 871 struct damos_filter *filter; 872 int i = 0; 873 874 damos_for_each_ops_filter(filter, s) { 875 if (i++ == n) 876 return filter; 877 } 878 return NULL; 879 } 880 881 static void damos_commit_filter_arg( 882 struct damos_filter *dst, struct damos_filter *src) 883 { 884 switch (dst->type) { 885 case DAMOS_FILTER_TYPE_MEMCG: 886 dst->memcg_id = src->memcg_id; 887 break; 888 case DAMOS_FILTER_TYPE_ADDR: 889 dst->addr_range = src->addr_range; 890 break; 891 case DAMOS_FILTER_TYPE_TARGET: 892 dst->target_idx = src->target_idx; 893 break; 894 case DAMOS_FILTER_TYPE_HUGEPAGE_SIZE: 895 dst->sz_range = src->sz_range; 896 break; 897 default: 898 break; 899 } 900 } 901 902 static void damos_commit_filter( 903 struct damos_filter *dst, struct damos_filter *src) 904 { 905 dst->type = src->type; 906 dst->matching = src->matching; 907 dst->allow = src->allow; 908 damos_commit_filter_arg(dst, src); 909 } 910 911 static int damos_commit_core_filters(struct damos *dst, struct damos *src) 912 { 913 struct damos_filter *dst_filter, *next, *src_filter, *new_filter; 914 int i = 0, j = 0; 915 916 damos_for_each_filter_safe(dst_filter, next, dst) { 917 src_filter = damos_nth_filter(i++, src); 918 if (src_filter) 919 damos_commit_filter(dst_filter, src_filter); 920 else 921 damos_destroy_filter(dst_filter); 922 } 923 924 damos_for_each_filter_safe(src_filter, next, src) { 925 if (j++ < i) 926 continue; 927 928 new_filter = damos_new_filter( 929 src_filter->type, src_filter->matching, 930 src_filter->allow); 931 if (!new_filter) 932 return -ENOMEM; 933 damos_commit_filter_arg(new_filter, src_filter); 934 damos_add_filter(dst, new_filter); 935 } 936 return 0; 937 } 938 939 static int damos_commit_ops_filters(struct damos *dst, struct damos *src) 940 { 941 struct damos_filter *dst_filter, *next, *src_filter, *new_filter; 942 int i = 0, j = 0; 943 944 damos_for_each_ops_filter_safe(dst_filter, next, dst) { 945 src_filter = damos_nth_ops_filter(i++, src); 946 if (src_filter) 947 damos_commit_filter(dst_filter, src_filter); 948 else 949 
damos_destroy_filter(dst_filter); 950 } 951 952 damos_for_each_ops_filter_safe(src_filter, next, src) { 953 if (j++ < i) 954 continue; 955 956 new_filter = damos_new_filter( 957 src_filter->type, src_filter->matching, 958 src_filter->allow); 959 if (!new_filter) 960 return -ENOMEM; 961 damos_commit_filter_arg(new_filter, src_filter); 962 damos_add_filter(dst, new_filter); 963 } 964 return 0; 965 } 966 967 /** 968 * damos_filters_default_reject() - decide whether to reject memory that didn't 969 * match with any given filter. 970 * @filters: Given DAMOS filters of a group. 971 */ 972 static bool damos_filters_default_reject(struct list_head *filters) 973 { 974 struct damos_filter *last_filter; 975 976 if (list_empty(filters)) 977 return false; 978 last_filter = list_last_entry(filters, struct damos_filter, list); 979 return last_filter->allow; 980 } 981 982 static void damos_set_filters_default_reject(struct damos *s) 983 { 984 if (!list_empty(&s->ops_filters)) 985 s->core_filters_default_reject = false; 986 else 987 s->core_filters_default_reject = 988 damos_filters_default_reject(&s->filters); 989 s->ops_filters_default_reject = 990 damos_filters_default_reject(&s->ops_filters); 991 } 992 993 static int damos_commit_dests(struct damos *dst, struct damos *src) 994 { 995 struct damos_migrate_dests *dst_dests, *src_dests; 996 997 dst_dests = &dst->migrate_dests; 998 src_dests = &src->migrate_dests; 999 1000 if (dst_dests->nr_dests != src_dests->nr_dests) { 1001 kfree(dst_dests->node_id_arr); 1002 kfree(dst_dests->weight_arr); 1003 1004 dst_dests->node_id_arr = kmalloc_array(src_dests->nr_dests, 1005 sizeof(*dst_dests->node_id_arr), GFP_KERNEL); 1006 if (!dst_dests->node_id_arr) { 1007 dst_dests->weight_arr = NULL; 1008 return -ENOMEM; 1009 } 1010 1011 dst_dests->weight_arr = kmalloc_array(src_dests->nr_dests, 1012 sizeof(*dst_dests->weight_arr), GFP_KERNEL); 1013 if (!dst_dests->weight_arr) { 1014 /* ->node_id_arr will be freed by scheme destruction */ 1015 return -ENOMEM; 1016 } 1017 } 1018 1019 dst_dests->nr_dests = src_dests->nr_dests; 1020 for (int i = 0; i < src_dests->nr_dests; i++) { 1021 dst_dests->node_id_arr[i] = src_dests->node_id_arr[i]; 1022 dst_dests->weight_arr[i] = src_dests->weight_arr[i]; 1023 } 1024 1025 return 0; 1026 } 1027 1028 static int damos_commit_filters(struct damos *dst, struct damos *src) 1029 { 1030 int err; 1031 1032 err = damos_commit_core_filters(dst, src); 1033 if (err) 1034 return err; 1035 err = damos_commit_ops_filters(dst, src); 1036 if (err) 1037 return err; 1038 damos_set_filters_default_reject(dst); 1039 return 0; 1040 } 1041 1042 static struct damos *damon_nth_scheme(int n, struct damon_ctx *ctx) 1043 { 1044 struct damos *s; 1045 int i = 0; 1046 1047 damon_for_each_scheme(s, ctx) { 1048 if (i++ == n) 1049 return s; 1050 } 1051 return NULL; 1052 } 1053 1054 static int damos_commit(struct damos *dst, struct damos *src) 1055 { 1056 int err; 1057 1058 dst->pattern = src->pattern; 1059 dst->action = src->action; 1060 dst->apply_interval_us = src->apply_interval_us; 1061 1062 err = damos_commit_quota(&dst->quota, &src->quota); 1063 if (err) 1064 return err; 1065 1066 dst->wmarks = src->wmarks; 1067 dst->target_nid = src->target_nid; 1068 1069 err = damos_commit_dests(dst, src); 1070 if (err) 1071 return err; 1072 1073 err = damos_commit_filters(dst, src); 1074 return err; 1075 } 1076 1077 static int damon_commit_schemes(struct damon_ctx *dst, struct damon_ctx *src) 1078 { 1079 struct damos *dst_scheme, *next, *src_scheme, *new_scheme; 1080 int i = 0, j = 
0, err; 1081 1082 damon_for_each_scheme_safe(dst_scheme, next, dst) { 1083 src_scheme = damon_nth_scheme(i++, src); 1084 if (src_scheme) { 1085 err = damos_commit(dst_scheme, src_scheme); 1086 if (err) 1087 return err; 1088 } else { 1089 damon_destroy_scheme(dst_scheme); 1090 } 1091 } 1092 1093 damon_for_each_scheme_safe(src_scheme, next, src) { 1094 if (j++ < i) 1095 continue; 1096 new_scheme = damon_new_scheme(&src_scheme->pattern, 1097 src_scheme->action, 1098 src_scheme->apply_interval_us, 1099 &src_scheme->quota, &src_scheme->wmarks, 1100 NUMA_NO_NODE); 1101 if (!new_scheme) 1102 return -ENOMEM; 1103 err = damos_commit(new_scheme, src_scheme); 1104 if (err) { 1105 damon_destroy_scheme(new_scheme); 1106 return err; 1107 } 1108 damon_add_scheme(dst, new_scheme); 1109 } 1110 return 0; 1111 } 1112 1113 static struct damon_target *damon_nth_target(int n, struct damon_ctx *ctx) 1114 { 1115 struct damon_target *t; 1116 int i = 0; 1117 1118 damon_for_each_target(t, ctx) { 1119 if (i++ == n) 1120 return t; 1121 } 1122 return NULL; 1123 } 1124 1125 /* 1126 * The caller should ensure the regions of @src are 1127 * 1. valid (end >= src) and 1128 * 2. sorted by starting address. 1129 * 1130 * If @src has no region, @dst keeps current regions. 1131 */ 1132 static int damon_commit_target_regions(struct damon_target *dst, 1133 struct damon_target *src, unsigned long src_min_sz_region) 1134 { 1135 struct damon_region *src_region; 1136 struct damon_addr_range *ranges; 1137 int i = 0, err; 1138 1139 damon_for_each_region(src_region, src) 1140 i++; 1141 if (!i) 1142 return 0; 1143 1144 ranges = kmalloc_array(i, sizeof(*ranges), GFP_KERNEL | __GFP_NOWARN); 1145 if (!ranges) 1146 return -ENOMEM; 1147 i = 0; 1148 damon_for_each_region(src_region, src) 1149 ranges[i++] = src_region->ar; 1150 err = damon_set_regions(dst, ranges, i, src_min_sz_region); 1151 kfree(ranges); 1152 return err; 1153 } 1154 1155 static int damon_commit_target( 1156 struct damon_target *dst, bool dst_has_pid, 1157 struct damon_target *src, bool src_has_pid, 1158 unsigned long src_min_sz_region) 1159 { 1160 int err; 1161 1162 err = damon_commit_target_regions(dst, src, src_min_sz_region); 1163 if (err) 1164 return err; 1165 if (dst_has_pid) 1166 put_pid(dst->pid); 1167 if (src_has_pid) 1168 get_pid(src->pid); 1169 dst->pid = src->pid; 1170 return 0; 1171 } 1172 1173 static int damon_commit_targets( 1174 struct damon_ctx *dst, struct damon_ctx *src) 1175 { 1176 struct damon_target *dst_target, *next, *src_target, *new_target; 1177 int i = 0, j = 0, err; 1178 1179 damon_for_each_target_safe(dst_target, next, dst) { 1180 src_target = damon_nth_target(i++, src); 1181 if (src_target) { 1182 err = damon_commit_target( 1183 dst_target, damon_target_has_pid(dst), 1184 src_target, damon_target_has_pid(src), 1185 src->min_sz_region); 1186 if (err) 1187 return err; 1188 } else { 1189 struct damos *s; 1190 1191 damon_destroy_target(dst_target, dst); 1192 damon_for_each_scheme(s, dst) { 1193 if (s->quota.charge_target_from == dst_target) { 1194 s->quota.charge_target_from = NULL; 1195 s->quota.charge_addr_from = 0; 1196 } 1197 } 1198 } 1199 } 1200 1201 damon_for_each_target_safe(src_target, next, src) { 1202 if (j++ < i) 1203 continue; 1204 new_target = damon_new_target(); 1205 if (!new_target) 1206 return -ENOMEM; 1207 err = damon_commit_target(new_target, false, 1208 src_target, damon_target_has_pid(src), 1209 src->min_sz_region); 1210 if (err) { 1211 damon_destroy_target(new_target, NULL); 1212 return err; 1213 } 1214 damon_add_target(dst, 
new_target); 1215 } 1216 return 0; 1217 } 1218 1219 /** 1220 * damon_commit_ctx() - Commit parameters of a DAMON context to another. 1221 * @dst: The commit destination DAMON context. 1222 * @src: The commit source DAMON context. 1223 * 1224 * This function copies user-specified parameters from @src to @dst and update 1225 * the internal status and results accordingly. Users should use this function 1226 * for context-level parameters update of running context, instead of manual 1227 * in-place updates. 1228 * 1229 * This function should be called from parameters-update safe context, like 1230 * damon_call(). 1231 */ 1232 int damon_commit_ctx(struct damon_ctx *dst, struct damon_ctx *src) 1233 { 1234 int err; 1235 1236 err = damon_commit_schemes(dst, src); 1237 if (err) 1238 return err; 1239 err = damon_commit_targets(dst, src); 1240 if (err) 1241 return err; 1242 /* 1243 * schemes and targets should be updated first, since 1244 * 1. damon_set_attrs() updates monitoring results of targets and 1245 * next_apply_sis of schemes, and 1246 * 2. ops update should be done after pid handling is done (target 1247 * committing require putting pids). 1248 */ 1249 if (!damon_attrs_equals(&dst->attrs, &src->attrs)) { 1250 err = damon_set_attrs(dst, &src->attrs); 1251 if (err) 1252 return err; 1253 } 1254 dst->ops = src->ops; 1255 dst->addr_unit = src->addr_unit; 1256 dst->min_sz_region = src->min_sz_region; 1257 1258 return 0; 1259 } 1260 1261 /** 1262 * damon_nr_running_ctxs() - Return number of currently running contexts. 1263 */ 1264 int damon_nr_running_ctxs(void) 1265 { 1266 int nr_ctxs; 1267 1268 mutex_lock(&damon_lock); 1269 nr_ctxs = nr_running_ctxs; 1270 mutex_unlock(&damon_lock); 1271 1272 return nr_ctxs; 1273 } 1274 1275 /* Returns the size upper limit for each monitoring region */ 1276 static unsigned long damon_region_sz_limit(struct damon_ctx *ctx) 1277 { 1278 struct damon_target *t; 1279 struct damon_region *r; 1280 unsigned long sz = 0; 1281 1282 damon_for_each_target(t, ctx) { 1283 damon_for_each_region(r, t) 1284 sz += damon_sz_region(r); 1285 } 1286 1287 if (ctx->attrs.min_nr_regions) 1288 sz /= ctx->attrs.min_nr_regions; 1289 if (sz < ctx->min_sz_region) 1290 sz = ctx->min_sz_region; 1291 1292 return sz; 1293 } 1294 1295 static int kdamond_fn(void *data); 1296 1297 /* 1298 * __damon_start() - Starts monitoring with given context. 1299 * @ctx: monitoring context 1300 * 1301 * This function should be called while damon_lock is hold. 1302 * 1303 * Return: 0 on success, negative error code otherwise. 1304 */ 1305 static int __damon_start(struct damon_ctx *ctx) 1306 { 1307 int err = -EBUSY; 1308 1309 mutex_lock(&ctx->kdamond_lock); 1310 if (!ctx->kdamond) { 1311 err = 0; 1312 reinit_completion(&ctx->kdamond_started); 1313 ctx->kdamond = kthread_run(kdamond_fn, ctx, "kdamond.%d", 1314 nr_running_ctxs); 1315 if (IS_ERR(ctx->kdamond)) { 1316 err = PTR_ERR(ctx->kdamond); 1317 ctx->kdamond = NULL; 1318 } else { 1319 wait_for_completion(&ctx->kdamond_started); 1320 } 1321 } 1322 mutex_unlock(&ctx->kdamond_lock); 1323 1324 return err; 1325 } 1326 1327 /** 1328 * damon_start() - Starts the monitorings for a given group of contexts. 1329 * @ctxs: an array of the pointers for contexts to start monitoring 1330 * @nr_ctxs: size of @ctxs 1331 * @exclusive: exclusiveness of this contexts group 1332 * 1333 * This function starts a group of monitoring threads for a group of monitoring 1334 * contexts. One thread per each context is created and run in parallel. 
 * The caller should handle synchronization between the threads by itself.  If
 * @exclusive is true and a group of threads created by another
 * 'damon_start()' call is currently running, this function does nothing but
 * returns -EBUSY.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int damon_start(struct damon_ctx **ctxs, int nr_ctxs, bool exclusive)
{
	int i;
	int err = 0;

	mutex_lock(&damon_lock);
	if ((exclusive && nr_running_ctxs) ||
			(!exclusive && running_exclusive_ctxs)) {
		mutex_unlock(&damon_lock);
		return -EBUSY;
	}

	for (i = 0; i < nr_ctxs; i++) {
		err = __damon_start(ctxs[i]);
		if (err)
			break;
		nr_running_ctxs++;
	}
	if (exclusive && nr_running_ctxs)
		running_exclusive_ctxs = true;
	mutex_unlock(&damon_lock);

	return err;
}

/*
 * __damon_stop() - Stops monitoring of a given context.
 * @ctx:	monitoring context
 *
 * Return: 0 on success, negative error code otherwise.
 */
static int __damon_stop(struct damon_ctx *ctx)
{
	struct task_struct *tsk;

	mutex_lock(&ctx->kdamond_lock);
	tsk = ctx->kdamond;
	if (tsk) {
		get_task_struct(tsk);
		mutex_unlock(&ctx->kdamond_lock);
		kthread_stop_put(tsk);
		return 0;
	}
	mutex_unlock(&ctx->kdamond_lock);

	return -EPERM;
}

/**
 * damon_stop() - Stops the monitorings for a given group of contexts.
 * @ctxs:	an array of the pointers for contexts to stop monitoring
 * @nr_ctxs:	size of @ctxs
 *
 * Return: 0 on success, negative error code otherwise.
 */
int damon_stop(struct damon_ctx **ctxs, int nr_ctxs)
{
	int i, err = 0;

	for (i = 0; i < nr_ctxs; i++) {
		/* nr_running_ctxs is decremented in kdamond_fn */
		err = __damon_stop(ctxs[i]);
		if (err)
			break;
	}
	return err;
}

/**
 * damon_is_running() - Returns if a given DAMON context is running.
 * @ctx:	The DAMON context to see if running.
 *
 * Return: true if @ctx is running, false otherwise.
 */
bool damon_is_running(struct damon_ctx *ctx)
{
	bool running;

	mutex_lock(&ctx->kdamond_lock);
	running = ctx->kdamond != NULL;
	mutex_unlock(&ctx->kdamond_lock);
	return running;
}

/**
 * damon_call() - Invoke a given function on DAMON worker thread (kdamond).
 * @ctx:	DAMON context to call the function for.
 * @control:	Control variable of the call request.
 *
 * Ask DAMON worker thread (kdamond) of @ctx to call a function with an
 * argument, which are passed via &damon_call_control->fn and
 * &damon_call_control->data of @control, respectively.  If
 * &damon_call_control->repeat of @control is set, return as soon as the
 * request is made.  Otherwise, further wait until the kdamond finishes
 * handling of the request.
 *
 * The kdamond executes the function with the argument in the main loop, just
 * after the sampling of an iteration is finished.  The function can hence
 * safely access the internal data of the &struct damon_ctx without additional
 * synchronization.  The return value of the function will be saved in
 * &damon_call_control->return_code.
 *
 * Return: 0 on success, negative error code otherwise.
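 *
 * Illustrative usage sketch (my_callback and my_data are hypothetical and not
 * part of this file):
 *
 *	struct damon_call_control control = {
 *		.fn = my_callback,
 *		.data = my_data,
 *	};
 *
 *	err = damon_call(ctx, &control);
 *	if (!err)
 *		pr_info("kdamond returned %d\n", control.return_code);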
1444 */ 1445 int damon_call(struct damon_ctx *ctx, struct damon_call_control *control) 1446 { 1447 if (!control->repeat) 1448 init_completion(&control->completion); 1449 control->canceled = false; 1450 INIT_LIST_HEAD(&control->list); 1451 1452 mutex_lock(&ctx->call_controls_lock); 1453 list_add_tail(&ctx->call_controls, &control->list); 1454 mutex_unlock(&ctx->call_controls_lock); 1455 if (!damon_is_running(ctx)) 1456 return -EINVAL; 1457 if (control->repeat) 1458 return 0; 1459 wait_for_completion(&control->completion); 1460 if (control->canceled) 1461 return -ECANCELED; 1462 return 0; 1463 } 1464 1465 /** 1466 * damos_walk() - Invoke a given functions while DAMOS walk regions. 1467 * @ctx: DAMON context to call the functions for. 1468 * @control: Control variable of the walk request. 1469 * 1470 * Ask DAMON worker thread (kdamond) of @ctx to call a function for each region 1471 * that the kdamond will apply DAMOS action to, and wait until the kdamond 1472 * finishes handling of the request. 1473 * 1474 * The kdamond executes the given function in the main loop, for each region 1475 * just after it applied any DAMOS actions of @ctx to it. The invocation is 1476 * made only within one &damos->apply_interval_us since damos_walk() 1477 * invocation, for each scheme. The given callback function can hence safely 1478 * access the internal data of &struct damon_ctx and &struct damon_region that 1479 * each of the scheme will apply the action for next interval, without 1480 * additional synchronizations against the kdamond. If every scheme of @ctx 1481 * passed at least one &damos->apply_interval_us, kdamond marks the request as 1482 * completed so that damos_walk() can wakeup and return. 1483 * 1484 * Return: 0 on success, negative error code otherwise. 1485 */ 1486 int damos_walk(struct damon_ctx *ctx, struct damos_walk_control *control) 1487 { 1488 init_completion(&control->completion); 1489 control->canceled = false; 1490 mutex_lock(&ctx->walk_control_lock); 1491 if (ctx->walk_control) { 1492 mutex_unlock(&ctx->walk_control_lock); 1493 return -EBUSY; 1494 } 1495 ctx->walk_control = control; 1496 mutex_unlock(&ctx->walk_control_lock); 1497 if (!damon_is_running(ctx)) 1498 return -EINVAL; 1499 wait_for_completion(&control->completion); 1500 if (control->canceled) 1501 return -ECANCELED; 1502 return 0; 1503 } 1504 1505 /* 1506 * Warn and fix corrupted ->nr_accesses[_bp] for investigations and preventing 1507 * the problem being propagated. 1508 */ 1509 static void damon_warn_fix_nr_accesses_corruption(struct damon_region *r) 1510 { 1511 if (r->nr_accesses_bp == r->nr_accesses * 10000) 1512 return; 1513 WARN_ONCE(true, "invalid nr_accesses_bp at reset: %u %u\n", 1514 r->nr_accesses_bp, r->nr_accesses); 1515 r->nr_accesses_bp = r->nr_accesses * 10000; 1516 } 1517 1518 /* 1519 * Reset the aggregated monitoring results ('nr_accesses' of each region). 
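 *
 * Note that, as the code below shows, the aggregated value is saved in
 * 'last_nr_accesses' before 'nr_accesses' is reset to zero, so that, for
 * example, damon_update_monitoring_result() can still convert it if the
 * monitoring attributes are changed in the middle of the next aggregation
 * interval.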
1520 */ 1521 static void kdamond_reset_aggregated(struct damon_ctx *c) 1522 { 1523 struct damon_target *t; 1524 unsigned int ti = 0; /* target's index */ 1525 1526 damon_for_each_target(t, c) { 1527 struct damon_region *r; 1528 1529 damon_for_each_region(r, t) { 1530 trace_damon_aggregated(ti, r, damon_nr_regions(t)); 1531 damon_warn_fix_nr_accesses_corruption(r); 1532 r->last_nr_accesses = r->nr_accesses; 1533 r->nr_accesses = 0; 1534 } 1535 ti++; 1536 } 1537 } 1538 1539 static unsigned long damon_get_intervals_score(struct damon_ctx *c) 1540 { 1541 struct damon_target *t; 1542 struct damon_region *r; 1543 unsigned long sz_region, max_access_events = 0, access_events = 0; 1544 unsigned long target_access_events; 1545 unsigned long goal_bp = c->attrs.intervals_goal.access_bp; 1546 1547 damon_for_each_target(t, c) { 1548 damon_for_each_region(r, t) { 1549 sz_region = damon_sz_region(r); 1550 max_access_events += sz_region * c->attrs.aggr_samples; 1551 access_events += sz_region * r->nr_accesses; 1552 } 1553 } 1554 target_access_events = max_access_events * goal_bp / 10000; 1555 target_access_events = target_access_events ? : 1; 1556 return access_events * 10000 / target_access_events; 1557 } 1558 1559 static unsigned long damon_feed_loop_next_input(unsigned long last_input, 1560 unsigned long score); 1561 1562 static unsigned long damon_get_intervals_adaptation_bp(struct damon_ctx *c) 1563 { 1564 unsigned long score_bp, adaptation_bp; 1565 1566 score_bp = damon_get_intervals_score(c); 1567 adaptation_bp = damon_feed_loop_next_input(100000000, score_bp) / 1568 10000; 1569 /* 1570 * adaptaion_bp ranges from 1 to 20,000. Avoid too rapid reduction of 1571 * the intervals by rescaling [1,10,000] to [5000, 10,000]. 1572 */ 1573 if (adaptation_bp <= 10000) 1574 adaptation_bp = 5000 + adaptation_bp / 2; 1575 return adaptation_bp; 1576 } 1577 1578 static void kdamond_tune_intervals(struct damon_ctx *c) 1579 { 1580 unsigned long adaptation_bp; 1581 struct damon_attrs new_attrs; 1582 struct damon_intervals_goal *goal; 1583 1584 adaptation_bp = damon_get_intervals_adaptation_bp(c); 1585 if (adaptation_bp == 10000) 1586 return; 1587 1588 new_attrs = c->attrs; 1589 goal = &c->attrs.intervals_goal; 1590 new_attrs.sample_interval = min(goal->max_sample_us, 1591 c->attrs.sample_interval * adaptation_bp / 10000); 1592 new_attrs.sample_interval = max(goal->min_sample_us, 1593 new_attrs.sample_interval); 1594 new_attrs.aggr_interval = new_attrs.sample_interval * 1595 c->attrs.aggr_samples; 1596 trace_damon_monitor_intervals_tune(new_attrs.sample_interval); 1597 damon_set_attrs(c, &new_attrs); 1598 } 1599 1600 static void damon_split_region_at(struct damon_target *t, 1601 struct damon_region *r, unsigned long sz_r); 1602 1603 static bool __damos_valid_target(struct damon_region *r, struct damos *s) 1604 { 1605 unsigned long sz; 1606 unsigned int nr_accesses = r->nr_accesses_bp / 10000; 1607 1608 sz = damon_sz_region(r); 1609 return s->pattern.min_sz_region <= sz && 1610 sz <= s->pattern.max_sz_region && 1611 s->pattern.min_nr_accesses <= nr_accesses && 1612 nr_accesses <= s->pattern.max_nr_accesses && 1613 s->pattern.min_age_region <= r->age && 1614 r->age <= s->pattern.max_age_region; 1615 } 1616 1617 static bool damos_valid_target(struct damon_ctx *c, struct damon_target *t, 1618 struct damon_region *r, struct damos *s) 1619 { 1620 bool ret = __damos_valid_target(r, s); 1621 1622 if (!ret || !s->quota.esz || !c->ops.get_scheme_score) 1623 return ret; 1624 1625 return c->ops.get_scheme_score(c, t, r, s) >= 
			s->quota.min_score;
}

/*
 * damos_skip_charged_region() - Check if the given region or starting part of
 * it is already charged for the DAMOS quota.
 * @t:	The target of the region.
 * @rp:	The pointer to the region.
 * @s:	The scheme to be applied.
 * @min_sz_region:	minimum region size.
 *
 * If the quota of a scheme has been exceeded in a quota charge window, the
 * scheme's action would be applied to only a part of the target access
 * pattern fulfilling regions.  To avoid applying the scheme action to only
 * already applied regions, DAMON skips applying the scheme action to the
 * regions that were charged in the previous charge window.
 *
 * This function checks if a given region should be skipped for that reason.
 * If only the starting part of the region has previously been charged, this
 * function splits the region into two so that the second one covers the area
 * that was not charged in the previous charge window, saves the second region
 * in *rp, and returns false, so that the caller can apply the DAMON action to
 * the second one.
 *
 * Return: true if the region should be entirely skipped, false otherwise.
 */
static bool damos_skip_charged_region(struct damon_target *t,
		struct damon_region **rp, struct damos *s, unsigned long min_sz_region)
{
	struct damon_region *r = *rp;
	struct damos_quota *quota = &s->quota;
	unsigned long sz_to_skip;

	/* Skip previously charged regions */
	if (quota->charge_target_from) {
		if (t != quota->charge_target_from)
			return true;
		if (r == damon_last_region(t)) {
			quota->charge_target_from = NULL;
			quota->charge_addr_from = 0;
			return true;
		}
		if (quota->charge_addr_from &&
				r->ar.end <= quota->charge_addr_from)
			return true;

		if (quota->charge_addr_from && r->ar.start <
				quota->charge_addr_from) {
			sz_to_skip = ALIGN_DOWN(quota->charge_addr_from -
					r->ar.start, min_sz_region);
			if (!sz_to_skip) {
				if (damon_sz_region(r) <= min_sz_region)
					return true;
				sz_to_skip = min_sz_region;
			}
			damon_split_region_at(t, r, sz_to_skip);
			r = damon_next_region(r);
			*rp = r;
		}
		quota->charge_target_from = NULL;
		quota->charge_addr_from = 0;
	}
	return false;
}

static void damos_update_stat(struct damos *s,
		unsigned long sz_tried, unsigned long sz_applied,
		unsigned long sz_ops_filter_passed)
{
	s->stat.nr_tried++;
	s->stat.sz_tried += sz_tried;
	if (sz_applied)
		s->stat.nr_applied++;
	s->stat.sz_applied += sz_applied;
	s->stat.sz_ops_filter_passed += sz_ops_filter_passed;
}

static bool damos_filter_match(struct damon_ctx *ctx, struct damon_target *t,
		struct damon_region *r, struct damos_filter *filter,
		unsigned long min_sz_region)
{
	bool matched = false;
	struct damon_target *ti;
	int target_idx = 0;
	unsigned long start, end;

	switch (filter->type) {
	case DAMOS_FILTER_TYPE_TARGET:
		damon_for_each_target(ti, ctx) {
			if (ti == t)
				break;
			target_idx++;
		}
		matched = target_idx == filter->target_idx;
		break;
	case DAMOS_FILTER_TYPE_ADDR:
		start = ALIGN_DOWN(filter->addr_range.start, min_sz_region);
		end = ALIGN_DOWN(filter->addr_range.end, min_sz_region);

		/* inside the range */
		if (start <= r->ar.start && r->ar.end <= end) {
			matched = true;
			break;
		}
		/* outside of the range */
		if (r->ar.end <= start || end <= r->ar.start) {
			matched = false;
			break;
		}
		/* start before the range and overlap */
		if (r->ar.start < start) {
			damon_split_region_at(t, r, start - r->ar.start);
			matched = false;
			break;
		}
		/* start inside the range */
		damon_split_region_at(t, r, end - r->ar.start);
		matched = true;
		break;
	default:
		return false;
	}

	return matched == filter->matching;
}

static bool damos_filter_out(struct damon_ctx *ctx, struct damon_target *t,
		struct damon_region *r, struct damos *s)
{
	struct damos_filter *filter;

	s->core_filters_allowed = false;
	damos_for_each_filter(filter, s) {
		if (damos_filter_match(ctx, t, r, filter, ctx->min_sz_region)) {
			if (filter->allow)
				s->core_filters_allowed = true;
			return !filter->allow;
		}
	}
	return s->core_filters_default_reject;
}

/*
 * damos_walk_call_walk() - Call &damos_walk_control->walk_fn.
 * @ctx:	The context of &damon_ctx->walk_control.
 * @t:	The monitoring target of @r to which @s will be applied.
 * @r:	The region of @t to which @s will be applied.
 * @s:	The scheme of @ctx that will be applied to @r.
 *
 * This function is called from kdamond whenever it has asked the operations
 * set to apply a DAMOS scheme action to a region.  If a DAMOS walk request is
 * installed by damos_walk() and not yet uninstalled, invoke it.
 */
static void damos_walk_call_walk(struct damon_ctx *ctx, struct damon_target *t,
		struct damon_region *r, struct damos *s,
		unsigned long sz_filter_passed)
{
	struct damos_walk_control *control;

	if (s->walk_completed)
		return;

	control = ctx->walk_control;
	if (!control)
		return;

	control->walk_fn(control->data, ctx, t, r, s, sz_filter_passed);
}

/*
 * damos_walk_complete() - Complete DAMOS walk request if all walks are done.
 * @ctx:	The context of &damon_ctx->walk_control.
 * @s:	A scheme of @ctx for which all walks are now done.
 *
 * This function is called when kdamond has finished applying the action of a
 * DAMOS scheme to all regions that are eligible for the given
 * &damos->apply_interval_us.  If every scheme of @ctx including @s has now
 * finished walking for at least one &damos->apply_interval_us, this function
 * marks the handling of the given DAMOS walk request as done, so that
 * damos_walk() can wake up and return.
 */
static void damos_walk_complete(struct damon_ctx *ctx, struct damos *s)
{
	struct damos *siter;
	struct damos_walk_control *control;

	control = ctx->walk_control;
	if (!control)
		return;

	s->walk_completed = true;
	/* if all schemes completed, signal completion to walker */
	damon_for_each_scheme(siter, ctx) {
		if (!siter->walk_completed)
			return;
	}
	damon_for_each_scheme(siter, ctx)
		siter->walk_completed = false;

	complete(&control->completion);
	ctx->walk_control = NULL;
}

/*
 * damos_walk_cancel() - Cancel the current DAMOS walk request.
 * @ctx:	The context of &damon_ctx->walk_control.
1830 * 1831 * This function is called when @ctx is deactivated by DAMOS watermarks, DAMOS 1832 * walk is requested but there is no DAMOS scheme to walk for, or the kdamond 1833 * is already out of the main loop and therefore gonna be terminated, and hence 1834 * cannot continue the walks. This function therefore marks the walk request 1835 * as canceled, so that damos_walk() can wake up and return. 1836 */ 1837 static void damos_walk_cancel(struct damon_ctx *ctx) 1838 { 1839 struct damos_walk_control *control; 1840 1841 mutex_lock(&ctx->walk_control_lock); 1842 control = ctx->walk_control; 1843 mutex_unlock(&ctx->walk_control_lock); 1844 1845 if (!control) 1846 return; 1847 control->canceled = true; 1848 complete(&control->completion); 1849 mutex_lock(&ctx->walk_control_lock); 1850 ctx->walk_control = NULL; 1851 mutex_unlock(&ctx->walk_control_lock); 1852 } 1853 1854 static void damos_apply_scheme(struct damon_ctx *c, struct damon_target *t, 1855 struct damon_region *r, struct damos *s) 1856 { 1857 struct damos_quota *quota = &s->quota; 1858 unsigned long sz = damon_sz_region(r); 1859 struct timespec64 begin, end; 1860 unsigned long sz_applied = 0; 1861 unsigned long sz_ops_filter_passed = 0; 1862 /* 1863 * We plan to support multiple context per kdamond, as DAMON sysfs 1864 * implies with 'nr_contexts' file. Nevertheless, only single context 1865 * per kdamond is supported for now. So, we can simply use '0' context 1866 * index here. 1867 */ 1868 unsigned int cidx = 0; 1869 struct damos *siter; /* schemes iterator */ 1870 unsigned int sidx = 0; 1871 struct damon_target *titer; /* targets iterator */ 1872 unsigned int tidx = 0; 1873 bool do_trace = false; 1874 1875 /* get indices for trace_damos_before_apply() */ 1876 if (trace_damos_before_apply_enabled()) { 1877 damon_for_each_scheme(siter, c) { 1878 if (siter == s) 1879 break; 1880 sidx++; 1881 } 1882 damon_for_each_target(titer, c) { 1883 if (titer == t) 1884 break; 1885 tidx++; 1886 } 1887 do_trace = true; 1888 } 1889 1890 if (c->ops.apply_scheme) { 1891 if (quota->esz && quota->charged_sz + sz > quota->esz) { 1892 sz = ALIGN_DOWN(quota->esz - quota->charged_sz, 1893 c->min_sz_region); 1894 if (!sz) 1895 goto update_stat; 1896 damon_split_region_at(t, r, sz); 1897 } 1898 if (damos_filter_out(c, t, r, s)) 1899 return; 1900 ktime_get_coarse_ts64(&begin); 1901 trace_damos_before_apply(cidx, sidx, tidx, r, 1902 damon_nr_regions(t), do_trace); 1903 sz_applied = c->ops.apply_scheme(c, t, r, s, 1904 &sz_ops_filter_passed); 1905 damos_walk_call_walk(c, t, r, s, sz_ops_filter_passed); 1906 ktime_get_coarse_ts64(&end); 1907 quota->total_charged_ns += timespec64_to_ns(&end) - 1908 timespec64_to_ns(&begin); 1909 quota->charged_sz += sz; 1910 if (quota->esz && quota->charged_sz >= quota->esz) { 1911 quota->charge_target_from = t; 1912 quota->charge_addr_from = r->ar.end + 1; 1913 } 1914 } 1915 if (s->action != DAMOS_STAT) 1916 r->age = 0; 1917 1918 update_stat: 1919 damos_update_stat(s, sz, sz_applied, sz_ops_filter_passed); 1920 } 1921 1922 static void damon_do_apply_schemes(struct damon_ctx *c, 1923 struct damon_target *t, 1924 struct damon_region *r) 1925 { 1926 struct damos *s; 1927 1928 damon_for_each_scheme(s, c) { 1929 struct damos_quota *quota = &s->quota; 1930 1931 if (c->passed_sample_intervals < s->next_apply_sis) 1932 continue; 1933 1934 if (!s->wmarks.activated) 1935 continue; 1936 1937 /* Check the quota */ 1938 if (quota->esz && quota->charged_sz >= quota->esz) 1939 continue; 1940 1941 if (damos_skip_charged_region(t, &r, s, 
c->min_sz_region)) 1942 continue; 1943 1944 if (!damos_valid_target(c, t, r, s)) 1945 continue; 1946 1947 damos_apply_scheme(c, t, r, s); 1948 } 1949 } 1950 1951 /* 1952 * damon_feed_loop_next_input() - get next input to achieve a target score. 1953 * @last_input: The last input. 1954 * @score: Current score made with @last_input. 1955 * 1956 * Calculate next input to achieve the target score, based on the last input 1957 * and current score. Assuming the input and the score are positively 1958 * proportional, calculate how much compensation should be added to or 1959 * subtracted from the last input as a proportion of the last input. To keep 1960 * the next input from collapsing to zero, it is always set non-zero. In short form 1961 * (assuming support of float and signed calculations), the algorithm is as 1962 * below. 1963 * 1964 * next_input = max(last_input * ((goal - current) / goal + 1), 1) 1965 * 1966 * For simple implementation, we assume the target score is always 10,000. The 1967 * caller should adjust @score for this. For example, if @score is 5,000 (half of the goal), the next input becomes 1.5 times @last_input. 1968 * 1969 * Returns the next input that is assumed to achieve the target score. 1970 */ 1971 static unsigned long damon_feed_loop_next_input(unsigned long last_input, 1972 unsigned long score) 1973 { 1974 const unsigned long goal = 10000; 1975 /* Set minimum input as 10000 to avoid the compensation becoming zero */ 1976 const unsigned long min_input = 10000; 1977 unsigned long score_goal_diff, compensation; 1978 bool over_achieving = score > goal; 1979 1980 if (score == goal) 1981 return last_input; 1982 if (score >= goal * 2) 1983 return min_input; 1984 1985 if (over_achieving) 1986 score_goal_diff = score - goal; 1987 else 1988 score_goal_diff = goal - score; 1989 1990 if (last_input < ULONG_MAX / score_goal_diff) 1991 compensation = last_input * score_goal_diff / goal; 1992 else 1993 compensation = last_input / goal * score_goal_diff; 1994 1995 if (over_achieving) 1996 return max(last_input - compensation, min_input); 1997 if (last_input < ULONG_MAX - compensation) 1998 return last_input + compensation; 1999 return ULONG_MAX; 2000 } 2001 2002 #ifdef CONFIG_PSI 2003 2004 static u64 damos_get_some_mem_psi_total(void) 2005 { 2006 if (static_branch_likely(&psi_disabled)) 2007 return 0; 2008 return div_u64(psi_system.total[PSI_AVGS][PSI_MEM * 2], 2009 NSEC_PER_USEC); 2010 } 2011 2012 #else /* CONFIG_PSI */ 2013 2014 static inline u64 damos_get_some_mem_psi_total(void) 2015 { 2016 return 0; 2017 } 2018 2019 #endif /* CONFIG_PSI */ 2020 2021 #ifdef CONFIG_NUMA 2022 static __kernel_ulong_t damos_get_node_mem_bp( 2023 struct damos_quota_goal *goal) 2024 { 2025 struct sysinfo i; 2026 __kernel_ulong_t numerator; 2027 2028 si_meminfo_node(&i, goal->nid); 2029 if (goal->metric == DAMOS_QUOTA_NODE_MEM_USED_BP) 2030 numerator = i.totalram - i.freeram; 2031 else /* DAMOS_QUOTA_NODE_MEM_FREE_BP */ 2032 numerator = i.freeram; 2033 return numerator * 10000 / i.totalram; 2034 } 2035 #else 2036 static __kernel_ulong_t damos_get_node_mem_bp( 2037 struct damos_quota_goal *goal) 2038 { 2039 return 0; 2040 } 2041 #endif 2042 2043 2044 static void damos_set_quota_goal_current_value(struct damos_quota_goal *goal) 2045 { 2046 u64 now_psi_total; 2047 2048 switch (goal->metric) { 2049 case DAMOS_QUOTA_USER_INPUT: 2050 /* User should already set goal->current_value */ 2051 break; 2052 case DAMOS_QUOTA_SOME_MEM_PSI_US: 2053 now_psi_total = damos_get_some_mem_psi_total(); 2054 goal->current_value = now_psi_total - goal->last_psi_total; 2055 goal->last_psi_total = now_psi_total; 2056 break; 2057 case
DAMOS_QUOTA_NODE_MEM_USED_BP: 2058 case DAMOS_QUOTA_NODE_MEM_FREE_BP: 2059 goal->current_value = damos_get_node_mem_bp(goal); 2060 break; 2061 default: 2062 break; 2063 } 2064 } 2065 2066 /* Return the highest score since it makes schemes least aggressive */ 2067 static unsigned long damos_quota_score(struct damos_quota *quota) 2068 { 2069 struct damos_quota_goal *goal; 2070 unsigned long highest_score = 0; 2071 2072 damos_for_each_quota_goal(goal, quota) { 2073 damos_set_quota_goal_current_value(goal); 2074 highest_score = max(highest_score, 2075 goal->current_value * 10000 / 2076 goal->target_value); 2077 } 2078 2079 return highest_score; 2080 } 2081 2082 /* 2083 * Called only if quota->ms, or quota->sz are set, or quota->goals is not empty 2084 */ 2085 static void damos_set_effective_quota(struct damos_quota *quota) 2086 { 2087 unsigned long throughput; 2088 unsigned long esz = ULONG_MAX; 2089 2090 if (!quota->ms && list_empty("a->goals)) { 2091 quota->esz = quota->sz; 2092 return; 2093 } 2094 2095 if (!list_empty("a->goals)) { 2096 unsigned long score = damos_quota_score(quota); 2097 2098 quota->esz_bp = damon_feed_loop_next_input( 2099 max(quota->esz_bp, 10000UL), 2100 score); 2101 esz = quota->esz_bp / 10000; 2102 } 2103 2104 if (quota->ms) { 2105 if (quota->total_charged_ns) 2106 throughput = mult_frac(quota->total_charged_sz, 1000000, 2107 quota->total_charged_ns); 2108 else 2109 throughput = PAGE_SIZE * 1024; 2110 esz = min(throughput * quota->ms, esz); 2111 } 2112 2113 if (quota->sz && quota->sz < esz) 2114 esz = quota->sz; 2115 2116 quota->esz = esz; 2117 } 2118 2119 static void damos_trace_esz(struct damon_ctx *c, struct damos *s, 2120 struct damos_quota *quota) 2121 { 2122 unsigned int cidx = 0, sidx = 0; 2123 struct damos *siter; 2124 2125 damon_for_each_scheme(siter, c) { 2126 if (siter == s) 2127 break; 2128 sidx++; 2129 } 2130 trace_damos_esz(cidx, sidx, quota->esz); 2131 } 2132 2133 static void damos_adjust_quota(struct damon_ctx *c, struct damos *s) 2134 { 2135 struct damos_quota *quota = &s->quota; 2136 struct damon_target *t; 2137 struct damon_region *r; 2138 unsigned long cumulated_sz, cached_esz; 2139 unsigned int score, max_score = 0; 2140 2141 if (!quota->ms && !quota->sz && list_empty("a->goals)) 2142 return; 2143 2144 /* First charge window */ 2145 if (!quota->total_charged_sz && !quota->charged_from) 2146 quota->charged_from = jiffies; 2147 2148 /* New charge window starts */ 2149 if (time_after_eq(jiffies, quota->charged_from + 2150 msecs_to_jiffies(quota->reset_interval))) { 2151 if (quota->esz && quota->charged_sz >= quota->esz) 2152 s->stat.qt_exceeds++; 2153 quota->total_charged_sz += quota->charged_sz; 2154 quota->charged_from = jiffies; 2155 quota->charged_sz = 0; 2156 if (trace_damos_esz_enabled()) 2157 cached_esz = quota->esz; 2158 damos_set_effective_quota(quota); 2159 if (trace_damos_esz_enabled() && quota->esz != cached_esz) 2160 damos_trace_esz(c, s, quota); 2161 } 2162 2163 if (!c->ops.get_scheme_score) 2164 return; 2165 2166 /* Fill up the score histogram */ 2167 memset(c->regions_score_histogram, 0, 2168 sizeof(*c->regions_score_histogram) * 2169 (DAMOS_MAX_SCORE + 1)); 2170 damon_for_each_target(t, c) { 2171 damon_for_each_region(r, t) { 2172 if (!__damos_valid_target(r, s)) 2173 continue; 2174 score = c->ops.get_scheme_score(c, t, r, s); 2175 c->regions_score_histogram[score] += 2176 damon_sz_region(r); 2177 if (score > max_score) 2178 max_score = score; 2179 } 2180 } 2181 2182 /* Set the min score limit */ 2183 for (cumulated_sz = 0, score = 
max_score; ; score--) { 2184 cumulated_sz += c->regions_score_histogram[score]; 2185 if (cumulated_sz >= quota->esz || !score) 2186 break; 2187 } 2188 quota->min_score = score; 2189 } 2190 2191 static void kdamond_apply_schemes(struct damon_ctx *c) 2192 { 2193 struct damon_target *t; 2194 struct damon_region *r, *next_r; 2195 struct damos *s; 2196 unsigned long sample_interval = c->attrs.sample_interval ? 2197 c->attrs.sample_interval : 1; 2198 bool has_schemes_to_apply = false; 2199 2200 damon_for_each_scheme(s, c) { 2201 if (c->passed_sample_intervals < s->next_apply_sis) 2202 continue; 2203 2204 if (!s->wmarks.activated) 2205 continue; 2206 2207 has_schemes_to_apply = true; 2208 2209 damos_adjust_quota(c, s); 2210 } 2211 2212 if (!has_schemes_to_apply) 2213 return; 2214 2215 mutex_lock(&c->walk_control_lock); 2216 damon_for_each_target(t, c) { 2217 damon_for_each_region_safe(r, next_r, t) 2218 damon_do_apply_schemes(c, t, r); 2219 } 2220 2221 damon_for_each_scheme(s, c) { 2222 if (c->passed_sample_intervals < s->next_apply_sis) 2223 continue; 2224 damos_walk_complete(c, s); 2225 s->next_apply_sis = c->passed_sample_intervals + 2226 (s->apply_interval_us ? s->apply_interval_us : 2227 c->attrs.aggr_interval) / sample_interval; 2228 s->last_applied = NULL; 2229 } 2230 mutex_unlock(&c->walk_control_lock); 2231 } 2232 2233 /* 2234 * Merge two adjacent regions into one region 2235 */ 2236 static void damon_merge_two_regions(struct damon_target *t, 2237 struct damon_region *l, struct damon_region *r) 2238 { 2239 unsigned long sz_l = damon_sz_region(l), sz_r = damon_sz_region(r); 2240 2241 l->nr_accesses = (l->nr_accesses * sz_l + r->nr_accesses * sz_r) / 2242 (sz_l + sz_r); 2243 l->nr_accesses_bp = l->nr_accesses * 10000; 2244 l->age = (l->age * sz_l + r->age * sz_r) / (sz_l + sz_r); 2245 l->ar.end = r->ar.end; 2246 damon_destroy_region(r, t); 2247 } 2248 2249 /* 2250 * Merge adjacent regions having similar access frequencies 2251 * 2252 * t target affected by this merge operation 2253 * thres '->nr_accesses' diff threshold for the merge 2254 * sz_limit size upper limit of each region 2255 */ 2256 static void damon_merge_regions_of(struct damon_target *t, unsigned int thres, 2257 unsigned long sz_limit) 2258 { 2259 struct damon_region *r, *prev = NULL, *next; 2260 2261 damon_for_each_region_safe(r, next, t) { 2262 if (abs(r->nr_accesses - r->last_nr_accesses) > thres) 2263 r->age = 0; 2264 else 2265 r->age++; 2266 2267 if (prev && prev->ar.end == r->ar.start && 2268 abs(prev->nr_accesses - r->nr_accesses) <= thres && 2269 damon_sz_region(prev) + damon_sz_region(r) <= sz_limit) 2270 damon_merge_two_regions(t, prev, r); 2271 else 2272 prev = r; 2273 } 2274 } 2275 2276 /* 2277 * Merge adjacent regions having similar access frequencies 2278 * 2279 * threshold '->nr_accesses' diff threshold for the merge 2280 * sz_limit size upper limit of each region 2281 * 2282 * This function merges monitoring target regions that are adjacent and have 2283 * similar access frequencies. This is for minimizing the monitoring 2284 * overhead under a dynamically changing access pattern. If a merge was 2285 * unnecessarily made, later 'kdamond_split_regions()' will revert it. 2286 * 2287 * The total number of regions could be higher than the user-defined limit, 2288 * max_nr_regions, in some cases. For example, the user can update 2289 * max_nr_regions to a number lower than the current number of regions 2290 * while DAMON is running.
For such a case, repeat merging until the limit is 2291 * met while increasing @threshold up to possible maximum level. 2292 */ 2293 static void kdamond_merge_regions(struct damon_ctx *c, unsigned int threshold, 2294 unsigned long sz_limit) 2295 { 2296 struct damon_target *t; 2297 unsigned int nr_regions; 2298 unsigned int max_thres; 2299 2300 max_thres = c->attrs.aggr_interval / 2301 (c->attrs.sample_interval ? c->attrs.sample_interval : 1); 2302 do { 2303 nr_regions = 0; 2304 damon_for_each_target(t, c) { 2305 damon_merge_regions_of(t, threshold, sz_limit); 2306 nr_regions += damon_nr_regions(t); 2307 } 2308 threshold = max(1, threshold * 2); 2309 } while (nr_regions > c->attrs.max_nr_regions && 2310 threshold / 2 < max_thres); 2311 } 2312 2313 /* 2314 * Split a region in two 2315 * 2316 * r the region to be split 2317 * sz_r size of the first sub-region that will be made 2318 */ 2319 static void damon_split_region_at(struct damon_target *t, 2320 struct damon_region *r, unsigned long sz_r) 2321 { 2322 struct damon_region *new; 2323 2324 new = damon_new_region(r->ar.start + sz_r, r->ar.end); 2325 if (!new) 2326 return; 2327 2328 r->ar.end = new->ar.start; 2329 2330 new->age = r->age; 2331 new->last_nr_accesses = r->last_nr_accesses; 2332 new->nr_accesses_bp = r->nr_accesses_bp; 2333 new->nr_accesses = r->nr_accesses; 2334 2335 damon_insert_region(new, r, damon_next_region(r), t); 2336 } 2337 2338 /* Split every region in the given target into 'nr_subs' regions */ 2339 static void damon_split_regions_of(struct damon_target *t, int nr_subs, 2340 unsigned long min_sz_region) 2341 { 2342 struct damon_region *r, *next; 2343 unsigned long sz_region, sz_sub = 0; 2344 int i; 2345 2346 damon_for_each_region_safe(r, next, t) { 2347 sz_region = damon_sz_region(r); 2348 2349 for (i = 0; i < nr_subs - 1 && 2350 sz_region > 2 * min_sz_region; i++) { 2351 /* 2352 * Randomly select size of left sub-region to be at 2353 * least 10 percent and at most 90% of original region 2354 */ 2355 sz_sub = ALIGN_DOWN(damon_rand(1, 10) * 2356 sz_region / 10, min_sz_region); 2357 /* Do not allow blank region */ 2358 if (sz_sub == 0 || sz_sub >= sz_region) 2359 continue; 2360 2361 damon_split_region_at(t, r, sz_sub); 2362 sz_region = sz_sub; 2363 } 2364 } 2365 } 2366 2367 /* 2368 * Split every target region into randomly-sized small regions 2369 * 2370 * This function splits every target region into random-sized small regions if 2371 * current total number of the regions is equal or smaller than half of the 2372 * user-specified maximum number of regions. This is for maximizing the 2373 * monitoring accuracy under the dynamically changeable access patterns. If a 2374 * split was unnecessarily made, later 'kdamond_merge_regions()' will revert 2375 * it. 
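 *
 * For example (illustrative numbers only), with max_nr_regions of 1000, the
 * split below is attempted only while the current total number of regions is
 * at most 500, and a three-way split is tried instead of a two-way one when
 * that number stayed the same since the last time regions were split and is
 * still below a third of the limit.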
2376 */ 2377 static void kdamond_split_regions(struct damon_ctx *ctx) 2378 { 2379 struct damon_target *t; 2380 unsigned int nr_regions = 0; 2381 static unsigned int last_nr_regions; 2382 int nr_subregions = 2; 2383 2384 damon_for_each_target(t, ctx) 2385 nr_regions += damon_nr_regions(t); 2386 2387 if (nr_regions > ctx->attrs.max_nr_regions / 2) 2388 return; 2389 2390 /* Maybe the middle of the region has different access frequency */ 2391 if (last_nr_regions == nr_regions && 2392 nr_regions < ctx->attrs.max_nr_regions / 3) 2393 nr_subregions = 3; 2394 2395 damon_for_each_target(t, ctx) 2396 damon_split_regions_of(t, nr_subregions, ctx->min_sz_region); 2397 2398 last_nr_regions = nr_regions; 2399 } 2400 2401 /* 2402 * Check whether current monitoring should be stopped 2403 * 2404 * The monitoring is stopped when either the user requested to stop, or all 2405 * monitoring targets are invalid. 2406 * 2407 * Returns true if the current monitoring needs to stop. 2408 */ 2409 static bool kdamond_need_stop(struct damon_ctx *ctx) 2410 { 2411 struct damon_target *t; 2412 2413 if (kthread_should_stop()) 2414 return true; 2415 2416 if (!ctx->ops.target_valid) 2417 return false; 2418 2419 damon_for_each_target(t, ctx) { 2420 if (ctx->ops.target_valid(t)) 2421 return false; 2422 } 2423 2424 return true; 2425 } 2426 2427 static int damos_get_wmark_metric_value(enum damos_wmark_metric metric, 2428 unsigned long *metric_value) 2429 { 2430 switch (metric) { 2431 case DAMOS_WMARK_FREE_MEM_RATE: 2432 *metric_value = global_zone_page_state(NR_FREE_PAGES) * 1000 / 2433 totalram_pages(); 2434 return 0; 2435 default: 2436 break; 2437 } 2438 return -EINVAL; 2439 } 2440 2441 /* 2442 * Returns zero if the scheme is active. Else, returns time to wait for next 2443 * watermark check in micro-seconds. 2444 */ 2445 static unsigned long damos_wmark_wait_us(struct damos *scheme) 2446 { 2447 unsigned long metric; 2448 2449 if (damos_get_wmark_metric_value(scheme->wmarks.metric, &metric)) 2450 return 0; 2451 2452 /* higher than high watermark or lower than low watermark */ 2453 if (metric > scheme->wmarks.high || scheme->wmarks.low > metric) { 2454 if (scheme->wmarks.activated) 2455 pr_debug("deactivate a scheme (%d) for %s wmark\n", 2456 scheme->action, 2457 str_high_low(metric > scheme->wmarks.high)); 2458 scheme->wmarks.activated = false; 2459 return scheme->wmarks.interval; 2460 } 2461 2462 /* inactive and higher than middle watermark */ 2463 if ((scheme->wmarks.high >= metric && metric >= scheme->wmarks.mid) && 2464 !scheme->wmarks.activated) 2465 return scheme->wmarks.interval; 2466 2467 if (!scheme->wmarks.activated) 2468 pr_debug("activate a scheme (%d)\n", scheme->action); 2469 scheme->wmarks.activated = true; 2470 return 0; 2471 } 2472 2473 static void kdamond_usleep(unsigned long usecs) 2474 { 2475 if (usecs >= USLEEP_RANGE_UPPER_BOUND) 2476 schedule_timeout_idle(usecs_to_jiffies(usecs)); 2477 else 2478 usleep_range_idle(usecs, usecs + 1); 2479 } 2480 2481 /* 2482 * kdamond_call() - handle damon_call_control objects. 2483 * @ctx: The &struct damon_ctx of the kdamond. 2484 * @cancel: Whether to cancel the invocation of the function. 2485 * 2486 * If there are &struct damon_call_control requests that are registered via 2487 * &damon_call() on @ctx, do or cancel the invocation of the function depending 2488 * on @cancel. @cancel is set when the kdamond is already out of the main loop 2489 * and therefore will be terminated.
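 *
 * A requester-side sketch (illustrative only; it assumes the usual
 * damon_call(ctx, &control) entry point, which waits for a non-repeating
 * request to be handled, and omits error handling):
 *
 *	static int count_schemes(void *data)
 *	{
 *		struct damon_ctx *ctx = data;
 *		struct damos *s;
 *		int nr = 0;
 *
 *		damon_for_each_scheme(s, ctx)
 *			nr++;
 *		return nr;
 *	}
 *
 *	struct damon_call_control control = {
 *		.fn = count_schemes, .data = ctx,
 *	};
 *
 *	damon_call(ctx, &control);
 *	nr_schemes = control.return_code;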
2490 */ 2491 static void kdamond_call(struct damon_ctx *ctx, bool cancel) 2492 { 2493 struct damon_call_control *control; 2494 LIST_HEAD(repeat_controls); 2495 int ret = 0; 2496 2497 while (true) { 2498 mutex_lock(&ctx->call_controls_lock); 2499 control = list_first_entry_or_null(&ctx->call_controls, 2500 struct damon_call_control, list); 2501 mutex_unlock(&ctx->call_controls_lock); 2502 if (!control) 2503 break; 2504 if (cancel) { 2505 control->canceled = true; 2506 } else { 2507 ret = control->fn(control->data); 2508 control->return_code = ret; 2509 } 2510 mutex_lock(&ctx->call_controls_lock); 2511 list_del(&control->list); 2512 mutex_unlock(&ctx->call_controls_lock); 2513 if (!control->repeat) { 2514 complete(&control->completion); 2515 } else if (control->canceled && control->dealloc_on_cancel) { 2516 kfree(control); 2517 continue; 2518 } else { 2519 list_add(&control->list, &repeat_controls); 2520 } 2521 } 2522 control = list_first_entry_or_null(&repeat_controls, 2523 struct damon_call_control, list); 2524 if (!control || cancel) 2525 return; 2526 mutex_lock(&ctx->call_controls_lock); 2527 list_add_tail(&control->list, &ctx->call_controls); 2528 mutex_unlock(&ctx->call_controls_lock); 2529 } 2530 2531 /* Returns negative error code if it's not activated but should return */ 2532 static int kdamond_wait_activation(struct damon_ctx *ctx) 2533 { 2534 struct damos *s; 2535 unsigned long wait_time; 2536 unsigned long min_wait_time = 0; 2537 bool init_wait_time = false; 2538 2539 while (!kdamond_need_stop(ctx)) { 2540 damon_for_each_scheme(s, ctx) { 2541 wait_time = damos_wmark_wait_us(s); 2542 if (!init_wait_time || wait_time < min_wait_time) { 2543 init_wait_time = true; 2544 min_wait_time = wait_time; 2545 } 2546 } 2547 if (!min_wait_time) 2548 return 0; 2549 2550 kdamond_usleep(min_wait_time); 2551 2552 kdamond_call(ctx, false); 2553 damos_walk_cancel(ctx); 2554 } 2555 return -EBUSY; 2556 } 2557 2558 static void kdamond_init_ctx(struct damon_ctx *ctx) 2559 { 2560 unsigned long sample_interval = ctx->attrs.sample_interval ? 2561 ctx->attrs.sample_interval : 1; 2562 unsigned long apply_interval; 2563 struct damos *scheme; 2564 2565 ctx->passed_sample_intervals = 0; 2566 ctx->next_aggregation_sis = ctx->attrs.aggr_interval / sample_interval; 2567 ctx->next_ops_update_sis = ctx->attrs.ops_update_interval / 2568 sample_interval; 2569 ctx->next_intervals_tune_sis = ctx->next_aggregation_sis * 2570 ctx->attrs.intervals_goal.aggrs; 2571 2572 damon_for_each_scheme(scheme, ctx) { 2573 apply_interval = scheme->apply_interval_us ? 
2574 scheme->apply_interval_us : ctx->attrs.aggr_interval; 2575 scheme->next_apply_sis = apply_interval / sample_interval; 2576 damos_set_filters_default_reject(scheme); 2577 } 2578 } 2579 2580 /* 2581 * The monitoring daemon that runs as a kernel thread 2582 */ 2583 static int kdamond_fn(void *data) 2584 { 2585 struct damon_ctx *ctx = data; 2586 struct damon_target *t; 2587 struct damon_region *r, *next; 2588 unsigned int max_nr_accesses = 0; 2589 unsigned long sz_limit = 0; 2590 2591 pr_debug("kdamond (%d) starts\n", current->pid); 2592 2593 complete(&ctx->kdamond_started); 2594 kdamond_init_ctx(ctx); 2595 2596 if (ctx->ops.init) 2597 ctx->ops.init(ctx); 2598 ctx->regions_score_histogram = kmalloc_array(DAMOS_MAX_SCORE + 1, 2599 sizeof(*ctx->regions_score_histogram), GFP_KERNEL); 2600 if (!ctx->regions_score_histogram) 2601 goto done; 2602 2603 sz_limit = damon_region_sz_limit(ctx); 2604 2605 while (!kdamond_need_stop(ctx)) { 2606 /* 2607 * ctx->attrs and ctx->next_{aggregation,ops_update}_sis could 2608 * be changed from kdamond_call(). Read the values here, and 2609 * use those for this iteration. That is, new values that 2610 * damon_set_attrs() updates are respected from the next iteration. 2611 */ 2612 unsigned long next_aggregation_sis = ctx->next_aggregation_sis; 2613 unsigned long next_ops_update_sis = ctx->next_ops_update_sis; 2614 unsigned long sample_interval = ctx->attrs.sample_interval; 2615 2616 if (kdamond_wait_activation(ctx)) 2617 break; 2618 2619 if (ctx->ops.prepare_access_checks) 2620 ctx->ops.prepare_access_checks(ctx); 2621 2622 kdamond_usleep(sample_interval); 2623 ctx->passed_sample_intervals++; 2624 2625 if (ctx->ops.check_accesses) 2626 max_nr_accesses = ctx->ops.check_accesses(ctx); 2627 2628 if (ctx->passed_sample_intervals >= next_aggregation_sis) 2629 kdamond_merge_regions(ctx, 2630 max_nr_accesses / 10, 2631 sz_limit); 2632 2633 /* 2634 * do kdamond_call() and kdamond_apply_schemes() after 2635 * kdamond_merge_regions() if possible, to reduce overhead 2636 */ 2637 kdamond_call(ctx, false); 2638 if (!list_empty(&ctx->schemes)) 2639 kdamond_apply_schemes(ctx); 2640 else 2641 damos_walk_cancel(ctx); 2642 2643 sample_interval = ctx->attrs.sample_interval ? 2644 ctx->attrs.sample_interval : 1; 2645 if (ctx->passed_sample_intervals >= next_aggregation_sis) { 2646 if (ctx->attrs.intervals_goal.aggrs && 2647 ctx->passed_sample_intervals >= 2648 ctx->next_intervals_tune_sis) { 2649 /* 2650 * ctx->next_aggregation_sis might be updated 2651 * from kdamond_call(). In that case, 2652 * damon_set_attrs(), which will be called from 2653 * kdamond_tune_intervals(), may wrongly think 2654 * this is in the middle of the current 2655 * aggregation, and reset the aggregation 2656 * information for all regions. Then, the 2657 * following kdamond_reset_aggregated() call 2658 * will make the region information invalid, 2659 * particularly ->nr_accesses_bp. 2660 * 2661 * Reset ->next_aggregation_sis to avoid that. 2662 * It will anyway be correctly updated after 2663 * this if clause. 2664 */ 2665 ctx->next_aggregation_sis = 2666 next_aggregation_sis; 2667 ctx->next_intervals_tune_sis += 2668 ctx->attrs.aggr_samples * 2669 ctx->attrs.intervals_goal.aggrs; 2670 kdamond_tune_intervals(ctx); 2671 sample_interval = ctx->attrs.sample_interval ?
2672 ctx->attrs.sample_interval : 1; 2673 2674 } 2675 ctx->next_aggregation_sis = next_aggregation_sis + 2676 ctx->attrs.aggr_interval / sample_interval; 2677 2678 kdamond_reset_aggregated(ctx); 2679 kdamond_split_regions(ctx); 2680 } 2681 2682 if (ctx->passed_sample_intervals >= next_ops_update_sis) { 2683 ctx->next_ops_update_sis = next_ops_update_sis + 2684 ctx->attrs.ops_update_interval / 2685 sample_interval; 2686 if (ctx->ops.update) 2687 ctx->ops.update(ctx); 2688 sz_limit = damon_region_sz_limit(ctx); 2689 } 2690 } 2691 done: 2692 damon_for_each_target(t, ctx) { 2693 damon_for_each_region_safe(r, next, t) 2694 damon_destroy_region(r, t); 2695 } 2696 2697 if (ctx->ops.cleanup) 2698 ctx->ops.cleanup(ctx); 2699 kfree(ctx->regions_score_histogram); 2700 2701 pr_debug("kdamond (%d) finishes\n", current->pid); 2702 mutex_lock(&ctx->kdamond_lock); 2703 ctx->kdamond = NULL; 2704 mutex_unlock(&ctx->kdamond_lock); 2705 2706 kdamond_call(ctx, true); 2707 damos_walk_cancel(ctx); 2708 2709 mutex_lock(&damon_lock); 2710 nr_running_ctxs--; 2711 if (!nr_running_ctxs && running_exclusive_ctxs) 2712 running_exclusive_ctxs = false; 2713 mutex_unlock(&damon_lock); 2714 2715 damon_destroy_targets(ctx); 2716 return 0; 2717 } 2718 2719 /* 2720 * struct damon_system_ram_region - System RAM resource address region of 2721 * [@start, @end). 2722 * @start: Start address of the region (inclusive). 2723 * @end: End address of the region (exclusive). 2724 */ 2725 struct damon_system_ram_region { 2726 unsigned long start; 2727 unsigned long end; 2728 }; 2729 2730 static int walk_system_ram(struct resource *res, void *arg) 2731 { 2732 struct damon_system_ram_region *a = arg; 2733 2734 if (a->end - a->start < resource_size(res)) { 2735 a->start = res->start; 2736 a->end = res->end; 2737 } 2738 return 0; 2739 } 2740 2741 /* 2742 * Find biggest 'System RAM' resource and store its start and end address in 2743 * @start and @end, respectively. If no System RAM is found, returns false. 2744 */ 2745 static bool damon_find_biggest_system_ram(unsigned long *start, 2746 unsigned long *end) 2747 2748 { 2749 struct damon_system_ram_region arg = {}; 2750 2751 walk_system_ram_res(0, ULONG_MAX, &arg, walk_system_ram); 2752 if (arg.end <= arg.start) 2753 return false; 2754 2755 *start = arg.start; 2756 *end = arg.end; 2757 return true; 2758 } 2759 2760 /** 2761 * damon_set_region_biggest_system_ram_default() - Set the region of the given 2762 * monitoring target as requested, or biggest 'System RAM'. 2763 * @t: The monitoring target to set the region. 2764 * @start: The pointer to the start address of the region. 2765 * @end: The pointer to the end address of the region. 2766 * 2767 * This function sets the region of @t as requested by @start and @end. If the 2768 * values of @start and @end are zero, however, this function finds the biggest 2769 * 'System RAM' resource and sets the region to cover the resource. In the 2770 * latter case, this function saves the start and end addresses of the resource 2771 * in @start and @end, respectively. 2772 * 2773 * Return: 0 on success, negative error code otherwise. 
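 *
 * A caller-side sketch (illustrative only); passing zeroed addresses asks for
 * auto-detection of the biggest 'System RAM' resource:
 *
 *	unsigned long start = 0, end = 0;
 *	int err;
 *
 *	err = damon_set_region_biggest_system_ram_default(t, &start, &end);
 *	if (!err)
 *		pr_info("monitoring [%lu, %lu)\n", start, end);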
2774 */ 2775 int damon_set_region_biggest_system_ram_default(struct damon_target *t, 2776 unsigned long *start, unsigned long *end) 2777 { 2778 struct damon_addr_range addr_range; 2779 2780 if (*start > *end) 2781 return -EINVAL; 2782 2783 if (!*start && !*end && 2784 !damon_find_biggest_system_ram(start, end)) 2785 return -EINVAL; 2786 2787 addr_range.start = *start; 2788 addr_range.end = *end; 2789 return damon_set_regions(t, &addr_range, 1, DAMON_MIN_REGION); 2790 } 2791 2792 /* 2793 * damon_moving_sum() - Calculate an inferred moving sum value. 2794 * @mvsum: Inferred sum of the last @len_window values. 2795 * @nomvsum: Non-moving sum of the last discrete @len_window window values. 2796 * @len_window: The number of last values to take care of. 2797 * @new_value: New value that will be added to the pseudo moving sum. 2798 * 2799 * Moving sum (moving average * window size) is good for handling noise, but 2800 * the cost of keeping past values can be high for arbitrary window size. This 2801 * function implements a lightweight pseudo moving sum function that doesn't 2802 * keep the past window values. 2803 * 2804 * It simply assumes there was no noise in the past, and gets the no-noise 2805 * assumed past value to drop from @nomvsum and @len_window. @nomvsum is a 2806 * non-moving sum of the last window. For example, if @len_window is 10 and we 2807 * have 25 values, @nomvsum is the sum of the 11th to 20th values of the 25 2808 * values. Hence, this function simply drops @nomvsum / @len_window from the 2809 * given @mvsum and adds @new_value. 2810 * 2811 * For example, if @len_window is 10 and @nomvsum is 50, the last 10 values of 2812 * the last window could vary, e.g., 0, 10, 0, 10, 0, 10, 0, 0, 0, 20. For 2813 * calculating the next moving sum with a new value, we should drop 0 from 50 and 2814 * add the new value. However, this function assumes it got value 5 for each 2815 * of the last ten times. Based on the assumption, when the next value is 2816 * measured, it drops the assumed past value, 5, from the current sum, and adds 2817 * the new value to get the updated pseudo-moving sum. 2818 * 2819 * This means the value could have errors, but the errors disappear on 2820 * every @len_window-aligned call. For example, if @len_window is 10, the 2821 * pseudo moving sum with the 11th to 19th values would have an error, but 2822 * the sum with the 20th value will not have the error. 2823 * 2824 * Return: Pseudo-moving sum after getting the @new_value. 2825 */ 2826 static unsigned int damon_moving_sum(unsigned int mvsum, unsigned int nomvsum, 2827 unsigned int len_window, unsigned int new_value) 2828 { 2829 return mvsum - nomvsum / len_window + new_value; 2830 } 2831 2832 /** 2833 * damon_update_region_access_rate() - Update the access rate of a region. 2834 * @r: The DAMON region to update for its access check result. 2835 * @accessed: Whether the region was accessed during the last sampling interval. 2836 * @attrs: The damon_attrs of the DAMON context. 2837 * 2838 * Update the access rate of a region with the region's last sampling interval 2839 * access check result. 2840 * 2841 * Usually this will be called by &damon_operations->check_accesses callback. 2842 */ 2843 void damon_update_region_access_rate(struct damon_region *r, bool accessed, 2844 struct damon_attrs *attrs) 2845 { 2846 unsigned int len_window = 1; 2847 2848 /* 2849 * sample_interval can be zero, but cannot be larger than 2850 * aggr_interval, owing to validation of damon_set_attrs().
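 *
 * For example (illustrative numbers), if len_window below becomes 20 and the
 * region has r->last_nr_accesses of 10 and r->nr_accesses_bp of 5000, an
 * access observed in this sampling interval moves nr_accesses_bp to
 * 5000 - 10 * 10000 / 20 + 10000 = 10000, per damon_moving_sum() above.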
2851 */ 2852 if (attrs->sample_interval) 2853 len_window = damon_max_nr_accesses(attrs); 2854 r->nr_accesses_bp = damon_moving_sum(r->nr_accesses_bp, 2855 r->last_nr_accesses * 10000, len_window, 2856 accessed ? 10000 : 0); 2857 2858 if (accessed) 2859 r->nr_accesses++; 2860 } 2861 2862 static int __init damon_init(void) 2863 { 2864 damon_region_cache = KMEM_CACHE(damon_region, 0); 2865 if (unlikely(!damon_region_cache)) { 2866 pr_err("creating damon_region_cache fails\n"); 2867 return -ENOMEM; 2868 } 2869 2870 return 0; 2871 } 2872 2873 subsys_initcall(damon_init); 2874 2875 #include "tests/core-kunit.h" 2876