// SPDX-License-Identifier: GPL-2.0
/*
 * Data Access Monitor
 *
 * Author: SeongJae Park <sj@kernel.org>
 */

#define pr_fmt(fmt) "damon: " fmt

#include <linux/damon.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/mm.h>
#include <linux/psi.h>
#include <linux/slab.h>
#include <linux/string.h>

#define CREATE_TRACE_POINTS
#include <trace/events/damon.h>

#ifdef CONFIG_DAMON_KUNIT_TEST
#undef DAMON_MIN_REGION
#define DAMON_MIN_REGION 1
#endif

static DEFINE_MUTEX(damon_lock);
static int nr_running_ctxs;
static bool running_exclusive_ctxs;

static DEFINE_MUTEX(damon_ops_lock);
static struct damon_operations damon_registered_ops[NR_DAMON_OPS];

static struct kmem_cache *damon_region_cache __ro_after_init;

/* Should be called under damon_ops_lock with id smaller than NR_DAMON_OPS */
static bool __damon_is_registered_ops(enum damon_ops_id id)
{
	struct damon_operations empty_ops = {};

	if (!memcmp(&empty_ops, &damon_registered_ops[id], sizeof(empty_ops)))
		return false;
	return true;
}

/**
 * damon_is_registered_ops() - Check if a given damon_operations is registered.
 * @id: Id of the damon_operations to check if registered.
 *
 * Return: true if the ops is set, false otherwise.
 */
bool damon_is_registered_ops(enum damon_ops_id id)
{
	bool registered;

	if (id >= NR_DAMON_OPS)
		return false;
	mutex_lock(&damon_ops_lock);
	registered = __damon_is_registered_ops(id);
	mutex_unlock(&damon_ops_lock);
	return registered;
}

/**
 * damon_register_ops() - Register a monitoring operations set to DAMON.
 * @ops: monitoring operations set to register.
 *
 * This function registers a monitoring operations set of valid &struct
 * damon_operations->id so that others can find and use them later.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int damon_register_ops(struct damon_operations *ops)
{
	int err = 0;

	if (ops->id >= NR_DAMON_OPS)
		return -EINVAL;
	mutex_lock(&damon_ops_lock);
	/* Fail for already registered ops */
	if (__damon_is_registered_ops(ops->id)) {
		err = -EINVAL;
		goto out;
	}
	damon_registered_ops[ops->id] = *ops;
out:
	mutex_unlock(&damon_ops_lock);
	return err;
}

/**
 * damon_select_ops() - Select a monitoring operations to use with the context.
 * @ctx: monitoring context to use the operations.
 * @id: id of the registered monitoring operations to select.
 *
 * This function finds the registered monitoring operations set of @id and
 * makes @ctx use it.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int damon_select_ops(struct damon_ctx *ctx, enum damon_ops_id id)
{
	int err = 0;

	if (id >= NR_DAMON_OPS)
		return -EINVAL;

	mutex_lock(&damon_ops_lock);
	if (!__damon_is_registered_ops(id))
		err = -EINVAL;
	else
		ctx->ops = damon_registered_ops[id];
	mutex_unlock(&damon_ops_lock);
	return err;
}
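
/*
 * Example (illustrative sketch, not compiled here): an operations set
 * implementation registers itself once (each id can be registered only
 * once), and an API user later selects it for a context.  The callback
 * names below are placeholders for whatever the implementation provides.
 *
 *	static struct damon_operations my_ops = {
 *		.id = DAMON_OPS_PADDR,
 *		.prepare_access_checks = my_prepare_access_checks,
 *		.check_accesses = my_check_accesses,
 *	};
 *
 *	err = damon_register_ops(&my_ops);
 *	if (!err)
 *		err = damon_select_ops(ctx, DAMON_OPS_PADDR);
 */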

/*
 * Construct a damon_region struct
 *
 * Returns the pointer to the new struct if success, or NULL otherwise
 */
struct damon_region *damon_new_region(unsigned long start, unsigned long end)
{
	struct damon_region *region;

	region = kmem_cache_alloc(damon_region_cache, GFP_KERNEL);
	if (!region)
		return NULL;

	region->ar.start = start;
	region->ar.end = end;
	region->nr_accesses = 0;
	region->nr_accesses_bp = 0;
	INIT_LIST_HEAD(&region->list);

	region->age = 0;
	region->last_nr_accesses = 0;

	return region;
}

void damon_add_region(struct damon_region *r, struct damon_target *t)
{
	list_add_tail(&r->list, &t->regions_list);
	t->nr_regions++;
}

static void damon_del_region(struct damon_region *r, struct damon_target *t)
{
	list_del(&r->list);
	t->nr_regions--;
}

static void damon_free_region(struct damon_region *r)
{
	kmem_cache_free(damon_region_cache, r);
}

void damon_destroy_region(struct damon_region *r, struct damon_target *t)
{
	damon_del_region(r, t);
	damon_free_region(r);
}

/*
 * Check whether a region is intersecting an address range
 *
 * Returns true if it is.
 */
static bool damon_intersect(struct damon_region *r,
		struct damon_addr_range *re)
{
	return !(r->ar.end <= re->start || re->end <= r->ar.start);
}

/*
 * Fill holes in regions with new regions.
 */
static int damon_fill_regions_holes(struct damon_region *first,
		struct damon_region *last, struct damon_target *t)
{
	struct damon_region *r = first;

	damon_for_each_region_from(r, t) {
		struct damon_region *next, *newr;

		if (r == last)
			break;
		next = damon_next_region(r);
		if (r->ar.end != next->ar.start) {
			newr = damon_new_region(r->ar.end, next->ar.start);
			if (!newr)
				return -ENOMEM;
			damon_insert_region(newr, r, next, t);
		}
	}
	return 0;
}

/*
 * damon_set_regions() - Set regions of a target for given address ranges.
 * @t: the given target.
 * @ranges: array of new monitoring target ranges.
 * @nr_ranges: length of @ranges.
 *
 * This function adds new regions to, or modifies existing regions of, a
 * monitoring target to fit in the specific ranges.
 *
 * Return: 0 if success, or negative error code otherwise.
 */
int damon_set_regions(struct damon_target *t, struct damon_addr_range *ranges,
		unsigned int nr_ranges)
{
	struct damon_region *r, *next;
	unsigned int i;
	int err;

	/* Remove regions which are not in the new ranges */
	damon_for_each_region_safe(r, next, t) {
		for (i = 0; i < nr_ranges; i++) {
			if (damon_intersect(r, &ranges[i]))
				break;
		}
		if (i == nr_ranges)
			damon_destroy_region(r, t);
	}

	r = damon_first_region(t);
	/* Add new regions or resize existing regions to fit in the ranges */
	for (i = 0; i < nr_ranges; i++) {
		struct damon_region *first = NULL, *last, *newr;
		struct damon_addr_range *range;

		range = &ranges[i];
		/* Get the first/last regions intersecting with the range */
		damon_for_each_region_from(r, t) {
			if (damon_intersect(r, range)) {
				if (!first)
					first = r;
				last = r;
			}
			if (r->ar.start >= range->end)
				break;
		}
		if (!first) {
			/* no region intersects with this range */
			newr = damon_new_region(
					ALIGN_DOWN(range->start,
						DAMON_MIN_REGION),
					ALIGN(range->end, DAMON_MIN_REGION));
			if (!newr)
				return -ENOMEM;
			damon_insert_region(newr, damon_prev_region(r), r, t);
		} else {
			/* resize intersecting regions to fit in this range */
			first->ar.start = ALIGN_DOWN(range->start,
					DAMON_MIN_REGION);
			last->ar.end = ALIGN(range->end, DAMON_MIN_REGION);

			/* fill possible holes in the range */
			err = damon_fill_regions_holes(first, last, t);
			if (err)
				return err;
		}
	}
	return 0;
}
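
/*
 * Example (illustrative sketch): setting two monitoring target address
 * ranges at once.  The address values are hypothetical.
 *
 *	struct damon_addr_range ranges[] = {
 *		{ .start = 0x100000000, .end = 0x140000000 },
 *		{ .start = 0x200000000, .end = 0x280000000 },
 *	};
 *
 *	err = damon_set_regions(t, ranges, ARRAY_SIZE(ranges));
 *
 * Existing regions intersecting the ranges are kept and resized, regions
 * outside of the ranges are destroyed, and holes are filled with new regions.
 */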

struct damos_filter *damos_new_filter(enum damos_filter_type type,
		bool matching)
{
	struct damos_filter *filter;

	filter = kmalloc(sizeof(*filter), GFP_KERNEL);
	if (!filter)
		return NULL;
	filter->type = type;
	filter->matching = matching;
	INIT_LIST_HEAD(&filter->list);
	return filter;
}

void damos_add_filter(struct damos *s, struct damos_filter *f)
{
	list_add_tail(&f->list, &s->filters);
}

static void damos_del_filter(struct damos_filter *f)
{
	list_del(&f->list);
}

static void damos_free_filter(struct damos_filter *f)
{
	kfree(f);
}

void damos_destroy_filter(struct damos_filter *f)
{
	damos_del_filter(f);
	damos_free_filter(f);
}

struct damos_quota_goal *damos_new_quota_goal(
		enum damos_quota_goal_metric metric,
		unsigned long target_value)
{
	struct damos_quota_goal *goal;

	goal = kmalloc(sizeof(*goal), GFP_KERNEL);
	if (!goal)
		return NULL;
	goal->metric = metric;
	goal->target_value = target_value;
	INIT_LIST_HEAD(&goal->list);
	return goal;
}

void damos_add_quota_goal(struct damos_quota *q, struct damos_quota_goal *g)
{
	list_add_tail(&g->list, &q->goals);
}

static void damos_del_quota_goal(struct damos_quota_goal *g)
{
	list_del(&g->list);
}

static void damos_free_quota_goal(struct damos_quota_goal *g)
{
	kfree(g);
}

void damos_destroy_quota_goal(struct damos_quota_goal *g)
{
	damos_del_quota_goal(g);
	damos_free_quota_goal(g);
}

/* initialize fields of @quota that normally API users wouldn't set */
static struct damos_quota *damos_quota_init(struct damos_quota *quota)
{
	quota->esz = 0;
	quota->total_charged_sz = 0;
	quota->total_charged_ns = 0;
	quota->charged_sz = 0;
	quota->charged_from = 0;
	quota->charge_target_from = NULL;
	quota->charge_addr_from = 0;
	quota->esz_bp = 0;
	return quota;
}

struct damos *damon_new_scheme(struct damos_access_pattern *pattern,
			enum damos_action action,
			unsigned long apply_interval_us,
			struct damos_quota *quota,
			struct damos_watermarks *wmarks,
			int target_nid)
{
	struct damos *scheme;

	scheme = kmalloc(sizeof(*scheme), GFP_KERNEL);
	if (!scheme)
		return NULL;
	scheme->pattern = *pattern;
	scheme->action = action;
	scheme->apply_interval_us = apply_interval_us;
	/*
	 * next_apply_sis will be set when kdamond starts.  While kdamond is
	 * running, it will also be updated when it is added to the DAMON
	 * context, or damon_attrs are updated.
	 */
	scheme->next_apply_sis = 0;
	INIT_LIST_HEAD(&scheme->filters);
	scheme->stat = (struct damos_stat){};
	INIT_LIST_HEAD(&scheme->list);

	scheme->quota = *(damos_quota_init(quota));
	/* quota.goals should be separately set by caller */
	INIT_LIST_HEAD(&scheme->quota.goals);

	scheme->wmarks = *wmarks;
	scheme->wmarks.activated = true;

	scheme->target_nid = target_nid;

	return scheme;
}
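
/*
 * Example (illustrative sketch): constructing a scheme that pages out
 * regions of at least one page that were not accessed for five or more
 * aggregation intervals.  All numbers are hypothetical; quota and
 * watermarks are left effectively unlimited/disabled here.
 *
 *	struct damos_access_pattern pattern = {
 *		.min_sz_region = PAGE_SIZE, .max_sz_region = ULONG_MAX,
 *		.min_nr_accesses = 0, .max_nr_accesses = 0,
 *		.min_age_region = 5, .max_age_region = UINT_MAX,
 *	};
 *	struct damos_quota quota = {};
 *	struct damos_watermarks wmarks = { .metric = DAMOS_WMARK_NONE };
 *	struct damos *scheme = damon_new_scheme(&pattern, DAMOS_PAGEOUT,
 *			0, &quota, &wmarks, NUMA_NO_NODE);
 *
 *	if (scheme)
 *		damon_add_scheme(ctx, scheme);
 */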

static void damos_set_next_apply_sis(struct damos *s, struct damon_ctx *ctx)
{
	unsigned long sample_interval = ctx->attrs.sample_interval ?
		ctx->attrs.sample_interval : 1;
	unsigned long apply_interval = s->apply_interval_us ?
		s->apply_interval_us : ctx->attrs.aggr_interval;

	s->next_apply_sis = ctx->passed_sample_intervals +
		apply_interval / sample_interval;
}

void damon_add_scheme(struct damon_ctx *ctx, struct damos *s)
{
	list_add_tail(&s->list, &ctx->schemes);
	damos_set_next_apply_sis(s, ctx);
}

static void damon_del_scheme(struct damos *s)
{
	list_del(&s->list);
}

static void damon_free_scheme(struct damos *s)
{
	kfree(s);
}

void damon_destroy_scheme(struct damos *s)
{
	struct damos_quota_goal *g, *g_next;
	struct damos_filter *f, *next;

	damos_for_each_quota_goal_safe(g, g_next, &s->quota)
		damos_destroy_quota_goal(g);

	damos_for_each_filter_safe(f, next, s)
		damos_destroy_filter(f);
	damon_del_scheme(s);
	damon_free_scheme(s);
}

/*
 * Construct a damon_target struct
 *
 * Returns the pointer to the new struct if success, or NULL otherwise
 */
struct damon_target *damon_new_target(void)
{
	struct damon_target *t;

	t = kmalloc(sizeof(*t), GFP_KERNEL);
	if (!t)
		return NULL;

	t->pid = NULL;
	t->nr_regions = 0;
	INIT_LIST_HEAD(&t->regions_list);
	INIT_LIST_HEAD(&t->list);

	return t;
}

void damon_add_target(struct damon_ctx *ctx, struct damon_target *t)
{
	list_add_tail(&t->list, &ctx->adaptive_targets);
}

bool damon_targets_empty(struct damon_ctx *ctx)
{
	return list_empty(&ctx->adaptive_targets);
}

static void damon_del_target(struct damon_target *t)
{
	list_del(&t->list);
}

void damon_free_target(struct damon_target *t)
{
	struct damon_region *r, *next;

	damon_for_each_region_safe(r, next, t)
		damon_free_region(r);
	kfree(t);
}

void damon_destroy_target(struct damon_target *t)
{
	damon_del_target(t);
	damon_free_target(t);
}

unsigned int damon_nr_regions(struct damon_target *t)
{
	return t->nr_regions;
}

struct damon_ctx *damon_new_ctx(void)
{
	struct damon_ctx *ctx;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return NULL;

	init_completion(&ctx->kdamond_started);

	ctx->attrs.sample_interval = 5 * 1000;
	ctx->attrs.aggr_interval = 100 * 1000;
	ctx->attrs.ops_update_interval = 60 * 1000 * 1000;

	ctx->passed_sample_intervals = 0;
	/* These will be set from kdamond_init_intervals_sis() */
	ctx->next_aggregation_sis = 0;
	ctx->next_ops_update_sis = 0;

	mutex_init(&ctx->kdamond_lock);

	ctx->attrs.min_nr_regions = 10;
	ctx->attrs.max_nr_regions = 1000;

	INIT_LIST_HEAD(&ctx->adaptive_targets);
	INIT_LIST_HEAD(&ctx->schemes);

	return ctx;
}
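
/*
 * Example (illustrative sketch): minimal construction of a monitoring
 * context for a hypothetical physical address space use case.
 *
 *	struct damon_ctx *ctx = damon_new_ctx();
 *	struct damon_target *t;
 *
 *	if (!ctx)
 *		return -ENOMEM;
 *	if (damon_select_ops(ctx, DAMON_OPS_PADDR))
 *		goto out;
 *	t = damon_new_target();
 *	if (!t)
 *		goto out;
 *	damon_add_target(ctx, t);
 *
 * The context can then be started with damon_start() and torn down with
 * damon_destroy_ctx() once the kdamond has stopped.
 */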

static void damon_destroy_targets(struct damon_ctx *ctx)
{
	struct damon_target *t, *next_t;

	if (ctx->ops.cleanup) {
		ctx->ops.cleanup(ctx);
		return;
	}

	damon_for_each_target_safe(t, next_t, ctx)
		damon_destroy_target(t);
}

void damon_destroy_ctx(struct damon_ctx *ctx)
{
	struct damos *s, *next_s;

	damon_destroy_targets(ctx);

	damon_for_each_scheme_safe(s, next_s, ctx)
		damon_destroy_scheme(s);

	kfree(ctx);
}

static unsigned int damon_age_for_new_attrs(unsigned int age,
		struct damon_attrs *old_attrs, struct damon_attrs *new_attrs)
{
	return age * old_attrs->aggr_interval / new_attrs->aggr_interval;
}

/* convert access ratio in bp (per 10,000) to nr_accesses */
static unsigned int damon_accesses_bp_to_nr_accesses(
		unsigned int accesses_bp, struct damon_attrs *attrs)
{
	return accesses_bp * damon_max_nr_accesses(attrs) / 10000;
}

/*
 * Convert nr_accesses to access ratio in bp (per 10,000).
 *
 * Callers should ensure attrs.aggr_interval is not zero, like
 * damon_update_monitoring_results() does.  Otherwise, divide-by-zero would
 * happen.
 */
static unsigned int damon_nr_accesses_to_accesses_bp(
		unsigned int nr_accesses, struct damon_attrs *attrs)
{
	return nr_accesses * 10000 / damon_max_nr_accesses(attrs);
}

static unsigned int damon_nr_accesses_for_new_attrs(unsigned int nr_accesses,
		struct damon_attrs *old_attrs, struct damon_attrs *new_attrs)
{
	return damon_accesses_bp_to_nr_accesses(
			damon_nr_accesses_to_accesses_bp(
				nr_accesses, old_attrs),
			new_attrs);
}

static void damon_update_monitoring_result(struct damon_region *r,
		struct damon_attrs *old_attrs, struct damon_attrs *new_attrs)
{
	r->nr_accesses = damon_nr_accesses_for_new_attrs(r->nr_accesses,
			old_attrs, new_attrs);
	r->nr_accesses_bp = r->nr_accesses * 10000;
	r->age = damon_age_for_new_attrs(r->age, old_attrs, new_attrs);
}

/*
 * region->nr_accesses is the number of sampling intervals in the last
 * aggregation interval in which access to the region has been found, and
 * region->age is the number of aggregation intervals that its access pattern
 * has been maintained.  For this reason, the real meaning of the two fields
 * depends on the current sampling interval and aggregation interval.  This
 * function updates ->nr_accesses and ->age of the given damon_ctx's regions
 * for new damon_attrs.
 */
static void damon_update_monitoring_results(struct damon_ctx *ctx,
		struct damon_attrs *new_attrs)
{
	struct damon_attrs *old_attrs = &ctx->attrs;
	struct damon_target *t;
	struct damon_region *r;

	/* if any interval is zero, simply skip the conversion */
	if (!old_attrs->sample_interval || !old_attrs->aggr_interval ||
			!new_attrs->sample_interval ||
			!new_attrs->aggr_interval)
		return;

	damon_for_each_target(t, ctx)
		damon_for_each_region(r, t)
			damon_update_monitoring_result(
					r, old_attrs, new_attrs);
}
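
/*
 * Worked example (hypothetical numbers): with the default 5 ms sampling and
 * 100 ms aggregation intervals, a region can be found accessed at most 20
 * times per aggregation interval.  A region with ->nr_accesses of 10 hence
 * means a 50% access rate (5,000 bp).  If the sampling interval is changed
 * to 1 ms while keeping the aggregation interval, the maximum becomes 100,
 * so the same access rate is now represented as ->nr_accesses of 50, which
 * is what damon_update_monitoring_result() converts the field to.
 */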

/**
 * damon_set_attrs() - Set attributes for the monitoring.
 * @ctx: monitoring context
 * @attrs: monitoring attributes
 *
 * This function should be called while the kdamond is not running, or an
 * access check results aggregation is not ongoing (e.g., from
 * &struct damon_callback->after_aggregation or
 * &struct damon_callback->after_wmarks_check callbacks).
 *
 * Every time interval is in micro-seconds.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int damon_set_attrs(struct damon_ctx *ctx, struct damon_attrs *attrs)
{
	unsigned long sample_interval = attrs->sample_interval ?
		attrs->sample_interval : 1;
	struct damos *s;

	if (attrs->min_nr_regions < 3)
		return -EINVAL;
	if (attrs->min_nr_regions > attrs->max_nr_regions)
		return -EINVAL;
	if (attrs->sample_interval > attrs->aggr_interval)
		return -EINVAL;

	ctx->next_aggregation_sis = ctx->passed_sample_intervals +
		attrs->aggr_interval / sample_interval;
	ctx->next_ops_update_sis = ctx->passed_sample_intervals +
		attrs->ops_update_interval / sample_interval;

	damon_update_monitoring_results(ctx, attrs);
	ctx->attrs = *attrs;

	damon_for_each_scheme(s, ctx)
		damos_set_next_apply_sis(s, ctx);

	return 0;
}
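
/*
 * Example (illustrative sketch): halving the default sampling granularity
 * while keeping the other attributes.  All values are in microseconds and
 * hypothetical.
 *
 *	struct damon_attrs attrs = {
 *		.sample_interval = 10 * 1000,
 *		.aggr_interval = 200 * 1000,
 *		.ops_update_interval = 60 * 1000 * 1000,
 *		.min_nr_regions = 10,
 *		.max_nr_regions = 1000,
 *	};
 *
 *	err = damon_set_attrs(ctx, &attrs);
 *
 * Existing monitoring results of @ctx are converted for the new intervals,
 * as described above damon_update_monitoring_results().
 */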

/**
 * damon_set_schemes() - Set data access monitoring based operation schemes.
 * @ctx: monitoring context
 * @schemes: array of the schemes
 * @nr_schemes: number of entries in @schemes
 *
 * This function should not be called while the kdamond of the context is
 * running.
 */
void damon_set_schemes(struct damon_ctx *ctx, struct damos **schemes,
			ssize_t nr_schemes)
{
	struct damos *s, *next;
	ssize_t i;

	damon_for_each_scheme_safe(s, next, ctx)
		damon_destroy_scheme(s);
	for (i = 0; i < nr_schemes; i++)
		damon_add_scheme(ctx, schemes[i]);
}

static struct damos_quota_goal *damos_nth_quota_goal(
		int n, struct damos_quota *q)
{
	struct damos_quota_goal *goal;
	int i = 0;

	damos_for_each_quota_goal(goal, q) {
		if (i++ == n)
			return goal;
	}
	return NULL;
}

static void damos_commit_quota_goal(
		struct damos_quota_goal *dst, struct damos_quota_goal *src)
{
	dst->metric = src->metric;
	dst->target_value = src->target_value;
	if (dst->metric == DAMOS_QUOTA_USER_INPUT)
		dst->current_value = src->current_value;
	/* keep last_psi_total as is, since it will be updated in next cycle */
}

/**
 * damos_commit_quota_goals() - Commit DAMOS quota goals to another quota.
 * @dst: The commit destination DAMOS quota.
 * @src: The commit source DAMOS quota.
 *
 * Copies user-specified parameters for quota goals from @src to @dst.  Users
 * should use this function for quota goals-level parameters update of running
 * DAMON contexts, instead of manual in-place updates.
 *
 * This function should be called from parameters-update safe context, like
 * DAMON callbacks.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int damos_commit_quota_goals(struct damos_quota *dst, struct damos_quota *src)
{
	struct damos_quota_goal *dst_goal, *next, *src_goal, *new_goal;
	int i = 0, j = 0;

	damos_for_each_quota_goal_safe(dst_goal, next, dst) {
		src_goal = damos_nth_quota_goal(i++, src);
		if (src_goal)
			damos_commit_quota_goal(dst_goal, src_goal);
		else
			damos_destroy_quota_goal(dst_goal);
	}
	damos_for_each_quota_goal_safe(src_goal, next, src) {
		if (j++ < i)
			continue;
		new_goal = damos_new_quota_goal(
				src_goal->metric, src_goal->target_value);
		if (!new_goal)
			return -ENOMEM;
		damos_add_quota_goal(dst, new_goal);
	}
	return 0;
}

static int damos_commit_quota(struct damos_quota *dst, struct damos_quota *src)
{
	int err;

	dst->reset_interval = src->reset_interval;
	dst->ms = src->ms;
	dst->sz = src->sz;
	err = damos_commit_quota_goals(dst, src);
	if (err)
		return err;
	dst->weight_sz = src->weight_sz;
	dst->weight_nr_accesses = src->weight_nr_accesses;
	dst->weight_age = src->weight_age;
	return 0;
}

static struct damos_filter *damos_nth_filter(int n, struct damos *s)
{
	struct damos_filter *filter;
	int i = 0;

	damos_for_each_filter(filter, s) {
		if (i++ == n)
			return filter;
	}
	return NULL;
}

static void damos_commit_filter_arg(
		struct damos_filter *dst, struct damos_filter *src)
{
	switch (dst->type) {
	case DAMOS_FILTER_TYPE_MEMCG:
		dst->memcg_id = src->memcg_id;
		break;
	case DAMOS_FILTER_TYPE_ADDR:
		dst->addr_range = src->addr_range;
		break;
	case DAMOS_FILTER_TYPE_TARGET:
		dst->target_idx = src->target_idx;
		break;
	default:
		break;
	}
}

static void damos_commit_filter(
		struct damos_filter *dst, struct damos_filter *src)
{
	dst->type = src->type;
	dst->matching = src->matching;
	damos_commit_filter_arg(dst, src);
}

static int damos_commit_filters(struct damos *dst, struct damos *src)
{
	struct damos_filter *dst_filter, *next, *src_filter, *new_filter;
	int i = 0, j = 0;

	damos_for_each_filter_safe(dst_filter, next, dst) {
		src_filter = damos_nth_filter(i++, src);
		if (src_filter)
			damos_commit_filter(dst_filter, src_filter);
		else
			damos_destroy_filter(dst_filter);
	}

	damos_for_each_filter_safe(src_filter, next, src) {
		if (j++ < i)
			continue;

		new_filter = damos_new_filter(
				src_filter->type, src_filter->matching);
		if (!new_filter)
			return -ENOMEM;
		damos_commit_filter_arg(new_filter, src_filter);
		damos_add_filter(dst, new_filter);
	}
	return 0;
}

static struct damos *damon_nth_scheme(int n, struct damon_ctx *ctx)
{
	struct damos *s;
	int i = 0;

	damon_for_each_scheme(s, ctx) {
		if (i++ == n)
			return s;
	}
	return NULL;
}

static int damos_commit(struct damos *dst, struct damos *src)
{
	int err;

	dst->pattern = src->pattern;
	dst->action = src->action;
	dst->apply_interval_us = src->apply_interval_us;

	err = damos_commit_quota(&dst->quota, &src->quota);
	if (err)
		return err;

	dst->wmarks = src->wmarks;

	err = damos_commit_filters(dst, src);
	return err;
}

static int damon_commit_schemes(struct damon_ctx *dst, struct damon_ctx *src)
{
	struct damos *dst_scheme, *next, *src_scheme, *new_scheme;
	int i = 0, j = 0, err;

	damon_for_each_scheme_safe(dst_scheme, next, dst) {
		src_scheme = damon_nth_scheme(i++, src);
		if (src_scheme) {
			err = damos_commit(dst_scheme, src_scheme);
			if (err)
				return err;
		} else {
			damon_destroy_scheme(dst_scheme);
		}
	}

	damon_for_each_scheme_safe(src_scheme, next, src) {
		if (j++ < i)
			continue;
		new_scheme = damon_new_scheme(&src_scheme->pattern,
				src_scheme->action,
				src_scheme->apply_interval_us,
				&src_scheme->quota, &src_scheme->wmarks,
				NUMA_NO_NODE);
		if (!new_scheme)
			return -ENOMEM;
		damon_add_scheme(dst, new_scheme);
	}
	return 0;
}

static struct damon_target *damon_nth_target(int n, struct damon_ctx *ctx)
{
	struct damon_target *t;
	int i = 0;

	damon_for_each_target(t, ctx) {
		if (i++ == n)
			return t;
	}
	return NULL;
}

/*
 * The caller should ensure the regions of @src are
 * 1. valid (end >= start) and
 * 2. sorted by starting address.
 *
 * If @src has no region, @dst keeps current regions.
 */
static int damon_commit_target_regions(
		struct damon_target *dst, struct damon_target *src)
{
	struct damon_region *src_region;
	struct damon_addr_range *ranges;
	int i = 0, err;

	damon_for_each_region(src_region, src)
		i++;
	if (!i)
		return 0;

	ranges = kmalloc_array(i, sizeof(*ranges), GFP_KERNEL | __GFP_NOWARN);
	if (!ranges)
		return -ENOMEM;
	i = 0;
	damon_for_each_region(src_region, src)
		ranges[i++] = src_region->ar;
	err = damon_set_regions(dst, ranges, i);
	kfree(ranges);
	return err;
}

static int damon_commit_target(
		struct damon_target *dst, bool dst_has_pid,
		struct damon_target *src, bool src_has_pid)
{
	int err;

	err = damon_commit_target_regions(dst, src);
	if (err)
		return err;
	if (dst_has_pid)
		put_pid(dst->pid);
	if (src_has_pid)
		get_pid(src->pid);
	dst->pid = src->pid;
	return 0;
}

static int damon_commit_targets(
		struct damon_ctx *dst, struct damon_ctx *src)
{
	struct damon_target *dst_target, *next, *src_target, *new_target;
	int i = 0, j = 0, err;

	damon_for_each_target_safe(dst_target, next, dst) {
		src_target = damon_nth_target(i++, src);
		if (src_target) {
			err = damon_commit_target(
					dst_target, damon_target_has_pid(dst),
					src_target, damon_target_has_pid(src));
			if (err)
				return err;
		} else {
			if (damon_target_has_pid(dst))
				put_pid(dst_target->pid);
			damon_destroy_target(dst_target);
		}
	}

	damon_for_each_target_safe(src_target, next, src) {
		if (j++ < i)
			continue;
		new_target = damon_new_target();
		if (!new_target)
			return -ENOMEM;
		err = damon_commit_target(new_target, false,
				src_target, damon_target_has_pid(src));
		if (err)
			return err;
		/* link the newly committed target to the destination context */
		damon_add_target(dst, new_target);
	}
	return 0;
}

/**
 * damon_commit_ctx() - Commit parameters of a DAMON context to another.
 * @dst: The commit destination DAMON context.
 * @src: The commit source DAMON context.
 *
 * This function copies user-specified parameters from @src to @dst and
 * updates the internal status and results accordingly.  Users should use
 * this function for context-level parameters update of a running context,
 * instead of manual in-place updates.
 *
 * This function should be called from parameters-update safe context, like
 * DAMON callbacks.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int damon_commit_ctx(struct damon_ctx *dst, struct damon_ctx *src)
{
	int err;

	err = damon_commit_schemes(dst, src);
	if (err)
		return err;
	err = damon_commit_targets(dst, src);
	if (err)
		return err;
	/*
	 * schemes and targets should be updated first, since
	 * 1. damon_set_attrs() updates monitoring results of targets and
	 *    next_apply_sis of schemes, and
	 * 2. ops update should be done after pid handling is done (target
	 *    committing requires putting pids).
	 */
	err = damon_set_attrs(dst, &src->attrs);
	if (err)
		return err;
	dst->ops = src->ops;

	return 0;
}
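
/*
 * Example (illustrative sketch): updating parameters of a running context
 * from a parameters-update safe context such as a DAMON callback.  The
 * source context must carry the complete desired state, since schemes and
 * targets that are missing from @src are removed from @dst by the commit.
 *
 *	struct damon_ctx *param_ctx = damon_new_ctx();
 *
 *	if (!param_ctx)
 *		return -ENOMEM;
 *	... fully construct param_ctx (ops, targets, schemes and attrs),
 *	    as the DAMON sysfs interface does ...
 *	err = damon_commit_ctx(running_ctx, param_ctx);
 *	damon_destroy_ctx(param_ctx);
 */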

/**
 * damon_nr_running_ctxs() - Return number of currently running contexts.
 */
int damon_nr_running_ctxs(void)
{
	int nr_ctxs;

	mutex_lock(&damon_lock);
	nr_ctxs = nr_running_ctxs;
	mutex_unlock(&damon_lock);

	return nr_ctxs;
}

/* Returns the size upper limit for each monitoring region */
static unsigned long damon_region_sz_limit(struct damon_ctx *ctx)
{
	struct damon_target *t;
	struct damon_region *r;
	unsigned long sz = 0;

	damon_for_each_target(t, ctx) {
		damon_for_each_region(r, t)
			sz += damon_sz_region(r);
	}

	if (ctx->attrs.min_nr_regions)
		sz /= ctx->attrs.min_nr_regions;
	if (sz < DAMON_MIN_REGION)
		sz = DAMON_MIN_REGION;

	return sz;
}

static int kdamond_fn(void *data);

/*
 * __damon_start() - Starts monitoring with given context.
 * @ctx: monitoring context
 *
 * This function should be called while damon_lock is held.
 *
 * Return: 0 on success, negative error code otherwise.
 */
static int __damon_start(struct damon_ctx *ctx)
{
	int err = -EBUSY;

	mutex_lock(&ctx->kdamond_lock);
	if (!ctx->kdamond) {
		err = 0;
		reinit_completion(&ctx->kdamond_started);
		ctx->kdamond = kthread_run(kdamond_fn, ctx, "kdamond.%d",
				nr_running_ctxs);
		if (IS_ERR(ctx->kdamond)) {
			err = PTR_ERR(ctx->kdamond);
			ctx->kdamond = NULL;
		} else {
			wait_for_completion(&ctx->kdamond_started);
		}
	}
	mutex_unlock(&ctx->kdamond_lock);

	return err;
}

/**
 * damon_start() - Starts the monitorings for a given group of contexts.
 * @ctxs: an array of the pointers for contexts to start monitoring
 * @nr_ctxs: size of @ctxs
 * @exclusive: exclusiveness of this contexts group
 *
 * This function starts a group of monitoring threads for a group of
 * monitoring contexts.  One thread per context is created and run in
 * parallel.  The caller should handle synchronization between the threads by
 * itself.  If @exclusive is true and a group of threads that was created by
 * another 'damon_start()' call is currently running, this function does
 * nothing but returns -EBUSY.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int damon_start(struct damon_ctx **ctxs, int nr_ctxs, bool exclusive)
{
	int i;
	int err = 0;

	mutex_lock(&damon_lock);
	if ((exclusive && nr_running_ctxs) ||
			(!exclusive && running_exclusive_ctxs)) {
		mutex_unlock(&damon_lock);
		return -EBUSY;
	}

	for (i = 0; i < nr_ctxs; i++) {
		err = __damon_start(ctxs[i]);
		if (err)
			break;
		nr_running_ctxs++;
	}
	if (exclusive && nr_running_ctxs)
		running_exclusive_ctxs = true;
	mutex_unlock(&damon_lock);

	return err;
}

/*
 * __damon_stop() - Stops monitoring of a given context.
 * @ctx: monitoring context
 *
 * Return: 0 on success, negative error code otherwise.
 */
static int __damon_stop(struct damon_ctx *ctx)
{
	struct task_struct *tsk;

	mutex_lock(&ctx->kdamond_lock);
	tsk = ctx->kdamond;
	if (tsk) {
		get_task_struct(tsk);
		mutex_unlock(&ctx->kdamond_lock);
		kthread_stop_put(tsk);
		return 0;
	}
	mutex_unlock(&ctx->kdamond_lock);

	return -EPERM;
}

/**
 * damon_stop() - Stops the monitorings for a given group of contexts.
 * @ctxs: an array of the pointers for contexts to stop monitoring
 * @nr_ctxs: size of @ctxs
 *
 * Return: 0 on success, negative error code otherwise.
 */
int damon_stop(struct damon_ctx **ctxs, int nr_ctxs)
{
	int i, err = 0;

	for (i = 0; i < nr_ctxs; i++) {
		/* nr_running_ctxs is decremented in kdamond_fn */
		err = __damon_stop(ctxs[i]);
		if (err)
			break;
	}
	return err;
}
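
/*
 * Example (illustrative sketch): starting and later stopping a single
 * context in non-exclusive mode.
 *
 *	struct damon_ctx *ctxs[] = { ctx };
 *
 *	err = damon_start(ctxs, 1, false);
 *	... monitoring runs ...
 *	err = damon_stop(ctxs, 1);
 *
 * damon_stop() only asks the kdamonds to stop; each kdamond decrements
 * nr_running_ctxs by itself when it actually terminates.
 */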

/*
 * Reset the aggregated monitoring results ('nr_accesses' of each region).
 */
static void kdamond_reset_aggregated(struct damon_ctx *c)
{
	struct damon_target *t;
	unsigned int ti = 0;	/* target's index */

	damon_for_each_target(t, c) {
		struct damon_region *r;

		damon_for_each_region(r, t) {
			trace_damon_aggregated(ti, r, damon_nr_regions(t));
			r->last_nr_accesses = r->nr_accesses;
			r->nr_accesses = 0;
		}
		ti++;
	}
}

static void damon_split_region_at(struct damon_target *t,
		struct damon_region *r, unsigned long sz_r);

static bool __damos_valid_target(struct damon_region *r, struct damos *s)
{
	unsigned long sz;
	unsigned int nr_accesses = r->nr_accesses_bp / 10000;

	sz = damon_sz_region(r);
	return s->pattern.min_sz_region <= sz &&
		sz <= s->pattern.max_sz_region &&
		s->pattern.min_nr_accesses <= nr_accesses &&
		nr_accesses <= s->pattern.max_nr_accesses &&
		s->pattern.min_age_region <= r->age &&
		r->age <= s->pattern.max_age_region;
}

static bool damos_valid_target(struct damon_ctx *c, struct damon_target *t,
		struct damon_region *r, struct damos *s)
{
	bool ret = __damos_valid_target(r, s);

	if (!ret || !s->quota.esz || !c->ops.get_scheme_score)
		return ret;

	return c->ops.get_scheme_score(c, t, r, s) >= s->quota.min_score;
}

/*
 * damos_skip_charged_region() - Check if the given region or starting part of
 * it is already charged for the DAMOS quota.
 * @t: The target of the region.
 * @rp: The pointer to the region.
 * @s: The scheme to be applied.
 *
 * If a quota of a scheme has been exceeded in a quota charge window, the
 * scheme's action would be applied to only a part of the regions that
 * fulfill the target access pattern.  To avoid applying the scheme action to
 * only already applied regions, DAMON skips applying the scheme action to
 * the regions that were charged in the previous charge window.
 *
 * This function checks if a given region should be skipped or not for the
 * reason.  If only the starting part of the region has previously been
 * charged, this function splits the region into two so that the second one
 * covers the area that was not charged in the previous charge window, saves
 * the second region in *rp, and returns false so that the caller can apply
 * the DAMON action to the second one.
 *
 * Return: true if the region should be entirely skipped, false otherwise.
 */
static bool damos_skip_charged_region(struct damon_target *t,
		struct damon_region **rp, struct damos *s)
{
	struct damon_region *r = *rp;
	struct damos_quota *quota = &s->quota;
	unsigned long sz_to_skip;

	/* Skip previously charged regions */
	if (quota->charge_target_from) {
		if (t != quota->charge_target_from)
			return true;
		if (r == damon_last_region(t)) {
			quota->charge_target_from = NULL;
			quota->charge_addr_from = 0;
			return true;
		}
		if (quota->charge_addr_from &&
				r->ar.end <= quota->charge_addr_from)
			return true;

		if (quota->charge_addr_from && r->ar.start <
				quota->charge_addr_from) {
			sz_to_skip = ALIGN_DOWN(quota->charge_addr_from -
					r->ar.start, DAMON_MIN_REGION);
			if (!sz_to_skip) {
				if (damon_sz_region(r) <= DAMON_MIN_REGION)
					return true;
				sz_to_skip = DAMON_MIN_REGION;
			}
			damon_split_region_at(t, r, sz_to_skip);
			r = damon_next_region(r);
			*rp = r;
		}
		quota->charge_target_from = NULL;
		quota->charge_addr_from = 0;
	}
	return false;
}

static void damos_update_stat(struct damos *s,
		unsigned long sz_tried, unsigned long sz_applied)
{
	s->stat.nr_tried++;
	s->stat.sz_tried += sz_tried;
	if (sz_applied)
		s->stat.nr_applied++;
	s->stat.sz_applied += sz_applied;
}

static bool __damos_filter_out(struct damon_ctx *ctx, struct damon_target *t,
		struct damon_region *r, struct damos_filter *filter)
{
	bool matched = false;
	struct damon_target *ti;
	int target_idx = 0;
	unsigned long start, end;

	switch (filter->type) {
	case DAMOS_FILTER_TYPE_TARGET:
		damon_for_each_target(ti, ctx) {
			if (ti == t)
				break;
			target_idx++;
		}
		matched = target_idx == filter->target_idx;
		break;
	case DAMOS_FILTER_TYPE_ADDR:
		start = ALIGN_DOWN(filter->addr_range.start, DAMON_MIN_REGION);
		end = ALIGN_DOWN(filter->addr_range.end, DAMON_MIN_REGION);

		/* inside the range */
		if (start <= r->ar.start && r->ar.end <= end) {
			matched = true;
			break;
		}
		/* outside of the range */
		if (r->ar.end <= start || end <= r->ar.start) {
			matched = false;
			break;
		}
		/* start before the range and overlap */
		if (r->ar.start < start) {
			damon_split_region_at(t, r, start - r->ar.start);
			matched = false;
			break;
		}
		/* start inside the range */
		damon_split_region_at(t, r, end - r->ar.start);
		matched = true;
		break;
	default:
		return false;
	}

	return matched == filter->matching;
}

static bool damos_filter_out(struct damon_ctx *ctx, struct damon_target *t,
		struct damon_region *r, struct damos *s)
{
	struct damos_filter *filter;

	damos_for_each_filter(filter, s) {
		if (__damos_filter_out(ctx, t, r, filter))
			return true;
	}
	return false;
}

static void damos_apply_scheme(struct damon_ctx *c, struct damon_target *t,
		struct damon_region *r, struct damos *s)
{
	struct damos_quota *quota = &s->quota;
	unsigned long sz = damon_sz_region(r);
	struct timespec64 begin, end;
	unsigned long sz_applied = 0;
	int err = 0;
	/*
	 * We plan to support multiple contexts per kdamond, as DAMON sysfs
	 * implies with its 'nr_contexts' file.  Nevertheless, only a single
	 * context per kdamond is supported for now.  So, we can simply use
	 * '0' as the context index here.
	 */
	unsigned int cidx = 0;
	struct damos *siter;		/* schemes iterator */
	unsigned int sidx = 0;
	struct damon_target *titer;	/* targets iterator */
	unsigned int tidx = 0;
	bool do_trace = false;

	/* get indices for trace_damos_before_apply() */
	if (trace_damos_before_apply_enabled()) {
		damon_for_each_scheme(siter, c) {
			if (siter == s)
				break;
			sidx++;
		}
		damon_for_each_target(titer, c) {
			if (titer == t)
				break;
			tidx++;
		}
		do_trace = true;
	}

	if (c->ops.apply_scheme) {
		if (quota->esz && quota->charged_sz + sz > quota->esz) {
			sz = ALIGN_DOWN(quota->esz - quota->charged_sz,
					DAMON_MIN_REGION);
			if (!sz)
				goto update_stat;
			damon_split_region_at(t, r, sz);
		}
		if (damos_filter_out(c, t, r, s))
			return;
		ktime_get_coarse_ts64(&begin);
		if (c->callback.before_damos_apply)
			err = c->callback.before_damos_apply(c, t, r, s);
		if (!err) {
			trace_damos_before_apply(cidx, sidx, tidx, r,
					damon_nr_regions(t), do_trace);
			sz_applied = c->ops.apply_scheme(c, t, r, s);
		}
		ktime_get_coarse_ts64(&end);
		quota->total_charged_ns += timespec64_to_ns(&end) -
			timespec64_to_ns(&begin);
		quota->charged_sz += sz;
		if (quota->esz && quota->charged_sz >= quota->esz) {
			quota->charge_target_from = t;
			quota->charge_addr_from = r->ar.end + 1;
		}
	}
	if (s->action != DAMOS_STAT)
		r->age = 0;

update_stat:
	damos_update_stat(s, sz, sz_applied);
}

static void damon_do_apply_schemes(struct damon_ctx *c,
		struct damon_target *t,
		struct damon_region *r)
{
	struct damos *s;

	damon_for_each_scheme(s, c) {
		struct damos_quota *quota = &s->quota;

		if (c->passed_sample_intervals != s->next_apply_sis)
			continue;

		if (!s->wmarks.activated)
			continue;

		/* Check the quota */
		if (quota->esz && quota->charged_sz >= quota->esz)
			continue;

		if (damos_skip_charged_region(t, &r, s))
			continue;

		if (!damos_valid_target(c, t, r, s))
			continue;

		damos_apply_scheme(c, t, r, s);
	}
}

/*
 * damon_feed_loop_next_input() - get next input to achieve a target score.
 * @last_input: The last input.
 * @score: Current score that was made with @last_input.
 *
 * Calculate the next input to achieve the target score, based on the last
 * input and the current score.  Assuming the input and the score are
 * positively proportional, calculate how much compensation should be added
 * to or subtracted from the last input, as a proportion of the last input.
 * The next input is always kept non-zero.  In short form (assuming support
 * of float and signed calculations), the algorithm is as below.
 *
 * next_input = max(last_input * ((goal - current) / goal + 1), 1)
 *
 * For simple implementation, we assume the target score is always 10,000.
 * The caller should adjust @score for this.
 *
 * Returns the next input that is assumed to achieve the target score.
 */
static unsigned long damon_feed_loop_next_input(unsigned long last_input,
		unsigned long score)
{
	const unsigned long goal = 10000;
	unsigned long score_goal_diff = max(goal, score) - min(goal, score);
	unsigned long score_goal_diff_bp = score_goal_diff * 10000 / goal;
	unsigned long compensation = last_input * score_goal_diff_bp / 10000;
	/* Set minimum input as 10000 to avoid compensation becoming zero */
	const unsigned long min_input = 10000;

	if (goal > score)
		return last_input + compensation;
	if (last_input > compensation + min_input)
		return last_input - compensation;
	return min_input;
}
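
/*
 * Worked example (hypothetical numbers): with @last_input of 1,000,000 and
 * @score of 5,000 (half of the 10,000 target), the difference is 5,000 bp of
 * the goal, so the compensation is half of the last input and the next input
 * becomes 1,500,000.  With @score of 12,500, the compensation is a quarter
 * of the last input and the next input becomes 750,000.  When reducing, the
 * next input is floored at 10,000 so that later compensations do not become
 * zero.
 */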

#ifdef CONFIG_PSI

static u64 damos_get_some_mem_psi_total(void)
{
	if (static_branch_likely(&psi_disabled))
		return 0;
	return div_u64(psi_system.total[PSI_AVGS][PSI_MEM * 2],
			NSEC_PER_USEC);
}

#else	/* CONFIG_PSI */

static inline u64 damos_get_some_mem_psi_total(void)
{
	return 0;
}

#endif	/* CONFIG_PSI */

static void damos_set_quota_goal_current_value(struct damos_quota_goal *goal)
{
	u64 now_psi_total;

	switch (goal->metric) {
	case DAMOS_QUOTA_USER_INPUT:
		/* User should already set goal->current_value */
		break;
	case DAMOS_QUOTA_SOME_MEM_PSI_US:
		now_psi_total = damos_get_some_mem_psi_total();
		goal->current_value = now_psi_total - goal->last_psi_total;
		goal->last_psi_total = now_psi_total;
		break;
	default:
		break;
	}
}

/* Return the highest score since it makes schemes least aggressive */
static unsigned long damos_quota_score(struct damos_quota *quota)
{
	struct damos_quota_goal *goal;
	unsigned long highest_score = 0;

	damos_for_each_quota_goal(goal, quota) {
		damos_set_quota_goal_current_value(goal);
		highest_score = max(highest_score,
				goal->current_value * 10000 /
				goal->target_value);
	}

	return highest_score;
}

/*
 * Called only if quota->ms, or quota->sz are set, or quota->goals is not empty
 */
static void damos_set_effective_quota(struct damos_quota *quota)
{
	unsigned long throughput;
	unsigned long esz;

	if (!quota->ms && list_empty(&quota->goals)) {
		quota->esz = quota->sz;
		return;
	}

	if (!list_empty(&quota->goals)) {
		unsigned long score = damos_quota_score(quota);

		quota->esz_bp = damon_feed_loop_next_input(
				max(quota->esz_bp, 10000UL),
				score);
		esz = quota->esz_bp / 10000;
	}

	if (quota->ms) {
		if (quota->total_charged_ns)
			throughput = quota->total_charged_sz * 1000000 /
				quota->total_charged_ns;
		else
			throughput = PAGE_SIZE * 1024;
		if (!list_empty(&quota->goals))
			esz = min(throughput * quota->ms, esz);
		else
			esz = throughput * quota->ms;
	}

	if (quota->sz && quota->sz < esz)
		esz = quota->sz;

	quota->esz = esz;
}

static void damos_adjust_quota(struct damon_ctx *c, struct damos *s)
{
	struct damos_quota *quota = &s->quota;
	struct damon_target *t;
	struct damon_region *r;
	unsigned long cumulated_sz;
	unsigned int score, max_score = 0;

	if (!quota->ms && !quota->sz && list_empty(&quota->goals))
		return;

	/* New charge window starts */
	if (time_after_eq(jiffies, quota->charged_from +
				msecs_to_jiffies(quota->reset_interval))) {
		if (quota->esz && quota->charged_sz >= quota->esz)
			s->stat.qt_exceeds++;
		quota->total_charged_sz += quota->charged_sz;
		quota->charged_from = jiffies;
		quota->charged_sz = 0;
		damos_set_effective_quota(quota);
	}

	if (!c->ops.get_scheme_score)
		return;

	/* Fill up the score histogram */
	memset(c->regions_score_histogram, 0,
			sizeof(*c->regions_score_histogram) *
			(DAMOS_MAX_SCORE + 1));
	damon_for_each_target(t, c) {
		damon_for_each_region(r, t) {
			if (!__damos_valid_target(r, s))
				continue;
			score = c->ops.get_scheme_score(c, t, r, s);
			c->regions_score_histogram[score] +=
				damon_sz_region(r);
			if (score > max_score)
				max_score = score;
		}
	}

	/* Set the min score limit */
	for (cumulated_sz = 0, score = max_score; ; score--) {
		cumulated_sz += c->regions_score_histogram[score];
		if (cumulated_sz >= quota->esz || !score)
			break;
	}
	quota->min_score = score;
}

static void kdamond_apply_schemes(struct damon_ctx *c)
{
	struct damon_target *t;
	struct damon_region *r, *next_r;
	struct damos *s;
	unsigned long sample_interval = c->attrs.sample_interval ?
		c->attrs.sample_interval : 1;
	bool has_schemes_to_apply = false;

	damon_for_each_scheme(s, c) {
		if (c->passed_sample_intervals != s->next_apply_sis)
			continue;

		if (!s->wmarks.activated)
			continue;

		has_schemes_to_apply = true;

		damos_adjust_quota(c, s);
	}

	if (!has_schemes_to_apply)
		return;

	damon_for_each_target(t, c) {
		damon_for_each_region_safe(r, next_r, t)
			damon_do_apply_schemes(c, t, r);
	}

	damon_for_each_scheme(s, c) {
		if (c->passed_sample_intervals != s->next_apply_sis)
			continue;
		s->next_apply_sis +=
			(s->apply_interval_us ? s->apply_interval_us :
			 c->attrs.aggr_interval) / sample_interval;
	}
}

/*
 * Merge two adjacent regions into one region
 */
static void damon_merge_two_regions(struct damon_target *t,
		struct damon_region *l, struct damon_region *r)
{
	unsigned long sz_l = damon_sz_region(l), sz_r = damon_sz_region(r);

	l->nr_accesses = (l->nr_accesses * sz_l + r->nr_accesses * sz_r) /
		(sz_l + sz_r);
	l->nr_accesses_bp = l->nr_accesses * 10000;
	l->age = (l->age * sz_l + r->age * sz_r) / (sz_l + sz_r);
	l->ar.end = r->ar.end;
	damon_destroy_region(r, t);
}

/*
 * Merge adjacent regions having similar access frequencies
 *
 * t		target affected by this merge operation
 * thres	'->nr_accesses' diff threshold for the merge
 * sz_limit	size upper limit of each region
 */
static void damon_merge_regions_of(struct damon_target *t, unsigned int thres,
		unsigned long sz_limit)
{
	struct damon_region *r, *prev = NULL, *next;

	damon_for_each_region_safe(r, next, t) {
		if (abs(r->nr_accesses - r->last_nr_accesses) > thres)
			r->age = 0;
		else
			r->age++;

		if (prev && prev->ar.end == r->ar.start &&
				abs(prev->nr_accesses - r->nr_accesses) <= thres &&
				damon_sz_region(prev) + damon_sz_region(r) <= sz_limit)
			damon_merge_two_regions(t, prev, r);
		else
			prev = r;
	}
}

/*
 * Merge adjacent regions having similar access frequencies
 *
 * threshold	'->nr_accesses' diff threshold for the merge
 * sz_limit	size upper limit of each region
 *
 * This function merges monitoring target regions which are adjacent and
 * whose access frequencies are similar.  This is for minimizing the
 * monitoring overhead under the dynamically changeable access pattern.  If a
 * merge was unnecessarily made, later 'kdamond_split_regions()' will revert
 * it.
 *
 * The total number of regions could be higher than the user-defined limit,
 * max_nr_regions, for some cases.  For example, the user can update
 * max_nr_regions to a number lower than the current number of regions while
 * DAMON is running.  For such a case, repeat merging until the limit is met
 * while increasing @threshold up to the possible maximum level.
 */
static void kdamond_merge_regions(struct damon_ctx *c, unsigned int threshold,
		unsigned long sz_limit)
{
	struct damon_target *t;
	unsigned int nr_regions;
	unsigned int max_thres;

	max_thres = c->attrs.aggr_interval /
		(c->attrs.sample_interval ? c->attrs.sample_interval : 1);
	do {
		nr_regions = 0;
		damon_for_each_target(t, c) {
			damon_merge_regions_of(t, threshold, sz_limit);
			nr_regions += damon_nr_regions(t);
		}
		threshold = max(1, threshold * 2);
	} while (nr_regions > c->attrs.max_nr_regions &&
			threshold / 2 < max_thres);
}

/*
 * Split a region in two
 *
 * r		the region to be split
 * sz_r		size of the first sub-region that will be made
 */
static void damon_split_region_at(struct damon_target *t,
		struct damon_region *r, unsigned long sz_r)
{
	struct damon_region *new;

	new = damon_new_region(r->ar.start + sz_r, r->ar.end);
	if (!new)
		return;

	r->ar.end = new->ar.start;

	new->age = r->age;
	new->last_nr_accesses = r->last_nr_accesses;
	new->nr_accesses_bp = r->nr_accesses_bp;
	new->nr_accesses = r->nr_accesses;

	damon_insert_region(new, r, damon_next_region(r), t);
}

/* Split every region in the given target into 'nr_subs' regions */
static void damon_split_regions_of(struct damon_target *t, int nr_subs)
{
	struct damon_region *r, *next;
	unsigned long sz_region, sz_sub = 0;
	int i;

	damon_for_each_region_safe(r, next, t) {
		sz_region = damon_sz_region(r);

		for (i = 0; i < nr_subs - 1 &&
				sz_region > 2 * DAMON_MIN_REGION; i++) {
			/*
			 * Randomly select size of left sub-region to be at
			 * least 10 percent and at most 90 percent of the
			 * original region
			 */
			sz_sub = ALIGN_DOWN(damon_rand(1, 10) *
					sz_region / 10, DAMON_MIN_REGION);
			/* Do not allow blank region */
			if (sz_sub == 0 || sz_sub >= sz_region)
				continue;

			damon_split_region_at(t, r, sz_sub);
			sz_region = sz_sub;
		}
	}
}

/*
 * Split every target region into randomly-sized small regions
 *
 * This function splits every target region into random-sized small regions
 * if the current total number of the regions is equal to or smaller than
 * half of the user-specified maximum number of regions.  This is for
 * maximizing the monitoring accuracy under the dynamically changeable access
 * patterns.  If a split was unnecessarily made, later
 * 'kdamond_merge_regions()' will revert it.
 */
static void kdamond_split_regions(struct damon_ctx *ctx)
{
	struct damon_target *t;
	unsigned int nr_regions = 0;
	static unsigned int last_nr_regions;
	int nr_subregions = 2;

	damon_for_each_target(t, ctx)
		nr_regions += damon_nr_regions(t);

	if (nr_regions > ctx->attrs.max_nr_regions / 2)
		return;

	/* Maybe the middle of the region has different access frequency */
	if (last_nr_regions == nr_regions &&
			nr_regions < ctx->attrs.max_nr_regions / 3)
		nr_subregions = 3;

	damon_for_each_target(t, ctx)
		damon_split_regions_of(t, nr_subregions);

	last_nr_regions = nr_regions;
}

/*
 * Check whether current monitoring should be stopped
 *
 * The monitoring is stopped when either the user requested to stop, or all
 * monitoring targets are invalid.
 *
 * Returns true if the monitoring needs to stop.
 */
static bool kdamond_need_stop(struct damon_ctx *ctx)
{
	struct damon_target *t;

	if (kthread_should_stop())
		return true;

	if (!ctx->ops.target_valid)
		return false;

	damon_for_each_target(t, ctx) {
		if (ctx->ops.target_valid(t))
			return false;
	}

	return true;
}

static int damos_get_wmark_metric_value(enum damos_wmark_metric metric,
		unsigned long *metric_value)
{
	switch (metric) {
	case DAMOS_WMARK_FREE_MEM_RATE:
		*metric_value = global_zone_page_state(NR_FREE_PAGES) * 1000 /
			totalram_pages();
		return 0;
	default:
		break;
	}
	return -EINVAL;
}

/*
 * Returns zero if the scheme is active.  Else, returns time to wait for the
 * next watermark check in micro-seconds.
 */
static unsigned long damos_wmark_wait_us(struct damos *scheme)
{
	unsigned long metric;

	if (damos_get_wmark_metric_value(scheme->wmarks.metric, &metric))
		return 0;

	/* higher than high watermark or lower than low watermark */
	if (metric > scheme->wmarks.high || scheme->wmarks.low > metric) {
		if (scheme->wmarks.activated)
			pr_debug("deactivate a scheme (%d) for %s wmark\n",
					scheme->action,
					metric > scheme->wmarks.high ?
					"high" : "low");
		scheme->wmarks.activated = false;
		return scheme->wmarks.interval;
	}

	/* inactive and higher than middle watermark */
	if ((scheme->wmarks.high >= metric && metric >= scheme->wmarks.mid) &&
			!scheme->wmarks.activated)
		return scheme->wmarks.interval;

	if (!scheme->wmarks.activated)
		pr_debug("activate a scheme (%d)\n", scheme->action);
	scheme->wmarks.activated = true;
	return 0;
}

static void kdamond_usleep(unsigned long usecs)
{
	/* See Documentation/timers/timers-howto.rst for the thresholds */
	if (usecs > 20 * USEC_PER_MSEC)
		schedule_timeout_idle(usecs_to_jiffies(usecs));
	else
		usleep_idle_range(usecs, usecs + 1);
}

/* Returns negative error code if it's not activated but should return */
static int kdamond_wait_activation(struct damon_ctx *ctx)
{
	struct damos *s;
	unsigned long wait_time;
	unsigned long min_wait_time = 0;
	bool init_wait_time = false;

	while (!kdamond_need_stop(ctx)) {
		damon_for_each_scheme(s, ctx) {
			wait_time = damos_wmark_wait_us(s);
			if (!init_wait_time || wait_time < min_wait_time) {
				init_wait_time = true;
				min_wait_time = wait_time;
			}
		}
		if (!min_wait_time)
			return 0;

		kdamond_usleep(min_wait_time);

		if (ctx->callback.after_wmarks_check &&
				ctx->callback.after_wmarks_check(ctx))
			break;
	}
	return -EBUSY;
}

static void kdamond_init_intervals_sis(struct damon_ctx *ctx)
{
	unsigned long sample_interval = ctx->attrs.sample_interval ?
		ctx->attrs.sample_interval : 1;
	unsigned long apply_interval;
	struct damos *scheme;

	ctx->passed_sample_intervals = 0;
	ctx->next_aggregation_sis = ctx->attrs.aggr_interval / sample_interval;
	ctx->next_ops_update_sis = ctx->attrs.ops_update_interval /
		sample_interval;

	damon_for_each_scheme(scheme, ctx) {
		apply_interval = scheme->apply_interval_us ?
			scheme->apply_interval_us : ctx->attrs.aggr_interval;
		scheme->next_apply_sis = apply_interval / sample_interval;
	}
}

/*
 * The monitoring daemon that runs as a kernel thread
 */
static int kdamond_fn(void *data)
{
	struct damon_ctx *ctx = data;
	struct damon_target *t;
	struct damon_region *r, *next;
	unsigned int max_nr_accesses = 0;
	unsigned long sz_limit = 0;

	pr_debug("kdamond (%d) starts\n", current->pid);

	complete(&ctx->kdamond_started);
	kdamond_init_intervals_sis(ctx);

	if (ctx->ops.init)
		ctx->ops.init(ctx);
	if (ctx->callback.before_start && ctx->callback.before_start(ctx))
		goto done;
	ctx->regions_score_histogram = kmalloc_array(DAMOS_MAX_SCORE + 1,
			sizeof(*ctx->regions_score_histogram), GFP_KERNEL);
	if (!ctx->regions_score_histogram)
		goto done;

	sz_limit = damon_region_sz_limit(ctx);

	while (!kdamond_need_stop(ctx)) {
		/*
		 * ctx->attrs and ctx->next_{aggregation,ops_update}_sis could
		 * be changed from after_wmarks_check() or after_aggregation()
		 * callbacks.  Read the values here, and use those for this
		 * iteration.  That is, new values set by damon_set_attrs()
		 * are respected from the next iteration.
		 */
		unsigned long next_aggregation_sis = ctx->next_aggregation_sis;
		unsigned long next_ops_update_sis = ctx->next_ops_update_sis;
		unsigned long sample_interval = ctx->attrs.sample_interval;

		if (kdamond_wait_activation(ctx))
			break;

		if (ctx->ops.prepare_access_checks)
			ctx->ops.prepare_access_checks(ctx);
		if (ctx->callback.after_sampling &&
				ctx->callback.after_sampling(ctx))
			break;

		kdamond_usleep(sample_interval);
		ctx->passed_sample_intervals++;

		if (ctx->ops.check_accesses)
			max_nr_accesses = ctx->ops.check_accesses(ctx);

		if (ctx->passed_sample_intervals == next_aggregation_sis) {
			kdamond_merge_regions(ctx,
					max_nr_accesses / 10,
					sz_limit);
			if (ctx->callback.after_aggregation &&
					ctx->callback.after_aggregation(ctx))
				break;
		}

		/*
		 * do kdamond_apply_schemes() after kdamond_merge_regions() if
		 * possible, to reduce overhead
		 */
		if (!list_empty(&ctx->schemes))
			kdamond_apply_schemes(ctx);

		sample_interval = ctx->attrs.sample_interval ?
			ctx->attrs.sample_interval : 1;
/*
 * The monitoring daemon that runs as a kernel thread
 */
static int kdamond_fn(void *data)
{
	struct damon_ctx *ctx = data;
	struct damon_target *t;
	struct damon_region *r, *next;
	unsigned int max_nr_accesses = 0;
	unsigned long sz_limit = 0;

	pr_debug("kdamond (%d) starts\n", current->pid);

	complete(&ctx->kdamond_started);
	kdamond_init_intervals_sis(ctx);

	if (ctx->ops.init)
		ctx->ops.init(ctx);
	if (ctx->callback.before_start && ctx->callback.before_start(ctx))
		goto done;
	ctx->regions_score_histogram = kmalloc_array(DAMOS_MAX_SCORE + 1,
			sizeof(*ctx->regions_score_histogram), GFP_KERNEL);
	if (!ctx->regions_score_histogram)
		goto done;

	sz_limit = damon_region_sz_limit(ctx);

	while (!kdamond_need_stop(ctx)) {
		/*
		 * ctx->attrs and ctx->next_{aggregation,ops_update}_sis could
		 * be changed from after_wmarks_check() or after_aggregation()
		 * callbacks.  Read the values here, and use those for this
		 * iteration.  That is, values newly set via damon_set_attrs()
		 * are respected only from the next iteration.
		 */
		unsigned long next_aggregation_sis = ctx->next_aggregation_sis;
		unsigned long next_ops_update_sis = ctx->next_ops_update_sis;
		unsigned long sample_interval = ctx->attrs.sample_interval;

		if (kdamond_wait_activation(ctx))
			break;

		if (ctx->ops.prepare_access_checks)
			ctx->ops.prepare_access_checks(ctx);
		if (ctx->callback.after_sampling &&
				ctx->callback.after_sampling(ctx))
			break;

		kdamond_usleep(sample_interval);
		ctx->passed_sample_intervals++;

		if (ctx->ops.check_accesses)
			max_nr_accesses = ctx->ops.check_accesses(ctx);

		if (ctx->passed_sample_intervals == next_aggregation_sis) {
			kdamond_merge_regions(ctx,
					max_nr_accesses / 10,
					sz_limit);
			if (ctx->callback.after_aggregation &&
					ctx->callback.after_aggregation(ctx))
				break;
		}

		/*
		 * do kdamond_apply_schemes() after kdamond_merge_regions() if
		 * possible, to reduce overhead
		 */
		if (!list_empty(&ctx->schemes))
			kdamond_apply_schemes(ctx);

		sample_interval = ctx->attrs.sample_interval ?
			ctx->attrs.sample_interval : 1;
		if (ctx->passed_sample_intervals == next_aggregation_sis) {
			ctx->next_aggregation_sis = next_aggregation_sis +
				ctx->attrs.aggr_interval / sample_interval;

			kdamond_reset_aggregated(ctx);
			kdamond_split_regions(ctx);
			if (ctx->ops.reset_aggregated)
				ctx->ops.reset_aggregated(ctx);
		}

		if (ctx->passed_sample_intervals == next_ops_update_sis) {
			ctx->next_ops_update_sis = next_ops_update_sis +
				ctx->attrs.ops_update_interval /
				sample_interval;
			if (ctx->ops.update)
				ctx->ops.update(ctx);
			sz_limit = damon_region_sz_limit(ctx);
		}
	}
done:
	damon_for_each_target(t, ctx) {
		damon_for_each_region_safe(r, next, t)
			damon_destroy_region(r, t);
	}

	if (ctx->callback.before_terminate)
		ctx->callback.before_terminate(ctx);
	if (ctx->ops.cleanup)
		ctx->ops.cleanup(ctx);
	kfree(ctx->regions_score_histogram);

	pr_debug("kdamond (%d) finishes\n", current->pid);
	mutex_lock(&ctx->kdamond_lock);
	ctx->kdamond = NULL;
	mutex_unlock(&ctx->kdamond_lock);

	mutex_lock(&damon_lock);
	nr_running_ctxs--;
	if (!nr_running_ctxs && running_exclusive_ctxs)
		running_exclusive_ctxs = false;
	mutex_unlock(&damon_lock);

	return 0;
}

/*
 * struct damon_system_ram_region - System RAM resource address region of
 *				    [@start, @end).
 * @start:	Start address of the region (inclusive).
 * @end:	End address of the region (exclusive).
 */
struct damon_system_ram_region {
	unsigned long start;
	unsigned long end;
};

static int walk_system_ram(struct resource *res, void *arg)
{
	struct damon_system_ram_region *a = arg;

	if (a->end - a->start < resource_size(res)) {
		a->start = res->start;
		a->end = res->end;
	}
	return 0;
}

/*
 * Find the biggest 'System RAM' resource and store its start and end address
 * in @start and @end, respectively.  If no System RAM is found, returns
 * false.
 */
static bool damon_find_biggest_system_ram(unsigned long *start,
						unsigned long *end)
{
	struct damon_system_ram_region arg = {};

	walk_system_ram_res(0, ULONG_MAX, &arg, walk_system_ram);
	if (arg.end <= arg.start)
		return false;

	*start = arg.start;
	*end = arg.end;
	return true;
}

/**
 * damon_set_region_biggest_system_ram_default() - Set the region of the given
 *	monitoring target as requested, or biggest 'System RAM'.
 * @t:		The monitoring target to set the region.
 * @start:	The pointer to the start address of the region.
 * @end:	The pointer to the end address of the region.
 *
 * This function sets the region of @t as requested by @start and @end.  If
 * the values of @start and @end are zero, however, this function finds the
 * biggest 'System RAM' resource and sets the region to cover the resource.
 * In the latter case, this function saves the start and end addresses of the
 * resource in @start and @end, respectively.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int damon_set_region_biggest_system_ram_default(struct damon_target *t,
			unsigned long *start, unsigned long *end)
{
	struct damon_addr_range addr_range;

	if (*start > *end)
		return -EINVAL;

	if (!*start && !*end &&
	    !damon_find_biggest_system_ram(start, end))
		return -EINVAL;

	addr_range.start = *start;
	addr_range.end = *end;
	return damon_set_regions(t, &addr_range, 1);
}
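
/*
 * Example use of damon_set_region_biggest_system_ram_default(), roughly how a
 * physical address space monitoring user could set up a target when no
 * explicit range was requested.  The variable names are hypothetical and only
 * for illustration:
 *
 *	unsigned long monitor_start = 0, monitor_end = 0;
 *	int err;
 *
 *	// Both addresses are zero, so the biggest 'System RAM' resource is
 *	// looked up and its boundaries are written back to the two variables.
 *	err = damon_set_region_biggest_system_ram_default(target,
 *			&monitor_start, &monitor_end);
 *	if (err)
 *		return err;
 */
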
/*
 * damon_moving_sum() - Calculate an inferred moving sum value.
 * @mvsum:	Inferred sum of the last @len_window values.
 * @nomvsum:	Non-moving sum of the last discrete @len_window window values.
 * @len_window:	The number of last values to take care of.
 * @new_value:	New value that will be added to the pseudo moving sum.
 *
 * Moving sum (moving average * window size) is good for handling noise, but
 * the cost of keeping past values can be high for arbitrary window size.
 * This function implements a lightweight pseudo moving sum function that
 * doesn't keep the past window values.
 *
 * It simply assumes there was no noise in the past, and gets the no-noise
 * assumed past value to drop from @nomvsum and @len_window.  @nomvsum is a
 * non-moving sum of the last window.  For example, if @len_window is 10 and
 * we have 25 values, @nomvsum is the sum of the 11th to 20th values of the 25
 * values.  Hence, this function simply drops @nomvsum / @len_window from the
 * given @mvsum and adds @new_value.
 *
 * For example, if @len_window is 10 and @nomvsum is 50, the last 10 values of
 * the last window could vary, e.g., 0, 10, 0, 10, 0, 10, 0, 0, 0, 20.  For
 * calculating the next moving sum with a new value, we should drop 0 from 50
 * and add the new value.  However, this function assumes it got value 5 for
 * each of the last ten times.  Based on the assumption, when the next value
 * is measured, it drops the assumed past value (5) from the current sum and
 * adds the new value to get the updated pseudo-moving sum.
 *
 * This means the value could have errors, but the errors disappear at every
 * @len_window-aligned call.  For example, if @len_window is 10, the pseudo
 * moving sum for the 11th to 19th values could have an error, but the sum for
 * the 20th value will not.
 *
 * Return: Pseudo-moving sum after getting the @new_value.
 */
static unsigned int damon_moving_sum(unsigned int mvsum, unsigned int nomvsum,
		unsigned int len_window, unsigned int new_value)
{
	return mvsum - nomvsum / len_window + new_value;
}

/**
 * damon_update_region_access_rate() - Update the access rate of a region.
 * @r:		The DAMON region to update for its access check result.
 * @accessed:	Whether the region was accessed during the last sampling
 *		interval.
 * @attrs:	The damon_attrs of the DAMON context.
 *
 * Update the access rate of a region with the region's last sampling interval
 * access check result.
 *
 * Usually this will be called by &damon_operations->check_accesses callback.
 */
void damon_update_region_access_rate(struct damon_region *r, bool accessed,
		struct damon_attrs *attrs)
{
	unsigned int len_window = 1;

	/*
	 * sample_interval can be zero, but cannot be larger than
	 * aggr_interval, owing to validation of damon_set_attrs().
	 */
	if (attrs->sample_interval)
		len_window = damon_max_nr_accesses(attrs);
	r->nr_accesses_bp = damon_moving_sum(r->nr_accesses_bp,
			r->last_nr_accesses * 10000, len_window,
			accessed ? 10000 : 0);

	if (accessed)
		r->nr_accesses++;
}
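
/*
 * Worked example of the nr_accesses_bp bookkeeping above, with illustrative
 * numbers.  Assume sample_interval is 5 ms and aggr_interval is 100 ms, so
 * damon_max_nr_accesses() yields a window of 20 samples, and the region was
 * found accessed in 10 samples of the previous aggregation interval
 * (last_nr_accesses == 10):
 *
 *	len_window              = 100 ms / 5 ms = 20
 *	assumed per-sample drop = last_nr_accesses * 10000 / len_window
 *	                        = 10 * 10000 / 20 = 5000
 *
 * A sample that saw an access updates
 *	nr_accesses_bp = nr_accesses_bp - 5000 + 10000
 * while a sample that saw no access updates
 *	nr_accesses_bp = nr_accesses_bp - 5000 + 0
 *
 * so nr_accesses_bp approximates nr_accesses in basis point (1/10000)
 * granularity and is refreshed on every sampling interval, while nr_accesses
 * itself remains the plain count for the ongoing aggregation interval.
 */
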
static int __init damon_init(void)
{
	damon_region_cache = KMEM_CACHE(damon_region, 0);
	if (unlikely(!damon_region_cache)) {
		pr_err("creating damon_region_cache fails\n");
		return -ENOMEM;
	}

	return 0;
}

subsys_initcall(damon_init);

#include "tests/core-kunit.h"