// SPDX-License-Identifier: GPL-2.0
/*
 * Data Access Monitor
 *
 * Author: SeongJae Park <sj@kernel.org>
 */

#define pr_fmt(fmt) "damon: " fmt

#include <linux/damon.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/mm.h>
#include <linux/psi.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/string_choices.h>

#define CREATE_TRACE_POINTS
#include <trace/events/damon.h>

#ifdef CONFIG_DAMON_KUNIT_TEST
#undef DAMON_MIN_REGION
#define DAMON_MIN_REGION 1
#endif

/* Protects nr_running_ctxs and running_exclusive_ctxs below. */
static DEFINE_MUTEX(damon_lock);
static int nr_running_ctxs;
static bool running_exclusive_ctxs;

/* Protects damon_registered_ops[]. */
static DEFINE_MUTEX(damon_ops_lock);
static struct damon_operations damon_registered_ops[NR_DAMON_OPS];

/* Slab cache for struct damon_region objects. */
static struct kmem_cache *damon_region_cache __ro_after_init;

/* Should be called under damon_ops_lock with id smaller than NR_DAMON_OPS */
static bool __damon_is_registered_ops(enum damon_ops_id id)
{
	struct damon_operations empty_ops = {};

	/* An all-zero slot means no operations set has been registered. */
	if (!memcmp(&empty_ops, &damon_registered_ops[id], sizeof(empty_ops)))
		return false;
	return true;
}

/**
 * damon_is_registered_ops() - Check if a given damon_operations is registered.
 * @id:	Id of the damon_operations to check if registered.
 *
 * Return: true if the ops is set, false otherwise.
 */
bool damon_is_registered_ops(enum damon_ops_id id)
{
	bool registered;

	if (id >= NR_DAMON_OPS)
		return false;
	mutex_lock(&damon_ops_lock);
	registered = __damon_is_registered_ops(id);
	mutex_unlock(&damon_ops_lock);
	return registered;
}

/**
 * damon_register_ops() - Register a monitoring operations set to DAMON.
 * @ops:	monitoring operations set to register.
 *
 * This function registers a monitoring operations set of valid &struct
 * damon_operations->id so that others can find and use them later.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int damon_register_ops(struct damon_operations *ops)
{
	int err = 0;

	if (ops->id >= NR_DAMON_OPS)
		return -EINVAL;
	mutex_lock(&damon_ops_lock);
	/* Fail for already registered ops */
	if (__damon_is_registered_ops(ops->id)) {
		err = -EINVAL;
		goto out;
	}
	damon_registered_ops[ops->id] = *ops;
out:
	mutex_unlock(&damon_ops_lock);
	return err;
}

/**
 * damon_select_ops() - Select a monitoring operations to use with the context.
 * @ctx:	monitoring context to use the operations.
 * @id:	id of the registered monitoring operations to select.
 *
 * This function finds registered monitoring operations set of @id and make
 * @ctx to use it.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int damon_select_ops(struct damon_ctx *ctx, enum damon_ops_id id)
{
	int err = 0;

	if (id >= NR_DAMON_OPS)
		return -EINVAL;

	mutex_lock(&damon_ops_lock);
	if (!__damon_is_registered_ops(id))
		err = -EINVAL;
	else
		ctx->ops = damon_registered_ops[id];
	mutex_unlock(&damon_ops_lock);
	return err;
}

/*
 * Construct a damon_region struct
 *
 * Returns the pointer to the new struct if success, or NULL otherwise
 */
struct damon_region *damon_new_region(unsigned long start, unsigned long end)
{
	struct damon_region *region;

	region = kmem_cache_alloc(damon_region_cache, GFP_KERNEL);
	if (!region)
		return NULL;

	region->ar.start = start;
	region->ar.end = end;
	region->nr_accesses = 0;
	region->nr_accesses_bp = 0;
	INIT_LIST_HEAD(&region->list);

	region->age = 0;
	region->last_nr_accesses = 0;

	return region;
}

/* Link @r at the tail of @t's regions list and bump the region counter. */
void damon_add_region(struct damon_region *r, struct damon_target *t)
{
	list_add_tail(&r->list, &t->regions_list);
	t->nr_regions++;
}

static void damon_del_region(struct damon_region *r, struct damon_target *t)
{
	list_del(&r->list);
	t->nr_regions--;
}

static void damon_free_region(struct damon_region *r)
{
	kmem_cache_free(damon_region_cache, r);
}

/* Unlink @r from @t and free it. */
void damon_destroy_region(struct damon_region *r, struct damon_target *t)
{
	damon_del_region(r, t);
	damon_free_region(r);
}

/*
 * Check whether a region is intersecting an address range
 *
 * Returns true if it is.
 */
static bool damon_intersect(struct damon_region *r,
		struct damon_addr_range *re)
{
	return !(r->ar.end <= re->start || re->end <= r->ar.start);
}

/*
 * Fill holes in regions with new regions.
 */
static int damon_fill_regions_holes(struct damon_region *first,
		struct damon_region *last, struct damon_target *t)
{
	struct damon_region *r = first;

	damon_for_each_region_from(r, t) {
		struct damon_region *next, *newr;

		if (r == last)
			break;
		next = damon_next_region(r);
		/* Plug the gap between two adjacent regions with a new one. */
		if (r->ar.end != next->ar.start) {
			newr = damon_new_region(r->ar.end, next->ar.start);
			if (!newr)
				return -ENOMEM;
			damon_insert_region(newr, r, next, t);
		}
	}
	return 0;
}

/*
 * damon_set_regions() - Set regions of a target for given address ranges.
 * @t:		the given target.
 * @ranges:	array of new monitoring target ranges.
 * @nr_ranges:	length of @ranges.
 *
 * This function adds new regions to, or modify existing regions of a
 * monitoring target to fit in specific ranges.
 *
 * Return: 0 if success, or negative error code otherwise.
 */
int damon_set_regions(struct damon_target *t, struct damon_addr_range *ranges,
		unsigned int nr_ranges)
{
	struct damon_region *r, *next;
	unsigned int i;
	int err;

	/* Remove regions which are not in the new ranges */
	damon_for_each_region_safe(r, next, t) {
		for (i = 0; i < nr_ranges; i++) {
			if (damon_intersect(r, &ranges[i]))
				break;
		}
		if (i == nr_ranges)
			damon_destroy_region(r, t);
	}

	r = damon_first_region(t);
	/* Add new regions or resize existing regions to fit in the ranges */
	for (i = 0; i < nr_ranges; i++) {
		struct damon_region *first = NULL, *last, *newr;
		struct damon_addr_range *range;

		range = &ranges[i];
		/* Get the first/last regions intersecting with the range */
		damon_for_each_region_from(r, t) {
			if (damon_intersect(r, range)) {
				if (!first)
					first = r;
				last = r;
			}
			if (r->ar.start >= range->end)
				break;
		}
		if (!first) {
			/* no region intersects with this range */
			newr = damon_new_region(
					ALIGN_DOWN(range->start,
						DAMON_MIN_REGION),
					ALIGN(range->end, DAMON_MIN_REGION));
			if (!newr)
				return -ENOMEM;
			damon_insert_region(newr, damon_prev_region(r), r, t);
		} else {
			/* resize intersecting regions to fit in this range */
			first->ar.start = ALIGN_DOWN(range->start,
					DAMON_MIN_REGION);
			last->ar.end = ALIGN(range->end, DAMON_MIN_REGION);

			/* fill possible holes in the range */
			err = damon_fill_regions_holes(first, last, t);
			if (err)
				return err;
		}
	}
	return 0;
}

/* Allocate and initialize a DAMOS filter.  Returns NULL if allocation fails. */
struct damos_filter *damos_new_filter(enum damos_filter_type type,
		bool matching, bool allow)
{
	struct damos_filter *filter;

	filter = kmalloc(sizeof(*filter), GFP_KERNEL);
	if (!filter)
		return NULL;
	filter->type = type;
	filter->matching = matching;
	filter->allow = allow;
	INIT_LIST_HEAD(&filter->list);
	return filter;
}

/**
 * damos_filter_for_ops() - Return if the filter is ops-handled one.
 * @type:	type of the filter.
 *
 * Return: true if the filter of @type needs to be handled by ops layer, false
 * otherwise.
 */
bool damos_filter_for_ops(enum damos_filter_type type)
{
	switch (type) {
	case DAMOS_FILTER_TYPE_ADDR:
	case DAMOS_FILTER_TYPE_TARGET:
		return false;
	default:
		break;
	}
	return true;
}

/* Add @f to the core- or ops-handled filters list of @s based on its type. */
void damos_add_filter(struct damos *s, struct damos_filter *f)
{
	if (damos_filter_for_ops(f->type))
		list_add_tail(&f->list, &s->ops_filters);
	else
		list_add_tail(&f->list, &s->filters);
}

static void damos_del_filter(struct damos_filter *f)
{
	list_del(&f->list);
}

static void damos_free_filter(struct damos_filter *f)
{
	kfree(f);
}

/* Unlink @f from its scheme's list and free it. */
void damos_destroy_filter(struct damos_filter *f)
{
	damos_del_filter(f);
	damos_free_filter(f);
}

/* Allocate and initialize a quota goal.  Returns NULL if allocation fails. */
struct damos_quota_goal *damos_new_quota_goal(
		enum damos_quota_goal_metric metric,
		unsigned long target_value)
{
	struct damos_quota_goal *goal;

	goal = kmalloc(sizeof(*goal), GFP_KERNEL);
	if (!goal)
		return NULL;
	goal->metric = metric;
	goal->target_value = target_value;
	INIT_LIST_HEAD(&goal->list);
	return goal;
}

void damos_add_quota_goal(struct damos_quota *q, struct damos_quota_goal *g)
{
	list_add_tail(&g->list, &q->goals);
}

static void damos_del_quota_goal(struct damos_quota_goal *g)
{
	list_del(&g->list);
}

static void damos_free_quota_goal(struct damos_quota_goal *g)
{
	kfree(g);
}

void damos_destroy_quota_goal(struct damos_quota_goal *g)
{
	damos_del_quota_goal(g);
	damos_free_quota_goal(g);
}

/* initialize fields of @quota that normally API users wouldn't set */
static struct damos_quota *damos_quota_init(struct damos_quota *quota)
{
	quota->esz = 0;
	quota->total_charged_sz = 0;
	quota->total_charged_ns = 0;
	quota->charged_sz = 0;
	quota->charged_from = 0;
	quota->charge_target_from = NULL;
	quota->charge_addr_from = 0;
	quota->esz_bp = 0;
	return quota;
}

/* Allocate and initialize a scheme.  Returns NULL if allocation fails. */
struct damos *damon_new_scheme(struct damos_access_pattern *pattern,
			enum damos_action action,
			unsigned long apply_interval_us,
			struct damos_quota *quota,
			struct damos_watermarks *wmarks,
			int target_nid)
{
	struct damos *scheme;

	scheme = kmalloc(sizeof(*scheme), GFP_KERNEL);
	if (!scheme)
		return NULL;
	scheme->pattern = *pattern;
	scheme->action = action;
	scheme->apply_interval_us = apply_interval_us;
	/*
	 * next_apply_sis will be set when kdamond starts.  While kdamond is
	 * running, it will also be updated when it is added to the DAMON
	 * context, or damon_attrs are updated.
	 */
	scheme->next_apply_sis = 0;
	scheme->walk_completed = false;
	INIT_LIST_HEAD(&scheme->filters);
	INIT_LIST_HEAD(&scheme->ops_filters);
	scheme->stat = (struct damos_stat){};
	INIT_LIST_HEAD(&scheme->list);

	scheme->quota = *(damos_quota_init(quota));
	/* quota.goals should be separately set by caller */
	INIT_LIST_HEAD(&scheme->quota.goals);

	scheme->wmarks = *wmarks;
	scheme->wmarks.activated = true;

	scheme->target_nid = target_nid;

	return scheme;
}

/* Schedule the next apply time of @s, in sampling-interval granularity. */
static void damos_set_next_apply_sis(struct damos *s, struct damon_ctx *ctx)
{
	unsigned long sample_interval = ctx->attrs.sample_interval ?
		ctx->attrs.sample_interval : 1;
	unsigned long apply_interval = s->apply_interval_us ?
		s->apply_interval_us : ctx->attrs.aggr_interval;

	s->next_apply_sis = ctx->passed_sample_intervals +
		apply_interval / sample_interval;
}

void damon_add_scheme(struct damon_ctx *ctx, struct damos *s)
{
	list_add_tail(&s->list, &ctx->schemes);
	damos_set_next_apply_sis(s, ctx);
}

static void damon_del_scheme(struct damos *s)
{
	list_del(&s->list);
}

static void damon_free_scheme(struct damos *s)
{
	kfree(s);
}

/* Destroy @s together with its quota goals and filters. */
void damon_destroy_scheme(struct damos *s)
{
	struct damos_quota_goal *g, *g_next;
	struct damos_filter *f, *next;

	damos_for_each_quota_goal_safe(g, g_next, &s->quota)
		damos_destroy_quota_goal(g);

	damos_for_each_filter_safe(f, next, s)
		damos_destroy_filter(f);
	damon_del_scheme(s);
	damon_free_scheme(s);
}

/*
 * Construct a damon_target struct
 *
 * Returns the pointer to the new struct if success, or NULL otherwise
 */
struct damon_target *damon_new_target(void)
{
	struct damon_target *t;

	t = kmalloc(sizeof(*t), GFP_KERNEL);
	if (!t)
		return NULL;

	t->pid = NULL;
	t->nr_regions = 0;
	INIT_LIST_HEAD(&t->regions_list);
	INIT_LIST_HEAD(&t->list);

	return t;
}

void damon_add_target(struct damon_ctx *ctx, struct damon_target *t)
{
	list_add_tail(&t->list, &ctx->adaptive_targets);
}

bool damon_targets_empty(struct damon_ctx *ctx)
{
	return list_empty(&ctx->adaptive_targets);
}

static void damon_del_target(struct damon_target *t)
{
	list_del(&t->list);
}

/* Free @t together with all of its regions. */
void damon_free_target(struct damon_target *t)
{
	struct damon_region *r, *next;

	damon_for_each_region_safe(r, next, t)
		damon_free_region(r);
	kfree(t);
}

void damon_destroy_target(struct damon_target *t)
{
	damon_del_target(t);
	damon_free_target(t);
}

unsigned int damon_nr_regions(struct damon_target *t)
{
	return t->nr_regions;
}

/* Allocate and initialize a context with default attributes. */
struct damon_ctx *damon_new_ctx(void)
{
	struct damon_ctx *ctx;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return NULL;

	init_completion(&ctx->kdamond_started);

	ctx->attrs.sample_interval = 5 * 1000;
	ctx->attrs.aggr_interval = 100 * 1000;
	ctx->attrs.ops_update_interval = 60 * 1000 * 1000;

	ctx->passed_sample_intervals = 0;
	/* These will be set from kdamond_init_ctx() */
	ctx->next_aggregation_sis = 0;
	ctx->next_ops_update_sis = 0;

	mutex_init(&ctx->kdamond_lock);
	mutex_init(&ctx->call_control_lock);
	mutex_init(&ctx->walk_control_lock);

	ctx->attrs.min_nr_regions = 10;
	ctx->attrs.max_nr_regions = 1000;

	INIT_LIST_HEAD(&ctx->adaptive_targets);
	INIT_LIST_HEAD(&ctx->schemes);

	return ctx;
}

static void damon_destroy_targets(struct damon_ctx *ctx)
{
	struct damon_target *t, *next_t;

	/* If the ops set provides a cleanup callback, it takes full charge. */
	if (ctx->ops.cleanup) {
		ctx->ops.cleanup(ctx);
		return;
	}

	damon_for_each_target_safe(t, next_t, ctx)
		damon_destroy_target(t);
}

void damon_destroy_ctx(struct damon_ctx *ctx)
{
	struct damos *s, *next_s;

	damon_destroy_targets(ctx);

	damon_for_each_scheme_safe(s, next_s, ctx)
		damon_destroy_scheme(s);

	kfree(ctx);
}

/* Rescale @age from @old_attrs' to @new_attrs' aggregation interval. */
static unsigned int damon_age_for_new_attrs(unsigned int age,
		struct damon_attrs *old_attrs, struct damon_attrs *new_attrs)
{
	return age * old_attrs->aggr_interval / new_attrs->aggr_interval;
}

/* convert access ratio in bp (per 10,000) to nr_accesses */
static unsigned int damon_accesses_bp_to_nr_accesses(
		unsigned int accesses_bp, struct damon_attrs *attrs)
{
	return accesses_bp * damon_max_nr_accesses(attrs) / 10000;
}

/*
 * Convert nr_accesses to access ratio in bp (per 10,000).
 *
 * Callers should ensure attrs.aggr_interval is not zero, like
 * damon_update_monitoring_results() does.  Otherwise, divide-by-zero would
 * happen.
 */
static unsigned int damon_nr_accesses_to_accesses_bp(
		unsigned int nr_accesses, struct damon_attrs *attrs)
{
	return nr_accesses * 10000 / damon_max_nr_accesses(attrs);
}

/* Rescale @nr_accesses for @new_attrs, going through the bp representation. */
static unsigned int damon_nr_accesses_for_new_attrs(unsigned int nr_accesses,
		struct damon_attrs *old_attrs, struct damon_attrs *new_attrs)
{
	return damon_accesses_bp_to_nr_accesses(
			damon_nr_accesses_to_accesses_bp(
				nr_accesses, old_attrs),
			new_attrs);
}

static void damon_update_monitoring_result(struct damon_region *r,
		struct damon_attrs *old_attrs, struct damon_attrs *new_attrs,
		bool aggregating)
{
	if (!aggregating) {
		r->nr_accesses = damon_nr_accesses_for_new_attrs(
				r->nr_accesses, old_attrs, new_attrs);
		r->nr_accesses_bp = r->nr_accesses * 10000;
	} else {
		/*
		 * if this is called in the middle of the aggregation, reset
		 * the aggregations we made so far for this aggregation
		 * interval.  In other words, make the status like
		 * kdamond_reset_aggregated() is called.
		 */
		r->last_nr_accesses = damon_nr_accesses_for_new_attrs(
				r->last_nr_accesses, old_attrs, new_attrs);
		r->nr_accesses_bp = r->last_nr_accesses * 10000;
		r->nr_accesses = 0;
	}
	r->age = damon_age_for_new_attrs(r->age, old_attrs, new_attrs);
}

/*
 * region->nr_accesses is the number of sampling intervals in the last
 * aggregation interval that access to the region has found, and region->age is
 * the number of aggregation intervals that its access pattern has maintained.
 * For the reason, the real meaning of the two fields depend on current
 * sampling interval and aggregation interval.  This function updates
 * ->nr_accesses and ->age of given damon_ctx's regions for new damon_attrs.
 */
static void damon_update_monitoring_results(struct damon_ctx *ctx,
		struct damon_attrs *new_attrs, bool aggregating)
{
	struct damon_attrs *old_attrs = &ctx->attrs;
	struct damon_target *t;
	struct damon_region *r;

	/* if any interval is zero, simply forgive conversion */
	if (!old_attrs->sample_interval || !old_attrs->aggr_interval ||
			!new_attrs->sample_interval ||
			!new_attrs->aggr_interval)
		return;

	damon_for_each_target(t, ctx)
		damon_for_each_region(r, t)
			damon_update_monitoring_result(
					r, old_attrs, new_attrs, aggregating);
}

/*
 * damon_valid_intervals_goal() - return if the intervals goal of @attrs is
 * valid.
 */
static bool damon_valid_intervals_goal(struct damon_attrs *attrs)
{
	struct damon_intervals_goal *goal = &attrs->intervals_goal;

	/* tuning is disabled */
	if (!goal->aggrs)
		return true;
	if (goal->min_sample_us > goal->max_sample_us)
		return false;
	/* current sample_interval must lie inside the tuning range */
	if (attrs->sample_interval < goal->min_sample_us ||
			goal->max_sample_us < attrs->sample_interval)
		return false;
	return true;
}

/**
 * damon_set_attrs() - Set attributes for the monitoring.
 * @ctx:	monitoring context
 * @attrs:	monitoring attributes
 *
 * This function should be called while the kdamond is not running, an access
 * check results aggregation is not ongoing (e.g., from &struct
 * damon_callback->after_aggregation or &struct
 * damon_callback->after_wmarks_check callbacks), or from damon_call().
 *
 * Every time interval is in micro-seconds.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int damon_set_attrs(struct damon_ctx *ctx, struct damon_attrs *attrs)
{
	unsigned long sample_interval = attrs->sample_interval ?
		attrs->sample_interval : 1;
	struct damos *s;
	/* whether an aggregation interval is currently in progress */
	bool aggregating = ctx->passed_sample_intervals <
		ctx->next_aggregation_sis;

	if (!damon_valid_intervals_goal(attrs))
		return -EINVAL;

	if (attrs->min_nr_regions < 3)
		return -EINVAL;
	if (attrs->min_nr_regions > attrs->max_nr_regions)
		return -EINVAL;
	if (attrs->sample_interval > attrs->aggr_interval)
		return -EINVAL;

	/* calls from core-external doesn't set this. */
	if (!attrs->aggr_samples)
		attrs->aggr_samples = attrs->aggr_interval / sample_interval;

	ctx->next_aggregation_sis = ctx->passed_sample_intervals +
		attrs->aggr_interval / sample_interval;
	ctx->next_ops_update_sis = ctx->passed_sample_intervals +
		attrs->ops_update_interval / sample_interval;

	damon_update_monitoring_results(ctx, attrs, aggregating);
	ctx->attrs = *attrs;

	damon_for_each_scheme(s, ctx)
		damos_set_next_apply_sis(s, ctx);

	return 0;
}

/**
 * damon_set_schemes() - Set data access monitoring based operation schemes.
 * @ctx:	monitoring context
 * @schemes:	array of the schemes
 * @nr_schemes:	number of entries in @schemes
 *
 * This function should not be called while the kdamond of the context is
 * running.
 */
void damon_set_schemes(struct damon_ctx *ctx, struct damos **schemes,
		ssize_t nr_schemes)
{
	struct damos *s, *next;
	ssize_t i;

	/* Replace all existing schemes of @ctx with the given ones. */
	damon_for_each_scheme_safe(s, next, ctx)
		damon_destroy_scheme(s);
	for (i = 0; i < nr_schemes; i++)
		damon_add_scheme(ctx, schemes[i]);
}

/* Return the n-th quota goal of @q, or NULL if it has fewer goals. */
static struct damos_quota_goal *damos_nth_quota_goal(
		int n, struct damos_quota *q)
{
	struct damos_quota_goal *goal;
	int i = 0;

	damos_for_each_quota_goal(goal, q) {
		if (i++ == n)
			return goal;
	}
	return NULL;
}

/* Copy user-specified quota goal parameters from @src to @dst. */
static void damos_commit_quota_goal(
		struct damos_quota_goal *dst, struct damos_quota_goal *src)
{
	dst->metric = src->metric;
	dst->target_value = src->target_value;
	if (dst->metric == DAMOS_QUOTA_USER_INPUT)
		dst->current_value = src->current_value;
	/* keep last_psi_total as is, since it will be updated in next cycle */
}

/**
 * damos_commit_quota_goals() - Commit DAMOS quota goals to another quota.
 * @dst:	The commit destination DAMOS quota.
 * @src:	The commit source DAMOS quota.
 *
 * Copies user-specified parameters for quota goals from @src to @dst.  Users
 * should use this function for quota goals-level parameters update of running
 * DAMON contexts, instead of manual in-place updates.
 *
 * This function should be called from parameters-update safe context, like
 * DAMON callbacks.
779 */ 780 int damos_commit_quota_goals(struct damos_quota *dst, struct damos_quota *src) 781 { 782 struct damos_quota_goal *dst_goal, *next, *src_goal, *new_goal; 783 int i = 0, j = 0; 784 785 damos_for_each_quota_goal_safe(dst_goal, next, dst) { 786 src_goal = damos_nth_quota_goal(i++, src); 787 if (src_goal) 788 damos_commit_quota_goal(dst_goal, src_goal); 789 else 790 damos_destroy_quota_goal(dst_goal); 791 } 792 damos_for_each_quota_goal_safe(src_goal, next, src) { 793 if (j++ < i) 794 continue; 795 new_goal = damos_new_quota_goal( 796 src_goal->metric, src_goal->target_value); 797 if (!new_goal) 798 return -ENOMEM; 799 damos_add_quota_goal(dst, new_goal); 800 } 801 return 0; 802 } 803 804 static int damos_commit_quota(struct damos_quota *dst, struct damos_quota *src) 805 { 806 int err; 807 808 dst->reset_interval = src->reset_interval; 809 dst->ms = src->ms; 810 dst->sz = src->sz; 811 err = damos_commit_quota_goals(dst, src); 812 if (err) 813 return err; 814 dst->weight_sz = src->weight_sz; 815 dst->weight_nr_accesses = src->weight_nr_accesses; 816 dst->weight_age = src->weight_age; 817 return 0; 818 } 819 820 static struct damos_filter *damos_nth_filter(int n, struct damos *s) 821 { 822 struct damos_filter *filter; 823 int i = 0; 824 825 damos_for_each_filter(filter, s) { 826 if (i++ == n) 827 return filter; 828 } 829 return NULL; 830 } 831 832 static void damos_commit_filter_arg( 833 struct damos_filter *dst, struct damos_filter *src) 834 { 835 switch (dst->type) { 836 case DAMOS_FILTER_TYPE_MEMCG: 837 dst->memcg_id = src->memcg_id; 838 break; 839 case DAMOS_FILTER_TYPE_ADDR: 840 dst->addr_range = src->addr_range; 841 break; 842 case DAMOS_FILTER_TYPE_TARGET: 843 dst->target_idx = src->target_idx; 844 break; 845 case DAMOS_FILTER_TYPE_HUGEPAGE_SIZE: 846 dst->sz_range = src->sz_range; 847 break; 848 default: 849 break; 850 } 851 } 852 853 static void damos_commit_filter( 854 struct damos_filter *dst, struct damos_filter *src) 855 { 856 dst->type = src->type; 
857 dst->matching = src->matching; 858 damos_commit_filter_arg(dst, src); 859 } 860 861 static int damos_commit_core_filters(struct damos *dst, struct damos *src) 862 { 863 struct damos_filter *dst_filter, *next, *src_filter, *new_filter; 864 int i = 0, j = 0; 865 866 damos_for_each_filter_safe(dst_filter, next, dst) { 867 src_filter = damos_nth_filter(i++, src); 868 if (src_filter) 869 damos_commit_filter(dst_filter, src_filter); 870 else 871 damos_destroy_filter(dst_filter); 872 } 873 874 damos_for_each_filter_safe(src_filter, next, src) { 875 if (j++ < i) 876 continue; 877 878 new_filter = damos_new_filter( 879 src_filter->type, src_filter->matching, 880 src_filter->allow); 881 if (!new_filter) 882 return -ENOMEM; 883 damos_commit_filter_arg(new_filter, src_filter); 884 damos_add_filter(dst, new_filter); 885 } 886 return 0; 887 } 888 889 static int damos_commit_ops_filters(struct damos *dst, struct damos *src) 890 { 891 struct damos_filter *dst_filter, *next, *src_filter, *new_filter; 892 int i = 0, j = 0; 893 894 damos_for_each_ops_filter_safe(dst_filter, next, dst) { 895 src_filter = damos_nth_filter(i++, src); 896 if (src_filter) 897 damos_commit_filter(dst_filter, src_filter); 898 else 899 damos_destroy_filter(dst_filter); 900 } 901 902 damos_for_each_ops_filter_safe(src_filter, next, src) { 903 if (j++ < i) 904 continue; 905 906 new_filter = damos_new_filter( 907 src_filter->type, src_filter->matching, 908 src_filter->allow); 909 if (!new_filter) 910 return -ENOMEM; 911 damos_commit_filter_arg(new_filter, src_filter); 912 damos_add_filter(dst, new_filter); 913 } 914 return 0; 915 } 916 917 /** 918 * damos_filters_default_reject() - decide whether to reject memory that didn't 919 * match with any given filter. 920 * @filters: Given DAMOS filters of a group. 
921 */ 922 static bool damos_filters_default_reject(struct list_head *filters) 923 { 924 struct damos_filter *last_filter; 925 926 if (list_empty(filters)) 927 return false; 928 last_filter = list_last_entry(filters, struct damos_filter, list); 929 return last_filter->allow; 930 } 931 932 static void damos_set_filters_default_reject(struct damos *s) 933 { 934 if (!list_empty(&s->ops_filters)) 935 s->core_filters_default_reject = false; 936 else 937 s->core_filters_default_reject = 938 damos_filters_default_reject(&s->filters); 939 s->ops_filters_default_reject = 940 damos_filters_default_reject(&s->ops_filters); 941 } 942 943 static int damos_commit_filters(struct damos *dst, struct damos *src) 944 { 945 int err; 946 947 err = damos_commit_core_filters(dst, src); 948 if (err) 949 return err; 950 err = damos_commit_ops_filters(dst, src); 951 if (err) 952 return err; 953 damos_set_filters_default_reject(dst); 954 return 0; 955 } 956 957 static struct damos *damon_nth_scheme(int n, struct damon_ctx *ctx) 958 { 959 struct damos *s; 960 int i = 0; 961 962 damon_for_each_scheme(s, ctx) { 963 if (i++ == n) 964 return s; 965 } 966 return NULL; 967 } 968 969 static int damos_commit(struct damos *dst, struct damos *src) 970 { 971 int err; 972 973 dst->pattern = src->pattern; 974 dst->action = src->action; 975 dst->apply_interval_us = src->apply_interval_us; 976 977 err = damos_commit_quota(&dst->quota, &src->quota); 978 if (err) 979 return err; 980 981 dst->wmarks = src->wmarks; 982 983 err = damos_commit_filters(dst, src); 984 return err; 985 } 986 987 static int damon_commit_schemes(struct damon_ctx *dst, struct damon_ctx *src) 988 { 989 struct damos *dst_scheme, *next, *src_scheme, *new_scheme; 990 int i = 0, j = 0, err; 991 992 damon_for_each_scheme_safe(dst_scheme, next, dst) { 993 src_scheme = damon_nth_scheme(i++, src); 994 if (src_scheme) { 995 err = damos_commit(dst_scheme, src_scheme); 996 if (err) 997 return err; 998 } else { 999 damon_destroy_scheme(dst_scheme); 
1000 } 1001 } 1002 1003 damon_for_each_scheme_safe(src_scheme, next, src) { 1004 if (j++ < i) 1005 continue; 1006 new_scheme = damon_new_scheme(&src_scheme->pattern, 1007 src_scheme->action, 1008 src_scheme->apply_interval_us, 1009 &src_scheme->quota, &src_scheme->wmarks, 1010 NUMA_NO_NODE); 1011 if (!new_scheme) 1012 return -ENOMEM; 1013 err = damos_commit(new_scheme, src_scheme); 1014 if (err) { 1015 damon_destroy_scheme(new_scheme); 1016 return err; 1017 } 1018 damon_add_scheme(dst, new_scheme); 1019 } 1020 return 0; 1021 } 1022 1023 static struct damon_target *damon_nth_target(int n, struct damon_ctx *ctx) 1024 { 1025 struct damon_target *t; 1026 int i = 0; 1027 1028 damon_for_each_target(t, ctx) { 1029 if (i++ == n) 1030 return t; 1031 } 1032 return NULL; 1033 } 1034 1035 /* 1036 * The caller should ensure the regions of @src are 1037 * 1. valid (end >= src) and 1038 * 2. sorted by starting address. 1039 * 1040 * If @src has no region, @dst keeps current regions. 1041 */ 1042 static int damon_commit_target_regions( 1043 struct damon_target *dst, struct damon_target *src) 1044 { 1045 struct damon_region *src_region; 1046 struct damon_addr_range *ranges; 1047 int i = 0, err; 1048 1049 damon_for_each_region(src_region, src) 1050 i++; 1051 if (!i) 1052 return 0; 1053 1054 ranges = kmalloc_array(i, sizeof(*ranges), GFP_KERNEL | __GFP_NOWARN); 1055 if (!ranges) 1056 return -ENOMEM; 1057 i = 0; 1058 damon_for_each_region(src_region, src) 1059 ranges[i++] = src_region->ar; 1060 err = damon_set_regions(dst, ranges, i); 1061 kfree(ranges); 1062 return err; 1063 } 1064 1065 static int damon_commit_target( 1066 struct damon_target *dst, bool dst_has_pid, 1067 struct damon_target *src, bool src_has_pid) 1068 { 1069 int err; 1070 1071 err = damon_commit_target_regions(dst, src); 1072 if (err) 1073 return err; 1074 if (dst_has_pid) 1075 put_pid(dst->pid); 1076 if (src_has_pid) 1077 get_pid(src->pid); 1078 dst->pid = src->pid; 1079 return 0; 1080 } 1081 1082 static int 
damon_commit_targets( 1083 struct damon_ctx *dst, struct damon_ctx *src) 1084 { 1085 struct damon_target *dst_target, *next, *src_target, *new_target; 1086 int i = 0, j = 0, err; 1087 1088 damon_for_each_target_safe(dst_target, next, dst) { 1089 src_target = damon_nth_target(i++, src); 1090 if (src_target) { 1091 err = damon_commit_target( 1092 dst_target, damon_target_has_pid(dst), 1093 src_target, damon_target_has_pid(src)); 1094 if (err) 1095 return err; 1096 } else { 1097 if (damon_target_has_pid(dst)) 1098 put_pid(dst_target->pid); 1099 damon_destroy_target(dst_target); 1100 } 1101 } 1102 1103 damon_for_each_target_safe(src_target, next, src) { 1104 if (j++ < i) 1105 continue; 1106 new_target = damon_new_target(); 1107 if (!new_target) 1108 return -ENOMEM; 1109 err = damon_commit_target(new_target, false, 1110 src_target, damon_target_has_pid(src)); 1111 if (err) { 1112 damon_destroy_target(new_target); 1113 return err; 1114 } 1115 damon_add_target(dst, new_target); 1116 } 1117 return 0; 1118 } 1119 1120 /** 1121 * damon_commit_ctx() - Commit parameters of a DAMON context to another. 1122 * @dst: The commit destination DAMON context. 1123 * @src: The commit source DAMON context. 1124 * 1125 * This function copies user-specified parameters from @src to @dst and update 1126 * the internal status and results accordingly. Users should use this function 1127 * for context-level parameters update of running context, instead of manual 1128 * in-place updates. 1129 * 1130 * This function should be called from parameters-update safe context, like 1131 * DAMON callbacks. 1132 */ 1133 int damon_commit_ctx(struct damon_ctx *dst, struct damon_ctx *src) 1134 { 1135 int err; 1136 1137 err = damon_commit_schemes(dst, src); 1138 if (err) 1139 return err; 1140 err = damon_commit_targets(dst, src); 1141 if (err) 1142 return err; 1143 /* 1144 * schemes and targets should be updated first, since 1145 * 1. 
damon_set_attrs() updates monitoring results of targets and 1146 * next_apply_sis of schemes, and 1147 * 2. ops update should be done after pid handling is done (target 1148 * committing require putting pids). 1149 */ 1150 err = damon_set_attrs(dst, &src->attrs); 1151 if (err) 1152 return err; 1153 dst->ops = src->ops; 1154 1155 return 0; 1156 } 1157 1158 /** 1159 * damon_nr_running_ctxs() - Return number of currently running contexts. 1160 */ 1161 int damon_nr_running_ctxs(void) 1162 { 1163 int nr_ctxs; 1164 1165 mutex_lock(&damon_lock); 1166 nr_ctxs = nr_running_ctxs; 1167 mutex_unlock(&damon_lock); 1168 1169 return nr_ctxs; 1170 } 1171 1172 /* Returns the size upper limit for each monitoring region */ 1173 static unsigned long damon_region_sz_limit(struct damon_ctx *ctx) 1174 { 1175 struct damon_target *t; 1176 struct damon_region *r; 1177 unsigned long sz = 0; 1178 1179 damon_for_each_target(t, ctx) { 1180 damon_for_each_region(r, t) 1181 sz += damon_sz_region(r); 1182 } 1183 1184 if (ctx->attrs.min_nr_regions) 1185 sz /= ctx->attrs.min_nr_regions; 1186 if (sz < DAMON_MIN_REGION) 1187 sz = DAMON_MIN_REGION; 1188 1189 return sz; 1190 } 1191 1192 static int kdamond_fn(void *data); 1193 1194 /* 1195 * __damon_start() - Starts monitoring with given context. 1196 * @ctx: monitoring context 1197 * 1198 * This function should be called while damon_lock is hold. 1199 * 1200 * Return: 0 on success, negative error code otherwise. 
1201 */ 1202 static int __damon_start(struct damon_ctx *ctx) 1203 { 1204 int err = -EBUSY; 1205 1206 mutex_lock(&ctx->kdamond_lock); 1207 if (!ctx->kdamond) { 1208 err = 0; 1209 reinit_completion(&ctx->kdamond_started); 1210 ctx->kdamond = kthread_run(kdamond_fn, ctx, "kdamond.%d", 1211 nr_running_ctxs); 1212 if (IS_ERR(ctx->kdamond)) { 1213 err = PTR_ERR(ctx->kdamond); 1214 ctx->kdamond = NULL; 1215 } else { 1216 wait_for_completion(&ctx->kdamond_started); 1217 } 1218 } 1219 mutex_unlock(&ctx->kdamond_lock); 1220 1221 return err; 1222 } 1223 1224 /** 1225 * damon_start() - Starts the monitorings for a given group of contexts. 1226 * @ctxs: an array of the pointers for contexts to start monitoring 1227 * @nr_ctxs: size of @ctxs 1228 * @exclusive: exclusiveness of this contexts group 1229 * 1230 * This function starts a group of monitoring threads for a group of monitoring 1231 * contexts. One thread per each context is created and run in parallel. The 1232 * caller should handle synchronization between the threads by itself. If 1233 * @exclusive is true and a group of threads that created by other 1234 * 'damon_start()' call is currently running, this function does nothing but 1235 * returns -EBUSY. 1236 * 1237 * Return: 0 on success, negative error code otherwise. 1238 */ 1239 int damon_start(struct damon_ctx **ctxs, int nr_ctxs, bool exclusive) 1240 { 1241 int i; 1242 int err = 0; 1243 1244 mutex_lock(&damon_lock); 1245 if ((exclusive && nr_running_ctxs) || 1246 (!exclusive && running_exclusive_ctxs)) { 1247 mutex_unlock(&damon_lock); 1248 return -EBUSY; 1249 } 1250 1251 for (i = 0; i < nr_ctxs; i++) { 1252 err = __damon_start(ctxs[i]); 1253 if (err) 1254 break; 1255 nr_running_ctxs++; 1256 } 1257 if (exclusive && nr_running_ctxs) 1258 running_exclusive_ctxs = true; 1259 mutex_unlock(&damon_lock); 1260 1261 return err; 1262 } 1263 1264 /* 1265 * __damon_stop() - Stops monitoring of a given context. 
1266 * @ctx: monitoring context 1267 * 1268 * Return: 0 on success, negative error code otherwise. 1269 */ 1270 static int __damon_stop(struct damon_ctx *ctx) 1271 { 1272 struct task_struct *tsk; 1273 1274 mutex_lock(&ctx->kdamond_lock); 1275 tsk = ctx->kdamond; 1276 if (tsk) { 1277 get_task_struct(tsk); 1278 mutex_unlock(&ctx->kdamond_lock); 1279 kthread_stop_put(tsk); 1280 return 0; 1281 } 1282 mutex_unlock(&ctx->kdamond_lock); 1283 1284 return -EPERM; 1285 } 1286 1287 /** 1288 * damon_stop() - Stops the monitorings for a given group of contexts. 1289 * @ctxs: an array of the pointers for contexts to stop monitoring 1290 * @nr_ctxs: size of @ctxs 1291 * 1292 * Return: 0 on success, negative error code otherwise. 1293 */ 1294 int damon_stop(struct damon_ctx **ctxs, int nr_ctxs) 1295 { 1296 int i, err = 0; 1297 1298 for (i = 0; i < nr_ctxs; i++) { 1299 /* nr_running_ctxs is decremented in kdamond_fn */ 1300 err = __damon_stop(ctxs[i]); 1301 if (err) 1302 break; 1303 } 1304 return err; 1305 } 1306 1307 static bool damon_is_running(struct damon_ctx *ctx) 1308 { 1309 bool running; 1310 1311 mutex_lock(&ctx->kdamond_lock); 1312 running = ctx->kdamond != NULL; 1313 mutex_unlock(&ctx->kdamond_lock); 1314 return running; 1315 } 1316 1317 /** 1318 * damon_call() - Invoke a given function on DAMON worker thread (kdamond). 1319 * @ctx: DAMON context to call the function for. 1320 * @control: Control variable of the call request. 1321 * 1322 * Ask DAMON worker thread (kdamond) of @ctx to call a function with an 1323 * argument data that respectively passed via &damon_call_control->fn and 1324 * &damon_call_control->data of @control, and wait until the kdamond finishes 1325 * handling of the request. 1326 * 1327 * The kdamond executes the function with the argument in the main loop, just 1328 * after a sampling of the iteration is finished. The function can hence 1329 * safely access the internal data of the &struct damon_ctx without additional 1330 * synchronization. 
The return value of the function will be saved in 1331 * &damon_call_control->return_code. 1332 * 1333 * Return: 0 on success, negative error code otherwise. 1334 */ 1335 int damon_call(struct damon_ctx *ctx, struct damon_call_control *control) 1336 { 1337 init_completion(&control->completion); 1338 control->canceled = false; 1339 1340 mutex_lock(&ctx->call_control_lock); 1341 if (ctx->call_control) { 1342 mutex_unlock(&ctx->call_control_lock); 1343 return -EBUSY; 1344 } 1345 ctx->call_control = control; 1346 mutex_unlock(&ctx->call_control_lock); 1347 if (!damon_is_running(ctx)) 1348 return -EINVAL; 1349 wait_for_completion(&control->completion); 1350 if (control->canceled) 1351 return -ECANCELED; 1352 return 0; 1353 } 1354 1355 /** 1356 * damos_walk() - Invoke a given functions while DAMOS walk regions. 1357 * @ctx: DAMON context to call the functions for. 1358 * @control: Control variable of the walk request. 1359 * 1360 * Ask DAMON worker thread (kdamond) of @ctx to call a function for each region 1361 * that the kdamond will apply DAMOS action to, and wait until the kdamond 1362 * finishes handling of the request. 1363 * 1364 * The kdamond executes the given function in the main loop, for each region 1365 * just after it applied any DAMOS actions of @ctx to it. The invocation is 1366 * made only within one &damos->apply_interval_us since damos_walk() 1367 * invocation, for each scheme. The given callback function can hence safely 1368 * access the internal data of &struct damon_ctx and &struct damon_region that 1369 * each of the scheme will apply the action for next interval, without 1370 * additional synchronizations against the kdamond. If every scheme of @ctx 1371 * passed at least one &damos->apply_interval_us, kdamond marks the request as 1372 * completed so that damos_walk() can wakeup and return. 1373 * 1374 * Return: 0 on success, negative error code otherwise. 
1375 */ 1376 int damos_walk(struct damon_ctx *ctx, struct damos_walk_control *control) 1377 { 1378 init_completion(&control->completion); 1379 control->canceled = false; 1380 mutex_lock(&ctx->walk_control_lock); 1381 if (ctx->walk_control) { 1382 mutex_unlock(&ctx->walk_control_lock); 1383 return -EBUSY; 1384 } 1385 ctx->walk_control = control; 1386 mutex_unlock(&ctx->walk_control_lock); 1387 if (!damon_is_running(ctx)) 1388 return -EINVAL; 1389 wait_for_completion(&control->completion); 1390 if (control->canceled) 1391 return -ECANCELED; 1392 return 0; 1393 } 1394 1395 /* 1396 * Reset the aggregated monitoring results ('nr_accesses' of each region). 1397 */ 1398 static void kdamond_reset_aggregated(struct damon_ctx *c) 1399 { 1400 struct damon_target *t; 1401 unsigned int ti = 0; /* target's index */ 1402 1403 damon_for_each_target(t, c) { 1404 struct damon_region *r; 1405 1406 damon_for_each_region(r, t) { 1407 trace_damon_aggregated(ti, r, damon_nr_regions(t)); 1408 r->last_nr_accesses = r->nr_accesses; 1409 r->nr_accesses = 0; 1410 } 1411 ti++; 1412 } 1413 } 1414 1415 static unsigned long damon_get_intervals_score(struct damon_ctx *c) 1416 { 1417 struct damon_target *t; 1418 struct damon_region *r; 1419 unsigned long sz_region, max_access_events = 0, access_events = 0; 1420 unsigned long target_access_events; 1421 unsigned long goal_bp = c->attrs.intervals_goal.access_bp; 1422 1423 damon_for_each_target(t, c) { 1424 damon_for_each_region(r, t) { 1425 sz_region = damon_sz_region(r); 1426 max_access_events += sz_region * c->attrs.aggr_samples; 1427 access_events += sz_region * r->nr_accesses; 1428 } 1429 } 1430 target_access_events = max_access_events * goal_bp / 10000; 1431 return access_events * 10000 / target_access_events; 1432 } 1433 1434 static unsigned long damon_feed_loop_next_input(unsigned long last_input, 1435 unsigned long score); 1436 1437 static unsigned long damon_get_intervals_adaptation_bp(struct damon_ctx *c) 1438 { 1439 unsigned long score_bp, 
adaptation_bp; 1440 1441 score_bp = damon_get_intervals_score(c); 1442 adaptation_bp = damon_feed_loop_next_input(100000000, score_bp) / 1443 10000; 1444 /* 1445 * adaptaion_bp ranges from 1 to 20,000. Avoid too rapid reduction of 1446 * the intervals by rescaling [1,10,000] to [5000, 10,000]. 1447 */ 1448 if (adaptation_bp <= 10000) 1449 adaptation_bp = 5000 + adaptation_bp / 2; 1450 return adaptation_bp; 1451 } 1452 1453 static void kdamond_tune_intervals(struct damon_ctx *c) 1454 { 1455 unsigned long adaptation_bp; 1456 struct damon_attrs new_attrs; 1457 struct damon_intervals_goal *goal; 1458 1459 adaptation_bp = damon_get_intervals_adaptation_bp(c); 1460 if (adaptation_bp == 10000) 1461 return; 1462 1463 new_attrs = c->attrs; 1464 goal = &c->attrs.intervals_goal; 1465 new_attrs.sample_interval = min(goal->max_sample_us, 1466 c->attrs.sample_interval * adaptation_bp / 10000); 1467 new_attrs.sample_interval = max(goal->min_sample_us, 1468 new_attrs.sample_interval); 1469 new_attrs.aggr_interval = new_attrs.sample_interval * 1470 c->attrs.aggr_samples; 1471 damon_set_attrs(c, &new_attrs); 1472 } 1473 1474 static void damon_split_region_at(struct damon_target *t, 1475 struct damon_region *r, unsigned long sz_r); 1476 1477 static bool __damos_valid_target(struct damon_region *r, struct damos *s) 1478 { 1479 unsigned long sz; 1480 unsigned int nr_accesses = r->nr_accesses_bp / 10000; 1481 1482 sz = damon_sz_region(r); 1483 return s->pattern.min_sz_region <= sz && 1484 sz <= s->pattern.max_sz_region && 1485 s->pattern.min_nr_accesses <= nr_accesses && 1486 nr_accesses <= s->pattern.max_nr_accesses && 1487 s->pattern.min_age_region <= r->age && 1488 r->age <= s->pattern.max_age_region; 1489 } 1490 1491 static bool damos_valid_target(struct damon_ctx *c, struct damon_target *t, 1492 struct damon_region *r, struct damos *s) 1493 { 1494 bool ret = __damos_valid_target(r, s); 1495 1496 if (!ret || !s->quota.esz || !c->ops.get_scheme_score) 1497 return ret; 1498 1499 
return c->ops.get_scheme_score(c, t, r, s) >= s->quota.min_score; 1500 } 1501 1502 /* 1503 * damos_skip_charged_region() - Check if the given region or starting part of 1504 * it is already charged for the DAMOS quota. 1505 * @t: The target of the region. 1506 * @rp: The pointer to the region. 1507 * @s: The scheme to be applied. 1508 * 1509 * If a quota of a scheme has exceeded in a quota charge window, the scheme's 1510 * action would applied to only a part of the target access pattern fulfilling 1511 * regions. To avoid applying the scheme action to only already applied 1512 * regions, DAMON skips applying the scheme action to the regions that charged 1513 * in the previous charge window. 1514 * 1515 * This function checks if a given region should be skipped or not for the 1516 * reason. If only the starting part of the region has previously charged, 1517 * this function splits the region into two so that the second one covers the 1518 * area that not charged in the previous charge widnow and saves the second 1519 * region in *rp and returns false, so that the caller can apply DAMON action 1520 * to the second one. 1521 * 1522 * Return: true if the region should be entirely skipped, false otherwise. 
1523 */ 1524 static bool damos_skip_charged_region(struct damon_target *t, 1525 struct damon_region **rp, struct damos *s) 1526 { 1527 struct damon_region *r = *rp; 1528 struct damos_quota *quota = &s->quota; 1529 unsigned long sz_to_skip; 1530 1531 /* Skip previously charged regions */ 1532 if (quota->charge_target_from) { 1533 if (t != quota->charge_target_from) 1534 return true; 1535 if (r == damon_last_region(t)) { 1536 quota->charge_target_from = NULL; 1537 quota->charge_addr_from = 0; 1538 return true; 1539 } 1540 if (quota->charge_addr_from && 1541 r->ar.end <= quota->charge_addr_from) 1542 return true; 1543 1544 if (quota->charge_addr_from && r->ar.start < 1545 quota->charge_addr_from) { 1546 sz_to_skip = ALIGN_DOWN(quota->charge_addr_from - 1547 r->ar.start, DAMON_MIN_REGION); 1548 if (!sz_to_skip) { 1549 if (damon_sz_region(r) <= DAMON_MIN_REGION) 1550 return true; 1551 sz_to_skip = DAMON_MIN_REGION; 1552 } 1553 damon_split_region_at(t, r, sz_to_skip); 1554 r = damon_next_region(r); 1555 *rp = r; 1556 } 1557 quota->charge_target_from = NULL; 1558 quota->charge_addr_from = 0; 1559 } 1560 return false; 1561 } 1562 1563 static void damos_update_stat(struct damos *s, 1564 unsigned long sz_tried, unsigned long sz_applied, 1565 unsigned long sz_ops_filter_passed) 1566 { 1567 s->stat.nr_tried++; 1568 s->stat.sz_tried += sz_tried; 1569 if (sz_applied) 1570 s->stat.nr_applied++; 1571 s->stat.sz_applied += sz_applied; 1572 s->stat.sz_ops_filter_passed += sz_ops_filter_passed; 1573 } 1574 1575 static bool damos_filter_match(struct damon_ctx *ctx, struct damon_target *t, 1576 struct damon_region *r, struct damos_filter *filter) 1577 { 1578 bool matched = false; 1579 struct damon_target *ti; 1580 int target_idx = 0; 1581 unsigned long start, end; 1582 1583 switch (filter->type) { 1584 case DAMOS_FILTER_TYPE_TARGET: 1585 damon_for_each_target(ti, ctx) { 1586 if (ti == t) 1587 break; 1588 target_idx++; 1589 } 1590 matched = target_idx == filter->target_idx; 1591 break; 
1592 case DAMOS_FILTER_TYPE_ADDR: 1593 start = ALIGN_DOWN(filter->addr_range.start, DAMON_MIN_REGION); 1594 end = ALIGN_DOWN(filter->addr_range.end, DAMON_MIN_REGION); 1595 1596 /* inside the range */ 1597 if (start <= r->ar.start && r->ar.end <= end) { 1598 matched = true; 1599 break; 1600 } 1601 /* outside of the range */ 1602 if (r->ar.end <= start || end <= r->ar.start) { 1603 matched = false; 1604 break; 1605 } 1606 /* start before the range and overlap */ 1607 if (r->ar.start < start) { 1608 damon_split_region_at(t, r, start - r->ar.start); 1609 matched = false; 1610 break; 1611 } 1612 /* start inside the range */ 1613 damon_split_region_at(t, r, end - r->ar.start); 1614 matched = true; 1615 break; 1616 default: 1617 return false; 1618 } 1619 1620 return matched == filter->matching; 1621 } 1622 1623 static bool damos_filter_out(struct damon_ctx *ctx, struct damon_target *t, 1624 struct damon_region *r, struct damos *s) 1625 { 1626 struct damos_filter *filter; 1627 1628 s->core_filters_allowed = false; 1629 damos_for_each_filter(filter, s) { 1630 if (damos_filter_match(ctx, t, r, filter)) { 1631 if (filter->allow) 1632 s->core_filters_allowed = true; 1633 return !filter->allow; 1634 } 1635 } 1636 return s->core_filters_default_reject; 1637 } 1638 1639 /* 1640 * damos_walk_call_walk() - Call &damos_walk_control->walk_fn. 1641 * @ctx: The context of &damon_ctx->walk_control. 1642 * @t: The monitoring target of @r that @s will be applied. 1643 * @r: The region of @t that @s will be applied. 1644 * @s: The scheme of @ctx that will be applied to @r. 1645 * 1646 * This function is called from kdamond whenever it asked the operation set to 1647 * apply a DAMOS scheme action to a region. If a DAMOS walk request is 1648 * installed by damos_walk() and not yet uninstalled, invoke it. 
1649 */ 1650 static void damos_walk_call_walk(struct damon_ctx *ctx, struct damon_target *t, 1651 struct damon_region *r, struct damos *s, 1652 unsigned long sz_filter_passed) 1653 { 1654 struct damos_walk_control *control; 1655 1656 if (s->walk_completed) 1657 return; 1658 1659 control = ctx->walk_control; 1660 if (!control) 1661 return; 1662 1663 control->walk_fn(control->data, ctx, t, r, s, sz_filter_passed); 1664 } 1665 1666 /* 1667 * damos_walk_complete() - Complete DAMOS walk request if all walks are done. 1668 * @ctx: The context of &damon_ctx->walk_control. 1669 * @s: A scheme of @ctx that all walks are now done. 1670 * 1671 * This function is called when kdamond finished applying the action of a DAMOS 1672 * scheme to all regions that eligible for the given &damos->apply_interval_us. 1673 * If every scheme of @ctx including @s now finished walking for at least one 1674 * &damos->apply_interval_us, this function makrs the handling of the given 1675 * DAMOS walk request is done, so that damos_walk() can wake up and return. 1676 */ 1677 static void damos_walk_complete(struct damon_ctx *ctx, struct damos *s) 1678 { 1679 struct damos *siter; 1680 struct damos_walk_control *control; 1681 1682 control = ctx->walk_control; 1683 if (!control) 1684 return; 1685 1686 s->walk_completed = true; 1687 /* if all schemes completed, signal completion to walker */ 1688 damon_for_each_scheme(siter, ctx) { 1689 if (!siter->walk_completed) 1690 return; 1691 } 1692 damon_for_each_scheme(siter, ctx) 1693 siter->walk_completed = false; 1694 1695 complete(&control->completion); 1696 ctx->walk_control = NULL; 1697 } 1698 1699 /* 1700 * damos_walk_cancel() - Cancel the current DAMOS walk request. 1701 * @ctx: The context of &damon_ctx->walk_control. 
1702 * 1703 * This function is called when @ctx is deactivated by DAMOS watermarks, DAMOS 1704 * walk is requested but there is no DAMOS scheme to walk for, or the kdamond 1705 * is already out of the main loop and therefore gonna be terminated, and hence 1706 * cannot continue the walks. This function therefore marks the walk request 1707 * as canceled, so that damos_walk() can wake up and return. 1708 */ 1709 static void damos_walk_cancel(struct damon_ctx *ctx) 1710 { 1711 struct damos_walk_control *control; 1712 1713 mutex_lock(&ctx->walk_control_lock); 1714 control = ctx->walk_control; 1715 mutex_unlock(&ctx->walk_control_lock); 1716 1717 if (!control) 1718 return; 1719 control->canceled = true; 1720 complete(&control->completion); 1721 mutex_lock(&ctx->walk_control_lock); 1722 ctx->walk_control = NULL; 1723 mutex_unlock(&ctx->walk_control_lock); 1724 } 1725 1726 static void damos_apply_scheme(struct damon_ctx *c, struct damon_target *t, 1727 struct damon_region *r, struct damos *s) 1728 { 1729 struct damos_quota *quota = &s->quota; 1730 unsigned long sz = damon_sz_region(r); 1731 struct timespec64 begin, end; 1732 unsigned long sz_applied = 0; 1733 unsigned long sz_ops_filter_passed = 0; 1734 /* 1735 * We plan to support multiple context per kdamond, as DAMON sysfs 1736 * implies with 'nr_contexts' file. Nevertheless, only single context 1737 * per kdamond is supported for now. So, we can simply use '0' context 1738 * index here. 
1739 */ 1740 unsigned int cidx = 0; 1741 struct damos *siter; /* schemes iterator */ 1742 unsigned int sidx = 0; 1743 struct damon_target *titer; /* targets iterator */ 1744 unsigned int tidx = 0; 1745 bool do_trace = false; 1746 1747 /* get indices for trace_damos_before_apply() */ 1748 if (trace_damos_before_apply_enabled()) { 1749 damon_for_each_scheme(siter, c) { 1750 if (siter == s) 1751 break; 1752 sidx++; 1753 } 1754 damon_for_each_target(titer, c) { 1755 if (titer == t) 1756 break; 1757 tidx++; 1758 } 1759 do_trace = true; 1760 } 1761 1762 if (c->ops.apply_scheme) { 1763 if (quota->esz && quota->charged_sz + sz > quota->esz) { 1764 sz = ALIGN_DOWN(quota->esz - quota->charged_sz, 1765 DAMON_MIN_REGION); 1766 if (!sz) 1767 goto update_stat; 1768 damon_split_region_at(t, r, sz); 1769 } 1770 if (damos_filter_out(c, t, r, s)) 1771 return; 1772 ktime_get_coarse_ts64(&begin); 1773 trace_damos_before_apply(cidx, sidx, tidx, r, 1774 damon_nr_regions(t), do_trace); 1775 sz_applied = c->ops.apply_scheme(c, t, r, s, 1776 &sz_ops_filter_passed); 1777 damos_walk_call_walk(c, t, r, s, sz_ops_filter_passed); 1778 ktime_get_coarse_ts64(&end); 1779 quota->total_charged_ns += timespec64_to_ns(&end) - 1780 timespec64_to_ns(&begin); 1781 quota->charged_sz += sz; 1782 if (quota->esz && quota->charged_sz >= quota->esz) { 1783 quota->charge_target_from = t; 1784 quota->charge_addr_from = r->ar.end + 1; 1785 } 1786 } 1787 if (s->action != DAMOS_STAT) 1788 r->age = 0; 1789 1790 update_stat: 1791 damos_update_stat(s, sz, sz_applied, sz_ops_filter_passed); 1792 } 1793 1794 static void damon_do_apply_schemes(struct damon_ctx *c, 1795 struct damon_target *t, 1796 struct damon_region *r) 1797 { 1798 struct damos *s; 1799 1800 damon_for_each_scheme(s, c) { 1801 struct damos_quota *quota = &s->quota; 1802 1803 if (c->passed_sample_intervals < s->next_apply_sis) 1804 continue; 1805 1806 if (!s->wmarks.activated) 1807 continue; 1808 1809 /* Check the quota */ 1810 if (quota->esz && 
quota->charged_sz >= quota->esz) 1811 continue; 1812 1813 if (damos_skip_charged_region(t, &r, s)) 1814 continue; 1815 1816 if (!damos_valid_target(c, t, r, s)) 1817 continue; 1818 1819 damos_apply_scheme(c, t, r, s); 1820 } 1821 } 1822 1823 /* 1824 * damon_feed_loop_next_input() - get next input to achieve a target score. 1825 * @last_input The last input. 1826 * @score Current score that made with @last_input. 1827 * 1828 * Calculate next input to achieve the target score, based on the last input 1829 * and current score. Assuming the input and the score are positively 1830 * proportional, calculate how much compensation should be added to or 1831 * subtracted from the last input as a proportion of the last input. Avoid 1832 * next input always being zero by setting it non-zero always. In short form 1833 * (assuming support of float and signed calculations), the algorithm is as 1834 * below. 1835 * 1836 * next_input = max(last_input * ((goal - current) / goal + 1), 1) 1837 * 1838 * For simple implementation, we assume the target score is always 10,000. The 1839 * caller should adjust @score for this. 1840 * 1841 * Returns next input that assumed to achieve the target score. 
1842 */ 1843 static unsigned long damon_feed_loop_next_input(unsigned long last_input, 1844 unsigned long score) 1845 { 1846 const unsigned long goal = 10000; 1847 /* Set minimum input as 10000 to avoid compensation be zero */ 1848 const unsigned long min_input = 10000; 1849 unsigned long score_goal_diff, compensation; 1850 bool over_achieving = score > goal; 1851 1852 if (score == goal) 1853 return last_input; 1854 if (score >= goal * 2) 1855 return min_input; 1856 1857 if (over_achieving) 1858 score_goal_diff = score - goal; 1859 else 1860 score_goal_diff = goal - score; 1861 1862 if (last_input < ULONG_MAX / score_goal_diff) 1863 compensation = last_input * score_goal_diff / goal; 1864 else 1865 compensation = last_input / goal * score_goal_diff; 1866 1867 if (over_achieving) 1868 return max(last_input - compensation, min_input); 1869 if (last_input < ULONG_MAX - compensation) 1870 return last_input + compensation; 1871 return ULONG_MAX; 1872 } 1873 1874 #ifdef CONFIG_PSI 1875 1876 static u64 damos_get_some_mem_psi_total(void) 1877 { 1878 if (static_branch_likely(&psi_disabled)) 1879 return 0; 1880 return div_u64(psi_system.total[PSI_AVGS][PSI_MEM * 2], 1881 NSEC_PER_USEC); 1882 } 1883 1884 #else /* CONFIG_PSI */ 1885 1886 static inline u64 damos_get_some_mem_psi_total(void) 1887 { 1888 return 0; 1889 }; 1890 1891 #endif /* CONFIG_PSI */ 1892 1893 static void damos_set_quota_goal_current_value(struct damos_quota_goal *goal) 1894 { 1895 u64 now_psi_total; 1896 1897 switch (goal->metric) { 1898 case DAMOS_QUOTA_USER_INPUT: 1899 /* User should already set goal->current_value */ 1900 break; 1901 case DAMOS_QUOTA_SOME_MEM_PSI_US: 1902 now_psi_total = damos_get_some_mem_psi_total(); 1903 goal->current_value = now_psi_total - goal->last_psi_total; 1904 goal->last_psi_total = now_psi_total; 1905 break; 1906 default: 1907 break; 1908 } 1909 } 1910 1911 /* Return the highest score since it makes schemes least aggressive */ 1912 static unsigned long 
damos_quota_score(struct damos_quota *quota) 1913 { 1914 struct damos_quota_goal *goal; 1915 unsigned long highest_score = 0; 1916 1917 damos_for_each_quota_goal(goal, quota) { 1918 damos_set_quota_goal_current_value(goal); 1919 highest_score = max(highest_score, 1920 goal->current_value * 10000 / 1921 goal->target_value); 1922 } 1923 1924 return highest_score; 1925 } 1926 1927 /* 1928 * Called only if quota->ms, or quota->sz are set, or quota->goals is not empty 1929 */ 1930 static void damos_set_effective_quota(struct damos_quota *quota) 1931 { 1932 unsigned long throughput; 1933 unsigned long esz = ULONG_MAX; 1934 1935 if (!quota->ms && list_empty("a->goals)) { 1936 quota->esz = quota->sz; 1937 return; 1938 } 1939 1940 if (!list_empty("a->goals)) { 1941 unsigned long score = damos_quota_score(quota); 1942 1943 quota->esz_bp = damon_feed_loop_next_input( 1944 max(quota->esz_bp, 10000UL), 1945 score); 1946 esz = quota->esz_bp / 10000; 1947 } 1948 1949 if (quota->ms) { 1950 if (quota->total_charged_ns) 1951 throughput = quota->total_charged_sz * 1000000 / 1952 quota->total_charged_ns; 1953 else 1954 throughput = PAGE_SIZE * 1024; 1955 esz = min(throughput * quota->ms, esz); 1956 } 1957 1958 if (quota->sz && quota->sz < esz) 1959 esz = quota->sz; 1960 1961 quota->esz = esz; 1962 } 1963 1964 static void damos_adjust_quota(struct damon_ctx *c, struct damos *s) 1965 { 1966 struct damos_quota *quota = &s->quota; 1967 struct damon_target *t; 1968 struct damon_region *r; 1969 unsigned long cumulated_sz; 1970 unsigned int score, max_score = 0; 1971 1972 if (!quota->ms && !quota->sz && list_empty("a->goals)) 1973 return; 1974 1975 /* New charge window starts */ 1976 if (time_after_eq(jiffies, quota->charged_from + 1977 msecs_to_jiffies(quota->reset_interval))) { 1978 if (quota->esz && quota->charged_sz >= quota->esz) 1979 s->stat.qt_exceeds++; 1980 quota->total_charged_sz += quota->charged_sz; 1981 quota->charged_from = jiffies; 1982 quota->charged_sz = 0; 1983 
damos_set_effective_quota(quota); 1984 } 1985 1986 if (!c->ops.get_scheme_score) 1987 return; 1988 1989 /* Fill up the score histogram */ 1990 memset(c->regions_score_histogram, 0, 1991 sizeof(*c->regions_score_histogram) * 1992 (DAMOS_MAX_SCORE + 1)); 1993 damon_for_each_target(t, c) { 1994 damon_for_each_region(r, t) { 1995 if (!__damos_valid_target(r, s)) 1996 continue; 1997 score = c->ops.get_scheme_score(c, t, r, s); 1998 c->regions_score_histogram[score] += 1999 damon_sz_region(r); 2000 if (score > max_score) 2001 max_score = score; 2002 } 2003 } 2004 2005 /* Set the min score limit */ 2006 for (cumulated_sz = 0, score = max_score; ; score--) { 2007 cumulated_sz += c->regions_score_histogram[score]; 2008 if (cumulated_sz >= quota->esz || !score) 2009 break; 2010 } 2011 quota->min_score = score; 2012 } 2013 2014 static void kdamond_apply_schemes(struct damon_ctx *c) 2015 { 2016 struct damon_target *t; 2017 struct damon_region *r, *next_r; 2018 struct damos *s; 2019 unsigned long sample_interval = c->attrs.sample_interval ? 2020 c->attrs.sample_interval : 1; 2021 bool has_schemes_to_apply = false; 2022 2023 damon_for_each_scheme(s, c) { 2024 if (c->passed_sample_intervals < s->next_apply_sis) 2025 continue; 2026 2027 if (!s->wmarks.activated) 2028 continue; 2029 2030 has_schemes_to_apply = true; 2031 2032 damos_adjust_quota(c, s); 2033 } 2034 2035 if (!has_schemes_to_apply) 2036 return; 2037 2038 mutex_lock(&c->walk_control_lock); 2039 damon_for_each_target(t, c) { 2040 damon_for_each_region_safe(r, next_r, t) 2041 damon_do_apply_schemes(c, t, r); 2042 } 2043 2044 damon_for_each_scheme(s, c) { 2045 if (c->passed_sample_intervals < s->next_apply_sis) 2046 continue; 2047 damos_walk_complete(c, s); 2048 s->next_apply_sis = c->passed_sample_intervals + 2049 (s->apply_interval_us ? 
s->apply_interval_us : 2050 c->attrs.aggr_interval) / sample_interval; 2051 s->last_applied = NULL; 2052 } 2053 mutex_unlock(&c->walk_control_lock); 2054 } 2055 2056 /* 2057 * Merge two adjacent regions into one region 2058 */ 2059 static void damon_merge_two_regions(struct damon_target *t, 2060 struct damon_region *l, struct damon_region *r) 2061 { 2062 unsigned long sz_l = damon_sz_region(l), sz_r = damon_sz_region(r); 2063 2064 l->nr_accesses = (l->nr_accesses * sz_l + r->nr_accesses * sz_r) / 2065 (sz_l + sz_r); 2066 l->nr_accesses_bp = l->nr_accesses * 10000; 2067 l->age = (l->age * sz_l + r->age * sz_r) / (sz_l + sz_r); 2068 l->ar.end = r->ar.end; 2069 damon_destroy_region(r, t); 2070 } 2071 2072 /* 2073 * Merge adjacent regions having similar access frequencies 2074 * 2075 * t target affected by this merge operation 2076 * thres '->nr_accesses' diff threshold for the merge 2077 * sz_limit size upper limit of each region 2078 */ 2079 static void damon_merge_regions_of(struct damon_target *t, unsigned int thres, 2080 unsigned long sz_limit) 2081 { 2082 struct damon_region *r, *prev = NULL, *next; 2083 2084 damon_for_each_region_safe(r, next, t) { 2085 if (abs(r->nr_accesses - r->last_nr_accesses) > thres) 2086 r->age = 0; 2087 else 2088 r->age++; 2089 2090 if (prev && prev->ar.end == r->ar.start && 2091 abs(prev->nr_accesses - r->nr_accesses) <= thres && 2092 damon_sz_region(prev) + damon_sz_region(r) <= sz_limit) 2093 damon_merge_two_regions(t, prev, r); 2094 else 2095 prev = r; 2096 } 2097 } 2098 2099 /* 2100 * Merge adjacent regions having similar access frequencies 2101 * 2102 * threshold '->nr_accesses' diff threshold for the merge 2103 * sz_limit size upper limit of each region 2104 * 2105 * This function merges monitoring target regions which are adjacent and their 2106 * access frequencies are similar. This is for minimizing the monitoring 2107 * overhead under the dynamically changeable access pattern. 
If a merge was 2108 * unnecessarily made, later 'kdamond_split_regions()' will revert it. 2109 * 2110 * The total number of regions could be higher than the user-defined limit, 2111 * max_nr_regions for some cases. For example, the user can update 2112 * max_nr_regions to a number that lower than the current number of regions 2113 * while DAMON is running. For such a case, repeat merging until the limit is 2114 * met while increasing @threshold up to possible maximum level. 2115 */ 2116 static void kdamond_merge_regions(struct damon_ctx *c, unsigned int threshold, 2117 unsigned long sz_limit) 2118 { 2119 struct damon_target *t; 2120 unsigned int nr_regions; 2121 unsigned int max_thres; 2122 2123 max_thres = c->attrs.aggr_interval / 2124 (c->attrs.sample_interval ? c->attrs.sample_interval : 1); 2125 do { 2126 nr_regions = 0; 2127 damon_for_each_target(t, c) { 2128 damon_merge_regions_of(t, threshold, sz_limit); 2129 nr_regions += damon_nr_regions(t); 2130 } 2131 threshold = max(1, threshold * 2); 2132 } while (nr_regions > c->attrs.max_nr_regions && 2133 threshold / 2 < max_thres); 2134 } 2135 2136 /* 2137 * Split a region in two 2138 * 2139 * r the region to be split 2140 * sz_r size of the first sub-region that will be made 2141 */ 2142 static void damon_split_region_at(struct damon_target *t, 2143 struct damon_region *r, unsigned long sz_r) 2144 { 2145 struct damon_region *new; 2146 2147 new = damon_new_region(r->ar.start + sz_r, r->ar.end); 2148 if (!new) 2149 return; 2150 2151 r->ar.end = new->ar.start; 2152 2153 new->age = r->age; 2154 new->last_nr_accesses = r->last_nr_accesses; 2155 new->nr_accesses_bp = r->nr_accesses_bp; 2156 new->nr_accesses = r->nr_accesses; 2157 2158 damon_insert_region(new, r, damon_next_region(r), t); 2159 } 2160 2161 /* Split every region in the given target into 'nr_subs' regions */ 2162 static void damon_split_regions_of(struct damon_target *t, int nr_subs) 2163 { 2164 struct damon_region *r, *next; 2165 unsigned long sz_region, 
sz_sub = 0; 2166 int i; 2167 2168 damon_for_each_region_safe(r, next, t) { 2169 sz_region = damon_sz_region(r); 2170 2171 for (i = 0; i < nr_subs - 1 && 2172 sz_region > 2 * DAMON_MIN_REGION; i++) { 2173 /* 2174 * Randomly select size of left sub-region to be at 2175 * least 10 percent and at most 90% of original region 2176 */ 2177 sz_sub = ALIGN_DOWN(damon_rand(1, 10) * 2178 sz_region / 10, DAMON_MIN_REGION); 2179 /* Do not allow blank region */ 2180 if (sz_sub == 0 || sz_sub >= sz_region) 2181 continue; 2182 2183 damon_split_region_at(t, r, sz_sub); 2184 sz_region = sz_sub; 2185 } 2186 } 2187 } 2188 2189 /* 2190 * Split every target region into randomly-sized small regions 2191 * 2192 * This function splits every target region into random-sized small regions if 2193 * current total number of the regions is equal or smaller than half of the 2194 * user-specified maximum number of regions. This is for maximizing the 2195 * monitoring accuracy under the dynamically changeable access patterns. If a 2196 * split was unnecessarily made, later 'kdamond_merge_regions()' will revert 2197 * it. 2198 */ 2199 static void kdamond_split_regions(struct damon_ctx *ctx) 2200 { 2201 struct damon_target *t; 2202 unsigned int nr_regions = 0; 2203 static unsigned int last_nr_regions; 2204 int nr_subregions = 2; 2205 2206 damon_for_each_target(t, ctx) 2207 nr_regions += damon_nr_regions(t); 2208 2209 if (nr_regions > ctx->attrs.max_nr_regions / 2) 2210 return; 2211 2212 /* Maybe the middle of the region has different access frequency */ 2213 if (last_nr_regions == nr_regions && 2214 nr_regions < ctx->attrs.max_nr_regions / 3) 2215 nr_subregions = 3; 2216 2217 damon_for_each_target(t, ctx) 2218 damon_split_regions_of(t, nr_subregions); 2219 2220 last_nr_regions = nr_regions; 2221 } 2222 2223 /* 2224 * Check whether current monitoring should be stopped 2225 * 2226 * The monitoring is stopped when either the user requested to stop, or all 2227 * monitoring targets are invalid. 
 *
 * Returns true if need to stop current monitoring.
 */
static bool kdamond_need_stop(struct damon_ctx *ctx)
{
	struct damon_target *t;

	if (kthread_should_stop())
		return true;

	/* Operations sets without a validity check never invalidate targets */
	if (!ctx->ops.target_valid)
		return false;

	damon_for_each_target(t, ctx) {
		if (ctx->ops.target_valid(t))
			return false;
	}

	/* Every target is invalid; nothing left to monitor */
	return true;
}

/*
 * Get the current value of the given watermark metric.
 *
 * Returns 0 on success, or -EINVAL for unsupported metrics.
 */
static int damos_get_wmark_metric_value(enum damos_wmark_metric metric,
		unsigned long *metric_value)
{
	switch (metric) {
	case DAMOS_WMARK_FREE_MEM_RATE:
		/* free memory rate in per-thousand of total RAM */
		*metric_value = global_zone_page_state(NR_FREE_PAGES) * 1000 /
			totalram_pages();
		return 0;
	default:
		break;
	}
	return -EINVAL;
}

/*
 * Returns zero if the scheme is active.  Else, returns time to wait for next
 * watermark check in micro-seconds.
 */
static unsigned long damos_wmark_wait_us(struct damos *scheme)
{
	unsigned long metric;

	/* Schemes with unsupported metrics are treated as always active */
	if (damos_get_wmark_metric_value(scheme->wmarks.metric, &metric))
		return 0;

	/* higher than high watermark or lower than low watermark */
	if (metric > scheme->wmarks.high || scheme->wmarks.low > metric) {
		if (scheme->wmarks.activated)
			pr_debug("deactivate a scheme (%d) for %s wmark\n",
					scheme->action,
					str_high_low(metric > scheme->wmarks.high));
		scheme->wmarks.activated = false;
		return scheme->wmarks.interval;
	}

	/* inactive and higher than middle watermark */
	if ((scheme->wmarks.high >= metric && metric >= scheme->wmarks.mid) &&
			!scheme->wmarks.activated)
		return scheme->wmarks.interval;

	if (!scheme->wmarks.activated)
		pr_debug("activate a scheme (%d)\n", scheme->action);
	scheme->wmarks.activated = true;
	return 0;
}

/* Sleep for the given number of microseconds, in CPU-idle mode */
static void kdamond_usleep(unsigned long usecs)
{
	/* Long sleeps need no precision; use the coarse jiffies-based sleep */
	if (usecs >= USLEEP_RANGE_UPPER_BOUND)
		schedule_timeout_idle(usecs_to_jiffies(usecs));
	else
		usleep_range_idle(usecs, usecs + 1);
}

/*
 * kdamond_call() - handle damon_call_control.
 * @ctx:	The &struct damon_ctx of the kdamond.
 * @cancel:	Whether to cancel the invocation of the function.
 *
 * If there is a &struct damon_call_control request that registered via
 * &damon_call() on @ctx, do or cancel the invocation of the function depending
 * on @cancel.  @cancel is set when the kdamond is deactivated by DAMOS
 * watermarks, or the kdamond is already out of the main loop and therefore
 * will be terminated.
 */
static void kdamond_call(struct damon_ctx *ctx, bool cancel)
{
	struct damon_call_control *control;
	int ret = 0;

	/* Snapshot the request under the lock; run the callback unlocked */
	mutex_lock(&ctx->call_control_lock);
	control = ctx->call_control;
	mutex_unlock(&ctx->call_control_lock);
	if (!control)
		return;
	if (cancel) {
		control->canceled = true;
	} else {
		ret = control->fn(control->data);
		control->return_code = ret;
	}
	/* Wake up the requester, then clear the request slot */
	complete(&control->completion);
	mutex_lock(&ctx->call_control_lock);
	ctx->call_control = NULL;
	mutex_unlock(&ctx->call_control_lock);
}

/* Returns negative error code if it's not activated but should return */
static int kdamond_wait_activation(struct damon_ctx *ctx)
{
	struct damos *s;
	unsigned long wait_time;
	unsigned long min_wait_time = 0;
	bool init_wait_time = false;

	/* Poll until any scheme's watermarks allow activation */
	while (!kdamond_need_stop(ctx)) {
		damon_for_each_scheme(s, ctx) {
			wait_time = damos_wmark_wait_us(s);
			if (!init_wait_time || wait_time < min_wait_time) {
				init_wait_time = true;
				min_wait_time = wait_time;
			}
		}
		/* Zero wait time means at least one scheme is active */
		if (!min_wait_time)
			return 0;

		kdamond_usleep(min_wait_time);

		if (ctx->callback.after_wmarks_check &&
				ctx->callback.after_wmarks_check(ctx))
			break;
		/* Deactivated: cancel pending requests instead of serving */
		kdamond_call(ctx, true);
		damos_walk_cancel(ctx);
	}
	/* Asked to stop, or a callback asked to break out, while waiting */
	return -EBUSY;
}

/* Initialize the monitoring bookkeeping of @ctx for a fresh kdamond start */
static void kdamond_init_ctx(struct damon_ctx *ctx)
{
	/* Treat zero sample_interval as one, to avoid division by zero */
	unsigned long sample_interval = ctx->attrs.sample_interval ?
		ctx->attrs.sample_interval : 1;
	unsigned long apply_interval;
	struct damos *scheme;

	ctx->passed_sample_intervals = 0;
	ctx->next_aggregation_sis = ctx->attrs.aggr_interval / sample_interval;
	ctx->next_ops_update_sis = ctx->attrs.ops_update_interval /
		sample_interval;
	ctx->next_intervals_tune_sis = ctx->next_aggregation_sis *
		ctx->attrs.intervals_goal.aggrs;

	damon_for_each_scheme(scheme, ctx) {
		/* Zero apply_interval_us means "use the aggregation interval" */
		apply_interval = scheme->apply_interval_us ?
			scheme->apply_interval_us : ctx->attrs.aggr_interval;
		scheme->next_apply_sis = apply_interval / sample_interval;
		damos_set_filters_default_reject(scheme);
	}
}

/*
 * The monitoring daemon that runs as a kernel thread
 *
 * Repeats access sampling, aggregation, DAMOS application and regions
 * adjustment until kdamond_need_stop() says to stop, then cleans up.
 * Always returns 0.
 */
static int kdamond_fn(void *data)
{
	struct damon_ctx *ctx = data;
	struct damon_target *t;
	struct damon_region *r, *next;
	unsigned int max_nr_accesses = 0;
	unsigned long sz_limit = 0;

	pr_debug("kdamond (%d) starts\n", current->pid);

	complete(&ctx->kdamond_started);
	kdamond_init_ctx(ctx);

	if (ctx->ops.init)
		ctx->ops.init(ctx);
	ctx->regions_score_histogram = kmalloc_array(DAMOS_MAX_SCORE + 1,
			sizeof(*ctx->regions_score_histogram), GFP_KERNEL);
	if (!ctx->regions_score_histogram)
		goto done;

	sz_limit = damon_region_sz_limit(ctx);

	while (!kdamond_need_stop(ctx)) {
		/*
		 * ctx->attrs and ctx->next_{aggregation,ops_update}_sis could
		 * be changed from after_wmarks_check() or after_aggregation()
		 * callbacks.  Read the values here, and use those for this
		 * iteration.  That is, damon_set_attrs() updated new values
		 * are respected from next iteration.
		 */
		unsigned long next_aggregation_sis = ctx->next_aggregation_sis;
		unsigned long next_ops_update_sis = ctx->next_ops_update_sis;
		unsigned long sample_interval = ctx->attrs.sample_interval;

		if (kdamond_wait_activation(ctx))
			break;

		if (ctx->ops.prepare_access_checks)
			ctx->ops.prepare_access_checks(ctx);

		/* Let one sampling interval pass, then read the results */
		kdamond_usleep(sample_interval);
		ctx->passed_sample_intervals++;

		if (ctx->ops.check_accesses)
			max_nr_accesses = ctx->ops.check_accesses(ctx);

		if (ctx->passed_sample_intervals >= next_aggregation_sis) {
			/* Merge with threshold of 10% of the peak access rate */
			kdamond_merge_regions(ctx,
					max_nr_accesses / 10,
					sz_limit);
			if (ctx->callback.after_aggregation &&
					ctx->callback.after_aggregation(ctx))
				break;
		}

		/*
		 * do kdamond_call() and kdamond_apply_schemes() after
		 * kdamond_merge_regions() if possible, to reduce overhead
		 */
		kdamond_call(ctx, false);
		if (!list_empty(&ctx->schemes))
			kdamond_apply_schemes(ctx);
		else
			damos_walk_cancel(ctx);

		/* kdamond_call() above might have changed the attrs */
		sample_interval = ctx->attrs.sample_interval ?
			ctx->attrs.sample_interval : 1;
		if (ctx->passed_sample_intervals >= next_aggregation_sis) {
			if (ctx->attrs.intervals_goal.aggrs &&
					ctx->passed_sample_intervals >=
					ctx->next_intervals_tune_sis) {
				/*
				 * ctx->next_aggregation_sis might be updated
				 * from kdamond_call().  In the case,
				 * damon_set_attrs() which will be called from
				 * kdamond_tune_interval() may wrongly think
				 * this is in the middle of the current
				 * aggregation, and make aggregation
				 * information reset for all regions.  Then,
				 * following kdamond_reset_aggregated() call
				 * will make the region information invalid,
				 * particularly for ->nr_accesses_bp.
				 *
				 * Reset ->next_aggregation_sis to avoid that.
				 * It will anyway be correctly updated after
				 * this if clause.
				 */
				ctx->next_aggregation_sis =
					next_aggregation_sis;
				ctx->next_intervals_tune_sis +=
					ctx->attrs.aggr_samples *
					ctx->attrs.intervals_goal.aggrs;
				kdamond_tune_intervals(ctx);
				/* Re-read: tuning may have changed the attrs */
				sample_interval = ctx->attrs.sample_interval ?
					ctx->attrs.sample_interval : 1;

			}
			ctx->next_aggregation_sis = next_aggregation_sis +
				ctx->attrs.aggr_interval / sample_interval;

			kdamond_reset_aggregated(ctx);
			kdamond_split_regions(ctx);
		}

		if (ctx->passed_sample_intervals >= next_ops_update_sis) {
			ctx->next_ops_update_sis = next_ops_update_sis +
				ctx->attrs.ops_update_interval /
					sample_interval;
			if (ctx->ops.update)
				ctx->ops.update(ctx);
			sz_limit = damon_region_sz_limit(ctx);
		}
	}
done:
	/* Tear down all monitoring results */
	damon_for_each_target(t, ctx) {
		damon_for_each_region_safe(r, next, t)
			damon_destroy_region(r, t);
	}

	if (ctx->callback.before_terminate)
		ctx->callback.before_terminate(ctx);
	if (ctx->ops.cleanup)
		ctx->ops.cleanup(ctx);
	kfree(ctx->regions_score_histogram);

	pr_debug("kdamond (%d) finishes\n", current->pid);
	mutex_lock(&ctx->kdamond_lock);
	ctx->kdamond = NULL;
	mutex_unlock(&ctx->kdamond_lock);

	/* Cancel requests arriving after the main loop has exited */
	kdamond_call(ctx, true);
	damos_walk_cancel(ctx);

	mutex_lock(&damon_lock);
	nr_running_ctxs--;
	if (!nr_running_ctxs && running_exclusive_ctxs)
		running_exclusive_ctxs = false;
	mutex_unlock(&damon_lock);

	return 0;
}

/*
 * struct damon_system_ram_region - System RAM resource address region of
 *				    [@start, @end).
 * @start:	Start address of the region (inclusive).
 * @end:	End address of the region (exclusive).
2538 */ 2539 struct damon_system_ram_region { 2540 unsigned long start; 2541 unsigned long end; 2542 }; 2543 2544 static int walk_system_ram(struct resource *res, void *arg) 2545 { 2546 struct damon_system_ram_region *a = arg; 2547 2548 if (a->end - a->start < resource_size(res)) { 2549 a->start = res->start; 2550 a->end = res->end; 2551 } 2552 return 0; 2553 } 2554 2555 /* 2556 * Find biggest 'System RAM' resource and store its start and end address in 2557 * @start and @end, respectively. If no System RAM is found, returns false. 2558 */ 2559 static bool damon_find_biggest_system_ram(unsigned long *start, 2560 unsigned long *end) 2561 2562 { 2563 struct damon_system_ram_region arg = {}; 2564 2565 walk_system_ram_res(0, ULONG_MAX, &arg, walk_system_ram); 2566 if (arg.end <= arg.start) 2567 return false; 2568 2569 *start = arg.start; 2570 *end = arg.end; 2571 return true; 2572 } 2573 2574 /** 2575 * damon_set_region_biggest_system_ram_default() - Set the region of the given 2576 * monitoring target as requested, or biggest 'System RAM'. 2577 * @t: The monitoring target to set the region. 2578 * @start: The pointer to the start address of the region. 2579 * @end: The pointer to the end address of the region. 2580 * 2581 * This function sets the region of @t as requested by @start and @end. If the 2582 * values of @start and @end are zero, however, this function finds the biggest 2583 * 'System RAM' resource and sets the region to cover the resource. In the 2584 * latter case, this function saves the start and end addresses of the resource 2585 * in @start and @end, respectively. 2586 * 2587 * Return: 0 on success, negative error code otherwise. 
2588 */ 2589 int damon_set_region_biggest_system_ram_default(struct damon_target *t, 2590 unsigned long *start, unsigned long *end) 2591 { 2592 struct damon_addr_range addr_range; 2593 2594 if (*start > *end) 2595 return -EINVAL; 2596 2597 if (!*start && !*end && 2598 !damon_find_biggest_system_ram(start, end)) 2599 return -EINVAL; 2600 2601 addr_range.start = *start; 2602 addr_range.end = *end; 2603 return damon_set_regions(t, &addr_range, 1); 2604 } 2605 2606 /* 2607 * damon_moving_sum() - Calculate an inferred moving sum value. 2608 * @mvsum: Inferred sum of the last @len_window values. 2609 * @nomvsum: Non-moving sum of the last discrete @len_window window values. 2610 * @len_window: The number of last values to take care of. 2611 * @new_value: New value that will be added to the pseudo moving sum. 2612 * 2613 * Moving sum (moving average * window size) is good for handling noise, but 2614 * the cost of keeping past values can be high for arbitrary window size. This 2615 * function implements a lightweight pseudo moving sum function that doesn't 2616 * keep the past window values. 2617 * 2618 * It simply assumes there was no noise in the past, and get the no-noise 2619 * assumed past value to drop from @nomvsum and @len_window. @nomvsum is a 2620 * non-moving sum of the last window. For example, if @len_window is 10 and we 2621 * have 25 values, @nomvsum is the sum of the 11th to 20th values of the 25 2622 * values. Hence, this function simply drops @nomvsum / @len_window from 2623 * given @mvsum and add @new_value. 2624 * 2625 * For example, if @len_window is 10 and @nomvsum is 50, the last 10 values for 2626 * the last window could be vary, e.g., 0, 10, 0, 10, 0, 10, 0, 0, 0, 20. For 2627 * calculating next moving sum with a new value, we should drop 0 from 50 and 2628 * add the new value. However, this function assumes it got value 5 for each 2629 * of the last ten times. 
Based on the assumption, when the next value is
 * measured, it drops the assumed past value, 5 from the current sum, and add
 * the new value to get the updated pseudo-moving sum.
 *
 * This means the value could have errors, but the errors will disappear for
 * every @len_window aligned calls.  For example, if @len_window is 10, the
 * pseudo moving sum with 11th value to 19th value would have an error.  But
 * the sum with 20th value will not have the error.
 *
 * Return: Pseudo-moving sum after getting the @new_value.
 */
static unsigned int damon_moving_sum(unsigned int mvsum, unsigned int nomvsum,
		unsigned int len_window, unsigned int new_value)
{
	return mvsum - nomvsum / len_window + new_value;
}

/**
 * damon_update_region_access_rate() - Update the access rate of a region.
 * @r:		The DAMON region to update for its access check result.
 * @accessed:	Whether the region has accessed during last sampling interval.
 * @attrs:	The damon_attrs of the DAMON context.
 *
 * Update the access rate of a region with the region's last sampling interval
 * access check result.
 *
 * Usually this will be called by &damon_operations->check_accesses callback.
 */
void damon_update_region_access_rate(struct damon_region *r, bool accessed,
		struct damon_attrs *attrs)
{
	unsigned int len_window = 1;

	/*
	 * sample_interval can be zero, but cannot be larger than
	 * aggr_interval, owing to validation of damon_set_attrs().
	 */
	if (attrs->sample_interval)
		len_window = damon_max_nr_accesses(attrs);
	/* The rate is kept in basis points: 10000 means 100% accessed */
	r->nr_accesses_bp = damon_moving_sum(r->nr_accesses_bp,
			r->last_nr_accesses * 10000, len_window,
			accessed ? 10000 : 0);

	if (accessed)
		r->nr_accesses++;
}

/* Set up the slab cache for struct damon_region objects on boot */
static int __init damon_init(void)
{
	damon_region_cache = KMEM_CACHE(damon_region, 0);
	if (unlikely(!damon_region_cache)) {
		pr_err("creating damon_region_cache fails\n");
		return -ENOMEM;
	}

	return 0;
}

subsys_initcall(damon_init);

#include "tests/core-kunit.h"