// SPDX-License-Identifier: GPL-2.0
/*
 * Data Access Monitor
 *
 * Author: SeongJae Park <sj@kernel.org>
 */

#define pr_fmt(fmt) "damon: " fmt

#include <linux/damon.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/memcontrol.h>
#include <linux/mm.h>
#include <linux/psi.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/string_choices.h>

#define CREATE_TRACE_POINTS
#include <trace/events/damon.h>

static DEFINE_MUTEX(damon_lock);
static int nr_running_ctxs;
static bool running_exclusive_ctxs;

static DEFINE_MUTEX(damon_ops_lock);
static struct damon_operations damon_registered_ops[NR_DAMON_OPS];

static struct kmem_cache *damon_region_cache __ro_after_init;

/* Should be called under damon_ops_lock with id smaller than NR_DAMON_OPS */
static bool __damon_is_registered_ops(enum damon_ops_id id)
{
	struct damon_operations empty_ops = {};

	if (!memcmp(&empty_ops, &damon_registered_ops[id], sizeof(empty_ops)))
		return false;
	return true;
}

/**
 * damon_is_registered_ops() - Check if a given damon_operations is registered.
 * @id:	Id of the damon_operations to check if registered.
 *
 * Return: true if the ops is set, false otherwise.
 */
bool damon_is_registered_ops(enum damon_ops_id id)
{
	bool registered;

	if (id >= NR_DAMON_OPS)
		return false;
	mutex_lock(&damon_ops_lock);
	registered = __damon_is_registered_ops(id);
	mutex_unlock(&damon_ops_lock);
	return registered;
}

/**
 * damon_register_ops() - Register a monitoring operations set to DAMON.
 * @ops:	monitoring operations set to register.
 *
 * This function registers a monitoring operations set having a valid &struct
 * damon_operations->id so that others can find and use it later.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int damon_register_ops(struct damon_operations *ops)
{
	int err = 0;

	if (ops->id >= NR_DAMON_OPS)
		return -EINVAL;

	mutex_lock(&damon_ops_lock);
	/* Fail for already registered ops */
	if (__damon_is_registered_ops(ops->id))
		err = -EINVAL;
	else
		damon_registered_ops[ops->id] = *ops;
	mutex_unlock(&damon_ops_lock);
	return err;
}
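
/*
 * For example, an operations set provider may register its implementation as
 * below (an illustrative sketch only; "my_ops" and the init function are
 * hypothetical, and the real callbacks are elided):
 *
 *	static struct damon_operations my_ops = {
 *		.id = DAMON_OPS_PADDR,
 *	};
 *
 *	static int __init my_ops_init(void)
 *	{
 *		return damon_register_ops(&my_ops);
 *	}
 *
 * Users can then make their contexts use the registered set via
 * damon_select_ops(ctx, DAMON_OPS_PADDR), which is defined below.
 */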

/**
 * damon_select_ops() - Select a monitoring operations to use with the context.
 * @ctx:	monitoring context to use the operations.
 * @id:	id of the registered monitoring operations to select.
 *
 * This function finds the registered monitoring operations set of @id and
 * makes @ctx use it.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int damon_select_ops(struct damon_ctx *ctx, enum damon_ops_id id)
{
	int err = 0;

	if (id >= NR_DAMON_OPS)
		return -EINVAL;

	mutex_lock(&damon_ops_lock);
	if (!__damon_is_registered_ops(id))
		err = -EINVAL;
	else
		ctx->ops = damon_registered_ops[id];
	mutex_unlock(&damon_ops_lock);
	return err;
}

/*
 * Construct a damon_region struct
 *
 * Returns the pointer to the new struct if success, or NULL otherwise
 */
struct damon_region *damon_new_region(unsigned long start, unsigned long end)
{
	struct damon_region *region;

	region = kmem_cache_alloc(damon_region_cache, GFP_KERNEL);
	if (!region)
		return NULL;

	region->ar.start = start;
	region->ar.end = end;
	region->nr_accesses = 0;
	region->nr_accesses_bp = 0;
	INIT_LIST_HEAD(&region->list);

	region->age = 0;
	region->last_nr_accesses = 0;

	return region;
}

void damon_add_region(struct damon_region *r, struct damon_target *t)
{
	list_add_tail(&r->list, &t->regions_list);
	t->nr_regions++;
}

static void damon_del_region(struct damon_region *r, struct damon_target *t)
{
	list_del(&r->list);
	t->nr_regions--;
}

static void damon_free_region(struct damon_region *r)
{
	kmem_cache_free(damon_region_cache, r);
}

void damon_destroy_region(struct damon_region *r, struct damon_target *t)
{
	damon_del_region(r, t);
	damon_free_region(r);
}

static bool damon_is_last_region(struct damon_region *r,
		struct damon_target *t)
{
	return list_is_last(&r->list, &t->regions_list);
}

/*
 * Check whether a region is intersecting an address range
 *
 * Returns true if it is.
 */
static bool damon_intersect(struct damon_region *r,
		struct damon_addr_range *re)
{
	return !(r->ar.end <= re->start || re->end <= r->ar.start);
}

/*
 * Fill holes between regions with new regions.
 */
static int damon_fill_regions_holes(struct damon_region *first,
		struct damon_region *last, struct damon_target *t)
{
	struct damon_region *r = first;

	damon_for_each_region_from(r, t) {
		struct damon_region *next, *newr;

		if (r == last)
			break;
		next = damon_next_region(r);
		if (r->ar.end != next->ar.start) {
			newr = damon_new_region(r->ar.end, next->ar.start);
			if (!newr)
				return -ENOMEM;
			damon_insert_region(newr, r, next, t);
		}
	}
	return 0;
}

/*
 * damon_set_regions() - Set regions of a target for given address ranges.
 * @t:			the given target.
 * @ranges:		array of new monitoring target ranges.
 * @nr_ranges:		length of @ranges.
 * @min_region_sz:	minimum region size.
 *
 * This function adds new regions to, or modifies existing regions of, a
 * monitoring target to fit in specific ranges.
 *
 * Return: 0 if success, or negative error code otherwise.
 */
int damon_set_regions(struct damon_target *t, struct damon_addr_range *ranges,
		unsigned int nr_ranges, unsigned long min_region_sz)
{
	struct damon_region *r, *next;
	unsigned int i;
	int err;

	/* Remove regions which are not in the new ranges */
	damon_for_each_region_safe(r, next, t) {
		for (i = 0; i < nr_ranges; i++) {
			if (damon_intersect(r, &ranges[i]))
				break;
		}
		if (i == nr_ranges)
			damon_destroy_region(r, t);
	}

	r = damon_first_region(t);
	/* Add new regions or resize existing regions to fit in the ranges */
	for (i = 0; i < nr_ranges; i++) {
		struct damon_region *first = NULL, *last, *newr;
		struct damon_addr_range *range;

		range = &ranges[i];
		/* Get the first/last regions intersecting with the range */
		damon_for_each_region_from(r, t) {
			if (damon_intersect(r, range)) {
				if (!first)
					first = r;
				last = r;
			}
			if (r->ar.start >= range->end)
				break;
		}
		if (!first) {
			/* no region intersects with this range */
			newr = damon_new_region(
					ALIGN_DOWN(range->start,
						min_region_sz),
					ALIGN(range->end, min_region_sz));
			if (!newr)
				return -ENOMEM;
			damon_insert_region(newr, damon_prev_region(r), r, t);
		} else {
			/* resize intersecting regions to fit in this range */
			first->ar.start = ALIGN_DOWN(range->start,
					min_region_sz);
			last->ar.end = ALIGN(range->end, min_region_sz);

			/* fill possible holes in the range */
			err = damon_fill_regions_holes(first, last, t);
			if (err)
				return err;
		}
	}
	return 0;
}
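
/*
 * For example, to let @t monitor two address ranges, one could do the below
 * (an illustrative sketch only; the address values are arbitrary):
 *
 *	struct damon_addr_range ranges[] = {
 *		{ .start = 0x100000, .end = 0x200000, },
 *		{ .start = 0x400000, .end = 0x500000, },
 *	};
 *
 *	err = damon_set_regions(t, ranges, 2, DAMON_MIN_REGION_SZ);
 *
 * Existing regions outside of the two ranges are destroyed, intersecting
 * ones are resized, and holes inside each range are filled with new regions.
 */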

struct damos_filter *damos_new_filter(enum damos_filter_type type,
		bool matching, bool allow)
{
	struct damos_filter *filter;

	filter = kmalloc_obj(*filter);
	if (!filter)
		return NULL;
	filter->type = type;
	filter->matching = matching;
	filter->allow = allow;
	INIT_LIST_HEAD(&filter->list);
	return filter;
}

/**
 * damos_filter_for_ops() - Return whether the filter is an ops-handled one.
 * @type:	type of the filter.
 *
 * Return: true if the filter of @type needs to be handled by ops layer, false
 * otherwise.
 */
bool damos_filter_for_ops(enum damos_filter_type type)
{
	switch (type) {
	case DAMOS_FILTER_TYPE_ADDR:
	case DAMOS_FILTER_TYPE_TARGET:
		return false;
	default:
		break;
	}
	return true;
}

void damos_add_filter(struct damos *s, struct damos_filter *f)
{
	if (damos_filter_for_ops(f->type))
		list_add_tail(&f->list, &s->ops_filters);
	else
		list_add_tail(&f->list, &s->core_filters);
}

static void damos_del_filter(struct damos_filter *f)
{
	list_del(&f->list);
}

static void damos_free_filter(struct damos_filter *f)
{
	kfree(f);
}

void damos_destroy_filter(struct damos_filter *f)
{
	damos_del_filter(f);
	damos_free_filter(f);
}

struct damos_quota_goal *damos_new_quota_goal(
		enum damos_quota_goal_metric metric,
		unsigned long target_value)
{
	struct damos_quota_goal *goal;

	goal = kmalloc_obj(*goal);
	if (!goal)
		return NULL;
	goal->metric = metric;
	goal->target_value = target_value;
	INIT_LIST_HEAD(&goal->list);
	return goal;
}

void damos_add_quota_goal(struct damos_quota *q, struct damos_quota_goal *g)
{
	list_add_tail(&g->list, &q->goals);
}

static void damos_del_quota_goal(struct damos_quota_goal *g)
{
	list_del(&g->list);
}

static void damos_free_quota_goal(struct damos_quota_goal *g)
{
	kfree(g);
}

void damos_destroy_quota_goal(struct damos_quota_goal *g)
{
	damos_del_quota_goal(g);
	damos_free_quota_goal(g);
}

/* initialize fields of @quota that normally API users wouldn't set */
static struct damos_quota *damos_quota_init(struct damos_quota *quota)
{
	quota->esz = 0;
	quota->total_charged_sz = 0;
	quota->total_charged_ns = 0;
	quota->charged_sz = 0;
	quota->charged_from = 0;
	quota->charge_target_from = NULL;
	quota->charge_addr_from = 0;
	quota->esz_bp = 0;
	return quota;
}

struct damos *damon_new_scheme(struct damos_access_pattern *pattern,
			enum damos_action action,
			unsigned long apply_interval_us,
			struct damos_quota *quota,
			struct damos_watermarks *wmarks,
			int target_nid)
{
	struct damos *scheme;

	scheme = kmalloc_obj(*scheme);
	if (!scheme)
		return NULL;
	scheme->pattern = *pattern;
	scheme->action = action;
	scheme->apply_interval_us = apply_interval_us;
	/*
	 * next_apply_sis will be set when kdamond starts.  While kdamond is
	 * running, it will also be updated when it is added to the DAMON
	 * context, or damon_attrs are updated.
	 */
	scheme->next_apply_sis = 0;
	scheme->walk_completed = false;
	INIT_LIST_HEAD(&scheme->core_filters);
	INIT_LIST_HEAD(&scheme->ops_filters);
	scheme->stat = (struct damos_stat){};
	scheme->max_nr_snapshots = 0;
	INIT_LIST_HEAD(&scheme->list);

	scheme->quota = *(damos_quota_init(quota));
	/* quota.goals should be separately set by caller */
	INIT_LIST_HEAD(&scheme->quota.goals);

	scheme->wmarks = *wmarks;
	scheme->wmarks.activated = true;

	scheme->migrate_dests = (struct damos_migrate_dests){};
	scheme->target_nid = target_nid;

	return scheme;
}
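
/*
 * For example, a scheme that pages out regions of at least 4 KiB that were
 * not accessed for 100 or more aggregation intervals could be built as below
 * (an illustrative sketch only; error handling is elided):
 *
 *	struct damos_access_pattern pattern = {
 *		.min_sz_region = SZ_4K, .max_sz_region = ULONG_MAX,
 *		.min_nr_accesses = 0, .max_nr_accesses = 0,
 *		.min_age_region = 100, .max_age_region = UINT_MAX,
 *	};
 *	struct damos_quota quota = {};
 *	struct damos_watermarks wmarks = {};
 *	struct damos *scheme;
 *
 *	scheme = damon_new_scheme(&pattern, DAMOS_PAGEOUT, 0, &quota,
 *			&wmarks, NUMA_NO_NODE);
 *	damon_add_scheme(ctx, scheme);
 */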

static void damos_set_next_apply_sis(struct damos *s, struct damon_ctx *ctx)
{
	unsigned long sample_interval = ctx->attrs.sample_interval ?
		ctx->attrs.sample_interval : 1;
	unsigned long apply_interval = s->apply_interval_us ?
		s->apply_interval_us : ctx->attrs.aggr_interval;

	s->next_apply_sis = ctx->passed_sample_intervals +
		apply_interval / sample_interval;
}

void damon_add_scheme(struct damon_ctx *ctx, struct damos *s)
{
	list_add_tail(&s->list, &ctx->schemes);
	damos_set_next_apply_sis(s, ctx);
}

static void damon_del_scheme(struct damos *s)
{
	list_del(&s->list);
}

static void damon_free_scheme(struct damos *s)
{
	kfree(s);
}

void damon_destroy_scheme(struct damos *s)
{
	struct damos_quota_goal *g, *g_next;
	struct damos_filter *f, *next;

	damos_for_each_quota_goal_safe(g, g_next, &s->quota)
		damos_destroy_quota_goal(g);

	damos_for_each_core_filter_safe(f, next, s)
		damos_destroy_filter(f);

	damos_for_each_ops_filter_safe(f, next, s)
		damos_destroy_filter(f);

	kfree(s->migrate_dests.node_id_arr);
	kfree(s->migrate_dests.weight_arr);
	damon_del_scheme(s);
	damon_free_scheme(s);
}

/*
 * Construct a damon_target struct
 *
 * Returns the pointer to the new struct if success, or NULL otherwise
 */
struct damon_target *damon_new_target(void)
{
	struct damon_target *t;

	t = kmalloc_obj(*t);
	if (!t)
		return NULL;

	t->pid = NULL;
	t->nr_regions = 0;
	INIT_LIST_HEAD(&t->regions_list);
	INIT_LIST_HEAD(&t->list);
	t->obsolete = false;

	return t;
}

void damon_add_target(struct damon_ctx *ctx, struct damon_target *t)
{
	list_add_tail(&t->list, &ctx->adaptive_targets);
}

bool damon_targets_empty(struct damon_ctx *ctx)
{
	return list_empty(&ctx->adaptive_targets);
}

static void damon_del_target(struct damon_target *t)
{
	list_del(&t->list);
}

void damon_free_target(struct damon_target *t)
{
	struct damon_region *r, *next;

	damon_for_each_region_safe(r, next, t)
		damon_free_region(r);
	kfree(t);
}

void damon_destroy_target(struct damon_target *t, struct damon_ctx *ctx)
{
	if (ctx && ctx->ops.cleanup_target)
		ctx->ops.cleanup_target(t);

	damon_del_target(t);
	damon_free_target(t);
}

unsigned int damon_nr_regions(struct damon_target *t)
{
	return t->nr_regions;
}

struct damon_ctx *damon_new_ctx(void)
{
	struct damon_ctx *ctx;

	ctx = kzalloc_obj(*ctx);
	if (!ctx)
		return NULL;

	init_completion(&ctx->kdamond_started);

	ctx->attrs.sample_interval = 5 * 1000;
	ctx->attrs.aggr_interval = 100 * 1000;
	ctx->attrs.ops_update_interval = 60 * 1000 * 1000;

	ctx->passed_sample_intervals = 0;
	/* These will be set from kdamond_init_ctx() */
	ctx->next_aggregation_sis = 0;
	ctx->next_ops_update_sis = 0;

	mutex_init(&ctx->kdamond_lock);
	INIT_LIST_HEAD(&ctx->call_controls);
	mutex_init(&ctx->call_controls_lock);
	mutex_init(&ctx->walk_control_lock);

	ctx->attrs.min_nr_regions = 10;
	ctx->attrs.max_nr_regions = 1000;

	ctx->addr_unit = 1;
	ctx->min_region_sz = DAMON_MIN_REGION_SZ;

	INIT_LIST_HEAD(&ctx->adaptive_targets);
	INIT_LIST_HEAD(&ctx->schemes);

	return ctx;
}
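
/*
 * A typical context construction thus looks like below (an illustrative
 * sketch only; error handling is elided):
 *
 *	struct damon_ctx *ctx = damon_new_ctx();
 *	struct damon_target *t = damon_new_target();
 *
 *	damon_select_ops(ctx, DAMON_OPS_PADDR);
 *	damon_add_target(ctx, t);
 *
 * Monitoring regions, schemes and attributes can then be set before the
 * context is started.  damon_destroy_ctx() below frees the context together
 * with all targets, regions and schemes that are linked to it.
 */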

static void damon_destroy_targets(struct damon_ctx *ctx)
{
	struct damon_target *t, *next_t;

	damon_for_each_target_safe(t, next_t, ctx)
		damon_destroy_target(t, ctx);
}

void damon_destroy_ctx(struct damon_ctx *ctx)
{
	struct damos *s, *next_s;

	damon_destroy_targets(ctx);

	damon_for_each_scheme_safe(s, next_s, ctx)
		damon_destroy_scheme(s);

	kfree(ctx);
}

static bool damon_attrs_equals(const struct damon_attrs *attrs1,
		const struct damon_attrs *attrs2)
{
	const struct damon_intervals_goal *ig1 = &attrs1->intervals_goal;
	const struct damon_intervals_goal *ig2 = &attrs2->intervals_goal;

	return attrs1->sample_interval == attrs2->sample_interval &&
		attrs1->aggr_interval == attrs2->aggr_interval &&
		attrs1->ops_update_interval == attrs2->ops_update_interval &&
		attrs1->min_nr_regions == attrs2->min_nr_regions &&
		attrs1->max_nr_regions == attrs2->max_nr_regions &&
		ig1->access_bp == ig2->access_bp &&
		ig1->aggrs == ig2->aggrs &&
		ig1->min_sample_us == ig2->min_sample_us &&
		ig1->max_sample_us == ig2->max_sample_us;
}

static unsigned int damon_age_for_new_attrs(unsigned int age,
		struct damon_attrs *old_attrs, struct damon_attrs *new_attrs)
{
	return age * old_attrs->aggr_interval / new_attrs->aggr_interval;
}

/* convert access ratio in bp (per 10,000) to nr_accesses */
static unsigned int damon_accesses_bp_to_nr_accesses(
		unsigned int accesses_bp, struct damon_attrs *attrs)
{
	return accesses_bp * damon_max_nr_accesses(attrs) / 10000;
}

/*
 * Convert nr_accesses to access ratio in bp (per 10,000).
 *
 * Callers should ensure attrs.aggr_interval is not zero, like
 * damon_update_monitoring_results() does.  Otherwise, divide-by-zero would
 * happen.
 */
static unsigned int damon_nr_accesses_to_accesses_bp(
		unsigned int nr_accesses, struct damon_attrs *attrs)
{
	return nr_accesses * 10000 / damon_max_nr_accesses(attrs);
}

static unsigned int damon_nr_accesses_for_new_attrs(unsigned int nr_accesses,
		struct damon_attrs *old_attrs, struct damon_attrs *new_attrs)
{
	return damon_accesses_bp_to_nr_accesses(
			damon_nr_accesses_to_accesses_bp(
				nr_accesses, old_attrs),
			new_attrs);
}
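
/*
 * As a worked example of the above conversion: suppose the old attributes
 * use a 5 ms sampling and a 100 ms aggregation interval, so
 * damon_max_nr_accesses() is 20.  A region having nr_accesses of 10 was
 * hence accessed in 50% (5,000 bp) of the sampling intervals.  If the new
 * attributes use a 10 ms sampling and a 400 ms aggregation interval
 * (damon_max_nr_accesses() of 40), the converted nr_accesses becomes
 * 5000 * 40 / 10000 = 20, keeping the 50% ratio.
 */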

static void damon_update_monitoring_result(struct damon_region *r,
		struct damon_attrs *old_attrs, struct damon_attrs *new_attrs,
		bool aggregating)
{
	if (!aggregating) {
		r->nr_accesses = damon_nr_accesses_for_new_attrs(
				r->nr_accesses, old_attrs, new_attrs);
		r->nr_accesses_bp = r->nr_accesses * 10000;
	} else {
		/*
		 * if this is called in the middle of the aggregation, reset
		 * the aggregations we made so far for this aggregation
		 * interval.  In other words, make the status as if
		 * kdamond_reset_aggregated() was just called.
		 */
		r->last_nr_accesses = damon_nr_accesses_for_new_attrs(
				r->last_nr_accesses, old_attrs, new_attrs);
		r->nr_accesses_bp = r->last_nr_accesses * 10000;
		r->nr_accesses = 0;
	}
	r->age = damon_age_for_new_attrs(r->age, old_attrs, new_attrs);
}

/*
 * region->nr_accesses is the number of sampling intervals in the last
 * aggregation interval in which access to the region has been found, and
 * region->age is the number of aggregation intervals for which its access
 * pattern has been maintained.  For that reason, the real meaning of the two
 * fields depends on the current sampling interval and aggregation interval.
 * This function updates ->nr_accesses and ->age of given damon_ctx's regions
 * for new damon_attrs.
 */
static void damon_update_monitoring_results(struct damon_ctx *ctx,
		struct damon_attrs *new_attrs, bool aggregating)
{
	struct damon_attrs *old_attrs = &ctx->attrs;
	struct damon_target *t;
	struct damon_region *r;

	/* if any interval is zero, simply skip the conversion */
	if (!old_attrs->sample_interval || !old_attrs->aggr_interval ||
			!new_attrs->sample_interval ||
			!new_attrs->aggr_interval)
		return;

	damon_for_each_target(t, ctx)
		damon_for_each_region(r, t)
			damon_update_monitoring_result(
					r, old_attrs, new_attrs, aggregating);
}

/*
 * damon_valid_intervals_goal() - return if the intervals goal of @attrs is
 * valid.
 */
static bool damon_valid_intervals_goal(struct damon_attrs *attrs)
{
	struct damon_intervals_goal *goal = &attrs->intervals_goal;

	/* tuning is disabled */
	if (!goal->aggrs)
		return true;
	if (goal->min_sample_us > goal->max_sample_us)
		return false;
	if (attrs->sample_interval < goal->min_sample_us ||
			goal->max_sample_us < attrs->sample_interval)
		return false;
	return true;
}

/**
 * damon_set_attrs() - Set attributes for the monitoring.
 * @ctx:		monitoring context
 * @attrs:		monitoring attributes
 *
 * This function should be called while the kdamond is not running, or an
 * access check results aggregation is not ongoing (e.g., from damon_call()).
 *
 * Every time interval is in micro-seconds.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int damon_set_attrs(struct damon_ctx *ctx, struct damon_attrs *attrs)
{
	unsigned long sample_interval = attrs->sample_interval ?
		attrs->sample_interval : 1;
	struct damos *s;
	bool aggregating = ctx->passed_sample_intervals <
		ctx->next_aggregation_sis;

	if (!damon_valid_intervals_goal(attrs))
		return -EINVAL;

	if (attrs->min_nr_regions < 3)
		return -EINVAL;
	if (attrs->min_nr_regions > attrs->max_nr_regions)
		return -EINVAL;
	if (attrs->sample_interval > attrs->aggr_interval)
		return -EINVAL;

	/* calls from outside of the core don't set this */
	if (!attrs->aggr_samples)
		attrs->aggr_samples = attrs->aggr_interval / sample_interval;

	ctx->next_aggregation_sis = ctx->passed_sample_intervals +
		attrs->aggr_interval / sample_interval;
	ctx->next_ops_update_sis = ctx->passed_sample_intervals +
		attrs->ops_update_interval / sample_interval;

	damon_update_monitoring_results(ctx, attrs, aggregating);
	ctx->attrs = *attrs;

	damon_for_each_scheme(s, ctx)
		damos_set_next_apply_sis(s, ctx);

	return 0;
}

/**
 * damon_set_schemes() - Set data access monitoring based operation schemes.
 * @ctx:	monitoring context
 * @schemes:	array of the schemes
 * @nr_schemes:	number of entries in @schemes
 *
 * This function should not be called while the kdamond of the context is
 * running.
 */
void damon_set_schemes(struct damon_ctx *ctx, struct damos **schemes,
		ssize_t nr_schemes)
{
	struct damos *s, *next;
	ssize_t i;

	damon_for_each_scheme_safe(s, next, ctx)
		damon_destroy_scheme(s);
	for (i = 0; i < nr_schemes; i++)
		damon_add_scheme(ctx, schemes[i]);
}

static struct damos_quota_goal *damos_nth_quota_goal(
		int n, struct damos_quota *q)
{
	struct damos_quota_goal *goal;
	int i = 0;

	damos_for_each_quota_goal(goal, q) {
		if (i++ == n)
			return goal;
	}
	return NULL;
}

static void damos_commit_quota_goal_union(
		struct damos_quota_goal *dst, struct damos_quota_goal *src)
{
	switch (dst->metric) {
	case DAMOS_QUOTA_NODE_MEM_USED_BP:
	case DAMOS_QUOTA_NODE_MEM_FREE_BP:
		dst->nid = src->nid;
		break;
	case DAMOS_QUOTA_NODE_MEMCG_USED_BP:
	case DAMOS_QUOTA_NODE_MEMCG_FREE_BP:
		dst->nid = src->nid;
		dst->memcg_id = src->memcg_id;
		break;
	default:
		break;
	}
}

static void damos_commit_quota_goal(
		struct damos_quota_goal *dst, struct damos_quota_goal *src)
{
	dst->metric = src->metric;
	dst->target_value = src->target_value;
	if (dst->metric == DAMOS_QUOTA_USER_INPUT)
		dst->current_value = src->current_value;
	/* keep last_psi_total as is, since it will be updated in next cycle */
	damos_commit_quota_goal_union(dst, src);
}

/**
 * damos_commit_quota_goals() - Commit DAMOS quota goals to another quota.
 * @dst:	The commit destination DAMOS quota.
 * @src:	The commit source DAMOS quota.
 *
 * Copies user-specified parameters for quota goals from @src to @dst.  Users
 * should use this function for quota goals-level parameters update of running
 * DAMON contexts, instead of manual in-place updates.
 *
 * This function should be called from parameters-update safe context, like
 * damon_call().
 *
 * Return: 0 if success, or negative error code otherwise.
 */
int damos_commit_quota_goals(struct damos_quota *dst, struct damos_quota *src)
{
	struct damos_quota_goal *dst_goal, *next, *src_goal, *new_goal;
	int i = 0, j = 0;

	damos_for_each_quota_goal_safe(dst_goal, next, dst) {
		src_goal = damos_nth_quota_goal(i++, src);
		if (src_goal)
			damos_commit_quota_goal(dst_goal, src_goal);
		else
			damos_destroy_quota_goal(dst_goal);
	}
	damos_for_each_quota_goal_safe(src_goal, next, src) {
		if (j++ < i)
			continue;
		new_goal = damos_new_quota_goal(
				src_goal->metric, src_goal->target_value);
		if (!new_goal)
			return -ENOMEM;
		damos_commit_quota_goal(new_goal, src_goal);
		damos_add_quota_goal(dst, new_goal);
	}
	return 0;
}

static int damos_commit_quota(struct damos_quota *dst, struct damos_quota *src)
{
	int err;

	dst->reset_interval = src->reset_interval;
	dst->ms = src->ms;
	dst->sz = src->sz;
	err = damos_commit_quota_goals(dst, src);
	if (err)
		return err;
	dst->weight_sz = src->weight_sz;
	dst->weight_nr_accesses = src->weight_nr_accesses;
	dst->weight_age = src->weight_age;
	return 0;
}

static struct damos_filter *damos_nth_core_filter(int n, struct damos *s)
{
	struct damos_filter *filter;
	int i = 0;

	damos_for_each_core_filter(filter, s) {
		if (i++ == n)
			return filter;
	}
	return NULL;
}

static struct damos_filter *damos_nth_ops_filter(int n, struct damos *s)
{
	struct damos_filter *filter;
	int i = 0;

	damos_for_each_ops_filter(filter, s) {
		if (i++ == n)
			return filter;
	}
	return NULL;
}

static void damos_commit_filter_arg(
		struct damos_filter *dst, struct damos_filter *src)
{
	switch (dst->type) {
	case DAMOS_FILTER_TYPE_MEMCG:
		dst->memcg_id = src->memcg_id;
		break;
	case DAMOS_FILTER_TYPE_ADDR:
		dst->addr_range = src->addr_range;
		break;
	case DAMOS_FILTER_TYPE_TARGET:
		dst->target_idx = src->target_idx;
		break;
	case DAMOS_FILTER_TYPE_HUGEPAGE_SIZE:
		dst->sz_range = src->sz_range;
		break;
	default:
		break;
	}
}

static void damos_commit_filter(
		struct damos_filter *dst, struct damos_filter *src)
{
	dst->type = src->type;
	dst->matching = src->matching;
	dst->allow = src->allow;
	damos_commit_filter_arg(dst, src);
}

static int damos_commit_core_filters(struct damos *dst, struct damos *src)
{
	struct damos_filter *dst_filter, *next, *src_filter, *new_filter;
	int i = 0, j = 0;

	damos_for_each_core_filter_safe(dst_filter, next, dst) {
		src_filter = damos_nth_core_filter(i++, src);
		if (src_filter)
			damos_commit_filter(dst_filter, src_filter);
		else
			damos_destroy_filter(dst_filter);
	}

	damos_for_each_core_filter_safe(src_filter, next, src) {
		if (j++ < i)
			continue;

		new_filter = damos_new_filter(
				src_filter->type, src_filter->matching,
				src_filter->allow);
		if (!new_filter)
			return -ENOMEM;
		damos_commit_filter_arg(new_filter, src_filter);
		damos_add_filter(dst, new_filter);
	}
	return 0;
}

static int damos_commit_ops_filters(struct damos *dst, struct damos *src)
{
	struct damos_filter *dst_filter, *next, *src_filter, *new_filter;
	int i = 0, j = 0;

	damos_for_each_ops_filter_safe(dst_filter, next, dst) {
		src_filter = damos_nth_ops_filter(i++, src);
		if (src_filter)
			damos_commit_filter(dst_filter, src_filter);
		else
			damos_destroy_filter(dst_filter);
	}

	damos_for_each_ops_filter_safe(src_filter, next, src) {
		if (j++ < i)
			continue;

		new_filter = damos_new_filter(
				src_filter->type, src_filter->matching,
				src_filter->allow);
		if (!new_filter)
			return -ENOMEM;
		damos_commit_filter_arg(new_filter, src_filter);
		damos_add_filter(dst, new_filter);
	}
	return 0;
}

/**
 * damos_filters_default_reject() - decide whether to reject memory that did
 * not match any given filter.
 * @filters:	Given DAMOS filters of a group.
 */
static bool damos_filters_default_reject(struct list_head *filters)
{
	struct damos_filter *last_filter;

	if (list_empty(filters))
		return false;
	last_filter = list_last_entry(filters, struct damos_filter, list);
	return last_filter->allow;
}

static void damos_set_filters_default_reject(struct damos *s)
{
	if (!list_empty(&s->ops_filters))
		s->core_filters_default_reject = false;
	else
		s->core_filters_default_reject =
			damos_filters_default_reject(&s->core_filters);
	s->ops_filters_default_reject =
		damos_filters_default_reject(&s->ops_filters);
}
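
/*
 * In other words, the last filter of a group decides the fate of memory the
 * group did not explicitly match.  For example, if the core filters of a
 * scheme are "reject the first monitoring target" followed by "allow a given
 * address range", a region matching neither filter is rejected, since the
 * last filter is an allow-type one.  If the reject filter is installed last
 * instead, unmatched regions are allowed.  Note also that the core layer
 * never applies a default reject while ops filters are installed, so that
 * the ops layer can make the final decision.
 */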

static int damos_commit_dests(struct damos_migrate_dests *dst,
		struct damos_migrate_dests *src)
{
	if (dst->nr_dests != src->nr_dests) {
		kfree(dst->node_id_arr);
		kfree(dst->weight_arr);

		dst->node_id_arr = kmalloc_array(src->nr_dests,
				sizeof(*dst->node_id_arr), GFP_KERNEL);
		if (!dst->node_id_arr) {
			dst->weight_arr = NULL;
			return -ENOMEM;
		}

		dst->weight_arr = kmalloc_array(src->nr_dests,
				sizeof(*dst->weight_arr), GFP_KERNEL);
		if (!dst->weight_arr) {
			/* ->node_id_arr will be freed by scheme destruction */
			return -ENOMEM;
		}
	}

	dst->nr_dests = src->nr_dests;
	for (int i = 0; i < src->nr_dests; i++) {
		dst->node_id_arr[i] = src->node_id_arr[i];
		dst->weight_arr[i] = src->weight_arr[i];
	}

	return 0;
}

static int damos_commit_filters(struct damos *dst, struct damos *src)
{
	int err;

	err = damos_commit_core_filters(dst, src);
	if (err)
		return err;
	err = damos_commit_ops_filters(dst, src);
	if (err)
		return err;
	damos_set_filters_default_reject(dst);
	return 0;
}

static struct damos *damon_nth_scheme(int n, struct damon_ctx *ctx)
{
	struct damos *s;
	int i = 0;

	damon_for_each_scheme(s, ctx) {
		if (i++ == n)
			return s;
	}
	return NULL;
}

static int damos_commit(struct damos *dst, struct damos *src)
{
	int err;

	dst->pattern = src->pattern;
	dst->action = src->action;
	dst->apply_interval_us = src->apply_interval_us;

	err = damos_commit_quota(&dst->quota, &src->quota);
	if (err)
		return err;

	dst->wmarks = src->wmarks;
	dst->target_nid = src->target_nid;

	err = damos_commit_dests(&dst->migrate_dests, &src->migrate_dests);
	if (err)
		return err;

	err = damos_commit_filters(dst, src);
	if (err)
		return err;

	dst->max_nr_snapshots = src->max_nr_snapshots;
	return 0;
}

static int damon_commit_schemes(struct damon_ctx *dst, struct damon_ctx *src)
{
	struct damos *dst_scheme, *next, *src_scheme, *new_scheme;
	int i = 0, j = 0, err;

	damon_for_each_scheme_safe(dst_scheme, next, dst) {
		src_scheme = damon_nth_scheme(i++, src);
		if (src_scheme) {
			err = damos_commit(dst_scheme, src_scheme);
			if (err)
				return err;
		} else {
			damon_destroy_scheme(dst_scheme);
		}
	}

	damon_for_each_scheme_safe(src_scheme, next, src) {
		if (j++ < i)
			continue;
		new_scheme = damon_new_scheme(&src_scheme->pattern,
				src_scheme->action,
				src_scheme->apply_interval_us,
				&src_scheme->quota, &src_scheme->wmarks,
				NUMA_NO_NODE);
		if (!new_scheme)
			return -ENOMEM;
		err = damos_commit(new_scheme, src_scheme);
		if (err) {
			damon_destroy_scheme(new_scheme);
			return err;
		}
		damon_add_scheme(dst, new_scheme);
	}
	return 0;
}

static struct damon_target *damon_nth_target(int n, struct damon_ctx *ctx)
{
	struct damon_target *t;
	int i = 0;

	damon_for_each_target(t, ctx) {
		if (i++ == n)
			return t;
	}
	return NULL;
}

/*
 * The caller should ensure the regions of @src are
 * 1. valid (end >= start) and
 * 2. sorted by starting address.
 *
 * If @src has no region, @dst keeps current regions.
 */
static int damon_commit_target_regions(struct damon_target *dst,
		struct damon_target *src, unsigned long src_min_region_sz)
{
	struct damon_region *src_region;
	struct damon_addr_range *ranges;
	int i = 0, err;

	damon_for_each_region(src_region, src)
		i++;
	if (!i)
		return 0;

	ranges = kmalloc_objs(*ranges, i, GFP_KERNEL | __GFP_NOWARN);
	if (!ranges)
		return -ENOMEM;
	i = 0;
	damon_for_each_region(src_region, src)
		ranges[i++] = src_region->ar;
	err = damon_set_regions(dst, ranges, i, src_min_region_sz);
	kfree(ranges);
	return err;
}

static int damon_commit_target(
		struct damon_target *dst, bool dst_has_pid,
		struct damon_target *src, bool src_has_pid,
		unsigned long src_min_region_sz)
{
	int err;

	err = damon_commit_target_regions(dst, src, src_min_region_sz);
	if (err)
		return err;
	if (dst_has_pid)
		put_pid(dst->pid);
	if (src_has_pid)
		get_pid(src->pid);
	dst->pid = src->pid;
	return 0;
}

static int damon_commit_targets(
		struct damon_ctx *dst, struct damon_ctx *src)
{
	struct damon_target *dst_target, *next, *src_target, *new_target;
	int i = 0, j = 0, err;

	damon_for_each_target_safe(dst_target, next, dst) {
		src_target = damon_nth_target(i++, src);
		/*
		 * If src target is obsolete, do not commit the parameters to
		 * the dst target, and further remove the dst target.
		 */
		if (src_target && !src_target->obsolete) {
			err = damon_commit_target(
					dst_target, damon_target_has_pid(dst),
					src_target, damon_target_has_pid(src),
					src->min_region_sz);
			if (err)
				return err;
		} else {
			struct damos *s;

			damon_destroy_target(dst_target, dst);
			damon_for_each_scheme(s, dst) {
				if (s->quota.charge_target_from == dst_target) {
					s->quota.charge_target_from = NULL;
					s->quota.charge_addr_from = 0;
				}
			}
		}
	}

	damon_for_each_target_safe(src_target, next, src) {
		if (j++ < i)
			continue;
		/* target to remove has no matching dst */
		if (src_target->obsolete)
			return -EINVAL;
		new_target = damon_new_target();
		if (!new_target)
			return -ENOMEM;
		err = damon_commit_target(new_target, false,
				src_target, damon_target_has_pid(src),
				src->min_region_sz);
		if (err) {
			damon_destroy_target(new_target, NULL);
			return err;
		}
		damon_add_target(dst, new_target);
	}
	return 0;
}

/**
 * damon_commit_ctx() - Commit parameters of a DAMON context to another.
 * @dst:	The commit destination DAMON context.
 * @src:	The commit source DAMON context.
 *
 * This function copies user-specified parameters from @src to @dst and
 * updates the internal status and results accordingly.  Users should use
 * this function for context-level parameters update of a running context,
 * instead of manual in-place updates.
 *
 * This function should be called from parameters-update safe context, like
 * damon_call().
 *
 * Return: 0 on success, negative error code otherwise.
 */
int damon_commit_ctx(struct damon_ctx *dst, struct damon_ctx *src)
{
	int err;

	dst->maybe_corrupted = true;
	if (!is_power_of_2(src->min_region_sz))
		return -EINVAL;

	err = damon_commit_schemes(dst, src);
	if (err)
		return err;
	err = damon_commit_targets(dst, src);
	if (err)
		return err;
	/*
	 * schemes and targets should be updated first, since
	 * 1. damon_set_attrs() updates monitoring results of targets and
	 *    next_apply_sis of schemes, and
	 * 2. ops update should be done after pid handling is done (target
	 *    committing require putting pids).
	 */
	if (!damon_attrs_equals(&dst->attrs, &src->attrs)) {
		err = damon_set_attrs(dst, &src->attrs);
		if (err)
			return err;
	}
	dst->ops = src->ops;
	dst->addr_unit = src->addr_unit;
	dst->min_region_sz = src->min_region_sz;

	dst->maybe_corrupted = false;
	return 0;
}
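
/*
 * A typical use of damon_commit_ctx() is therefore from a damon_call()
 * callback, as sketched below ("commit_fn" and "new_params_ctx" are
 * hypothetical names; the destination is the running context and the source
 * is a context that only carries the new parameters):
 *
 *	static int commit_fn(void *data)
 *	{
 *		struct damon_ctx *running_ctx = data;
 *
 *		return damon_commit_ctx(running_ctx, new_params_ctx);
 *	}
 */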

/**
 * damon_nr_running_ctxs() - Return number of currently running contexts.
 */
int damon_nr_running_ctxs(void)
{
	int nr_ctxs;

	mutex_lock(&damon_lock);
	nr_ctxs = nr_running_ctxs;
	mutex_unlock(&damon_lock);

	return nr_ctxs;
}

/* Returns the size upper limit for each monitoring region */
static unsigned long damon_region_sz_limit(struct damon_ctx *ctx)
{
	struct damon_target *t;
	struct damon_region *r;
	unsigned long sz = 0;

	damon_for_each_target(t, ctx) {
		damon_for_each_region(r, t)
			sz += damon_sz_region(r);
	}

	if (ctx->attrs.min_nr_regions)
		sz /= ctx->attrs.min_nr_regions;
	if (sz < ctx->min_region_sz)
		sz = ctx->min_region_sz;

	return sz;
}

static int kdamond_fn(void *data);

/*
 * __damon_start() - Starts monitoring with given context.
 * @ctx:	monitoring context
 *
 * This function should be called while damon_lock is held.
 *
 * Return: 0 on success, negative error code otherwise.
 */
static int __damon_start(struct damon_ctx *ctx)
{
	int err = -EBUSY;

	mutex_lock(&ctx->kdamond_lock);
	if (!ctx->kdamond) {
		err = 0;
		reinit_completion(&ctx->kdamond_started);
		ctx->kdamond = kthread_run(kdamond_fn, ctx, "kdamond.%d",
				nr_running_ctxs);
		if (IS_ERR(ctx->kdamond)) {
			err = PTR_ERR(ctx->kdamond);
			ctx->kdamond = NULL;
		} else {
			wait_for_completion(&ctx->kdamond_started);
		}
	}
	mutex_unlock(&ctx->kdamond_lock);

	return err;
}

/**
 * damon_start() - Starts monitoring for a given group of contexts.
 * @ctxs:	an array of the pointers for contexts to start monitoring
 * @nr_ctxs:	size of @ctxs
 * @exclusive:	exclusiveness of this contexts group
 *
 * This function starts a group of monitoring threads for a group of
 * monitoring contexts.  One thread per each context is created and run in
 * parallel.  The caller should handle synchronization between the threads by
 * itself.  If @exclusive is true and a group of threads that was created by
 * another 'damon_start()' call is currently running, this function does
 * nothing but returns -EBUSY.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int damon_start(struct damon_ctx **ctxs, int nr_ctxs, bool exclusive)
{
	int i;
	int err = 0;

	mutex_lock(&damon_lock);
	if ((exclusive && nr_running_ctxs) ||
			(!exclusive && running_exclusive_ctxs)) {
		mutex_unlock(&damon_lock);
		return -EBUSY;
	}

	for (i = 0; i < nr_ctxs; i++) {
		err = __damon_start(ctxs[i]);
		if (err)
			break;
		nr_running_ctxs++;
	}
	if (exclusive && nr_running_ctxs)
		running_exclusive_ctxs = true;
	mutex_unlock(&damon_lock);

	return err;
}

/*
 * __damon_stop() - Stops monitoring of a given context.
 * @ctx:	monitoring context
 *
 * Return: 0 on success, negative error code otherwise.
 */
static int __damon_stop(struct damon_ctx *ctx)
{
	struct task_struct *tsk;

	mutex_lock(&ctx->kdamond_lock);
	tsk = ctx->kdamond;
	if (tsk) {
		get_task_struct(tsk);
		mutex_unlock(&ctx->kdamond_lock);
		kthread_stop_put(tsk);
		return 0;
	}
	mutex_unlock(&ctx->kdamond_lock);

	return -EPERM;
}

/**
 * damon_stop() - Stops monitoring for a given group of contexts.
 * @ctxs:	an array of the pointers for contexts to stop monitoring
 * @nr_ctxs:	size of @ctxs
 *
 * Return: 0 on success, negative error code otherwise.
 */
int damon_stop(struct damon_ctx **ctxs, int nr_ctxs)
{
	int i, err = 0;

	for (i = 0; i < nr_ctxs; i++) {
		/* nr_running_ctxs is decremented in kdamond_fn */
		err = __damon_stop(ctxs[i]);
		if (err)
			break;
	}
	return err;
}
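
/*
 * For example, a single context built as in the earlier sketches can be
 * started and later stopped as below (an illustrative sketch only):
 *
 *	struct damon_ctx *ctxs[] = { ctx };
 *
 *	err = damon_start(ctxs, 1, true);
 *	...
 *	err = damon_stop(ctxs, 1);
 *
 * Since @exclusive is set to true above, no other damon_start() call can
 * succeed until the context is stopped.
 */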

/**
 * damon_is_running() - Returns if a given DAMON context is running.
 * @ctx:	The DAMON context to see if running.
 *
 * Return: true if @ctx is running, false otherwise.
 */
bool damon_is_running(struct damon_ctx *ctx)
{
	bool running;

	mutex_lock(&ctx->kdamond_lock);
	running = ctx->kdamond != NULL;
	mutex_unlock(&ctx->kdamond_lock);
	return running;
}

/**
 * damon_kdamond_pid() - Return pid of a given DAMON context's worker thread.
 * @ctx:	The DAMON context of the question.
 *
 * Return: pid if @ctx is running, negative error code otherwise.
 */
int damon_kdamond_pid(struct damon_ctx *ctx)
{
	int pid = -EINVAL;

	mutex_lock(&ctx->kdamond_lock);
	if (ctx->kdamond)
		pid = ctx->kdamond->pid;
	mutex_unlock(&ctx->kdamond_lock);
	return pid;
}

/*
 * damon_call_handle_inactive_ctx() - handle a DAMON call request that was
 * added to an inactive context.
 * @ctx:	The inactive DAMON context.
 * @control:	Control variable of the call request.
 *
 * This function is called in a case that @control is added to @ctx but @ctx
 * is not running (inactive).  See if @ctx handled @control or not, and clean
 * up @control if it was not handled.
 *
 * Returns 0 if @control was handled by @ctx, negative error code otherwise.
 */
static int damon_call_handle_inactive_ctx(
		struct damon_ctx *ctx, struct damon_call_control *control)
{
	struct damon_call_control *c;

	mutex_lock(&ctx->call_controls_lock);
	list_for_each_entry(c, &ctx->call_controls, list) {
		if (c == control) {
			list_del(&control->list);
			mutex_unlock(&ctx->call_controls_lock);
			return -EINVAL;
		}
	}
	mutex_unlock(&ctx->call_controls_lock);
	return 0;
}

/**
 * damon_call() - Invoke a given function on DAMON worker thread (kdamond).
 * @ctx:	DAMON context to call the function for.
 * @control:	Control variable of the call request.
 *
 * Ask DAMON worker thread (kdamond) of @ctx to call a function with an
 * argument data that are respectively passed via &damon_call_control->fn and
 * &damon_call_control->data of @control.  If &damon_call_control->repeat of
 * @control is unset, further wait until the kdamond finishes handling of the
 * request.  Otherwise, return as soon as the request is made.
 *
 * The kdamond executes the function with the argument in the main loop, just
 * after a sampling of the iteration is finished.  The function can hence
 * safely access the internal data of the &struct damon_ctx without additional
 * synchronization.  The return value of the function will be saved in
 * &damon_call_control->return_code.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int damon_call(struct damon_ctx *ctx, struct damon_call_control *control)
{
	if (!control->repeat)
		init_completion(&control->completion);
	control->canceled = false;
	INIT_LIST_HEAD(&control->list);

	mutex_lock(&ctx->call_controls_lock);
	list_add_tail(&control->list, &ctx->call_controls);
	mutex_unlock(&ctx->call_controls_lock);
	if (!damon_is_running(ctx))
		return damon_call_handle_inactive_ctx(ctx, control);
	if (control->repeat)
		return 0;
	wait_for_completion(&control->completion);
	if (control->canceled)
		return -ECANCELED;
	return 0;
}
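
/*
 * For example, a one-shot (non-repeating) call request can be made as below
 * (an illustrative sketch only; "get_stats_fn", an int (*)(void *) callback,
 * and its argument are hypothetical):
 *
 *	struct damon_call_control control = {
 *		.fn = get_stats_fn,
 *		.data = &stats,
 *	};
 *
 *	err = damon_call(ctx, &control);
 *	if (!err)
 *		err = control.return_code;
 */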

/**
 * damos_walk() - Invoke a given function while DAMOS walks regions.
 * @ctx:	DAMON context to call the function for.
 * @control:	Control variable of the walk request.
 *
 * Ask DAMON worker thread (kdamond) of @ctx to call a function for each
 * region that the kdamond will apply DAMOS action to, and wait until the
 * kdamond finishes handling of the request.
 *
 * The kdamond executes the given function in the main loop, for each region
 * just after it applied any DAMOS actions of @ctx to it.  The invocation is
 * made only within one &damos->apply_interval_us since damos_walk()
 * invocation, for each scheme.  The given callback function can hence safely
 * access the internal data of the &struct damon_ctx and the &struct
 * damon_region that each of the schemes will apply the action to for the
 * next interval, without additional synchronization against the kdamond.  If
 * every scheme of @ctx passed at least one &damos->apply_interval_us, the
 * kdamond marks the request as completed so that damos_walk() can wake up
 * and return.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int damos_walk(struct damon_ctx *ctx, struct damos_walk_control *control)
{
	init_completion(&control->completion);
	control->canceled = false;
	mutex_lock(&ctx->walk_control_lock);
	if (ctx->walk_control) {
		mutex_unlock(&ctx->walk_control_lock);
		return -EBUSY;
	}
	ctx->walk_control = control;
	mutex_unlock(&ctx->walk_control_lock);
	if (!damon_is_running(ctx)) {
		mutex_lock(&ctx->walk_control_lock);
		if (ctx->walk_control == control)
			ctx->walk_control = NULL;
		mutex_unlock(&ctx->walk_control_lock);
		return -EINVAL;
	}
	wait_for_completion(&control->completion);
	if (control->canceled)
		return -ECANCELED;
	return 0;
}
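
/*
 * For example, a walk request that prints the address range of every region
 * that a DAMOS action is applied to could look like below (an illustrative
 * sketch only; "print_region_fn" is a hypothetical callback, assumed to
 * return void as implied by the invocation in damos_walk_call_walk()):
 *
 *	static void print_region_fn(void *data, struct damon_ctx *ctx,
 *			struct damon_target *t, struct damon_region *r,
 *			struct damos *s, unsigned long sz_filter_passed)
 *	{
 *		pr_info("region [%lu, %lu)\n", r->ar.start, r->ar.end);
 *	}
 *
 *	struct damos_walk_control control = { .walk_fn = print_region_fn };
 *
 *	err = damos_walk(ctx, &control);
 */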

/*
 * Warn about and fix a corrupted ->nr_accesses[_bp] to aid investigation and
 * to prevent the problem from being propagated.
 */
static void damon_warn_fix_nr_accesses_corruption(struct damon_region *r)
{
	if (r->nr_accesses_bp == r->nr_accesses * 10000)
		return;
	WARN_ONCE(true, "invalid nr_accesses_bp at reset: %u %u\n",
			r->nr_accesses_bp, r->nr_accesses);
	r->nr_accesses_bp = r->nr_accesses * 10000;
}

/*
 * Reset the aggregated monitoring results ('nr_accesses' of each region).
 */
static void kdamond_reset_aggregated(struct damon_ctx *c)
{
	struct damon_target *t;
	unsigned int ti = 0;	/* target's index */

	damon_for_each_target(t, c) {
		struct damon_region *r;

		damon_for_each_region(r, t) {
			trace_damon_aggregated(ti, r, damon_nr_regions(t));
			damon_warn_fix_nr_accesses_corruption(r);
			r->last_nr_accesses = r->nr_accesses;
			r->nr_accesses = 0;
		}
		ti++;
	}
}

static unsigned long damon_get_intervals_score(struct damon_ctx *c)
{
	struct damon_target *t;
	struct damon_region *r;
	unsigned long sz_region, max_access_events = 0, access_events = 0;
	unsigned long target_access_events;
	unsigned long goal_bp = c->attrs.intervals_goal.access_bp;

	damon_for_each_target(t, c) {
		damon_for_each_region(r, t) {
			sz_region = damon_sz_region(r);
			max_access_events += sz_region * c->attrs.aggr_samples;
			access_events += sz_region * r->nr_accesses;
		}
	}
	target_access_events = max_access_events * goal_bp / 10000;
	target_access_events = target_access_events ? : 1;
	return access_events * 10000 / target_access_events;
}

static unsigned long damon_feed_loop_next_input(unsigned long last_input,
		unsigned long score);

static unsigned long damon_get_intervals_adaptation_bp(struct damon_ctx *c)
{
	unsigned long score_bp, adaptation_bp;

	score_bp = damon_get_intervals_score(c);
	adaptation_bp = damon_feed_loop_next_input(100000000, score_bp) /
		10000;
	/*
	 * adaptation_bp ranges from 1 to 20,000.  Avoid too rapid reduction
	 * of the intervals by rescaling [1, 10,000] to [5,000, 10,000].
	 */
	if (adaptation_bp <= 10000)
		adaptation_bp = 5000 + adaptation_bp / 2;
	return adaptation_bp;
}

static void kdamond_tune_intervals(struct damon_ctx *c)
{
	unsigned long adaptation_bp;
	struct damon_attrs new_attrs;
	struct damon_intervals_goal *goal;

	adaptation_bp = damon_get_intervals_adaptation_bp(c);
	if (adaptation_bp == 10000)
		return;

	new_attrs = c->attrs;
	goal = &c->attrs.intervals_goal;
	new_attrs.sample_interval = min(goal->max_sample_us,
			c->attrs.sample_interval * adaptation_bp / 10000);
	new_attrs.sample_interval = max(goal->min_sample_us,
			new_attrs.sample_interval);
	new_attrs.aggr_interval = new_attrs.sample_interval *
		c->attrs.aggr_samples;
	trace_damon_monitor_intervals_tune(new_attrs.sample_interval);
	damon_set_attrs(c, &new_attrs);
}
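
/*
 * As a worked example of the above: if an aggregation observed only half of
 * the targeted access events, damon_get_intervals_score() returns 5,000, the
 * feed loop suggests 150,000,000 and thus adaptation_bp becomes 15,000, so
 * the sampling interval grows by 1.5x to catch rarer events.  If twice the
 * targeted events or more were observed, the feed loop returns its minimum
 * input and adaptation_bp is rescaled to about 5,000, halving the intervals.
 */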

static void damon_split_region_at(struct damon_target *t,
		struct damon_region *r, unsigned long sz_r);

static bool __damos_valid_target(struct damon_region *r, struct damos *s)
{
	unsigned long sz;
	unsigned int nr_accesses = r->nr_accesses_bp / 10000;

	sz = damon_sz_region(r);
	return s->pattern.min_sz_region <= sz &&
		sz <= s->pattern.max_sz_region &&
		s->pattern.min_nr_accesses <= nr_accesses &&
		nr_accesses <= s->pattern.max_nr_accesses &&
		s->pattern.min_age_region <= r->age &&
		r->age <= s->pattern.max_age_region;
}

static bool damos_valid_target(struct damon_ctx *c, struct damon_target *t,
		struct damon_region *r, struct damos *s)
{
	bool ret = __damos_valid_target(r, s);

	if (!ret || !s->quota.esz || !c->ops.get_scheme_score)
		return ret;

	return c->ops.get_scheme_score(c, t, r, s) >= s->quota.min_score;
}

/*
 * damos_skip_charged_region() - Check if the given region or the starting
 * part of it is already charged for the DAMOS quota.
 * @t:		The target of the region.
 * @rp:		The pointer to the region.
 * @s:		The scheme to be applied.
 * @min_region_sz:	minimum region size.
 *
 * If a quota of a scheme has been exceeded in a quota charge window, the
 * scheme's action would be applied to only a part of the target access
 * pattern fulfilling regions.  To avoid applying the scheme action to only
 * already applied regions, DAMON skips applying the scheme action to the
 * regions that were charged in the previous charge window.
 *
 * This function checks if a given region should be skipped or not for the
 * reason.  If only the starting part of the region has previously been
 * charged, this function splits the region into two so that the second one
 * covers the area that was not charged in the previous charge window, saves
 * the second region in *rp, and returns false, so that the caller can apply
 * DAMON action to the second one.
 *
 * Return: true if the region should be entirely skipped, false otherwise.
 */
static bool damos_skip_charged_region(struct damon_target *t,
		struct damon_region **rp, struct damos *s,
		unsigned long min_region_sz)
{
	struct damon_region *r = *rp;
	struct damos_quota *quota = &s->quota;
	unsigned long sz_to_skip;

	/* Skip previously charged regions */
	if (quota->charge_target_from) {
		if (t != quota->charge_target_from)
			return true;
		if (r == damon_last_region(t)) {
			quota->charge_target_from = NULL;
			quota->charge_addr_from = 0;
			return true;
		}
		if (quota->charge_addr_from &&
				r->ar.end <= quota->charge_addr_from)
			return true;

		if (quota->charge_addr_from && r->ar.start <
				quota->charge_addr_from) {
			sz_to_skip = ALIGN_DOWN(quota->charge_addr_from -
					r->ar.start, min_region_sz);
			if (!sz_to_skip) {
				if (damon_sz_region(r) <= min_region_sz)
					return true;
				sz_to_skip = min_region_sz;
			}
			damon_split_region_at(t, r, sz_to_skip);
			r = damon_next_region(r);
			*rp = r;
		}
		quota->charge_target_from = NULL;
		quota->charge_addr_from = 0;
	}
	return false;
}
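
/*
 * For example, suppose the previous charge window ran out of quota in the
 * middle of a region spanning [0x1000, 0x5000), leaving charge_addr_from at
 * 0x3000.  On the next window, regions of the charged target that end at or
 * before 0x3000 are skipped entirely, while the spanning region is split at
 * 0x3000 (aligned down to the minimum region size) and only its
 * [0x3000, 0x5000) part is considered for the action.
 */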

static void damos_update_stat(struct damos *s,
		unsigned long sz_tried, unsigned long sz_applied,
		unsigned long sz_ops_filter_passed)
{
	s->stat.nr_tried++;
	s->stat.sz_tried += sz_tried;
	if (sz_applied)
		s->stat.nr_applied++;
	s->stat.sz_applied += sz_applied;
	s->stat.sz_ops_filter_passed += sz_ops_filter_passed;
}

static bool damos_filter_match(struct damon_ctx *ctx, struct damon_target *t,
		struct damon_region *r, struct damos_filter *filter,
		unsigned long min_region_sz)
{
	bool matched = false;
	struct damon_target *ti;
	int target_idx = 0;
	unsigned long start, end;

	switch (filter->type) {
	case DAMOS_FILTER_TYPE_TARGET:
		damon_for_each_target(ti, ctx) {
			if (ti == t)
				break;
			target_idx++;
		}
		matched = target_idx == filter->target_idx;
		break;
	case DAMOS_FILTER_TYPE_ADDR:
		start = ALIGN_DOWN(filter->addr_range.start, min_region_sz);
		end = ALIGN_DOWN(filter->addr_range.end, min_region_sz);

		/* inside the range */
		if (start <= r->ar.start && r->ar.end <= end) {
			matched = true;
			break;
		}
		/* outside of the range */
		if (r->ar.end <= start || end <= r->ar.start) {
			matched = false;
			break;
		}
		/* start before the range and overlap */
		if (r->ar.start < start) {
			damon_split_region_at(t, r, start - r->ar.start);
			matched = false;
			break;
		}
		/* start inside the range */
		damon_split_region_at(t, r, end - r->ar.start);
		matched = true;
		break;
	default:
		return false;
	}

	return matched == filter->matching;
}

static bool damos_core_filter_out(struct damon_ctx *ctx,
		struct damon_target *t, struct damon_region *r,
		struct damos *s)
{
	struct damos_filter *filter;

	s->core_filters_allowed = false;
	damos_for_each_core_filter(filter, s) {
		if (damos_filter_match(ctx, t, r, filter,
					ctx->min_region_sz)) {
			if (filter->allow)
				s->core_filters_allowed = true;
			return !filter->allow;
		}
	}
	return s->core_filters_default_reject;
}

/*
 * damos_walk_call_walk() - Call &damos_walk_control->walk_fn.
 * @ctx:	The context of &damon_ctx->walk_control.
 * @t:		The monitoring target of @r that @s will be applied to.
 * @r:		The region of @t that @s will be applied to.
 * @s:		The scheme of @ctx that will be applied to @r.
 *
 * This function is called from kdamond whenever it asked the operation set to
 * apply a DAMOS scheme action to a region.  If a DAMOS walk request is
 * installed by damos_walk() and not yet uninstalled, invoke it.
 */
static void damos_walk_call_walk(struct damon_ctx *ctx, struct damon_target *t,
		struct damon_region *r, struct damos *s,
		unsigned long sz_filter_passed)
{
	struct damos_walk_control *control;

	if (s->walk_completed)
		return;

	control = ctx->walk_control;
	if (!control)
		return;

	control->walk_fn(control->data, ctx, t, r, s, sz_filter_passed);
}

/*
 * damos_walk_complete() - Complete DAMOS walk request if all walks are done.
 * @ctx:	The context of &damon_ctx->walk_control.
 * @s:		A scheme of @ctx that all walks are now done.
 *
 * This function is called when the kdamond has finished applying the action
 * of a DAMOS scheme to all regions that are eligible for the given
 * &damos->apply_interval_us.  If every scheme of @ctx including @s now
 * finished walking for at least one &damos->apply_interval_us, this function
 * marks the handling of the given DAMOS walk request as done, so that
 * damos_walk() can wake up and return.
 */
static void damos_walk_complete(struct damon_ctx *ctx, struct damos *s)
{
	struct damos *siter;
	struct damos_walk_control *control;

	control = ctx->walk_control;
	if (!control)
		return;

	s->walk_completed = true;
	/* if all schemes completed, signal completion to walker */
	damon_for_each_scheme(siter, ctx) {
		if (!siter->walk_completed)
			return;
	}
	damon_for_each_scheme(siter, ctx)
		siter->walk_completed = false;

	complete(&control->completion);
	ctx->walk_control = NULL;
}

/*
 * damos_walk_cancel() - Cancel the current DAMOS walk request.
 * @ctx:	The context of &damon_ctx->walk_control.
 *
 * This function is called when @ctx is deactivated by DAMOS watermarks, DAMOS
 * walk is requested but there is no DAMOS scheme to walk for, or the kdamond
 * is already out of the main loop and is therefore going to terminate, and
 * hence cannot continue the walks.  This function therefore marks the walk
 * request as canceled, so that damos_walk() can wake up and return.
 */
static void damos_walk_cancel(struct damon_ctx *ctx)
{
	struct damos_walk_control *control;

	mutex_lock(&ctx->walk_control_lock);
	control = ctx->walk_control;
	mutex_unlock(&ctx->walk_control_lock);

	if (!control)
		return;
	control->canceled = true;
	complete(&control->completion);
	mutex_lock(&ctx->walk_control_lock);
	ctx->walk_control = NULL;
	mutex_unlock(&ctx->walk_control_lock);
}
static void damos_apply_scheme(struct damon_ctx *c, struct damon_target *t,
		struct damon_region *r, struct damos *s)
{
	struct damos_quota *quota = &s->quota;
	unsigned long sz = damon_sz_region(r);
	struct timespec64 begin, end;
	unsigned long sz_applied = 0;
	unsigned long sz_ops_filter_passed = 0;
	/*
	 * We plan to support multiple contexts per kdamond, as the DAMON
	 * sysfs interface implies with its 'nr_contexts' file.  Nevertheless,
	 * only a single context per kdamond is supported for now.  So, we can
	 * simply use '0' as the context index here.
	 */
	unsigned int cidx = 0;
	struct damos *siter;		/* schemes iterator */
	unsigned int sidx = 0;
	struct damon_target *titer;	/* targets iterator */
	unsigned int tidx = 0;
	bool do_trace = false;

	/* get indices for trace_damos_before_apply() */
	if (trace_damos_before_apply_enabled()) {
		damon_for_each_scheme(siter, c) {
			if (siter == s)
				break;
			sidx++;
		}
		damon_for_each_target(titer, c) {
			if (titer == t)
				break;
			tidx++;
		}
		do_trace = true;
	}

	if (c->ops.apply_scheme) {
		if (quota->esz && quota->charged_sz + sz > quota->esz) {
			sz = ALIGN_DOWN(quota->esz - quota->charged_sz,
					c->min_region_sz);
			if (!sz)
				goto update_stat;
			damon_split_region_at(t, r, sz);
		}
		if (damos_core_filter_out(c, t, r, s))
			return;
		ktime_get_coarse_ts64(&begin);
		trace_damos_before_apply(cidx, sidx, tidx, r,
				damon_nr_regions(t), do_trace);
		sz_applied = c->ops.apply_scheme(c, t, r, s,
				&sz_ops_filter_passed);
		damos_walk_call_walk(c, t, r, s, sz_ops_filter_passed);
		ktime_get_coarse_ts64(&end);
		quota->total_charged_ns += timespec64_to_ns(&end) -
			timespec64_to_ns(&begin);
		quota->charged_sz += sz;
		if (quota->esz && quota->charged_sz >= quota->esz) {
			quota->charge_target_from = t;
			quota->charge_addr_from = r->ar.end + 1;
		}
	}
	if (s->action != DAMOS_STAT)
		r->age = 0;

update_stat:
	damos_update_stat(s, sz, sz_applied, sz_ops_filter_passed);
}

static void damon_do_apply_schemes(struct damon_ctx *c,
				   struct damon_target *t,
				   struct damon_region *r)
{
	struct damos *s;

	damon_for_each_scheme(s, c) {
		struct damos_quota *quota = &s->quota;

		if (c->passed_sample_intervals < s->next_apply_sis)
			continue;

		if (!s->wmarks.activated)
			continue;

		/* Check the quota */
		if (quota->esz && quota->charged_sz >= quota->esz)
			continue;

		if (damos_skip_charged_region(t, &r, s, c->min_region_sz))
			continue;

		if (s->max_nr_snapshots &&
				s->max_nr_snapshots <= s->stat.nr_snapshots)
			continue;

		if (damos_valid_target(c, t, r, s))
			damos_apply_scheme(c, t, r, s);

		if (damon_is_last_region(r, t))
			s->stat.nr_snapshots++;
	}
}

/*
 * damon_feed_loop_next_input() - get next input to achieve a target score.
 * @last_input:	The last input.
 * @score:	Current score that was made with @last_input.
 *
 * Calculate the next input to achieve the target score, based on the last
 * input and the current score.  Assuming the input and the score are
 * positively proportional, calculate how much compensation should be added
 * to or subtracted from the last input, as a proportion of the last input.
 * Keep the next input from collapsing to zero by always setting it to a
 * non-zero value.  In short form (assuming support of float and signed
 * calculations), the algorithm is as below.
 *
 * next_input = max(last_input * ((goal - current) / goal + 1), 1)
 *
 * For simplicity of the implementation, we assume the target score is always
 * 10,000.  The caller should adjust @score for this.
 *
 * Returns the next input that is assumed to achieve the target score.
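 *
 * For example, if the target score is 10,000, the current score is 5,000,
 * and the last input was 1,000,000, the score is at half of the goal, so the
 * next input is compensated upward by half of the last input:
 * 1,000,000 * ((10,000 - 5,000) / 10,000 + 1) = 1,500,000.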
 */
static unsigned long damon_feed_loop_next_input(unsigned long last_input,
		unsigned long score)
{
	const unsigned long goal = 10000;
	/* Set the minimum input as 10000, to keep the compensation non-zero */
	const unsigned long min_input = 10000;
	unsigned long score_goal_diff, compensation;
	bool over_achieving = score > goal;

	if (score == goal)
		return last_input;
	if (score >= goal * 2)
		return min_input;

	if (over_achieving)
		score_goal_diff = score - goal;
	else
		score_goal_diff = goal - score;

	if (last_input < ULONG_MAX / score_goal_diff)
		compensation = last_input * score_goal_diff / goal;
	else
		compensation = last_input / goal * score_goal_diff;

	if (over_achieving)
		return max(last_input - compensation, min_input);
	if (last_input < ULONG_MAX - compensation)
		return last_input + compensation;
	return ULONG_MAX;
}

#ifdef CONFIG_PSI

static u64 damos_get_some_mem_psi_total(void)
{
	if (static_branch_likely(&psi_disabled))
		return 0;
	/* the 'some' pressure of the memory resource, in microseconds */
	return div_u64(psi_system.total[PSI_AVGS][PSI_MEM * 2],
			NSEC_PER_USEC);
}

#else	/* CONFIG_PSI */

static inline u64 damos_get_some_mem_psi_total(void)
{
	return 0;
}

#endif	/* CONFIG_PSI */

#ifdef CONFIG_NUMA
static __kernel_ulong_t damos_get_node_mem_bp(
		struct damos_quota_goal *goal)
{
	struct sysinfo i;
	__kernel_ulong_t numerator;

	si_meminfo_node(&i, goal->nid);
	if (goal->metric == DAMOS_QUOTA_NODE_MEM_USED_BP)
		numerator = i.totalram - i.freeram;
	else	/* DAMOS_QUOTA_NODE_MEM_FREE_BP */
		numerator = i.freeram;
	return numerator * 10000 / i.totalram;
}

static unsigned long damos_get_node_memcg_used_bp(
		struct damos_quota_goal *goal)
{
	struct mem_cgroup *memcg;
	struct lruvec *lruvec;
	unsigned long used_pages, numerator;
	struct sysinfo i;

	memcg = mem_cgroup_get_from_id(goal->memcg_id);
	if (!memcg) {
		if (goal->metric == DAMOS_QUOTA_NODE_MEMCG_USED_BP)
			return 0;
		else	/* DAMOS_QUOTA_NODE_MEMCG_FREE_BP */
			return 10000;
	}

	mem_cgroup_flush_stats(memcg);
	lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(goal->nid));
	used_pages = lruvec_page_state(lruvec, NR_ACTIVE_ANON);
	used_pages += lruvec_page_state(lruvec, NR_INACTIVE_ANON);
	used_pages += lruvec_page_state(lruvec, NR_ACTIVE_FILE);
	used_pages += lruvec_page_state(lruvec, NR_INACTIVE_FILE);

	mem_cgroup_put(memcg);

	si_meminfo_node(&i, goal->nid);
	if (goal->metric == DAMOS_QUOTA_NODE_MEMCG_USED_BP)
		numerator = used_pages;
	else	/* DAMOS_QUOTA_NODE_MEMCG_FREE_BP */
		numerator = i.totalram - used_pages;
	return numerator * 10000 / i.totalram;
}
#else	/* CONFIG_NUMA */
static __kernel_ulong_t damos_get_node_mem_bp(
		struct damos_quota_goal *goal)
{
	return 0;
}

static unsigned long damos_get_node_memcg_used_bp(
		struct damos_quota_goal *goal)
{
	return 0;
}
#endif	/* CONFIG_NUMA */

/*
 * Returns the ratio of LRU-active or LRU-inactive memory to the total LRU
 * memory size, in bp (1/10,000).
 */
static unsigned int damos_get_in_active_mem_bp(bool active_ratio)
{
	unsigned long active, inactive, total;

	/* This should align with /proc/meminfo output */
	active = global_node_page_state(NR_LRU_BASE + LRU_ACTIVE_ANON) +
		global_node_page_state(NR_LRU_BASE + LRU_ACTIVE_FILE);
	inactive = global_node_page_state(NR_LRU_BASE + LRU_INACTIVE_ANON) +
		global_node_page_state(NR_LRU_BASE + LRU_INACTIVE_FILE);
	total = active + inactive;
	if (active_ratio)
		return active * 10000 / total;
	return inactive * 10000 / total;
}

static void damos_set_quota_goal_current_value(struct damos_quota_goal *goal)
{
	u64 now_psi_total;

	switch (goal->metric) {
	case DAMOS_QUOTA_USER_INPUT:
		/* User should already set goal->current_value */
		break;
	case DAMOS_QUOTA_SOME_MEM_PSI_US:
		now_psi_total = damos_get_some_mem_psi_total();
		goal->current_value = now_psi_total - goal->last_psi_total;
		goal->last_psi_total = now_psi_total;
		break;
	case DAMOS_QUOTA_NODE_MEM_USED_BP:
	case DAMOS_QUOTA_NODE_MEM_FREE_BP:
		goal->current_value = damos_get_node_mem_bp(goal);
		break;
	case DAMOS_QUOTA_NODE_MEMCG_USED_BP:
	case DAMOS_QUOTA_NODE_MEMCG_FREE_BP:
		goal->current_value = damos_get_node_memcg_used_bp(goal);
		break;
	case DAMOS_QUOTA_ACTIVE_MEM_BP:
	case DAMOS_QUOTA_INACTIVE_MEM_BP:
		goal->current_value = damos_get_in_active_mem_bp(
				goal->metric == DAMOS_QUOTA_ACTIVE_MEM_BP);
		break;
	default:
		break;
	}
}

/* Return the highest score, since it makes the schemes least aggressive */
static unsigned long damos_quota_score(struct damos_quota *quota)
{
	struct damos_quota_goal *goal;
	unsigned long highest_score = 0;

	damos_for_each_quota_goal(goal, quota) {
		damos_set_quota_goal_current_value(goal);
		highest_score = max(highest_score,
				goal->current_value * 10000 /
				goal->target_value);
	}

	return highest_score;
}

/*
 * Called only if quota->ms or quota->sz are set, or quota->goals is not empty
 */
static void damos_set_effective_quota(struct damos_quota *quota)
{
	unsigned long throughput;
	unsigned long esz = ULONG_MAX;

	if (!quota->ms && list_empty(&quota->goals)) {
		quota->esz = quota->sz;
		return;
	}

	if (!list_empty(&quota->goals)) {
		unsigned long score = damos_quota_score(quota);

		quota->esz_bp = damon_feed_loop_next_input(
				max(quota->esz_bp, 10000UL),
				score);
		esz = quota->esz_bp / 10000;
	}

	if (quota->ms) {
		/* estimate the apply throughput, in bytes per millisecond */
		if (quota->total_charged_ns)
			throughput = mult_frac(quota->total_charged_sz,
					1000000, quota->total_charged_ns);
		else
			throughput = PAGE_SIZE * 1024;
		esz = min(throughput * quota->ms, esz);
	}

	if (quota->sz && quota->sz < esz)
		esz = quota->sz;

	quota->esz = esz;
}
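/*
 * Example of the effective quota calculation above, with made-up numbers: if
 * 10 MiB were charged over the last two seconds (total_charged_ns of
 * 2,000,000,000), the estimated throughput is
 * 10,485,760 * 1,000,000 / 2,000,000,000 = 5,242 bytes per millisecond, so a
 * time quota of quota->ms == 100 converts to an effective size quota of
 * about 524,200 bytes.  The smallest of the time-converted, goal-driven, and
 * explicit size quotas wins.
 */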
static void damos_trace_esz(struct damon_ctx *c, struct damos *s,
		struct damos_quota *quota)
{
	unsigned int cidx = 0, sidx = 0;
	struct damos *siter;

	damon_for_each_scheme(siter, c) {
		if (siter == s)
			break;
		sidx++;
	}
	trace_damos_esz(cidx, sidx, quota->esz);
}

static void damos_adjust_quota(struct damon_ctx *c, struct damos *s)
{
	struct damos_quota *quota = &s->quota;
	struct damon_target *t;
	struct damon_region *r;
	unsigned long cumulated_sz, cached_esz;
	unsigned int score, max_score = 0;

	if (!quota->ms && !quota->sz && list_empty(&quota->goals))
		return;

	/* First charge window */
	if (!quota->total_charged_sz && !quota->charged_from) {
		quota->charged_from = jiffies;
		damos_set_effective_quota(quota);
	}

	/* New charge window starts */
	if (time_after_eq(jiffies, quota->charged_from +
				msecs_to_jiffies(quota->reset_interval))) {
		if (quota->esz && quota->charged_sz >= quota->esz)
			s->stat.qt_exceeds++;
		quota->total_charged_sz += quota->charged_sz;
		quota->charged_from = jiffies;
		quota->charged_sz = 0;
		if (trace_damos_esz_enabled())
			cached_esz = quota->esz;
		damos_set_effective_quota(quota);
		if (trace_damos_esz_enabled() && quota->esz != cached_esz)
			damos_trace_esz(c, s, quota);
	}

	if (!c->ops.get_scheme_score)
		return;

	/* Fill up the score histogram */
	memset(c->regions_score_histogram, 0,
			sizeof(*c->regions_score_histogram) *
			(DAMOS_MAX_SCORE + 1));
	damon_for_each_target(t, c) {
		damon_for_each_region(r, t) {
			if (!__damos_valid_target(r, s))
				continue;
			score = c->ops.get_scheme_score(c, t, r, s);
			c->regions_score_histogram[score] +=
				damon_sz_region(r);
			if (score > max_score)
				max_score = score;
		}
	}

	/* Set the min score limit */
	for (cumulated_sz = 0, score = max_score; ; score--) {
		cumulated_sz += c->regions_score_histogram[score];
		if (cumulated_sz >= quota->esz || !score)
			break;
	}
	quota->min_score = score;
}
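/*
 * Example of the prioritization above, with made-up numbers: suppose
 * quota->esz is 100 MiB and the histogram holds 60 MiB of regions at score
 * 90, 30 MiB at score 80, and 40 MiB at score 70.  Walking down from the
 * maximum score, the cumulated size reaches 130 MiB (>= esz) at score 70, so
 * quota->min_score becomes 70 and lower-scored regions can be skipped within
 * this charge window.
 */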
static void damos_trace_stat(struct damon_ctx *c, struct damos *s)
{
	unsigned int cidx = 0, sidx = 0;
	struct damos *siter;

	if (!trace_damos_stat_after_apply_interval_enabled())
		return;

	damon_for_each_scheme(siter, c) {
		if (siter == s)
			break;
		sidx++;
	}
	trace_damos_stat_after_apply_interval(cidx, sidx, &s->stat);
}

static void kdamond_apply_schemes(struct damon_ctx *c)
{
	struct damon_target *t;
	struct damon_region *r, *next_r;
	struct damos *s;
	unsigned long sample_interval = c->attrs.sample_interval ?
		c->attrs.sample_interval : 1;
	bool has_schemes_to_apply = false;

	damon_for_each_scheme(s, c) {
		if (c->passed_sample_intervals < s->next_apply_sis)
			continue;

		if (!s->wmarks.activated)
			continue;

		has_schemes_to_apply = true;

		damos_adjust_quota(c, s);
	}

	if (!has_schemes_to_apply)
		return;

	mutex_lock(&c->walk_control_lock);
	damon_for_each_target(t, c) {
		if (c->ops.target_valid && !c->ops.target_valid(t))
			continue;

		damon_for_each_region_safe(r, next_r, t)
			damon_do_apply_schemes(c, t, r);
	}

	damon_for_each_scheme(s, c) {
		if (c->passed_sample_intervals < s->next_apply_sis)
			continue;
		damos_walk_complete(c, s);
		s->next_apply_sis = c->passed_sample_intervals +
			(s->apply_interval_us ? s->apply_interval_us :
			 c->attrs.aggr_interval) / sample_interval;
		s->last_applied = NULL;
		damos_trace_stat(c, s);
	}
	mutex_unlock(&c->walk_control_lock);
}

/*
 * Merge two adjacent regions into one region
 */
static void damon_merge_two_regions(struct damon_target *t,
		struct damon_region *l, struct damon_region *r)
{
	unsigned long sz_l = damon_sz_region(l), sz_r = damon_sz_region(r);

	l->nr_accesses = (l->nr_accesses * sz_l + r->nr_accesses * sz_r) /
			(sz_l + sz_r);
	l->nr_accesses_bp = l->nr_accesses * 10000;
	l->age = (l->age * sz_l + r->age * sz_r) / (sz_l + sz_r);
	l->ar.end = r->ar.end;
	damon_destroy_region(r, t);
}

/*
 * Merge adjacent regions having similar access frequencies
 *
 * t		target affected by this merge operation
 * thres	'->nr_accesses' diff threshold for the merge
 * sz_limit	size upper limit of each region
 */
static void damon_merge_regions_of(struct damon_target *t, unsigned int thres,
		unsigned long sz_limit)
{
	struct damon_region *r, *prev = NULL, *next;

	damon_for_each_region_safe(r, next, t) {
		if (abs(r->nr_accesses - r->last_nr_accesses) > thres)
			r->age = 0;
		else if ((r->nr_accesses == 0) != (r->last_nr_accesses == 0))
			r->age = 0;
		else
			r->age++;

		if (prev && prev->ar.end == r->ar.start &&
		    abs(prev->nr_accesses - r->nr_accesses) <= thres &&
		    damon_sz_region(prev) + damon_sz_region(r) <= sz_limit)
			damon_merge_two_regions(t, prev, r);
		else
			prev = r;
	}
}

/*
 * Merge adjacent regions having similar access frequencies
 *
 * threshold	'->nr_accesses' diff threshold for the merge
 * sz_limit	size upper limit of each region
 *
 * This function merges monitoring target regions which are adjacent and
 * whose access frequencies are similar.  This is for minimizing the
 * monitoring overhead under the dynamically changeable access pattern.  If a
 * merge was unnecessarily made, later 'kdamond_split_regions()' will revert
 * it.
 *
 * The total number of regions could be higher than the user-defined limit,
 * max_nr_regions, for some cases.  For example, the user can update
 * max_nr_regions to a number that is lower than the current number of
 * regions while DAMON is running.  For such a case, repeat merging until the
 * limit is met while increasing @threshold up to the possible maximum level.
 */
static void kdamond_merge_regions(struct damon_ctx *c, unsigned int threshold,
		unsigned long sz_limit)
{
	struct damon_target *t;
	unsigned int nr_regions;
	unsigned int max_thres;

	max_thres = c->attrs.aggr_interval /
		(c->attrs.sample_interval ? c->attrs.sample_interval : 1);
	do {
		nr_regions = 0;
		damon_for_each_target(t, c) {
			damon_merge_regions_of(t, threshold, sz_limit);
			nr_regions += damon_nr_regions(t);
		}
		threshold = max(1, threshold * 2);
	} while (nr_regions > c->attrs.max_nr_regions &&
			threshold / 2 < max_thres);
}
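/*
 * Example of the threshold escalation above: if merging with the caller-given
 * threshold still leaves more than max_nr_regions regions, the merge is
 * retried with the threshold doubled (and kept at least 1, so a zero input
 * escalates as 1, 2, 4, ...), until the region count fits the limit or the
 * threshold reaches the maximum meaningful 'nr_accesses' difference
 * (aggr_interval / sample_interval).
 */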
/*
 * Split a region in two
 *
 * r		the region to be split
 * sz_r		size of the first sub-region that will be made
 */
static void damon_split_region_at(struct damon_target *t,
		struct damon_region *r, unsigned long sz_r)
{
	struct damon_region *new;

	new = damon_new_region(r->ar.start + sz_r, r->ar.end);
	if (!new)
		return;

	r->ar.end = new->ar.start;

	new->age = r->age;
	new->last_nr_accesses = r->last_nr_accesses;
	new->nr_accesses_bp = r->nr_accesses_bp;
	new->nr_accesses = r->nr_accesses;

	damon_insert_region(new, r, damon_next_region(r), t);
}

/* Split every region in the given target into 'nr_subs' regions */
static void damon_split_regions_of(struct damon_target *t, int nr_subs,
		unsigned long min_region_sz)
{
	struct damon_region *r, *next;
	unsigned long sz_region, sz_sub = 0;
	int i;

	damon_for_each_region_safe(r, next, t) {
		sz_region = damon_sz_region(r);

		for (i = 0; i < nr_subs - 1 &&
				sz_region > 2 * min_region_sz; i++) {
			/*
			 * Randomly select the size of the left sub-region to
			 * be at least 10% and at most 90% of the original
			 * region
			 */
			sz_sub = ALIGN_DOWN(damon_rand(1, 10) *
					sz_region / 10, min_region_sz);
			/* Do not allow blank region */
			if (sz_sub == 0 || sz_sub >= sz_region)
				continue;

			damon_split_region_at(t, r, sz_sub);
			sz_region = sz_sub;
		}
	}
}

/*
 * Split every target region into randomly-sized small regions
 *
 * This function splits every target region into random-sized small regions
 * if the current total number of the regions is equal to or smaller than
 * half of the user-specified maximum number of regions.  This is for
 * maximizing the monitoring accuracy under the dynamically changeable access
 * patterns.  If a split was unnecessarily made, later
 * 'kdamond_merge_regions()' will revert it.
 */
static void kdamond_split_regions(struct damon_ctx *ctx)
{
	struct damon_target *t;
	unsigned int nr_regions = 0;
	static unsigned int last_nr_regions;
	int nr_subregions = 2;

	damon_for_each_target(t, ctx)
		nr_regions += damon_nr_regions(t);

	if (nr_regions > ctx->attrs.max_nr_regions / 2)
		return;

	/* Maybe the middle of the region has different access frequency */
	if (last_nr_regions == nr_regions &&
			nr_regions < ctx->attrs.max_nr_regions / 3)
		nr_subregions = 3;

	damon_for_each_target(t, ctx)
		damon_split_regions_of(t, nr_subregions, ctx->min_region_sz);

	last_nr_regions = nr_regions;
}
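/*
 * Example of the split count choice above: with max_nr_regions of 1,000 and
 * 400 current regions, each region is split in two (up to 800 regions).  If
 * the number of regions has stayed the same since the last split and is
 * below a third of the limit, e.g., 300 regions, each region is split in
 * three instead, to probe the middle of the regions with a different layout.
 */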
/*
 * Check whether the current monitoring should be stopped
 *
 * The monitoring is stopped when either the user requested to stop, or all
 * monitoring targets are invalid.
 *
 * Returns true if the current monitoring should be stopped.
 */
static bool kdamond_need_stop(struct damon_ctx *ctx)
{
	struct damon_target *t;

	if (kthread_should_stop())
		return true;

	if (!ctx->ops.target_valid)
		return false;

	damon_for_each_target(t, ctx) {
		if (ctx->ops.target_valid(t))
			return false;
	}

	return true;
}

static int damos_get_wmark_metric_value(enum damos_wmark_metric metric,
		unsigned long *metric_value)
{
	switch (metric) {
	case DAMOS_WMARK_FREE_MEM_RATE:
		/* free memory rate, in per-thousand of the total memory */
		*metric_value = global_zone_page_state(NR_FREE_PAGES) * 1000 /
			totalram_pages();
		return 0;
	default:
		break;
	}
	return -EINVAL;
}

/*
 * Returns zero if the scheme is active.  Else, returns the time to wait for
 * the next watermark check, in microseconds.
 */
static unsigned long damos_wmark_wait_us(struct damos *scheme)
{
	unsigned long metric;

	if (damos_get_wmark_metric_value(scheme->wmarks.metric, &metric))
		return 0;

	/* higher than the high watermark or lower than the low watermark */
	if (metric > scheme->wmarks.high || scheme->wmarks.low > metric) {
		if (scheme->wmarks.activated)
			pr_debug("deactivate a scheme (%d) for %s wmark\n",
					scheme->action,
					str_high_low(metric > scheme->wmarks.high));
		scheme->wmarks.activated = false;
		return scheme->wmarks.interval;
	}

	/* inactive and higher than the middle watermark */
	if ((scheme->wmarks.high >= metric && metric >= scheme->wmarks.mid) &&
			!scheme->wmarks.activated)
		return scheme->wmarks.interval;

	if (!scheme->wmarks.activated)
		pr_debug("activate a scheme (%d)\n", scheme->action);
	scheme->wmarks.activated = true;
	return 0;
}

static void kdamond_usleep(unsigned long usecs)
{
	if (usecs >= USLEEP_RANGE_UPPER_BOUND)
		schedule_timeout_idle(usecs_to_jiffies(usecs));
	else
		usleep_range_idle(usecs, usecs + 1);
}

/*
 * kdamond_call() - handle damon_call_control objects.
 * @ctx:	The &struct damon_ctx of the kdamond.
 * @cancel:	Whether to cancel the invocation of the function.
 *
 * If there are &struct damon_call_control requests that are registered via
 * &damon_call() on @ctx, do or cancel the invocation of the function
 * depending on @cancel.  @cancel is set when the kdamond is already out of
 * the main loop and therefore will be terminated.
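 *
 * As an illustrative sketch (the damon_call() caller side is defined
 * elsewhere, and the names below are hypothetical), a caller fills a
 * &struct damon_call_control with a function and its argument, e.g.,
 *
 *	struct damon_call_control control = {
 *		.fn = my_fn,		// hypothetical callback
 *		.data = &my_data,	// hypothetical argument
 *	};
 *
 *	damon_call(ctx, &control);
 *
 * and this function later runs control->fn(control->data) from the kdamond
 * context, saving the return value to control->return_code.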
 */
static void kdamond_call(struct damon_ctx *ctx, bool cancel)
{
	struct damon_call_control *control, *next;
	LIST_HEAD(controls);

	mutex_lock(&ctx->call_controls_lock);
	list_splice_tail_init(&ctx->call_controls, &controls);
	mutex_unlock(&ctx->call_controls_lock);

	list_for_each_entry_safe(control, next, &controls, list) {
		if (!control->repeat || cancel)
			list_del(&control->list);

		if (cancel)
			control->canceled = true;
		else
			control->return_code = control->fn(control->data);

		if (!control->repeat)
			complete(&control->completion);
		else if (control->canceled && control->dealloc_on_cancel)
			kfree(control);
		if (!cancel && ctx->maybe_corrupted)
			break;
	}

	mutex_lock(&ctx->call_controls_lock);
	list_splice_tail(&controls, &ctx->call_controls);
	mutex_unlock(&ctx->call_controls_lock);
}

/* Returns a negative error code if it's not activated but should return */
static int kdamond_wait_activation(struct damon_ctx *ctx)
{
	struct damos *s;
	unsigned long wait_time;
	unsigned long min_wait_time = 0;
	bool init_wait_time = false;

	while (!kdamond_need_stop(ctx)) {
		damon_for_each_scheme(s, ctx) {
			wait_time = damos_wmark_wait_us(s);
			if (!init_wait_time || wait_time < min_wait_time) {
				init_wait_time = true;
				min_wait_time = wait_time;
			}
		}
		if (!min_wait_time)
			return 0;

		kdamond_usleep(min_wait_time);

		kdamond_call(ctx, false);
		if (ctx->maybe_corrupted)
			return -EINVAL;
		damos_walk_cancel(ctx);
	}
	return -EBUSY;
}

static void kdamond_init_ctx(struct damon_ctx *ctx)
{
	unsigned long sample_interval = ctx->attrs.sample_interval ?
		ctx->attrs.sample_interval : 1;
	unsigned long apply_interval;
	struct damos *scheme;

	ctx->passed_sample_intervals = 0;
	ctx->next_aggregation_sis = ctx->attrs.aggr_interval / sample_interval;
	ctx->next_ops_update_sis = ctx->attrs.ops_update_interval /
		sample_interval;
	ctx->next_intervals_tune_sis = ctx->next_aggregation_sis *
		ctx->attrs.intervals_goal.aggrs;

	damon_for_each_scheme(scheme, ctx) {
		apply_interval = scheme->apply_interval_us ?
			scheme->apply_interval_us : ctx->attrs.aggr_interval;
		scheme->next_apply_sis = apply_interval / sample_interval;
		damos_set_filters_default_reject(scheme);
	}
}

/*
 * The monitoring daemon that runs as a kernel thread
 */
static int kdamond_fn(void *data)
{
	struct damon_ctx *ctx = data;
	unsigned int max_nr_accesses = 0;
	unsigned long sz_limit = 0;

	pr_debug("kdamond (%d) starts\n", current->pid);

	complete(&ctx->kdamond_started);
	kdamond_init_ctx(ctx);

	if (ctx->ops.init)
		ctx->ops.init(ctx);
	ctx->regions_score_histogram = kmalloc_array(DAMOS_MAX_SCORE + 1,
			sizeof(*ctx->regions_score_histogram), GFP_KERNEL);
	if (!ctx->regions_score_histogram)
		goto done;

	sz_limit = damon_region_sz_limit(ctx);

	while (!kdamond_need_stop(ctx)) {
		/*
		 * ctx->attrs and ctx->next_{aggregation,ops_update}_sis could
		 * be changed from kdamond_call().  Read the values here, and
		 * use those for this iteration.  That is, the new values that
		 * damon_set_attrs() has set are respected from the next
		 * iteration onward.
		 */
		unsigned long next_aggregation_sis = ctx->next_aggregation_sis;
		unsigned long next_ops_update_sis = ctx->next_ops_update_sis;
		unsigned long sample_interval = ctx->attrs.sample_interval;

		if (kdamond_wait_activation(ctx))
			break;

		if (ctx->ops.prepare_access_checks)
			ctx->ops.prepare_access_checks(ctx);

		kdamond_usleep(sample_interval);
		ctx->passed_sample_intervals++;

		if (ctx->ops.check_accesses)
			max_nr_accesses = ctx->ops.check_accesses(ctx);

		if (ctx->passed_sample_intervals >= next_aggregation_sis)
			kdamond_merge_regions(ctx,
					max_nr_accesses / 10,
					sz_limit);

		/*
		 * do kdamond_call() and kdamond_apply_schemes() after
		 * kdamond_merge_regions() if possible, to reduce overhead
		 */
		kdamond_call(ctx, false);
		if (ctx->maybe_corrupted)
			break;
		if (!list_empty(&ctx->schemes))
			kdamond_apply_schemes(ctx);
		else
			damos_walk_cancel(ctx);

		sample_interval = ctx->attrs.sample_interval ?
			ctx->attrs.sample_interval : 1;
		if (ctx->passed_sample_intervals >= next_aggregation_sis) {
			if (ctx->attrs.intervals_goal.aggrs &&
					ctx->passed_sample_intervals >=
					ctx->next_intervals_tune_sis) {
				/*
				 * ctx->next_aggregation_sis might be updated
				 * from kdamond_call().  In that case,
				 * damon_set_attrs(), which will be called
				 * from kdamond_tune_intervals(), may wrongly
				 * think this is in the middle of the current
				 * aggregation, and reset the aggregation
				 * information for all regions.  Then, the
				 * following kdamond_reset_aggregated() call
				 * will make the region information invalid,
				 * particularly for ->nr_accesses_bp.
				 *
				 * Reset ->next_aggregation_sis to avoid that.
				 * It will anyway be correctly updated after
				 * this 'if' clause.
				 */
				ctx->next_aggregation_sis =
					next_aggregation_sis;
				ctx->next_intervals_tune_sis +=
					ctx->attrs.aggr_samples *
					ctx->attrs.intervals_goal.aggrs;
				kdamond_tune_intervals(ctx);
				sample_interval = ctx->attrs.sample_interval ?
					ctx->attrs.sample_interval : 1;
			}
			ctx->next_aggregation_sis = next_aggregation_sis +
				ctx->attrs.aggr_interval / sample_interval;

			kdamond_reset_aggregated(ctx);
			kdamond_split_regions(ctx);
		}

		if (ctx->passed_sample_intervals >= next_ops_update_sis) {
			ctx->next_ops_update_sis = next_ops_update_sis +
				ctx->attrs.ops_update_interval /
				sample_interval;
			if (ctx->ops.update)
				ctx->ops.update(ctx);
			sz_limit = damon_region_sz_limit(ctx);
		}
	}
done:
	damon_destroy_targets(ctx);

	kfree(ctx->regions_score_histogram);
	kdamond_call(ctx, true);
	damos_walk_cancel(ctx);

	pr_debug("kdamond (%d) finishes\n", current->pid);
	mutex_lock(&ctx->kdamond_lock);
	ctx->kdamond = NULL;
	mutex_unlock(&ctx->kdamond_lock);

	mutex_lock(&damon_lock);
	nr_running_ctxs--;
	if (!nr_running_ctxs && running_exclusive_ctxs)
		running_exclusive_ctxs = false;
	mutex_unlock(&damon_lock);

	return 0;
}

static int walk_system_ram(struct resource *res, void *arg)
{
	struct damon_addr_range *a = arg;

	if (a->end - a->start < resource_size(res)) {
		a->start = res->start;
		a->end = res->end;
	}
	return 0;
}

/*
 * Find the biggest 'System RAM' resource and store its start and end address
 * in @start and @end, respectively.  If no System RAM is found, returns
 * false.
 */
static bool damon_find_biggest_system_ram(unsigned long *start,
		unsigned long *end)
{
	struct damon_addr_range arg = {};

	walk_system_ram_res(0, ULONG_MAX, &arg, walk_system_ram);
	if (arg.end <= arg.start)
		return false;

	*start = arg.start;
	*end = arg.end;
	return true;
}

/**
 * damon_set_region_biggest_system_ram_default() - Set the region of the given
 * monitoring target as requested, or biggest 'System RAM'.
 * @t:		The monitoring target to set the region.
 * @start:	The pointer to the start address of the region.
 * @end:	The pointer to the end address of the region.
 * @min_region_sz:	Minimum region size.
 *
 * This function sets the region of @t as requested by @start and @end.  If
 * the values of @start and @end are zero, however, this function finds the
 * biggest 'System RAM' resource and sets the region to cover the resource.
 * In the latter case, this function saves the start and end addresses of the
 * resource in @start and @end, respectively.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int damon_set_region_biggest_system_ram_default(struct damon_target *t,
			unsigned long *start, unsigned long *end,
			unsigned long min_region_sz)
{
	struct damon_addr_range addr_range;

	if (*start > *end)
		return -EINVAL;

	if (!*start && !*end &&
	    !damon_find_biggest_system_ram(start, end))
		return -EINVAL;

	addr_range.start = *start;
	addr_range.end = *end;
	return damon_set_regions(t, &addr_range, 1, min_region_sz);
}

/*
 * damon_moving_sum() - Calculate an inferred moving sum value.
 * @mvsum:	Inferred sum of the last @len_window values.
 * @nomvsum:	Non-moving sum of the last discrete @len_window window values.
 * @len_window:	The number of last values to take care of.
 * @new_value:	New value that will be added to the pseudo moving sum.
 *
 * Moving sum (moving average * window size) is good for handling noise, but
 * the cost of keeping past values can be high for an arbitrary window size.
 * This function implements a lightweight pseudo moving sum function that
 * doesn't keep the past window values.
 *
 * It simply assumes there was no noise in the past, and gets the no-noise
 * assumed past value to drop from @nomvsum and @len_window.  @nomvsum is a
 * non-moving sum of the last window.  For example, if @len_window is 10 and
 * we have 25 values, @nomvsum is the sum of the 11th to 20th values of the
 * 25 values.  Hence, this function simply drops @nomvsum / @len_window from
 * the given @mvsum and adds @new_value.
 *
 * For example, if @len_window is 10 and @nomvsum is 50, the last 10 values
 * for the last window could vary, e.g., 0, 10, 0, 10, 0, 10, 0, 0, 0, 20.
 * For calculating the next moving sum with a new value, we should drop 0
 * from 50 and add the new value.  However, this function assumes it got
 * value 5 for each of the last ten times.  Based on the assumption, when the
 * next value is measured, it drops the assumed past value, 5, from the
 * current sum, and adds the new value to get the updated pseudo moving sum.
 *
 * This means the value could have errors, but the errors will disappear for
 * every @len_window-aligned calls.  For example, if @len_window is 10, the
 * pseudo moving sum with the 11th to 19th values would have an error, but
 * the sum with the 20th value will not have the error.
 *
 * Return: Pseudo moving sum after getting the @new_value.
 */
static unsigned int damon_moving_sum(unsigned int mvsum, unsigned int nomvsum,
		unsigned int len_window, unsigned int new_value)
{
	return mvsum - nomvsum / len_window + new_value;
}

/**
 * damon_update_region_access_rate() - Update the access rate of a region.
 * @r:		The DAMON region to update for its access check result.
 * @accessed:	Whether the region was accessed during the last sampling
 *		interval.
 * @attrs:	The damon_attrs of the DAMON context.
 *
 * Update the access rate of a region with the region's last sampling
 * interval access check result.
 *
 * Usually this will be called by &damon_operations->check_accesses callback.
 */
void damon_update_region_access_rate(struct damon_region *r, bool accessed,
		struct damon_attrs *attrs)
{
	unsigned int len_window = 1;

	/*
	 * sample_interval can be zero, but cannot be larger than
	 * aggr_interval, owing to validation of damon_set_attrs().
	 */
	if (attrs->sample_interval)
		len_window = damon_max_nr_accesses(attrs);
	r->nr_accesses_bp = damon_moving_sum(r->nr_accesses_bp,
			r->last_nr_accesses * 10000, len_window,
			accessed ? 10000 : 0);

	if (accessed)
		r->nr_accesses++;
}
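/*
 * Example of the moving sum based update above, with @len_window of 20
 * (e.g., an aggregation interval that is 20 times the sampling interval): a
 * region that is found accessed at every sampling interval converges to, and
 * then stays at, an nr_accesses_bp of 20 * 10,000 = 200,000, matching
 * nr_accesses * 10000 at the end of the aggregation interval, since the
 * assumed past value (nomvsum / len_window = 10,000) that is dropped equals
 * the newly added value.
 */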
/**
 * damon_initialized() - Return whether DAMON is ready to be used.
 *
 * Return: true if DAMON is ready to be used, false otherwise.
 */
bool damon_initialized(void)
{
	return damon_region_cache != NULL;
}

static int __init damon_init(void)
{
	damon_region_cache = KMEM_CACHE(damon_region, 0);
	if (unlikely(!damon_region_cache)) {
		pr_err("failed to create damon_region_cache\n");
		return -ENOMEM;
	}

	return 0;
}

subsys_initcall(damon_init);

#include "tests/core-kunit.h"