// SPDX-License-Identifier: GPL-2.0
/*
 * Data Access Monitor
 *
 * Author: SeongJae Park <sj@kernel.org>
 */

#define pr_fmt(fmt) "damon: " fmt

#include <linux/damon.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/memcontrol.h>
#include <linux/mm.h>
#include <linux/psi.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/string_choices.h>

#define CREATE_TRACE_POINTS
#include <trace/events/damon.h>

#ifdef CONFIG_DAMON_KUNIT_TEST
#undef DAMON_MIN_REGION
#define DAMON_MIN_REGION 1
#endif

static DEFINE_MUTEX(damon_lock);
static int nr_running_ctxs;
static bool running_exclusive_ctxs;

static DEFINE_MUTEX(damon_ops_lock);
static struct damon_operations damon_registered_ops[NR_DAMON_OPS];

static struct kmem_cache *damon_region_cache __ro_after_init;

/* Should be called under damon_ops_lock with id smaller than NR_DAMON_OPS */
static bool __damon_is_registered_ops(enum damon_ops_id id)
{
	struct damon_operations empty_ops = {};

	if (!memcmp(&empty_ops, &damon_registered_ops[id], sizeof(empty_ops)))
		return false;
	return true;
}

/**
 * damon_is_registered_ops() - Check if a given damon_operations is registered.
 * @id:	Id of the damon_operations to check if registered.
 *
 * Return: true if the ops is set, false otherwise.
 */
bool damon_is_registered_ops(enum damon_ops_id id)
{
	bool registered;

	if (id >= NR_DAMON_OPS)
		return false;
	mutex_lock(&damon_ops_lock);
	registered = __damon_is_registered_ops(id);
	mutex_unlock(&damon_ops_lock);
	return registered;
}

/**
 * damon_register_ops() - Register a monitoring operations set to DAMON.
 * @ops:	monitoring operations set to register.
 *
 * This function registers a monitoring operations set of valid &struct
 * damon_operations->id so that others can find and use them later.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int damon_register_ops(struct damon_operations *ops)
{
	int err = 0;

	if (ops->id >= NR_DAMON_OPS)
		return -EINVAL;

	mutex_lock(&damon_ops_lock);
	/* Fail for already registered ops */
	if (__damon_is_registered_ops(ops->id))
		err = -EINVAL;
	else
		damon_registered_ops[ops->id] = *ops;
	mutex_unlock(&damon_ops_lock);
	return err;
}

/**
 * damon_select_ops() - Select a monitoring operations set to use with the
 * context.
 * @ctx:	monitoring context to use the operations.
 * @id:	id of the registered monitoring operations to select.
 *
 * This function finds the registered monitoring operations set of @id and
 * makes @ctx use it.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int damon_select_ops(struct damon_ctx *ctx, enum damon_ops_id id)
{
	int err = 0;

	if (id >= NR_DAMON_OPS)
		return -EINVAL;

	mutex_lock(&damon_ops_lock);
	if (!__damon_is_registered_ops(id))
		err = -EINVAL;
	else
		ctx->ops = damon_registered_ops[id];
	mutex_unlock(&damon_ops_lock);
	return err;
}
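/*
 * Example (illustrative sketch, not part of the build): registering and
 * selecting a monitoring operations set.  The callbacks of 'my_ops' are
 * omitted here; a real set must use an id that no other set has registered,
 * and fill the operations it supports.
 *
 *	static struct damon_operations my_ops = {
 *		.id = DAMON_OPS_VADDR,
 *		// ... monitoring primitive callbacks ...
 *	};
 *
 *	err = damon_register_ops(&my_ops);
 *	if (!err)
 *		err = damon_select_ops(ctx, DAMON_OPS_VADDR);
 */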
/*
 * Construct a damon_region struct
 *
 * Returns the pointer to the new struct if success, or NULL otherwise
 */
struct damon_region *damon_new_region(unsigned long start, unsigned long end)
{
	struct damon_region *region;

	region = kmem_cache_alloc(damon_region_cache, GFP_KERNEL);
	if (!region)
		return NULL;

	region->ar.start = start;
	region->ar.end = end;
	region->nr_accesses = 0;
	region->nr_accesses_bp = 0;
	INIT_LIST_HEAD(&region->list);

	region->age = 0;
	region->last_nr_accesses = 0;

	return region;
}

void damon_add_region(struct damon_region *r, struct damon_target *t)
{
	list_add_tail(&r->list, &t->regions_list);
	t->nr_regions++;
}

static void damon_del_region(struct damon_region *r, struct damon_target *t)
{
	list_del(&r->list);
	t->nr_regions--;
}

static void damon_free_region(struct damon_region *r)
{
	kmem_cache_free(damon_region_cache, r);
}

void damon_destroy_region(struct damon_region *r, struct damon_target *t)
{
	damon_del_region(r, t);
	damon_free_region(r);
}

/*
 * Check whether a region is intersecting an address range
 *
 * Returns true if it is.
 */
static bool damon_intersect(struct damon_region *r,
		struct damon_addr_range *re)
{
	return !(r->ar.end <= re->start || re->end <= r->ar.start);
}

/*
 * Fill holes in regions with new regions.
 */
static int damon_fill_regions_holes(struct damon_region *first,
		struct damon_region *last, struct damon_target *t)
{
	struct damon_region *r = first;

	damon_for_each_region_from(r, t) {
		struct damon_region *next, *newr;

		if (r == last)
			break;
		next = damon_next_region(r);
		if (r->ar.end != next->ar.start) {
			newr = damon_new_region(r->ar.end, next->ar.start);
			if (!newr)
				return -ENOMEM;
			damon_insert_region(newr, r, next, t);
		}
	}
	return 0;
}

/*
 * damon_set_regions() - Set regions of a target for given address ranges.
 * @t:		the given target.
 * @ranges:	array of new monitoring target ranges.
 * @nr_ranges:	length of @ranges.
 * @min_sz_region:	minimum region size.
 *
 * This function adds new regions to, or modifies existing regions of, a
 * monitoring target to fit in specific ranges.
 *
 * Return: 0 if success, or negative error code otherwise.
 */
int damon_set_regions(struct damon_target *t, struct damon_addr_range *ranges,
		unsigned int nr_ranges, unsigned long min_sz_region)
{
	struct damon_region *r, *next;
	unsigned int i;
	int err;

	/* Remove regions which are not in the new ranges */
	damon_for_each_region_safe(r, next, t) {
		for (i = 0; i < nr_ranges; i++) {
			if (damon_intersect(r, &ranges[i]))
				break;
		}
		if (i == nr_ranges)
			damon_destroy_region(r, t);
	}

	r = damon_first_region(t);
	/* Add new regions or resize existing regions to fit in the ranges */
	for (i = 0; i < nr_ranges; i++) {
		struct damon_region *first = NULL, *last, *newr;
		struct damon_addr_range *range;

		range = &ranges[i];
		/* Get the first/last regions intersecting with the range */
		damon_for_each_region_from(r, t) {
			if (damon_intersect(r, range)) {
				if (!first)
					first = r;
				last = r;
			}
			if (r->ar.start >= range->end)
				break;
		}
		if (!first) {
			/* no region intersects with this range */
			newr = damon_new_region(
					ALIGN_DOWN(range->start,
						min_sz_region),
					ALIGN(range->end, min_sz_region));
			if (!newr)
				return -ENOMEM;
			damon_insert_region(newr, damon_prev_region(r), r, t);
		} else {
			/* resize intersecting regions to fit in this range */
			first->ar.start = ALIGN_DOWN(range->start,
					min_sz_region);
			last->ar.end = ALIGN(range->end, min_sz_region);

			/* fill possible holes in the range */
			err = damon_fill_regions_holes(first, last, t);
			if (err)
				return err;
		}
	}
	return 0;
}
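/*
 * Example (illustrative sketch): setting the regions of a target to two
 * address ranges.  The addresses are arbitrary; damon_set_regions() aligns
 * the resulting region boundaries to the given minimum region size.
 *
 *	struct damon_addr_range ranges[] = {
 *		{ .start = 0x100000, .end = 0x200000 },
 *		{ .start = 0x400000, .end = 0x500000 },
 *	};
 *
 *	err = damon_set_regions(t, ranges, ARRAY_SIZE(ranges),
 *			DAMON_MIN_REGION);
 */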
struct damos_filter *damos_new_filter(enum damos_filter_type type,
		bool matching, bool allow)
{
	struct damos_filter *filter;

	filter = kmalloc(sizeof(*filter), GFP_KERNEL);
	if (!filter)
		return NULL;
	filter->type = type;
	filter->matching = matching;
	filter->allow = allow;
	INIT_LIST_HEAD(&filter->list);
	return filter;
}

/**
 * damos_filter_for_ops() - Return whether the filter is an ops-handled one.
 * @type:	type of the filter.
 *
 * Return: true if the filter of @type needs to be handled by the ops layer,
 * false otherwise.
 */
bool damos_filter_for_ops(enum damos_filter_type type)
{
	switch (type) {
	case DAMOS_FILTER_TYPE_ADDR:
	case DAMOS_FILTER_TYPE_TARGET:
		return false;
	default:
		break;
	}
	return true;
}

void damos_add_filter(struct damos *s, struct damos_filter *f)
{
	if (damos_filter_for_ops(f->type))
		list_add_tail(&f->list, &s->ops_filters);
	else
		list_add_tail(&f->list, &s->filters);
}

static void damos_del_filter(struct damos_filter *f)
{
	list_del(&f->list);
}

static void damos_free_filter(struct damos_filter *f)
{
	kfree(f);
}

void damos_destroy_filter(struct damos_filter *f)
{
	damos_del_filter(f);
	damos_free_filter(f);
}
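/*
 * Example (illustrative sketch): installing a filter that rejects anonymous
 * pages.  DAMOS_FILTER_TYPE_ANON is not a core-handled type, so
 * damos_filter_for_ops() returns true for it and damos_add_filter() routes
 * it to &damos->ops_filters.
 *
 *	struct damos_filter *f;
 *
 *	f = damos_new_filter(DAMOS_FILTER_TYPE_ANON, true, false);
 *	if (f)
 *		damos_add_filter(scheme, f);
 */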
struct damos_quota_goal *damos_new_quota_goal(
		enum damos_quota_goal_metric metric,
		unsigned long target_value)
{
	struct damos_quota_goal *goal;

	goal = kmalloc(sizeof(*goal), GFP_KERNEL);
	if (!goal)
		return NULL;
	goal->metric = metric;
	goal->target_value = target_value;
	INIT_LIST_HEAD(&goal->list);
	return goal;
}

void damos_add_quota_goal(struct damos_quota *q, struct damos_quota_goal *g)
{
	list_add_tail(&g->list, &q->goals);
}

static void damos_del_quota_goal(struct damos_quota_goal *g)
{
	list_del(&g->list);
}

static void damos_free_quota_goal(struct damos_quota_goal *g)
{
	kfree(g);
}

void damos_destroy_quota_goal(struct damos_quota_goal *g)
{
	damos_del_quota_goal(g);
	damos_free_quota_goal(g);
}

/* initialize fields of @quota that normally API users wouldn't set */
static struct damos_quota *damos_quota_init(struct damos_quota *quota)
{
	quota->esz = 0;
	quota->total_charged_sz = 0;
	quota->total_charged_ns = 0;
	quota->charged_sz = 0;
	quota->charged_from = 0;
	quota->charge_target_from = NULL;
	quota->charge_addr_from = 0;
	quota->esz_bp = 0;
	return quota;
}

struct damos *damon_new_scheme(struct damos_access_pattern *pattern,
			enum damos_action action,
			unsigned long apply_interval_us,
			struct damos_quota *quota,
			struct damos_watermarks *wmarks,
			int target_nid)
{
	struct damos *scheme;

	scheme = kmalloc(sizeof(*scheme), GFP_KERNEL);
	if (!scheme)
		return NULL;
	scheme->pattern = *pattern;
	scheme->action = action;
	scheme->apply_interval_us = apply_interval_us;
	/*
	 * next_apply_sis will be set when kdamond starts.  While kdamond is
	 * running, it will also be updated when the scheme is added to the
	 * DAMON context, or when damon_attrs are updated.
	 */
	scheme->next_apply_sis = 0;
	scheme->walk_completed = false;
	INIT_LIST_HEAD(&scheme->filters);
	INIT_LIST_HEAD(&scheme->ops_filters);
	scheme->stat = (struct damos_stat){};
	INIT_LIST_HEAD(&scheme->list);

	scheme->quota = *(damos_quota_init(quota));
	/* quota.goals should be separately set by caller */
	INIT_LIST_HEAD(&scheme->quota.goals);

	scheme->wmarks = *wmarks;
	scheme->wmarks.activated = true;

	scheme->migrate_dests = (struct damos_migrate_dests){};
	scheme->target_nid = target_nid;

	return scheme;
}
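/*
 * Example (illustrative sketch): a scheme that pages out regions that were
 * not accessed for at least ten aggregation intervals, under a best-effort
 * size quota.  The pattern and quota values are arbitrary.
 *
 *	struct damos_access_pattern pattern = {
 *		.min_sz_region = DAMON_MIN_REGION,
 *		.max_sz_region = ULONG_MAX,
 *		.min_nr_accesses = 0,
 *		.max_nr_accesses = 0,
 *		.min_age_region = 10,
 *		.max_age_region = UINT_MAX,
 *	};
 *	struct damos_quota quota = { .sz = SZ_256M, .reset_interval = 1000 };
 *	struct damos_watermarks wmarks = { .metric = DAMOS_WMARK_NONE };
 *	struct damos *s;
 *
 *	s = damon_new_scheme(&pattern, DAMOS_PAGEOUT, 0, &quota, &wmarks,
 *			NUMA_NO_NODE);
 */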
static void damos_set_next_apply_sis(struct damos *s, struct damon_ctx *ctx)
{
	unsigned long sample_interval = ctx->attrs.sample_interval ?
		ctx->attrs.sample_interval : 1;
	unsigned long apply_interval = s->apply_interval_us ?
		s->apply_interval_us : ctx->attrs.aggr_interval;

	s->next_apply_sis = ctx->passed_sample_intervals +
		apply_interval / sample_interval;
}

void damon_add_scheme(struct damon_ctx *ctx, struct damos *s)
{
	list_add_tail(&s->list, &ctx->schemes);
	damos_set_next_apply_sis(s, ctx);
}

static void damon_del_scheme(struct damos *s)
{
	list_del(&s->list);
}

static void damon_free_scheme(struct damos *s)
{
	kfree(s);
}

void damon_destroy_scheme(struct damos *s)
{
	struct damos_quota_goal *g, *g_next;
	struct damos_filter *f, *next;

	damos_for_each_quota_goal_safe(g, g_next, &s->quota)
		damos_destroy_quota_goal(g);

	damos_for_each_filter_safe(f, next, s)
		damos_destroy_filter(f);

	damos_for_each_ops_filter_safe(f, next, s)
		damos_destroy_filter(f);

	kfree(s->migrate_dests.node_id_arr);
	kfree(s->migrate_dests.weight_arr);
	damon_del_scheme(s);
	damon_free_scheme(s);
}

/*
 * Construct a damon_target struct
 *
 * Returns the pointer to the new struct if success, or NULL otherwise
 */
struct damon_target *damon_new_target(void)
{
	struct damon_target *t;

	t = kmalloc(sizeof(*t), GFP_KERNEL);
	if (!t)
		return NULL;

	t->pid = NULL;
	t->nr_regions = 0;
	INIT_LIST_HEAD(&t->regions_list);
	INIT_LIST_HEAD(&t->list);
	t->obsolete = false;

	return t;
}

void damon_add_target(struct damon_ctx *ctx, struct damon_target *t)
{
	list_add_tail(&t->list, &ctx->adaptive_targets);
}

bool damon_targets_empty(struct damon_ctx *ctx)
{
	return list_empty(&ctx->adaptive_targets);
}

static void damon_del_target(struct damon_target *t)
{
	list_del(&t->list);
}

void damon_free_target(struct damon_target *t)
{
	struct damon_region *r, *next;

	damon_for_each_region_safe(r, next, t)
		damon_free_region(r);
	kfree(t);
}

void damon_destroy_target(struct damon_target *t, struct damon_ctx *ctx)
{
	if (ctx && ctx->ops.cleanup_target)
		ctx->ops.cleanup_target(t);

	damon_del_target(t);
	damon_free_target(t);
}

unsigned int damon_nr_regions(struct damon_target *t)
{
	return t->nr_regions;
}

struct damon_ctx *damon_new_ctx(void)
{
	struct damon_ctx *ctx;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return NULL;

	init_completion(&ctx->kdamond_started);

	ctx->attrs.sample_interval = 5 * 1000;
	ctx->attrs.aggr_interval = 100 * 1000;
	ctx->attrs.ops_update_interval = 60 * 1000 * 1000;

	ctx->passed_sample_intervals = 0;
	/* These will be set from kdamond_init_ctx() */
	ctx->next_aggregation_sis = 0;
	ctx->next_ops_update_sis = 0;

	mutex_init(&ctx->kdamond_lock);
	INIT_LIST_HEAD(&ctx->call_controls);
	mutex_init(&ctx->call_controls_lock);
	mutex_init(&ctx->walk_control_lock);

	ctx->attrs.min_nr_regions = 10;
	ctx->attrs.max_nr_regions = 1000;

	ctx->addr_unit = 1;
	ctx->min_sz_region = DAMON_MIN_REGION;

	INIT_LIST_HEAD(&ctx->adaptive_targets);
	INIT_LIST_HEAD(&ctx->schemes);

	return ctx;
}

static void damon_destroy_targets(struct damon_ctx *ctx)
{
	struct damon_target *t, *next_t;

	damon_for_each_target_safe(t, next_t, ctx)
		damon_destroy_target(t, ctx);
}

void damon_destroy_ctx(struct damon_ctx *ctx)
{
	struct damos *s, *next_s;
	damon_destroy_targets(ctx);

	damon_for_each_scheme_safe(s, next_s, ctx)
		damon_destroy_scheme(s);

	kfree(ctx);
}

static bool damon_attrs_equals(const struct damon_attrs *attrs1,
		const struct damon_attrs *attrs2)
{
	const struct damon_intervals_goal *ig1 = &attrs1->intervals_goal;
	const struct damon_intervals_goal *ig2 = &attrs2->intervals_goal;

	return attrs1->sample_interval == attrs2->sample_interval &&
		attrs1->aggr_interval == attrs2->aggr_interval &&
		attrs1->ops_update_interval == attrs2->ops_update_interval &&
		attrs1->min_nr_regions == attrs2->min_nr_regions &&
		attrs1->max_nr_regions == attrs2->max_nr_regions &&
		ig1->access_bp == ig2->access_bp &&
		ig1->aggrs == ig2->aggrs &&
		ig1->min_sample_us == ig2->min_sample_us &&
		ig1->max_sample_us == ig2->max_sample_us;
}

static unsigned int damon_age_for_new_attrs(unsigned int age,
		struct damon_attrs *old_attrs, struct damon_attrs *new_attrs)
{
	return age * old_attrs->aggr_interval / new_attrs->aggr_interval;
}

/* convert access ratio in bp (per 10,000) to nr_accesses */
static unsigned int damon_accesses_bp_to_nr_accesses(
		unsigned int accesses_bp, struct damon_attrs *attrs)
{
	return accesses_bp * damon_max_nr_accesses(attrs) / 10000;
}

/*
 * Convert nr_accesses to access ratio in bp (per 10,000).
 *
 * Callers should ensure attrs.aggr_interval is not zero, like
 * damon_update_monitoring_results() does.  Otherwise, divide-by-zero would
 * happen.
 */
static unsigned int damon_nr_accesses_to_accesses_bp(
		unsigned int nr_accesses, struct damon_attrs *attrs)
{
	return nr_accesses * 10000 / damon_max_nr_accesses(attrs);
}

static unsigned int damon_nr_accesses_for_new_attrs(unsigned int nr_accesses,
		struct damon_attrs *old_attrs, struct damon_attrs *new_attrs)
{
	return damon_accesses_bp_to_nr_accesses(
			damon_nr_accesses_to_accesses_bp(
				nr_accesses, old_attrs),
			new_attrs);
}

static void damon_update_monitoring_result(struct damon_region *r,
		struct damon_attrs *old_attrs, struct damon_attrs *new_attrs,
		bool aggregating)
{
	if (!aggregating) {
		r->nr_accesses = damon_nr_accesses_for_new_attrs(
				r->nr_accesses, old_attrs, new_attrs);
		r->nr_accesses_bp = r->nr_accesses * 10000;
	} else {
		/*
		 * if this is called in the middle of the aggregation, reset
		 * the aggregations we made so far for this aggregation
		 * interval.  In other words, make the status like
		 * kdamond_reset_aggregated() is called.
		 */
		r->last_nr_accesses = damon_nr_accesses_for_new_attrs(
				r->last_nr_accesses, old_attrs, new_attrs);
		r->nr_accesses_bp = r->last_nr_accesses * 10000;
		r->nr_accesses = 0;
	}
	r->age = damon_age_for_new_attrs(r->age, old_attrs, new_attrs);
}

/*
 * region->nr_accesses is the number of sampling intervals in the last
 * aggregation interval in which access to the region was found, and
 * region->age is the number of aggregation intervals that its access pattern
 * has maintained.  For this reason, the real meaning of the two fields
 * depends on the current sampling interval and aggregation interval.  This
 * function updates ->nr_accesses and ->age of the given damon_ctx's regions
 * for new damon_attrs.
 */
static void damon_update_monitoring_results(struct damon_ctx *ctx,
		struct damon_attrs *new_attrs, bool aggregating)
{
	struct damon_attrs *old_attrs = &ctx->attrs;
	struct damon_target *t;
	struct damon_region *r;

	/* if any interval is zero, simply skip the conversion */
	if (!old_attrs->sample_interval || !old_attrs->aggr_interval ||
			!new_attrs->sample_interval ||
			!new_attrs->aggr_interval)
		return;

	damon_for_each_target(t, ctx)
		damon_for_each_region(r, t)
			damon_update_monitoring_result(
					r, old_attrs, new_attrs, aggregating);
}
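/*
 * For example, under the default attrs that damon_new_ctx() sets above (5 ms
 * sampling, 100 ms aggregation), damon_max_nr_accesses() is 20, so
 * nr_accesses == 10 converts to 5,000 bp.  If the intervals then change to
 * 10 ms / 400 ms, the same 5,000 bp converts back to nr_accesses == 20 of a
 * new maximum of 40, so the recorded access ratio survives the attrs update.
 */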
/*
 * damon_valid_intervals_goal() - return if the intervals goal of @attrs is
 * valid.
 */
static bool damon_valid_intervals_goal(struct damon_attrs *attrs)
{
	struct damon_intervals_goal *goal = &attrs->intervals_goal;

	/* tuning is disabled */
	if (!goal->aggrs)
		return true;
	if (goal->min_sample_us > goal->max_sample_us)
		return false;
	if (attrs->sample_interval < goal->min_sample_us ||
			goal->max_sample_us < attrs->sample_interval)
		return false;
	return true;
}

/**
 * damon_set_attrs() - Set attributes for the monitoring.
 * @ctx:	monitoring context
 * @attrs:	monitoring attributes
 *
 * This function should be called while the kdamond is not running and an
 * access check results aggregation is not ongoing (e.g., from damon_call()).
 *
 * Every time interval is in micro-seconds.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int damon_set_attrs(struct damon_ctx *ctx, struct damon_attrs *attrs)
{
	unsigned long sample_interval = attrs->sample_interval ?
		attrs->sample_interval : 1;
	struct damos *s;
	bool aggregating = ctx->passed_sample_intervals <
		ctx->next_aggregation_sis;

	if (!damon_valid_intervals_goal(attrs))
		return -EINVAL;

	if (attrs->min_nr_regions < 3)
		return -EINVAL;
	if (attrs->min_nr_regions > attrs->max_nr_regions)
		return -EINVAL;
	if (attrs->sample_interval > attrs->aggr_interval)
		return -EINVAL;

	/* calls from outside the core don't set this */
	if (!attrs->aggr_samples)
		attrs->aggr_samples = attrs->aggr_interval / sample_interval;

	ctx->next_aggregation_sis = ctx->passed_sample_intervals +
		attrs->aggr_interval / sample_interval;
	ctx->next_ops_update_sis = ctx->passed_sample_intervals +
		attrs->ops_update_interval / sample_interval;

	damon_update_monitoring_results(ctx, attrs, aggregating);
	ctx->attrs = *attrs;

	damon_for_each_scheme(s, ctx)
		damos_set_next_apply_sis(s, ctx);

	return 0;
}
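/*
 * Example (illustrative sketch): doubling the sampling and aggregation
 * intervals of a context.  All intervals are in microseconds, and the call
 * should be made from a kdamond-safe context such as a damon_call()
 * callback.
 *
 *	struct damon_attrs attrs = ctx->attrs;
 *
 *	attrs.sample_interval *= 2;
 *	attrs.aggr_interval *= 2;
 *	err = damon_set_attrs(ctx, &attrs);
 */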
/**
 * damon_set_schemes() - Set data access monitoring based operation schemes.
 * @ctx:	monitoring context
 * @schemes:	array of the schemes
 * @nr_schemes:	number of entries in @schemes
 *
 * This function should not be called while the kdamond of the context is
 * running.
 */
void damon_set_schemes(struct damon_ctx *ctx, struct damos **schemes,
		ssize_t nr_schemes)
{
	struct damos *s, *next;
	ssize_t i;

	damon_for_each_scheme_safe(s, next, ctx)
		damon_destroy_scheme(s);
	for (i = 0; i < nr_schemes; i++)
		damon_add_scheme(ctx, schemes[i]);
}

static struct damos_quota_goal *damos_nth_quota_goal(
		int n, struct damos_quota *q)
{
	struct damos_quota_goal *goal;
	int i = 0;

	damos_for_each_quota_goal(goal, q) {
		if (i++ == n)
			return goal;
	}
	return NULL;
}

static void damos_commit_quota_goal_union(
		struct damos_quota_goal *dst, struct damos_quota_goal *src)
{
	switch (dst->metric) {
	case DAMOS_QUOTA_NODE_MEM_USED_BP:
	case DAMOS_QUOTA_NODE_MEM_FREE_BP:
		dst->nid = src->nid;
		break;
	case DAMOS_QUOTA_NODE_MEMCG_USED_BP:
	case DAMOS_QUOTA_NODE_MEMCG_FREE_BP:
		dst->nid = src->nid;
		dst->memcg_id = src->memcg_id;
		break;
	default:
		break;
	}
}

static void damos_commit_quota_goal(
		struct damos_quota_goal *dst, struct damos_quota_goal *src)
{
	dst->metric = src->metric;
	dst->target_value = src->target_value;
	if (dst->metric == DAMOS_QUOTA_USER_INPUT)
		dst->current_value = src->current_value;
	/* keep last_psi_total as is, since it will be updated in next cycle */
	damos_commit_quota_goal_union(dst, src);
}

/**
 * damos_commit_quota_goals() - Commit DAMOS quota goals to another quota.
 * @dst:	The commit destination DAMOS quota.
 * @src:	The commit source DAMOS quota.
 *
 * Copies user-specified parameters for quota goals from @src to @dst.  Users
 * should use this function for quota goals-level parameters update of running
 * DAMON contexts, instead of manual in-place updates.
 *
 * This function should be called from a parameters-update safe context, like
 * damon_call().
 *
 * Return: 0 on success, negative error code otherwise.
 */
int damos_commit_quota_goals(struct damos_quota *dst, struct damos_quota *src)
{
	struct damos_quota_goal *dst_goal, *next, *src_goal, *new_goal;
	int i = 0, j = 0;

	damos_for_each_quota_goal_safe(dst_goal, next, dst) {
		src_goal = damos_nth_quota_goal(i++, src);
		if (src_goal)
			damos_commit_quota_goal(dst_goal, src_goal);
		else
			damos_destroy_quota_goal(dst_goal);
	}
	damos_for_each_quota_goal_safe(src_goal, next, src) {
		if (j++ < i)
			continue;
		new_goal = damos_new_quota_goal(
				src_goal->metric, src_goal->target_value);
		if (!new_goal)
			return -ENOMEM;
		damos_commit_quota_goal(new_goal, src_goal);
		damos_add_quota_goal(dst, new_goal);
	}
	return 0;
}

static int damos_commit_quota(struct damos_quota *dst, struct damos_quota *src)
{
	int err;

	dst->reset_interval = src->reset_interval;
	dst->ms = src->ms;
	dst->sz = src->sz;
	err = damos_commit_quota_goals(dst, src);
	if (err)
		return err;
	dst->weight_sz = src->weight_sz;
	dst->weight_nr_accesses = src->weight_nr_accesses;
	dst->weight_age = src->weight_age;
	return 0;
}

static struct damos_filter *damos_nth_filter(int n, struct damos *s)
{
	struct damos_filter *filter;
	int i = 0;

	damos_for_each_filter(filter, s) {
		if (i++ == n)
			return filter;
	}
	return NULL;
}

static struct damos_filter *damos_nth_ops_filter(int n, struct damos *s)
{
	struct damos_filter *filter;
	int i = 0;

	damos_for_each_ops_filter(filter, s) {
		if (i++ == n)
			return filter;
	}
	return NULL;
}

static void damos_commit_filter_arg(
		struct damos_filter *dst, struct damos_filter *src)
{
	switch (dst->type) {
	case DAMOS_FILTER_TYPE_MEMCG:
		dst->memcg_id = src->memcg_id;
		break;
	case DAMOS_FILTER_TYPE_ADDR:
		dst->addr_range = src->addr_range;
		break;
	case DAMOS_FILTER_TYPE_TARGET:
		dst->target_idx = src->target_idx;
		break;
	case DAMOS_FILTER_TYPE_HUGEPAGE_SIZE:
		dst->sz_range = src->sz_range;
		break;
	default:
		break;
	}
}

static void damos_commit_filter(
		struct damos_filter *dst, struct damos_filter *src)
{
	dst->type = src->type;
	dst->matching = src->matching;
	dst->allow = src->allow;
	damos_commit_filter_arg(dst, src);
}

static int damos_commit_core_filters(struct damos *dst, struct damos *src)
{
	struct damos_filter *dst_filter, *next, *src_filter, *new_filter;
	int i = 0, j = 0;

	damos_for_each_filter_safe(dst_filter, next, dst) {
		src_filter = damos_nth_filter(i++, src);
		if (src_filter)
			damos_commit_filter(dst_filter, src_filter);
		else
			damos_destroy_filter(dst_filter);
	}

	damos_for_each_filter_safe(src_filter, next, src) {
		if (j++ < i)
			continue;

		new_filter = damos_new_filter(
				src_filter->type, src_filter->matching,
				src_filter->allow);
		if (!new_filter)
			return -ENOMEM;
		damos_commit_filter_arg(new_filter, src_filter);
		damos_add_filter(dst, new_filter);
	}
	return 0;
}

static int damos_commit_ops_filters(struct damos *dst, struct damos *src)
{
	struct damos_filter *dst_filter, *next, *src_filter, *new_filter;
	int i = 0, j = 0;

	damos_for_each_ops_filter_safe(dst_filter, next, dst) {
		src_filter = damos_nth_ops_filter(i++, src);
		if (src_filter)
			damos_commit_filter(dst_filter, src_filter);
		else
			damos_destroy_filter(dst_filter);
	}

	damos_for_each_ops_filter_safe(src_filter, next, src) {
		if (j++ < i)
			continue;

		new_filter = damos_new_filter(
				src_filter->type, src_filter->matching,
				src_filter->allow);
		if (!new_filter)
			return -ENOMEM;
		damos_commit_filter_arg(new_filter, src_filter);
		damos_add_filter(dst, new_filter);
	}
	return 0;
}

/**
 * damos_filters_default_reject() - decide whether to reject memory that
 * didn't match with any given filter.
 * @filters:	Given DAMOS filters of a group.
 */
static bool damos_filters_default_reject(struct list_head *filters)
{
	struct damos_filter *last_filter;

	if (list_empty(filters))
		return false;
	last_filter = list_last_entry(filters, struct damos_filter, list);
	return last_filter->allow;
}

static void damos_set_filters_default_reject(struct damos *s)
{
	if (!list_empty(&s->ops_filters))
		s->core_filters_default_reject = false;
	else
		s->core_filters_default_reject =
			damos_filters_default_reject(&s->filters);
	s->ops_filters_default_reject =
		damos_filters_default_reject(&s->ops_filters);
}
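/*
 * For example, if the filters list of a group ends with an "allow" filter,
 * regions that match none of the filters are rejected by default; if the
 * list ends with a "reject" filter, or is empty, unmatched regions are
 * allowed by default.
 */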
static int damos_commit_dests(struct damos_migrate_dests *dst,
		struct damos_migrate_dests *src)
{
	if (dst->nr_dests != src->nr_dests) {
		kfree(dst->node_id_arr);
		kfree(dst->weight_arr);

		dst->node_id_arr = kmalloc_array(src->nr_dests,
				sizeof(*dst->node_id_arr), GFP_KERNEL);
		if (!dst->node_id_arr) {
			dst->weight_arr = NULL;
			return -ENOMEM;
		}

		dst->weight_arr = kmalloc_array(src->nr_dests,
				sizeof(*dst->weight_arr), GFP_KERNEL);
		if (!dst->weight_arr) {
			/* ->node_id_arr will be freed by scheme destruction */
			return -ENOMEM;
		}
	}

	dst->nr_dests = src->nr_dests;
	for (int i = 0; i < src->nr_dests; i++) {
		dst->node_id_arr[i] = src->node_id_arr[i];
		dst->weight_arr[i] = src->weight_arr[i];
	}

	return 0;
}

static int damos_commit_filters(struct damos *dst, struct damos *src)
{
	int err;

	err = damos_commit_core_filters(dst, src);
	if (err)
		return err;
	err = damos_commit_ops_filters(dst, src);
	if (err)
		return err;
	damos_set_filters_default_reject(dst);
	return 0;
}

static struct damos *damon_nth_scheme(int n, struct damon_ctx *ctx)
{
	struct damos *s;
	int i = 0;

	damon_for_each_scheme(s, ctx) {
		if (i++ == n)
			return s;
	}
	return NULL;
}

static int damos_commit(struct damos *dst, struct damos *src)
{
	int err;

	dst->pattern = src->pattern;
	dst->action = src->action;
	dst->apply_interval_us = src->apply_interval_us;

	err = damos_commit_quota(&dst->quota, &src->quota);
	if (err)
		return err;

	dst->wmarks = src->wmarks;
	dst->target_nid = src->target_nid;

	err = damos_commit_dests(&dst->migrate_dests, &src->migrate_dests);
	if (err)
		return err;

	err = damos_commit_filters(dst, src);
	return err;
}

static int damon_commit_schemes(struct damon_ctx *dst, struct damon_ctx *src)
{
	struct damos *dst_scheme, *next, *src_scheme, *new_scheme;
	int i = 0, j = 0, err;

	damon_for_each_scheme_safe(dst_scheme, next, dst) {
		src_scheme = damon_nth_scheme(i++, src);
		if (src_scheme) {
			err = damos_commit(dst_scheme, src_scheme);
			if (err)
				return err;
		} else {
			damon_destroy_scheme(dst_scheme);
		}
	}

	damon_for_each_scheme_safe(src_scheme, next, src) {
		if (j++ < i)
			continue;
		new_scheme = damon_new_scheme(&src_scheme->pattern,
				src_scheme->action,
				src_scheme->apply_interval_us,
				&src_scheme->quota, &src_scheme->wmarks,
				NUMA_NO_NODE);
		if (!new_scheme)
			return -ENOMEM;
		err = damos_commit(new_scheme, src_scheme);
		if (err) {
			damon_destroy_scheme(new_scheme);
			return err;
		}
		damon_add_scheme(dst, new_scheme);
	}
	return 0;
}

static struct damon_target *damon_nth_target(int n, struct damon_ctx *ctx)
{
	struct damon_target *t;
	int i = 0;

	damon_for_each_target(t, ctx) {
		if (i++ == n)
			return t;
	}
	return NULL;
}

/*
 * The caller should ensure the regions of @src are
 * 1. valid (end >= start) and
 * 2. sorted by starting address.
 *
 * If @src has no region, @dst keeps current regions.
 */
static int damon_commit_target_regions(struct damon_target *dst,
		struct damon_target *src, unsigned long src_min_sz_region)
{
	struct damon_region *src_region;
	struct damon_addr_range *ranges;
	int i = 0, err;

	damon_for_each_region(src_region, src)
		i++;
	if (!i)
		return 0;

	ranges = kmalloc_array(i, sizeof(*ranges), GFP_KERNEL | __GFP_NOWARN);
	if (!ranges)
		return -ENOMEM;
	i = 0;
	damon_for_each_region(src_region, src)
		ranges[i++] = src_region->ar;
	err = damon_set_regions(dst, ranges, i, src_min_sz_region);
	kfree(ranges);
	return err;
}

static int damon_commit_target(
		struct damon_target *dst, bool dst_has_pid,
		struct damon_target *src, bool src_has_pid,
		unsigned long src_min_sz_region)
{
	int err;

	err = damon_commit_target_regions(dst, src, src_min_sz_region);
	if (err)
		return err;
	if (dst_has_pid)
		put_pid(dst->pid);
	if (src_has_pid)
		get_pid(src->pid);
	dst->pid = src->pid;
	return 0;
}

static int damon_commit_targets(
		struct damon_ctx *dst, struct damon_ctx *src)
{
	struct damon_target *dst_target, *next, *src_target, *new_target;
	int i = 0, j = 0, err;

	damon_for_each_target_safe(dst_target, next, dst) {
		src_target = damon_nth_target(i++, src);
		/*
		 * If the src target is obsolete, do not commit the parameters
		 * to the dst target, and further remove the dst target.
		 */
1190 */ 1191 if (src_target && !src_target->obsolete) { 1192 err = damon_commit_target( 1193 dst_target, damon_target_has_pid(dst), 1194 src_target, damon_target_has_pid(src), 1195 src->min_sz_region); 1196 if (err) 1197 return err; 1198 } else { 1199 struct damos *s; 1200 1201 damon_destroy_target(dst_target, dst); 1202 damon_for_each_scheme(s, dst) { 1203 if (s->quota.charge_target_from == dst_target) { 1204 s->quota.charge_target_from = NULL; 1205 s->quota.charge_addr_from = 0; 1206 } 1207 } 1208 } 1209 } 1210 1211 damon_for_each_target_safe(src_target, next, src) { 1212 if (j++ < i) 1213 continue; 1214 /* target to remove has no matching dst */ 1215 if (src_target->obsolete) 1216 return -EINVAL; 1217 new_target = damon_new_target(); 1218 if (!new_target) 1219 return -ENOMEM; 1220 err = damon_commit_target(new_target, false, 1221 src_target, damon_target_has_pid(src), 1222 src->min_sz_region); 1223 if (err) { 1224 damon_destroy_target(new_target, NULL); 1225 return err; 1226 } 1227 damon_add_target(dst, new_target); 1228 } 1229 return 0; 1230 } 1231 1232 /** 1233 * damon_commit_ctx() - Commit parameters of a DAMON context to another. 1234 * @dst: The commit destination DAMON context. 1235 * @src: The commit source DAMON context. 1236 * 1237 * This function copies user-specified parameters from @src to @dst and update 1238 * the internal status and results accordingly. Users should use this function 1239 * for context-level parameters update of running context, instead of manual 1240 * in-place updates. 1241 * 1242 * This function should be called from parameters-update safe context, like 1243 * damon_call(). 1244 */ 1245 int damon_commit_ctx(struct damon_ctx *dst, struct damon_ctx *src) 1246 { 1247 int err; 1248 1249 err = damon_commit_schemes(dst, src); 1250 if (err) 1251 return err; 1252 err = damon_commit_targets(dst, src); 1253 if (err) 1254 return err; 1255 /* 1256 * schemes and targets should be updated first, since 1257 * 1. damon_set_attrs() updates monitoring results of targets and 1258 * next_apply_sis of schemes, and 1259 * 2. ops update should be done after pid handling is done (target 1260 * committing require putting pids). 1261 */ 1262 if (!damon_attrs_equals(&dst->attrs, &src->attrs)) { 1263 err = damon_set_attrs(dst, &src->attrs); 1264 if (err) 1265 return err; 1266 } 1267 dst->ops = src->ops; 1268 dst->addr_unit = src->addr_unit; 1269 dst->min_sz_region = src->min_sz_region; 1270 1271 return 0; 1272 } 1273 1274 /** 1275 * damon_nr_running_ctxs() - Return number of currently running contexts. 1276 */ 1277 int damon_nr_running_ctxs(void) 1278 { 1279 int nr_ctxs; 1280 1281 mutex_lock(&damon_lock); 1282 nr_ctxs = nr_running_ctxs; 1283 mutex_unlock(&damon_lock); 1284 1285 return nr_ctxs; 1286 } 1287 1288 /* Returns the size upper limit for each monitoring region */ 1289 static unsigned long damon_region_sz_limit(struct damon_ctx *ctx) 1290 { 1291 struct damon_target *t; 1292 struct damon_region *r; 1293 unsigned long sz = 0; 1294 1295 damon_for_each_target(t, ctx) { 1296 damon_for_each_region(r, t) 1297 sz += damon_sz_region(r); 1298 } 1299 1300 if (ctx->attrs.min_nr_regions) 1301 sz /= ctx->attrs.min_nr_regions; 1302 if (sz < ctx->min_sz_region) 1303 sz = ctx->min_sz_region; 1304 1305 return sz; 1306 } 1307 1308 static int kdamond_fn(void *data); 1309 1310 /* 1311 * __damon_start() - Starts monitoring with given context. 1312 * @ctx: monitoring context 1313 * 1314 * This function should be called while damon_lock is hold. 
1315 * 1316 * Return: 0 on success, negative error code otherwise. 1317 */ 1318 static int __damon_start(struct damon_ctx *ctx) 1319 { 1320 int err = -EBUSY; 1321 1322 mutex_lock(&ctx->kdamond_lock); 1323 if (!ctx->kdamond) { 1324 err = 0; 1325 reinit_completion(&ctx->kdamond_started); 1326 ctx->kdamond = kthread_run(kdamond_fn, ctx, "kdamond.%d", 1327 nr_running_ctxs); 1328 if (IS_ERR(ctx->kdamond)) { 1329 err = PTR_ERR(ctx->kdamond); 1330 ctx->kdamond = NULL; 1331 } else { 1332 wait_for_completion(&ctx->kdamond_started); 1333 } 1334 } 1335 mutex_unlock(&ctx->kdamond_lock); 1336 1337 return err; 1338 } 1339 1340 /** 1341 * damon_start() - Starts the monitorings for a given group of contexts. 1342 * @ctxs: an array of the pointers for contexts to start monitoring 1343 * @nr_ctxs: size of @ctxs 1344 * @exclusive: exclusiveness of this contexts group 1345 * 1346 * This function starts a group of monitoring threads for a group of monitoring 1347 * contexts. One thread per each context is created and run in parallel. The 1348 * caller should handle synchronization between the threads by itself. If 1349 * @exclusive is true and a group of threads that created by other 1350 * 'damon_start()' call is currently running, this function does nothing but 1351 * returns -EBUSY. 1352 * 1353 * Return: 0 on success, negative error code otherwise. 1354 */ 1355 int damon_start(struct damon_ctx **ctxs, int nr_ctxs, bool exclusive) 1356 { 1357 int i; 1358 int err = 0; 1359 1360 mutex_lock(&damon_lock); 1361 if ((exclusive && nr_running_ctxs) || 1362 (!exclusive && running_exclusive_ctxs)) { 1363 mutex_unlock(&damon_lock); 1364 return -EBUSY; 1365 } 1366 1367 for (i = 0; i < nr_ctxs; i++) { 1368 err = __damon_start(ctxs[i]); 1369 if (err) 1370 break; 1371 nr_running_ctxs++; 1372 } 1373 if (exclusive && nr_running_ctxs) 1374 running_exclusive_ctxs = true; 1375 mutex_unlock(&damon_lock); 1376 1377 return err; 1378 } 1379 1380 /* 1381 * __damon_stop() - Stops monitoring of a given context. 1382 * @ctx: monitoring context 1383 * 1384 * Return: 0 on success, negative error code otherwise. 1385 */ 1386 static int __damon_stop(struct damon_ctx *ctx) 1387 { 1388 struct task_struct *tsk; 1389 1390 mutex_lock(&ctx->kdamond_lock); 1391 tsk = ctx->kdamond; 1392 if (tsk) { 1393 get_task_struct(tsk); 1394 mutex_unlock(&ctx->kdamond_lock); 1395 kthread_stop_put(tsk); 1396 return 0; 1397 } 1398 mutex_unlock(&ctx->kdamond_lock); 1399 1400 return -EPERM; 1401 } 1402 1403 /** 1404 * damon_stop() - Stops the monitorings for a given group of contexts. 1405 * @ctxs: an array of the pointers for contexts to stop monitoring 1406 * @nr_ctxs: size of @ctxs 1407 * 1408 * Return: 0 on success, negative error code otherwise. 1409 */ 1410 int damon_stop(struct damon_ctx **ctxs, int nr_ctxs) 1411 { 1412 int i, err = 0; 1413 1414 for (i = 0; i < nr_ctxs; i++) { 1415 /* nr_running_ctxs is decremented in kdamond_fn */ 1416 err = __damon_stop(ctxs[i]); 1417 if (err) 1418 break; 1419 } 1420 return err; 1421 } 1422 1423 /** 1424 * damon_is_running() - Returns if a given DAMON context is running. 1425 * @ctx: The DAMON context to see if running. 1426 * 1427 * Return: true if @ctx is running, false otherwise. 1428 */ 1429 bool damon_is_running(struct damon_ctx *ctx) 1430 { 1431 bool running; 1432 1433 mutex_lock(&ctx->kdamond_lock); 1434 running = ctx->kdamond != NULL; 1435 mutex_unlock(&ctx->kdamond_lock); 1436 return running; 1437 } 1438 1439 /** 1440 * damon_call() - Invoke a given function on DAMON worker thread (kdamond). 
/**
 * damon_call() - Invoke a given function on DAMON worker thread (kdamond).
 * @ctx:	DAMON context to call the function for.
 * @control:	Control variable of the call request.
 *
 * Ask the DAMON worker thread (kdamond) of @ctx to call a function with an
 * argument that are respectively passed via &damon_call_control->fn and
 * &damon_call_control->data of @control.  If &damon_call_control->repeat of
 * @control is unset, further wait until the kdamond finishes handling of the
 * request.  Otherwise, return as soon as the request is made.
 *
 * The kdamond executes the function with the argument in the main loop, just
 * after a sampling of the iteration is finished.  The function can hence
 * safely access the internal data of the &struct damon_ctx without additional
 * synchronization.  The return value of the function will be saved in
 * &damon_call_control->return_code.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int damon_call(struct damon_ctx *ctx, struct damon_call_control *control)
{
	if (!control->repeat)
		init_completion(&control->completion);
	control->canceled = false;
	INIT_LIST_HEAD(&control->list);

	mutex_lock(&ctx->call_controls_lock);
	list_add_tail(&control->list, &ctx->call_controls);
	mutex_unlock(&ctx->call_controls_lock);
	if (!damon_is_running(ctx))
		return -EINVAL;
	if (control->repeat)
		return 0;
	wait_for_completion(&control->completion);
	if (control->canceled)
		return -ECANCELED;
	return 0;
}
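/*
 * Example (illustrative sketch): running a caller-defined function in the
 * kdamond context.  'dump_nr_regions' and its use of the context are
 * hypothetical.
 *
 *	static int dump_nr_regions(void *data)
 *	{
 *		// safely read damon_ctx internal data here
 *		return 0;
 *	}
 *
 *	struct damon_call_control control = {
 *		.fn = dump_nr_regions,
 *		.data = ctx,
 *	};
 *
 *	err = damon_call(ctx, &control);
 */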
1521 */ 1522 static void damon_warn_fix_nr_accesses_corruption(struct damon_region *r) 1523 { 1524 if (r->nr_accesses_bp == r->nr_accesses * 10000) 1525 return; 1526 WARN_ONCE(true, "invalid nr_accesses_bp at reset: %u %u\n", 1527 r->nr_accesses_bp, r->nr_accesses); 1528 r->nr_accesses_bp = r->nr_accesses * 10000; 1529 } 1530 1531 /* 1532 * Reset the aggregated monitoring results ('nr_accesses' of each region). 1533 */ 1534 static void kdamond_reset_aggregated(struct damon_ctx *c) 1535 { 1536 struct damon_target *t; 1537 unsigned int ti = 0; /* target's index */ 1538 1539 damon_for_each_target(t, c) { 1540 struct damon_region *r; 1541 1542 damon_for_each_region(r, t) { 1543 trace_damon_aggregated(ti, r, damon_nr_regions(t)); 1544 damon_warn_fix_nr_accesses_corruption(r); 1545 r->last_nr_accesses = r->nr_accesses; 1546 r->nr_accesses = 0; 1547 } 1548 ti++; 1549 } 1550 } 1551 1552 static unsigned long damon_get_intervals_score(struct damon_ctx *c) 1553 { 1554 struct damon_target *t; 1555 struct damon_region *r; 1556 unsigned long sz_region, max_access_events = 0, access_events = 0; 1557 unsigned long target_access_events; 1558 unsigned long goal_bp = c->attrs.intervals_goal.access_bp; 1559 1560 damon_for_each_target(t, c) { 1561 damon_for_each_region(r, t) { 1562 sz_region = damon_sz_region(r); 1563 max_access_events += sz_region * c->attrs.aggr_samples; 1564 access_events += sz_region * r->nr_accesses; 1565 } 1566 } 1567 target_access_events = max_access_events * goal_bp / 10000; 1568 target_access_events = target_access_events ? : 1; 1569 return access_events * 10000 / target_access_events; 1570 } 1571 1572 static unsigned long damon_feed_loop_next_input(unsigned long last_input, 1573 unsigned long score); 1574 1575 static unsigned long damon_get_intervals_adaptation_bp(struct damon_ctx *c) 1576 { 1577 unsigned long score_bp, adaptation_bp; 1578 1579 score_bp = damon_get_intervals_score(c); 1580 adaptation_bp = damon_feed_loop_next_input(100000000, score_bp) / 1581 10000; 1582 /* 1583 * adaptaion_bp ranges from 1 to 20,000. Avoid too rapid reduction of 1584 * the intervals by rescaling [1,10,000] to [5000, 10,000]. 
1585 */ 1586 if (adaptation_bp <= 10000) 1587 adaptation_bp = 5000 + adaptation_bp / 2; 1588 return adaptation_bp; 1589 } 1590 1591 static void kdamond_tune_intervals(struct damon_ctx *c) 1592 { 1593 unsigned long adaptation_bp; 1594 struct damon_attrs new_attrs; 1595 struct damon_intervals_goal *goal; 1596 1597 adaptation_bp = damon_get_intervals_adaptation_bp(c); 1598 if (adaptation_bp == 10000) 1599 return; 1600 1601 new_attrs = c->attrs; 1602 goal = &c->attrs.intervals_goal; 1603 new_attrs.sample_interval = min(goal->max_sample_us, 1604 c->attrs.sample_interval * adaptation_bp / 10000); 1605 new_attrs.sample_interval = max(goal->min_sample_us, 1606 new_attrs.sample_interval); 1607 new_attrs.aggr_interval = new_attrs.sample_interval * 1608 c->attrs.aggr_samples; 1609 trace_damon_monitor_intervals_tune(new_attrs.sample_interval); 1610 damon_set_attrs(c, &new_attrs); 1611 } 1612 1613 static void damon_split_region_at(struct damon_target *t, 1614 struct damon_region *r, unsigned long sz_r); 1615 1616 static bool __damos_valid_target(struct damon_region *r, struct damos *s) 1617 { 1618 unsigned long sz; 1619 unsigned int nr_accesses = r->nr_accesses_bp / 10000; 1620 1621 sz = damon_sz_region(r); 1622 return s->pattern.min_sz_region <= sz && 1623 sz <= s->pattern.max_sz_region && 1624 s->pattern.min_nr_accesses <= nr_accesses && 1625 nr_accesses <= s->pattern.max_nr_accesses && 1626 s->pattern.min_age_region <= r->age && 1627 r->age <= s->pattern.max_age_region; 1628 } 1629 1630 static bool damos_valid_target(struct damon_ctx *c, struct damon_target *t, 1631 struct damon_region *r, struct damos *s) 1632 { 1633 bool ret = __damos_valid_target(r, s); 1634 1635 if (!ret || !s->quota.esz || !c->ops.get_scheme_score) 1636 return ret; 1637 1638 return c->ops.get_scheme_score(c, t, r, s) >= s->quota.min_score; 1639 } 1640 1641 /* 1642 * damos_skip_charged_region() - Check if the given region or starting part of 1643 * it is already charged for the DAMOS quota. 1644 * @t: The target of the region. 1645 * @rp: The pointer to the region. 1646 * @s: The scheme to be applied. 1647 * @min_sz_region: minimum region size. 1648 * 1649 * If a quota of a scheme has exceeded in a quota charge window, the scheme's 1650 * action would applied to only a part of the target access pattern fulfilling 1651 * regions. To avoid applying the scheme action to only already applied 1652 * regions, DAMON skips applying the scheme action to the regions that charged 1653 * in the previous charge window. 1654 * 1655 * This function checks if a given region should be skipped or not for the 1656 * reason. If only the starting part of the region has previously charged, 1657 * this function splits the region into two so that the second one covers the 1658 * area that not charged in the previous charge widnow and saves the second 1659 * region in *rp and returns false, so that the caller can apply DAMON action 1660 * to the second one. 1661 * 1662 * Return: true if the region should be entirely skipped, false otherwise. 
1663 */ 1664 static bool damos_skip_charged_region(struct damon_target *t, 1665 struct damon_region **rp, struct damos *s, unsigned long min_sz_region) 1666 { 1667 struct damon_region *r = *rp; 1668 struct damos_quota *quota = &s->quota; 1669 unsigned long sz_to_skip; 1670 1671 /* Skip previously charged regions */ 1672 if (quota->charge_target_from) { 1673 if (t != quota->charge_target_from) 1674 return true; 1675 if (r == damon_last_region(t)) { 1676 quota->charge_target_from = NULL; 1677 quota->charge_addr_from = 0; 1678 return true; 1679 } 1680 if (quota->charge_addr_from && 1681 r->ar.end <= quota->charge_addr_from) 1682 return true; 1683 1684 if (quota->charge_addr_from && r->ar.start < 1685 quota->charge_addr_from) { 1686 sz_to_skip = ALIGN_DOWN(quota->charge_addr_from - 1687 r->ar.start, min_sz_region); 1688 if (!sz_to_skip) { 1689 if (damon_sz_region(r) <= min_sz_region) 1690 return true; 1691 sz_to_skip = min_sz_region; 1692 } 1693 damon_split_region_at(t, r, sz_to_skip); 1694 r = damon_next_region(r); 1695 *rp = r; 1696 } 1697 quota->charge_target_from = NULL; 1698 quota->charge_addr_from = 0; 1699 } 1700 return false; 1701 } 1702 1703 static void damos_update_stat(struct damos *s, 1704 unsigned long sz_tried, unsigned long sz_applied, 1705 unsigned long sz_ops_filter_passed) 1706 { 1707 s->stat.nr_tried++; 1708 s->stat.sz_tried += sz_tried; 1709 if (sz_applied) 1710 s->stat.nr_applied++; 1711 s->stat.sz_applied += sz_applied; 1712 s->stat.sz_ops_filter_passed += sz_ops_filter_passed; 1713 } 1714 1715 static bool damos_filter_match(struct damon_ctx *ctx, struct damon_target *t, 1716 struct damon_region *r, struct damos_filter *filter, 1717 unsigned long min_sz_region) 1718 { 1719 bool matched = false; 1720 struct damon_target *ti; 1721 int target_idx = 0; 1722 unsigned long start, end; 1723 1724 switch (filter->type) { 1725 case DAMOS_FILTER_TYPE_TARGET: 1726 damon_for_each_target(ti, ctx) { 1727 if (ti == t) 1728 break; 1729 target_idx++; 1730 } 1731 matched = target_idx == filter->target_idx; 1732 break; 1733 case DAMOS_FILTER_TYPE_ADDR: 1734 start = ALIGN_DOWN(filter->addr_range.start, min_sz_region); 1735 end = ALIGN_DOWN(filter->addr_range.end, min_sz_region); 1736 1737 /* inside the range */ 1738 if (start <= r->ar.start && r->ar.end <= end) { 1739 matched = true; 1740 break; 1741 } 1742 /* outside of the range */ 1743 if (r->ar.end <= start || end <= r->ar.start) { 1744 matched = false; 1745 break; 1746 } 1747 /* start before the range and overlap */ 1748 if (r->ar.start < start) { 1749 damon_split_region_at(t, r, start - r->ar.start); 1750 matched = false; 1751 break; 1752 } 1753 /* start inside the range */ 1754 damon_split_region_at(t, r, end - r->ar.start); 1755 matched = true; 1756 break; 1757 default: 1758 return false; 1759 } 1760 1761 return matched == filter->matching; 1762 } 1763 1764 static bool damos_filter_out(struct damon_ctx *ctx, struct damon_target *t, 1765 struct damon_region *r, struct damos *s) 1766 { 1767 struct damos_filter *filter; 1768 1769 s->core_filters_allowed = false; 1770 damos_for_each_filter(filter, s) { 1771 if (damos_filter_match(ctx, t, r, filter, ctx->min_sz_region)) { 1772 if (filter->allow) 1773 s->core_filters_allowed = true; 1774 return !filter->allow; 1775 } 1776 } 1777 return s->core_filters_default_reject; 1778 } 1779 1780 /* 1781 * damos_walk_call_walk() - Call &damos_walk_control->walk_fn. 1782 * @ctx: The context of &damon_ctx->walk_control. 1783 * @t: The monitoring target of @r that @s will be applied. 
 * @r:	The region of @t, to which @s will be applied.
 * @s:	The scheme of @ctx that will be applied to @r.
 *
 * This function is called from the kdamond whenever it asked the operation
 * set to apply a DAMOS scheme action to a region.  If a DAMOS walk request
 * is installed by damos_walk() and not yet uninstalled, invoke it.
 */
static void damos_walk_call_walk(struct damon_ctx *ctx, struct damon_target *t,
		struct damon_region *r, struct damos *s,
		unsigned long sz_filter_passed)
{
	struct damos_walk_control *control;

	if (s->walk_completed)
		return;

	control = ctx->walk_control;
	if (!control)
		return;

	control->walk_fn(control->data, ctx, t, r, s, sz_filter_passed);
}

/*
 * damos_walk_complete() - Complete DAMOS walk request if all walks are done.
 * @ctx:	The context of &damon_ctx->walk_control.
 * @s:	A scheme of @ctx for which all walks are now done.
 *
 * This function is called when the kdamond finished applying the action of a
 * DAMOS scheme to all regions that are eligible for the given
 * &damos->apply_interval_us.  If every scheme of @ctx including @s has now
 * finished walking for at least one &damos->apply_interval_us, this function
 * marks the handling of the given DAMOS walk request as done, so that
 * damos_walk() can wake up and return.
 */
static void damos_walk_complete(struct damon_ctx *ctx, struct damos *s)
{
	struct damos *siter;
	struct damos_walk_control *control;

	control = ctx->walk_control;
	if (!control)
		return;

	s->walk_completed = true;
	/* if all schemes completed, signal completion to walker */
	damon_for_each_scheme(siter, ctx) {
		if (!siter->walk_completed)
			return;
	}
	damon_for_each_scheme(siter, ctx)
		siter->walk_completed = false;

	complete(&control->completion);
	ctx->walk_control = NULL;
}

/*
 * damos_walk_cancel() - Cancel the current DAMOS walk request.
 * @ctx:	The context of &damon_ctx->walk_control.
 *
 * This function is called when @ctx is deactivated by DAMOS watermarks, a
 * DAMOS walk is requested but there is no DAMOS scheme to walk for, or the
 * kdamond is already out of the main loop and therefore going to be
 * terminated, and hence cannot continue the walks.  This function therefore
 * marks the walk request as canceled, so that damos_walk() can wake up and
 * return.
 */
static void damos_walk_cancel(struct damon_ctx *ctx)
{
	struct damos_walk_control *control;

	mutex_lock(&ctx->walk_control_lock);
	control = ctx->walk_control;
	mutex_unlock(&ctx->walk_control_lock);

	if (!control)
		return;
	control->canceled = true;
	complete(&control->completion);
	mutex_lock(&ctx->walk_control_lock);
	ctx->walk_control = NULL;
	mutex_unlock(&ctx->walk_control_lock);
}

static void damos_apply_scheme(struct damon_ctx *c, struct damon_target *t,
		struct damon_region *r, struct damos *s)
{
	struct damos_quota *quota = &s->quota;
	unsigned long sz = damon_sz_region(r);
	struct timespec64 begin, end;
	unsigned long sz_applied = 0;
	unsigned long sz_ops_filter_passed = 0;
	/*
	 * We plan to support multiple contexts per kdamond, as DAMON sysfs
	 * implies with its 'nr_contexts' file.  Nevertheless, only a single
	 * context per kdamond is supported for now.
	 * So, we can simply use '0' as the context index here.
	 */
	unsigned int cidx = 0;
	struct damos *siter;		/* schemes iterator */
	unsigned int sidx = 0;
	struct damon_target *titer;	/* targets iterator */
	unsigned int tidx = 0;
	bool do_trace = false;

	/* get indices for trace_damos_before_apply() */
	if (trace_damos_before_apply_enabled()) {
		damon_for_each_scheme(siter, c) {
			if (siter == s)
				break;
			sidx++;
		}
		damon_for_each_target(titer, c) {
			if (titer == t)
				break;
			tidx++;
		}
		do_trace = true;
	}

	if (c->ops.apply_scheme) {
		if (quota->esz && quota->charged_sz + sz > quota->esz) {
			sz = ALIGN_DOWN(quota->esz - quota->charged_sz,
					c->min_sz_region);
			if (!sz)
				goto update_stat;
			damon_split_region_at(t, r, sz);
		}
		if (damos_filter_out(c, t, r, s))
			return;
		ktime_get_coarse_ts64(&begin);
		trace_damos_before_apply(cidx, sidx, tidx, r,
				damon_nr_regions(t), do_trace);
		sz_applied = c->ops.apply_scheme(c, t, r, s,
				&sz_ops_filter_passed);
		damos_walk_call_walk(c, t, r, s, sz_ops_filter_passed);
		ktime_get_coarse_ts64(&end);
		quota->total_charged_ns += timespec64_to_ns(&end) -
			timespec64_to_ns(&begin);
		quota->charged_sz += sz;
		if (quota->esz && quota->charged_sz >= quota->esz) {
			quota->charge_target_from = t;
			quota->charge_addr_from = r->ar.end + 1;
		}
	}
	if (s->action != DAMOS_STAT)
		r->age = 0;

update_stat:
	damos_update_stat(s, sz, sz_applied, sz_ops_filter_passed);
}

static void damon_do_apply_schemes(struct damon_ctx *c,
				   struct damon_target *t,
				   struct damon_region *r)
{
	struct damos *s;

	damon_for_each_scheme(s, c) {
		struct damos_quota *quota = &s->quota;

		if (c->passed_sample_intervals < s->next_apply_sis)
			continue;

		if (!s->wmarks.activated)
			continue;

		/* Check the quota */
		if (quota->esz && quota->charged_sz >= quota->esz)
			continue;

		if (damos_skip_charged_region(t, &r, s, c->min_sz_region))
			continue;

		if (!damos_valid_target(c, t, r, s))
			continue;

		damos_apply_scheme(c, t, r, s);
	}
}

/*
 * damon_feed_loop_next_input() - get next input to achieve a target score.
 * @last_input:	The last input.
 * @score:	Current score that was made with @last_input.
 *
 * Calculate the next input to achieve the target score, based on the last
 * input and the current score.  Assuming the input and the score are
 * positively proportional, calculate how much compensation should be added
 * to or subtracted from the last input as a proportion of the last input.
 * Avoid the next input always being zero by keeping it non-zero.  In short
 * form (assuming support of float and signed calculations), the algorithm
 * is as below.
 *
 * next_input = max(last_input * ((goal - current) / goal + 1), 1)
 *
 * For simple implementation, we assume the target score is always 10,000.
 * The caller should adjust @score for this.
 *
 * Returns the next input that is assumed to achieve the target score.
 */
static unsigned long damon_feed_loop_next_input(unsigned long last_input,
		unsigned long score)
{
	const unsigned long goal = 10000;
	/* Set the minimum input as 10000 to avoid the compensation being zero */
	const unsigned long min_input = 10000;
	unsigned long score_goal_diff, compensation;
	bool over_achieving = score > goal;

	if (score == goal)
		return last_input;
	if (score >= goal * 2)
		return min_input;

	if (over_achieving)
		score_goal_diff = score - goal;
	else
		score_goal_diff = goal - score;

	if (last_input < ULONG_MAX / score_goal_diff)
		compensation = last_input * score_goal_diff / goal;
	else
		compensation = last_input / goal * score_goal_diff;

	if (over_achieving)
		return max(last_input - compensation, min_input);
	if (last_input < ULONG_MAX - compensation)
		return last_input + compensation;
	return ULONG_MAX;
}
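/*
 * For example, with last_input == 1,000,000 and score == 5,000 (half of the
 * goal), score_goal_diff is 5,000 and the compensation is
 * 1,000,000 * 5,000 / 10,000 == 500,000, so the next input becomes
 * 1,500,000.  Conversely, score == 15,000 returns 500,000, and any score at
 * or above 20,000 returns the 10,000 minimum.
 */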
 */
static unsigned long damon_feed_loop_next_input(unsigned long last_input,
		unsigned long score)
{
	const unsigned long goal = 10000;
	/* Set the minimum input as 10000 to keep the compensation non-zero */
	const unsigned long min_input = 10000;
	unsigned long score_goal_diff, compensation;
	bool over_achieving = score > goal;

	if (score == goal)
		return last_input;
	if (score >= goal * 2)
		return min_input;

	if (over_achieving)
		score_goal_diff = score - goal;
	else
		score_goal_diff = goal - score;

	if (last_input < ULONG_MAX / score_goal_diff)
		compensation = last_input * score_goal_diff / goal;
	else
		compensation = last_input / goal * score_goal_diff;

	if (over_achieving)
		return max(last_input - compensation, min_input);
	if (last_input < ULONG_MAX - compensation)
		return last_input + compensation;
	return ULONG_MAX;
}

#ifdef CONFIG_PSI

static u64 damos_get_some_mem_psi_total(void)
{
	if (static_branch_likely(&psi_disabled))
		return 0;
	return div_u64(psi_system.total[PSI_AVGS][PSI_MEM * 2],
			NSEC_PER_USEC);
}

#else	/* CONFIG_PSI */

static inline u64 damos_get_some_mem_psi_total(void)
{
	return 0;
}

#endif	/* CONFIG_PSI */

#ifdef CONFIG_NUMA
static __kernel_ulong_t damos_get_node_mem_bp(
		struct damos_quota_goal *goal)
{
	struct sysinfo i;
	__kernel_ulong_t numerator;

	si_meminfo_node(&i, goal->nid);
	if (goal->metric == DAMOS_QUOTA_NODE_MEM_USED_BP)
		numerator = i.totalram - i.freeram;
	else	/* DAMOS_QUOTA_NODE_MEM_FREE_BP */
		numerator = i.freeram;
	return numerator * 10000 / i.totalram;
}

static unsigned long damos_get_node_memcg_used_bp(
		struct damos_quota_goal *goal)
{
	struct mem_cgroup *memcg;
	struct lruvec *lruvec;
	unsigned long used_pages, numerator;
	struct sysinfo i;

	rcu_read_lock();
	memcg = mem_cgroup_from_id(goal->memcg_id);
	rcu_read_unlock();
	if (!memcg) {
		if (goal->metric == DAMOS_QUOTA_NODE_MEMCG_USED_BP)
			return 0;
		else	/* DAMOS_QUOTA_NODE_MEMCG_FREE_BP */
			return 10000;
	}
	mem_cgroup_flush_stats(memcg);
	lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(goal->nid));
	used_pages = lruvec_page_state(lruvec, NR_ACTIVE_ANON);
	used_pages += lruvec_page_state(lruvec, NR_INACTIVE_ANON);
	used_pages += lruvec_page_state(lruvec, NR_ACTIVE_FILE);
	used_pages += lruvec_page_state(lruvec, NR_INACTIVE_FILE);

	si_meminfo_node(&i, goal->nid);
	if (goal->metric == DAMOS_QUOTA_NODE_MEMCG_USED_BP)
		numerator = used_pages;
	else	/* DAMOS_QUOTA_NODE_MEMCG_FREE_BP */
		numerator = i.totalram - used_pages;
	return numerator * 10000 / i.totalram;
}
#else
static __kernel_ulong_t damos_get_node_mem_bp(
		struct damos_quota_goal *goal)
{
	return 0;
}

static unsigned long damos_get_node_memcg_used_bp(
		struct damos_quota_goal *goal)
{
	return 0;
}
#endif

static void damos_set_quota_goal_current_value(struct damos_quota_goal *goal)
{
	u64 now_psi_total;

	switch (goal->metric) {
	case DAMOS_QUOTA_USER_INPUT:
		/* User should already set goal->current_value */
		break;
	case DAMOS_QUOTA_SOME_MEM_PSI_US:
		now_psi_total = damos_get_some_mem_psi_total();
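		/*
		 * The PSI 'some' memory total only grows; report the stall
		 * time observed since the previous check as the current
		 * value of this goal.
		 */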
		goal->current_value = now_psi_total - goal->last_psi_total;
		goal->last_psi_total = now_psi_total;
		break;
	case DAMOS_QUOTA_NODE_MEM_USED_BP:
	case DAMOS_QUOTA_NODE_MEM_FREE_BP:
		goal->current_value = damos_get_node_mem_bp(goal);
		break;
	case DAMOS_QUOTA_NODE_MEMCG_USED_BP:
	case DAMOS_QUOTA_NODE_MEMCG_FREE_BP:
		goal->current_value = damos_get_node_memcg_used_bp(goal);
		break;
	default:
		break;
	}
}

/* Return the highest score since it makes schemes least aggressive */
static unsigned long damos_quota_score(struct damos_quota *quota)
{
	struct damos_quota_goal *goal;
	unsigned long highest_score = 0;

	damos_for_each_quota_goal(goal, quota) {
		damos_set_quota_goal_current_value(goal);
		highest_score = max(highest_score,
				goal->current_value * 10000 /
				goal->target_value);
	}

	return highest_score;
}

/*
 * Called only if quota->ms or quota->sz is set, or quota->goals is not empty
 */
static void damos_set_effective_quota(struct damos_quota *quota)
{
	unsigned long throughput;
	unsigned long esz = ULONG_MAX;

	if (!quota->ms && list_empty(&quota->goals)) {
		quota->esz = quota->sz;
		return;
	}

	if (!list_empty(&quota->goals)) {
		unsigned long score = damos_quota_score(quota);

		quota->esz_bp = damon_feed_loop_next_input(
				max(quota->esz_bp, 10000UL),
				score);
		esz = quota->esz_bp / 10000;
	}

	if (quota->ms) {
		if (quota->total_charged_ns)
			throughput = mult_frac(quota->total_charged_sz, 1000000,
					quota->total_charged_ns);
		else
			throughput = PAGE_SIZE * 1024;
		esz = min(throughput * quota->ms, esz);
	}

	if (quota->sz && quota->sz < esz)
		esz = quota->sz;

	quota->esz = esz;
}

static void damos_trace_esz(struct damon_ctx *c, struct damos *s,
		struct damos_quota *quota)
{
	unsigned int cidx = 0, sidx = 0;
	struct damos *siter;

	damon_for_each_scheme(siter, c) {
		if (siter == s)
			break;
		sidx++;
	}
	trace_damos_esz(cidx, sidx, quota->esz);
}

static void damos_adjust_quota(struct damon_ctx *c, struct damos *s)
{
	struct damos_quota *quota = &s->quota;
	struct damon_target *t;
	struct damon_region *r;
	unsigned long cumulated_sz, cached_esz;
	unsigned int score, max_score = 0;

	if (!quota->ms && !quota->sz && list_empty(&quota->goals))
		return;

	/* First charge window */
	if (!quota->total_charged_sz && !quota->charged_from) {
		quota->charged_from = jiffies;
		damos_set_effective_quota(quota);
	}

	/* New charge window starts */
	if (time_after_eq(jiffies, quota->charged_from +
				msecs_to_jiffies(quota->reset_interval))) {
		if (quota->esz && quota->charged_sz >= quota->esz)
			s->stat.qt_exceeds++;
		quota->total_charged_sz += quota->charged_sz;
		quota->charged_from = jiffies;
		quota->charged_sz = 0;
		if (trace_damos_esz_enabled())
			cached_esz = quota->esz;
		damos_set_effective_quota(quota);
		if (trace_damos_esz_enabled() && quota->esz != cached_esz)
			damos_trace_esz(c, s, quota);
	}

	if (!c->ops.get_scheme_score)
		return;

	/* Fill up the score histogram */
	memset(c->regions_score_histogram, 0,
			sizeof(*c->regions_score_histogram) *
			(DAMOS_MAX_SCORE + 1));
	damon_for_each_target(t, c) {
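		/* bucket the sizes of eligible regions by their scores */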
		damon_for_each_region(r, t) {
			if (!__damos_valid_target(r, s))
				continue;
			score = c->ops.get_scheme_score(c, t, r, s);
			c->regions_score_histogram[score] +=
				damon_sz_region(r);
			if (score > max_score)
				max_score = score;
		}
	}

	/* Set the min score limit */
	for (cumulated_sz = 0, score = max_score; ; score--) {
		cumulated_sz += c->regions_score_histogram[score];
		if (cumulated_sz >= quota->esz || !score)
			break;
	}
	quota->min_score = score;
}

static void kdamond_apply_schemes(struct damon_ctx *c)
{
	struct damon_target *t;
	struct damon_region *r, *next_r;
	struct damos *s;
	unsigned long sample_interval = c->attrs.sample_interval ?
		c->attrs.sample_interval : 1;
	bool has_schemes_to_apply = false;

	damon_for_each_scheme(s, c) {
		if (c->passed_sample_intervals < s->next_apply_sis)
			continue;

		if (!s->wmarks.activated)
			continue;

		has_schemes_to_apply = true;

		damos_adjust_quota(c, s);
	}

	if (!has_schemes_to_apply)
		return;

	mutex_lock(&c->walk_control_lock);
	damon_for_each_target(t, c) {
		damon_for_each_region_safe(r, next_r, t)
			damon_do_apply_schemes(c, t, r);
	}

	damon_for_each_scheme(s, c) {
		if (c->passed_sample_intervals < s->next_apply_sis)
			continue;
		damos_walk_complete(c, s);
		s->next_apply_sis = c->passed_sample_intervals +
			(s->apply_interval_us ? s->apply_interval_us :
			 c->attrs.aggr_interval) / sample_interval;
		s->last_applied = NULL;
	}
	mutex_unlock(&c->walk_control_lock);
}

/*
 * Merge two adjacent regions into one region
 */
static void damon_merge_two_regions(struct damon_target *t,
		struct damon_region *l, struct damon_region *r)
{
	unsigned long sz_l = damon_sz_region(l), sz_r = damon_sz_region(r);

	l->nr_accesses = (l->nr_accesses * sz_l + r->nr_accesses * sz_r) /
		(sz_l + sz_r);
	l->nr_accesses_bp = l->nr_accesses * 10000;
	l->age = (l->age * sz_l + r->age * sz_r) / (sz_l + sz_r);
	l->ar.end = r->ar.end;
	damon_destroy_region(r, t);
}

/*
 * Merge adjacent regions having similar access frequencies
 *
 * t		target affected by this merge operation
 * thres	'->nr_accesses' diff threshold for the merge
 * sz_limit	size upper limit of each region
 */
static void damon_merge_regions_of(struct damon_target *t, unsigned int thres,
		unsigned long sz_limit)
{
	struct damon_region *r, *prev = NULL, *next;

	damon_for_each_region_safe(r, next, t) {
		if (abs(r->nr_accesses - r->last_nr_accesses) > thres)
			r->age = 0;
		else if ((r->nr_accesses == 0) != (r->last_nr_accesses == 0))
			r->age = 0;
		else
			r->age++;

		if (prev && prev->ar.end == r->ar.start &&
		    abs(prev->nr_accesses - r->nr_accesses) <= thres &&
		    damon_sz_region(prev) + damon_sz_region(r) <= sz_limit)
			damon_merge_two_regions(t, prev, r);
		else
			prev = r;
	}
}

/*
 * Merge adjacent regions having similar access frequencies
 *
 * threshold	'->nr_accesses' diff threshold for the merge
 * sz_limit	size upper limit of each region
 *
 * This function merges monitoring target regions which are adjacent and
 * their access frequencies are similar. This is for minimizing the
 * monitoring overhead under the dynamically changeable access pattern.
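 * As an illustration of the merge math (the numbers are hypothetical),
 * adjacent 4 KiB and 12 KiB regions with nr_accesses of 4 and 8 would
 * become one 16 KiB region with nr_accesses of
 * (4 * 4096 + 8 * 12288) / 16384 = 7.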
 * If a merge was unnecessarily made, later 'kdamond_split_regions()' will
 * revert it.
 *
 * The total number of regions could be higher than the user-defined limit,
 * max_nr_regions, for some cases. For example, the user can update
 * max_nr_regions to a number that is lower than the current number of
 * regions while DAMON is running. For such a case, repeat merging until the
 * limit is met while increasing @threshold up to the possible maximum level.
 */
static void kdamond_merge_regions(struct damon_ctx *c, unsigned int threshold,
		unsigned long sz_limit)
{
	struct damon_target *t;
	unsigned int nr_regions;
	unsigned int max_thres;

	max_thres = c->attrs.aggr_interval /
		(c->attrs.sample_interval ? c->attrs.sample_interval : 1);
	do {
		nr_regions = 0;
		damon_for_each_target(t, c) {
			damon_merge_regions_of(t, threshold, sz_limit);
			nr_regions += damon_nr_regions(t);
		}
		threshold = max(1, threshold * 2);
	} while (nr_regions > c->attrs.max_nr_regions &&
			threshold / 2 < max_thres);
}

/*
 * Split a region in two
 *
 * r		the region to be split
 * sz_r		size of the first sub-region that will be made
 */
static void damon_split_region_at(struct damon_target *t,
		struct damon_region *r, unsigned long sz_r)
{
	struct damon_region *new;

	new = damon_new_region(r->ar.start + sz_r, r->ar.end);
	if (!new)
		return;

	r->ar.end = new->ar.start;

	new->age = r->age;
	new->last_nr_accesses = r->last_nr_accesses;
	new->nr_accesses_bp = r->nr_accesses_bp;
	new->nr_accesses = r->nr_accesses;

	damon_insert_region(new, r, damon_next_region(r), t);
}

/* Split every region in the given target into 'nr_subs' regions */
static void damon_split_regions_of(struct damon_target *t, int nr_subs,
		unsigned long min_sz_region)
{
	struct damon_region *r, *next;
	unsigned long sz_region, sz_sub = 0;
	int i;

	damon_for_each_region_safe(r, next, t) {
		sz_region = damon_sz_region(r);

		for (i = 0; i < nr_subs - 1 &&
				sz_region > 2 * min_sz_region; i++) {
			/*
			 * Randomly select the size of the left sub-region to
			 * be at least 10% and at most 90% of the original
			 * region
			 */
			sz_sub = ALIGN_DOWN(damon_rand(1, 10) *
					sz_region / 10, min_sz_region);
			/* Do not allow blank region */
			if (sz_sub == 0 || sz_sub >= sz_region)
				continue;

			damon_split_region_at(t, r, sz_sub);
			sz_region = sz_sub;
		}
	}
}

/*
 * Split every target region into randomly-sized small regions
 *
 * This function splits every target region into random-sized small regions
 * if the current total number of the regions is equal to or smaller than
 * half of the user-specified maximum number of regions. This is for
 * maximizing the monitoring accuracy under the dynamically changeable access
 * patterns. If a split was unnecessarily made, later
 * 'kdamond_merge_regions()' will revert it.
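 *
 * For example (hypothetical numbers): with max_nr_regions of 1000 and 300
 * current regions, every region is split in two. If the next call again
 * sees 300 regions, e.g., because the merge stage reverted the splits, and
 * 300 is below 1000 / 3, the regions are split in three instead.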
 */
static void kdamond_split_regions(struct damon_ctx *ctx)
{
	struct damon_target *t;
	unsigned int nr_regions = 0;
	static unsigned int last_nr_regions;
	int nr_subregions = 2;

	damon_for_each_target(t, ctx)
		nr_regions += damon_nr_regions(t);

	if (nr_regions > ctx->attrs.max_nr_regions / 2)
		return;

	/* Maybe the middle of the region has different access frequency */
	if (last_nr_regions == nr_regions &&
			nr_regions < ctx->attrs.max_nr_regions / 3)
		nr_subregions = 3;

	damon_for_each_target(t, ctx)
		damon_split_regions_of(t, nr_subregions, ctx->min_sz_region);

	last_nr_regions = nr_regions;
}

/*
 * Check whether current monitoring should be stopped
 *
 * The monitoring is stopped when either the user has requested to stop, or
 * all monitoring targets are invalid.
 *
 * Returns true if the current monitoring should be stopped.
 */
static bool kdamond_need_stop(struct damon_ctx *ctx)
{
	struct damon_target *t;

	if (kthread_should_stop())
		return true;

	if (!ctx->ops.target_valid)
		return false;

	damon_for_each_target(t, ctx) {
		if (ctx->ops.target_valid(t))
			return false;
	}

	return true;
}

static int damos_get_wmark_metric_value(enum damos_wmark_metric metric,
		unsigned long *metric_value)
{
	switch (metric) {
	case DAMOS_WMARK_FREE_MEM_RATE:
		*metric_value = global_zone_page_state(NR_FREE_PAGES) * 1000 /
			totalram_pages();
		return 0;
	default:
		break;
	}
	return -EINVAL;
}

/*
 * Returns zero if the scheme is active. Else, returns the time to wait for
 * the next watermark check in microseconds.
 */
static unsigned long damos_wmark_wait_us(struct damos *scheme)
{
	unsigned long metric;

	if (damos_get_wmark_metric_value(scheme->wmarks.metric, &metric))
		return 0;

	/* higher than high watermark or lower than low watermark */
	if (metric > scheme->wmarks.high || scheme->wmarks.low > metric) {
		if (scheme->wmarks.activated)
			pr_debug("deactivate a scheme (%d) for %s wmark\n",
					scheme->action,
					str_high_low(metric > scheme->wmarks.high));
		scheme->wmarks.activated = false;
		return scheme->wmarks.interval;
	}

	/* inactive and higher than middle watermark */
	if ((scheme->wmarks.high >= metric && metric >= scheme->wmarks.mid) &&
			!scheme->wmarks.activated)
		return scheme->wmarks.interval;

	if (!scheme->wmarks.activated)
		pr_debug("activate a scheme (%d)\n", scheme->action);
	scheme->wmarks.activated = true;
	return 0;
}

static void kdamond_usleep(unsigned long usecs)
{
	if (usecs >= USLEEP_RANGE_UPPER_BOUND)
		schedule_timeout_idle(usecs_to_jiffies(usecs));
	else
		usleep_range_idle(usecs, usecs + 1);
}

/*
 * kdamond_call() - handle damon_call_control objects.
 * @ctx:	The &struct damon_ctx of the kdamond.
 * @cancel:	Whether to cancel the invocation of the function.
 *
 * If there are &struct damon_call_control requests that were registered via
 * &damon_call() on @ctx, do or cancel the invocation of the function
 * depending on @cancel. @cancel is set when the kdamond is already out of
 * the main loop and therefore will be terminated.
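 *
 * A simplified sketch of the caller side: the caller fills a &struct
 * damon_call_control with its function and data and passes it to
 * damon_call(), which queues the request and, for non-repeating requests,
 * waits until this function either invokes the callback in the kdamond
 * context or marks the request as canceled.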
 */
static void kdamond_call(struct damon_ctx *ctx, bool cancel)
{
	struct damon_call_control *control;
	LIST_HEAD(repeat_controls);
	int ret = 0;

	while (true) {
		mutex_lock(&ctx->call_controls_lock);
		control = list_first_entry_or_null(&ctx->call_controls,
				struct damon_call_control, list);
		mutex_unlock(&ctx->call_controls_lock);
		if (!control)
			break;
		if (cancel) {
			control->canceled = true;
		} else {
			ret = control->fn(control->data);
			control->return_code = ret;
		}
		mutex_lock(&ctx->call_controls_lock);
		list_del(&control->list);
		mutex_unlock(&ctx->call_controls_lock);
		if (!control->repeat) {
			complete(&control->completion);
		} else if (control->canceled && control->dealloc_on_cancel) {
			kfree(control);
			continue;
		} else {
			list_add(&control->list, &repeat_controls);
		}
	}
	control = list_first_entry_or_null(&repeat_controls,
			struct damon_call_control, list);
	if (!control || cancel)
		return;
	mutex_lock(&ctx->call_controls_lock);
	list_add_tail(&control->list, &ctx->call_controls);
	mutex_unlock(&ctx->call_controls_lock);
}

/* Returns negative error code if it's not activated but should return */
static int kdamond_wait_activation(struct damon_ctx *ctx)
{
	struct damos *s;
	unsigned long wait_time;
	unsigned long min_wait_time = 0;
	bool init_wait_time = false;

	while (!kdamond_need_stop(ctx)) {
		damon_for_each_scheme(s, ctx) {
			wait_time = damos_wmark_wait_us(s);
			if (!init_wait_time || wait_time < min_wait_time) {
				init_wait_time = true;
				min_wait_time = wait_time;
			}
		}
		if (!min_wait_time)
			return 0;

		kdamond_usleep(min_wait_time);

		kdamond_call(ctx, false);
		damos_walk_cancel(ctx);
	}
	return -EBUSY;
}

static void kdamond_init_ctx(struct damon_ctx *ctx)
{
	unsigned long sample_interval = ctx->attrs.sample_interval ?
		ctx->attrs.sample_interval : 1;
	unsigned long apply_interval;
	struct damos *scheme;

	ctx->passed_sample_intervals = 0;
	ctx->next_aggregation_sis = ctx->attrs.aggr_interval / sample_interval;
	ctx->next_ops_update_sis = ctx->attrs.ops_update_interval /
		sample_interval;
	ctx->next_intervals_tune_sis = ctx->next_aggregation_sis *
		ctx->attrs.intervals_goal.aggrs;

	damon_for_each_scheme(scheme, ctx) {
		apply_interval = scheme->apply_interval_us ?
			scheme->apply_interval_us : ctx->attrs.aggr_interval;
		scheme->next_apply_sis = apply_interval / sample_interval;
		damos_set_filters_default_reject(scheme);
	}
}

/*
 * The monitoring daemon that runs as a kernel thread
 */
static int kdamond_fn(void *data)
{
	struct damon_ctx *ctx = data;
	struct damon_target *t;
	struct damon_region *r, *next;
	unsigned int max_nr_accesses = 0;
	unsigned long sz_limit = 0;

	pr_debug("kdamond (%d) starts\n", current->pid);

	complete(&ctx->kdamond_started);
	kdamond_init_ctx(ctx);

	if (ctx->ops.init)
		ctx->ops.init(ctx);
	ctx->regions_score_histogram = kmalloc_array(DAMOS_MAX_SCORE + 1,
			sizeof(*ctx->regions_score_histogram), GFP_KERNEL);
	if (!ctx->regions_score_histogram)
		goto done;

	sz_limit = damon_region_sz_limit(ctx);

	while (!kdamond_need_stop(ctx)) {
		/*
		 * ctx->attrs and ctx->next_{aggregation,ops_update}_sis could
		 * be changed from kdamond_call(). Read the values here, and
		 * use those for this iteration. That is, new values that
		 * damon_set_attrs() sets are respected from the next
		 * iteration.
		 */
		unsigned long next_aggregation_sis = ctx->next_aggregation_sis;
		unsigned long next_ops_update_sis = ctx->next_ops_update_sis;
		unsigned long sample_interval = ctx->attrs.sample_interval;

		if (kdamond_wait_activation(ctx))
			break;

		if (ctx->ops.prepare_access_checks)
			ctx->ops.prepare_access_checks(ctx);

		kdamond_usleep(sample_interval);
		ctx->passed_sample_intervals++;

		if (ctx->ops.check_accesses)
			max_nr_accesses = ctx->ops.check_accesses(ctx);

		if (ctx->passed_sample_intervals >= next_aggregation_sis)
			kdamond_merge_regions(ctx,
					max_nr_accesses / 10,
					sz_limit);

		/*
		 * do kdamond_call() and kdamond_apply_schemes() after
		 * kdamond_merge_regions() if possible, to reduce overhead
		 */
		kdamond_call(ctx, false);
		if (!list_empty(&ctx->schemes))
			kdamond_apply_schemes(ctx);
		else
			damos_walk_cancel(ctx);

		sample_interval = ctx->attrs.sample_interval ?
			ctx->attrs.sample_interval : 1;
		if (ctx->passed_sample_intervals >= next_aggregation_sis) {
			if (ctx->attrs.intervals_goal.aggrs &&
					ctx->passed_sample_intervals >=
					ctx->next_intervals_tune_sis) {
				/*
				 * ctx->next_aggregation_sis might be updated
				 * from kdamond_call(). In that case,
				 * damon_set_attrs(), which will be called
				 * from kdamond_tune_intervals(), may wrongly
				 * think this is in the middle of the current
				 * aggregation, and make the aggregation
				 * information reset for all regions. Then,
				 * the following kdamond_reset_aggregated()
				 * call will make the region information
				 * invalid, particularly for
				 * ->nr_accesses_bp.
				 *
				 * Reset ->next_aggregation_sis to avoid that.
				 * It will anyway be correctly updated after
				 * this if clause.
				 */
				ctx->next_aggregation_sis =
					next_aggregation_sis;
				ctx->next_intervals_tune_sis +=
					ctx->attrs.aggr_samples *
					ctx->attrs.intervals_goal.aggrs;
				kdamond_tune_intervals(ctx);
				sample_interval = ctx->attrs.sample_interval ?
					ctx->attrs.sample_interval : 1;
			}
			ctx->next_aggregation_sis = next_aggregation_sis +
				ctx->attrs.aggr_interval / sample_interval;

			kdamond_reset_aggregated(ctx);
			kdamond_split_regions(ctx);
		}

		if (ctx->passed_sample_intervals >= next_ops_update_sis) {
			ctx->next_ops_update_sis = next_ops_update_sis +
				ctx->attrs.ops_update_interval /
				sample_interval;
			if (ctx->ops.update)
				ctx->ops.update(ctx);
			sz_limit = damon_region_sz_limit(ctx);
		}
	}
done:
	damon_for_each_target(t, ctx) {
		damon_for_each_region_safe(r, next, t)
			damon_destroy_region(r, t);
	}

	if (ctx->ops.cleanup)
		ctx->ops.cleanup(ctx);
	kfree(ctx->regions_score_histogram);

	pr_debug("kdamond (%d) finishes\n", current->pid);
	mutex_lock(&ctx->kdamond_lock);
	ctx->kdamond = NULL;
	mutex_unlock(&ctx->kdamond_lock);

	kdamond_call(ctx, true);
	damos_walk_cancel(ctx);

	mutex_lock(&damon_lock);
	nr_running_ctxs--;
	if (!nr_running_ctxs && running_exclusive_ctxs)
		running_exclusive_ctxs = false;
	mutex_unlock(&damon_lock);

	damon_destroy_targets(ctx);
	return 0;
}

/*
 * struct damon_system_ram_region - System RAM resource address region of
 *				    [@start, @end).
 * @start:	Start address of the region (inclusive).
 * @end:	End address of the region (exclusive).
 */
struct damon_system_ram_region {
	unsigned long start;
	unsigned long end;
};

static int walk_system_ram(struct resource *res, void *arg)
{
	struct damon_system_ram_region *a = arg;

	if (a->end - a->start < resource_size(res)) {
		a->start = res->start;
		a->end = res->end;
	}
	return 0;
}

/*
 * Find the biggest 'System RAM' resource and store its start and end address
 * in @start and @end, respectively. If no System RAM is found, returns
 * false.
 */
static bool damon_find_biggest_system_ram(unsigned long *start,
		unsigned long *end)
{
	struct damon_system_ram_region arg = {};

	walk_system_ram_res(0, ULONG_MAX, &arg, walk_system_ram);
	if (arg.end <= arg.start)
		return false;

	*start = arg.start;
	*end = arg.end;
	return true;
}

/**
 * damon_set_region_biggest_system_ram_default() - Set the region of the given
 *	monitoring target as requested, or biggest 'System RAM'.
 * @t:		The monitoring target to set the region.
 * @start:	The pointer to the start address of the region.
 * @end:	The pointer to the end address of the region.
 * @min_sz_region:	Minimum region size.
 *
 * This function sets the region of @t as requested by @start and @end. If
 * the values of @start and @end are zero, however, this function finds the
 * biggest 'System RAM' resource and sets the region to cover the resource.
 * In the latter case, this function saves the start and end addresses of the
 * resource in @start and @end, respectively.
 *
 * Return: 0 on success, negative error code otherwise.
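 *
 * A minimal usage sketch (error handling elided; whether DAMON_MIN_REGION
 * is the right minimum region size is up to the caller):
 *
 *	unsigned long start = 0, end = 0;
 *	struct damon_target *t = damon_new_target();
 *
 *	if (!damon_set_region_biggest_system_ram_default(t, &start, &end,
 *				DAMON_MIN_REGION))
 *		pr_info("monitoring [%lu, %lu)\n", start, end);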
 */
int damon_set_region_biggest_system_ram_default(struct damon_target *t,
			unsigned long *start, unsigned long *end,
			unsigned long min_sz_region)
{
	struct damon_addr_range addr_range;

	if (*start > *end)
		return -EINVAL;

	if (!*start && !*end &&
	    !damon_find_biggest_system_ram(start, end))
		return -EINVAL;

	addr_range.start = *start;
	addr_range.end = *end;
	return damon_set_regions(t, &addr_range, 1, min_sz_region);
}

/*
 * damon_moving_sum() - Calculate an inferred moving sum value.
 * @mvsum:	Inferred sum of the last @len_window values.
 * @nomvsum:	Non-moving sum of the last discrete @len_window window values.
 * @len_window:	The number of last values to take care of.
 * @new_value:	New value that will be added to the pseudo moving sum.
 *
 * Moving sum (moving average * window size) is good for handling noise, but
 * the cost of keeping past values can be high for an arbitrary window size.
 * This function implements a lightweight pseudo moving sum function that
 * doesn't keep the past window values.
 *
 * It simply assumes there was no noise in the past, and gets the no-noise
 * assumed past value to drop from @nomvsum and @len_window. @nomvsum is a
 * non-moving sum of the last window. For example, if @len_window is 10 and
 * we have 25 values, @nomvsum is the sum of the 11th to 20th values of the
 * 25 values. Hence, this function simply drops @nomvsum / @len_window from
 * the given @mvsum and adds @new_value.
 *
 * For example, if @len_window is 10 and @nomvsum is 50, the last 10 values
 * for the last window could vary, e.g., 0, 10, 0, 10, 0, 10, 0, 0, 0, 20.
 * For calculating the next moving sum with a new value, we should drop 0
 * from 50 and add the new value. However, this function assumes it got
 * value 5 for each of the last ten times. Based on the assumption, when the
 * next value is measured, it drops the assumed past value, 5, from the
 * current sum, and adds the new value to get the updated pseudo moving sum.
 *
 * This means the value could have errors, but the errors will disappear for
 * every @len_window-aligned calls. For example, if @len_window is 10, the
 * pseudo moving sum with the 11th value to the 19th value would have an
 * error. But the sum with the 20th value will not have the error.
 *
 * Return: Pseudo moving sum after getting the @new_value.
 */
static unsigned int damon_moving_sum(unsigned int mvsum, unsigned int nomvsum,
		unsigned int len_window, unsigned int new_value)
{
	return mvsum - nomvsum / len_window + new_value;
}

/**
 * damon_update_region_access_rate() - Update the access rate of a region.
 * @r:		The DAMON region to update for its access check result.
 * @accessed:	Whether the region was accessed during the last sampling
 *		interval.
 * @attrs:	The damon_attrs of the DAMON context.
 *
 * Update the access rate of a region with the region's last sampling
 * interval access check result.
 *
 * Usually this will be called by &damon_operations->check_accesses callback.
 */
void damon_update_region_access_rate(struct damon_region *r, bool accessed,
		struct damon_attrs *attrs)
{
	unsigned int len_window = 1;

	/*
	 * sample_interval can be zero, but cannot be larger than
	 * aggr_interval, owing to validation of damon_set_attrs().
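	 *
	 * For example (illustrative numbers), with a 5 ms sampling interval
	 * and a 100 ms aggregation interval, len_window becomes 20, so each
	 * call drops last_nr_accesses * 10000 / 20 from the moving sum and
	 * adds 10000 if the region was accessed.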
	 */
	if (attrs->sample_interval)
		len_window = damon_max_nr_accesses(attrs);
	r->nr_accesses_bp = damon_moving_sum(r->nr_accesses_bp,
			r->last_nr_accesses * 10000, len_window,
			accessed ? 10000 : 0);

	if (accessed)
		r->nr_accesses++;
}

/**
 * damon_initialized() - Return whether DAMON is ready to be used.
 *
 * Return: true if DAMON is ready to be used, false otherwise.
 */
bool damon_initialized(void)
{
	return damon_region_cache != NULL;
}

static int __init damon_init(void)
{
	damon_region_cache = KMEM_CACHE(damon_region, 0);
	if (unlikely(!damon_region_cache)) {
		pr_err("failed to create damon_region_cache\n");
		return -ENOMEM;
	}

	return 0;
}

subsys_initcall(damon_init);

#include "tests/core-kunit.h"