// SPDX-License-Identifier: GPL-2.0
/*
 * Data Access Monitor
 *
 * Author: SeongJae Park <sj@kernel.org>
 */

#define pr_fmt(fmt) "damon: " fmt

#include <linux/damon.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/memcontrol.h>
#include <linux/mm.h>
#include <linux/psi.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/string_choices.h>

#define CREATE_TRACE_POINTS
#include <trace/events/damon.h>

static DEFINE_MUTEX(damon_lock);
static int nr_running_ctxs;
static bool running_exclusive_ctxs;

static DEFINE_MUTEX(damon_ops_lock);
static struct damon_operations damon_registered_ops[NR_DAMON_OPS];

static struct kmem_cache *damon_region_cache __ro_after_init;

/* Should be called under damon_ops_lock with id smaller than NR_DAMON_OPS */
static bool __damon_is_registered_ops(enum damon_ops_id id)
{
	struct damon_operations empty_ops = {};

	if (!memcmp(&empty_ops, &damon_registered_ops[id], sizeof(empty_ops)))
		return false;
	return true;
}

/**
 * damon_is_registered_ops() - Check if a given damon_operations is registered.
 * @id:	Id of the damon_operations to check if registered.
 *
 * Return: true if the ops is set, false otherwise.
 */
bool damon_is_registered_ops(enum damon_ops_id id)
{
	bool registered;

	if (id >= NR_DAMON_OPS)
		return false;
	mutex_lock(&damon_ops_lock);
	registered = __damon_is_registered_ops(id);
	mutex_unlock(&damon_ops_lock);
	return registered;
}

/**
 * damon_register_ops() - Register a monitoring operations set to DAMON.
 * @ops:	monitoring operations set to register.
 *
 * This function registers a monitoring operations set of valid &struct
 * damon_operations->id so that others can find and use them later.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int damon_register_ops(struct damon_operations *ops)
{
	int err = 0;

	if (ops->id >= NR_DAMON_OPS)
		return -EINVAL;

	mutex_lock(&damon_ops_lock);
	/* Fail for already registered ops */
	if (__damon_is_registered_ops(ops->id))
		err = -EINVAL;
	else
		damon_registered_ops[ops->id] = *ops;
	mutex_unlock(&damon_ops_lock);
	return err;
}
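/*
 * Example (an illustrative sketch, not part of this file): registering an
 * operations set and then selecting it for a context.  The 'my_ops'
 * callbacks are hypothetical; in-tree users such as the vaddr and paddr
 * operation sets register themselves this way.
 *
 *	static struct damon_operations my_ops = {
 *		.id = DAMON_OPS_PADDR,
 *		(set ->init, ->update, ->prepare_access_checks, ... here)
 *	};
 *
 *	err = damon_register_ops(&my_ops);
 *	if (!err)
 *		err = damon_select_ops(ctx, DAMON_OPS_PADDR);
 */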
/**
 * damon_select_ops() - Select a monitoring operations to use with the context.
 * @ctx:	monitoring context to use the operations.
 * @id:	id of the registered monitoring operations to select.
 *
 * This function finds the registered monitoring operations set of @id and
 * makes @ctx use it.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int damon_select_ops(struct damon_ctx *ctx, enum damon_ops_id id)
{
	int err = 0;

	if (id >= NR_DAMON_OPS)
		return -EINVAL;

	mutex_lock(&damon_ops_lock);
	if (!__damon_is_registered_ops(id))
		err = -EINVAL;
	else
		ctx->ops = damon_registered_ops[id];
	mutex_unlock(&damon_ops_lock);
	return err;
}

/*
 * Construct a damon_region struct
 *
 * Returns the pointer to the new struct if success, or NULL otherwise
 */
struct damon_region *damon_new_region(unsigned long start, unsigned long end)
{
	struct damon_region *region;

	region = kmem_cache_alloc(damon_region_cache, GFP_KERNEL);
	if (!region)
		return NULL;

	region->ar.start = start;
	region->ar.end = end;
	region->nr_accesses = 0;
	region->nr_accesses_bp = 0;
	INIT_LIST_HEAD(&region->list);

	region->age = 0;
	region->last_nr_accesses = 0;

	return region;
}

void damon_add_region(struct damon_region *r, struct damon_target *t)
{
	list_add_tail(&r->list, &t->regions_list);
	t->nr_regions++;
}

static void damon_del_region(struct damon_region *r, struct damon_target *t)
{
	list_del(&r->list);
	t->nr_regions--;
}

static void damon_free_region(struct damon_region *r)
{
	kmem_cache_free(damon_region_cache, r);
}

void damon_destroy_region(struct damon_region *r, struct damon_target *t)
{
	damon_del_region(r, t);
	damon_free_region(r);
}

/*
 * Check whether a region is intersecting an address range
 *
 * Returns true if it is.
 */
static bool damon_intersect(struct damon_region *r,
		struct damon_addr_range *re)
{
	return !(r->ar.end <= re->start || re->end <= r->ar.start);
}

/*
 * Fill holes in regions with new regions.
 */
static int damon_fill_regions_holes(struct damon_region *first,
		struct damon_region *last, struct damon_target *t)
{
	struct damon_region *r = first;

	damon_for_each_region_from(r, t) {
		struct damon_region *next, *newr;

		if (r == last)
			break;
		next = damon_next_region(r);
		if (r->ar.end != next->ar.start) {
			newr = damon_new_region(r->ar.end, next->ar.start);
			if (!newr)
				return -ENOMEM;
			damon_insert_region(newr, r, next, t);
		}
	}
	return 0;
}
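/*
 * Example (an illustrative sketch): manually giving a target one initial
 * region.  Real callers usually let damon_set_regions() below build the
 * regions from an array of address ranges instead.
 *
 *	struct damon_region *r = damon_new_region(start, end);
 *
 *	if (!r)
 *		return -ENOMEM;
 *	damon_add_region(r, t);
 */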
/*
 * damon_set_regions() - Set regions of a target for given address ranges.
 * @t:		the given target.
 * @ranges:	array of new monitoring target ranges.
 * @nr_ranges:	length of @ranges.
 * @min_sz_region:	minimum region size.
 *
 * This function adds new regions to, or modifies existing regions of, a
 * monitoring target to fit in specific ranges.
 *
 * Return: 0 if success, or negative error code otherwise.
 */
int damon_set_regions(struct damon_target *t, struct damon_addr_range *ranges,
		unsigned int nr_ranges, unsigned long min_sz_region)
{
	struct damon_region *r, *next;
	unsigned int i;
	int err;

	/* Remove regions which are not in the new ranges */
	damon_for_each_region_safe(r, next, t) {
		for (i = 0; i < nr_ranges; i++) {
			if (damon_intersect(r, &ranges[i]))
				break;
		}
		if (i == nr_ranges)
			damon_destroy_region(r, t);
	}

	r = damon_first_region(t);
	/* Add new regions or resize existing regions to fit in the ranges */
	for (i = 0; i < nr_ranges; i++) {
		struct damon_region *first = NULL, *last, *newr;
		struct damon_addr_range *range;

		range = &ranges[i];
		/* Get the first/last regions intersecting with the range */
		damon_for_each_region_from(r, t) {
			if (damon_intersect(r, range)) {
				if (!first)
					first = r;
				last = r;
			}
			if (r->ar.start >= range->end)
				break;
		}
		if (!first) {
			/* no region intersects with this range */
			newr = damon_new_region(
					ALIGN_DOWN(range->start,
						min_sz_region),
					ALIGN(range->end, min_sz_region));
			if (!newr)
				return -ENOMEM;
			damon_insert_region(newr, damon_prev_region(r), r, t);
		} else {
			/* resize intersecting regions to fit in this range */
			first->ar.start = ALIGN_DOWN(range->start,
					min_sz_region);
			last->ar.end = ALIGN(range->end, min_sz_region);

			/* fill possible holes in the range */
			err = damon_fill_regions_holes(first, last, t);
			if (err)
				return err;
		}
	}
	return 0;
}

struct damos_filter *damos_new_filter(enum damos_filter_type type,
		bool matching, bool allow)
{
	struct damos_filter *filter;

	filter = kmalloc(sizeof(*filter), GFP_KERNEL);
	if (!filter)
		return NULL;
	filter->type = type;
	filter->matching = matching;
	filter->allow = allow;
	INIT_LIST_HEAD(&filter->list);
	return filter;
}
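/*
 * Example (an illustrative sketch): building a DAMOS filter that rejects
 * memory in a made-up address range, to be installed into a scheme with
 * damos_add_filter() below.  The range values are arbitrary.
 *
 *	struct damos_filter *f;
 *
 *	f = damos_new_filter(DAMOS_FILTER_TYPE_ADDR, true, false);
 *	if (!f)
 *		return -ENOMEM;
 *	f->addr_range = (struct damon_addr_range){
 *			.start = 4096, .end = 8192, };
 *	damos_add_filter(scheme, f);
 */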
/**
 * damos_filter_for_ops() - Return if the filter is an ops-handled one.
 * @type:	type of the filter.
 *
 * Return: true if the filter of @type needs to be handled by ops layer, false
 * otherwise.
 */
bool damos_filter_for_ops(enum damos_filter_type type)
{
	switch (type) {
	case DAMOS_FILTER_TYPE_ADDR:
	case DAMOS_FILTER_TYPE_TARGET:
		return false;
	default:
		break;
	}
	return true;
}

void damos_add_filter(struct damos *s, struct damos_filter *f)
{
	if (damos_filter_for_ops(f->type))
		list_add_tail(&f->list, &s->ops_filters);
	else
		list_add_tail(&f->list, &s->core_filters);
}

static void damos_del_filter(struct damos_filter *f)
{
	list_del(&f->list);
}

static void damos_free_filter(struct damos_filter *f)
{
	kfree(f);
}

void damos_destroy_filter(struct damos_filter *f)
{
	damos_del_filter(f);
	damos_free_filter(f);
}

struct damos_quota_goal *damos_new_quota_goal(
		enum damos_quota_goal_metric metric,
		unsigned long target_value)
{
	struct damos_quota_goal *goal;

	goal = kmalloc(sizeof(*goal), GFP_KERNEL);
	if (!goal)
		return NULL;
	goal->metric = metric;
	goal->target_value = target_value;
	INIT_LIST_HEAD(&goal->list);
	return goal;
}

void damos_add_quota_goal(struct damos_quota *q, struct damos_quota_goal *g)
{
	list_add_tail(&g->list, &q->goals);
}

static void damos_del_quota_goal(struct damos_quota_goal *g)
{
	list_del(&g->list);
}

static void damos_free_quota_goal(struct damos_quota_goal *g)
{
	kfree(g);
}

void damos_destroy_quota_goal(struct damos_quota_goal *g)
{
	damos_del_quota_goal(g);
	damos_free_quota_goal(g);
}

/* initialize fields of @quota that normally API users wouldn't set */
static struct damos_quota *damos_quota_init(struct damos_quota *quota)
{
	quota->esz = 0;
	quota->total_charged_sz = 0;
	quota->total_charged_ns = 0;
	quota->charged_sz = 0;
	quota->charged_from = 0;
	quota->charge_target_from = NULL;
	quota->charge_addr_from = 0;
	quota->esz_bp = 0;
	return quota;
}

struct damos *damon_new_scheme(struct damos_access_pattern *pattern,
			enum damos_action action,
			unsigned long apply_interval_us,
			struct damos_quota *quota,
			struct damos_watermarks *wmarks,
			int target_nid)
{
	struct damos *scheme;

	scheme = kmalloc(sizeof(*scheme), GFP_KERNEL);
	if (!scheme)
		return NULL;
	scheme->pattern = *pattern;
	scheme->action = action;
	scheme->apply_interval_us = apply_interval_us;
	/*
	 * next_apply_sis will be set when kdamond starts.  While kdamond is
	 * running, it will also be updated when it is added to the DAMON
	 * context, or damon_attrs are updated.
	 */
	scheme->next_apply_sis = 0;
	scheme->walk_completed = false;
	INIT_LIST_HEAD(&scheme->core_filters);
	INIT_LIST_HEAD(&scheme->ops_filters);
	scheme->stat = (struct damos_stat){};
	INIT_LIST_HEAD(&scheme->list);

	scheme->quota = *(damos_quota_init(quota));
	/* quota.goals should be separately set by caller */
	INIT_LIST_HEAD(&scheme->quota.goals);

	scheme->wmarks = *wmarks;
	scheme->wmarks.activated = true;

	scheme->migrate_dests = (struct damos_migrate_dests){};
	scheme->target_nid = target_nid;

	return scheme;
}
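/*
 * Example (an illustrative sketch): constructing a statistics-only scheme
 * that matches every access pattern.  The pattern bounds are demonstration
 * values; DAMOS_STAT applies no action but still keeps statistics.
 *
 *	struct damos_access_pattern pattern = {
 *		.min_sz_region = 0, .max_sz_region = ULONG_MAX,
 *		.min_nr_accesses = 0, .max_nr_accesses = UINT_MAX,
 *		.min_age_region = 0, .max_age_region = UINT_MAX,
 *	};
 *	struct damos_quota quota = {};
 *	struct damos_watermarks wmarks = {};
 *	struct damos *s;
 *
 *	s = damon_new_scheme(&pattern, DAMOS_STAT, 0, &quota, &wmarks,
 *			NUMA_NO_NODE);
 *	if (!s)
 *		return -ENOMEM;
 *	damon_add_scheme(ctx, s);
 */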
static void damos_set_next_apply_sis(struct damos *s, struct damon_ctx *ctx)
{
	unsigned long sample_interval = ctx->attrs.sample_interval ?
		ctx->attrs.sample_interval : 1;
	unsigned long apply_interval = s->apply_interval_us ?
		s->apply_interval_us : ctx->attrs.aggr_interval;

	s->next_apply_sis = ctx->passed_sample_intervals +
		apply_interval / sample_interval;
}

void damon_add_scheme(struct damon_ctx *ctx, struct damos *s)
{
	list_add_tail(&s->list, &ctx->schemes);
	damos_set_next_apply_sis(s, ctx);
}

static void damon_del_scheme(struct damos *s)
{
	list_del(&s->list);
}

static void damon_free_scheme(struct damos *s)
{
	kfree(s);
}

void damon_destroy_scheme(struct damos *s)
{
	struct damos_quota_goal *g, *g_next;
	struct damos_filter *f, *next;

	damos_for_each_quota_goal_safe(g, g_next, &s->quota)
		damos_destroy_quota_goal(g);

	damos_for_each_core_filter_safe(f, next, s)
		damos_destroy_filter(f);

	damos_for_each_ops_filter_safe(f, next, s)
		damos_destroy_filter(f);

	kfree(s->migrate_dests.node_id_arr);
	kfree(s->migrate_dests.weight_arr);
	damon_del_scheme(s);
	damon_free_scheme(s);
}

/*
 * Construct a damon_target struct
 *
 * Returns the pointer to the new struct if success, or NULL otherwise
 */
struct damon_target *damon_new_target(void)
{
	struct damon_target *t;

	t = kmalloc(sizeof(*t), GFP_KERNEL);
	if (!t)
		return NULL;

	t->pid = NULL;
	t->nr_regions = 0;
	INIT_LIST_HEAD(&t->regions_list);
	INIT_LIST_HEAD(&t->list);
	t->obsolete = false;

	return t;
}

void damon_add_target(struct damon_ctx *ctx, struct damon_target *t)
{
	list_add_tail(&t->list, &ctx->adaptive_targets);
}

bool damon_targets_empty(struct damon_ctx *ctx)
{
	return list_empty(&ctx->adaptive_targets);
}

static void damon_del_target(struct damon_target *t)
{
	list_del(&t->list);
}

void damon_free_target(struct damon_target *t)
{
	struct damon_region *r, *next;

	damon_for_each_region_safe(r, next, t)
		damon_free_region(r);
	kfree(t);
}

void damon_destroy_target(struct damon_target *t, struct damon_ctx *ctx)
{
	if (ctx && ctx->ops.cleanup_target)
		ctx->ops.cleanup_target(t);

	damon_del_target(t);
	damon_free_target(t);
}

unsigned int damon_nr_regions(struct damon_target *t)
{
	return t->nr_regions;
}

struct damon_ctx *damon_new_ctx(void)
{
	struct damon_ctx *ctx;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return NULL;

	init_completion(&ctx->kdamond_started);

	ctx->attrs.sample_interval = 5 * 1000;
	ctx->attrs.aggr_interval = 100 * 1000;
	ctx->attrs.ops_update_interval = 60 * 1000 * 1000;

	ctx->passed_sample_intervals = 0;
	/* These will be set from kdamond_init_ctx() */
	ctx->next_aggregation_sis = 0;
	ctx->next_ops_update_sis = 0;

	mutex_init(&ctx->kdamond_lock);
	INIT_LIST_HEAD(&ctx->call_controls);
	mutex_init(&ctx->call_controls_lock);
	mutex_init(&ctx->walk_control_lock);

	ctx->attrs.min_nr_regions = 10;
	ctx->attrs.max_nr_regions = 1000;

	ctx->addr_unit = 1;
	ctx->min_sz_region = DAMON_MIN_REGION;

	INIT_LIST_HEAD(&ctx->adaptive_targets);
	INIT_LIST_HEAD(&ctx->schemes);

	return ctx;
}
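/*
 * Example (an illustrative sketch): a minimal context setup combining the
 * constructors above.  Error handling is abbreviated and the chosen
 * operations set is only an example.
 *
 *	struct damon_ctx *ctx = damon_new_ctx();
 *	struct damon_target *t = damon_new_target();
 *
 *	if (!ctx || !t)
 *		goto out;
 *	damon_add_target(ctx, t);
 *	if (damon_select_ops(ctx, DAMON_OPS_PADDR))
 *		goto out;
 *	(then damon_start()/damon_stop(), and finally damon_destroy_ctx(ctx))
 */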
static void damon_destroy_targets(struct damon_ctx *ctx)
{
	struct damon_target *t, *next_t;

	damon_for_each_target_safe(t, next_t, ctx)
		damon_destroy_target(t, ctx);
}

void damon_destroy_ctx(struct damon_ctx *ctx)
{
	struct damos *s, *next_s;

	damon_destroy_targets(ctx);

	damon_for_each_scheme_safe(s, next_s, ctx)
		damon_destroy_scheme(s);

	kfree(ctx);
}

static bool damon_attrs_equals(const struct damon_attrs *attrs1,
		const struct damon_attrs *attrs2)
{
	const struct damon_intervals_goal *ig1 = &attrs1->intervals_goal;
	const struct damon_intervals_goal *ig2 = &attrs2->intervals_goal;

	return attrs1->sample_interval == attrs2->sample_interval &&
		attrs1->aggr_interval == attrs2->aggr_interval &&
		attrs1->ops_update_interval == attrs2->ops_update_interval &&
		attrs1->min_nr_regions == attrs2->min_nr_regions &&
		attrs1->max_nr_regions == attrs2->max_nr_regions &&
		ig1->access_bp == ig2->access_bp &&
		ig1->aggrs == ig2->aggrs &&
		ig1->min_sample_us == ig2->min_sample_us &&
		ig1->max_sample_us == ig2->max_sample_us;
}

static unsigned int damon_age_for_new_attrs(unsigned int age,
		struct damon_attrs *old_attrs, struct damon_attrs *new_attrs)
{
	return age * old_attrs->aggr_interval / new_attrs->aggr_interval;
}

/* convert access ratio in bp (per 10,000) to nr_accesses */
static unsigned int damon_accesses_bp_to_nr_accesses(
		unsigned int accesses_bp, struct damon_attrs *attrs)
{
	return accesses_bp * damon_max_nr_accesses(attrs) / 10000;
}

/*
 * Convert nr_accesses to access ratio in bp (per 10,000).
 *
 * Callers should ensure attrs.aggr_interval is not zero, like
 * damon_update_monitoring_results() does.  Otherwise, divide-by-zero would
 * happen.
 */
static unsigned int damon_nr_accesses_to_accesses_bp(
		unsigned int nr_accesses, struct damon_attrs *attrs)
{
	return nr_accesses * 10000 / damon_max_nr_accesses(attrs);
}

static unsigned int damon_nr_accesses_for_new_attrs(unsigned int nr_accesses,
		struct damon_attrs *old_attrs, struct damon_attrs *new_attrs)
{
	return damon_accesses_bp_to_nr_accesses(
			damon_nr_accesses_to_accesses_bp(
				nr_accesses, old_attrs),
			new_attrs);
}

static void damon_update_monitoring_result(struct damon_region *r,
		struct damon_attrs *old_attrs, struct damon_attrs *new_attrs,
		bool aggregating)
{
	if (!aggregating) {
		r->nr_accesses = damon_nr_accesses_for_new_attrs(
				r->nr_accesses, old_attrs, new_attrs);
		r->nr_accesses_bp = r->nr_accesses * 10000;
	} else {
		/*
		 * if this is called in the middle of the aggregation, reset
		 * the aggregations we made so far for this aggregation
		 * interval.  In other words, make the status like
		 * kdamond_reset_aggregated() is called.
		 */
		r->last_nr_accesses = damon_nr_accesses_for_new_attrs(
				r->last_nr_accesses, old_attrs, new_attrs);
		r->nr_accesses_bp = r->last_nr_accesses * 10000;
		r->nr_accesses = 0;
	}
	r->age = damon_age_for_new_attrs(r->age, old_attrs, new_attrs);
}
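/*
 * Worked example (illustrative, assuming damon_max_nr_accesses() is the
 * aggregation interval divided by the sampling interval): with old attrs of
 * 5 ms sampling and 100 ms aggregation, the maximum is 20, so
 * nr_accesses == 10 converts to 10 * 10000 / 20 == 5000 bp (50%).
 * Committing new attrs of 10 ms sampling and 400 ms aggregation (maximum 40)
 * converts the 5000 bp back to 5000 * 40 / 10000 == 20 accesses, preserving
 * the access ratio across the attrs change.
 */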
/*
 * region->nr_accesses is the number of sampling intervals in the last
 * aggregation interval that access to the region has found, and region->age
 * is the number of aggregation intervals that its access pattern has
 * maintained.  For this reason, the real meaning of the two fields depends on
 * the current sampling interval and aggregation interval.  This function
 * updates ->nr_accesses and ->age of given damon_ctx's regions for new
 * damon_attrs.
 */
static void damon_update_monitoring_results(struct damon_ctx *ctx,
		struct damon_attrs *new_attrs, bool aggregating)
{
	struct damon_attrs *old_attrs = &ctx->attrs;
	struct damon_target *t;
	struct damon_region *r;

	/* if any interval is zero, simply forgo the conversion */
	if (!old_attrs->sample_interval || !old_attrs->aggr_interval ||
			!new_attrs->sample_interval ||
			!new_attrs->aggr_interval)
		return;

	damon_for_each_target(t, ctx)
		damon_for_each_region(r, t)
			damon_update_monitoring_result(
					r, old_attrs, new_attrs, aggregating);
}

/*
 * damon_valid_intervals_goal() - return if the intervals goal of @attrs is
 * valid.
 */
static bool damon_valid_intervals_goal(struct damon_attrs *attrs)
{
	struct damon_intervals_goal *goal = &attrs->intervals_goal;

	/* tuning is disabled */
	if (!goal->aggrs)
		return true;
	if (goal->min_sample_us > goal->max_sample_us)
		return false;
	if (attrs->sample_interval < goal->min_sample_us ||
			goal->max_sample_us < attrs->sample_interval)
		return false;
	return true;
}

/**
 * damon_set_attrs() - Set attributes for the monitoring.
 * @ctx:	monitoring context
 * @attrs:	monitoring attributes
 *
 * This function should be called while the kdamond is not running and an
 * access check results aggregation is not ongoing (e.g., from damon_call()).
 *
 * Every time interval is in micro-seconds.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int damon_set_attrs(struct damon_ctx *ctx, struct damon_attrs *attrs)
{
	unsigned long sample_interval = attrs->sample_interval ?
		attrs->sample_interval : 1;
	struct damos *s;
	bool aggregating = ctx->passed_sample_intervals <
		ctx->next_aggregation_sis;

	if (!damon_valid_intervals_goal(attrs))
		return -EINVAL;

	if (attrs->min_nr_regions < 3)
		return -EINVAL;
	if (attrs->min_nr_regions > attrs->max_nr_regions)
		return -EINVAL;
	if (attrs->sample_interval > attrs->aggr_interval)
		return -EINVAL;

	/* calls from outside of the core don't set this */
	if (!attrs->aggr_samples)
		attrs->aggr_samples = attrs->aggr_interval / sample_interval;

	ctx->next_aggregation_sis = ctx->passed_sample_intervals +
		attrs->aggr_interval / sample_interval;
	ctx->next_ops_update_sis = ctx->passed_sample_intervals +
		attrs->ops_update_interval / sample_interval;

	damon_update_monitoring_results(ctx, attrs, aggregating);
	ctx->attrs = *attrs;

	damon_for_each_scheme(s, ctx)
		damos_set_next_apply_sis(s, ctx);

	return 0;
}
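/*
 * Example (an illustrative sketch): tightening the monitoring intervals of a
 * not-yet-started context.  The interval values are arbitrary demonstration
 * numbers; all intervals are in microseconds.
 *
 *	struct damon_attrs attrs = ctx->attrs;
 *
 *	attrs.sample_interval = 1000;		(1 ms)
 *	attrs.aggr_interval = 20 * 1000;	(20 ms)
 *	if (damon_set_attrs(ctx, &attrs))
 *		pr_warn("invalid attributes\n");
 */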
/**
 * damon_set_schemes() - Set data access monitoring based operation schemes.
 * @ctx:	monitoring context
 * @schemes:	array of the schemes
 * @nr_schemes:	number of entries in @schemes
 *
 * This function should not be called while the kdamond of the context is
 * running.
 */
void damon_set_schemes(struct damon_ctx *ctx, struct damos **schemes,
		ssize_t nr_schemes)
{
	struct damos *s, *next;
	ssize_t i;

	damon_for_each_scheme_safe(s, next, ctx)
		damon_destroy_scheme(s);
	for (i = 0; i < nr_schemes; i++)
		damon_add_scheme(ctx, schemes[i]);
}

static struct damos_quota_goal *damos_nth_quota_goal(
		int n, struct damos_quota *q)
{
	struct damos_quota_goal *goal;
	int i = 0;

	damos_for_each_quota_goal(goal, q) {
		if (i++ == n)
			return goal;
	}
	return NULL;
}

static void damos_commit_quota_goal_union(
		struct damos_quota_goal *dst, struct damos_quota_goal *src)
{
	switch (dst->metric) {
	case DAMOS_QUOTA_NODE_MEM_USED_BP:
	case DAMOS_QUOTA_NODE_MEM_FREE_BP:
		dst->nid = src->nid;
		break;
	case DAMOS_QUOTA_NODE_MEMCG_USED_BP:
	case DAMOS_QUOTA_NODE_MEMCG_FREE_BP:
		dst->nid = src->nid;
		dst->memcg_id = src->memcg_id;
		break;
	default:
		break;
	}
}

static void damos_commit_quota_goal(
		struct damos_quota_goal *dst, struct damos_quota_goal *src)
{
	dst->metric = src->metric;
	dst->target_value = src->target_value;
	if (dst->metric == DAMOS_QUOTA_USER_INPUT)
		dst->current_value = src->current_value;
	/* keep last_psi_total as is, since it will be updated in next cycle */
	damos_commit_quota_goal_union(dst, src);
}
/**
 * damos_commit_quota_goals() - Commit DAMOS quota goals to another quota.
 * @dst:	The commit destination DAMOS quota.
 * @src:	The commit source DAMOS quota.
 *
 * Copies user-specified parameters for quota goals from @src to @dst.  Users
 * should use this function for quota goals level parameters update of running
 * DAMON contexts, instead of manual in-place updates.
 *
 * This function should be called from parameters-update safe context, like
 * damon_call().
 *
 * Return: 0 on success, negative error code otherwise.
 */
int damos_commit_quota_goals(struct damos_quota *dst, struct damos_quota *src)
{
	struct damos_quota_goal *dst_goal, *next, *src_goal, *new_goal;
	int i = 0, j = 0;

	damos_for_each_quota_goal_safe(dst_goal, next, dst) {
		src_goal = damos_nth_quota_goal(i++, src);
		if (src_goal)
			damos_commit_quota_goal(dst_goal, src_goal);
		else
			damos_destroy_quota_goal(dst_goal);
	}
	damos_for_each_quota_goal_safe(src_goal, next, src) {
		if (j++ < i)
			continue;
		new_goal = damos_new_quota_goal(
				src_goal->metric, src_goal->target_value);
		if (!new_goal)
			return -ENOMEM;
		damos_commit_quota_goal(new_goal, src_goal);
		damos_add_quota_goal(dst, new_goal);
	}
	return 0;
}

static int damos_commit_quota(struct damos_quota *dst, struct damos_quota *src)
{
	int err;

	dst->reset_interval = src->reset_interval;
	dst->ms = src->ms;
	dst->sz = src->sz;
	err = damos_commit_quota_goals(dst, src);
	if (err)
		return err;
	dst->weight_sz = src->weight_sz;
	dst->weight_nr_accesses = src->weight_nr_accesses;
	dst->weight_age = src->weight_age;
	return 0;
}

static struct damos_filter *damos_nth_core_filter(int n, struct damos *s)
{
	struct damos_filter *filter;
	int i = 0;

	damos_for_each_core_filter(filter, s) {
		if (i++ == n)
			return filter;
	}
	return NULL;
}

static struct damos_filter *damos_nth_ops_filter(int n, struct damos *s)
{
	struct damos_filter *filter;
	int i = 0;

	damos_for_each_ops_filter(filter, s) {
		if (i++ == n)
			return filter;
	}
	return NULL;
}

static void damos_commit_filter_arg(
		struct damos_filter *dst, struct damos_filter *src)
{
	switch (dst->type) {
	case DAMOS_FILTER_TYPE_MEMCG:
		dst->memcg_id = src->memcg_id;
		break;
	case DAMOS_FILTER_TYPE_ADDR:
		dst->addr_range = src->addr_range;
		break;
	case DAMOS_FILTER_TYPE_TARGET:
		dst->target_idx = src->target_idx;
		break;
	case DAMOS_FILTER_TYPE_HUGEPAGE_SIZE:
		dst->sz_range = src->sz_range;
		break;
	default:
		break;
	}
}

static void damos_commit_filter(
		struct damos_filter *dst, struct damos_filter *src)
{
	dst->type = src->type;
	dst->matching = src->matching;
	dst->allow = src->allow;
	damos_commit_filter_arg(dst, src);
}

static int damos_commit_core_filters(struct damos *dst, struct damos *src)
{
	struct damos_filter *dst_filter, *next, *src_filter, *new_filter;
	int i = 0, j = 0;

	damos_for_each_core_filter_safe(dst_filter, next, dst) {
		src_filter = damos_nth_core_filter(i++, src);
		if (src_filter)
			damos_commit_filter(dst_filter, src_filter);
		else
			damos_destroy_filter(dst_filter);
	}

	damos_for_each_core_filter_safe(src_filter, next, src) {
		if (j++ < i)
			continue;

		new_filter = damos_new_filter(
				src_filter->type, src_filter->matching,
				src_filter->allow);
		if (!new_filter)
			return -ENOMEM;
		damos_commit_filter_arg(new_filter, src_filter);
		damos_add_filter(dst, new_filter);
	}
	return 0;
}
static int damos_commit_ops_filters(struct damos *dst, struct damos *src)
{
	struct damos_filter *dst_filter, *next, *src_filter, *new_filter;
	int i = 0, j = 0;

	damos_for_each_ops_filter_safe(dst_filter, next, dst) {
		src_filter = damos_nth_ops_filter(i++, src);
		if (src_filter)
			damos_commit_filter(dst_filter, src_filter);
		else
			damos_destroy_filter(dst_filter);
	}

	damos_for_each_ops_filter_safe(src_filter, next, src) {
		if (j++ < i)
			continue;

		new_filter = damos_new_filter(
				src_filter->type, src_filter->matching,
				src_filter->allow);
		if (!new_filter)
			return -ENOMEM;
		damos_commit_filter_arg(new_filter, src_filter);
		damos_add_filter(dst, new_filter);
	}
	return 0;
}

/**
 * damos_filters_default_reject() - decide whether to reject memory that
 * didn't match any given filter.
 * @filters:	Given DAMOS filters of a group.
 */
static bool damos_filters_default_reject(struct list_head *filters)
{
	struct damos_filter *last_filter;

	if (list_empty(filters))
		return false;
	last_filter = list_last_entry(filters, struct damos_filter, list);
	return last_filter->allow;
}

static void damos_set_filters_default_reject(struct damos *s)
{
	if (!list_empty(&s->ops_filters))
		s->core_filters_default_reject = false;
	else
		s->core_filters_default_reject =
			damos_filters_default_reject(&s->core_filters);
	s->ops_filters_default_reject =
		damos_filters_default_reject(&s->ops_filters);
}
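/*
 * Worked example (illustrative) of the default-reject rule above: if the
 * last filter of a group allows matching memory, everything that matched no
 * filter gets the opposite treatment, i.e., is rejected, and vice versa.
 * For instance, with a single "allow this address range" filter installed,
 * memory outside the range takes the default and is rejected.
 */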
static int damos_commit_dests(struct damos_migrate_dests *dst,
		struct damos_migrate_dests *src)
{
	if (dst->nr_dests != src->nr_dests) {
		kfree(dst->node_id_arr);
		kfree(dst->weight_arr);

		dst->node_id_arr = kmalloc_array(src->nr_dests,
				sizeof(*dst->node_id_arr), GFP_KERNEL);
		if (!dst->node_id_arr) {
			dst->weight_arr = NULL;
			return -ENOMEM;
		}

		dst->weight_arr = kmalloc_array(src->nr_dests,
				sizeof(*dst->weight_arr), GFP_KERNEL);
		if (!dst->weight_arr) {
			/* ->node_id_arr will be freed by scheme destruction */
			return -ENOMEM;
		}
	}

	dst->nr_dests = src->nr_dests;
	for (int i = 0; i < src->nr_dests; i++) {
		dst->node_id_arr[i] = src->node_id_arr[i];
		dst->weight_arr[i] = src->weight_arr[i];
	}

	return 0;
}

static int damos_commit_filters(struct damos *dst, struct damos *src)
{
	int err;

	err = damos_commit_core_filters(dst, src);
	if (err)
		return err;
	err = damos_commit_ops_filters(dst, src);
	if (err)
		return err;
	damos_set_filters_default_reject(dst);
	return 0;
}

static struct damos *damon_nth_scheme(int n, struct damon_ctx *ctx)
{
	struct damos *s;
	int i = 0;

	damon_for_each_scheme(s, ctx) {
		if (i++ == n)
			return s;
	}
	return NULL;
}

static int damos_commit(struct damos *dst, struct damos *src)
{
	int err;

	dst->pattern = src->pattern;
	dst->action = src->action;
	dst->apply_interval_us = src->apply_interval_us;

	err = damos_commit_quota(&dst->quota, &src->quota);
	if (err)
		return err;

	dst->wmarks = src->wmarks;
	dst->target_nid = src->target_nid;

	err = damos_commit_dests(&dst->migrate_dests, &src->migrate_dests);
	if (err)
		return err;

	err = damos_commit_filters(dst, src);
	return err;
}

static int damon_commit_schemes(struct damon_ctx *dst, struct damon_ctx *src)
{
	struct damos *dst_scheme, *next, *src_scheme, *new_scheme;
	int i = 0, j = 0, err;

	damon_for_each_scheme_safe(dst_scheme, next, dst) {
		src_scheme = damon_nth_scheme(i++, src);
		if (src_scheme) {
			err = damos_commit(dst_scheme, src_scheme);
			if (err)
				return err;
		} else {
			damon_destroy_scheme(dst_scheme);
		}
	}

	damon_for_each_scheme_safe(src_scheme, next, src) {
		if (j++ < i)
			continue;
		new_scheme = damon_new_scheme(&src_scheme->pattern,
				src_scheme->action,
				src_scheme->apply_interval_us,
				&src_scheme->quota, &src_scheme->wmarks,
				NUMA_NO_NODE);
		if (!new_scheme)
			return -ENOMEM;
		err = damos_commit(new_scheme, src_scheme);
		if (err) {
			damon_destroy_scheme(new_scheme);
			return err;
		}
		damon_add_scheme(dst, new_scheme);
	}
	return 0;
}

static struct damon_target *damon_nth_target(int n, struct damon_ctx *ctx)
{
	struct damon_target *t;
	int i = 0;

	damon_for_each_target(t, ctx) {
		if (i++ == n)
			return t;
	}
	return NULL;
}

/*
 * The caller should ensure the regions of @src are
 * 1. valid (end >= start) and
 * 2. sorted by starting address.
 *
 * If @src has no region, @dst keeps current regions.
 */
static int damon_commit_target_regions(struct damon_target *dst,
		struct damon_target *src, unsigned long src_min_sz_region)
{
	struct damon_region *src_region;
	struct damon_addr_range *ranges;
	int i = 0, err;

	damon_for_each_region(src_region, src)
		i++;
	if (!i)
		return 0;

	ranges = kmalloc_array(i, sizeof(*ranges), GFP_KERNEL | __GFP_NOWARN);
	if (!ranges)
		return -ENOMEM;
	i = 0;
	damon_for_each_region(src_region, src)
		ranges[i++] = src_region->ar;
	err = damon_set_regions(dst, ranges, i, src_min_sz_region);
	kfree(ranges);
	return err;
}

static int damon_commit_target(
		struct damon_target *dst, bool dst_has_pid,
		struct damon_target *src, bool src_has_pid,
		unsigned long src_min_sz_region)
{
	int err;

	err = damon_commit_target_regions(dst, src, src_min_sz_region);
	if (err)
		return err;
	if (dst_has_pid)
		put_pid(dst->pid);
	if (src_has_pid)
		get_pid(src->pid);
	dst->pid = src->pid;
	return 0;
}

static int damon_commit_targets(
		struct damon_ctx *dst, struct damon_ctx *src)
{
	struct damon_target *dst_target, *next, *src_target, *new_target;
	int i = 0, j = 0, err;

	damon_for_each_target_safe(dst_target, next, dst) {
		src_target = damon_nth_target(i++, src);
		/*
		 * If src target is obsolete, do not commit the parameters to
		 * the dst target, and further remove the dst target.
		 */
		if (src_target && !src_target->obsolete) {
			err = damon_commit_target(
					dst_target, damon_target_has_pid(dst),
					src_target, damon_target_has_pid(src),
					src->min_sz_region);
			if (err)
				return err;
		} else {
			struct damos *s;

			damon_destroy_target(dst_target, dst);
			damon_for_each_scheme(s, dst) {
				if (s->quota.charge_target_from == dst_target) {
					s->quota.charge_target_from = NULL;
					s->quota.charge_addr_from = 0;
				}
			}
		}
	}

	damon_for_each_target_safe(src_target, next, src) {
		if (j++ < i)
			continue;
		/* target to remove has no matching dst */
		if (src_target->obsolete)
			return -EINVAL;
		new_target = damon_new_target();
		if (!new_target)
			return -ENOMEM;
		err = damon_commit_target(new_target, false,
				src_target, damon_target_has_pid(src),
				src->min_sz_region);
		if (err) {
			damon_destroy_target(new_target, NULL);
			return err;
		}
		damon_add_target(dst, new_target);
	}
	return 0;
}

/**
 * damon_commit_ctx() - Commit parameters of a DAMON context to another.
 * @dst:	The commit destination DAMON context.
 * @src:	The commit source DAMON context.
 *
 * This function copies user-specified parameters from @src to @dst and
 * updates the internal status and results accordingly.  Users should use this
 * function for context-level parameters update of a running context, instead
 * of manual in-place updates.
 *
 * This function should be called from parameters-update safe context, like
 * damon_call().
 *
 * Return: 0 on success, negative error code otherwise.
 */
int damon_commit_ctx(struct damon_ctx *dst, struct damon_ctx *src)
{
	int err;

	err = damon_commit_schemes(dst, src);
	if (err)
		return err;
	err = damon_commit_targets(dst, src);
	if (err)
		return err;
	/*
	 * schemes and targets should be updated first, since
	 * 1. damon_set_attrs() updates monitoring results of targets and
	 *    next_apply_sis of schemes, and
	 * 2. ops update should be done after pid handling is done (target
	 *    committing requires putting pids).
	 */
	if (!damon_attrs_equals(&dst->attrs, &src->attrs)) {
		err = damon_set_attrs(dst, &src->attrs);
		if (err)
			return err;
	}
	dst->ops = src->ops;
	dst->addr_unit = src->addr_unit;
	dst->min_sz_region = src->min_sz_region;

	return 0;
}
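/*
 * Example (an illustrative sketch): updating parameters of a running context
 * by building a second, not-running context carrying the new parameters and
 * committing it.  The damon_call() plumbing that makes this safe against the
 * kdamond is omitted here; see damon_call() below.
 *
 *	struct damon_ctx *new_params = damon_new_ctx();
 *
 *	if (!new_params)
 *		return -ENOMEM;
 *	(set attrs/targets/schemes of new_params as usual, then:)
 *	err = damon_commit_ctx(running_ctx, new_params);
 *	damon_destroy_ctx(new_params);
 */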
/**
 * damon_nr_running_ctxs() - Return number of currently running contexts.
 */
int damon_nr_running_ctxs(void)
{
	int nr_ctxs;

	mutex_lock(&damon_lock);
	nr_ctxs = nr_running_ctxs;
	mutex_unlock(&damon_lock);

	return nr_ctxs;
}

/* Returns the size upper limit for each monitoring region */
static unsigned long damon_region_sz_limit(struct damon_ctx *ctx)
{
	struct damon_target *t;
	struct damon_region *r;
	unsigned long sz = 0;

	damon_for_each_target(t, ctx) {
		damon_for_each_region(r, t)
			sz += damon_sz_region(r);
	}

	if (ctx->attrs.min_nr_regions)
		sz /= ctx->attrs.min_nr_regions;
	if (sz < ctx->min_sz_region)
		sz = ctx->min_sz_region;

	return sz;
}

static int kdamond_fn(void *data);

/*
 * __damon_start() - Starts monitoring with given context.
 * @ctx:	monitoring context
 *
 * This function should be called while damon_lock is held.
 *
 * Return: 0 on success, negative error code otherwise.
 */
static int __damon_start(struct damon_ctx *ctx)
{
	int err = -EBUSY;

	mutex_lock(&ctx->kdamond_lock);
	if (!ctx->kdamond) {
		err = 0;
		reinit_completion(&ctx->kdamond_started);
		ctx->kdamond = kthread_run(kdamond_fn, ctx, "kdamond.%d",
				nr_running_ctxs);
		if (IS_ERR(ctx->kdamond)) {
			err = PTR_ERR(ctx->kdamond);
			ctx->kdamond = NULL;
		} else {
			wait_for_completion(&ctx->kdamond_started);
		}
	}
	mutex_unlock(&ctx->kdamond_lock);

	return err;
}

/**
 * damon_start() - Starts monitoring for a given group of contexts.
 * @ctxs:	an array of the pointers for contexts to start monitoring
 * @nr_ctxs:	size of @ctxs
 * @exclusive:	exclusiveness of this group of contexts
 *
 * This function starts a group of monitoring threads for a group of
 * monitoring contexts.  One thread per context is created and run in
 * parallel.  The caller should handle synchronization between the threads by
 * itself.  If @exclusive is true and a group of threads that was created by
 * another 'damon_start()' call is currently running, this function does
 * nothing but returns -EBUSY.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int damon_start(struct damon_ctx **ctxs, int nr_ctxs, bool exclusive)
{
	int i;
	int err = 0;

	mutex_lock(&damon_lock);
	if ((exclusive && nr_running_ctxs) ||
			(!exclusive && running_exclusive_ctxs)) {
		mutex_unlock(&damon_lock);
		return -EBUSY;
	}

	for (i = 0; i < nr_ctxs; i++) {
		err = __damon_start(ctxs[i]);
		if (err)
			break;
		nr_running_ctxs++;
	}
	if (exclusive && nr_running_ctxs)
		running_exclusive_ctxs = true;
	mutex_unlock(&damon_lock);

	return err;
}

/*
 * __damon_stop() - Stops monitoring of a given context.
 * @ctx:	monitoring context
 *
 * Return: 0 on success, negative error code otherwise.
 */
static int __damon_stop(struct damon_ctx *ctx)
{
	struct task_struct *tsk;

	mutex_lock(&ctx->kdamond_lock);
	tsk = ctx->kdamond;
	if (tsk) {
		get_task_struct(tsk);
		mutex_unlock(&ctx->kdamond_lock);
		kthread_stop_put(tsk);
		return 0;
	}
	mutex_unlock(&ctx->kdamond_lock);

	return -EPERM;
}

/**
 * damon_stop() - Stops monitoring for a given group of contexts.
 * @ctxs:	an array of the pointers for contexts to stop monitoring
 * @nr_ctxs:	size of @ctxs
 *
 * Return: 0 on success, negative error code otherwise.
 */
int damon_stop(struct damon_ctx **ctxs, int nr_ctxs)
{
	int i, err = 0;

	for (i = 0; i < nr_ctxs; i++) {
		/* nr_running_ctxs is decremented in kdamond_fn */
		err = __damon_stop(ctxs[i]);
		if (err)
			break;
	}
	return err;
}

/**
 * damon_is_running() - Returns if a given DAMON context is running.
 * @ctx:	The DAMON context to see if running.
 *
 * Return: true if @ctx is running, false otherwise.
 */
bool damon_is_running(struct damon_ctx *ctx)
{
	bool running;

	mutex_lock(&ctx->kdamond_lock);
	running = ctx->kdamond != NULL;
	mutex_unlock(&ctx->kdamond_lock);
	return running;
}
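/*
 * Example (an illustrative sketch): starting and later stopping a single,
 * fully configured context as a non-exclusive group of one.
 *
 *	struct damon_ctx *ctxs[] = { ctx };
 *
 *	err = damon_start(ctxs, 1, false);
 *	if (err)
 *		return err;
 *	(monitoring runs; later:)
 *	err = damon_stop(ctxs, 1);
 */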
/**
 * damon_call() - Invoke a given function on DAMON worker thread (kdamond).
 * @ctx:	DAMON context to call the function for.
 * @control:	Control variable of the call request.
 *
 * Ask DAMON worker thread (kdamond) of @ctx to call a function with an
 * argument data that are respectively passed via &damon_call_control->fn and
 * &damon_call_control->data of @control.  If &damon_call_control->repeat of
 * @control is unset, further wait until the kdamond finishes handling of the
 * request.  Otherwise, return as soon as the request is made.
 *
 * The kdamond executes the function with the argument in the main loop, just
 * after a sampling of the iteration is finished.  The function can hence
 * safely access the internal data of the &struct damon_ctx without additional
 * synchronization.  The return value of the function will be saved in
 * &damon_call_control->return_code.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int damon_call(struct damon_ctx *ctx, struct damon_call_control *control)
{
	if (!control->repeat)
		init_completion(&control->completion);
	control->canceled = false;
	INIT_LIST_HEAD(&control->list);

	mutex_lock(&ctx->call_controls_lock);
	list_add_tail(&control->list, &ctx->call_controls);
	mutex_unlock(&ctx->call_controls_lock);
	if (!damon_is_running(ctx))
		return -EINVAL;
	if (control->repeat)
		return 0;
	wait_for_completion(&control->completion);
	if (control->canceled)
		return -ECANCELED;
	return 0;
}

/**
 * damos_walk() - Invoke a given function while DAMOS walks regions.
 * @ctx:	DAMON context to call the function for.
 * @control:	Control variable of the walk request.
 *
 * Ask DAMON worker thread (kdamond) of @ctx to call a function for each
 * region that the kdamond will apply DAMOS action to, and wait until the
 * kdamond finishes handling of the request.
 *
 * The kdamond executes the given function in the main loop, for each region
 * just after it applied any DAMOS actions of @ctx to it.  The invocation is
 * made only within one &damos->apply_interval_us since damos_walk()
 * invocation, for each scheme.  The given callback function can hence safely
 * access the internal data of &struct damon_ctx and &struct damon_region that
 * each of the schemes will apply the action to in the next interval, without
 * additional synchronizations against the kdamond.  If every scheme of @ctx
 * has passed at least one &damos->apply_interval_us, kdamond marks the
 * request as completed so that damos_walk() can wake up and return.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int damos_walk(struct damon_ctx *ctx, struct damos_walk_control *control)
{
	init_completion(&control->completion);
	control->canceled = false;
	mutex_lock(&ctx->walk_control_lock);
	if (ctx->walk_control) {
		mutex_unlock(&ctx->walk_control_lock);
		return -EBUSY;
	}
	ctx->walk_control = control;
	mutex_unlock(&ctx->walk_control_lock);
	if (!damon_is_running(ctx))
		return -EINVAL;
	wait_for_completion(&control->completion);
	if (control->canceled)
		return -ECANCELED;
	return 0;
}
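/*
 * Example (an illustrative sketch): synchronously running a hypothetical
 * callback on the kdamond of a running context.  'read_stats_fn' and
 * 'my_stats' are made-up names; the callback matches
 * &damon_call_control->fn and can safely read @ctx internals, since the
 * kdamond itself invokes it.
 *
 *	struct damon_call_control control = {
 *		.fn = read_stats_fn,
 *		.data = &my_stats,
 *	};
 *
 *	err = damon_call(ctx, &control);
 *	if (!err)
 *		err = control.return_code;
 */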
/*
 * Warn and fix corrupted ->nr_accesses[_bp] for investigation and to prevent
 * the problem from being propagated.
 */
static void damon_warn_fix_nr_accesses_corruption(struct damon_region *r)
{
	if (r->nr_accesses_bp == r->nr_accesses * 10000)
		return;
	WARN_ONCE(true, "invalid nr_accesses_bp at reset: %u %u\n",
			r->nr_accesses_bp, r->nr_accesses);
	r->nr_accesses_bp = r->nr_accesses * 10000;
}

/*
 * Reset the aggregated monitoring results ('nr_accesses' of each region).
 */
static void kdamond_reset_aggregated(struct damon_ctx *c)
{
	struct damon_target *t;
	unsigned int ti = 0;	/* target's index */

	damon_for_each_target(t, c) {
		struct damon_region *r;

		damon_for_each_region(r, t) {
			trace_damon_aggregated(ti, r, damon_nr_regions(t));
			damon_warn_fix_nr_accesses_corruption(r);
			r->last_nr_accesses = r->nr_accesses;
			r->nr_accesses = 0;
		}
		ti++;
	}
}

static unsigned long damon_get_intervals_score(struct damon_ctx *c)
{
	struct damon_target *t;
	struct damon_region *r;
	unsigned long sz_region, max_access_events = 0, access_events = 0;
	unsigned long target_access_events;
	unsigned long goal_bp = c->attrs.intervals_goal.access_bp;

	damon_for_each_target(t, c) {
		damon_for_each_region(r, t) {
			sz_region = damon_sz_region(r);
			max_access_events += sz_region * c->attrs.aggr_samples;
			access_events += sz_region * r->nr_accesses;
		}
	}
	target_access_events = max_access_events * goal_bp / 10000;
	target_access_events = target_access_events ? : 1;
	return access_events * 10000 / target_access_events;
}

static unsigned long damon_feed_loop_next_input(unsigned long last_input,
		unsigned long score);

static unsigned long damon_get_intervals_adaptation_bp(struct damon_ctx *c)
{
	unsigned long score_bp, adaptation_bp;

	score_bp = damon_get_intervals_score(c);
	adaptation_bp = damon_feed_loop_next_input(100000000, score_bp) /
		10000;
	/*
	 * adaptation_bp ranges from 1 to 20,000.  Avoid too rapid reduction
	 * of the intervals by rescaling [1,10,000] to [5000, 10,000].
	 */
	if (adaptation_bp <= 10000)
		adaptation_bp = 5000 + adaptation_bp / 2;
	return adaptation_bp;
}

static void kdamond_tune_intervals(struct damon_ctx *c)
{
	unsigned long adaptation_bp;
	struct damon_attrs new_attrs;
	struct damon_intervals_goal *goal;

	adaptation_bp = damon_get_intervals_adaptation_bp(c);
	if (adaptation_bp == 10000)
		return;

	new_attrs = c->attrs;
	goal = &c->attrs.intervals_goal;
	new_attrs.sample_interval = min(goal->max_sample_us,
			c->attrs.sample_interval * adaptation_bp / 10000);
	new_attrs.sample_interval = max(goal->min_sample_us,
			new_attrs.sample_interval);
	new_attrs.aggr_interval = new_attrs.sample_interval *
		c->attrs.aggr_samples;
	trace_damon_monitor_intervals_tune(new_attrs.sample_interval);
	damon_set_attrs(c, &new_attrs);
}
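/*
 * Worked example (illustrative): suppose the observed access events are half
 * of what the user's goal wants, so damon_get_intervals_score() returns
 * 5,000.  The feed loop then suggests
 * 100,000,000 * ((10,000 - 5,000) / 10,000 + 1) == 150,000,000, and
 * damon_get_intervals_adaptation_bp() divides it by 10,000, giving
 * 15,000 bp.  kdamond_tune_intervals() hence multiplies the sampling
 * interval by 1.5 (within the min/max bounds), since the current, finer
 * sampling observed fewer events than the goal asks for.
 */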
static void damon_split_region_at(struct damon_target *t,
		struct damon_region *r, unsigned long sz_r);

static bool __damos_valid_target(struct damon_region *r, struct damos *s)
{
	unsigned long sz;
	unsigned int nr_accesses = r->nr_accesses_bp / 10000;

	sz = damon_sz_region(r);
	return s->pattern.min_sz_region <= sz &&
		sz <= s->pattern.max_sz_region &&
		s->pattern.min_nr_accesses <= nr_accesses &&
		nr_accesses <= s->pattern.max_nr_accesses &&
		s->pattern.min_age_region <= r->age &&
		r->age <= s->pattern.max_age_region;
}

static bool damos_valid_target(struct damon_ctx *c, struct damon_target *t,
		struct damon_region *r, struct damos *s)
{
	bool ret = __damos_valid_target(r, s);

	if (!ret || !s->quota.esz || !c->ops.get_scheme_score)
		return ret;

	return c->ops.get_scheme_score(c, t, r, s) >= s->quota.min_score;
}

/*
 * damos_skip_charged_region() - Check if the given region or starting part of
 * it is already charged for the DAMOS quota.
 * @t:	The target of the region.
 * @rp:	The pointer to the region.
 * @s:	The scheme to be applied.
 * @min_sz_region:	minimum region size.
 *
 * If a quota of a scheme has exceeded in a quota charge window, the scheme's
 * action would be applied to only a part of the regions that fulfill the
 * target access pattern.  To avoid applying the scheme action to only already
 * applied regions, DAMON skips applying the scheme action to the regions that
 * were charged in the previous charge window.
 *
 * This function checks if a given region should be skipped or not for the
 * reason.  If only the starting part of the region has previously been
 * charged, this function splits the region into two so that the second one
 * covers the area that was not charged in the previous charge window, saves
 * the second region in *rp, and returns false, so that the caller can apply
 * DAMON action to the second one.
 *
 * Return: true if the region should be entirely skipped, false otherwise.
 */
static bool damos_skip_charged_region(struct damon_target *t,
		struct damon_region **rp, struct damos *s,
		unsigned long min_sz_region)
{
	struct damon_region *r = *rp;
	struct damos_quota *quota = &s->quota;
	unsigned long sz_to_skip;

	/* Skip previously charged regions */
	if (quota->charge_target_from) {
		if (t != quota->charge_target_from)
			return true;
		if (r == damon_last_region(t)) {
			quota->charge_target_from = NULL;
			quota->charge_addr_from = 0;
			return true;
		}
		if (quota->charge_addr_from &&
				r->ar.end <= quota->charge_addr_from)
			return true;

		if (quota->charge_addr_from && r->ar.start <
				quota->charge_addr_from) {
			sz_to_skip = ALIGN_DOWN(quota->charge_addr_from -
					r->ar.start, min_sz_region);
			if (!sz_to_skip) {
				if (damon_sz_region(r) <= min_sz_region)
					return true;
				sz_to_skip = min_sz_region;
			}
			damon_split_region_at(t, r, sz_to_skip);
			r = damon_next_region(r);
			*rp = r;
		}
		quota->charge_target_from = NULL;
		quota->charge_addr_from = 0;
	}
	return false;
}

static void damos_update_stat(struct damos *s,
		unsigned long sz_tried, unsigned long sz_applied,
		unsigned long sz_ops_filter_passed)
{
	s->stat.nr_tried++;
	s->stat.sz_tried += sz_tried;
	if (sz_applied)
		s->stat.nr_applied++;
	s->stat.sz_applied += sz_applied;
	s->stat.sz_ops_filter_passed += sz_ops_filter_passed;
}

static bool damos_filter_match(struct damon_ctx *ctx, struct damon_target *t,
		struct damon_region *r, struct damos_filter *filter,
		unsigned long min_sz_region)
{
	bool matched = false;
	struct damon_target *ti;
	int target_idx = 0;
	unsigned long start, end;

	switch (filter->type) {
	case DAMOS_FILTER_TYPE_TARGET:
		damon_for_each_target(ti, ctx) {
			if (ti == t)
				break;
			target_idx++;
		}
		matched = target_idx == filter->target_idx;
		break;
	case DAMOS_FILTER_TYPE_ADDR:
		start = ALIGN_DOWN(filter->addr_range.start, min_sz_region);
		end = ALIGN_DOWN(filter->addr_range.end, min_sz_region);

		/* inside the range */
		if (start <= r->ar.start && r->ar.end <= end) {
			matched = true;
			break;
		}
		/* outside of the range */
		if (r->ar.end <= start || end <= r->ar.start) {
			matched = false;
			break;
		}
		/* start before the range and overlap */
		if (r->ar.start < start) {
			damon_split_region_at(t, r, start - r->ar.start);
			matched = false;
			break;
		}
		/* start inside the range */
		damon_split_region_at(t, r, end - r->ar.start);
		matched = true;
		break;
	default:
		return false;
	}

	return matched == filter->matching;
}

static bool damos_filter_out(struct damon_ctx *ctx, struct damon_target *t,
		struct damon_region *r, struct damos *s)
{
	struct damos_filter *filter;

	s->core_filters_allowed = false;
	damos_for_each_core_filter(filter, s) {
		if (damos_filter_match(ctx, t, r, filter, ctx->min_sz_region)) {
			if (filter->allow)
				s->core_filters_allowed = true;
			return !filter->allow;
		}
	}
	return s->core_filters_default_reject;
}
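/*
 * Worked example (illustrative) of the DAMOS_FILTER_TYPE_ADDR case above:
 * for a region [0, 100) and a filter range [40, 80) (assumed already
 * aligned), the region is first split at 40 with the front part not
 * matching.  When the remainder [40, 100) is evaluated later, it is split
 * at 80, so that exactly [40, 80) is the part reported as matching the
 * filter.
 */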
/*
 * damos_walk_call_walk() - Call &damos_walk_control->walk_fn.
 * @ctx:	The context of &damon_ctx->walk_control.
 * @t:	The monitoring target of @r to which @s will be applied.
 * @r:	The region of @t to which @s will be applied.
 * @s:	The scheme of @ctx that will be applied to @r.
 *
 * This function is called from kdamond whenever it has asked the operation
 * set to apply a DAMOS scheme action to a region.  If a DAMOS walk request is
 * installed by damos_walk() and not yet uninstalled, invoke it.
 */
static void damos_walk_call_walk(struct damon_ctx *ctx, struct damon_target *t,
		struct damon_region *r, struct damos *s,
		unsigned long sz_filter_passed)
{
	struct damos_walk_control *control;

	if (s->walk_completed)
		return;

	control = ctx->walk_control;
	if (!control)
		return;

	control->walk_fn(control->data, ctx, t, r, s, sz_filter_passed);
}

/*
 * damos_walk_complete() - Complete DAMOS walk request if all walks are done.
 * @ctx:	The context of &damon_ctx->walk_control.
 * @s:	A scheme of @ctx for which all walks are now done.
 *
 * This function is called when kdamond finished applying the action of a
 * DAMOS scheme to all regions that are eligible for the given
 * &damos->apply_interval_us.  If every scheme of @ctx including @s now
 * finished walking for at least one &damos->apply_interval_us, this function
 * marks the handling of the given DAMOS walk request as done, so that
 * damos_walk() can wake up and return.
 */
static void damos_walk_complete(struct damon_ctx *ctx, struct damos *s)
{
	struct damos *siter;
	struct damos_walk_control *control;

	control = ctx->walk_control;
	if (!control)
		return;

	s->walk_completed = true;
	/* if all schemes completed, signal completion to walker */
	damon_for_each_scheme(siter, ctx) {
		if (!siter->walk_completed)
			return;
	}
	damon_for_each_scheme(siter, ctx)
		siter->walk_completed = false;

	complete(&control->completion);
	ctx->walk_control = NULL;
}

/*
 * damos_walk_cancel() - Cancel the current DAMOS walk request.
 * @ctx:	The context of &damon_ctx->walk_control.
 *
 * This function is called when @ctx is deactivated by DAMOS watermarks, DAMOS
 * walk is requested but there is no DAMOS scheme to walk for, or the kdamond
 * is already out of the main loop and therefore about to be terminated, and
 * hence cannot continue the walks.  This function therefore marks the walk
 * request as canceled, so that damos_walk() can wake up and return.
 */
static void damos_walk_cancel(struct damon_ctx *ctx)
{
	struct damos_walk_control *control;

	mutex_lock(&ctx->walk_control_lock);
	control = ctx->walk_control;
	mutex_unlock(&ctx->walk_control_lock);

	if (!control)
		return;
	control->canceled = true;
	complete(&control->completion);
	mutex_lock(&ctx->walk_control_lock);
	ctx->walk_control = NULL;
	mutex_unlock(&ctx->walk_control_lock);
}

static void damos_apply_scheme(struct damon_ctx *c, struct damon_target *t,
		struct damon_region *r, struct damos *s)
{
	struct damos_quota *quota = &s->quota;
	unsigned long sz = damon_sz_region(r);
	struct timespec64 begin, end;
	unsigned long sz_applied = 0;
	unsigned long sz_ops_filter_passed = 0;
	/*
	 * We plan to support multiple context per kdamond, as DAMON sysfs
	 * implies with 'nr_contexts' file.  Nevertheless, only single context
	 * per kdamond is supported for now.  So, we can simply use '0' context
So, we can simply use '0' context 1874 * index here. 1875 */ 1876 unsigned int cidx = 0; 1877 struct damos *siter; /* schemes iterator */ 1878 unsigned int sidx = 0; 1879 struct damon_target *titer; /* targets iterator */ 1880 unsigned int tidx = 0; 1881 bool do_trace = false; 1882 1883 /* get indices for trace_damos_before_apply() */ 1884 if (trace_damos_before_apply_enabled()) { 1885 damon_for_each_scheme(siter, c) { 1886 if (siter == s) 1887 break; 1888 sidx++; 1889 } 1890 damon_for_each_target(titer, c) { 1891 if (titer == t) 1892 break; 1893 tidx++; 1894 } 1895 do_trace = true; 1896 } 1897 1898 if (c->ops.apply_scheme) { 1899 if (quota->esz && quota->charged_sz + sz > quota->esz) { 1900 sz = ALIGN_DOWN(quota->esz - quota->charged_sz, 1901 c->min_sz_region); 1902 if (!sz) 1903 goto update_stat; 1904 damon_split_region_at(t, r, sz); 1905 } 1906 if (damos_filter_out(c, t, r, s)) 1907 return; 1908 ktime_get_coarse_ts64(&begin); 1909 trace_damos_before_apply(cidx, sidx, tidx, r, 1910 damon_nr_regions(t), do_trace); 1911 sz_applied = c->ops.apply_scheme(c, t, r, s, 1912 &sz_ops_filter_passed); 1913 damos_walk_call_walk(c, t, r, s, sz_ops_filter_passed); 1914 ktime_get_coarse_ts64(&end); 1915 quota->total_charged_ns += timespec64_to_ns(&end) - 1916 timespec64_to_ns(&begin); 1917 quota->charged_sz += sz; 1918 if (quota->esz && quota->charged_sz >= quota->esz) { 1919 quota->charge_target_from = t; 1920 quota->charge_addr_from = r->ar.end + 1; 1921 } 1922 } 1923 if (s->action != DAMOS_STAT) 1924 r->age = 0; 1925 1926 update_stat: 1927 damos_update_stat(s, sz, sz_applied, sz_ops_filter_passed); 1928 } 1929 1930 static void damon_do_apply_schemes(struct damon_ctx *c, 1931 struct damon_target *t, 1932 struct damon_region *r) 1933 { 1934 struct damos *s; 1935 1936 damon_for_each_scheme(s, c) { 1937 struct damos_quota *quota = &s->quota; 1938 1939 if (c->passed_sample_intervals < s->next_apply_sis) 1940 continue; 1941 1942 if (!s->wmarks.activated) 1943 continue; 1944 1945 /* Check the quota */ 1946 if (quota->esz && quota->charged_sz >= quota->esz) 1947 continue; 1948 1949 if (damos_skip_charged_region(t, &r, s, c->min_sz_region)) 1950 continue; 1951 1952 if (!damos_valid_target(c, t, r, s)) 1953 continue; 1954 1955 damos_apply_scheme(c, t, r, s); 1956 } 1957 } 1958 1959 /* 1960 * damon_feed_loop_next_input() - get next input to achieve a target score. 1961 * @last_input The last input. 1962 * @score Current score that made with @last_input. 1963 * 1964 * Calculate next input to achieve the target score, based on the last input 1965 * and current score. Assuming the input and the score are positively 1966 * proportional, calculate how much compensation should be added to or 1967 * subtracted from the last input as a proportion of the last input. Avoid 1968 * next input always being zero by setting it non-zero always. In short form 1969 * (assuming support of float and signed calculations), the algorithm is as 1970 * below. 1971 * 1972 * next_input = max(last_input * ((goal - current) / goal + 1), 1) 1973 * 1974 * For simple implementation, we assume the target score is always 10,000. The 1975 * caller should adjust @score for this. 1976 * 1977 * Returns next input that assumed to achieve the target score. 
static void damon_do_apply_schemes(struct damon_ctx *c,
				   struct damon_target *t,
				   struct damon_region *r)
{
	struct damos *s;

	damon_for_each_scheme(s, c) {
		struct damos_quota *quota = &s->quota;

		if (c->passed_sample_intervals < s->next_apply_sis)
			continue;

		if (!s->wmarks.activated)
			continue;

		/* Check the quota */
		if (quota->esz && quota->charged_sz >= quota->esz)
			continue;

		if (damos_skip_charged_region(t, &r, s, c->min_sz_region))
			continue;

		if (!damos_valid_target(c, t, r, s))
			continue;

		damos_apply_scheme(c, t, r, s);
	}
}

/*
 * damon_feed_loop_next_input() - get next input to achieve a target score.
 * @last_input:	The last input.
 * @score:	Current score made with @last_input.
 *
 * Calculate the next input to achieve the target score, based on the last
 * input and the current score.  Assuming the input and the score are
 * positively proportional, calculate how much compensation should be added
 * to or subtracted from the last input, as a proportion of the last input.
 * Keep the next input always non-zero so that the feedback loop cannot get
 * stuck at zero.  In short form (assuming support of float and signed
 * calculations), the algorithm is as below.
 *
 * next_input = max(last_input * ((goal - current) / goal + 1), 1)
 *
 * For simple implementation, we assume the target score is always 10,000.
 * The caller should adjust @score for this.
 *
 * Returns the next input that is assumed to achieve the target score.
 */
static unsigned long damon_feed_loop_next_input(unsigned long last_input,
		unsigned long score)
{
	const unsigned long goal = 10000;
	/* Set the minimum input as 10000 to avoid the compensation being zero */
	const unsigned long min_input = 10000;
	unsigned long score_goal_diff, compensation;
	bool over_achieving = score > goal;

	if (score == goal)
		return last_input;
	if (score >= goal * 2)
		return min_input;

	if (over_achieving)
		score_goal_diff = score - goal;
	else
		score_goal_diff = goal - score;

	if (last_input < ULONG_MAX / score_goal_diff)
		compensation = last_input * score_goal_diff / goal;
	else
		compensation = last_input / goal * score_goal_diff;

	if (over_achieving)
		return max(last_input - compensation, min_input);
	if (last_input < ULONG_MAX - compensation)
		return last_input + compensation;
	return ULONG_MAX;
}
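
/*
 * Worked example of the feedback loop above: with the fixed goal of 10,000,
 * if the current score is 5,000 (half of the goal), the compensation becomes
 * last_input * 5000 / 10000, so the next input is 1.5 * last_input.  If the
 * score is 15,000 (1.5x over-achieving), the next input is halved, but never
 * goes below min_input.  A score at or beyond twice the goal short-circuits
 * to min_input.
 */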
#ifdef CONFIG_PSI

static u64 damos_get_some_mem_psi_total(void)
{
	if (static_branch_likely(&psi_disabled))
		return 0;
	return div_u64(psi_system.total[PSI_AVGS][PSI_MEM * 2],
			NSEC_PER_USEC);
}

#else	/* CONFIG_PSI */

static inline u64 damos_get_some_mem_psi_total(void)
{
	return 0;
}

#endif	/* CONFIG_PSI */

#ifdef CONFIG_NUMA
static __kernel_ulong_t damos_get_node_mem_bp(
		struct damos_quota_goal *goal)
{
	struct sysinfo i;
	__kernel_ulong_t numerator;

	si_meminfo_node(&i, goal->nid);
	if (goal->metric == DAMOS_QUOTA_NODE_MEM_USED_BP)
		numerator = i.totalram - i.freeram;
	else	/* DAMOS_QUOTA_NODE_MEM_FREE_BP */
		numerator = i.freeram;
	return numerator * 10000 / i.totalram;
}

static unsigned long damos_get_node_memcg_used_bp(
		struct damos_quota_goal *goal)
{
	struct mem_cgroup *memcg;
	struct lruvec *lruvec;
	unsigned long used_pages, numerator;
	struct sysinfo i;

	rcu_read_lock();
	memcg = mem_cgroup_from_id(goal->memcg_id);
	rcu_read_unlock();
	if (!memcg) {
		if (goal->metric == DAMOS_QUOTA_NODE_MEMCG_USED_BP)
			return 0;
		else	/* DAMOS_QUOTA_NODE_MEMCG_FREE_BP */
			return 10000;
	}
	mem_cgroup_flush_stats(memcg);
	lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(goal->nid));
	used_pages = lruvec_page_state(lruvec, NR_ACTIVE_ANON);
	used_pages += lruvec_page_state(lruvec, NR_INACTIVE_ANON);
	used_pages += lruvec_page_state(lruvec, NR_ACTIVE_FILE);
	used_pages += lruvec_page_state(lruvec, NR_INACTIVE_FILE);

	si_meminfo_node(&i, goal->nid);
	if (goal->metric == DAMOS_QUOTA_NODE_MEMCG_USED_BP)
		numerator = used_pages;
	else	/* DAMOS_QUOTA_NODE_MEMCG_FREE_BP */
		numerator = i.totalram - used_pages;
	return numerator * 10000 / i.totalram;
}
#else	/* CONFIG_NUMA */
static __kernel_ulong_t damos_get_node_mem_bp(
		struct damos_quota_goal *goal)
{
	return 0;
}

static unsigned long damos_get_node_memcg_used_bp(
		struct damos_quota_goal *goal)
{
	return 0;
}
#endif	/* CONFIG_NUMA */

static void damos_set_quota_goal_current_value(struct damos_quota_goal *goal)
{
	u64 now_psi_total;

	switch (goal->metric) {
	case DAMOS_QUOTA_USER_INPUT:
		/* User should already have set goal->current_value */
		break;
	case DAMOS_QUOTA_SOME_MEM_PSI_US:
		now_psi_total = damos_get_some_mem_psi_total();
		goal->current_value = now_psi_total - goal->last_psi_total;
		goal->last_psi_total = now_psi_total;
		break;
	case DAMOS_QUOTA_NODE_MEM_USED_BP:
	case DAMOS_QUOTA_NODE_MEM_FREE_BP:
		goal->current_value = damos_get_node_mem_bp(goal);
		break;
	case DAMOS_QUOTA_NODE_MEMCG_USED_BP:
	case DAMOS_QUOTA_NODE_MEMCG_FREE_BP:
		goal->current_value = damos_get_node_memcg_used_bp(goal);
		break;
	default:
		break;
	}
}

/* Return the highest score since it makes schemes least aggressive */
static unsigned long damos_quota_score(struct damos_quota *quota)
{
	struct damos_quota_goal *goal;
	unsigned long highest_score = 0;

	damos_for_each_quota_goal(goal, quota) {
		damos_set_quota_goal_current_value(goal);
		highest_score = max(highest_score,
				goal->current_value * 10000 /
				goal->target_value);
	}

	return highest_score;
}
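
/*
 * Worked example of the score above: for a goal with metric
 * DAMOS_QUOTA_NODE_MEM_USED_BP, a target_value of 9,000 (90% of the node's
 * memory), and a measured current_value of 4,500 (45%), the score is
 * 4500 * 10000 / 9000 = 5,000, i.e., 50% achievement.  The highest score
 * among the goals is fed to damon_feed_loop_next_input(), so the most
 * conservative goal drives the quota.
 */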
/*
 * Called only if quota->ms or quota->sz is set, or quota->goals is not empty
 */
static void damos_set_effective_quota(struct damos_quota *quota)
{
	unsigned long throughput;
	unsigned long esz = ULONG_MAX;

	if (!quota->ms && list_empty(&quota->goals)) {
		quota->esz = quota->sz;
		return;
	}

	if (!list_empty(&quota->goals)) {
		unsigned long score = damos_quota_score(quota);

		quota->esz_bp = damon_feed_loop_next_input(
				max(quota->esz_bp, 10000UL),
				score);
		esz = quota->esz_bp / 10000;
	}

	if (quota->ms) {
		if (quota->total_charged_ns)
			throughput = mult_frac(quota->total_charged_sz, 1000000,
					quota->total_charged_ns);
		else
			throughput = PAGE_SIZE * 1024;
		esz = min(throughput * quota->ms, esz);
	}

	if (quota->sz && quota->sz < esz)
		esz = quota->sz;

	quota->esz = esz;
}

static void damos_trace_esz(struct damon_ctx *c, struct damos *s,
		struct damos_quota *quota)
{
	unsigned int cidx = 0, sidx = 0;
	struct damos *siter;

	damon_for_each_scheme(siter, c) {
		if (siter == s)
			break;
		sidx++;
	}
	trace_damos_esz(cidx, sidx, quota->esz);
}

static void damos_adjust_quota(struct damon_ctx *c, struct damos *s)
{
	struct damos_quota *quota = &s->quota;
	struct damon_target *t;
	struct damon_region *r;
	unsigned long cumulated_sz, cached_esz;
	unsigned int score, max_score = 0;

	if (!quota->ms && !quota->sz && list_empty(&quota->goals))
		return;

	/* First charge window */
	if (!quota->total_charged_sz && !quota->charged_from) {
		quota->charged_from = jiffies;
		damos_set_effective_quota(quota);
	}

	/* New charge window starts */
	if (time_after_eq(jiffies, quota->charged_from +
				msecs_to_jiffies(quota->reset_interval))) {
		if (quota->esz && quota->charged_sz >= quota->esz)
			s->stat.qt_exceeds++;
		quota->total_charged_sz += quota->charged_sz;
		quota->charged_from = jiffies;
		quota->charged_sz = 0;
		if (trace_damos_esz_enabled())
			cached_esz = quota->esz;
		damos_set_effective_quota(quota);
		if (trace_damos_esz_enabled() && quota->esz != cached_esz)
			damos_trace_esz(c, s, quota);
	}

	if (!c->ops.get_scheme_score)
		return;

	/* Fill up the score histogram */
	memset(c->regions_score_histogram, 0,
			sizeof(*c->regions_score_histogram) *
			(DAMOS_MAX_SCORE + 1));
	damon_for_each_target(t, c) {
		damon_for_each_region(r, t) {
			if (!__damos_valid_target(r, s))
				continue;
			score = c->ops.get_scheme_score(c, t, r, s);
			c->regions_score_histogram[score] +=
				damon_sz_region(r);
			if (score > max_score)
				max_score = score;
		}
	}

	/* Set the min score limit */
	for (cumulated_sz = 0, score = max_score; ; score--) {
		cumulated_sz += c->regions_score_histogram[score];
		if (cumulated_sz >= quota->esz || !score)
			break;
	}
	quota->min_score = score;
}
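
/*
 * Worked example of the min score selection above: with regions totaling
 * 1 MiB at score 9, 2 MiB at score 7, and 1 MiB at score 3, and an effective
 * quota of 2.5 MiB, the walk from the maximum score accumulates 1 MiB at
 * score 9 and 3 MiB at score 7, crossing the quota there.  min_score becomes
 * 7, so only regions scoring 7 or higher are applied under the quota.
 */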
static void kdamond_apply_schemes(struct damon_ctx *c)
{
	struct damon_target *t;
	struct damon_region *r, *next_r;
	struct damos *s;
	unsigned long sample_interval = c->attrs.sample_interval ?
		c->attrs.sample_interval : 1;
	bool has_schemes_to_apply = false;

	damon_for_each_scheme(s, c) {
		if (c->passed_sample_intervals < s->next_apply_sis)
			continue;

		if (!s->wmarks.activated)
			continue;

		has_schemes_to_apply = true;

		damos_adjust_quota(c, s);
	}

	if (!has_schemes_to_apply)
		return;

	mutex_lock(&c->walk_control_lock);
	damon_for_each_target(t, c) {
		damon_for_each_region_safe(r, next_r, t)
			damon_do_apply_schemes(c, t, r);
	}

	damon_for_each_scheme(s, c) {
		if (c->passed_sample_intervals < s->next_apply_sis)
			continue;
		damos_walk_complete(c, s);
		s->next_apply_sis = c->passed_sample_intervals +
			(s->apply_interval_us ? s->apply_interval_us :
			 c->attrs.aggr_interval) / sample_interval;
		s->last_applied = NULL;
	}
	mutex_unlock(&c->walk_control_lock);
}

/*
 * Merge two adjacent regions into one region
 */
static void damon_merge_two_regions(struct damon_target *t,
		struct damon_region *l, struct damon_region *r)
{
	unsigned long sz_l = damon_sz_region(l), sz_r = damon_sz_region(r);

	l->nr_accesses = (l->nr_accesses * sz_l + r->nr_accesses * sz_r) /
			(sz_l + sz_r);
	l->nr_accesses_bp = l->nr_accesses * 10000;
	l->age = (l->age * sz_l + r->age * sz_r) / (sz_l + sz_r);
	l->ar.end = r->ar.end;
	damon_destroy_region(r, t);
}

/*
 * Merge adjacent regions having similar access frequencies
 *
 * t		target affected by this merge operation
 * thres	'->nr_accesses' diff threshold for the merge
 * sz_limit	size upper limit of each region
 */
static void damon_merge_regions_of(struct damon_target *t, unsigned int thres,
				   unsigned long sz_limit)
{
	struct damon_region *r, *prev = NULL, *next;

	damon_for_each_region_safe(r, next, t) {
		if (abs(r->nr_accesses - r->last_nr_accesses) > thres)
			r->age = 0;
		else if ((r->nr_accesses == 0) != (r->last_nr_accesses == 0))
			r->age = 0;
		else
			r->age++;

		if (prev && prev->ar.end == r->ar.start &&
		    abs(prev->nr_accesses - r->nr_accesses) <= thres &&
		    damon_sz_region(prev) + damon_sz_region(r) <= sz_limit)
			damon_merge_two_regions(t, prev, r);
		else
			prev = r;
	}
}

/*
 * Merge adjacent regions having similar access frequencies
 *
 * threshold	'->nr_accesses' diff threshold for the merge
 * sz_limit	size upper limit of each region
 *
 * This function merges monitoring target regions which are adjacent and whose
 * access frequencies are similar.  This is for minimizing the monitoring
 * overhead under dynamically changeable access patterns.  If a merge was
 * unnecessarily made, later 'kdamond_split_regions()' will revert it.
 *
 * The total number of regions could be higher than the user-defined limit,
 * max_nr_regions, in some cases.  For example, the user can update
 * max_nr_regions to a number that is lower than the current number of regions
 * while DAMON is running.  For such a case, repeat merging until the limit is
 * met while increasing @threshold up to the possible maximum level.
 */
static void kdamond_merge_regions(struct damon_ctx *c, unsigned int threshold,
				  unsigned long sz_limit)
{
	struct damon_target *t;
	unsigned int nr_regions;
	unsigned int max_thres;

	max_thres = c->attrs.aggr_interval /
		(c->attrs.sample_interval ? c->attrs.sample_interval : 1);
	do {
		nr_regions = 0;
		damon_for_each_target(t, c) {
			damon_merge_regions_of(t, threshold, sz_limit);
			nr_regions += damon_nr_regions(t);
		}
		threshold = max(1, threshold * 2);
	} while (nr_regions > c->attrs.max_nr_regions &&
			threshold / 2 < max_thres);
}
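
/*
 * Worked example of the repeated merging above: if @threshold starts at 0
 * and the number of regions still exceeds max_nr_regions, the threshold
 * grows as max(1, 0 * 2) = 1, then 2, 4, 8, ..., letting ever less similar
 * neighbors merge, until the regions fit under the limit or the threshold
 * reaches max_thres (the maximum possible nr_accesses, i.e.,
 * aggr_interval / sample_interval).
 */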
/*
 * Split a region in two
 *
 * r		the region to be split
 * sz_r		size of the first sub-region that will be made
 */
static void damon_split_region_at(struct damon_target *t,
				  struct damon_region *r, unsigned long sz_r)
{
	struct damon_region *new;

	new = damon_new_region(r->ar.start + sz_r, r->ar.end);
	if (!new)
		return;

	r->ar.end = new->ar.start;

	new->age = r->age;
	new->last_nr_accesses = r->last_nr_accesses;
	new->nr_accesses_bp = r->nr_accesses_bp;
	new->nr_accesses = r->nr_accesses;

	damon_insert_region(new, r, damon_next_region(r), t);
}

/* Split every region in the given target into 'nr_subs' regions */
static void damon_split_regions_of(struct damon_target *t, int nr_subs,
				   unsigned long min_sz_region)
{
	struct damon_region *r, *next;
	unsigned long sz_region, sz_sub = 0;
	int i;

	damon_for_each_region_safe(r, next, t) {
		sz_region = damon_sz_region(r);

		for (i = 0; i < nr_subs - 1 &&
				sz_region > 2 * min_sz_region; i++) {
			/*
			 * Randomly select the size of the left sub-region to
			 * be at least 10% and at most 90% of the original
			 * region
			 */
			sz_sub = ALIGN_DOWN(damon_rand(1, 10) *
					sz_region / 10, min_sz_region);
			/* Do not allow blank region */
			if (sz_sub == 0 || sz_sub >= sz_region)
				continue;

			damon_split_region_at(t, r, sz_sub);
			sz_region = sz_sub;
		}
	}
}

/*
 * Split every target region into randomly-sized small regions
 *
 * This function splits every target region into random-sized small regions if
 * the current total number of the regions is equal to or smaller than half of
 * the user-specified maximum number of regions.  This is for maximizing the
 * monitoring accuracy under dynamically changeable access patterns.  If a
 * split was unnecessarily made, later 'kdamond_merge_regions()' will revert
 * it.
 */
static void kdamond_split_regions(struct damon_ctx *ctx)
{
	struct damon_target *t;
	unsigned int nr_regions = 0;
	static unsigned int last_nr_regions;
	int nr_subregions = 2;

	damon_for_each_target(t, ctx)
		nr_regions += damon_nr_regions(t);

	if (nr_regions > ctx->attrs.max_nr_regions / 2)
		return;

	/* Maybe the middle of the region has different access frequency */
	if (last_nr_regions == nr_regions &&
			nr_regions < ctx->attrs.max_nr_regions / 3)
		nr_subregions = 3;

	damon_for_each_target(t, ctx)
		damon_split_regions_of(t, nr_subregions, ctx->min_sz_region);

	last_nr_regions = nr_regions;
}
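
/*
 * Worked example of the split sizing above: for a 100 MiB region with
 * nr_subs of 2, damon_rand(1, 10) picks, say, 3, so the left sub-region
 * becomes ALIGN_DOWN(30 MiB, min_sz_region) and the right one the remaining
 * 70 MiB.  If the number of regions stays unchanged over an aggregation and
 * is below a third of max_nr_regions, three-way splits are tried instead, to
 * expose differing access frequencies inside large regions.
 */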
/*
 * Check whether current monitoring should be stopped
 *
 * The monitoring is stopped when either the user requested to stop, or all
 * monitoring targets are invalid.
 *
 * Returns true if the current monitoring should be stopped.
 */
static bool kdamond_need_stop(struct damon_ctx *ctx)
{
	struct damon_target *t;

	if (kthread_should_stop())
		return true;

	if (!ctx->ops.target_valid)
		return false;

	damon_for_each_target(t, ctx) {
		if (ctx->ops.target_valid(t))
			return false;
	}

	return true;
}

static int damos_get_wmark_metric_value(enum damos_wmark_metric metric,
					unsigned long *metric_value)
{
	switch (metric) {
	case DAMOS_WMARK_FREE_MEM_RATE:
		*metric_value = global_zone_page_state(NR_FREE_PAGES) * 1000 /
			totalram_pages();
		return 0;
	default:
		break;
	}
	return -EINVAL;
}

/*
 * Returns zero if the scheme is active.  Else, returns time to wait for the
 * next watermark check in microseconds.
 */
static unsigned long damos_wmark_wait_us(struct damos *scheme)
{
	unsigned long metric;

	if (damos_get_wmark_metric_value(scheme->wmarks.metric, &metric))
		return 0;

	/* higher than high watermark or lower than low watermark */
	if (metric > scheme->wmarks.high || scheme->wmarks.low > metric) {
		if (scheme->wmarks.activated)
			pr_debug("deactivate a scheme (%d) for %s wmark\n",
					scheme->action,
					str_high_low(metric > scheme->wmarks.high));
		scheme->wmarks.activated = false;
		return scheme->wmarks.interval;
	}

	/* inactive and higher than middle watermark */
	if ((scheme->wmarks.high >= metric && metric >= scheme->wmarks.mid) &&
			!scheme->wmarks.activated)
		return scheme->wmarks.interval;

	if (!scheme->wmarks.activated)
		pr_debug("activate a scheme (%d)\n", scheme->action);
	scheme->wmarks.activated = true;
	return 0;
}
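
/*
 * Worked example of the watermark checks above, using
 * DAMOS_WMARK_FREE_MEM_RATE (in per-thousand of total RAM) with high = 500,
 * mid = 400, and low = 300: at 45% free memory (metric 450), an inactive
 * scheme stays inactive and an active one stays active; at 35% (metric 350,
 * below mid), an inactive scheme gets activated; at 25% (metric 250, below
 * low) or 55% (metric 550, above high), the scheme is deactivated.
 */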
static void kdamond_usleep(unsigned long usecs)
{
	if (usecs >= USLEEP_RANGE_UPPER_BOUND)
		schedule_timeout_idle(usecs_to_jiffies(usecs));
	else
		usleep_range_idle(usecs, usecs + 1);
}

/*
 * kdamond_call() - handle damon_call_control objects.
 * @ctx:	The &struct damon_ctx of the kdamond.
 * @cancel:	Whether to cancel the invocation of the function.
 *
 * If there are &struct damon_call_control requests that were registered via
 * damon_call() on @ctx, do or cancel the invocation of the function depending
 * on @cancel.  @cancel is set when the kdamond is already out of the main
 * loop and therefore will be terminated.
 */
static void kdamond_call(struct damon_ctx *ctx, bool cancel)
{
	struct damon_call_control *control;
	LIST_HEAD(repeat_controls);
	int ret = 0;

	while (true) {
		mutex_lock(&ctx->call_controls_lock);
		control = list_first_entry_or_null(&ctx->call_controls,
				struct damon_call_control, list);
		mutex_unlock(&ctx->call_controls_lock);
		if (!control)
			break;
		if (cancel) {
			control->canceled = true;
		} else {
			ret = control->fn(control->data);
			control->return_code = ret;
		}
		mutex_lock(&ctx->call_controls_lock);
		list_del(&control->list);
		mutex_unlock(&ctx->call_controls_lock);
		if (!control->repeat) {
			complete(&control->completion);
		} else if (control->canceled && control->dealloc_on_cancel) {
			kfree(control);
			continue;
		} else {
			list_add(&control->list, &repeat_controls);
		}
	}
	control = list_first_entry_or_null(&repeat_controls,
			struct damon_call_control, list);
	if (!control || cancel)
		return;
	mutex_lock(&ctx->call_controls_lock);
	list_add_tail(&control->list, &ctx->call_controls);
	mutex_unlock(&ctx->call_controls_lock);
}
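
/*
 * Example usage (illustrative sketch, not used by the kernel): callers such
 * as DAMON modules can have a function executed in the kdamond context,
 * serialized against the main loop, via damon_call(), declared in
 * include/linux/damon.h.  The function and variable names below are
 * hypothetical, and error handling is elided.
 *
 *	static int count_regions_fn(void *data)
 *	{
 *		struct damon_ctx *ctx = data;
 *		struct damon_target *t;
 *		unsigned int nr = 0;
 *
 *		damon_for_each_target(t, ctx)
 *			nr += damon_nr_regions(t);
 *		pr_info("kdamond has %u regions\n", nr);
 *		return 0;
 *	}
 *
 *	struct damon_call_control control = {
 *		.fn = count_regions_fn,
 *		.data = ctx,
 *	};
 *
 *	if (!damon_call(ctx, &control))
 *		pr_info("fn returned %d\n", control.return_code);
 */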
/* Returns negative error code if it's not activated but should return */
static int kdamond_wait_activation(struct damon_ctx *ctx)
{
	struct damos *s;
	unsigned long wait_time;
	unsigned long min_wait_time = 0;
	bool init_wait_time = false;

	while (!kdamond_need_stop(ctx)) {
		damon_for_each_scheme(s, ctx) {
			wait_time = damos_wmark_wait_us(s);
			if (!init_wait_time || wait_time < min_wait_time) {
				init_wait_time = true;
				min_wait_time = wait_time;
			}
		}
		if (!min_wait_time)
			return 0;

		kdamond_usleep(min_wait_time);

		kdamond_call(ctx, false);
		damos_walk_cancel(ctx);
	}
	return -EBUSY;
}

static void kdamond_init_ctx(struct damon_ctx *ctx)
{
	unsigned long sample_interval = ctx->attrs.sample_interval ?
		ctx->attrs.sample_interval : 1;
	unsigned long apply_interval;
	struct damos *scheme;

	ctx->passed_sample_intervals = 0;
	ctx->next_aggregation_sis = ctx->attrs.aggr_interval / sample_interval;
	ctx->next_ops_update_sis = ctx->attrs.ops_update_interval /
		sample_interval;
	ctx->next_intervals_tune_sis = ctx->next_aggregation_sis *
		ctx->attrs.intervals_goal.aggrs;

	damon_for_each_scheme(scheme, ctx) {
		apply_interval = scheme->apply_interval_us ?
			scheme->apply_interval_us : ctx->attrs.aggr_interval;
		scheme->next_apply_sis = apply_interval / sample_interval;
		damos_set_filters_default_reject(scheme);
	}
}

/*
 * The monitoring daemon that runs as a kernel thread
 */
static int kdamond_fn(void *data)
{
	struct damon_ctx *ctx = data;
	struct damon_target *t;
	struct damon_region *r, *next;
	unsigned int max_nr_accesses = 0;
	unsigned long sz_limit = 0;

	pr_debug("kdamond (%d) starts\n", current->pid);

	complete(&ctx->kdamond_started);
	kdamond_init_ctx(ctx);

	if (ctx->ops.init)
		ctx->ops.init(ctx);
	ctx->regions_score_histogram = kmalloc_array(DAMOS_MAX_SCORE + 1,
			sizeof(*ctx->regions_score_histogram), GFP_KERNEL);
	if (!ctx->regions_score_histogram)
		goto done;

	sz_limit = damon_region_sz_limit(ctx);

	while (!kdamond_need_stop(ctx)) {
		/*
		 * ctx->attrs and ctx->next_{aggregation,ops_update}_sis could
		 * be changed from kdamond_call().  Read the values here, and
		 * use those for this iteration.  That is, new values updated
		 * by damon_set_attrs() are respected from the next iteration.
		 */
		unsigned long next_aggregation_sis = ctx->next_aggregation_sis;
		unsigned long next_ops_update_sis = ctx->next_ops_update_sis;
		unsigned long sample_interval = ctx->attrs.sample_interval;

		if (kdamond_wait_activation(ctx))
			break;

		if (ctx->ops.prepare_access_checks)
			ctx->ops.prepare_access_checks(ctx);

		kdamond_usleep(sample_interval);
		ctx->passed_sample_intervals++;

		if (ctx->ops.check_accesses)
			max_nr_accesses = ctx->ops.check_accesses(ctx);

		if (ctx->passed_sample_intervals >= next_aggregation_sis)
			kdamond_merge_regions(ctx,
					max_nr_accesses / 10,
					sz_limit);

		/*
		 * do kdamond_call() and kdamond_apply_schemes() after
		 * kdamond_merge_regions() if possible, to reduce overhead
		 */
		kdamond_call(ctx, false);
		if (!list_empty(&ctx->schemes))
			kdamond_apply_schemes(ctx);
		else
			damos_walk_cancel(ctx);

		sample_interval = ctx->attrs.sample_interval ?
			ctx->attrs.sample_interval : 1;
		if (ctx->passed_sample_intervals >= next_aggregation_sis) {
			if (ctx->attrs.intervals_goal.aggrs &&
					ctx->passed_sample_intervals >=
					ctx->next_intervals_tune_sis) {
				/*
				 * ctx->next_aggregation_sis might be updated
				 * from kdamond_call().  In that case,
				 * damon_set_attrs(), which will be called
				 * from kdamond_tune_intervals(), may wrongly
				 * think this is in the middle of the current
				 * aggregation, and reset the aggregation
				 * information for all regions.  Then, the
				 * following kdamond_reset_aggregated() call
				 * will make the region information invalid,
				 * particularly for ->nr_accesses_bp.
				 *
				 * Reset ->next_aggregation_sis to avoid that.
				 * It will anyway be correctly updated after
				 * this if clause.
				 */
				ctx->next_aggregation_sis =
					next_aggregation_sis;
				ctx->next_intervals_tune_sis +=
					ctx->attrs.aggr_samples *
					ctx->attrs.intervals_goal.aggrs;
				kdamond_tune_intervals(ctx);
				sample_interval = ctx->attrs.sample_interval ?
					ctx->attrs.sample_interval : 1;

			}
			ctx->next_aggregation_sis = next_aggregation_sis +
				ctx->attrs.aggr_interval / sample_interval;

			kdamond_reset_aggregated(ctx);
			kdamond_split_regions(ctx);
		}

		if (ctx->passed_sample_intervals >= next_ops_update_sis) {
			ctx->next_ops_update_sis = next_ops_update_sis +
				ctx->attrs.ops_update_interval /
				sample_interval;
			if (ctx->ops.update)
				ctx->ops.update(ctx);
			sz_limit = damon_region_sz_limit(ctx);
		}
	}
done:
	damon_for_each_target(t, ctx) {
		damon_for_each_region_safe(r, next, t)
			damon_destroy_region(r, t);
	}

	if (ctx->ops.cleanup)
		ctx->ops.cleanup(ctx);
	kfree(ctx->regions_score_histogram);

	pr_debug("kdamond (%d) finishes\n", current->pid);
	mutex_lock(&ctx->kdamond_lock);
	ctx->kdamond = NULL;
	mutex_unlock(&ctx->kdamond_lock);

	kdamond_call(ctx, true);
	damos_walk_cancel(ctx);

	mutex_lock(&damon_lock);
	nr_running_ctxs--;
	if (!nr_running_ctxs && running_exclusive_ctxs)
		running_exclusive_ctxs = false;
	mutex_unlock(&damon_lock);

	damon_destroy_targets(ctx);
	return 0;
}
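
/*
 * Worked example of the main loop timing above: with a sample_interval of
 * 5,000 us and an aggr_interval of 100,000 us, next_aggregation_sis is 20,
 * so the kdamond sleeps 5 ms per iteration, checks accesses, and on every
 * 20th sample merges regions, applies schemes, resets the aggregated
 * counters, and splits regions for the next aggregation window.
 */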
/*
 * struct damon_system_ram_region - System RAM resource address region of
 *				    [@start, @end).
 * @start:	Start address of the region (inclusive).
 * @end:	End address of the region (exclusive).
 */
struct damon_system_ram_region {
	unsigned long start;
	unsigned long end;
};

static int walk_system_ram(struct resource *res, void *arg)
{
	struct damon_system_ram_region *a = arg;

	if (a->end - a->start < resource_size(res)) {
		a->start = res->start;
		a->end = res->end;
	}
	return 0;
}

/*
 * Find the biggest 'System RAM' resource and store its start and end address
 * in @start and @end, respectively.  If no System RAM is found, returns
 * false.
 */
static bool damon_find_biggest_system_ram(unsigned long *start,
					  unsigned long *end)
{
	struct damon_system_ram_region arg = {};

	walk_system_ram_res(0, ULONG_MAX, &arg, walk_system_ram);
	if (arg.end <= arg.start)
		return false;

	*start = arg.start;
	*end = arg.end;
	return true;
}

/**
 * damon_set_region_biggest_system_ram_default() - Set the region of the given
 *	monitoring target as requested, or the biggest 'System RAM'.
 * @t:		The monitoring target to set the region.
 * @start:	The pointer to the start address of the region.
 * @end:	The pointer to the end address of the region.
 * @min_sz_region:	Minimum region size.
 *
 * This function sets the region of @t as requested by @start and @end.  If
 * the values of @start and @end are zero, however, this function finds the
 * biggest 'System RAM' resource and sets the region to cover the resource.
 * In the latter case, this function saves the start and end addresses of the
 * resource in @start and @end, respectively.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int damon_set_region_biggest_system_ram_default(struct damon_target *t,
			unsigned long *start, unsigned long *end,
			unsigned long min_sz_region)
{
	struct damon_addr_range addr_range;

	if (*start > *end)
		return -EINVAL;

	if (!*start && !*end &&
	    !damon_find_biggest_system_ram(start, end))
		return -EINVAL;

	addr_range.start = *start;
	addr_range.end = *end;
	return damon_set_regions(t, &addr_range, 1, min_sz_region);
}

/*
 * damon_moving_sum() - Calculate an inferred moving sum value.
 * @mvsum:	Inferred sum of the last @len_window values.
 * @nomvsum:	Non-moving sum of the last discrete @len_window window values.
 * @len_window:	The number of last values to take care of.
 * @new_value:	New value that will be added to the pseudo moving sum.
 *
 * Moving sum (moving average * window size) is good for handling noise, but
 * the cost of keeping past values can be high for arbitrary window sizes.
 * This function implements a lightweight pseudo moving sum function that
 * doesn't keep the past window values.
 *
 * It simply assumes there was no noise in the past, and gets the no-noise
 * assumed past value to drop from @nomvsum and @len_window.  @nomvsum is a
 * non-moving sum of the last window.  For example, if @len_window is 10 and
 * we have 25 values, @nomvsum is the sum of the 11th to 20th values of the
 * 25 values.  Hence, this function simply drops @nomvsum / @len_window from
 * the given @mvsum and adds @new_value.
 *
 * For example, if @len_window is 10 and @nomvsum is 50, the last 10 values
 * of the last window could vary, e.g., 0, 10, 0, 10, 0, 10, 0, 0, 0, 20.
 * For calculating the next moving sum with a new value, we should drop 0
 * from 50 and add the new value.  However, this function assumes it got
 * value 5 for each of the last ten times.  Based on that assumption, when
 * the next value is measured, it drops the assumed past value, 5, from the
 * current sum, and adds the new value to get the updated pseudo moving sum.
 *
 * This means the value could have errors, but the errors disappear for every
 * @len_window-aligned calls.  For example, if @len_window is 10, the pseudo
 * moving sum with the 11th to the 19th values would have an error, but the
 * sum with the 20th value will not have the error.
 *
 * Return: Pseudo moving sum after getting the @new_value.
 */
static unsigned int damon_moving_sum(unsigned int mvsum, unsigned int nomvsum,
		unsigned int len_window, unsigned int new_value)
{
	return mvsum - nomvsum / len_window + new_value;
}
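
/*
 * Worked example of damon_moving_sum() above: with @len_window of 10,
 * @mvsum of 50, @nomvsum of 50, and @new_value of 20, the assumed past value
 * is 50 / 10 = 5, so the updated sum is 50 - 5 + 20 = 65.  DAMON uses this
 * for ->nr_accesses_bp: with an aggr_interval of 100 ms and a
 * sample_interval of 5 ms, len_window is 20, and each sampling interval
 * drops one twentieth of the last window's sum (last_nr_accesses * 10000)
 * and adds 10000 (accessed) or 0 (not accessed).
 */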
/**
 * damon_update_region_access_rate() - Update the access rate of a region.
 * @r:		The DAMON region to update for its access check result.
 * @accessed:	Whether the region has been accessed during the last sampling
 *		interval.
 * @attrs:	The damon_attrs of the DAMON context.
 *
 * Update the access rate of a region with the region's last sampling
 * interval access check result.
 *
 * Usually this will be called by &damon_operations->check_accesses callback.
 */
void damon_update_region_access_rate(struct damon_region *r, bool accessed,
		struct damon_attrs *attrs)
{
	unsigned int len_window = 1;

	/*
	 * sample_interval can be zero, but cannot be larger than
	 * aggr_interval, owing to validation of damon_set_attrs().
	 */
	if (attrs->sample_interval)
		len_window = damon_max_nr_accesses(attrs);
	r->nr_accesses_bp = damon_moving_sum(r->nr_accesses_bp,
			r->last_nr_accesses * 10000, len_window,
			accessed ? 10000 : 0);

	if (accessed)
		r->nr_accesses++;
}

/**
 * damon_initialized() - Return if DAMON is ready to be used.
 *
 * Return: true if DAMON is ready to be used, false otherwise.
 */
bool damon_initialized(void)
{
	return damon_region_cache != NULL;
}

static int __init damon_init(void)
{
	damon_region_cache = KMEM_CACHE(damon_region, 0);
	if (unlikely(!damon_region_cache)) {
		pr_err("failed to create damon_region_cache\n");
		return -ENOMEM;
	}

	return 0;
}

subsys_initcall(damon_init);

#include "tests/core-kunit.h"