// SPDX-License-Identifier: GPL-2.0
/*
 * Data Access Monitor
 *
 * Author: SeongJae Park <sjpark@amazon.de>
 */

#define pr_fmt(fmt) "damon: " fmt

#include <linux/damon.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/string.h>

#define CREATE_TRACE_POINTS
#include <trace/events/damon.h>

#ifdef CONFIG_DAMON_KUNIT_TEST
#undef DAMON_MIN_REGION
#define DAMON_MIN_REGION 1
#endif

static DEFINE_MUTEX(damon_lock);
static int nr_running_ctxs;
static bool running_exclusive_ctxs;

static DEFINE_MUTEX(damon_ops_lock);
static struct damon_operations damon_registered_ops[NR_DAMON_OPS];

static struct kmem_cache *damon_region_cache __ro_after_init;

/* Should be called under damon_ops_lock with id smaller than NR_DAMON_OPS */
static bool __damon_is_registered_ops(enum damon_ops_id id)
{
	struct damon_operations empty_ops = {};

	if (!memcmp(&empty_ops, &damon_registered_ops[id], sizeof(empty_ops)))
		return false;
	return true;
}

/**
 * damon_is_registered_ops() - Check if a given damon_operations is registered.
 * @id:	Id of the damon_operations to check if registered.
 *
 * Return: true if the ops is set, false otherwise.
 */
bool damon_is_registered_ops(enum damon_ops_id id)
{
	bool registered;

	if (id >= NR_DAMON_OPS)
		return false;
	mutex_lock(&damon_ops_lock);
	registered = __damon_is_registered_ops(id);
	mutex_unlock(&damon_ops_lock);
	return registered;
}

/**
 * damon_register_ops() - Register a monitoring operations set to DAMON.
 * @ops:	monitoring operations set to register.
 *
 * This function registers a monitoring operations set having a valid &struct
 * damon_operations->id, so that others can find and use it later.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int damon_register_ops(struct damon_operations *ops)
{
	int err = 0;

	if (ops->id >= NR_DAMON_OPS)
		return -EINVAL;
	mutex_lock(&damon_ops_lock);
	/* Fail for already registered ops */
	if (__damon_is_registered_ops(ops->id)) {
		err = -EINVAL;
		goto out;
	}
	damon_registered_ops[ops->id] = *ops;
out:
	mutex_unlock(&damon_ops_lock);
	return err;
}

/**
 * damon_select_ops() - Select a monitoring operations to use with the context.
 * @ctx:	monitoring context to use the operations.
 * @id:		id of the registered monitoring operations to select.
 *
 * This function finds the registered monitoring operations set of @id and
 * makes @ctx use it.
 *
 * Return: 0 on success, negative error code otherwise.
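 *
 * Example (illustrative sketch; assumes the DAMON_OPS_VADDR operations set
 * has already been registered):
 *
 *	struct damon_ctx *ctx = damon_new_ctx();
 *
 *	if (ctx && damon_select_ops(ctx, DAMON_OPS_VADDR))
 *		pr_err("vaddr monitoring operations not registered\n");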
 */
int damon_select_ops(struct damon_ctx *ctx, enum damon_ops_id id)
{
	int err = 0;

	if (id >= NR_DAMON_OPS)
		return -EINVAL;

	mutex_lock(&damon_ops_lock);
	if (!__damon_is_registered_ops(id))
		err = -EINVAL;
	else
		ctx->ops = damon_registered_ops[id];
	mutex_unlock(&damon_ops_lock);
	return err;
}

/*
 * Construct a damon_region struct
 *
 * Returns the pointer to the new struct on success, or NULL otherwise
 */
struct damon_region *damon_new_region(unsigned long start, unsigned long end)
{
	struct damon_region *region;

	region = kmem_cache_alloc(damon_region_cache, GFP_KERNEL);
	if (!region)
		return NULL;

	region->ar.start = start;
	region->ar.end = end;
	region->nr_accesses = 0;
	region->nr_accesses_bp = 0;
	INIT_LIST_HEAD(&region->list);

	region->age = 0;
	region->last_nr_accesses = 0;

	return region;
}

void damon_add_region(struct damon_region *r, struct damon_target *t)
{
	list_add_tail(&r->list, &t->regions_list);
	t->nr_regions++;
}

static void damon_del_region(struct damon_region *r, struct damon_target *t)
{
	list_del(&r->list);
	t->nr_regions--;
}

static void damon_free_region(struct damon_region *r)
{
	kmem_cache_free(damon_region_cache, r);
}

void damon_destroy_region(struct damon_region *r, struct damon_target *t)
{
	damon_del_region(r, t);
	damon_free_region(r);
}

/*
 * Check whether a region is intersecting an address range
 *
 * Returns true if it is.
 */
static bool damon_intersect(struct damon_region *r,
		struct damon_addr_range *re)
{
	return !(r->ar.end <= re->start || re->end <= r->ar.start);
}

/*
 * Fill holes in regions with new regions.
 */
static int damon_fill_regions_holes(struct damon_region *first,
		struct damon_region *last, struct damon_target *t)
{
	struct damon_region *r = first;

	damon_for_each_region_from(r, t) {
		struct damon_region *next, *newr;

		if (r == last)
			break;
		next = damon_next_region(r);
		if (r->ar.end != next->ar.start) {
			newr = damon_new_region(r->ar.end, next->ar.start);
			if (!newr)
				return -ENOMEM;
			damon_insert_region(newr, r, next, t);
		}
	}
	return 0;
}

/*
 * damon_set_regions() - Set regions of a target for given address ranges.
 * @t:		the given target.
 * @ranges:	array of new monitoring target ranges.
 * @nr_ranges:	length of @ranges.
 *
 * This function adds new regions to, or modifies existing regions of, a
 * monitoring target to fit the given address ranges.
 *
 * Return: 0 on success, or negative error code otherwise.
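 *
 * The update is done in two passes: regions of @t that intersect none of
 * @ranges are destroyed first, and then each range is covered either by a
 * single new region (if no existing region intersects it) or by resizing the
 * first and last intersecting regions and filling the holes between them
 * with new regions.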
 */
int damon_set_regions(struct damon_target *t, struct damon_addr_range *ranges,
		unsigned int nr_ranges)
{
	struct damon_region *r, *next;
	unsigned int i;
	int err;

	/* Remove regions which are not in the new ranges */
	damon_for_each_region_safe(r, next, t) {
		for (i = 0; i < nr_ranges; i++) {
			if (damon_intersect(r, &ranges[i]))
				break;
		}
		if (i == nr_ranges)
			damon_destroy_region(r, t);
	}

	r = damon_first_region(t);
	/* Add new regions or resize existing regions to fit in the ranges */
	for (i = 0; i < nr_ranges; i++) {
		struct damon_region *first = NULL, *last, *newr;
		struct damon_addr_range *range;

		range = &ranges[i];
		/* Get the first/last regions intersecting with the range */
		damon_for_each_region_from(r, t) {
			if (damon_intersect(r, range)) {
				if (!first)
					first = r;
				last = r;
			}
			if (r->ar.start >= range->end)
				break;
		}
		if (!first) {
			/* no region intersects with this range */
			newr = damon_new_region(
					ALIGN_DOWN(range->start,
						DAMON_MIN_REGION),
					ALIGN(range->end, DAMON_MIN_REGION));
			if (!newr)
				return -ENOMEM;
			damon_insert_region(newr, damon_prev_region(r), r, t);
		} else {
			/* resize intersecting regions to fit in this range */
			first->ar.start = ALIGN_DOWN(range->start,
					DAMON_MIN_REGION);
			last->ar.end = ALIGN(range->end, DAMON_MIN_REGION);

			/* fill possible holes in the range */
			err = damon_fill_regions_holes(first, last, t);
			if (err)
				return err;
		}
	}
	return 0;
}

struct damos_filter *damos_new_filter(enum damos_filter_type type,
		bool matching)
{
	struct damos_filter *filter;

	filter = kmalloc(sizeof(*filter), GFP_KERNEL);
	if (!filter)
		return NULL;
	filter->type = type;
	filter->matching = matching;
	INIT_LIST_HEAD(&filter->list);
	return filter;
}

void damos_add_filter(struct damos *s, struct damos_filter *f)
{
	list_add_tail(&f->list, &s->filters);
}

static void damos_del_filter(struct damos_filter *f)
{
	list_del(&f->list);
}

static void damos_free_filter(struct damos_filter *f)
{
	kfree(f);
}

void damos_destroy_filter(struct damos_filter *f)
{
	damos_del_filter(f);
	damos_free_filter(f);
}

/* initialize private fields of damos_quota and return the pointer */
static struct damos_quota *damos_quota_init_priv(struct damos_quota *quota)
{
	quota->total_charged_sz = 0;
	quota->total_charged_ns = 0;
	quota->esz = 0;
	quota->charged_sz = 0;
	quota->charged_from = 0;
	quota->charge_target_from = NULL;
	quota->charge_addr_from = 0;
	return quota;
}

struct damos *damon_new_scheme(struct damos_access_pattern *pattern,
			enum damos_action action,
			unsigned long apply_interval_us,
			struct damos_quota *quota,
			struct damos_watermarks *wmarks)
{
	struct damos *scheme;

	scheme = kmalloc(sizeof(*scheme), GFP_KERNEL);
	if (!scheme)
		return NULL;
	scheme->pattern = *pattern;
	scheme->action = action;
	scheme->apply_interval_us = apply_interval_us;
	/*
	 * next_apply_sis will be set when kdamond starts.  While kdamond is
	 * running, it will also be updated when the scheme is added to the
	 * DAMON context, or when damon_attrs are updated.
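	 *
	 * Like the other '*_sis' fields, the value is in the number of passed
	 * sampling intervals: the scheme is applied when the context's
	 * ->passed_sample_intervals reaches this value.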
	 */
	scheme->next_apply_sis = 0;
	INIT_LIST_HEAD(&scheme->filters);
	scheme->stat = (struct damos_stat){};
	INIT_LIST_HEAD(&scheme->list);

	scheme->quota = *(damos_quota_init_priv(quota));

	scheme->wmarks = *wmarks;
	scheme->wmarks.activated = true;

	return scheme;
}

static void damos_set_next_apply_sis(struct damos *s, struct damon_ctx *ctx)
{
	unsigned long sample_interval = ctx->attrs.sample_interval ?
		ctx->attrs.sample_interval : 1;
	unsigned long apply_interval = s->apply_interval_us ?
		s->apply_interval_us : ctx->attrs.aggr_interval;

	s->next_apply_sis = ctx->passed_sample_intervals +
		apply_interval / sample_interval;
}

void damon_add_scheme(struct damon_ctx *ctx, struct damos *s)
{
	list_add_tail(&s->list, &ctx->schemes);
	damos_set_next_apply_sis(s, ctx);
}

static void damon_del_scheme(struct damos *s)
{
	list_del(&s->list);
}

static void damon_free_scheme(struct damos *s)
{
	kfree(s);
}

void damon_destroy_scheme(struct damos *s)
{
	struct damos_filter *f, *next;

	damos_for_each_filter_safe(f, next, s)
		damos_destroy_filter(f);
	damon_del_scheme(s);
	damon_free_scheme(s);
}

/*
 * Construct a damon_target struct
 *
 * Returns the pointer to the new struct on success, or NULL otherwise
 */
struct damon_target *damon_new_target(void)
{
	struct damon_target *t;

	t = kmalloc(sizeof(*t), GFP_KERNEL);
	if (!t)
		return NULL;

	t->pid = NULL;
	t->nr_regions = 0;
	INIT_LIST_HEAD(&t->regions_list);
	INIT_LIST_HEAD(&t->list);

	return t;
}

void damon_add_target(struct damon_ctx *ctx, struct damon_target *t)
{
	list_add_tail(&t->list, &ctx->adaptive_targets);
}

bool damon_targets_empty(struct damon_ctx *ctx)
{
	return list_empty(&ctx->adaptive_targets);
}

static void damon_del_target(struct damon_target *t)
{
	list_del(&t->list);
}

void damon_free_target(struct damon_target *t)
{
	struct damon_region *r, *next;

	damon_for_each_region_safe(r, next, t)
		damon_free_region(r);
	kfree(t);
}

void damon_destroy_target(struct damon_target *t)
{
	damon_del_target(t);
	damon_free_target(t);
}

unsigned int damon_nr_regions(struct damon_target *t)
{
	return t->nr_regions;
}

struct damon_ctx *damon_new_ctx(void)
{
	struct damon_ctx *ctx;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return NULL;

	init_completion(&ctx->kdamond_started);

	ctx->attrs.sample_interval = 5 * 1000;
	ctx->attrs.aggr_interval = 100 * 1000;
	ctx->attrs.ops_update_interval = 60 * 1000 * 1000;

	ctx->passed_sample_intervals = 0;
	/* These will be set from kdamond_init_intervals_sis() */
	ctx->next_aggregation_sis = 0;
	ctx->next_ops_update_sis = 0;

	mutex_init(&ctx->kdamond_lock);

	ctx->attrs.min_nr_regions = 10;
	ctx->attrs.max_nr_regions = 1000;

	INIT_LIST_HEAD(&ctx->adaptive_targets);
	INIT_LIST_HEAD(&ctx->schemes);

	return ctx;
}

static void damon_destroy_targets(struct damon_ctx *ctx)
{
	struct damon_target *t, *next_t;

	if (ctx->ops.cleanup) {
		ctx->ops.cleanup(ctx);
		return;
	}

	damon_for_each_target_safe(t, next_t, ctx)
		damon_destroy_target(t);
}

void damon_destroy_ctx(struct damon_ctx *ctx)
{
	struct damos *s, *next_s;

	damon_destroy_targets(ctx);

	damon_for_each_scheme_safe(s, next_s, ctx)
		damon_destroy_scheme(s);

	kfree(ctx);
}

static unsigned int damon_age_for_new_attrs(unsigned int age,
		struct damon_attrs *old_attrs, struct damon_attrs *new_attrs)
{
	return age * old_attrs->aggr_interval / new_attrs->aggr_interval;
}

/* convert access ratio in bp (per 10,000) to nr_accesses */
static unsigned int damon_accesses_bp_to_nr_accesses(
		unsigned int accesses_bp, struct damon_attrs *attrs)
{
	return accesses_bp * damon_max_nr_accesses(attrs) / 10000;
}

/* convert nr_accesses to access ratio in bp (per 10,000) */
static unsigned int damon_nr_accesses_to_accesses_bp(
		unsigned int nr_accesses, struct damon_attrs *attrs)
{
	return nr_accesses * 10000 / damon_max_nr_accesses(attrs);
}

static unsigned int damon_nr_accesses_for_new_attrs(unsigned int nr_accesses,
		struct damon_attrs *old_attrs, struct damon_attrs *new_attrs)
{
	return damon_accesses_bp_to_nr_accesses(
			damon_nr_accesses_to_accesses_bp(
				nr_accesses, old_attrs),
			new_attrs);
}

static void damon_update_monitoring_result(struct damon_region *r,
		struct damon_attrs *old_attrs, struct damon_attrs *new_attrs)
{
	r->nr_accesses = damon_nr_accesses_for_new_attrs(r->nr_accesses,
			old_attrs, new_attrs);
	r->nr_accesses_bp = r->nr_accesses * 10000;
	r->age = damon_age_for_new_attrs(r->age, old_attrs, new_attrs);
}

/*
 * region->nr_accesses is the number of sampling intervals in the last
 * aggregation interval in which access to the region was found, and
 * region->age is the number of aggregation intervals for which its access
 * pattern has been maintained.  For this reason, the real meaning of the two
 * fields depends on the current sampling interval and aggregation interval.
 * This function updates ->nr_accesses and ->age of the given damon_ctx's
 * regions for new damon_attrs.
 */
static void damon_update_monitoring_results(struct damon_ctx *ctx,
		struct damon_attrs *new_attrs)
{
	struct damon_attrs *old_attrs = &ctx->attrs;
	struct damon_target *t;
	struct damon_region *r;

	/* if any interval is zero, simply skip the conversion */
	if (!old_attrs->sample_interval || !old_attrs->aggr_interval ||
			!new_attrs->sample_interval ||
			!new_attrs->aggr_interval)
		return;

	damon_for_each_target(t, ctx)
		damon_for_each_region(r, t)
			damon_update_monitoring_result(
					r, old_attrs, new_attrs);
}
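
/*
 * Illustrative calculation for damon_update_monitoring_result() above: with
 * a 5 ms sampling and a 100 ms aggregation interval, the maximum nr_accesses
 * is 20, so a region having nr_accesses 10 converts to 5,000 bp.  If the
 * sampling interval is then changed to 10 ms (maximum nr_accesses 10), the
 * region's nr_accesses becomes 5,000 * 10 / 10,000 = 5.
 */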

/**
 * damon_set_attrs() - Set attributes for the monitoring.
 * @ctx:	monitoring context
 * @attrs:	monitoring attributes
 *
 * This function should be called while the kdamond is not running, or while
 * an access check results aggregation is not ongoing (e.g., from
 * &struct damon_callback->after_aggregation or
 * &struct damon_callback->after_wmarks_check callbacks).
 *
 * Every time interval is in microseconds.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int damon_set_attrs(struct damon_ctx *ctx, struct damon_attrs *attrs)
{
	unsigned long sample_interval = attrs->sample_interval ?
		attrs->sample_interval : 1;
	struct damos *s;

	if (attrs->min_nr_regions < 3)
		return -EINVAL;
	if (attrs->min_nr_regions > attrs->max_nr_regions)
		return -EINVAL;
	if (attrs->sample_interval > attrs->aggr_interval)
		return -EINVAL;

	ctx->next_aggregation_sis = ctx->passed_sample_intervals +
		attrs->aggr_interval / sample_interval;
	ctx->next_ops_update_sis = ctx->passed_sample_intervals +
		attrs->ops_update_interval / sample_interval;

	damon_update_monitoring_results(ctx, attrs);
	ctx->attrs = *attrs;

	damon_for_each_scheme(s, ctx)
		damos_set_next_apply_sis(s, ctx);

	return 0;
}

/**
 * damon_set_schemes() - Set data access monitoring based operation schemes.
 * @ctx:	monitoring context
 * @schemes:	array of the schemes
 * @nr_schemes:	number of entries in @schemes
 *
 * This function should not be called while the kdamond of the context is
 * running.
 */
void damon_set_schemes(struct damon_ctx *ctx, struct damos **schemes,
			ssize_t nr_schemes)
{
	struct damos *s, *next;
	ssize_t i;

	damon_for_each_scheme_safe(s, next, ctx)
		damon_destroy_scheme(s);
	for (i = 0; i < nr_schemes; i++)
		damon_add_scheme(ctx, schemes[i]);
}

/**
 * damon_nr_running_ctxs() - Return number of currently running contexts.
 */
int damon_nr_running_ctxs(void)
{
	int nr_ctxs;

	mutex_lock(&damon_lock);
	nr_ctxs = nr_running_ctxs;
	mutex_unlock(&damon_lock);

	return nr_ctxs;
}

/* Returns the size upper limit for each monitoring region */
static unsigned long damon_region_sz_limit(struct damon_ctx *ctx)
{
	struct damon_target *t;
	struct damon_region *r;
	unsigned long sz = 0;

	damon_for_each_target(t, ctx) {
		damon_for_each_region(r, t)
			sz += damon_sz_region(r);
	}

	if (ctx->attrs.min_nr_regions)
		sz /= ctx->attrs.min_nr_regions;
	if (sz < DAMON_MIN_REGION)
		sz = DAMON_MIN_REGION;

	return sz;
}

static int kdamond_fn(void *data);

/*
 * __damon_start() - Starts monitoring with given context.
 * @ctx:	monitoring context
 *
 * This function should be called while damon_lock is held.
 *
 * Return: 0 on success, negative error code otherwise.
 */
static int __damon_start(struct damon_ctx *ctx)
{
	int err = -EBUSY;

	mutex_lock(&ctx->kdamond_lock);
	if (!ctx->kdamond) {
		err = 0;
		reinit_completion(&ctx->kdamond_started);
		ctx->kdamond = kthread_run(kdamond_fn, ctx, "kdamond.%d",
				nr_running_ctxs);
		if (IS_ERR(ctx->kdamond)) {
			err = PTR_ERR(ctx->kdamond);
			ctx->kdamond = NULL;
		} else {
			wait_for_completion(&ctx->kdamond_started);
		}
	}
	mutex_unlock(&ctx->kdamond_lock);

	return err;
}

/**
 * damon_start() - Starts the monitorings for a given group of contexts.
 * @ctxs:	an array of the pointers for contexts to start monitoring
 * @nr_ctxs:	size of @ctxs
 * @exclusive:	exclusiveness of this contexts group
 *
 * This function starts a group of monitoring threads for a group of
 * monitoring contexts.  One thread per context is created and runs in
 * parallel.  The caller should handle synchronization between the threads by
 * itself.  If @exclusive is true and a group of threads that was created by
 * another 'damon_start()' call is currently running, this function does
 * nothing but returns -EBUSY.
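 *
 * Example (illustrative sketch; 'my_ctx' is a hypothetical, fully initialized
 * context, and error handling is omitted):
 *
 *	struct damon_ctx *ctxs[1] = { my_ctx };
 *
 *	damon_start(ctxs, 1, true);
 *	...
 *	damon_stop(ctxs, 1);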
 *
 * Return: 0 on success, negative error code otherwise.
 */
int damon_start(struct damon_ctx **ctxs, int nr_ctxs, bool exclusive)
{
	int i;
	int err = 0;

	mutex_lock(&damon_lock);
	if ((exclusive && nr_running_ctxs) ||
			(!exclusive && running_exclusive_ctxs)) {
		mutex_unlock(&damon_lock);
		return -EBUSY;
	}

	for (i = 0; i < nr_ctxs; i++) {
		err = __damon_start(ctxs[i]);
		if (err)
			break;
		nr_running_ctxs++;
	}
	if (exclusive && nr_running_ctxs)
		running_exclusive_ctxs = true;
	mutex_unlock(&damon_lock);

	return err;
}

/*
 * __damon_stop() - Stops monitoring of a given context.
 * @ctx:	monitoring context
 *
 * Return: 0 on success, negative error code otherwise.
 */
static int __damon_stop(struct damon_ctx *ctx)
{
	struct task_struct *tsk;

	mutex_lock(&ctx->kdamond_lock);
	tsk = ctx->kdamond;
	if (tsk) {
		get_task_struct(tsk);
		mutex_unlock(&ctx->kdamond_lock);
		kthread_stop_put(tsk);
		return 0;
	}
	mutex_unlock(&ctx->kdamond_lock);

	return -EPERM;
}

/**
 * damon_stop() - Stops the monitorings for a given group of contexts.
 * @ctxs:	an array of the pointers for contexts to stop monitoring
 * @nr_ctxs:	size of @ctxs
 *
 * Return: 0 on success, negative error code otherwise.
 */
int damon_stop(struct damon_ctx **ctxs, int nr_ctxs)
{
	int i, err = 0;

	for (i = 0; i < nr_ctxs; i++) {
		/* nr_running_ctxs is decremented in kdamond_fn */
		err = __damon_stop(ctxs[i]);
		if (err)
			break;
	}
	return err;
}

/*
 * Reset the aggregated monitoring results ('nr_accesses' of each region).
 */
static void kdamond_reset_aggregated(struct damon_ctx *c)
{
	struct damon_target *t;
	unsigned int ti = 0;	/* target's index */

	damon_for_each_target(t, c) {
		struct damon_region *r;

		damon_for_each_region(r, t) {
			trace_damon_aggregated(ti, r, damon_nr_regions(t));
			r->last_nr_accesses = r->nr_accesses;
			r->nr_accesses = 0;
		}
		ti++;
	}
}

static void damon_split_region_at(struct damon_target *t,
				  struct damon_region *r, unsigned long sz_r);

static bool __damos_valid_target(struct damon_region *r, struct damos *s)
{
	unsigned long sz;
	unsigned int nr_accesses = r->nr_accesses_bp / 10000;

	sz = damon_sz_region(r);
	return s->pattern.min_sz_region <= sz &&
		sz <= s->pattern.max_sz_region &&
		s->pattern.min_nr_accesses <= nr_accesses &&
		nr_accesses <= s->pattern.max_nr_accesses &&
		s->pattern.min_age_region <= r->age &&
		r->age <= s->pattern.max_age_region;
}

static bool damos_valid_target(struct damon_ctx *c, struct damon_target *t,
		struct damon_region *r, struct damos *s)
{
	bool ret = __damos_valid_target(r, s);

	if (!ret || !s->quota.esz || !c->ops.get_scheme_score)
		return ret;

	return c->ops.get_scheme_score(c, t, r, s) >= s->quota.min_score;
}

/*
 * damos_skip_charged_region() - Check if the given region or starting part of
 * it is already charged for the DAMOS quota.
 * @t:	The target of the region.
 * @rp:	The pointer to the region.
 * @s:	The scheme to be applied.
 *
 * If the quota of a scheme has been exceeded in a quota charge window, the
 * scheme's action would be applied to only a part of the regions fulfilling
 * the target access pattern.  To avoid repeatedly applying the scheme action
 * to the same regions, DAMON skips applying the action to regions that were
 * already charged in the previous charge window.
 *
 * This function checks if a given region should be skipped for that reason.
 * If only the starting part of the region was previously charged, this
 * function splits the region into two so that the second one covers the area
 * that was not charged in the previous charge window, saves the second region
 * in *rp, and returns false, so that the caller can apply the DAMON action to
 * the second one.
 *
 * Return: true if the region should be entirely skipped, false otherwise.
 */
static bool damos_skip_charged_region(struct damon_target *t,
		struct damon_region **rp, struct damos *s)
{
	struct damon_region *r = *rp;
	struct damos_quota *quota = &s->quota;
	unsigned long sz_to_skip;

	/* Skip previously charged regions */
	if (quota->charge_target_from) {
		if (t != quota->charge_target_from)
			return true;
		if (r == damon_last_region(t)) {
			quota->charge_target_from = NULL;
			quota->charge_addr_from = 0;
			return true;
		}
		if (quota->charge_addr_from &&
				r->ar.end <= quota->charge_addr_from)
			return true;

		if (quota->charge_addr_from && r->ar.start <
				quota->charge_addr_from) {
			sz_to_skip = ALIGN_DOWN(quota->charge_addr_from -
					r->ar.start, DAMON_MIN_REGION);
			if (!sz_to_skip) {
				if (damon_sz_region(r) <= DAMON_MIN_REGION)
					return true;
				sz_to_skip = DAMON_MIN_REGION;
			}
			damon_split_region_at(t, r, sz_to_skip);
			r = damon_next_region(r);
			*rp = r;
		}
		quota->charge_target_from = NULL;
		quota->charge_addr_from = 0;
	}
	return false;
}

static void damos_update_stat(struct damos *s,
		unsigned long sz_tried, unsigned long sz_applied)
{
	s->stat.nr_tried++;
	s->stat.sz_tried += sz_tried;
	if (sz_applied)
		s->stat.nr_applied++;
	s->stat.sz_applied += sz_applied;
}

static bool __damos_filter_out(struct damon_ctx *ctx, struct damon_target *t,
		struct damon_region *r, struct damos_filter *filter)
{
	bool matched = false;
	struct damon_target *ti;
	int target_idx = 0;
	unsigned long start, end;

	switch (filter->type) {
	case DAMOS_FILTER_TYPE_TARGET:
		damon_for_each_target(ti, ctx) {
			if (ti == t)
				break;
			target_idx++;
		}
		matched = target_idx == filter->target_idx;
		break;
	case DAMOS_FILTER_TYPE_ADDR:
		start = ALIGN_DOWN(filter->addr_range.start, DAMON_MIN_REGION);
		end = ALIGN_DOWN(filter->addr_range.end, DAMON_MIN_REGION);

		/* inside the range */
		if (start <= r->ar.start && r->ar.end <= end) {
			matched = true;
			break;
		}
		/* outside of the range */
		if (r->ar.end <= start || end <= r->ar.start) {
			matched = false;
			break;
		}
		/* start before the range and overlap */
		if (r->ar.start < start) {
			damon_split_region_at(t, r, start - r->ar.start);
			matched = false;
			break;
		}
		/* start inside the range */
		damon_split_region_at(t, r, end - r->ar.start);
		matched = true;
		break;
	default:
		return false;
	}

	return matched == filter->matching;
}
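
/*
 * A region is filtered out if any of the scheme's filters matches it while
 * ->matching is set, or fails to match it while ->matching is unset.  For
 * example, a DAMOS_FILTER_TYPE_ADDR filter with ->matching set makes the
 * parts of regions falling inside ->addr_range be skipped by the scheme.
 */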

static bool damos_filter_out(struct damon_ctx *ctx, struct damon_target *t,
		struct damon_region *r, struct damos *s)
{
	struct damos_filter *filter;

	damos_for_each_filter(filter, s) {
		if (__damos_filter_out(ctx, t, r, filter))
			return true;
	}
	return false;
}

static void damos_apply_scheme(struct damon_ctx *c, struct damon_target *t,
		struct damon_region *r, struct damos *s)
{
	struct damos_quota *quota = &s->quota;
	unsigned long sz = damon_sz_region(r);
	struct timespec64 begin, end;
	unsigned long sz_applied = 0;
	int err = 0;
	/*
	 * We plan to support multiple contexts per kdamond, as the DAMON
	 * sysfs interface implies with its 'nr_contexts' file.  Nevertheless,
	 * only a single context per kdamond is supported for now, so we can
	 * simply use context index '0' here.
	 */
	unsigned int cidx = 0;
	struct damos *siter;		/* schemes iterator */
	unsigned int sidx = 0;
	struct damon_target *titer;	/* targets iterator */
	unsigned int tidx = 0;
	bool do_trace = false;

	/* get indices for trace_damos_before_apply() */
	if (trace_damos_before_apply_enabled()) {
		damon_for_each_scheme(siter, c) {
			if (siter == s)
				break;
			sidx++;
		}
		damon_for_each_target(titer, c) {
			if (titer == t)
				break;
			tidx++;
		}
		do_trace = true;
	}

	if (c->ops.apply_scheme) {
		if (quota->esz && quota->charged_sz + sz > quota->esz) {
			sz = ALIGN_DOWN(quota->esz - quota->charged_sz,
					DAMON_MIN_REGION);
			if (!sz)
				goto update_stat;
			damon_split_region_at(t, r, sz);
		}
		if (damos_filter_out(c, t, r, s))
			return;
		ktime_get_coarse_ts64(&begin);
		if (c->callback.before_damos_apply)
			err = c->callback.before_damos_apply(c, t, r, s);
		if (!err) {
			trace_damos_before_apply(cidx, sidx, tidx, r,
					damon_nr_regions(t), do_trace);
			sz_applied = c->ops.apply_scheme(c, t, r, s);
		}
		ktime_get_coarse_ts64(&end);
		quota->total_charged_ns += timespec64_to_ns(&end) -
			timespec64_to_ns(&begin);
		quota->charged_sz += sz;
		if (quota->esz && quota->charged_sz >= quota->esz) {
			quota->charge_target_from = t;
			quota->charge_addr_from = r->ar.end + 1;
		}
	}
	if (s->action != DAMOS_STAT)
		r->age = 0;

update_stat:
	damos_update_stat(s, sz, sz_applied);
}

static void damon_do_apply_schemes(struct damon_ctx *c,
				   struct damon_target *t,
				   struct damon_region *r)
{
	struct damos *s;

	damon_for_each_scheme(s, c) {
		struct damos_quota *quota = &s->quota;

		if (!s->wmarks.activated)
			continue;

		/* Check the quota */
		if (quota->esz && quota->charged_sz >= quota->esz)
			continue;

		if (damos_skip_charged_region(t, &r, s))
			continue;

		if (!damos_valid_target(c, t, r, s))
			continue;

		damos_apply_scheme(c, t, r, s);
	}
}

/* Shouldn't be called if quota->ms and quota->sz are zero */
static void damos_set_effective_quota(struct damos_quota *quota)
{
	unsigned long throughput;
	unsigned long esz;

	if (!quota->ms) {
		quota->esz = quota->sz;
		return;
	}

	if (quota->total_charged_ns)
		throughput = quota->total_charged_sz * 1000000 /
			quota->total_charged_ns;
	else
		throughput = PAGE_SIZE * 1024;
	esz = throughput * quota->ms;

	if (quota->sz && quota->sz < esz)
		esz = quota->sz;
	quota->esz = esz;
}
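
/*
 * In damos_set_effective_quota() above, 'throughput' is the scheme's apply
 * speed in bytes per millisecond (total_charged_sz / total_charged_ns,
 * scaled), or an arbitrary assumption while there is no charge history yet.
 * 'esz' hence becomes the amount of memory the action can be applied to
 * within ->ms milliseconds of apply time, optionally capped by the size
 * quota (->sz).
 */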

static void damos_adjust_quota(struct damon_ctx *c, struct damos *s)
{
	struct damos_quota *quota = &s->quota;
	struct damon_target *t;
	struct damon_region *r;
	unsigned long cumulated_sz;
	unsigned int score, max_score = 0;

	if (!quota->ms && !quota->sz)
		return;

	/* New charge window starts */
	if (time_after_eq(jiffies, quota->charged_from +
				msecs_to_jiffies(quota->reset_interval))) {
		if (quota->esz && quota->charged_sz >= quota->esz)
			s->stat.qt_exceeds++;
		quota->total_charged_sz += quota->charged_sz;
		quota->charged_from = jiffies;
		quota->charged_sz = 0;
		damos_set_effective_quota(quota);
	}

	if (!c->ops.get_scheme_score)
		return;

	/* Fill up the score histogram */
	memset(quota->histogram, 0, sizeof(quota->histogram));
	damon_for_each_target(t, c) {
		damon_for_each_region(r, t) {
			if (!__damos_valid_target(r, s))
				continue;
			score = c->ops.get_scheme_score(c, t, r, s);
			quota->histogram[score] += damon_sz_region(r);
			if (score > max_score)
				max_score = score;
		}
	}

	/* Set the min score limit */
	for (cumulated_sz = 0, score = max_score; ; score--) {
		cumulated_sz += quota->histogram[score];
		if (cumulated_sz >= quota->esz || !score)
			break;
	}
	quota->min_score = score;
}

static void kdamond_apply_schemes(struct damon_ctx *c)
{
	struct damon_target *t;
	struct damon_region *r, *next_r;
	struct damos *s;
	unsigned long sample_interval = c->attrs.sample_interval ?
		c->attrs.sample_interval : 1;
	bool has_schemes_to_apply = false;

	damon_for_each_scheme(s, c) {
		if (c->passed_sample_intervals != s->next_apply_sis)
			continue;

		s->next_apply_sis +=
			(s->apply_interval_us ? s->apply_interval_us :
			 c->attrs.aggr_interval) / sample_interval;

		if (!s->wmarks.activated)
			continue;

		has_schemes_to_apply = true;

		damos_adjust_quota(c, s);
	}

	if (!has_schemes_to_apply)
		return;

	damon_for_each_target(t, c) {
		damon_for_each_region_safe(r, next_r, t)
			damon_do_apply_schemes(c, t, r);
	}
}

/*
 * Merge two adjacent regions into one region
 */
static void damon_merge_two_regions(struct damon_target *t,
		struct damon_region *l, struct damon_region *r)
{
	unsigned long sz_l = damon_sz_region(l), sz_r = damon_sz_region(r);

	l->nr_accesses = (l->nr_accesses * sz_l + r->nr_accesses * sz_r) /
			(sz_l + sz_r);
	l->nr_accesses_bp = l->nr_accesses * 10000;
	l->age = (l->age * sz_l + r->age * sz_r) / (sz_l + sz_r);
	l->ar.end = r->ar.end;
	damon_destroy_region(r, t);
}

/*
 * Merge adjacent regions having similar access frequencies
 *
 * t		target affected by this merge operation
 * thres	'->nr_accesses' diff threshold for the merge
 * sz_limit	size upper limit of each region
 */
static void damon_merge_regions_of(struct damon_target *t, unsigned int thres,
				   unsigned long sz_limit)
{
	struct damon_region *r, *prev = NULL, *next;

	damon_for_each_region_safe(r, next, t) {
		if (abs(r->nr_accesses - r->last_nr_accesses) > thres)
			r->age = 0;
		else
			r->age++;

		if (prev && prev->ar.end == r->ar.start &&
		    abs(prev->nr_accesses - r->nr_accesses) <= thres &&
		    damon_sz_region(prev) + damon_sz_region(r) <= sz_limit)
			damon_merge_two_regions(t, prev, r);
		else
			prev = r;
	}
}

/*
 * Merge adjacent regions having similar access frequencies
 *
 * threshold	'->nr_accesses' diff threshold for the merge
 * sz_limit	size upper limit of each region
 *
 * This function merges adjacent monitoring target regions whose access
 * frequencies are similar.  This is for minimizing the monitoring overhead
 * under dynamically changing access patterns.  If a merge was unnecessarily
 * made, later 'kdamond_split_regions()' will revert it.
 */
static void kdamond_merge_regions(struct damon_ctx *c, unsigned int threshold,
				  unsigned long sz_limit)
{
	struct damon_target *t;

	damon_for_each_target(t, c)
		damon_merge_regions_of(t, threshold, sz_limit);
}

/*
 * Split a region in two
 *
 * r		the region to be split
 * sz_r		size of the first sub-region that will be made
 */
static void damon_split_region_at(struct damon_target *t,
				  struct damon_region *r, unsigned long sz_r)
{
	struct damon_region *new;

	new = damon_new_region(r->ar.start + sz_r, r->ar.end);
	if (!new)
		return;

	r->ar.end = new->ar.start;

	new->age = r->age;
	new->last_nr_accesses = r->last_nr_accesses;
	new->nr_accesses_bp = r->nr_accesses_bp;
	new->nr_accesses = r->nr_accesses;

	damon_insert_region(new, r, damon_next_region(r), t);
}

/* Split every region in the given target into 'nr_subs' regions */
static void damon_split_regions_of(struct damon_target *t, int nr_subs)
{
	struct damon_region *r, *next;
	unsigned long sz_region, sz_sub = 0;
	int i;

	damon_for_each_region_safe(r, next, t) {
		sz_region = damon_sz_region(r);

		for (i = 0; i < nr_subs - 1 &&
				sz_region > 2 * DAMON_MIN_REGION; i++) {
			/*
			 * Randomly select size of left sub-region to be at
			 * least 10% and at most 90% of the original region
			 */
			sz_sub = ALIGN_DOWN(damon_rand(1, 10) *
					sz_region / 10, DAMON_MIN_REGION);
			/* Do not allow blank region */
			if (sz_sub == 0 || sz_sub >= sz_region)
				continue;

			damon_split_region_at(t, r, sz_sub);
			sz_region = sz_sub;
		}
	}
}

/*
 * Split every target region into randomly-sized small regions
 *
 * This function splits every target region into random-sized small regions
 * if the current total number of regions is equal to or smaller than half of
 * the user-specified maximum number of regions.  This is for maximizing the
 * monitoring accuracy under dynamically changing access patterns.  If a split
 * was unnecessarily made, later 'kdamond_merge_regions()' will revert it.
 */
static void kdamond_split_regions(struct damon_ctx *ctx)
{
	struct damon_target *t;
	unsigned int nr_regions = 0;
	static unsigned int last_nr_regions;
	int nr_subregions = 2;

	damon_for_each_target(t, ctx)
		nr_regions += damon_nr_regions(t);

	if (nr_regions > ctx->attrs.max_nr_regions / 2)
		return;

	/* Maybe the middle of the region has different access frequency */
	if (last_nr_regions == nr_regions &&
			nr_regions < ctx->attrs.max_nr_regions / 3)
		nr_subregions = 3;

	damon_for_each_target(t, ctx)
		damon_split_regions_of(t, nr_subregions);

	last_nr_regions = nr_regions;
}
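
/*
 * Note that kdamond_split_regions() does nothing while more than half of
 * ->max_nr_regions regions exist, and splits each region into three
 * sub-regions instead of two only when the number of regions has stagnated
 * below a third of the maximum.
 */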

/*
 * Check whether the current monitoring should be stopped
 *
 * The monitoring is stopped when either the user requested to stop, or all
 * monitoring targets are invalid.
 *
 * Returns true if the monitoring should be stopped.
 */
static bool kdamond_need_stop(struct damon_ctx *ctx)
{
	struct damon_target *t;

	if (kthread_should_stop())
		return true;

	if (!ctx->ops.target_valid)
		return false;

	damon_for_each_target(t, ctx) {
		if (ctx->ops.target_valid(t))
			return false;
	}

	return true;
}

static unsigned long damos_wmark_metric_value(enum damos_wmark_metric metric)
{
	switch (metric) {
	case DAMOS_WMARK_FREE_MEM_RATE:
		return global_zone_page_state(NR_FREE_PAGES) * 1000 /
			totalram_pages();
	default:
		break;
	}
	return -EINVAL;
}

/*
 * Returns zero if the scheme is active.  Else, returns the time to wait for
 * the next watermark check in microseconds.
 */
static unsigned long damos_wmark_wait_us(struct damos *scheme)
{
	unsigned long metric;

	if (scheme->wmarks.metric == DAMOS_WMARK_NONE)
		return 0;

	metric = damos_wmark_metric_value(scheme->wmarks.metric);
	/* higher than high watermark or lower than low watermark */
	if (metric > scheme->wmarks.high || scheme->wmarks.low > metric) {
		if (scheme->wmarks.activated)
			pr_debug("deactivate a scheme (%d) for %s wmark\n",
					scheme->action,
					metric > scheme->wmarks.high ?
					"high" : "low");
		scheme->wmarks.activated = false;
		return scheme->wmarks.interval;
	}

	/* inactive and higher than middle watermark */
	if ((scheme->wmarks.high >= metric && metric >= scheme->wmarks.mid) &&
			!scheme->wmarks.activated)
		return scheme->wmarks.interval;

	if (!scheme->wmarks.activated)
		pr_debug("activate a scheme (%d)\n", scheme->action);
	scheme->wmarks.activated = true;
	return 0;
}
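
/*
 * For example, with DAMOS_WMARK_FREE_MEM_RATE and high/mid/low watermarks of
 * 500, 400, and 300 (per-thousand), damos_wmark_wait_us() activates a
 * deactivated scheme once the free memory rate drops below 40% (but not
 * below 30%), and deactivates it again when the rate rises above 50% or
 * falls below 30%.
 */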

static void kdamond_usleep(unsigned long usecs)
{
	/* See Documentation/timers/timers-howto.rst for the thresholds */
	if (usecs > 20 * USEC_PER_MSEC)
		schedule_timeout_idle(usecs_to_jiffies(usecs));
	else
		usleep_idle_range(usecs, usecs + 1);
}

/*
 * Returns zero once a scheme got activated, or a negative error code if the
 * kdamond should stop while waiting for an activation.
 */
static int kdamond_wait_activation(struct damon_ctx *ctx)
{
	struct damos *s;
	unsigned long wait_time;
	unsigned long min_wait_time = 0;
	bool init_wait_time = false;

	while (!kdamond_need_stop(ctx)) {
		damon_for_each_scheme(s, ctx) {
			wait_time = damos_wmark_wait_us(s);
			if (!init_wait_time || wait_time < min_wait_time) {
				init_wait_time = true;
				min_wait_time = wait_time;
			}
		}
		if (!min_wait_time)
			return 0;

		kdamond_usleep(min_wait_time);

		if (ctx->callback.after_wmarks_check &&
				ctx->callback.after_wmarks_check(ctx))
			break;
	}
	return -EBUSY;
}

static void kdamond_init_intervals_sis(struct damon_ctx *ctx)
{
	unsigned long sample_interval = ctx->attrs.sample_interval ?
		ctx->attrs.sample_interval : 1;
	unsigned long apply_interval;
	struct damos *scheme;

	ctx->passed_sample_intervals = 0;
	ctx->next_aggregation_sis = ctx->attrs.aggr_interval / sample_interval;
	ctx->next_ops_update_sis = ctx->attrs.ops_update_interval /
		sample_interval;

	damon_for_each_scheme(scheme, ctx) {
		apply_interval = scheme->apply_interval_us ?
			scheme->apply_interval_us : ctx->attrs.aggr_interval;
		scheme->next_apply_sis = apply_interval / sample_interval;
	}
}

/*
 * The monitoring daemon that runs as a kernel thread
 */
static int kdamond_fn(void *data)
{
	struct damon_ctx *ctx = data;
	struct damon_target *t;
	struct damon_region *r, *next;
	unsigned int max_nr_accesses = 0;
	unsigned long sz_limit = 0;

	pr_debug("kdamond (%d) starts\n", current->pid);

	complete(&ctx->kdamond_started);
	kdamond_init_intervals_sis(ctx);

	if (ctx->ops.init)
		ctx->ops.init(ctx);
	if (ctx->callback.before_start && ctx->callback.before_start(ctx))
		goto done;

	sz_limit = damon_region_sz_limit(ctx);

	while (!kdamond_need_stop(ctx)) {
		/*
		 * ctx->attrs and ctx->next_{aggregation,ops_update}_sis could
		 * be changed from after_wmarks_check() or after_aggregation()
		 * callbacks.  Read the values here, and use those for this
		 * iteration.  That is, new values set by damon_set_attrs()
		 * are respected from the next iteration.
		 */
		unsigned long next_aggregation_sis = ctx->next_aggregation_sis;
		unsigned long next_ops_update_sis = ctx->next_ops_update_sis;
		unsigned long sample_interval = ctx->attrs.sample_interval;

		if (kdamond_wait_activation(ctx))
			break;

		if (ctx->ops.prepare_access_checks)
			ctx->ops.prepare_access_checks(ctx);
		if (ctx->callback.after_sampling &&
				ctx->callback.after_sampling(ctx))
			break;

		kdamond_usleep(sample_interval);
		ctx->passed_sample_intervals++;

		if (ctx->ops.check_accesses)
			max_nr_accesses = ctx->ops.check_accesses(ctx);

		if (ctx->passed_sample_intervals == next_aggregation_sis) {
			kdamond_merge_regions(ctx,
					max_nr_accesses / 10,
					sz_limit);
			if (ctx->callback.after_aggregation &&
					ctx->callback.after_aggregation(ctx))
				break;
		}

		/*
		 * do kdamond_apply_schemes() after kdamond_merge_regions() if
		 * possible, to reduce overhead
		 */
		if (!list_empty(&ctx->schemes))
			kdamond_apply_schemes(ctx);
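
		/*
		 * Re-read the sampling interval (falling back to 1 for a zero
		 * value), as ->attrs could have been updated by
		 * damon_set_attrs() from the callbacks invoked above.
		 */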
		sample_interval = ctx->attrs.sample_interval ?
			ctx->attrs.sample_interval : 1;
		if (ctx->passed_sample_intervals == next_aggregation_sis) {
			ctx->next_aggregation_sis = next_aggregation_sis +
				ctx->attrs.aggr_interval / sample_interval;

			kdamond_reset_aggregated(ctx);
			kdamond_split_regions(ctx);
			if (ctx->ops.reset_aggregated)
				ctx->ops.reset_aggregated(ctx);
		}

		if (ctx->passed_sample_intervals == next_ops_update_sis) {
			ctx->next_ops_update_sis = next_ops_update_sis +
				ctx->attrs.ops_update_interval /
				sample_interval;
			if (ctx->ops.update)
				ctx->ops.update(ctx);
			sz_limit = damon_region_sz_limit(ctx);
		}
	}
done:
	damon_for_each_target(t, ctx) {
		damon_for_each_region_safe(r, next, t)
			damon_destroy_region(r, t);
	}

	if (ctx->callback.before_terminate)
		ctx->callback.before_terminate(ctx);
	if (ctx->ops.cleanup)
		ctx->ops.cleanup(ctx);

	pr_debug("kdamond (%d) finishes\n", current->pid);
	mutex_lock(&ctx->kdamond_lock);
	ctx->kdamond = NULL;
	mutex_unlock(&ctx->kdamond_lock);

	mutex_lock(&damon_lock);
	nr_running_ctxs--;
	if (!nr_running_ctxs && running_exclusive_ctxs)
		running_exclusive_ctxs = false;
	mutex_unlock(&damon_lock);

	return 0;
}

/*
 * struct damon_system_ram_region - System RAM resource address region of
 *				    [@start, @end).
 * @start:	Start address of the region (inclusive).
 * @end:	End address of the region (exclusive).
 */
struct damon_system_ram_region {
	unsigned long start;
	unsigned long end;
};

static int walk_system_ram(struct resource *res, void *arg)
{
	struct damon_system_ram_region *a = arg;

	if (a->end - a->start < resource_size(res)) {
		a->start = res->start;
		a->end = res->end;
	}
	return 0;
}

/*
 * Find biggest 'System RAM' resource and store its start and end address in
 * @start and @end, respectively.  If no System RAM is found, returns false.
 */
static bool damon_find_biggest_system_ram(unsigned long *start,
						unsigned long *end)
{
	struct damon_system_ram_region arg = {};

	walk_system_ram_res(0, ULONG_MAX, &arg, walk_system_ram);
	if (arg.end <= arg.start)
		return false;

	*start = arg.start;
	*end = arg.end;
	return true;
}

/**
 * damon_set_region_biggest_system_ram_default() - Set the region of the given
 *	monitoring target as requested, or biggest 'System RAM'.
 * @t:		The monitoring target to set the region.
 * @start:	The pointer to the start address of the region.
 * @end:	The pointer to the end address of the region.
 *
 * This function sets the region of @t as requested by @start and @end.  If
 * the values of @start and @end are zero, however, this function finds the
 * biggest 'System RAM' resource and sets the region to cover the resource.
 * In the latter case, this function saves the start and end addresses of the
 * resource in @start and @end, respectively.
 *
 * Return: 0 on success, negative error code otherwise.
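 *
 * Example (illustrative sketch; assumes @t is a target of a physical address
 * space monitoring context):
 *
 *	unsigned long start = 0, end = 0;
 *
 *	if (!damon_set_region_biggest_system_ram_default(t, &start, &end))
 *		pr_debug("monitoring [%lu, %lu)\n", start, end);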
 */
int damon_set_region_biggest_system_ram_default(struct damon_target *t,
			unsigned long *start, unsigned long *end)
{
	struct damon_addr_range addr_range;

	if (*start > *end)
		return -EINVAL;

	if (!*start && !*end &&
	    !damon_find_biggest_system_ram(start, end))
		return -EINVAL;

	addr_range.start = *start;
	addr_range.end = *end;
	return damon_set_regions(t, &addr_range, 1);
}

/*
 * damon_moving_sum() - Calculate an inferred moving sum value.
 * @mvsum:	Inferred sum of the last @len_window values.
 * @nomvsum:	Non-moving sum of the last discrete @len_window window values.
 * @len_window:	The number of last values to take care of.
 * @new_value:	New value that will be added to the pseudo moving sum.
 *
 * Moving sum (moving average * window size) is good for handling noise, but
 * the cost of keeping past values can be high for an arbitrary window size.
 * This function implements a lightweight pseudo moving sum function that
 * doesn't keep the past window values.
 *
 * It simply assumes there was no noise in the past, and gets the no-noise
 * assumed past value to drop from @nomvsum and @len_window.  @nomvsum is a
 * non-moving sum of the last window.  For example, if @len_window is 10 and
 * we have 25 values, @nomvsum is the sum of the 11th to 20th values of the 25
 * values.  Hence, this function simply drops @nomvsum / @len_window from the
 * given @mvsum and adds @new_value.
 *
 * For example, if @len_window is 10 and @nomvsum is 50, the last 10 values of
 * the last window could vary, e.g., 0, 10, 0, 10, 0, 10, 0, 0, 0, 20.  For
 * calculating the next moving sum with a new value, we should drop 0 from 50
 * and add the new value.  However, this function assumes it got the value 5
 * for each of the last ten times.  Based on that assumption, when the next
 * value is measured, it drops the assumed past value, 5, from the current
 * sum, and adds the new value to get the updated pseudo-moving sum.
 *
 * This means the value could have errors, but the errors disappear for every
 * @len_window-aligned calls.  For example, if @len_window is 10, the pseudo
 * moving sum with the 11th value to the 19th value would have an error, but
 * the sum with the 20th value will not.
 *
 * Return: Pseudo-moving sum after getting the @new_value.
 */
static unsigned int damon_moving_sum(unsigned int mvsum, unsigned int nomvsum,
		unsigned int len_window, unsigned int new_value)
{
	return mvsum - nomvsum / len_window + new_value;
}

/**
 * damon_update_region_access_rate() - Update the access rate of a region.
 * @r:		The DAMON region to update for its access check result.
 * @accessed:	Whether the region was accessed during the last sampling
 *		interval.
 * @attrs:	The damon_attrs of the DAMON context.
 *
 * Update the access rate of a region with the region's last sampling interval
 * access check result.
 *
 * Usually this will be called by &damon_operations->check_accesses callback.
 */
void damon_update_region_access_rate(struct damon_region *r, bool accessed,
		struct damon_attrs *attrs)
{
	unsigned int len_window = 1;

	/*
	 * sample_interval can be zero, but cannot be larger than
	 * aggr_interval, owing to validation of damon_set_attrs().
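	 *
	 * For example, with a 5 ms sampling and a 100 ms aggregation
	 * interval, len_window becomes 20, and nr_accesses_bp roughly tracks
	 * nr_accesses * 10000 while being updated at every sampling interval
	 * rather than only at aggregation time.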
	 */
	if (attrs->sample_interval)
		len_window = damon_max_nr_accesses(attrs);
	r->nr_accesses_bp = damon_moving_sum(r->nr_accesses_bp,
			r->last_nr_accesses * 10000, len_window,
			accessed ? 10000 : 0);

	if (accessed)
		r->nr_accesses++;
}

static int __init damon_init(void)
{
	damon_region_cache = KMEM_CACHE(damon_region, 0);
	if (unlikely(!damon_region_cache)) {
		pr_err("creating damon_region_cache fails\n");
		return -ENOMEM;
	}

	return 0;
}

subsys_initcall(damon_init);

#include "core-test.h"