// SPDX-License-Identifier: GPL-2.0
/*
 * Data Access Monitor
 *
 * Author: SeongJae Park <sjpark@amazon.de>
 */

#define pr_fmt(fmt) "damon: " fmt

#include <linux/damon.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/string.h>

#define CREATE_TRACE_POINTS
#include <trace/events/damon.h>

#ifdef CONFIG_DAMON_KUNIT_TEST
#undef DAMON_MIN_REGION
#define DAMON_MIN_REGION 1
#endif

static DEFINE_MUTEX(damon_lock);
static int nr_running_ctxs;
static bool running_exclusive_ctxs;

static DEFINE_MUTEX(damon_ops_lock);
static struct damon_operations damon_registered_ops[NR_DAMON_OPS];

static struct kmem_cache *damon_region_cache __ro_after_init;

/* Should be called under damon_ops_lock with id smaller than NR_DAMON_OPS */
static bool __damon_is_registered_ops(enum damon_ops_id id)
{
	struct damon_operations empty_ops = {};

	if (!memcmp(&empty_ops, &damon_registered_ops[id], sizeof(empty_ops)))
		return false;
	return true;
}

/**
 * damon_is_registered_ops() - Check if a given damon_operations is registered.
 * @id:	Id of the damon_operations to check if registered.
 *
 * Return: true if the ops is set, false otherwise.
 */
bool damon_is_registered_ops(enum damon_ops_id id)
{
	bool registered;

	if (id >= NR_DAMON_OPS)
		return false;
	mutex_lock(&damon_ops_lock);
	registered = __damon_is_registered_ops(id);
	mutex_unlock(&damon_ops_lock);
	return registered;
}

/**
 * damon_register_ops() - Register a monitoring operations set to DAMON.
 * @ops:	monitoring operations set to register.
 *
 * This function registers a monitoring operations set of valid &struct
 * damon_operations->id so that others can find and use them later.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int damon_register_ops(struct damon_operations *ops)
{
	int err = 0;

	if (ops->id >= NR_DAMON_OPS)
		return -EINVAL;
	mutex_lock(&damon_ops_lock);
	/* Fail for already registered ops */
	if (__damon_is_registered_ops(ops->id)) {
		err = -EINVAL;
		goto out;
	}
	damon_registered_ops[ops->id] = *ops;
out:
	mutex_unlock(&damon_ops_lock);
	return err;
}
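
/*
 * Illustrative sketch (not part of the upstream flow): a monitoring
 * operations implementation would typically register itself from its own
 * init code, roughly like below.  The ops name and callback list here are
 * placeholders; a real implementation fills in the callbacks it supports.
 *
 *	static struct damon_operations my_ops = {
 *		.id = DAMON_OPS_VADDR,
 *		... callback pointers ...
 *	};
 *
 *	static int __init my_ops_init(void)
 *	{
 *		return damon_register_ops(&my_ops);
 *	}
 */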

/**
 * damon_select_ops() - Select a monitoring operations to use with the context.
 * @ctx:	monitoring context to use the operations.
 * @id:		id of the registered monitoring operations to select.
 *
 * This function finds the registered monitoring operations set of @id and
 * makes @ctx use it.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int damon_select_ops(struct damon_ctx *ctx, enum damon_ops_id id)
{
	int err = 0;

	if (id >= NR_DAMON_OPS)
		return -EINVAL;

	mutex_lock(&damon_ops_lock);
	if (!__damon_is_registered_ops(id))
		err = -EINVAL;
	else
		ctx->ops = damon_registered_ops[id];
	mutex_unlock(&damon_ops_lock);
	return err;
}

/*
 * Construct a damon_region struct
 *
 * Returns the pointer to the new struct if success, or NULL otherwise
 */
struct damon_region *damon_new_region(unsigned long start, unsigned long end)
{
	struct damon_region *region;

	region = kmem_cache_alloc(damon_region_cache, GFP_KERNEL);
	if (!region)
		return NULL;

	region->ar.start = start;
	region->ar.end = end;
	region->nr_accesses = 0;
	region->nr_accesses_bp = 0;
	INIT_LIST_HEAD(&region->list);

	region->age = 0;
	region->last_nr_accesses = 0;

	return region;
}

void damon_add_region(struct damon_region *r, struct damon_target *t)
{
	list_add_tail(&r->list, &t->regions_list);
	t->nr_regions++;
}

static void damon_del_region(struct damon_region *r, struct damon_target *t)
{
	list_del(&r->list);
	t->nr_regions--;
}

static void damon_free_region(struct damon_region *r)
{
	kmem_cache_free(damon_region_cache, r);
}

void damon_destroy_region(struct damon_region *r, struct damon_target *t)
{
	damon_del_region(r, t);
	damon_free_region(r);
}

/*
 * Check whether a region is intersecting an address range
 *
 * Returns true if it is.
 */
static bool damon_intersect(struct damon_region *r,
		struct damon_addr_range *re)
{
	return !(r->ar.end <= re->start || re->end <= r->ar.start);
}

/*
 * Fill holes in regions with new regions.
 */
static int damon_fill_regions_holes(struct damon_region *first,
		struct damon_region *last, struct damon_target *t)
{
	struct damon_region *r = first;

	damon_for_each_region_from(r, t) {
		struct damon_region *next, *newr;

		if (r == last)
			break;
		next = damon_next_region(r);
		if (r->ar.end != next->ar.start) {
			newr = damon_new_region(r->ar.end, next->ar.start);
			if (!newr)
				return -ENOMEM;
			damon_insert_region(newr, r, next, t);
		}
	}
	return 0;
}

/*
 * damon_set_regions() - Set regions of a target for given address ranges.
 * @t:		the given target.
 * @ranges:	array of new monitoring target ranges.
 * @nr_ranges:	length of @ranges.
 *
 * This function adds new regions to, or modifies existing regions of, a
 * monitoring target to fit in the given ranges.
 *
 * Return: 0 if success, or negative error code otherwise.
 */
int damon_set_regions(struct damon_target *t, struct damon_addr_range *ranges,
		unsigned int nr_ranges)
{
	struct damon_region *r, *next;
	unsigned int i;
	int err;

	/* Remove regions which are not in the new ranges */
	damon_for_each_region_safe(r, next, t) {
		for (i = 0; i < nr_ranges; i++) {
			if (damon_intersect(r, &ranges[i]))
				break;
		}
		if (i == nr_ranges)
			damon_destroy_region(r, t);
	}

	r = damon_first_region(t);
	/* Add new regions or resize existing regions to fit in the ranges */
	for (i = 0; i < nr_ranges; i++) {
		struct damon_region *first = NULL, *last, *newr;
		struct damon_addr_range *range;

		range = &ranges[i];
		/* Get the first/last regions intersecting with the range */
		damon_for_each_region_from(r, t) {
			if (damon_intersect(r, range)) {
				if (!first)
					first = r;
				last = r;
			}
			if (r->ar.start >= range->end)
				break;
		}
		if (!first) {
			/* no region intersects with this range */
			newr = damon_new_region(
					ALIGN_DOWN(range->start,
						DAMON_MIN_REGION),
					ALIGN(range->end, DAMON_MIN_REGION));
			if (!newr)
				return -ENOMEM;
			damon_insert_region(newr, damon_prev_region(r), r, t);
		} else {
			/* resize intersecting regions to fit in this range */
			first->ar.start = ALIGN_DOWN(range->start,
					DAMON_MIN_REGION);
			last->ar.end = ALIGN(range->end, DAMON_MIN_REGION);

			/* fill possible holes in the range */
			err = damon_fill_regions_holes(first, last, t);
			if (err)
				return err;
		}
	}
	return 0;
}
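
/*
 * Example (illustrative only, assuming DAMON_MIN_REGION of 4096): calling
 * damon_set_regions() for a single range [10000, 30000) removes or trims
 * existing regions outside the range, creates a region covering
 * [8192, 32768) if nothing intersects, and fills any hole between the
 * surviving regions, so that the target's regions exactly cover the
 * DAMON_MIN_REGION-aligned version of the requested range.
 */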

struct damos_filter *damos_new_filter(enum damos_filter_type type,
		bool matching)
{
	struct damos_filter *filter;

	filter = kmalloc(sizeof(*filter), GFP_KERNEL);
	if (!filter)
		return NULL;
	filter->type = type;
	filter->matching = matching;
	INIT_LIST_HEAD(&filter->list);
	return filter;
}

void damos_add_filter(struct damos *s, struct damos_filter *f)
{
	list_add_tail(&f->list, &s->filters);
}

static void damos_del_filter(struct damos_filter *f)
{
	list_del(&f->list);
}

static void damos_free_filter(struct damos_filter *f)
{
	kfree(f);
}

void damos_destroy_filter(struct damos_filter *f)
{
	damos_del_filter(f);
	damos_free_filter(f);
}

/* initialize private fields of damos_quota and return the pointer */
static struct damos_quota *damos_quota_init_priv(struct damos_quota *quota)
{
	quota->total_charged_sz = 0;
	quota->total_charged_ns = 0;
	quota->esz = 0;
	quota->charged_sz = 0;
	quota->charged_from = 0;
	quota->charge_target_from = NULL;
	quota->charge_addr_from = 0;
	return quota;
}

struct damos *damon_new_scheme(struct damos_access_pattern *pattern,
			enum damos_action action,
			unsigned long apply_interval_us,
			struct damos_quota *quota,
			struct damos_watermarks *wmarks)
{
	struct damos *scheme;

	scheme = kmalloc(sizeof(*scheme), GFP_KERNEL);
	if (!scheme)
		return NULL;
	scheme->pattern = *pattern;
	scheme->action = action;
	scheme->apply_interval_us = apply_interval_us;
	/*
	 * next_apply_sis will be set when kdamond starts.  While kdamond is
	 * running, it will also be updated when the scheme is added to the
	 * DAMON context, or when damon_attrs are updated.
	 */
	scheme->next_apply_sis = 0;
	INIT_LIST_HEAD(&scheme->filters);
	scheme->stat = (struct damos_stat){};
	INIT_LIST_HEAD(&scheme->list);

	scheme->quota = *(damos_quota_init_priv(quota));

	scheme->wmarks = *wmarks;
	scheme->wmarks.activated = true;

	return scheme;
}
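
/*
 * Illustrative sketch (not part of the upstream code): constructing a scheme
 * and attaching an address filter could look roughly like below.  The
 * concrete pattern, quota, watermark, and address values are placeholders.
 *
 *	struct damos *s = damon_new_scheme(&pattern, DAMOS_STAT, 0,
 *			&quota, &wmarks);
 *	struct damos_filter *f = damos_new_filter(DAMOS_FILTER_TYPE_ADDR,
 *			true);
 *
 *	if (s && f) {
 *		f->addr_range = (struct damon_addr_range){
 *			.start = 4096, .end = 8192 };
 *		damos_add_filter(s, f);
 *		damon_add_scheme(ctx, s);
 *	}
 */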

static void damos_set_next_apply_sis(struct damos *s, struct damon_ctx *ctx)
{
	unsigned long sample_interval = ctx->attrs.sample_interval ?
		ctx->attrs.sample_interval : 1;
	unsigned long apply_interval = s->apply_interval_us ?
		s->apply_interval_us : ctx->attrs.aggr_interval;

	s->next_apply_sis = ctx->passed_sample_intervals +
		apply_interval / sample_interval;
}

void damon_add_scheme(struct damon_ctx *ctx, struct damos *s)
{
	list_add_tail(&s->list, &ctx->schemes);
	damos_set_next_apply_sis(s, ctx);
}

static void damon_del_scheme(struct damos *s)
{
	list_del(&s->list);
}

static void damon_free_scheme(struct damos *s)
{
	kfree(s);
}

void damon_destroy_scheme(struct damos *s)
{
	struct damos_filter *f, *next;

	damos_for_each_filter_safe(f, next, s)
		damos_destroy_filter(f);
	damon_del_scheme(s);
	damon_free_scheme(s);
}

/*
 * Construct a damon_target struct
 *
 * Returns the pointer to the new struct if success, or NULL otherwise
 */
struct damon_target *damon_new_target(void)
{
	struct damon_target *t;

	t = kmalloc(sizeof(*t), GFP_KERNEL);
	if (!t)
		return NULL;

	t->pid = NULL;
	t->nr_regions = 0;
	INIT_LIST_HEAD(&t->regions_list);
	INIT_LIST_HEAD(&t->list);

	return t;
}

void damon_add_target(struct damon_ctx *ctx, struct damon_target *t)
{
	list_add_tail(&t->list, &ctx->adaptive_targets);
}

bool damon_targets_empty(struct damon_ctx *ctx)
{
	return list_empty(&ctx->adaptive_targets);
}

static void damon_del_target(struct damon_target *t)
{
	list_del(&t->list);
}

void damon_free_target(struct damon_target *t)
{
	struct damon_region *r, *next;

	damon_for_each_region_safe(r, next, t)
		damon_free_region(r);
	kfree(t);
}

void damon_destroy_target(struct damon_target *t)
{
	damon_del_target(t);
	damon_free_target(t);
}

unsigned int damon_nr_regions(struct damon_target *t)
{
	return t->nr_regions;
}

struct damon_ctx *damon_new_ctx(void)
{
	struct damon_ctx *ctx;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return NULL;

	ctx->attrs.sample_interval = 5 * 1000;
	ctx->attrs.aggr_interval = 100 * 1000;
	ctx->attrs.ops_update_interval = 60 * 1000 * 1000;

	ctx->passed_sample_intervals = 0;
	/* These will be set from kdamond_init_intervals_sis() */
	ctx->next_aggregation_sis = 0;
	ctx->next_ops_update_sis = 0;

	mutex_init(&ctx->kdamond_lock);

	ctx->attrs.min_nr_regions = 10;
	ctx->attrs.max_nr_regions = 1000;

	INIT_LIST_HEAD(&ctx->adaptive_targets);
	INIT_LIST_HEAD(&ctx->schemes);

	return ctx;
}
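
/*
 * Note on the defaults above (illustrative arithmetic): with the default
 * 5 ms sampling interval and 100 ms aggregation interval, an aggregation
 * happens every 100000 / 5000 = 20 sampling intervals, and the operations
 * set update happens every 60000000 / 5000 = 12000 sampling intervals.
 */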

static void damon_destroy_targets(struct damon_ctx *ctx)
{
	struct damon_target *t, *next_t;

	if (ctx->ops.cleanup) {
		ctx->ops.cleanup(ctx);
		return;
	}

	damon_for_each_target_safe(t, next_t, ctx)
		damon_destroy_target(t);
}

void damon_destroy_ctx(struct damon_ctx *ctx)
{
	struct damos *s, *next_s;

	damon_destroy_targets(ctx);

	damon_for_each_scheme_safe(s, next_s, ctx)
		damon_destroy_scheme(s);

	kfree(ctx);
}

static unsigned int damon_age_for_new_attrs(unsigned int age,
		struct damon_attrs *old_attrs, struct damon_attrs *new_attrs)
{
	return age * old_attrs->aggr_interval / new_attrs->aggr_interval;
}

/* convert access ratio in bp (per 10,000) to nr_accesses */
static unsigned int damon_accesses_bp_to_nr_accesses(
		unsigned int accesses_bp, struct damon_attrs *attrs)
{
	return accesses_bp * damon_max_nr_accesses(attrs) / 10000;
}

/* convert nr_accesses to access ratio in bp (per 10,000) */
static unsigned int damon_nr_accesses_to_accesses_bp(
		unsigned int nr_accesses, struct damon_attrs *attrs)
{
	return nr_accesses * 10000 / damon_max_nr_accesses(attrs);
}

static unsigned int damon_nr_accesses_for_new_attrs(unsigned int nr_accesses,
		struct damon_attrs *old_attrs, struct damon_attrs *new_attrs)
{
	return damon_accesses_bp_to_nr_accesses(
			damon_nr_accesses_to_accesses_bp(
				nr_accesses, old_attrs),
			new_attrs);
}

static void damon_update_monitoring_result(struct damon_region *r,
		struct damon_attrs *old_attrs, struct damon_attrs *new_attrs)
{
	r->nr_accesses = damon_nr_accesses_for_new_attrs(r->nr_accesses,
			old_attrs, new_attrs);
	r->nr_accesses_bp = r->nr_accesses * 10000;
	r->age = damon_age_for_new_attrs(r->age, old_attrs, new_attrs);
}

/*
 * region->nr_accesses is the number of sampling intervals in the last
 * aggregation interval in which access to the region has been found, and
 * region->age is the number of aggregation intervals for which its access
 * pattern has been maintained.  For this reason, the real meaning of the two
 * fields depends on the current sampling interval and aggregation interval.
 * This function updates ->nr_accesses and ->age of the given damon_ctx's
 * regions for new damon_attrs.
 */
static void damon_update_monitoring_results(struct damon_ctx *ctx,
		struct damon_attrs *new_attrs)
{
	struct damon_attrs *old_attrs = &ctx->attrs;
	struct damon_target *t;
	struct damon_region *r;

	/* if any interval is zero, simply skip the conversion */
	if (!old_attrs->sample_interval || !old_attrs->aggr_interval ||
			!new_attrs->sample_interval ||
			!new_attrs->aggr_interval)
		return;

	damon_for_each_target(t, ctx)
		damon_for_each_region(r, t)
			damon_update_monitoring_result(
					r, old_attrs, new_attrs);
}
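
/*
 * Worked example of the conversion above (illustrative numbers, assuming
 * damon_max_nr_accesses() is aggr_interval / sample_interval): with old
 * attrs of 5 ms sampling and 100 ms aggregation, a region found accessed in
 * 10 of the 20 samples has nr_accesses 10, i.e. 5000 bp.  If the
 * aggregation interval is doubled to 200 ms (40 samples), the same 5000 bp
 * ratio converts back to nr_accesses 20, and the region's age is halved
 * since each age unit now represents twice as much time.
 */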

/**
 * damon_set_attrs() - Set attributes for the monitoring.
 * @ctx:	monitoring context
 * @attrs:	monitoring attributes
 *
 * This function should be called while the kdamond is not running, or while
 * an aggregation of the access check results is not ongoing (e.g., from
 * &struct damon_callback->after_aggregation or
 * &struct damon_callback->after_wmarks_check callbacks).
 *
 * Every time interval is in micro-seconds.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int damon_set_attrs(struct damon_ctx *ctx, struct damon_attrs *attrs)
{
	unsigned long sample_interval = attrs->sample_interval ?
		attrs->sample_interval : 1;
	struct damos *s;

	if (attrs->min_nr_regions < 3)
		return -EINVAL;
	if (attrs->min_nr_regions > attrs->max_nr_regions)
		return -EINVAL;
	if (attrs->sample_interval > attrs->aggr_interval)
		return -EINVAL;

	ctx->next_aggregation_sis = ctx->passed_sample_intervals +
		attrs->aggr_interval / sample_interval;
	ctx->next_ops_update_sis = ctx->passed_sample_intervals +
		attrs->ops_update_interval / sample_interval;

	damon_update_monitoring_results(ctx, attrs);
	ctx->attrs = *attrs;

	damon_for_each_scheme(s, ctx)
		damos_set_next_apply_sis(s, ctx);

	return 0;
}

/**
 * damon_set_schemes() - Set data access monitoring based operation schemes.
 * @ctx:	monitoring context
 * @schemes:	array of the schemes
 * @nr_schemes:	number of entries in @schemes
 *
 * This function should not be called while the kdamond of the context is
 * running.
 */
void damon_set_schemes(struct damon_ctx *ctx, struct damos **schemes,
			ssize_t nr_schemes)
{
	struct damos *s, *next;
	ssize_t i;

	damon_for_each_scheme_safe(s, next, ctx)
		damon_destroy_scheme(s);
	for (i = 0; i < nr_schemes; i++)
		damon_add_scheme(ctx, schemes[i]);
}

/**
 * damon_nr_running_ctxs() - Return number of currently running contexts.
 */
int damon_nr_running_ctxs(void)
{
	int nr_ctxs;

	mutex_lock(&damon_lock);
	nr_ctxs = nr_running_ctxs;
	mutex_unlock(&damon_lock);

	return nr_ctxs;
}

/* Returns the size upper limit for each monitoring region */
static unsigned long damon_region_sz_limit(struct damon_ctx *ctx)
{
	struct damon_target *t;
	struct damon_region *r;
	unsigned long sz = 0;

	damon_for_each_target(t, ctx) {
		damon_for_each_region(r, t)
			sz += damon_sz_region(r);
	}

	if (ctx->attrs.min_nr_regions)
		sz /= ctx->attrs.min_nr_regions;
	if (sz < DAMON_MIN_REGION)
		sz = DAMON_MIN_REGION;

	return sz;
}

static int kdamond_fn(void *data);

/*
 * __damon_start() - Starts monitoring with given context.
 * @ctx:	monitoring context
 *
 * This function should be called while damon_lock is held.
 *
 * Return: 0 on success, negative error code otherwise.
 */
static int __damon_start(struct damon_ctx *ctx)
{
	int err = -EBUSY;

	mutex_lock(&ctx->kdamond_lock);
	if (!ctx->kdamond) {
		err = 0;
		ctx->kdamond = kthread_run(kdamond_fn, ctx, "kdamond.%d",
				nr_running_ctxs);
		if (IS_ERR(ctx->kdamond)) {
			err = PTR_ERR(ctx->kdamond);
			ctx->kdamond = NULL;
		}
	}
	mutex_unlock(&ctx->kdamond_lock);

	return err;
}

/**
 * damon_start() - Starts the monitoring for a given group of contexts.
 * @ctxs:	an array of the pointers for contexts to start monitoring
 * @nr_ctxs:	size of @ctxs
 * @exclusive:	exclusiveness of this contexts group
 *
 * This function starts a group of monitoring threads for a group of
 * monitoring contexts.  One thread per context is created and run in
 * parallel.  The caller should handle synchronization between the threads by
 * itself.  If @exclusive is true and a group of threads that was created by
 * another 'damon_start()' call is currently running, this function does
 * nothing but returns -EBUSY.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int damon_start(struct damon_ctx **ctxs, int nr_ctxs, bool exclusive)
{
	int i;
	int err = 0;

	mutex_lock(&damon_lock);
	if ((exclusive && nr_running_ctxs) ||
			(!exclusive && running_exclusive_ctxs)) {
		mutex_unlock(&damon_lock);
		return -EBUSY;
	}

	for (i = 0; i < nr_ctxs; i++) {
		err = __damon_start(ctxs[i]);
		if (err)
			break;
		nr_running_ctxs++;
	}
	if (exclusive && nr_running_ctxs)
		running_exclusive_ctxs = true;
	mutex_unlock(&damon_lock);

	return err;
}
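
/*
 * Illustrative sketch (not part of the upstream code): a caller owning a
 * fully configured context could start and later stop its monitoring thread
 * roughly like below.  Error handling is omitted and 'ctx' is a placeholder.
 *
 *	struct damon_ctx *ctxs[] = { ctx };
 *
 *	damon_start(ctxs, 1, true);
 *	...
 *	damon_stop(ctxs, 1);
 */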

/*
 * __damon_stop() - Stops monitoring of a given context.
 * @ctx:	monitoring context
 *
 * Return: 0 on success, negative error code otherwise.
 */
static int __damon_stop(struct damon_ctx *ctx)
{
	struct task_struct *tsk;

	mutex_lock(&ctx->kdamond_lock);
	tsk = ctx->kdamond;
	if (tsk) {
		get_task_struct(tsk);
		mutex_unlock(&ctx->kdamond_lock);
		kthread_stop_put(tsk);
		return 0;
	}
	mutex_unlock(&ctx->kdamond_lock);

	return -EPERM;
}

/**
 * damon_stop() - Stops the monitoring for a given group of contexts.
 * @ctxs:	an array of the pointers for contexts to stop monitoring
 * @nr_ctxs:	size of @ctxs
 *
 * Return: 0 on success, negative error code otherwise.
 */
int damon_stop(struct damon_ctx **ctxs, int nr_ctxs)
{
	int i, err = 0;

	for (i = 0; i < nr_ctxs; i++) {
		/* nr_running_ctxs is decremented in kdamond_fn */
		err = __damon_stop(ctxs[i]);
		if (err)
			break;
	}
	return err;
}

/*
 * Reset the aggregated monitoring results ('nr_accesses' of each region).
 */
static void kdamond_reset_aggregated(struct damon_ctx *c)
{
	struct damon_target *t;
	unsigned int ti = 0;	/* target's index */

	damon_for_each_target(t, c) {
		struct damon_region *r;

		damon_for_each_region(r, t) {
			trace_damon_aggregated(ti, r, damon_nr_regions(t));
			r->last_nr_accesses = r->nr_accesses;
			r->nr_accesses = 0;
		}
		ti++;
	}
}

static void damon_split_region_at(struct damon_target *t,
				  struct damon_region *r, unsigned long sz_r);

static bool __damos_valid_target(struct damon_region *r, struct damos *s)
{
	unsigned long sz;
	unsigned int nr_accesses = r->nr_accesses_bp / 10000;

	sz = damon_sz_region(r);
	return s->pattern.min_sz_region <= sz &&
		sz <= s->pattern.max_sz_region &&
		s->pattern.min_nr_accesses <= nr_accesses &&
		nr_accesses <= s->pattern.max_nr_accesses &&
		s->pattern.min_age_region <= r->age &&
		r->age <= s->pattern.max_age_region;
}

static bool damos_valid_target(struct damon_ctx *c, struct damon_target *t,
		struct damon_region *r, struct damos *s)
{
	bool ret = __damos_valid_target(r, s);

	if (!ret || !s->quota.esz || !c->ops.get_scheme_score)
		return ret;

	return c->ops.get_scheme_score(c, t, r, s) >= s->quota.min_score;
}

/*
 * damos_skip_charged_region() - Check if the given region or starting part of
 * it is already charged for the DAMOS quota.
 * @t:	The target of the region.
 * @rp:	The pointer to the region.
 * @s:	The scheme to be applied.
 *
 * If the quota of a scheme has been exceeded in a quota charge window, the
 * scheme's action would be applied to only a part of the regions that fulfill
 * the target access pattern.  To avoid applying the scheme action only to the
 * already applied regions, DAMON skips applying the scheme action to the
 * regions that were charged in the previous charge window.
 *
 * This function checks if a given region should be skipped or not for that
 * reason.  If only the starting part of the region was previously charged,
 * this function splits the region into two so that the second one covers the
 * area that was not charged in the previous charge window, saves the second
 * region in *rp, and returns false, so that the caller can apply the DAMON
 * action to the second one.
 *
 * Return: true if the region should be entirely skipped, false otherwise.
 */
static bool damos_skip_charged_region(struct damon_target *t,
		struct damon_region **rp, struct damos *s)
{
	struct damon_region *r = *rp;
	struct damos_quota *quota = &s->quota;
	unsigned long sz_to_skip;

	/* Skip previously charged regions */
	if (quota->charge_target_from) {
		if (t != quota->charge_target_from)
			return true;
		if (r == damon_last_region(t)) {
			quota->charge_target_from = NULL;
			quota->charge_addr_from = 0;
			return true;
		}
		if (quota->charge_addr_from &&
				r->ar.end <= quota->charge_addr_from)
			return true;

		if (quota->charge_addr_from && r->ar.start <
				quota->charge_addr_from) {
			sz_to_skip = ALIGN_DOWN(quota->charge_addr_from -
					r->ar.start, DAMON_MIN_REGION);
			if (!sz_to_skip) {
				if (damon_sz_region(r) <= DAMON_MIN_REGION)
					return true;
				sz_to_skip = DAMON_MIN_REGION;
			}
			damon_split_region_at(t, r, sz_to_skip);
			r = damon_next_region(r);
			*rp = r;
		}
		quota->charge_target_from = NULL;
		quota->charge_addr_from = 0;
	}
	return false;
}

static void damos_update_stat(struct damos *s,
		unsigned long sz_tried, unsigned long sz_applied)
{
	s->stat.nr_tried++;
	s->stat.sz_tried += sz_tried;
	if (sz_applied)
		s->stat.nr_applied++;
	s->stat.sz_applied += sz_applied;
}

static bool __damos_filter_out(struct damon_ctx *ctx, struct damon_target *t,
		struct damon_region *r, struct damos_filter *filter)
{
	bool matched = false;
	struct damon_target *ti;
	int target_idx = 0;
	unsigned long start, end;

	switch (filter->type) {
	case DAMOS_FILTER_TYPE_TARGET:
		damon_for_each_target(ti, ctx) {
			if (ti == t)
				break;
			target_idx++;
		}
		matched = target_idx == filter->target_idx;
		break;
	case DAMOS_FILTER_TYPE_ADDR:
		start = ALIGN_DOWN(filter->addr_range.start, DAMON_MIN_REGION);
		end = ALIGN_DOWN(filter->addr_range.end, DAMON_MIN_REGION);

		/* inside the range */
		if (start <= r->ar.start && r->ar.end <= end) {
			matched = true;
			break;
		}
		/* outside of the range */
		if (r->ar.end <= start || end <= r->ar.start) {
			matched = false;
			break;
		}
		/* start before the range and overlap */
		if (r->ar.start < start) {
			damon_split_region_at(t, r, start - r->ar.start);
			matched = false;
			break;
		}
		/* start inside the range */
		damon_split_region_at(t, r, end - r->ar.start);
		matched = true;
		break;
	default:
		return false;
	}

	return matched == filter->matching;
}

static bool damos_filter_out(struct damon_ctx *ctx, struct damon_target *t,
		struct damon_region *r, struct damos *s)
{
	struct damos_filter *filter;

	damos_for_each_filter(filter, s) {
		if (__damos_filter_out(ctx, t, r, filter))
			return true;
	}
	return false;
}
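
/*
 * Example of the address filter handling above (illustrative numbers,
 * assuming DAMON_MIN_REGION of 1 for simplicity): for a region covering
 * [10, 30) and a filter address range of [20, 40), the region starts before
 * the range, so it is split at address 20.  The first half, [10, 20), is
 * reported as not matching here, and the remaining [20, 30) part is
 * evaluated on its own when the caller visits it.
 */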

static void damos_apply_scheme(struct damon_ctx *c, struct damon_target *t,
		struct damon_region *r, struct damos *s)
{
	struct damos_quota *quota = &s->quota;
	unsigned long sz = damon_sz_region(r);
	struct timespec64 begin, end;
	unsigned long sz_applied = 0;
	int err = 0;
	/*
	 * We plan to support multiple contexts per kdamond, as the DAMON
	 * sysfs interface implies with its 'nr_contexts' file.  Nevertheless,
	 * only a single context per kdamond is supported for now.  So, we can
	 * simply use '0' as the context index here.
	 */
	unsigned int cidx = 0;
	struct damos *siter;		/* schemes iterator */
	unsigned int sidx = 0;
	struct damon_target *titer;	/* targets iterator */
	unsigned int tidx = 0;
	bool do_trace = false;

	/* get indices for trace_damos_before_apply() */
	if (trace_damos_before_apply_enabled()) {
		damon_for_each_scheme(siter, c) {
			if (siter == s)
				break;
			sidx++;
		}
		damon_for_each_target(titer, c) {
			if (titer == t)
				break;
			tidx++;
		}
		do_trace = true;
	}

	if (c->ops.apply_scheme) {
		if (quota->esz && quota->charged_sz + sz > quota->esz) {
			sz = ALIGN_DOWN(quota->esz - quota->charged_sz,
					DAMON_MIN_REGION);
			if (!sz)
				goto update_stat;
			damon_split_region_at(t, r, sz);
		}
		if (damos_filter_out(c, t, r, s))
			return;
		ktime_get_coarse_ts64(&begin);
		if (c->callback.before_damos_apply)
			err = c->callback.before_damos_apply(c, t, r, s);
		if (!err) {
			trace_damos_before_apply(cidx, sidx, tidx, r,
					damon_nr_regions(t), do_trace);
			sz_applied = c->ops.apply_scheme(c, t, r, s);
		}
		ktime_get_coarse_ts64(&end);
		quota->total_charged_ns += timespec64_to_ns(&end) -
			timespec64_to_ns(&begin);
		quota->charged_sz += sz;
		if (quota->esz && quota->charged_sz >= quota->esz) {
			quota->charge_target_from = t;
			quota->charge_addr_from = r->ar.end + 1;
		}
	}
	if (s->action != DAMOS_STAT)
		r->age = 0;

update_stat:
	damos_update_stat(s, sz, sz_applied);
}

static void damon_do_apply_schemes(struct damon_ctx *c,
				   struct damon_target *t,
				   struct damon_region *r)
{
	struct damos *s;

	damon_for_each_scheme(s, c) {
		struct damos_quota *quota = &s->quota;

		if (!s->wmarks.activated)
			continue;

		/* Check the quota */
		if (quota->esz && quota->charged_sz >= quota->esz)
			continue;

		if (damos_skip_charged_region(t, &r, s))
			continue;

		if (!damos_valid_target(c, t, r, s))
			continue;

		damos_apply_scheme(c, t, r, s);
	}
}

/* Shouldn't be called if quota->ms and quota->sz are zero */
static void damos_set_effective_quota(struct damos_quota *quota)
{
	unsigned long throughput;
	unsigned long esz;

	if (!quota->ms) {
		quota->esz = quota->sz;
		return;
	}

	if (quota->total_charged_ns)
		throughput = quota->total_charged_sz * 1000000 /
			quota->total_charged_ns;
	else
		throughput = PAGE_SIZE * 1024;
	esz = throughput * quota->ms;

	if (quota->sz && quota->sz < esz)
		esz = quota->sz;
	quota->esz = esz;
}
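
/*
 * Worked example for the effective quota above (illustrative numbers): if
 * 100 MiB were charged over a total of one second of apply time,
 * 'throughput' is 104857600 * 1000000 / 1000000000, i.e. about 104857 bytes
 * per millisecond.  With a 10 ms time quota, 'esz' becomes roughly 1 MiB,
 * and it is further capped by the size quota 'sz' if that is set and
 * smaller.
 */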

static void damos_adjust_quota(struct damon_ctx *c, struct damos *s)
{
	struct damos_quota *quota = &s->quota;
	struct damon_target *t;
	struct damon_region *r;
	unsigned long cumulated_sz;
	unsigned int score, max_score = 0;

	if (!quota->ms && !quota->sz)
		return;

	/* New charge window starts */
	if (time_after_eq(jiffies, quota->charged_from +
				msecs_to_jiffies(quota->reset_interval))) {
		if (quota->esz && quota->charged_sz >= quota->esz)
			s->stat.qt_exceeds++;
		quota->total_charged_sz += quota->charged_sz;
		quota->charged_from = jiffies;
		quota->charged_sz = 0;
		damos_set_effective_quota(quota);
	}

	if (!c->ops.get_scheme_score)
		return;

	/* Fill up the score histogram */
	memset(quota->histogram, 0, sizeof(quota->histogram));
	damon_for_each_target(t, c) {
		damon_for_each_region(r, t) {
			if (!__damos_valid_target(r, s))
				continue;
			score = c->ops.get_scheme_score(c, t, r, s);
			quota->histogram[score] += damon_sz_region(r);
			if (score > max_score)
				max_score = score;
		}
	}

	/* Set the min score limit */
	for (cumulated_sz = 0, score = max_score; ; score--) {
		cumulated_sz += quota->histogram[score];
		if (cumulated_sz >= quota->esz || !score)
			break;
	}
	quota->min_score = score;
}

static void kdamond_apply_schemes(struct damon_ctx *c)
{
	struct damon_target *t;
	struct damon_region *r, *next_r;
	struct damos *s;
	unsigned long sample_interval = c->attrs.sample_interval ?
		c->attrs.sample_interval : 1;
	bool has_schemes_to_apply = false;

	damon_for_each_scheme(s, c) {
		if (c->passed_sample_intervals != s->next_apply_sis)
			continue;

		s->next_apply_sis +=
			(s->apply_interval_us ? s->apply_interval_us :
			 c->attrs.aggr_interval) / sample_interval;

		if (!s->wmarks.activated)
			continue;

		has_schemes_to_apply = true;

		damos_adjust_quota(c, s);
	}

	if (!has_schemes_to_apply)
		return;

	damon_for_each_target(t, c) {
		damon_for_each_region_safe(r, next_r, t)
			damon_do_apply_schemes(c, t, r);
	}
}

/*
 * Merge two adjacent regions into one region
 */
static void damon_merge_two_regions(struct damon_target *t,
		struct damon_region *l, struct damon_region *r)
{
	unsigned long sz_l = damon_sz_region(l), sz_r = damon_sz_region(r);

	l->nr_accesses = (l->nr_accesses * sz_l + r->nr_accesses * sz_r) /
			(sz_l + sz_r);
	l->nr_accesses_bp = l->nr_accesses * 10000;
	l->age = (l->age * sz_l + r->age * sz_r) / (sz_l + sz_r);
	l->ar.end = r->ar.end;
	damon_destroy_region(r, t);
}

/*
 * Merge adjacent regions having similar access frequencies
 *
 * t		target affected by this merge operation
 * thres	'->nr_accesses' diff threshold for the merge
 * sz_limit	size upper limit of each region
 */
static void damon_merge_regions_of(struct damon_target *t, unsigned int thres,
				   unsigned long sz_limit)
{
	struct damon_region *r, *prev = NULL, *next;

	damon_for_each_region_safe(r, next, t) {
		if (abs(r->nr_accesses - r->last_nr_accesses) > thres)
			r->age = 0;
		else
			r->age++;

		if (prev && prev->ar.end == r->ar.start &&
		    abs(prev->nr_accesses - r->nr_accesses) <= thres &&
		    damon_sz_region(prev) + damon_sz_region(r) <= sz_limit)
			damon_merge_two_regions(t, prev, r);
		else
			prev = r;
	}
}
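
/*
 * Worked example for the size-weighted merge above (illustrative numbers):
 * merging a 12 KiB region with nr_accesses 4 into an adjacent 4 KiB region
 * with nr_accesses 0 yields nr_accesses (4 * 12288 + 0 * 4096) / 16384 = 3,
 * so the bigger region's access frequency dominates the merged result.
 */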

/*
 * Merge adjacent regions having similar access frequencies
 *
 * threshold	'->nr_accesses' diff threshold for the merge
 * sz_limit	size upper limit of each region
 *
 * This function merges adjacent monitoring target regions whose access
 * frequencies are similar.  This is for minimizing the monitoring overhead
 * under the dynamically changeable access pattern.  If a merge was
 * unnecessarily made, later 'kdamond_split_regions()' will revert it.
 */
static void kdamond_merge_regions(struct damon_ctx *c, unsigned int threshold,
				  unsigned long sz_limit)
{
	struct damon_target *t;

	damon_for_each_target(t, c)
		damon_merge_regions_of(t, threshold, sz_limit);
}

/*
 * Split a region in two
 *
 * r		the region to be split
 * sz_r		size of the first sub-region that will be made
 */
static void damon_split_region_at(struct damon_target *t,
				  struct damon_region *r, unsigned long sz_r)
{
	struct damon_region *new;

	new = damon_new_region(r->ar.start + sz_r, r->ar.end);
	if (!new)
		return;

	r->ar.end = new->ar.start;

	new->age = r->age;
	new->last_nr_accesses = r->last_nr_accesses;
	new->nr_accesses_bp = r->nr_accesses_bp;

	damon_insert_region(new, r, damon_next_region(r), t);
}

/* Split every region in the given target into 'nr_subs' regions */
static void damon_split_regions_of(struct damon_target *t, int nr_subs)
{
	struct damon_region *r, *next;
	unsigned long sz_region, sz_sub = 0;
	int i;

	damon_for_each_region_safe(r, next, t) {
		sz_region = damon_sz_region(r);

		for (i = 0; i < nr_subs - 1 &&
				sz_region > 2 * DAMON_MIN_REGION; i++) {
			/*
			 * Randomly select size of left sub-region to be at
			 * least 10 percent and at most 90% of original region
			 */
			sz_sub = ALIGN_DOWN(damon_rand(1, 10) *
					sz_region / 10, DAMON_MIN_REGION);
			/* Do not allow blank region */
			if (sz_sub == 0 || sz_sub >= sz_region)
				continue;

			damon_split_region_at(t, r, sz_sub);
			sz_region = sz_sub;
		}
	}
}

/*
 * Split every target region into randomly-sized small regions
 *
 * This function splits every target region into random-sized small regions
 * if the current total number of the regions is equal to or smaller than
 * half of the user-specified maximum number of regions.  This is for
 * maximizing the monitoring accuracy under the dynamically changeable access
 * patterns.  If a split was unnecessarily made, later
 * 'kdamond_merge_regions()' will revert it.
 */
static void kdamond_split_regions(struct damon_ctx *ctx)
{
	struct damon_target *t;
	unsigned int nr_regions = 0;
	static unsigned int last_nr_regions;
	int nr_subregions = 2;

	damon_for_each_target(t, ctx)
		nr_regions += damon_nr_regions(t);

	if (nr_regions > ctx->attrs.max_nr_regions / 2)
		return;

	/* Maybe the middle of the region has different access frequency */
	if (last_nr_regions == nr_regions &&
			nr_regions < ctx->attrs.max_nr_regions / 3)
		nr_subregions = 3;

	damon_for_each_target(t, ctx)
		damon_split_regions_of(t, nr_subregions);

	last_nr_regions = nr_regions;
}
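
/*
 * Example of the split policy above (illustrative numbers): with the
 * default max_nr_regions of 1000, splitting is attempted only while the
 * total number of regions is at most 500.  Each region is then split in
 * two, or in three when the region count did not change since the last
 * split and is still below 1000 / 3, so the region count stays well under
 * the maximum while the random split points probe for differently accessed
 * sub-areas.
 */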

/*
 * Check whether the current monitoring should be stopped
 *
 * The monitoring is stopped when either the user requested to stop, or all
 * monitoring targets are invalid.
 *
 * Returns true if the current monitoring should be stopped.
 */
static bool kdamond_need_stop(struct damon_ctx *ctx)
{
	struct damon_target *t;

	if (kthread_should_stop())
		return true;

	if (!ctx->ops.target_valid)
		return false;

	damon_for_each_target(t, ctx) {
		if (ctx->ops.target_valid(t))
			return false;
	}

	return true;
}

static unsigned long damos_wmark_metric_value(enum damos_wmark_metric metric)
{
	switch (metric) {
	case DAMOS_WMARK_FREE_MEM_RATE:
		return global_zone_page_state(NR_FREE_PAGES) * 1000 /
			totalram_pages();
	default:
		break;
	}
	return -EINVAL;
}

/*
 * Returns zero if the scheme is active.  Else, returns time to wait for next
 * watermark check in micro-seconds.
 */
static unsigned long damos_wmark_wait_us(struct damos *scheme)
{
	unsigned long metric;

	if (scheme->wmarks.metric == DAMOS_WMARK_NONE)
		return 0;

	metric = damos_wmark_metric_value(scheme->wmarks.metric);
	/* higher than high watermark or lower than low watermark */
	if (metric > scheme->wmarks.high || scheme->wmarks.low > metric) {
		if (scheme->wmarks.activated)
			pr_debug("deactivate a scheme (%d) for %s wmark\n",
					scheme->action,
					metric > scheme->wmarks.high ?
					"high" : "low");
		scheme->wmarks.activated = false;
		return scheme->wmarks.interval;
	}

	/* inactive and higher than middle watermark */
	if ((scheme->wmarks.high >= metric && metric >= scheme->wmarks.mid) &&
			!scheme->wmarks.activated)
		return scheme->wmarks.interval;

	if (!scheme->wmarks.activated)
		pr_debug("activate a scheme (%d)\n", scheme->action);
	scheme->wmarks.activated = true;
	return 0;
}

static void kdamond_usleep(unsigned long usecs)
{
	/* See Documentation/timers/timers-howto.rst for the thresholds */
	if (usecs > 20 * USEC_PER_MSEC)
		schedule_timeout_idle(usecs_to_jiffies(usecs));
	else
		usleep_idle_range(usecs, usecs + 1);
}

/* Returns negative error code if it's not activated but should return */
static int kdamond_wait_activation(struct damon_ctx *ctx)
{
	struct damos *s;
	unsigned long wait_time;
	unsigned long min_wait_time = 0;
	bool init_wait_time = false;

	while (!kdamond_need_stop(ctx)) {
		damon_for_each_scheme(s, ctx) {
			wait_time = damos_wmark_wait_us(s);
			if (!init_wait_time || wait_time < min_wait_time) {
				init_wait_time = true;
				min_wait_time = wait_time;
			}
		}
		if (!min_wait_time)
			return 0;

		kdamond_usleep(min_wait_time);

		if (ctx->callback.after_wmarks_check &&
				ctx->callback.after_wmarks_check(ctx))
			break;
	}
	return -EBUSY;
}
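
/*
 * Example of the watermark mechanism above (illustrative thresholds): for
 * DAMOS_WMARK_FREE_MEM_RATE, the metric is the free memory rate in
 * per-thousand of total RAM.  With high=500, mid=400, and low=200, a scheme
 * is deactivated while free memory is above 50% or below 20%, stays
 * inactive while it is between 40% and 50%, and gets (re)activated once it
 * falls between 20% and 40%.
 */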

static void kdamond_init_intervals_sis(struct damon_ctx *ctx)
{
	unsigned long sample_interval = ctx->attrs.sample_interval ?
		ctx->attrs.sample_interval : 1;
	unsigned long apply_interval;
	struct damos *scheme;

	ctx->passed_sample_intervals = 0;
	ctx->next_aggregation_sis = ctx->attrs.aggr_interval / sample_interval;
	ctx->next_ops_update_sis = ctx->attrs.ops_update_interval /
		sample_interval;

	damon_for_each_scheme(scheme, ctx) {
		apply_interval = scheme->apply_interval_us ?
			scheme->apply_interval_us : ctx->attrs.aggr_interval;
		scheme->next_apply_sis = apply_interval / sample_interval;
	}
}

/*
 * The monitoring daemon that runs as a kernel thread
 */
static int kdamond_fn(void *data)
{
	struct damon_ctx *ctx = data;
	struct damon_target *t;
	struct damon_region *r, *next;
	unsigned int max_nr_accesses = 0;
	unsigned long sz_limit = 0;

	pr_debug("kdamond (%d) starts\n", current->pid);

	kdamond_init_intervals_sis(ctx);

	if (ctx->ops.init)
		ctx->ops.init(ctx);
	if (ctx->callback.before_start && ctx->callback.before_start(ctx))
		goto done;

	sz_limit = damon_region_sz_limit(ctx);

	while (!kdamond_need_stop(ctx)) {
		/*
		 * ctx->attrs and ctx->next_{aggregation,ops_update}_sis could
		 * be changed from after_wmarks_check() or after_aggregation()
		 * callbacks.  Read the values here, and use those for this
		 * iteration.  That is, new values that damon_set_attrs()
		 * updated are respected from the next iteration.
		 */
		unsigned long next_aggregation_sis = ctx->next_aggregation_sis;
		unsigned long next_ops_update_sis = ctx->next_ops_update_sis;
		unsigned long sample_interval = ctx->attrs.sample_interval;

		if (kdamond_wait_activation(ctx))
			break;

		if (ctx->ops.prepare_access_checks)
			ctx->ops.prepare_access_checks(ctx);
		if (ctx->callback.after_sampling &&
				ctx->callback.after_sampling(ctx))
			break;

		kdamond_usleep(sample_interval);
		ctx->passed_sample_intervals++;

		if (ctx->ops.check_accesses)
			max_nr_accesses = ctx->ops.check_accesses(ctx);

		if (ctx->passed_sample_intervals == next_aggregation_sis) {
			kdamond_merge_regions(ctx,
					max_nr_accesses / 10,
					sz_limit);
			if (ctx->callback.after_aggregation &&
					ctx->callback.after_aggregation(ctx))
				break;
		}

		/*
		 * do kdamond_apply_schemes() after kdamond_merge_regions() if
		 * possible, to reduce overhead
		 */
		if (!list_empty(&ctx->schemes))
			kdamond_apply_schemes(ctx);

		sample_interval = ctx->attrs.sample_interval ?
			ctx->attrs.sample_interval : 1;
		if (ctx->passed_sample_intervals == next_aggregation_sis) {
			ctx->next_aggregation_sis = next_aggregation_sis +
				ctx->attrs.aggr_interval / sample_interval;

			kdamond_reset_aggregated(ctx);
			kdamond_split_regions(ctx);
			if (ctx->ops.reset_aggregated)
				ctx->ops.reset_aggregated(ctx);
		}

		if (ctx->passed_sample_intervals == next_ops_update_sis) {
			ctx->next_ops_update_sis = next_ops_update_sis +
				ctx->attrs.ops_update_interval /
				sample_interval;
			if (ctx->ops.update)
				ctx->ops.update(ctx);
			sz_limit = damon_region_sz_limit(ctx);
		}
	}
done:
	damon_for_each_target(t, ctx) {
		damon_for_each_region_safe(r, next, t)
			damon_destroy_region(r, t);
	}

	if (ctx->callback.before_terminate)
		ctx->callback.before_terminate(ctx);
	if (ctx->ops.cleanup)
		ctx->ops.cleanup(ctx);

	pr_debug("kdamond (%d) finishes\n", current->pid);
	mutex_lock(&ctx->kdamond_lock);
	ctx->kdamond = NULL;
	mutex_unlock(&ctx->kdamond_lock);

	mutex_lock(&damon_lock);
	nr_running_ctxs--;
	if (!nr_running_ctxs && running_exclusive_ctxs)
		running_exclusive_ctxs = false;
	mutex_unlock(&damon_lock);

	return 0;
}

/*
 * struct damon_system_ram_region - System RAM resource address region of
 *				    [@start, @end).
 * @start:	Start address of the region (inclusive).
 * @end:	End address of the region (exclusive).
 */
struct damon_system_ram_region {
	unsigned long start;
	unsigned long end;
};

static int walk_system_ram(struct resource *res, void *arg)
{
	struct damon_system_ram_region *a = arg;

	if (a->end - a->start < resource_size(res)) {
		a->start = res->start;
		a->end = res->end;
	}
	return 0;
}

/*
 * Find biggest 'System RAM' resource and store its start and end address in
 * @start and @end, respectively.  If no System RAM is found, returns false.
 */
static bool damon_find_biggest_system_ram(unsigned long *start,
					  unsigned long *end)
{
	struct damon_system_ram_region arg = {};

	walk_system_ram_res(0, ULONG_MAX, &arg, walk_system_ram);
	if (arg.end <= arg.start)
		return false;

	*start = arg.start;
	*end = arg.end;
	return true;
}

/**
 * damon_set_region_biggest_system_ram_default() - Set the region of the given
 *	monitoring target as requested, or biggest 'System RAM'.
 * @t:		The monitoring target to set the region.
 * @start:	The pointer to the start address of the region.
 * @end:	The pointer to the end address of the region.
 *
 * This function sets the region of @t as requested by @start and @end.  If
 * the values of @start and @end are zero, however, this function finds the
 * biggest 'System RAM' resource and sets the region to cover the resource.
 * In the latter case, this function saves the start and end addresses of the
 * resource in @start and @end, respectively.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int damon_set_region_biggest_system_ram_default(struct damon_target *t,
			unsigned long *start, unsigned long *end)
{
	struct damon_addr_range addr_range;

	if (*start > *end)
		return -EINVAL;

	if (!*start && !*end &&
	    !damon_find_biggest_system_ram(start, end))
		return -EINVAL;

	addr_range.start = *start;
	addr_range.end = *end;
	return damon_set_regions(t, &addr_range, 1);
}

/*
 * damon_moving_sum() - Calculate an inferred moving sum value.
 * @mvsum:	Inferred sum of the last @len_window values.
 * @nomvsum:	Non-moving sum of the last discrete @len_window window values.
 * @len_window:	The number of last values to take care of.
 * @new_value:	New value that will be added to the pseudo moving sum.
 *
 * Moving sum (moving average * window size) is good for handling noise, but
 * the cost of keeping past values can be high for arbitrary window size.
 * This function implements a lightweight pseudo moving sum function that
 * doesn't keep the past window values.
 *
 * It simply assumes there was no noise in the past, and gets the no-noise
 * assumed past value to drop from @nomvsum and @len_window.  @nomvsum is a
 * non-moving sum of the last window.  For example, if @len_window is 10 and
 * we have 25 values, @nomvsum is the sum of the 11th to 20th values of the
 * 25 values.  Hence, this function simply drops @nomvsum / @len_window from
 * the given @mvsum and adds @new_value.
 *
 * For example, if @len_window is 10 and @nomvsum is 50, the last 10 values
 * for the last window could vary, e.g., 0, 10, 0, 10, 0, 10, 0, 0, 0, 20.
 * For calculating the next moving sum with a new value, we should drop 0
 * from 50 and add the new value.  However, this function assumes it got
 * value 5 for each of the last ten times.  Based on that assumption, when
 * the next value is measured, it drops the assumed past value, 5, from the
 * current sum, and adds the new value to get the updated pseudo-moving sum.
 *
 * This means the value could have errors, but the errors disappear for every
 * @len_window-aligned calls.  For example, if @len_window is 10, the pseudo
 * moving sum with the 11th to the 19th value would have an error, but the
 * sum with the 20th value will not have the error.
 *
 * Return: Pseudo-moving sum after getting the @new_value.
 */
static unsigned int damon_moving_sum(unsigned int mvsum, unsigned int nomvsum,
		unsigned int len_window, unsigned int new_value)
{
	return mvsum - nomvsum / len_window + new_value;
}
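
/*
 * Numeric example for the formula above (illustrative values): with
 * len_window 10, nomvsum 50, and a current mvsum of 49, a new value of 8
 * gives 49 - 50 / 10 + 8 = 52 as the updated pseudo-moving sum.
 */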

/**
 * damon_update_region_access_rate() - Update the access rate of a region.
 * @r:		The DAMON region to update for its access check result.
 * @accessed:	Whether the region was accessed during the last sampling
 *		interval.
 * @attrs:	The damon_attrs of the DAMON context.
 *
 * Update the access rate of a region with the region's last sampling
 * interval access check result.
 *
 * Usually this will be called by &damon_operations->check_accesses callback.
 */
void damon_update_region_access_rate(struct damon_region *r, bool accessed,
		struct damon_attrs *attrs)
{
	unsigned int len_window = 1;

	/*
	 * sample_interval can be zero, but cannot be larger than
	 * aggr_interval, owing to validation of damon_set_attrs().
	 */
	if (attrs->sample_interval)
		len_window = damon_max_nr_accesses(attrs);
	r->nr_accesses_bp = damon_moving_sum(r->nr_accesses_bp,
			r->last_nr_accesses * 10000, len_window,
			accessed ? 10000 : 0);

	if (accessed)
		r->nr_accesses++;
}

static int __init damon_init(void)
{
	damon_region_cache = KMEM_CACHE(damon_region, 0);
	if (unlikely(!damon_region_cache)) {
		pr_err("creating damon_region_cache fails\n");
		return -ENOMEM;
	}

	return 0;
}

subsys_initcall(damon_init);

#include "core-test.h"