// SPDX-License-Identifier: GPL-2.0
/*
 * DAMON sysfs Interface
 *
 * Copyright (c) 2022 SeongJae Park <sj@kernel.org>
 */

#include <linux/slab.h>

#include "sysfs-common.h"

/*
 * scheme region directory
 */

struct damon_sysfs_scheme_region {
	struct kobject kobj;
	struct damon_addr_range ar;
	unsigned int nr_accesses;
	unsigned int age;
	struct list_head list;
};

static struct damon_sysfs_scheme_region *damon_sysfs_scheme_region_alloc(
		struct damon_region *region)
{
	struct damon_sysfs_scheme_region *sysfs_region = kmalloc(
			sizeof(*sysfs_region), GFP_KERNEL);

	if (!sysfs_region)
		return NULL;
	sysfs_region->kobj = (struct kobject){};
	sysfs_region->ar = region->ar;
	sysfs_region->nr_accesses = region->nr_accesses_bp / 10000;
	sysfs_region->age = region->age;
	INIT_LIST_HEAD(&sysfs_region->list);
	return sysfs_region;
}

static ssize_t start_show(struct kobject *kobj, struct kobj_attribute *attr,
		char *buf)
{
	struct damon_sysfs_scheme_region *region = container_of(kobj,
			struct damon_sysfs_scheme_region, kobj);

	return sysfs_emit(buf, "%lu\n", region->ar.start);
}

static ssize_t end_show(struct kobject *kobj, struct kobj_attribute *attr,
		char *buf)
{
	struct damon_sysfs_scheme_region *region = container_of(kobj,
			struct damon_sysfs_scheme_region, kobj);

	return sysfs_emit(buf, "%lu\n", region->ar.end);
}

static ssize_t nr_accesses_show(struct kobject *kobj,
		struct kobj_attribute *attr, char *buf)
{
	struct damon_sysfs_scheme_region *region = container_of(kobj,
			struct damon_sysfs_scheme_region, kobj);

	return sysfs_emit(buf, "%u\n", region->nr_accesses);
}

static ssize_t age_show(struct kobject *kobj, struct kobj_attribute *attr,
		char *buf)
{
	struct damon_sysfs_scheme_region *region = container_of(kobj,
			struct damon_sysfs_scheme_region, kobj);

	return sysfs_emit(buf, "%u\n", region->age);
}

static void damon_sysfs_scheme_region_release(struct kobject *kobj)
{
	struct damon_sysfs_scheme_region *region = container_of(kobj,
			struct damon_sysfs_scheme_region, kobj);

	list_del(&region->list);
	kfree(region);
}

static struct kobj_attribute damon_sysfs_scheme_region_start_attr =
		__ATTR_RO_MODE(start, 0400);

static struct kobj_attribute damon_sysfs_scheme_region_end_attr =
		__ATTR_RO_MODE(end, 0400);

static struct kobj_attribute damon_sysfs_scheme_region_nr_accesses_attr =
		__ATTR_RO_MODE(nr_accesses, 0400);

static struct kobj_attribute damon_sysfs_scheme_region_age_attr =
		__ATTR_RO_MODE(age, 0400);

static struct attribute *damon_sysfs_scheme_region_attrs[] = {
	&damon_sysfs_scheme_region_start_attr.attr,
	&damon_sysfs_scheme_region_end_attr.attr,
	&damon_sysfs_scheme_region_nr_accesses_attr.attr,
	&damon_sysfs_scheme_region_age_attr.attr,
	NULL,
};
ATTRIBUTE_GROUPS(damon_sysfs_scheme_region);

static const struct kobj_type damon_sysfs_scheme_region_ktype = {
	.release = damon_sysfs_scheme_region_release,
	.sysfs_ops = &kobj_sysfs_ops,
	.default_groups = damon_sysfs_scheme_region_groups,
};
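
/*
 * Each region a scheme has tried is exposed as a numbered directory under the
 * scheme's 'tried_regions' directory, holding the read-only (0400) files
 * 'start', 'end', 'nr_accesses' and 'age' defined above, e.g.
 * tried_regions/0/start.  The values are a snapshot taken from the
 * corresponding &struct damon_region by damon_sysfs_scheme_region_alloc().
 */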

/*
 * scheme regions directory
 */

/*
 * enum damos_sysfs_regions_upd_status - Represent DAMOS tried regions update
 *					 status
 * @DAMOS_TRIED_REGIONS_UPD_IDLE:	Waiting for next request.
 * @DAMOS_TRIED_REGIONS_UPD_STARTED:	Update started.
 * @DAMOS_TRIED_REGIONS_UPD_FINISHED:	Update finished.
 *
 * Each DAMON-based operation scheme (&struct damos) has its own apply
 * interval, and we need to expose the scheme tried regions based on only a
 * single snapshot.  For this, we keep the tried regions update status for
 * each scheme.  The status becomes 'idle' at the beginning.
 *
 * Once the tried regions update request is received, the request handling
 * start function (damon_sysfs_scheme_update_regions_start()) sets the status
 * of all schemes as 'idle' again, and registers the ->before_damos_apply()
 * callback.
 *
 * Then, the first followup ->before_damos_apply() callback
 * (damon_sysfs_before_damos_apply()) sets the status to 'started'.  The first
 * ->after_sampling() or ->after_aggregation() callback
 * (damon_sysfs_cmd_request_callback()) after that is called only after the
 * scheme is completely applied to the given snapshot.  Hence the callback
 * knows the situation by seeing the 'started' status, and sets the status to
 * 'finished'.  Then, damon_sysfs_before_damos_apply() understands the
 * situation by seeing the 'finished' status and does nothing.
 *
 * If DAMOS is not applied to any region for any reason, including the access
 * pattern, the watermarks, the quotas, and the filters,
 * ->before_damos_apply() will not be called back.  Until the situation is
 * changed, the update will not be finished.  To avoid this,
 * damon_sysfs_after_sampling() sets the status to 'finished' if more than two
 * apply intervals of the scheme have passed while the status is 'idle'.
 *
 * Finally, the tried regions request handling finisher function
 * (damon_sysfs_schemes_update_regions_stop()) unregisters the callbacks.
 */
enum damos_sysfs_regions_upd_status {
	DAMOS_TRIED_REGIONS_UPD_IDLE,
	DAMOS_TRIED_REGIONS_UPD_STARTED,
	DAMOS_TRIED_REGIONS_UPD_FINISHED,
};

struct damon_sysfs_scheme_regions {
	struct kobject kobj;
	struct list_head regions_list;
	int nr_regions;
	unsigned long total_bytes;
	enum damos_sysfs_regions_upd_status upd_status;
	unsigned long upd_timeout_jiffies;
};

static struct damon_sysfs_scheme_regions *
damon_sysfs_scheme_regions_alloc(void)
{
	struct damon_sysfs_scheme_regions *regions = kmalloc(sizeof(*regions),
			GFP_KERNEL);

	if (!regions)
		return NULL;

	regions->kobj = (struct kobject){};
	INIT_LIST_HEAD(&regions->regions_list);
	regions->nr_regions = 0;
	regions->total_bytes = 0;
	regions->upd_status = DAMOS_TRIED_REGIONS_UPD_IDLE;
	return regions;
}

static ssize_t total_bytes_show(struct kobject *kobj,
		struct kobj_attribute *attr, char *buf)
{
	struct damon_sysfs_scheme_regions *regions = container_of(kobj,
			struct damon_sysfs_scheme_regions, kobj);

	return sysfs_emit(buf, "%lu\n", regions->total_bytes);
}

static void damon_sysfs_scheme_regions_rm_dirs(
		struct damon_sysfs_scheme_regions *regions)
{
	struct damon_sysfs_scheme_region *r, *next;

	list_for_each_entry_safe(r, next, &regions->regions_list, list) {
		/* release function deletes it from the list */
		kobject_put(&r->kobj);
		regions->nr_regions--;
	}
}

static void damon_sysfs_scheme_regions_release(struct kobject *kobj)
{
	kfree(container_of(kobj, struct damon_sysfs_scheme_regions, kobj));
}

static struct kobj_attribute damon_sysfs_scheme_regions_total_bytes_attr =
		__ATTR_RO_MODE(total_bytes, 0400);

static struct attribute *damon_sysfs_scheme_regions_attrs[] = {
	&damon_sysfs_scheme_regions_total_bytes_attr.attr,
	NULL,
};
ATTRIBUTE_GROUPS(damon_sysfs_scheme_regions);

static const struct kobj_type damon_sysfs_scheme_regions_ktype = {
	.release = damon_sysfs_scheme_regions_release,
	.sysfs_ops = &kobj_sysfs_ops,
	.default_groups = damon_sysfs_scheme_regions_groups,
};

/*
 * schemes/stats directory
 */

struct damon_sysfs_stats {
	struct kobject kobj;
	unsigned long nr_tried;
	unsigned long sz_tried;
	unsigned long nr_applied;
	unsigned long sz_applied;
	unsigned long qt_exceeds;
};

static struct damon_sysfs_stats *damon_sysfs_stats_alloc(void)
{
	return kzalloc(sizeof(struct damon_sysfs_stats), GFP_KERNEL);
}

static ssize_t nr_tried_show(struct kobject *kobj, struct kobj_attribute *attr,
		char *buf)
{
	struct damon_sysfs_stats *stats = container_of(kobj,
			struct damon_sysfs_stats, kobj);

	return sysfs_emit(buf, "%lu\n", stats->nr_tried);
}

static ssize_t sz_tried_show(struct kobject *kobj, struct kobj_attribute *attr,
		char *buf)
{
	struct damon_sysfs_stats *stats = container_of(kobj,
			struct damon_sysfs_stats, kobj);

	return sysfs_emit(buf, "%lu\n", stats->sz_tried);
}

static ssize_t nr_applied_show(struct kobject *kobj,
		struct kobj_attribute *attr, char *buf)
{
	struct damon_sysfs_stats *stats = container_of(kobj,
			struct damon_sysfs_stats, kobj);

	return sysfs_emit(buf, "%lu\n", stats->nr_applied);
}

static ssize_t sz_applied_show(struct kobject *kobj,
		struct kobj_attribute *attr, char *buf)
{
	struct damon_sysfs_stats *stats = container_of(kobj,
			struct damon_sysfs_stats, kobj);

	return sysfs_emit(buf, "%lu\n", stats->sz_applied);
}

static ssize_t qt_exceeds_show(struct kobject *kobj,
		struct kobj_attribute *attr, char *buf)
{
	struct damon_sysfs_stats *stats = container_of(kobj,
			struct damon_sysfs_stats, kobj);

	return sysfs_emit(buf, "%lu\n", stats->qt_exceeds);
}

static void damon_sysfs_stats_release(struct kobject *kobj)
{
	kfree(container_of(kobj, struct damon_sysfs_stats, kobj));
}

static struct kobj_attribute damon_sysfs_stats_nr_tried_attr =
		__ATTR_RO_MODE(nr_tried, 0400);

static struct kobj_attribute damon_sysfs_stats_sz_tried_attr =
		__ATTR_RO_MODE(sz_tried, 0400);

static struct kobj_attribute damon_sysfs_stats_nr_applied_attr =
		__ATTR_RO_MODE(nr_applied, 0400);

static struct kobj_attribute damon_sysfs_stats_sz_applied_attr =
		__ATTR_RO_MODE(sz_applied, 0400);

static struct kobj_attribute damon_sysfs_stats_qt_exceeds_attr =
		__ATTR_RO_MODE(qt_exceeds, 0400);

static struct attribute *damon_sysfs_stats_attrs[] = {
	&damon_sysfs_stats_nr_tried_attr.attr,
	&damon_sysfs_stats_sz_tried_attr.attr,
	&damon_sysfs_stats_nr_applied_attr.attr,
	&damon_sysfs_stats_sz_applied_attr.attr,
	&damon_sysfs_stats_qt_exceeds_attr.attr,
	NULL,
};
ATTRIBUTE_GROUPS(damon_sysfs_stats);

static const struct kobj_type damon_sysfs_stats_ktype = {
	.release = damon_sysfs_stats_release,
	.sysfs_ops = &kobj_sysfs_ops,
	.default_groups = damon_sysfs_stats_groups,
};
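
/*
 * The stats files above expose a snapshot of the counters kept in the
 * corresponding &struct damos.  They are refreshed from the kdamond context
 * by damon_sysfs_schemes_update_stats() near the end of this file, typically
 * in response to an explicit user request rather than continuously.
 */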

/*
 * filter directory
 */

struct damon_sysfs_scheme_filter {
	struct kobject kobj;
	enum damos_filter_type type;
	bool matching;
	char *memcg_path;
	struct damon_addr_range addr_range;
	int target_idx;
};

static struct damon_sysfs_scheme_filter *damon_sysfs_scheme_filter_alloc(void)
{
	return kzalloc(sizeof(struct damon_sysfs_scheme_filter), GFP_KERNEL);
}

/* Should match with enum damos_filter_type */
static const char * const damon_sysfs_scheme_filter_type_strs[] = {
	"anon",
	"memcg",
	"young",
	"addr",
	"target",
};

static ssize_t type_show(struct kobject *kobj,
		struct kobj_attribute *attr, char *buf)
{
	struct damon_sysfs_scheme_filter *filter = container_of(kobj,
			struct damon_sysfs_scheme_filter, kobj);

	return sysfs_emit(buf, "%s\n",
			damon_sysfs_scheme_filter_type_strs[filter->type]);
}

static ssize_t type_store(struct kobject *kobj,
		struct kobj_attribute *attr, const char *buf, size_t count)
{
	struct damon_sysfs_scheme_filter *filter = container_of(kobj,
			struct damon_sysfs_scheme_filter, kobj);
	enum damos_filter_type type;
	ssize_t ret = -EINVAL;

	for (type = 0; type < NR_DAMOS_FILTER_TYPES; type++) {
		if (sysfs_streq(buf, damon_sysfs_scheme_filter_type_strs[
					type])) {
			filter->type = type;
			ret = count;
			break;
		}
	}
	return ret;
}

static ssize_t matching_show(struct kobject *kobj,
		struct kobj_attribute *attr, char *buf)
{
	struct damon_sysfs_scheme_filter *filter = container_of(kobj,
			struct damon_sysfs_scheme_filter, kobj);

	return sysfs_emit(buf, "%c\n", filter->matching ? 'Y' : 'N');
}

static ssize_t matching_store(struct kobject *kobj,
		struct kobj_attribute *attr, const char *buf, size_t count)
{
	struct damon_sysfs_scheme_filter *filter = container_of(kobj,
			struct damon_sysfs_scheme_filter, kobj);
	bool matching;
	int err = kstrtobool(buf, &matching);

	if (err)
		return err;

	filter->matching = matching;
	return count;
}

static ssize_t memcg_path_show(struct kobject *kobj,
		struct kobj_attribute *attr, char *buf)
{
	struct damon_sysfs_scheme_filter *filter = container_of(kobj,
			struct damon_sysfs_scheme_filter, kobj);

	return sysfs_emit(buf, "%s\n",
			filter->memcg_path ? filter->memcg_path : "");
}

static ssize_t memcg_path_store(struct kobject *kobj,
		struct kobj_attribute *attr, const char *buf, size_t count)
{
	struct damon_sysfs_scheme_filter *filter = container_of(kobj,
			struct damon_sysfs_scheme_filter, kobj);
	char *path = kmalloc(sizeof(*path) * (count + 1), GFP_KERNEL);

	if (!path)
		return -ENOMEM;

	strscpy(path, buf, count + 1);
	/* free any previously set path to avoid leaking it on rewrite */
	kfree(filter->memcg_path);
	filter->memcg_path = path;
	return count;
}

static ssize_t addr_start_show(struct kobject *kobj,
		struct kobj_attribute *attr, char *buf)
{
	struct damon_sysfs_scheme_filter *filter = container_of(kobj,
			struct damon_sysfs_scheme_filter, kobj);

	return sysfs_emit(buf, "%lu\n", filter->addr_range.start);
}

static ssize_t addr_start_store(struct kobject *kobj,
		struct kobj_attribute *attr, const char *buf, size_t count)
{
	struct damon_sysfs_scheme_filter *filter = container_of(kobj,
			struct damon_sysfs_scheme_filter, kobj);
	int err = kstrtoul(buf, 0, &filter->addr_range.start);

	return err ? err : count;
}

static ssize_t addr_end_show(struct kobject *kobj,
		struct kobj_attribute *attr, char *buf)
{
	struct damon_sysfs_scheme_filter *filter = container_of(kobj,
			struct damon_sysfs_scheme_filter, kobj);

	return sysfs_emit(buf, "%lu\n", filter->addr_range.end);
}

static ssize_t addr_end_store(struct kobject *kobj,
		struct kobj_attribute *attr, const char *buf, size_t count)
{
	struct damon_sysfs_scheme_filter *filter = container_of(kobj,
			struct damon_sysfs_scheme_filter, kobj);
	int err = kstrtoul(buf, 0, &filter->addr_range.end);

	return err ? err : count;
}

static ssize_t damon_target_idx_show(struct kobject *kobj,
		struct kobj_attribute *attr, char *buf)
{
	struct damon_sysfs_scheme_filter *filter = container_of(kobj,
			struct damon_sysfs_scheme_filter, kobj);

	return sysfs_emit(buf, "%d\n", filter->target_idx);
}

static ssize_t damon_target_idx_store(struct kobject *kobj,
		struct kobj_attribute *attr, const char *buf, size_t count)
{
	struct damon_sysfs_scheme_filter *filter = container_of(kobj,
			struct damon_sysfs_scheme_filter, kobj);
	int err = kstrtoint(buf, 0, &filter->target_idx);

	return err ? err : count;
}

static void damon_sysfs_scheme_filter_release(struct kobject *kobj)
{
	struct damon_sysfs_scheme_filter *filter = container_of(kobj,
			struct damon_sysfs_scheme_filter, kobj);

	kfree(filter->memcg_path);
	kfree(filter);
}

static struct kobj_attribute damon_sysfs_scheme_filter_type_attr =
		__ATTR_RW_MODE(type, 0600);

static struct kobj_attribute damon_sysfs_scheme_filter_matching_attr =
		__ATTR_RW_MODE(matching, 0600);

static struct kobj_attribute damon_sysfs_scheme_filter_memcg_path_attr =
		__ATTR_RW_MODE(memcg_path, 0600);

static struct kobj_attribute damon_sysfs_scheme_filter_addr_start_attr =
		__ATTR_RW_MODE(addr_start, 0600);

static struct kobj_attribute damon_sysfs_scheme_filter_addr_end_attr =
		__ATTR_RW_MODE(addr_end, 0600);

static struct kobj_attribute damon_sysfs_scheme_filter_damon_target_idx_attr =
		__ATTR_RW_MODE(damon_target_idx, 0600);

static struct attribute *damon_sysfs_scheme_filter_attrs[] = {
	&damon_sysfs_scheme_filter_type_attr.attr,
	&damon_sysfs_scheme_filter_matching_attr.attr,
	&damon_sysfs_scheme_filter_memcg_path_attr.attr,
	&damon_sysfs_scheme_filter_addr_start_attr.attr,
	&damon_sysfs_scheme_filter_addr_end_attr.attr,
	&damon_sysfs_scheme_filter_damon_target_idx_attr.attr,
	NULL,
};
ATTRIBUTE_GROUPS(damon_sysfs_scheme_filter);

static const struct kobj_type damon_sysfs_scheme_filter_ktype = {
	.release = damon_sysfs_scheme_filter_release,
	.sysfs_ops = &kobj_sysfs_ops,
	.default_groups = damon_sysfs_scheme_filter_groups,
};
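
/*
 * A filter is configured by writing its 'type' (one of
 * damon_sysfs_scheme_filter_type_strs above) and the fields that type uses.
 * For example, a memcg filter would roughly be set up with:
 *
 *	# echo memcg > .../filters/0/type
 *	# echo Y > .../filters/0/matching
 *	# echo /workload > .../filters/0/memcg_path
 *
 * The cgroup path ('/workload' is just an example) is resolved to a memcg ID
 * by damon_sysfs_memcg_path_to_id() when the scheme is committed to DAMON
 * (see damon_sysfs_set_scheme_filters() below).
 */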

/*
 * filters directory
 */

struct damon_sysfs_scheme_filters {
	struct kobject kobj;
	struct damon_sysfs_scheme_filter **filters_arr;
	int nr;
};

static struct damon_sysfs_scheme_filters *
damon_sysfs_scheme_filters_alloc(void)
{
	return kzalloc(sizeof(struct damon_sysfs_scheme_filters), GFP_KERNEL);
}

static void damon_sysfs_scheme_filters_rm_dirs(
		struct damon_sysfs_scheme_filters *filters)
{
	struct damon_sysfs_scheme_filter **filters_arr = filters->filters_arr;
	int i;

	for (i = 0; i < filters->nr; i++)
		kobject_put(&filters_arr[i]->kobj);
	filters->nr = 0;
	kfree(filters_arr);
	filters->filters_arr = NULL;
}

static int damon_sysfs_scheme_filters_add_dirs(
		struct damon_sysfs_scheme_filters *filters, int nr_filters)
{
	struct damon_sysfs_scheme_filter **filters_arr, *filter;
	int err, i;

	damon_sysfs_scheme_filters_rm_dirs(filters);
	if (!nr_filters)
		return 0;

	filters_arr = kmalloc_array(nr_filters, sizeof(*filters_arr),
			GFP_KERNEL | __GFP_NOWARN);
	if (!filters_arr)
		return -ENOMEM;
	filters->filters_arr = filters_arr;

	for (i = 0; i < nr_filters; i++) {
		filter = damon_sysfs_scheme_filter_alloc();
		if (!filter) {
			damon_sysfs_scheme_filters_rm_dirs(filters);
			return -ENOMEM;
		}

		err = kobject_init_and_add(&filter->kobj,
				&damon_sysfs_scheme_filter_ktype,
				&filters->kobj, "%d", i);
		if (err) {
			kobject_put(&filter->kobj);
			damon_sysfs_scheme_filters_rm_dirs(filters);
			return err;
		}

		filters_arr[i] = filter;
		filters->nr++;
	}
	return 0;
}

static ssize_t nr_filters_show(struct kobject *kobj,
		struct kobj_attribute *attr, char *buf)
{
	struct damon_sysfs_scheme_filters *filters = container_of(kobj,
			struct damon_sysfs_scheme_filters, kobj);

	return sysfs_emit(buf, "%d\n", filters->nr);
}

static ssize_t nr_filters_store(struct kobject *kobj,
		struct kobj_attribute *attr, const char *buf, size_t count)
{
	struct damon_sysfs_scheme_filters *filters;
	int nr, err = kstrtoint(buf, 0, &nr);

	if (err)
		return err;
	if (nr < 0)
		return -EINVAL;

	filters = container_of(kobj, struct damon_sysfs_scheme_filters, kobj);

	if (!mutex_trylock(&damon_sysfs_lock))
		return -EBUSY;
	err = damon_sysfs_scheme_filters_add_dirs(filters, nr);
	mutex_unlock(&damon_sysfs_lock);
	if (err)
		return err;

	return count;
}

static void damon_sysfs_scheme_filters_release(struct kobject *kobj)
{
	kfree(container_of(kobj, struct damon_sysfs_scheme_filters, kobj));
}

static struct kobj_attribute damon_sysfs_scheme_filters_nr_attr =
		__ATTR_RW_MODE(nr_filters, 0600);

static struct attribute *damon_sysfs_scheme_filters_attrs[] = {
	&damon_sysfs_scheme_filters_nr_attr.attr,
	NULL,
};
ATTRIBUTE_GROUPS(damon_sysfs_scheme_filters);

static const struct kobj_type damon_sysfs_scheme_filters_ktype = {
	.release = damon_sysfs_scheme_filters_release,
	.sysfs_ops = &kobj_sysfs_ops,
	.default_groups = damon_sysfs_scheme_filters_groups,
};
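
/*
 * Writing a number N to 'nr_filters' replaces any existing filter
 * directories with freshly allocated ones named '0' ... 'N-1':
 * damon_sysfs_scheme_filters_add_dirs() removes the old directories first,
 * so previously written filter settings are discarded.
 */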

/*
 * watermarks directory
 */

struct damon_sysfs_watermarks {
	struct kobject kobj;
	enum damos_wmark_metric metric;
	unsigned long interval_us;
	unsigned long high;
	unsigned long mid;
	unsigned long low;
};

static struct damon_sysfs_watermarks *damon_sysfs_watermarks_alloc(
		enum damos_wmark_metric metric, unsigned long interval_us,
		unsigned long high, unsigned long mid, unsigned long low)
{
	struct damon_sysfs_watermarks *watermarks = kmalloc(
			sizeof(*watermarks), GFP_KERNEL);

	if (!watermarks)
		return NULL;
	watermarks->kobj = (struct kobject){};
	watermarks->metric = metric;
	watermarks->interval_us = interval_us;
	watermarks->high = high;
	watermarks->mid = mid;
	watermarks->low = low;
	return watermarks;
}

/* Should match with enum damos_wmark_metric */
static const char * const damon_sysfs_wmark_metric_strs[] = {
	"none",
	"free_mem_rate",
};

static ssize_t metric_show(struct kobject *kobj, struct kobj_attribute *attr,
		char *buf)
{
	struct damon_sysfs_watermarks *watermarks = container_of(kobj,
			struct damon_sysfs_watermarks, kobj);

	return sysfs_emit(buf, "%s\n",
			damon_sysfs_wmark_metric_strs[watermarks->metric]);
}

static ssize_t metric_store(struct kobject *kobj, struct kobj_attribute *attr,
		const char *buf, size_t count)
{
	struct damon_sysfs_watermarks *watermarks = container_of(kobj,
			struct damon_sysfs_watermarks, kobj);
	enum damos_wmark_metric metric;

	for (metric = 0; metric < NR_DAMOS_WMARK_METRICS; metric++) {
		if (sysfs_streq(buf, damon_sysfs_wmark_metric_strs[metric])) {
			watermarks->metric = metric;
			return count;
		}
	}
	return -EINVAL;
}

static ssize_t interval_us_show(struct kobject *kobj,
		struct kobj_attribute *attr, char *buf)
{
	struct damon_sysfs_watermarks *watermarks = container_of(kobj,
			struct damon_sysfs_watermarks, kobj);

	return sysfs_emit(buf, "%lu\n", watermarks->interval_us);
}

static ssize_t interval_us_store(struct kobject *kobj,
		struct kobj_attribute *attr, const char *buf, size_t count)
{
	struct damon_sysfs_watermarks *watermarks = container_of(kobj,
			struct damon_sysfs_watermarks, kobj);
	int err = kstrtoul(buf, 0, &watermarks->interval_us);

	return err ? err : count;
}

static ssize_t high_show(struct kobject *kobj,
		struct kobj_attribute *attr, char *buf)
{
	struct damon_sysfs_watermarks *watermarks = container_of(kobj,
			struct damon_sysfs_watermarks, kobj);

	return sysfs_emit(buf, "%lu\n", watermarks->high);
}

static ssize_t high_store(struct kobject *kobj,
		struct kobj_attribute *attr, const char *buf, size_t count)
{
	struct damon_sysfs_watermarks *watermarks = container_of(kobj,
			struct damon_sysfs_watermarks, kobj);
	int err = kstrtoul(buf, 0, &watermarks->high);

	return err ? err : count;
}

static ssize_t mid_show(struct kobject *kobj,
		struct kobj_attribute *attr, char *buf)
{
	struct damon_sysfs_watermarks *watermarks = container_of(kobj,
			struct damon_sysfs_watermarks, kobj);

	return sysfs_emit(buf, "%lu\n", watermarks->mid);
}

static ssize_t mid_store(struct kobject *kobj,
		struct kobj_attribute *attr, const char *buf, size_t count)
{
	struct damon_sysfs_watermarks *watermarks = container_of(kobj,
			struct damon_sysfs_watermarks, kobj);
	int err = kstrtoul(buf, 0, &watermarks->mid);

	return err ? err : count;
}

static ssize_t low_show(struct kobject *kobj,
		struct kobj_attribute *attr, char *buf)
{
	struct damon_sysfs_watermarks *watermarks = container_of(kobj,
			struct damon_sysfs_watermarks, kobj);

	return sysfs_emit(buf, "%lu\n", watermarks->low);
}

static ssize_t low_store(struct kobject *kobj,
		struct kobj_attribute *attr, const char *buf, size_t count)
{
	struct damon_sysfs_watermarks *watermarks = container_of(kobj,
			struct damon_sysfs_watermarks, kobj);
	int err = kstrtoul(buf, 0, &watermarks->low);

	return err ? err : count;
}

static void damon_sysfs_watermarks_release(struct kobject *kobj)
{
	kfree(container_of(kobj, struct damon_sysfs_watermarks, kobj));
}

static struct kobj_attribute damon_sysfs_watermarks_metric_attr =
		__ATTR_RW_MODE(metric, 0600);

static struct kobj_attribute damon_sysfs_watermarks_interval_us_attr =
		__ATTR_RW_MODE(interval_us, 0600);

static struct kobj_attribute damon_sysfs_watermarks_high_attr =
		__ATTR_RW_MODE(high, 0600);

static struct kobj_attribute damon_sysfs_watermarks_mid_attr =
		__ATTR_RW_MODE(mid, 0600);

static struct kobj_attribute damon_sysfs_watermarks_low_attr =
		__ATTR_RW_MODE(low, 0600);

static struct attribute *damon_sysfs_watermarks_attrs[] = {
	&damon_sysfs_watermarks_metric_attr.attr,
	&damon_sysfs_watermarks_interval_us_attr.attr,
	&damon_sysfs_watermarks_high_attr.attr,
	&damon_sysfs_watermarks_mid_attr.attr,
	&damon_sysfs_watermarks_low_attr.attr,
	NULL,
};
ATTRIBUTE_GROUPS(damon_sysfs_watermarks);

static const struct kobj_type damon_sysfs_watermarks_ktype = {
	.release = damon_sysfs_watermarks_release,
	.sysfs_ops = &kobj_sysfs_ops,
	.default_groups = damon_sysfs_watermarks_groups,
};
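
/*
 * With the 'none' metric, the watermark values are ignored and the scheme
 * stays active.  With 'free_mem_rate', DAMOS samples the system's free memory
 * rate (in per-thousand) every 'interval_us' microseconds and, roughly
 * speaking, deactivates the scheme while the value is above 'high' or below
 * 'low', and re-activates it once the value falls to 'mid' or below.  See
 * &struct damos_watermarks for the authoritative semantics.
 */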

/*
 * quota goal directory
 */

struct damos_sysfs_quota_goal {
	struct kobject kobj;
	enum damos_quota_goal_metric metric;
	unsigned long target_value;
	unsigned long current_value;
};

/* This should match with enum damos_quota_goal_metric */
static const char * const damos_sysfs_quota_goal_metric_strs[] = {
	"user_input",
	"some_mem_psi_us",
};

static struct damos_sysfs_quota_goal *damos_sysfs_quota_goal_alloc(void)
{
	return kzalloc(sizeof(struct damos_sysfs_quota_goal), GFP_KERNEL);
}

static ssize_t target_metric_show(struct kobject *kobj,
		struct kobj_attribute *attr, char *buf)
{
	struct damos_sysfs_quota_goal *goal = container_of(kobj,
			struct damos_sysfs_quota_goal, kobj);

	return sysfs_emit(buf, "%s\n",
			damos_sysfs_quota_goal_metric_strs[goal->metric]);
}

static ssize_t target_metric_store(struct kobject *kobj,
		struct kobj_attribute *attr, const char *buf, size_t count)
{
	struct damos_sysfs_quota_goal *goal = container_of(kobj,
			struct damos_sysfs_quota_goal, kobj);
	enum damos_quota_goal_metric m;

	for (m = 0; m < NR_DAMOS_QUOTA_GOAL_METRICS; m++) {
		if (sysfs_streq(buf, damos_sysfs_quota_goal_metric_strs[m])) {
			goal->metric = m;
			return count;
		}
	}
	return -EINVAL;
}

static ssize_t target_value_show(struct kobject *kobj,
		struct kobj_attribute *attr, char *buf)
{
	struct damos_sysfs_quota_goal *goal = container_of(kobj, struct
			damos_sysfs_quota_goal, kobj);

	return sysfs_emit(buf, "%lu\n", goal->target_value);
}

static ssize_t target_value_store(struct kobject *kobj,
		struct kobj_attribute *attr, const char *buf, size_t count)
{
	struct damos_sysfs_quota_goal *goal = container_of(kobj, struct
			damos_sysfs_quota_goal, kobj);
	int err = kstrtoul(buf, 0, &goal->target_value);

	return err ? err : count;
}

static ssize_t current_value_show(struct kobject *kobj,
		struct kobj_attribute *attr, char *buf)
{
	struct damos_sysfs_quota_goal *goal = container_of(kobj, struct
			damos_sysfs_quota_goal, kobj);

	return sysfs_emit(buf, "%lu\n", goal->current_value);
}

static ssize_t current_value_store(struct kobject *kobj,
		struct kobj_attribute *attr, const char *buf, size_t count)
{
	struct damos_sysfs_quota_goal *goal = container_of(kobj, struct
			damos_sysfs_quota_goal, kobj);
	int err = kstrtoul(buf, 0, &goal->current_value);

	/* feed callback should check existence of this file and read value */
	return err ? err : count;
}

static void damos_sysfs_quota_goal_release(struct kobject *kobj)
{
	/* or, notify this release to the feed callback */
	kfree(container_of(kobj, struct damos_sysfs_quota_goal, kobj));
}

static struct kobj_attribute damos_sysfs_quota_goal_target_metric_attr =
		__ATTR_RW_MODE(target_metric, 0600);

static struct kobj_attribute damos_sysfs_quota_goal_target_value_attr =
		__ATTR_RW_MODE(target_value, 0600);

static struct kobj_attribute damos_sysfs_quota_goal_current_value_attr =
		__ATTR_RW_MODE(current_value, 0600);

static struct attribute *damos_sysfs_quota_goal_attrs[] = {
	&damos_sysfs_quota_goal_target_metric_attr.attr,
	&damos_sysfs_quota_goal_target_value_attr.attr,
	&damos_sysfs_quota_goal_current_value_attr.attr,
	NULL,
};
ATTRIBUTE_GROUPS(damos_sysfs_quota_goal);

static const struct kobj_type damos_sysfs_quota_goal_ktype = {
	.release = damos_sysfs_quota_goal_release,
	.sysfs_ops = &kobj_sysfs_ops,
	.default_groups = damos_sysfs_quota_goal_groups,
};
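
/*
 * Of the goal metrics above, 'user_input' relies on user space measuring the
 * metric on its own and periodically writing the measured number into
 * 'current_value' (the "feed callback" mentioned in current_value_store()),
 * while 'some_mem_psi_us' is measured by the kernel itself.  Accordingly, a
 * written 'current_value' is carried over only for 'user_input' (see
 * damos_sysfs_set_quota_score() below).
 */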

/*
 * quota goals directory
 */

struct damos_sysfs_quota_goals {
	struct kobject kobj;
	struct damos_sysfs_quota_goal **goals_arr;	/* counted by nr */
	int nr;
};

static struct damos_sysfs_quota_goals *damos_sysfs_quota_goals_alloc(void)
{
	return kzalloc(sizeof(struct damos_sysfs_quota_goals), GFP_KERNEL);
}

static void damos_sysfs_quota_goals_rm_dirs(
		struct damos_sysfs_quota_goals *goals)
{
	struct damos_sysfs_quota_goal **goals_arr = goals->goals_arr;
	int i;

	for (i = 0; i < goals->nr; i++)
		kobject_put(&goals_arr[i]->kobj);
	goals->nr = 0;
	kfree(goals_arr);
	goals->goals_arr = NULL;
}

static int damos_sysfs_quota_goals_add_dirs(
		struct damos_sysfs_quota_goals *goals, int nr_goals)
{
	struct damos_sysfs_quota_goal **goals_arr, *goal;
	int err, i;

	damos_sysfs_quota_goals_rm_dirs(goals);
	if (!nr_goals)
		return 0;

	goals_arr = kmalloc_array(nr_goals, sizeof(*goals_arr),
			GFP_KERNEL | __GFP_NOWARN);
	if (!goals_arr)
		return -ENOMEM;
	goals->goals_arr = goals_arr;

	for (i = 0; i < nr_goals; i++) {
		goal = damos_sysfs_quota_goal_alloc();
		if (!goal) {
			damos_sysfs_quota_goals_rm_dirs(goals);
			return -ENOMEM;
		}

		err = kobject_init_and_add(&goal->kobj,
				&damos_sysfs_quota_goal_ktype, &goals->kobj,
				"%d", i);
		if (err) {
			kobject_put(&goal->kobj);
			damos_sysfs_quota_goals_rm_dirs(goals);
			return err;
		}

		goals_arr[i] = goal;
		goals->nr++;
	}
	return 0;
}

static ssize_t nr_goals_show(struct kobject *kobj,
		struct kobj_attribute *attr, char *buf)
{
	struct damos_sysfs_quota_goals *goals = container_of(kobj,
			struct damos_sysfs_quota_goals, kobj);

	return sysfs_emit(buf, "%d\n", goals->nr);
}

static ssize_t nr_goals_store(struct kobject *kobj,
		struct kobj_attribute *attr, const char *buf, size_t count)
{
	struct damos_sysfs_quota_goals *goals;
	int nr, err = kstrtoint(buf, 0, &nr);

	if (err)
		return err;
	if (nr < 0)
		return -EINVAL;

	goals = container_of(kobj, struct damos_sysfs_quota_goals, kobj);

	if (!mutex_trylock(&damon_sysfs_lock))
		return -EBUSY;
	err = damos_sysfs_quota_goals_add_dirs(goals, nr);
	mutex_unlock(&damon_sysfs_lock);
	if (err)
		return err;

	return count;
}

static void damos_sysfs_quota_goals_release(struct kobject *kobj)
{
	kfree(container_of(kobj, struct damos_sysfs_quota_goals, kobj));
}

static struct kobj_attribute damos_sysfs_quota_goals_nr_attr =
		__ATTR_RW_MODE(nr_goals, 0600);

static struct attribute *damos_sysfs_quota_goals_attrs[] = {
	&damos_sysfs_quota_goals_nr_attr.attr,
	NULL,
};
ATTRIBUTE_GROUPS(damos_sysfs_quota_goals);

static const struct kobj_type damos_sysfs_quota_goals_ktype = {
	.release = damos_sysfs_quota_goals_release,
	.sysfs_ops = &kobj_sysfs_ops,
	.default_groups = damos_sysfs_quota_goals_groups,
};
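
/*
 * As with the other 'nr_*' files, writing N to 'nr_goals' recreates goal
 * directories '0' ... 'N-1' from scratch.  A goal whose 'target_value' is
 * left at zero is simply skipped when the goals are committed to the quota
 * (see damos_sysfs_set_quota_score() below).
 */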

/*
 * scheme/weights directory
 */

struct damon_sysfs_weights {
	struct kobject kobj;
	unsigned int sz;
	unsigned int nr_accesses;
	unsigned int age;
};

static struct damon_sysfs_weights *damon_sysfs_weights_alloc(unsigned int sz,
		unsigned int nr_accesses, unsigned int age)
{
	struct damon_sysfs_weights *weights = kmalloc(sizeof(*weights),
			GFP_KERNEL);

	if (!weights)
		return NULL;
	weights->kobj = (struct kobject){};
	weights->sz = sz;
	weights->nr_accesses = nr_accesses;
	weights->age = age;
	return weights;
}

static ssize_t sz_permil_show(struct kobject *kobj,
		struct kobj_attribute *attr, char *buf)
{
	struct damon_sysfs_weights *weights = container_of(kobj,
			struct damon_sysfs_weights, kobj);

	return sysfs_emit(buf, "%u\n", weights->sz);
}

static ssize_t sz_permil_store(struct kobject *kobj,
		struct kobj_attribute *attr, const char *buf, size_t count)
{
	struct damon_sysfs_weights *weights = container_of(kobj,
			struct damon_sysfs_weights, kobj);
	int err = kstrtouint(buf, 0, &weights->sz);

	return err ? err : count;
}

static ssize_t nr_accesses_permil_show(struct kobject *kobj,
		struct kobj_attribute *attr, char *buf)
{
	struct damon_sysfs_weights *weights = container_of(kobj,
			struct damon_sysfs_weights, kobj);

	return sysfs_emit(buf, "%u\n", weights->nr_accesses);
}

static ssize_t nr_accesses_permil_store(struct kobject *kobj,
		struct kobj_attribute *attr, const char *buf, size_t count)
{
	struct damon_sysfs_weights *weights = container_of(kobj,
			struct damon_sysfs_weights, kobj);
	int err = kstrtouint(buf, 0, &weights->nr_accesses);

	return err ? err : count;
}

static ssize_t age_permil_show(struct kobject *kobj,
		struct kobj_attribute *attr, char *buf)
{
	struct damon_sysfs_weights *weights = container_of(kobj,
			struct damon_sysfs_weights, kobj);

	return sysfs_emit(buf, "%u\n", weights->age);
}

static ssize_t age_permil_store(struct kobject *kobj,
		struct kobj_attribute *attr, const char *buf, size_t count)
{
	struct damon_sysfs_weights *weights = container_of(kobj,
			struct damon_sysfs_weights, kobj);
	int err = kstrtouint(buf, 0, &weights->age);

	return err ? err : count;
}

static void damon_sysfs_weights_release(struct kobject *kobj)
{
	kfree(container_of(kobj, struct damon_sysfs_weights, kobj));
}

static struct kobj_attribute damon_sysfs_weights_sz_attr =
		__ATTR_RW_MODE(sz_permil, 0600);

static struct kobj_attribute damon_sysfs_weights_nr_accesses_attr =
		__ATTR_RW_MODE(nr_accesses_permil, 0600);

static struct kobj_attribute damon_sysfs_weights_age_attr =
		__ATTR_RW_MODE(age_permil, 0600);

static struct attribute *damon_sysfs_weights_attrs[] = {
	&damon_sysfs_weights_sz_attr.attr,
	&damon_sysfs_weights_nr_accesses_attr.attr,
	&damon_sysfs_weights_age_attr.attr,
	NULL,
};
ATTRIBUTE_GROUPS(damon_sysfs_weights);

static const struct kobj_type damon_sysfs_weights_ktype = {
	.release = damon_sysfs_weights_release,
	.sysfs_ops = &kobj_sysfs_ops,
	.default_groups = damon_sysfs_weights_groups,
};
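
/*
 * The three permil weights above are copied into the scheme's
 * &struct damos_quota (weight_sz, weight_nr_accesses and weight_age) when the
 * scheme is committed, where they decide how regions are prioritized under
 * the quota.
 */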

/*
 * quotas directory
 */

struct damon_sysfs_quotas {
	struct kobject kobj;
	struct damon_sysfs_weights *weights;
	struct damos_sysfs_quota_goals *goals;
	unsigned long ms;
	unsigned long sz;
	unsigned long reset_interval_ms;
	unsigned long effective_sz;	/* Effective size quota in bytes */
};

static struct damon_sysfs_quotas *damon_sysfs_quotas_alloc(void)
{
	return kzalloc(sizeof(struct damon_sysfs_quotas), GFP_KERNEL);
}

static int damon_sysfs_quotas_add_dirs(struct damon_sysfs_quotas *quotas)
{
	struct damon_sysfs_weights *weights;
	struct damos_sysfs_quota_goals *goals;
	int err;

	weights = damon_sysfs_weights_alloc(0, 0, 0);
	if (!weights)
		return -ENOMEM;

	err = kobject_init_and_add(&weights->kobj, &damon_sysfs_weights_ktype,
			&quotas->kobj, "weights");
	if (err) {
		kobject_put(&weights->kobj);
		return err;
	}
	quotas->weights = weights;

	goals = damos_sysfs_quota_goals_alloc();
	if (!goals) {
		kobject_put(&weights->kobj);
		return -ENOMEM;
	}
	err = kobject_init_and_add(&goals->kobj,
			&damos_sysfs_quota_goals_ktype, &quotas->kobj,
			"goals");
	if (err) {
		kobject_put(&weights->kobj);
		kobject_put(&goals->kobj);
	} else {
		quotas->goals = goals;
	}

	return err;
}

static void damon_sysfs_quotas_rm_dirs(struct damon_sysfs_quotas *quotas)
{
	kobject_put(&quotas->weights->kobj);
	damos_sysfs_quota_goals_rm_dirs(quotas->goals);
	kobject_put(&quotas->goals->kobj);
}

static ssize_t ms_show(struct kobject *kobj, struct kobj_attribute *attr,
		char *buf)
{
	struct damon_sysfs_quotas *quotas = container_of(kobj,
			struct damon_sysfs_quotas, kobj);

	return sysfs_emit(buf, "%lu\n", quotas->ms);
}

static ssize_t ms_store(struct kobject *kobj, struct kobj_attribute *attr,
		const char *buf, size_t count)
{
	struct damon_sysfs_quotas *quotas = container_of(kobj,
			struct damon_sysfs_quotas, kobj);
	int err = kstrtoul(buf, 0, &quotas->ms);

	if (err)
		return -EINVAL;
	return count;
}

static ssize_t bytes_show(struct kobject *kobj, struct kobj_attribute *attr,
		char *buf)
{
	struct damon_sysfs_quotas *quotas = container_of(kobj,
			struct damon_sysfs_quotas, kobj);

	return sysfs_emit(buf, "%lu\n", quotas->sz);
}

static ssize_t bytes_store(struct kobject *kobj,
		struct kobj_attribute *attr, const char *buf, size_t count)
{
	struct damon_sysfs_quotas *quotas = container_of(kobj,
			struct damon_sysfs_quotas, kobj);
	int err = kstrtoul(buf, 0, &quotas->sz);

	if (err)
		return -EINVAL;
	return count;
}

static ssize_t reset_interval_ms_show(struct kobject *kobj,
		struct kobj_attribute *attr, char *buf)
{
	struct damon_sysfs_quotas *quotas = container_of(kobj,
			struct damon_sysfs_quotas, kobj);

	return sysfs_emit(buf, "%lu\n", quotas->reset_interval_ms);
}

static ssize_t reset_interval_ms_store(struct kobject *kobj,
		struct kobj_attribute *attr, const char *buf, size_t count)
{
	struct damon_sysfs_quotas *quotas = container_of(kobj,
			struct damon_sysfs_quotas, kobj);
	int err = kstrtoul(buf, 0, &quotas->reset_interval_ms);

	if (err)
		return -EINVAL;
	return count;
}

static ssize_t effective_bytes_show(struct kobject *kobj,
		struct kobj_attribute *attr, char *buf)
{
	struct damon_sysfs_quotas *quotas = container_of(kobj,
			struct damon_sysfs_quotas, kobj);

	return sysfs_emit(buf, "%lu\n", quotas->effective_sz);
}

static void damon_sysfs_quotas_release(struct kobject *kobj)
{
	kfree(container_of(kobj, struct damon_sysfs_quotas, kobj));
}

static struct kobj_attribute damon_sysfs_quotas_ms_attr =
		__ATTR_RW_MODE(ms, 0600);

static struct kobj_attribute damon_sysfs_quotas_sz_attr =
		__ATTR_RW_MODE(bytes, 0600);

static struct kobj_attribute damon_sysfs_quotas_reset_interval_ms_attr =
		__ATTR_RW_MODE(reset_interval_ms, 0600);

static struct kobj_attribute damon_sysfs_quotas_effective_bytes_attr =
		__ATTR_RO_MODE(effective_bytes, 0400);

static struct attribute *damon_sysfs_quotas_attrs[] = {
	&damon_sysfs_quotas_ms_attr.attr,
	&damon_sysfs_quotas_sz_attr.attr,
	&damon_sysfs_quotas_reset_interval_ms_attr.attr,
	&damon_sysfs_quotas_effective_bytes_attr.attr,
	NULL,
};
ATTRIBUTE_GROUPS(damon_sysfs_quotas);

static const struct kobj_type damon_sysfs_quotas_ktype = {
	.release = damon_sysfs_quotas_release,
	.sysfs_ops = &kobj_sysfs_ops,
	.default_groups = damon_sysfs_quotas_groups,
};
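
/*
 * 'ms' and 'bytes' above are the time quota in milliseconds and the size
 * quota in bytes, both applied per 'reset_interval_ms'.  The read-only
 * 'effective_bytes' file exposes the size quota that DAMOS has internally
 * derived from the quotas and goals (&struct damos_quota esz); it is
 * refreshed by damos_sysfs_update_effective_quotas() below, typically upon a
 * user request.
 */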

/*
 * access_pattern directory
 */

struct damon_sysfs_access_pattern {
	struct kobject kobj;
	struct damon_sysfs_ul_range *sz;
	struct damon_sysfs_ul_range *nr_accesses;
	struct damon_sysfs_ul_range *age;
};

static
struct damon_sysfs_access_pattern *damon_sysfs_access_pattern_alloc(void)
{
	struct damon_sysfs_access_pattern *access_pattern =
		kmalloc(sizeof(*access_pattern), GFP_KERNEL);

	if (!access_pattern)
		return NULL;
	access_pattern->kobj = (struct kobject){};
	return access_pattern;
}

static int damon_sysfs_access_pattern_add_range_dir(
		struct damon_sysfs_access_pattern *access_pattern,
		struct damon_sysfs_ul_range **range_dir_ptr,
		char *name)
{
	struct damon_sysfs_ul_range *range = damon_sysfs_ul_range_alloc(0, 0);
	int err;

	if (!range)
		return -ENOMEM;
	err = kobject_init_and_add(&range->kobj, &damon_sysfs_ul_range_ktype,
			&access_pattern->kobj, name);
	if (err)
		kobject_put(&range->kobj);
	else
		*range_dir_ptr = range;
	return err;
}

static int damon_sysfs_access_pattern_add_dirs(
		struct damon_sysfs_access_pattern *access_pattern)
{
	int err;

	err = damon_sysfs_access_pattern_add_range_dir(access_pattern,
			&access_pattern->sz, "sz");
	if (err)
		goto put_sz_out;

	err = damon_sysfs_access_pattern_add_range_dir(access_pattern,
			&access_pattern->nr_accesses, "nr_accesses");
	if (err)
		goto put_nr_accesses_sz_out;

	err = damon_sysfs_access_pattern_add_range_dir(access_pattern,
			&access_pattern->age, "age");
	if (err)
		goto put_age_nr_accesses_sz_out;
	return 0;

put_age_nr_accesses_sz_out:
	kobject_put(&access_pattern->age->kobj);
	access_pattern->age = NULL;
put_nr_accesses_sz_out:
	kobject_put(&access_pattern->nr_accesses->kobj);
	access_pattern->nr_accesses = NULL;
put_sz_out:
	kobject_put(&access_pattern->sz->kobj);
	access_pattern->sz = NULL;
	return err;
}

static void damon_sysfs_access_pattern_rm_dirs(
		struct damon_sysfs_access_pattern *access_pattern)
{
	kobject_put(&access_pattern->sz->kobj);
	kobject_put(&access_pattern->nr_accesses->kobj);
	kobject_put(&access_pattern->age->kobj);
}

static void damon_sysfs_access_pattern_release(struct kobject *kobj)
{
	kfree(container_of(kobj, struct damon_sysfs_access_pattern, kobj));
}

static struct attribute *damon_sysfs_access_pattern_attrs[] = {
	NULL,
};
ATTRIBUTE_GROUPS(damon_sysfs_access_pattern);

static const struct kobj_type damon_sysfs_access_pattern_ktype = {
	.release = damon_sysfs_access_pattern_release,
	.sysfs_ops = &kobj_sysfs_ops,
	.default_groups = damon_sysfs_access_pattern_groups,
};

/*
 * scheme directory
 */

struct damon_sysfs_scheme {
	struct kobject kobj;
	enum damos_action action;
	struct damon_sysfs_access_pattern *access_pattern;
	unsigned long apply_interval_us;
	struct damon_sysfs_quotas *quotas;
	struct damon_sysfs_watermarks *watermarks;
	struct damon_sysfs_scheme_filters *filters;
	struct damon_sysfs_stats *stats;
	struct damon_sysfs_scheme_regions *tried_regions;
};

/* This should match with enum damos_action */
static const char * const damon_sysfs_damos_action_strs[] = {
	"willneed",
	"cold",
	"pageout",
	"hugepage",
	"nohugepage",
	"lru_prio",
	"lru_deprio",
	"stat",
};

static struct damon_sysfs_scheme *damon_sysfs_scheme_alloc(
		enum damos_action action, unsigned long apply_interval_us)
{
	struct damon_sysfs_scheme *scheme = kmalloc(sizeof(*scheme),
			GFP_KERNEL);

	if (!scheme)
		return NULL;
	scheme->kobj = (struct kobject){};
	scheme->action = action;
	scheme->apply_interval_us = apply_interval_us;
	return scheme;
}

static int damon_sysfs_scheme_set_access_pattern(
		struct damon_sysfs_scheme *scheme)
{
	struct damon_sysfs_access_pattern *access_pattern;
	int err;

	access_pattern = damon_sysfs_access_pattern_alloc();
	if (!access_pattern)
		return -ENOMEM;
	err = kobject_init_and_add(&access_pattern->kobj,
			&damon_sysfs_access_pattern_ktype, &scheme->kobj,
			"access_pattern");
	if (err)
		goto out;
	err = damon_sysfs_access_pattern_add_dirs(access_pattern);
	if (err)
		goto out;
	scheme->access_pattern = access_pattern;
	return 0;

out:
	kobject_put(&access_pattern->kobj);
	return err;
}

static int damon_sysfs_scheme_set_quotas(struct damon_sysfs_scheme *scheme)
{
	struct damon_sysfs_quotas *quotas = damon_sysfs_quotas_alloc();
	int err;

	if (!quotas)
		return -ENOMEM;
	err = kobject_init_and_add(&quotas->kobj, &damon_sysfs_quotas_ktype,
			&scheme->kobj, "quotas");
	if (err)
		goto out;
	err = damon_sysfs_quotas_add_dirs(quotas);
	if (err)
		goto out;
	scheme->quotas = quotas;
	return 0;

out:
	kobject_put(&quotas->kobj);
	return err;
}

static int damon_sysfs_scheme_set_watermarks(struct damon_sysfs_scheme *scheme)
{
	struct damon_sysfs_watermarks *watermarks =
		damon_sysfs_watermarks_alloc(DAMOS_WMARK_NONE, 0, 0, 0, 0);
	int err;

	if (!watermarks)
		return -ENOMEM;
	err = kobject_init_and_add(&watermarks->kobj,
			&damon_sysfs_watermarks_ktype, &scheme->kobj,
			"watermarks");
	if (err)
		kobject_put(&watermarks->kobj);
	else
		scheme->watermarks = watermarks;
	return err;
}

static int damon_sysfs_scheme_set_filters(struct damon_sysfs_scheme *scheme)
{
	struct damon_sysfs_scheme_filters *filters =
		damon_sysfs_scheme_filters_alloc();
	int err;

	if (!filters)
		return -ENOMEM;
	err = kobject_init_and_add(&filters->kobj,
			&damon_sysfs_scheme_filters_ktype, &scheme->kobj,
			"filters");
	if (err)
		kobject_put(&filters->kobj);
	else
		scheme->filters = filters;
	return err;
}

static int damon_sysfs_scheme_set_stats(struct damon_sysfs_scheme *scheme)
{
	struct damon_sysfs_stats *stats = damon_sysfs_stats_alloc();
	int err;

	if (!stats)
		return -ENOMEM;
	err = kobject_init_and_add(&stats->kobj, &damon_sysfs_stats_ktype,
			&scheme->kobj, "stats");
	if (err)
		kobject_put(&stats->kobj);
	else
		scheme->stats = stats;
	return err;
}

static int damon_sysfs_scheme_set_tried_regions(
		struct damon_sysfs_scheme *scheme)
{
	struct damon_sysfs_scheme_regions *tried_regions =
		damon_sysfs_scheme_regions_alloc();
	int err;

	if (!tried_regions)
		return -ENOMEM;
	err = kobject_init_and_add(&tried_regions->kobj,
			&damon_sysfs_scheme_regions_ktype, &scheme->kobj,
			"tried_regions");
	if (err)
		kobject_put(&tried_regions->kobj);
	else
		scheme->tried_regions = tried_regions;
	return err;
}

static int damon_sysfs_scheme_add_dirs(struct damon_sysfs_scheme *scheme)
{
	int err;

	err = damon_sysfs_scheme_set_access_pattern(scheme);
	if (err)
		return err;
	err = damon_sysfs_scheme_set_quotas(scheme);
	if (err)
		goto put_access_pattern_out;
	err = damon_sysfs_scheme_set_watermarks(scheme);
	if (err)
		goto put_quotas_access_pattern_out;
	err = damon_sysfs_scheme_set_filters(scheme);
	if (err)
		goto put_watermarks_quotas_access_pattern_out;
	err = damon_sysfs_scheme_set_stats(scheme);
	if (err)
		goto put_filters_watermarks_quotas_access_pattern_out;
	err = damon_sysfs_scheme_set_tried_regions(scheme);
	if (err)
		goto put_tried_regions_out;
	return 0;

put_tried_regions_out:
	kobject_put(&scheme->tried_regions->kobj);
	scheme->tried_regions = NULL;
put_filters_watermarks_quotas_access_pattern_out:
	kobject_put(&scheme->filters->kobj);
	scheme->filters = NULL;
put_watermarks_quotas_access_pattern_out:
	kobject_put(&scheme->watermarks->kobj);
	scheme->watermarks = NULL;
put_quotas_access_pattern_out:
	kobject_put(&scheme->quotas->kobj);
	scheme->quotas = NULL;
put_access_pattern_out:
	kobject_put(&scheme->access_pattern->kobj);
	scheme->access_pattern = NULL;
	return err;
}

static void damon_sysfs_scheme_rm_dirs(struct damon_sysfs_scheme *scheme)
{
	damon_sysfs_access_pattern_rm_dirs(scheme->access_pattern);
	kobject_put(&scheme->access_pattern->kobj);
	damon_sysfs_quotas_rm_dirs(scheme->quotas);
	kobject_put(&scheme->quotas->kobj);
	kobject_put(&scheme->watermarks->kobj);
	damon_sysfs_scheme_filters_rm_dirs(scheme->filters);
	kobject_put(&scheme->filters->kobj);
	kobject_put(&scheme->stats->kobj);
	damon_sysfs_scheme_regions_rm_dirs(scheme->tried_regions);
	kobject_put(&scheme->tried_regions->kobj);
}

static ssize_t action_show(struct kobject *kobj, struct kobj_attribute *attr,
		char *buf)
{
	struct damon_sysfs_scheme *scheme = container_of(kobj,
			struct damon_sysfs_scheme, kobj);

	return sysfs_emit(buf, "%s\n",
			damon_sysfs_damos_action_strs[scheme->action]);
}

static ssize_t action_store(struct kobject *kobj, struct kobj_attribute *attr,
		const char *buf, size_t count)
{
	struct damon_sysfs_scheme *scheme = container_of(kobj,
			struct damon_sysfs_scheme, kobj);
	enum damos_action action;

	for (action = 0; action < NR_DAMOS_ACTIONS; action++) {
		if (sysfs_streq(buf, damon_sysfs_damos_action_strs[action])) {
			scheme->action = action;
			return count;
		}
	}
	return -EINVAL;
}
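
/*
 * 'action' accepts exactly one of the strings in
 * damon_sysfs_damos_action_strs above (e.g. "pageout"); any other input
 * fails with -EINVAL.
 */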

static ssize_t apply_interval_us_show(struct kobject *kobj,
		struct kobj_attribute *attr, char *buf)
{
	struct damon_sysfs_scheme *scheme = container_of(kobj,
			struct damon_sysfs_scheme, kobj);

	return sysfs_emit(buf, "%lu\n", scheme->apply_interval_us);
}

static ssize_t apply_interval_us_store(struct kobject *kobj,
		struct kobj_attribute *attr, const char *buf, size_t count)
{
	struct damon_sysfs_scheme *scheme = container_of(kobj,
			struct damon_sysfs_scheme, kobj);
	int err = kstrtoul(buf, 0, &scheme->apply_interval_us);

	return err ? err : count;
}

static void damon_sysfs_scheme_release(struct kobject *kobj)
{
	kfree(container_of(kobj, struct damon_sysfs_scheme, kobj));
}

static struct kobj_attribute damon_sysfs_scheme_action_attr =
		__ATTR_RW_MODE(action, 0600);

static struct kobj_attribute damon_sysfs_scheme_apply_interval_us_attr =
		__ATTR_RW_MODE(apply_interval_us, 0600);

static struct attribute *damon_sysfs_scheme_attrs[] = {
	&damon_sysfs_scheme_action_attr.attr,
	&damon_sysfs_scheme_apply_interval_us_attr.attr,
	NULL,
};
ATTRIBUTE_GROUPS(damon_sysfs_scheme);

static const struct kobj_type damon_sysfs_scheme_ktype = {
	.release = damon_sysfs_scheme_release,
	.sysfs_ops = &kobj_sysfs_ops,
	.default_groups = damon_sysfs_scheme_groups,
};

/*
 * schemes directory
 */

struct damon_sysfs_schemes *damon_sysfs_schemes_alloc(void)
{
	return kzalloc(sizeof(struct damon_sysfs_schemes), GFP_KERNEL);
}

void damon_sysfs_schemes_rm_dirs(struct damon_sysfs_schemes *schemes)
{
	struct damon_sysfs_scheme **schemes_arr = schemes->schemes_arr;
	int i;

	for (i = 0; i < schemes->nr; i++) {
		damon_sysfs_scheme_rm_dirs(schemes_arr[i]);
		kobject_put(&schemes_arr[i]->kobj);
	}
	schemes->nr = 0;
	kfree(schemes_arr);
	schemes->schemes_arr = NULL;
}

static int damon_sysfs_schemes_add_dirs(struct damon_sysfs_schemes *schemes,
		int nr_schemes)
{
	struct damon_sysfs_scheme **schemes_arr, *scheme;
	int err, i;

	damon_sysfs_schemes_rm_dirs(schemes);
	if (!nr_schemes)
		return 0;

	schemes_arr = kmalloc_array(nr_schemes, sizeof(*schemes_arr),
			GFP_KERNEL | __GFP_NOWARN);
	if (!schemes_arr)
		return -ENOMEM;
	schemes->schemes_arr = schemes_arr;

	for (i = 0; i < nr_schemes; i++) {
		/*
		 * apply_interval_us of 0 means the same as the aggregation
		 * interval (same as the behavior before apply_interval_us
		 * was introduced)
		 */
		scheme = damon_sysfs_scheme_alloc(DAMOS_STAT, 0);
		if (!scheme) {
			damon_sysfs_schemes_rm_dirs(schemes);
			return -ENOMEM;
		}

		err = kobject_init_and_add(&scheme->kobj,
				&damon_sysfs_scheme_ktype, &schemes->kobj,
				"%d", i);
		if (err)
			goto out;
		err = damon_sysfs_scheme_add_dirs(scheme);
		if (err)
			goto out;

		schemes_arr[i] = scheme;
		schemes->nr++;
	}
	return 0;

out:
	damon_sysfs_schemes_rm_dirs(schemes);
	kobject_put(&scheme->kobj);
	return err;
}

static ssize_t nr_schemes_show(struct kobject *kobj,
		struct kobj_attribute *attr, char *buf)
{
	struct damon_sysfs_schemes *schemes = container_of(kobj,
			struct damon_sysfs_schemes, kobj);

	return sysfs_emit(buf, "%d\n", schemes->nr);
}

static ssize_t nr_schemes_store(struct kobject *kobj,
		struct kobj_attribute *attr, const char *buf, size_t count)
{
	struct damon_sysfs_schemes *schemes;
	int nr, err = kstrtoint(buf, 0, &nr);

	if (err)
		return err;
	if (nr < 0)
		return -EINVAL;

	schemes = container_of(kobj, struct damon_sysfs_schemes, kobj);

	if (!mutex_trylock(&damon_sysfs_lock))
		return -EBUSY;
	err = damon_sysfs_schemes_add_dirs(schemes, nr);
	mutex_unlock(&damon_sysfs_lock);
	if (err)
		return err;
	return count;
}
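
/*
 * Writing N to 'nr_schemes' removes the existing scheme directories and
 * creates new ones named '0' ... 'N-1', each starting with the default
 * "stat" action and an 'apply_interval_us' of zero.  For example, roughly:
 *
 *	# echo 1 > .../schemes/nr_schemes
 *	# ls .../schemes/0/
 *	access_pattern  action  apply_interval_us  filters  quotas  stats
 *	tried_regions  watermarks
 */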

static void damon_sysfs_schemes_release(struct kobject *kobj)
{
	kfree(container_of(kobj, struct damon_sysfs_schemes, kobj));
}

static struct kobj_attribute damon_sysfs_schemes_nr_attr =
		__ATTR_RW_MODE(nr_schemes, 0600);

static struct attribute *damon_sysfs_schemes_attrs[] = {
	&damon_sysfs_schemes_nr_attr.attr,
	NULL,
};
ATTRIBUTE_GROUPS(damon_sysfs_schemes);

const struct kobj_type damon_sysfs_schemes_ktype = {
	.release = damon_sysfs_schemes_release,
	.sysfs_ops = &kobj_sysfs_ops,
	.default_groups = damon_sysfs_schemes_groups,
};

static bool damon_sysfs_memcg_path_eq(struct mem_cgroup *memcg,
		char *memcg_path_buf, char *path)
{
#ifdef CONFIG_MEMCG
	cgroup_path(memcg->css.cgroup, memcg_path_buf, PATH_MAX);
	if (sysfs_streq(memcg_path_buf, path))
		return true;
#endif /* CONFIG_MEMCG */
	return false;
}

static int damon_sysfs_memcg_path_to_id(char *memcg_path, unsigned short *id)
{
	struct mem_cgroup *memcg;
	char *path;
	bool found = false;

	if (!memcg_path)
		return -EINVAL;

	path = kmalloc(sizeof(*path) * PATH_MAX, GFP_KERNEL);
	if (!path)
		return -ENOMEM;

	for (memcg = mem_cgroup_iter(NULL, NULL, NULL); memcg;
			memcg = mem_cgroup_iter(NULL, memcg, NULL)) {
		/* skip removed memcg */
		if (!mem_cgroup_id(memcg))
			continue;
		if (damon_sysfs_memcg_path_eq(memcg, path, memcg_path)) {
			*id = mem_cgroup_id(memcg);
			found = true;
			break;
		}
	}

	kfree(path);
	return found ? 0 : -EINVAL;
}

static int damon_sysfs_set_scheme_filters(struct damos *scheme,
		struct damon_sysfs_scheme_filters *sysfs_filters)
{
	int i;
	struct damos_filter *filter, *next;

	damos_for_each_filter_safe(filter, next, scheme)
		damos_destroy_filter(filter);

	for (i = 0; i < sysfs_filters->nr; i++) {
		struct damon_sysfs_scheme_filter *sysfs_filter =
			sysfs_filters->filters_arr[i];
		struct damos_filter *filter =
			damos_new_filter(sysfs_filter->type,
					sysfs_filter->matching);
		int err;

		if (!filter)
			return -ENOMEM;
		if (filter->type == DAMOS_FILTER_TYPE_MEMCG) {
			err = damon_sysfs_memcg_path_to_id(
					sysfs_filter->memcg_path,
					&filter->memcg_id);
			if (err) {
				damos_destroy_filter(filter);
				return err;
			}
		} else if (filter->type == DAMOS_FILTER_TYPE_ADDR) {
			if (sysfs_filter->addr_range.end <
					sysfs_filter->addr_range.start) {
				damos_destroy_filter(filter);
				return -EINVAL;
			}
			filter->addr_range = sysfs_filter->addr_range;
		} else if (filter->type == DAMOS_FILTER_TYPE_TARGET) {
			filter->target_idx = sysfs_filter->target_idx;
		}

		damos_add_filter(scheme, filter);
	}
	return 0;
}
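
/*
 * damos_sysfs_set_quota_score() below replaces the quota goals of a
 * &struct damos_quota with the ones configured via sysfs.  Goals with a zero
 * 'target_value' are ignored, and for the 'user_input' metric the
 * user-provided 'current_value' is carried over as well.
 */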
static int damos_sysfs_set_quota_score(
		struct damos_sysfs_quota_goals *sysfs_goals,
		struct damos_quota *quota)
{
	struct damos_quota_goal *goal, *next;
	int i;

	damos_for_each_quota_goal_safe(goal, next, quota)
		damos_destroy_quota_goal(goal);

	for (i = 0; i < sysfs_goals->nr; i++) {
		struct damos_sysfs_quota_goal *sysfs_goal =
			sysfs_goals->goals_arr[i];

		if (!sysfs_goal->target_value)
			continue;

		goal = damos_new_quota_goal(sysfs_goal->metric,
				sysfs_goal->target_value);
		if (!goal)
			return -ENOMEM;
		if (sysfs_goal->metric == DAMOS_QUOTA_USER_INPUT)
			goal->current_value = sysfs_goal->current_value;
		damos_add_quota_goal(quota, goal);
	}
	return 0;
}

int damos_sysfs_set_quota_scores(struct damon_sysfs_schemes *sysfs_schemes,
		struct damon_ctx *ctx)
{
	struct damos *scheme;
	int i = 0;

	damon_for_each_scheme(scheme, ctx) {
		struct damon_sysfs_scheme *sysfs_scheme;
		int err;

		/* user could have removed the scheme sysfs dir */
		if (i >= sysfs_schemes->nr)
			break;

		sysfs_scheme = sysfs_schemes->schemes_arr[i];
		err = damos_sysfs_set_quota_score(sysfs_scheme->quotas->goals,
				&scheme->quota);
		if (err)
			/* kdamond will clean up the schemes and terminate */
			return err;
		i++;
	}
	return 0;
}

void damos_sysfs_update_effective_quotas(
		struct damon_sysfs_schemes *sysfs_schemes,
		struct damon_ctx *ctx)
{
	struct damos *scheme;
	int schemes_idx = 0;

	damon_for_each_scheme(scheme, ctx) {
		struct damon_sysfs_quotas *sysfs_quotas;

		/* user could have removed the scheme sysfs dir */
		if (schemes_idx >= sysfs_schemes->nr)
			break;

		sysfs_quotas =
			sysfs_schemes->schemes_arr[schemes_idx++]->quotas;
		sysfs_quotas->effective_sz = scheme->quota.esz;
	}
}
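/*
 * Conceptual sketch (not built; the numbers are arbitrary example values): a
 * quota goal whose metric is DAMOS_QUOTA_USER_INPUT, with a target_value of
 * 10000 and a current_value of 4000, is turned by
 * damos_sysfs_set_quota_score() above into roughly:
 *
 *	struct damos_quota_goal *g =
 *		damos_new_quota_goal(DAMOS_QUOTA_USER_INPUT, 10000);
 *	g->current_value = 4000;
 *	damos_add_quota_goal(&scheme->quota, g);
 *
 * Goals having a zero target_value are silently skipped.
 */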
static struct damos *damon_sysfs_mk_scheme(
		struct damon_sysfs_scheme *sysfs_scheme)
{
	struct damon_sysfs_access_pattern *access_pattern =
		sysfs_scheme->access_pattern;
	struct damon_sysfs_quotas *sysfs_quotas = sysfs_scheme->quotas;
	struct damon_sysfs_weights *sysfs_weights = sysfs_quotas->weights;
	struct damon_sysfs_watermarks *sysfs_wmarks = sysfs_scheme->watermarks;
	struct damon_sysfs_scheme_filters *sysfs_filters =
		sysfs_scheme->filters;
	struct damos *scheme;
	int err;

	struct damos_access_pattern pattern = {
		.min_sz_region = access_pattern->sz->min,
		.max_sz_region = access_pattern->sz->max,
		.min_nr_accesses = access_pattern->nr_accesses->min,
		.max_nr_accesses = access_pattern->nr_accesses->max,
		.min_age_region = access_pattern->age->min,
		.max_age_region = access_pattern->age->max,
	};
	struct damos_quota quota = {
		.ms = sysfs_quotas->ms,
		.sz = sysfs_quotas->sz,
		.reset_interval = sysfs_quotas->reset_interval_ms,
		.weight_sz = sysfs_weights->sz,
		.weight_nr_accesses = sysfs_weights->nr_accesses,
		.weight_age = sysfs_weights->age,
	};
	struct damos_watermarks wmarks = {
		.metric = sysfs_wmarks->metric,
		.interval = sysfs_wmarks->interval_us,
		.high = sysfs_wmarks->high,
		.mid = sysfs_wmarks->mid,
		.low = sysfs_wmarks->low,
	};

	scheme = damon_new_scheme(&pattern, sysfs_scheme->action,
			sysfs_scheme->apply_interval_us, &quota, &wmarks);
	if (!scheme)
		return NULL;

	err = damos_sysfs_set_quota_score(sysfs_quotas->goals, &scheme->quota);
	if (err) {
		damon_destroy_scheme(scheme);
		return NULL;
	}

	err = damon_sysfs_set_scheme_filters(scheme, sysfs_filters);
	if (err) {
		damon_destroy_scheme(scheme);
		return NULL;
	}
	return scheme;
}

static void damon_sysfs_update_scheme(struct damos *scheme,
		struct damon_sysfs_scheme *sysfs_scheme)
{
	struct damon_sysfs_access_pattern *access_pattern =
		sysfs_scheme->access_pattern;
	struct damon_sysfs_quotas *sysfs_quotas = sysfs_scheme->quotas;
	struct damon_sysfs_weights *sysfs_weights = sysfs_quotas->weights;
	struct damon_sysfs_watermarks *sysfs_wmarks = sysfs_scheme->watermarks;
	int err;

	scheme->pattern.min_sz_region = access_pattern->sz->min;
	scheme->pattern.max_sz_region = access_pattern->sz->max;
	scheme->pattern.min_nr_accesses = access_pattern->nr_accesses->min;
	scheme->pattern.max_nr_accesses = access_pattern->nr_accesses->max;
	scheme->pattern.min_age_region = access_pattern->age->min;
	scheme->pattern.max_age_region = access_pattern->age->max;

	scheme->action = sysfs_scheme->action;
	scheme->apply_interval_us = sysfs_scheme->apply_interval_us;

	scheme->quota.ms = sysfs_quotas->ms;
	scheme->quota.sz = sysfs_quotas->sz;
	scheme->quota.reset_interval = sysfs_quotas->reset_interval_ms;
	scheme->quota.weight_sz = sysfs_weights->sz;
	scheme->quota.weight_nr_accesses = sysfs_weights->nr_accesses;
	scheme->quota.weight_age = sysfs_weights->age;

	err = damos_sysfs_set_quota_score(sysfs_quotas->goals, &scheme->quota);
	if (err) {
		damon_destroy_scheme(scheme);
		return;
	}

	scheme->wmarks.metric = sysfs_wmarks->metric;
	scheme->wmarks.interval = sysfs_wmarks->interval_us;
	scheme->wmarks.high = sysfs_wmarks->high;
	scheme->wmarks.mid = sysfs_wmarks->mid;
	scheme->wmarks.low = sysfs_wmarks->low;

	err = damon_sysfs_set_scheme_filters(scheme, sysfs_scheme->filters);
	if (err)
		damon_destroy_scheme(scheme);
}

int damon_sysfs_set_schemes(struct damon_ctx *ctx,
		struct damon_sysfs_schemes *sysfs_schemes)
{
	struct damos *scheme, *next;
	int i = 0;

	damon_for_each_scheme_safe(scheme, next, ctx) {
		if (i < sysfs_schemes->nr)
			damon_sysfs_update_scheme(scheme,
					sysfs_schemes->schemes_arr[i]);
		else
			damon_destroy_scheme(scheme);
		i++;
	}

	for (; i < sysfs_schemes->nr; i++) {
		struct damos *scheme, *next;

		scheme = damon_sysfs_mk_scheme(sysfs_schemes->schemes_arr[i]);
		if (!scheme) {
			damon_for_each_scheme_safe(scheme, next, ctx)
				damon_destroy_scheme(scheme);
			return -ENOMEM;
		}
		damon_add_scheme(ctx, scheme);
	}
	return 0;
}

void damon_sysfs_schemes_update_stats(
		struct damon_sysfs_schemes *sysfs_schemes,
		struct damon_ctx *ctx)
{
	struct damos *scheme;
	int schemes_idx = 0;

	damon_for_each_scheme(scheme, ctx) {
		struct damon_sysfs_stats *sysfs_stats;

		/* user could have removed the scheme sysfs dir */
		if (schemes_idx >= sysfs_schemes->nr)
			break;

		sysfs_stats = sysfs_schemes->schemes_arr[schemes_idx++]->stats;
		sysfs_stats->nr_tried = scheme->stat.nr_tried;
		sysfs_stats->sz_tried = scheme->stat.sz_tried;
		sysfs_stats->nr_applied = scheme->stat.nr_applied;
		sysfs_stats->sz_applied = scheme->stat.sz_applied;
		sysfs_stats->qt_exceeds = scheme->stat.qt_exceeds;
	}
}
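/*
 * Illustrative user space example (a sketch; the path depends on the
 * kdamond, context, and scheme indices): after requesting a stats update,
 * the per-scheme counters filled by damon_sysfs_schemes_update_stats()
 * above can be read as:
 *
 *	# cd /sys/kernel/mm/damon/admin/kdamonds/0/contexts/0/schemes/0/stats
 *	# cat nr_tried sz_tried nr_applied sz_applied qt_exceeds
 */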
/*
 * The damon_sysfs_schemes that needs to update its schemes regions dir.
 * Protected by damon_sysfs_lock.
 */
static struct damon_sysfs_schemes *damon_sysfs_schemes_for_damos_callback;
static int damon_sysfs_schemes_region_idx;
static bool damos_regions_upd_total_bytes_only;

/*
 * DAMON callback that is called before damos apply.  While this callback is
 * registered, damon_sysfs_lock should be held to ensure the regions
 * directories exist.
 */
static int damon_sysfs_before_damos_apply(struct damon_ctx *ctx,
		struct damon_target *t, struct damon_region *r,
		struct damos *s)
{
	struct damos *scheme;
	struct damon_sysfs_scheme_regions *sysfs_regions;
	struct damon_sysfs_scheme_region *region;
	struct damon_sysfs_schemes *sysfs_schemes =
		damon_sysfs_schemes_for_damos_callback;
	int schemes_idx = 0;

	damon_for_each_scheme(scheme, ctx) {
		if (scheme == s)
			break;
		schemes_idx++;
	}

	/* user could have removed the scheme sysfs dir */
	if (schemes_idx >= sysfs_schemes->nr)
		return 0;

	sysfs_regions = sysfs_schemes->schemes_arr[schemes_idx]->tried_regions;
	if (sysfs_regions->upd_status == DAMOS_TRIED_REGIONS_UPD_FINISHED)
		return 0;
	if (sysfs_regions->upd_status == DAMOS_TRIED_REGIONS_UPD_IDLE)
		sysfs_regions->upd_status = DAMOS_TRIED_REGIONS_UPD_STARTED;
	sysfs_regions->total_bytes += r->ar.end - r->ar.start;
	if (damos_regions_upd_total_bytes_only)
		return 0;

	region = damon_sysfs_scheme_region_alloc(r);
	if (!region)
		return 0;
	list_add_tail(&region->list, &sysfs_regions->regions_list);
	sysfs_regions->nr_regions++;
	if (kobject_init_and_add(&region->kobj,
				&damon_sysfs_scheme_region_ktype,
				&sysfs_regions->kobj, "%d",
				damon_sysfs_schemes_region_idx++)) {
		kobject_put(&region->kobj);
	}
	return 0;
}

/*
 * DAMON callback that is called after each access sampling.  While this
 * callback is registered, damon_sysfs_lock should be held to ensure the
 * regions directories exist.
 */
void damos_sysfs_mark_finished_regions_updates(struct damon_ctx *ctx)
{
	struct damon_sysfs_schemes *sysfs_schemes =
		damon_sysfs_schemes_for_damos_callback;
	struct damon_sysfs_scheme_regions *sysfs_regions;
	int i;

	for (i = 0; i < sysfs_schemes->nr; i++) {
		sysfs_regions = sysfs_schemes->schemes_arr[i]->tried_regions;
		if (sysfs_regions->upd_status ==
				DAMOS_TRIED_REGIONS_UPD_STARTED ||
				time_after(jiffies,
					sysfs_regions->upd_timeout_jiffies))
			sysfs_regions->upd_status =
				DAMOS_TRIED_REGIONS_UPD_FINISHED;
	}
}

/* Called from damon_sysfs_cmd_request_callback under damon_sysfs_lock */
int damon_sysfs_schemes_clear_regions(
		struct damon_sysfs_schemes *sysfs_schemes,
		struct damon_ctx *ctx)
{
	struct damos *scheme;
	int schemes_idx = 0;

	damon_for_each_scheme(scheme, ctx) {
		struct damon_sysfs_scheme *sysfs_scheme;

		/* user could have removed the scheme sysfs dir */
		if (schemes_idx >= sysfs_schemes->nr)
			break;

		sysfs_scheme = sysfs_schemes->schemes_arr[schemes_idx++];
		damon_sysfs_scheme_regions_rm_dirs(
				sysfs_scheme->tried_regions);
		sysfs_scheme->tried_regions->total_bytes = 0;
	}
	return 0;
}

static struct damos *damos_sysfs_nth_scheme(int n, struct damon_ctx *ctx)
{
	struct damos *scheme;
	int i = 0;

	damon_for_each_scheme(scheme, ctx) {
		if (i == n)
			return scheme;
		i++;
	}
	return NULL;
}

static void damos_tried_regions_init_upd_status(
		struct damon_sysfs_schemes *sysfs_schemes,
		struct damon_ctx *ctx)
{
	int i;
	struct damos *scheme;
	struct damon_sysfs_scheme_regions *sysfs_regions;

	for (i = 0; i < sysfs_schemes->nr; i++) {
		sysfs_regions = sysfs_schemes->schemes_arr[i]->tried_regions;
		scheme = damos_sysfs_nth_scheme(i, ctx);
		if (!scheme) {
			sysfs_regions->upd_status =
				DAMOS_TRIED_REGIONS_UPD_FINISHED;
			continue;
		}
		sysfs_regions->upd_status = DAMOS_TRIED_REGIONS_UPD_IDLE;
		sysfs_regions->upd_timeout_jiffies = jiffies +
			2 * usecs_to_jiffies(scheme->apply_interval_us ?
					scheme->apply_interval_us :
					ctx->attrs.aggr_interval);
	}
}
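/*
 * Worked example (the interval values are only for illustration): with an
 * apply_interval_us of 0 and an assumed aggregation interval of 100000 us
 * (100 ms), damos_tried_regions_init_upd_status() above sets
 * upd_timeout_jiffies to jiffies + 2 * usecs_to_jiffies(100000).  That is,
 * the update for a scheme is treated as finished if its status stays 'idle'
 * for roughly two of its apply (here, aggregation) intervals.
 */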
/* Called from damon_sysfs_cmd_request_callback under damon_sysfs_lock */
int damon_sysfs_schemes_update_regions_start(
		struct damon_sysfs_schemes *sysfs_schemes,
		struct damon_ctx *ctx, bool total_bytes_only)
{
	damon_sysfs_schemes_clear_regions(sysfs_schemes, ctx);
	damon_sysfs_schemes_for_damos_callback = sysfs_schemes;
	damos_tried_regions_init_upd_status(sysfs_schemes, ctx);
	damos_regions_upd_total_bytes_only = total_bytes_only;
	ctx->callback.before_damos_apply = damon_sysfs_before_damos_apply;
	return 0;
}

bool damos_sysfs_regions_upd_done(void)
{
	struct damon_sysfs_schemes *sysfs_schemes =
		damon_sysfs_schemes_for_damos_callback;
	struct damon_sysfs_scheme_regions *sysfs_regions;
	int i;

	for (i = 0; i < sysfs_schemes->nr; i++) {
		sysfs_regions = sysfs_schemes->schemes_arr[i]->tried_regions;
		if (sysfs_regions->upd_status !=
				DAMOS_TRIED_REGIONS_UPD_FINISHED)
			return false;
	}
	return true;
}

/*
 * Called from damon_sysfs_cmd_request_callback under damon_sysfs_lock.
 * Caller should unlock damon_sysfs_lock, which was held before
 * damon_sysfs_schemes_update_regions_start().
 */
int damon_sysfs_schemes_update_regions_stop(struct damon_ctx *ctx)
{
	damon_sysfs_schemes_for_damos_callback = NULL;
	ctx->callback.before_damos_apply = NULL;
	damon_sysfs_schemes_region_idx = 0;
	return 0;
}
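/*
 * Illustrative caller-side sketch (an assumption about how the request
 * handler drives the functions above; simplified, not the exact upstream
 * code):
 *
 *	mutex_lock(&damon_sysfs_lock);
 *	damon_sysfs_schemes_update_regions_start(sysfs_schemes, ctx, false);
 *	... wait until damos_sysfs_regions_upd_done() returns true, with
 *	    damos_sysfs_mark_finished_regions_updates() being invoked from
 *	    the sampling/aggregation callbacks in between ...
 *	damon_sysfs_schemes_update_regions_stop(ctx);
 *	mutex_unlock(&damon_sysfs_lock);
 */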