// SPDX-License-Identifier: GPL-2.0
/*
 * DAMON sysfs Interface
 *
 * Copyright (c) 2022 SeongJae Park <sj@kernel.org>
 */

#include <linux/slab.h>
#include <linux/numa.h>

#include "sysfs-common.h"

/*
 * scheme region directory
 */

struct damon_sysfs_scheme_region {
	struct kobject kobj;
	struct damon_addr_range ar;
	unsigned int nr_accesses;
	unsigned int age;
	struct list_head list;
};

static struct damon_sysfs_scheme_region *damon_sysfs_scheme_region_alloc(
		struct damon_region *region)
{
	struct damon_sysfs_scheme_region *sysfs_region = kmalloc(
			sizeof(*sysfs_region), GFP_KERNEL);

	if (!sysfs_region)
		return NULL;
	sysfs_region->kobj = (struct kobject){};
	sysfs_region->ar = region->ar;
	sysfs_region->nr_accesses = region->nr_accesses_bp / 10000;
	sysfs_region->age = region->age;
	INIT_LIST_HEAD(&sysfs_region->list);
	return sysfs_region;
}

static ssize_t start_show(struct kobject *kobj, struct kobj_attribute *attr,
		char *buf)
{
	struct damon_sysfs_scheme_region *region = container_of(kobj,
			struct damon_sysfs_scheme_region, kobj);

	return sysfs_emit(buf, "%lu\n", region->ar.start);
}

static ssize_t end_show(struct kobject *kobj, struct kobj_attribute *attr,
		char *buf)
{
	struct damon_sysfs_scheme_region *region = container_of(kobj,
			struct damon_sysfs_scheme_region, kobj);

	return sysfs_emit(buf, "%lu\n", region->ar.end);
}

static ssize_t nr_accesses_show(struct kobject *kobj,
		struct kobj_attribute *attr, char *buf)
{
	struct damon_sysfs_scheme_region *region = container_of(kobj,
			struct damon_sysfs_scheme_region, kobj);

	return sysfs_emit(buf, "%u\n", region->nr_accesses);
}

static ssize_t age_show(struct kobject *kobj, struct kobj_attribute *attr,
		char *buf)
{
	struct damon_sysfs_scheme_region *region = container_of(kobj,
			struct damon_sysfs_scheme_region, kobj);

	return sysfs_emit(buf, "%u\n", region->age);
}

static void damon_sysfs_scheme_region_release(struct kobject *kobj)
{
	struct damon_sysfs_scheme_region *region = container_of(kobj,
			struct damon_sysfs_scheme_region, kobj);

	list_del(&region->list);
	kfree(region);
}

static struct kobj_attribute damon_sysfs_scheme_region_start_attr =
		__ATTR_RO_MODE(start, 0400);

static struct kobj_attribute damon_sysfs_scheme_region_end_attr =
		__ATTR_RO_MODE(end, 0400);

static struct kobj_attribute damon_sysfs_scheme_region_nr_accesses_attr =
		__ATTR_RO_MODE(nr_accesses, 0400);

static struct kobj_attribute damon_sysfs_scheme_region_age_attr =
		__ATTR_RO_MODE(age, 0400);

static struct attribute *damon_sysfs_scheme_region_attrs[] = {
	&damon_sysfs_scheme_region_start_attr.attr,
	&damon_sysfs_scheme_region_end_attr.attr,
	&damon_sysfs_scheme_region_nr_accesses_attr.attr,
	&damon_sysfs_scheme_region_age_attr.attr,
	NULL,
};
ATTRIBUTE_GROUPS(damon_sysfs_scheme_region);

static const struct kobj_type damon_sysfs_scheme_region_ktype = {
	.release = damon_sysfs_scheme_region_release,
	.sysfs_ops = &kobj_sysfs_ops,
	.default_groups = damon_sysfs_scheme_region_groups,
};
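
/*
 * Note: each tried region handled above is exposed as one numbered directory
 * holding 'start', 'end', 'nr_accesses' and 'age' files.  As an illustration
 * (the exact path depends on the kdamond, context, scheme and region
 * indices), a snapshot may look like:
 *
 *   /sys/kernel/mm/damon/admin/kdamonds/0/contexts/0/schemes/0/tried_regions/0/start
 *   /sys/kernel/mm/damon/admin/kdamonds/0/contexts/0/schemes/0/tried_regions/0/end
 *   /sys/kernel/mm/damon/admin/kdamonds/0/contexts/0/schemes/0/tried_regions/0/nr_accesses
 *   /sys/kernel/mm/damon/admin/kdamonds/0/contexts/0/schemes/0/tried_regions/0/age
 */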

/*
 * scheme regions directory
 */

/*
 * enum damos_sysfs_regions_upd_status - Represent DAMOS tried regions update
 * status
 * @DAMOS_TRIED_REGIONS_UPD_IDLE: Waiting for next request.
 * @DAMOS_TRIED_REGIONS_UPD_STARTED: Update started.
 * @DAMOS_TRIED_REGIONS_UPD_FINISHED: Update finished.
 *
 * Each DAMON-based operation scheme (&struct damos) has its own apply
 * interval, and we need to expose the scheme tried regions based on only a
 * single snapshot.  For this, we keep the tried regions update status for
 * each scheme.  The status becomes 'idle' at the beginning.
 *
 * Once the tried regions update request is received, the request handling
 * start function (damon_sysfs_scheme_update_regions_start()) sets the status
 * of all schemes to 'idle' again, and registers the ->before_damos_apply()
 * callback.
 *
 * Then, the first followup ->before_damos_apply() callback
 * (damon_sysfs_before_damos_apply()) sets the status to 'started'.  The first
 * ->after_sampling() or ->after_aggregation() callback
 * (damon_sysfs_cmd_request_callback()) after that is called only after the
 * scheme is completely applied to the given snapshot.  Hence the callback
 * knows the situation by seeing the 'started' status, and sets the status to
 * 'finished'.  Then, damon_sysfs_before_damos_apply() understands the
 * situation by seeing the 'finished' status and does nothing.
 *
 * If DAMOS is not applied to any region due to any reason including the
 * access pattern, the watermarks, the quotas, and the filters,
 * ->before_damos_apply() will not be called back.  Until the situation is
 * changed, the update will not be finished.  To avoid this,
 * damon_sysfs_after_sampling() sets the status to 'finished' if more than two
 * apply intervals of the scheme have passed while the status is still 'idle'.
 *
 * Finally, the tried regions request handling finisher function
 * (damon_sysfs_schemes_update_regions_stop()) unregisters the callbacks.
 */
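
/*
 * As a rough sketch of the flow described above, for a single scheme (this
 * only restates the comment above; it is not an exact trace):
 *
 *   damon_sysfs_scheme_update_regions_start():      status = 'idle'
 *   first ->before_damos_apply():                   status = 'started'
 *   next ->after_sampling()/->after_aggregation():  status = 'finished'
 *   later ->before_damos_apply() calls:             do nothing
 *   damon_sysfs_schemes_update_regions_stop():      callbacks unregistered
 */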
enum damos_sysfs_regions_upd_status {
	DAMOS_TRIED_REGIONS_UPD_IDLE,
	DAMOS_TRIED_REGIONS_UPD_STARTED,
	DAMOS_TRIED_REGIONS_UPD_FINISHED,
};

struct damon_sysfs_scheme_regions {
	struct kobject kobj;
	struct list_head regions_list;
	int nr_regions;
	unsigned long total_bytes;
	enum damos_sysfs_regions_upd_status upd_status;
	unsigned long upd_timeout_jiffies;
};

static struct damon_sysfs_scheme_regions *
damon_sysfs_scheme_regions_alloc(void)
{
	struct damon_sysfs_scheme_regions *regions = kmalloc(sizeof(*regions),
			GFP_KERNEL);

	if (!regions)
		return NULL;

	regions->kobj = (struct kobject){};
	INIT_LIST_HEAD(&regions->regions_list);
	regions->nr_regions = 0;
	regions->total_bytes = 0;
	regions->upd_status = DAMOS_TRIED_REGIONS_UPD_IDLE;
	return regions;
}

static ssize_t total_bytes_show(struct kobject *kobj,
		struct kobj_attribute *attr, char *buf)
{
	struct damon_sysfs_scheme_regions *regions = container_of(kobj,
			struct damon_sysfs_scheme_regions, kobj);

	return sysfs_emit(buf, "%lu\n", regions->total_bytes);
}

static void damon_sysfs_scheme_regions_rm_dirs(
		struct damon_sysfs_scheme_regions *regions)
{
	struct damon_sysfs_scheme_region *r, *next;

	list_for_each_entry_safe(r, next, &regions->regions_list, list) {
		/* release function deletes it from the list */
		kobject_put(&r->kobj);
		regions->nr_regions--;
	}
}

static void damon_sysfs_scheme_regions_release(struct kobject *kobj)
{
	kfree(container_of(kobj, struct damon_sysfs_scheme_regions, kobj));
}

static struct kobj_attribute damon_sysfs_scheme_regions_total_bytes_attr =
		__ATTR_RO_MODE(total_bytes, 0400);

static struct attribute *damon_sysfs_scheme_regions_attrs[] = {
	&damon_sysfs_scheme_regions_total_bytes_attr.attr,
	NULL,
};
ATTRIBUTE_GROUPS(damon_sysfs_scheme_regions);

static const struct kobj_type damon_sysfs_scheme_regions_ktype = {
	.release = damon_sysfs_scheme_regions_release,
	.sysfs_ops = &kobj_sysfs_ops,
	.default_groups = damon_sysfs_scheme_regions_groups,
};

/*
 * schemes/stats directory
 */

struct damon_sysfs_stats {
	struct kobject kobj;
	unsigned long nr_tried;
	unsigned long sz_tried;
	unsigned long nr_applied;
	unsigned long sz_applied;
	unsigned long qt_exceeds;
};

static struct damon_sysfs_stats *damon_sysfs_stats_alloc(void)
{
	return kzalloc(sizeof(struct damon_sysfs_stats), GFP_KERNEL);
}

static ssize_t nr_tried_show(struct kobject *kobj, struct kobj_attribute *attr,
		char *buf)
{
	struct damon_sysfs_stats *stats = container_of(kobj,
			struct damon_sysfs_stats, kobj);

	return sysfs_emit(buf, "%lu\n", stats->nr_tried);
}

static ssize_t sz_tried_show(struct kobject *kobj, struct kobj_attribute *attr,
		char *buf)
{
	struct damon_sysfs_stats *stats = container_of(kobj,
			struct damon_sysfs_stats, kobj);

	return sysfs_emit(buf, "%lu\n", stats->sz_tried);
}

static ssize_t nr_applied_show(struct kobject *kobj,
		struct kobj_attribute *attr, char *buf)
{
	struct damon_sysfs_stats *stats = container_of(kobj,
			struct damon_sysfs_stats, kobj);

	return sysfs_emit(buf, "%lu\n", stats->nr_applied);
}

static ssize_t sz_applied_show(struct kobject *kobj,
		struct kobj_attribute *attr, char *buf)
{
	struct
damon_sysfs_stats *stats = container_of(kobj, 275 struct damon_sysfs_stats, kobj); 276 277 return sysfs_emit(buf, "%lu\n", stats->sz_applied); 278 } 279 280 static ssize_t qt_exceeds_show(struct kobject *kobj, 281 struct kobj_attribute *attr, char *buf) 282 { 283 struct damon_sysfs_stats *stats = container_of(kobj, 284 struct damon_sysfs_stats, kobj); 285 286 return sysfs_emit(buf, "%lu\n", stats->qt_exceeds); 287 } 288 289 static void damon_sysfs_stats_release(struct kobject *kobj) 290 { 291 kfree(container_of(kobj, struct damon_sysfs_stats, kobj)); 292 } 293 294 static struct kobj_attribute damon_sysfs_stats_nr_tried_attr = 295 __ATTR_RO_MODE(nr_tried, 0400); 296 297 static struct kobj_attribute damon_sysfs_stats_sz_tried_attr = 298 __ATTR_RO_MODE(sz_tried, 0400); 299 300 static struct kobj_attribute damon_sysfs_stats_nr_applied_attr = 301 __ATTR_RO_MODE(nr_applied, 0400); 302 303 static struct kobj_attribute damon_sysfs_stats_sz_applied_attr = 304 __ATTR_RO_MODE(sz_applied, 0400); 305 306 static struct kobj_attribute damon_sysfs_stats_qt_exceeds_attr = 307 __ATTR_RO_MODE(qt_exceeds, 0400); 308 309 static struct attribute *damon_sysfs_stats_attrs[] = { 310 &damon_sysfs_stats_nr_tried_attr.attr, 311 &damon_sysfs_stats_sz_tried_attr.attr, 312 &damon_sysfs_stats_nr_applied_attr.attr, 313 &damon_sysfs_stats_sz_applied_attr.attr, 314 &damon_sysfs_stats_qt_exceeds_attr.attr, 315 NULL, 316 }; 317 ATTRIBUTE_GROUPS(damon_sysfs_stats); 318 319 static const struct kobj_type damon_sysfs_stats_ktype = { 320 .release = damon_sysfs_stats_release, 321 .sysfs_ops = &kobj_sysfs_ops, 322 .default_groups = damon_sysfs_stats_groups, 323 }; 324 325 /* 326 * filter directory 327 */ 328 329 struct damon_sysfs_scheme_filter { 330 struct kobject kobj; 331 enum damos_filter_type type; 332 bool matching; 333 char *memcg_path; 334 struct damon_addr_range addr_range; 335 int target_idx; 336 }; 337 338 static struct damon_sysfs_scheme_filter *damon_sysfs_scheme_filter_alloc(void) 339 { 340 return kzalloc(sizeof(struct damon_sysfs_scheme_filter), GFP_KERNEL); 341 } 342 343 /* Should match with enum damos_filter_type */ 344 static const char * const damon_sysfs_scheme_filter_type_strs[] = { 345 "anon", 346 "memcg", 347 "young", 348 "addr", 349 "target", 350 }; 351 352 static ssize_t type_show(struct kobject *kobj, 353 struct kobj_attribute *attr, char *buf) 354 { 355 struct damon_sysfs_scheme_filter *filter = container_of(kobj, 356 struct damon_sysfs_scheme_filter, kobj); 357 358 return sysfs_emit(buf, "%s\n", 359 damon_sysfs_scheme_filter_type_strs[filter->type]); 360 } 361 362 static ssize_t type_store(struct kobject *kobj, 363 struct kobj_attribute *attr, const char *buf, size_t count) 364 { 365 struct damon_sysfs_scheme_filter *filter = container_of(kobj, 366 struct damon_sysfs_scheme_filter, kobj); 367 enum damos_filter_type type; 368 ssize_t ret = -EINVAL; 369 370 for (type = 0; type < NR_DAMOS_FILTER_TYPES; type++) { 371 if (sysfs_streq(buf, damon_sysfs_scheme_filter_type_strs[ 372 type])) { 373 filter->type = type; 374 ret = count; 375 break; 376 } 377 } 378 return ret; 379 } 380 381 static ssize_t matching_show(struct kobject *kobj, 382 struct kobj_attribute *attr, char *buf) 383 { 384 struct damon_sysfs_scheme_filter *filter = container_of(kobj, 385 struct damon_sysfs_scheme_filter, kobj); 386 387 return sysfs_emit(buf, "%c\n", filter->matching ? 
'Y' : 'N'); 388 } 389 390 static ssize_t matching_store(struct kobject *kobj, 391 struct kobj_attribute *attr, const char *buf, size_t count) 392 { 393 struct damon_sysfs_scheme_filter *filter = container_of(kobj, 394 struct damon_sysfs_scheme_filter, kobj); 395 bool matching; 396 int err = kstrtobool(buf, &matching); 397 398 if (err) 399 return err; 400 401 filter->matching = matching; 402 return count; 403 } 404 405 static ssize_t memcg_path_show(struct kobject *kobj, 406 struct kobj_attribute *attr, char *buf) 407 { 408 struct damon_sysfs_scheme_filter *filter = container_of(kobj, 409 struct damon_sysfs_scheme_filter, kobj); 410 411 return sysfs_emit(buf, "%s\n", 412 filter->memcg_path ? filter->memcg_path : ""); 413 } 414 415 static ssize_t memcg_path_store(struct kobject *kobj, 416 struct kobj_attribute *attr, const char *buf, size_t count) 417 { 418 struct damon_sysfs_scheme_filter *filter = container_of(kobj, 419 struct damon_sysfs_scheme_filter, kobj); 420 char *path = kmalloc(sizeof(*path) * (count + 1), GFP_KERNEL); 421 422 if (!path) 423 return -ENOMEM; 424 425 strscpy(path, buf, count + 1); 426 filter->memcg_path = path; 427 return count; 428 } 429 430 static ssize_t addr_start_show(struct kobject *kobj, 431 struct kobj_attribute *attr, char *buf) 432 { 433 struct damon_sysfs_scheme_filter *filter = container_of(kobj, 434 struct damon_sysfs_scheme_filter, kobj); 435 436 return sysfs_emit(buf, "%lu\n", filter->addr_range.start); 437 } 438 439 static ssize_t addr_start_store(struct kobject *kobj, 440 struct kobj_attribute *attr, const char *buf, size_t count) 441 { 442 struct damon_sysfs_scheme_filter *filter = container_of(kobj, 443 struct damon_sysfs_scheme_filter, kobj); 444 int err = kstrtoul(buf, 0, &filter->addr_range.start); 445 446 return err ? err : count; 447 } 448 449 static ssize_t addr_end_show(struct kobject *kobj, 450 struct kobj_attribute *attr, char *buf) 451 { 452 struct damon_sysfs_scheme_filter *filter = container_of(kobj, 453 struct damon_sysfs_scheme_filter, kobj); 454 455 return sysfs_emit(buf, "%lu\n", filter->addr_range.end); 456 } 457 458 static ssize_t addr_end_store(struct kobject *kobj, 459 struct kobj_attribute *attr, const char *buf, size_t count) 460 { 461 struct damon_sysfs_scheme_filter *filter = container_of(kobj, 462 struct damon_sysfs_scheme_filter, kobj); 463 int err = kstrtoul(buf, 0, &filter->addr_range.end); 464 465 return err ? err : count; 466 } 467 468 static ssize_t damon_target_idx_show(struct kobject *kobj, 469 struct kobj_attribute *attr, char *buf) 470 { 471 struct damon_sysfs_scheme_filter *filter = container_of(kobj, 472 struct damon_sysfs_scheme_filter, kobj); 473 474 return sysfs_emit(buf, "%d\n", filter->target_idx); 475 } 476 477 static ssize_t damon_target_idx_store(struct kobject *kobj, 478 struct kobj_attribute *attr, const char *buf, size_t count) 479 { 480 struct damon_sysfs_scheme_filter *filter = container_of(kobj, 481 struct damon_sysfs_scheme_filter, kobj); 482 int err = kstrtoint(buf, 0, &filter->target_idx); 483 484 return err ? 
err : count; 485 } 486 487 static void damon_sysfs_scheme_filter_release(struct kobject *kobj) 488 { 489 struct damon_sysfs_scheme_filter *filter = container_of(kobj, 490 struct damon_sysfs_scheme_filter, kobj); 491 492 kfree(filter->memcg_path); 493 kfree(filter); 494 } 495 496 static struct kobj_attribute damon_sysfs_scheme_filter_type_attr = 497 __ATTR_RW_MODE(type, 0600); 498 499 static struct kobj_attribute damon_sysfs_scheme_filter_matching_attr = 500 __ATTR_RW_MODE(matching, 0600); 501 502 static struct kobj_attribute damon_sysfs_scheme_filter_memcg_path_attr = 503 __ATTR_RW_MODE(memcg_path, 0600); 504 505 static struct kobj_attribute damon_sysfs_scheme_filter_addr_start_attr = 506 __ATTR_RW_MODE(addr_start, 0600); 507 508 static struct kobj_attribute damon_sysfs_scheme_filter_addr_end_attr = 509 __ATTR_RW_MODE(addr_end, 0600); 510 511 static struct kobj_attribute damon_sysfs_scheme_filter_damon_target_idx_attr = 512 __ATTR_RW_MODE(damon_target_idx, 0600); 513 514 static struct attribute *damon_sysfs_scheme_filter_attrs[] = { 515 &damon_sysfs_scheme_filter_type_attr.attr, 516 &damon_sysfs_scheme_filter_matching_attr.attr, 517 &damon_sysfs_scheme_filter_memcg_path_attr.attr, 518 &damon_sysfs_scheme_filter_addr_start_attr.attr, 519 &damon_sysfs_scheme_filter_addr_end_attr.attr, 520 &damon_sysfs_scheme_filter_damon_target_idx_attr.attr, 521 NULL, 522 }; 523 ATTRIBUTE_GROUPS(damon_sysfs_scheme_filter); 524 525 static const struct kobj_type damon_sysfs_scheme_filter_ktype = { 526 .release = damon_sysfs_scheme_filter_release, 527 .sysfs_ops = &kobj_sysfs_ops, 528 .default_groups = damon_sysfs_scheme_filter_groups, 529 }; 530 531 /* 532 * filters directory 533 */ 534 535 struct damon_sysfs_scheme_filters { 536 struct kobject kobj; 537 struct damon_sysfs_scheme_filter **filters_arr; 538 int nr; 539 }; 540 541 static struct damon_sysfs_scheme_filters * 542 damon_sysfs_scheme_filters_alloc(void) 543 { 544 return kzalloc(sizeof(struct damon_sysfs_scheme_filters), GFP_KERNEL); 545 } 546 547 static void damon_sysfs_scheme_filters_rm_dirs( 548 struct damon_sysfs_scheme_filters *filters) 549 { 550 struct damon_sysfs_scheme_filter **filters_arr = filters->filters_arr; 551 int i; 552 553 for (i = 0; i < filters->nr; i++) 554 kobject_put(&filters_arr[i]->kobj); 555 filters->nr = 0; 556 kfree(filters_arr); 557 filters->filters_arr = NULL; 558 } 559 560 static int damon_sysfs_scheme_filters_add_dirs( 561 struct damon_sysfs_scheme_filters *filters, int nr_filters) 562 { 563 struct damon_sysfs_scheme_filter **filters_arr, *filter; 564 int err, i; 565 566 damon_sysfs_scheme_filters_rm_dirs(filters); 567 if (!nr_filters) 568 return 0; 569 570 filters_arr = kmalloc_array(nr_filters, sizeof(*filters_arr), 571 GFP_KERNEL | __GFP_NOWARN); 572 if (!filters_arr) 573 return -ENOMEM; 574 filters->filters_arr = filters_arr; 575 576 for (i = 0; i < nr_filters; i++) { 577 filter = damon_sysfs_scheme_filter_alloc(); 578 if (!filter) { 579 damon_sysfs_scheme_filters_rm_dirs(filters); 580 return -ENOMEM; 581 } 582 583 err = kobject_init_and_add(&filter->kobj, 584 &damon_sysfs_scheme_filter_ktype, 585 &filters->kobj, "%d", i); 586 if (err) { 587 kobject_put(&filter->kobj); 588 damon_sysfs_scheme_filters_rm_dirs(filters); 589 return err; 590 } 591 592 filters_arr[i] = filter; 593 filters->nr++; 594 } 595 return 0; 596 } 597 598 static ssize_t nr_filters_show(struct kobject *kobj, 599 struct kobj_attribute *attr, char *buf) 600 { 601 struct damon_sysfs_scheme_filters *filters = container_of(kobj, 602 struct 
damon_sysfs_scheme_filters, kobj); 603 604 return sysfs_emit(buf, "%d\n", filters->nr); 605 } 606 607 static ssize_t nr_filters_store(struct kobject *kobj, 608 struct kobj_attribute *attr, const char *buf, size_t count) 609 { 610 struct damon_sysfs_scheme_filters *filters; 611 int nr, err = kstrtoint(buf, 0, &nr); 612 613 if (err) 614 return err; 615 if (nr < 0) 616 return -EINVAL; 617 618 filters = container_of(kobj, struct damon_sysfs_scheme_filters, kobj); 619 620 if (!mutex_trylock(&damon_sysfs_lock)) 621 return -EBUSY; 622 err = damon_sysfs_scheme_filters_add_dirs(filters, nr); 623 mutex_unlock(&damon_sysfs_lock); 624 if (err) 625 return err; 626 627 return count; 628 } 629 630 static void damon_sysfs_scheme_filters_release(struct kobject *kobj) 631 { 632 kfree(container_of(kobj, struct damon_sysfs_scheme_filters, kobj)); 633 } 634 635 static struct kobj_attribute damon_sysfs_scheme_filters_nr_attr = 636 __ATTR_RW_MODE(nr_filters, 0600); 637 638 static struct attribute *damon_sysfs_scheme_filters_attrs[] = { 639 &damon_sysfs_scheme_filters_nr_attr.attr, 640 NULL, 641 }; 642 ATTRIBUTE_GROUPS(damon_sysfs_scheme_filters); 643 644 static const struct kobj_type damon_sysfs_scheme_filters_ktype = { 645 .release = damon_sysfs_scheme_filters_release, 646 .sysfs_ops = &kobj_sysfs_ops, 647 .default_groups = damon_sysfs_scheme_filters_groups, 648 }; 649 650 /* 651 * watermarks directory 652 */ 653 654 struct damon_sysfs_watermarks { 655 struct kobject kobj; 656 enum damos_wmark_metric metric; 657 unsigned long interval_us; 658 unsigned long high; 659 unsigned long mid; 660 unsigned long low; 661 }; 662 663 static struct damon_sysfs_watermarks *damon_sysfs_watermarks_alloc( 664 enum damos_wmark_metric metric, unsigned long interval_us, 665 unsigned long high, unsigned long mid, unsigned long low) 666 { 667 struct damon_sysfs_watermarks *watermarks = kmalloc( 668 sizeof(*watermarks), GFP_KERNEL); 669 670 if (!watermarks) 671 return NULL; 672 watermarks->kobj = (struct kobject){}; 673 watermarks->metric = metric; 674 watermarks->interval_us = interval_us; 675 watermarks->high = high; 676 watermarks->mid = mid; 677 watermarks->low = low; 678 return watermarks; 679 } 680 681 /* Should match with enum damos_wmark_metric */ 682 static const char * const damon_sysfs_wmark_metric_strs[] = { 683 "none", 684 "free_mem_rate", 685 }; 686 687 static ssize_t metric_show(struct kobject *kobj, struct kobj_attribute *attr, 688 char *buf) 689 { 690 struct damon_sysfs_watermarks *watermarks = container_of(kobj, 691 struct damon_sysfs_watermarks, kobj); 692 693 return sysfs_emit(buf, "%s\n", 694 damon_sysfs_wmark_metric_strs[watermarks->metric]); 695 } 696 697 static ssize_t metric_store(struct kobject *kobj, struct kobj_attribute *attr, 698 const char *buf, size_t count) 699 { 700 struct damon_sysfs_watermarks *watermarks = container_of(kobj, 701 struct damon_sysfs_watermarks, kobj); 702 enum damos_wmark_metric metric; 703 704 for (metric = 0; metric < NR_DAMOS_WMARK_METRICS; metric++) { 705 if (sysfs_streq(buf, damon_sysfs_wmark_metric_strs[metric])) { 706 watermarks->metric = metric; 707 return count; 708 } 709 } 710 return -EINVAL; 711 } 712 713 static ssize_t interval_us_show(struct kobject *kobj, 714 struct kobj_attribute *attr, char *buf) 715 { 716 struct damon_sysfs_watermarks *watermarks = container_of(kobj, 717 struct damon_sysfs_watermarks, kobj); 718 719 return sysfs_emit(buf, "%lu\n", watermarks->interval_us); 720 } 721 722 static ssize_t interval_us_store(struct kobject *kobj, 723 struct kobj_attribute 
*attr, const char *buf, size_t count) 724 { 725 struct damon_sysfs_watermarks *watermarks = container_of(kobj, 726 struct damon_sysfs_watermarks, kobj); 727 int err = kstrtoul(buf, 0, &watermarks->interval_us); 728 729 return err ? err : count; 730 } 731 732 static ssize_t high_show(struct kobject *kobj, 733 struct kobj_attribute *attr, char *buf) 734 { 735 struct damon_sysfs_watermarks *watermarks = container_of(kobj, 736 struct damon_sysfs_watermarks, kobj); 737 738 return sysfs_emit(buf, "%lu\n", watermarks->high); 739 } 740 741 static ssize_t high_store(struct kobject *kobj, 742 struct kobj_attribute *attr, const char *buf, size_t count) 743 { 744 struct damon_sysfs_watermarks *watermarks = container_of(kobj, 745 struct damon_sysfs_watermarks, kobj); 746 int err = kstrtoul(buf, 0, &watermarks->high); 747 748 return err ? err : count; 749 } 750 751 static ssize_t mid_show(struct kobject *kobj, 752 struct kobj_attribute *attr, char *buf) 753 { 754 struct damon_sysfs_watermarks *watermarks = container_of(kobj, 755 struct damon_sysfs_watermarks, kobj); 756 757 return sysfs_emit(buf, "%lu\n", watermarks->mid); 758 } 759 760 static ssize_t mid_store(struct kobject *kobj, 761 struct kobj_attribute *attr, const char *buf, size_t count) 762 { 763 struct damon_sysfs_watermarks *watermarks = container_of(kobj, 764 struct damon_sysfs_watermarks, kobj); 765 int err = kstrtoul(buf, 0, &watermarks->mid); 766 767 return err ? err : count; 768 } 769 770 static ssize_t low_show(struct kobject *kobj, 771 struct kobj_attribute *attr, char *buf) 772 { 773 struct damon_sysfs_watermarks *watermarks = container_of(kobj, 774 struct damon_sysfs_watermarks, kobj); 775 776 return sysfs_emit(buf, "%lu\n", watermarks->low); 777 } 778 779 static ssize_t low_store(struct kobject *kobj, 780 struct kobj_attribute *attr, const char *buf, size_t count) 781 { 782 struct damon_sysfs_watermarks *watermarks = container_of(kobj, 783 struct damon_sysfs_watermarks, kobj); 784 int err = kstrtoul(buf, 0, &watermarks->low); 785 786 return err ? 
err : count;
}

static void damon_sysfs_watermarks_release(struct kobject *kobj)
{
	kfree(container_of(kobj, struct damon_sysfs_watermarks, kobj));
}

static struct kobj_attribute damon_sysfs_watermarks_metric_attr =
		__ATTR_RW_MODE(metric, 0600);

static struct kobj_attribute damon_sysfs_watermarks_interval_us_attr =
		__ATTR_RW_MODE(interval_us, 0600);

static struct kobj_attribute damon_sysfs_watermarks_high_attr =
		__ATTR_RW_MODE(high, 0600);

static struct kobj_attribute damon_sysfs_watermarks_mid_attr =
		__ATTR_RW_MODE(mid, 0600);

static struct kobj_attribute damon_sysfs_watermarks_low_attr =
		__ATTR_RW_MODE(low, 0600);

static struct attribute *damon_sysfs_watermarks_attrs[] = {
	&damon_sysfs_watermarks_metric_attr.attr,
	&damon_sysfs_watermarks_interval_us_attr.attr,
	&damon_sysfs_watermarks_high_attr.attr,
	&damon_sysfs_watermarks_mid_attr.attr,
	&damon_sysfs_watermarks_low_attr.attr,
	NULL,
};
ATTRIBUTE_GROUPS(damon_sysfs_watermarks);

static const struct kobj_type damon_sysfs_watermarks_ktype = {
	.release = damon_sysfs_watermarks_release,
	.sysfs_ops = &kobj_sysfs_ops,
	.default_groups = damon_sysfs_watermarks_groups,
};

/*
 * quota goal directory
 */

struct damos_sysfs_quota_goal {
	struct kobject kobj;
	enum damos_quota_goal_metric metric;
	unsigned long target_value;
	unsigned long current_value;
};

/* This should match with enum damos_quota_goal_metric */
static const char * const damos_sysfs_quota_goal_metric_strs[] = {
	"user_input",
	"some_mem_psi_us",
};

static struct damos_sysfs_quota_goal *damos_sysfs_quota_goal_alloc(void)
{
	return kzalloc(sizeof(struct damos_sysfs_quota_goal), GFP_KERNEL);
}

static ssize_t target_metric_show(struct kobject *kobj,
		struct kobj_attribute *attr, char *buf)
{
	struct damos_sysfs_quota_goal *goal = container_of(kobj,
			struct damos_sysfs_quota_goal, kobj);

	return sysfs_emit(buf, "%s\n",
			damos_sysfs_quota_goal_metric_strs[goal->metric]);
}

static ssize_t target_metric_store(struct kobject *kobj,
		struct kobj_attribute *attr, const char *buf, size_t count)
{
	struct damos_sysfs_quota_goal *goal = container_of(kobj,
			struct damos_sysfs_quota_goal, kobj);
	enum damos_quota_goal_metric m;

	for (m = 0; m < NR_DAMOS_QUOTA_GOAL_METRICS; m++) {
		if (sysfs_streq(buf, damos_sysfs_quota_goal_metric_strs[m])) {
			goal->metric = m;
			return count;
		}
	}
	return -EINVAL;
}

static ssize_t target_value_show(struct kobject *kobj,
		struct kobj_attribute *attr, char *buf)
{
	struct damos_sysfs_quota_goal *goal = container_of(kobj, struct
			damos_sysfs_quota_goal, kobj);

	return sysfs_emit(buf, "%lu\n", goal->target_value);
}

static ssize_t target_value_store(struct kobject *kobj,
		struct kobj_attribute *attr, const char *buf, size_t count)
{
	struct damos_sysfs_quota_goal *goal = container_of(kobj, struct
			damos_sysfs_quota_goal, kobj);
	int err = kstrtoul(buf, 0, &goal->target_value);

	return err ? err : count;
}
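
/*
 * Illustrative use of the files above (a sketch; values are made up and the
 * exact paths depend on the directory indices): for a 'user_input' goal,
 * user space periodically writes the latest measurement of its own metric to
 * 'current_value', and DAMOS tunes the effective quota based on how far
 * 'current_value' is from 'target_value'.  For 'some_mem_psi_us', the kernel
 * measures the metric itself, so only 'target_value' needs to be set.
 */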

static ssize_t current_value_show(struct kobject *kobj,
		struct kobj_attribute *attr, char *buf)
{
	struct damos_sysfs_quota_goal *goal = container_of(kobj, struct
			damos_sysfs_quota_goal, kobj);

	return sysfs_emit(buf, "%lu\n", goal->current_value);
}

static ssize_t current_value_store(struct kobject *kobj,
		struct kobj_attribute *attr, const char *buf, size_t count)
{
	struct damos_sysfs_quota_goal *goal = container_of(kobj, struct
			damos_sysfs_quota_goal, kobj);
	int err = kstrtoul(buf, 0, &goal->current_value);

	/* feed callback should check existence of this file and read value */
	return err ? err : count;
}

static void damos_sysfs_quota_goal_release(struct kobject *kobj)
{
	/* or, notify this release to the feed callback */
	kfree(container_of(kobj, struct damos_sysfs_quota_goal, kobj));
}

static struct kobj_attribute damos_sysfs_quota_goal_target_metric_attr =
		__ATTR_RW_MODE(target_metric, 0600);

static struct kobj_attribute damos_sysfs_quota_goal_target_value_attr =
		__ATTR_RW_MODE(target_value, 0600);

static struct kobj_attribute damos_sysfs_quota_goal_current_value_attr =
		__ATTR_RW_MODE(current_value, 0600);

static struct attribute *damos_sysfs_quota_goal_attrs[] = {
	&damos_sysfs_quota_goal_target_metric_attr.attr,
	&damos_sysfs_quota_goal_target_value_attr.attr,
	&damos_sysfs_quota_goal_current_value_attr.attr,
	NULL,
};
ATTRIBUTE_GROUPS(damos_sysfs_quota_goal);

static const struct kobj_type damos_sysfs_quota_goal_ktype = {
	.release = damos_sysfs_quota_goal_release,
	.sysfs_ops = &kobj_sysfs_ops,
	.default_groups = damos_sysfs_quota_goal_groups,
};

/*
 * quota goals directory
 */

struct damos_sysfs_quota_goals {
	struct kobject kobj;
	struct damos_sysfs_quota_goal **goals_arr;	/* counted by nr */
	int nr;
};

static struct damos_sysfs_quota_goals *damos_sysfs_quota_goals_alloc(void)
{
	return kzalloc(sizeof(struct damos_sysfs_quota_goals), GFP_KERNEL);
}

static void damos_sysfs_quota_goals_rm_dirs(
		struct damos_sysfs_quota_goals *goals)
{
	struct damos_sysfs_quota_goal **goals_arr = goals->goals_arr;
	int i;

	for (i = 0; i < goals->nr; i++)
		kobject_put(&goals_arr[i]->kobj);
	goals->nr = 0;
	kfree(goals_arr);
	goals->goals_arr = NULL;
}

static int damos_sysfs_quota_goals_add_dirs(
		struct damos_sysfs_quota_goals *goals, int nr_goals)
{
	struct damos_sysfs_quota_goal **goals_arr, *goal;
	int err, i;

	damos_sysfs_quota_goals_rm_dirs(goals);
	if (!nr_goals)
		return 0;

	goals_arr = kmalloc_array(nr_goals, sizeof(*goals_arr),
			GFP_KERNEL | __GFP_NOWARN);
	if (!goals_arr)
		return -ENOMEM;
	goals->goals_arr = goals_arr;

	for (i = 0; i < nr_goals; i++) {
		goal = damos_sysfs_quota_goal_alloc();
		if (!goal) {
			damos_sysfs_quota_goals_rm_dirs(goals);
			return -ENOMEM;
		}

		err = kobject_init_and_add(&goal->kobj,
				&damos_sysfs_quota_goal_ktype, &goals->kobj,
				"%d", i);
		if (err) {
			kobject_put(&goal->kobj);
			damos_sysfs_quota_goals_rm_dirs(goals);
			return err;
		}

		goals_arr[i] = goal;
		goals->nr++;
	}
	return 0;
}

static ssize_t nr_goals_show(struct kobject *kobj,
		struct kobj_attribute *attr, char *buf)
{
	struct damos_sysfs_quota_goals *goals =
container_of(kobj, 1011 struct damos_sysfs_quota_goals, kobj); 1012 1013 return sysfs_emit(buf, "%d\n", goals->nr); 1014 } 1015 1016 static ssize_t nr_goals_store(struct kobject *kobj, 1017 struct kobj_attribute *attr, const char *buf, size_t count) 1018 { 1019 struct damos_sysfs_quota_goals *goals; 1020 int nr, err = kstrtoint(buf, 0, &nr); 1021 1022 if (err) 1023 return err; 1024 if (nr < 0) 1025 return -EINVAL; 1026 1027 goals = container_of(kobj, struct damos_sysfs_quota_goals, kobj); 1028 1029 if (!mutex_trylock(&damon_sysfs_lock)) 1030 return -EBUSY; 1031 err = damos_sysfs_quota_goals_add_dirs(goals, nr); 1032 mutex_unlock(&damon_sysfs_lock); 1033 if (err) 1034 return err; 1035 1036 return count; 1037 } 1038 1039 static void damos_sysfs_quota_goals_release(struct kobject *kobj) 1040 { 1041 kfree(container_of(kobj, struct damos_sysfs_quota_goals, kobj)); 1042 } 1043 1044 static struct kobj_attribute damos_sysfs_quota_goals_nr_attr = 1045 __ATTR_RW_MODE(nr_goals, 0600); 1046 1047 static struct attribute *damos_sysfs_quota_goals_attrs[] = { 1048 &damos_sysfs_quota_goals_nr_attr.attr, 1049 NULL, 1050 }; 1051 ATTRIBUTE_GROUPS(damos_sysfs_quota_goals); 1052 1053 static const struct kobj_type damos_sysfs_quota_goals_ktype = { 1054 .release = damos_sysfs_quota_goals_release, 1055 .sysfs_ops = &kobj_sysfs_ops, 1056 .default_groups = damos_sysfs_quota_goals_groups, 1057 }; 1058 1059 /* 1060 * scheme/weights directory 1061 */ 1062 1063 struct damon_sysfs_weights { 1064 struct kobject kobj; 1065 unsigned int sz; 1066 unsigned int nr_accesses; 1067 unsigned int age; 1068 }; 1069 1070 static struct damon_sysfs_weights *damon_sysfs_weights_alloc(unsigned int sz, 1071 unsigned int nr_accesses, unsigned int age) 1072 { 1073 struct damon_sysfs_weights *weights = kmalloc(sizeof(*weights), 1074 GFP_KERNEL); 1075 1076 if (!weights) 1077 return NULL; 1078 weights->kobj = (struct kobject){}; 1079 weights->sz = sz; 1080 weights->nr_accesses = nr_accesses; 1081 weights->age = age; 1082 return weights; 1083 } 1084 1085 static ssize_t sz_permil_show(struct kobject *kobj, 1086 struct kobj_attribute *attr, char *buf) 1087 { 1088 struct damon_sysfs_weights *weights = container_of(kobj, 1089 struct damon_sysfs_weights, kobj); 1090 1091 return sysfs_emit(buf, "%u\n", weights->sz); 1092 } 1093 1094 static ssize_t sz_permil_store(struct kobject *kobj, 1095 struct kobj_attribute *attr, const char *buf, size_t count) 1096 { 1097 struct damon_sysfs_weights *weights = container_of(kobj, 1098 struct damon_sysfs_weights, kobj); 1099 int err = kstrtouint(buf, 0, &weights->sz); 1100 1101 return err ? err : count; 1102 } 1103 1104 static ssize_t nr_accesses_permil_show(struct kobject *kobj, 1105 struct kobj_attribute *attr, char *buf) 1106 { 1107 struct damon_sysfs_weights *weights = container_of(kobj, 1108 struct damon_sysfs_weights, kobj); 1109 1110 return sysfs_emit(buf, "%u\n", weights->nr_accesses); 1111 } 1112 1113 static ssize_t nr_accesses_permil_store(struct kobject *kobj, 1114 struct kobj_attribute *attr, const char *buf, size_t count) 1115 { 1116 struct damon_sysfs_weights *weights = container_of(kobj, 1117 struct damon_sysfs_weights, kobj); 1118 int err = kstrtouint(buf, 0, &weights->nr_accesses); 1119 1120 return err ? 
err : count;
}

static ssize_t age_permil_show(struct kobject *kobj,
		struct kobj_attribute *attr, char *buf)
{
	struct damon_sysfs_weights *weights = container_of(kobj,
			struct damon_sysfs_weights, kobj);

	return sysfs_emit(buf, "%u\n", weights->age);
}

static ssize_t age_permil_store(struct kobject *kobj,
		struct kobj_attribute *attr, const char *buf, size_t count)
{
	struct damon_sysfs_weights *weights = container_of(kobj,
			struct damon_sysfs_weights, kobj);
	int err = kstrtouint(buf, 0, &weights->age);

	return err ? err : count;
}

static void damon_sysfs_weights_release(struct kobject *kobj)
{
	kfree(container_of(kobj, struct damon_sysfs_weights, kobj));
}

static struct kobj_attribute damon_sysfs_weights_sz_attr =
		__ATTR_RW_MODE(sz_permil, 0600);

static struct kobj_attribute damon_sysfs_weights_nr_accesses_attr =
		__ATTR_RW_MODE(nr_accesses_permil, 0600);

static struct kobj_attribute damon_sysfs_weights_age_attr =
		__ATTR_RW_MODE(age_permil, 0600);

static struct attribute *damon_sysfs_weights_attrs[] = {
	&damon_sysfs_weights_sz_attr.attr,
	&damon_sysfs_weights_nr_accesses_attr.attr,
	&damon_sysfs_weights_age_attr.attr,
	NULL,
};
ATTRIBUTE_GROUPS(damon_sysfs_weights);

static const struct kobj_type damon_sysfs_weights_ktype = {
	.release = damon_sysfs_weights_release,
	.sysfs_ops = &kobj_sysfs_ops,
	.default_groups = damon_sysfs_weights_groups,
};

/*
 * quotas directory
 */

struct damon_sysfs_quotas {
	struct kobject kobj;
	struct damon_sysfs_weights *weights;
	struct damos_sysfs_quota_goals *goals;
	unsigned long ms;
	unsigned long sz;
	unsigned long reset_interval_ms;
	unsigned long effective_sz;	/* Effective size quota in bytes */
};

static struct damon_sysfs_quotas *damon_sysfs_quotas_alloc(void)
{
	return kzalloc(sizeof(struct damon_sysfs_quotas), GFP_KERNEL);
}

static int damon_sysfs_quotas_add_dirs(struct damon_sysfs_quotas *quotas)
{
	struct damon_sysfs_weights *weights;
	struct damos_sysfs_quota_goals *goals;
	int err;

	weights = damon_sysfs_weights_alloc(0, 0, 0);
	if (!weights)
		return -ENOMEM;

	err = kobject_init_and_add(&weights->kobj, &damon_sysfs_weights_ktype,
			&quotas->kobj, "weights");
	if (err) {
		kobject_put(&weights->kobj);
		return err;
	}
	quotas->weights = weights;

	goals = damos_sysfs_quota_goals_alloc();
	if (!goals) {
		kobject_put(&weights->kobj);
		return -ENOMEM;
	}
	err = kobject_init_and_add(&goals->kobj,
			&damos_sysfs_quota_goals_ktype, &quotas->kobj,
			"goals");
	if (err) {
		kobject_put(&weights->kobj);
		kobject_put(&goals->kobj);
	} else {
		quotas->goals = goals;
	}

	return err;
}

static void damon_sysfs_quotas_rm_dirs(struct damon_sysfs_quotas *quotas)
{
	kobject_put(&quotas->weights->kobj);
	damos_sysfs_quota_goals_rm_dirs(quotas->goals);
	kobject_put(&quotas->goals->kobj);
}
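
/*
 * Note on the files below (informal summary; see the DAMON usage document
 * for the authoritative description): 'ms' and 'bytes' limit, per
 * 'reset_interval_ms' time window, how much time the scheme may consume and
 * how much memory it may apply its action to.  For example, bytes=1073741824
 * with reset_interval_ms=1000 roughly caps the scheme at 1 GiB worth of
 * action application per second.  'effective_bytes' exposes the in-kernel,
 * possibly goal-adjusted size quota (->effective_sz).
 */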

static ssize_t ms_show(struct kobject *kobj, struct kobj_attribute *attr,
		char *buf)
{
	struct damon_sysfs_quotas *quotas = container_of(kobj,
			struct damon_sysfs_quotas, kobj);

	return sysfs_emit(buf, "%lu\n", quotas->ms);
}

static ssize_t ms_store(struct kobject *kobj, struct kobj_attribute *attr,
		const char *buf, size_t count)
{
	struct damon_sysfs_quotas *quotas = container_of(kobj,
			struct damon_sysfs_quotas, kobj);
	int err = kstrtoul(buf, 0, &quotas->ms);

	if (err)
		return -EINVAL;
	return count;
}

static ssize_t bytes_show(struct kobject *kobj, struct kobj_attribute *attr,
		char *buf)
{
	struct damon_sysfs_quotas *quotas = container_of(kobj,
			struct damon_sysfs_quotas, kobj);

	return sysfs_emit(buf, "%lu\n", quotas->sz);
}

static ssize_t bytes_store(struct kobject *kobj,
		struct kobj_attribute *attr, const char *buf, size_t count)
{
	struct damon_sysfs_quotas *quotas = container_of(kobj,
			struct damon_sysfs_quotas, kobj);
	int err = kstrtoul(buf, 0, &quotas->sz);

	if (err)
		return -EINVAL;
	return count;
}

static ssize_t reset_interval_ms_show(struct kobject *kobj,
		struct kobj_attribute *attr, char *buf)
{
	struct damon_sysfs_quotas *quotas = container_of(kobj,
			struct damon_sysfs_quotas, kobj);

	return sysfs_emit(buf, "%lu\n", quotas->reset_interval_ms);
}

static ssize_t reset_interval_ms_store(struct kobject *kobj,
		struct kobj_attribute *attr, const char *buf, size_t count)
{
	struct damon_sysfs_quotas *quotas = container_of(kobj,
			struct damon_sysfs_quotas, kobj);
	int err = kstrtoul(buf, 0, &quotas->reset_interval_ms);

	if (err)
		return -EINVAL;
	return count;
}

static ssize_t effective_bytes_show(struct kobject *kobj,
		struct kobj_attribute *attr, char *buf)
{
	struct damon_sysfs_quotas *quotas = container_of(kobj,
			struct damon_sysfs_quotas, kobj);

	return sysfs_emit(buf, "%lu\n", quotas->effective_sz);
}

static void damon_sysfs_quotas_release(struct kobject *kobj)
{
	kfree(container_of(kobj, struct damon_sysfs_quotas, kobj));
}

static struct kobj_attribute damon_sysfs_quotas_ms_attr =
		__ATTR_RW_MODE(ms, 0600);

static struct kobj_attribute damon_sysfs_quotas_sz_attr =
		__ATTR_RW_MODE(bytes, 0600);

static struct kobj_attribute damon_sysfs_quotas_reset_interval_ms_attr =
		__ATTR_RW_MODE(reset_interval_ms, 0600);

static struct kobj_attribute damon_sysfs_quotas_effective_bytes_attr =
		__ATTR_RO_MODE(effective_bytes, 0400);

static struct attribute *damon_sysfs_quotas_attrs[] = {
	&damon_sysfs_quotas_ms_attr.attr,
	&damon_sysfs_quotas_sz_attr.attr,
	&damon_sysfs_quotas_reset_interval_ms_attr.attr,
	&damon_sysfs_quotas_effective_bytes_attr.attr,
	NULL,
};
ATTRIBUTE_GROUPS(damon_sysfs_quotas);

static const struct kobj_type damon_sysfs_quotas_ktype = {
	.release = damon_sysfs_quotas_release,
	.sysfs_ops = &kobj_sysfs_ops,
	.default_groups = damon_sysfs_quotas_groups,
};

/*
 * access_pattern directory
 */

struct damon_sysfs_access_pattern {
	struct kobject kobj;
	struct damon_sysfs_ul_range *sz;
	struct damon_sysfs_ul_range *nr_accesses;
	struct damon_sysfs_ul_range *age;
};

static
struct damon_sysfs_access_pattern *damon_sysfs_access_pattern_alloc(void)
{
	struct damon_sysfs_access_pattern *access_pattern =
		kmalloc(sizeof(*access_pattern), GFP_KERNEL);

	if (!access_pattern)
		return NULL;
	access_pattern->kobj = (struct kobject){};
	return
access_pattern; 1357 } 1358 1359 static int damon_sysfs_access_pattern_add_range_dir( 1360 struct damon_sysfs_access_pattern *access_pattern, 1361 struct damon_sysfs_ul_range **range_dir_ptr, 1362 char *name) 1363 { 1364 struct damon_sysfs_ul_range *range = damon_sysfs_ul_range_alloc(0, 0); 1365 int err; 1366 1367 if (!range) 1368 return -ENOMEM; 1369 err = kobject_init_and_add(&range->kobj, &damon_sysfs_ul_range_ktype, 1370 &access_pattern->kobj, name); 1371 if (err) 1372 kobject_put(&range->kobj); 1373 else 1374 *range_dir_ptr = range; 1375 return err; 1376 } 1377 1378 static int damon_sysfs_access_pattern_add_dirs( 1379 struct damon_sysfs_access_pattern *access_pattern) 1380 { 1381 int err; 1382 1383 err = damon_sysfs_access_pattern_add_range_dir(access_pattern, 1384 &access_pattern->sz, "sz"); 1385 if (err) 1386 goto put_sz_out; 1387 1388 err = damon_sysfs_access_pattern_add_range_dir(access_pattern, 1389 &access_pattern->nr_accesses, "nr_accesses"); 1390 if (err) 1391 goto put_nr_accesses_sz_out; 1392 1393 err = damon_sysfs_access_pattern_add_range_dir(access_pattern, 1394 &access_pattern->age, "age"); 1395 if (err) 1396 goto put_age_nr_accesses_sz_out; 1397 return 0; 1398 1399 put_age_nr_accesses_sz_out: 1400 kobject_put(&access_pattern->age->kobj); 1401 access_pattern->age = NULL; 1402 put_nr_accesses_sz_out: 1403 kobject_put(&access_pattern->nr_accesses->kobj); 1404 access_pattern->nr_accesses = NULL; 1405 put_sz_out: 1406 kobject_put(&access_pattern->sz->kobj); 1407 access_pattern->sz = NULL; 1408 return err; 1409 } 1410 1411 static void damon_sysfs_access_pattern_rm_dirs( 1412 struct damon_sysfs_access_pattern *access_pattern) 1413 { 1414 kobject_put(&access_pattern->sz->kobj); 1415 kobject_put(&access_pattern->nr_accesses->kobj); 1416 kobject_put(&access_pattern->age->kobj); 1417 } 1418 1419 static void damon_sysfs_access_pattern_release(struct kobject *kobj) 1420 { 1421 kfree(container_of(kobj, struct damon_sysfs_access_pattern, kobj)); 1422 } 1423 1424 static struct attribute *damon_sysfs_access_pattern_attrs[] = { 1425 NULL, 1426 }; 1427 ATTRIBUTE_GROUPS(damon_sysfs_access_pattern); 1428 1429 static const struct kobj_type damon_sysfs_access_pattern_ktype = { 1430 .release = damon_sysfs_access_pattern_release, 1431 .sysfs_ops = &kobj_sysfs_ops, 1432 .default_groups = damon_sysfs_access_pattern_groups, 1433 }; 1434 1435 /* 1436 * scheme directory 1437 */ 1438 1439 struct damon_sysfs_scheme { 1440 struct kobject kobj; 1441 enum damos_action action; 1442 struct damon_sysfs_access_pattern *access_pattern; 1443 unsigned long apply_interval_us; 1444 struct damon_sysfs_quotas *quotas; 1445 struct damon_sysfs_watermarks *watermarks; 1446 struct damon_sysfs_scheme_filters *filters; 1447 struct damon_sysfs_stats *stats; 1448 struct damon_sysfs_scheme_regions *tried_regions; 1449 int target_nid; 1450 }; 1451 1452 /* This should match with enum damos_action */ 1453 static const char * const damon_sysfs_damos_action_strs[] = { 1454 "willneed", 1455 "cold", 1456 "pageout", 1457 "hugepage", 1458 "nohugepage", 1459 "lru_prio", 1460 "lru_deprio", 1461 "migrate_hot", 1462 "migrate_cold", 1463 "stat", 1464 }; 1465 1466 static struct damon_sysfs_scheme *damon_sysfs_scheme_alloc( 1467 enum damos_action action, unsigned long apply_interval_us) 1468 { 1469 struct damon_sysfs_scheme *scheme = kmalloc(sizeof(*scheme), 1470 GFP_KERNEL); 1471 1472 if (!scheme) 1473 return NULL; 1474 scheme->kobj = (struct kobject){}; 1475 scheme->action = action; 1476 scheme->apply_interval_us = apply_interval_us; 1477 
	scheme->target_nid = NUMA_NO_NODE;
	return scheme;
}

static int damon_sysfs_scheme_set_access_pattern(
		struct damon_sysfs_scheme *scheme)
{
	struct damon_sysfs_access_pattern *access_pattern;
	int err;

	access_pattern = damon_sysfs_access_pattern_alloc();
	if (!access_pattern)
		return -ENOMEM;
	err = kobject_init_and_add(&access_pattern->kobj,
			&damon_sysfs_access_pattern_ktype, &scheme->kobj,
			"access_pattern");
	if (err)
		goto out;
	err = damon_sysfs_access_pattern_add_dirs(access_pattern);
	if (err)
		goto out;
	scheme->access_pattern = access_pattern;
	return 0;

out:
	kobject_put(&access_pattern->kobj);
	return err;
}

static int damon_sysfs_scheme_set_quotas(struct damon_sysfs_scheme *scheme)
{
	struct damon_sysfs_quotas *quotas = damon_sysfs_quotas_alloc();
	int err;

	if (!quotas)
		return -ENOMEM;
	err = kobject_init_and_add(&quotas->kobj, &damon_sysfs_quotas_ktype,
			&scheme->kobj, "quotas");
	if (err)
		goto out;
	err = damon_sysfs_quotas_add_dirs(quotas);
	if (err)
		goto out;
	scheme->quotas = quotas;
	return 0;

out:
	kobject_put(&quotas->kobj);
	return err;
}

static int damon_sysfs_scheme_set_watermarks(struct damon_sysfs_scheme *scheme)
{
	struct damon_sysfs_watermarks *watermarks =
		damon_sysfs_watermarks_alloc(DAMOS_WMARK_NONE, 0, 0, 0, 0);
	int err;

	if (!watermarks)
		return -ENOMEM;
	err = kobject_init_and_add(&watermarks->kobj,
			&damon_sysfs_watermarks_ktype, &scheme->kobj,
			"watermarks");
	if (err)
		kobject_put(&watermarks->kobj);
	else
		scheme->watermarks = watermarks;
	return err;
}

static int damon_sysfs_scheme_set_filters(struct damon_sysfs_scheme *scheme)
{
	struct damon_sysfs_scheme_filters *filters =
		damon_sysfs_scheme_filters_alloc();
	int err;

	if (!filters)
		return -ENOMEM;
	err = kobject_init_and_add(&filters->kobj,
			&damon_sysfs_scheme_filters_ktype, &scheme->kobj,
			"filters");
	if (err)
		kobject_put(&filters->kobj);
	else
		scheme->filters = filters;
	return err;
}

static int damon_sysfs_scheme_set_stats(struct damon_sysfs_scheme *scheme)
{
	struct damon_sysfs_stats *stats = damon_sysfs_stats_alloc();
	int err;

	if (!stats)
		return -ENOMEM;
	err = kobject_init_and_add(&stats->kobj, &damon_sysfs_stats_ktype,
			&scheme->kobj, "stats");
	if (err)
		kobject_put(&stats->kobj);
	else
		scheme->stats = stats;
	return err;
}

static int damon_sysfs_scheme_set_tried_regions(
		struct damon_sysfs_scheme *scheme)
{
	struct damon_sysfs_scheme_regions *tried_regions =
		damon_sysfs_scheme_regions_alloc();
	int err;

	if (!tried_regions)
		return -ENOMEM;
	err = kobject_init_and_add(&tried_regions->kobj,
			&damon_sysfs_scheme_regions_ktype, &scheme->kobj,
			"tried_regions");
	if (err)
		kobject_put(&tried_regions->kobj);
	else
		scheme->tried_regions = tried_regions;
	return err;
}

static int damon_sysfs_scheme_add_dirs(struct damon_sysfs_scheme *scheme)
{
	int err;

	err = damon_sysfs_scheme_set_access_pattern(scheme);
	if (err)
		return err;
	err = damon_sysfs_scheme_set_quotas(scheme);
	if (err)
		goto put_access_pattern_out;
	err =
damon_sysfs_scheme_set_watermarks(scheme); 1610 if (err) 1611 goto put_quotas_access_pattern_out; 1612 err = damon_sysfs_scheme_set_filters(scheme); 1613 if (err) 1614 goto put_watermarks_quotas_access_pattern_out; 1615 err = damon_sysfs_scheme_set_stats(scheme); 1616 if (err) 1617 goto put_filters_watermarks_quotas_access_pattern_out; 1618 err = damon_sysfs_scheme_set_tried_regions(scheme); 1619 if (err) 1620 goto put_tried_regions_out; 1621 return 0; 1622 1623 put_tried_regions_out: 1624 kobject_put(&scheme->tried_regions->kobj); 1625 scheme->tried_regions = NULL; 1626 put_filters_watermarks_quotas_access_pattern_out: 1627 kobject_put(&scheme->filters->kobj); 1628 scheme->filters = NULL; 1629 put_watermarks_quotas_access_pattern_out: 1630 kobject_put(&scheme->watermarks->kobj); 1631 scheme->watermarks = NULL; 1632 put_quotas_access_pattern_out: 1633 kobject_put(&scheme->quotas->kobj); 1634 scheme->quotas = NULL; 1635 put_access_pattern_out: 1636 kobject_put(&scheme->access_pattern->kobj); 1637 scheme->access_pattern = NULL; 1638 return err; 1639 } 1640 1641 static void damon_sysfs_scheme_rm_dirs(struct damon_sysfs_scheme *scheme) 1642 { 1643 damon_sysfs_access_pattern_rm_dirs(scheme->access_pattern); 1644 kobject_put(&scheme->access_pattern->kobj); 1645 damon_sysfs_quotas_rm_dirs(scheme->quotas); 1646 kobject_put(&scheme->quotas->kobj); 1647 kobject_put(&scheme->watermarks->kobj); 1648 damon_sysfs_scheme_filters_rm_dirs(scheme->filters); 1649 kobject_put(&scheme->filters->kobj); 1650 kobject_put(&scheme->stats->kobj); 1651 damon_sysfs_scheme_regions_rm_dirs(scheme->tried_regions); 1652 kobject_put(&scheme->tried_regions->kobj); 1653 } 1654 1655 static ssize_t action_show(struct kobject *kobj, struct kobj_attribute *attr, 1656 char *buf) 1657 { 1658 struct damon_sysfs_scheme *scheme = container_of(kobj, 1659 struct damon_sysfs_scheme, kobj); 1660 1661 return sysfs_emit(buf, "%s\n", 1662 damon_sysfs_damos_action_strs[scheme->action]); 1663 } 1664 1665 static ssize_t action_store(struct kobject *kobj, struct kobj_attribute *attr, 1666 const char *buf, size_t count) 1667 { 1668 struct damon_sysfs_scheme *scheme = container_of(kobj, 1669 struct damon_sysfs_scheme, kobj); 1670 enum damos_action action; 1671 1672 for (action = 0; action < NR_DAMOS_ACTIONS; action++) { 1673 if (sysfs_streq(buf, damon_sysfs_damos_action_strs[action])) { 1674 scheme->action = action; 1675 return count; 1676 } 1677 } 1678 return -EINVAL; 1679 } 1680 1681 static ssize_t apply_interval_us_show(struct kobject *kobj, 1682 struct kobj_attribute *attr, char *buf) 1683 { 1684 struct damon_sysfs_scheme *scheme = container_of(kobj, 1685 struct damon_sysfs_scheme, kobj); 1686 1687 return sysfs_emit(buf, "%lu\n", scheme->apply_interval_us); 1688 } 1689 1690 static ssize_t apply_interval_us_store(struct kobject *kobj, 1691 struct kobj_attribute *attr, const char *buf, size_t count) 1692 { 1693 struct damon_sysfs_scheme *scheme = container_of(kobj, 1694 struct damon_sysfs_scheme, kobj); 1695 int err = kstrtoul(buf, 0, &scheme->apply_interval_us); 1696 1697 return err ? 
err : count; 1698 } 1699 1700 static ssize_t target_nid_show(struct kobject *kobj, 1701 struct kobj_attribute *attr, char *buf) 1702 { 1703 struct damon_sysfs_scheme *scheme = container_of(kobj, 1704 struct damon_sysfs_scheme, kobj); 1705 1706 return sysfs_emit(buf, "%d\n", scheme->target_nid); 1707 } 1708 1709 static ssize_t target_nid_store(struct kobject *kobj, 1710 struct kobj_attribute *attr, const char *buf, size_t count) 1711 { 1712 struct damon_sysfs_scheme *scheme = container_of(kobj, 1713 struct damon_sysfs_scheme, kobj); 1714 int err = 0; 1715 1716 /* TODO: error handling for target_nid range. */ 1717 err = kstrtoint(buf, 0, &scheme->target_nid); 1718 1719 return err ? err : count; 1720 } 1721 1722 static void damon_sysfs_scheme_release(struct kobject *kobj) 1723 { 1724 kfree(container_of(kobj, struct damon_sysfs_scheme, kobj)); 1725 } 1726 1727 static struct kobj_attribute damon_sysfs_scheme_action_attr = 1728 __ATTR_RW_MODE(action, 0600); 1729 1730 static struct kobj_attribute damon_sysfs_scheme_apply_interval_us_attr = 1731 __ATTR_RW_MODE(apply_interval_us, 0600); 1732 1733 static struct kobj_attribute damon_sysfs_scheme_target_nid_attr = 1734 __ATTR_RW_MODE(target_nid, 0600); 1735 1736 static struct attribute *damon_sysfs_scheme_attrs[] = { 1737 &damon_sysfs_scheme_action_attr.attr, 1738 &damon_sysfs_scheme_apply_interval_us_attr.attr, 1739 &damon_sysfs_scheme_target_nid_attr.attr, 1740 NULL, 1741 }; 1742 ATTRIBUTE_GROUPS(damon_sysfs_scheme); 1743 1744 static const struct kobj_type damon_sysfs_scheme_ktype = { 1745 .release = damon_sysfs_scheme_release, 1746 .sysfs_ops = &kobj_sysfs_ops, 1747 .default_groups = damon_sysfs_scheme_groups, 1748 }; 1749 1750 /* 1751 * schemes directory 1752 */ 1753 1754 struct damon_sysfs_schemes *damon_sysfs_schemes_alloc(void) 1755 { 1756 return kzalloc(sizeof(struct damon_sysfs_schemes), GFP_KERNEL); 1757 } 1758 1759 void damon_sysfs_schemes_rm_dirs(struct damon_sysfs_schemes *schemes) 1760 { 1761 struct damon_sysfs_scheme **schemes_arr = schemes->schemes_arr; 1762 int i; 1763 1764 for (i = 0; i < schemes->nr; i++) { 1765 damon_sysfs_scheme_rm_dirs(schemes_arr[i]); 1766 kobject_put(&schemes_arr[i]->kobj); 1767 } 1768 schemes->nr = 0; 1769 kfree(schemes_arr); 1770 schemes->schemes_arr = NULL; 1771 } 1772 1773 static int damon_sysfs_schemes_add_dirs(struct damon_sysfs_schemes *schemes, 1774 int nr_schemes) 1775 { 1776 struct damon_sysfs_scheme **schemes_arr, *scheme; 1777 int err, i; 1778 1779 damon_sysfs_schemes_rm_dirs(schemes); 1780 if (!nr_schemes) 1781 return 0; 1782 1783 schemes_arr = kmalloc_array(nr_schemes, sizeof(*schemes_arr), 1784 GFP_KERNEL | __GFP_NOWARN); 1785 if (!schemes_arr) 1786 return -ENOMEM; 1787 schemes->schemes_arr = schemes_arr; 1788 1789 for (i = 0; i < nr_schemes; i++) { 1790 /* 1791 * apply_interval_us as 0 means same to aggregation interval 1792 * (same to before-apply_interval behavior) 1793 */ 1794 scheme = damon_sysfs_scheme_alloc(DAMOS_STAT, 0); 1795 if (!scheme) { 1796 damon_sysfs_schemes_rm_dirs(schemes); 1797 return -ENOMEM; 1798 } 1799 1800 err = kobject_init_and_add(&scheme->kobj, 1801 &damon_sysfs_scheme_ktype, &schemes->kobj, 1802 "%d", i); 1803 if (err) 1804 goto out; 1805 err = damon_sysfs_scheme_add_dirs(scheme); 1806 if (err) 1807 goto out; 1808 1809 schemes_arr[i] = scheme; 1810 schemes->nr++; 1811 } 1812 return 0; 1813 1814 out: 1815 damon_sysfs_schemes_rm_dirs(schemes); 1816 kobject_put(&scheme->kobj); 1817 return err; 1818 } 1819 1820 static ssize_t nr_schemes_show(struct kobject *kobj, 1821 
struct kobj_attribute *attr, char *buf) 1822 { 1823 struct damon_sysfs_schemes *schemes = container_of(kobj, 1824 struct damon_sysfs_schemes, kobj); 1825 1826 return sysfs_emit(buf, "%d\n", schemes->nr); 1827 } 1828 1829 static ssize_t nr_schemes_store(struct kobject *kobj, 1830 struct kobj_attribute *attr, const char *buf, size_t count) 1831 { 1832 struct damon_sysfs_schemes *schemes; 1833 int nr, err = kstrtoint(buf, 0, &nr); 1834 1835 if (err) 1836 return err; 1837 if (nr < 0) 1838 return -EINVAL; 1839 1840 schemes = container_of(kobj, struct damon_sysfs_schemes, kobj); 1841 1842 if (!mutex_trylock(&damon_sysfs_lock)) 1843 return -EBUSY; 1844 err = damon_sysfs_schemes_add_dirs(schemes, nr); 1845 mutex_unlock(&damon_sysfs_lock); 1846 if (err) 1847 return err; 1848 return count; 1849 } 1850 1851 static void damon_sysfs_schemes_release(struct kobject *kobj) 1852 { 1853 kfree(container_of(kobj, struct damon_sysfs_schemes, kobj)); 1854 } 1855 1856 static struct kobj_attribute damon_sysfs_schemes_nr_attr = 1857 __ATTR_RW_MODE(nr_schemes, 0600); 1858 1859 static struct attribute *damon_sysfs_schemes_attrs[] = { 1860 &damon_sysfs_schemes_nr_attr.attr, 1861 NULL, 1862 }; 1863 ATTRIBUTE_GROUPS(damon_sysfs_schemes); 1864 1865 const struct kobj_type damon_sysfs_schemes_ktype = { 1866 .release = damon_sysfs_schemes_release, 1867 .sysfs_ops = &kobj_sysfs_ops, 1868 .default_groups = damon_sysfs_schemes_groups, 1869 }; 1870 1871 static bool damon_sysfs_memcg_path_eq(struct mem_cgroup *memcg, 1872 char *memcg_path_buf, char *path) 1873 { 1874 #ifdef CONFIG_MEMCG 1875 cgroup_path(memcg->css.cgroup, memcg_path_buf, PATH_MAX); 1876 if (sysfs_streq(memcg_path_buf, path)) 1877 return true; 1878 #endif /* CONFIG_MEMCG */ 1879 return false; 1880 } 1881 1882 static int damon_sysfs_memcg_path_to_id(char *memcg_path, unsigned short *id) 1883 { 1884 struct mem_cgroup *memcg; 1885 char *path; 1886 bool found = false; 1887 1888 if (!memcg_path) 1889 return -EINVAL; 1890 1891 path = kmalloc(sizeof(*path) * PATH_MAX, GFP_KERNEL); 1892 if (!path) 1893 return -ENOMEM; 1894 1895 for (memcg = mem_cgroup_iter(NULL, NULL, NULL); memcg; 1896 memcg = mem_cgroup_iter(NULL, memcg, NULL)) { 1897 /* skip removed memcg */ 1898 if (!mem_cgroup_id(memcg)) 1899 continue; 1900 if (damon_sysfs_memcg_path_eq(memcg, path, memcg_path)) { 1901 *id = mem_cgroup_id(memcg); 1902 found = true; 1903 break; 1904 } 1905 } 1906 1907 kfree(path); 1908 return found ? 
static int damon_sysfs_add_scheme_filters(struct damos *scheme,
		struct damon_sysfs_scheme_filters *sysfs_filters)
{
	int i;

	for (i = 0; i < sysfs_filters->nr; i++) {
		struct damon_sysfs_scheme_filter *sysfs_filter =
			sysfs_filters->filters_arr[i];
		struct damos_filter *filter =
			damos_new_filter(sysfs_filter->type,
					sysfs_filter->matching);
		int err;

		if (!filter)
			return -ENOMEM;
		if (filter->type == DAMOS_FILTER_TYPE_MEMCG) {
			err = damon_sysfs_memcg_path_to_id(
					sysfs_filter->memcg_path,
					&filter->memcg_id);
			if (err) {
				damos_destroy_filter(filter);
				return err;
			}
		} else if (filter->type == DAMOS_FILTER_TYPE_ADDR) {
			if (sysfs_filter->addr_range.end <
					sysfs_filter->addr_range.start) {
				damos_destroy_filter(filter);
				return -EINVAL;
			}
			filter->addr_range = sysfs_filter->addr_range;
		} else if (filter->type == DAMOS_FILTER_TYPE_TARGET) {
			filter->target_idx = sysfs_filter->target_idx;
		}

		damos_add_filter(scheme, filter);
	}
	return 0;
}

static int damos_sysfs_add_quota_score(
		struct damos_sysfs_quota_goals *sysfs_goals,
		struct damos_quota *quota)
{
	struct damos_quota_goal *goal;
	int i;

	for (i = 0; i < sysfs_goals->nr; i++) {
		struct damos_sysfs_quota_goal *sysfs_goal =
			sysfs_goals->goals_arr[i];

		if (!sysfs_goal->target_value)
			continue;

		goal = damos_new_quota_goal(sysfs_goal->metric,
				sysfs_goal->target_value);
		if (!goal)
			return -ENOMEM;
		if (sysfs_goal->metric == DAMOS_QUOTA_USER_INPUT)
			goal->current_value = sysfs_goal->current_value;
		damos_add_quota_goal(quota, goal);
	}
	return 0;
}

int damos_sysfs_set_quota_scores(struct damon_sysfs_schemes *sysfs_schemes,
		struct damon_ctx *ctx)
{
	struct damos *scheme;
	struct damos_quota quota = {};
	int i = 0;

	INIT_LIST_HEAD(&quota.goals);
	damon_for_each_scheme(scheme, ctx) {
		struct damon_sysfs_scheme *sysfs_scheme;
		struct damos_quota_goal *g, *g_next;
		int err;

		/* user could have removed the scheme sysfs dir */
		if (i >= sysfs_schemes->nr)
			break;

		sysfs_scheme = sysfs_schemes->schemes_arr[i];
		err = damos_sysfs_add_quota_score(sysfs_scheme->quotas->goals,
				&quota);
		if (err) {
			damos_for_each_quota_goal_safe(g, g_next, &quota)
				damos_destroy_quota_goal(g);
			return err;
		}
		err = damos_commit_quota_goals(&scheme->quota, &quota);
		damos_for_each_quota_goal_safe(g, g_next, &quota)
			damos_destroy_quota_goal(g);
		if (err)
			return err;
		i++;
	}
	return 0;
}

void damos_sysfs_update_effective_quotas(
		struct damon_sysfs_schemes *sysfs_schemes,
		struct damon_ctx *ctx)
{
	struct damos *scheme;
	int schemes_idx = 0;

	damon_for_each_scheme(scheme, ctx) {
		struct damon_sysfs_quotas *sysfs_quotas;

		/* user could have removed the scheme sysfs dir */
		if (schemes_idx >= sysfs_schemes->nr)
			break;

		sysfs_quotas =
			sysfs_schemes->schemes_arr[schemes_idx++]->quotas;
		sysfs_quotas->effective_sz = scheme->quota.esz;
	}
}
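/*
 * damon_sysfs_mk_scheme() below translates one scheme sysfs directory into a
 * 'struct damos'.  A rough usage sketch (the real caller is
 * damon_sysfs_add_schemes() further below):
 *
 *	struct damos *scheme;
 *
 *	scheme = damon_sysfs_mk_scheme(sysfs_schemes->schemes_arr[0]);
 *	if (scheme)
 *		damon_add_scheme(ctx, scheme);
 *
 * NULL is returned on any failure, including allocation failures while
 * building the quota goals or the filters.
 */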
static struct damos *damon_sysfs_mk_scheme(
		struct damon_sysfs_scheme *sysfs_scheme)
{
	struct damon_sysfs_access_pattern *access_pattern =
		sysfs_scheme->access_pattern;
	struct damon_sysfs_quotas *sysfs_quotas = sysfs_scheme->quotas;
	struct damon_sysfs_weights *sysfs_weights = sysfs_quotas->weights;
	struct damon_sysfs_watermarks *sysfs_wmarks = sysfs_scheme->watermarks;
	struct damon_sysfs_scheme_filters *sysfs_filters =
		sysfs_scheme->filters;
	struct damos *scheme;
	int err;

	struct damos_access_pattern pattern = {
		.min_sz_region = access_pattern->sz->min,
		.max_sz_region = access_pattern->sz->max,
		.min_nr_accesses = access_pattern->nr_accesses->min,
		.max_nr_accesses = access_pattern->nr_accesses->max,
		.min_age_region = access_pattern->age->min,
		.max_age_region = access_pattern->age->max,
	};
	struct damos_quota quota = {
		.ms = sysfs_quotas->ms,
		.sz = sysfs_quotas->sz,
		.reset_interval = sysfs_quotas->reset_interval_ms,
		.weight_sz = sysfs_weights->sz,
		.weight_nr_accesses = sysfs_weights->nr_accesses,
		.weight_age = sysfs_weights->age,
	};
	struct damos_watermarks wmarks = {
		.metric = sysfs_wmarks->metric,
		.interval = sysfs_wmarks->interval_us,
		.high = sysfs_wmarks->high,
		.mid = sysfs_wmarks->mid,
		.low = sysfs_wmarks->low,
	};

	scheme = damon_new_scheme(&pattern, sysfs_scheme->action,
			sysfs_scheme->apply_interval_us, &quota, &wmarks,
			sysfs_scheme->target_nid);
	if (!scheme)
		return NULL;

	err = damos_sysfs_add_quota_score(sysfs_quotas->goals, &scheme->quota);
	if (err) {
		damon_destroy_scheme(scheme);
		return NULL;
	}

	err = damon_sysfs_add_scheme_filters(scheme, sysfs_filters);
	if (err) {
		damon_destroy_scheme(scheme);
		return NULL;
	}
	return scheme;
}

int damon_sysfs_add_schemes(struct damon_ctx *ctx,
		struct damon_sysfs_schemes *sysfs_schemes)
{
	int i;

	for (i = 0; i < sysfs_schemes->nr; i++) {
		struct damos *scheme, *next;

		scheme = damon_sysfs_mk_scheme(sysfs_schemes->schemes_arr[i]);
		if (!scheme) {
			damon_for_each_scheme_safe(scheme, next, ctx)
				damon_destroy_scheme(scheme);
			return -ENOMEM;
		}
		damon_add_scheme(ctx, scheme);
	}
	return 0;
}

void damon_sysfs_schemes_update_stats(
		struct damon_sysfs_schemes *sysfs_schemes,
		struct damon_ctx *ctx)
{
	struct damos *scheme;
	int schemes_idx = 0;

	damon_for_each_scheme(scheme, ctx) {
		struct damon_sysfs_stats *sysfs_stats;

		/* user could have removed the scheme sysfs dir */
		if (schemes_idx >= sysfs_schemes->nr)
			break;

		sysfs_stats = sysfs_schemes->schemes_arr[schemes_idx++]->stats;
		sysfs_stats->nr_tried = scheme->stat.nr_tried;
		sysfs_stats->sz_tried = scheme->stat.sz_tried;
		sysfs_stats->nr_applied = scheme->stat.nr_applied;
		sysfs_stats->sz_applied = scheme->stat.sz_applied;
		sysfs_stats->qt_exceeds = scheme->stat.qt_exceeds;
	}
}

/*
 * The damon_sysfs_schemes object whose schemes' tried regions directories
 * should be updated.  Protected by damon_sysfs_lock.
 */
static struct damon_sysfs_schemes *damon_sysfs_schemes_for_damos_callback;
static int damon_sysfs_schemes_region_idx;
static bool damos_regions_upd_total_bytes_only;
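/*
 * While a tried-regions update is in progress,
 * damon_sysfs_before_damos_apply() below exports each region the scheme is
 * about to be applied to as a numbered directory under that scheme's
 * 'tried_regions' directory.  The directory names come from
 * damon_sysfs_schemes_region_idx, which is only reset by
 * damon_sysfs_schemes_update_regions_stop(), so the numbers are unique across
 * schemes within a single update.
 */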
/*
 * DAMON callback that is called just before a DAMOS scheme is applied to a
 * region.  While this callback is registered, damon_sysfs_lock should be held
 * to ensure the regions directories exist.
 */
static int damon_sysfs_before_damos_apply(struct damon_ctx *ctx,
		struct damon_target *t, struct damon_region *r,
		struct damos *s)
{
	struct damos *scheme;
	struct damon_sysfs_scheme_regions *sysfs_regions;
	struct damon_sysfs_scheme_region *region;
	struct damon_sysfs_schemes *sysfs_schemes =
		damon_sysfs_schemes_for_damos_callback;
	int schemes_idx = 0;

	damon_for_each_scheme(scheme, ctx) {
		if (scheme == s)
			break;
		schemes_idx++;
	}

	/* user could have removed the scheme sysfs dir */
	if (schemes_idx >= sysfs_schemes->nr)
		return 0;

	sysfs_regions = sysfs_schemes->schemes_arr[schemes_idx]->tried_regions;
	if (sysfs_regions->upd_status == DAMOS_TRIED_REGIONS_UPD_FINISHED)
		return 0;
	if (sysfs_regions->upd_status == DAMOS_TRIED_REGIONS_UPD_IDLE)
		sysfs_regions->upd_status = DAMOS_TRIED_REGIONS_UPD_STARTED;
	sysfs_regions->total_bytes += r->ar.end - r->ar.start;
	if (damos_regions_upd_total_bytes_only)
		return 0;

	region = damon_sysfs_scheme_region_alloc(r);
	if (!region)
		return 0;
	list_add_tail(&region->list, &sysfs_regions->regions_list);
	sysfs_regions->nr_regions++;
	if (kobject_init_and_add(&region->kobj,
				&damon_sysfs_scheme_region_ktype,
				&sysfs_regions->kobj, "%d",
				damon_sysfs_schemes_region_idx++)) {
		kobject_put(&region->kobj);
	}
	return 0;
}

/*
 * DAMON callback that is called after each access sampling.  While this
 * callback is registered, damon_sysfs_lock should be held to ensure the
 * regions directories exist.
 */
void damos_sysfs_mark_finished_regions_updates(struct damon_ctx *ctx)
{
	struct damon_sysfs_schemes *sysfs_schemes =
		damon_sysfs_schemes_for_damos_callback;
	struct damon_sysfs_scheme_regions *sysfs_regions;
	int i;

	for (i = 0; i < sysfs_schemes->nr; i++) {
		sysfs_regions = sysfs_schemes->schemes_arr[i]->tried_regions;
		if (sysfs_regions->upd_status ==
				DAMOS_TRIED_REGIONS_UPD_STARTED ||
				time_after(jiffies,
					sysfs_regions->upd_timeout_jiffies))
			sysfs_regions->upd_status =
				DAMOS_TRIED_REGIONS_UPD_FINISHED;
	}
}

/* Called from damon_sysfs_cmd_request_callback under damon_sysfs_lock */
int damon_sysfs_schemes_clear_regions(
		struct damon_sysfs_schemes *sysfs_schemes,
		struct damon_ctx *ctx)
{
	struct damos *scheme;
	int schemes_idx = 0;

	damon_for_each_scheme(scheme, ctx) {
		struct damon_sysfs_scheme *sysfs_scheme;

		/* user could have removed the scheme sysfs dir */
		if (schemes_idx >= sysfs_schemes->nr)
			break;

		sysfs_scheme = sysfs_schemes->schemes_arr[schemes_idx++];
		damon_sysfs_scheme_regions_rm_dirs(
				sysfs_scheme->tried_regions);
		sysfs_scheme->tried_regions->total_bytes = 0;
	}
	return 0;
}

static struct damos *damos_sysfs_nth_scheme(int n, struct damon_ctx *ctx)
{
	struct damos *scheme;
	int i = 0;

	damon_for_each_scheme(scheme, ctx) {
		if (i == n)
			return scheme;
		i++;
	}
	return NULL;
}
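/*
 * The update timeout set below is two apply intervals of the scheme, or two
 * aggregation intervals if apply_interval_us is zero.  For example, with an
 * apply_interval_us of 1000000 (one second), a scheme that tries no region
 * within roughly two seconds of the request has its update status forced to
 * 'finished' by damos_sysfs_mark_finished_regions_updates().
 */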
static void damos_tried_regions_init_upd_status(
		struct damon_sysfs_schemes *sysfs_schemes,
		struct damon_ctx *ctx)
{
	int i;
	struct damos *scheme;
	struct damon_sysfs_scheme_regions *sysfs_regions;

	for (i = 0; i < sysfs_schemes->nr; i++) {
		sysfs_regions = sysfs_schemes->schemes_arr[i]->tried_regions;
		scheme = damos_sysfs_nth_scheme(i, ctx);
		if (!scheme) {
			sysfs_regions->upd_status =
				DAMOS_TRIED_REGIONS_UPD_FINISHED;
			continue;
		}
		sysfs_regions->upd_status = DAMOS_TRIED_REGIONS_UPD_IDLE;
		sysfs_regions->upd_timeout_jiffies = jiffies +
			2 * usecs_to_jiffies(scheme->apply_interval_us ?
					scheme->apply_interval_us :
					ctx->attrs.aggr_interval);
	}
}

/* Called from damon_sysfs_cmd_request_callback under damon_sysfs_lock */
int damon_sysfs_schemes_update_regions_start(
		struct damon_sysfs_schemes *sysfs_schemes,
		struct damon_ctx *ctx, bool total_bytes_only)
{
	damon_sysfs_schemes_clear_regions(sysfs_schemes, ctx);
	damon_sysfs_schemes_for_damos_callback = sysfs_schemes;
	damos_tried_regions_init_upd_status(sysfs_schemes, ctx);
	damos_regions_upd_total_bytes_only = total_bytes_only;
	ctx->callback.before_damos_apply = damon_sysfs_before_damos_apply;
	return 0;
}

bool damos_sysfs_regions_upd_done(void)
{
	struct damon_sysfs_schemes *sysfs_schemes =
		damon_sysfs_schemes_for_damos_callback;
	struct damon_sysfs_scheme_regions *sysfs_regions;
	int i;

	for (i = 0; i < sysfs_schemes->nr; i++) {
		sysfs_regions = sysfs_schemes->schemes_arr[i]->tried_regions;
		if (sysfs_regions->upd_status !=
				DAMOS_TRIED_REGIONS_UPD_FINISHED)
			return false;
	}
	return true;
}

/*
 * Called from damon_sysfs_cmd_request_callback under damon_sysfs_lock.  The
 * caller should unlock damon_sysfs_lock, which was held before
 * damon_sysfs_schemes_update_regions_start().
 */
int damon_sysfs_schemes_update_regions_stop(struct damon_ctx *ctx)
{
	damon_sysfs_schemes_for_damos_callback = NULL;
	ctx->callback.before_damos_apply = NULL;
	damon_sysfs_schemes_region_idx = 0;
	return 0;
}
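/*
 * Rough caller sequence implied by the functions above.  The actual driver is
 * the DAMON sysfs command handling code, so this is only a sketch:
 *
 *	mutex_lock(&damon_sysfs_lock);
 *	damon_sysfs_schemes_update_regions_start(sysfs_schemes, ctx, false);
 *	... wait until damos_sysfs_regions_upd_done() returns true ...
 *	damon_sysfs_schemes_update_regions_stop(ctx);
 *	mutex_unlock(&damon_sysfs_lock);
 */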