1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * DAMON Debugfs Interface 4 * 5 * Author: SeongJae Park <sjpark@amazon.de> 6 */ 7 8 #define pr_fmt(fmt) "damon-dbgfs: " fmt 9 10 #include <linux/damon.h> 11 #include <linux/debugfs.h> 12 #include <linux/file.h> 13 #include <linux/mm.h> 14 #include <linux/module.h> 15 #include <linux/page_idle.h> 16 #include <linux/slab.h> 17 18 static struct damon_ctx **dbgfs_ctxs; 19 static int dbgfs_nr_ctxs; 20 static struct dentry **dbgfs_dirs; 21 static DEFINE_MUTEX(damon_dbgfs_lock); 22 23 /* 24 * Returns non-empty string on success, negative error code otherwise. 25 */ 26 static char *user_input_str(const char __user *buf, size_t count, loff_t *ppos) 27 { 28 char *kbuf; 29 ssize_t ret; 30 31 /* We do not accept continuous write */ 32 if (*ppos) 33 return ERR_PTR(-EINVAL); 34 35 kbuf = kmalloc(count + 1, GFP_KERNEL | __GFP_NOWARN); 36 if (!kbuf) 37 return ERR_PTR(-ENOMEM); 38 39 ret = simple_write_to_buffer(kbuf, count + 1, ppos, buf, count); 40 if (ret != count) { 41 kfree(kbuf); 42 return ERR_PTR(-EIO); 43 } 44 kbuf[ret] = '\0'; 45 46 return kbuf; 47 } 48 49 static ssize_t dbgfs_attrs_read(struct file *file, 50 char __user *buf, size_t count, loff_t *ppos) 51 { 52 struct damon_ctx *ctx = file->private_data; 53 char kbuf[128]; 54 int ret; 55 56 mutex_lock(&ctx->kdamond_lock); 57 ret = scnprintf(kbuf, ARRAY_SIZE(kbuf), "%lu %lu %lu %lu %lu\n", 58 ctx->sample_interval, ctx->aggr_interval, 59 ctx->ops_update_interval, ctx->min_nr_regions, 60 ctx->max_nr_regions); 61 mutex_unlock(&ctx->kdamond_lock); 62 63 return simple_read_from_buffer(buf, count, ppos, kbuf, ret); 64 } 65 66 static ssize_t dbgfs_attrs_write(struct file *file, 67 const char __user *buf, size_t count, loff_t *ppos) 68 { 69 struct damon_ctx *ctx = file->private_data; 70 unsigned long s, a, r, minr, maxr; 71 char *kbuf; 72 ssize_t ret; 73 74 kbuf = user_input_str(buf, count, ppos); 75 if (IS_ERR(kbuf)) 76 return PTR_ERR(kbuf); 77 78 if (sscanf(kbuf, "%lu %lu %lu %lu %lu", 79 &s, &a, &r, &minr, &maxr) != 5) { 80 ret = -EINVAL; 81 goto out; 82 } 83 84 mutex_lock(&ctx->kdamond_lock); 85 if (ctx->kdamond) { 86 ret = -EBUSY; 87 goto unlock_out; 88 } 89 90 ret = damon_set_attrs(ctx, s, a, r, minr, maxr); 91 if (!ret) 92 ret = count; 93 unlock_out: 94 mutex_unlock(&ctx->kdamond_lock); 95 out: 96 kfree(kbuf); 97 return ret; 98 } 99 100 /* 101 * Return corresponding dbgfs' scheme action value (int) for the given 102 * damos_action if the given damos_action value is valid and supported by 103 * dbgfs, negative error code otherwise. 104 */ 105 static int damos_action_to_dbgfs_scheme_action(enum damos_action action) 106 { 107 switch (action) { 108 case DAMOS_WILLNEED: 109 return 0; 110 case DAMOS_COLD: 111 return 1; 112 case DAMOS_PAGEOUT: 113 return 2; 114 case DAMOS_HUGEPAGE: 115 return 3; 116 case DAMOS_NOHUGEPAGE: 117 return 4; 118 case DAMOS_STAT: 119 return 5; 120 default: 121 return -EINVAL; 122 } 123 } 124 125 static ssize_t sprint_schemes(struct damon_ctx *c, char *buf, ssize_t len) 126 { 127 struct damos *s; 128 int written = 0; 129 int rc; 130 131 damon_for_each_scheme(s, c) { 132 rc = scnprintf(&buf[written], len - written, 133 "%lu %lu %u %u %u %u %d %lu %lu %lu %u %u %u %d %lu %lu %lu %lu %lu %lu %lu %lu %lu\n", 134 s->min_sz_region, s->max_sz_region, 135 s->min_nr_accesses, s->max_nr_accesses, 136 s->min_age_region, s->max_age_region, 137 damos_action_to_dbgfs_scheme_action(s->action), 138 s->quota.ms, s->quota.sz, 139 s->quota.reset_interval, 140 s->quota.weight_sz, 141 s->quota.weight_nr_accesses, 142 s->quota.weight_age, 143 s->wmarks.metric, s->wmarks.interval, 144 s->wmarks.high, s->wmarks.mid, s->wmarks.low, 145 s->stat.nr_tried, s->stat.sz_tried, 146 s->stat.nr_applied, s->stat.sz_applied, 147 s->stat.qt_exceeds); 148 if (!rc) 149 return -ENOMEM; 150 151 written += rc; 152 } 153 return written; 154 } 155 156 static ssize_t dbgfs_schemes_read(struct file *file, char __user *buf, 157 size_t count, loff_t *ppos) 158 { 159 struct damon_ctx *ctx = file->private_data; 160 char *kbuf; 161 ssize_t len; 162 163 kbuf = kmalloc(count, GFP_KERNEL | __GFP_NOWARN); 164 if (!kbuf) 165 return -ENOMEM; 166 167 mutex_lock(&ctx->kdamond_lock); 168 len = sprint_schemes(ctx, kbuf, count); 169 mutex_unlock(&ctx->kdamond_lock); 170 if (len < 0) 171 goto out; 172 len = simple_read_from_buffer(buf, count, ppos, kbuf, len); 173 174 out: 175 kfree(kbuf); 176 return len; 177 } 178 179 static void free_schemes_arr(struct damos **schemes, ssize_t nr_schemes) 180 { 181 ssize_t i; 182 183 for (i = 0; i < nr_schemes; i++) 184 kfree(schemes[i]); 185 kfree(schemes); 186 } 187 188 /* 189 * Return corresponding damos_action for the given dbgfs input for a scheme 190 * action if the input is valid, negative error code otherwise. 191 */ 192 static enum damos_action dbgfs_scheme_action_to_damos_action(int dbgfs_action) 193 { 194 switch (dbgfs_action) { 195 case 0: 196 return DAMOS_WILLNEED; 197 case 1: 198 return DAMOS_COLD; 199 case 2: 200 return DAMOS_PAGEOUT; 201 case 3: 202 return DAMOS_HUGEPAGE; 203 case 4: 204 return DAMOS_NOHUGEPAGE; 205 case 5: 206 return DAMOS_STAT; 207 default: 208 return -EINVAL; 209 } 210 } 211 212 /* 213 * Converts a string into an array of struct damos pointers 214 * 215 * Returns an array of struct damos pointers that converted if the conversion 216 * success, or NULL otherwise. 217 */ 218 static struct damos **str_to_schemes(const char *str, ssize_t len, 219 ssize_t *nr_schemes) 220 { 221 struct damos *scheme, **schemes; 222 const int max_nr_schemes = 256; 223 int pos = 0, parsed, ret; 224 unsigned long min_sz, max_sz; 225 unsigned int min_nr_a, max_nr_a, min_age, max_age; 226 unsigned int action_input; 227 enum damos_action action; 228 229 schemes = kmalloc_array(max_nr_schemes, sizeof(scheme), 230 GFP_KERNEL); 231 if (!schemes) 232 return NULL; 233 234 *nr_schemes = 0; 235 while (pos < len && *nr_schemes < max_nr_schemes) { 236 struct damos_quota quota = {}; 237 struct damos_watermarks wmarks; 238 239 ret = sscanf(&str[pos], 240 "%lu %lu %u %u %u %u %u %lu %lu %lu %u %u %u %u %lu %lu %lu %lu%n", 241 &min_sz, &max_sz, &min_nr_a, &max_nr_a, 242 &min_age, &max_age, &action_input, "a.ms, 243 "a.sz, "a.reset_interval, 244 "a.weight_sz, "a.weight_nr_accesses, 245 "a.weight_age, &wmarks.metric, 246 &wmarks.interval, &wmarks.high, &wmarks.mid, 247 &wmarks.low, &parsed); 248 if (ret != 18) 249 break; 250 action = dbgfs_scheme_action_to_damos_action(action_input); 251 if ((int)action < 0) 252 goto fail; 253 254 if (min_sz > max_sz || min_nr_a > max_nr_a || min_age > max_age) 255 goto fail; 256 257 if (wmarks.high < wmarks.mid || wmarks.high < wmarks.low || 258 wmarks.mid < wmarks.low) 259 goto fail; 260 261 pos += parsed; 262 scheme = damon_new_scheme(min_sz, max_sz, min_nr_a, max_nr_a, 263 min_age, max_age, action, "a, &wmarks); 264 if (!scheme) 265 goto fail; 266 267 schemes[*nr_schemes] = scheme; 268 *nr_schemes += 1; 269 } 270 return schemes; 271 fail: 272 free_schemes_arr(schemes, *nr_schemes); 273 return NULL; 274 } 275 276 static ssize_t dbgfs_schemes_write(struct file *file, const char __user *buf, 277 size_t count, loff_t *ppos) 278 { 279 struct damon_ctx *ctx = file->private_data; 280 char *kbuf; 281 struct damos **schemes; 282 ssize_t nr_schemes = 0, ret; 283 284 kbuf = user_input_str(buf, count, ppos); 285 if (IS_ERR(kbuf)) 286 return PTR_ERR(kbuf); 287 288 schemes = str_to_schemes(kbuf, count, &nr_schemes); 289 if (!schemes) { 290 ret = -EINVAL; 291 goto out; 292 } 293 294 mutex_lock(&ctx->kdamond_lock); 295 if (ctx->kdamond) { 296 ret = -EBUSY; 297 goto unlock_out; 298 } 299 300 ret = damon_set_schemes(ctx, schemes, nr_schemes); 301 if (!ret) { 302 ret = count; 303 nr_schemes = 0; 304 } 305 306 unlock_out: 307 mutex_unlock(&ctx->kdamond_lock); 308 free_schemes_arr(schemes, nr_schemes); 309 out: 310 kfree(kbuf); 311 return ret; 312 } 313 314 static ssize_t sprint_target_ids(struct damon_ctx *ctx, char *buf, ssize_t len) 315 { 316 struct damon_target *t; 317 int id; 318 int written = 0; 319 int rc; 320 321 damon_for_each_target(t, ctx) { 322 if (damon_target_has_pid(ctx)) 323 /* Show pid numbers to debugfs users */ 324 id = pid_vnr(t->pid); 325 else 326 /* Show 42 for physical address space, just for fun */ 327 id = 42; 328 329 rc = scnprintf(&buf[written], len - written, "%d ", id); 330 if (!rc) 331 return -ENOMEM; 332 written += rc; 333 } 334 if (written) 335 written -= 1; 336 written += scnprintf(&buf[written], len - written, "\n"); 337 return written; 338 } 339 340 static ssize_t dbgfs_target_ids_read(struct file *file, 341 char __user *buf, size_t count, loff_t *ppos) 342 { 343 struct damon_ctx *ctx = file->private_data; 344 ssize_t len; 345 char ids_buf[320]; 346 347 mutex_lock(&ctx->kdamond_lock); 348 len = sprint_target_ids(ctx, ids_buf, 320); 349 mutex_unlock(&ctx->kdamond_lock); 350 if (len < 0) 351 return len; 352 353 return simple_read_from_buffer(buf, count, ppos, ids_buf, len); 354 } 355 356 /* 357 * Converts a string into an integers array 358 * 359 * Returns an array of integers array if the conversion success, or NULL 360 * otherwise. 361 */ 362 static int *str_to_ints(const char *str, ssize_t len, ssize_t *nr_ints) 363 { 364 int *array; 365 const int max_nr_ints = 32; 366 int nr; 367 int pos = 0, parsed, ret; 368 369 *nr_ints = 0; 370 array = kmalloc_array(max_nr_ints, sizeof(*array), GFP_KERNEL); 371 if (!array) 372 return NULL; 373 while (*nr_ints < max_nr_ints && pos < len) { 374 ret = sscanf(&str[pos], "%d%n", &nr, &parsed); 375 pos += parsed; 376 if (ret != 1) 377 break; 378 array[*nr_ints] = nr; 379 *nr_ints += 1; 380 } 381 382 return array; 383 } 384 385 static void dbgfs_put_pids(struct pid **pids, int nr_pids) 386 { 387 int i; 388 389 for (i = 0; i < nr_pids; i++) 390 put_pid(pids[i]); 391 } 392 393 /* 394 * Converts a string into an struct pid pointers array 395 * 396 * Returns an array of struct pid pointers if the conversion success, or NULL 397 * otherwise. 398 */ 399 static struct pid **str_to_pids(const char *str, ssize_t len, ssize_t *nr_pids) 400 { 401 int *ints; 402 ssize_t nr_ints; 403 struct pid **pids; 404 405 *nr_pids = 0; 406 407 ints = str_to_ints(str, len, &nr_ints); 408 if (!ints) 409 return NULL; 410 411 pids = kmalloc_array(nr_ints, sizeof(*pids), GFP_KERNEL); 412 if (!pids) 413 goto out; 414 415 for (; *nr_pids < nr_ints; (*nr_pids)++) { 416 pids[*nr_pids] = find_get_pid(ints[*nr_pids]); 417 if (!pids[*nr_pids]) { 418 dbgfs_put_pids(pids, *nr_pids); 419 kfree(ints); 420 kfree(pids); 421 return NULL; 422 } 423 } 424 425 out: 426 kfree(ints); 427 return pids; 428 } 429 430 /* 431 * dbgfs_set_targets() - Set monitoring targets. 432 * @ctx: monitoring context 433 * @nr_targets: number of targets 434 * @pids: array of target pids (size is same to @nr_targets) 435 * 436 * This function should not be called while the kdamond is running. @pids is 437 * ignored if the context is not configured to have pid in each target. On 438 * failure, reference counts of all pids in @pids are decremented. 439 * 440 * Return: 0 on success, negative error code otherwise. 441 */ 442 static int dbgfs_set_targets(struct damon_ctx *ctx, ssize_t nr_targets, 443 struct pid **pids) 444 { 445 ssize_t i; 446 struct damon_target *t, *next; 447 448 damon_for_each_target_safe(t, next, ctx) { 449 if (damon_target_has_pid(ctx)) 450 put_pid(t->pid); 451 damon_destroy_target(t); 452 } 453 454 for (i = 0; i < nr_targets; i++) { 455 t = damon_new_target(); 456 if (!t) { 457 damon_for_each_target_safe(t, next, ctx) 458 damon_destroy_target(t); 459 if (damon_target_has_pid(ctx)) 460 dbgfs_put_pids(pids, nr_targets); 461 return -ENOMEM; 462 } 463 if (damon_target_has_pid(ctx)) 464 t->pid = pids[i]; 465 damon_add_target(ctx, t); 466 } 467 468 return 0; 469 } 470 471 static ssize_t dbgfs_target_ids_write(struct file *file, 472 const char __user *buf, size_t count, loff_t *ppos) 473 { 474 struct damon_ctx *ctx = file->private_data; 475 bool id_is_pid = true; 476 char *kbuf; 477 struct pid **target_pids = NULL; 478 ssize_t nr_targets; 479 ssize_t ret; 480 481 kbuf = user_input_str(buf, count, ppos); 482 if (IS_ERR(kbuf)) 483 return PTR_ERR(kbuf); 484 485 if (!strncmp(kbuf, "paddr\n", count)) { 486 id_is_pid = false; 487 nr_targets = 1; 488 } 489 490 if (id_is_pid) { 491 target_pids = str_to_pids(kbuf, count, &nr_targets); 492 if (!target_pids) { 493 ret = -ENOMEM; 494 goto out; 495 } 496 } 497 498 mutex_lock(&ctx->kdamond_lock); 499 if (ctx->kdamond) { 500 if (id_is_pid) 501 dbgfs_put_pids(target_pids, nr_targets); 502 ret = -EBUSY; 503 goto unlock_out; 504 } 505 506 /* remove previously set targets */ 507 dbgfs_set_targets(ctx, 0, NULL); 508 if (!nr_targets) { 509 ret = count; 510 goto unlock_out; 511 } 512 513 /* Configure the context for the address space type */ 514 if (id_is_pid) 515 ret = damon_select_ops(ctx, DAMON_OPS_VADDR); 516 else 517 ret = damon_select_ops(ctx, DAMON_OPS_PADDR); 518 if (ret) 519 goto unlock_out; 520 521 ret = dbgfs_set_targets(ctx, nr_targets, target_pids); 522 if (!ret) 523 ret = count; 524 525 unlock_out: 526 mutex_unlock(&ctx->kdamond_lock); 527 kfree(target_pids); 528 out: 529 kfree(kbuf); 530 return ret; 531 } 532 533 static ssize_t sprint_init_regions(struct damon_ctx *c, char *buf, ssize_t len) 534 { 535 struct damon_target *t; 536 struct damon_region *r; 537 int target_idx = 0; 538 int written = 0; 539 int rc; 540 541 damon_for_each_target(t, c) { 542 damon_for_each_region(r, t) { 543 rc = scnprintf(&buf[written], len - written, 544 "%d %lu %lu\n", 545 target_idx, r->ar.start, r->ar.end); 546 if (!rc) 547 return -ENOMEM; 548 written += rc; 549 } 550 target_idx++; 551 } 552 return written; 553 } 554 555 static ssize_t dbgfs_init_regions_read(struct file *file, char __user *buf, 556 size_t count, loff_t *ppos) 557 { 558 struct damon_ctx *ctx = file->private_data; 559 char *kbuf; 560 ssize_t len; 561 562 kbuf = kmalloc(count, GFP_KERNEL | __GFP_NOWARN); 563 if (!kbuf) 564 return -ENOMEM; 565 566 mutex_lock(&ctx->kdamond_lock); 567 if (ctx->kdamond) { 568 mutex_unlock(&ctx->kdamond_lock); 569 len = -EBUSY; 570 goto out; 571 } 572 573 len = sprint_init_regions(ctx, kbuf, count); 574 mutex_unlock(&ctx->kdamond_lock); 575 if (len < 0) 576 goto out; 577 len = simple_read_from_buffer(buf, count, ppos, kbuf, len); 578 579 out: 580 kfree(kbuf); 581 return len; 582 } 583 584 static int add_init_region(struct damon_ctx *c, int target_idx, 585 struct damon_addr_range *ar) 586 { 587 struct damon_target *t; 588 struct damon_region *r, *prev; 589 unsigned long idx = 0; 590 int rc = -EINVAL; 591 592 if (ar->start >= ar->end) 593 return -EINVAL; 594 595 damon_for_each_target(t, c) { 596 if (idx++ == target_idx) { 597 r = damon_new_region(ar->start, ar->end); 598 if (!r) 599 return -ENOMEM; 600 damon_add_region(r, t); 601 if (damon_nr_regions(t) > 1) { 602 prev = damon_prev_region(r); 603 if (prev->ar.end > r->ar.start) { 604 damon_destroy_region(r, t); 605 return -EINVAL; 606 } 607 } 608 rc = 0; 609 } 610 } 611 return rc; 612 } 613 614 static int set_init_regions(struct damon_ctx *c, const char *str, ssize_t len) 615 { 616 struct damon_target *t; 617 struct damon_region *r, *next; 618 int pos = 0, parsed, ret; 619 int target_idx; 620 struct damon_addr_range ar; 621 int err; 622 623 damon_for_each_target(t, c) { 624 damon_for_each_region_safe(r, next, t) 625 damon_destroy_region(r, t); 626 } 627 628 while (pos < len) { 629 ret = sscanf(&str[pos], "%d %lu %lu%n", 630 &target_idx, &ar.start, &ar.end, &parsed); 631 if (ret != 3) 632 break; 633 err = add_init_region(c, target_idx, &ar); 634 if (err) 635 goto fail; 636 pos += parsed; 637 } 638 639 return 0; 640 641 fail: 642 damon_for_each_target(t, c) { 643 damon_for_each_region_safe(r, next, t) 644 damon_destroy_region(r, t); 645 } 646 return err; 647 } 648 649 static ssize_t dbgfs_init_regions_write(struct file *file, 650 const char __user *buf, size_t count, 651 loff_t *ppos) 652 { 653 struct damon_ctx *ctx = file->private_data; 654 char *kbuf; 655 ssize_t ret = count; 656 int err; 657 658 kbuf = user_input_str(buf, count, ppos); 659 if (IS_ERR(kbuf)) 660 return PTR_ERR(kbuf); 661 662 mutex_lock(&ctx->kdamond_lock); 663 if (ctx->kdamond) { 664 ret = -EBUSY; 665 goto unlock_out; 666 } 667 668 err = set_init_regions(ctx, kbuf, ret); 669 if (err) 670 ret = err; 671 672 unlock_out: 673 mutex_unlock(&ctx->kdamond_lock); 674 kfree(kbuf); 675 return ret; 676 } 677 678 static ssize_t dbgfs_kdamond_pid_read(struct file *file, 679 char __user *buf, size_t count, loff_t *ppos) 680 { 681 struct damon_ctx *ctx = file->private_data; 682 char *kbuf; 683 ssize_t len; 684 685 kbuf = kmalloc(count, GFP_KERNEL | __GFP_NOWARN); 686 if (!kbuf) 687 return -ENOMEM; 688 689 mutex_lock(&ctx->kdamond_lock); 690 if (ctx->kdamond) 691 len = scnprintf(kbuf, count, "%d\n", ctx->kdamond->pid); 692 else 693 len = scnprintf(kbuf, count, "none\n"); 694 mutex_unlock(&ctx->kdamond_lock); 695 if (!len) 696 goto out; 697 len = simple_read_from_buffer(buf, count, ppos, kbuf, len); 698 699 out: 700 kfree(kbuf); 701 return len; 702 } 703 704 static int damon_dbgfs_open(struct inode *inode, struct file *file) 705 { 706 file->private_data = inode->i_private; 707 708 return nonseekable_open(inode, file); 709 } 710 711 static const struct file_operations attrs_fops = { 712 .open = damon_dbgfs_open, 713 .read = dbgfs_attrs_read, 714 .write = dbgfs_attrs_write, 715 }; 716 717 static const struct file_operations schemes_fops = { 718 .open = damon_dbgfs_open, 719 .read = dbgfs_schemes_read, 720 .write = dbgfs_schemes_write, 721 }; 722 723 static const struct file_operations target_ids_fops = { 724 .open = damon_dbgfs_open, 725 .read = dbgfs_target_ids_read, 726 .write = dbgfs_target_ids_write, 727 }; 728 729 static const struct file_operations init_regions_fops = { 730 .open = damon_dbgfs_open, 731 .read = dbgfs_init_regions_read, 732 .write = dbgfs_init_regions_write, 733 }; 734 735 static const struct file_operations kdamond_pid_fops = { 736 .open = damon_dbgfs_open, 737 .read = dbgfs_kdamond_pid_read, 738 }; 739 740 static void dbgfs_fill_ctx_dir(struct dentry *dir, struct damon_ctx *ctx) 741 { 742 const char * const file_names[] = {"attrs", "schemes", "target_ids", 743 "init_regions", "kdamond_pid"}; 744 const struct file_operations *fops[] = {&attrs_fops, &schemes_fops, 745 &target_ids_fops, &init_regions_fops, &kdamond_pid_fops}; 746 int i; 747 748 for (i = 0; i < ARRAY_SIZE(file_names); i++) 749 debugfs_create_file(file_names[i], 0600, dir, ctx, fops[i]); 750 } 751 752 static void dbgfs_before_terminate(struct damon_ctx *ctx) 753 { 754 struct damon_target *t, *next; 755 756 if (!damon_target_has_pid(ctx)) 757 return; 758 759 mutex_lock(&ctx->kdamond_lock); 760 damon_for_each_target_safe(t, next, ctx) { 761 put_pid(t->pid); 762 damon_destroy_target(t); 763 } 764 mutex_unlock(&ctx->kdamond_lock); 765 } 766 767 static struct damon_ctx *dbgfs_new_ctx(void) 768 { 769 struct damon_ctx *ctx; 770 771 ctx = damon_new_ctx(); 772 if (!ctx) 773 return NULL; 774 775 if (damon_select_ops(ctx, DAMON_OPS_VADDR) && 776 damon_select_ops(ctx, DAMON_OPS_PADDR)) { 777 damon_destroy_ctx(ctx); 778 return NULL; 779 } 780 ctx->callback.before_terminate = dbgfs_before_terminate; 781 return ctx; 782 } 783 784 static void dbgfs_destroy_ctx(struct damon_ctx *ctx) 785 { 786 damon_destroy_ctx(ctx); 787 } 788 789 /* 790 * Make a context of @name and create a debugfs directory for it. 791 * 792 * This function should be called while holding damon_dbgfs_lock. 793 * 794 * Returns 0 on success, negative error code otherwise. 795 */ 796 static int dbgfs_mk_context(char *name) 797 { 798 struct dentry *root, **new_dirs, *new_dir; 799 struct damon_ctx **new_ctxs, *new_ctx; 800 801 if (damon_nr_running_ctxs()) 802 return -EBUSY; 803 804 new_ctxs = krealloc(dbgfs_ctxs, sizeof(*dbgfs_ctxs) * 805 (dbgfs_nr_ctxs + 1), GFP_KERNEL); 806 if (!new_ctxs) 807 return -ENOMEM; 808 dbgfs_ctxs = new_ctxs; 809 810 new_dirs = krealloc(dbgfs_dirs, sizeof(*dbgfs_dirs) * 811 (dbgfs_nr_ctxs + 1), GFP_KERNEL); 812 if (!new_dirs) 813 return -ENOMEM; 814 dbgfs_dirs = new_dirs; 815 816 root = dbgfs_dirs[0]; 817 if (!root) 818 return -ENOENT; 819 820 new_dir = debugfs_create_dir(name, root); 821 /* Below check is required for a potential duplicated name case */ 822 if (IS_ERR(new_dir)) 823 return PTR_ERR(new_dir); 824 dbgfs_dirs[dbgfs_nr_ctxs] = new_dir; 825 826 new_ctx = dbgfs_new_ctx(); 827 if (!new_ctx) { 828 debugfs_remove(new_dir); 829 dbgfs_dirs[dbgfs_nr_ctxs] = NULL; 830 return -ENOMEM; 831 } 832 833 dbgfs_ctxs[dbgfs_nr_ctxs] = new_ctx; 834 dbgfs_fill_ctx_dir(dbgfs_dirs[dbgfs_nr_ctxs], 835 dbgfs_ctxs[dbgfs_nr_ctxs]); 836 dbgfs_nr_ctxs++; 837 838 return 0; 839 } 840 841 static ssize_t dbgfs_mk_context_write(struct file *file, 842 const char __user *buf, size_t count, loff_t *ppos) 843 { 844 char *kbuf; 845 char *ctx_name; 846 ssize_t ret; 847 848 kbuf = user_input_str(buf, count, ppos); 849 if (IS_ERR(kbuf)) 850 return PTR_ERR(kbuf); 851 ctx_name = kmalloc(count + 1, GFP_KERNEL); 852 if (!ctx_name) { 853 kfree(kbuf); 854 return -ENOMEM; 855 } 856 857 /* Trim white space */ 858 if (sscanf(kbuf, "%s", ctx_name) != 1) { 859 ret = -EINVAL; 860 goto out; 861 } 862 863 mutex_lock(&damon_dbgfs_lock); 864 ret = dbgfs_mk_context(ctx_name); 865 if (!ret) 866 ret = count; 867 mutex_unlock(&damon_dbgfs_lock); 868 869 out: 870 kfree(kbuf); 871 kfree(ctx_name); 872 return ret; 873 } 874 875 /* 876 * Remove a context of @name and its debugfs directory. 877 * 878 * This function should be called while holding damon_dbgfs_lock. 879 * 880 * Return 0 on success, negative error code otherwise. 881 */ 882 static int dbgfs_rm_context(char *name) 883 { 884 struct dentry *root, *dir, **new_dirs; 885 struct damon_ctx **new_ctxs; 886 int i, j; 887 888 if (damon_nr_running_ctxs()) 889 return -EBUSY; 890 891 root = dbgfs_dirs[0]; 892 if (!root) 893 return -ENOENT; 894 895 dir = debugfs_lookup(name, root); 896 if (!dir) 897 return -ENOENT; 898 899 new_dirs = kmalloc_array(dbgfs_nr_ctxs - 1, sizeof(*dbgfs_dirs), 900 GFP_KERNEL); 901 if (!new_dirs) 902 return -ENOMEM; 903 904 new_ctxs = kmalloc_array(dbgfs_nr_ctxs - 1, sizeof(*dbgfs_ctxs), 905 GFP_KERNEL); 906 if (!new_ctxs) { 907 kfree(new_dirs); 908 return -ENOMEM; 909 } 910 911 for (i = 0, j = 0; i < dbgfs_nr_ctxs; i++) { 912 if (dbgfs_dirs[i] == dir) { 913 debugfs_remove(dbgfs_dirs[i]); 914 dbgfs_destroy_ctx(dbgfs_ctxs[i]); 915 continue; 916 } 917 new_dirs[j] = dbgfs_dirs[i]; 918 new_ctxs[j++] = dbgfs_ctxs[i]; 919 } 920 921 kfree(dbgfs_dirs); 922 kfree(dbgfs_ctxs); 923 924 dbgfs_dirs = new_dirs; 925 dbgfs_ctxs = new_ctxs; 926 dbgfs_nr_ctxs--; 927 928 return 0; 929 } 930 931 static ssize_t dbgfs_rm_context_write(struct file *file, 932 const char __user *buf, size_t count, loff_t *ppos) 933 { 934 char *kbuf; 935 ssize_t ret; 936 char *ctx_name; 937 938 kbuf = user_input_str(buf, count, ppos); 939 if (IS_ERR(kbuf)) 940 return PTR_ERR(kbuf); 941 ctx_name = kmalloc(count + 1, GFP_KERNEL); 942 if (!ctx_name) { 943 kfree(kbuf); 944 return -ENOMEM; 945 } 946 947 /* Trim white space */ 948 if (sscanf(kbuf, "%s", ctx_name) != 1) { 949 ret = -EINVAL; 950 goto out; 951 } 952 953 mutex_lock(&damon_dbgfs_lock); 954 ret = dbgfs_rm_context(ctx_name); 955 if (!ret) 956 ret = count; 957 mutex_unlock(&damon_dbgfs_lock); 958 959 out: 960 kfree(kbuf); 961 kfree(ctx_name); 962 return ret; 963 } 964 965 static ssize_t dbgfs_monitor_on_read(struct file *file, 966 char __user *buf, size_t count, loff_t *ppos) 967 { 968 char monitor_on_buf[5]; 969 bool monitor_on = damon_nr_running_ctxs() != 0; 970 int len; 971 972 len = scnprintf(monitor_on_buf, 5, monitor_on ? "on\n" : "off\n"); 973 974 return simple_read_from_buffer(buf, count, ppos, monitor_on_buf, len); 975 } 976 977 static ssize_t dbgfs_monitor_on_write(struct file *file, 978 const char __user *buf, size_t count, loff_t *ppos) 979 { 980 ssize_t ret; 981 char *kbuf; 982 983 kbuf = user_input_str(buf, count, ppos); 984 if (IS_ERR(kbuf)) 985 return PTR_ERR(kbuf); 986 987 /* Remove white space */ 988 if (sscanf(kbuf, "%s", kbuf) != 1) { 989 kfree(kbuf); 990 return -EINVAL; 991 } 992 993 mutex_lock(&damon_dbgfs_lock); 994 if (!strncmp(kbuf, "on", count)) { 995 int i; 996 997 for (i = 0; i < dbgfs_nr_ctxs; i++) { 998 if (damon_targets_empty(dbgfs_ctxs[i])) { 999 kfree(kbuf); 1000 mutex_unlock(&damon_dbgfs_lock); 1001 return -EINVAL; 1002 } 1003 } 1004 ret = damon_start(dbgfs_ctxs, dbgfs_nr_ctxs, true); 1005 } else if (!strncmp(kbuf, "off", count)) { 1006 ret = damon_stop(dbgfs_ctxs, dbgfs_nr_ctxs); 1007 } else { 1008 ret = -EINVAL; 1009 } 1010 mutex_unlock(&damon_dbgfs_lock); 1011 1012 if (!ret) 1013 ret = count; 1014 kfree(kbuf); 1015 return ret; 1016 } 1017 1018 static const struct file_operations mk_contexts_fops = { 1019 .write = dbgfs_mk_context_write, 1020 }; 1021 1022 static const struct file_operations rm_contexts_fops = { 1023 .write = dbgfs_rm_context_write, 1024 }; 1025 1026 static const struct file_operations monitor_on_fops = { 1027 .read = dbgfs_monitor_on_read, 1028 .write = dbgfs_monitor_on_write, 1029 }; 1030 1031 static int __init __damon_dbgfs_init(void) 1032 { 1033 struct dentry *dbgfs_root; 1034 const char * const file_names[] = {"mk_contexts", "rm_contexts", 1035 "monitor_on"}; 1036 const struct file_operations *fops[] = {&mk_contexts_fops, 1037 &rm_contexts_fops, &monitor_on_fops}; 1038 int i; 1039 1040 dbgfs_root = debugfs_create_dir("damon", NULL); 1041 1042 for (i = 0; i < ARRAY_SIZE(file_names); i++) 1043 debugfs_create_file(file_names[i], 0600, dbgfs_root, NULL, 1044 fops[i]); 1045 dbgfs_fill_ctx_dir(dbgfs_root, dbgfs_ctxs[0]); 1046 1047 dbgfs_dirs = kmalloc_array(1, sizeof(dbgfs_root), GFP_KERNEL); 1048 if (!dbgfs_dirs) { 1049 debugfs_remove(dbgfs_root); 1050 return -ENOMEM; 1051 } 1052 dbgfs_dirs[0] = dbgfs_root; 1053 1054 return 0; 1055 } 1056 1057 /* 1058 * Functions for the initialization 1059 */ 1060 1061 static int __init damon_dbgfs_init(void) 1062 { 1063 int rc = -ENOMEM; 1064 1065 mutex_lock(&damon_dbgfs_lock); 1066 dbgfs_ctxs = kmalloc(sizeof(*dbgfs_ctxs), GFP_KERNEL); 1067 if (!dbgfs_ctxs) 1068 goto out; 1069 dbgfs_ctxs[0] = dbgfs_new_ctx(); 1070 if (!dbgfs_ctxs[0]) { 1071 kfree(dbgfs_ctxs); 1072 goto out; 1073 } 1074 dbgfs_nr_ctxs = 1; 1075 1076 rc = __damon_dbgfs_init(); 1077 if (rc) { 1078 kfree(dbgfs_ctxs[0]); 1079 kfree(dbgfs_ctxs); 1080 pr_err("%s: dbgfs init failed\n", __func__); 1081 } 1082 1083 out: 1084 mutex_unlock(&damon_dbgfs_lock); 1085 return rc; 1086 } 1087 1088 module_init(damon_dbgfs_init); 1089 1090 #include "dbgfs-test.h" 1091