// SPDX-License-Identifier: GPL-2.0
/*
 * bcache sysfs interfaces
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */

#ifndef NO_BCACHEFS_SYSFS

#include "bcachefs.h"
#include "alloc_background.h"
#include "alloc_foreground.h"
#include "sysfs.h"
#include "btree_cache.h"
#include "btree_io.h"
#include "btree_iter.h"
#include "btree_key_cache.h"
#include "btree_update.h"
#include "btree_update_interior.h"
#include "btree_gc.h"
#include "buckets.h"
#include "clock.h"
#include "disk_groups.h"
#include "ec.h"
#include "inode.h"
#include "journal.h"
#include "keylist.h"
#include "move.h"
#include "movinggc.h"
#include "nocow_locking.h"
#include "opts.h"
#include "rebalance.h"
#include "replicas.h"
#include "super-io.h"
#include "tests.h"

#include <linux/blkdev.h>
#include <linux/sort.h>
#include <linux/sched/clock.h>

#include "util.h"

#define SYSFS_OPS(type)							\
const struct sysfs_ops type ## _sysfs_ops = {				\
	.show	= type ## _show,					\
	.store	= type ## _store					\
}

/*
 * SHOW(fn) generates fn_show(): it renders fn_to_text() into a printbuf,
 * ensures a trailing newline, and copies at most one page into buf.
 */
#define SHOW(fn)							\
static ssize_t fn ## _to_text(struct printbuf *,			\
			      struct kobject *, struct attribute *);	\
									\
static ssize_t fn ## _show(struct kobject *kobj, struct attribute *attr,\
			   char *buf)					\
{									\
	struct printbuf out = PRINTBUF;					\
	ssize_t ret = fn ## _to_text(&out, kobj, attr);			\
									\
	if (out.pos && out.buf[out.pos - 1] != '\n')			\
		prt_newline(&out);					\
									\
	if (!ret && out.allocation_failure)				\
		ret = -ENOMEM;						\
									\
	if (!ret) {							\
		ret = min_t(size_t, out.pos, PAGE_SIZE - 1);		\
		memcpy(buf, out.buf, ret);				\
	}								\
	printbuf_exit(&out);						\
	return bch2_err_class(ret);					\
}									\
									\
static ssize_t fn ## _to_text(struct printbuf *out, struct kobject *kobj,\
			      struct attribute *attr)

/* STORE(fn) wraps fn_store_inner() so return codes pass through bch2_err_class(). */
#define STORE(fn)							\
static ssize_t fn ## _store_inner(struct kobject *, struct attribute *,\
				  const char *, size_t);		\
									\
static ssize_t fn ## _store(struct kobject *kobj, struct attribute *attr,\
			    const char *buf, size_t size)		\
{									\
	return bch2_err_class(fn##_store_inner(kobj, attr, buf, size));	\
}									\
									\
static ssize_t fn ## _store_inner(struct kobject *kobj, struct attribute *attr,\
				  const char *buf, size_t size)

#define __sysfs_attribute(_name, _mode)					\
	static struct attribute sysfs_##_name =				\
		{ .name = #_name, .mode = _mode }

#define write_attribute(n)	__sysfs_attribute(n, 0200)
#define read_attribute(n)	__sysfs_attribute(n, 0444)
#define rw_attribute(n)		__sysfs_attribute(n, 0644)

#define sysfs_printf(file, fmt, ...)					\
do {									\
	if (attr == &sysfs_ ## file)					\
		prt_printf(out, fmt "\n", __VA_ARGS__);			\
} while (0)

#define sysfs_print(file, var)						\
do {									\
	if (attr == &sysfs_ ## file)					\
		snprint(out, var);					\
} while (0)

#define sysfs_hprint(file, val)						\
do {									\
	if (attr == &sysfs_ ## file)					\
		prt_human_readable_s64(out, val);			\
} while (0)

#define sysfs_strtoul(file, var)					\
do {									\
	if (attr == &sysfs_ ## file)					\
		return strtoul_safe(buf, var) ?: (ssize_t) size;	\
} while (0)

#define sysfs_strtoul_clamp(file, var, min, max)			\
do {									\
	if (attr == &sysfs_ ## file)					\
		return strtoul_safe_clamp(buf, var, min, max)		\
			?: (ssize_t) size;				\
} while (0)

#define strtoul_or_return(cp)						\
({									\
	unsigned long _v;						\
	int _r = kstrtoul(cp, 10, &_v);					\
	if (_r)								\
		return _r;						\
	_v;								\
})

write_attribute(trigger_gc);
write_attribute(trigger_discards);
write_attribute(trigger_invalidates);
write_attribute(prune_cache);
write_attribute(btree_wakeup);
rw_attribute(btree_gc_periodic);
rw_attribute(gc_gens_pos);

read_attribute(uuid);
read_attribute(minor);
read_attribute(bucket_size);
read_attribute(first_bucket);
read_attribute(nbuckets);
rw_attribute(durability);
read_attribute(iodone);

read_attribute(io_latency_read);
read_attribute(io_latency_write);
read_attribute(io_latency_stats_read);
read_attribute(io_latency_stats_write);
read_attribute(congested);

read_attribute(btree_write_stats);

read_attribute(btree_cache_size);
read_attribute(compression_stats);
read_attribute(journal_debug);
read_attribute(btree_updates);
read_attribute(btree_cache);
read_attribute(btree_key_cache);
read_attribute(stripes_heap);
read_attribute(open_buckets);
read_attribute(open_buckets_partial);
read_attribute(write_points);
read_attribute(nocow_lock_table);

#ifdef BCH_WRITE_REF_DEBUG
read_attribute(write_refs);

static const char * const bch2_write_refs[] = {
#define x(n)	#n,
	BCH_WRITE_REFS()
#undef x
	NULL
};

static void bch2_write_refs_to_text(struct printbuf *out, struct bch_fs *c)
{
	bch2_printbuf_tabstop_push(out, 24);

	for (unsigned i = 0; i < ARRAY_SIZE(c->writes); i++) {
		prt_str(out, bch2_write_refs[i]);
		prt_tab(out);
		prt_printf(out, "%li", atomic_long_read(&c->writes[i]));
		prt_newline(out);
	}
}
#endif

read_attribute(internal_uuid);
read_attribute(disk_groups);

read_attribute(has_data);
read_attribute(alloc_debug);

#define x(t, n, ...) read_attribute(t);
BCH_PERSISTENT_COUNTERS()
#undef x

rw_attribute(discard);
rw_attribute(label);

rw_attribute(copy_gc_enabled);
read_attribute(copy_gc_wait);

rw_attribute(rebalance_enabled);
sysfs_pd_controller_attribute(rebalance);
read_attribute(rebalance_work);
rw_attribute(promote_whole_extents);

read_attribute(new_stripes);

read_attribute(io_timers_read);
read_attribute(io_timers_write);

read_attribute(moving_ctxts);

#ifdef CONFIG_BCACHEFS_TESTS
write_attribute(perf_test);
#endif /* CONFIG_BCACHEFS_TESTS */

#define x(_name)							\
	static struct attribute sysfs_time_stat_##_name =		\
		{ .name = #_name, .mode = 0444 };
	BCH_TIME_STATS()
#undef x

static struct attribute sysfs_state_rw = {
	.name = "state",
	.mode = 0444,
};

static size_t bch2_btree_cache_size(struct bch_fs *c)
{
	size_t ret = 0;
	struct btree *b;

	mutex_lock(&c->btree_cache.lock);
	list_for_each_entry(b, &c->btree_cache.live, list)
		ret += btree_bytes(c);

	mutex_unlock(&c->btree_cache.lock);
	return ret;
}

static int bch2_compression_stats_to_text(struct printbuf *out, struct bch_fs *c)
{
	struct btree_trans *trans;
	struct btree_iter iter;
	struct bkey_s_c k;
	enum btree_id id;
	u64 nr_uncompressed_extents = 0,
	    nr_compressed_extents = 0,
	    nr_incompressible_extents = 0,
	    uncompressed_sectors = 0,
	    incompressible_sectors = 0,
	    compressed_sectors_compressed = 0,
	    compressed_sectors_uncompressed = 0;
	int ret = 0;

	if (!test_bit(BCH_FS_STARTED, &c->flags))
		return -EPERM;

	trans = bch2_trans_get(c);

	for (id = 0; id < BTREE_ID_NR; id++) {
		if (!btree_type_has_ptrs(id))
			continue;

		for_each_btree_key(trans, iter, id, POS_MIN,
				   BTREE_ITER_ALL_SNAPSHOTS, k, ret) {
			struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
			const union bch_extent_entry *entry;
			struct extent_ptr_decoded p;
			bool compressed = false, uncompressed = false, incompressible = false;

			bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
				switch (p.crc.compression_type) {
				case BCH_COMPRESSION_TYPE_none:
					uncompressed = true;
					uncompressed_sectors += k.k->size;
					break;
				case BCH_COMPRESSION_TYPE_incompressible:
					incompressible = true;
					incompressible_sectors += k.k->size;
					break;
				default:
					compressed_sectors_compressed +=
						p.crc.compressed_size;
					compressed_sectors_uncompressed +=
						p.crc.uncompressed_size;
					compressed = true;
					break;
				}
			}

			if (incompressible)
				nr_incompressible_extents++;
			else if (uncompressed)
				nr_uncompressed_extents++;
			else if (compressed)
				nr_compressed_extents++;
		}
		bch2_trans_iter_exit(trans, &iter);
	}

	bch2_trans_put(trans);

	if (ret)
		return ret;

	prt_printf(out, "uncompressed:\n");
	prt_printf(out, "	nr extents:		%llu\n", nr_uncompressed_extents);
	prt_printf(out, "	size:			");
	prt_human_readable_u64(out, uncompressed_sectors << 9);
	prt_printf(out, "\n");

	prt_printf(out, "compressed:\n");
	prt_printf(out, "	nr extents:		%llu\n", nr_compressed_extents);
	prt_printf(out, "	compressed size:	");
	prt_human_readable_u64(out, compressed_sectors_compressed << 9);
	prt_printf(out, "\n");
	prt_printf(out, "	uncompressed size:	");
	prt_human_readable_u64(out, compressed_sectors_uncompressed << 9);
	prt_printf(out, "\n");

	prt_printf(out, "incompressible:\n");
	prt_printf(out, "	nr extents:		%llu\n", nr_incompressible_extents);
	prt_printf(out, "	size:			");
	prt_human_readable_u64(out, incompressible_sectors << 9);
	prt_printf(out, "\n");
	return 0;
}

static void bch2_gc_gens_pos_to_text(struct printbuf *out, struct bch_fs *c)
{
	prt_printf(out, "%s: ", bch2_btree_ids[c->gc_gens_btree]);
	bch2_bpos_to_text(out, c->gc_gens_pos);
	prt_printf(out, "\n");
}

static void bch2_btree_wakeup_all(struct bch_fs *c)
{
	struct btree_trans *trans;

	seqmutex_lock(&c->btree_trans_lock);
	list_for_each_entry(trans, &c->btree_trans_list, list) {
		struct btree_bkey_cached_common *b = READ_ONCE(trans->locking);

		if (b)
			six_lock_wakeup_all(&b->lock);
	}
	seqmutex_unlock(&c->btree_trans_lock);
}

SHOW(bch2_fs)
{
	struct bch_fs *c = container_of(kobj, struct bch_fs, kobj);

	sysfs_print(minor, c->minor);
	sysfs_printf(internal_uuid, "%pU", c->sb.uuid.b);

	sysfs_hprint(btree_cache_size, bch2_btree_cache_size(c));

	if (attr == &sysfs_btree_write_stats)
		bch2_btree_write_stats_to_text(out, c);

	sysfs_printf(btree_gc_periodic, "%u", (int) c->btree_gc_periodic);

	if (attr == &sysfs_gc_gens_pos)
		bch2_gc_gens_pos_to_text(out, c);

	sysfs_printf(copy_gc_enabled, "%i", c->copy_gc_enabled);

	sysfs_printf(rebalance_enabled, "%i", c->rebalance.enabled);
	sysfs_pd_controller_show(rebalance, &c->rebalance.pd); /* XXX */

	if (attr == &sysfs_copy_gc_wait)
		bch2_copygc_wait_to_text(out, c);

	if (attr == &sysfs_rebalance_work)
		bch2_rebalance_work_to_text(out, c);

	sysfs_print(promote_whole_extents, c->promote_whole_extents);

	/* Debugging: */

	if (attr == &sysfs_journal_debug)
		bch2_journal_debug_to_text(out, &c->journal);

	if (attr == &sysfs_btree_updates)
		bch2_btree_updates_to_text(out, c);

	if (attr == &sysfs_btree_cache)
		bch2_btree_cache_to_text(out, c);

	if (attr == &sysfs_btree_key_cache)
		bch2_btree_key_cache_to_text(out, &c->btree_key_cache);

	if (attr == &sysfs_stripes_heap)
		bch2_stripes_heap_to_text(out, c);

	if (attr == &sysfs_open_buckets)
		bch2_open_buckets_to_text(out, c);

	if (attr == &sysfs_open_buckets_partial)
		bch2_open_buckets_partial_to_text(out, c);

	if (attr == &sysfs_write_points)
		bch2_write_points_to_text(out, c);

	if (attr == &sysfs_compression_stats)
		bch2_compression_stats_to_text(out, c);

	if (attr == &sysfs_new_stripes)
		bch2_new_stripes_to_text(out, c);

	if (attr == &sysfs_io_timers_read)
		bch2_io_timers_to_text(out, &c->io_clock[READ]);

	if (attr == &sysfs_io_timers_write)
		bch2_io_timers_to_text(out, &c->io_clock[WRITE]);

	if (attr == &sysfs_moving_ctxts)
		bch2_fs_moving_ctxts_to_text(out, c);

#ifdef BCH_WRITE_REF_DEBUG
	if (attr == &sysfs_write_refs)
		bch2_write_refs_to_text(out, c);
#endif

	if (attr == &sysfs_nocow_lock_table)
		bch2_nocow_locks_to_text(out, &c->nocow_locks);

	if (attr == &sysfs_disk_groups)
		bch2_disk_groups_to_text(out, c);

	return 0;
}

STORE(bch2_fs)
{
	struct bch_fs *c = container_of(kobj, struct bch_fs, kobj);

	if (attr == &sysfs_btree_gc_periodic) {
		ssize_t ret = strtoul_safe(buf, c->btree_gc_periodic)
			?: (ssize_t) size;

		wake_up_process(c->gc_thread);
		return ret;
	}

	if (attr == &sysfs_copy_gc_enabled) {
		ssize_t ret = strtoul_safe(buf, c->copy_gc_enabled)
			?: (ssize_t) size;

		if (c->copygc_thread)
			wake_up_process(c->copygc_thread);
		return ret;
	}

	if (attr == &sysfs_rebalance_enabled) {
		ssize_t ret = strtoul_safe(buf, c->rebalance.enabled)
			?: (ssize_t) size;

		rebalance_wakeup(c);
		return ret;
	}

	sysfs_pd_controller_store(rebalance, &c->rebalance.pd);

	sysfs_strtoul(promote_whole_extents, c->promote_whole_extents);

	/* Debugging: */

	if (!test_bit(BCH_FS_STARTED, &c->flags))
		return -EPERM;

	/* Debugging: */

	if (!test_bit(BCH_FS_RW, &c->flags))
		return -EROFS;

	if (attr == &sysfs_prune_cache) {
		struct shrink_control sc;

		sc.gfp_mask = GFP_KERNEL;
		sc.nr_to_scan = strtoul_or_return(buf);
		c->btree_cache.shrink->scan_objects(c->btree_cache.shrink, &sc);
	}

	if (attr == &sysfs_btree_wakeup)
		bch2_btree_wakeup_all(c);

	if (attr == &sysfs_trigger_gc) {
		/*
		 * Full gc is currently incompatible with btree key cache:
		 */
#if 0
		down_read(&c->state_lock);
		bch2_gc(c, false, false);
		up_read(&c->state_lock);
#else
		bch2_gc_gens(c);
#endif
	}

	if (attr == &sysfs_trigger_discards)
		bch2_do_discards(c);

	if (attr == &sysfs_trigger_invalidates)
		bch2_do_invalidates(c);

#ifdef CONFIG_BCACHEFS_TESTS
	if (attr == &sysfs_perf_test) {
		char *tmp = kstrdup(buf, GFP_KERNEL), *p = tmp;
		char *test		= strsep(&p, " \t\n");
		char *nr_str		= strsep(&p, " \t\n");
		char *threads_str	= strsep(&p, " \t\n");
		unsigned threads;
		u64 nr;
		int ret = -EINVAL;

		if (threads_str &&
		    !(ret = kstrtouint(threads_str, 10, &threads)) &&
		    !(ret = bch2_strtoull_h(nr_str, &nr)))
			ret = bch2_btree_perf_test(c, test, nr, threads);
		kfree(tmp);

		if (ret)
			size = ret;
	}
#endif
	return size;
}
SYSFS_OPS(bch2_fs);

struct attribute *bch2_fs_files[] = {
	&sysfs_minor,
	&sysfs_btree_cache_size,
	&sysfs_btree_write_stats,

	&sysfs_promote_whole_extents,

	&sysfs_compression_stats,

#ifdef CONFIG_BCACHEFS_TESTS
	&sysfs_perf_test,
#endif
	NULL
};

/* counters dir */

SHOW(bch2_fs_counters)
{
	struct bch_fs *c = container_of(kobj, struct bch_fs, counters_kobj);
	u64 counter = 0;
	u64 counter_since_mount = 0;

	printbuf_tabstop_push(out, 32);

#define x(t, ...)							\
	if (attr == &sysfs_##t) {					\
		counter             = percpu_u64_get(&c->counters[BCH_COUNTER_##t]);\
		counter_since_mount = counter - c->counters_on_mount[BCH_COUNTER_##t];\
		prt_printf(out, "since mount:");			\
		prt_tab(out);						\
		prt_human_readable_u64(out, counter_since_mount);	\
		prt_newline(out);					\
									\
		prt_printf(out, "since filesystem creation:");		\
		prt_tab(out);						\
		prt_human_readable_u64(out, counter);			\
		prt_newline(out);					\
	}
	BCH_PERSISTENT_COUNTERS()
#undef x
	return 0;
}

STORE(bch2_fs_counters) {
	return 0;
}

SYSFS_OPS(bch2_fs_counters);

struct attribute *bch2_fs_counters_files[] = {
#define x(t, ...)							\
	&sysfs_##t,
	BCH_PERSISTENT_COUNTERS()
#undef x
	NULL
};

/* internal dir - just a wrapper */

SHOW(bch2_fs_internal)
{
	struct bch_fs *c = container_of(kobj, struct bch_fs, internal);

	return bch2_fs_to_text(out, &c->kobj, attr);
}

STORE(bch2_fs_internal)
{
	struct bch_fs *c = container_of(kobj, struct bch_fs, internal);

	return bch2_fs_store(&c->kobj, attr, buf, size);
}
SYSFS_OPS(bch2_fs_internal);

struct attribute *bch2_fs_internal_files[] = {
	&sysfs_journal_debug,
	&sysfs_btree_updates,
	&sysfs_btree_cache,
	&sysfs_btree_key_cache,
	&sysfs_new_stripes,
	&sysfs_stripes_heap,
	&sysfs_open_buckets,
	&sysfs_open_buckets_partial,
	&sysfs_write_points,
#ifdef BCH_WRITE_REF_DEBUG
	&sysfs_write_refs,
#endif
	&sysfs_nocow_lock_table,
	&sysfs_io_timers_read,
	&sysfs_io_timers_write,

	&sysfs_trigger_gc,
	&sysfs_trigger_discards,
	&sysfs_trigger_invalidates,
	&sysfs_prune_cache,
	&sysfs_btree_wakeup,

	&sysfs_gc_gens_pos,

	&sysfs_copy_gc_enabled,
	&sysfs_copy_gc_wait,

	&sysfs_rebalance_enabled,
	&sysfs_rebalance_work,
	sysfs_pd_controller_files(rebalance),

	&sysfs_moving_ctxts,

	&sysfs_internal_uuid,

	&sysfs_disk_groups,
	NULL
};

/* options */

SHOW(bch2_fs_opts_dir)
{
	struct bch_fs *c = container_of(kobj, struct bch_fs, opts_dir);
	const struct bch_option *opt = container_of(attr, struct bch_option, attr);
	int id = opt - bch2_opt_table;
	u64 v = bch2_opt_get_by_id(&c->opts, id);

	bch2_opt_to_text(out, c, c->disk_sb.sb, opt, v, OPT_SHOW_FULL_LIST);
	prt_char(out, '\n');

	return 0;
}

STORE(bch2_fs_opts_dir)
{
	struct bch_fs *c = container_of(kobj, struct bch_fs, opts_dir);
	const struct bch_option *opt = container_of(attr, struct bch_option, attr);
	int ret, id = opt - bch2_opt_table;
	char *tmp;
	u64 v;

	/*
	 * We don't need to take c->writes for correctness, but it eliminates an
	 * unsightly error message in the dmesg log when we're RO:
	 */
	if (unlikely(!bch2_write_ref_tryget(c, BCH_WRITE_REF_sysfs)))
		return -EROFS;

	tmp = kstrdup(buf, GFP_KERNEL);
	if (!tmp) {
		ret = -ENOMEM;
		goto err;
	}

	ret = bch2_opt_parse(c, opt, strim(tmp), &v, NULL);
	kfree(tmp);

	if (ret < 0)
		goto err;

	ret = bch2_opt_check_may_set(c, id, v);
	if (ret < 0)
		goto err;

	bch2_opt_set_sb(c, opt, v);
	bch2_opt_set_by_id(&c->opts, id, v);

	if ((id == Opt_background_target ||
	     id == Opt_background_compression) && v) {
		bch2_rebalance_add_work(c, S64_MAX);
		rebalance_wakeup(c);
	}

	ret = size;
err:
	bch2_write_ref_put(c, BCH_WRITE_REF_sysfs);
	return ret;
}
SYSFS_OPS(bch2_fs_opts_dir);

struct attribute *bch2_fs_opts_dir_files[] = { NULL };

int bch2_opts_create_sysfs_files(struct kobject *kobj)
{
	const struct bch_option *i;
	int ret;

	for (i = bch2_opt_table;
	     i < bch2_opt_table + bch2_opts_nr;
	     i++) {
		if (!(i->flags & OPT_FS))
			continue;

		ret = sysfs_create_file(kobj, &i->attr);
		if (ret)
			return ret;
	}

	return 0;
}

/* time stats */

SHOW(bch2_fs_time_stats)
{
	struct bch_fs *c = container_of(kobj, struct bch_fs, time_stats);

#define x(name)								\
	if (attr == &sysfs_time_stat_##name)				\
		bch2_time_stats_to_text(out, &c->times[BCH_TIME_##name]);
	BCH_TIME_STATS()
#undef x

	return 0;
}

STORE(bch2_fs_time_stats)
{
	return size;
}
SYSFS_OPS(bch2_fs_time_stats);

struct attribute *bch2_fs_time_stats_files[] = {
#define x(name)								\
	&sysfs_time_stat_##name,
	BCH_TIME_STATS()
#undef x
	NULL
};

static void dev_alloc_debug_to_text(struct printbuf *out, struct bch_dev *ca)
{
	struct bch_fs *c = ca->fs;
	struct bch_dev_usage stats = bch2_dev_usage_read(ca);
	unsigned i, nr[BCH_DATA_NR];

	memset(nr, 0, sizeof(nr));

	for (i = 0; i < ARRAY_SIZE(c->open_buckets); i++)
		nr[c->open_buckets[i].data_type]++;

	printbuf_tabstop_push(out, 8);
	printbuf_tabstop_push(out, 16);
	printbuf_tabstop_push(out, 16);
	printbuf_tabstop_push(out, 16);
	printbuf_tabstop_push(out, 16);

	prt_tab(out);
	prt_str(out, "buckets");
	prt_tab_rjust(out);
	prt_str(out, "sectors");
	prt_tab_rjust(out);
	prt_str(out, "fragmented");
	prt_tab_rjust(out);
	prt_newline(out);

	for (i = 0; i < BCH_DATA_NR; i++) {
		prt_str(out, bch2_data_types[i]);
		prt_tab(out);
		prt_u64(out, stats.d[i].buckets);
		prt_tab_rjust(out);
		prt_u64(out, stats.d[i].sectors);
		prt_tab_rjust(out);
		prt_u64(out, stats.d[i].fragmented);
		prt_tab_rjust(out);
		prt_newline(out);
	}

	prt_str(out, "ec");
	prt_tab(out);
	prt_u64(out, stats.buckets_ec);
	prt_tab_rjust(out);
	prt_newline(out);

	prt_newline(out);

	prt_printf(out, "reserves:");
	prt_newline(out);
	for (i = 0; i < BCH_WATERMARK_NR; i++) {
		prt_str(out, bch2_watermarks[i]);
		prt_tab(out);
		prt_u64(out, bch2_dev_buckets_reserved(ca, i));
		prt_tab_rjust(out);
		prt_newline(out);
	}

	prt_newline(out);

	printbuf_tabstops_reset(out);
	printbuf_tabstop_push(out, 24);

	prt_str(out, "freelist_wait");
	prt_tab(out);
	prt_str(out, c->freelist_wait.list.first ? "waiting" : "empty");
	prt_newline(out);

	prt_str(out, "open buckets allocated");
	prt_tab(out);
	prt_u64(out, OPEN_BUCKETS_COUNT - c->open_buckets_nr_free);
	prt_newline(out);

	prt_str(out, "open buckets this dev");
	prt_tab(out);
	prt_u64(out, ca->nr_open_buckets);
	prt_newline(out);

	prt_str(out, "open buckets total");
	prt_tab(out);
	prt_u64(out, OPEN_BUCKETS_COUNT);
	prt_newline(out);

	prt_str(out, "open_buckets_wait");
	prt_tab(out);
	prt_str(out, c->open_buckets_wait.list.first ? "waiting" : "empty");
	prt_newline(out);

	prt_str(out, "open_buckets_btree");
	prt_tab(out);
	prt_u64(out, nr[BCH_DATA_btree]);
	prt_newline(out);

	prt_str(out, "open_buckets_user");
	prt_tab(out);
	prt_u64(out, nr[BCH_DATA_user]);
	prt_newline(out);

	prt_str(out, "buckets_to_invalidate");
	prt_tab(out);
	prt_u64(out, should_invalidate_buckets(ca, stats));
	prt_newline(out);

	prt_str(out, "btree reserve cache");
	prt_tab(out);
	prt_u64(out, c->btree_reserve_cache_nr);
	prt_newline(out);
}

static const char * const bch2_rw[] = {
	"read",
	"write",
	NULL
};

static void dev_iodone_to_text(struct printbuf *out, struct bch_dev *ca)
{
	int rw, i;

	for (rw = 0; rw < 2; rw++) {
		prt_printf(out, "%s:\n", bch2_rw[rw]);

		for (i = 1; i < BCH_DATA_NR; i++)
			prt_printf(out, "%-12s:%12llu\n",
				   bch2_data_types[i],
				   percpu_u64_get(&ca->io_done->sectors[rw][i]) << 9);
	}
}

SHOW(bch2_dev)
{
	struct bch_dev *ca = container_of(kobj, struct bch_dev, kobj);
	struct bch_fs *c = ca->fs;

	sysfs_printf(uuid, "%pU\n", ca->uuid.b);

	sysfs_print(bucket_size, bucket_bytes(ca));
	sysfs_print(first_bucket, ca->mi.first_bucket);
	sysfs_print(nbuckets, ca->mi.nbuckets);
	sysfs_print(durability, ca->mi.durability);
	sysfs_print(discard, ca->mi.discard);

	if (attr == &sysfs_label) {
		if (ca->mi.group) {
			mutex_lock(&c->sb_lock);
			bch2_disk_path_to_text(out, c->disk_sb.sb,
					       ca->mi.group - 1);
			mutex_unlock(&c->sb_lock);
		}

		prt_char(out, '\n');
	}

	if (attr == &sysfs_has_data) {
		prt_bitflags(out, bch2_data_types, bch2_dev_has_data(c, ca));
		prt_char(out, '\n');
	}

	if (attr == &sysfs_state_rw) {
		prt_string_option(out, bch2_member_states, ca->mi.state);
		prt_char(out, '\n');
	}

	if (attr == &sysfs_iodone)
		dev_iodone_to_text(out, ca);

	sysfs_print(io_latency_read, atomic64_read(&ca->cur_latency[READ]));
	sysfs_print(io_latency_write, atomic64_read(&ca->cur_latency[WRITE]));

	if (attr == &sysfs_io_latency_stats_read)
		bch2_time_stats_to_text(out, &ca->io_latency[READ]);

	if (attr == &sysfs_io_latency_stats_write)
		bch2_time_stats_to_text(out, &ca->io_latency[WRITE]);

	sysfs_printf(congested, "%u%%",
		     clamp(atomic_read(&ca->congested), 0, CONGESTED_MAX)
		     * 100 / CONGESTED_MAX);

	if (attr == &sysfs_alloc_debug)
		dev_alloc_debug_to_text(out, ca);

	return 0;
}

STORE(bch2_dev)
{
	struct bch_dev *ca = container_of(kobj, struct bch_dev, kobj);
	struct bch_fs *c = ca->fs;
	struct bch_member *mi;

	if (attr == &sysfs_discard) {
		bool v = strtoul_or_return(buf);

		mutex_lock(&c->sb_lock);
		mi = bch2_members_v2_get_mut(c->disk_sb.sb, ca->dev_idx);

		if (v != BCH_MEMBER_DISCARD(mi)) {
			SET_BCH_MEMBER_DISCARD(mi, v);
			bch2_write_super(c);
		}
		mutex_unlock(&c->sb_lock);
	}

	if (attr == &sysfs_durability) {
		u64 v = strtoul_or_return(buf);

		mutex_lock(&c->sb_lock);
		mi = bch2_members_v2_get_mut(c->disk_sb.sb, ca->dev_idx);

		if (v + 1 != BCH_MEMBER_DURABILITY(mi)) {
			SET_BCH_MEMBER_DURABILITY(mi, v + 1);
			bch2_write_super(c);
		}
		mutex_unlock(&c->sb_lock);
	}

	if (attr == &sysfs_label) {
		char *tmp;
		int ret;

		tmp = kstrdup(buf, GFP_KERNEL);
		if (!tmp)
			return -ENOMEM;

		ret = bch2_dev_group_set(c, ca, strim(tmp));
		kfree(tmp);
		if (ret)
			return ret;
	}

	return size;
}
SYSFS_OPS(bch2_dev);

struct attribute *bch2_dev_files[] = {
	&sysfs_uuid,
	&sysfs_bucket_size,
	&sysfs_first_bucket,
	&sysfs_nbuckets,
	&sysfs_durability,

	/* settings: */
	&sysfs_discard,
	&sysfs_state_rw,
	&sysfs_label,

	&sysfs_has_data,
	&sysfs_iodone,

	&sysfs_io_latency_read,
	&sysfs_io_latency_write,
	&sysfs_io_latency_stats_read,
	&sysfs_io_latency_stats_write,
	&sysfs_congested,

	/* debug: */
	&sysfs_alloc_debug,
	NULL
};

#endif /* NO_BCACHEFS_SYSFS */