1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * bcache sysfs interfaces 4 * 5 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com> 6 * Copyright 2012 Google, Inc. 7 */ 8 9 #ifndef NO_BCACHEFS_SYSFS 10 11 #include "bcachefs.h" 12 #include "alloc_background.h" 13 #include "alloc_foreground.h" 14 #include "sysfs.h" 15 #include "btree_cache.h" 16 #include "btree_io.h" 17 #include "btree_iter.h" 18 #include "btree_key_cache.h" 19 #include "btree_update.h" 20 #include "btree_gc.h" 21 #include "buckets.h" 22 #include "clock.h" 23 #include "compress.h" 24 #include "disk_groups.h" 25 #include "ec.h" 26 #include "inode.h" 27 #include "journal.h" 28 #include "keylist.h" 29 #include "move.h" 30 #include "movinggc.h" 31 #include "nocow_locking.h" 32 #include "opts.h" 33 #include "rebalance.h" 34 #include "replicas.h" 35 #include "super-io.h" 36 #include "tests.h" 37 38 #include <linux/blkdev.h> 39 #include <linux/sort.h> 40 #include <linux/sched/clock.h> 41 42 #include "util.h" 43 44 #define SYSFS_OPS(type) \ 45 const struct sysfs_ops type ## _sysfs_ops = { \ 46 .show = type ## _show, \ 47 .store = type ## _store \ 48 } 49 50 #define SHOW(fn) \ 51 static ssize_t fn ## _to_text(struct printbuf *, \ 52 struct kobject *, struct attribute *); \ 53 \ 54 static ssize_t fn ## _show(struct kobject *kobj, struct attribute *attr,\ 55 char *buf) \ 56 { \ 57 struct printbuf out = PRINTBUF; \ 58 ssize_t ret = fn ## _to_text(&out, kobj, attr); \ 59 \ 60 if (out.pos && out.buf[out.pos - 1] != '\n') \ 61 prt_newline(&out); \ 62 \ 63 if (!ret && out.allocation_failure) \ 64 ret = -ENOMEM; \ 65 \ 66 if (!ret) { \ 67 ret = min_t(size_t, out.pos, PAGE_SIZE - 1); \ 68 memcpy(buf, out.buf, ret); \ 69 } \ 70 printbuf_exit(&out); \ 71 return bch2_err_class(ret); \ 72 } \ 73 \ 74 static ssize_t fn ## _to_text(struct printbuf *out, struct kobject *kobj,\ 75 struct attribute *attr) 76 77 #define STORE(fn) \ 78 static ssize_t fn ## _store_inner(struct kobject *, struct attribute *,\ 79 const char *, size_t); \ 80 \ 81 static ssize_t fn ## _store(struct kobject *kobj, struct attribute *attr,\ 82 const char *buf, size_t size) \ 83 { \ 84 return bch2_err_class(fn##_store_inner(kobj, attr, buf, size)); \ 85 } \ 86 \ 87 static ssize_t fn ## _store_inner(struct kobject *kobj, struct attribute *attr,\ 88 const char *buf, size_t size) 89 90 #define __sysfs_attribute(_name, _mode) \ 91 static struct attribute sysfs_##_name = \ 92 { .name = #_name, .mode = _mode } 93 94 #define write_attribute(n) __sysfs_attribute(n, 0200) 95 #define read_attribute(n) __sysfs_attribute(n, 0444) 96 #define rw_attribute(n) __sysfs_attribute(n, 0644) 97 98 #define sysfs_printf(file, fmt, ...) \ 99 do { \ 100 if (attr == &sysfs_ ## file) \ 101 prt_printf(out, fmt "\n", __VA_ARGS__); \ 102 } while (0) 103 104 #define sysfs_print(file, var) \ 105 do { \ 106 if (attr == &sysfs_ ## file) \ 107 snprint(out, var); \ 108 } while (0) 109 110 #define sysfs_hprint(file, val) \ 111 do { \ 112 if (attr == &sysfs_ ## file) \ 113 prt_human_readable_s64(out, val); \ 114 } while (0) 115 116 #define sysfs_strtoul(file, var) \ 117 do { \ 118 if (attr == &sysfs_ ## file) \ 119 return strtoul_safe(buf, var) ?: (ssize_t) size; \ 120 } while (0) 121 122 #define sysfs_strtoul_clamp(file, var, min, max) \ 123 do { \ 124 if (attr == &sysfs_ ## file) \ 125 return strtoul_safe_clamp(buf, var, min, max) \ 126 ?: (ssize_t) size; \ 127 } while (0) 128 129 #define strtoul_or_return(cp) \ 130 ({ \ 131 unsigned long _v; \ 132 int _r = kstrtoul(cp, 10, &_v); \ 133 if (_r) \ 134 return _r; \ 135 _v; \ 136 }) 137 138 write_attribute(trigger_gc); 139 write_attribute(trigger_discards); 140 write_attribute(trigger_invalidates); 141 write_attribute(prune_cache); 142 write_attribute(btree_wakeup); 143 rw_attribute(btree_gc_periodic); 144 rw_attribute(gc_gens_pos); 145 146 read_attribute(uuid); 147 read_attribute(minor); 148 read_attribute(flags); 149 read_attribute(bucket_size); 150 read_attribute(first_bucket); 151 read_attribute(nbuckets); 152 rw_attribute(durability); 153 read_attribute(io_done); 154 read_attribute(io_errors); 155 write_attribute(io_errors_reset); 156 157 read_attribute(io_latency_read); 158 read_attribute(io_latency_write); 159 read_attribute(io_latency_stats_read); 160 read_attribute(io_latency_stats_write); 161 read_attribute(congested); 162 163 read_attribute(btree_write_stats); 164 165 read_attribute(btree_cache_size); 166 read_attribute(compression_stats); 167 read_attribute(journal_debug); 168 read_attribute(btree_cache); 169 read_attribute(btree_key_cache); 170 read_attribute(stripes_heap); 171 read_attribute(open_buckets); 172 read_attribute(open_buckets_partial); 173 read_attribute(write_points); 174 read_attribute(nocow_lock_table); 175 176 #ifdef BCH_WRITE_REF_DEBUG 177 read_attribute(write_refs); 178 179 static const char * const bch2_write_refs[] = { 180 #define x(n) #n, 181 BCH_WRITE_REFS() 182 #undef x 183 NULL 184 }; 185 186 static void bch2_write_refs_to_text(struct printbuf *out, struct bch_fs *c) 187 { 188 bch2_printbuf_tabstop_push(out, 24); 189 190 for (unsigned i = 0; i < ARRAY_SIZE(c->writes); i++) { 191 prt_str(out, bch2_write_refs[i]); 192 prt_tab(out); 193 prt_printf(out, "%li", atomic_long_read(&c->writes[i])); 194 prt_newline(out); 195 } 196 } 197 #endif 198 199 read_attribute(internal_uuid); 200 read_attribute(disk_groups); 201 202 read_attribute(has_data); 203 read_attribute(alloc_debug); 204 205 #define x(t, n, ...) read_attribute(t); 206 BCH_PERSISTENT_COUNTERS() 207 #undef x 208 209 rw_attribute(discard); 210 rw_attribute(label); 211 212 rw_attribute(copy_gc_enabled); 213 read_attribute(copy_gc_wait); 214 215 rw_attribute(rebalance_enabled); 216 sysfs_pd_controller_attribute(rebalance); 217 read_attribute(rebalance_status); 218 rw_attribute(promote_whole_extents); 219 220 read_attribute(new_stripes); 221 222 read_attribute(io_timers_read); 223 read_attribute(io_timers_write); 224 225 read_attribute(moving_ctxts); 226 227 #ifdef CONFIG_BCACHEFS_TESTS 228 write_attribute(perf_test); 229 #endif /* CONFIG_BCACHEFS_TESTS */ 230 231 #define x(_name) \ 232 static struct attribute sysfs_time_stat_##_name = \ 233 { .name = #_name, .mode = 0444 }; 234 BCH_TIME_STATS() 235 #undef x 236 237 static struct attribute sysfs_state_rw = { 238 .name = "state", 239 .mode = 0444, 240 }; 241 242 static size_t bch2_btree_cache_size(struct bch_fs *c) 243 { 244 size_t ret = 0; 245 struct btree *b; 246 247 mutex_lock(&c->btree_cache.lock); 248 list_for_each_entry(b, &c->btree_cache.live, list) 249 ret += btree_buf_bytes(b); 250 251 mutex_unlock(&c->btree_cache.lock); 252 return ret; 253 } 254 255 static int bch2_compression_stats_to_text(struct printbuf *out, struct bch_fs *c) 256 { 257 struct btree_trans *trans; 258 enum btree_id id; 259 struct compression_type_stats { 260 u64 nr_extents; 261 u64 sectors_compressed; 262 u64 sectors_uncompressed; 263 } s[BCH_COMPRESSION_TYPE_NR]; 264 u64 compressed_incompressible = 0; 265 int ret = 0; 266 267 memset(s, 0, sizeof(s)); 268 269 if (!test_bit(BCH_FS_started, &c->flags)) 270 return -EPERM; 271 272 trans = bch2_trans_get(c); 273 274 for (id = 0; id < BTREE_ID_NR; id++) { 275 if (!btree_type_has_ptrs(id)) 276 continue; 277 278 ret = for_each_btree_key(trans, iter, id, POS_MIN, 279 BTREE_ITER_ALL_SNAPSHOTS, k, ({ 280 struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k); 281 struct bch_extent_crc_unpacked crc; 282 const union bch_extent_entry *entry; 283 bool compressed = false, incompressible = false; 284 285 bkey_for_each_crc(k.k, ptrs, crc, entry) { 286 incompressible |= crc.compression_type == BCH_COMPRESSION_TYPE_incompressible; 287 compressed |= crc_is_compressed(crc); 288 289 if (crc_is_compressed(crc)) { 290 s[crc.compression_type].nr_extents++; 291 s[crc.compression_type].sectors_compressed += crc.compressed_size; 292 s[crc.compression_type].sectors_uncompressed += crc.uncompressed_size; 293 } 294 } 295 296 compressed_incompressible += compressed && incompressible; 297 298 if (!compressed) { 299 unsigned t = incompressible ? BCH_COMPRESSION_TYPE_incompressible : 0; 300 301 s[t].nr_extents++; 302 s[t].sectors_compressed += k.k->size; 303 s[t].sectors_uncompressed += k.k->size; 304 } 305 0; 306 })); 307 } 308 309 bch2_trans_put(trans); 310 311 if (ret) 312 return ret; 313 314 prt_str(out, "type"); 315 printbuf_tabstop_push(out, 12); 316 prt_tab(out); 317 318 prt_str(out, "compressed"); 319 printbuf_tabstop_push(out, 16); 320 prt_tab_rjust(out); 321 322 prt_str(out, "uncompressed"); 323 printbuf_tabstop_push(out, 16); 324 prt_tab_rjust(out); 325 326 prt_str(out, "average extent size"); 327 printbuf_tabstop_push(out, 24); 328 prt_tab_rjust(out); 329 prt_newline(out); 330 331 for (unsigned i = 0; i < ARRAY_SIZE(s); i++) { 332 bch2_prt_compression_type(out, i); 333 prt_tab(out); 334 335 prt_human_readable_u64(out, s[i].sectors_compressed << 9); 336 prt_tab_rjust(out); 337 338 prt_human_readable_u64(out, s[i].sectors_uncompressed << 9); 339 prt_tab_rjust(out); 340 341 prt_human_readable_u64(out, s[i].nr_extents 342 ? div_u64(s[i].sectors_uncompressed << 9, s[i].nr_extents) 343 : 0); 344 prt_tab_rjust(out); 345 prt_newline(out); 346 } 347 348 if (compressed_incompressible) { 349 prt_printf(out, "%llu compressed & incompressible extents", compressed_incompressible); 350 prt_newline(out); 351 } 352 353 return 0; 354 } 355 356 static void bch2_gc_gens_pos_to_text(struct printbuf *out, struct bch_fs *c) 357 { 358 prt_printf(out, "%s: ", bch2_btree_id_str(c->gc_gens_btree)); 359 bch2_bpos_to_text(out, c->gc_gens_pos); 360 prt_printf(out, "\n"); 361 } 362 363 static void bch2_btree_wakeup_all(struct bch_fs *c) 364 { 365 struct btree_trans *trans; 366 367 seqmutex_lock(&c->btree_trans_lock); 368 list_for_each_entry(trans, &c->btree_trans_list, list) { 369 struct btree_bkey_cached_common *b = READ_ONCE(trans->locking); 370 371 if (b) 372 six_lock_wakeup_all(&b->lock); 373 374 } 375 seqmutex_unlock(&c->btree_trans_lock); 376 } 377 378 SHOW(bch2_fs) 379 { 380 struct bch_fs *c = container_of(kobj, struct bch_fs, kobj); 381 382 sysfs_print(minor, c->minor); 383 sysfs_printf(internal_uuid, "%pU", c->sb.uuid.b); 384 385 if (attr == &sysfs_flags) 386 prt_bitflags(out, bch2_fs_flag_strs, c->flags); 387 388 sysfs_hprint(btree_cache_size, bch2_btree_cache_size(c)); 389 390 if (attr == &sysfs_btree_write_stats) 391 bch2_btree_write_stats_to_text(out, c); 392 393 sysfs_printf(btree_gc_periodic, "%u", (int) c->btree_gc_periodic); 394 395 if (attr == &sysfs_gc_gens_pos) 396 bch2_gc_gens_pos_to_text(out, c); 397 398 sysfs_printf(copy_gc_enabled, "%i", c->copy_gc_enabled); 399 400 sysfs_printf(rebalance_enabled, "%i", c->rebalance.enabled); 401 sysfs_pd_controller_show(rebalance, &c->rebalance.pd); /* XXX */ 402 403 if (attr == &sysfs_copy_gc_wait) 404 bch2_copygc_wait_to_text(out, c); 405 406 if (attr == &sysfs_rebalance_status) 407 bch2_rebalance_status_to_text(out, c); 408 409 sysfs_print(promote_whole_extents, c->promote_whole_extents); 410 411 /* Debugging: */ 412 413 if (attr == &sysfs_journal_debug) 414 bch2_journal_debug_to_text(out, &c->journal); 415 416 if (attr == &sysfs_btree_cache) 417 bch2_btree_cache_to_text(out, c); 418 419 if (attr == &sysfs_btree_key_cache) 420 bch2_btree_key_cache_to_text(out, &c->btree_key_cache); 421 422 if (attr == &sysfs_stripes_heap) 423 bch2_stripes_heap_to_text(out, c); 424 425 if (attr == &sysfs_open_buckets) 426 bch2_open_buckets_to_text(out, c); 427 428 if (attr == &sysfs_open_buckets_partial) 429 bch2_open_buckets_partial_to_text(out, c); 430 431 if (attr == &sysfs_write_points) 432 bch2_write_points_to_text(out, c); 433 434 if (attr == &sysfs_compression_stats) 435 bch2_compression_stats_to_text(out, c); 436 437 if (attr == &sysfs_new_stripes) 438 bch2_new_stripes_to_text(out, c); 439 440 if (attr == &sysfs_io_timers_read) 441 bch2_io_timers_to_text(out, &c->io_clock[READ]); 442 443 if (attr == &sysfs_io_timers_write) 444 bch2_io_timers_to_text(out, &c->io_clock[WRITE]); 445 446 if (attr == &sysfs_moving_ctxts) 447 bch2_fs_moving_ctxts_to_text(out, c); 448 449 #ifdef BCH_WRITE_REF_DEBUG 450 if (attr == &sysfs_write_refs) 451 bch2_write_refs_to_text(out, c); 452 #endif 453 454 if (attr == &sysfs_nocow_lock_table) 455 bch2_nocow_locks_to_text(out, &c->nocow_locks); 456 457 if (attr == &sysfs_disk_groups) 458 bch2_disk_groups_to_text(out, c); 459 460 return 0; 461 } 462 463 STORE(bch2_fs) 464 { 465 struct bch_fs *c = container_of(kobj, struct bch_fs, kobj); 466 467 if (attr == &sysfs_btree_gc_periodic) { 468 ssize_t ret = strtoul_safe(buf, c->btree_gc_periodic) 469 ?: (ssize_t) size; 470 471 wake_up_process(c->gc_thread); 472 return ret; 473 } 474 475 if (attr == &sysfs_copy_gc_enabled) { 476 ssize_t ret = strtoul_safe(buf, c->copy_gc_enabled) 477 ?: (ssize_t) size; 478 479 if (c->copygc_thread) 480 wake_up_process(c->copygc_thread); 481 return ret; 482 } 483 484 if (attr == &sysfs_rebalance_enabled) { 485 ssize_t ret = strtoul_safe(buf, c->rebalance.enabled) 486 ?: (ssize_t) size; 487 488 rebalance_wakeup(c); 489 return ret; 490 } 491 492 sysfs_pd_controller_store(rebalance, &c->rebalance.pd); 493 494 sysfs_strtoul(promote_whole_extents, c->promote_whole_extents); 495 496 /* Debugging: */ 497 498 if (!test_bit(BCH_FS_started, &c->flags)) 499 return -EPERM; 500 501 /* Debugging: */ 502 503 if (!test_bit(BCH_FS_rw, &c->flags)) 504 return -EROFS; 505 506 if (attr == &sysfs_prune_cache) { 507 struct shrink_control sc; 508 509 sc.gfp_mask = GFP_KERNEL; 510 sc.nr_to_scan = strtoul_or_return(buf); 511 c->btree_cache.shrink->scan_objects(c->btree_cache.shrink, &sc); 512 } 513 514 if (attr == &sysfs_btree_wakeup) 515 bch2_btree_wakeup_all(c); 516 517 if (attr == &sysfs_trigger_gc) { 518 /* 519 * Full gc is currently incompatible with btree key cache: 520 */ 521 #if 0 522 down_read(&c->state_lock); 523 bch2_gc(c, false, false); 524 up_read(&c->state_lock); 525 #else 526 bch2_gc_gens(c); 527 #endif 528 } 529 530 if (attr == &sysfs_trigger_discards) 531 bch2_do_discards(c); 532 533 if (attr == &sysfs_trigger_invalidates) 534 bch2_do_invalidates(c); 535 536 #ifdef CONFIG_BCACHEFS_TESTS 537 if (attr == &sysfs_perf_test) { 538 char *tmp = kstrdup(buf, GFP_KERNEL), *p = tmp; 539 char *test = strsep(&p, " \t\n"); 540 char *nr_str = strsep(&p, " \t\n"); 541 char *threads_str = strsep(&p, " \t\n"); 542 unsigned threads; 543 u64 nr; 544 int ret = -EINVAL; 545 546 if (threads_str && 547 !(ret = kstrtouint(threads_str, 10, &threads)) && 548 !(ret = bch2_strtoull_h(nr_str, &nr))) 549 ret = bch2_btree_perf_test(c, test, nr, threads); 550 kfree(tmp); 551 552 if (ret) 553 size = ret; 554 } 555 #endif 556 return size; 557 } 558 SYSFS_OPS(bch2_fs); 559 560 struct attribute *bch2_fs_files[] = { 561 &sysfs_minor, 562 &sysfs_btree_cache_size, 563 &sysfs_btree_write_stats, 564 565 &sysfs_promote_whole_extents, 566 567 &sysfs_compression_stats, 568 569 #ifdef CONFIG_BCACHEFS_TESTS 570 &sysfs_perf_test, 571 #endif 572 NULL 573 }; 574 575 /* counters dir */ 576 577 SHOW(bch2_fs_counters) 578 { 579 struct bch_fs *c = container_of(kobj, struct bch_fs, counters_kobj); 580 u64 counter = 0; 581 u64 counter_since_mount = 0; 582 583 printbuf_tabstop_push(out, 32); 584 585 #define x(t, ...) \ 586 if (attr == &sysfs_##t) { \ 587 counter = percpu_u64_get(&c->counters[BCH_COUNTER_##t]);\ 588 counter_since_mount = counter - c->counters_on_mount[BCH_COUNTER_##t];\ 589 prt_printf(out, "since mount:"); \ 590 prt_tab(out); \ 591 prt_human_readable_u64(out, counter_since_mount); \ 592 prt_newline(out); \ 593 \ 594 prt_printf(out, "since filesystem creation:"); \ 595 prt_tab(out); \ 596 prt_human_readable_u64(out, counter); \ 597 prt_newline(out); \ 598 } 599 BCH_PERSISTENT_COUNTERS() 600 #undef x 601 return 0; 602 } 603 604 STORE(bch2_fs_counters) { 605 return 0; 606 } 607 608 SYSFS_OPS(bch2_fs_counters); 609 610 struct attribute *bch2_fs_counters_files[] = { 611 #define x(t, ...) \ 612 &sysfs_##t, 613 BCH_PERSISTENT_COUNTERS() 614 #undef x 615 NULL 616 }; 617 /* internal dir - just a wrapper */ 618 619 SHOW(bch2_fs_internal) 620 { 621 struct bch_fs *c = container_of(kobj, struct bch_fs, internal); 622 623 return bch2_fs_to_text(out, &c->kobj, attr); 624 } 625 626 STORE(bch2_fs_internal) 627 { 628 struct bch_fs *c = container_of(kobj, struct bch_fs, internal); 629 630 return bch2_fs_store(&c->kobj, attr, buf, size); 631 } 632 SYSFS_OPS(bch2_fs_internal); 633 634 struct attribute *bch2_fs_internal_files[] = { 635 &sysfs_flags, 636 &sysfs_journal_debug, 637 &sysfs_btree_cache, 638 &sysfs_btree_key_cache, 639 &sysfs_new_stripes, 640 &sysfs_stripes_heap, 641 &sysfs_open_buckets, 642 &sysfs_open_buckets_partial, 643 &sysfs_write_points, 644 #ifdef BCH_WRITE_REF_DEBUG 645 &sysfs_write_refs, 646 #endif 647 &sysfs_nocow_lock_table, 648 &sysfs_io_timers_read, 649 &sysfs_io_timers_write, 650 651 &sysfs_trigger_gc, 652 &sysfs_trigger_discards, 653 &sysfs_trigger_invalidates, 654 &sysfs_prune_cache, 655 &sysfs_btree_wakeup, 656 657 &sysfs_gc_gens_pos, 658 659 &sysfs_copy_gc_enabled, 660 &sysfs_copy_gc_wait, 661 662 &sysfs_rebalance_enabled, 663 &sysfs_rebalance_status, 664 sysfs_pd_controller_files(rebalance), 665 666 &sysfs_moving_ctxts, 667 668 &sysfs_internal_uuid, 669 670 &sysfs_disk_groups, 671 NULL 672 }; 673 674 /* options */ 675 676 SHOW(bch2_fs_opts_dir) 677 { 678 struct bch_fs *c = container_of(kobj, struct bch_fs, opts_dir); 679 const struct bch_option *opt = container_of(attr, struct bch_option, attr); 680 int id = opt - bch2_opt_table; 681 u64 v = bch2_opt_get_by_id(&c->opts, id); 682 683 bch2_opt_to_text(out, c, c->disk_sb.sb, opt, v, OPT_SHOW_FULL_LIST); 684 prt_char(out, '\n'); 685 686 return 0; 687 } 688 689 STORE(bch2_fs_opts_dir) 690 { 691 struct bch_fs *c = container_of(kobj, struct bch_fs, opts_dir); 692 const struct bch_option *opt = container_of(attr, struct bch_option, attr); 693 int ret, id = opt - bch2_opt_table; 694 char *tmp; 695 u64 v; 696 697 /* 698 * We don't need to take c->writes for correctness, but it eliminates an 699 * unsightly error message in the dmesg log when we're RO: 700 */ 701 if (unlikely(!bch2_write_ref_tryget(c, BCH_WRITE_REF_sysfs))) 702 return -EROFS; 703 704 tmp = kstrdup(buf, GFP_KERNEL); 705 if (!tmp) { 706 ret = -ENOMEM; 707 goto err; 708 } 709 710 ret = bch2_opt_parse(c, opt, strim(tmp), &v, NULL); 711 kfree(tmp); 712 713 if (ret < 0) 714 goto err; 715 716 ret = bch2_opt_check_may_set(c, id, v); 717 if (ret < 0) 718 goto err; 719 720 bch2_opt_set_sb(c, opt, v); 721 bch2_opt_set_by_id(&c->opts, id, v); 722 723 if (v && 724 (id == Opt_background_target || 725 id == Opt_background_compression || 726 (id == Opt_compression && !c->opts.background_compression))) 727 bch2_set_rebalance_needs_scan(c, 0); 728 729 ret = size; 730 err: 731 bch2_write_ref_put(c, BCH_WRITE_REF_sysfs); 732 return ret; 733 } 734 SYSFS_OPS(bch2_fs_opts_dir); 735 736 struct attribute *bch2_fs_opts_dir_files[] = { NULL }; 737 738 int bch2_opts_create_sysfs_files(struct kobject *kobj) 739 { 740 const struct bch_option *i; 741 int ret; 742 743 for (i = bch2_opt_table; 744 i < bch2_opt_table + bch2_opts_nr; 745 i++) { 746 if (!(i->flags & OPT_FS)) 747 continue; 748 749 ret = sysfs_create_file(kobj, &i->attr); 750 if (ret) 751 return ret; 752 } 753 754 return 0; 755 } 756 757 /* time stats */ 758 759 SHOW(bch2_fs_time_stats) 760 { 761 struct bch_fs *c = container_of(kobj, struct bch_fs, time_stats); 762 763 #define x(name) \ 764 if (attr == &sysfs_time_stat_##name) \ 765 bch2_time_stats_to_text(out, &c->times[BCH_TIME_##name]); 766 BCH_TIME_STATS() 767 #undef x 768 769 return 0; 770 } 771 772 STORE(bch2_fs_time_stats) 773 { 774 return size; 775 } 776 SYSFS_OPS(bch2_fs_time_stats); 777 778 struct attribute *bch2_fs_time_stats_files[] = { 779 #define x(name) \ 780 &sysfs_time_stat_##name, 781 BCH_TIME_STATS() 782 #undef x 783 NULL 784 }; 785 786 static void dev_alloc_debug_to_text(struct printbuf *out, struct bch_dev *ca) 787 { 788 struct bch_fs *c = ca->fs; 789 struct bch_dev_usage stats = bch2_dev_usage_read(ca); 790 unsigned i, nr[BCH_DATA_NR]; 791 792 memset(nr, 0, sizeof(nr)); 793 794 for (i = 0; i < ARRAY_SIZE(c->open_buckets); i++) 795 nr[c->open_buckets[i].data_type]++; 796 797 printbuf_tabstop_push(out, 8); 798 printbuf_tabstop_push(out, 16); 799 printbuf_tabstop_push(out, 16); 800 printbuf_tabstop_push(out, 16); 801 printbuf_tabstop_push(out, 16); 802 803 bch2_dev_usage_to_text(out, &stats); 804 805 prt_newline(out); 806 807 prt_printf(out, "reserves:"); 808 prt_newline(out); 809 for (i = 0; i < BCH_WATERMARK_NR; i++) { 810 prt_str(out, bch2_watermarks[i]); 811 prt_tab(out); 812 prt_u64(out, bch2_dev_buckets_reserved(ca, i)); 813 prt_tab_rjust(out); 814 prt_newline(out); 815 } 816 817 prt_newline(out); 818 819 printbuf_tabstops_reset(out); 820 printbuf_tabstop_push(out, 24); 821 822 prt_str(out, "freelist_wait"); 823 prt_tab(out); 824 prt_str(out, c->freelist_wait.list.first ? "waiting" : "empty"); 825 prt_newline(out); 826 827 prt_str(out, "open buckets allocated"); 828 prt_tab(out); 829 prt_u64(out, OPEN_BUCKETS_COUNT - c->open_buckets_nr_free); 830 prt_newline(out); 831 832 prt_str(out, "open buckets this dev"); 833 prt_tab(out); 834 prt_u64(out, ca->nr_open_buckets); 835 prt_newline(out); 836 837 prt_str(out, "open buckets total"); 838 prt_tab(out); 839 prt_u64(out, OPEN_BUCKETS_COUNT); 840 prt_newline(out); 841 842 prt_str(out, "open_buckets_wait"); 843 prt_tab(out); 844 prt_str(out, c->open_buckets_wait.list.first ? "waiting" : "empty"); 845 prt_newline(out); 846 847 prt_str(out, "open_buckets_btree"); 848 prt_tab(out); 849 prt_u64(out, nr[BCH_DATA_btree]); 850 prt_newline(out); 851 852 prt_str(out, "open_buckets_user"); 853 prt_tab(out); 854 prt_u64(out, nr[BCH_DATA_user]); 855 prt_newline(out); 856 857 prt_str(out, "buckets_to_invalidate"); 858 prt_tab(out); 859 prt_u64(out, should_invalidate_buckets(ca, stats)); 860 prt_newline(out); 861 862 prt_str(out, "btree reserve cache"); 863 prt_tab(out); 864 prt_u64(out, c->btree_reserve_cache_nr); 865 prt_newline(out); 866 } 867 868 static const char * const bch2_rw[] = { 869 "read", 870 "write", 871 NULL 872 }; 873 874 static void dev_io_done_to_text(struct printbuf *out, struct bch_dev *ca) 875 { 876 int rw, i; 877 878 for (rw = 0; rw < 2; rw++) { 879 prt_printf(out, "%s:\n", bch2_rw[rw]); 880 881 for (i = 1; i < BCH_DATA_NR; i++) 882 prt_printf(out, "%-12s:%12llu\n", 883 bch2_data_type_str(i), 884 percpu_u64_get(&ca->io_done->sectors[rw][i]) << 9); 885 } 886 } 887 888 SHOW(bch2_dev) 889 { 890 struct bch_dev *ca = container_of(kobj, struct bch_dev, kobj); 891 struct bch_fs *c = ca->fs; 892 893 sysfs_printf(uuid, "%pU\n", ca->uuid.b); 894 895 sysfs_print(bucket_size, bucket_bytes(ca)); 896 sysfs_print(first_bucket, ca->mi.first_bucket); 897 sysfs_print(nbuckets, ca->mi.nbuckets); 898 sysfs_print(durability, ca->mi.durability); 899 sysfs_print(discard, ca->mi.discard); 900 901 if (attr == &sysfs_label) { 902 if (ca->mi.group) 903 bch2_disk_path_to_text(out, c, ca->mi.group - 1); 904 prt_char(out, '\n'); 905 } 906 907 if (attr == &sysfs_has_data) { 908 prt_bitflags(out, __bch2_data_types, bch2_dev_has_data(c, ca)); 909 prt_char(out, '\n'); 910 } 911 912 if (attr == &sysfs_state_rw) { 913 prt_string_option(out, bch2_member_states, ca->mi.state); 914 prt_char(out, '\n'); 915 } 916 917 if (attr == &sysfs_io_done) 918 dev_io_done_to_text(out, ca); 919 920 if (attr == &sysfs_io_errors) 921 bch2_dev_io_errors_to_text(out, ca); 922 923 sysfs_print(io_latency_read, atomic64_read(&ca->cur_latency[READ])); 924 sysfs_print(io_latency_write, atomic64_read(&ca->cur_latency[WRITE])); 925 926 if (attr == &sysfs_io_latency_stats_read) 927 bch2_time_stats_to_text(out, &ca->io_latency[READ].stats); 928 929 if (attr == &sysfs_io_latency_stats_write) 930 bch2_time_stats_to_text(out, &ca->io_latency[WRITE].stats); 931 932 sysfs_printf(congested, "%u%%", 933 clamp(atomic_read(&ca->congested), 0, CONGESTED_MAX) 934 * 100 / CONGESTED_MAX); 935 936 if (attr == &sysfs_alloc_debug) 937 dev_alloc_debug_to_text(out, ca); 938 939 return 0; 940 } 941 942 STORE(bch2_dev) 943 { 944 struct bch_dev *ca = container_of(kobj, struct bch_dev, kobj); 945 struct bch_fs *c = ca->fs; 946 struct bch_member *mi; 947 948 if (attr == &sysfs_discard) { 949 bool v = strtoul_or_return(buf); 950 951 mutex_lock(&c->sb_lock); 952 mi = bch2_members_v2_get_mut(c->disk_sb.sb, ca->dev_idx); 953 954 if (v != BCH_MEMBER_DISCARD(mi)) { 955 SET_BCH_MEMBER_DISCARD(mi, v); 956 bch2_write_super(c); 957 } 958 mutex_unlock(&c->sb_lock); 959 } 960 961 if (attr == &sysfs_durability) { 962 u64 v = strtoul_or_return(buf); 963 964 mutex_lock(&c->sb_lock); 965 mi = bch2_members_v2_get_mut(c->disk_sb.sb, ca->dev_idx); 966 967 if (v + 1 != BCH_MEMBER_DURABILITY(mi)) { 968 SET_BCH_MEMBER_DURABILITY(mi, v + 1); 969 bch2_write_super(c); 970 } 971 mutex_unlock(&c->sb_lock); 972 } 973 974 if (attr == &sysfs_label) { 975 char *tmp; 976 int ret; 977 978 tmp = kstrdup(buf, GFP_KERNEL); 979 if (!tmp) 980 return -ENOMEM; 981 982 ret = bch2_dev_group_set(c, ca, strim(tmp)); 983 kfree(tmp); 984 if (ret) 985 return ret; 986 } 987 988 if (attr == &sysfs_io_errors_reset) 989 bch2_dev_errors_reset(ca); 990 991 return size; 992 } 993 SYSFS_OPS(bch2_dev); 994 995 struct attribute *bch2_dev_files[] = { 996 &sysfs_uuid, 997 &sysfs_bucket_size, 998 &sysfs_first_bucket, 999 &sysfs_nbuckets, 1000 &sysfs_durability, 1001 1002 /* settings: */ 1003 &sysfs_discard, 1004 &sysfs_state_rw, 1005 &sysfs_label, 1006 1007 &sysfs_has_data, 1008 &sysfs_io_done, 1009 &sysfs_io_errors, 1010 &sysfs_io_errors_reset, 1011 1012 &sysfs_io_latency_read, 1013 &sysfs_io_latency_write, 1014 &sysfs_io_latency_stats_read, 1015 &sysfs_io_latency_stats_write, 1016 &sysfs_congested, 1017 1018 /* debug: */ 1019 &sysfs_alloc_debug, 1020 NULL 1021 }; 1022 1023 #endif /* _BCACHEFS_SYSFS_H_ */ 1024