// SPDX-License-Identifier: GPL-2.0
/*
 * bcachefs sysfs interfaces
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */

#ifndef NO_BCACHEFS_SYSFS

#include "bcachefs.h"
#include "alloc_background.h"
#include "alloc_foreground.h"
#include "sysfs.h"
#include "btree_cache.h"
#include "btree_io.h"
#include "btree_iter.h"
#include "btree_key_cache.h"
#include "btree_update.h"
#include "btree_update_interior.h"
#include "btree_gc.h"
#include "buckets.h"
#include "clock.h"
#include "compress.h"
#include "disk_accounting.h"
#include "disk_groups.h"
#include "ec.h"
#include "inode.h"
#include "journal.h"
#include "journal_reclaim.h"
#include "keylist.h"
#include "move.h"
#include "movinggc.h"
#include "nocow_locking.h"
#include "opts.h"
#include "rebalance.h"
#include "replicas.h"
#include "super-io.h"
#include "tests.h"

#include <linux/blkdev.h>
#include <linux/sort.h>
#include <linux/sched/clock.h>

#include "util.h"

#define SYSFS_OPS(type)							\
const struct sysfs_ops type ## _sysfs_ops = {				\
	.show	= type ## _show,					\
	.store	= type ## _store					\
}

#define SHOW(fn)							\
static ssize_t fn ## _to_text(struct printbuf *,			\
			      struct kobject *, struct attribute *);	\
									\
static ssize_t fn ## _show(struct kobject *kobj, struct attribute *attr,\
			   char *buf)					\
{									\
	struct printbuf out = PRINTBUF;					\
	ssize_t ret = fn ## _to_text(&out, kobj, attr);			\
									\
	if (out.pos && out.buf[out.pos - 1] != '\n')			\
		prt_newline(&out);					\
									\
	if (!ret && out.allocation_failure)				\
		ret = -ENOMEM;						\
									\
	if (!ret) {							\
		ret = min_t(size_t, out.pos, PAGE_SIZE - 1);		\
		memcpy(buf, out.buf, ret);				\
	}								\
	printbuf_exit(&out);						\
	return bch2_err_class(ret);					\
}									\
									\
static ssize_t fn ## _to_text(struct printbuf *out, struct kobject *kobj,\
			      struct attribute *attr)

#define STORE(fn)							\
static ssize_t fn ## _store_inner(struct kobject *, struct attribute *,\
				  const char *, size_t);		\
									\
static ssize_t fn ## _store(struct kobject *kobj, struct attribute *attr,\
			    const char *buf, size_t size)		\
{									\
	return bch2_err_class(fn##_store_inner(kobj, attr, buf, size));	\
}									\
									\
static ssize_t fn ## _store_inner(struct kobject *kobj, struct attribute *attr,\
				  const char *buf, size_t size)

#define __sysfs_attribute(_name, _mode)					\
	static struct attribute sysfs_##_name =				\
		{ .name = #_name, .mode = _mode }

#define write_attribute(n)	__sysfs_attribute(n, 0200)
#define read_attribute(n)	__sysfs_attribute(n, 0444)
#define rw_attribute(n)		__sysfs_attribute(n, 0644)

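/*
 * Helpers for the fn##_to_text() / fn##_store_inner() bodies generated by
 * SHOW() and STORE() above: each expands to an "if (attr == &sysfs_<file>)"
 * check, so a show or store implementation is just a flat list of these.
 * A minimal sketch of how they compose (attribute "foo" and variable
 * "example_var" are hypothetical):
 *
 *	rw_attribute(foo);
 *
 *	SHOW(example)
 *	{
 *		sysfs_print(foo, example_var);	// emits only if attr matches
 *		return 0;
 *	}
 *
 *	STORE(example)
 *	{
 *		sysfs_strtoul(foo, example_var);	// parses and returns if attr matches
 *		return size;
 *	}
 *
 * Note that the strtoul helpers return from the enclosing function on a
 * match, and strtoul_or_return() likewise returns on parse errors.
 */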
#define sysfs_printf(file, fmt, ...)					\
do {									\
	if (attr == &sysfs_ ## file)					\
		prt_printf(out, fmt "\n", __VA_ARGS__);			\
} while (0)

#define sysfs_print(file, var)						\
do {									\
	if (attr == &sysfs_ ## file)					\
		snprint(out, var);					\
} while (0)

#define sysfs_hprint(file, val)						\
do {									\
	if (attr == &sysfs_ ## file)					\
		prt_human_readable_s64(out, val);			\
} while (0)

#define sysfs_strtoul(file, var)					\
do {									\
	if (attr == &sysfs_ ## file)					\
		return strtoul_safe(buf, var) ?: (ssize_t) size;	\
} while (0)

#define sysfs_strtoul_clamp(file, var, min, max)			\
do {									\
	if (attr == &sysfs_ ## file)					\
		return strtoul_safe_clamp(buf, var, min, max)		\
			?: (ssize_t) size;				\
} while (0)

#define strtoul_or_return(cp)						\
({									\
	unsigned long _v;						\
	int _r = kstrtoul(cp, 10, &_v);					\
	if (_r)								\
		return _r;						\
	_v;								\
})

write_attribute(trigger_gc);
write_attribute(trigger_discards);
write_attribute(trigger_invalidates);
write_attribute(trigger_journal_flush);
write_attribute(trigger_journal_writes);
write_attribute(trigger_btree_cache_shrink);
write_attribute(trigger_btree_key_cache_shrink);
write_attribute(trigger_freelist_wakeup);
write_attribute(trigger_btree_updates);
read_attribute(gc_gens_pos);

read_attribute(uuid);
read_attribute(minor);
read_attribute(flags);
read_attribute(first_bucket);
read_attribute(nbuckets);
read_attribute(io_done);
read_attribute(io_errors);
write_attribute(io_errors_reset);

read_attribute(io_latency_read);
read_attribute(io_latency_write);
read_attribute(io_latency_stats_read);
read_attribute(io_latency_stats_write);
read_attribute(congested);

read_attribute(btree_write_stats);

read_attribute(btree_cache_size);
read_attribute(compression_stats);
read_attribute(journal_debug);
read_attribute(btree_cache);
read_attribute(btree_key_cache);
read_attribute(btree_reserve_cache);
read_attribute(open_buckets);
read_attribute(open_buckets_partial);
read_attribute(nocow_lock_table);

#ifdef BCH_WRITE_REF_DEBUG
read_attribute(write_refs);

static const char * const bch2_write_refs[] = {
#define x(n)	#n,
	BCH_WRITE_REFS()
#undef x
	NULL
};

static void bch2_write_refs_to_text(struct printbuf *out, struct bch_fs *c)
{
	bch2_printbuf_tabstop_push(out, 24);

	for (unsigned i = 0; i < ARRAY_SIZE(c->writes); i++)
		prt_printf(out, "%s\t%li\n", bch2_write_refs[i],
			   atomic_long_read(&c->writes[i]));
}
#endif

read_attribute(internal_uuid);
read_attribute(disk_groups);

read_attribute(has_data);
read_attribute(alloc_debug);
read_attribute(usage_base);

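/*
 * BCH_PERSISTENT_COUNTERS() is an x-macro list, so this declares one
 * read-only attribute per persistent counter:
 */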
#define x(t, n, ...)							\
	read_attribute(t);
BCH_PERSISTENT_COUNTERS()
#undef x

rw_attribute(label);

read_attribute(copy_gc_wait);

sysfs_pd_controller_attribute(rebalance);
read_attribute(rebalance_status);

read_attribute(new_stripes);

read_attribute(io_timers_read);
read_attribute(io_timers_write);

read_attribute(moving_ctxts);

#ifdef CONFIG_BCACHEFS_TESTS
write_attribute(perf_test);
#endif /* CONFIG_BCACHEFS_TESTS */

#define x(_name)							\
	static struct attribute sysfs_time_stat_##_name =		\
		{ .name = #_name, .mode = 0644 };
BCH_TIME_STATS()
#undef x

static size_t bch2_btree_cache_size(struct bch_fs *c)
{
	struct btree_cache *bc = &c->btree_cache;
	size_t ret = 0;
	struct btree *b;

	mutex_lock(&bc->lock);
	list_for_each_entry(b, &bc->live[0].list, list)
		ret += btree_buf_bytes(b);
	list_for_each_entry(b, &bc->live[1].list, list)
		ret += btree_buf_bytes(b);
	list_for_each_entry(b, &bc->freeable, list)
		ret += btree_buf_bytes(b);
	mutex_unlock(&bc->lock);
	return ret;
}

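/*
 * Compression totals come from the in-memory disk accounting: for each
 * compression type, the BCH_DISK_ACCOUNTING_compression counter holds
 * (nr_extents, uncompressed sectors, compressed sectors); sectors are
 * shifted left by 9 below to print byte counts, and the average extent
 * size is derived as uncompressed bytes / nr_extents.
 */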
static int bch2_compression_stats_to_text(struct printbuf *out, struct bch_fs *c)
{
	printbuf_tabstop_push(out, 12);
	printbuf_tabstop_push(out, 16);
	printbuf_tabstop_push(out, 16);
	printbuf_tabstop_push(out, 24);
	prt_printf(out, "type\tcompressed\runcompressed\raverage extent size\r\n");

	for (unsigned i = 1; i < BCH_COMPRESSION_TYPE_NR; i++) {
		struct disk_accounting_pos a = {
			.type			= BCH_DISK_ACCOUNTING_compression,
			.compression.type	= i,
		};
		struct bpos p = disk_accounting_pos_to_bpos(&a);
		u64 v[3];
		bch2_accounting_mem_read(c, p, v, ARRAY_SIZE(v));

		u64 nr_extents			= v[0];
		u64 sectors_uncompressed	= v[1];
		u64 sectors_compressed		= v[2];

		bch2_prt_compression_type(out, i);
		prt_tab(out);

		prt_human_readable_u64(out, sectors_compressed << 9);
		prt_tab_rjust(out);

		prt_human_readable_u64(out, sectors_uncompressed << 9);
		prt_tab_rjust(out);

		prt_human_readable_u64(out, nr_extents
				       ? div64_u64(sectors_uncompressed << 9, nr_extents)
				       : 0);
		prt_tab_rjust(out);
		prt_newline(out);
	}

	return 0;
}

static void bch2_gc_gens_pos_to_text(struct printbuf *out, struct bch_fs *c)
{
	bch2_btree_id_to_text(out, c->gc_gens_btree);
	prt_printf(out, ": ");
	bch2_bpos_to_text(out, c->gc_gens_pos);
	prt_printf(out, "\n");
}

static void bch2_fs_usage_base_to_text(struct printbuf *out, struct bch_fs *c)
{
	struct bch_fs_usage_base b = {};

	acc_u64s_percpu(&b.hidden, &c->usage->hidden, sizeof(b) / sizeof(u64));

	prt_printf(out, "hidden:\t\t%llu\n",	b.hidden);
	prt_printf(out, "btree:\t\t%llu\n",	b.btree);
	prt_printf(out, "data:\t\t%llu\n",	b.data);
	prt_printf(out, "cached:\t\t%llu\n",	b.cached);
	prt_printf(out, "reserved:\t%llu\n",	b.reserved);
	prt_printf(out, "nr_inodes:\t%llu\n",	b.nr_inodes);
}

SHOW(bch2_fs)
{
	struct bch_fs *c = container_of(kobj, struct bch_fs, kobj);

	sysfs_print(minor,			c->minor);
	sysfs_printf(internal_uuid, "%pU",	c->sb.uuid.b);

	if (attr == &sysfs_flags)
		prt_bitflags(out, bch2_fs_flag_strs, c->flags);

	sysfs_hprint(btree_cache_size,		bch2_btree_cache_size(c));

	if (attr == &sysfs_btree_write_stats)
		bch2_btree_write_stats_to_text(out, c);

	if (attr == &sysfs_gc_gens_pos)
		bch2_gc_gens_pos_to_text(out, c);

	sysfs_pd_controller_show(rebalance, &c->rebalance.pd); /* XXX */

	if (attr == &sysfs_copy_gc_wait)
		bch2_copygc_wait_to_text(out, c);

	if (attr == &sysfs_rebalance_status)
		bch2_rebalance_status_to_text(out, c);

	/* Debugging: */

	if (attr == &sysfs_journal_debug)
		bch2_journal_debug_to_text(out, &c->journal);

	if (attr == &sysfs_btree_cache)
		bch2_btree_cache_to_text(out, &c->btree_cache);

	if (attr == &sysfs_btree_key_cache)
		bch2_btree_key_cache_to_text(out, &c->btree_key_cache);

	if (attr == &sysfs_btree_reserve_cache)
		bch2_btree_reserve_cache_to_text(out, c);

	if (attr == &sysfs_open_buckets)
		bch2_open_buckets_to_text(out, c, NULL);

	if (attr == &sysfs_open_buckets_partial)
		bch2_open_buckets_partial_to_text(out, c);

	if (attr == &sysfs_compression_stats)
		bch2_compression_stats_to_text(out, c);

	if (attr == &sysfs_new_stripes)
		bch2_new_stripes_to_text(out, c);

	if (attr == &sysfs_io_timers_read)
		bch2_io_timers_to_text(out, &c->io_clock[READ]);

	if (attr == &sysfs_io_timers_write)
		bch2_io_timers_to_text(out, &c->io_clock[WRITE]);

	if (attr == &sysfs_moving_ctxts)
		bch2_fs_moving_ctxts_to_text(out, c);

#ifdef BCH_WRITE_REF_DEBUG
	if (attr == &sysfs_write_refs)
		bch2_write_refs_to_text(out, c);
#endif

	if (attr == &sysfs_nocow_lock_table)
		bch2_nocow_locks_to_text(out, &c->nocow_locks);

	if (attr == &sysfs_disk_groups)
		bch2_disk_groups_to_text(out, c);

	if (attr == &sysfs_alloc_debug)
		bch2_fs_alloc_debug_to_text(out, c);

	if (attr == &sysfs_usage_base)
		bch2_fs_usage_base_to_text(out, c);

	return 0;
}

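/*
 * Writes to the trigger_* attributes below kick off the corresponding
 * background operation, e.g. (assuming the usual sysfs layout under
 * /sys/fs/bcachefs/<fs uuid>/internal/):
 *
 *	# echo 1 > trigger_gc
 *	# echo 128 > trigger_btree_cache_shrink
 *
 * The written value is ignored except by the two shrinker triggers, which
 * parse it as the number of objects to scan.
 */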
STORE(bch2_fs)
{
	struct bch_fs *c = container_of(kobj, struct bch_fs, kobj);

	sysfs_pd_controller_store(rebalance, &c->rebalance.pd);

	if (!test_bit(BCH_FS_started, &c->flags))
		return -EPERM;

	/* Debugging: */

	if (attr == &sysfs_trigger_btree_updates)
		queue_work(c->btree_interior_update_worker,
			   &c->btree_interior_update_work);

	if (!bch2_write_ref_tryget(c, BCH_WRITE_REF_sysfs))
		return -EROFS;

	if (attr == &sysfs_trigger_btree_cache_shrink) {
		struct btree_cache *bc = &c->btree_cache;
		struct shrink_control sc;

		sc.gfp_mask = GFP_KERNEL;
		sc.nr_to_scan = strtoul_or_return(buf);
		bc->live[0].shrink->scan_objects(bc->live[0].shrink, &sc);
	}

	if (attr == &sysfs_trigger_btree_key_cache_shrink) {
		struct shrink_control sc;

		sc.gfp_mask = GFP_KERNEL;
		sc.nr_to_scan = strtoul_or_return(buf);
		c->btree_key_cache.shrink->scan_objects(c->btree_key_cache.shrink, &sc);
	}

	if (attr == &sysfs_trigger_gc)
		bch2_gc_gens(c);

	if (attr == &sysfs_trigger_discards)
		bch2_do_discards(c);

	if (attr == &sysfs_trigger_invalidates)
		bch2_do_invalidates(c);

	if (attr == &sysfs_trigger_journal_flush) {
		bch2_journal_flush_all_pins(&c->journal);
		bch2_journal_meta(&c->journal);
	}

	if (attr == &sysfs_trigger_journal_writes)
		bch2_journal_do_writes(&c->journal);

	if (attr == &sysfs_trigger_freelist_wakeup)
		closure_wake_up(&c->freelist_wait);

#ifdef CONFIG_BCACHEFS_TESTS
	if (attr == &sysfs_perf_test) {
		char *tmp = kstrdup(buf, GFP_KERNEL), *p = tmp;
		char *test		= strsep(&p, " \t\n");
		char *nr_str		= strsep(&p, " \t\n");
		char *threads_str	= strsep(&p, " \t\n");
		unsigned threads;
		u64 nr;
		int ret = -EINVAL;

		if (threads_str &&
		    !(ret = kstrtouint(threads_str, 10, &threads)) &&
		    !(ret = bch2_strtoull_h(nr_str, &nr)))
			ret = bch2_btree_perf_test(c, test, nr, threads);
		kfree(tmp);

		if (ret)
			size = ret;
	}
#endif
	bch2_write_ref_put(c, BCH_WRITE_REF_sysfs);
	return size;
}
SYSFS_OPS(bch2_fs);

struct attribute *bch2_fs_files[] = {
	&sysfs_minor,
	&sysfs_btree_cache_size,
	&sysfs_btree_write_stats,

	&sysfs_rebalance_status,

	&sysfs_compression_stats,

#ifdef CONFIG_BCACHEFS_TESTS
	&sysfs_perf_test,
#endif
	NULL
};

/* counters dir */

SHOW(bch2_fs_counters)
{
	struct bch_fs *c = container_of(kobj, struct bch_fs, counters_kobj);
	u64 counter = 0;
	u64 counter_since_mount = 0;

	printbuf_tabstop_push(out, 32);

#define x(t, n, f, ...)							\
	if (attr == &sysfs_##t) {					\
		counter = percpu_u64_get(&c->counters[BCH_COUNTER_##t]);\
		counter_since_mount = counter - c->counters_on_mount[BCH_COUNTER_##t];\
		if (f & TYPE_SECTORS) {					\
			counter <<= 9;					\
			counter_since_mount <<= 9;			\
		}							\
									\
		prt_printf(out, "since mount:\t");			\
		(f & TYPE_COUNTER) ? prt_u64(out, counter_since_mount) :\
			prt_human_readable_u64(out, counter_since_mount);\
		prt_newline(out);					\
									\
		prt_printf(out, "since filesystem creation:\t");	\
		(f & TYPE_COUNTER) ? prt_u64(out, counter) :		\
			prt_human_readable_u64(out, counter);		\
		prt_newline(out);					\
	}
	BCH_PERSISTENT_COUNTERS()
#undef x
	return 0;
}

STORE(bch2_fs_counters)
{
	return 0;
}

SYSFS_OPS(bch2_fs_counters);

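/*
 * One file per persistent counter; reading it reports the value since mount
 * and since filesystem creation (see bch2_fs_counters_to_text() above):
 */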
struct attribute *bch2_fs_counters_files[] = {
#define x(t, ...)							\
	&sysfs_##t,
	BCH_PERSISTENT_COUNTERS()
#undef x
	NULL
};

/* internal dir - just a wrapper */

SHOW(bch2_fs_internal)
{
	struct bch_fs *c = container_of(kobj, struct bch_fs, internal);

	return bch2_fs_to_text(out, &c->kobj, attr);
}

STORE(bch2_fs_internal)
{
	struct bch_fs *c = container_of(kobj, struct bch_fs, internal);

	return bch2_fs_store(&c->kobj, attr, buf, size);
}
SYSFS_OPS(bch2_fs_internal);

struct attribute *bch2_fs_internal_files[] = {
	&sysfs_flags,
	&sysfs_journal_debug,
	&sysfs_btree_cache,
	&sysfs_btree_key_cache,
	&sysfs_btree_reserve_cache,
	&sysfs_new_stripes,
	&sysfs_open_buckets,
	&sysfs_open_buckets_partial,
#ifdef BCH_WRITE_REF_DEBUG
	&sysfs_write_refs,
#endif
	&sysfs_nocow_lock_table,
	&sysfs_io_timers_read,
	&sysfs_io_timers_write,

	&sysfs_trigger_gc,
	&sysfs_trigger_discards,
	&sysfs_trigger_invalidates,
	&sysfs_trigger_journal_flush,
	&sysfs_trigger_journal_writes,
	&sysfs_trigger_btree_cache_shrink,
	&sysfs_trigger_btree_key_cache_shrink,
	&sysfs_trigger_freelist_wakeup,
	&sysfs_trigger_btree_updates,

	&sysfs_gc_gens_pos,

	&sysfs_copy_gc_wait,

	sysfs_pd_controller_files(rebalance),

	&sysfs_moving_ctxts,

	&sysfs_internal_uuid,

	&sysfs_disk_groups,
	&sysfs_alloc_debug,
	&sysfs_usage_base,
	NULL
};

/* options */

static ssize_t sysfs_opt_show(struct bch_fs *c,
			      struct bch_dev *ca,
			      enum bch_opt_id id,
			      struct printbuf *out)
{
	const struct bch_option *opt = bch2_opt_table + id;
	u64 v;

	if (opt->flags & OPT_FS) {
		v = bch2_opt_get_by_id(&c->opts, id);
	} else if ((opt->flags & OPT_DEVICE) && opt->get_member) {
		v = bch2_opt_from_sb(c->disk_sb.sb, id, ca->dev_idx);
	} else {
		return -EINVAL;
	}

	bch2_opt_to_text(out, c, c->disk_sb.sb, opt, v, OPT_SHOW_FULL_LIST);
	prt_char(out, '\n');
	return 0;
}

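/*
 * Storing an option applies it in two places - the superblock, so the change
 * persists, and the in-memory options - and then kicks off whatever work the
 * new value implies: changing compression or targets schedules a rebalance
 * scan, and enabling rebalance or copygc wakes the corresponding thread.
 */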
static ssize_t sysfs_opt_store(struct bch_fs *c,
			       struct bch_dev *ca,
			       enum bch_opt_id id,
			       const char *buf, size_t size)
{
	const struct bch_option *opt = bch2_opt_table + id;
	int ret = 0;

	/*
	 * We don't need to take c->writes for correctness, but it eliminates an
	 * unsightly error message in the dmesg log when we're RO:
	 */
	if (unlikely(!bch2_write_ref_tryget(c, BCH_WRITE_REF_sysfs)))
		return -EROFS;

	down_write(&c->state_lock);

	char *tmp = kstrdup(buf, GFP_KERNEL);
	if (!tmp) {
		ret = -ENOMEM;
		goto err;
	}

	u64 v;
	ret = bch2_opt_parse(c, opt, strim(tmp), &v, NULL) ?:
		bch2_opt_check_may_set(c, ca, id, v);
	kfree(tmp);

	if (ret < 0)
		goto err;

	bch2_opt_set_sb(c, ca, opt, v);
	bch2_opt_set_by_id(&c->opts, id, v);

	if (v &&
	    (id == Opt_background_target ||
	     (id == Opt_foreground_target && !c->opts.background_target) ||
	     id == Opt_background_compression ||
	     (id == Opt_compression && !c->opts.background_compression)))
		bch2_set_rebalance_needs_scan(c, 0);

	if (v && id == Opt_rebalance_enabled)
		rebalance_wakeup(c);

	if (v && id == Opt_copygc_enabled &&
	    c->copygc_thread)
		wake_up_process(c->copygc_thread);

	if (id == Opt_discard && !ca) {
		mutex_lock(&c->sb_lock);
		for_each_member_device(c, ca)
			opt->set_member(bch2_members_v2_get_mut(c->disk_sb.sb, ca->dev_idx), v);

		bch2_write_super(c);
		mutex_unlock(&c->sb_lock);
	}

	ret = size;
err:
	up_write(&c->state_lock);
	bch2_write_ref_put(c, BCH_WRITE_REF_sysfs);
	return ret;
}

SHOW(bch2_fs_opts_dir)
{
	struct bch_fs *c = container_of(kobj, struct bch_fs, opts_dir);
	int id = bch2_opt_lookup(attr->name);
	if (id < 0)
		return 0;

	return sysfs_opt_show(c, NULL, id, out);
}

STORE(bch2_fs_opts_dir)
{
	struct bch_fs *c = container_of(kobj, struct bch_fs, opts_dir);
	int id = bch2_opt_lookup(attr->name);
	if (id < 0)
		return 0;

	return sysfs_opt_store(c, NULL, id, buf, size);
}
SYSFS_OPS(bch2_fs_opts_dir);

struct attribute *bch2_fs_opts_dir_files[] = { NULL };

int bch2_opts_create_sysfs_files(struct kobject *kobj, unsigned type)
{
	for (const struct bch_option *i = bch2_opt_table;
	     i < bch2_opt_table + bch2_opts_nr;
	     i++) {
		if (i->flags & OPT_HIDDEN)
			continue;
		if (!(i->flags & type))
			continue;

		int ret = sysfs_create_file(kobj, &i->attr);
		if (ret)
			return ret;
	}

	return 0;
}

/* time stats */

SHOW(bch2_fs_time_stats)
{
	struct bch_fs *c = container_of(kobj, struct bch_fs, time_stats);

#define x(name)								\
	if (attr == &sysfs_time_stat_##name)				\
		bch2_time_stats_to_text(out, &c->times[BCH_TIME_##name]);
	BCH_TIME_STATS()
#undef x

	return 0;
}

STORE(bch2_fs_time_stats)
{
	struct bch_fs *c = container_of(kobj, struct bch_fs, time_stats);

#define x(name)								\
	if (attr == &sysfs_time_stat_##name)				\
		bch2_time_stats_reset(&c->times[BCH_TIME_##name]);
	BCH_TIME_STATS()
#undef x
	return size;
}
SYSFS_OPS(bch2_fs_time_stats);

struct attribute *bch2_fs_time_stats_files[] = {
#define x(name)								\
	&sysfs_time_stat_##name,
	BCH_TIME_STATS()
#undef x
	NULL
};

static const char * const bch2_rw[] = {
	"read",
	"write",
	NULL
};

static void dev_io_done_to_text(struct printbuf *out, struct bch_dev *ca)
{
	int rw, i;

	for (rw = 0; rw < 2; rw++) {
		prt_printf(out, "%s:\n", bch2_rw[rw]);

		for (i = 1; i < BCH_DATA_NR; i++)
			prt_printf(out, "%-12s:%12llu\n",
				   bch2_data_type_str(i),
				   percpu_u64_get(&ca->io_done->sectors[rw][i]) << 9);
	}
}

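/*
 * Per-device attributes, shown under each member device's kobject
 * (typically a dev-<idx> directory under the filesystem's sysfs dir).
 * Device options aren't declared as attributes here; both show and store
 * fall through to bch2_opt_lookup() on the attribute name and then to
 * sysfs_opt_show()/sysfs_opt_store().
 */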
SHOW(bch2_dev)
{
	struct bch_dev *ca = container_of(kobj, struct bch_dev, kobj);
	struct bch_fs *c = ca->fs;

	sysfs_printf(uuid, "%pU", ca->uuid.b);

	sysfs_print(first_bucket,	ca->mi.first_bucket);
	sysfs_print(nbuckets,		ca->mi.nbuckets);

	if (attr == &sysfs_label) {
		if (ca->mi.group)
			bch2_disk_path_to_text(out, c, ca->mi.group - 1);
		prt_char(out, '\n');
	}

	if (attr == &sysfs_has_data) {
		prt_bitflags(out, __bch2_data_types, bch2_dev_has_data(c, ca));
		prt_char(out, '\n');
	}

	if (attr == &sysfs_io_done)
		dev_io_done_to_text(out, ca);

	if (attr == &sysfs_io_errors)
		bch2_dev_io_errors_to_text(out, ca);

	sysfs_print(io_latency_read,	atomic64_read(&ca->cur_latency[READ]));
	sysfs_print(io_latency_write,	atomic64_read(&ca->cur_latency[WRITE]));

	if (attr == &sysfs_io_latency_stats_read)
		bch2_time_stats_to_text(out, &ca->io_latency[READ].stats);

	if (attr == &sysfs_io_latency_stats_write)
		bch2_time_stats_to_text(out, &ca->io_latency[WRITE].stats);

	sysfs_printf(congested, "%u%%",
		     clamp(atomic_read(&ca->congested), 0, CONGESTED_MAX)
		     * 100 / CONGESTED_MAX);

	if (attr == &sysfs_alloc_debug)
		bch2_dev_alloc_debug_to_text(out, ca);

	if (attr == &sysfs_open_buckets)
		bch2_open_buckets_to_text(out, c, ca);

	int opt_id = bch2_opt_lookup(attr->name);
	if (opt_id >= 0)
		return sysfs_opt_show(c, ca, opt_id, out);

	return 0;
}

STORE(bch2_dev)
{
	struct bch_dev *ca = container_of(kobj, struct bch_dev, kobj);
	struct bch_fs *c = ca->fs;

	if (attr == &sysfs_label) {
		char *tmp;
		int ret;

		tmp = kstrdup(buf, GFP_KERNEL);
		if (!tmp)
			return -ENOMEM;

		ret = bch2_dev_group_set(c, ca, strim(tmp));
		kfree(tmp);
		if (ret)
			return ret;
	}

	if (attr == &sysfs_io_errors_reset)
		bch2_dev_errors_reset(ca);

	int opt_id = bch2_opt_lookup(attr->name);
	if (opt_id >= 0)
		return sysfs_opt_store(c, ca, opt_id, buf, size);

	return size;
}
SYSFS_OPS(bch2_dev);

struct attribute *bch2_dev_files[] = {
	&sysfs_uuid,
	&sysfs_first_bucket,
	&sysfs_nbuckets,

	/* settings: */
	&sysfs_label,

	&sysfs_has_data,
	&sysfs_io_done,
	&sysfs_io_errors,
	&sysfs_io_errors_reset,

	&sysfs_io_latency_read,
	&sysfs_io_latency_write,
	&sysfs_io_latency_stats_read,
	&sysfs_io_latency_stats_write,
	&sysfs_congested,

	/* debug: */
	&sysfs_alloc_debug,
	&sysfs_open_buckets,
	NULL
};

#endif /* NO_BCACHEFS_SYSFS */