1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * bcache sysfs interfaces 4 * 5 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com> 6 * Copyright 2012 Google, Inc. 7 */ 8 9 #include "bcache.h" 10 #include "sysfs.h" 11 #include "btree.h" 12 #include "request.h" 13 #include "writeback.h" 14 #include "features.h" 15 16 #include <linux/blkdev.h> 17 #include <linux/sort.h> 18 #include <linux/sched/clock.h> 19 20 extern bool bcache_is_reboot; 21 22 /* Default is 0 ("writethrough") */ 23 static const char * const bch_cache_modes[] = { 24 "writethrough", 25 "writeback", 26 "writearound", 27 "none", 28 NULL 29 }; 30 31 static const char * const bch_reada_cache_policies[] = { 32 "all", 33 "meta-only", 34 NULL 35 }; 36 37 /* Default is 0 ("auto") */ 38 static const char * const bch_stop_on_failure_modes[] = { 39 "auto", 40 "always", 41 NULL 42 }; 43 44 static const char * const cache_replacement_policies[] = { 45 "lru", 46 "fifo", 47 "random", 48 NULL 49 }; 50 51 static const char * const error_actions[] = { 52 "unregister", 53 "panic", 54 NULL 55 }; 56 57 write_attribute(attach); 58 write_attribute(detach); 59 write_attribute(unregister); 60 write_attribute(stop); 61 write_attribute(clear_stats); 62 write_attribute(trigger_gc); 63 write_attribute(prune_cache); 64 write_attribute(flash_vol_create); 65 66 read_attribute(bucket_size); 67 read_attribute(block_size); 68 read_attribute(nbuckets); 69 read_attribute(tree_depth); 70 read_attribute(root_usage_percent); 71 read_attribute(priority_stats); 72 read_attribute(btree_cache_size); 73 read_attribute(btree_cache_max_chain); 74 read_attribute(cache_available_percent); 75 read_attribute(written); 76 read_attribute(btree_written); 77 read_attribute(metadata_written); 78 read_attribute(active_journal_entries); 79 read_attribute(backing_dev_name); 80 read_attribute(backing_dev_uuid); 81 82 sysfs_time_stats_attribute(btree_gc, sec, ms); 83 sysfs_time_stats_attribute(btree_split, sec, us); 84 sysfs_time_stats_attribute(btree_sort, ms, us); 85 sysfs_time_stats_attribute(btree_read, ms, us); 86 87 read_attribute(btree_nodes); 88 read_attribute(btree_used_percent); 89 read_attribute(average_key_size); 90 read_attribute(dirty_data); 91 read_attribute(bset_tree_stats); 92 read_attribute(feature_compat); 93 read_attribute(feature_ro_compat); 94 read_attribute(feature_incompat); 95 96 read_attribute(state); 97 read_attribute(cache_read_races); 98 read_attribute(reclaim); 99 read_attribute(reclaimed_journal_buckets); 100 read_attribute(flush_write); 101 read_attribute(writeback_keys_done); 102 read_attribute(writeback_keys_failed); 103 read_attribute(io_errors); 104 read_attribute(congested); 105 read_attribute(cutoff_writeback); 106 read_attribute(cutoff_writeback_sync); 107 rw_attribute(congested_read_threshold_us); 108 rw_attribute(congested_write_threshold_us); 109 110 rw_attribute(sequential_cutoff); 111 rw_attribute(data_csum); 112 rw_attribute(cache_mode); 113 rw_attribute(readahead_cache_policy); 114 rw_attribute(stop_when_cache_set_failed); 115 rw_attribute(writeback_metadata); 116 rw_attribute(writeback_running); 117 rw_attribute(writeback_percent); 118 rw_attribute(writeback_delay); 119 rw_attribute(writeback_rate); 120 121 rw_attribute(writeback_rate_update_seconds); 122 rw_attribute(writeback_rate_i_term_inverse); 123 rw_attribute(writeback_rate_p_term_inverse); 124 rw_attribute(writeback_rate_minimum); 125 read_attribute(writeback_rate_debug); 126 127 read_attribute(stripe_size); 128 read_attribute(partial_stripes_expensive); 129 130 rw_attribute(synchronous); 131 rw_attribute(journal_delay_ms); 132 rw_attribute(io_disable); 133 rw_attribute(discard); 134 rw_attribute(running); 135 rw_attribute(label); 136 rw_attribute(readahead); 137 rw_attribute(errors); 138 rw_attribute(io_error_limit); 139 rw_attribute(io_error_halflife); 140 rw_attribute(verify); 141 rw_attribute(bypass_torture_test); 142 rw_attribute(key_merging_disabled); 143 rw_attribute(gc_always_rewrite); 144 rw_attribute(expensive_debug_checks); 145 rw_attribute(cache_replacement_policy); 146 rw_attribute(btree_shrinker_disabled); 147 rw_attribute(copy_gc_enabled); 148 rw_attribute(idle_max_writeback_rate); 149 rw_attribute(gc_after_writeback); 150 rw_attribute(size); 151 152 static ssize_t bch_snprint_string_list(char *buf, 153 size_t size, 154 const char * const list[], 155 size_t selected) 156 { 157 char *out = buf; 158 size_t i; 159 160 for (i = 0; list[i]; i++) 161 out += scnprintf(out, buf + size - out, 162 i == selected ? "[%s] " : "%s ", list[i]); 163 164 out[-1] = '\n'; 165 return out - buf; 166 } 167 168 SHOW(__bch_cached_dev) 169 { 170 struct cached_dev *dc = container_of(kobj, struct cached_dev, 171 disk.kobj); 172 char const *states[] = { "no cache", "clean", "dirty", "inconsistent" }; 173 int wb = dc->writeback_running; 174 175 #define var(stat) (dc->stat) 176 177 if (attr == &sysfs_cache_mode) 178 return bch_snprint_string_list(buf, PAGE_SIZE, 179 bch_cache_modes, 180 BDEV_CACHE_MODE(&dc->sb)); 181 182 if (attr == &sysfs_readahead_cache_policy) 183 return bch_snprint_string_list(buf, PAGE_SIZE, 184 bch_reada_cache_policies, 185 dc->cache_readahead_policy); 186 187 if (attr == &sysfs_stop_when_cache_set_failed) 188 return bch_snprint_string_list(buf, PAGE_SIZE, 189 bch_stop_on_failure_modes, 190 dc->stop_when_cache_set_failed); 191 192 193 sysfs_printf(data_csum, "%i", dc->disk.data_csum); 194 var_printf(verify, "%i"); 195 var_printf(bypass_torture_test, "%i"); 196 var_printf(writeback_metadata, "%i"); 197 var_printf(writeback_running, "%i"); 198 var_print(writeback_delay); 199 var_print(writeback_percent); 200 sysfs_hprint(writeback_rate, 201 wb ? atomic_long_read(&dc->writeback_rate.rate) << 9 : 0); 202 sysfs_printf(io_errors, "%i", atomic_read(&dc->io_errors)); 203 sysfs_printf(io_error_limit, "%i", dc->error_limit); 204 sysfs_printf(io_disable, "%i", dc->io_disable); 205 var_print(writeback_rate_update_seconds); 206 var_print(writeback_rate_i_term_inverse); 207 var_print(writeback_rate_p_term_inverse); 208 var_print(writeback_rate_minimum); 209 210 if (attr == &sysfs_writeback_rate_debug) { 211 char rate[20]; 212 char dirty[20]; 213 char target[20]; 214 char proportional[20]; 215 char integral[20]; 216 char change[20]; 217 s64 next_io; 218 219 /* 220 * Except for dirty and target, other values should 221 * be 0 if writeback is not running. 222 */ 223 bch_hprint(rate, 224 wb ? atomic_long_read(&dc->writeback_rate.rate) << 9 225 : 0); 226 bch_hprint(dirty, bcache_dev_sectors_dirty(&dc->disk) << 9); 227 bch_hprint(target, dc->writeback_rate_target << 9); 228 bch_hprint(proportional, 229 wb ? dc->writeback_rate_proportional << 9 : 0); 230 bch_hprint(integral, 231 wb ? dc->writeback_rate_integral_scaled << 9 : 0); 232 bch_hprint(change, wb ? dc->writeback_rate_change << 9 : 0); 233 next_io = wb ? div64_s64(dc->writeback_rate.next-local_clock(), 234 NSEC_PER_MSEC) : 0; 235 236 return sprintf(buf, 237 "rate:\t\t%s/sec\n" 238 "dirty:\t\t%s\n" 239 "target:\t\t%s\n" 240 "proportional:\t%s\n" 241 "integral:\t%s\n" 242 "change:\t\t%s/sec\n" 243 "next io:\t%llims\n", 244 rate, dirty, target, proportional, 245 integral, change, next_io); 246 } 247 248 sysfs_hprint(dirty_data, 249 bcache_dev_sectors_dirty(&dc->disk) << 9); 250 251 sysfs_hprint(stripe_size, ((uint64_t)dc->disk.stripe_size) << 9); 252 var_printf(partial_stripes_expensive, "%u"); 253 254 var_hprint(sequential_cutoff); 255 var_hprint(readahead); 256 257 sysfs_print(running, atomic_read(&dc->running)); 258 sysfs_print(state, states[BDEV_STATE(&dc->sb)]); 259 260 if (attr == &sysfs_label) { 261 memcpy(buf, dc->sb.label, SB_LABEL_SIZE); 262 buf[SB_LABEL_SIZE + 1] = '\0'; 263 strcat(buf, "\n"); 264 return strlen(buf); 265 } 266 267 if (attr == &sysfs_backing_dev_name) { 268 snprintf(buf, BDEVNAME_SIZE + 1, "%s", dc->backing_dev_name); 269 strcat(buf, "\n"); 270 return strlen(buf); 271 } 272 273 if (attr == &sysfs_backing_dev_uuid) { 274 /* convert binary uuid into 36-byte string plus '\0' */ 275 snprintf(buf, 36+1, "%pU", dc->sb.uuid); 276 strcat(buf, "\n"); 277 return strlen(buf); 278 } 279 280 #undef var 281 return 0; 282 } 283 SHOW_LOCKED(bch_cached_dev) 284 285 STORE(__cached_dev) 286 { 287 struct cached_dev *dc = container_of(kobj, struct cached_dev, 288 disk.kobj); 289 ssize_t v; 290 struct cache_set *c; 291 struct kobj_uevent_env *env; 292 293 /* no user space access if system is rebooting */ 294 if (bcache_is_reboot) 295 return -EBUSY; 296 297 #define d_strtoul(var) sysfs_strtoul(var, dc->var) 298 #define d_strtoul_nonzero(var) sysfs_strtoul_clamp(var, dc->var, 1, INT_MAX) 299 #define d_strtoi_h(var) sysfs_hatoi(var, dc->var) 300 301 sysfs_strtoul(data_csum, dc->disk.data_csum); 302 d_strtoul(verify); 303 sysfs_strtoul_bool(bypass_torture_test, dc->bypass_torture_test); 304 sysfs_strtoul_bool(writeback_metadata, dc->writeback_metadata); 305 sysfs_strtoul_bool(writeback_running, dc->writeback_running); 306 sysfs_strtoul_clamp(writeback_delay, dc->writeback_delay, 0, UINT_MAX); 307 308 sysfs_strtoul_clamp(writeback_percent, dc->writeback_percent, 309 0, bch_cutoff_writeback); 310 311 if (attr == &sysfs_writeback_rate) { 312 ssize_t ret; 313 long int v = atomic_long_read(&dc->writeback_rate.rate); 314 315 ret = strtoul_safe_clamp(buf, v, 1, INT_MAX); 316 317 if (!ret) { 318 atomic_long_set(&dc->writeback_rate.rate, v); 319 ret = size; 320 } 321 322 return ret; 323 } 324 325 sysfs_strtoul_clamp(writeback_rate_update_seconds, 326 dc->writeback_rate_update_seconds, 327 1, WRITEBACK_RATE_UPDATE_SECS_MAX); 328 sysfs_strtoul_clamp(writeback_rate_i_term_inverse, 329 dc->writeback_rate_i_term_inverse, 330 1, UINT_MAX); 331 sysfs_strtoul_clamp(writeback_rate_p_term_inverse, 332 dc->writeback_rate_p_term_inverse, 333 1, UINT_MAX); 334 sysfs_strtoul_clamp(writeback_rate_minimum, 335 dc->writeback_rate_minimum, 336 1, UINT_MAX); 337 338 sysfs_strtoul_clamp(io_error_limit, dc->error_limit, 0, INT_MAX); 339 340 if (attr == &sysfs_io_disable) { 341 int v = strtoul_or_return(buf); 342 343 dc->io_disable = v ? 1 : 0; 344 } 345 346 sysfs_strtoul_clamp(sequential_cutoff, 347 dc->sequential_cutoff, 348 0, UINT_MAX); 349 d_strtoi_h(readahead); 350 351 if (attr == &sysfs_clear_stats) 352 bch_cache_accounting_clear(&dc->accounting); 353 354 if (attr == &sysfs_running && 355 strtoul_or_return(buf)) { 356 v = bch_cached_dev_run(dc); 357 if (v) 358 return v; 359 } 360 361 if (attr == &sysfs_cache_mode) { 362 v = __sysfs_match_string(bch_cache_modes, -1, buf); 363 if (v < 0) 364 return v; 365 366 if ((unsigned int) v != BDEV_CACHE_MODE(&dc->sb)) { 367 SET_BDEV_CACHE_MODE(&dc->sb, v); 368 bch_write_bdev_super(dc, NULL); 369 } 370 } 371 372 if (attr == &sysfs_readahead_cache_policy) { 373 v = __sysfs_match_string(bch_reada_cache_policies, -1, buf); 374 if (v < 0) 375 return v; 376 377 if ((unsigned int) v != dc->cache_readahead_policy) 378 dc->cache_readahead_policy = v; 379 } 380 381 if (attr == &sysfs_stop_when_cache_set_failed) { 382 v = __sysfs_match_string(bch_stop_on_failure_modes, -1, buf); 383 if (v < 0) 384 return v; 385 386 dc->stop_when_cache_set_failed = v; 387 } 388 389 if (attr == &sysfs_label) { 390 if (size > SB_LABEL_SIZE) 391 return -EINVAL; 392 memcpy(dc->sb.label, buf, size); 393 if (size < SB_LABEL_SIZE) 394 dc->sb.label[size] = '\0'; 395 if (size && dc->sb.label[size - 1] == '\n') 396 dc->sb.label[size - 1] = '\0'; 397 bch_write_bdev_super(dc, NULL); 398 if (dc->disk.c) { 399 memcpy(dc->disk.c->uuids[dc->disk.id].label, 400 buf, SB_LABEL_SIZE); 401 bch_uuid_write(dc->disk.c); 402 } 403 env = kzalloc(sizeof(struct kobj_uevent_env), GFP_KERNEL); 404 if (!env) 405 return -ENOMEM; 406 add_uevent_var(env, "DRIVER=bcache"); 407 add_uevent_var(env, "CACHED_UUID=%pU", dc->sb.uuid); 408 add_uevent_var(env, "CACHED_LABEL=%s", buf); 409 kobject_uevent_env(&disk_to_dev(dc->disk.disk)->kobj, 410 KOBJ_CHANGE, 411 env->envp); 412 kfree(env); 413 } 414 415 if (attr == &sysfs_attach) { 416 uint8_t set_uuid[16]; 417 418 if (bch_parse_uuid(buf, set_uuid) < 16) 419 return -EINVAL; 420 421 v = -ENOENT; 422 list_for_each_entry(c, &bch_cache_sets, list) { 423 v = bch_cached_dev_attach(dc, c, set_uuid); 424 if (!v) 425 return size; 426 } 427 if (v == -ENOENT) 428 pr_err("Can't attach %s: cache set not found\n", buf); 429 return v; 430 } 431 432 if (attr == &sysfs_detach && dc->disk.c) 433 bch_cached_dev_detach(dc); 434 435 if (attr == &sysfs_stop) 436 bcache_device_stop(&dc->disk); 437 438 return size; 439 } 440 441 STORE(bch_cached_dev) 442 { 443 struct cached_dev *dc = container_of(kobj, struct cached_dev, 444 disk.kobj); 445 446 /* no user space access if system is rebooting */ 447 if (bcache_is_reboot) 448 return -EBUSY; 449 450 mutex_lock(&bch_register_lock); 451 size = __cached_dev_store(kobj, attr, buf, size); 452 453 if (attr == &sysfs_writeback_running) { 454 /* dc->writeback_running changed in __cached_dev_store() */ 455 if (IS_ERR_OR_NULL(dc->writeback_thread)) { 456 /* 457 * reject setting it to 1 via sysfs if writeback 458 * kthread is not created yet. 459 */ 460 if (dc->writeback_running) { 461 dc->writeback_running = false; 462 pr_err("%s: failed to run non-existent writeback thread\n", 463 dc->disk.disk->disk_name); 464 } 465 } else 466 /* 467 * writeback kthread will check if dc->writeback_running 468 * is true or false. 469 */ 470 bch_writeback_queue(dc); 471 } 472 473 /* 474 * Only set BCACHE_DEV_WB_RUNNING when cached device attached to 475 * a cache set, otherwise it doesn't make sense. 476 */ 477 if (attr == &sysfs_writeback_percent) 478 if ((dc->disk.c != NULL) && 479 (!test_and_set_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags))) 480 schedule_delayed_work(&dc->writeback_rate_update, 481 dc->writeback_rate_update_seconds * HZ); 482 483 mutex_unlock(&bch_register_lock); 484 return size; 485 } 486 487 static struct attribute *bch_cached_dev_files[] = { 488 &sysfs_attach, 489 &sysfs_detach, 490 &sysfs_stop, 491 #if 0 492 &sysfs_data_csum, 493 #endif 494 &sysfs_cache_mode, 495 &sysfs_readahead_cache_policy, 496 &sysfs_stop_when_cache_set_failed, 497 &sysfs_writeback_metadata, 498 &sysfs_writeback_running, 499 &sysfs_writeback_delay, 500 &sysfs_writeback_percent, 501 &sysfs_writeback_rate, 502 &sysfs_writeback_rate_update_seconds, 503 &sysfs_writeback_rate_i_term_inverse, 504 &sysfs_writeback_rate_p_term_inverse, 505 &sysfs_writeback_rate_minimum, 506 &sysfs_writeback_rate_debug, 507 &sysfs_io_errors, 508 &sysfs_io_error_limit, 509 &sysfs_io_disable, 510 &sysfs_dirty_data, 511 &sysfs_stripe_size, 512 &sysfs_partial_stripes_expensive, 513 &sysfs_sequential_cutoff, 514 &sysfs_clear_stats, 515 &sysfs_running, 516 &sysfs_state, 517 &sysfs_label, 518 &sysfs_readahead, 519 #ifdef CONFIG_BCACHE_DEBUG 520 &sysfs_verify, 521 &sysfs_bypass_torture_test, 522 #endif 523 &sysfs_backing_dev_name, 524 &sysfs_backing_dev_uuid, 525 NULL 526 }; 527 KTYPE(bch_cached_dev); 528 529 SHOW(bch_flash_dev) 530 { 531 struct bcache_device *d = container_of(kobj, struct bcache_device, 532 kobj); 533 struct uuid_entry *u = &d->c->uuids[d->id]; 534 535 sysfs_printf(data_csum, "%i", d->data_csum); 536 sysfs_hprint(size, u->sectors << 9); 537 538 if (attr == &sysfs_label) { 539 memcpy(buf, u->label, SB_LABEL_SIZE); 540 buf[SB_LABEL_SIZE + 1] = '\0'; 541 strcat(buf, "\n"); 542 return strlen(buf); 543 } 544 545 return 0; 546 } 547 548 STORE(__bch_flash_dev) 549 { 550 struct bcache_device *d = container_of(kobj, struct bcache_device, 551 kobj); 552 struct uuid_entry *u = &d->c->uuids[d->id]; 553 554 /* no user space access if system is rebooting */ 555 if (bcache_is_reboot) 556 return -EBUSY; 557 558 sysfs_strtoul(data_csum, d->data_csum); 559 560 if (attr == &sysfs_size) { 561 uint64_t v; 562 563 strtoi_h_or_return(buf, v); 564 565 u->sectors = v >> 9; 566 bch_uuid_write(d->c); 567 set_capacity(d->disk, u->sectors); 568 } 569 570 if (attr == &sysfs_label) { 571 memcpy(u->label, buf, SB_LABEL_SIZE); 572 bch_uuid_write(d->c); 573 } 574 575 if (attr == &sysfs_unregister) { 576 set_bit(BCACHE_DEV_DETACHING, &d->flags); 577 bcache_device_stop(d); 578 } 579 580 return size; 581 } 582 STORE_LOCKED(bch_flash_dev) 583 584 static struct attribute *bch_flash_dev_files[] = { 585 &sysfs_unregister, 586 #if 0 587 &sysfs_data_csum, 588 #endif 589 &sysfs_label, 590 &sysfs_size, 591 NULL 592 }; 593 KTYPE(bch_flash_dev); 594 595 struct bset_stats_op { 596 struct btree_op op; 597 size_t nodes; 598 struct bset_stats stats; 599 }; 600 601 static int bch_btree_bset_stats(struct btree_op *b_op, struct btree *b) 602 { 603 struct bset_stats_op *op = container_of(b_op, struct bset_stats_op, op); 604 605 op->nodes++; 606 bch_btree_keys_stats(&b->keys, &op->stats); 607 608 return MAP_CONTINUE; 609 } 610 611 static int bch_bset_print_stats(struct cache_set *c, char *buf) 612 { 613 struct bset_stats_op op; 614 int ret; 615 616 memset(&op, 0, sizeof(op)); 617 bch_btree_op_init(&op.op, -1); 618 619 ret = bch_btree_map_nodes(&op.op, c, &ZERO_KEY, bch_btree_bset_stats); 620 if (ret < 0) 621 return ret; 622 623 return snprintf(buf, PAGE_SIZE, 624 "btree nodes: %zu\n" 625 "written sets: %zu\n" 626 "unwritten sets: %zu\n" 627 "written key bytes: %zu\n" 628 "unwritten key bytes: %zu\n" 629 "floats: %zu\n" 630 "failed: %zu\n", 631 op.nodes, 632 op.stats.sets_written, op.stats.sets_unwritten, 633 op.stats.bytes_written, op.stats.bytes_unwritten, 634 op.stats.floats, op.stats.failed); 635 } 636 637 static unsigned int bch_root_usage(struct cache_set *c) 638 { 639 unsigned int bytes = 0; 640 struct bkey *k; 641 struct btree *b; 642 struct btree_iter iter; 643 644 goto lock_root; 645 646 do { 647 rw_unlock(false, b); 648 lock_root: 649 b = c->root; 650 rw_lock(false, b, b->level); 651 } while (b != c->root); 652 653 for_each_key_filter(&b->keys, k, &iter, bch_ptr_bad) 654 bytes += bkey_bytes(k); 655 656 rw_unlock(false, b); 657 658 return (bytes * 100) / btree_bytes(c); 659 } 660 661 static size_t bch_cache_size(struct cache_set *c) 662 { 663 size_t ret = 0; 664 struct btree *b; 665 666 mutex_lock(&c->bucket_lock); 667 list_for_each_entry(b, &c->btree_cache, list) 668 ret += 1 << (b->keys.page_order + PAGE_SHIFT); 669 670 mutex_unlock(&c->bucket_lock); 671 return ret; 672 } 673 674 static unsigned int bch_cache_max_chain(struct cache_set *c) 675 { 676 unsigned int ret = 0; 677 struct hlist_head *h; 678 679 mutex_lock(&c->bucket_lock); 680 681 for (h = c->bucket_hash; 682 h < c->bucket_hash + (1 << BUCKET_HASH_BITS); 683 h++) { 684 unsigned int i = 0; 685 struct hlist_node *p; 686 687 hlist_for_each(p, h) 688 i++; 689 690 ret = max(ret, i); 691 } 692 693 mutex_unlock(&c->bucket_lock); 694 return ret; 695 } 696 697 static unsigned int bch_btree_used(struct cache_set *c) 698 { 699 return div64_u64(c->gc_stats.key_bytes * 100, 700 (c->gc_stats.nodes ?: 1) * btree_bytes(c)); 701 } 702 703 static unsigned int bch_average_key_size(struct cache_set *c) 704 { 705 return c->gc_stats.nkeys 706 ? div64_u64(c->gc_stats.data, c->gc_stats.nkeys) 707 : 0; 708 } 709 710 SHOW(__bch_cache_set) 711 { 712 struct cache_set *c = container_of(kobj, struct cache_set, kobj); 713 714 sysfs_print(synchronous, CACHE_SYNC(&c->cache->sb)); 715 sysfs_print(journal_delay_ms, c->journal_delay_ms); 716 sysfs_hprint(bucket_size, bucket_bytes(c->cache)); 717 sysfs_hprint(block_size, block_bytes(c->cache)); 718 sysfs_print(tree_depth, c->root->level); 719 sysfs_print(root_usage_percent, bch_root_usage(c)); 720 721 sysfs_hprint(btree_cache_size, bch_cache_size(c)); 722 sysfs_print(btree_cache_max_chain, bch_cache_max_chain(c)); 723 sysfs_print(cache_available_percent, 100 - c->gc_stats.in_use); 724 725 sysfs_print_time_stats(&c->btree_gc_time, btree_gc, sec, ms); 726 sysfs_print_time_stats(&c->btree_split_time, btree_split, sec, us); 727 sysfs_print_time_stats(&c->sort.time, btree_sort, ms, us); 728 sysfs_print_time_stats(&c->btree_read_time, btree_read, ms, us); 729 730 sysfs_print(btree_used_percent, bch_btree_used(c)); 731 sysfs_print(btree_nodes, c->gc_stats.nodes); 732 sysfs_hprint(average_key_size, bch_average_key_size(c)); 733 734 sysfs_print(cache_read_races, 735 atomic_long_read(&c->cache_read_races)); 736 737 sysfs_print(reclaim, 738 atomic_long_read(&c->reclaim)); 739 740 sysfs_print(reclaimed_journal_buckets, 741 atomic_long_read(&c->reclaimed_journal_buckets)); 742 743 sysfs_print(flush_write, 744 atomic_long_read(&c->flush_write)); 745 746 sysfs_print(writeback_keys_done, 747 atomic_long_read(&c->writeback_keys_done)); 748 sysfs_print(writeback_keys_failed, 749 atomic_long_read(&c->writeback_keys_failed)); 750 751 if (attr == &sysfs_errors) 752 return bch_snprint_string_list(buf, PAGE_SIZE, error_actions, 753 c->on_error); 754 755 /* See count_io_errors for why 88 */ 756 sysfs_print(io_error_halflife, c->error_decay * 88); 757 sysfs_print(io_error_limit, c->error_limit); 758 759 sysfs_hprint(congested, 760 ((uint64_t) bch_get_congested(c)) << 9); 761 sysfs_print(congested_read_threshold_us, 762 c->congested_read_threshold_us); 763 sysfs_print(congested_write_threshold_us, 764 c->congested_write_threshold_us); 765 766 sysfs_print(cutoff_writeback, bch_cutoff_writeback); 767 sysfs_print(cutoff_writeback_sync, bch_cutoff_writeback_sync); 768 769 sysfs_print(active_journal_entries, fifo_used(&c->journal.pin)); 770 sysfs_printf(verify, "%i", c->verify); 771 sysfs_printf(key_merging_disabled, "%i", c->key_merging_disabled); 772 sysfs_printf(expensive_debug_checks, 773 "%i", c->expensive_debug_checks); 774 sysfs_printf(gc_always_rewrite, "%i", c->gc_always_rewrite); 775 sysfs_printf(btree_shrinker_disabled, "%i", c->shrinker_disabled); 776 sysfs_printf(copy_gc_enabled, "%i", c->copy_gc_enabled); 777 sysfs_printf(idle_max_writeback_rate, "%i", 778 c->idle_max_writeback_rate_enabled); 779 sysfs_printf(gc_after_writeback, "%i", c->gc_after_writeback); 780 sysfs_printf(io_disable, "%i", 781 test_bit(CACHE_SET_IO_DISABLE, &c->flags)); 782 783 if (attr == &sysfs_bset_tree_stats) 784 return bch_bset_print_stats(c, buf); 785 786 if (attr == &sysfs_feature_compat) 787 return bch_print_cache_set_feature_compat(c, buf, PAGE_SIZE); 788 if (attr == &sysfs_feature_ro_compat) 789 return bch_print_cache_set_feature_ro_compat(c, buf, PAGE_SIZE); 790 if (attr == &sysfs_feature_incompat) 791 return bch_print_cache_set_feature_incompat(c, buf, PAGE_SIZE); 792 793 return 0; 794 } 795 SHOW_LOCKED(bch_cache_set) 796 797 STORE(__bch_cache_set) 798 { 799 struct cache_set *c = container_of(kobj, struct cache_set, kobj); 800 ssize_t v; 801 802 /* no user space access if system is rebooting */ 803 if (bcache_is_reboot) 804 return -EBUSY; 805 806 if (attr == &sysfs_unregister) 807 bch_cache_set_unregister(c); 808 809 if (attr == &sysfs_stop) 810 bch_cache_set_stop(c); 811 812 if (attr == &sysfs_synchronous) { 813 bool sync = strtoul_or_return(buf); 814 815 if (sync != CACHE_SYNC(&c->cache->sb)) { 816 SET_CACHE_SYNC(&c->cache->sb, sync); 817 bcache_write_super(c); 818 } 819 } 820 821 if (attr == &sysfs_flash_vol_create) { 822 int r; 823 uint64_t v; 824 825 strtoi_h_or_return(buf, v); 826 827 r = bch_flash_dev_create(c, v); 828 if (r) 829 return r; 830 } 831 832 if (attr == &sysfs_clear_stats) { 833 atomic_long_set(&c->writeback_keys_done, 0); 834 atomic_long_set(&c->writeback_keys_failed, 0); 835 836 memset(&c->gc_stats, 0, sizeof(struct gc_stat)); 837 bch_cache_accounting_clear(&c->accounting); 838 } 839 840 if (attr == &sysfs_trigger_gc) 841 force_wake_up_gc(c); 842 843 if (attr == &sysfs_prune_cache) { 844 struct shrink_control sc; 845 846 sc.gfp_mask = GFP_KERNEL; 847 sc.nr_to_scan = strtoul_or_return(buf); 848 c->shrink.scan_objects(&c->shrink, &sc); 849 } 850 851 sysfs_strtoul_clamp(congested_read_threshold_us, 852 c->congested_read_threshold_us, 853 0, UINT_MAX); 854 sysfs_strtoul_clamp(congested_write_threshold_us, 855 c->congested_write_threshold_us, 856 0, UINT_MAX); 857 858 if (attr == &sysfs_errors) { 859 v = __sysfs_match_string(error_actions, -1, buf); 860 if (v < 0) 861 return v; 862 863 c->on_error = v; 864 } 865 866 sysfs_strtoul_clamp(io_error_limit, c->error_limit, 0, UINT_MAX); 867 868 /* See count_io_errors() for why 88 */ 869 if (attr == &sysfs_io_error_halflife) { 870 unsigned long v = 0; 871 ssize_t ret; 872 873 ret = strtoul_safe_clamp(buf, v, 0, UINT_MAX); 874 if (!ret) { 875 c->error_decay = v / 88; 876 return size; 877 } 878 return ret; 879 } 880 881 if (attr == &sysfs_io_disable) { 882 v = strtoul_or_return(buf); 883 if (v) { 884 if (test_and_set_bit(CACHE_SET_IO_DISABLE, 885 &c->flags)) 886 pr_warn("CACHE_SET_IO_DISABLE already set\n"); 887 } else { 888 if (!test_and_clear_bit(CACHE_SET_IO_DISABLE, 889 &c->flags)) 890 pr_warn("CACHE_SET_IO_DISABLE already cleared\n"); 891 } 892 } 893 894 sysfs_strtoul_clamp(journal_delay_ms, 895 c->journal_delay_ms, 896 0, USHRT_MAX); 897 sysfs_strtoul_bool(verify, c->verify); 898 sysfs_strtoul_bool(key_merging_disabled, c->key_merging_disabled); 899 sysfs_strtoul(expensive_debug_checks, c->expensive_debug_checks); 900 sysfs_strtoul_bool(gc_always_rewrite, c->gc_always_rewrite); 901 sysfs_strtoul_bool(btree_shrinker_disabled, c->shrinker_disabled); 902 sysfs_strtoul_bool(copy_gc_enabled, c->copy_gc_enabled); 903 sysfs_strtoul_bool(idle_max_writeback_rate, 904 c->idle_max_writeback_rate_enabled); 905 906 /* 907 * write gc_after_writeback here may overwrite an already set 908 * BCH_DO_AUTO_GC, it doesn't matter because this flag will be 909 * set in next chance. 910 */ 911 sysfs_strtoul_clamp(gc_after_writeback, c->gc_after_writeback, 0, 1); 912 913 return size; 914 } 915 STORE_LOCKED(bch_cache_set) 916 917 SHOW(bch_cache_set_internal) 918 { 919 struct cache_set *c = container_of(kobj, struct cache_set, internal); 920 921 return bch_cache_set_show(&c->kobj, attr, buf); 922 } 923 924 STORE(bch_cache_set_internal) 925 { 926 struct cache_set *c = container_of(kobj, struct cache_set, internal); 927 928 /* no user space access if system is rebooting */ 929 if (bcache_is_reboot) 930 return -EBUSY; 931 932 return bch_cache_set_store(&c->kobj, attr, buf, size); 933 } 934 935 static void bch_cache_set_internal_release(struct kobject *k) 936 { 937 } 938 939 static struct attribute *bch_cache_set_files[] = { 940 &sysfs_unregister, 941 &sysfs_stop, 942 &sysfs_synchronous, 943 &sysfs_journal_delay_ms, 944 &sysfs_flash_vol_create, 945 946 &sysfs_bucket_size, 947 &sysfs_block_size, 948 &sysfs_tree_depth, 949 &sysfs_root_usage_percent, 950 &sysfs_btree_cache_size, 951 &sysfs_cache_available_percent, 952 953 &sysfs_average_key_size, 954 955 &sysfs_errors, 956 &sysfs_io_error_limit, 957 &sysfs_io_error_halflife, 958 &sysfs_congested, 959 &sysfs_congested_read_threshold_us, 960 &sysfs_congested_write_threshold_us, 961 &sysfs_clear_stats, 962 NULL 963 }; 964 KTYPE(bch_cache_set); 965 966 static struct attribute *bch_cache_set_internal_files[] = { 967 &sysfs_active_journal_entries, 968 969 sysfs_time_stats_attribute_list(btree_gc, sec, ms) 970 sysfs_time_stats_attribute_list(btree_split, sec, us) 971 sysfs_time_stats_attribute_list(btree_sort, ms, us) 972 sysfs_time_stats_attribute_list(btree_read, ms, us) 973 974 &sysfs_btree_nodes, 975 &sysfs_btree_used_percent, 976 &sysfs_btree_cache_max_chain, 977 978 &sysfs_bset_tree_stats, 979 &sysfs_cache_read_races, 980 &sysfs_reclaim, 981 &sysfs_reclaimed_journal_buckets, 982 &sysfs_flush_write, 983 &sysfs_writeback_keys_done, 984 &sysfs_writeback_keys_failed, 985 986 &sysfs_trigger_gc, 987 &sysfs_prune_cache, 988 #ifdef CONFIG_BCACHE_DEBUG 989 &sysfs_verify, 990 &sysfs_key_merging_disabled, 991 &sysfs_expensive_debug_checks, 992 #endif 993 &sysfs_gc_always_rewrite, 994 &sysfs_btree_shrinker_disabled, 995 &sysfs_copy_gc_enabled, 996 &sysfs_idle_max_writeback_rate, 997 &sysfs_gc_after_writeback, 998 &sysfs_io_disable, 999 &sysfs_cutoff_writeback, 1000 &sysfs_cutoff_writeback_sync, 1001 &sysfs_feature_compat, 1002 &sysfs_feature_ro_compat, 1003 &sysfs_feature_incompat, 1004 NULL 1005 }; 1006 KTYPE(bch_cache_set_internal); 1007 1008 static int __bch_cache_cmp(const void *l, const void *r) 1009 { 1010 cond_resched(); 1011 return *((uint16_t *)r) - *((uint16_t *)l); 1012 } 1013 1014 SHOW(__bch_cache) 1015 { 1016 struct cache *ca = container_of(kobj, struct cache, kobj); 1017 1018 sysfs_hprint(bucket_size, bucket_bytes(ca)); 1019 sysfs_hprint(block_size, block_bytes(ca)); 1020 sysfs_print(nbuckets, ca->sb.nbuckets); 1021 sysfs_print(discard, ca->discard); 1022 sysfs_hprint(written, atomic_long_read(&ca->sectors_written) << 9); 1023 sysfs_hprint(btree_written, 1024 atomic_long_read(&ca->btree_sectors_written) << 9); 1025 sysfs_hprint(metadata_written, 1026 (atomic_long_read(&ca->meta_sectors_written) + 1027 atomic_long_read(&ca->btree_sectors_written)) << 9); 1028 1029 sysfs_print(io_errors, 1030 atomic_read(&ca->io_errors) >> IO_ERROR_SHIFT); 1031 1032 if (attr == &sysfs_cache_replacement_policy) 1033 return bch_snprint_string_list(buf, PAGE_SIZE, 1034 cache_replacement_policies, 1035 CACHE_REPLACEMENT(&ca->sb)); 1036 1037 if (attr == &sysfs_priority_stats) { 1038 struct bucket *b; 1039 size_t n = ca->sb.nbuckets, i; 1040 size_t unused = 0, available = 0, dirty = 0, meta = 0; 1041 uint64_t sum = 0; 1042 /* Compute 31 quantiles */ 1043 uint16_t q[31], *p, *cached; 1044 ssize_t ret; 1045 1046 cached = p = vmalloc(array_size(sizeof(uint16_t), 1047 ca->sb.nbuckets)); 1048 if (!p) 1049 return -ENOMEM; 1050 1051 mutex_lock(&ca->set->bucket_lock); 1052 for_each_bucket(b, ca) { 1053 if (!GC_SECTORS_USED(b)) 1054 unused++; 1055 if (GC_MARK(b) == GC_MARK_RECLAIMABLE) 1056 available++; 1057 if (GC_MARK(b) == GC_MARK_DIRTY) 1058 dirty++; 1059 if (GC_MARK(b) == GC_MARK_METADATA) 1060 meta++; 1061 } 1062 1063 for (i = ca->sb.first_bucket; i < n; i++) 1064 p[i] = ca->buckets[i].prio; 1065 mutex_unlock(&ca->set->bucket_lock); 1066 1067 sort(p, n, sizeof(uint16_t), __bch_cache_cmp, NULL); 1068 1069 while (n && 1070 !cached[n - 1]) 1071 --n; 1072 1073 while (cached < p + n && 1074 *cached == BTREE_PRIO) 1075 cached++, n--; 1076 1077 for (i = 0; i < n; i++) 1078 sum += INITIAL_PRIO - cached[i]; 1079 1080 if (n) 1081 do_div(sum, n); 1082 1083 for (i = 0; i < ARRAY_SIZE(q); i++) 1084 q[i] = INITIAL_PRIO - cached[n * (i + 1) / 1085 (ARRAY_SIZE(q) + 1)]; 1086 1087 vfree(p); 1088 1089 ret = scnprintf(buf, PAGE_SIZE, 1090 "Unused: %zu%%\n" 1091 "Clean: %zu%%\n" 1092 "Dirty: %zu%%\n" 1093 "Metadata: %zu%%\n" 1094 "Average: %llu\n" 1095 "Sectors per Q: %zu\n" 1096 "Quantiles: [", 1097 unused * 100 / (size_t) ca->sb.nbuckets, 1098 available * 100 / (size_t) ca->sb.nbuckets, 1099 dirty * 100 / (size_t) ca->sb.nbuckets, 1100 meta * 100 / (size_t) ca->sb.nbuckets, sum, 1101 n * ca->sb.bucket_size / (ARRAY_SIZE(q) + 1)); 1102 1103 for (i = 0; i < ARRAY_SIZE(q); i++) 1104 ret += scnprintf(buf + ret, PAGE_SIZE - ret, 1105 "%u ", q[i]); 1106 ret--; 1107 1108 ret += scnprintf(buf + ret, PAGE_SIZE - ret, "]\n"); 1109 1110 return ret; 1111 } 1112 1113 return 0; 1114 } 1115 SHOW_LOCKED(bch_cache) 1116 1117 STORE(__bch_cache) 1118 { 1119 struct cache *ca = container_of(kobj, struct cache, kobj); 1120 ssize_t v; 1121 1122 /* no user space access if system is rebooting */ 1123 if (bcache_is_reboot) 1124 return -EBUSY; 1125 1126 if (attr == &sysfs_discard) { 1127 bool v = strtoul_or_return(buf); 1128 1129 if (blk_queue_discard(bdev_get_queue(ca->bdev))) 1130 ca->discard = v; 1131 1132 if (v != CACHE_DISCARD(&ca->sb)) { 1133 SET_CACHE_DISCARD(&ca->sb, v); 1134 bcache_write_super(ca->set); 1135 } 1136 } 1137 1138 if (attr == &sysfs_cache_replacement_policy) { 1139 v = __sysfs_match_string(cache_replacement_policies, -1, buf); 1140 if (v < 0) 1141 return v; 1142 1143 if ((unsigned int) v != CACHE_REPLACEMENT(&ca->sb)) { 1144 mutex_lock(&ca->set->bucket_lock); 1145 SET_CACHE_REPLACEMENT(&ca->sb, v); 1146 mutex_unlock(&ca->set->bucket_lock); 1147 1148 bcache_write_super(ca->set); 1149 } 1150 } 1151 1152 if (attr == &sysfs_clear_stats) { 1153 atomic_long_set(&ca->sectors_written, 0); 1154 atomic_long_set(&ca->btree_sectors_written, 0); 1155 atomic_long_set(&ca->meta_sectors_written, 0); 1156 atomic_set(&ca->io_count, 0); 1157 atomic_set(&ca->io_errors, 0); 1158 } 1159 1160 return size; 1161 } 1162 STORE_LOCKED(bch_cache) 1163 1164 static struct attribute *bch_cache_files[] = { 1165 &sysfs_bucket_size, 1166 &sysfs_block_size, 1167 &sysfs_nbuckets, 1168 &sysfs_priority_stats, 1169 &sysfs_discard, 1170 &sysfs_written, 1171 &sysfs_btree_written, 1172 &sysfs_metadata_written, 1173 &sysfs_io_errors, 1174 &sysfs_clear_stats, 1175 &sysfs_cache_replacement_policy, 1176 NULL 1177 }; 1178 KTYPE(bch_cache); 1179