Lines matching "dc" in drivers/md/bcache/sysfs.c
1 // SPDX-License-Identifier: GPL-2.0
33 "meta-only",
164 out += scnprintf(out, buf + size - out, in bch_snprint_string_list()
167 out[-1] = '\n'; in bch_snprint_string_list()
168 return out - buf; in bch_snprint_string_list()
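The three matches above are the tail of bch_snprint_string_list(), which formats a NULL-terminated option table into one line, bracketing the selected entry and overwriting the trailing space with a newline. A minimal userspace sketch of the same pattern (names illustrative; the kernel uses scnprintf() to stay bounded, plain snprintf() here assumes the buffer is large enough and the list non-empty):

        #include <stdio.h>
        #include <string.h>
        #include <sys/types.h>

        /* Print "a [b] c\n" style option lists; `selected` gets brackets. */
        static ssize_t snprint_string_list(char *buf, size_t size,
                                           const char * const list[],
                                           size_t selected)
        {
                char *out = buf;
                size_t i;

                for (i = 0; list[i]; i++)
                        out += snprintf(out, buf + size - out,
                                        i == selected ? "[%s] " : "%s ",
                                        list[i]);

                out[-1] = '\n';         /* overwrite the final space */
                return out - buf;
        }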
173 struct cached_dev *dc = container_of(kobj, struct cached_dev, in SHOW() local
176 int wb = dc->writeback_running; in SHOW()
178 #define var(stat) (dc->stat) in SHOW()
183 BDEV_CACHE_MODE(&dc->sb)); in SHOW()
188 dc->cache_readahead_policy); in SHOW()
193 dc->stop_when_cache_set_failed); in SHOW()
196 sysfs_printf(data_csum, "%i", dc->disk.data_csum); in SHOW()
205 wb ? atomic_long_read(&dc->writeback_rate.rate) << 9 : 0); in SHOW()
206 sysfs_printf(io_errors, "%i", atomic_read(&dc->io_errors)); in SHOW()
207 sysfs_printf(io_error_limit, "%i", dc->error_limit); in SHOW()
208 sysfs_printf(io_disable, "%i", dc->io_disable); in SHOW()
231 wb ? atomic_long_read(&dc->writeback_rate.rate) << 9 in SHOW()
233 bch_hprint(dirty, bcache_dev_sectors_dirty(&dc->disk) << 9); in SHOW()
234 bch_hprint(target, dc->writeback_rate_target << 9); in SHOW()
236 wb ? dc->writeback_rate_proportional << 9 : 0); in SHOW()
238 wb ? dc->writeback_rate_integral_scaled << 9 : 0); in SHOW()
239 bch_hprint(change, wb ? dc->writeback_rate_change << 9 : 0); in SHOW()
240  next_io = wb ? div64_s64(dc->writeback_rate.next - local_clock(), in SHOW()
256 bcache_dev_sectors_dirty(&dc->disk) << 9); in SHOW()
258 sysfs_hprint(stripe_size, ((uint64_t)dc->disk.stripe_size) << 9); in SHOW()
263 sysfs_print(running, atomic_read(&dc->running)); in SHOW()
264 sysfs_print(state, states[BDEV_STATE(&dc->sb)]); in SHOW()
267 memcpy(buf, dc->sb.label, SB_LABEL_SIZE); in SHOW()
274 snprintf(buf, BDEVNAME_SIZE + 1, "%pg", dc->bdev); in SHOW()
280 /* convert binary uuid into 36-byte string plus '\0' */ in SHOW()
281 snprintf(buf, 36+1, "%pU", dc->sb.uuid); in SHOW()
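Throughout this SHOW() body, counts kept in 512-byte sectors are shifted left by 9 before being handed to bch_hprint()/sysfs_hprint() for human-readable output; sysfs writes convert the other way with a right shift (see the `v >> 9` size store further down). A one-line worked example with illustrative values:

        uint64_t sectors = 2048;                /* illustrative value */
        uint64_t bytes   = sectors << 9;        /* 2048 * 512 = 1 MiB */
        uint64_t back    = bytes >> 9;          /* and back to sectors */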
293 struct cached_dev *dc = container_of(kobj, struct cached_dev, in STORE() local
301 return -EBUSY; in STORE()
303 #define d_strtoul(var) sysfs_strtoul(var, dc->var) in STORE()
304 #define d_strtoul_nonzero(var) sysfs_strtoul_clamp(var, dc->var, 1, INT_MAX) in STORE()
305 #define d_strtoi_h(var) sysfs_hatoi(var, dc->var) in STORE()
307 sysfs_strtoul(data_csum, dc->disk.data_csum); in STORE()
309 sysfs_strtoul_bool(bypass_torture_test, dc->bypass_torture_test); in STORE()
310 sysfs_strtoul_bool(writeback_metadata, dc->writeback_metadata); in STORE()
311 sysfs_strtoul_bool(writeback_running, dc->writeback_running); in STORE()
312 sysfs_strtoul_bool(writeback_consider_fragment, dc->writeback_consider_fragment); in STORE()
313 sysfs_strtoul_clamp(writeback_delay, dc->writeback_delay, 0, UINT_MAX); in STORE()
315 sysfs_strtoul_clamp(writeback_percent, dc->writeback_percent, in STORE()
320 long int v = atomic_long_read(&dc->writeback_rate.rate); in STORE()
325 atomic_long_set(&dc->writeback_rate.rate, v); in STORE()
333 dc->writeback_rate_update_seconds, in STORE()
336 dc->writeback_rate_i_term_inverse, in STORE()
339 dc->writeback_rate_p_term_inverse, in STORE()
342 dc->writeback_rate_fp_term_low, in STORE()
343 1, dc->writeback_rate_fp_term_mid - 1); in STORE()
345 dc->writeback_rate_fp_term_mid, in STORE()
346 dc->writeback_rate_fp_term_low + 1, in STORE()
347 dc->writeback_rate_fp_term_high - 1); in STORE()
349 dc->writeback_rate_fp_term_high, in STORE()
350 dc->writeback_rate_fp_term_mid + 1, UINT_MAX); in STORE()
352 dc->writeback_rate_minimum, in STORE()
355 sysfs_strtoul_clamp(io_error_limit, dc->error_limit, 0, INT_MAX); in STORE()
360 dc->io_disable = v ? 1 : 0; in STORE()
364 dc->sequential_cutoff, in STORE()
368 bch_cache_accounting_clear(&dc->accounting); in STORE()
372 v = bch_cached_dev_run(dc); in STORE()
378 v = __sysfs_match_string(bch_cache_modes, -1, buf); in STORE()
382 if ((unsigned int) v != BDEV_CACHE_MODE(&dc->sb)) { in STORE()
383 SET_BDEV_CACHE_MODE(&dc->sb, v); in STORE()
384 bch_write_bdev_super(dc, NULL); in STORE()
389 v = __sysfs_match_string(bch_reada_cache_policies, -1, buf); in STORE()
393 if ((unsigned int) v != dc->cache_readahead_policy) in STORE()
394 dc->cache_readahead_policy = v; in STORE()
398 v = __sysfs_match_string(bch_stop_on_failure_modes, -1, buf); in STORE()
402 dc->stop_when_cache_set_failed = v; in STORE()
407 return -EINVAL; in STORE()
408 memcpy(dc->sb.label, buf, size); in STORE()
410 dc->sb.label[size] = '\0'; in STORE()
411 if (size && dc->sb.label[size - 1] == '\n') in STORE()
412 dc->sb.label[size - 1] = '\0'; in STORE()
413 bch_write_bdev_super(dc, NULL); in STORE()
414 if (dc->disk.c) { in STORE()
415 memcpy(dc->disk.c->uuids[dc->disk.id].label, in STORE()
417 bch_uuid_write(dc->disk.c); in STORE()
421 return -ENOMEM; in STORE()
423 add_uevent_var(env, "CACHED_UUID=%pU", dc->sb.uuid); in STORE()
425 kobject_uevent_env(&disk_to_dev(dc->disk.disk)->kobj, in STORE()
427 env->envp); in STORE()
435 return -EINVAL; in STORE()
437 v = -ENOENT; in STORE()
439 v = bch_cached_dev_attach(dc, c, set_uuid); in STORE()
443 if (v == -ENOENT) in STORE()
448 if (attr == &sysfs_detach && dc->disk.c) in STORE()
449 bch_cached_dev_detach(dc); in STORE()
452 bcache_device_stop(&dc->disk); in STORE()
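Several of the stores above (cache_mode, readahead policy, stop_when_cache_set_failed) call __sysfs_match_string() with n == -1, which scans a NULL-terminated table and returns the matching index or -EINVAL; the kernel helper also accepts input with or without a trailing newline. A self-contained userspace analog of that idiom:

        #include <errno.h>
        #include <string.h>

        /* Return the index of `buf` in the NULL-terminated `list`,
         * ignoring one trailing '\n', or -EINVAL if nothing matches. */
        static int match_string_list(const char * const list[],
                                     const char *buf)
        {
                size_t len = strcspn(buf, "\n");
                int i;

                for (i = 0; list[i]; i++)
                        if (strlen(list[i]) == len &&
                            !strncmp(list[i], buf, len))
                                return i;
                return -EINVAL;
        }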
459 struct cached_dev *dc = container_of(kobj, struct cached_dev, in STORE() local
464 return -EBUSY; in STORE()
470 /* dc->writeback_running changed in __cached_dev_store() */ in STORE()
471 if (IS_ERR_OR_NULL(dc->writeback_thread)) { in STORE()
476 if (dc->writeback_running) { in STORE()
477 dc->writeback_running = false; in STORE()
478 pr_err("%s: failed to run non-existent writeback thread\n", in STORE()
479 dc->disk.disk->disk_name); in STORE()
483 * writeback kthread will check if dc->writeback_running in STORE()
486 bch_writeback_queue(dc); in STORE()
494 if ((dc->disk.c != NULL) && in STORE()
495 (!test_and_set_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags))) in STORE()
496 schedule_delayed_work(&dc->writeback_rate_update, in STORE()
497 dc->writeback_rate_update_seconds * HZ); in STORE()
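Both STORE() bodies, like the SHOW() above them, recover the cached_dev from the embedded kobject via container_of(). Its classic definition, simplified from the kernel's type-checked version:

        #include <stddef.h>

        /* Given a pointer to `member` inside `type`, step back to the
         * enclosing structure. The kernel version adds type checking. */
        #define container_of(ptr, type, member) \
                ((type *)((char *)(ptr) - offsetof(type, member)))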
553 struct uuid_entry *u = &d->c->uuids[d->id]; in SHOW()
555 sysfs_printf(data_csum, "%i", d->data_csum); in SHOW()
556 sysfs_hprint(size, u->sectors << 9); in SHOW()
559 memcpy(buf, u->label, SB_LABEL_SIZE); in SHOW()
572 struct uuid_entry *u = &d->c->uuids[d->id]; in STORE()
576 return -EBUSY; in STORE()
578 sysfs_strtoul(data_csum, d->data_csum); in STORE()
585 u->sectors = v >> 9; in STORE()
586 bch_uuid_write(d->c); in STORE()
587 set_capacity(d->disk, u->sectors); in STORE()
591 memcpy(u->label, buf, SB_LABEL_SIZE); in STORE()
592 bch_uuid_write(d->c); in STORE()
596 set_bit(BCACHE_DEV_DETACHING, &d->flags); in STORE()
626 op->nodes++; in bch_btree_bset_stats()
627 bch_btree_keys_stats(&b->keys, &op->stats); in bch_btree_bset_stats()
638 bch_btree_op_init(&op.op, -1); in bch_bset_print_stats()
670 b = c->root; in bch_root_usage()
671 rw_lock(false, b, b->level); in bch_root_usage()
672 } while (b != c->root); in bch_root_usage()
674 for_each_key_filter(&b->keys, k, &iter, bch_ptr_bad) in bch_root_usage()
687 mutex_lock(&c->bucket_lock); in bch_cache_size()
688 list_for_each_entry(b, &c->btree_cache, list) in bch_cache_size()
689 ret += 1 << (b->keys.page_order + PAGE_SHIFT); in bch_cache_size()
691 mutex_unlock(&c->bucket_lock); in bch_cache_size()
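bch_cache_size() sums the in-memory footprint of the btree node cache under bucket_lock; each node buffer spans 2^page_order pages, hence the byte size 1 << (page_order + PAGE_SHIFT). Worked example assuming 4 KiB pages:

        #define PAGE_SHIFT 12           /* assumption: 4 KiB pages */

        unsigned int page_order = 2;    /* illustrative node order */
        size_t bytes = (size_t)1 << (page_order + PAGE_SHIFT);  /* 16 KiB */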
700 mutex_lock(&c->bucket_lock); in bch_cache_max_chain()
702 for (h = c->bucket_hash; in bch_cache_max_chain()
703 h < c->bucket_hash + (1 << BUCKET_HASH_BITS); in bch_cache_max_chain()
708 mutex_unlock(&c->bucket_lock); in bch_cache_max_chain()
714 return div64_u64(c->gc_stats.key_bytes * 100, in bch_btree_used()
715 (c->gc_stats.nodes ?: 1) * btree_bytes(c)); in bch_btree_used()
720 return c->gc_stats.nkeys in bch_average_key_size()
721 ? div64_u64(c->gc_stats.data, c->gc_stats.nkeys) in bch_average_key_size()
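bch_btree_used() reports key bytes as a percentage of total node bytes, and bch_average_key_size() divides total data by key count. The GNU `x ?: y` shorthand (equivalent to `x ? x : y`) keeps the denominator nonzero before any GC pass has populated the stats. A sketch with illustrative numbers:

        uint64_t key_bytes = 3 << 20;   /* illustrative: 3 MiB of keys */
        uint64_t nodes     = 16;        /* illustrative node count */
        uint64_t node_size = 1 << 18;   /* illustrative: 256 KiB nodes */

        /* (nodes ?: 1) == nodes when nonzero, else 1: no divide-by-zero. */
        uint64_t used_pct = key_bytes * 100 / ((nodes ?: 1) * node_size);
        /* 3 MiB * 100 / 4 MiB == 75 */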
729 sysfs_print(synchronous, CACHE_SYNC(&c->cache->sb)); in SHOW()
730 sysfs_print(journal_delay_ms, c->journal_delay_ms); in SHOW()
731 sysfs_hprint(bucket_size, bucket_bytes(c->cache)); in SHOW()
732 sysfs_hprint(block_size, block_bytes(c->cache)); in SHOW()
733 sysfs_print(tree_depth, c->root->level); in SHOW()
738 sysfs_print(cache_available_percent, 100 - c->gc_stats.in_use); in SHOW()
740 sysfs_print_time_stats(&c->btree_gc_time, btree_gc, sec, ms); in SHOW()
741 sysfs_print_time_stats(&c->btree_split_time, btree_split, sec, us); in SHOW()
742 sysfs_print_time_stats(&c->sort.time, btree_sort, ms, us); in SHOW()
743 sysfs_print_time_stats(&c->btree_read_time, btree_read, ms, us); in SHOW()
746 sysfs_print(btree_nodes, c->gc_stats.nodes); in SHOW()
750 atomic_long_read(&c->cache_read_races)); in SHOW()
753 atomic_long_read(&c->reclaim)); in SHOW()
756 atomic_long_read(&c->reclaimed_journal_buckets)); in SHOW()
759 atomic_long_read(&c->flush_write)); in SHOW()
762 atomic_long_read(&c->writeback_keys_done)); in SHOW()
764 atomic_long_read(&c->writeback_keys_failed)); in SHOW()
768 c->on_error); in SHOW()
771 sysfs_print(io_error_halflife, c->error_decay * 88); in SHOW()
772 sysfs_print(io_error_limit, c->error_limit); in SHOW()
777 c->congested_read_threshold_us); in SHOW()
779 c->congested_write_threshold_us); in SHOW()
784 sysfs_print(active_journal_entries, fifo_used(&c->journal.pin)); in SHOW()
785 sysfs_printf(verify, "%i", c->verify); in SHOW()
786 sysfs_printf(key_merging_disabled, "%i", c->key_merging_disabled); in SHOW()
788 "%i", c->expensive_debug_checks); in SHOW()
789 sysfs_printf(gc_always_rewrite, "%i", c->gc_always_rewrite); in SHOW()
790 sysfs_printf(btree_shrinker_disabled, "%i", c->shrinker_disabled); in SHOW()
791 sysfs_printf(copy_gc_enabled, "%i", c->copy_gc_enabled); in SHOW()
793 c->idle_max_writeback_rate_enabled); in SHOW()
794 sysfs_printf(gc_after_writeback, "%i", c->gc_after_writeback); in SHOW()
796 test_bit(CACHE_SET_IO_DISABLE, &c->flags)); in SHOW()
819 return -EBUSY; in STORE()
830 if (sync != CACHE_SYNC(&c->cache->sb)) { in STORE()
831 SET_CACHE_SYNC(&c->cache->sb, sync); in STORE()
848 atomic_long_set(&c->writeback_keys_done, 0); in STORE()
849 atomic_long_set(&c->writeback_keys_failed, 0); in STORE()
851 memset(&c->gc_stats, 0, sizeof(struct gc_stat)); in STORE()
852 bch_cache_accounting_clear(&c->accounting); in STORE()
863 if (c->shrink) in STORE()
864 c->shrink->scan_objects(c->shrink, &sc); in STORE()
868 c->congested_read_threshold_us, in STORE()
871 c->congested_write_threshold_us, in STORE()
875 v = __sysfs_match_string(error_actions, -1, buf); in STORE()
879 c->on_error = v; in STORE()
882 sysfs_strtoul_clamp(io_error_limit, c->error_limit, 0, UINT_MAX); in STORE()
891 c->error_decay = v / 88; in STORE()
901 &c->flags)) in STORE()
905 &c->flags)) in STORE()
911 c->journal_delay_ms, in STORE()
913 sysfs_strtoul_bool(verify, c->verify); in STORE()
914 sysfs_strtoul_bool(key_merging_disabled, c->key_merging_disabled); in STORE()
915 sysfs_strtoul(expensive_debug_checks, c->expensive_debug_checks); in STORE()
916 sysfs_strtoul_bool(gc_always_rewrite, c->gc_always_rewrite); in STORE()
917 sysfs_strtoul_bool(btree_shrinker_disabled, c->shrinker_disabled); in STORE()
918 sysfs_strtoul_bool(copy_gc_enabled, c->copy_gc_enabled); in STORE()
920 c->idle_max_writeback_rate_enabled); in STORE()
927 sysfs_strtoul_clamp(gc_after_writeback, c->gc_after_writeback, 0, 1); in STORE()
937 return bch_cache_set_show(&c->kobj, attr, buf); in SHOW()
946 return -EBUSY; in STORE()
948 return bch_cache_set_store(&c->kobj, attr, buf, size); in STORE()
1029 return *((uint16_t *)r) - *((uint16_t *)l); in __bch_cache_cmp()
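__bch_cache_cmp() is the sort() comparator for the bucket-priority array used by priority_stats below; returning *r - *l orders the values descending. Userspace analog with qsort():

        #include <stdint.h>
        #include <stdlib.h>

        /* Descending order: larger priorities sort first. */
        static int cmp_prio_desc(const void *l, const void *r)
        {
                return *(const uint16_t *)r - *(const uint16_t *)l;
        }

        /* usage: qsort(p, n, sizeof(uint16_t), cmp_prio_desc); */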
1038 sysfs_print(nbuckets, ca->sb.nbuckets); in SHOW()
1039 sysfs_print(discard, ca->discard); in SHOW()
1040 sysfs_hprint(written, atomic_long_read(&ca->sectors_written) << 9); in SHOW()
1042 atomic_long_read(&ca->btree_sectors_written) << 9); in SHOW()
1044 (atomic_long_read(&ca->meta_sectors_written) + in SHOW()
1045 atomic_long_read(&ca->btree_sectors_written)) << 9); in SHOW()
1048 atomic_read(&ca->io_errors) >> IO_ERROR_SHIFT); in SHOW()
1053 CACHE_REPLACEMENT(&ca->sb)); in SHOW()
1057 size_t n = ca->sb.nbuckets, i; in SHOW()
1065 ca->sb.nbuckets)); in SHOW()
1067 return -ENOMEM; in SHOW()
1069 mutex_lock(&ca->set->bucket_lock); in SHOW()
1081 for (i = ca->sb.first_bucket; i < n; i++) in SHOW()
1082 p[i] = ca->buckets[i].prio; in SHOW()
1083 mutex_unlock(&ca->set->bucket_lock); in SHOW()
1088 !cached[n - 1]) in SHOW()
1089 --n; in SHOW()
1094 n--; in SHOW()
1098 sum += INITIAL_PRIO - cached[i]; in SHOW()
1104 q[i] = INITIAL_PRIO - cached[n * (i + 1) / in SHOW()
1117 unused * 100 / (size_t) ca->sb.nbuckets, in SHOW()
1118 available * 100 / (size_t) ca->sb.nbuckets, in SHOW()
1119 dirty * 100 / (size_t) ca->sb.nbuckets, in SHOW()
1120 meta * 100 / (size_t) ca->sb.nbuckets, sum, in SHOW()
1121 n * ca->sb.bucket_size / (ARRAY_SIZE(q) + 1)); in SHOW()
1125 ret--; in SHOW()
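The priority_stats body above copies the bucket priorities under bucket_lock, sorts them, trims unused entries off the tail, and then samples evenly spaced quantiles: element n * (i + 1) / (ARRAY_SIZE(q) + 1) for each output slot i (the kernel reports INITIAL_PRIO minus each sample rather than the raw priority). A hedged sketch of the sampling step, with the array size illustrative:

        #include <stdint.h>
        #include <stddef.h>

        #define NQ 31           /* illustrative; sized to fit the output line */

        /* Pick NQ evenly spaced samples from a sorted array of n values. */
        static void sample_quantiles(const uint16_t *sorted, size_t n,
                                     uint16_t q[NQ])
        {
                size_t i;

                for (i = 0; i < NQ; i++)
                        q[i] = sorted[n * (i + 1) / (NQ + 1)];
        }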
1143 return -EBUSY; in STORE()
1148 if (bdev_max_discard_sectors(ca->bdev)) in STORE()
1149 ca->discard = v; in STORE()
1151 if (v != CACHE_DISCARD(&ca->sb)) { in STORE()
1152 SET_CACHE_DISCARD(&ca->sb, v); in STORE()
1153 bcache_write_super(ca->set); in STORE()
1158 v = __sysfs_match_string(cache_replacement_policies, -1, buf); in STORE()
1162 if ((unsigned int) v != CACHE_REPLACEMENT(&ca->sb)) { in STORE()
1163 mutex_lock(&ca->set->bucket_lock); in STORE()
1164 SET_CACHE_REPLACEMENT(&ca->sb, v); in STORE()
1165 mutex_unlock(&ca->set->bucket_lock); in STORE()
1167 bcache_write_super(ca->set); in STORE()
1172 atomic_long_set(&ca->sectors_written, 0); in STORE()
1173 atomic_long_set(&ca->btree_sectors_written, 0); in STORE()
1174 atomic_long_set(&ca->meta_sectors_written, 0); in STORE()
1175 atomic_set(&ca->io_count, 0); in STORE()
1176 atomic_set(&ca->io_errors, 0); in STORE()