xref: /linux/drivers/md/bcache/sysfs.c (revision 6f10f7d1b02b1bbc305f88d7696445dd38b13881)
// SPDX-License-Identifier: GPL-2.0
/*
 * bcache sysfs interfaces
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */

#include "bcache.h"
#include "sysfs.h"
#include "btree.h"
#include "request.h"
#include "writeback.h"

#include <linux/blkdev.h>
#include <linux/sort.h>
#include <linux/sched/clock.h>

/* Default is 0 ("writethrough") */
static const char * const bch_cache_modes[] = {
	"writethrough",
	"writeback",
	"writearound",
	"none",
	NULL
};

/* Default is 0 ("auto") */
static const char * const bch_stop_on_failure_modes[] = {
	"auto",
	"always",
	NULL
};

static const char * const cache_replacement_policies[] = {
	"lru",
	"fifo",
	"random",
	NULL
};

static const char * const error_actions[] = {
	"unregister",
	"panic",
	NULL
};

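/*
 * The *_attribute() and sysfs_time_stats_attribute() helpers below come
 * from this driver's "sysfs.h"; roughly, they declare static
 * struct attribute objects named sysfs_<name> with write-only, read-only
 * or read-write file modes (a summary, not their exact expansion).
 */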
write_attribute(attach);
write_attribute(detach);
write_attribute(unregister);
write_attribute(stop);
write_attribute(clear_stats);
write_attribute(trigger_gc);
write_attribute(prune_cache);
write_attribute(flash_vol_create);

read_attribute(bucket_size);
read_attribute(block_size);
read_attribute(nbuckets);
read_attribute(tree_depth);
read_attribute(root_usage_percent);
read_attribute(priority_stats);
read_attribute(btree_cache_size);
read_attribute(btree_cache_max_chain);
read_attribute(cache_available_percent);
read_attribute(written);
read_attribute(btree_written);
read_attribute(metadata_written);
read_attribute(active_journal_entries);

sysfs_time_stats_attribute(btree_gc,	sec, ms);
sysfs_time_stats_attribute(btree_split, sec, us);
sysfs_time_stats_attribute(btree_sort,	ms,  us);
sysfs_time_stats_attribute(btree_read,	ms,  us);

read_attribute(btree_nodes);
read_attribute(btree_used_percent);
read_attribute(average_key_size);
read_attribute(dirty_data);
read_attribute(bset_tree_stats);

read_attribute(state);
read_attribute(cache_read_races);
read_attribute(reclaim);
read_attribute(flush_write);
read_attribute(retry_flush_write);
read_attribute(writeback_keys_done);
read_attribute(writeback_keys_failed);
read_attribute(io_errors);
read_attribute(congested);
rw_attribute(congested_read_threshold_us);
rw_attribute(congested_write_threshold_us);

rw_attribute(sequential_cutoff);
rw_attribute(data_csum);
rw_attribute(cache_mode);
rw_attribute(stop_when_cache_set_failed);
rw_attribute(writeback_metadata);
rw_attribute(writeback_running);
rw_attribute(writeback_percent);
rw_attribute(writeback_delay);
rw_attribute(writeback_rate);

rw_attribute(writeback_rate_update_seconds);
rw_attribute(writeback_rate_i_term_inverse);
rw_attribute(writeback_rate_p_term_inverse);
rw_attribute(writeback_rate_minimum);
read_attribute(writeback_rate_debug);

read_attribute(stripe_size);
read_attribute(partial_stripes_expensive);

rw_attribute(synchronous);
rw_attribute(journal_delay_ms);
rw_attribute(io_disable);
rw_attribute(discard);
rw_attribute(running);
rw_attribute(label);
rw_attribute(readahead);
rw_attribute(errors);
rw_attribute(io_error_limit);
rw_attribute(io_error_halflife);
rw_attribute(verify);
rw_attribute(bypass_torture_test);
rw_attribute(key_merging_disabled);
rw_attribute(gc_always_rewrite);
rw_attribute(expensive_debug_checks);
rw_attribute(cache_replacement_policy);
rw_attribute(btree_shrinker_disabled);
rw_attribute(copy_gc_enabled);
rw_attribute(size);

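/*
 * Print a NULL-terminated string list with the selected entry bracketed,
 * e.g. for cache_mode with writeback selected:
 *
 *   writethrough [writeback] writearound none
 *
 * Each entry is emitted with a trailing space; the last one is then
 * overwritten with '\n'. scnprintf() (rather than snprintf()) returns the
 * number of bytes actually written, so 'out' can never advance past
 * buf + size.
 */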
static ssize_t bch_snprint_string_list(char *buf,
				       size_t size,
				       const char * const list[],
				       size_t selected)
{
	char *out = buf;
	size_t i;

	for (i = 0; list[i]; i++)
		out += scnprintf(out, buf + size - out,
				 i == selected ? "[%s] " : "%s ", list[i]);

	out[-1] = '\n';
	return out - buf;
}

SHOW(__bch_cached_dev)
{
	struct cached_dev *dc = container_of(kobj, struct cached_dev,
					     disk.kobj);
	const char *states[] = { "no cache", "clean", "dirty", "inconsistent" };
	int wb = dc->writeback_running;

#define var(stat)		(dc->stat)

	if (attr == &sysfs_cache_mode)
		return bch_snprint_string_list(buf, PAGE_SIZE,
					       bch_cache_modes,
					       BDEV_CACHE_MODE(&dc->sb));

	if (attr == &sysfs_stop_when_cache_set_failed)
		return bch_snprint_string_list(buf, PAGE_SIZE,
					       bch_stop_on_failure_modes,
					       dc->stop_when_cache_set_failed);

	sysfs_printf(data_csum,		"%i", dc->disk.data_csum);
	var_printf(verify,		"%i");
	var_printf(bypass_torture_test,	"%i");
	var_printf(writeback_metadata,	"%i");
	var_printf(writeback_running,	"%i");
	var_print(writeback_delay);
	var_print(writeback_percent);
	sysfs_hprint(writeback_rate,
		     wb ? atomic_long_read(&dc->writeback_rate.rate) << 9 : 0);
	sysfs_printf(io_errors,		"%i", atomic_read(&dc->io_errors));
	sysfs_printf(io_error_limit,	"%i", dc->error_limit);
	sysfs_printf(io_disable,	"%i", dc->io_disable);
	var_print(writeback_rate_update_seconds);
	var_print(writeback_rate_i_term_inverse);
	var_print(writeback_rate_p_term_inverse);
	var_print(writeback_rate_minimum);

	if (attr == &sysfs_writeback_rate_debug) {
		char rate[20];
		char dirty[20];
		char target[20];
		char proportional[20];
		char integral[20];
		char change[20];
		s64 next_io;

		/*
		 * Except for dirty and target, the other values should
		 * be 0 if writeback is not running.
		 */
		bch_hprint(rate,
			   wb ? atomic_long_read(&dc->writeback_rate.rate) << 9
			      : 0);
		bch_hprint(dirty, bcache_dev_sectors_dirty(&dc->disk) << 9);
		bch_hprint(target, dc->writeback_rate_target << 9);
		bch_hprint(proportional,
			   wb ? dc->writeback_rate_proportional << 9 : 0);
		bch_hprint(integral,
			   wb ? dc->writeback_rate_integral_scaled << 9 : 0);
		bch_hprint(change, wb ? dc->writeback_rate_change << 9 : 0);
		next_io = wb ? div64_s64(dc->writeback_rate.next - local_clock(),
					 NSEC_PER_MSEC) : 0;

		return sprintf(buf,
			       "rate:\t\t%s/sec\n"
			       "dirty:\t\t%s\n"
			       "target:\t\t%s\n"
			       "proportional:\t%s\n"
			       "integral:\t%s\n"
			       "change:\t\t%s/sec\n"
			       "next io:\t%llims\n",
			       rate, dirty, target, proportional,
			       integral, change, next_io);
	}

	sysfs_hprint(dirty_data,
		     bcache_dev_sectors_dirty(&dc->disk) << 9);

	sysfs_hprint(stripe_size,	 ((uint64_t)dc->disk.stripe_size) << 9);
	var_printf(partial_stripes_expensive,	"%u");

	var_hprint(sequential_cutoff);
	var_hprint(readahead);

	sysfs_print(running,		atomic_read(&dc->running));
	sysfs_print(state,		states[BDEV_STATE(&dc->sb)]);

	if (attr == &sysfs_label) {
		memcpy(buf, dc->sb.label, SB_LABEL_SIZE);
		buf[SB_LABEL_SIZE] = '\0';
		strcat(buf, "\n");
		return strlen(buf);
	}

#undef var
	return 0;
}
SHOW_LOCKED(bch_cached_dev)
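
/*
 * SHOW_LOCKED() is a wrapper macro from this driver's "sysfs.h"; as used
 * here it generates bch_cached_dev_show(), which calls the
 * __bch_cached_dev show routine above with bch_register_lock held
 * (a summary of the macro, not its exact expansion).
 */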

STORE(__cached_dev)
{
	struct cached_dev *dc = container_of(kobj, struct cached_dev,
					     disk.kobj);
	ssize_t v;
	struct cache_set *c;
	struct kobj_uevent_env *env;

#define d_strtoul(var)		sysfs_strtoul(var, dc->var)
#define d_strtoul_nonzero(var)	sysfs_strtoul_clamp(var, dc->var, 1, INT_MAX)
#define d_strtoi_h(var)		sysfs_hatoi(var, dc->var)

	sysfs_strtoul(data_csum,	dc->disk.data_csum);
	d_strtoul(verify);
	d_strtoul(bypass_torture_test);
	d_strtoul(writeback_metadata);
	d_strtoul(writeback_running);
	d_strtoul(writeback_delay);

	sysfs_strtoul_clamp(writeback_percent, dc->writeback_percent, 0, 40);

	if (attr == &sysfs_writeback_rate) {
		ssize_t ret;
		long int v = atomic_long_read(&dc->writeback_rate.rate);

		ret = strtoul_safe_clamp(buf, v, 1, INT_MAX);

		if (!ret) {
			atomic_long_set(&dc->writeback_rate.rate, v);
			ret = size;
		}

		return ret;
	}

	sysfs_strtoul_clamp(writeback_rate_update_seconds,
			    dc->writeback_rate_update_seconds,
			    1, WRITEBACK_RATE_UPDATE_SECS_MAX);
	d_strtoul(writeback_rate_i_term_inverse);
	d_strtoul_nonzero(writeback_rate_p_term_inverse);

	sysfs_strtoul_clamp(io_error_limit, dc->error_limit, 0, INT_MAX);

	if (attr == &sysfs_io_disable) {
		int v = strtoul_or_return(buf);

		dc->io_disable = v ? 1 : 0;
	}

	d_strtoi_h(sequential_cutoff);
	d_strtoi_h(readahead);

	if (attr == &sysfs_clear_stats)
		bch_cache_accounting_clear(&dc->accounting);

	if (attr == &sysfs_running &&
	    strtoul_or_return(buf))
		bch_cached_dev_run(dc);

	if (attr == &sysfs_cache_mode) {
		v = __sysfs_match_string(bch_cache_modes, -1, buf);
		if (v < 0)
			return v;

		if ((unsigned int) v != BDEV_CACHE_MODE(&dc->sb)) {
			SET_BDEV_CACHE_MODE(&dc->sb, v);
			bch_write_bdev_super(dc, NULL);
		}
	}

	if (attr == &sysfs_stop_when_cache_set_failed) {
		v = __sysfs_match_string(bch_stop_on_failure_modes, -1, buf);
		if (v < 0)
			return v;

		dc->stop_when_cache_set_failed = v;
	}

	if (attr == &sysfs_label) {
		if (size > SB_LABEL_SIZE)
			return -EINVAL;
		memcpy(dc->sb.label, buf, size);
		if (size < SB_LABEL_SIZE)
			dc->sb.label[size] = '\0';
		if (size && dc->sb.label[size - 1] == '\n')
			dc->sb.label[size - 1] = '\0';
		bch_write_bdev_super(dc, NULL);
		if (dc->disk.c) {
			memcpy(dc->disk.c->uuids[dc->disk.id].label,
			       buf, SB_LABEL_SIZE);
			bch_uuid_write(dc->disk.c);
		}
		env = kzalloc(sizeof(struct kobj_uevent_env), GFP_KERNEL);
		if (!env)
			return -ENOMEM;
		add_uevent_var(env, "DRIVER=bcache");
		add_uevent_var(env, "CACHED_UUID=%pU", dc->sb.uuid);
		add_uevent_var(env, "CACHED_LABEL=%s", buf);
		kobject_uevent_env(&disk_to_dev(dc->disk.disk)->kobj,
				   KOBJ_CHANGE, env->envp);
		kfree(env);
	}

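	/*
	 * Writing a cache set's UUID here attaches the backing device to
	 * that set. Illustrative usage from userspace (device names are
	 * examples):
	 *   echo <cset-uuid> > /sys/block/bcache0/bcache/attach
	 */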
	if (attr == &sysfs_attach) {
		uint8_t		set_uuid[16];

		if (bch_parse_uuid(buf, set_uuid) < 16)
			return -EINVAL;

		v = -ENOENT;
		list_for_each_entry(c, &bch_cache_sets, list) {
			v = bch_cached_dev_attach(dc, c, set_uuid);
			if (!v)
				return size;
		}
		if (v == -ENOENT)
			pr_err("Can't attach %s: cache set not found", buf);
		return v;
	}

	if (attr == &sysfs_detach && dc->disk.c)
		bch_cached_dev_detach(dc);

	if (attr == &sysfs_stop)
		bcache_device_stop(&dc->disk);

	return size;
}

STORE(bch_cached_dev)
{
	struct cached_dev *dc = container_of(kobj, struct cached_dev,
					     disk.kobj);

	mutex_lock(&bch_register_lock);
	size = __cached_dev_store(kobj, attr, buf, size);

	if (attr == &sysfs_writeback_running)
		bch_writeback_queue(dc);

	if (attr == &sysfs_writeback_percent)
		if (!test_and_set_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags))
			schedule_delayed_work(&dc->writeback_rate_update,
				      dc->writeback_rate_update_seconds * HZ);

	mutex_unlock(&bch_register_lock);
	return size;
}

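/*
 * These attributes appear under /sys/block/<bdev>/bcache/ for a registered
 * backing device. Illustrative usage (assuming the device came up as
 * bcache0):
 *   echo writeback > /sys/block/bcache0/bcache/cache_mode
 *   echo 10 > /sys/block/bcache0/bcache/writeback_percent
 *   cat /sys/block/bcache0/bcache/state
 */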
static struct attribute *bch_cached_dev_files[] = {
	&sysfs_attach,
	&sysfs_detach,
	&sysfs_stop,
#if 0
	&sysfs_data_csum,
#endif
	&sysfs_cache_mode,
	&sysfs_stop_when_cache_set_failed,
	&sysfs_writeback_metadata,
	&sysfs_writeback_running,
	&sysfs_writeback_delay,
	&sysfs_writeback_percent,
	&sysfs_writeback_rate,
	&sysfs_writeback_rate_update_seconds,
	&sysfs_writeback_rate_i_term_inverse,
	&sysfs_writeback_rate_p_term_inverse,
	&sysfs_writeback_rate_debug,
	&sysfs_errors,
	&sysfs_io_error_limit,
	&sysfs_io_disable,
	&sysfs_dirty_data,
	&sysfs_stripe_size,
	&sysfs_partial_stripes_expensive,
	&sysfs_sequential_cutoff,
	&sysfs_clear_stats,
	&sysfs_running,
	&sysfs_state,
	&sysfs_label,
	&sysfs_readahead,
#ifdef CONFIG_BCACHE_DEBUG
	&sysfs_verify,
	&sysfs_bypass_torture_test,
#endif
	NULL
};
KTYPE(bch_cached_dev);

SHOW(bch_flash_dev)
{
	struct bcache_device *d = container_of(kobj, struct bcache_device,
					       kobj);
	struct uuid_entry *u = &d->c->uuids[d->id];

	sysfs_printf(data_csum,	"%i", d->data_csum);
	sysfs_hprint(size,	u->sectors << 9);

	if (attr == &sysfs_label) {
		memcpy(buf, u->label, SB_LABEL_SIZE);
		buf[SB_LABEL_SIZE] = '\0';
		strcat(buf, "\n");
		return strlen(buf);
	}

	return 0;
}

STORE(__bch_flash_dev)
{
	struct bcache_device *d = container_of(kobj, struct bcache_device,
					       kobj);
	struct uuid_entry *u = &d->c->uuids[d->id];

	sysfs_strtoul(data_csum,	d->data_csum);

	if (attr == &sysfs_size) {
		uint64_t v;

		strtoi_h_or_return(buf, v);

		u->sectors = v >> 9;
		bch_uuid_write(d->c);
		set_capacity(d->disk, u->sectors);
	}

	if (attr == &sysfs_label) {
		memcpy(u->label, buf, SB_LABEL_SIZE);
		bch_uuid_write(d->c);
	}

	if (attr == &sysfs_unregister) {
		set_bit(BCACHE_DEV_DETACHING, &d->flags);
		bcache_device_stop(d);
	}

	return size;
}
STORE_LOCKED(bch_flash_dev)

static struct attribute *bch_flash_dev_files[] = {
	&sysfs_unregister,
#if 0
	&sysfs_data_csum,
#endif
	&sysfs_label,
	&sysfs_size,
	NULL
};
KTYPE(bch_flash_dev);

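/*
 * Helpers for the bset_tree_stats file: bch_btree_map_nodes() visits every
 * btree node and calls bch_btree_bset_stats() on each; returning
 * MAP_CONTINUE tells the map code to keep walking.
 */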
struct bset_stats_op {
	struct btree_op op;
	size_t nodes;
	struct bset_stats stats;
};

static int bch_btree_bset_stats(struct btree_op *b_op, struct btree *b)
{
	struct bset_stats_op *op = container_of(b_op, struct bset_stats_op, op);

	op->nodes++;
	bch_btree_keys_stats(&b->keys, &op->stats);

	return MAP_CONTINUE;
}

static int bch_bset_print_stats(struct cache_set *c, char *buf)
{
	struct bset_stats_op op;
	int ret;

	memset(&op, 0, sizeof(op));
	bch_btree_op_init(&op.op, -1);

	ret = bch_btree_map_nodes(&op.op, c, &ZERO_KEY, bch_btree_bset_stats);
	if (ret < 0)
		return ret;

	return snprintf(buf, PAGE_SIZE,
			"btree nodes:		%zu\n"
			"written sets:		%zu\n"
			"unwritten sets:		%zu\n"
			"written key bytes:	%zu\n"
			"unwritten key bytes:	%zu\n"
			"floats:			%zu\n"
			"failed:			%zu\n",
			op.nodes,
			op.stats.sets_written, op.stats.sets_unwritten,
			op.stats.bytes_written, op.stats.bytes_unwritten,
			op.stats.floats, op.stats.failed);
}

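/*
 * The root node may be replaced (e.g. by a split) between reading c->root
 * and taking the read lock, so loop until the node we locked is still the
 * root before walking its keys.
 */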
static unsigned int bch_root_usage(struct cache_set *c)
{
	unsigned int bytes = 0;
	struct bkey *k;
	struct btree *b;
	struct btree_iter iter;

	goto lock_root;

	do {
		rw_unlock(false, b);
lock_root:
		b = c->root;
		rw_lock(false, b, b->level);
	} while (b != c->root);

	for_each_key_filter(&b->keys, k, &iter, bch_ptr_bad)
		bytes += bkey_bytes(k);

	rw_unlock(false, b);

	return (bytes * 100) / btree_bytes(c);
}

static size_t bch_cache_size(struct cache_set *c)
{
	size_t ret = 0;
	struct btree *b;

	mutex_lock(&c->bucket_lock);
	list_for_each_entry(b, &c->btree_cache, list)
		ret += 1 << (b->keys.page_order + PAGE_SHIFT);

	mutex_unlock(&c->bucket_lock);
	return ret;
}

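/*
 * Longest collision chain in the bucket hash table; a cheap indicator of
 * how well the hash is distributing buckets.
 */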
static unsigned int bch_cache_max_chain(struct cache_set *c)
{
	unsigned int ret = 0;
	struct hlist_head *h;

	mutex_lock(&c->bucket_lock);

	for (h = c->bucket_hash;
	     h < c->bucket_hash + (1 << BUCKET_HASH_BITS);
	     h++) {
		unsigned int i = 0;
		struct hlist_node *p;

		hlist_for_each(p, h)
			i++;

		ret = max(ret, i);
	}

	mutex_unlock(&c->bucket_lock);
	return ret;
}

static unsigned int bch_btree_used(struct cache_set *c)
{
	return div64_u64(c->gc_stats.key_bytes * 100,
			 (c->gc_stats.nodes ?: 1) * btree_bytes(c));
}

static unsigned int bch_average_key_size(struct cache_set *c)
{
	return c->gc_stats.nkeys
		? div64_u64(c->gc_stats.data, c->gc_stats.nkeys)
		: 0;
}

SHOW(__bch_cache_set)
{
	struct cache_set *c = container_of(kobj, struct cache_set, kobj);

	sysfs_print(synchronous,		CACHE_SYNC(&c->sb));
	sysfs_print(journal_delay_ms,		c->journal_delay_ms);
	sysfs_hprint(bucket_size,		bucket_bytes(c));
	sysfs_hprint(block_size,		block_bytes(c));
	sysfs_print(tree_depth,			c->root->level);
	sysfs_print(root_usage_percent,		bch_root_usage(c));

	sysfs_hprint(btree_cache_size,		bch_cache_size(c));
	sysfs_print(btree_cache_max_chain,	bch_cache_max_chain(c));
	sysfs_print(cache_available_percent,	100 - c->gc_stats.in_use);

	sysfs_print_time_stats(&c->btree_gc_time,	btree_gc, sec, ms);
	sysfs_print_time_stats(&c->btree_split_time,	btree_split, sec, us);
	sysfs_print_time_stats(&c->sort.time,		btree_sort, ms, us);
	sysfs_print_time_stats(&c->btree_read_time,	btree_read, ms, us);

	sysfs_print(btree_used_percent,	bch_btree_used(c));
	sysfs_print(btree_nodes,	c->gc_stats.nodes);
	sysfs_hprint(average_key_size,	bch_average_key_size(c));

	sysfs_print(cache_read_races,
		    atomic_long_read(&c->cache_read_races));

	sysfs_print(reclaim,
		    atomic_long_read(&c->reclaim));

	sysfs_print(flush_write,
		    atomic_long_read(&c->flush_write));

	sysfs_print(retry_flush_write,
		    atomic_long_read(&c->retry_flush_write));

	sysfs_print(writeback_keys_done,
		    atomic_long_read(&c->writeback_keys_done));
	sysfs_print(writeback_keys_failed,
		    atomic_long_read(&c->writeback_keys_failed));

	if (attr == &sysfs_errors)
		return bch_snprint_string_list(buf, PAGE_SIZE, error_actions,
					       c->on_error);

	/* See count_io_errors() for why 88 */
	sysfs_print(io_error_halflife,	c->error_decay * 88);
	sysfs_print(io_error_limit,	c->error_limit);

	sysfs_hprint(congested,
		     ((uint64_t) bch_get_congested(c)) << 9);
	sysfs_print(congested_read_threshold_us,
		    c->congested_read_threshold_us);
	sysfs_print(congested_write_threshold_us,
		    c->congested_write_threshold_us);

	sysfs_print(active_journal_entries,	fifo_used(&c->journal.pin));
	sysfs_printf(verify,			"%i", c->verify);
	sysfs_printf(key_merging_disabled,	"%i", c->key_merging_disabled);
	sysfs_printf(expensive_debug_checks,
		     "%i", c->expensive_debug_checks);
	sysfs_printf(gc_always_rewrite,		"%i", c->gc_always_rewrite);
	sysfs_printf(btree_shrinker_disabled,	"%i", c->shrinker_disabled);
	sysfs_printf(copy_gc_enabled,		"%i", c->copy_gc_enabled);
	sysfs_printf(io_disable,		"%i",
		     test_bit(CACHE_SET_IO_DISABLE, &c->flags));

	if (attr == &sysfs_bset_tree_stats)
		return bch_bset_print_stats(c, buf);

	return 0;
}
SHOW_LOCKED(bch_cache_set)

STORE(__bch_cache_set)
{
	struct cache_set *c = container_of(kobj, struct cache_set, kobj);
	ssize_t v;

	if (attr == &sysfs_unregister)
		bch_cache_set_unregister(c);

	if (attr == &sysfs_stop)
		bch_cache_set_stop(c);

	if (attr == &sysfs_synchronous) {
		bool sync = strtoul_or_return(buf);

		if (sync != CACHE_SYNC(&c->sb)) {
			SET_CACHE_SYNC(&c->sb, sync);
			bcache_write_super(c);
		}
	}

	if (attr == &sysfs_flash_vol_create) {
		int r;
		uint64_t v;

		strtoi_h_or_return(buf, v);

		r = bch_flash_dev_create(c, v);
		if (r)
			return r;
	}

	if (attr == &sysfs_clear_stats) {
		atomic_long_set(&c->writeback_keys_done,	0);
		atomic_long_set(&c->writeback_keys_failed,	0);

		memset(&c->gc_stats, 0, sizeof(struct gc_stat));
		bch_cache_accounting_clear(&c->accounting);
	}

	if (attr == &sysfs_trigger_gc) {
		/*
		 * The garbage collection thread only works when
		 * sectors_to_gc < 0; when users write to the sysfs entry
		 * trigger_gc, most of the time they want to forcibly trigger
		 * garbage collection. Setting c->sectors_to_gc to -1 here
		 * gives gc_should_run() a chance to permit the gc thread to
		 * run. "A chance" because c->sectors_to_gc may still be set
		 * to another positive value before gc_should_run() is
		 * evaluated, so writing to trigger_gc does not guarantee
		 * that a gc run actually takes place.
		 */
		atomic_set(&c->sectors_to_gc, -1);
		wake_up_gc(c);
	}

	if (attr == &sysfs_prune_cache) {
		struct shrink_control sc;

		sc.gfp_mask = GFP_KERNEL;
		sc.nr_to_scan = strtoul_or_return(buf);
		c->shrink.scan_objects(&c->shrink, &sc);
	}

	sysfs_strtoul(congested_read_threshold_us,
		      c->congested_read_threshold_us);
	sysfs_strtoul(congested_write_threshold_us,
		      c->congested_write_threshold_us);

	if (attr == &sysfs_errors) {
		v = __sysfs_match_string(error_actions, -1, buf);
		if (v < 0)
			return v;

		c->on_error = v;
	}

	if (attr == &sysfs_io_error_limit)
		c->error_limit = strtoul_or_return(buf);

	/* See count_io_errors() for why 88 */
	if (attr == &sysfs_io_error_halflife)
		c->error_decay = strtoul_or_return(buf) / 88;

	if (attr == &sysfs_io_disable) {
		v = strtoul_or_return(buf);
		if (v) {
			if (test_and_set_bit(CACHE_SET_IO_DISABLE,
					     &c->flags))
				pr_warn("CACHE_SET_IO_DISABLE already set");
		} else {
			if (!test_and_clear_bit(CACHE_SET_IO_DISABLE,
						&c->flags))
				pr_warn("CACHE_SET_IO_DISABLE already cleared");
		}
	}

	sysfs_strtoul(journal_delay_ms,		c->journal_delay_ms);
	sysfs_strtoul(verify,			c->verify);
	sysfs_strtoul(key_merging_disabled,	c->key_merging_disabled);
	sysfs_strtoul(expensive_debug_checks,	c->expensive_debug_checks);
	sysfs_strtoul(gc_always_rewrite,	c->gc_always_rewrite);
	sysfs_strtoul(btree_shrinker_disabled,	c->shrinker_disabled);
	sysfs_strtoul(copy_gc_enabled,		c->copy_gc_enabled);

	return size;
}
STORE_LOCKED(bch_cache_set)

SHOW(bch_cache_set_internal)
{
	struct cache_set *c = container_of(kobj, struct cache_set, internal);

	return bch_cache_set_show(&c->kobj, attr, buf);
}

STORE(bch_cache_set_internal)
{
	struct cache_set *c = container_of(kobj, struct cache_set, internal);

	return bch_cache_set_store(&c->kobj, attr, buf, size);
}

static void bch_cache_set_internal_release(struct kobject *k)
{
}

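/*
 * Cache set attributes appear under /sys/fs/bcache/<set-uuid>/ (the
 * "internal" subset in its internal/ subdirectory). Illustrative usage:
 *   echo 128G > /sys/fs/bcache/<set-uuid>/flash_vol_create
 *   echo 1 > /sys/fs/bcache/<set-uuid>/internal/trigger_gc
 */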
static struct attribute *bch_cache_set_files[] = {
	&sysfs_unregister,
	&sysfs_stop,
	&sysfs_synchronous,
	&sysfs_journal_delay_ms,
	&sysfs_flash_vol_create,

	&sysfs_bucket_size,
	&sysfs_block_size,
	&sysfs_tree_depth,
	&sysfs_root_usage_percent,
	&sysfs_btree_cache_size,
	&sysfs_cache_available_percent,

	&sysfs_average_key_size,

	&sysfs_errors,
	&sysfs_io_error_limit,
	&sysfs_io_error_halflife,
	&sysfs_congested,
	&sysfs_congested_read_threshold_us,
	&sysfs_congested_write_threshold_us,
	&sysfs_clear_stats,
	NULL
};
KTYPE(bch_cache_set);

static struct attribute *bch_cache_set_internal_files[] = {
	&sysfs_active_journal_entries,

	sysfs_time_stats_attribute_list(btree_gc, sec, ms)
	sysfs_time_stats_attribute_list(btree_split, sec, us)
	sysfs_time_stats_attribute_list(btree_sort, ms, us)
	sysfs_time_stats_attribute_list(btree_read, ms, us)

	&sysfs_btree_nodes,
	&sysfs_btree_used_percent,
	&sysfs_btree_cache_max_chain,

	&sysfs_bset_tree_stats,
	&sysfs_cache_read_races,
	&sysfs_reclaim,
	&sysfs_flush_write,
	&sysfs_retry_flush_write,
	&sysfs_writeback_keys_done,
	&sysfs_writeback_keys_failed,

	&sysfs_trigger_gc,
	&sysfs_prune_cache,
#ifdef CONFIG_BCACHE_DEBUG
	&sysfs_verify,
	&sysfs_key_merging_disabled,
	&sysfs_expensive_debug_checks,
#endif
	&sysfs_gc_always_rewrite,
	&sysfs_btree_shrinker_disabled,
	&sysfs_copy_gc_enabled,
	&sysfs_io_disable,
	NULL
};
KTYPE(bch_cache_set_internal);

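/*
 * Sort comparator for bucket priorities, descending; the uint16_t operands
 * cannot overflow the int subtraction. priority_stats below sorts a copy
 * of the priorities with this and reports 31 quantiles plus bucket usage
 * percentages.
 */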
static int __bch_cache_cmp(const void *l, const void *r)
{
	return *((uint16_t *)r) - *((uint16_t *)l);
}

SHOW(__bch_cache)
{
	struct cache *ca = container_of(kobj, struct cache, kobj);

	sysfs_hprint(bucket_size,	bucket_bytes(ca));
	sysfs_hprint(block_size,	block_bytes(ca));
	sysfs_print(nbuckets,		ca->sb.nbuckets);
	sysfs_print(discard,		ca->discard);
	sysfs_hprint(written, atomic_long_read(&ca->sectors_written) << 9);
	sysfs_hprint(btree_written,
		     atomic_long_read(&ca->btree_sectors_written) << 9);
	sysfs_hprint(metadata_written,
		     (atomic_long_read(&ca->meta_sectors_written) +
		      atomic_long_read(&ca->btree_sectors_written)) << 9);

	sysfs_print(io_errors,
		    atomic_read(&ca->io_errors) >> IO_ERROR_SHIFT);

	if (attr == &sysfs_cache_replacement_policy)
		return bch_snprint_string_list(buf, PAGE_SIZE,
					       cache_replacement_policies,
					       CACHE_REPLACEMENT(&ca->sb));

	if (attr == &sysfs_priority_stats) {
		struct bucket *b;
		size_t n = ca->sb.nbuckets, i;
		size_t unused = 0, available = 0, dirty = 0, meta = 0;
		uint64_t sum = 0;
		/* Compute 31 quantiles */
		uint16_t q[31], *p, *cached;
		ssize_t ret;

		cached = p = vmalloc(array_size(sizeof(uint16_t),
						ca->sb.nbuckets));
		if (!p)
			return -ENOMEM;

		mutex_lock(&ca->set->bucket_lock);
		for_each_bucket(b, ca) {
			if (!GC_SECTORS_USED(b))
				unused++;
			if (GC_MARK(b) == GC_MARK_RECLAIMABLE)
				available++;
			if (GC_MARK(b) == GC_MARK_DIRTY)
				dirty++;
			if (GC_MARK(b) == GC_MARK_METADATA)
				meta++;
		}

		for (i = ca->sb.first_bucket; i < n; i++)
			p[i] = ca->buckets[i].prio;
		mutex_unlock(&ca->set->bucket_lock);

		sort(p, n, sizeof(uint16_t), __bch_cache_cmp, NULL);

		while (n &&
		       !cached[n - 1])
			--n;

		unused = ca->sb.nbuckets - n;

		while (cached < p + n &&
		       *cached == BTREE_PRIO)
			cached++, n--;

		for (i = 0; i < n; i++)
			sum += INITIAL_PRIO - cached[i];

		if (n)
			do_div(sum, n);

		for (i = 0; i < ARRAY_SIZE(q); i++)
			q[i] = INITIAL_PRIO - cached[n * (i + 1) /
				(ARRAY_SIZE(q) + 1)];

		vfree(p);

		ret = scnprintf(buf, PAGE_SIZE,
				"Unused:		%zu%%\n"
				"Clean:		%zu%%\n"
				"Dirty:		%zu%%\n"
				"Metadata:	%zu%%\n"
				"Average:	%llu\n"
				"Sectors per Q:	%zu\n"
				"Quantiles:	[",
				unused * 100 / (size_t) ca->sb.nbuckets,
				available * 100 / (size_t) ca->sb.nbuckets,
				dirty * 100 / (size_t) ca->sb.nbuckets,
				meta * 100 / (size_t) ca->sb.nbuckets, sum,
				n * ca->sb.bucket_size / (ARRAY_SIZE(q) + 1));

		for (i = 0; i < ARRAY_SIZE(q); i++)
			ret += scnprintf(buf + ret, PAGE_SIZE - ret,
					 "%u ", q[i]);
		ret--;

		ret += scnprintf(buf + ret, PAGE_SIZE - ret, "]\n");

		return ret;
	}

	return 0;
}
SHOW_LOCKED(bch_cache)

STORE(__bch_cache)
{
	struct cache *ca = container_of(kobj, struct cache, kobj);
	ssize_t v;

	if (attr == &sysfs_discard) {
		bool v = strtoul_or_return(buf);

		if (blk_queue_discard(bdev_get_queue(ca->bdev)))
			ca->discard = v;

		if (v != CACHE_DISCARD(&ca->sb)) {
			SET_CACHE_DISCARD(&ca->sb, v);
			bcache_write_super(ca->set);
		}
	}

	if (attr == &sysfs_cache_replacement_policy) {
		v = __sysfs_match_string(cache_replacement_policies, -1, buf);
		if (v < 0)
			return v;

		if ((unsigned int) v != CACHE_REPLACEMENT(&ca->sb)) {
			mutex_lock(&ca->set->bucket_lock);
			SET_CACHE_REPLACEMENT(&ca->sb, v);
			mutex_unlock(&ca->set->bucket_lock);

			bcache_write_super(ca->set);
		}
	}

	if (attr == &sysfs_clear_stats) {
		atomic_long_set(&ca->sectors_written, 0);
		atomic_long_set(&ca->btree_sectors_written, 0);
		atomic_long_set(&ca->meta_sectors_written, 0);
		atomic_set(&ca->io_count, 0);
		atomic_set(&ca->io_errors, 0);
	}

	return size;
}
STORE_LOCKED(bch_cache)

static struct attribute *bch_cache_files[] = {
	&sysfs_bucket_size,
	&sysfs_block_size,
	&sysfs_nbuckets,
	&sysfs_priority_stats,
	&sysfs_discard,
	&sysfs_written,
	&sysfs_btree_written,
	&sysfs_metadata_written,
	&sysfs_io_errors,
	&sysfs_clear_stats,
	&sysfs_cache_replacement_policy,
	NULL
};
KTYPE(bch_cache);
1035