xref: /linux/drivers/md/bcache/sysfs.c (revision 31a1b26f16e822577def5402ffc79cfe4aed2db9)
/*
 * bcache sysfs interfaces
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */

#include "bcache.h"
#include "sysfs.h"
#include "btree.h"
#include "request.h"
#include "writeback.h"

#include <linux/blkdev.h>
#include <linux/sort.h>

static const char * const cache_replacement_policies[] = {
	"lru",
	"fifo",
	"random",
	NULL
};

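/*
 * The attribute declarations below come from bcache's sysfs.h.  A minimal
 * sketch of what they expand to (see sysfs.h for the authoritative
 * definitions) -- each one just declares a bare struct attribute with the
 * matching mode bits:
 *
 *	#define __sysfs_attribute(_name, _mode)				\
 *		static struct attribute sysfs_##_name =			\
 *			{ .name = #_name, .mode = _mode }
 *
 * so write_attribute(attach) yields sysfs_attach as a write-only file,
 * read_attribute() a read-only one, and rw_attribute() both.  The actual
 * show()/store() dispatch is wired up per kobject type by KTYPE() below.
 */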
write_attribute(attach);
write_attribute(detach);
write_attribute(unregister);
write_attribute(stop);
write_attribute(clear_stats);
write_attribute(trigger_gc);
write_attribute(prune_cache);
write_attribute(flash_vol_create);

read_attribute(bucket_size);
read_attribute(block_size);
read_attribute(nbuckets);
read_attribute(tree_depth);
read_attribute(root_usage_percent);
read_attribute(priority_stats);
read_attribute(btree_cache_size);
read_attribute(btree_cache_max_chain);
read_attribute(cache_available_percent);
read_attribute(written);
read_attribute(btree_written);
read_attribute(metadata_written);
read_attribute(active_journal_entries);

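/*
 * sysfs_time_stats_attribute() declares a family of read-only files per
 * statistic, in the given frequency/duration units -- approximately
 * (hedged sketch; see sysfs.h):
 *
 *	read_attribute(name ## _average_frequency_ ## frequency_units);
 *	read_attribute(name ## _average_duration_ ## duration_units);
 *	read_attribute(name ## _max_duration_ ## duration_units);
 *
 * e.g. btree_gc gets files like btree_gc_average_duration_ms, printed by
 * sysfs_print_time_stats() further down.
 */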
sysfs_time_stats_attribute(btree_gc,	sec, ms);
sysfs_time_stats_attribute(btree_split, sec, us);
sysfs_time_stats_attribute(btree_sort,	ms,  us);
sysfs_time_stats_attribute(btree_read,	ms,  us);
sysfs_time_stats_attribute(try_harder,	ms,  us);

read_attribute(btree_nodes);
read_attribute(btree_used_percent);
read_attribute(average_key_size);
read_attribute(dirty_data);
read_attribute(bset_tree_stats);

read_attribute(state);
read_attribute(cache_read_races);
read_attribute(writeback_keys_done);
read_attribute(writeback_keys_failed);
read_attribute(io_errors);
read_attribute(congested);
rw_attribute(congested_read_threshold_us);
rw_attribute(congested_write_threshold_us);

rw_attribute(sequential_cutoff);
rw_attribute(sequential_merge);
rw_attribute(data_csum);
rw_attribute(cache_mode);
rw_attribute(writeback_metadata);
rw_attribute(writeback_running);
rw_attribute(writeback_percent);
rw_attribute(writeback_delay);
rw_attribute(writeback_rate);

rw_attribute(writeback_rate_update_seconds);
rw_attribute(writeback_rate_d_term);
rw_attribute(writeback_rate_p_term_inverse);
rw_attribute(writeback_rate_d_smooth);
read_attribute(writeback_rate_debug);

read_attribute(stripe_size);
read_attribute(partial_stripes_expensive);

rw_attribute(synchronous);
rw_attribute(journal_delay_ms);
rw_attribute(discard);
rw_attribute(running);
rw_attribute(label);
rw_attribute(readahead);
rw_attribute(io_error_limit);
rw_attribute(io_error_halflife);
rw_attribute(verify);
rw_attribute(key_merging_disabled);
rw_attribute(gc_always_rewrite);
rw_attribute(freelist_percent);
rw_attribute(cache_replacement_policy);
rw_attribute(btree_shrinker_disabled);
rw_attribute(copy_gc_enabled);
rw_attribute(size);

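/*
 * SHOW()/STORE() open-code the sysfs ->show()/->store() signatures.  A
 * minimal sketch, assuming the definitions in sysfs.h:
 *
 *	#define SHOW(fn)						\
 *	static ssize_t fn ## _show(struct kobject *kobj,		\
 *				   struct attribute *attr, char *buf)
 *
 * Each sysfs_print*()/var_print*() helper in the body is an
 * "if (attr == &sysfs_<name>) return ..." statement, so a handler is
 * really a dispatch table: the first matching attribute returns.
 * SHOW_LOCKED(fn) generates a fn_show() wrapper that calls
 * __fn_show() under bch_register_lock.
 */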
SHOW(__bch_cached_dev)
{
	struct cached_dev *dc = container_of(kobj, struct cached_dev,
					     disk.kobj);
	const char *states[] = { "no cache", "clean", "dirty", "inconsistent" };

#define var(stat)		(dc->stat)

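	/*
	 * bch_cache_modes + 1 skips the first entry, "default" (assuming
	 * the table in super.c), which is only meaningful in the
	 * superblock, so the on-disk mode indexes the list directly.
	 */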
	if (attr == &sysfs_cache_mode)
		return bch_snprint_string_list(buf, PAGE_SIZE,
					       bch_cache_modes + 1,
					       BDEV_CACHE_MODE(&dc->sb));

	sysfs_printf(data_csum,		"%i", dc->disk.data_csum);
	var_printf(verify,		"%i");
	var_printf(writeback_metadata,	"%i");
	var_printf(writeback_running,	"%i");
	var_print(writeback_delay);
	var_print(writeback_percent);
	sysfs_print(writeback_rate,	dc->writeback_rate.rate);

	var_print(writeback_rate_update_seconds);
	var_print(writeback_rate_d_term);
	var_print(writeback_rate_p_term_inverse);
	var_print(writeback_rate_d_smooth);

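	/*
	 * The writeback rate is driven by a proportional-differential
	 * controller in writeback.c: rate_p_term_inverse is the inverse
	 * proportional gain, rate_d_term/rate_d_smooth shape the
	 * derivative term, and rate_change/rate_derivative below are its
	 * most recent outputs (field names per writeback.h).
	 */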
	if (attr == &sysfs_writeback_rate_debug) {
		char dirty[20];
		char derivative[20];
		char target[20];
		bch_hprint(dirty,
			   bcache_dev_sectors_dirty(&dc->disk) << 9);
		bch_hprint(derivative,	dc->writeback_rate_derivative << 9);
		bch_hprint(target,	dc->writeback_rate_target << 9);

		return sprintf(buf,
			       "rate:\t\t%u\n"
			       "change:\t\t%i\n"
			       "dirty:\t\t%s\n"
			       "derivative:\t%s\n"
			       "target:\t\t%s\n",
			       dc->writeback_rate.rate,
			       dc->writeback_rate_change,
			       dirty, derivative, target);
	}

	sysfs_hprint(dirty_data,
		     bcache_dev_sectors_dirty(&dc->disk) << 9);

	sysfs_hprint(stripe_size,	(1 << dc->disk.stripe_size_bits) << 9);
	var_printf(partial_stripes_expensive,	"%u");

	var_printf(sequential_merge,	"%i");
	var_hprint(sequential_cutoff);
	var_hprint(readahead);

	sysfs_print(running,		atomic_read(&dc->running));
	sysfs_print(state,		states[BDEV_STATE(&dc->sb)]);

	if (attr == &sysfs_label) {
		memcpy(buf, dc->sb.label, SB_LABEL_SIZE);
		/* the label may not be NUL terminated: add one in bounds */
		buf[SB_LABEL_SIZE] = '\0';
		strcat(buf, "\n");
		return strlen(buf);
	}

#undef var
	return 0;
}
SHOW_LOCKED(bch_cached_dev)

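/*
 * Writes from userspace land here, e.g. (paths are illustrative, for a
 * backing device registered as bcache0):
 *
 *	echo writeback > /sys/block/bcache0/bcache/cache_mode
 *	echo 10 > /sys/block/bcache0/bcache/writeback_percent
 *	echo <set-uuid> > /sys/block/bcache0/bcache/attach
 *
 * As in the show path, each sysfs_strtoul()-style macro below is an
 * "if (attr == ...) return ..." statement, so ordering matters: the
 * first match consumes the write.
 */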
STORE(__cached_dev)
{
	struct cached_dev *dc = container_of(kobj, struct cached_dev,
					     disk.kobj);
	unsigned v = size;
	struct cache_set *c;
	struct kobj_uevent_env *env;

#define d_strtoul(var)		sysfs_strtoul(var, dc->var)
#define d_strtoi_h(var)		sysfs_hatoi(var, dc->var)

	sysfs_strtoul(data_csum,	dc->disk.data_csum);
	d_strtoul(verify);
	d_strtoul(writeback_metadata);
	d_strtoul(writeback_running);
	d_strtoul(writeback_delay);
	sysfs_strtoul_clamp(writeback_rate,
			    dc->writeback_rate.rate, 1, 1000000);
	sysfs_strtoul_clamp(writeback_percent, dc->writeback_percent, 0, 40);

	d_strtoul(writeback_rate_update_seconds);
	d_strtoul(writeback_rate_d_term);
	/*
	 * Only the clamped conversion here: an unclamped d_strtoul() for
	 * the same attribute would return first and make the clamp below
	 * dead code.
	 */
	sysfs_strtoul_clamp(writeback_rate_p_term_inverse,
			    dc->writeback_rate_p_term_inverse, 1, INT_MAX);
	d_strtoul(writeback_rate_d_smooth);

	d_strtoul(sequential_merge);
	d_strtoi_h(sequential_cutoff);
	d_strtoi_h(readahead);

	if (attr == &sysfs_clear_stats)
		bch_cache_accounting_clear(&dc->accounting);

	if (attr == &sysfs_running &&
	    strtoul_or_return(buf))
		bch_cached_dev_run(dc);

	if (attr == &sysfs_cache_mode) {
		ssize_t v = bch_read_string_list(buf, bch_cache_modes + 1);

		if (v < 0)
			return v;

		if ((unsigned) v != BDEV_CACHE_MODE(&dc->sb)) {
			SET_BDEV_CACHE_MODE(&dc->sb, v);
			bch_write_bdev_super(dc, NULL);
		}
	}

	if (attr == &sysfs_label) {
		/* note: endlines are preserved */
		memcpy(dc->sb.label, buf, SB_LABEL_SIZE);
		bch_write_bdev_super(dc, NULL);
		if (dc->disk.c) {
			memcpy(dc->disk.c->uuids[dc->disk.id].label,
			       buf, SB_LABEL_SIZE);
			bch_uuid_write(dc->disk.c);
		}
		env = kzalloc(sizeof(struct kobj_uevent_env), GFP_KERNEL);
		if (!env)
			return -ENOMEM;
		add_uevent_var(env, "DRIVER=bcache");
		add_uevent_var(env, "CACHED_UUID=%pU", dc->sb.uuid);
		add_uevent_var(env, "CACHED_LABEL=%s", buf);
		kobject_uevent_env(
			&disk_to_dev(dc->disk.disk)->kobj, KOBJ_CHANGE, env->envp);
		kfree(env);
	}

	if (attr == &sysfs_attach) {
		if (bch_parse_uuid(buf, dc->sb.set_uuid) < 16)
			return -EINVAL;

		list_for_each_entry(c, &bch_cache_sets, list) {
			v = bch_cached_dev_attach(dc, c);
			if (!v)
				return size;
		}

		pr_err("Can't attach %s: cache set not found", buf);
		size = v;
	}

	if (attr == &sysfs_detach && dc->disk.c)
		bch_cached_dev_detach(dc);

	if (attr == &sysfs_stop)
		bcache_device_stop(&dc->disk);

	return size;
}

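/*
 * Locked wrapper around __cached_dev_store(); it also pokes the writeback
 * machinery afterwards, so a newly written writeback_running or
 * writeback_percent takes effect without waiting for the next update
 * timer tick.
 */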
STORE(bch_cached_dev)
{
	struct cached_dev *dc = container_of(kobj, struct cached_dev,
					     disk.kobj);

	mutex_lock(&bch_register_lock);
	size = __cached_dev_store(kobj, attr, buf, size);

	if (attr == &sysfs_writeback_running)
		bch_writeback_queue(dc);

	if (attr == &sysfs_writeback_percent)
		schedule_delayed_work(&dc->writeback_rate_update,
				      dc->writeback_rate_update_seconds * HZ);

	mutex_unlock(&bch_register_lock);
	return size;
}

static struct attribute *bch_cached_dev_files[] = {
	&sysfs_attach,
	&sysfs_detach,
	&sysfs_stop,
#if 0
	&sysfs_data_csum,
#endif
	&sysfs_cache_mode,
	&sysfs_writeback_metadata,
	&sysfs_writeback_running,
	&sysfs_writeback_delay,
	&sysfs_writeback_percent,
	&sysfs_writeback_rate,
	&sysfs_writeback_rate_update_seconds,
	&sysfs_writeback_rate_d_term,
	&sysfs_writeback_rate_p_term_inverse,
	&sysfs_writeback_rate_d_smooth,
	&sysfs_writeback_rate_debug,
	&sysfs_dirty_data,
	&sysfs_stripe_size,
	&sysfs_partial_stripes_expensive,
	&sysfs_sequential_cutoff,
	&sysfs_sequential_merge,
	&sysfs_clear_stats,
	&sysfs_running,
	&sysfs_state,
	&sysfs_label,
	&sysfs_readahead,
#ifdef CONFIG_BCACHE_DEBUG
	&sysfs_verify,
#endif
	NULL
};
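/*
 * KTYPE(bch_cached_dev) is assumed (per sysfs.h) to expand to roughly:
 *
 *	struct kobj_type bch_cached_dev_ktype = {
 *		.release	= bch_cached_dev_release,
 *		.sysfs_ops	= &((const struct sysfs_ops) {
 *			.show	= bch_cached_dev_show,
 *			.store	= bch_cached_dev_store,
 *		}),
 *		.default_attrs	= bch_cached_dev_files,
 *	};
 *
 * tying the attribute list above to the show/store handlers.
 */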
KTYPE(bch_cached_dev);

SHOW(bch_flash_dev)
{
	struct bcache_device *d = container_of(kobj, struct bcache_device,
					       kobj);
	struct uuid_entry *u = &d->c->uuids[d->id];

	sysfs_printf(data_csum,	"%i", d->data_csum);
	sysfs_hprint(size,	u->sectors << 9);

	if (attr == &sysfs_label) {
		memcpy(buf, u->label, SB_LABEL_SIZE);
		/* as above, terminate within bounds before appending */
		buf[SB_LABEL_SIZE] = '\0';
		strcat(buf, "\n");
		return strlen(buf);
	}

	return 0;
}

STORE(__bch_flash_dev)
{
	struct bcache_device *d = container_of(kobj, struct bcache_device,
					       kobj);
	struct uuid_entry *u = &d->c->uuids[d->id];

	sysfs_strtoul(data_csum,	d->data_csum);

	if (attr == &sysfs_size) {
		uint64_t v;
		strtoi_h_or_return(buf, v);

		u->sectors = v >> 9;
		bch_uuid_write(d->c);
		set_capacity(d->disk, u->sectors);
	}

	if (attr == &sysfs_label) {
		memcpy(u->label, buf, SB_LABEL_SIZE);
		bch_uuid_write(d->c);
	}

	if (attr == &sysfs_unregister) {
		atomic_set(&d->detaching, 1);
		bcache_device_stop(d);
	}

	return size;
}
STORE_LOCKED(bch_flash_dev)

static struct attribute *bch_flash_dev_files[] = {
	&sysfs_unregister,
#if 0
	&sysfs_data_csum,
#endif
	&sysfs_label,
	&sysfs_size,
	NULL
};
KTYPE(bch_flash_dev);

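/*
 * Note: the helpers below are nested functions, a GCC extension this file
 * relies on; they capture nothing beyond their struct cache_set *
 * argument, so they could equally be file-scope statics.
 */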
SHOW(__bch_cache_set)
{
	unsigned root_usage(struct cache_set *c)
	{
		unsigned bytes = 0;
		struct bkey *k;
		struct btree *b;
		struct btree_iter iter;

		goto lock_root;

		do {
			rw_unlock(false, b);
lock_root:
			b = c->root;
			rw_lock(false, b, b->level);
		} while (b != c->root);

		for_each_key_filter(b, k, &iter, bch_ptr_bad)
			bytes += bkey_bytes(k);

		rw_unlock(false, b);

		return (bytes * 100) / btree_bytes(c);
	}

	size_t cache_size(struct cache_set *c)
	{
		size_t ret = 0;
		struct btree *b;

		mutex_lock(&c->bucket_lock);
		list_for_each_entry(b, &c->btree_cache, list)
			ret += 1 << (b->page_order + PAGE_SHIFT);

		mutex_unlock(&c->bucket_lock);
		return ret;
	}

	unsigned cache_max_chain(struct cache_set *c)
	{
		unsigned ret = 0;
		struct hlist_head *h;

		mutex_lock(&c->bucket_lock);

		for (h = c->bucket_hash;
		     h < c->bucket_hash + (1 << BUCKET_HASH_BITS);
		     h++) {
			unsigned i = 0;
			struct hlist_node *p;

			hlist_for_each(p, h)
				i++;

			ret = max(ret, i);
		}

		mutex_unlock(&c->bucket_lock);
		return ret;
	}

	unsigned btree_used(struct cache_set *c)
	{
		return div64_u64(c->gc_stats.key_bytes * 100,
				 (c->gc_stats.nodes ?: 1) * btree_bytes(c));
	}

	unsigned average_key_size(struct cache_set *c)
	{
		return c->gc_stats.nkeys
			? div64_u64(c->gc_stats.data, c->gc_stats.nkeys)
			: 0;
	}

	struct cache_set *c = container_of(kobj, struct cache_set, kobj);

	sysfs_print(synchronous,		CACHE_SYNC(&c->sb));
	sysfs_print(journal_delay_ms,		c->journal_delay_ms);
	sysfs_hprint(bucket_size,		bucket_bytes(c));
	sysfs_hprint(block_size,		block_bytes(c));
	sysfs_print(tree_depth,			c->root->level);
	sysfs_print(root_usage_percent,		root_usage(c));

	sysfs_hprint(btree_cache_size,		cache_size(c));
	sysfs_print(btree_cache_max_chain,	cache_max_chain(c));
	sysfs_print(cache_available_percent,	100 - c->gc_stats.in_use);

	sysfs_print_time_stats(&c->btree_gc_time,	btree_gc, sec, ms);
	sysfs_print_time_stats(&c->btree_split_time,	btree_split, sec, us);
	sysfs_print_time_stats(&c->sort_time,		btree_sort, ms, us);
	sysfs_print_time_stats(&c->btree_read_time,	btree_read, ms, us);
	sysfs_print_time_stats(&c->try_harder_time,	try_harder, ms, us);

	sysfs_print(btree_used_percent,	btree_used(c));
	sysfs_print(btree_nodes,	c->gc_stats.nodes);
	sysfs_hprint(dirty_data,	c->gc_stats.dirty);
	sysfs_hprint(average_key_size,	average_key_size(c));

	sysfs_print(cache_read_races,
		    atomic_long_read(&c->cache_read_races));

	sysfs_print(writeback_keys_done,
		    atomic_long_read(&c->writeback_keys_done));
	sysfs_print(writeback_keys_failed,
		    atomic_long_read(&c->writeback_keys_failed));

	/* See count_io_errors() for why 88 */
	sysfs_print(io_error_halflife,	c->error_decay * 88);
	sysfs_print(io_error_limit,	c->error_limit >> IO_ERROR_SHIFT);

	sysfs_hprint(congested,
		     ((uint64_t) bch_get_congested(c)) << 9);
	sysfs_print(congested_read_threshold_us,
		    c->congested_read_threshold_us);
	sysfs_print(congested_write_threshold_us,
		    c->congested_write_threshold_us);

	sysfs_print(active_journal_entries,	fifo_used(&c->journal.pin));
	sysfs_printf(verify,			"%i", c->verify);
	sysfs_printf(key_merging_disabled,	"%i", c->key_merging_disabled);
	sysfs_printf(gc_always_rewrite,		"%i", c->gc_always_rewrite);
	sysfs_printf(btree_shrinker_disabled,	"%i", c->shrinker_disabled);
	sysfs_printf(copy_gc_enabled,		"%i", c->copy_gc_enabled);

	if (attr == &sysfs_bset_tree_stats)
		return bch_bset_print_stats(c, buf);

	return 0;
}
SHOW_LOCKED(bch_cache_set)

STORE(__bch_cache_set)
{
	struct cache_set *c = container_of(kobj, struct cache_set, kobj);

	if (attr == &sysfs_unregister)
		bch_cache_set_unregister(c);

	if (attr == &sysfs_stop)
		bch_cache_set_stop(c);

	if (attr == &sysfs_synchronous) {
		bool sync = strtoul_or_return(buf);

		if (sync != CACHE_SYNC(&c->sb)) {
			SET_CACHE_SYNC(&c->sb, sync);
			bcache_write_super(c);
		}
	}

	if (attr == &sysfs_flash_vol_create) {
		int r;
		uint64_t v;
		strtoi_h_or_return(buf, v);

		r = bch_flash_dev_create(c, v);
		if (r)
			return r;
	}

	if (attr == &sysfs_clear_stats) {
		atomic_long_set(&c->writeback_keys_done,	0);
		atomic_long_set(&c->writeback_keys_failed,	0);

		memset(&c->gc_stats, 0, sizeof(struct gc_stat));
		bch_cache_accounting_clear(&c->accounting);
	}

	if (attr == &sysfs_trigger_gc)
		bch_queue_gc(c);

	if (attr == &sysfs_prune_cache) {
		struct shrink_control sc;
		sc.gfp_mask = GFP_KERNEL;
		sc.nr_to_scan = strtoul_or_return(buf);
		c->shrink.shrink(&c->shrink, &sc);
	}

	sysfs_strtoul(congested_read_threshold_us,
		      c->congested_read_threshold_us);
	sysfs_strtoul(congested_write_threshold_us,
		      c->congested_write_threshold_us);

	if (attr == &sysfs_io_error_limit)
		c->error_limit = strtoul_or_return(buf) << IO_ERROR_SHIFT;

	/* See count_io_errors() for why 88 */
	if (attr == &sysfs_io_error_halflife)
		c->error_decay = strtoul_or_return(buf) / 88;

	sysfs_strtoul(journal_delay_ms,		c->journal_delay_ms);
	sysfs_strtoul(verify,			c->verify);
	sysfs_strtoul(key_merging_disabled,	c->key_merging_disabled);
	sysfs_strtoul(gc_always_rewrite,	c->gc_always_rewrite);
	sysfs_strtoul(btree_shrinker_disabled,	c->shrinker_disabled);
	sysfs_strtoul(copy_gc_enabled,		c->copy_gc_enabled);

	return size;
}
STORE_LOCKED(bch_cache_set)

SHOW(bch_cache_set_internal)
{
	struct cache_set *c = container_of(kobj, struct cache_set, internal);
	return bch_cache_set_show(&c->kobj, attr, buf);
}

STORE(bch_cache_set_internal)
{
	struct cache_set *c = container_of(kobj, struct cache_set, internal);
	return bch_cache_set_store(&c->kobj, attr, buf, size);
}

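/*
 * The internal kobject is embedded in struct cache_set and torn down with
 * it, so there is nothing to free here; an empty release callback keeps
 * the kobject core from complaining.
 */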
static void bch_cache_set_internal_release(struct kobject *k)
{
}

static struct attribute *bch_cache_set_files[] = {
	&sysfs_unregister,
	&sysfs_stop,
	&sysfs_synchronous,
	&sysfs_journal_delay_ms,
	&sysfs_flash_vol_create,

	&sysfs_bucket_size,
	&sysfs_block_size,
	&sysfs_tree_depth,
	&sysfs_root_usage_percent,
	&sysfs_btree_cache_size,
	&sysfs_cache_available_percent,

	&sysfs_average_key_size,
	&sysfs_dirty_data,

	&sysfs_io_error_limit,
	&sysfs_io_error_halflife,
	&sysfs_congested,
	&sysfs_congested_read_threshold_us,
	&sysfs_congested_write_threshold_us,
	&sysfs_clear_stats,
	NULL
};
KTYPE(bch_cache_set);

static struct attribute *bch_cache_set_internal_files[] = {
	&sysfs_active_journal_entries,

	sysfs_time_stats_attribute_list(btree_gc, sec, ms)
	sysfs_time_stats_attribute_list(btree_split, sec, us)
	sysfs_time_stats_attribute_list(btree_sort, ms, us)
	sysfs_time_stats_attribute_list(btree_read, ms, us)
	sysfs_time_stats_attribute_list(try_harder, ms, us)

	&sysfs_btree_nodes,
	&sysfs_btree_used_percent,
	&sysfs_btree_cache_max_chain,

	&sysfs_bset_tree_stats,
	&sysfs_cache_read_races,
	&sysfs_writeback_keys_done,
	&sysfs_writeback_keys_failed,

	&sysfs_trigger_gc,
	&sysfs_prune_cache,
#ifdef CONFIG_BCACHE_DEBUG
	&sysfs_verify,
	&sysfs_key_merging_disabled,
#endif
	&sysfs_gc_always_rewrite,
	&sysfs_btree_shrinker_disabled,
	&sysfs_copy_gc_enabled,
	NULL
};
KTYPE(bch_cache_set_internal);

SHOW(__bch_cache)
{
	struct cache *ca = container_of(kobj, struct cache, kobj);

	sysfs_hprint(bucket_size,	bucket_bytes(ca));
	sysfs_hprint(block_size,	block_bytes(ca));
	sysfs_print(nbuckets,		ca->sb.nbuckets);
	sysfs_print(discard,		ca->discard);
	sysfs_hprint(written, atomic_long_read(&ca->sectors_written) << 9);
	sysfs_hprint(btree_written,
		     atomic_long_read(&ca->btree_sectors_written) << 9);
	sysfs_hprint(metadata_written,
		     (atomic_long_read(&ca->meta_sectors_written) +
		      atomic_long_read(&ca->btree_sectors_written)) << 9);

	sysfs_print(io_errors,
		    atomic_read(&ca->io_errors) >> IO_ERROR_SHIFT);

	sysfs_print(freelist_percent, ca->free.size * 100 /
		    ((size_t) ca->sb.nbuckets));

	if (attr == &sysfs_cache_replacement_policy)
		return bch_snprint_string_list(buf, PAGE_SIZE,
					       cache_replacement_policies,
					       CACHE_REPLACEMENT(&ca->sb));

	if (attr == &sysfs_priority_stats) {
		int cmp(const void *l, const void *r)
		{	return *((uint16_t *) r) - *((uint16_t *) l); }

		size_t n = ca->sb.nbuckets, i, unused, btree;
		uint64_t sum = 0;
		/* Compute 31 quantiles */
		uint16_t q[31], *p, *cached;
		ssize_t ret;

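		/*
		 * Sketch of the computation: snapshot every bucket's
		 * priority, sort descending, drop zero-priority (unused)
		 * buckets from the tail and BTREE_PRIO (metadata) buckets
		 * from the head, then report the mean and 31 evenly
		 * spaced quantiles of INITIAL_PRIO - prio over the
		 * remaining cached-data buckets.
		 */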
		/*
		 * Zero-fill so slots below first_bucket, which the copy
		 * loop never writes, sort to the tail and are trimmed as
		 * unused rather than read uninitialized.
		 */
		cached = p = vzalloc(ca->sb.nbuckets * sizeof(uint16_t));
		if (!p)
			return -ENOMEM;

		mutex_lock(&ca->set->bucket_lock);
		for (i = ca->sb.first_bucket; i < n; i++)
			p[i] = ca->buckets[i].prio;
		mutex_unlock(&ca->set->bucket_lock);

		sort(p, n, sizeof(uint16_t), cmp, NULL);

		while (n &&
		       !cached[n - 1])
			--n;

		unused = ca->sb.nbuckets - n;

		while (cached < p + n &&
		       *cached == BTREE_PRIO)
			cached++;

		btree = cached - p;
		n -= btree;

		for (i = 0; i < n; i++)
			sum += INITIAL_PRIO - cached[i];

		if (n)
			do_div(sum, n);

		for (i = 0; i < ARRAY_SIZE(q); i++)
			q[i] = INITIAL_PRIO - cached[n * (i + 1) /
				(ARRAY_SIZE(q) + 1)];

		vfree(p);

		ret = scnprintf(buf, PAGE_SIZE,
				"Unused:		%zu%%\n"
				"Metadata:	%zu%%\n"
				"Average:	%llu\n"
				"Sectors per Q:	%zu\n"
				"Quantiles:	[",
				unused * 100 / (size_t) ca->sb.nbuckets,
				btree * 100 / (size_t) ca->sb.nbuckets, sum,
				n * ca->sb.bucket_size / (ARRAY_SIZE(q) + 1));

		for (i = 0; i < ARRAY_SIZE(q); i++)
			ret += scnprintf(buf + ret, PAGE_SIZE - ret,
					 "%u ", q[i]);
		ret--;

		ret += scnprintf(buf + ret, PAGE_SIZE - ret, "]\n");

		return ret;
	}

	return 0;
}
SHOW_LOCKED(bch_cache)

STORE(__bch_cache)
{
	struct cache *ca = container_of(kobj, struct cache, kobj);

	if (attr == &sysfs_discard) {
		bool v = strtoul_or_return(buf);

		if (blk_queue_discard(bdev_get_queue(ca->bdev)))
			ca->discard = v;

		if (v != CACHE_DISCARD(&ca->sb)) {
			SET_CACHE_DISCARD(&ca->sb, v);
			bcache_write_super(ca->set);
		}
	}

	if (attr == &sysfs_cache_replacement_policy) {
		ssize_t v = bch_read_string_list(buf, cache_replacement_policies);

		if (v < 0)
			return v;

		if ((unsigned) v != CACHE_REPLACEMENT(&ca->sb)) {
			mutex_lock(&ca->set->bucket_lock);
			SET_CACHE_REPLACEMENT(&ca->sb, v);
			mutex_unlock(&ca->set->bucket_lock);

			bcache_write_super(ca->set);
		}
	}

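	/*
	 * Resizing the freelist means swapping in a new FIFO: move as
	 * many entries as fit into the new one, swap it into place under
	 * bucket_lock, then unpin whatever buckets the old (now detached)
	 * FIFO still holds so the allocator can reclaim them.
	 */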
	if (attr == &sysfs_freelist_percent) {
		DECLARE_FIFO(long, free);
		long i;
		size_t p = strtoul_or_return(buf);

		p = clamp_t(size_t,
			    ((size_t) ca->sb.nbuckets * p) / 100,
			    roundup_pow_of_two(ca->sb.nbuckets) >> 9,
			    ca->sb.nbuckets / 2);

		if (!init_fifo_exact(&free, p, GFP_KERNEL))
			return -ENOMEM;

		mutex_lock(&ca->set->bucket_lock);

		fifo_move(&free, &ca->free);
		fifo_swap(&free, &ca->free);

		mutex_unlock(&ca->set->bucket_lock);

		while (fifo_pop(&free, i))
			atomic_dec(&ca->buckets[i].pin);

		free_fifo(&free);
	}

	if (attr == &sysfs_clear_stats) {
		atomic_long_set(&ca->sectors_written, 0);
		atomic_long_set(&ca->btree_sectors_written, 0);
		atomic_long_set(&ca->meta_sectors_written, 0);
		atomic_set(&ca->io_count, 0);
		atomic_set(&ca->io_errors, 0);
	}

	return size;
}
STORE_LOCKED(bch_cache)

static struct attribute *bch_cache_files[] = {
	&sysfs_bucket_size,
	&sysfs_block_size,
	&sysfs_nbuckets,
	&sysfs_priority_stats,
	&sysfs_discard,
	&sysfs_written,
	&sysfs_btree_written,
	&sysfs_metadata_written,
	&sysfs_io_errors,
	&sysfs_clear_stats,
	&sysfs_freelist_percent,
	&sysfs_cache_replacement_policy,
	NULL
};
KTYPE(bch_cache);