// SPDX-License-Identifier: GPL-2.0
/*
 * bcache sysfs interfaces
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */

#ifndef NO_BCACHEFS_SYSFS

#include "bcachefs.h"
#include "alloc_background.h"
#include "alloc_foreground.h"
#include "sysfs.h"
#include "btree_cache.h"
#include "btree_io.h"
#include "btree_iter.h"
#include "btree_key_cache.h"
#include "btree_update.h"
#include "btree_update_interior.h"
#include "btree_gc.h"
#include "buckets.h"
#include "clock.h"
#include "compress.h"
#include "disk_groups.h"
#include "ec.h"
#include "inode.h"
#include "journal.h"
#include "keylist.h"
#include "move.h"
#include "movinggc.h"
#include "nocow_locking.h"
#include "opts.h"
#include "rebalance.h"
#include "replicas.h"
#include "super-io.h"
#include "tests.h"

#include <linux/blkdev.h>
#include <linux/sort.h>
#include <linux/sched/clock.h>

#include "util.h"

#define SYSFS_OPS(type)							\
const struct sysfs_ops type ## _sysfs_ops = {				\
	.show	= type ## _show,					\
	.store	= type ## _store					\
}
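
/*
 * For reference, SYSFS_OPS(bch2_fs) expands (at its use site below) to:
 *
 *	const struct sysfs_ops bch2_fs_sysfs_ops = {
 *		.show	= bch2_fs_show,
 *		.store	= bch2_fs_store
 *	};
 *
 * i.e. it wires a kobject type's sysfs callbacks to the functions
 * generated by the SHOW()/STORE() macros below.
 */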

#define SHOW(fn)							\
static ssize_t fn ## _to_text(struct printbuf *,			\
			      struct kobject *, struct attribute *);	\
									\
static ssize_t fn ## _show(struct kobject *kobj, struct attribute *attr,\
			   char *buf)					\
{									\
	struct printbuf out = PRINTBUF;					\
	ssize_t ret = fn ## _to_text(&out, kobj, attr);			\
									\
	if (out.pos && out.buf[out.pos - 1] != '\n')			\
		prt_newline(&out);					\
									\
	if (!ret && out.allocation_failure)				\
		ret = -ENOMEM;						\
									\
	if (!ret) {							\
		ret = min_t(size_t, out.pos, PAGE_SIZE - 1);		\
		memcpy(buf, out.buf, ret);				\
	}								\
	printbuf_exit(&out);						\
	return bch2_err_class(ret);					\
}									\
									\
static ssize_t fn ## _to_text(struct printbuf *out, struct kobject *kobj,\
			      struct attribute *attr)

#define STORE(fn)							\
static ssize_t fn ## _store_inner(struct kobject *, struct attribute *,\
			    const char *, size_t);			\
									\
static ssize_t fn ## _store(struct kobject *kobj, struct attribute *attr,\
			    const char *buf, size_t size)		\
{									\
	return bch2_err_class(fn##_store_inner(kobj, attr, buf, size));	\
}									\
									\
static ssize_t fn ## _store_inner(struct kobject *kobj, struct attribute *attr,\
				  const char *buf, size_t size)

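/*
 * SHOW(fn) generates fn_show(): it renders output through fn_to_text()
 * into a dynamically sized printbuf, appends a trailing newline if one
 * is missing, copies at most PAGE_SIZE - 1 bytes into the sysfs buffer,
 * and maps private bcachefs error codes to standard errnos via
 * bch2_err_class(). STORE(fn) wraps fn_store_inner() so its return value
 * gets the same error-class mapping. A minimal sketch, assuming a
 * hypothetical attribute declared with read_attribute(foo):
 *
 *	SHOW(example)
 *	{
 *		sysfs_print(foo, 42);
 *		return 0;
 *	}
 */
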
#define __sysfs_attribute(_name, _mode)					\
	static struct attribute sysfs_##_name =				\
		{ .name = #_name, .mode = _mode }

#define write_attribute(n)	__sysfs_attribute(n, 0200)
#define read_attribute(n)	__sysfs_attribute(n, 0444)
#define rw_attribute(n)		__sysfs_attribute(n, 0644)

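/*
 * Example: rw_attribute(discard) (used below) defines
 *
 *	static struct attribute sysfs_discard =
 *		{ .name = "discard", .mode = 0644 };
 *
 * so every attribute declared here is referenced as &sysfs_<name> in the
 * show/store handlers and in the attribute arrays.
 */
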
#define sysfs_printf(file, fmt, ...)					\
do {									\
	if (attr == &sysfs_ ## file)					\
		prt_printf(out, fmt "\n", __VA_ARGS__);			\
} while (0)

#define sysfs_print(file, var)						\
do {									\
	if (attr == &sysfs_ ## file)					\
		snprint(out, var);					\
} while (0)

#define sysfs_hprint(file, val)						\
do {									\
	if (attr == &sysfs_ ## file)					\
		prt_human_readable_s64(out, val);			\
} while (0)

#define sysfs_strtoul(file, var)					\
do {									\
	if (attr == &sysfs_ ## file)					\
		return strtoul_safe(buf, var) ?: (ssize_t) size;	\
} while (0)

#define sysfs_strtoul_clamp(file, var, min, max)			\
do {									\
	if (attr == &sysfs_ ## file)					\
		return strtoul_safe_clamp(buf, var, min, max)		\
			?: (ssize_t) size;				\
} while (0)

#define strtoul_or_return(cp)						\
({									\
	unsigned long _v;						\
	int _r = kstrtoul(cp, 10, &_v);					\
	if (_r)								\
		return _r;						\
	_v;								\
})
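
/*
 * Note: strtoul_or_return() is a GNU statement expression; the embedded
 * "return _r" returns from the *calling* store handler on parse failure,
 * not from the macro itself. It must only be used where such an early
 * return is safe (no locks held, nothing to unwind).
 */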

write_attribute(trigger_gc);
write_attribute(trigger_discards);
write_attribute(trigger_invalidates);
write_attribute(prune_cache);
write_attribute(btree_wakeup);
rw_attribute(btree_gc_periodic);
rw_attribute(gc_gens_pos);

read_attribute(uuid);
read_attribute(minor);
read_attribute(flags);
read_attribute(bucket_size);
read_attribute(first_bucket);
read_attribute(nbuckets);
rw_attribute(durability);
read_attribute(io_done);
read_attribute(io_errors);
write_attribute(io_errors_reset);

read_attribute(io_latency_read);
read_attribute(io_latency_write);
read_attribute(io_latency_stats_read);
read_attribute(io_latency_stats_write);
read_attribute(congested);

read_attribute(btree_write_stats);

read_attribute(btree_cache_size);
read_attribute(compression_stats);
read_attribute(journal_debug);
read_attribute(btree_updates);
read_attribute(btree_cache);
read_attribute(btree_key_cache);
read_attribute(stripes_heap);
read_attribute(open_buckets);
read_attribute(open_buckets_partial);
read_attribute(write_points);
read_attribute(nocow_lock_table);

#ifdef BCH_WRITE_REF_DEBUG
read_attribute(write_refs);

static const char * const bch2_write_refs[] = {
#define x(n)	#n,
	BCH_WRITE_REFS()
#undef x
	NULL
};

static void bch2_write_refs_to_text(struct printbuf *out, struct bch_fs *c)
{
	bch2_printbuf_tabstop_push(out, 24);

	for (unsigned i = 0; i < ARRAY_SIZE(c->writes); i++) {
		prt_str(out, bch2_write_refs[i]);
		prt_tab(out);
		prt_printf(out, "%li", atomic_long_read(&c->writes[i]));
		prt_newline(out);
	}
}
#endif
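
/*
 * With BCH_WRITE_REF_DEBUG enabled, reading write_refs emits one
 * "<name><tab><count>" line per BCH_WRITE_REFS() entry, roughly
 * (names and counts illustrative only):
 *
 *	sysfs			0
 *	trans			3
 */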

read_attribute(internal_uuid);
read_attribute(disk_groups);

read_attribute(has_data);
read_attribute(alloc_debug);

#define x(t, n, ...) read_attribute(t);
BCH_PERSISTENT_COUNTERS()
#undef x

rw_attribute(discard);
rw_attribute(label);

rw_attribute(copy_gc_enabled);
read_attribute(copy_gc_wait);

rw_attribute(rebalance_enabled);
sysfs_pd_controller_attribute(rebalance);
read_attribute(rebalance_status);
rw_attribute(promote_whole_extents);

read_attribute(new_stripes);

read_attribute(io_timers_read);
read_attribute(io_timers_write);

read_attribute(moving_ctxts);

#ifdef CONFIG_BCACHEFS_TESTS
write_attribute(perf_test);
#endif /* CONFIG_BCACHEFS_TESTS */

#define x(_name)						\
	static struct attribute sysfs_time_stat_##_name =		\
		{ .name = #_name, .mode = 0444 };
	BCH_TIME_STATS()
#undef x

static struct attribute sysfs_state_rw = {
	.name = "state",
	.mode = 0444,
};

static size_t bch2_btree_cache_size(struct bch_fs *c)
{
	size_t ret = 0;
	struct btree *b;

	mutex_lock(&c->btree_cache.lock);
	list_for_each_entry(b, &c->btree_cache.live, list)
		ret += btree_buf_bytes(b);

	mutex_unlock(&c->btree_cache.lock);
	return ret;
}

static int bch2_compression_stats_to_text(struct printbuf *out, struct bch_fs *c)
{
	struct btree_trans *trans;
	enum btree_id id;
	struct compression_type_stats {
		u64		nr_extents;
		u64		sectors_compressed;
		u64		sectors_uncompressed;
	} s[BCH_COMPRESSION_TYPE_NR];
	u64 compressed_incompressible = 0;
	int ret = 0;

	memset(s, 0, sizeof(s));

	if (!test_bit(BCH_FS_started, &c->flags))
		return -EPERM;

	trans = bch2_trans_get(c);

	for (id = 0; id < BTREE_ID_NR; id++) {
		if (!btree_type_has_ptrs(id))
			continue;

		ret = for_each_btree_key(trans, iter, id, POS_MIN,
					 BTREE_ITER_ALL_SNAPSHOTS, k, ({
			struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
			struct bch_extent_crc_unpacked crc;
			const union bch_extent_entry *entry;
			bool compressed = false, incompressible = false;

			bkey_for_each_crc(k.k, ptrs, crc, entry) {
				incompressible	|= crc.compression_type == BCH_COMPRESSION_TYPE_incompressible;
				compressed	|= crc_is_compressed(crc);

				if (crc_is_compressed(crc)) {
					s[crc.compression_type].nr_extents++;
					s[crc.compression_type].sectors_compressed += crc.compressed_size;
					s[crc.compression_type].sectors_uncompressed += crc.uncompressed_size;
				}
			}

			compressed_incompressible += compressed && incompressible;

			if (!compressed) {
				unsigned t = incompressible ? BCH_COMPRESSION_TYPE_incompressible : 0;

				s[t].nr_extents++;
				s[t].sectors_compressed += k.k->size;
				s[t].sectors_uncompressed += k.k->size;
			}
			0;
		}));
	}

	bch2_trans_put(trans);

	if (ret)
		return ret;

	prt_str(out, "type");
	printbuf_tabstop_push(out, 12);
	prt_tab(out);

	prt_str(out, "compressed");
	printbuf_tabstop_push(out, 16);
	prt_tab_rjust(out);

	prt_str(out, "uncompressed");
	printbuf_tabstop_push(out, 16);
	prt_tab_rjust(out);

	prt_str(out, "average extent size");
	printbuf_tabstop_push(out, 24);
	prt_tab_rjust(out);
	prt_newline(out);

	for (unsigned i = 0; i < ARRAY_SIZE(s); i++) {
		bch2_prt_compression_type(out, i);
		prt_tab(out);

		prt_human_readable_u64(out, s[i].sectors_compressed << 9);
		prt_tab_rjust(out);

		prt_human_readable_u64(out, s[i].sectors_uncompressed << 9);
		prt_tab_rjust(out);

		prt_human_readable_u64(out, s[i].nr_extents
				       ? div_u64(s[i].sectors_uncompressed << 9, s[i].nr_extents)
				       : 0);
		prt_tab_rjust(out);
		prt_newline(out);
	}

	if (compressed_incompressible) {
		prt_printf(out, "%llu compressed & incompressible extents", compressed_incompressible);
		prt_newline(out);
	}

	return 0;
}
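
/*
 * The compression_stats file renders a four-column table; its shape is
 * roughly (values illustrative only):
 *
 *	type        compressed    uncompressed    average extent size
 *	none           1.0 GiB         1.0 GiB                128 KiB
 *	lz4            200 MiB         600 MiB                128 KiB
 *
 * Sector counts are converted to bytes with "<< 9" before printing.
 */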

static void bch2_gc_gens_pos_to_text(struct printbuf *out, struct bch_fs *c)
{
	prt_printf(out, "%s: ", bch2_btree_id_str(c->gc_gens_btree));
	bch2_bpos_to_text(out, c->gc_gens_pos);
	prt_printf(out, "\n");
}

static void bch2_btree_wakeup_all(struct bch_fs *c)
{
	struct btree_trans *trans;

	seqmutex_lock(&c->btree_trans_lock);
	list_for_each_entry(trans, &c->btree_trans_list, list) {
		struct btree_bkey_cached_common *b = READ_ONCE(trans->locking);

		if (b)
			six_lock_wakeup_all(&b->lock);
	}
	seqmutex_unlock(&c->btree_trans_lock);
}

SHOW(bch2_fs)
{
	struct bch_fs *c = container_of(kobj, struct bch_fs, kobj);

	sysfs_print(minor,			c->minor);
	sysfs_printf(internal_uuid, "%pU",	c->sb.uuid.b);

	if (attr == &sysfs_flags)
		prt_bitflags(out, bch2_fs_flag_strs, c->flags);

	sysfs_hprint(btree_cache_size,		bch2_btree_cache_size(c));

	if (attr == &sysfs_btree_write_stats)
		bch2_btree_write_stats_to_text(out, c);

	sysfs_printf(btree_gc_periodic, "%u",	(int) c->btree_gc_periodic);

	if (attr == &sysfs_gc_gens_pos)
		bch2_gc_gens_pos_to_text(out, c);

	sysfs_printf(copy_gc_enabled, "%i", c->copy_gc_enabled);

	sysfs_printf(rebalance_enabled,		"%i", c->rebalance.enabled);
	sysfs_pd_controller_show(rebalance,	&c->rebalance.pd); /* XXX */

	if (attr == &sysfs_copy_gc_wait)
		bch2_copygc_wait_to_text(out, c);

	if (attr == &sysfs_rebalance_status)
		bch2_rebalance_status_to_text(out, c);

	sysfs_print(promote_whole_extents,	c->promote_whole_extents);

	/* Debugging: */

	if (attr == &sysfs_journal_debug)
		bch2_journal_debug_to_text(out, &c->journal);

	if (attr == &sysfs_btree_updates)
		bch2_btree_updates_to_text(out, c);

	if (attr == &sysfs_btree_cache)
		bch2_btree_cache_to_text(out, c);

	if (attr == &sysfs_btree_key_cache)
		bch2_btree_key_cache_to_text(out, &c->btree_key_cache);

	if (attr == &sysfs_stripes_heap)
		bch2_stripes_heap_to_text(out, c);

	if (attr == &sysfs_open_buckets)
		bch2_open_buckets_to_text(out, c);

	if (attr == &sysfs_open_buckets_partial)
		bch2_open_buckets_partial_to_text(out, c);

	if (attr == &sysfs_write_points)
		bch2_write_points_to_text(out, c);

	if (attr == &sysfs_compression_stats)
		bch2_compression_stats_to_text(out, c);

	if (attr == &sysfs_new_stripes)
		bch2_new_stripes_to_text(out, c);

	if (attr == &sysfs_io_timers_read)
		bch2_io_timers_to_text(out, &c->io_clock[READ]);

	if (attr == &sysfs_io_timers_write)
		bch2_io_timers_to_text(out, &c->io_clock[WRITE]);

	if (attr == &sysfs_moving_ctxts)
		bch2_fs_moving_ctxts_to_text(out, c);

#ifdef BCH_WRITE_REF_DEBUG
	if (attr == &sysfs_write_refs)
		bch2_write_refs_to_text(out, c);
#endif

	if (attr == &sysfs_nocow_lock_table)
		bch2_nocow_locks_to_text(out, &c->nocow_locks);

	if (attr == &sysfs_disk_groups)
		bch2_disk_groups_to_text(out, c);

	return 0;
}
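
/*
 * Each read of a sysfs file invokes the show handler with exactly one
 * attr, so the comparison chain above acts as a dispatch table, e.g.
 * (path and value illustrative):
 *
 *	$ cat /sys/fs/bcachefs/<uuid>/btree_cache_size
 *	4.0 MiB
 */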

STORE(bch2_fs)
{
	struct bch_fs *c = container_of(kobj, struct bch_fs, kobj);

	if (attr == &sysfs_btree_gc_periodic) {
		ssize_t ret = strtoul_safe(buf, c->btree_gc_periodic)
			?: (ssize_t) size;

		wake_up_process(c->gc_thread);
		return ret;
	}

	if (attr == &sysfs_copy_gc_enabled) {
		ssize_t ret = strtoul_safe(buf, c->copy_gc_enabled)
			?: (ssize_t) size;

		if (c->copygc_thread)
			wake_up_process(c->copygc_thread);
		return ret;
	}

	if (attr == &sysfs_rebalance_enabled) {
		ssize_t ret = strtoul_safe(buf, c->rebalance.enabled)
			?: (ssize_t) size;

		rebalance_wakeup(c);
		return ret;
	}

	sysfs_pd_controller_store(rebalance,	&c->rebalance.pd);

	sysfs_strtoul(promote_whole_extents,	c->promote_whole_extents);

	/* Debugging: */

	if (!test_bit(BCH_FS_started, &c->flags))
		return -EPERM;
	if (!test_bit(BCH_FS_rw, &c->flags))
		return -EROFS;

	if (attr == &sysfs_prune_cache) {
		struct shrink_control sc;

		sc.gfp_mask = GFP_KERNEL;
		sc.nr_to_scan = strtoul_or_return(buf);
		c->btree_cache.shrink->scan_objects(c->btree_cache.shrink, &sc);
	}

	if (attr == &sysfs_btree_wakeup)
		bch2_btree_wakeup_all(c);

	if (attr == &sysfs_trigger_gc) {
		/*
		 * Full gc is currently incompatible with btree key cache:
		 */
#if 0
		down_read(&c->state_lock);
		bch2_gc(c, false, false);
		up_read(&c->state_lock);
#else
		bch2_gc_gens(c);
#endif
	}

	if (attr == &sysfs_trigger_discards)
		bch2_do_discards(c);

	if (attr == &sysfs_trigger_invalidates)
		bch2_do_invalidates(c);

#ifdef CONFIG_BCACHEFS_TESTS
	if (attr == &sysfs_perf_test) {
		char *tmp = kstrdup(buf, GFP_KERNEL), *p = tmp;
		char *test		= strsep(&p, " \t\n");
		char *nr_str		= strsep(&p, " \t\n");
		char *threads_str	= strsep(&p, " \t\n");
		unsigned threads;
		u64 nr;
		int ret = -EINVAL;

		if (threads_str &&
		    !(ret = kstrtouint(threads_str, 10, &threads)) &&
		    !(ret = bch2_strtoull_h(nr_str, &nr)))
			ret = bch2_btree_perf_test(c, test, nr, threads);
		kfree(tmp);

		if (ret)
			size = ret;
	}
#endif
	return size;
}
SYSFS_OPS(bch2_fs);
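
/*
 * The attribute arrays below populate the filesystem's sysfs directory,
 * /sys/fs/bcachefs/<uuid>/, and its subdirectories (internal, options,
 * counters, time_stats). For example, writing any value to
 * internal/trigger_gc runs bch2_gc_gens() above.
 */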

struct attribute *bch2_fs_files[] = {
	&sysfs_minor,
	&sysfs_btree_cache_size,
	&sysfs_btree_write_stats,

	&sysfs_promote_whole_extents,

	&sysfs_compression_stats,

#ifdef CONFIG_BCACHEFS_TESTS
	&sysfs_perf_test,
#endif
	NULL
};

/* counters dir */

SHOW(bch2_fs_counters)
{
	struct bch_fs *c = container_of(kobj, struct bch_fs, counters_kobj);
	u64 counter = 0;
	u64 counter_since_mount = 0;

	printbuf_tabstop_push(out, 32);

	#define x(t, ...) \
		if (attr == &sysfs_##t) {					\
			counter             = percpu_u64_get(&c->counters[BCH_COUNTER_##t]);\
			counter_since_mount = counter - c->counters_on_mount[BCH_COUNTER_##t];\
			prt_printf(out, "since mount:");				\
			prt_tab(out);						\
			prt_human_readable_u64(out, counter_since_mount);	\
			prt_newline(out);					\
										\
			prt_printf(out, "since filesystem creation:");		\
			prt_tab(out);						\
			prt_human_readable_u64(out, counter);			\
			prt_newline(out);					\
		}
	BCH_PERSISTENT_COUNTERS()
	#undef x
	return 0;
}

STORE(bch2_fs_counters)
{
	return 0;
}
SYSFS_OPS(bch2_fs_counters);

struct attribute *bch2_fs_counters_files[] = {
#define x(t, ...) \
	&sysfs_##t,
	BCH_PERSISTENT_COUNTERS()
#undef x
	NULL
};

/* internal dir - just a wrapper */

SHOW(bch2_fs_internal)
{
	struct bch_fs *c = container_of(kobj, struct bch_fs, internal);

	return bch2_fs_to_text(out, &c->kobj, attr);
}

STORE(bch2_fs_internal)
{
	struct bch_fs *c = container_of(kobj, struct bch_fs, internal);

	return bch2_fs_store(&c->kobj, attr, buf, size);
}
SYSFS_OPS(bch2_fs_internal);

struct attribute *bch2_fs_internal_files[] = {
	&sysfs_flags,
	&sysfs_journal_debug,
	&sysfs_btree_updates,
	&sysfs_btree_cache,
	&sysfs_btree_key_cache,
	&sysfs_new_stripes,
	&sysfs_stripes_heap,
	&sysfs_open_buckets,
	&sysfs_open_buckets_partial,
	&sysfs_write_points,
#ifdef BCH_WRITE_REF_DEBUG
	&sysfs_write_refs,
#endif
	&sysfs_nocow_lock_table,
	&sysfs_io_timers_read,
	&sysfs_io_timers_write,

	&sysfs_trigger_gc,
	&sysfs_trigger_discards,
	&sysfs_trigger_invalidates,
	&sysfs_prune_cache,
	&sysfs_btree_wakeup,

	&sysfs_gc_gens_pos,

	&sysfs_copy_gc_enabled,
	&sysfs_copy_gc_wait,

	&sysfs_rebalance_enabled,
	&sysfs_rebalance_status,
	sysfs_pd_controller_files(rebalance),

	&sysfs_moving_ctxts,

	&sysfs_internal_uuid,

	&sysfs_disk_groups,
	NULL
};

/* options */

SHOW(bch2_fs_opts_dir)
{
	struct bch_fs *c = container_of(kobj, struct bch_fs, opts_dir);
	const struct bch_option *opt = container_of(attr, struct bch_option, attr);
	int id = opt - bch2_opt_table;
	u64 v = bch2_opt_get_by_id(&c->opts, id);

	bch2_opt_to_text(out, c, c->disk_sb.sb, opt, v, OPT_SHOW_FULL_LIST);
	prt_char(out, '\n');

	return 0;
}

STORE(bch2_fs_opts_dir)
{
	struct bch_fs *c = container_of(kobj, struct bch_fs, opts_dir);
	const struct bch_option *opt = container_of(attr, struct bch_option, attr);
	int ret, id = opt - bch2_opt_table;
	char *tmp;
	u64 v;

	/*
	 * We don't need to take c->writes for correctness, but it eliminates an
	 * unsightly error message in the dmesg log when we're RO:
	 */
	if (unlikely(!bch2_write_ref_tryget(c, BCH_WRITE_REF_sysfs)))
		return -EROFS;

	tmp = kstrdup(buf, GFP_KERNEL);
	if (!tmp) {
		ret = -ENOMEM;
		goto err;
	}

	ret = bch2_opt_parse(c, opt, strim(tmp), &v, NULL);
	kfree(tmp);

	if (ret < 0)
		goto err;

	ret = bch2_opt_check_may_set(c, id, v);
	if (ret < 0)
		goto err;

	bch2_opt_set_sb(c, opt, v);
	bch2_opt_set_by_id(&c->opts, id, v);

	if (v &&
	    (id == Opt_background_target ||
	     id == Opt_background_compression ||
	     (id == Opt_compression && !c->opts.background_compression)))
		bch2_set_rebalance_needs_scan(c, 0);

	ret = size;
err:
	bch2_write_ref_put(c, BCH_WRITE_REF_sysfs);
	return ret;
}
SYSFS_OPS(bch2_fs_opts_dir);

struct attribute *bch2_fs_opts_dir_files[] = { NULL };

int bch2_opts_create_sysfs_files(struct kobject *kobj)
{
	const struct bch_option *i;
	int ret;

	for (i = bch2_opt_table;
	     i < bch2_opt_table + bch2_opts_nr;
	     i++) {
		if (!(i->flags & OPT_FS))
			continue;

		ret = sysfs_create_file(kobj, &i->attr);
		if (ret)
			return ret;
	}

	return 0;
}
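
/*
 * Option files aren't a fixed attribute array: they're created one at a
 * time from bch2_opt_table, so the options directory automatically
 * reflects every filesystem-scope (OPT_FS) option the running kernel
 * knows about.
 */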

/* time stats */

SHOW(bch2_fs_time_stats)
{
	struct bch_fs *c = container_of(kobj, struct bch_fs, time_stats);

#define x(name)								\
	if (attr == &sysfs_time_stat_##name)				\
		bch2_time_stats_to_text(out, &c->times[BCH_TIME_##name]);
	BCH_TIME_STATS()
#undef x

	return 0;
}

STORE(bch2_fs_time_stats)
{
	return size;
}
SYSFS_OPS(bch2_fs_time_stats);

struct attribute *bch2_fs_time_stats_files[] = {
#define x(name)						\
	&sysfs_time_stat_##name,
	BCH_TIME_STATS()
#undef x
	NULL
};

static void dev_alloc_debug_to_text(struct printbuf *out, struct bch_dev *ca)
{
	struct bch_fs *c = ca->fs;
	struct bch_dev_usage stats = bch2_dev_usage_read(ca);
	unsigned i, nr[BCH_DATA_NR];

	memset(nr, 0, sizeof(nr));

	for (i = 0; i < ARRAY_SIZE(c->open_buckets); i++)
		nr[c->open_buckets[i].data_type]++;

	printbuf_tabstop_push(out, 8);
	printbuf_tabstop_push(out, 16);
	printbuf_tabstop_push(out, 16);
	printbuf_tabstop_push(out, 16);
	printbuf_tabstop_push(out, 16);

	bch2_dev_usage_to_text(out, &stats);

	prt_newline(out);

	prt_printf(out, "reserves:");
	prt_newline(out);
	for (i = 0; i < BCH_WATERMARK_NR; i++) {
		prt_str(out, bch2_watermarks[i]);
		prt_tab(out);
		prt_u64(out, bch2_dev_buckets_reserved(ca, i));
		prt_tab_rjust(out);
		prt_newline(out);
	}

	prt_newline(out);

	printbuf_tabstops_reset(out);
	printbuf_tabstop_push(out, 24);

	prt_str(out, "freelist_wait");
	prt_tab(out);
	prt_str(out, c->freelist_wait.list.first ? "waiting" : "empty");
	prt_newline(out);

	prt_str(out, "open buckets allocated");
	prt_tab(out);
	prt_u64(out, OPEN_BUCKETS_COUNT - c->open_buckets_nr_free);
	prt_newline(out);

	prt_str(out, "open buckets this dev");
	prt_tab(out);
	prt_u64(out, ca->nr_open_buckets);
	prt_newline(out);

	prt_str(out, "open buckets total");
	prt_tab(out);
	prt_u64(out, OPEN_BUCKETS_COUNT);
	prt_newline(out);

	prt_str(out, "open_buckets_wait");
	prt_tab(out);
	prt_str(out, c->open_buckets_wait.list.first ? "waiting" : "empty");
	prt_newline(out);

	prt_str(out, "open_buckets_btree");
	prt_tab(out);
	prt_u64(out, nr[BCH_DATA_btree]);
	prt_newline(out);

	prt_str(out, "open_buckets_user");
	prt_tab(out);
	prt_u64(out, nr[BCH_DATA_user]);
	prt_newline(out);

	prt_str(out, "buckets_to_invalidate");
	prt_tab(out);
	prt_u64(out, should_invalidate_buckets(ca, stats));
	prt_newline(out);

	prt_str(out, "btree reserve cache");
	prt_tab(out);
	prt_u64(out, c->btree_reserve_cache_nr);
	prt_newline(out);
}

static const char * const bch2_rw[] = {
	"read",
	"write",
	NULL
};

static void dev_io_done_to_text(struct printbuf *out, struct bch_dev *ca)
{
	int rw, i;

	for (rw = 0; rw < 2; rw++) {
		prt_printf(out, "%s:\n", bch2_rw[rw]);

		for (i = 1; i < BCH_DATA_NR; i++)
			prt_printf(out, "%-12s:%12llu\n",
			       bch2_data_type_str(i),
			       percpu_u64_get(&ca->io_done->sectors[rw][i]) << 9);
	}
}
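
/*
 * io_done reports bytes (sectors << 9) per data type, skipping index 0
 * (the "none" type); the output looks roughly like (values illustrative
 * only):
 *
 *	read:
 *	sb          :      123456
 *	journal     :      654321
 *	...
 *	write:
 *	...
 */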

SHOW(bch2_dev)
{
	struct bch_dev *ca = container_of(kobj, struct bch_dev, kobj);
	struct bch_fs *c = ca->fs;

	sysfs_printf(uuid,		"%pU\n", ca->uuid.b);

	sysfs_print(bucket_size,	bucket_bytes(ca));
	sysfs_print(first_bucket,	ca->mi.first_bucket);
	sysfs_print(nbuckets,		ca->mi.nbuckets);
	sysfs_print(durability,		ca->mi.durability);
	sysfs_print(discard,		ca->mi.discard);

	if (attr == &sysfs_label) {
		if (ca->mi.group)
			bch2_disk_path_to_text(out, c, ca->mi.group - 1);
		prt_char(out, '\n');
	}

	if (attr == &sysfs_has_data) {
		prt_bitflags(out, __bch2_data_types, bch2_dev_has_data(c, ca));
		prt_char(out, '\n');
	}

	if (attr == &sysfs_state_rw) {
		prt_string_option(out, bch2_member_states, ca->mi.state);
		prt_char(out, '\n');
	}

	if (attr == &sysfs_io_done)
		dev_io_done_to_text(out, ca);

	if (attr == &sysfs_io_errors)
		bch2_dev_io_errors_to_text(out, ca);

	sysfs_print(io_latency_read,		atomic64_read(&ca->cur_latency[READ]));
	sysfs_print(io_latency_write,		atomic64_read(&ca->cur_latency[WRITE]));

	if (attr == &sysfs_io_latency_stats_read)
		bch2_time_stats_to_text(out, &ca->io_latency[READ].stats);

	if (attr == &sysfs_io_latency_stats_write)
		bch2_time_stats_to_text(out, &ca->io_latency[WRITE].stats);

	sysfs_printf(congested,			"%u%%",
		     clamp(atomic_read(&ca->congested), 0, CONGESTED_MAX)
		     * 100 / CONGESTED_MAX);

	if (attr == &sysfs_alloc_debug)
		dev_alloc_debug_to_text(out, ca);

	return 0;
}

STORE(bch2_dev)
{
	struct bch_dev *ca = container_of(kobj, struct bch_dev, kobj);
	struct bch_fs *c = ca->fs;
	struct bch_member *mi;

	if (attr == &sysfs_discard) {
		bool v = strtoul_or_return(buf);

		mutex_lock(&c->sb_lock);
		mi = bch2_members_v2_get_mut(c->disk_sb.sb, ca->dev_idx);

		if (v != BCH_MEMBER_DISCARD(mi)) {
			SET_BCH_MEMBER_DISCARD(mi, v);
			bch2_write_super(c);
		}
		mutex_unlock(&c->sb_lock);
	}

	if (attr == &sysfs_durability) {
		u64 v = strtoul_or_return(buf);

		mutex_lock(&c->sb_lock);
		mi = bch2_members_v2_get_mut(c->disk_sb.sb, ca->dev_idx);

		if (v + 1 != BCH_MEMBER_DURABILITY(mi)) {
			SET_BCH_MEMBER_DURABILITY(mi, v + 1);
			bch2_write_super(c);
		}
		mutex_unlock(&c->sb_lock);
	}

	if (attr == &sysfs_label) {
		char *tmp;
		int ret;

		tmp = kstrdup(buf, GFP_KERNEL);
		if (!tmp)
			return -ENOMEM;

		ret = bch2_dev_group_set(c, ca, strim(tmp));
		kfree(tmp);
		if (ret)
			return ret;
	}

	if (attr == &sysfs_io_errors_reset)
		bch2_dev_errors_reset(ca);

	return size;
}
SYSFS_OPS(bch2_dev);
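
/*
 * Note on the "v + 1" above: BCH_MEMBER_DURABILITY() stores durability
 * biased by one so that a stored zero can mean "unset". Per-device
 * kobjects appear as dev-<idx> subdirectories of the filesystem's sysfs
 * directory.
 */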

struct attribute *bch2_dev_files[] = {
	&sysfs_uuid,
	&sysfs_bucket_size,
	&sysfs_first_bucket,
	&sysfs_nbuckets,
	&sysfs_durability,

	/* settings: */
	&sysfs_discard,
	&sysfs_state_rw,
	&sysfs_label,

	&sysfs_has_data,
	&sysfs_io_done,
	&sysfs_io_errors,
	&sysfs_io_errors_reset,

	&sysfs_io_latency_read,
	&sysfs_io_latency_write,
	&sysfs_io_latency_stats_read,
	&sysfs_io_latency_stats_write,
	&sysfs_congested,

	/* debug: */
	&sysfs_alloc_debug,
	NULL
};

#endif /* NO_BCACHEFS_SYSFS */