1 // SPDX-License-Identifier: GPL-2.0
2 /*
 * bcachefs sysfs interfaces
4 *
5 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
6 * Copyright 2012 Google, Inc.
7 */
8
9 #ifndef NO_BCACHEFS_SYSFS
10
11 #include "bcachefs.h"
12 #include "alloc_background.h"
13 #include "alloc_foreground.h"
14 #include "sysfs.h"
15 #include "btree_cache.h"
16 #include "btree_io.h"
17 #include "btree_iter.h"
18 #include "btree_key_cache.h"
19 #include "btree_update.h"
20 #include "btree_update_interior.h"
21 #include "btree_gc.h"
22 #include "buckets.h"
23 #include "clock.h"
24 #include "compress.h"
25 #include "disk_accounting.h"
26 #include "disk_groups.h"
27 #include "ec.h"
28 #include "enumerated_ref.h"
29 #include "error.h"
30 #include "inode.h"
31 #include "journal.h"
32 #include "journal_reclaim.h"
33 #include "keylist.h"
34 #include "move.h"
35 #include "movinggc.h"
36 #include "nocow_locking.h"
37 #include "opts.h"
38 #include "rebalance.h"
39 #include "recovery_passes.h"
40 #include "replicas.h"
41 #include "sb-errors.h"
42 #include "super-io.h"
43 #include "tests.h"
44
45 #include <linux/blkdev.h>
46 #include <linux/sort.h>
47 #include <linux/sched/clock.h>
48
49 #include "util.h"
50
/*
 * Generate the sysfs_ops for a given prefix, wiring <type>_show and
 * <type>_store (defined via the SHOW()/STORE() macros below) into sysfs.
 */
#define SYSFS_OPS(type)							\
const struct sysfs_ops type ## _sysfs_ops = {				\
	.show	= type ## _show,					\
	.store	= type ## _store					\
}

/*
 * SHOW(fn) defines the sysfs ->show() entry point for fn:
 *
 * The real work happens in fn_to_text(), which renders into a heap-backed
 * printbuf; the wrapper then guarantees a trailing newline, reports printbuf
 * allocation failure as -ENOMEM, copies at most PAGE_SIZE - 1 bytes into the
 * page sysfs gave us, and converts private bcachefs error codes to standard
 * errnos via bch2_err_class().
 */
#define SHOW(fn)							\
static ssize_t fn ## _to_text(struct printbuf *,			\
			      struct kobject *, struct attribute *);	\
									\
static ssize_t fn ## _show(struct kobject *kobj, struct attribute *attr,\
			   char *buf)					\
{									\
	struct printbuf out = PRINTBUF;					\
	ssize_t ret = fn ## _to_text(&out, kobj, attr);			\
									\
	if (out.pos && out.buf[out.pos - 1] != '\n')			\
		prt_newline(&out);					\
									\
	if (!ret && out.allocation_failure)				\
		ret = -ENOMEM;						\
									\
	if (!ret) {							\
		ret = min_t(size_t, out.pos, PAGE_SIZE - 1);		\
		memcpy(buf, out.buf, ret);				\
	}								\
	printbuf_exit(&out);						\
	return bch2_err_class(ret);					\
}									\
									\
static ssize_t fn ## _to_text(struct printbuf *out, struct kobject *kobj,\
			      struct attribute *attr)

/*
 * STORE(fn) defines the sysfs ->store() entry point for fn: a thin wrapper
 * around fn_store_inner() that maps private error codes to standard errnos.
 */
#define STORE(fn)							\
static ssize_t fn ## _store_inner(struct kobject *, struct attribute *,\
			    const char *, size_t);			\
									\
static ssize_t fn ## _store(struct kobject *kobj, struct attribute *attr,\
			    const char *buf, size_t size)		\
{									\
	return bch2_err_class(fn##_store_inner(kobj, attr, buf, size));	\
}									\
									\
static ssize_t fn ## _store_inner(struct kobject *kobj, struct attribute *attr,\
				  const char *buf, size_t size)

/* Declare a struct attribute named sysfs_<name> with the given mode: */
#define __sysfs_attribute(_name, _mode)					\
	static struct attribute sysfs_##_name =				\
		{ .name = #_name, .mode = _mode }

#define write_attribute(n)	__sysfs_attribute(n, 0200)
#define read_attribute(n)	__sysfs_attribute(n, 0444)
#define rw_attribute(n)		__sysfs_attribute(n, 0644)

/*
 * The helpers below are used inside SHOW()/STORE() bodies, where 'attr',
 * 'out', 'buf' and 'size' are in scope; each one only acts when the current
 * attribute matches sysfs_<file>.
 */
#define sysfs_printf(file, fmt, ...)					\
do {									\
	if (attr == &sysfs_ ## file)					\
		prt_printf(out, fmt "\n", __VA_ARGS__);			\
} while (0)

#define sysfs_print(file, var)						\
do {									\
	if (attr == &sysfs_ ## file)					\
		snprint(out, var);					\
} while (0)

/* Print a value in human-readable units (KiB/MiB/...): */
#define sysfs_hprint(file, val)						\
do {									\
	if (attr == &sysfs_ ## file)					\
		prt_human_readable_s64(out, val);			\
} while (0)

/* Parse into @var and return @size (success) or the parse error: */
#define sysfs_strtoul(file, var)					\
do {									\
	if (attr == &sysfs_ ## file)					\
		return strtoul_safe(buf, var) ?: (ssize_t) size;	\
} while (0)

#define sysfs_strtoul_clamp(file, var, min, max)			\
do {									\
	if (attr == &sysfs_ ## file)					\
		return strtoul_safe_clamp(buf, var, min, max)		\
			?: (ssize_t) size;				\
} while (0)

/*
 * Statement expression: evaluates to the parsed value, or does an early
 * return from the *enclosing function* if parsing fails.
 */
#define strtoul_or_return(cp)						\
({									\
	unsigned long _v;						\
	int _r = kstrtoul(cp, 10, &_v);					\
	if (_r)								\
		return _r;						\
	_v;								\
})
144
/* Write-only trigger files: writing anything kicks the named operation: */
write_attribute(trigger_gc);
write_attribute(trigger_discards);
write_attribute(trigger_invalidates);
write_attribute(trigger_journal_commit);
write_attribute(trigger_journal_flush);
write_attribute(trigger_journal_writes);
write_attribute(trigger_btree_cache_shrink);
write_attribute(trigger_btree_key_cache_shrink);
write_attribute(trigger_btree_updates);
write_attribute(trigger_freelist_wakeup);
write_attribute(trigger_recalc_capacity);
write_attribute(trigger_delete_dead_snapshots);
write_attribute(trigger_emergency_read_only);
read_attribute(gc_gens_pos);

read_attribute(uuid);
read_attribute(minor);
read_attribute(flags);
read_attribute(first_bucket);
read_attribute(nbuckets);
read_attribute(io_done);
read_attribute(io_errors);
write_attribute(io_errors_reset);

read_attribute(io_latency_read);
read_attribute(io_latency_write);
read_attribute(io_latency_stats_read);
read_attribute(io_latency_stats_write);
read_attribute(congested);

read_attribute(btree_write_stats);

read_attribute(btree_cache_size);
read_attribute(compression_stats);
read_attribute(errors);
read_attribute(journal_debug);
read_attribute(btree_cache);
read_attribute(btree_key_cache);
read_attribute(btree_reserve_cache);
read_attribute(open_buckets);
read_attribute(open_buckets_partial);
read_attribute(nocow_lock_table);

read_attribute(read_refs);
read_attribute(write_refs);

read_attribute(internal_uuid);
read_attribute(disk_groups);

read_attribute(has_data);
read_attribute(alloc_debug);
read_attribute(usage_base);

/* One read-only attribute per persistent counter: */
#define x(t, n, ...) read_attribute(t);
BCH_PERSISTENT_COUNTERS()
#undef x

rw_attribute(label);

read_attribute(copy_gc_wait);

sysfs_pd_controller_attribute(rebalance);
read_attribute(rebalance_status);
read_attribute(snapshot_delete_status);
read_attribute(recovery_status);

read_attribute(new_stripes);

read_attribute(io_timers_read);
read_attribute(io_timers_write);

read_attribute(moving_ctxts);

#ifdef CONFIG_BCACHEFS_TESTS
write_attribute(perf_test);
#endif /* CONFIG_BCACHEFS_TESTS */

/* Time stats files are 0644: writing resets the corresponding stats: */
#define x(_name)						\
	static struct attribute sysfs_time_stat_##_name =	\
		{ .name = #_name, .mode = 0644 };
BCH_TIME_STATS()
#undef x
227
228 static size_t bch2_btree_cache_size(struct bch_fs *c)
229 {
230 struct btree_cache *bc = &c->btree_cache;
231 size_t ret = 0;
232 struct btree *b;
233
234 mutex_lock(&bc->lock);
235 list_for_each_entry(b, &bc->live[0].list, list)
236 ret += btree_buf_bytes(b);
237 list_for_each_entry(b, &bc->live[1].list, list)
238 ret += btree_buf_bytes(b);
239 list_for_each_entry(b, &bc->freeable, list)
240 ret += btree_buf_bytes(b);
241 mutex_unlock(&bc->lock);
242 return ret;
243 }
244
bch2_compression_stats_to_text(struct printbuf * out,struct bch_fs * c)245 static int bch2_compression_stats_to_text(struct printbuf *out, struct bch_fs *c)
246 {
247 prt_str(out, "type");
248 printbuf_tabstop_push(out, 12);
249 printbuf_tabstop_push(out, 16);
250 printbuf_tabstop_push(out, 16);
251 printbuf_tabstop_push(out, 24);
252 prt_printf(out, "type\tcompressed\runcompressed\raverage extent size\r\n");
253
254 for (unsigned i = 1; i < BCH_COMPRESSION_TYPE_NR; i++) {
255 struct disk_accounting_pos a;
256 disk_accounting_key_init(a, compression, .type = i);
257 struct bpos p = disk_accounting_pos_to_bpos(&a);
258 u64 v[3];
259 bch2_accounting_mem_read(c, p, v, ARRAY_SIZE(v));
260
261 u64 nr_extents = v[0];
262 u64 sectors_uncompressed = v[1];
263 u64 sectors_compressed = v[2];
264
265 bch2_prt_compression_type(out, i);
266 prt_tab(out);
267
268 prt_human_readable_u64(out, sectors_compressed << 9);
269 prt_tab_rjust(out);
270
271 prt_human_readable_u64(out, sectors_uncompressed << 9);
272 prt_tab_rjust(out);
273
274 prt_human_readable_u64(out, nr_extents
275 ? div64_u64(sectors_uncompressed << 9, nr_extents)
276 : 0);
277 prt_tab_rjust(out);
278 prt_newline(out);
279 }
280
281 return 0;
282 }
283
/* Print current gc_gens scan position as "<btree id>: <bpos>": */
static void bch2_gc_gens_pos_to_text(struct printbuf *out, struct bch_fs *c)
{
	bch2_btree_id_to_text(out, c->gc_gens_btree);
	prt_printf(out, ": ");
	bch2_bpos_to_text(out, c->gc_gens_pos);
	prt_printf(out, "\n");
}
291
/* Dump the base filesystem usage counters (sectors/counts), one per line: */
static void bch2_fs_usage_base_to_text(struct printbuf *out, struct bch_fs *c)
{
	struct bch_fs_usage_base b = {};

	/*
	 * Sums sizeof(b)/sizeof(u64) consecutive u64s starting at b.hidden -
	 * assumes struct bch_fs_usage_base is laid out as an array of u64s
	 * with 'hidden' first, mirroring c->usage. NOTE(review): verify
	 * against the struct definition if fields are ever reordered.
	 */
	acc_u64s_percpu(&b.hidden, &c->usage->hidden, sizeof(b) / sizeof(u64));

	prt_printf(out, "hidden:\t\t%llu\n",	b.hidden);
	prt_printf(out, "btree:\t\t%llu\n",	b.btree);
	prt_printf(out, "data:\t\t%llu\n",	b.data);
	prt_printf(out, "cached:\t%llu\n",	b.cached);
	prt_printf(out, "reserved:\t\t%llu\n",	b.reserved);
	prt_printf(out, "nr_inodes:\t%llu\n",	b.nr_inodes);
}
305
/*
 * Top-level filesystem sysfs read handler: dispatches on the attribute and
 * renders its current value or debug report into @out. Also serves the
 * "internal" directory via SHOW(bch2_fs_internal) below.
 */
SHOW(bch2_fs)
{
	struct bch_fs *c = container_of(kobj, struct bch_fs, kobj);

	sysfs_print(minor,			c->minor);
	sysfs_printf(internal_uuid, "%pU",	c->sb.uuid.b);

	if (attr == &sysfs_flags)
		prt_bitflags(out, bch2_fs_flag_strs, c->flags);

	sysfs_hprint(btree_cache_size,		bch2_btree_cache_size(c));

	if (attr == &sysfs_btree_write_stats)
		bch2_btree_write_stats_to_text(out, c);

	if (attr == &sysfs_gc_gens_pos)
		bch2_gc_gens_pos_to_text(out, c);

	sysfs_pd_controller_show(rebalance,	&c->rebalance.pd); /* XXX */

	if (attr == &sysfs_copy_gc_wait)
		bch2_copygc_wait_to_text(out, c);

	if (attr == &sysfs_rebalance_status)
		bch2_rebalance_status_to_text(out, c);

	if (attr == &sysfs_snapshot_delete_status)
		bch2_snapshot_delete_status_to_text(out, c);

	if (attr == &sysfs_recovery_status)
		bch2_recovery_pass_status_to_text(out, c);

	/* Debugging: */

	if (attr == &sysfs_journal_debug)
		bch2_journal_debug_to_text(out, &c->journal);

	if (attr == &sysfs_btree_cache)
		bch2_btree_cache_to_text(out, &c->btree_cache);

	if (attr == &sysfs_btree_key_cache)
		bch2_btree_key_cache_to_text(out, &c->btree_key_cache);

	if (attr == &sysfs_btree_reserve_cache)
		bch2_btree_reserve_cache_to_text(out, c);

	if (attr == &sysfs_open_buckets)
		bch2_open_buckets_to_text(out, c, NULL);

	if (attr == &sysfs_open_buckets_partial)
		bch2_open_buckets_partial_to_text(out, c);

	if (attr == &sysfs_compression_stats)
		bch2_compression_stats_to_text(out, c);

	if (attr == &sysfs_errors)
		bch2_fs_errors_to_text(out, c);

	if (attr == &sysfs_new_stripes)
		bch2_new_stripes_to_text(out, c);

	if (attr == &sysfs_io_timers_read)
		bch2_io_timers_to_text(out, &c->io_clock[READ]);

	if (attr == &sysfs_io_timers_write)
		bch2_io_timers_to_text(out, &c->io_clock[WRITE]);

	if (attr == &sysfs_moving_ctxts)
		bch2_fs_moving_ctxts_to_text(out, c);

	if (attr == &sysfs_write_refs)
		enumerated_ref_to_text(out, &c->writes, bch2_write_refs);

	if (attr == &sysfs_nocow_lock_table)
		bch2_nocow_locks_to_text(out, &c->nocow_locks);

	if (attr == &sysfs_disk_groups)
		bch2_disk_groups_to_text(out, c);

	if (attr == &sysfs_alloc_debug)
		bch2_fs_alloc_debug_to_text(out, c);

	if (attr == &sysfs_usage_base)
		bch2_fs_usage_base_to_text(out, c);

	/* unmatched attributes produce empty output, not an error */
	return 0;
}
393
STORE(bch2_fs)394 STORE(bch2_fs)
395 {
396 struct bch_fs *c = container_of(kobj, struct bch_fs, kobj);
397
398 sysfs_pd_controller_store(rebalance, &c->rebalance.pd);
399
400 /* Debugging: */
401
402 if (!test_bit(BCH_FS_started, &c->flags))
403 return -EPERM;
404
405 /* Debugging: */
406
407 if (attr == &sysfs_trigger_btree_updates)
408 queue_work(c->btree_interior_update_worker, &c->btree_interior_update_work);
409
410 if (!enumerated_ref_tryget(&c->writes, BCH_WRITE_REF_sysfs))
411 return -EROFS;
412
413 if (attr == &sysfs_trigger_btree_cache_shrink) {
414 struct btree_cache *bc = &c->btree_cache;
415 struct shrink_control sc;
416
417 sc.gfp_mask = GFP_KERNEL;
418 sc.nr_to_scan = strtoul_or_return(buf);
419 bc->live[0].shrink->scan_objects(bc->live[0].shrink, &sc);
420 }
421
422 if (attr == &sysfs_trigger_btree_key_cache_shrink) {
423 struct shrink_control sc;
424
425 sc.gfp_mask = GFP_KERNEL;
426 sc.nr_to_scan = strtoul_or_return(buf);
427 c->btree_key_cache.shrink->scan_objects(c->btree_key_cache.shrink, &sc);
428 }
429
430 if (attr == &sysfs_trigger_gc)
431 bch2_gc_gens(c);
432
433 if (attr == &sysfs_trigger_discards)
434 bch2_do_discards(c);
435
436 if (attr == &sysfs_trigger_invalidates)
437 bch2_do_invalidates(c);
438
439 if (attr == &sysfs_trigger_journal_commit)
440 bch2_journal_flush(&c->journal);
441
442 if (attr == &sysfs_trigger_journal_flush) {
443 bch2_journal_flush_all_pins(&c->journal);
444 bch2_journal_meta(&c->journal);
445 }
446
447 if (attr == &sysfs_trigger_journal_writes)
448 bch2_journal_do_writes(&c->journal);
449
450 if (attr == &sysfs_trigger_freelist_wakeup)
451 closure_wake_up(&c->freelist_wait);
452
453 if (attr == &sysfs_trigger_recalc_capacity) {
454 down_read(&c->state_lock);
455 bch2_recalc_capacity(c);
456 up_read(&c->state_lock);
457 }
458
459 if (attr == &sysfs_trigger_delete_dead_snapshots)
460 __bch2_delete_dead_snapshots(c);
461
462 if (attr == &sysfs_trigger_emergency_read_only) {
463 struct printbuf buf = PRINTBUF;
464 bch2_log_msg_start(c, &buf);
465
466 prt_printf(&buf, "shutdown by sysfs\n");
467 bch2_fs_emergency_read_only2(c, &buf);
468 bch2_print_str(c, KERN_ERR, buf.buf);
469 printbuf_exit(&buf);
470 }
471
472 #ifdef CONFIG_BCACHEFS_TESTS
473 if (attr == &sysfs_perf_test) {
474 char *tmp = kstrdup(buf, GFP_KERNEL), *p = tmp;
475 char *test = strsep(&p, " \t\n");
476 char *nr_str = strsep(&p, " \t\n");
477 char *threads_str = strsep(&p, " \t\n");
478 unsigned threads;
479 u64 nr;
480 int ret = -EINVAL;
481
482 if (threads_str &&
483 !(ret = kstrtouint(threads_str, 10, &threads)) &&
484 !(ret = bch2_strtoull_h(nr_str, &nr)))
485 ret = bch2_btree_perf_test(c, test, nr, threads);
486 kfree(tmp);
487
488 if (ret)
489 size = ret;
490 }
491 #endif
492 enumerated_ref_put(&c->writes, BCH_WRITE_REF_sysfs);
493 return size;
494 }
SYSFS_OPS(bch2_fs);

/* Attributes in the top-level /sys/fs/bcachefs/<uuid>/ directory: */
struct attribute *bch2_fs_files[] = {
	&sysfs_minor,
	&sysfs_btree_cache_size,
	&sysfs_btree_write_stats,

	&sysfs_rebalance_status,
	&sysfs_snapshot_delete_status,
	&sysfs_recovery_status,

	&sysfs_compression_stats,
	&sysfs_errors,

#ifdef CONFIG_BCACHEFS_TESTS
	&sysfs_perf_test,
#endif
	NULL
};
514
/* counters dir */

/*
 * Show one persistent counter: its value since mount and since filesystem
 * creation. TYPE_SECTORS counters are converted to bytes (<< 9) and printed
 * human-readable; TYPE_COUNTER values are printed raw.
 */
SHOW(bch2_fs_counters)
{
	struct bch_fs *c = container_of(kobj, struct bch_fs, counters_kobj);
	u64 counter = 0;
	u64 counter_since_mount = 0;

	printbuf_tabstop_push(out, 32);

#define x(t, n, f, ...) \
	if (attr == &sysfs_##t) {					\
		counter             = percpu_u64_get(&c->counters[BCH_COUNTER_##t]);\
		counter_since_mount = counter - c->counters_on_mount[BCH_COUNTER_##t];\
		if (f & TYPE_SECTORS) {					\
			counter <<= 9;					\
			counter_since_mount <<= 9;			\
		}							\
									\
		prt_printf(out, "since mount:\t");			\
		(f & TYPE_COUNTER) ? prt_u64(out, counter_since_mount) :\
		prt_human_readable_u64(out, counter_since_mount);	\
		prt_newline(out);					\
									\
		prt_printf(out, "since filesystem creation:\t");	\
		(f & TYPE_COUNTER) ? prt_u64(out, counter) :		\
		prt_human_readable_u64(out, counter);			\
		prt_newline(out);					\
	}
	BCH_PERSISTENT_COUNTERS()
#undef x
	return 0;
}
548
/*
 * Counters are exported read-only (read_attribute/0444); this stub exists
 * only because SYSFS_OPS() requires a ->store callback.
 */
STORE(bch2_fs_counters) {
	return 0;
}

SYSFS_OPS(bch2_fs_counters);
554
/* One file per persistent counter, in the counters/ subdirectory: */
struct attribute *bch2_fs_counters_files[] = {
#define x(t, ...) \
	&sysfs_##t,
	BCH_PERSISTENT_COUNTERS()
#undef x
	NULL
};
/* internal dir - just a wrapper */

/* Delegates to the main bch2_fs handler with the fs kobject: */
SHOW(bch2_fs_internal)
{
	struct bch_fs *c = container_of(kobj, struct bch_fs, internal);

	return bch2_fs_to_text(out, &c->kobj, attr);
}
570
/* Delegates to the main bch2_fs store handler with the fs kobject: */
STORE(bch2_fs_internal)
{
	struct bch_fs *c = container_of(kobj, struct bch_fs, internal);

	return bch2_fs_store(&c->kobj, attr, buf, size);
}
SYSFS_OPS(bch2_fs_internal);
578
/* Debug/internal attributes, exposed in the internal/ subdirectory: */
struct attribute *bch2_fs_internal_files[] = {
	&sysfs_flags,
	&sysfs_journal_debug,
	&sysfs_btree_cache,
	&sysfs_btree_key_cache,
	&sysfs_btree_reserve_cache,
	&sysfs_new_stripes,
	&sysfs_open_buckets,
	&sysfs_open_buckets_partial,
	&sysfs_write_refs,
	&sysfs_nocow_lock_table,
	&sysfs_io_timers_read,
	&sysfs_io_timers_write,

	&sysfs_trigger_gc,
	&sysfs_trigger_discards,
	&sysfs_trigger_invalidates,
	&sysfs_trigger_journal_commit,
	&sysfs_trigger_journal_flush,
	&sysfs_trigger_journal_writes,
	&sysfs_trigger_btree_cache_shrink,
	&sysfs_trigger_btree_key_cache_shrink,
	&sysfs_trigger_btree_updates,
	&sysfs_trigger_freelist_wakeup,
	&sysfs_trigger_recalc_capacity,
	&sysfs_trigger_delete_dead_snapshots,
	&sysfs_trigger_emergency_read_only,

	&sysfs_gc_gens_pos,

	&sysfs_copy_gc_wait,

	sysfs_pd_controller_files(rebalance),

	&sysfs_moving_ctxts,

	&sysfs_internal_uuid,

	&sysfs_disk_groups,
	&sysfs_alloc_debug,
	&sysfs_usage_base,
	NULL
};
622
/* options */

/*
 * Render option @id for the filesystem (@ca == NULL) or a member device.
 * Filesystem options come from the in-memory c->opts; device options are
 * read from the device's superblock member. Returns 0 or -EINVAL if the
 * option doesn't apply at this level.
 */
static ssize_t sysfs_opt_show(struct bch_fs *c,
			      struct bch_dev *ca,
			      enum bch_opt_id id,
			      struct printbuf *out)
{
	const struct bch_option *opt = bch2_opt_table + id;
	u64 v;

	if (opt->flags & OPT_FS) {
		v = bch2_opt_get_by_id(&c->opts, id);
	} else if ((opt->flags & OPT_DEVICE) && opt->get_member) {
		v = bch2_opt_from_sb(c->disk_sb.sb, id, ca->dev_idx);
	} else {
		return -EINVAL;
	}

	bch2_opt_to_text(out, c, c->disk_sb.sb, opt, v, OPT_SHOW_FULL_LIST);
	prt_char(out, '\n');
	return 0;
}
645
/*
 * Parse and apply option @id from a sysfs write, for the filesystem
 * (@ca == NULL) or a member device. Superblock-backed options are persisted
 * via bch2_opt_set_sb(); the post-set hook only runs if the value actually
 * changed. Returns @size on success or a negative error.
 */
static ssize_t sysfs_opt_store(struct bch_fs *c,
			       struct bch_dev *ca,
			       enum bch_opt_id id,
			       const char *buf, size_t size)
{
	const struct bch_option *opt = bch2_opt_table + id;
	int ret = 0;

	/*
	 * We don't need to take c->writes for correctness, but it eliminates an
	 * unsightly error message in the dmesg log when we're RO:
	 */
	if (unlikely(!enumerated_ref_tryget(&c->writes, BCH_WRITE_REF_sysfs)))
		return -EROFS;

	/* copy so we can strim() in place: */
	char *tmp = kstrdup(buf, GFP_KERNEL);
	if (!tmp) {
		ret = -ENOMEM;
		goto err;
	}

	u64 v;
	ret = bch2_opt_parse(c, opt, strim(tmp), &v, NULL) ?:
		bch2_opt_hook_pre_set(c, ca, id, v);
	kfree(tmp);

	if (ret < 0)
		goto err;

	bool is_sb = opt->get_sb || opt->get_member;
	bool changed = false;

	if (is_sb) {
		changed = bch2_opt_set_sb(c, ca, opt, v);
	} else if (!ca) {
		changed = bch2_opt_get_by_id(&c->opts, id) != v;
	} else {
		/* device options that aren't superblock options aren't
		 * supported */
		BUG();
	}

	if (!ca)
		bch2_opt_set_by_id(&c->opts, id, v);

	if (changed)
		bch2_opt_hook_post_set(c, ca, 0, &c->opts, id);

	ret = size;
err:
	enumerated_ref_put(&c->writes, BCH_WRITE_REF_sysfs);
	return ret;
}
699
SHOW(bch2_fs_opts_dir)700 SHOW(bch2_fs_opts_dir)
701 {
702 struct bch_fs *c = container_of(kobj, struct bch_fs, opts_dir);
703 int id = bch2_opt_lookup(attr->name);
704 if (id < 0)
705 return 0;
706
707 return sysfs_opt_show(c, NULL, id, out);
708 }
709
STORE(bch2_fs_opts_dir)710 STORE(bch2_fs_opts_dir)
711 {
712 struct bch_fs *c = container_of(kobj, struct bch_fs, opts_dir);
713 int id = bch2_opt_lookup(attr->name);
714 if (id < 0)
715 return 0;
716
717 return sysfs_opt_store(c, NULL, id, buf, size);
718 }
SYSFS_OPS(bch2_fs_opts_dir);

/* Empty: option files are created dynamically by bch2_opts_create_sysfs_files() */
struct attribute *bch2_fs_opts_dir_files[] = { NULL };
722
bch2_opts_create_sysfs_files(struct kobject * kobj,unsigned type)723 int bch2_opts_create_sysfs_files(struct kobject *kobj, unsigned type)
724 {
725 for (const struct bch_option *i = bch2_opt_table;
726 i < bch2_opt_table + bch2_opts_nr;
727 i++) {
728 if (i->flags & OPT_HIDDEN)
729 continue;
730 if (!(i->flags & type))
731 continue;
732
733 int ret = sysfs_create_file(kobj, &i->attr);
734 if (ret)
735 return ret;
736 }
737
738 return 0;
739 }
740
/* time stats */

/* Show the latency/duration statistics for one BCH_TIME_STATS entry: */
SHOW(bch2_fs_time_stats)
{
	struct bch_fs *c = container_of(kobj, struct bch_fs, time_stats);

#define x(name)								\
	if (attr == &sysfs_time_stat_##name)				\
		bch2_time_stats_to_text(out, &c->times[BCH_TIME_##name]);
	BCH_TIME_STATS()
#undef x

	return 0;
}
755
/* Writing to a time stats file resets that entry's statistics: */
STORE(bch2_fs_time_stats)
{
	struct bch_fs *c = container_of(kobj, struct bch_fs, time_stats);

#define x(name)								\
	if (attr == &sysfs_time_stat_##name)				\
		bch2_time_stats_reset(&c->times[BCH_TIME_##name]);
	BCH_TIME_STATS()
#undef x
	return size;
}
SYSFS_OPS(bch2_fs_time_stats);
768
/* One file per BCH_TIME_STATS entry in the time_stats/ subdirectory: */
struct attribute *bch2_fs_time_stats_files[] = {
#define x(name)						\
	&sysfs_time_stat_##name,
	BCH_TIME_STATS()
#undef x
	NULL
};

/* Row labels for dev_io_done_to_text(), indexed by READ/WRITE: */
static const char * const bch2_rw[] = {
	"read",
	"write",
	NULL
};
782
dev_io_done_to_text(struct printbuf * out,struct bch_dev * ca)783 static void dev_io_done_to_text(struct printbuf *out, struct bch_dev *ca)
784 {
785 int rw, i;
786
787 for (rw = 0; rw < 2; rw++) {
788 prt_printf(out, "%s:\n", bch2_rw[rw]);
789
790 for (i = 1; i < BCH_DATA_NR; i++)
791 prt_printf(out, "%-12s:%12llu\n",
792 bch2_data_type_str(i),
793 percpu_u64_get(&ca->io_done->sectors[rw][i]) << 9);
794 }
795 }
796
/*
 * Per-device sysfs read handler: identity, bucket layout, I/O accounting,
 * latency stats, and device-level options (dispatched by attribute name).
 */
SHOW(bch2_dev)
{
	struct bch_dev *ca = container_of(kobj, struct bch_dev, kobj);
	struct bch_fs *c = ca->fs;

	sysfs_printf(uuid,		"%pU\n", ca->uuid.b);

	sysfs_print(first_bucket,	ca->mi.first_bucket);
	sysfs_print(nbuckets,		ca->mi.nbuckets);

	if (attr == &sysfs_label) {
		/* group 0 means no label; stored index is group + 1 */
		if (ca->mi.group)
			bch2_disk_path_to_text(out, c, ca->mi.group - 1);
		prt_char(out, '\n');
	}

	if (attr == &sysfs_has_data) {
		prt_bitflags(out, __bch2_data_types, bch2_dev_has_data(c, ca));
		prt_char(out, '\n');
	}

	if (attr == &sysfs_io_done)
		dev_io_done_to_text(out, ca);

	if (attr == &sysfs_io_errors)
		bch2_dev_io_errors_to_text(out, ca);

	sysfs_print(io_latency_read,		atomic64_read(&ca->cur_latency[READ]));
	sysfs_print(io_latency_write,		atomic64_read(&ca->cur_latency[WRITE]));

	if (attr == &sysfs_io_latency_stats_read)
		bch2_time_stats_to_text(out, &ca->io_latency[READ].stats);

	if (attr == &sysfs_io_latency_stats_write)
		bch2_time_stats_to_text(out, &ca->io_latency[WRITE].stats);

	sysfs_printf(congested,			"%u%%",
		     clamp(atomic_read(&ca->congested), 0, CONGESTED_MAX)
		     * 100 / CONGESTED_MAX);

	if (attr == &sysfs_alloc_debug)
		bch2_dev_alloc_debug_to_text(out, ca);

	if (attr == &sysfs_open_buckets)
		bch2_open_buckets_to_text(out, c, ca);

	/* device options share this directory; match by attribute name: */
	int opt_id = bch2_opt_lookup(attr->name);
	if (opt_id >= 0)
		return sysfs_opt_show(c, ca, opt_id, out);

	if (attr == &sysfs_read_refs)
		enumerated_ref_to_text(out, &ca->io_ref[READ], bch2_dev_read_refs);

	if (attr == &sysfs_write_refs)
		enumerated_ref_to_text(out, &ca->io_ref[WRITE], bch2_dev_write_refs);

	return 0;
}
855
/*
 * Per-device sysfs write handler: set the disk group label, reset I/O error
 * counters, or store a device-level option (matched by attribute name).
 */
STORE(bch2_dev)
{
	struct bch_dev *ca = container_of(kobj, struct bch_dev, kobj);
	struct bch_fs *c = ca->fs;

	if (attr == &sysfs_label) {
		char *tmp;
		int ret;

		/* copy so we can strim() in place: */
		tmp = kstrdup(buf, GFP_KERNEL);
		if (!tmp)
			return -ENOMEM;

		ret = bch2_dev_group_set(c, ca, strim(tmp));
		kfree(tmp);
		if (ret)
			return ret;
	}

	if (attr == &sysfs_io_errors_reset)
		bch2_dev_errors_reset(ca);

	int opt_id = bch2_opt_lookup(attr->name);
	if (opt_id >= 0)
		return sysfs_opt_store(c, ca, opt_id, buf, size);

	return size;
}
SYSFS_OPS(bch2_dev);
885
/*
 * Static per-device attributes; option files are added dynamically via
 * bch2_opts_create_sysfs_files(..., OPT_DEVICE).
 */
struct attribute *bch2_dev_files[] = {
	&sysfs_uuid,
	&sysfs_first_bucket,
	&sysfs_nbuckets,

	/* settings: */
	&sysfs_label,

	&sysfs_has_data,
	&sysfs_io_done,
	&sysfs_io_errors,
	&sysfs_io_errors_reset,

	&sysfs_io_latency_read,
	&sysfs_io_latency_write,
	&sysfs_io_latency_stats_read,
	&sysfs_io_latency_stats_write,
	&sysfs_congested,

	/* debug: */
	&sysfs_alloc_debug,
	&sysfs_open_buckets,

	&sysfs_read_refs,
	&sysfs_write_refs,
	NULL
};
913
#endif /* NO_BCACHEFS_SYSFS */
915