/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHEFS_H
#define _BCACHEFS_H

/*
 * SOME HIGH LEVEL CODE DOCUMENTATION:
 *
 * Bcache mostly works with cache sets, cache devices, and backing devices.
 *
 * Support for multiple cache devices hasn't quite been finished off yet, but
 * it's about 95% plumbed through. A cache set and its cache devices are sort
 * of like an md raid array and its component devices. Most of the code doesn't
 * care about individual cache devices; the main abstraction is the cache set.
 *
 * Support for multiple cache devices is intended to give us the ability to
 * mirror dirty cached data and metadata, without mirroring clean cached data.
 *
 * Backing devices are different, in that they have a lifetime independent of a
 * cache set. When you register a newly formatted backing device it'll come up
 * in passthrough mode, and then you can attach a backing device to and detach
 * it from a cache set at runtime - while it's mounted and in use. Detaching
 * implicitly invalidates any cached data for that backing device.
 *
 * A cache set can have multiple (many) backing devices attached to it.
 *
 * There are also flash-only volumes - this is the reason for the distinction
 * between struct cached_dev and struct bcache_device. A flash-only volume
 * works much like a bcache device that has a backing device, except the
 * "cached" data is always dirty. The end result is that we get thin
 * provisioning with very little additional code.
 *
 * Flash-only volumes work but they're not production ready because the moving
 * garbage collector needs more work. More on that later.
 *
 * BUCKETS/ALLOCATION:
 *
 * Bcache is primarily designed for caching, which means that in normal
 * operation all of our available space will be allocated. Thus, we need an
 * efficient way of deleting things from the cache so we can write new things
 * to it.
 *
 * To do this, we first divide the cache device up into buckets. A bucket is
 * the unit of allocation; they're typically around 1MB - anywhere from 128k
 * to 2M+ works efficiently.
 *
 * Each bucket has a 16 bit priority and an 8 bit generation associated with
 * it. The gens and priorities for all the buckets are stored contiguously and
 * packed on disk (in a linked list of buckets - aside from the superblock, all
 * of bcache's metadata is stored in buckets).
 *
 * The priority is used to implement an LRU. We reset a bucket's priority when
 * we allocate it or on a cache hit, and every so often we decrement the
 * priority of each bucket. It could be used to implement something more
 * sophisticated, if anyone ever gets around to it.
 *
 * The generation is used for invalidating buckets. Each pointer also has an 8
 * bit generation embedded in it; for a pointer to be considered valid, its gen
 * must match the gen of the bucket it points into. Thus, to reuse a bucket all
 * we have to do is increment its gen (and write its new gen to disk; we batch
 * this up).
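 *
 * (A rough illustration of the validity check, with invented names - the real
 * helpers live in the bucket/extent code:
 *
 *	bool ptr_valid(struct bucket *b, const struct bch_extent_ptr *ptr)
 *	{
 *		return ptr->gen == b->gen;
 *	}
 *
 * so invalidating every pointer into a bucket is a single 8 bit increment,
 * with no index walk required.)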
 *
 * Bcache is entirely COW - we never write twice to a bucket, even buckets that
 * contain metadata (including btree nodes).
 *
 * THE BTREE:
 *
 * Bcache is in large part designed around the btree.
 *
 * At a high level, the btree is just an index of key -> ptr tuples.
 *
 * Keys represent extents, and thus have a size field. Keys also have a
 * variable number of pointers attached to them (potentially zero, which is
 * handy for invalidating the cache).
 *
 * The key itself is an inode:offset pair. The inode number corresponds to a
 * backing device or a flash-only volume. The offset is the ending offset of
 * the extent within the inode - not the starting offset; this makes lookups
 * slightly more convenient.
 *
 * Pointers contain the cache device id, the offset on that device, and an 8
 * bit generation number. More on the gen later.
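 *
 * (Worked example: a cached extent covering sectors 8 through 15 of inode 42
 * is indexed under key 42:16 - inode 42, end offset 16 - with size 8, plus
 * one or more device:offset:gen pointers to the cached copy. A read at
 * offset 10 searches for the first key with offset greater than 10, finds
 * 42:16, and since that key's start offset is 16 - 8 = 8 <= 10 the extent
 * covers the requested sector.)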
 *
 * Index lookups are not fully abstracted - cache lookups in particular are
 * still somewhat mixed in with the btree code, but things are headed in that
 * direction.
 *
 * Updates are fairly well abstracted, though. There are two different ways of
 * updating the btree: insert and replace.
 *
 * BTREE_INSERT will just take a list of keys and insert them into the btree -
 * overwriting (possibly only partially) any extents they overlap with. This is
 * used to update the index after a write.
 *
 * BTREE_REPLACE is really cmpxchg(); it inserts a key into the btree iff it is
 * overwriting a key that matches another given key. This is used for inserting
 * data into the cache after a cache miss, for background writeback, and for
 * the moving garbage collector.
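 *
 * (A sketch of why replace behaves like cmpxchg, with invented names; filling
 * the cache after a miss looks conceptually like:
 *
 *	old = btree_lookup(inode, offset);
 *	data = read_from_backing_device(inode, offset);
 *	btree_replace(old, new_key_pointing_at(data));
 *
 * If a foreground write updated that range while the miss was in flight, @old
 * no longer matches what's in the index, the insert is dropped, and we never
 * clobber newer data with stale data read from the backing device.)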
 *
 * There is no "delete" operation; deleting things from the index is
 * accomplished either by invalidating pointers (by incrementing a bucket's
 * gen) or by inserting a key with 0 pointers - which will overwrite anything
 * previously present at that location in the index.
 *
 * This means that there are always stale/invalid keys in the btree. They're
 * filtered out by the code that iterates through a btree node, and removed
 * when a btree node is rewritten.
 *
 * BTREE NODES:
 *
 * Our unit of allocation is a bucket, and we can't arbitrarily allocate and
 * free smaller than a bucket - so, that's how big our btree nodes are.
 *
 * (If buckets are really big we'll only use part of the bucket for a btree
 * node - no less than 1/4th - but a bucket still contains no more than a
 * single btree node. I'd actually like to change this, but for now we rely on
 * the bucket's gen for deleting btree nodes when we rewrite/split a node.)
 *
 * Anyway, btree nodes are big - big enough to be inefficient with a textbook
 * btree implementation.
 *
 * The way this is solved is that btree nodes are internally log structured; we
 * can append new keys to an existing btree node without rewriting it. This
 * means each set of keys we write is sorted, but the node is not.
 *
 * We maintain this log structure in memory - keeping 1MB of keys sorted would
 * be expensive, and we have to distinguish between the keys we have written
 * and the keys we haven't. So to do a lookup in a btree node, we have to
 * search each sorted set. But we do merge written sets together lazily, so
 * the cost of these extra searches is quite low (normally most of the keys in
 * a btree node will be in one big set, and then there'll be one or two sets
 * that are much smaller).
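 *
 * (A minimal sketch of a node lookup under this scheme, again with invented
 * names:
 *
 *	for_each_sorted_set(b, set)
 *		candidate = best_of(candidate, binary_search(set, search_key));
 *
 * i.e. one binary search per sorted set, with the lazy resort keeping the
 * number of sets - and thus the extra searches - small.)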
 *
 * This log structure makes bcache's btree more of a hybrid between a
 * conventional btree and a compacting data structure, with some of the
 * advantages of both.
 *
 * GARBAGE COLLECTION:
 *
 * We can't just invalidate any bucket - it might contain dirty data or
 * metadata. If it once contained dirty data, other writes might overwrite it
 * later, leaving no valid pointers into that bucket in the index.
 *
 * Thus, the primary purpose of garbage collection is to find buckets to reuse.
 * It also counts how much valid data each bucket currently contains, so that
 * allocation can reuse buckets sooner when they've been mostly overwritten.
 *
 * It also does some things that are really internal to the btree
 * implementation. If a btree node contains pointers that are stale by more
 * than some threshold, it rewrites the btree node to avoid the bucket's
 * generation wrapping around. It also merges adjacent btree nodes if they're
 * empty enough.
 *
 * THE JOURNAL:
 *
 * Bcache's journal is not necessary for consistency; we always strictly order
 * metadata writes so that the btree and everything else is consistent on disk
 * in the event of an unclean shutdown, and in fact bcache had writeback
 * caching (with recovery from unclean shutdown) before journalling was
 * implemented.
 *
 * Rather, the journal is purely a performance optimization; we can't complete
 * a write until we've updated the index on disk, otherwise the cache would be
 * inconsistent in the event of an unclean shutdown. This means that without
 * the journal, on random write workloads we constantly have to update all the
 * leaf nodes in the btree, and those writes will be mostly empty (appending
 * at most a few keys each) - highly inefficient in terms of the amount of
 * metadata written, and it puts more strain on the various btree
 * resorting/compacting code.
 *
 * The journal is just a log of keys we've inserted; on startup we just
 * reinsert all the keys in the open journal entries. That means that when
 * we're updating a node in the btree, we can wait until a 4k block of keys
 * fills up before writing them out.
 *
 * For simplicity, we only journal updates to leaf nodes; updates to parent
 * nodes are rare enough (since our leaf nodes are huge) that it wasn't worth
 * the complexity to deal with journalling them (in particular, journal replay)
 * - updates to non leaf nodes just happen synchronously (see btree_split()).
 */
178
179 #undef pr_fmt
180 #ifdef __KERNEL__
181 #define pr_fmt(fmt) "bcachefs: %s() " fmt "\n", __func__
182 #else
183 #define pr_fmt(fmt) "%s() " fmt "\n", __func__
184 #endif
185
186 #ifdef CONFIG_BCACHEFS_DEBUG
187 #define ENUMERATED_REF_DEBUG
188 #endif
189
190 #ifndef dynamic_fault
191 #define dynamic_fault(...) 0
192 #endif
193
194 #define race_fault(...) dynamic_fault("bcachefs:race")
195
196 #include <linux/backing-dev-defs.h>
197 #include <linux/bug.h>
198 #include <linux/bio.h>
199 #include <linux/closure.h>
200 #include <linux/kobject.h>
201 #include <linux/list.h>
202 #include <linux/math64.h>
203 #include <linux/mutex.h>
204 #include <linux/percpu-refcount.h>
205 #include <linux/percpu-rwsem.h>
206 #include <linux/refcount.h>
207 #include <linux/rhashtable.h>
208 #include <linux/rwsem.h>
209 #include <linux/semaphore.h>
210 #include <linux/seqlock.h>
211 #include <linux/shrinker.h>
212 #include <linux/srcu.h>
213 #include <linux/types.h>
214 #include <linux/workqueue.h>
215 #include <linux/zstd.h>
216 #include <linux/unicode.h>
217
218 #include "bcachefs_format.h"
219 #include "btree_journal_iter_types.h"
220 #include "disk_accounting_types.h"
221 #include "errcode.h"
222 #include "fast_list.h"
223 #include "fifo.h"
224 #include "nocow_locking_types.h"
225 #include "opts.h"
226 #include "sb-errors_types.h"
227 #include "seqmutex.h"
228 #include "snapshot_types.h"
229 #include "time_stats.h"
230 #include "util.h"
231
232 #include "alloc_types.h"
233 #include "async_objs_types.h"
234 #include "btree_gc_types.h"
235 #include "btree_types.h"
236 #include "btree_node_scan_types.h"
237 #include "btree_write_buffer_types.h"
238 #include "buckets_types.h"
239 #include "buckets_waiting_for_journal_types.h"
240 #include "clock_types.h"
241 #include "disk_groups_types.h"
242 #include "ec_types.h"
243 #include "enumerated_ref_types.h"
244 #include "journal_types.h"
245 #include "keylist_types.h"
246 #include "quota_types.h"
247 #include "rebalance_types.h"
248 #include "recovery_passes_types.h"
249 #include "replicas_types.h"
250 #include "sb-members_types.h"
251 #include "subvolume_types.h"
252 #include "super_types.h"
253 #include "thread_with_file_types.h"
254
255 #include "trace.h"
256
257 #define count_event(_c, _name) this_cpu_inc((_c)->counters[BCH_COUNTER_##_name])
258
259 #define trace_and_count(_c, _name, ...) \
260 do { \
261 count_event(_c, _name); \
262 trace_##_name(__VA_ARGS__); \
263 } while (0)
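
/*
 * Example usage - the event name is illustrative; any name that appears both
 * in the counters list (BCH_COUNTER_*) and as a tracepoint works the same way:
 *
 *	trace_and_count(c, btree_node_split, trans, b);
 *
 * which increments the per-cpu counter and emits the tracepoint in one step.
 */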
264
265 #define bch2_fs_init_fault(name) \
266 dynamic_fault("bcachefs:bch_fs_init:" name)
267 #define bch2_meta_read_fault(name) \
268 dynamic_fault("bcachefs:meta:read:" name)
269 #define bch2_meta_write_fault(name) \
270 dynamic_fault("bcachefs:meta:write:" name)
271
272 #ifdef __KERNEL__
273 #define BCACHEFS_LOG_PREFIX
274 #endif
275
276 #ifdef BCACHEFS_LOG_PREFIX
277
278 #define bch2_log_msg(_c, fmt) "bcachefs (%s): " fmt, ((_c)->name)
279 #define bch2_fmt_dev(_ca, fmt) "bcachefs (%s): " fmt "\n", ((_ca)->name)
280 #define bch2_fmt_dev_offset(_ca, _offset, fmt) "bcachefs (%s sector %llu): " fmt "\n", ((_ca)->name), (_offset)
281 #define bch2_fmt_inum(_c, _inum, fmt) "bcachefs (%s inum %llu): " fmt "\n", ((_c)->name), (_inum)
282 #define bch2_fmt_inum_offset(_c, _inum, _offset, fmt) \
283 "bcachefs (%s inum %llu offset %llu): " fmt "\n", ((_c)->name), (_inum), (_offset)
284
285 #else
286
287 #define bch2_log_msg(_c, fmt) fmt
288 #define bch2_fmt_dev(_ca, fmt) "%s: " fmt "\n", ((_ca)->name)
289 #define bch2_fmt_dev_offset(_ca, _offset, fmt) "%s sector %llu: " fmt "\n", ((_ca)->name), (_offset)
290 #define bch2_fmt_inum(_c, _inum, fmt) "inum %llu: " fmt "\n", (_inum)
291 #define bch2_fmt_inum_offset(_c, _inum, _offset, fmt) \
292 "inum %llu offset %llu: " fmt "\n", (_inum), (_offset)
293
294 #endif
295
296 #define bch2_fmt(_c, fmt) bch2_log_msg(_c, fmt "\n")
297
298 void bch2_print_str(struct bch_fs *, const char *, const char *);
299
300 __printf(2, 3)
301 void bch2_print_opts(struct bch_opts *, const char *, ...);
302
303 __printf(2, 3)
304 void __bch2_print(struct bch_fs *c, const char *fmt, ...);
305
306 #define maybe_dev_to_fs(_c) _Generic((_c), \
307 struct bch_dev *: ((struct bch_dev *) (_c))->fs, \
308 struct bch_fs *: (_c))
309
310 #define bch2_print(_c, ...) __bch2_print(maybe_dev_to_fs(_c), __VA_ARGS__)
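
/*
 * maybe_dev_to_fs() uses _Generic() so the same logging entry point accepts
 * either a filesystem or a device (illustrative only):
 *
 *	bch2_print(c, "...");	- struct bch_fs *, passed through as-is
 *	bch2_print(ca, "...");	- struct bch_dev *, routed via ca->fs
 */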
311
312 #define bch2_print_ratelimited(_c, ...) \
313 do { \
314 static DEFINE_RATELIMIT_STATE(_rs, \
315 DEFAULT_RATELIMIT_INTERVAL, \
316 DEFAULT_RATELIMIT_BURST); \
317 \
318 if (__ratelimit(&_rs)) \
319 bch2_print(_c, __VA_ARGS__); \
320 } while (0)
321
322 #define bch2_print_str_ratelimited(_c, ...) \
323 do { \
324 static DEFINE_RATELIMIT_STATE(_rs, \
325 DEFAULT_RATELIMIT_INTERVAL, \
326 DEFAULT_RATELIMIT_BURST); \
327 \
328 if (__ratelimit(&_rs)) \
329 bch2_print_str(_c, __VA_ARGS__); \
330 } while (0)
331
332 #define bch_info(c, fmt, ...) \
333 bch2_print(c, KERN_INFO bch2_fmt(c, fmt), ##__VA_ARGS__)
334 #define bch_info_ratelimited(c, fmt, ...) \
335 bch2_print_ratelimited(c, KERN_INFO bch2_fmt(c, fmt), ##__VA_ARGS__)
336 #define bch_notice(c, fmt, ...) \
337 bch2_print(c, KERN_NOTICE bch2_fmt(c, fmt), ##__VA_ARGS__)
338 #define bch_warn(c, fmt, ...) \
339 bch2_print(c, KERN_WARNING bch2_fmt(c, fmt), ##__VA_ARGS__)
340 #define bch_warn_ratelimited(c, fmt, ...) \
341 bch2_print_ratelimited(c, KERN_WARNING bch2_fmt(c, fmt), ##__VA_ARGS__)
342
343 #define bch_err(c, fmt, ...) \
344 bch2_print(c, KERN_ERR bch2_fmt(c, fmt), ##__VA_ARGS__)
345 #define bch_err_dev(ca, fmt, ...) \
346 bch2_print(c, KERN_ERR bch2_fmt_dev(ca, fmt), ##__VA_ARGS__)
347 #define bch_err_dev_offset(ca, _offset, fmt, ...) \
348 bch2_print(c, KERN_ERR bch2_fmt_dev_offset(ca, _offset, fmt), ##__VA_ARGS__)
349 #define bch_err_inum(c, _inum, fmt, ...) \
350 bch2_print(c, KERN_ERR bch2_fmt_inum(c, _inum, fmt), ##__VA_ARGS__)
351 #define bch_err_inum_offset(c, _inum, _offset, fmt, ...) \
352 bch2_print(c, KERN_ERR bch2_fmt_inum_offset(c, _inum, _offset, fmt), ##__VA_ARGS__)
353
354 #define bch_err_ratelimited(c, fmt, ...) \
355 bch2_print_ratelimited(c, KERN_ERR bch2_fmt(c, fmt), ##__VA_ARGS__)
356 #define bch_err_dev_ratelimited(ca, fmt, ...) \
357 bch2_print_ratelimited(ca, KERN_ERR bch2_fmt_dev(ca, fmt), ##__VA_ARGS__)
358 #define bch_err_dev_offset_ratelimited(ca, _offset, fmt, ...) \
359 bch2_print_ratelimited(ca, KERN_ERR bch2_fmt_dev_offset(ca, _offset, fmt), ##__VA_ARGS__)
360 #define bch_err_inum_ratelimited(c, _inum, fmt, ...) \
361 bch2_print_ratelimited(c, KERN_ERR bch2_fmt_inum(c, _inum, fmt), ##__VA_ARGS__)
362 #define bch_err_inum_offset_ratelimited(c, _inum, _offset, fmt, ...) \
363 bch2_print_ratelimited(c, KERN_ERR bch2_fmt_inum_offset(c, _inum, _offset, fmt), ##__VA_ARGS__)
364
static inline bool should_print_err(int err)
{
	return err && !bch2_err_matches(err, BCH_ERR_transaction_restart);
}
369
370 #define bch_err_fn(_c, _ret) \
371 do { \
372 if (should_print_err(_ret)) \
373 bch_err(_c, "%s(): error %s", __func__, bch2_err_str(_ret));\
374 } while (0)
375
376 #define bch_err_fn_ratelimited(_c, _ret) \
377 do { \
378 if (should_print_err(_ret)) \
379 bch_err_ratelimited(_c, "%s(): error %s", __func__, bch2_err_str(_ret));\
380 } while (0)
381
382 #define bch_err_msg(_c, _ret, _msg, ...) \
383 do { \
384 if (should_print_err(_ret)) \
385 bch_err(_c, "%s(): error " _msg " %s", __func__, \
386 ##__VA_ARGS__, bch2_err_str(_ret)); \
387 } while (0)
388
389 #define bch_verbose(c, fmt, ...) \
390 do { \
391 if ((c)->opts.verbose) \
392 bch_info(c, fmt, ##__VA_ARGS__); \
393 } while (0)
394
395 #define bch_verbose_ratelimited(c, fmt, ...) \
396 do { \
397 if ((c)->opts.verbose) \
398 bch_info_ratelimited(c, fmt, ##__VA_ARGS__); \
399 } while (0)
400
401 #define pr_verbose_init(opts, fmt, ...) \
402 do { \
403 if (opt_get(opts, verbose)) \
404 pr_info(fmt, ##__VA_ARGS__); \
405 } while (0)
406
static inline int __bch2_err_trace(struct bch_fs *c, int err)
{
	trace_error_throw(c, err, _THIS_IP_);
	return err;
}

#define bch_err_throw(_c, _err) __bch2_err_trace(_c, -BCH_ERR_##_err)
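
/*
 * Typical use (error name for illustration only): rather than returning a bare
 * -BCH_ERR_* code, throw sites do
 *
 *	return bch_err_throw(c, ENOSPC_bucket_alloc);
 *
 * so the origin of the error is also recorded via the error_throw tracepoint.
 */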
414
415 /* Parameters that are useful for debugging, but should always be compiled in: */
416 #define BCH_DEBUG_PARAMS_ALWAYS() \
417 BCH_DEBUG_PARAM(key_merging_disabled, \
418 "Disables merging of extents") \
419 BCH_DEBUG_PARAM(btree_node_merging_disabled, \
420 "Disables merging of btree nodes") \
421 BCH_DEBUG_PARAM(btree_gc_always_rewrite, \
422 "Causes mark and sweep to compact and rewrite every " \
423 "btree node it traverses") \
424 BCH_DEBUG_PARAM(btree_gc_rewrite_disabled, \
425 "Disables rewriting of btree nodes during mark and sweep")\
426 BCH_DEBUG_PARAM(btree_shrinker_disabled, \
427 "Disables the shrinker callback for the btree node cache")\
428 BCH_DEBUG_PARAM(verify_btree_ondisk, \
429 "Reread btree nodes at various points to verify the " \
430 "mergesort in the read path against modifications " \
431 "done in memory") \
432 BCH_DEBUG_PARAM(verify_all_btree_replicas, \
433 "When reading btree nodes, read all replicas and " \
434 "compare them") \
435 BCH_DEBUG_PARAM(backpointers_no_use_write_buffer, \
436 "Don't use the write buffer for backpointers, enabling "\
437 "extra runtime checks") \
438 BCH_DEBUG_PARAM(debug_check_btree_locking, \
439 "Enable additional asserts for btree locking") \
440 BCH_DEBUG_PARAM(debug_check_iterators, \
441 "Enables extra verification for btree iterators") \
442 BCH_DEBUG_PARAM(debug_check_bset_lookups, \
443 "Enables extra verification for bset lookups") \
444 BCH_DEBUG_PARAM(debug_check_btree_accounting, \
445 "Verify btree accounting for keys within a node") \
446 BCH_DEBUG_PARAM(debug_check_bkey_unpack, \
447 "Enables extra verification for bkey unpack")
448
449 /* Parameters that should only be compiled in debug mode: */
450 #define BCH_DEBUG_PARAMS_DEBUG() \
451 BCH_DEBUG_PARAM(journal_seq_verify, \
452 "Store the journal sequence number in the version " \
453 "number of every btree key, and verify that btree " \
454 "update ordering is preserved during recovery") \
455 BCH_DEBUG_PARAM(inject_invalid_keys, \
456 "Store the journal sequence number in the version " \
457 "number of every btree key, and verify that btree " \
458 "update ordering is preserved during recovery") \
459 BCH_DEBUG_PARAM(test_alloc_startup, \
460 "Force allocator startup to use the slowpath where it" \
461 "can't find enough free buckets without invalidating" \
462 "cached data") \
463 BCH_DEBUG_PARAM(force_reconstruct_read, \
464 "Force reads to use the reconstruct path, when reading" \
465 "from erasure coded extents") \
466 BCH_DEBUG_PARAM(test_restart_gc, \
467 "Test restarting mark and sweep gc when bucket gens change")
468
469 #define BCH_DEBUG_PARAMS_ALL() BCH_DEBUG_PARAMS_ALWAYS() BCH_DEBUG_PARAMS_DEBUG()
470
471 #ifdef CONFIG_BCACHEFS_DEBUG
472 #define BCH_DEBUG_PARAMS() BCH_DEBUG_PARAMS_ALL()
473 #else
474 #define BCH_DEBUG_PARAMS() BCH_DEBUG_PARAMS_ALWAYS()
475 #endif
476
477 #define BCH_DEBUG_PARAM(name, description) extern struct static_key_false bch2_##name;
478 BCH_DEBUG_PARAMS_ALL()
479 #undef BCH_DEBUG_PARAM
480
481 #define BCH_TIME_STATS() \
482 x(btree_node_mem_alloc) \
483 x(btree_node_split) \
484 x(btree_node_compact) \
485 x(btree_node_merge) \
486 x(btree_node_sort) \
487 x(btree_node_get) \
488 x(btree_node_read) \
489 x(btree_node_read_done) \
490 x(btree_node_write) \
491 x(btree_interior_update_foreground) \
492 x(btree_interior_update_total) \
493 x(btree_gc) \
494 x(data_write) \
495 x(data_write_to_submit) \
496 x(data_write_to_queue) \
497 x(data_write_to_btree_update) \
498 x(data_write_btree_update) \
499 x(data_read) \
500 x(data_promote) \
501 x(journal_flush_write) \
502 x(journal_noflush_write) \
503 x(journal_flush_seq) \
504 x(blocked_journal_low_on_space) \
505 x(blocked_journal_low_on_pin) \
506 x(blocked_journal_max_in_flight) \
507 x(blocked_journal_max_open) \
508 x(blocked_key_cache_flush) \
509 x(blocked_allocate) \
510 x(blocked_allocate_open_bucket) \
511 x(blocked_write_buffer_full) \
512 x(nocow_lock_contended)
513
514 enum bch_time_stats {
515 #define x(name) BCH_TIME_##name,
516 BCH_TIME_STATS()
517 #undef x
518 BCH_TIME_STAT_NR
519 };
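
/*
 * The x-macro above expands to one enumerator per entry, i.e. roughly:
 *
 *	enum bch_time_stats {
 *		BCH_TIME_btree_node_mem_alloc,
 *		BCH_TIME_btree_node_split,
 *		...
 *		BCH_TIME_nocow_lock_contended,
 *		BCH_TIME_STAT_NR
 *	};
 *
 * and the same list indexes the times[] array in struct bch_fs, so names and
 * slots stay in sync automatically.
 */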
520
521 /* Number of nodes btree coalesce will try to coalesce at once */
522 #define GC_MERGE_NODES 4U
523
524 /* Maximum number of nodes we might need to allocate atomically: */
525 #define BTREE_RESERVE_MAX (BTREE_MAX_DEPTH + (BTREE_MAX_DEPTH - 1))
526
527 /* Size of the freelist we allocate btree nodes from: */
528 #define BTREE_NODE_RESERVE (BTREE_RESERVE_MAX * 4)
529
530 #define BTREE_NODE_OPEN_BUCKET_RESERVE (BTREE_RESERVE_MAX * BCH_REPLICAS_MAX)
531
532 struct btree;
533
534 struct io_count {
535 u64 sectors[2][BCH_DATA_NR];
536 };
537
538 struct discard_in_flight {
539 bool in_progress:1;
540 u64 bucket:63;
541 };
542
543 #define BCH_DEV_READ_REFS() \
544 x(bch2_online_devs) \
545 x(trans_mark_dev_sbs) \
546 x(read_fua_test) \
547 x(sb_field_resize) \
548 x(write_super) \
549 x(journal_read) \
550 x(fs_journal_alloc) \
551 x(fs_resize_on_mount) \
552 x(btree_node_read) \
553 x(btree_node_read_all_replicas) \
554 x(btree_node_scrub) \
555 x(btree_node_write) \
556 x(btree_node_scan) \
557 x(btree_verify_replicas) \
558 x(btree_node_ondisk_to_text) \
559 x(io_read) \
560 x(check_extent_checksums) \
561 x(ec_block)
562
563 enum bch_dev_read_ref {
564 #define x(n) BCH_DEV_READ_REF_##n,
565 BCH_DEV_READ_REFS()
566 #undef x
567 BCH_DEV_READ_REF_NR,
568 };
569
570 #define BCH_DEV_WRITE_REFS() \
571 x(journal_write) \
572 x(journal_do_discards) \
573 x(dev_do_discards) \
574 x(discard_one_bucket_fast) \
575 x(do_invalidates) \
576 x(nocow_flush) \
577 x(io_write) \
578 x(ec_block) \
579 x(ec_bucket_zero)
580
581 enum bch_dev_write_ref {
582 #define x(n) BCH_DEV_WRITE_REF_##n,
583 BCH_DEV_WRITE_REFS()
584 #undef x
585 BCH_DEV_WRITE_REF_NR,
586 };
587
588 struct bucket_bitmap {
589 unsigned long *buckets;
590 u64 nr;
591 struct mutex lock;
592 };
593
594 struct bch_dev {
595 struct kobject kobj;
596 #ifdef CONFIG_BCACHEFS_DEBUG
597 atomic_long_t ref;
598 bool dying;
599 unsigned long last_put;
600 #else
601 struct percpu_ref ref;
602 #endif
603 struct completion ref_completion;
604 struct enumerated_ref io_ref[2];
605
606 struct bch_fs *fs;
607
608 u8 dev_idx;
609 /*
610 * Cached version of this device's member info from superblock
611 * Committed by bch2_write_super() -> bch_fs_mi_update()
612 */
613 struct bch_member_cpu mi;
614 atomic64_t errors[BCH_MEMBER_ERROR_NR];
615 unsigned long write_errors_start;
616
617 __uuid_t uuid;
618 char name[BDEVNAME_SIZE];
619
620 struct bch_sb_handle disk_sb;
621 struct bch_sb *sb_read_scratch;
622 int sb_write_error;
623 dev_t dev;
624 atomic_t flush_seq;
625
626 struct bch_devs_mask self;
627
628 /*
629 * Buckets:
630 * Per-bucket arrays are protected by either rcu_read_lock or
631 * state_lock, for device resize.
632 */
633 GENRADIX(struct bucket) buckets_gc;
634 struct bucket_gens __rcu *bucket_gens;
635 u8 *oldest_gen;
636 unsigned long *buckets_nouse;
637
638 struct bucket_bitmap bucket_backpointer_mismatch;
639 struct bucket_bitmap bucket_backpointer_empty;
640
641 struct bch_dev_usage_full __percpu
642 *usage;
643
644 /* Allocator: */
645 u64 alloc_cursor[3];
646
647 unsigned nr_open_buckets;
648 unsigned nr_partial_buckets;
649 unsigned nr_btree_reserve;
650
651 struct work_struct invalidate_work;
652 struct work_struct discard_work;
653 struct mutex discard_buckets_in_flight_lock;
654 DARRAY(struct discard_in_flight) discard_buckets_in_flight;
655 struct work_struct discard_fast_work;
656
657 atomic64_t rebalance_work;
658
659 struct journal_device journal;
660 u64 prev_journal_sector;
661
662 struct work_struct io_error_work;
663
664 /* The rest of this all shows up in sysfs */
665 atomic64_t cur_latency[2];
666 struct bch2_time_stats_quantiles io_latency[2];
667
668 #define CONGESTED_MAX 1024
669 atomic_t congested;
670 u64 congested_last;
671
672 struct io_count __percpu *io_done;
673 };
674
675 /*
676 * initial_gc_unfixed
677 * error
678 * topology error
679 */
680
681 #define BCH_FS_FLAGS() \
682 x(new_fs) \
683 x(started) \
684 x(clean_recovery) \
685 x(btree_running) \
686 x(accounting_replay_done) \
687 x(may_go_rw) \
688 x(rw) \
689 x(rw_init_done) \
690 x(was_rw) \
691 x(stopping) \
692 x(emergency_ro) \
693 x(going_ro) \
694 x(write_disable_complete) \
695 x(clean_shutdown) \
696 x(in_recovery) \
697 x(in_fsck) \
698 x(initial_gc_unfixed) \
699 x(need_delete_dead_snapshots) \
700 x(error) \
701 x(topology_error) \
702 x(errors_fixed) \
703 x(errors_not_fixed) \
704 x(no_invalid_checks) \
705 x(discard_mount_opt_set) \
706
707 enum bch_fs_flags {
708 #define x(n) BCH_FS_##n,
709 BCH_FS_FLAGS()
710 #undef x
711 };
712
713 struct btree_debug {
714 unsigned id;
715 };
716
717 #define BCH_TRANSACTIONS_NR 128
718
719 struct btree_transaction_stats {
720 struct bch2_time_stats duration;
721 struct bch2_time_stats lock_hold_times;
722 struct mutex lock;
723 unsigned nr_max_paths;
724 unsigned max_mem;
725 #ifdef CONFIG_BCACHEFS_TRANS_KMALLOC_TRACE
726 darray_trans_kmalloc_trace trans_kmalloc_trace;
727 #endif
728 char *max_paths_text;
729 };
730
731 struct bch_fs_pcpu {
732 u64 sectors_available;
733 };
734
735 struct journal_seq_blacklist_table {
736 size_t nr;
737 struct journal_seq_blacklist_table_entry {
738 u64 start;
739 u64 end;
740 bool dirty;
741 } entries[];
742 };
743
744 struct btree_trans_buf {
745 struct btree_trans *trans;
746 };
747
748 #define BCH_WRITE_REFS() \
749 x(journal) \
750 x(trans) \
751 x(write) \
752 x(promote) \
753 x(node_rewrite) \
754 x(stripe_create) \
755 x(stripe_delete) \
756 x(reflink) \
757 x(fallocate) \
758 x(fsync) \
759 x(dio_write) \
760 x(discard) \
761 x(discard_fast) \
762 x(check_discard_freespace_key) \
763 x(invalidate) \
764 x(delete_dead_snapshots) \
765 x(gc_gens) \
766 x(snapshot_delete_pagecache) \
767 x(sysfs) \
768 x(btree_write_buffer) \
769 x(btree_node_scrub) \
770 x(async_recovery_passes) \
771 x(ioctl_data)
772
773 enum bch_write_ref {
774 #define x(n) BCH_WRITE_REF_##n,
775 BCH_WRITE_REFS()
776 #undef x
777 BCH_WRITE_REF_NR,
778 };
779
780 #define BCH_FS_DEFAULT_UTF8_ENCODING UNICODE_AGE(12, 1, 0)
781
782 struct bch_fs {
783 struct closure cl;
784
785 struct list_head list;
786 struct kobject kobj;
787 struct kobject counters_kobj;
788 struct kobject internal;
789 struct kobject opts_dir;
790 struct kobject time_stats;
791 unsigned long flags;
792
793 int minor;
794 struct device *chardev;
795 struct super_block *vfs_sb;
796 dev_t dev;
797 char name[40];
798 struct stdio_redirect *stdio;
799 struct task_struct *stdio_filter;
800
801 /* ro/rw, add/remove/resize devices: */
802 struct rw_semaphore state_lock;
803
804 /* Counts outstanding writes, for clean transition to read-only */
805 struct enumerated_ref writes;
806 /*
807 * Certain operations are only allowed in single threaded mode, during
808 * recovery, and we want to assert that this is the case:
809 */
810 struct task_struct *recovery_task;
811
	/*
	 * Analogous to c->writes, for asynchronous ops that don't necessarily
	 * need fs to be read-write
	 */
816 refcount_t ro_ref;
817 wait_queue_head_t ro_ref_wait;
818
819 struct work_struct read_only_work;
820
821 struct bch_dev __rcu *devs[BCH_SB_MEMBERS_MAX];
822
823 struct bch_accounting_mem accounting;
824
825 struct bch_replicas_cpu replicas;
826 struct bch_replicas_cpu replicas_gc;
827 struct mutex replicas_gc_lock;
828
829 struct journal_entry_res btree_root_journal_res;
830 struct journal_entry_res clock_journal_res;
831
832 struct bch_disk_groups_cpu __rcu *disk_groups;
833
834 struct bch_opts opts;
835
836 /* Updated by bch2_sb_update():*/
837 struct {
838 __uuid_t uuid;
839 __uuid_t user_uuid;
840
841 u16 version;
842 u16 version_incompat;
843 u16 version_incompat_allowed;
844 u16 version_min;
845 u16 version_upgrade_complete;
846
847 u8 nr_devices;
848 u8 clean;
849 bool multi_device; /* true if we've ever had more than one device */
850
851 u8 encryption_type;
852
853 u64 time_base_lo;
854 u32 time_base_hi;
855 unsigned time_units_per_sec;
856 unsigned nsec_per_time_unit;
857 u64 features;
858 u64 compat;
859 u64 recovery_passes_required;
860 unsigned long errors_silent[BITS_TO_LONGS(BCH_FSCK_ERR_MAX)];
861 u64 btrees_lost_data;
862 } sb;
863 DARRAY(enum bcachefs_metadata_version)
864 incompat_versions_requested;
865
866 #ifdef CONFIG_UNICODE
867 struct unicode_map *cf_encoding;
868 #endif
869
870 struct bch_sb_handle disk_sb;
871
872 unsigned short block_bits; /* ilog2(block_size) */
873
874 u16 btree_foreground_merge_threshold;
875
876 struct closure sb_write;
877 struct mutex sb_lock;
878
879 /* snapshot.c: */
880 struct snapshot_table __rcu *snapshots;
881 struct mutex snapshot_table_lock;
882 struct rw_semaphore snapshot_create_lock;
883
884 struct snapshot_delete snapshot_delete;
885 struct work_struct snapshot_wait_for_pagecache_and_delete_work;
886 snapshot_id_list snapshots_unlinked;
887 struct mutex snapshots_unlinked_lock;
888
889 /* BTREE CACHE */
890 struct bio_set btree_bio;
891 struct workqueue_struct *btree_read_complete_wq;
892 struct workqueue_struct *btree_write_submit_wq;
893
894 struct btree_root btree_roots_known[BTREE_ID_NR];
895 DARRAY(struct btree_root) btree_roots_extra;
896 struct mutex btree_root_lock;
897
898 struct btree_cache btree_cache;
899
	/*
	 * Cache of allocated btree nodes - if we allocate a btree node and
	 * don't use it, freeing it means that space can't be reused until
	 * going _all_ the way through the allocator (which exposes us to a
	 * livelock when allocation of btree reserves fails halfway through) -
	 * instead, we can stick them here:
	 */
907 struct btree_alloc btree_reserve_cache[BTREE_NODE_RESERVE * 2];
908 unsigned btree_reserve_cache_nr;
909 struct mutex btree_reserve_cache_lock;
910
911 mempool_t btree_interior_update_pool;
912 struct list_head btree_interior_update_list;
913 struct list_head btree_interior_updates_unwritten;
914 struct mutex btree_interior_update_lock;
915 struct closure_waitlist btree_interior_update_wait;
916
917 struct workqueue_struct *btree_interior_update_worker;
918 struct work_struct btree_interior_update_work;
919
920 struct workqueue_struct *btree_node_rewrite_worker;
921 struct list_head btree_node_rewrites;
922 struct list_head btree_node_rewrites_pending;
923 spinlock_t btree_node_rewrites_lock;
924 struct closure_waitlist btree_node_rewrites_wait;
925
926 /* btree_io.c: */
927 spinlock_t btree_write_error_lock;
928 struct btree_write_stats {
929 atomic64_t nr;
930 atomic64_t bytes;
931 } btree_write_stats[BTREE_WRITE_TYPE_NR];
932
933 /* btree_iter.c: */
934 struct seqmutex btree_trans_lock;
935 struct list_head btree_trans_list;
936 mempool_t btree_trans_pool;
937 mempool_t btree_trans_mem_pool;
938 struct btree_trans_buf __percpu *btree_trans_bufs;
939
940 struct srcu_struct btree_trans_barrier;
941 bool btree_trans_barrier_initialized;
942
943 struct btree_key_cache btree_key_cache;
944 unsigned btree_key_cache_btrees;
945
946 struct btree_write_buffer btree_write_buffer;
947
948 struct workqueue_struct *btree_update_wq;
949 struct workqueue_struct *btree_write_complete_wq;
950 /* copygc needs its own workqueue for index updates.. */
951 struct workqueue_struct *copygc_wq;
952 /*
953 * Use a dedicated wq for write ref holder tasks. Required to avoid
954 * dependency problems with other wq tasks that can block on ref
955 * draining, such as read-only transition.
956 */
957 struct workqueue_struct *write_ref_wq;
958
959 /* ALLOCATION */
960 struct bch_devs_mask online_devs;
961 struct bch_devs_mask rw_devs[BCH_DATA_NR];
962 unsigned long rw_devs_change_count;
963
964 u64 capacity; /* sectors */
965 u64 reserved; /* sectors */
966
967 /*
968 * When capacity _decreases_ (due to a disk being removed), we
969 * increment capacity_gen - this invalidates outstanding reservations
970 * and forces them to be revalidated
971 */
972 u32 capacity_gen;
973 unsigned bucket_size_max;
974
975 atomic64_t sectors_available;
976 struct mutex sectors_available_lock;
977
978 struct bch_fs_pcpu __percpu *pcpu;
979
980 struct percpu_rw_semaphore mark_lock;
981
982 seqcount_t usage_lock;
983 struct bch_fs_usage_base __percpu *usage;
984 u64 __percpu *online_reserved;
985
986 unsigned long allocator_last_stuck;
987
988 struct io_clock io_clock[2];
989
990 /* JOURNAL SEQ BLACKLIST */
991 struct journal_seq_blacklist_table *
992 journal_seq_blacklist_table;
993
994 /* ALLOCATOR */
995 spinlock_t freelist_lock;
996 struct closure_waitlist freelist_wait;
997
998 open_bucket_idx_t open_buckets_freelist;
999 open_bucket_idx_t open_buckets_nr_free;
1000 struct closure_waitlist open_buckets_wait;
1001 struct open_bucket open_buckets[OPEN_BUCKETS_COUNT];
1002 open_bucket_idx_t open_buckets_hash[OPEN_BUCKETS_COUNT];
1003
1004 open_bucket_idx_t open_buckets_partial[OPEN_BUCKETS_COUNT];
1005 open_bucket_idx_t open_buckets_partial_nr;
1006
1007 struct write_point btree_write_point;
1008 struct write_point rebalance_write_point;
1009
1010 struct write_point write_points[WRITE_POINT_MAX];
1011 struct hlist_head write_points_hash[WRITE_POINT_HASH_NR];
1012 struct mutex write_points_hash_lock;
1013 unsigned write_points_nr;
1014
1015 struct buckets_waiting_for_journal buckets_waiting_for_journal;
1016
1017 /* GARBAGE COLLECTION */
1018 struct work_struct gc_gens_work;
1019 unsigned long gc_count;
1020
1021 enum btree_id gc_gens_btree;
1022 struct bpos gc_gens_pos;
1023
1024 /*
1025 * Tracks GC's progress - everything in the range [ZERO_KEY..gc_cur_pos]
1026 * has been marked by GC.
1027 *
1028 * gc_cur_phase is a superset of btree_ids (BTREE_ID_extents etc.)
1029 *
1030 * Protected by gc_pos_lock. Only written to by GC thread, so GC thread
1031 * can read without a lock.
1032 */
1033 seqcount_t gc_pos_lock;
1034 struct gc_pos gc_pos;
1035
1036 /*
1037 * The allocation code needs gc_mark in struct bucket to be correct, but
1038 * it's not while a gc is in progress.
1039 */
1040 struct rw_semaphore gc_lock;
1041 struct mutex gc_gens_lock;
1042
1043 /* IO PATH */
1044 struct semaphore io_in_flight;
1045 struct bio_set bio_read;
1046 struct bio_set bio_read_split;
1047 struct bio_set bio_write;
1048 struct bio_set replica_set;
1049 struct mutex bio_bounce_pages_lock;
1050 mempool_t bio_bounce_pages;
1051 struct bucket_nocow_lock_table
1052 nocow_locks;
1053 struct rhashtable promote_table;
1054
1055 #ifdef CONFIG_BCACHEFS_ASYNC_OBJECT_LISTS
1056 struct async_obj_list async_objs[BCH_ASYNC_OBJ_NR];
1057 #endif
1058
1059 mempool_t compression_bounce[2];
1060 mempool_t compress_workspace[BCH_COMPRESSION_OPT_NR];
1061 size_t zstd_workspace_size;
1062
1063 struct bch_key chacha20_key;
1064 bool chacha20_key_set;
1065
1066 atomic64_t key_version;
1067
1068 mempool_t large_bkey_pool;
1069
1070 /* MOVE.C */
1071 struct list_head moving_context_list;
1072 struct mutex moving_context_lock;
1073
1074 /* REBALANCE */
1075 struct bch_fs_rebalance rebalance;
1076
1077 /* COPYGC */
1078 struct task_struct *copygc_thread;
1079 struct write_point copygc_write_point;
1080 s64 copygc_wait_at;
1081 s64 copygc_wait;
1082 bool copygc_running;
1083 wait_queue_head_t copygc_running_wq;
1084
1085 /* STRIPES: */
1086 GENRADIX(struct gc_stripe) gc_stripes;
1087
1088 struct hlist_head ec_stripes_new[32];
1089 spinlock_t ec_stripes_new_lock;
1090
1091 /* ERASURE CODING */
1092 struct list_head ec_stripe_head_list;
1093 struct mutex ec_stripe_head_lock;
1094
1095 struct list_head ec_stripe_new_list;
1096 struct mutex ec_stripe_new_lock;
1097 wait_queue_head_t ec_stripe_new_wait;
1098
1099 struct work_struct ec_stripe_create_work;
1100 u64 ec_stripe_hint;
1101
1102 struct work_struct ec_stripe_delete_work;
1103
1104 struct bio_set ec_bioset;
1105
1106 /* REFLINK */
1107 reflink_gc_table reflink_gc_table;
1108 size_t reflink_gc_nr;
1109
1110 /* fs.c */
1111 struct list_head vfs_inodes_list;
1112 struct mutex vfs_inodes_lock;
1113 struct rhashtable vfs_inodes_table;
1114 struct rhltable vfs_inodes_by_inum_table;
1115
1116 /* VFS IO PATH - fs-io.c */
1117 struct bio_set writepage_bioset;
1118 struct bio_set dio_write_bioset;
1119 struct bio_set dio_read_bioset;
1120 struct bio_set nocow_flush_bioset;
1121
1122 /* QUOTAS */
1123 struct bch_memquota_type quotas[QTYP_NR];
1124
1125 /* RECOVERY */
1126 u64 journal_replay_seq_start;
1127 u64 journal_replay_seq_end;
1128 struct bch_fs_recovery recovery;
1129
1130 /* DEBUG JUNK */
1131 struct dentry *fs_debug_dir;
1132 struct dentry *btree_debug_dir;
1133 struct dentry *async_obj_dir;
1134 struct btree_debug btree_debug[BTREE_ID_NR];
1135 struct btree *verify_data;
1136 struct btree_node *verify_ondisk;
1137 struct mutex verify_lock;
1138
1139 /*
1140 * A btree node on disk could have too many bsets for an iterator to fit
1141 * on the stack - have to dynamically allocate them
1142 */
1143 mempool_t fill_iter;
1144
1145 mempool_t btree_bounce_pool;
1146
1147 struct journal journal;
1148 GENRADIX(struct journal_replay *) journal_entries;
1149 u64 journal_entries_base_seq;
1150 struct journal_keys journal_keys;
1151 struct list_head journal_iters;
1152
1153 struct find_btree_nodes found_btree_nodes;
1154
1155 u64 last_bucket_seq_cleanup;
1156
1157 u64 counters_on_mount[BCH_COUNTER_NR];
1158 u64 __percpu *counters;
1159
1160 struct bch2_time_stats times[BCH_TIME_STAT_NR];
1161
1162 struct btree_transaction_stats btree_transaction_stats[BCH_TRANSACTIONS_NR];
1163
1164 /* ERRORS */
1165 struct list_head fsck_error_msgs;
1166 struct mutex fsck_error_msgs_lock;
1167 bool fsck_alloc_msgs_err;
1168
1169 bch_sb_errors_cpu fsck_error_counts;
1170 struct mutex fsck_error_counts_lock;
1171 };
1172
extern struct wait_queue_head bch2_read_only_wait;

static inline bool bch2_ro_ref_tryget(struct bch_fs *c)
{
	if (test_bit(BCH_FS_stopping, &c->flags))
		return false;

	return refcount_inc_not_zero(&c->ro_ref);
}

static inline void bch2_ro_ref_put(struct bch_fs *c)
{
	if (refcount_dec_and_test(&c->ro_ref))
		wake_up(&c->ro_ref_wait);
}

static inline void bch2_set_ra_pages(struct bch_fs *c, unsigned ra_pages)
{
#ifndef NO_BCACHEFS_FS
	if (c->vfs_sb)
		c->vfs_sb->s_bdi->ra_pages = ra_pages;
#endif
}

static inline unsigned bucket_bytes(const struct bch_dev *ca)
{
	return ca->mi.bucket_size << 9;
}

static inline unsigned block_bytes(const struct bch_fs *c)
{
	return c->opts.block_size;
}

static inline unsigned block_sectors(const struct bch_fs *c)
{
	return c->opts.block_size >> 9;
}
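
/*
 * Sizes above are in 512-byte sectors, hence the shifts by 9. Worked example
 * (illustrative values only): with opts.block_size = 4096 bytes,
 * block_sectors() is 8; with mi.bucket_size = 1024 sectors, bucket_bytes()
 * is 512 KiB.
 */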

static inline bool btree_id_cached(const struct bch_fs *c, enum btree_id btree)
{
	return c->btree_key_cache_btrees & (1U << btree);
}

static inline struct timespec64 bch2_time_to_timespec(const struct bch_fs *c, s64 time)
{
	struct timespec64 t;
	s64 sec;
	s32 rem;

	time += c->sb.time_base_lo;

	sec = div_s64_rem(time, c->sb.time_units_per_sec, &rem);

	set_normalized_timespec64(&t, sec, rem * (s64)c->sb.nsec_per_time_unit);

	return t;
}

static inline s64 timespec_to_bch2_time(const struct bch_fs *c, struct timespec64 ts)
{
	return (ts.tv_sec * c->sb.time_units_per_sec +
		(int) ts.tv_nsec / c->sb.nsec_per_time_unit) - c->sb.time_base_lo;
}

static inline s64 bch2_current_time(const struct bch_fs *c)
{
	struct timespec64 now;

	ktime_get_coarse_real_ts64(&now);
	return timespec_to_bch2_time(c, now);
}
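
/*
 * Worked example of the time encoding (illustrative parameters only): with
 * time_units_per_sec = 1000, nsec_per_time_unit = 1000000 and time_base_lo = 0,
 * timespec_to_bch2_time() maps { .tv_sec = 2, .tv_nsec = 500000000 } to 2500,
 * and bch2_time_to_timespec() maps 2500 back to 2.5 seconds.
 */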

static inline u64 bch2_current_io_time(const struct bch_fs *c, int rw)
{
	return max(1ULL, (u64) atomic64_read(&c->io_clock[rw].now) & LRU_TIME_MAX);
}

static inline struct stdio_redirect *bch2_fs_stdio_redirect(struct bch_fs *c)
{
	struct stdio_redirect *stdio = c->stdio;

	if (c->stdio_filter && c->stdio_filter != current)
		stdio = NULL;
	return stdio;
}

static inline unsigned metadata_replicas_required(struct bch_fs *c)
{
	return min(c->opts.metadata_replicas,
		   c->opts.metadata_replicas_required);
}

static inline unsigned data_replicas_required(struct bch_fs *c)
{
	return min(c->opts.data_replicas,
		   c->opts.data_replicas_required);
}
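
/*
 * Roughly speaking: with metadata_replicas = 3 and metadata_replicas_required
 * = 2, writes aim for three copies but only two must reach stable storage
 * before we proceed; the min() also keeps a misconfigured "required > desired"
 * pair from demanding more copies than would ever be created.
 */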

#define BKEY_PADDED_ONSTACK(key, pad) \
	struct { struct bkey_i key; __u64 key ## _pad[pad]; }

/*
 * This is needed because discard is both a filesystem option and a device
 * option, and mount options are supposed to apply to that mount and not be
 * persisted, i.e. if it's set as a mount option we can't propagate it to the
 * device.
 */
static inline bool bch2_discard_opt_enabled(struct bch_fs *c, struct bch_dev *ca)
{
	return test_bit(BCH_FS_discard_mount_opt_set, &c->flags)
		? c->opts.discard
		: ca->mi.discard;
}

#endif /* _BCACHEFS_H */
