xref: /linux/fs/bcachefs/bcachefs.h (revision 2622f290417001b0440f4a48dc6978f5f1e12a56)
1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef _BCACHEFS_H
3 #define _BCACHEFS_H
4 
5 /*
6  * SOME HIGH LEVEL CODE DOCUMENTATION:
7  *
8  * Bcache mostly works with cache sets, cache devices, and backing devices.
9  *
10  * Support for multiple cache devices hasn't quite been finished off yet, but
11  * it's about 95% plumbed through. A cache set and its cache devices are sort of
12  * like an md raid array and its component devices. Most of the code doesn't care
13  * about individual cache devices; the main abstraction is the cache set.
14  *
15  * Support for multiple cache devices is intended to give us the ability to mirror
16  * dirty cached data and metadata, without mirroring clean cached data.
17  *
18  * Backing devices are different, in that they have a lifetime independent of a
19  * cache set. When you register a newly formatted backing device it'll come up
20  * in passthrough mode, and then you can attach it to and detach it from a cache
21  * set at runtime - while it's mounted and in use. Detaching implicitly
22  * invalidates any cached data for that backing device.
23  *
24  * A cache set can have multiple (many) backing devices attached to it.
25  *
26  * There's also flash only volumes - this is the reason for the distinction
27  * between struct cached_dev and struct bcache_device. A flash only volume
28  * works much like a bcache device that has a backing device, except the
29  * "cached" data is always dirty. The end result is that we get thin
30  * provisioning with very little additional code.
31  *
32  * Flash only volumes work but they're not production ready because the moving
33  * garbage collector needs more work. More on that later.
34  *
35  * BUCKETS/ALLOCATION:
36  *
37  * Bcache is primarily designed for caching, which means that in normal
38  * operation all of our available space will be allocated. Thus, we need an
39  * efficient way of deleting things from the cache so we can write new things to
40  * it.
41  *
42  * To do this, we first divide the cache device up into buckets. A bucket is the
43  * unit of allocation; they're typically around 1 MB - anywhere from 128 KB to 2 MB+
44  * works efficiently.
45  *
46  * Each bucket has a 16 bit priority, and an 8 bit generation associated with
47  * it. The gens and priorities for all the buckets are stored contiguously and
48  * packed on disk (in a linked list of buckets - aside from the superblock, all
49  * of bcache's metadata is stored in buckets).
50  *
51  * The priority is used to implement an LRU. We reset a bucket's priority when
52  * we allocate it or on a cache hit, and every so often we decrement the priority
53  * of each bucket. It could be used to implement something more sophisticated,
54  * if anyone ever gets around to it.
55  *
56  * The generation is used for invalidating buckets. Each pointer also has an 8
57  * bit generation embedded in it; for a pointer to be considered valid, its gen
58  * must match the gen of the bucket it points into.  Thus, to reuse a bucket all
59  * we have to do is increment its gen (and write its new gen to disk; we batch
60  * this up).
61  *
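 * To make that concrete, a minimal sketch (illustrative pseudo-C only - the
 * real pointer and bucket types live elsewhere in bcachefs):
 *
 *	bool ptr_valid(u8 ptr_gen, u8 bucket_gen)
 *	{
 *		return ptr_gen == bucket_gen;
 *	}
 *
 * and "freeing" a bucket is just bucket_gen++ (written out lazily, in
 * batches), which instantly makes every existing pointer into it stale.
 *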
62  * Bcache is entirely COW - we never write twice to a bucket, even buckets that
63  * contain metadata (including btree nodes).
64  *
65  * THE BTREE:
66  *
67  * Bcache is in large part designed around the btree.
68  *
69  * At a high level, the btree is just an index of key -> ptr tuples.
70  *
71  * Keys represent extents, and thus have a size field. Keys also have a variable
72  * number of pointers attached to them (potentially zero, which is handy for
73  * invalidating the cache).
74  *
75  * The key itself is an inode:offset pair. The inode number corresponds to a
76  * backing device or a flash only volume. The offset is the ending offset of the
77  * extent within the inode - not the starting offset; this makes lookups
78  * slightly more convenient.
79  *
80  * Pointers contain the cache device id, the offset on that device, and an 8 bit
81  * generation number. More on the gen later.
82  *
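 * Conceptually, then, a key looks something like this (field names are
 * illustrative only; the real encoding is the packed bkey/extent format in
 * bcachefs_format.h):
 *
 *	struct example_key {
 *		u64	inode;		backing device or flash only volume
 *		u64	offset;		END of the extent, in sectors
 *		u32	size;		so the extent covers [offset - size, offset)
 *		struct {
 *			u8	dev;	cache device id
 *			u64	offset;	offset on that device
 *			u8	gen;	must match the bucket's gen
 *		} ptrs[];		zero or more pointers
 *	};
 *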
83  * Index lookups are not fully abstracted - cache lookups in particular are
84  * still somewhat mixed in with the btree code, but things are headed in that
85  * direction.
86  *
87  * Updates are fairly well abstracted, though. There are two different ways of
88  * updating the btree; insert and replace.
89  *
90  * BTREE_INSERT will just take a list of keys and insert them into the btree -
91  * overwriting (possibly only partially) any extents they overlap with. This is
92  * used to update the index after a write.
93  *
94  * BTREE_REPLACE is really cmpxchg(); it inserts a key into the btree iff it is
95  * overwriting a key that matches another given key. This is used for inserting
96  * data into the cache after a cache miss, and for background writeback, and for
97  * the moving garbage collector.
98  *
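 * Roughly (hand-wavy pseudo-code, not the real update path):
 *
 *	BTREE_INSERT:	insert(btree, new);
 *
 *	BTREE_REPLACE:	if (index still contains old)
 *				insert(btree, new);
 *			else
 *				do nothing - someone else won the race
 *
 * which is why BTREE_REPLACE behaves like cmpxchg(): a cache miss insert or a
 * writeback/move is dropped if the extent it read was overwritten in the
 * meantime.
 *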
99  * There is no "delete" operation; deleting things from the index is
100  * accomplished either by invalidating pointers (by incrementing a bucket's
101  * gen) or by inserting a key with 0 pointers - which will overwrite anything
102  * previously present at that location in the index.
103  *
104  * This means that there are always stale/invalid keys in the btree. They're
105  * filtered out by the code that iterates through a btree node, and removed when
106  * a btree node is rewritten.
107  *
108  * BTREE NODES:
109  *
110  * Our unit of allocation is a bucket, and we can't arbitrarily allocate and
111  * free smaller than a bucket - so, that's how big our btree nodes are.
112  *
113  * (If buckets are really big we'll only use part of the bucket for a btree node
114  * - no less than 1/4th - but a bucket still contains no more than a single
115  * btree node. I'd actually like to change this, but for now we rely on the
116  * bucket's gen for deleting btree nodes when we rewrite/split a node.)
117  *
118  * Anyways, btree nodes are big - big enough to be inefficient with a textbook
119  * btree implementation.
120  *
121  * The way this is solved is that btree nodes are internally log structured; we
122  * can append new keys to an existing btree node without rewriting it. This
123  * means each set of keys we write is sorted, but the node is not.
124  *
125  * We maintain this log structure in memory - keeping 1 MB of keys sorted would
126  * be expensive, and we have to distinguish between the keys we have written and
127  * the keys we haven't. So to do a lookup in a btree node, we have to search
128  * each sorted set. But we do merge written sets together lazily, so the cost of
129  * these extra searches is quite low (normally most of the keys in a btree node
130  * will be in one big set, and then there'll be one or two sets that are much
131  * smaller).
132  *
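 * So a lookup within a node is conceptually (sketch only - the real code
 * searches packed bsets):
 *
 *	for (each sorted set in the node)
 *		binary search the set, letting newer sets override older ones
 *
 * i.e. one binary search per set, which stays cheap because all but one or
 * two of the sets are small.
 *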
133  * This log structure makes bcache's btree more of a hybrid between a
134  * conventional btree and a compacting data structure, with some of the
135  * advantages of both.
136  *
137  * GARBAGE COLLECTION:
138  *
139  * We can't just invalidate any bucket - it might contain dirty data or
140  * metadata. If it once contained dirty data, other writes might overwrite it
141  * later, leaving no valid pointers into that bucket in the index.
142  *
143  * Thus, the primary purpose of garbage collection is to find buckets to reuse.
144  * It also counts how much valid data each bucket currently contains, so that
145  * allocation can reuse buckets sooner when they've been mostly overwritten.
146  *
147  * It also does some things that are really internal to the btree
148  * implementation. If a btree node contains pointers that are stale by more than
149  * some threshold, it rewrites the btree node to avoid the bucket's generation
150  * wrapping around. It also merges adjacent btree nodes if they're empty enough.
151  *
152  * THE JOURNAL:
153  *
154  * Bcache's journal is not necessary for consistency; we always strictly
155  * order metadata writes so that the btree and everything else is consistent on
156  * disk in the event of an unclean shutdown, and in fact bcache had writeback
157  * caching (with recovery from unclean shutdown) before journalling was
158  * implemented.
159  *
160  * Rather, the journal is purely a performance optimization; we can't complete a
161  * write until we've updated the index on disk; otherwise the cache would be
162  * inconsistent in the event of an unclean shutdown. This means that without the
163  * journal, on random write workloads we constantly have to update all the leaf
164  * nodes in the btree, and those writes will be mostly empty (appending at most
165  * a few keys each) - highly inefficient in terms of the amount of metadata written,
166  * and it puts more strain on the various btree resorting/compacting code.
167  *
168  * The journal is just a log of keys we've inserted; on startup we just reinsert
169  * all the keys in the open journal entries. That means that when we're updating
170  * a node in the btree, we can wait until a 4k block of keys fills up before
171  * writing them out.
172  *
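 * Which makes replay conceptually trivial (sketch only):
 *
 *	for (each open journal entry, in order)
 *		for (each key in the entry)
 *			reinsert it into the btree
 *
 * Re-inserting a key whose btree update already made it to disk is
 * effectively a no-op, since the insert just overwrites what's already there.
 *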
173  * For simplicity, we only journal updates to leaf nodes; updates to parent
174  * nodes are rare enough (since our leaf nodes are huge) that it wasn't worth
175  * the complexity to deal with journalling them (in particular, journal replay)
176  * - updates to non leaf nodes just happen synchronously (see btree_split()).
177  */
178 
179 #undef pr_fmt
180 #ifdef __KERNEL__
181 #define pr_fmt(fmt) "bcachefs: %s() " fmt "\n", __func__
182 #else
183 #define pr_fmt(fmt) "%s() " fmt "\n", __func__
184 #endif
185 
186 #include <linux/backing-dev-defs.h>
187 #include <linux/bug.h>
188 #include <linux/bio.h>
189 #include <linux/closure.h>
190 #include <linux/kobject.h>
191 #include <linux/list.h>
192 #include <linux/math64.h>
193 #include <linux/mutex.h>
194 #include <linux/percpu-refcount.h>
195 #include <linux/percpu-rwsem.h>
196 #include <linux/refcount.h>
197 #include <linux/rhashtable.h>
198 #include <linux/rwsem.h>
199 #include <linux/semaphore.h>
200 #include <linux/seqlock.h>
201 #include <linux/shrinker.h>
202 #include <linux/srcu.h>
203 #include <linux/types.h>
204 #include <linux/workqueue.h>
205 #include <linux/zstd.h>
206 
207 #include "bcachefs_format.h"
208 #include "btree_journal_iter_types.h"
209 #include "disk_accounting_types.h"
210 #include "errcode.h"
211 #include "fifo.h"
212 #include "nocow_locking_types.h"
213 #include "opts.h"
214 #include "recovery_passes_types.h"
215 #include "sb-errors_types.h"
216 #include "seqmutex.h"
217 #include "time_stats.h"
218 #include "util.h"
219 
220 #ifdef CONFIG_BCACHEFS_DEBUG
221 #define BCH_WRITE_REF_DEBUG
222 #endif
223 
224 #ifndef dynamic_fault
225 #define dynamic_fault(...)		0
226 #endif
227 
228 #define race_fault(...)			dynamic_fault("bcachefs:race")
229 
230 #define count_event(_c, _name)	this_cpu_inc((_c)->counters[BCH_COUNTER_##_name])
231 
232 #define trace_and_count(_c, _name, ...)					\
233 do {									\
234 	count_event(_c, _name);						\
235 	trace_##_name(__VA_ARGS__);					\
236 } while (0)
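
/*
 * Usage sketch (the event name here is purely illustrative - the real trace
 * events and persistent counters are declared elsewhere):
 *
 *	trace_and_count(c, transaction_commit, trans, ip);
 *
 * i.e. a single call bumps the per-cpu counter and emits the tracepoint.
 */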
237 
238 #define bch2_fs_init_fault(name)					\
239 	dynamic_fault("bcachefs:bch_fs_init:" name)
240 #define bch2_meta_read_fault(name)					\
241 	 dynamic_fault("bcachefs:meta:read:" name)
242 #define bch2_meta_write_fault(name)					\
243 	 dynamic_fault("bcachefs:meta:write:" name)
244 
245 #ifdef __KERNEL__
246 #define BCACHEFS_LOG_PREFIX
247 #endif
248 
249 #ifdef BCACHEFS_LOG_PREFIX
250 
251 #define bch2_log_msg(_c, fmt)			"bcachefs (%s): " fmt, ((_c)->name)
252 #define bch2_fmt_dev(_ca, fmt)			"bcachefs (%s): " fmt "\n", ((_ca)->name)
253 #define bch2_fmt_dev_offset(_ca, _offset, fmt)	"bcachefs (%s sector %llu): " fmt "\n", ((_ca)->name), (_offset)
254 #define bch2_fmt_inum(_c, _inum, fmt)		"bcachefs (%s inum %llu): " fmt "\n", ((_c)->name), (_inum)
255 #define bch2_fmt_inum_offset(_c, _inum, _offset, fmt)			\
256 	 "bcachefs (%s inum %llu offset %llu): " fmt "\n", ((_c)->name), (_inum), (_offset)
257 
258 #else
259 
260 #define bch2_log_msg(_c, fmt)			fmt
261 #define bch2_fmt_dev(_ca, fmt)			"%s: " fmt "\n", ((_ca)->name)
262 #define bch2_fmt_dev_offset(_ca, _offset, fmt)	"%s sector %llu: " fmt "\n", ((_ca)->name), (_offset)
263 #define bch2_fmt_inum(_c, _inum, fmt)		"inum %llu: " fmt "\n", (_inum)
264 #define bch2_fmt_inum_offset(_c, _inum, _offset, fmt)				\
265 	 "inum %llu offset %llu: " fmt "\n", (_inum), (_offset)
266 
267 #endif
268 
269 #define bch2_fmt(_c, fmt)		bch2_log_msg(_c, fmt "\n")
270 
271 void bch2_print_str(struct bch_fs *, const char *);
272 
273 __printf(2, 3)
274 void bch2_print_opts(struct bch_opts *, const char *, ...);
275 
276 __printf(2, 3)
277 void __bch2_print(struct bch_fs *c, const char *fmt, ...);
278 
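/*
 * maybe_dev_to_fs() lets bch2_print() accept either a struct bch_dev * or a
 * struct bch_fs *: a device argument is resolved to its filesystem via
 * _Generic dispatch.
 */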
279 #define maybe_dev_to_fs(_c)	_Generic((_c),				\
280 	struct bch_dev *:	((struct bch_dev *) (_c))->fs,		\
281 	struct bch_fs *:	(_c))
282 
283 #define bch2_print(_c, ...) __bch2_print(maybe_dev_to_fs(_c), __VA_ARGS__)
284 
285 #define bch2_print_ratelimited(_c, ...)					\
286 do {									\
287 	static DEFINE_RATELIMIT_STATE(_rs,				\
288 				      DEFAULT_RATELIMIT_INTERVAL,	\
289 				      DEFAULT_RATELIMIT_BURST);		\
290 									\
291 	if (__ratelimit(&_rs))						\
292 		bch2_print(_c, __VA_ARGS__);				\
293 } while (0)
294 
295 #define bch_info(c, fmt, ...) \
296 	bch2_print(c, KERN_INFO bch2_fmt(c, fmt), ##__VA_ARGS__)
297 #define bch_info_ratelimited(c, fmt, ...) \
298 	bch2_print_ratelimited(c, KERN_INFO bch2_fmt(c, fmt), ##__VA_ARGS__)
299 #define bch_notice(c, fmt, ...) \
300 	bch2_print(c, KERN_NOTICE bch2_fmt(c, fmt), ##__VA_ARGS__)
301 #define bch_warn(c, fmt, ...) \
302 	bch2_print(c, KERN_WARNING bch2_fmt(c, fmt), ##__VA_ARGS__)
303 #define bch_warn_ratelimited(c, fmt, ...) \
304 	bch2_print_ratelimited(c, KERN_WARNING bch2_fmt(c, fmt), ##__VA_ARGS__)
305 
306 #define bch_err(c, fmt, ...) \
307 	bch2_print(c, KERN_ERR bch2_fmt(c, fmt), ##__VA_ARGS__)
308 #define bch_err_dev(ca, fmt, ...) \
309 	bch2_print(c, KERN_ERR bch2_fmt_dev(ca, fmt), ##__VA_ARGS__)
310 #define bch_err_dev_offset(ca, _offset, fmt, ...) \
311 	bch2_print(c, KERN_ERR bch2_fmt_dev_offset(ca, _offset, fmt), ##__VA_ARGS__)
312 #define bch_err_inum(c, _inum, fmt, ...) \
313 	bch2_print(c, KERN_ERR bch2_fmt_inum(c, _inum, fmt), ##__VA_ARGS__)
314 #define bch_err_inum_offset(c, _inum, _offset, fmt, ...) \
315 	bch2_print(c, KERN_ERR bch2_fmt_inum_offset(c, _inum, _offset, fmt), ##__VA_ARGS__)
316 
317 #define bch_err_ratelimited(c, fmt, ...) \
318 	bch2_print_ratelimited(c, KERN_ERR bch2_fmt(c, fmt), ##__VA_ARGS__)
319 #define bch_err_dev_ratelimited(ca, fmt, ...) \
320 	bch2_print_ratelimited(ca, KERN_ERR bch2_fmt_dev(ca, fmt), ##__VA_ARGS__)
321 #define bch_err_dev_offset_ratelimited(ca, _offset, fmt, ...) \
322 	bch2_print_ratelimited(ca, KERN_ERR bch2_fmt_dev_offset(ca, _offset, fmt), ##__VA_ARGS__)
323 #define bch_err_inum_ratelimited(c, _inum, fmt, ...) \
324 	bch2_print_ratelimited(c, KERN_ERR bch2_fmt_inum(c, _inum, fmt), ##__VA_ARGS__)
325 #define bch_err_inum_offset_ratelimited(c, _inum, _offset, fmt, ...) \
326 	bch2_print_ratelimited(c, KERN_ERR bch2_fmt_inum_offset(c, _inum, _offset, fmt), ##__VA_ARGS__)
327 
328 static inline bool should_print_err(int err)
329 {
330 	return err && !bch2_err_matches(err, BCH_ERR_transaction_restart);
331 }
332 
333 #define bch_err_fn(_c, _ret)						\
334 do {									\
335 	if (should_print_err(_ret))					\
336 		bch_err(_c, "%s(): error %s", __func__, bch2_err_str(_ret));\
337 } while (0)
338 
339 #define bch_err_fn_ratelimited(_c, _ret)				\
340 do {									\
341 	if (should_print_err(_ret))					\
342 		bch_err_ratelimited(_c, "%s(): error %s", __func__, bch2_err_str(_ret));\
343 } while (0)
344 
345 #define bch_err_msg(_c, _ret, _msg, ...)				\
346 do {									\
347 	if (should_print_err(_ret))					\
348 		bch_err(_c, "%s(): error " _msg " %s", __func__,	\
349 			##__VA_ARGS__, bch2_err_str(_ret));		\
350 } while (0)
351 
352 #define bch_verbose(c, fmt, ...)					\
353 do {									\
354 	if ((c)->opts.verbose)						\
355 		bch_info(c, fmt, ##__VA_ARGS__);			\
356 } while (0)
357 
358 #define bch_verbose_ratelimited(c, fmt, ...)				\
359 do {									\
360 	if ((c)->opts.verbose)						\
361 		bch_info_ratelimited(c, fmt, ##__VA_ARGS__);		\
362 } while (0)
363 
364 #define pr_verbose_init(opts, fmt, ...)					\
365 do {									\
366 	if (opt_get(opts, verbose))					\
367 		pr_info(fmt, ##__VA_ARGS__);				\
368 } while (0)
369 
370 /* Parameters that are useful for debugging, but should always be compiled in: */
371 #define BCH_DEBUG_PARAMS_ALWAYS()					\
372 	BCH_DEBUG_PARAM(key_merging_disabled,				\
373 		"Disables merging of extents")				\
374 	BCH_DEBUG_PARAM(btree_node_merging_disabled,			\
375 		"Disables merging of btree nodes")			\
376 	BCH_DEBUG_PARAM(btree_gc_always_rewrite,			\
377 		"Causes mark and sweep to compact and rewrite every "	\
378 		"btree node it traverses")				\
379 	BCH_DEBUG_PARAM(btree_gc_rewrite_disabled,			\
380 		"Disables rewriting of btree nodes during mark and sweep")\
381 	BCH_DEBUG_PARAM(btree_shrinker_disabled,			\
382 		"Disables the shrinker callback for the btree node cache")\
383 	BCH_DEBUG_PARAM(verify_btree_ondisk,				\
384 		"Reread btree nodes at various points to verify the "	\
385 		"mergesort in the read path against modifications "	\
386 		"done in memory")					\
387 	BCH_DEBUG_PARAM(verify_all_btree_replicas,			\
388 		"When reading btree nodes, read all replicas and "	\
389 		"compare them")						\
390 	BCH_DEBUG_PARAM(backpointers_no_use_write_buffer,		\
391 		"Don't use the write buffer for backpointers, enabling "\
392 		"extra runtime checks")
393 
394 /* Parameters that should only be compiled in debug mode: */
395 #define BCH_DEBUG_PARAMS_DEBUG()					\
396 	BCH_DEBUG_PARAM(expensive_debug_checks,				\
397 		"Enables various runtime debugging checks that "	\
398 		"significantly affect performance")			\
399 	BCH_DEBUG_PARAM(debug_check_iterators,				\
400 		"Enables extra verification for btree iterators")	\
401 	BCH_DEBUG_PARAM(debug_check_btree_accounting,			\
402 		"Verify btree accounting for keys within a node")	\
403 	BCH_DEBUG_PARAM(journal_seq_verify,				\
404 		"Store the journal sequence number in the version "	\
405 		"number of every btree key, and verify that btree "	\
406 		"update ordering is preserved during recovery")		\
407 	BCH_DEBUG_PARAM(inject_invalid_keys,				\
408 		"Store the journal sequence number in the version "	\
409 		"number of every btree key, and verify that btree "	\
410 		"update ordering is preserved during recovery")		\
411 	BCH_DEBUG_PARAM(test_alloc_startup,				\
412 		"Force allocator startup to use the slowpath where it "	\
413 		"can't find enough free buckets without invalidating "	\
414 		"cached data")						\
415 	BCH_DEBUG_PARAM(force_reconstruct_read,				\
416 		"Force reads to use the reconstruct path when reading "	\
417 		"from erasure coded extents")				\
418 	BCH_DEBUG_PARAM(test_restart_gc,				\
419 		"Test restarting mark and sweep gc when bucket gens change")
420 
421 #define BCH_DEBUG_PARAMS_ALL() BCH_DEBUG_PARAMS_ALWAYS() BCH_DEBUG_PARAMS_DEBUG()
422 
423 #ifdef CONFIG_BCACHEFS_DEBUG
424 #define BCH_DEBUG_PARAMS() BCH_DEBUG_PARAMS_ALL()
425 #else
426 #define BCH_DEBUG_PARAMS() BCH_DEBUG_PARAMS_ALWAYS()
427 #endif
428 
429 #define BCH_DEBUG_PARAM(name, description) extern bool bch2_##name;
430 BCH_DEBUG_PARAMS()
431 #undef BCH_DEBUG_PARAM
432 
433 #ifndef CONFIG_BCACHEFS_DEBUG
434 #define BCH_DEBUG_PARAM(name, description) static const __maybe_unused bool bch2_##name;
435 BCH_DEBUG_PARAMS_DEBUG()
436 #undef BCH_DEBUG_PARAM
437 #endif
438 
439 #define BCH_TIME_STATS()			\
440 	x(btree_node_mem_alloc)			\
441 	x(btree_node_split)			\
442 	x(btree_node_compact)			\
443 	x(btree_node_merge)			\
444 	x(btree_node_sort)			\
445 	x(btree_node_read)			\
446 	x(btree_node_read_done)			\
447 	x(btree_interior_update_foreground)	\
448 	x(btree_interior_update_total)		\
449 	x(btree_gc)				\
450 	x(data_write)				\
451 	x(data_read)				\
452 	x(data_promote)				\
453 	x(journal_flush_write)			\
454 	x(journal_noflush_write)		\
455 	x(journal_flush_seq)			\
456 	x(blocked_journal_low_on_space)		\
457 	x(blocked_journal_low_on_pin)		\
458 	x(blocked_journal_max_in_flight)	\
459 	x(blocked_key_cache_flush)		\
460 	x(blocked_allocate)			\
461 	x(blocked_allocate_open_bucket)		\
462 	x(blocked_write_buffer_full)		\
463 	x(nocow_lock_contended)
464 
465 enum bch_time_stats {
466 #define x(name) BCH_TIME_##name,
467 	BCH_TIME_STATS()
468 #undef x
469 	BCH_TIME_STAT_NR
470 };
471 
472 #include "alloc_types.h"
473 #include "btree_gc_types.h"
474 #include "btree_types.h"
475 #include "btree_node_scan_types.h"
476 #include "btree_write_buffer_types.h"
477 #include "buckets_types.h"
478 #include "buckets_waiting_for_journal_types.h"
479 #include "clock_types.h"
480 #include "disk_groups_types.h"
481 #include "ec_types.h"
482 #include "journal_types.h"
483 #include "keylist_types.h"
484 #include "quota_types.h"
485 #include "rebalance_types.h"
486 #include "replicas_types.h"
487 #include "sb-members_types.h"
488 #include "subvolume_types.h"
489 #include "super_types.h"
490 #include "thread_with_file_types.h"
491 
492 /* Number of nodes btree coalesce will try to coalesce at once */
493 #define GC_MERGE_NODES		4U
494 
495 /* Maximum number of nodes we might need to allocate atomically: */
496 #define BTREE_RESERVE_MAX	(BTREE_MAX_DEPTH + (BTREE_MAX_DEPTH - 1))
497 
498 /* Size of the freelist we allocate btree nodes from: */
499 #define BTREE_NODE_RESERVE	(BTREE_RESERVE_MAX * 4)
500 
501 #define BTREE_NODE_OPEN_BUCKET_RESERVE	(BTREE_RESERVE_MAX * BCH_REPLICAS_MAX)
502 
503 struct btree;
504 
505 struct io_count {
506 	u64			sectors[2][BCH_DATA_NR];
507 };
508 
509 struct discard_in_flight {
510 	bool			in_progress:1;
511 	u64			bucket:63;
512 };
513 
514 struct bch_dev {
515 	struct kobject		kobj;
516 #ifdef CONFIG_BCACHEFS_DEBUG
517 	atomic_long_t		ref;
518 	bool			dying;
519 	unsigned long		last_put;
520 #else
521 	struct percpu_ref	ref;
522 #endif
523 	struct completion	ref_completion;
524 	struct percpu_ref	io_ref;
525 	struct completion	io_ref_completion;
526 
527 	struct bch_fs		*fs;
528 
529 	u8			dev_idx;
530 	/*
531 	 * Cached version of this device's member info from superblock
532 	 * Committed by bch2_write_super() -> bch_fs_mi_update()
533 	 */
534 	struct bch_member_cpu	mi;
535 	atomic64_t		errors[BCH_MEMBER_ERROR_NR];
536 
537 	__uuid_t		uuid;
538 	char			name[BDEVNAME_SIZE];
539 
540 	struct bch_sb_handle	disk_sb;
541 	struct bch_sb		*sb_read_scratch;
542 	int			sb_write_error;
543 	dev_t			dev;
544 	atomic_t		flush_seq;
545 
546 	struct bch_devs_mask	self;
547 
548 	/*
549 	 * Buckets:
550 	 * Per-bucket arrays are protected by either rcu_read_lock or
551 	 * state_lock, for device resize.
552 	 */
553 	GENRADIX(struct bucket)	buckets_gc;
554 	struct bucket_gens __rcu *bucket_gens;
555 	u8			*oldest_gen;
556 	unsigned long		*buckets_nouse;
557 
558 	unsigned long		*bucket_backpointer_mismatches;
559 	unsigned long		*bucket_backpointer_empty;
560 
561 	struct bch_dev_usage __percpu	*usage;
562 
563 	/* Allocator: */
564 	u64			alloc_cursor[3];
565 
566 	unsigned		nr_open_buckets;
567 	unsigned		nr_partial_buckets;
568 	unsigned		nr_btree_reserve;
569 
570 	size_t			inc_gen_needs_gc;
571 	size_t			inc_gen_really_needs_gc;
572 	size_t			buckets_waiting_on_journal;
573 
574 	struct work_struct	invalidate_work;
575 	struct work_struct	discard_work;
576 	struct mutex		discard_buckets_in_flight_lock;
577 	DARRAY(struct discard_in_flight)	discard_buckets_in_flight;
578 	struct work_struct	discard_fast_work;
579 
580 	atomic64_t		rebalance_work;
581 
582 	struct journal_device	journal;
583 	u64			prev_journal_sector;
584 
585 	struct work_struct	io_error_work;
586 
587 	/* The rest of this all shows up in sysfs */
588 	atomic64_t		cur_latency[2];
589 	struct bch2_time_stats_quantiles io_latency[2];
590 
591 #define CONGESTED_MAX		1024
592 	atomic_t		congested;
593 	u64			congested_last;
594 
595 	struct io_count __percpu *io_done;
596 };
597 
598 /*
599  * initial_gc_unfixed
600  * error
601  * topology error
602  */
603 
604 #define BCH_FS_FLAGS()			\
605 	x(new_fs)			\
606 	x(started)			\
607 	x(clean_recovery)		\
608 	x(btree_running)		\
609 	x(accounting_replay_done)	\
610 	x(may_go_rw)			\
611 	x(rw)				\
612 	x(was_rw)			\
613 	x(stopping)			\
614 	x(emergency_ro)			\
615 	x(going_ro)			\
616 	x(write_disable_complete)	\
617 	x(clean_shutdown)		\
618 	x(recovery_running)		\
619 	x(fsck_running)			\
620 	x(initial_gc_unfixed)		\
621 	x(need_delete_dead_snapshots)	\
622 	x(error)			\
623 	x(topology_error)		\
624 	x(errors_fixed)			\
625 	x(errors_not_fixed)		\
626 	x(no_invalid_checks)
627 
628 enum bch_fs_flags {
629 #define x(n)		BCH_FS_##n,
630 	BCH_FS_FLAGS()
631 #undef x
632 };
633 
634 struct btree_debug {
635 	unsigned		id;
636 };
637 
638 #define BCH_TRANSACTIONS_NR 128
639 
640 struct btree_transaction_stats {
641 	struct bch2_time_stats	duration;
642 	struct bch2_time_stats	lock_hold_times;
643 	struct mutex		lock;
644 	unsigned		nr_max_paths;
645 	unsigned		journal_entries_size;
646 	unsigned		max_mem;
647 	char			*max_paths_text;
648 };
649 
650 struct bch_fs_pcpu {
651 	u64			sectors_available;
652 };
653 
654 struct journal_seq_blacklist_table {
655 	size_t			nr;
656 	struct journal_seq_blacklist_table_entry {
657 		u64		start;
658 		u64		end;
659 		bool		dirty;
660 	}			entries[];
661 };
662 
663 struct btree_trans_buf {
664 	struct btree_trans	*trans;
665 };
666 
667 #define BCACHEFS_ROOT_SUBVOL_INUM					\
668 	((subvol_inum) { BCACHEFS_ROOT_SUBVOL,	BCACHEFS_ROOT_INO })
669 
670 #define BCH_WRITE_REFS()						\
671 	x(journal)							\
672 	x(trans)							\
673 	x(write)							\
674 	x(promote)							\
675 	x(node_rewrite)							\
676 	x(stripe_create)						\
677 	x(stripe_delete)						\
678 	x(reflink)							\
679 	x(fallocate)							\
680 	x(fsync)							\
681 	x(dio_write)							\
682 	x(discard)							\
683 	x(discard_fast)							\
684 	x(check_discard_freespace_key)					\
685 	x(invalidate)							\
686 	x(delete_dead_snapshots)					\
687 	x(gc_gens)							\
688 	x(snapshot_delete_pagecache)					\
689 	x(sysfs)							\
690 	x(btree_write_buffer)
691 
692 enum bch_write_ref {
693 #define x(n) BCH_WRITE_REF_##n,
694 	BCH_WRITE_REFS()
695 #undef x
696 	BCH_WRITE_REF_NR,
697 };
698 
699 struct bch_fs {
700 	struct closure		cl;
701 
702 	struct list_head	list;
703 	struct kobject		kobj;
704 	struct kobject		counters_kobj;
705 	struct kobject		internal;
706 	struct kobject		opts_dir;
707 	struct kobject		time_stats;
708 	unsigned long		flags;
709 
710 	int			minor;
711 	struct device		*chardev;
712 	struct super_block	*vfs_sb;
713 	dev_t			dev;
714 	char			name[40];
715 	struct stdio_redirect	*stdio;
716 	struct task_struct	*stdio_filter;
717 
718 	/* ro/rw, add/remove/resize devices: */
719 	struct rw_semaphore	state_lock;
720 
721 	/* Counts outstanding writes, for clean transition to read-only */
722 #ifdef BCH_WRITE_REF_DEBUG
723 	atomic_long_t		writes[BCH_WRITE_REF_NR];
724 #else
725 	struct percpu_ref	writes;
726 #endif
727 	/*
728 	 * Certain operations are only allowed in single threaded mode, during
729 	 * recovery, and we want to assert that this is the case:
730 	 */
731 	struct task_struct	*recovery_task;
732 
733 	/*
734 	 * Analogous to c->writes, for asynchronous ops that don't necessarily
735 	 * need fs to be read-write
736 	 */
737 	refcount_t		ro_ref;
738 	wait_queue_head_t	ro_ref_wait;
739 
740 	struct work_struct	read_only_work;
741 
742 	struct bch_dev __rcu	*devs[BCH_SB_MEMBERS_MAX];
743 
744 	struct bch_accounting_mem accounting;
745 
746 	struct bch_replicas_cpu replicas;
747 	struct bch_replicas_cpu replicas_gc;
748 	struct mutex		replicas_gc_lock;
749 
750 	struct journal_entry_res btree_root_journal_res;
751 	struct journal_entry_res clock_journal_res;
752 
753 	struct bch_disk_groups_cpu __rcu *disk_groups;
754 
755 	struct bch_opts		opts;
756 
757 	/* Updated by bch2_sb_update():*/
758 	struct {
759 		__uuid_t	uuid;
760 		__uuid_t	user_uuid;
761 
762 		u16		version;
763 		u16		version_incompat;
764 		u16		version_incompat_allowed;
765 		u16		version_min;
766 		u16		version_upgrade_complete;
767 
768 		u8		nr_devices;
769 		u8		clean;
770 
771 		u8		encryption_type;
772 
773 		u64		time_base_lo;
774 		u32		time_base_hi;
775 		unsigned	time_units_per_sec;
776 		unsigned	nsec_per_time_unit;
777 		u64		features;
778 		u64		compat;
779 		unsigned long	errors_silent[BITS_TO_LONGS(BCH_FSCK_ERR_MAX)];
780 		u64		btrees_lost_data;
781 	}			sb;
782 
783 
784 	struct bch_sb_handle	disk_sb;
785 
786 	unsigned short		block_bits;	/* ilog2(block_size) */
787 
788 	u16			btree_foreground_merge_threshold;
789 
790 	struct closure		sb_write;
791 	struct mutex		sb_lock;
792 
793 	/* snapshot.c: */
794 	struct snapshot_table __rcu *snapshots;
795 	struct mutex		snapshot_table_lock;
796 	struct rw_semaphore	snapshot_create_lock;
797 
798 	struct work_struct	snapshot_delete_work;
799 	struct work_struct	snapshot_wait_for_pagecache_and_delete_work;
800 	snapshot_id_list	snapshots_unlinked;
801 	struct mutex		snapshots_unlinked_lock;
802 
803 	/* BTREE CACHE */
804 	struct bio_set		btree_bio;
805 	struct workqueue_struct	*btree_read_complete_wq;
806 	struct workqueue_struct	*btree_write_submit_wq;
807 
808 	struct btree_root	btree_roots_known[BTREE_ID_NR];
809 	DARRAY(struct btree_root) btree_roots_extra;
810 	struct mutex		btree_root_lock;
811 
812 	struct btree_cache	btree_cache;
813 
814 	/*
815 	 * Cache of allocated btree nodes - if we allocate a btree node and
816 	 * don't use it, freeing it means that space can't be reused until going
817 	 * _all_ the way through the allocator (which exposes us to a livelock
818 	 * when allocating btree reserves fails halfway through) - instead, we
819 	 * can stick them here:
820 	 */
821 	struct btree_alloc	btree_reserve_cache[BTREE_NODE_RESERVE * 2];
822 	unsigned		btree_reserve_cache_nr;
823 	struct mutex		btree_reserve_cache_lock;
824 
825 	mempool_t		btree_interior_update_pool;
826 	struct list_head	btree_interior_update_list;
827 	struct list_head	btree_interior_updates_unwritten;
828 	struct mutex		btree_interior_update_lock;
829 	struct closure_waitlist	btree_interior_update_wait;
830 
831 	struct workqueue_struct	*btree_interior_update_worker;
832 	struct work_struct	btree_interior_update_work;
833 
834 	struct workqueue_struct	*btree_node_rewrite_worker;
835 	struct list_head	btree_node_rewrites;
836 	struct list_head	btree_node_rewrites_pending;
837 	spinlock_t		btree_node_rewrites_lock;
838 	struct closure_waitlist	btree_node_rewrites_wait;
839 
840 	/* btree_io.c: */
841 	spinlock_t		btree_write_error_lock;
842 	struct btree_write_stats {
843 		atomic64_t	nr;
844 		atomic64_t	bytes;
845 	}			btree_write_stats[BTREE_WRITE_TYPE_NR];
846 
847 	/* btree_iter.c: */
848 	struct seqmutex		btree_trans_lock;
849 	struct list_head	btree_trans_list;
850 	mempool_t		btree_trans_pool;
851 	mempool_t		btree_trans_mem_pool;
852 	struct btree_trans_buf  __percpu	*btree_trans_bufs;
853 
854 	struct srcu_struct	btree_trans_barrier;
855 	bool			btree_trans_barrier_initialized;
856 
857 	struct btree_key_cache	btree_key_cache;
858 	unsigned		btree_key_cache_btrees;
859 
860 	struct btree_write_buffer btree_write_buffer;
861 
862 	struct workqueue_struct	*btree_update_wq;
863 	struct workqueue_struct	*btree_io_complete_wq;
864 	/* copygc needs its own workqueue for index updates.. */
865 	struct workqueue_struct	*copygc_wq;
866 	/*
867 	 * Use a dedicated wq for write ref holder tasks. Required to avoid
868 	 * dependency problems with other wq tasks that can block on ref
869 	 * draining, such as read-only transition.
870 	 */
871 	struct workqueue_struct *write_ref_wq;
872 
873 	/* ALLOCATION */
874 	struct bch_devs_mask	rw_devs[BCH_DATA_NR];
875 	unsigned long		rw_devs_change_count;
876 
877 	u64			capacity; /* sectors */
878 	u64			reserved; /* sectors */
879 
880 	/*
881 	 * When capacity _decreases_ (due to a disk being removed), we
882 	 * increment capacity_gen - this invalidates outstanding reservations
883 	 * and forces them to be revalidated
884 	 */
885 	u32			capacity_gen;
886 	unsigned		bucket_size_max;
887 
888 	atomic64_t		sectors_available;
889 	struct mutex		sectors_available_lock;
890 
891 	struct bch_fs_pcpu __percpu	*pcpu;
892 
893 	struct percpu_rw_semaphore	mark_lock;
894 
895 	seqcount_t			usage_lock;
896 	struct bch_fs_usage_base __percpu *usage;
897 	u64 __percpu		*online_reserved;
898 
899 	unsigned long		allocator_last_stuck;
900 
901 	struct io_clock		io_clock[2];
902 
903 	/* JOURNAL SEQ BLACKLIST */
904 	struct journal_seq_blacklist_table *
905 				journal_seq_blacklist_table;
906 
907 	/* ALLOCATOR */
908 	spinlock_t		freelist_lock;
909 	struct closure_waitlist	freelist_wait;
910 
911 	open_bucket_idx_t	open_buckets_freelist;
912 	open_bucket_idx_t	open_buckets_nr_free;
913 	struct closure_waitlist	open_buckets_wait;
914 	struct open_bucket	open_buckets[OPEN_BUCKETS_COUNT];
915 	open_bucket_idx_t	open_buckets_hash[OPEN_BUCKETS_COUNT];
916 
917 	open_bucket_idx_t	open_buckets_partial[OPEN_BUCKETS_COUNT];
918 	open_bucket_idx_t	open_buckets_partial_nr;
919 
920 	struct write_point	btree_write_point;
921 	struct write_point	rebalance_write_point;
922 
923 	struct write_point	write_points[WRITE_POINT_MAX];
924 	struct hlist_head	write_points_hash[WRITE_POINT_HASH_NR];
925 	struct mutex		write_points_hash_lock;
926 	unsigned		write_points_nr;
927 
928 	struct buckets_waiting_for_journal buckets_waiting_for_journal;
929 
930 	/* GARBAGE COLLECTION */
931 	struct work_struct	gc_gens_work;
932 	unsigned long		gc_count;
933 
934 	enum btree_id		gc_gens_btree;
935 	struct bpos		gc_gens_pos;
936 
937 	/*
938 	 * Tracks GC's progress - everything in the range [ZERO_KEY..gc_cur_pos]
939 	 * has been marked by GC.
940 	 *
941 	 * gc_cur_phase is a superset of btree_ids (BTREE_ID_extents etc.)
942 	 *
943 	 * Protected by gc_pos_lock. Only written to by GC thread, so GC thread
944 	 * can read without a lock.
945 	 */
946 	seqcount_t		gc_pos_lock;
947 	struct gc_pos		gc_pos;
948 
949 	/*
950 	 * The allocation code needs gc_mark in struct bucket to be correct, but
951 	 * it's not while a gc is in progress.
952 	 */
953 	struct rw_semaphore	gc_lock;
954 	struct mutex		gc_gens_lock;
955 
956 	/* IO PATH */
957 	struct semaphore	io_in_flight;
958 	struct bio_set		bio_read;
959 	struct bio_set		bio_read_split;
960 	struct bio_set		bio_write;
961 	struct bio_set		replica_set;
962 	struct mutex		bio_bounce_pages_lock;
963 	mempool_t		bio_bounce_pages;
964 	struct bucket_nocow_lock_table
965 				nocow_locks;
966 	struct rhashtable	promote_table;
967 
968 	mempool_t		compression_bounce[2];
969 	mempool_t		compress_workspace[BCH_COMPRESSION_OPT_NR];
970 	size_t			zstd_workspace_size;
971 
972 	struct crypto_shash	*sha256;
973 	struct crypto_sync_skcipher *chacha20;
974 	struct crypto_shash	*poly1305;
975 
976 	atomic64_t		key_version;
977 
978 	mempool_t		large_bkey_pool;
979 
980 	/* MOVE.C */
981 	struct list_head	moving_context_list;
982 	struct mutex		moving_context_lock;
983 
984 	/* REBALANCE */
985 	struct bch_fs_rebalance	rebalance;
986 
987 	/* COPYGC */
988 	struct task_struct	*copygc_thread;
989 	struct write_point	copygc_write_point;
990 	s64			copygc_wait_at;
991 	s64			copygc_wait;
992 	bool			copygc_running;
993 	wait_queue_head_t	copygc_running_wq;
994 
995 	/* STRIPES: */
996 	GENRADIX(struct stripe) stripes;
997 	GENRADIX(struct gc_stripe) gc_stripes;
998 
999 	struct hlist_head	ec_stripes_new[32];
1000 	spinlock_t		ec_stripes_new_lock;
1001 
1002 	ec_stripes_heap		ec_stripes_heap;
1003 	struct mutex		ec_stripes_heap_lock;
1004 
1005 	/* ERASURE CODING */
1006 	struct list_head	ec_stripe_head_list;
1007 	struct mutex		ec_stripe_head_lock;
1008 
1009 	struct list_head	ec_stripe_new_list;
1010 	struct mutex		ec_stripe_new_lock;
1011 	wait_queue_head_t	ec_stripe_new_wait;
1012 
1013 	struct work_struct	ec_stripe_create_work;
1014 	u64			ec_stripe_hint;
1015 
1016 	struct work_struct	ec_stripe_delete_work;
1017 
1018 	struct bio_set		ec_bioset;
1019 
1020 	/* REFLINK */
1021 	reflink_gc_table	reflink_gc_table;
1022 	size_t			reflink_gc_nr;
1023 
1024 	/* fs.c */
1025 	struct list_head	vfs_inodes_list;
1026 	struct mutex		vfs_inodes_lock;
1027 	struct rhashtable	vfs_inodes_table;
1028 	struct rhltable		vfs_inodes_by_inum_table;
1029 
1030 	/* VFS IO PATH - fs-io.c */
1031 	struct bio_set		writepage_bioset;
1032 	struct bio_set		dio_write_bioset;
1033 	struct bio_set		dio_read_bioset;
1034 	struct bio_set		nocow_flush_bioset;
1035 
1036 	/* QUOTAS */
1037 	struct bch_memquota_type quotas[QTYP_NR];
1038 
1039 	/* RECOVERY */
1040 	u64			journal_replay_seq_start;
1041 	u64			journal_replay_seq_end;
1042 	/*
1043 	 * Two different uses:
1044 	 * "Has this fsck pass?" - i.e. should this type of error be an
1045 	 * emergency read-only
1046 	 * And, in certain situations fsck will rewind to an earlier pass: used
1047 	 * for signaling to the toplevel code which pass we want to run now.
1048 	 */
1049 	enum bch_recovery_pass	curr_recovery_pass;
1050 	enum bch_recovery_pass	next_recovery_pass;
1051 	/* bitmask of recovery passes that we actually ran */
1052 	u64			recovery_passes_complete;
1053 	/* never rewinds version of curr_recovery_pass */
1054 	enum bch_recovery_pass	recovery_pass_done;
1055 	spinlock_t		recovery_pass_lock;
1056 	struct semaphore	online_fsck_mutex;
1057 
1058 	/* DEBUG JUNK */
1059 	struct dentry		*fs_debug_dir;
1060 	struct dentry		*btree_debug_dir;
1061 	struct btree_debug	btree_debug[BTREE_ID_NR];
1062 	struct btree		*verify_data;
1063 	struct btree_node	*verify_ondisk;
1064 	struct mutex		verify_lock;
1065 
1066 	/*
1067 	 * A btree node on disk could have too many bsets for an iterator to fit
1068 	 * on the stack - have to dynamically allocate them
1069 	 */
1070 	mempool_t		fill_iter;
1071 
1072 	mempool_t		btree_bounce_pool;
1073 
1074 	struct journal		journal;
1075 	GENRADIX(struct journal_replay *) journal_entries;
1076 	u64			journal_entries_base_seq;
1077 	struct journal_keys	journal_keys;
1078 	struct list_head	journal_iters;
1079 
1080 	struct find_btree_nodes	found_btree_nodes;
1081 
1082 	u64			last_bucket_seq_cleanup;
1083 
1084 	u64			counters_on_mount[BCH_COUNTER_NR];
1085 	u64 __percpu		*counters;
1086 
1087 	struct bch2_time_stats	times[BCH_TIME_STAT_NR];
1088 
1089 	struct btree_transaction_stats btree_transaction_stats[BCH_TRANSACTIONS_NR];
1090 
1091 	/* ERRORS */
1092 	struct list_head	fsck_error_msgs;
1093 	struct mutex		fsck_error_msgs_lock;
1094 	bool			fsck_alloc_msgs_err;
1095 
1096 	bch_sb_errors_cpu	fsck_error_counts;
1097 	struct mutex		fsck_error_counts_lock;
1098 };
1099 
1100 extern struct wait_queue_head bch2_read_only_wait;
1101 
1102 static inline void bch2_write_ref_get(struct bch_fs *c, enum bch_write_ref ref)
1103 {
1104 #ifdef BCH_WRITE_REF_DEBUG
1105 	atomic_long_inc(&c->writes[ref]);
1106 #else
1107 	percpu_ref_get(&c->writes);
1108 #endif
1109 }
1110 
1111 static inline bool __bch2_write_ref_tryget(struct bch_fs *c, enum bch_write_ref ref)
1112 {
1113 #ifdef BCH_WRITE_REF_DEBUG
1114 	return !test_bit(BCH_FS_going_ro, &c->flags) &&
1115 		atomic_long_inc_not_zero(&c->writes[ref]);
1116 #else
1117 	return percpu_ref_tryget(&c->writes);
1118 #endif
1119 }
1120 
1121 static inline bool bch2_write_ref_tryget(struct bch_fs *c, enum bch_write_ref ref)
1122 {
1123 #ifdef BCH_WRITE_REF_DEBUG
1124 	return !test_bit(BCH_FS_going_ro, &c->flags) &&
1125 		atomic_long_inc_not_zero(&c->writes[ref]);
1126 #else
1127 	return percpu_ref_tryget_live(&c->writes);
1128 #endif
1129 }
1130 
1131 static inline void bch2_write_ref_put(struct bch_fs *c, enum bch_write_ref ref)
1132 {
1133 #ifdef BCH_WRITE_REF_DEBUG
1134 	long v = atomic_long_dec_return(&c->writes[ref]);
1135 
1136 	BUG_ON(v < 0);
1137 	if (v)
1138 		return;
1139 	for (unsigned i = 0; i < BCH_WRITE_REF_NR; i++)
1140 		if (atomic_long_read(&c->writes[i]))
1141 			return;
1142 
1143 	set_bit(BCH_FS_write_disable_complete, &c->flags);
1144 	wake_up(&bch2_read_only_wait);
1145 #else
1146 	percpu_ref_put(&c->writes);
1147 #endif
1148 }
1149 
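/*
 * Typical usage (sketch only; real callers live throughout the I/O paths):
 * guard work that requires the fs to be writable with a tryget/put pair:
 *
 *	if (!bch2_write_ref_tryget(c, BCH_WRITE_REF_fsync))
 *		return -EROFS;
 *	... do the write side work ...
 *	bch2_write_ref_put(c, BCH_WRITE_REF_fsync);
 *
 * With BCH_WRITE_REF_DEBUG each ref class gets its own counter, so a leaked
 * ref can be attributed to a specific user.
 */
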
1150 static inline bool bch2_ro_ref_tryget(struct bch_fs *c)
1151 {
1152 	if (test_bit(BCH_FS_stopping, &c->flags))
1153 		return false;
1154 
1155 	return refcount_inc_not_zero(&c->ro_ref);
1156 }
1157 
1158 static inline void bch2_ro_ref_put(struct bch_fs *c)
1159 {
1160 	if (refcount_dec_and_test(&c->ro_ref))
1161 		wake_up(&c->ro_ref_wait);
1162 }
1163 
1164 static inline void bch2_set_ra_pages(struct bch_fs *c, unsigned ra_pages)
1165 {
1166 #ifndef NO_BCACHEFS_FS
1167 	if (c->vfs_sb)
1168 		c->vfs_sb->s_bdi->ra_pages = ra_pages;
1169 #endif
1170 }
1171 
1172 static inline unsigned bucket_bytes(const struct bch_dev *ca)
1173 {
1174 	return ca->mi.bucket_size << 9;
1175 }
1176 
1177 static inline unsigned block_bytes(const struct bch_fs *c)
1178 {
1179 	return c->opts.block_size;
1180 }
1181 
1182 static inline unsigned block_sectors(const struct bch_fs *c)
1183 {
1184 	return c->opts.block_size >> 9;
1185 }
1186 
1187 static inline bool btree_id_cached(const struct bch_fs *c, enum btree_id btree)
1188 {
1189 	return c->btree_key_cache_btrees & (1U << btree);
1190 }
1191 
1192 static inline struct timespec64 bch2_time_to_timespec(const struct bch_fs *c, s64 time)
1193 {
1194 	struct timespec64 t;
1195 	s64 sec;
1196 	s32 rem;
1197 
1198 	time += c->sb.time_base_lo;
1199 
1200 	sec = div_s64_rem(time, c->sb.time_units_per_sec, &rem);
1201 
1202 	set_normalized_timespec64(&t, sec, rem * (s64)c->sb.nsec_per_time_unit);
1203 
1204 	return t;
1205 }
1206 
1207 static inline s64 timespec_to_bch2_time(const struct bch_fs *c, struct timespec64 ts)
1208 {
1209 	return (ts.tv_sec * c->sb.time_units_per_sec +
1210 		(int) ts.tv_nsec / c->sb.nsec_per_time_unit) - c->sb.time_base_lo;
1211 }
1212 
1213 static inline s64 bch2_current_time(const struct bch_fs *c)
1214 {
1215 	struct timespec64 now;
1216 
1217 	ktime_get_coarse_real_ts64(&now);
1218 	return timespec_to_bch2_time(c, now);
1219 }
1220 
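/*
 * For example, if the superblock says time_units_per_sec == NSEC_PER_SEC and
 * nsec_per_time_unit == 1 (nanosecond granularity), timespec_to_bch2_time()
 * reduces to tv_sec * 1e9 + tv_nsec - time_base_lo, and
 * bch2_time_to_timespec() is its inverse. (Illustrative values only; the
 * real units come from the superblock.)
 */
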
1221 static inline u64 bch2_current_io_time(const struct bch_fs *c, int rw)
1222 {
1223 	return max(1ULL, (u64) atomic64_read(&c->io_clock[rw].now) & LRU_TIME_MAX);
1224 }
1225 
1226 static inline struct stdio_redirect *bch2_fs_stdio_redirect(struct bch_fs *c)
1227 {
1228 	struct stdio_redirect *stdio = c->stdio;
1229 
1230 	if (c->stdio_filter && c->stdio_filter != current)
1231 		stdio = NULL;
1232 	return stdio;
1233 }
1234 
1235 static inline unsigned metadata_replicas_required(struct bch_fs *c)
1236 {
1237 	return min(c->opts.metadata_replicas,
1238 		   c->opts.metadata_replicas_required);
1239 }
1240 
1241 static inline unsigned data_replicas_required(struct bch_fs *c)
1242 {
1243 	return min(c->opts.data_replicas,
1244 		   c->opts.data_replicas_required);
1245 }
1246 
1247 #define BKEY_PADDED_ONSTACK(key, pad)				\
1248 	struct { struct bkey_i key; __u64 key ## _pad[pad]; }
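
/*
 * Usage sketch: declares an on-stack key with room for "pad" extra u64s of
 * value, e.g.
 *
 *	BKEY_PADDED_ONSTACK(k, 8) tmp;
 *	bkey_init(&tmp.k.k);
 *
 * (The bkey_init() call here is illustrative of initializing the embedded
 * bkey.)
 */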
1249 
1250 #endif /* _BCACHEFS_H */
1251