xref: /linux/fs/bcachefs/btree_types.h (revision a1ff5a7d78a036d6c2178ee5acd6ba4946243800)
1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef _BCACHEFS_BTREE_TYPES_H
3 #define _BCACHEFS_BTREE_TYPES_H
4 
5 #include <linux/list.h>
6 #include <linux/rhashtable.h>
7 
8 #include "bbpos_types.h"
9 #include "btree_key_cache_types.h"
10 #include "buckets_types.h"
11 #include "darray.h"
12 #include "errcode.h"
13 #include "journal_types.h"
14 #include "replicas_types.h"
15 #include "six.h"
16 
17 struct open_bucket;
18 struct btree_update;
19 struct btree_trans;
20 
21 #define MAX_BSETS		3U
22 
23 struct btree_nr_keys {
24 
25 	/*
26 	 * Amount of live metadata (i.e. size of node after a compaction) in
27 	 * units of u64s
28 	 */
29 	u16			live_u64s;
30 	u16			bset_u64s[MAX_BSETS];
31 
32 	/* live keys only: */
33 	u16			packed_keys;
34 	u16			unpacked_keys;
35 };
36 
37 struct bset_tree {
38 	/*
39 	 * We construct a binary tree in an array as if the array
40 	 * started at 1, so that things line up on the same cachelines
41 	 * better: see comments in bset.c at cacheline_to_bkey() for
42 	 * details
43 	 */
44 
45 	/* size of the binary tree and prev array */
46 	u16			size;
47 
48 	/* function of size - precalculated for to_inorder() */
49 	u16			extra;
50 
51 	u16			data_offset;
52 	u16			aux_data_offset;
53 	u16			end_offset;
54 };
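
/*
 * Illustrative sketch only (the real auxiliary search tree helpers live in
 * bset.c): with the binary tree laid out in an array indexed from 1,
 * navigation is just shifts, and nodes near the root share cachelines:
 */
static inline unsigned __example_tree_child(unsigned j, unsigned right)
{
	return j * 2 + right;	/* children of node j are at 2j and 2j + 1 */
}

static inline unsigned __example_tree_parent(unsigned j)
{
	return j >> 1;		/* parent of node j is at j / 2 */
}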
55 
56 struct btree_write {
57 	struct journal_entry_pin	journal;
58 };
59 
60 struct btree_alloc {
61 	struct open_buckets	ob;
62 	__BKEY_PADDED(k, BKEY_BTREE_PTR_VAL_U64s_MAX);
63 };
64 
65 struct btree_bkey_cached_common {
66 	struct six_lock		lock;
67 	u8			level;
68 	u8			btree_id;
69 	bool			cached;
70 };
71 
72 struct btree {
73 	struct btree_bkey_cached_common c;
74 
75 	struct rhash_head	hash;
76 	u64			hash_val;
77 
78 	unsigned long		flags;
79 	u16			written;
80 	u8			nsets;
81 	u8			nr_key_bits;
82 	u16			version_ondisk;
83 
84 	struct bkey_format	format;
85 
86 	struct btree_node	*data;
87 	void			*aux_data;
88 
89 	/*
90 	 * Sets of sorted keys - the real btree node - plus a binary search tree
91 	 *
92 	 * set[0] is special; set[0]->tree, set[0]->prev and set[0]->data point
93 	 * to the memory we have allocated for this btree node. Additionally,
94 	 * set[0]->data points to the entire btree node as it exists on disk.
95 	 */
96 	struct bset_tree	set[MAX_BSETS];
97 
98 	struct btree_nr_keys	nr;
99 	u16			sib_u64s[2];
100 	u16			whiteout_u64s;
101 	u8			byte_order;
102 	u8			unpack_fn_len;
103 
104 	struct btree_write	writes[2];
105 
106 	/* Key/pointer for this btree node */
107 	__BKEY_PADDED(key, BKEY_BTREE_PTR_VAL_U64s_MAX);
108 
109 	/*
110 	 * XXX: add a delete sequence number, so when bch2_btree_node_relock()
111 	 * fails because the lock sequence number has changed - i.e. the
112 	 * contents were modified - we can still relock the node if it's still
113 	 * the one we want, without redoing the traversal
114 	 */
115 
116 	/*
117 	 * For asynchronous splits/interior node updates:
118 	 * When we do a split, we allocate new child nodes and update the parent
119 	 * node to point to them: we update the parent in memory immediately,
120 	 * but then we must wait until the children have been written out before
121 	 * the update to the parent can be written - this is a list of the
122 	 * btree_updates that are blocking this node from being
123 	 * written:
124 	 */
125 	struct list_head	write_blocked;
126 
127 	/*
128 	 * Also for asynchronous splits/interior node updates:
129 	 * If a btree node isn't reachable yet, we don't want to kick off
130 	 * another write - because that write also won't yet be reachable and
131 	 * marking it as completed before it's reachable would be incorrect:
132 	 */
133 	unsigned long		will_make_reachable;
134 
135 	struct open_buckets	ob;
136 
137 	/* lru list */
138 	struct list_head	list;
139 };
140 
141 struct btree_cache {
142 	struct rhashtable	table;
143 	bool			table_init_done;
144 	/*
145 	 * We never free a struct btree, except on shutdown - we just put it on
146 	 * the btree_cache_freed list and reuse it later. This simplifies the
147 	 * code, and it doesn't cost us much memory as the memory usage is
148 	 * dominated by buffers that hold the actual btree node data and those
149 	 * can be freed - and the number of struct btrees allocated is
150 	 * effectively bounded.
151 	 *
152 	 * btree_cache_freeable effectively is a small cache - we use it because
153 	 * high order page allocations can be rather expensive, and it's quite
154 	 * common to delete and allocate btree nodes in quick succession. It
155 	 * should never grow past ~2-3 nodes in practice.
156 	 */
157 	struct mutex		lock;
158 	struct list_head	live;
159 	struct list_head	freeable;
160 	struct list_head	freed_pcpu;
161 	struct list_head	freed_nonpcpu;
162 
163 	/* Number of elements in live + freeable lists */
164 	unsigned		used;
165 	unsigned		reserve;
166 	unsigned		freed;
167 	unsigned		not_freed_lock_intent;
168 	unsigned		not_freed_lock_write;
169 	unsigned		not_freed_dirty;
170 	unsigned		not_freed_read_in_flight;
171 	unsigned		not_freed_write_in_flight;
172 	unsigned		not_freed_noevict;
173 	unsigned		not_freed_write_blocked;
174 	unsigned		not_freed_will_make_reachable;
175 	unsigned		not_freed_access_bit;
176 	atomic_t		dirty;
177 	struct shrinker		*shrink;
178 
179 	unsigned		used_by_btree[BTREE_ID_NR];
180 
181 	/*
182 	 * If we need to allocate memory for a new btree node and that
183 	 * allocation fails, we can cannibalize another node in the btree cache
184 	 * to satisfy the allocation - lock to guarantee only one thread does
185 	 * this at a time:
186 	 */
187 	struct task_struct	*alloc_lock;
188 	struct closure_waitlist	alloc_wait;
189 
190 	struct bbpos		pinned_nodes_start;
191 	struct bbpos		pinned_nodes_end;
192 	u64			pinned_nodes_leaf_mask;
193 	u64			pinned_nodes_interior_mask;
194 };
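
/*
 * Illustrative sketch only, assuming the real cannibalize-lock logic lives in
 * btree_cache.c (__example_* is not part of this header): a single task
 * claims the cache for cannibalizing by atomically installing itself in
 * alloc_lock; everyone else waits on alloc_wait until it's released:
 */
static inline bool __example_cannibalize_trylock(struct btree_cache *bc)
{
	return cmpxchg(&bc->alloc_lock, NULL, current) == NULL;
}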
195 
196 struct btree_node_iter {
197 	struct btree_node_iter_set {
198 		u16	k, end;
199 	} data[MAX_BSETS];
200 };
201 
202 #define BTREE_ITER_FLAGS()			\
203 	x(slots)				\
204 	x(intent)				\
205 	x(prefetch)				\
206 	x(is_extents)				\
207 	x(not_extents)				\
208 	x(cached)				\
209 	x(with_key_cache)			\
210 	x(with_updates)				\
211 	x(with_journal)				\
212 	x(snapshot_field)			\
213 	x(all_snapshots)			\
214 	x(filter_snapshots)			\
215 	x(nopreserve)				\
216 	x(cached_nofill)			\
217 	x(key_cache_fill)			\
218 
219 #define STR_HASH_FLAGS()			\
220 	x(must_create)				\
221 	x(must_replace)
222 
223 #define BTREE_UPDATE_FLAGS()			\
224 	x(internal_snapshot_node)		\
225 	x(nojournal)				\
226 	x(key_cache_reclaim)
227 
228 
229 /*
230  * BTREE_TRIGGER_norun - don't run triggers at all
231  *
232  * BTREE_TRIGGER_transactional - we're running transactional triggers as part of
233  * a transaction commit: triggers may generate new updates
234  *
235  * BTREE_TRIGGER_atomic - we're running atomic triggers during a transaction
236  * commit: we have our journal reservation, we're holding btree node write
237  * locks, and we know the transaction is going to commit (returning an error
238  * here is a fatal error, causing us to go emergency read-only)
239  *
240  * BTREE_TRIGGER_gc - we're in gc/fsck: running triggers to recalculate e.g. disk usage
241  *
242  * BTREE_TRIGGER_insert - @new is entering the btree
243  * BTREE_TRIGGER_overwrite - @old is leaving the btree
244  *
245  * BTREE_TRIGGER_bucket_invalidate - signal from bucket invalidate path to alloc
246  * trigger
247  */
248 #define BTREE_TRIGGER_FLAGS()			\
249 	x(norun)				\
250 	x(transactional)			\
251 	x(atomic)				\
252 	x(check_repair)				\
253 	x(gc)					\
254 	x(insert)				\
255 	x(overwrite)				\
256 	x(is_root)				\
257 	x(bucket_invalidate)
258 
259 enum {
260 #define x(n) BTREE_ITER_FLAG_BIT_##n,
261 	BTREE_ITER_FLAGS()
262 	STR_HASH_FLAGS()
263 	BTREE_UPDATE_FLAGS()
264 	BTREE_TRIGGER_FLAGS()
265 #undef x
266 };
267 
268 /* iter flags must fit in a u16: */
269 //BUILD_BUG_ON(BTREE_ITER_FLAG_BIT_key_cache_fill > 15);
270 
271 enum btree_iter_update_trigger_flags {
272 #define x(n) BTREE_ITER_##n	= 1U << BTREE_ITER_FLAG_BIT_##n,
273 	BTREE_ITER_FLAGS()
274 #undef x
275 #define x(n) STR_HASH_##n	= 1U << BTREE_ITER_FLAG_BIT_##n,
276 	STR_HASH_FLAGS()
277 #undef x
278 #define x(n) BTREE_UPDATE_##n	= 1U << BTREE_ITER_FLAG_BIT_##n,
279 	BTREE_UPDATE_FLAGS()
280 #undef x
281 #define x(n) BTREE_TRIGGER_##n	= 1U << BTREE_ITER_FLAG_BIT_##n,
282 	BTREE_TRIGGER_FLAGS()
283 #undef x
284 };
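
/*
 * Illustrative sketch (documentation only, not compiled) of the x-macro
 * pattern used above, with made-up EXAMPLE_* flags: the same list expands
 * twice, first into consecutive bit numbers, then into the matching masks:
 */
#if 0
#define EXAMPLE_FLAGS()		\
	x(foo)			\
	x(bar)

enum {
#define x(n) EXAMPLE_FLAG_BIT_##n,
	EXAMPLE_FLAGS()		/* EXAMPLE_FLAG_BIT_foo == 0, _bar == 1 */
#undef x
};

enum example_flags {
#define x(n) EXAMPLE_##n = 1U << EXAMPLE_FLAG_BIT_##n,
	EXAMPLE_FLAGS()		/* EXAMPLE_foo == 1, EXAMPLE_bar == 2 */
#undef x
};
#endif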
285 
286 enum btree_path_uptodate {
287 	BTREE_ITER_UPTODATE		= 0,
288 	BTREE_ITER_NEED_RELOCK		= 1,
289 	BTREE_ITER_NEED_TRAVERSE	= 2,
290 };
291 
292 #if defined(CONFIG_BCACHEFS_LOCK_TIME_STATS) || defined(CONFIG_BCACHEFS_DEBUG)
293 #define TRACK_PATH_ALLOCATED
294 #endif
295 
296 typedef u16 btree_path_idx_t;
297 
298 struct btree_path {
299 	btree_path_idx_t	sorted_idx;
300 	u8			ref;
301 	u8			intent_ref;
302 
303 	/* btree_iter_copy starts here: */
304 	struct bpos		pos;
305 
306 	enum btree_id		btree_id:5;
307 	bool			cached:1;
308 	bool			preserve:1;
309 	enum btree_path_uptodate uptodate:2;
310 	/*
311 	 * When true, failing to relock this path will cause the transaction to
312 	 * restart:
313 	 */
314 	bool			should_be_locked:1;
315 	unsigned		level:3,
316 				locks_want:3;
317 	u8			nodes_locked;
318 
319 	struct btree_path_level {
320 		struct btree	*b;
321 		struct btree_node_iter iter;
322 		u32		lock_seq;
323 #ifdef CONFIG_BCACHEFS_LOCK_TIME_STATS
324 		u64             lock_taken_time;
325 #endif
326 	}			l[BTREE_MAX_DEPTH];
327 #ifdef TRACK_PATH_ALLOCATED
328 	unsigned long		ip_allocated;
329 #endif
330 };
331 
332 static inline struct btree_path_level *path_l(struct btree_path *path)
333 {
334 	return path->l + path->level;
335 }
336 
337 static inline unsigned long btree_path_ip_allocated(struct btree_path *path)
338 {
339 #ifdef TRACK_PATH_ALLOCATED
340 	return path->ip_allocated;
341 #else
342 	return _THIS_IP_;
343 #endif
344 }
345 
346 /*
347  * @pos			- iterator's current position
348  * @level		- current btree depth
349  * @locks_want		- btree level below which we start taking intent locks
350  * @nodes_locked	- bitmask indicating which nodes in @nodes are locked
351  * @nodes_intent_locked	- bitmask indicating which locks are intent locks
352  */
353 struct btree_iter {
354 	struct btree_trans	*trans;
355 	btree_path_idx_t	path;
356 	btree_path_idx_t	update_path;
357 	btree_path_idx_t	key_cache_path;
358 
359 	enum btree_id		btree_id:8;
360 	u8			min_depth;
361 
362 	/* btree_iter_copy starts here: */
363 	u16			flags;
364 
365 	/* When we're filtering by snapshot, the snapshot ID we're looking for: */
366 	unsigned		snapshot;
367 
368 	struct bpos		pos;
369 	/*
370 	 * Current unpacked key - so that bch2_btree_iter_next()/
371 	 * bch2_btree_iter_next_slot() can correctly advance pos.
372 	 */
373 	struct bkey		k;
374 
375 	/* BTREE_ITER_with_journal: */
376 	size_t			journal_idx;
377 #ifdef TRACK_PATH_ALLOCATED
378 	unsigned long		ip_allocated;
379 #endif
380 };
381 
382 #define BKEY_CACHED_ACCESSED		0
383 #define BKEY_CACHED_DIRTY		1
384 
385 struct bkey_cached {
386 	struct btree_bkey_cached_common c;
387 
388 	unsigned long		flags;
389 	unsigned long		btree_trans_barrier_seq;
390 	u16			u64s;
391 	struct bkey_cached_key	key;
392 
393 	struct rhash_head	hash;
394 	struct list_head	list;
395 
396 	struct journal_entry_pin journal;
397 	u64			seq;
398 
399 	struct bkey_i		*k;
400 };
401 
402 static inline struct bpos btree_node_pos(struct btree_bkey_cached_common *b)
403 {
404 	return !b->cached
405 		? container_of(b, struct btree, c)->key.k.p
406 		: container_of(b, struct bkey_cached, c)->key.pos;
407 }
408 
409 struct btree_insert_entry {
410 	unsigned		flags;
411 	u8			bkey_type;
412 	enum btree_id		btree_id:8;
413 	u8			level:4;
414 	bool			cached:1;
415 	bool			insert_trigger_run:1;
416 	bool			overwrite_trigger_run:1;
417 	bool			key_cache_already_flushed:1;
418 	/*
419 	 * @old_k may be a key from the journal; @old_btree_u64s always refers
420 	 * to the size of the key being overwritten in the btree:
421 	 */
422 	u8			old_btree_u64s;
423 	btree_path_idx_t	path;
424 	struct bkey_i		*k;
425 	/* key being overwritten: */
426 	struct bkey		old_k;
427 	const struct bch_val	*old_v;
428 	unsigned long		ip_allocated;
429 };
430 
431 /* Number of btree paths we preallocate, usually enough */
432 #define BTREE_ITER_INITIAL		64
433 /*
434  * Limit for btree_trans_too_many_iters(); this is enough that almost all code
435  * paths should run inside this limit, and if they don't it usually indicates a
436  * bug (leaking/duplicated btree paths).
437  *
438  * exception: some fsck paths
439  *
440  * bugs with excessive path usage seem to have possibly been eliminated now, so
441  * we might consider eliminating this (and btree_trans_too_many_iters()) at some
442  * point.
443  */
444 #define BTREE_ITER_NORMAL_LIMIT		256
445 /* never exceed limit */
446 #define BTREE_ITER_MAX			(1U << 10)
447 
448 struct btree_trans_commit_hook;
449 typedef int (btree_trans_commit_hook_fn)(struct btree_trans *, struct btree_trans_commit_hook *);
450 
451 struct btree_trans_commit_hook {
452 	btree_trans_commit_hook_fn	*fn;
453 	struct btree_trans_commit_hook	*next;
454 };
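
/*
 * Illustrative sketch of a commit hook (assumption: the registration helper
 * lives elsewhere, e.g. in btree_update.h): hooks are chained through ->next
 * and each ->fn is called with the transaction at commit time:
 */
static inline int __example_commit_hook_fn(struct btree_trans *trans,
					   struct btree_trans_commit_hook *h)
{
	/* inspect or extend the transaction here; return 0 on success */
	return 0;
}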
455 
456 #define BTREE_TRANS_MEM_MAX	(1U << 16)
457 
458 #define BTREE_TRANS_MAX_LOCK_HOLD_TIME_NS	10000
459 
460 struct btree_trans_paths {
461 	unsigned long		nr_paths;
462 	struct btree_path	paths[];
463 };
464 
465 struct btree_trans {
466 	struct bch_fs		*c;
467 
468 	unsigned long		*paths_allocated;
469 	struct btree_path	*paths;
470 	btree_path_idx_t	*sorted;
471 	struct btree_insert_entry *updates;
472 
473 	void			*mem;
474 	unsigned		mem_top;
475 	unsigned		mem_bytes;
476 
477 	btree_path_idx_t	nr_sorted;
478 	btree_path_idx_t	nr_paths;
479 	btree_path_idx_t	nr_paths_max;
480 	btree_path_idx_t	nr_updates;
481 	u8			fn_idx;
482 	u8			lock_must_abort;
483 	bool			lock_may_not_fail:1;
484 	bool			srcu_held:1;
485 	bool			locked:1;
486 	bool			pf_memalloc_nofs:1;
487 	bool			write_locked:1;
488 	bool			used_mempool:1;
489 	bool			in_traverse_all:1;
490 	bool			paths_sorted:1;
491 	bool			memory_allocation_failure:1;
492 	bool			journal_transaction_names:1;
493 	bool			journal_replay_not_finished:1;
494 	bool			notrace_relock_fail:1;
495 	enum bch_errcode	restarted:16;
496 	u32			restart_count;
497 
498 	u64			last_begin_time;
499 	unsigned long		last_begin_ip;
500 	unsigned long		last_restarted_ip;
501 	unsigned long		last_unlock_ip;
502 	unsigned long		srcu_lock_time;
503 
504 	const char		*fn;
505 	struct btree_bkey_cached_common *locking;
506 	struct six_lock_waiter	locking_wait;
507 	int			srcu_idx;
508 
509 	/* update path: */
510 	u16			journal_entries_u64s;
511 	u16			journal_entries_size;
512 	struct jset_entry	*journal_entries;
513 
514 	struct btree_trans_commit_hook *hooks;
515 	struct journal_entry_pin *journal_pin;
516 
517 	struct journal_res	journal_res;
518 	u64			*journal_seq;
519 	struct disk_reservation *disk_res;
520 
521 	struct bch_fs_usage_base fs_usage_delta;
522 
523 	unsigned		journal_u64s;
524 	unsigned		extra_disk_res; /* XXX kill */
525 
526 #ifdef CONFIG_DEBUG_LOCK_ALLOC
527 	struct lockdep_map	dep_map;
528 #endif
529 	/* Entries before this are zeroed out on every bch2_trans_get() call */
530 
531 	struct list_head	list;
532 	struct closure		ref;
533 
534 	unsigned long		_paths_allocated[BITS_TO_LONGS(BTREE_ITER_INITIAL)];
535 	struct btree_trans_paths trans_paths;
536 	struct btree_path	_paths[BTREE_ITER_INITIAL];
537 	btree_path_idx_t	_sorted[BTREE_ITER_INITIAL + 4];
538 	struct btree_insert_entry _updates[BTREE_ITER_INITIAL];
539 };
540 
541 static inline struct btree_path *btree_iter_path(struct btree_trans *trans, struct btree_iter *iter)
542 {
543 	return trans->paths + iter->path;
544 }
545 
546 static inline struct btree_path *btree_iter_key_cache_path(struct btree_trans *trans, struct btree_iter *iter)
547 {
548 	return iter->key_cache_path
549 		? trans->paths + iter->key_cache_path
550 		: NULL;
551 }
552 
553 #define BCH_BTREE_WRITE_TYPES()						\
554 	x(initial,		0)					\
555 	x(init_next_bset,	1)					\
556 	x(cache_reclaim,	2)					\
557 	x(journal_reclaim,	3)					\
558 	x(interior,		4)
559 
560 enum btree_write_type {
561 #define x(t, n) BTREE_WRITE_##t,
562 	BCH_BTREE_WRITE_TYPES()
563 #undef x
564 	BTREE_WRITE_TYPE_NR,
565 };
566 
567 #define BTREE_WRITE_TYPE_MASK	(roundup_pow_of_two(BTREE_WRITE_TYPE_NR) - 1)
568 #define BTREE_WRITE_TYPE_BITS	ilog2(roundup_pow_of_two(BTREE_WRITE_TYPE_NR))
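
/*
 * Worked example using the list above: BTREE_WRITE_TYPE_NR == 5, so
 * roundup_pow_of_two(5) == 8, BTREE_WRITE_TYPE_MASK == 7 and
 * BTREE_WRITE_TYPE_BITS == 3; BTREE_NODE_FLAGS_START below then places the
 * named node flags after these low bits.
 */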
569 
570 #define BTREE_FLAGS()							\
571 	x(read_in_flight)						\
572 	x(read_error)							\
573 	x(dirty)							\
574 	x(need_write)							\
575 	x(write_blocked)						\
576 	x(will_make_reachable)						\
577 	x(noevict)							\
578 	x(write_idx)							\
579 	x(accessed)							\
580 	x(write_in_flight)						\
581 	x(write_in_flight_inner)					\
582 	x(just_written)							\
583 	x(dying)							\
584 	x(fake)								\
585 	x(need_rewrite)							\
586 	x(never_write)
587 
588 enum btree_flags {
589 	/* First bits for btree node write type */
590 	BTREE_NODE_FLAGS_START = BTREE_WRITE_TYPE_BITS - 1,
591 #define x(flag)	BTREE_NODE_##flag,
592 	BTREE_FLAGS()
593 #undef x
594 };
595 
596 #define x(flag)								\
597 static inline bool btree_node_ ## flag(struct btree *b)			\
598 {	return test_bit(BTREE_NODE_ ## flag, &b->flags); }		\
599 									\
600 static inline void set_btree_node_ ## flag(struct btree *b)		\
601 {	set_bit(BTREE_NODE_ ## flag, &b->flags); }			\
602 									\
603 static inline void clear_btree_node_ ## flag(struct btree *b)		\
604 {	clear_bit(BTREE_NODE_ ## flag, &b->flags); }
605 
606 BTREE_FLAGS()
607 #undef x
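
/*
 * Illustrative expansion of the accessor-generating macro above for a single
 * flag (this is what the preprocessor emits; shown here only as documentation):
 *
 *	static inline bool btree_node_dirty(struct btree *b)
 *	{	return test_bit(BTREE_NODE_dirty, &b->flags); }
 *
 *	static inline void set_btree_node_dirty(struct btree *b)
 *	{	set_bit(BTREE_NODE_dirty, &b->flags); }
 *
 *	static inline void clear_btree_node_dirty(struct btree *b)
 *	{	clear_bit(BTREE_NODE_dirty, &b->flags); }
 */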
608 
609 static inline struct btree_write *btree_current_write(struct btree *b)
610 {
611 	return b->writes + btree_node_write_idx(b);
612 }
613 
614 static inline struct btree_write *btree_prev_write(struct btree *b)
615 {
616 	return b->writes + (btree_node_write_idx(b) ^ 1);
617 }
618 
619 static inline struct bset_tree *bset_tree_last(struct btree *b)
620 {
621 	EBUG_ON(!b->nsets);
622 	return b->set + b->nsets - 1;
623 }
624 
625 static inline void *
626 __btree_node_offset_to_ptr(const struct btree *b, u16 offset)
627 {
628 	return (void *) ((u64 *) b->data + 1 + offset);
629 }
630 
631 static inline u16
632 __btree_node_ptr_to_offset(const struct btree *b, const void *p)
633 {
634 	u16 ret = (u64 *) p - 1 - (u64 *) b->data;
635 
636 	EBUG_ON(__btree_node_offset_to_ptr(b, ret) != p);
637 	return ret;
638 }
639 
640 static inline struct bset *bset(const struct btree *b,
641 				const struct bset_tree *t)
642 {
643 	return __btree_node_offset_to_ptr(b, t->data_offset);
644 }
645 
646 static inline void set_btree_bset_end(struct btree *b, struct bset_tree *t)
647 {
648 	t->end_offset =
649 		__btree_node_ptr_to_offset(b, vstruct_last(bset(b, t)));
650 }
651 
652 static inline void set_btree_bset(struct btree *b, struct bset_tree *t,
653 				  const struct bset *i)
654 {
655 	t->data_offset = __btree_node_ptr_to_offset(b, i);
656 	set_btree_bset_end(b, t);
657 }
658 
659 static inline struct bset *btree_bset_first(struct btree *b)
660 {
661 	return bset(b, b->set);
662 }
663 
664 static inline struct bset *btree_bset_last(struct btree *b)
665 {
666 	return bset(b, bset_tree_last(b));
667 }
668 
669 static inline u16
670 __btree_node_key_to_offset(const struct btree *b, const struct bkey_packed *k)
671 {
672 	return __btree_node_ptr_to_offset(b, k);
673 }
674 
675 static inline struct bkey_packed *
676 __btree_node_offset_to_key(const struct btree *b, u16 k)
677 {
678 	return __btree_node_offset_to_ptr(b, k);
679 }
680 
681 static inline unsigned btree_bkey_first_offset(const struct bset_tree *t)
682 {
683 	return t->data_offset + offsetof(struct bset, _data) / sizeof(u64);
684 }
685 
686 #define btree_bkey_first(_b, _t)					\
687 ({									\
688 	EBUG_ON(bset(_b, _t)->start !=					\
689 		__btree_node_offset_to_key(_b, btree_bkey_first_offset(_t)));\
690 									\
691 	bset(_b, _t)->start;						\
692 })
693 
694 #define btree_bkey_last(_b, _t)						\
695 ({									\
696 	EBUG_ON(__btree_node_offset_to_key(_b, (_t)->end_offset) !=	\
697 		vstruct_last(bset(_b, _t)));				\
698 									\
699 	__btree_node_offset_to_key(_b, (_t)->end_offset);		\
700 })
701 
702 static inline unsigned bset_u64s(struct bset_tree *t)
703 {
704 	return t->end_offset - t->data_offset -
705 		sizeof(struct bset) / sizeof(u64);
706 }
707 
708 static inline unsigned bset_dead_u64s(struct btree *b, struct bset_tree *t)
709 {
710 	return bset_u64s(t) - b->nr.bset_u64s[t - b->set];
711 }
712 
713 static inline unsigned bset_byte_offset(struct btree *b, void *i)
714 {
715 	return i - (void *) b->data;
716 }
717 
718 enum btree_node_type {
719 	BKEY_TYPE_btree,
720 #define x(kwd, val, ...) BKEY_TYPE_##kwd = val + 1,
721 	BCH_BTREE_IDS()
722 #undef x
723 	BKEY_TYPE_NR
724 };
725 
726 /* Type of a key in btree @id at level @level: */
727 static inline enum btree_node_type __btree_node_type(unsigned level, enum btree_id id)
728 {
729 	return level ? BKEY_TYPE_btree : (unsigned) id + 1;
730 }
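
/*
 * For example, __btree_node_type(0, BTREE_ID_extents) == BKEY_TYPE_extents,
 * while any interior node (level > 0) is BKEY_TYPE_btree.
 */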
731 
732 /* Type of keys @b contains: */
733 static inline enum btree_node_type btree_node_type(struct btree *b)
734 {
735 	return __btree_node_type(b->c.level, b->c.btree_id);
736 }
737 
738 const char *bch2_btree_node_type_str(enum btree_node_type);
739 
740 #define BTREE_NODE_TYPE_HAS_TRANS_TRIGGERS		\
741 	(BIT_ULL(BKEY_TYPE_extents)|			\
742 	 BIT_ULL(BKEY_TYPE_alloc)|			\
743 	 BIT_ULL(BKEY_TYPE_inodes)|			\
744 	 BIT_ULL(BKEY_TYPE_stripes)|			\
745 	 BIT_ULL(BKEY_TYPE_reflink)|			\
746 	 BIT_ULL(BKEY_TYPE_subvolumes)|			\
747 	 BIT_ULL(BKEY_TYPE_btree))
748 
749 #define BTREE_NODE_TYPE_HAS_ATOMIC_TRIGGERS		\
750 	(BIT_ULL(BKEY_TYPE_alloc)|			\
751 	 BIT_ULL(BKEY_TYPE_inodes)|			\
752 	 BIT_ULL(BKEY_TYPE_stripes)|			\
753 	 BIT_ULL(BKEY_TYPE_snapshots))
754 
755 #define BTREE_NODE_TYPE_HAS_TRIGGERS			\
756 	(BTREE_NODE_TYPE_HAS_TRANS_TRIGGERS|		\
757 	 BTREE_NODE_TYPE_HAS_ATOMIC_TRIGGERS)
758 
759 static inline bool btree_node_type_has_trans_triggers(enum btree_node_type type)
760 {
761 	return BIT_ULL(type) & BTREE_NODE_TYPE_HAS_TRANS_TRIGGERS;
762 }
763 
764 static inline bool btree_node_type_has_atomic_triggers(enum btree_node_type type)
765 {
766 	return BIT_ULL(type) & BTREE_NODE_TYPE_HAS_ATOMIC_TRIGGERS;
767 }
768 
769 static inline bool btree_node_type_has_triggers(enum btree_node_type type)
770 {
771 	return BIT_ULL(type) & BTREE_NODE_TYPE_HAS_TRIGGERS;
772 }
773 
774 static inline bool btree_node_type_is_extents(enum btree_node_type type)
775 {
776 	const u64 mask = 0
777 #define x(name, nr, flags, ...)	|((!!((flags) & BTREE_ID_EXTENTS)) << (nr + 1))
778 	BCH_BTREE_IDS()
779 #undef x
780 	;
781 
782 	return BIT_ULL(type) & mask;
783 }
784 
785 static inline bool btree_id_is_extents(enum btree_id btree)
786 {
787 	return btree_node_type_is_extents(__btree_node_type(0, btree));
788 }
789 
790 static inline bool btree_type_has_snapshots(enum btree_id id)
791 {
792 	const u64 mask = 0
793 #define x(name, nr, flags, ...)	|((!!((flags) & BTREE_ID_SNAPSHOTS)) << nr)
794 	BCH_BTREE_IDS()
795 #undef x
796 	;
797 
798 	return BIT_ULL(id) & mask;
799 }
800 
801 static inline bool btree_type_has_snapshot_field(enum btree_id id)
802 {
803 	const u64 mask = 0
804 #define x(name, nr, flags, ...)	|((!!((flags) & (BTREE_ID_SNAPSHOT_FIELD|BTREE_ID_SNAPSHOTS))) << nr)
805 	BCH_BTREE_IDS()
806 #undef x
807 	;
808 
809 	return BIT_ULL(id) & mask;
810 }
811 
812 static inline bool btree_type_has_ptrs(enum btree_id id)
813 {
814 	const u64 mask = 0
815 #define x(name, nr, flags, ...)	|((!!((flags) & BTREE_ID_DATA)) << nr)
816 	BCH_BTREE_IDS()
817 #undef x
818 	;
819 
820 	return BIT_ULL(id) & mask;
821 }
822 
823 struct btree_root {
824 	struct btree		*b;
825 
826 	/* On disk root - see async splits: */
827 	__BKEY_PADDED(key, BKEY_BTREE_PTR_VAL_U64s_MAX);
828 	u8			level;
829 	u8			alive;
830 	s16			error;
831 };
832 
833 enum btree_gc_coalesce_fail_reason {
834 	BTREE_GC_COALESCE_FAIL_RESERVE_GET,
835 	BTREE_GC_COALESCE_FAIL_KEYLIST_REALLOC,
836 	BTREE_GC_COALESCE_FAIL_FORMAT_FITS,
837 };
838 
839 enum btree_node_sibling {
840 	btree_prev_sib,
841 	btree_next_sib,
842 };
843 
844 struct get_locks_fail {
845 	unsigned	l;
846 	struct btree	*b;
847 };
848 
849 #endif /* _BCACHEFS_BTREE_TYPES_H */
850