/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHEFS_BACKPOINTERS_BACKGROUND_H
#define _BCACHEFS_BACKPOINTERS_BACKGROUND_H

#include "btree_cache.h"
#include "btree_iter.h"
#include "btree_update.h"
#include "buckets.h"
#include "super.h"

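/*
 * Byte-swap a 40-bit value held in the low five bytes of a u64, e.g.
 * swab40(0x0102030405) == 0x0504030201. (Presumably used by
 * bch2_backpointer_swab() for the 40-bit bucket_offset field.)
 */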
static inline u64 swab40(u64 x)
{
	return (((x & 0x00000000ffULL) << 32)|
		((x & 0x000000ff00ULL) << 16)|
		((x & 0x0000ff0000ULL) >>  0)|
		((x & 0x00ff000000ULL) >> 16)|
		((x & 0xff00000000ULL) >> 32));
}

int bch2_backpointer_invalid(struct bch_fs *, struct bkey_s_c k,
			     enum bkey_invalid_flags, struct printbuf *);
void bch2_backpointer_to_text(struct printbuf *, const struct bch_backpointer *);
void bch2_backpointer_k_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c);
void bch2_backpointer_swab(struct bkey_s);

#define bch2_bkey_ops_backpointer ((struct bkey_ops) {	\
	.key_invalid	= bch2_backpointer_invalid,	\
	.val_to_text	= bch2_backpointer_k_to_text,	\
	.swab		= bch2_backpointer_swab,	\
	.min_val_size	= 32,				\
})

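/*
 * Positions in the backpointers btree encode a device sector address shifted
 * left by this amount, with the offset into the (possibly compressed) extent
 * in the low bits -- the name presumably reflects a 2^10:1 cap on the
 * supported extent compression ratio.
 */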
#define MAX_EXTENT_COMPRESS_RATIO_SHIFT		10

/*
 * Convert from pos in backpointer btree to pos of corresponding bucket in alloc
 * btree:
 */
static inline struct bpos bp_pos_to_bucket(const struct bch_fs *c,
					   struct bpos bp_pos)
{
	struct bch_dev *ca = bch_dev_bkey_exists(c, bp_pos.inode);
	u64 bucket_sector = bp_pos.offset >> MAX_EXTENT_COMPRESS_RATIO_SHIFT;

	return POS(bp_pos.inode, sector_to_bucket(ca, bucket_sector));
}
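
/*
 * Example, assuming a (hypothetical) bucket size of 512 sectors: a backpointer
 * at POS(dev, (1024 << MAX_EXTENT_COMPRESS_RATIO_SHIFT) + 7) refers to sector
 * 1024, i.e. bucket POS(dev, 2).
 */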

/*
 * Convert from pos in alloc btree + bucket offset to pos in backpointer btree:
 */
static inline struct bpos bucket_pos_to_bp(const struct bch_fs *c,
					   struct bpos bucket,
					   u64 bucket_offset)
{
	struct bch_dev *ca = bch_dev_bkey_exists(c, bucket.inode);
	struct bpos ret = POS(bucket.inode,
			      (bucket_to_sector(ca, bucket.offset) <<
			       MAX_EXTENT_COMPRESS_RATIO_SHIFT) + bucket_offset);

	EBUG_ON(!bkey_eq(bucket, bp_pos_to_bucket(c, ret)));
	return ret;
}
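
/*
 * Continuing the (hypothetical) example above: bucket_pos_to_bp(c, POS(dev, 2), 7)
 * returns POS(dev, (1024 << MAX_EXTENT_COMPRESS_RATIO_SHIFT) + 7), and the
 * EBUG_ON() verifies that bp_pos_to_bucket() maps it back to bucket 2.
 */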

int bch2_bucket_backpointer_mod_nowritebuffer(struct btree_trans *, struct bpos bucket,
				struct bch_backpointer, struct bkey_s_c, bool);

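/*
 * Add or delete the backpointer for one extent pointer. Normally this goes
 * through the btree write buffer; with bch2_backpointers_no_use_write_buffer
 * set it falls back to a direct update of the backpointers btree. Sketch of a
 * likely call site, assuming @bucket and @bp were produced by
 * bch2_extent_ptr_to_bp() below:
 *
 *	ret = bch2_bucket_backpointer_mod(trans, bucket, bp, k, true);
 */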
static inline int bch2_bucket_backpointer_mod(struct btree_trans *trans,
				struct bpos bucket,
				struct bch_backpointer bp,
				struct bkey_s_c orig_k,
				bool insert)
{
	if (unlikely(bch2_backpointers_no_use_write_buffer))
		return bch2_bucket_backpointer_mod_nowritebuffer(trans, bucket, bp, orig_k, insert);

	struct bkey_i_backpointer bp_k;

	bkey_backpointer_init(&bp_k.k_i);
	bp_k.k.p = bucket_pos_to_bp(trans->c, bucket, bp.bucket_offset);
	bp_k.v = bp;

	if (!insert) {
		bp_k.k.type = KEY_TYPE_deleted;
		set_bkey_val_u64s(&bp_k.k, 0);
	}

	return bch2_trans_update_buffered(trans, BTREE_ID_backpointers, &bp_k.k_i);
}

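/*
 * The bch_data_type a given pointer within @k refers to; for stripe keys the
 * layout implied here is that the last nr_redundant blocks are parity.
 */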
static inline enum bch_data_type bch2_bkey_ptr_data_type(struct bkey_s_c k,
							 struct extent_ptr_decoded p,
							 const union bch_extent_entry *entry)
{
	switch (k.k->type) {
	case KEY_TYPE_btree_ptr:
	case KEY_TYPE_btree_ptr_v2:
		return BCH_DATA_btree;
	case KEY_TYPE_extent:
	case KEY_TYPE_reflink_v:
		return p.has_ec ? BCH_DATA_stripe : BCH_DATA_user;
	case KEY_TYPE_stripe: {
		const struct bch_extent_ptr *ptr = &entry->ptr;
		struct bkey_s_c_stripe s = bkey_s_c_to_stripe(k);

		BUG_ON(ptr < s.v->ptrs ||
		       ptr >= s.v->ptrs + s.v->nr_blocks);

		return ptr >= s.v->ptrs + s.v->nr_blocks - s.v->nr_redundant
			? BCH_DATA_parity
			: BCH_DATA_user;
	}
	default:
		BUG();
	}
}

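/*
 * Fill in @bucket_pos (the bucket @p points into) and @bp: bucket_offset
 * packs the pointer's sector offset within the bucket in the high bits,
 * with p.crc.offset added in the low MAX_EXTENT_COMPRESS_RATIO_SHIFT bits.
 */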
static inline void bch2_extent_ptr_to_bp(struct bch_fs *c,
			   enum btree_id btree_id, unsigned level,
			   struct bkey_s_c k, struct extent_ptr_decoded p,
			   const union bch_extent_entry *entry,
			   struct bpos *bucket_pos, struct bch_backpointer *bp)
{
	enum bch_data_type data_type = bch2_bkey_ptr_data_type(k, p, entry);
	s64 sectors = level ? btree_sectors(c) : k.k->size;
	u32 bucket_offset;

	*bucket_pos = PTR_BUCKET_POS_OFFSET(c, &p.ptr, &bucket_offset);
	*bp = (struct bch_backpointer) {
		.btree_id	= btree_id,
		.level		= level,
		.data_type	= data_type,
		.bucket_offset	= ((u64) bucket_offset << MAX_EXTENT_COMPRESS_RATIO_SHIFT) +
			p.crc.offset,
		.bucket_len	= ptr_disk_sectors(sectors, p),
		.pos		= k.k->p,
	};
}

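/*
 * Iterating the backpointers belonging to a bucket and resolving each one back
 * to the extent key or btree node it points at -- presumably how evacuate/fsck
 * walk the data in a bucket.
 */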
int bch2_get_next_backpointer(struct btree_trans *, struct bpos, int,
			      struct bpos *, struct bch_backpointer *, unsigned);
struct bkey_s_c bch2_backpointer_get_key(struct btree_trans *, struct btree_iter *,
					 struct bpos, struct bch_backpointer,
					 unsigned);
struct btree *bch2_backpointer_get_node(struct btree_trans *, struct btree_iter *,
					struct bpos, struct bch_backpointer);

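/*
 * Consistency check passes (presumably run from fsck/recovery): verify that
 * the extents -> backpointers and backpointers -> extents mappings agree.
 */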
int bch2_check_btree_backpointers(struct bch_fs *);
int bch2_check_extents_to_backpointers(struct bch_fs *);
int bch2_check_backpointers_to_extents(struct bch_fs *);

#endif /* _BCACHEFS_BACKPOINTERS_BACKGROUND_H */