/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHEFS_BACKPOINTERS_BACKGROUND_H
#define _BCACHEFS_BACKPOINTERS_BACKGROUND_H

#include "btree_cache.h"
#include "btree_iter.h"
#include "btree_update.h"
#include "buckets.h"
#include "error.h"
#include "super.h"

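/*
 * Byte swap a 40-bit integer: bch_backpointer stores its bucket_offset in a
 * 40-bit bitfield, which the generic swab helpers don't cover.
 */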
static inline u64 swab40(u64 x)
{
	return (((x & 0x00000000ffULL) << 32)|
		((x & 0x000000ff00ULL) << 16)|
		((x & 0x0000ff0000ULL) >>  0)|
		((x & 0x00ff000000ULL) >> 16)|
		((x & 0xff00000000ULL) >> 32));
}

int bch2_backpointer_validate(struct bch_fs *, struct bkey_s_c k, enum bch_validate_flags);
void bch2_backpointer_to_text(struct printbuf *, const struct bch_backpointer *);
void bch2_backpointer_k_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c);
void bch2_backpointer_swab(struct bkey_s);

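/* bkey method table for KEY_TYPE_backpointer: */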
#define bch2_bkey_ops_backpointer ((struct bkey_ops) {	\
	.key_validate	= bch2_backpointer_validate,	\
	.val_to_text	= bch2_backpointer_k_to_text,	\
	.swab		= bch2_backpointer_swab,	\
	.min_val_size	= 32,				\
})

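/*
 * Backpointer positions within a bucket are the bucket sector offset scaled up
 * by this shift, with the extent's crc offset added in (see
 * __bch2_extent_ptr_to_bp() below): this keeps backpointers distinct when
 * compression leaves multiple extents referencing the same disk sectors, and
 * bounds the compression ratio that can be represented.
 */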
#define MAX_EXTENT_COMPRESS_RATIO_SHIFT		10

/*
 * Convert from pos in backpointer btree to pos of corresponding bucket in alloc
 * btree:
 */
static inline struct bpos bp_pos_to_bucket(const struct bch_dev *ca, struct bpos bp_pos)
{
	u64 bucket_sector = bp_pos.offset >> MAX_EXTENT_COMPRESS_RATIO_SHIFT;

	return POS(bp_pos.inode, sector_to_bucket(ca, bucket_sector));
}

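/*
 * As above, but starting from just the device index: returns false if the
 * device doesn't exist. The _noerror variant leaves error handling to the
 * caller; bp_pos_to_bucket_nodev() flags a filesystem inconsistency.
 */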
static inline bool bp_pos_to_bucket_nodev_noerror(struct bch_fs *c, struct bpos bp_pos, struct bpos *bucket)
{
	rcu_read_lock();
	struct bch_dev *ca = bch2_dev_rcu(c, bp_pos.inode);
	if (ca)
		*bucket = bp_pos_to_bucket(ca, bp_pos);
	rcu_read_unlock();
	return ca != NULL;
}

static inline bool bp_pos_to_bucket_nodev(struct bch_fs *c, struct bpos bp_pos, struct bpos *bucket)
{
	return !bch2_fs_inconsistent_on(!bp_pos_to_bucket_nodev_noerror(c, bp_pos, bucket),
					c, "backpointer for missing device %llu", bp_pos.inode);
}

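/*
 * Inverse of bp_pos_to_bucket(); the _noerror variant skips the debug
 * assertion that the conversion round-trips:
 */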
static inline struct bpos bucket_pos_to_bp_noerror(const struct bch_dev *ca,
						   struct bpos bucket,
						   u64 bucket_offset)
{
	return POS(bucket.inode,
		   (bucket_to_sector(ca, bucket.offset) <<
		    MAX_EXTENT_COMPRESS_RATIO_SHIFT) + bucket_offset);
}

/*
 * Convert from pos in alloc btree + bucket offset to pos in backpointer btree:
 */
static inline struct bpos bucket_pos_to_bp(const struct bch_dev *ca,
					   struct bpos bucket,
					   u64 bucket_offset)
{
	struct bpos ret = bucket_pos_to_bp_noerror(ca, bucket, bucket_offset);
	EBUG_ON(!bkey_eq(bucket, bp_pos_to_bucket(ca, ret)));
	return ret;
}

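/*
 * Add (insert == true) or delete the backpointer for @bucket: updates normally
 * go through the btree write buffer, with the _nowritebuffer() variant doing a
 * direct btree update when bch2_backpointers_no_use_write_buffer is set.
 */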
int bch2_bucket_backpointer_mod_nowritebuffer(struct btree_trans *, struct bch_dev *,
					      struct bpos bucket, struct bch_backpointer,
					      struct bkey_s_c, bool);

static inline int bch2_bucket_backpointer_mod(struct btree_trans *trans,
					      struct bch_dev *ca,
					      struct bpos bucket,
					      struct bch_backpointer bp,
					      struct bkey_s_c orig_k,
					      bool insert)
{
	if (unlikely(bch2_backpointers_no_use_write_buffer))
		return bch2_bucket_backpointer_mod_nowritebuffer(trans, ca, bucket, bp, orig_k, insert);

	struct bkey_i_backpointer bp_k;

	bkey_backpointer_init(&bp_k.k_i);
	bp_k.k.p = bucket_pos_to_bp(ca, bucket, bp.bucket_offset);
	bp_k.v = bp;

	if (!insert) {
		bp_k.k.type = KEY_TYPE_deleted;
		set_bkey_val_u64s(&bp_k.k, 0);
	}

	return bch2_trans_update_buffered(trans, BTREE_ID_backpointers, &bp_k.k_i);
}

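/*
 * Data type to record in a backpointer for the given pointer: btree for btree
 * node pointers, user or stripe for extent pointers (depending on whether the
 * pointer is part of an erasure coded stripe), and user or parity for the
 * blocks of a stripe key itself.
 */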
static inline enum bch_data_type bch2_bkey_ptr_data_type(struct bkey_s_c k,
							 struct extent_ptr_decoded p,
							 const union bch_extent_entry *entry)
{
	switch (k.k->type) {
	case KEY_TYPE_btree_ptr:
	case KEY_TYPE_btree_ptr_v2:
		return BCH_DATA_btree;
	case KEY_TYPE_extent:
	case KEY_TYPE_reflink_v:
		return p.has_ec ? BCH_DATA_stripe : BCH_DATA_user;
	case KEY_TYPE_stripe: {
		const struct bch_extent_ptr *ptr = &entry->ptr;
		struct bkey_s_c_stripe s = bkey_s_c_to_stripe(k);

		BUG_ON(ptr < s.v->ptrs ||
		       ptr >= s.v->ptrs + s.v->nr_blocks);

		return ptr >= s.v->ptrs + s.v->nr_blocks - s.v->nr_redundant
			? BCH_DATA_parity
			: BCH_DATA_user;
	}
	default:
		BUG();
	}
}

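/*
 * Construct the bucket position and backpointer describing an extent pointer;
 * the caller supplies the pointer's length in sectors, which the non-__
 * variant below derives from the key.
 */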
static inline void __bch2_extent_ptr_to_bp(struct bch_fs *c, struct bch_dev *ca,
					   enum btree_id btree_id, unsigned level,
					   struct bkey_s_c k, struct extent_ptr_decoded p,
					   const union bch_extent_entry *entry,
					   struct bpos *bucket_pos, struct bch_backpointer *bp,
					   u64 sectors)
{
	u32 bucket_offset;
	*bucket_pos = PTR_BUCKET_POS_OFFSET(ca, &p.ptr, &bucket_offset);
	*bp = (struct bch_backpointer) {
		.btree_id	= btree_id,
		.level		= level,
		.data_type	= bch2_bkey_ptr_data_type(k, p, entry),
		.bucket_offset	= ((u64) bucket_offset << MAX_EXTENT_COMPRESS_RATIO_SHIFT) +
			p.crc.offset,
		.bucket_len	= sectors,
		.pos		= k.k->p,
	};
}

static inline void bch2_extent_ptr_to_bp(struct bch_fs *c, struct bch_dev *ca,
					 enum btree_id btree_id, unsigned level,
					 struct bkey_s_c k, struct extent_ptr_decoded p,
					 const union bch_extent_entry *entry,
					 struct bpos *bucket_pos, struct bch_backpointer *bp)
{
	u64 sectors = ptr_disk_sectors(level ? btree_sectors(c) : k.k->size, p);

	__bch2_extent_ptr_to_bp(c, ca, btree_id, level, k, p, entry, bucket_pos, bp, sectors);
}

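/*
 * Lookups and fsck passes implemented in backpointers.c:
 * bch2_backpointer_get_key()/_get_node() walk from a backpointer back to the
 * extent or btree node it points to; the bch2_check_*() functions are the
 * fsck passes for the backpointers btree.
 */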
int bch2_get_next_backpointer(struct btree_trans *, struct bch_dev *ca, struct bpos, int,
			      struct bpos *, struct bch_backpointer *, unsigned);
struct bkey_s_c bch2_backpointer_get_key(struct btree_trans *, struct btree_iter *,
					 struct bpos, struct bch_backpointer,
					 unsigned);
struct btree *bch2_backpointer_get_node(struct btree_trans *, struct btree_iter *,
					struct bpos, struct bch_backpointer);

int bch2_check_btree_backpointers(struct bch_fs *);
int bch2_check_extents_to_backpointers(struct bch_fs *);
int bch2_check_backpointers_to_extents(struct bch_fs *);

#endif /* _BCACHEFS_BACKPOINTERS_BACKGROUND_H */