/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHEFS_BTREE_UPDATE_H
#define _BCACHEFS_BTREE_UPDATE_H

#include "btree_iter.h"
#include "journal.h"

struct bch_fs;
struct btree;

void bch2_btree_node_prep_for_write(struct btree_trans *,
				    struct btree_path *, struct btree *);
bool bch2_btree_bset_insert_key(struct btree_trans *, struct btree_path *,
				struct btree *, struct btree_node_iter *,
				struct bkey_i *);

int bch2_btree_node_flush0(struct journal *, struct journal_entry_pin *, u64);
int bch2_btree_node_flush1(struct journal *, struct journal_entry_pin *, u64);
void bch2_btree_add_journal_pin(struct bch_fs *, struct btree *, u64);

void bch2_btree_insert_key_leaf(struct btree_trans *, struct btree_path *,
				struct bkey_i *, u64);

#define BCH_TRANS_COMMIT_FLAGS()							\
	x(no_enospc,	"don't check for enospc")					\
	x(no_check_rw,	"don't attempt to take a ref on c->writes")			\
	x(no_journal_res, "don't take a journal reservation, instead "			\
			"pin journal entry referred to by trans->journal_res.seq")	\
	x(journal_reclaim, "operation required for journal reclaim; may return error "	\
			"instead of deadlocking if BCH_WATERMARK_reclaim not specified")\
	x(skip_accounting_apply, "we're in journal replay - accounting updates have already been applied")

enum __bch_trans_commit_flags {
	/* First bits for bch_watermark: */
	__BCH_TRANS_COMMIT_FLAGS_START = BCH_WATERMARK_BITS,
#define x(n, ...)	__BCH_TRANS_COMMIT_##n,
	BCH_TRANS_COMMIT_FLAGS()
#undef x
};

enum bch_trans_commit_flags {
#define x(n, ...)	BCH_TRANS_COMMIT_##n = BIT(__BCH_TRANS_COMMIT_##n),
	BCH_TRANS_COMMIT_FLAGS()
#undef x
};
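
/*
 * Illustrative expansion of the x-macro above (a sketch, not part of the
 * real source): each x() entry becomes a bit index and then a flag, e.g.
 *
 *	__BCH_TRANS_COMMIT_no_enospc,
 *	BCH_TRANS_COMMIT_no_enospc = BIT(__BCH_TRANS_COMMIT_no_enospc),
 *
 * The low BCH_WATERMARK_BITS bits of the flags word are reserved for the
 * bch_watermark, so a watermark can be or'd in alongside commit flags:
 *
 *	unsigned flags = BCH_WATERMARK_reclaim|BCH_TRANS_COMMIT_journal_reclaim;
 */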

void bch2_trans_commit_flags_to_text(struct printbuf *, enum bch_trans_commit_flags);

int bch2_btree_delete_at(struct btree_trans *, struct btree_iter *, unsigned);
int bch2_btree_delete(struct btree_trans *, enum btree_id, struct bpos, unsigned);

int bch2_btree_insert_nonextent(struct btree_trans *, enum btree_id,
				struct bkey_i *, enum btree_iter_update_trigger_flags);

int bch2_btree_insert_trans(struct btree_trans *, enum btree_id, struct bkey_i *,
			enum btree_iter_update_trigger_flags);
int bch2_btree_insert(struct bch_fs *, enum btree_id, struct bkey_i *,
		      struct disk_reservation *, int flags,
		      enum btree_iter_update_trigger_flags iter_flags);

int bch2_btree_delete_range_trans(struct btree_trans *, enum btree_id,
				  struct bpos, struct bpos, unsigned, u64 *);
int bch2_btree_delete_range(struct bch_fs *, enum btree_id,
			    struct bpos, struct bpos, unsigned, u64 *);

int bch2_btree_bit_mod_iter(struct btree_trans *, struct btree_iter *, bool);
int bch2_btree_bit_mod(struct btree_trans *, enum btree_id, struct bpos, bool);
int bch2_btree_bit_mod_buffered(struct btree_trans *, enum btree_id, struct bpos, bool);

static inline int bch2_btree_delete_at_buffered(struct btree_trans *trans,
						enum btree_id btree, struct bpos pos)
{
	return bch2_btree_bit_mod_buffered(trans, btree, pos, false);
}

int __bch2_insert_snapshot_whiteouts(struct btree_trans *, enum btree_id,
				     struct bpos, struct bpos);

/*
 * For use when splitting extents in existing snapshots:
 *
 * If @old_pos is an interior snapshot node, iterate over descendant snapshot
 * nodes: for every descendant snapshot in which @old_pos is overwritten and
 * not visible, emit a whiteout at @new_pos.
 */
static inline int bch2_insert_snapshot_whiteouts(struct btree_trans *trans,
						 enum btree_id btree,
						 struct bpos old_pos,
						 struct bpos new_pos)
{
	if (!btree_type_has_snapshots(btree) ||
	    bkey_eq(old_pos, new_pos))
		return 0;

	return __bch2_insert_snapshot_whiteouts(trans, btree, old_pos, new_pos);
}
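
/*
 * A hedged usage sketch (caller code and names invented for illustration):
 * when an extent that also exists in descendant snapshots is split, part of
 * it moves to a new position in the current snapshot; whiteouts are emitted
 * first so descendants in which the old extent was already overwritten don't
 * see the moved fragment:
 *
 *	ret =   bch2_insert_snapshot_whiteouts(trans, BTREE_ID_extents,
 *					       old_k.k->p, split->k.p) ?:
 *		bch2_trans_update(trans, &iter, split, 0);
 */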

int bch2_trans_update_extent_overwrite(struct btree_trans *, struct btree_iter *,
				       enum btree_iter_update_trigger_flags,
				       struct bkey_s_c, struct bkey_s_c);

int bch2_bkey_get_empty_slot(struct btree_trans *, struct btree_iter *,
			     enum btree_id, struct bpos);

int __must_check bch2_trans_update(struct btree_trans *, struct btree_iter *,
				   struct bkey_i *, enum btree_iter_update_trigger_flags);

struct jset_entry *__bch2_trans_jset_entry_alloc(struct btree_trans *, unsigned);

static inline struct jset_entry *btree_trans_journal_entries_top(struct btree_trans *trans)
{
	return (void *) ((u64 *) trans->journal_entries + trans->journal_entries_u64s);
}

static inline struct jset_entry *
bch2_trans_jset_entry_alloc(struct btree_trans *trans, unsigned u64s)
{
	if (!trans->journal_entries ||
	    trans->journal_entries_u64s + u64s > trans->journal_entries_size)
		return __bch2_trans_jset_entry_alloc(trans, u64s);

	struct jset_entry *e = btree_trans_journal_entries_top(trans);
	trans->journal_entries_u64s += u64s;
	return e;
}
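
/*
 * A minimal usage sketch (hypothetical caller; see bch2_trans_update_buffered()
 * below for a real one): reserve space for a journal entry in this
 * transaction, then initialize it:
 *
 *	struct jset_entry *e = bch2_trans_jset_entry_alloc(trans, jset_u64s(u64s));
 *	int ret = PTR_ERR_OR_ZERO(e);
 *	if (ret)
 *		return ret;
 *	journal_entry_init(e, BCH_JSET_ENTRY_log, 0, 0, u64s);
 */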

int bch2_btree_insert_clone_trans(struct btree_trans *, enum btree_id, struct bkey_i *);

static inline int __must_check bch2_trans_update_buffered(struct btree_trans *trans,
					    enum btree_id btree,
					    struct bkey_i *k)
{
	/*
	 * Most updates skip the btree write buffer until journal replay is
	 * finished because synchronization with journal replay relies on having
	 * a btree node locked - if we're overwriting a key in the journal that
	 * journal replay hasn't yet replayed, we have to mark it as
	 * overwritten.
	 *
	 * But accounting updates don't overwrite, they're deltas, and they have
	 * to be flushed to the btree strictly in order for journal replay to be
	 * able to tell which updates need to be applied:
	 */
	if (k->k.type != KEY_TYPE_accounting &&
	    unlikely(trans->journal_replay_not_finished))
		return bch2_btree_insert_clone_trans(trans, btree, k);

	struct jset_entry *e = bch2_trans_jset_entry_alloc(trans, jset_u64s(k->k.u64s));
	int ret = PTR_ERR_OR_ZERO(e);
	if (ret)
		return ret;

	journal_entry_init(e, BCH_JSET_ENTRY_write_buffer_keys, btree, 0, k->k.u64s);
	bkey_copy(e->start, k);
	return 0;
}

void bch2_trans_commit_hook(struct btree_trans *,
			    struct btree_trans_commit_hook *);
int __bch2_trans_commit(struct btree_trans *, unsigned);

int bch2_trans_log_msg(struct btree_trans *, struct printbuf *);
__printf(2, 3) int bch2_fs_log_msg(struct bch_fs *, const char *, ...);
__printf(2, 3) int bch2_journal_log_msg(struct bch_fs *, const char *, ...);

/**
 * bch2_trans_commit - insert keys at given iterator positions
 *
 * This is the main entry point for btree updates.
 *
 * Return values:
 * -EROFS: filesystem read only
 * -EIO: journal or btree node IO error
 */
static inline int bch2_trans_commit(struct btree_trans *trans,
				    struct disk_reservation *disk_res,
				    u64 *journal_seq,
				    unsigned flags)
{
	trans->disk_res		= disk_res;
	trans->journal_seq	= journal_seq;

	return __bch2_trans_commit(trans, flags);
}

#define commit_do(_trans, _disk_res, _journal_seq, _flags, _do)	\
	lockrestart_do(_trans, _do ?: bch2_trans_commit(_trans, (_disk_res),\
					(_journal_seq), (_flags)))

#define nested_commit_do(_trans, _disk_res, _journal_seq, _flags, _do)	\
	nested_lockrestart_do(_trans, _do ?: bch2_trans_commit(_trans, (_disk_res),\
					(_journal_seq), (_flags)))

#define bch2_trans_commit_do(_c, _disk_res, _journal_seq, _flags, _do)		\
	bch2_trans_run(_c, commit_do(trans, _disk_res, _journal_seq, _flags, _do))
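
/*
 * Typical usage (a sketch, not lifted from a real caller): run an update
 * inside a transaction, retrying on lock restart, then commit - here
 * deleting a key, with the enospc check disabled:
 *
 *	int ret = bch2_trans_commit_do(c, NULL, NULL,
 *			BCH_TRANS_COMMIT_no_enospc,
 *			bch2_btree_delete(trans, BTREE_ID_xattrs, pos, 0));
 */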

#define trans_for_each_update(_trans, _i)				\
	for (struct btree_insert_entry *_i = (_trans)->updates;		\
	     (_i) < (_trans)->updates + (_trans)->nr_updates;		\
	     (_i)++)

static inline void bch2_trans_reset_updates(struct btree_trans *trans)
{
	trans_for_each_update(trans, i)
		bch2_path_put(trans, i->path, true);

	trans->nr_updates		= 0;
	trans->journal_entries_u64s	= 0;
	trans->hooks			= NULL;
	trans->extra_disk_res		= 0;
}

static inline struct bkey_i *__bch2_bkey_make_mut_noupdate(struct btree_trans *trans, struct bkey_s_c k,
						  unsigned type, unsigned min_bytes)
{
	unsigned bytes = max_t(unsigned, min_bytes, bkey_bytes(k.k));
	struct bkey_i *mut;

	if (type && k.k->type != type)
		return ERR_PTR(-ENOENT);

	/* extra padding for varint_decode_fast... */
	mut = bch2_trans_kmalloc_nomemzero(trans, bytes + 8);
	if (!IS_ERR(mut)) {
		bkey_reassemble(mut, k);

		if (unlikely(bytes > bkey_bytes(k.k))) {
			memset((void *) mut + bkey_bytes(k.k), 0,
			       bytes - bkey_bytes(k.k));
			mut->k.u64s = DIV_ROUND_UP(bytes, sizeof(u64));
		}
	}
	return mut;
}

static inline struct bkey_i *bch2_bkey_make_mut_noupdate(struct btree_trans *trans, struct bkey_s_c k)
{
	return __bch2_bkey_make_mut_noupdate(trans, k, 0, 0);
}

#define bch2_bkey_make_mut_noupdate_typed(_trans, _k, _type)		\
	bkey_i_to_##_type(__bch2_bkey_make_mut_noupdate(_trans, _k,	\
				KEY_TYPE_##_type, sizeof(struct bkey_i_##_type)))

static inline struct bkey_i *__bch2_bkey_make_mut(struct btree_trans *trans, struct btree_iter *iter,
					struct bkey_s_c *k,
					enum btree_iter_update_trigger_flags flags,
					unsigned type, unsigned min_bytes)
{
	struct bkey_i *mut = __bch2_bkey_make_mut_noupdate(trans, *k, type, min_bytes);
	int ret;

	if (IS_ERR(mut))
		return mut;

	ret = bch2_trans_update(trans, iter, mut, flags);
	if (ret)
		return ERR_PTR(ret);

	*k = bkey_i_to_s_c(mut);
	return mut;
}

static inline struct bkey_i *bch2_bkey_make_mut(struct btree_trans *trans,
						struct btree_iter *iter, struct bkey_s_c *k,
						enum btree_iter_update_trigger_flags flags)
{
	return __bch2_bkey_make_mut(trans, iter, k, flags, 0, 0);
}

#define bch2_bkey_make_mut_typed(_trans, _iter, _k, _flags, _type)	\
	bkey_i_to_##_type(__bch2_bkey_make_mut(_trans, _iter, _k, _flags,\
				KEY_TYPE_##_type, sizeof(struct bkey_i_##_type)))
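
/*
 * A sketch of the usual pattern (caller code invented for illustration):
 * look up a key, take a mutable copy that is also queued as an update in
 * the transaction, then modify the copy in place:
 *
 *	struct bkey_s_c k = bch2_bkey_get_iter(trans, &iter, btree, pos,
 *					       BTREE_ITER_intent);
 *	int ret = bkey_err(k);
 *	if (ret)
 *		return ret;
 *
 *	struct bkey_i *mut = bch2_bkey_make_mut(trans, &iter, &k, 0);
 *	ret = PTR_ERR_OR_ZERO(mut);
 *	if (ret)
 *		return ret;
 *	... edit *mut; it's written out by the next bch2_trans_commit()
 */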

static inline struct bkey_i *__bch2_bkey_get_mut_noupdate(struct btree_trans *trans,
					 struct btree_iter *iter,
					 unsigned btree_id, struct bpos pos,
					 enum btree_iter_update_trigger_flags flags,
					 unsigned type, unsigned min_bytes)
{
	struct bkey_s_c k = __bch2_bkey_get_iter(trans, iter,
				btree_id, pos, flags|BTREE_ITER_intent, type);
	struct bkey_i *ret = IS_ERR(k.k)
		? ERR_CAST(k.k)
		: __bch2_bkey_make_mut_noupdate(trans, k, 0, min_bytes);
	if (IS_ERR(ret))
		bch2_trans_iter_exit(trans, iter);
	return ret;
}

static inline struct bkey_i *bch2_bkey_get_mut_noupdate(struct btree_trans *trans,
					       struct btree_iter *iter,
					       unsigned btree_id, struct bpos pos,
					       enum btree_iter_update_trigger_flags flags)
{
	return __bch2_bkey_get_mut_noupdate(trans, iter, btree_id, pos, flags, 0, 0);
}

static inline struct bkey_i *__bch2_bkey_get_mut(struct btree_trans *trans,
					 struct btree_iter *iter,
					 unsigned btree_id, struct bpos pos,
					 enum btree_iter_update_trigger_flags flags,
					 unsigned type, unsigned min_bytes)
{
	struct bkey_i *mut = __bch2_bkey_get_mut_noupdate(trans, iter,
				btree_id, pos, flags|BTREE_ITER_intent, type, min_bytes);
	int ret;

	if (IS_ERR(mut))
		return mut;

	ret = bch2_trans_update(trans, iter, mut, flags);
	if (ret) {
		bch2_trans_iter_exit(trans, iter);
		return ERR_PTR(ret);
	}

	return mut;
}

static inline struct bkey_i *bch2_bkey_get_mut_minsize(struct btree_trans *trans,
						       struct btree_iter *iter,
						       unsigned btree_id, struct bpos pos,
						       enum btree_iter_update_trigger_flags flags,
						       unsigned min_bytes)
{
	return __bch2_bkey_get_mut(trans, iter, btree_id, pos, flags, 0, min_bytes);
}

static inline struct bkey_i *bch2_bkey_get_mut(struct btree_trans *trans,
					       struct btree_iter *iter,
					       unsigned btree_id, struct bpos pos,
					       enum btree_iter_update_trigger_flags flags)
{
	return __bch2_bkey_get_mut(trans, iter, btree_id, pos, flags, 0, 0);
}

#define bch2_bkey_get_mut_typed(_trans, _iter, _btree_id, _pos, _flags, _type)\
	bkey_i_to_##_type(__bch2_bkey_get_mut(_trans, _iter,		\
			_btree_id, _pos, _flags,			\
			KEY_TYPE_##_type, sizeof(struct bkey_i_##_type)))
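
/*
 * Typed lookup-for-update, sketched (the alloc btree is just an example):
 * on success the iterator points at the key and the returned copy is
 * already queued as an update:
 *
 *	struct btree_iter iter;
 *	struct bkey_i_alloc_v4 *a =
 *		bch2_bkey_get_mut_typed(trans, &iter, BTREE_ID_alloc, pos,
 *					0, alloc_v4);
 *	int ret = PTR_ERR_OR_ZERO(a);
 *	if (ret)
 *		return ret;
 *
 *	a->v.gen++;
 *	bch2_trans_iter_exit(trans, &iter);
 */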

static inline struct bkey_i *__bch2_bkey_alloc(struct btree_trans *trans, struct btree_iter *iter,
					       enum btree_iter_update_trigger_flags flags,
					       unsigned type, unsigned val_size)
{
	struct bkey_i *k = bch2_trans_kmalloc(trans, sizeof(*k) + val_size);
	int ret;

	if (IS_ERR(k))
		return k;

	bkey_init(&k->k);
	k->k.p = iter->pos;
	k->k.type = type;
	set_bkey_val_bytes(&k->k, val_size);

	ret = bch2_trans_update(trans, iter, k, flags);
	if (unlikely(ret))
		return ERR_PTR(ret);
	return k;
}

#define bch2_bkey_alloc(_trans, _iter, _flags, _type)			\
	bkey_i_to_##_type(__bch2_bkey_alloc(_trans, _iter, _flags,	\
				KEY_TYPE_##_type, sizeof(struct bch_##_type)))
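
/*
 * Sketch (the subvolume type is just an example): allocate a zeroed key of
 * the given type at the iterator's position and queue it as an update:
 *
 *	struct bkey_i_subvolume *s = bch2_bkey_alloc(trans, &iter, 0, subvolume);
 *	int ret = PTR_ERR_OR_ZERO(s);
 *	if (ret)
 *		return ret;
 *	s->v.flags	= 0;
 */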

#endif /* _BCACHEFS_BTREE_UPDATE_H */