/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHEFS_BTREE_ITER_H
#define _BCACHEFS_BTREE_ITER_H

#include "bset.h"
#include "btree_types.h"
#include "trace.h"

void bch2_trans_updates_to_text(struct printbuf *, struct btree_trans *);
void bch2_btree_path_to_text(struct printbuf *, struct btree_trans *, btree_path_idx_t);
void bch2_trans_paths_to_text(struct printbuf *, struct btree_trans *);
void bch2_dump_trans_paths_updates(struct btree_trans *);

static inline int __bkey_err(const struct bkey *k)
{
	return PTR_ERR_OR_ZERO(k);
}

#define bkey_err(_k)	__bkey_err((_k).k)

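/*
 * Example (illustrative sketch, not part of the original header): the peek
 * functions encode errors in the returned key, so callers typically check
 * with bkey_err() before using it:
 *
 *	struct bkey_s_c k = bch2_btree_iter_peek_slot(trans, &iter);
 *	int ret = bkey_err(k);
 *	if (ret)
 *		goto err;
 */
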
static inline void __btree_path_get(struct btree_trans *trans, struct btree_path *path, bool intent)
{
	unsigned idx = path - trans->paths;

	EBUG_ON(idx >= trans->nr_paths);
	EBUG_ON(!test_bit(idx, trans->paths_allocated));
	if (unlikely(path->ref == U8_MAX)) {
		bch2_dump_trans_paths_updates(trans);
		panic("path %u refcount overflow\n", idx);
	}

	path->ref++;
	path->intent_ref += intent;
	trace_btree_path_get_ll(trans, path);
}

static inline bool __btree_path_put(struct btree_trans *trans, struct btree_path *path, bool intent)
{
	EBUG_ON(path - trans->paths >= trans->nr_paths);
	EBUG_ON(!test_bit(path - trans->paths, trans->paths_allocated));
	EBUG_ON(!path->ref);
	EBUG_ON(!path->intent_ref && intent);

	trace_btree_path_put_ll(trans, path);
	path->intent_ref -= intent;
	return --path->ref == 0;
}

static inline void btree_path_set_dirty(struct btree_trans *trans,
					struct btree_path *path,
					enum btree_path_uptodate u)
{
	BUG_ON(path->should_be_locked && trans->locked && !trans->restarted);
	path->uptodate = max_t(unsigned, path->uptodate, u);
}

static inline struct btree *btree_path_node(struct btree_path *path,
					    unsigned level)
{
	return level < BTREE_MAX_DEPTH ? path->l[level].b : NULL;
}

static inline bool btree_node_lock_seq_matches(const struct btree_path *path,
					const struct btree *b, unsigned level)
{
	return path->l[level].lock_seq == six_lock_seq(&b->c.lock);
}

static inline struct btree *btree_node_parent(struct btree_path *path,
					      struct btree *b)
{
	return btree_path_node(path, b->c.level + 1);
}

/* Iterate over paths within a transaction: */

void __bch2_btree_trans_sort_paths(struct btree_trans *);

static inline void btree_trans_sort_paths(struct btree_trans *trans)
{
	if (!IS_ENABLED(CONFIG_BCACHEFS_DEBUG) &&
	    trans->paths_sorted)
		return;
	__bch2_btree_trans_sort_paths(trans);
}

static inline unsigned long *trans_paths_nr(struct btree_path *paths)
{
	return &container_of(paths, struct btree_trans_paths, paths[0])->nr_paths;
}

static inline unsigned long *trans_paths_allocated(struct btree_path *paths)
{
	unsigned long *v = trans_paths_nr(paths);
	return v - BITS_TO_LONGS(*v);
}

#define trans_for_each_path_idx_from(_paths_allocated, _nr, _idx, _start)\
	for (_idx = _start;						\
	     (_idx = find_next_bit(_paths_allocated, _nr, _idx)) < _nr;	\
	     _idx++)

static inline struct btree_path *
__trans_next_path(struct btree_trans *trans, unsigned *idx)
{
	unsigned long *w = trans->paths_allocated + *idx / BITS_PER_LONG;
	/*
	 * Open coded find_next_bit(), because
	 *  - this is a fast path, so we can't afford the function call
	 *  - and we know that nr_paths is a multiple of BITS_PER_LONG
	 */
	while (*idx < trans->nr_paths) {
		unsigned long v = *w >> (*idx & (BITS_PER_LONG - 1));
		if (v) {
			*idx += __ffs(v);
			return trans->paths + *idx;
		}

		*idx += BITS_PER_LONG;
		*idx &= ~(BITS_PER_LONG - 1);
		w++;
	}

	return NULL;
}

/*
 * This version is intended to be safe for use on a btree_trans that is owned
 * by another thread, for bch2_btree_trans_to_text():
 */
#define trans_for_each_path_from(_trans, _path, _idx, _start)		\
	for (_idx = _start;						\
	     (_path = __trans_next_path((_trans), &_idx));		\
	     _idx++)

#define trans_for_each_path(_trans, _path, _idx)			\
	trans_for_each_path_from(_trans, _path, _idx, 1)

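/*
 * Example (illustrative sketch, not part of the original header): walking
 * every allocated path in a transaction; iteration starts at index 1 because
 * path index 0 is reserved as a sentinel:
 *
 *	struct btree_path *path;
 *	unsigned i;
 *
 *	trans_for_each_path(trans, path, i)
 *		pr_info("path %u: btree %u level %u\n",
 *			i, path->btree_id, path->level);
 */
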
static inline struct btree_path *next_btree_path(struct btree_trans *trans, struct btree_path *path)
{
	unsigned idx = path ? path->sorted_idx + 1 : 0;

	EBUG_ON(idx > trans->nr_sorted);

	return idx < trans->nr_sorted
		? trans->paths + trans->sorted[idx]
		: NULL;
}

static inline struct btree_path *prev_btree_path(struct btree_trans *trans, struct btree_path *path)
{
	unsigned idx = path ? path->sorted_idx : trans->nr_sorted;

	return idx
		? trans->paths + trans->sorted[idx - 1]
		: NULL;
}

#define trans_for_each_path_idx_inorder(_trans, _iter)			\
	for (_iter = (struct trans_for_each_path_inorder_iter) { 0 };	\
	     (_iter.path_idx = (_trans)->sorted[_iter.sorted_idx],	\
	      _iter.sorted_idx < (_trans)->nr_sorted);			\
	     _iter.sorted_idx++)

struct trans_for_each_path_inorder_iter {
	btree_path_idx_t	sorted_idx;
	btree_path_idx_t	path_idx;
};

#define trans_for_each_path_inorder(_trans, _path, _iter)		\
	for (_iter = (struct trans_for_each_path_inorder_iter) { 0 };	\
	     (_iter.path_idx = (_trans)->sorted[_iter.sorted_idx],	\
	      _path = (_trans)->paths + _iter.path_idx,			\
	      _iter.sorted_idx < (_trans)->nr_sorted);			\
	     _iter.sorted_idx++)

#define trans_for_each_path_inorder_reverse(_trans, _path, _i)		\
	for (_i = (_trans)->nr_sorted - 1;				\
	     ((_path) = (_trans)->paths + (_trans)->sorted[_i]), (_i) >= 0;\
	     --_i)

static inline bool __path_has_node(const struct btree_path *path,
				   const struct btree *b)
{
	return path->l[b->c.level].b == b &&
		btree_node_lock_seq_matches(path, b, b->c.level);
}

static inline struct btree_path *
__trans_next_path_with_node(struct btree_trans *trans, struct btree *b,
			    unsigned *idx)
{
	struct btree_path *path;

	while ((path = __trans_next_path(trans, idx)) &&
		!__path_has_node(path, b))
	       (*idx)++;

	return path;
}

#define trans_for_each_path_with_node(_trans, _b, _path, _iter)		\
	for (_iter = 1;							\
	     (_path = __trans_next_path_with_node((_trans), (_b), &_iter));\
	     _iter++)

btree_path_idx_t __bch2_btree_path_make_mut(struct btree_trans *, btree_path_idx_t,
					    bool, unsigned long);

static inline btree_path_idx_t __must_check
bch2_btree_path_make_mut(struct btree_trans *trans,
			 btree_path_idx_t path, bool intent,
			 unsigned long ip)
{
	if (trans->paths[path].ref > 1 ||
	    trans->paths[path].preserve)
		path = __bch2_btree_path_make_mut(trans, path, intent, ip);
	trans->paths[path].should_be_locked = false;
	return path;
}

btree_path_idx_t __must_check
__bch2_btree_path_set_pos(struct btree_trans *, btree_path_idx_t,
			  struct bpos, bool, unsigned long);

static inline btree_path_idx_t __must_check
bch2_btree_path_set_pos(struct btree_trans *trans,
			btree_path_idx_t path, struct bpos new_pos,
			bool intent, unsigned long ip)
{
	return !bpos_eq(new_pos, trans->paths[path].pos)
		? __bch2_btree_path_set_pos(trans, path, new_pos, intent, ip)
		: path;
}

int __must_check bch2_btree_path_traverse_one(struct btree_trans *,
					      btree_path_idx_t,
					      unsigned, unsigned long);

static inline void bch2_trans_verify_not_unlocked_or_in_restart(struct btree_trans *);

static inline int __must_check bch2_btree_path_traverse(struct btree_trans *trans,
					  btree_path_idx_t path, unsigned flags)
{
	bch2_trans_verify_not_unlocked_or_in_restart(trans);

	if (trans->paths[path].uptodate < BTREE_ITER_NEED_RELOCK)
		return 0;

	return bch2_btree_path_traverse_one(trans, path, flags, _RET_IP_);
}

btree_path_idx_t bch2_path_get(struct btree_trans *, enum btree_id, struct bpos,
				 unsigned, unsigned, unsigned, unsigned long);
btree_path_idx_t bch2_path_get_unlocked_mut(struct btree_trans *, enum btree_id,
					    unsigned, struct bpos);

struct bkey_s_c bch2_btree_path_peek_slot(struct btree_path *, struct bkey *);

/*
 * bch2_btree_path_peek_slot() for a cached iterator might return a key in a
 * different snapshot:
 */
static inline struct bkey_s_c bch2_btree_path_peek_slot_exact(struct btree_path *path, struct bkey *u)
{
	struct bkey_s_c k = bch2_btree_path_peek_slot(path, u);

	if (k.k && bpos_eq(path->pos, k.k->p))
		return k;

	bkey_init(u);
	u->p = path->pos;
	return (struct bkey_s_c) { u, NULL };
}

struct bkey_i *bch2_btree_journal_peek_slot(struct btree_trans *,
					struct btree_iter *, struct bpos);

void bch2_btree_path_level_init(struct btree_trans *, struct btree_path *, struct btree *);

int __bch2_trans_mutex_lock(struct btree_trans *, struct mutex *);

static inline int bch2_trans_mutex_lock(struct btree_trans *trans, struct mutex *lock)
{
	return mutex_trylock(lock)
		? 0
		: __bch2_trans_mutex_lock(trans, lock);
}

/* Debug: */

void __bch2_trans_verify_paths(struct btree_trans *);
void __bch2_assert_pos_locked(struct btree_trans *, enum btree_id, struct bpos);

static inline void bch2_trans_verify_paths(struct btree_trans *trans)
{
	if (static_branch_unlikely(&bch2_debug_check_iterators))
		__bch2_trans_verify_paths(trans);
}

static inline void bch2_assert_pos_locked(struct btree_trans *trans, enum btree_id btree,
					  struct bpos pos)
{
	if (static_branch_unlikely(&bch2_debug_check_iterators))
		__bch2_assert_pos_locked(trans, btree, pos);
}

void bch2_btree_path_fix_key_modified(struct btree_trans *trans,
				      struct btree *, struct bkey_packed *);
void bch2_btree_node_iter_fix(struct btree_trans *trans, struct btree_path *,
			      struct btree *, struct btree_node_iter *,
			      struct bkey_packed *, unsigned, unsigned);

int bch2_btree_path_relock_intent(struct btree_trans *, struct btree_path *);

void bch2_path_put(struct btree_trans *, btree_path_idx_t, bool);

int bch2_trans_relock(struct btree_trans *);
int bch2_trans_relock_notrace(struct btree_trans *);
void bch2_trans_unlock(struct btree_trans *);
void bch2_trans_unlock_long(struct btree_trans *);

static inline int trans_was_restarted(struct btree_trans *trans, u32 restart_count)
{
	return restart_count != trans->restart_count
		? -BCH_ERR_transaction_restart_nested
		: 0;
}

void __noreturn bch2_trans_restart_error(struct btree_trans *, u32);

static inline void bch2_trans_verify_not_restarted(struct btree_trans *trans,
						   u32 restart_count)
{
	if (trans_was_restarted(trans, restart_count))
		bch2_trans_restart_error(trans, restart_count);
}

void __noreturn bch2_trans_unlocked_or_in_restart_error(struct btree_trans *);

static inline void bch2_trans_verify_not_unlocked_or_in_restart(struct btree_trans *trans)
{
	if (trans->restarted || !trans->locked)
		bch2_trans_unlocked_or_in_restart_error(trans);
}

__always_inline
static int btree_trans_restart_foreign_task(struct btree_trans *trans, int err, unsigned long ip)
{
	BUG_ON(err <= 0);
	BUG_ON(!bch2_err_matches(-err, BCH_ERR_transaction_restart));

	trans->restarted = err;
	trans->last_restarted_ip = ip;
	return -err;
}

__always_inline
static int btree_trans_restart_ip(struct btree_trans *trans, int err, unsigned long ip)
{
	btree_trans_restart_foreign_task(trans, err, ip);
#ifdef CONFIG_BCACHEFS_DEBUG
	darray_exit(&trans->last_restarted_trace);
	bch2_save_backtrace(&trans->last_restarted_trace, current, 0, GFP_NOWAIT);
#endif
	return -err;
}

__always_inline
static int btree_trans_restart(struct btree_trans *trans, int err)
{
	return btree_trans_restart_ip(trans, err, _THIS_IP_);
}

static inline int trans_maybe_inject_restart(struct btree_trans *trans, unsigned long ip)
{
#ifdef CONFIG_BCACHEFS_INJECT_TRANSACTION_RESTARTS
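	/*
	 * (Explanatory comment, not in the original:) inject a restart with
	 * probability 1/2^(10 + restart_count_this_trans): the low bits of
	 * ktime_get_ns() must all be zero, and one more zero bit is required
	 * for each restart already injected, so repeated injections on the
	 * same transaction become exponentially rarer.
	 */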
	if (!(ktime_get_ns() & ~(~0ULL << min(63, (10 + trans->restart_count_this_trans))))) {
		trace_and_count(trans->c, trans_restart_injected, trans, ip);
		return btree_trans_restart_ip(trans,
					BCH_ERR_transaction_restart_fault_inject, ip);
	}
#endif
	return 0;
}

bool bch2_btree_node_upgrade(struct btree_trans *,
			     struct btree_path *, unsigned);

void __bch2_btree_path_downgrade(struct btree_trans *, struct btree_path *, unsigned);

static inline void bch2_btree_path_downgrade(struct btree_trans *trans,
					     struct btree_path *path)
{
	unsigned new_locks_want = path->level + !!path->intent_ref;

	if (path->locks_want > new_locks_want)
		__bch2_btree_path_downgrade(trans, path, new_locks_want);
}

void bch2_trans_downgrade(struct btree_trans *);

void bch2_trans_node_add(struct btree_trans *trans, struct btree_path *, struct btree *);
void bch2_trans_node_drop(struct btree_trans *trans, struct btree *);
void bch2_trans_node_reinit_iter(struct btree_trans *, struct btree *);

int __must_check __bch2_btree_iter_traverse(struct btree_trans *, struct btree_iter *);
int __must_check bch2_btree_iter_traverse(struct btree_trans *, struct btree_iter *);

struct btree *bch2_btree_iter_peek_node(struct btree_trans *, struct btree_iter *);
struct btree *bch2_btree_iter_peek_node_and_restart(struct btree_trans *, struct btree_iter *);
struct btree *bch2_btree_iter_next_node(struct btree_trans *, struct btree_iter *);

struct bkey_s_c bch2_btree_iter_peek_max(struct btree_trans *, struct btree_iter *, struct bpos);
struct bkey_s_c bch2_btree_iter_next(struct btree_trans *, struct btree_iter *);

static inline struct bkey_s_c bch2_btree_iter_peek(struct btree_trans *trans,
						   struct btree_iter *iter)
{
	return bch2_btree_iter_peek_max(trans, iter, SPOS_MAX);
}

struct bkey_s_c bch2_btree_iter_peek_prev_min(struct btree_trans *, struct btree_iter *, struct bpos);

static inline struct bkey_s_c bch2_btree_iter_peek_prev(struct btree_trans *trans, struct btree_iter *iter)
{
	return bch2_btree_iter_peek_prev_min(trans, iter, POS_MIN);
}

struct bkey_s_c bch2_btree_iter_prev(struct btree_trans *, struct btree_iter *);

struct bkey_s_c bch2_btree_iter_peek_slot(struct btree_trans *, struct btree_iter *);
struct bkey_s_c bch2_btree_iter_next_slot(struct btree_trans *, struct btree_iter *);
struct bkey_s_c bch2_btree_iter_prev_slot(struct btree_trans *, struct btree_iter *);

bool bch2_btree_iter_advance(struct btree_trans *, struct btree_iter *);
bool bch2_btree_iter_rewind(struct btree_trans *, struct btree_iter *);

static inline void __bch2_btree_iter_set_pos(struct btree_iter *iter, struct bpos new_pos)
{
	iter->k.type = KEY_TYPE_deleted;
	iter->k.p.inode		= iter->pos.inode	= new_pos.inode;
	iter->k.p.offset	= iter->pos.offset	= new_pos.offset;
	iter->k.p.snapshot	= iter->pos.snapshot	= new_pos.snapshot;
	iter->k.size = 0;
}

static inline void bch2_btree_iter_set_pos(struct btree_trans *trans,
					   struct btree_iter *iter, struct bpos new_pos)
{
	if (unlikely(iter->update_path))
		bch2_path_put(trans, iter->update_path,
			      iter->flags & BTREE_ITER_intent);
	iter->update_path = 0;

	if (!(iter->flags & BTREE_ITER_all_snapshots))
		new_pos.snapshot = iter->snapshot;

	__bch2_btree_iter_set_pos(iter, new_pos);
}

static inline void bch2_btree_iter_set_pos_to_extent_start(struct btree_iter *iter)
{
	BUG_ON(!(iter->flags & BTREE_ITER_is_extents));
	iter->pos = bkey_start_pos(&iter->k);
}

static inline void bch2_btree_iter_set_snapshot(struct btree_trans *trans,
						struct btree_iter *iter, u32 snapshot)
{
	struct bpos pos = iter->pos;

	iter->snapshot = snapshot;
	pos.snapshot = snapshot;
	bch2_btree_iter_set_pos(trans, iter, pos);
}

void bch2_trans_iter_exit(struct btree_trans *, struct btree_iter *);

static inline unsigned bch2_btree_iter_flags(struct btree_trans *trans,
					     unsigned btree_id,
					     unsigned level,
					     unsigned flags)
{
	if (level || !btree_id_cached(trans->c, btree_id)) {
		flags &= ~BTREE_ITER_cached;
		flags &= ~BTREE_ITER_with_key_cache;
	} else if (!(flags & BTREE_ITER_cached))
		flags |= BTREE_ITER_with_key_cache;

	if (!(flags & (BTREE_ITER_all_snapshots|BTREE_ITER_not_extents)) &&
	    btree_id_is_extents(btree_id))
		flags |= BTREE_ITER_is_extents;

	if (!(flags & BTREE_ITER_snapshot_field) &&
	    !btree_type_has_snapshot_field(btree_id))
		flags &= ~BTREE_ITER_all_snapshots;

	if (!(flags & BTREE_ITER_all_snapshots) &&
	    btree_type_has_snapshots(btree_id))
		flags |= BTREE_ITER_filter_snapshots;

	if (trans->journal_replay_not_finished)
		flags |= BTREE_ITER_with_journal;

	return flags;
}

static inline void bch2_trans_iter_init_common(struct btree_trans *trans,
					  struct btree_iter *iter,
					  unsigned btree_id, struct bpos pos,
					  unsigned locks_want,
					  unsigned depth,
					  unsigned flags,
					  unsigned long ip)
{
	iter->update_path	= 0;
	iter->key_cache_path	= 0;
	iter->btree_id		= btree_id;
	iter->min_depth		= 0;
	iter->flags		= flags;
	iter->snapshot		= pos.snapshot;
	iter->pos		= pos;
	iter->k			= POS_KEY(pos);
	iter->journal_idx	= 0;
#ifdef CONFIG_BCACHEFS_DEBUG
	iter->ip_allocated = ip;
#endif
	iter->path = bch2_path_get(trans, btree_id, iter->pos,
				   locks_want, depth, flags, ip);
}

void bch2_trans_iter_init_outlined(struct btree_trans *, struct btree_iter *,
			  enum btree_id, struct bpos, unsigned);

static inline void bch2_trans_iter_init(struct btree_trans *trans,
			  struct btree_iter *iter,
			  unsigned btree_id, struct bpos pos,
			  unsigned flags)
{
	if (__builtin_constant_p(btree_id) &&
	    __builtin_constant_p(flags))
		bch2_trans_iter_init_common(trans, iter, btree_id, pos, 0, 0,
				bch2_btree_iter_flags(trans, btree_id, 0, flags),
				_THIS_IP_);
	else
		bch2_trans_iter_init_outlined(trans, iter, btree_id, pos, flags);
}

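/*
 * Example (illustrative sketch, not part of the original header): the basic
 * iterator lifecycle; assumes a btree_trans obtained via bch2_trans_get():
 *
 *	struct btree_iter iter;
 *	bch2_trans_iter_init(trans, &iter, BTREE_ID_inodes, pos, 0);
 *
 *	struct bkey_s_c k = bch2_btree_iter_peek_slot(trans, &iter);
 *	int ret = bkey_err(k);
 *	if (!ret) {
 *		... use k ...
 *	}
 *	bch2_trans_iter_exit(trans, &iter);
 */
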
void bch2_trans_node_iter_init(struct btree_trans *, struct btree_iter *,
			       enum btree_id, struct bpos,
			       unsigned, unsigned, unsigned);
void bch2_trans_copy_iter(struct btree_trans *, struct btree_iter *, struct btree_iter *);

void bch2_set_btree_iter_dontneed(struct btree_trans *, struct btree_iter *);

#ifdef CONFIG_BCACHEFS_TRANS_KMALLOC_TRACE
void bch2_trans_kmalloc_trace_to_text(struct printbuf *,
				      darray_trans_kmalloc_trace *);
#endif

void *__bch2_trans_kmalloc(struct btree_trans *, size_t, unsigned long);

static inline void bch2_trans_kmalloc_trace(struct btree_trans *trans, size_t size,
					    unsigned long ip)
{
#ifdef CONFIG_BCACHEFS_TRANS_KMALLOC_TRACE
	darray_push(&trans->trans_kmalloc_trace,
		    ((struct trans_kmalloc_trace) { .ip = ip, .bytes = size }));
#endif
}

static __always_inline void *bch2_trans_kmalloc_nomemzero_ip(struct btree_trans *trans, size_t size,
						    unsigned long ip)
{
	size = roundup(size, 8);

	bch2_trans_kmalloc_trace(trans, size, ip);

	if (likely(trans->mem_top + size <= trans->mem_bytes)) {
		void *p = trans->mem + trans->mem_top;

		trans->mem_top += size;
		return p;
	} else {
		return __bch2_trans_kmalloc(trans, size, ip);
	}
}

static __always_inline void *bch2_trans_kmalloc_ip(struct btree_trans *trans, size_t size,
					  unsigned long ip)
{
	size = roundup(size, 8);

	bch2_trans_kmalloc_trace(trans, size, ip);

	if (likely(trans->mem_top + size <= trans->mem_bytes)) {
		void *p = trans->mem + trans->mem_top;

		trans->mem_top += size;
		memset(p, 0, size);
		return p;
	} else {
		return __bch2_trans_kmalloc(trans, size, ip);
	}
}

/**
 * bch2_trans_kmalloc - allocate memory for use by the current transaction
 *
 * Must be called after bch2_trans_begin(), which on second and further calls
 * frees all memory allocated in this transaction.
 */
static __always_inline void *bch2_trans_kmalloc(struct btree_trans *trans, size_t size)
{
	return bch2_trans_kmalloc_ip(trans, size, _THIS_IP_);
}

static __always_inline void *bch2_trans_kmalloc_nomemzero(struct btree_trans *trans, size_t size)
{
	return bch2_trans_kmalloc_nomemzero_ip(trans, size, _THIS_IP_);
}

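/*
 * Example (illustrative sketch, not part of the original header): allocating
 * transaction-lifetime scratch space for an update; the memory is reclaimed
 * on the next bch2_trans_begin(), so there is no explicit free:
 *
 *	struct bkey_i *update = bch2_trans_kmalloc(trans, sizeof(*update));
 *	int ret = PTR_ERR_OR_ZERO(update);
 *	if (ret)
 *		return ret;
 *	bkey_init(&update->k);
 */
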
static inline struct bkey_s_c __bch2_bkey_get_iter(struct btree_trans *trans,
				struct btree_iter *iter,
				unsigned btree_id, struct bpos pos,
				unsigned flags, unsigned type)
{
	struct bkey_s_c k;

	bch2_trans_iter_init(trans, iter, btree_id, pos, flags);
	k = bch2_btree_iter_peek_slot(trans, iter);

	if (!bkey_err(k) && type && k.k->type != type)
		k = bkey_s_c_err(-BCH_ERR_ENOENT_bkey_type_mismatch);
	if (unlikely(bkey_err(k)))
		bch2_trans_iter_exit(trans, iter);
	return k;
}

static inline struct bkey_s_c bch2_bkey_get_iter(struct btree_trans *trans,
				struct btree_iter *iter,
				unsigned btree_id, struct bpos pos,
				unsigned flags)
{
	return __bch2_bkey_get_iter(trans, iter, btree_id, pos, flags, 0);
}

#define bch2_bkey_get_iter_typed(_trans, _iter, _btree_id, _pos, _flags, _type)\
	bkey_s_c_to_##_type(__bch2_bkey_get_iter(_trans, _iter,			\
				       _btree_id, _pos, _flags, KEY_TYPE_##_type))

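/*
 * Example (illustrative sketch, not part of the original header): looking up
 * a key that must have a specific type; on success the iterator is live and
 * must be exited, on error it has already been exited:
 *
 *	struct btree_iter iter;
 *	struct bkey_s_c_snapshot s =
 *		bch2_bkey_get_iter_typed(trans, &iter, BTREE_ID_snapshots,
 *					 POS(0, id), 0, snapshot);
 *	int ret = bkey_err(s);
 *	if (ret)
 *		return ret;
 *	... use s.v ...
 *	bch2_trans_iter_exit(trans, &iter);
 */
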
static inline void __bkey_val_copy(void *dst_v, unsigned dst_size, struct bkey_s_c src_k)
{
	unsigned b = min_t(unsigned, dst_size, bkey_val_bytes(src_k.k));
	memcpy(dst_v, src_k.v, b);
	if (unlikely(b < dst_size))
		memset(dst_v + b, 0, dst_size - b);
}

#define bkey_val_copy(_dst_v, _src_k)					\
do {									\
	BUILD_BUG_ON(!__typecheck(*_dst_v, *_src_k.v));			\
	__bkey_val_copy(_dst_v, sizeof(*_dst_v), _src_k.s_c);		\
} while (0)

static inline int __bch2_bkey_get_val_typed(struct btree_trans *trans,
				unsigned btree_id, struct bpos pos,
				unsigned flags, unsigned type,
				unsigned val_size, void *val)
{
	struct btree_iter iter;
	struct bkey_s_c k = __bch2_bkey_get_iter(trans, &iter, btree_id, pos, flags, type);
	int ret = bkey_err(k);
	if (!ret) {
		__bkey_val_copy(val, val_size, k);
		bch2_trans_iter_exit(trans, &iter);
	}

	return ret;
}

#define bch2_bkey_get_val_typed(_trans, _btree_id, _pos, _flags, _type, _val)\
	__bch2_bkey_get_val_typed(_trans, _btree_id, _pos, _flags,	\
				  KEY_TYPE_##_type, sizeof(*_val), _val)

void bch2_trans_srcu_unlock(struct btree_trans *);

u32 bch2_trans_begin(struct btree_trans *);

#define __for_each_btree_node(_trans, _iter, _btree_id, _start,			\
			      _locks_want, _depth, _flags, _b, _do)		\
({										\
	bch2_trans_begin((_trans));						\
										\
	struct btree_iter _iter;						\
	bch2_trans_node_iter_init((_trans), &_iter, (_btree_id),		\
				  _start, _locks_want, _depth, _flags);		\
	int _ret3 = 0;								\
	do {									\
		_ret3 = lockrestart_do((_trans), ({				\
			struct btree *_b = bch2_btree_iter_peek_node(_trans, &_iter);\
			if (!_b)						\
				break;						\
										\
			PTR_ERR_OR_ZERO(_b) ?: (_do);				\
		})) ?:								\
		lockrestart_do((_trans),					\
			PTR_ERR_OR_ZERO(bch2_btree_iter_next_node(_trans, &_iter)));\
	} while (!_ret3);							\
										\
	bch2_trans_iter_exit((_trans), &(_iter));				\
	_ret3;									\
})

#define for_each_btree_node(_trans, _iter, _btree_id, _start,		\
			    _flags, _b, _do)				\
	__for_each_btree_node(_trans, _iter, _btree_id, _start,	\
			      0, 0, _flags, _b, _do)

static inline struct bkey_s_c bch2_btree_iter_peek_prev_type(struct btree_trans *trans,
							     struct btree_iter *iter,
							     unsigned flags)
{
	return  flags & BTREE_ITER_slots      ? bch2_btree_iter_peek_slot(trans, iter) :
						bch2_btree_iter_peek_prev(trans, iter);
}

static inline struct bkey_s_c bch2_btree_iter_peek_type(struct btree_trans *trans,
							struct btree_iter *iter,
							unsigned flags)
{
	return  flags & BTREE_ITER_slots      ? bch2_btree_iter_peek_slot(trans, iter) :
						bch2_btree_iter_peek(trans, iter);
}

static inline struct bkey_s_c bch2_btree_iter_peek_max_type(struct btree_trans *trans,
							    struct btree_iter *iter,
							    struct bpos end,
							    unsigned flags)
{
	if (!(flags & BTREE_ITER_slots))
		return bch2_btree_iter_peek_max(trans, iter, end);

	if (bkey_gt(iter->pos, end))
		return bkey_s_c_null;

	return bch2_btree_iter_peek_slot(trans, iter);
}

int __bch2_btree_trans_too_many_iters(struct btree_trans *);

static inline int btree_trans_too_many_iters(struct btree_trans *trans)
{
	if (bitmap_weight(trans->paths_allocated, trans->nr_paths) > BTREE_ITER_NORMAL_LIMIT - 8)
		return __bch2_btree_trans_too_many_iters(trans);

	return 0;
}

/*
 * goto instead of a loop, so that break/continue work correctly when this is
 * used inside for_each_btree_key2():
 */
#define lockrestart_do(_trans, _do)					\
({									\
	__label__ transaction_restart;					\
	u32 _restart_count;						\
	int _ret2;							\
transaction_restart:							\
	_restart_count = bch2_trans_begin(_trans);			\
	_ret2 = (_do);							\
									\
	if (bch2_err_matches(_ret2, BCH_ERR_transaction_restart))	\
		goto transaction_restart;				\
									\
	if (!_ret2)							\
		bch2_trans_verify_not_restarted(_trans, _restart_count);\
	_ret2;								\
})

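/*
 * Example (illustrative sketch, not part of the original header): retrying a
 * lookup until it completes without a transaction restart; other errors are
 * returned as-is:
 *
 *	struct bkey_s_c k;
 *	int ret = lockrestart_do(trans,
 *		bkey_err(k = bch2_btree_iter_peek_slot(trans, &iter)));
 */
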
/*
 * nested_lockrestart_do(), nested_commit_do():
 *
 * These are like lockrestart_do() and commit_do(), with two differences:
 *
 *  - We don't call bch2_trans_begin() unless we had a transaction restart
 *  - We return -BCH_ERR_transaction_restart_nested if we succeeded after a
 *    transaction restart
 */
#define nested_lockrestart_do(_trans, _do)				\
({									\
	u32 _restart_count, _orig_restart_count;			\
	int _ret2;							\
									\
	_restart_count = _orig_restart_count = (_trans)->restart_count;\
									\
	while (bch2_err_matches(_ret2 = (_do), BCH_ERR_transaction_restart))\
		_restart_count = bch2_trans_begin(_trans);		\
									\
	if (!_ret2)							\
		bch2_trans_verify_not_restarted(_trans, _restart_count);\
									\
	_ret2 ?: trans_was_restarted(_trans, _orig_restart_count);	\
})

#define for_each_btree_key_max_continue(_trans, _iter,			\
					 _end, _flags, _k, _do)		\
({									\
	struct bkey_s_c _k;						\
	int _ret3 = 0;							\
									\
	do {								\
		_ret3 = lockrestart_do(_trans, ({			\
			(_k) = bch2_btree_iter_peek_max_type(_trans, &(_iter),	\
						_end, (_flags));	\
			if (!(_k).k)					\
				break;					\
									\
			bkey_err(_k) ?: (_do);				\
		}));							\
	} while (!_ret3 && bch2_btree_iter_advance(_trans, &(_iter)));	\
									\
	bch2_trans_iter_exit((_trans), &(_iter));			\
	_ret3;								\
})

#define for_each_btree_key_continue(_trans, _iter, _flags, _k, _do)	\
	for_each_btree_key_max_continue(_trans, _iter, SPOS_MAX, _flags, _k, _do)

#define for_each_btree_key_max(_trans, _iter, _btree_id,		\
				_start, _end, _flags, _k, _do)		\
({									\
	bch2_trans_begin((_trans));					\
									\
	struct btree_iter _iter;					\
	bch2_trans_iter_init((_trans), &(_iter), (_btree_id),		\
			     (_start), (_flags));			\
									\
	for_each_btree_key_max_continue(_trans, _iter, _end, _flags, _k, _do);\
})

#define for_each_btree_key(_trans, _iter, _btree_id,			\
			   _start, _flags, _k, _do)			\
	for_each_btree_key_max(_trans, _iter, _btree_id, _start,	\
				 SPOS_MAX, _flags, _k, _do)

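/*
 * Example (illustrative sketch, not part of the original header): printing
 * every extent of an inode; _do runs inside lockrestart_do(), so it must be
 * safe to re-execute after a transaction restart:
 *
 *	ret = for_each_btree_key(trans, iter, BTREE_ID_extents,
 *				 POS(inum, 0), BTREE_ITER_prefetch, k, ({
 *		bch2_bkey_val_to_text(&buf, c, k);
 *		prt_newline(&buf);
 *		0;
 *	}));
 */
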
#define for_each_btree_key_reverse(_trans, _iter, _btree_id,		\
				   _start, _flags, _k, _do)		\
({									\
	struct btree_iter _iter;					\
	struct bkey_s_c _k;						\
	int _ret3 = 0;							\
									\
	bch2_trans_iter_init((_trans), &(_iter), (_btree_id),		\
			     (_start), (_flags));			\
									\
	do {								\
		_ret3 = lockrestart_do(_trans, ({			\
			(_k) = bch2_btree_iter_peek_prev_type(_trans, &(_iter),	\
							(_flags));	\
			if (!(_k).k)					\
				break;					\
									\
			bkey_err(_k) ?: (_do);				\
		}));							\
	} while (!_ret3 && bch2_btree_iter_rewind(_trans, &(_iter)));	\
									\
	bch2_trans_iter_exit((_trans), &(_iter));			\
	_ret3;								\
})

#define for_each_btree_key_commit(_trans, _iter, _btree_id,		\
				  _start, _iter_flags, _k,		\
				  _disk_res, _journal_seq, _commit_flags,\
				  _do)					\
	for_each_btree_key(_trans, _iter, _btree_id, _start, _iter_flags, _k,\
			    (_do) ?: bch2_trans_commit(_trans, (_disk_res),\
					(_journal_seq), (_commit_flags)))

#define for_each_btree_key_reverse_commit(_trans, _iter, _btree_id,	\
				  _start, _iter_flags, _k,		\
				  _disk_res, _journal_seq, _commit_flags,\
				  _do)					\
	for_each_btree_key_reverse(_trans, _iter, _btree_id, _start, _iter_flags, _k,\
			    (_do) ?: bch2_trans_commit(_trans, (_disk_res),\
					(_journal_seq), (_commit_flags)))

#define for_each_btree_key_max_commit(_trans, _iter, _btree_id,	\
				  _start, _end, _iter_flags, _k,	\
				  _disk_res, _journal_seq, _commit_flags,\
				  _do)					\
	for_each_btree_key_max(_trans, _iter, _btree_id, _start, _end, _iter_flags, _k,\
			    (_do) ?: bch2_trans_commit(_trans, (_disk_res),\
					(_journal_seq), (_commit_flags)))

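/*
 * Example (illustrative sketch, not part of the original header): iterating
 * with a commit after each iteration; updates queued by _do are committed
 * before the iterator advances:
 *
 *	ret = for_each_btree_key_commit(trans, iter, BTREE_ID_alloc,
 *			POS_MIN, BTREE_ITER_prefetch, k,
 *			NULL, NULL, BCH_TRANS_COMMIT_no_enospc, ({
 *		// examine k, queue updates with bch2_trans_update()
 *		0;
 *	}));
 */
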
struct bkey_s_c bch2_btree_iter_peek_and_restart_outlined(struct btree_trans *,
							  struct btree_iter *);

#define for_each_btree_key_max_norestart(_trans, _iter, _btree_id,	\
			   _start, _end, _flags, _k, _ret)		\
	for (bch2_trans_iter_init((_trans), &(_iter), (_btree_id),	\
				  (_start), (_flags));			\
	     (_k) = bch2_btree_iter_peek_max_type(_trans, &(_iter), _end, _flags),\
	     !((_ret) = bkey_err(_k)) && (_k).k;			\
	     bch2_btree_iter_advance(_trans, &(_iter)))

#define for_each_btree_key_max_continue_norestart(_trans, _iter, _end, _flags, _k, _ret)\
	for (;									\
	     (_k) = bch2_btree_iter_peek_max_type(_trans, &(_iter), _end, _flags),	\
	     !((_ret) = bkey_err(_k)) && (_k).k;				\
	     bch2_btree_iter_advance(_trans, &(_iter)))

#define for_each_btree_key_norestart(_trans, _iter, _btree_id,		\
			   _start, _flags, _k, _ret)			\
	for_each_btree_key_max_norestart(_trans, _iter, _btree_id, _start,\
					  SPOS_MAX, _flags, _k, _ret)

#define for_each_btree_key_reverse_norestart(_trans, _iter, _btree_id,		\
					     _start, _flags, _k, _ret)		\
	for (bch2_trans_iter_init((_trans), &(_iter), (_btree_id),		\
				  (_start), (_flags));				\
	     (_k) = bch2_btree_iter_peek_prev_type(_trans, &(_iter), _flags),	\
	     !((_ret) = bkey_err(_k)) && (_k).k;				\
	     bch2_btree_iter_rewind(_trans, &(_iter)))

#define for_each_btree_key_continue_norestart(_trans, _iter, _flags, _k, _ret)	\
	for_each_btree_key_max_continue_norestart(_trans, _iter, SPOS_MAX, _flags, _k, _ret)

/*
 * This should not be used in a fastpath without first trying _do in
 * nonblocking mode: it will cause excessive transaction restarts and
 * potentially livelock.
 */
#define drop_locks_do(_trans, _do)					\
({									\
	bch2_trans_unlock(_trans);					\
	(_do) ?: bch2_trans_relock(_trans);				\
})

#define allocate_dropping_locks_errcode(_trans, _do)			\
({									\
	gfp_t _gfp = GFP_NOWAIT|__GFP_NOWARN;				\
	int _ret = _do;							\
									\
	if (bch2_err_matches(_ret, ENOMEM)) {				\
		_gfp = GFP_KERNEL;					\
		_ret = drop_locks_do(_trans, _do);			\
	}								\
	_ret;								\
})

#define allocate_dropping_locks(_trans, _ret, _do)			\
({									\
	gfp_t _gfp = GFP_NOWAIT|__GFP_NOWARN;				\
	typeof(_do) _p = _do;						\
									\
	_ret = 0;							\
	if (unlikely(!_p)) {						\
		_gfp = GFP_KERNEL;					\
		_ret = drop_locks_do(_trans, ((_p = _do), 0));		\
	}								\
	_p;								\
})

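/*
 * Example (illustrative sketch, not part of the original header; struct foo
 * is hypothetical): try a GFP_NOWAIT allocation first, falling back to
 * GFP_KERNEL with btree locks dropped; _ret must be checked, since relocking
 * can fail with a transaction restart:
 *
 *	int ret = 0;
 *	struct foo *f = allocate_dropping_locks(trans, ret,
 *				kzalloc(sizeof(*f), _gfp));
 *	if (!f && !ret)
 *		ret = -ENOMEM;
 *	if (ret) {
 *		kfree(f);
 *		return ret;
 *	}
 */
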
struct btree_trans *__bch2_trans_get(struct bch_fs *, unsigned);
void bch2_trans_put(struct btree_trans *);

bool bch2_current_has_btree_trans(struct bch_fs *);

extern const char *bch2_btree_transaction_fns[BCH_TRANSACTIONS_NR];
unsigned bch2_trans_get_fn_idx(const char *);

#define bch2_trans_get(_c)						\
({									\
	static unsigned trans_fn_idx;					\
									\
	if (unlikely(!trans_fn_idx))					\
		trans_fn_idx = bch2_trans_get_fn_idx(__func__);		\
	__bch2_trans_get(_c, trans_fn_idx);				\
})

/*
 * We don't use DEFINE_CLASS() because using a function for the constructor
 * breaks bch2_trans_get()'s use of __func__
 */
typedef struct btree_trans * class_btree_trans_t;
static inline void class_btree_trans_destructor(struct btree_trans **p)
{
	struct btree_trans *trans = *p;
	bch2_trans_put(trans);
}

#define class_btree_trans_constructor(_c)	bch2_trans_get(_c)

#define bch2_trans_run(_c, _do)						\
({									\
	CLASS(btree_trans, trans)(_c);					\
	(_do);								\
})

#define bch2_trans_do(_c, _do)	bch2_trans_run(_c, lockrestart_do(trans, _do))

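/*
 * Example (illustrative sketch, not part of the original header): running a
 * transactional operation from an outer context; the btree_trans is created
 * and destroyed automatically, and _do is retried on transaction restart:
 *
 *	int ret = bch2_trans_do(c,
 *		bch2_btree_delete(trans, BTREE_ID_xattrs, pos, 0));
 */
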
void bch2_btree_trans_to_text(struct printbuf *, struct btree_trans *);

void bch2_fs_btree_iter_exit(struct bch_fs *);
void bch2_fs_btree_iter_init_early(struct bch_fs *);
int bch2_fs_btree_iter_init(struct bch_fs *);

#endif /* _BCACHEFS_BTREE_ITER_H */