/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHEFS_BTREE_ITER_H
#define _BCACHEFS_BTREE_ITER_H

#include "bset.h"
#include "btree_types.h"
#include "trace.h"

static inline int __bkey_err(const struct bkey *k)
{
	return PTR_ERR_OR_ZERO(k);
}

#define bkey_err(_k)	__bkey_err((_k).k)
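
/*
 * Example (illustrative sketch, not part of this header): bkey_err() is the
 * usual way to check whether a peeked key is actually an encoded error:
 *
 *	struct bkey_s_c k = bch2_btree_iter_peek_slot(&iter);
 *	int ret = bkey_err(k);
 *	if (ret)
 *		return ret;
 *	(k.k is now known to be a valid key)
 */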

static inline void __btree_path_get(struct btree_path *path, bool intent)
{
	path->ref++;
	path->intent_ref += intent;
}

static inline bool __btree_path_put(struct btree_path *path, bool intent)
{
	EBUG_ON(!path->ref);
	EBUG_ON(!path->intent_ref && intent);
	path->intent_ref -= intent;
	return --path->ref == 0;
}

static inline void btree_path_set_dirty(struct btree_path *path,
					enum btree_path_uptodate u)
{
	path->uptodate = max_t(unsigned, path->uptodate, u);
}

static inline struct btree *btree_path_node(struct btree_path *path,
					    unsigned level)
{
	return level < BTREE_MAX_DEPTH ? path->l[level].b : NULL;
}

static inline bool btree_node_lock_seq_matches(const struct btree_path *path,
					const struct btree *b, unsigned level)
{
	return path->l[level].lock_seq == six_lock_seq(&b->c.lock);
}

static inline struct btree *btree_node_parent(struct btree_path *path,
					      struct btree *b)
{
	return btree_path_node(path, b->c.level + 1);
}

/* Iterate over paths within a transaction: */

void __bch2_btree_trans_sort_paths(struct btree_trans *);

static inline void btree_trans_sort_paths(struct btree_trans *trans)
{
	if (!IS_ENABLED(CONFIG_BCACHEFS_DEBUG) &&
	    trans->paths_sorted)
		return;
	__bch2_btree_trans_sort_paths(trans);
}

static inline unsigned long *trans_paths_nr(struct btree_path *paths)
{
	return &container_of(paths, struct btree_trans_paths, paths[0])->nr_paths;
}

static inline unsigned long *trans_paths_allocated(struct btree_path *paths)
{
	unsigned long *v = trans_paths_nr(paths);
	return v - BITS_TO_LONGS(*v);
}

#define trans_for_each_path_idx_from(_paths_allocated, _nr, _idx, _start)\
	for (_idx = _start;						\
	     (_idx = find_next_bit(_paths_allocated, _nr, _idx)) < _nr;	\
	     _idx++)

static inline struct btree_path *
__trans_next_path(struct btree_trans *trans, unsigned *idx)
{
	unsigned long *w = trans->paths_allocated + *idx / BITS_PER_LONG;
	/*
	 * Open coded find_next_bit(), because
	 *  - this is the fast path and we can't afford the function call
	 *  - we know that nr_paths is a multiple of BITS_PER_LONG
	 */
	while (*idx < trans->nr_paths) {
		unsigned long v = *w >> (*idx & (BITS_PER_LONG - 1));
		if (v) {
			*idx += __ffs(v);
			return trans->paths + *idx;
		}

		*idx += BITS_PER_LONG;
		*idx &= ~(BITS_PER_LONG - 1);
		w++;
	}

	return NULL;
}

/*
 * This version is intended to be safe for use on a btree_trans that is owned
 * by another thread, e.g. for bch2_btree_trans_to_text():
 */
#define trans_for_each_path_from(_trans, _path, _idx, _start)		\
	for (_idx = _start;						\
	     (_path = __trans_next_path((_trans), &_idx));		\
	     _idx++)

#define trans_for_each_path(_trans, _path, _idx)			\
	trans_for_each_path_from(_trans, _path, _idx, 1)
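
/*
 * Example (illustrative sketch): walking every allocated path in a
 * transaction, e.g. to count how many currently hold node locks;
 * "trans" is assumed to be a valid btree_trans:
 *
 *	struct btree_path *path;
 *	unsigned i, nr_locked = 0;
 *
 *	trans_for_each_path(trans, path, i)
 *		if (path->nodes_locked)
 *			nr_locked++;
 */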

static inline struct btree_path *next_btree_path(struct btree_trans *trans, struct btree_path *path)
{
	unsigned idx = path ? path->sorted_idx + 1 : 0;

	EBUG_ON(idx > trans->nr_sorted);

	return idx < trans->nr_sorted
		? trans->paths + trans->sorted[idx]
		: NULL;
}

static inline struct btree_path *prev_btree_path(struct btree_trans *trans, struct btree_path *path)
{
	unsigned idx = path ? path->sorted_idx : trans->nr_sorted;

	return idx
		? trans->paths + trans->sorted[idx - 1]
		: NULL;
}

#define trans_for_each_path_idx_inorder(_trans, _iter)			\
	for (_iter = (struct trans_for_each_path_inorder_iter) { 0 };	\
	     (_iter.path_idx = (_trans)->sorted[_iter.sorted_idx],	\
	      _iter.sorted_idx < (_trans)->nr_sorted);			\
	     _iter.sorted_idx++)

struct trans_for_each_path_inorder_iter {
	btree_path_idx_t	sorted_idx;
	btree_path_idx_t	path_idx;
};

#define trans_for_each_path_inorder(_trans, _path, _iter)		\
	for (_iter = (struct trans_for_each_path_inorder_iter) { 0 };	\
	     (_iter.path_idx = (_trans)->sorted[_iter.sorted_idx],	\
	      _path = (_trans)->paths + _iter.path_idx,			\
	      _iter.sorted_idx < (_trans)->nr_sorted);			\
	     _iter.sorted_idx++)

#define trans_for_each_path_inorder_reverse(_trans, _path, _i)		\
	for (_i = (_trans)->nr_sorted - 1;				\
	     ((_path) = (_trans)->paths + (_trans)->sorted[_i]), (_i) >= 0;\
	     --_i)

static inline bool __path_has_node(const struct btree_path *path,
				   const struct btree *b)
{
	return path->l[b->c.level].b == b &&
		btree_node_lock_seq_matches(path, b, b->c.level);
}

static inline struct btree_path *
__trans_next_path_with_node(struct btree_trans *trans, struct btree *b,
			    unsigned *idx)
{
	struct btree_path *path;

	while ((path = __trans_next_path(trans, idx)) &&
		!__path_has_node(path, b))
	       (*idx)++;

	return path;
}

#define trans_for_each_path_with_node(_trans, _b, _path, _iter)		\
	for (_iter = 1;							\
	     (_path = __trans_next_path_with_node((_trans), (_b), &_iter));\
	     _iter++)

btree_path_idx_t __bch2_btree_path_make_mut(struct btree_trans *, btree_path_idx_t,
					    bool, unsigned long);

static inline btree_path_idx_t __must_check
bch2_btree_path_make_mut(struct btree_trans *trans,
			 btree_path_idx_t path, bool intent,
			 unsigned long ip)
{
	if (trans->paths[path].ref > 1 ||
	    trans->paths[path].preserve)
		path = __bch2_btree_path_make_mut(trans, path, intent, ip);
	trans->paths[path].should_be_locked = false;
	return path;
}

btree_path_idx_t __must_check
__bch2_btree_path_set_pos(struct btree_trans *, btree_path_idx_t,
			  struct bpos, bool, unsigned long);

static inline btree_path_idx_t __must_check
bch2_btree_path_set_pos(struct btree_trans *trans,
			btree_path_idx_t path, struct bpos new_pos,
			bool intent, unsigned long ip)
{
	return !bpos_eq(new_pos, trans->paths[path].pos)
		? __bch2_btree_path_set_pos(trans, path, new_pos, intent, ip)
		: path;
}

int __must_check bch2_btree_path_traverse_one(struct btree_trans *,
					      btree_path_idx_t,
					      unsigned, unsigned long);

static inline void bch2_trans_verify_not_unlocked(struct btree_trans *);

static inline int __must_check bch2_btree_path_traverse(struct btree_trans *trans,
					  btree_path_idx_t path, unsigned flags)
{
	bch2_trans_verify_not_unlocked(trans);

	if (trans->paths[path].uptodate < BTREE_ITER_NEED_RELOCK)
		return 0;

	return bch2_btree_path_traverse_one(trans, path, flags, _RET_IP_);
}

btree_path_idx_t bch2_path_get(struct btree_trans *, enum btree_id, struct bpos,
				 unsigned, unsigned, unsigned, unsigned long);
btree_path_idx_t bch2_path_get_unlocked_mut(struct btree_trans *, enum btree_id,
					    unsigned, struct bpos);

struct bkey_s_c bch2_btree_path_peek_slot(struct btree_path *, struct bkey *);

/*
 * bch2_btree_path_peek_slot() for a cached iterator might return a key in a
 * different snapshot; this variant only returns a key at exactly path->pos,
 * synthesizing a deleted key there otherwise:
 */
static inline struct bkey_s_c bch2_btree_path_peek_slot_exact(struct btree_path *path, struct bkey *u)
{
	struct bkey_s_c k = bch2_btree_path_peek_slot(path, u);

	if (k.k && bpos_eq(path->pos, k.k->p))
		return k;

	bkey_init(u);
	u->p = path->pos;
	return (struct bkey_s_c) { u, NULL };
}

struct bkey_i *bch2_btree_journal_peek_slot(struct btree_trans *,
					struct btree_iter *, struct bpos);

void bch2_btree_path_level_init(struct btree_trans *, struct btree_path *, struct btree *);

int __bch2_trans_mutex_lock(struct btree_trans *, struct mutex *);

static inline int bch2_trans_mutex_lock(struct btree_trans *trans, struct mutex *lock)
{
	return mutex_trylock(lock)
		? 0
		: __bch2_trans_mutex_lock(trans, lock);
}
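
/*
 * Example (illustrative sketch): taking a mutex from within a transaction.
 * If the trylock fails, __bch2_trans_mutex_lock() drops btree locks before
 * blocking and relocks afterwards, so the caller must be prepared for a
 * transaction restart error; "c->some_lock" is a stand-in for any mutex:
 *
 *	int ret = bch2_trans_mutex_lock(trans, &c->some_lock);
 *	if (ret)
 *		return ret;
 *	...
 *	mutex_unlock(&c->some_lock);
 */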

#ifdef CONFIG_BCACHEFS_DEBUG
void bch2_trans_verify_paths(struct btree_trans *);
void bch2_assert_pos_locked(struct btree_trans *, enum btree_id, struct bpos);
#else
static inline void bch2_trans_verify_paths(struct btree_trans *trans) {}
static inline void bch2_assert_pos_locked(struct btree_trans *trans, enum btree_id id,
					  struct bpos pos) {}
#endif

void bch2_btree_path_fix_key_modified(struct btree_trans *trans,
				      struct btree *, struct bkey_packed *);
void bch2_btree_node_iter_fix(struct btree_trans *trans, struct btree_path *,
			      struct btree *, struct btree_node_iter *,
			      struct bkey_packed *, unsigned, unsigned);

int bch2_btree_path_relock_intent(struct btree_trans *, struct btree_path *);

void bch2_path_put(struct btree_trans *, btree_path_idx_t, bool);

int bch2_trans_relock(struct btree_trans *);
int bch2_trans_relock_notrace(struct btree_trans *);
void bch2_trans_unlock(struct btree_trans *);
void bch2_trans_unlock_long(struct btree_trans *);

static inline int trans_was_restarted(struct btree_trans *trans, u32 restart_count)
{
	return restart_count != trans->restart_count
		? -BCH_ERR_transaction_restart_nested
		: 0;
}

void __noreturn bch2_trans_restart_error(struct btree_trans *, u32);

static inline void bch2_trans_verify_not_restarted(struct btree_trans *trans,
						   u32 restart_count)
{
	if (trans_was_restarted(trans, restart_count))
		bch2_trans_restart_error(trans, restart_count);
}

void __noreturn bch2_trans_in_restart_error(struct btree_trans *);

static inline void bch2_trans_verify_not_in_restart(struct btree_trans *trans)
{
	if (trans->restarted)
		bch2_trans_in_restart_error(trans);
}

void __noreturn bch2_trans_unlocked_error(struct btree_trans *);

static inline void bch2_trans_verify_not_unlocked(struct btree_trans *trans)
{
	if (!trans->locked)
		bch2_trans_unlocked_error(trans);
}

__always_inline
static int btree_trans_restart_nounlock(struct btree_trans *trans, int err)
{
	BUG_ON(err <= 0);
	BUG_ON(!bch2_err_matches(-err, BCH_ERR_transaction_restart));

	trans->restarted = err;
	trans->last_restarted_ip = _THIS_IP_;
	return -err;
}

__always_inline
static int btree_trans_restart(struct btree_trans *trans, int err)
{
	btree_trans_restart_nounlock(trans, err);
	return -err;
}
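
/*
 * Example (illustrative sketch): code that discovers it must give up on the
 * current iteration typically restarts the transaction like so; the caller
 * (usually via lockrestart_do()) catches the error and retries.
 * "some_invariant_was_invalidated" is a placeholder condition:
 *
 *	if (some_invariant_was_invalidated)
 *		return btree_trans_restart(trans,
 *				BCH_ERR_transaction_restart_nested);
 */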

bool bch2_btree_node_upgrade(struct btree_trans *,
			     struct btree_path *, unsigned);

void __bch2_btree_path_downgrade(struct btree_trans *, struct btree_path *, unsigned);

static inline void bch2_btree_path_downgrade(struct btree_trans *trans,
					     struct btree_path *path)
{
	unsigned new_locks_want = path->level + !!path->intent_ref;

	if (path->locks_want > new_locks_want)
		__bch2_btree_path_downgrade(trans, path, new_locks_want);
}

void bch2_trans_downgrade(struct btree_trans *);

void bch2_trans_node_add(struct btree_trans *trans, struct btree_path *, struct btree *);
void bch2_trans_node_reinit_iter(struct btree_trans *, struct btree *);

int __must_check __bch2_btree_iter_traverse(struct btree_iter *iter);
int __must_check bch2_btree_iter_traverse(struct btree_iter *);

struct btree *bch2_btree_iter_peek_node(struct btree_iter *);
struct btree *bch2_btree_iter_peek_node_and_restart(struct btree_iter *);
struct btree *bch2_btree_iter_next_node(struct btree_iter *);

struct bkey_s_c bch2_btree_iter_peek_upto(struct btree_iter *, struct bpos);
struct bkey_s_c bch2_btree_iter_next(struct btree_iter *);

static inline struct bkey_s_c bch2_btree_iter_peek(struct btree_iter *iter)
{
	return bch2_btree_iter_peek_upto(iter, SPOS_MAX);
}

struct bkey_s_c bch2_btree_iter_peek_prev(struct btree_iter *);
struct bkey_s_c bch2_btree_iter_prev(struct btree_iter *);

struct bkey_s_c bch2_btree_iter_peek_slot(struct btree_iter *);
struct bkey_s_c bch2_btree_iter_next_slot(struct btree_iter *);
struct bkey_s_c bch2_btree_iter_prev_slot(struct btree_iter *);

bool bch2_btree_iter_advance(struct btree_iter *);
bool bch2_btree_iter_rewind(struct btree_iter *);

static inline void __bch2_btree_iter_set_pos(struct btree_iter *iter, struct bpos new_pos)
{
	iter->k.type = KEY_TYPE_deleted;
	iter->k.p.inode		= iter->pos.inode	= new_pos.inode;
	iter->k.p.offset	= iter->pos.offset	= new_pos.offset;
	iter->k.p.snapshot	= iter->pos.snapshot	= new_pos.snapshot;
	iter->k.size = 0;
}

static inline void bch2_btree_iter_set_pos(struct btree_iter *iter, struct bpos new_pos)
{
	struct btree_trans *trans = iter->trans;

	if (unlikely(iter->update_path))
		bch2_path_put(trans, iter->update_path,
			      iter->flags & BTREE_ITER_intent);
	iter->update_path = 0;

	if (!(iter->flags & BTREE_ITER_all_snapshots))
		new_pos.snapshot = iter->snapshot;

	__bch2_btree_iter_set_pos(iter, new_pos);
}

static inline void bch2_btree_iter_set_pos_to_extent_start(struct btree_iter *iter)
{
	BUG_ON(!(iter->flags & BTREE_ITER_is_extents));
	iter->pos = bkey_start_pos(&iter->k);
}

static inline void bch2_btree_iter_set_snapshot(struct btree_iter *iter, u32 snapshot)
{
	struct bpos pos = iter->pos;

	iter->snapshot = snapshot;
	pos.snapshot = snapshot;
	bch2_btree_iter_set_pos(iter, pos);
}

void bch2_trans_iter_exit(struct btree_trans *, struct btree_iter *);

static inline unsigned __bch2_btree_iter_flags(struct btree_trans *trans,
					       unsigned btree_id,
					       unsigned flags)
{
	if (!(flags & (BTREE_ITER_all_snapshots|BTREE_ITER_not_extents)) &&
	    btree_id_is_extents(btree_id))
		flags |= BTREE_ITER_is_extents;

	if (!(flags & BTREE_ITER_snapshot_field) &&
	    !btree_type_has_snapshot_field(btree_id))
		flags &= ~BTREE_ITER_all_snapshots;

	if (!(flags & BTREE_ITER_all_snapshots) &&
	    btree_type_has_snapshots(btree_id))
		flags |= BTREE_ITER_filter_snapshots;

	if (trans->journal_replay_not_finished)
		flags |= BTREE_ITER_with_journal;

	return flags;
}

static inline unsigned bch2_btree_iter_flags(struct btree_trans *trans,
					     unsigned btree_id,
					     unsigned flags)
{
	if (!btree_id_cached(trans->c, btree_id)) {
		flags &= ~BTREE_ITER_cached;
		flags &= ~BTREE_ITER_with_key_cache;
	} else if (!(flags & BTREE_ITER_cached))
		flags |= BTREE_ITER_with_key_cache;

	return __bch2_btree_iter_flags(trans, btree_id, flags);
}

static inline void bch2_trans_iter_init_common(struct btree_trans *trans,
					  struct btree_iter *iter,
					  unsigned btree_id, struct bpos pos,
					  unsigned locks_want,
					  unsigned depth,
					  unsigned flags,
					  unsigned long ip)
{
	iter->trans		= trans;
	iter->update_path	= 0;
	iter->key_cache_path	= 0;
	iter->btree_id		= btree_id;
	iter->min_depth		= 0;
	iter->flags		= flags;
	iter->snapshot		= pos.snapshot;
	iter->pos		= pos;
	iter->k			= POS_KEY(pos);
	iter->journal_idx	= 0;
#ifdef CONFIG_BCACHEFS_DEBUG
	iter->ip_allocated = ip;
#endif
	iter->path = bch2_path_get(trans, btree_id, iter->pos,
				   locks_want, depth, flags, ip);
}

void bch2_trans_iter_init_outlined(struct btree_trans *, struct btree_iter *,
			  enum btree_id, struct bpos, unsigned);

static inline void bch2_trans_iter_init(struct btree_trans *trans,
			  struct btree_iter *iter,
			  unsigned btree_id, struct bpos pos,
			  unsigned flags)
{
	if (__builtin_constant_p(btree_id) &&
	    __builtin_constant_p(flags))
		bch2_trans_iter_init_common(trans, iter, btree_id, pos, 0, 0,
				bch2_btree_iter_flags(trans, btree_id, flags),
				_THIS_IP_);
	else
		bch2_trans_iter_init_outlined(trans, iter, btree_id, pos, flags);
}
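
/*
 * Example (illustrative sketch): the basic init/peek/exit pattern for a
 * single lookup; every iterator initialized with bch2_trans_iter_init()
 * must be released with bch2_trans_iter_exit():
 *
 *	struct btree_iter iter;
 *	struct bkey_s_c k;
 *	int ret;
 *
 *	bch2_trans_iter_init(trans, &iter, BTREE_ID_inodes, pos, 0);
 *	k = bch2_btree_iter_peek_slot(&iter);
 *	ret = bkey_err(k);
 *	if (!ret) {
 *		(use k)
 *	}
 *	bch2_trans_iter_exit(trans, &iter);
 */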

void bch2_trans_node_iter_init(struct btree_trans *, struct btree_iter *,
			       enum btree_id, struct bpos,
			       unsigned, unsigned, unsigned);
void bch2_trans_copy_iter(struct btree_iter *, struct btree_iter *);

void bch2_set_btree_iter_dontneed(struct btree_iter *);

void *__bch2_trans_kmalloc(struct btree_trans *, size_t);

static inline void *bch2_trans_kmalloc(struct btree_trans *trans, size_t size)
{
	size = roundup(size, 8);

	if (likely(trans->mem_top + size <= trans->mem_bytes)) {
		void *p = trans->mem + trans->mem_top;

		trans->mem_top += size;
		memset(p, 0, size);
		return p;
	} else {
		return __bch2_trans_kmalloc(trans, size);
	}
}

static inline void *bch2_trans_kmalloc_nomemzero(struct btree_trans *trans, size_t size)
{
	size = round_up(size, 8);

	if (likely(trans->mem_top + size <= trans->mem_bytes)) {
		void *p = trans->mem + trans->mem_top;

		trans->mem_top += size;
		return p;
	} else {
		return __bch2_trans_kmalloc(trans, size);
	}
}
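
/*
 * Example (illustrative sketch): transaction-scoped allocation, typically
 * used for building an update; the memory lives until the transaction is
 * reset, so no explicit free is needed, but the return value may be an
 * ERR_PTR:
 *
 *	struct bkey_i *update = bch2_trans_kmalloc(trans, sizeof(*update));
 *	int ret = PTR_ERR_OR_ZERO(update);
 *	if (ret)
 *		return ret;
 *	bkey_init(&update->k);
 *	update->k.p = iter.pos;
 */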

static inline struct bkey_s_c __bch2_bkey_get_iter(struct btree_trans *trans,
				struct btree_iter *iter,
				unsigned btree_id, struct bpos pos,
				unsigned flags, unsigned type)
{
	struct bkey_s_c k;

	bch2_trans_iter_init(trans, iter, btree_id, pos, flags);
	k = bch2_btree_iter_peek_slot(iter);

	if (!bkey_err(k) && type && k.k->type != type)
		k = bkey_s_c_err(-BCH_ERR_ENOENT_bkey_type_mismatch);
	if (unlikely(bkey_err(k)))
		bch2_trans_iter_exit(trans, iter);
	return k;
}

static inline struct bkey_s_c bch2_bkey_get_iter(struct btree_trans *trans,
				struct btree_iter *iter,
				unsigned btree_id, struct bpos pos,
				unsigned flags)
{
	return __bch2_bkey_get_iter(trans, iter, btree_id, pos, flags, 0);
}

#define bch2_bkey_get_iter_typed(_trans, _iter, _btree_id, _pos, _flags, _type)\
	bkey_s_c_to_##_type(__bch2_bkey_get_iter(_trans, _iter,			\
				       _btree_id, _pos, _flags, KEY_TYPE_##_type))
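
/*
 * Example (illustrative sketch): looking up a key that must be of a specific
 * type; on success the iterator is left live and must be exited, on error
 * __bch2_bkey_get_iter() has already exited it:
 *
 *	struct btree_iter iter;
 *	struct bkey_s_c_snapshot s =
 *		bch2_bkey_get_iter_typed(trans, &iter, BTREE_ID_snapshots,
 *					 POS(0, id), 0, snapshot);
 *	int ret = bkey_err(s);
 *	if (ret)
 *		return ret;
 *	(use s.v)
 *	bch2_trans_iter_exit(trans, &iter);
 */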

#define bkey_val_copy(_dst_v, _src_k)					\
do {									\
	unsigned _b = min_t(unsigned, sizeof(*_dst_v),			\
			    bkey_val_bytes(_src_k.k));			\
	memcpy(_dst_v, _src_k.v, _b);					\
	if (_b < sizeof(*_dst_v))					\
		memset((void *) (_dst_v) + _b, 0, sizeof(*_dst_v) - _b);\
} while (0)

static inline int __bch2_bkey_get_val_typed(struct btree_trans *trans,
				unsigned btree_id, struct bpos pos,
				unsigned flags, unsigned type,
				unsigned val_size, void *val)
{
	struct btree_iter iter;
	struct bkey_s_c k;
	int ret;

	k = __bch2_bkey_get_iter(trans, &iter, btree_id, pos, flags, type);
	ret = bkey_err(k);
	if (!ret) {
		unsigned b = min_t(unsigned, bkey_val_bytes(k.k), val_size);

		memcpy(val, k.v, b);
		if (unlikely(b < val_size))
			memset((void *) val + b, 0, val_size - b);
		bch2_trans_iter_exit(trans, &iter);
	}

	return ret;
}

#define bch2_bkey_get_val_typed(_trans, _btree_id, _pos, _flags, _type, _val)\
	__bch2_bkey_get_val_typed(_trans, _btree_id, _pos, _flags,	\
				  KEY_TYPE_##_type, sizeof(*_val), _val)

void bch2_trans_srcu_unlock(struct btree_trans *);

u32 bch2_trans_begin(struct btree_trans *);

#define __for_each_btree_node(_trans, _iter, _btree_id, _start,			\
			      _locks_want, _depth, _flags, _b, _do)		\
({										\
	bch2_trans_begin((_trans));						\
										\
	struct btree_iter _iter;						\
	bch2_trans_node_iter_init((_trans), &_iter, (_btree_id),		\
				  _start, _locks_want, _depth, _flags);		\
	int _ret3 = 0;								\
	do {									\
		_ret3 = lockrestart_do((_trans), ({				\
			struct btree *_b = bch2_btree_iter_peek_node(&_iter);	\
			if (!_b)						\
				break;						\
										\
			PTR_ERR_OR_ZERO(_b) ?: (_do);				\
		})) ?:								\
		lockrestart_do((_trans),					\
			PTR_ERR_OR_ZERO(bch2_btree_iter_next_node(&_iter)));	\
	} while (!_ret3);							\
										\
	bch2_trans_iter_exit((_trans), &(_iter));				\
	_ret3;									\
})

#define for_each_btree_node(_trans, _iter, _btree_id, _start,		\
			    _flags, _b, _do)				\
	__for_each_btree_node(_trans, _iter, _btree_id, _start,	\
			      0, 0, _flags, _b, _do)
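
/*
 * Example (illustrative sketch): walking every node in a btree, e.g. to
 * print them; _do is evaluated with _b pointing at each node in turn, and
 * "buf" is assumed to be a struct printbuf the caller has set up:
 *
 *	ret = for_each_btree_node(trans, iter, BTREE_ID_extents, POS_MIN,
 *				  0, b, ({
 *		bch2_btree_node_to_text(&buf, c, b);
 *		0;
 *	}));
 */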

static inline struct bkey_s_c bch2_btree_iter_peek_prev_type(struct btree_iter *iter,
							     unsigned flags)
{
	return  flags & BTREE_ITER_slots      ? bch2_btree_iter_peek_slot(iter) :
						bch2_btree_iter_peek_prev(iter);
}

static inline struct bkey_s_c bch2_btree_iter_peek_type(struct btree_iter *iter,
							unsigned flags)
{
	return  flags & BTREE_ITER_slots      ? bch2_btree_iter_peek_slot(iter) :
						bch2_btree_iter_peek(iter);
}

static inline struct bkey_s_c bch2_btree_iter_peek_upto_type(struct btree_iter *iter,
							     struct bpos end,
							     unsigned flags)
{
	if (!(flags & BTREE_ITER_slots))
		return bch2_btree_iter_peek_upto(iter, end);

	if (bkey_gt(iter->pos, end))
		return bkey_s_c_null;

	return bch2_btree_iter_peek_slot(iter);
}

int __bch2_btree_trans_too_many_iters(struct btree_trans *);

static inline int btree_trans_too_many_iters(struct btree_trans *trans)
{
	if (bitmap_weight(trans->paths_allocated, trans->nr_paths) > BTREE_ITER_NORMAL_LIMIT - 8)
		return __bch2_btree_trans_too_many_iters(trans);

	return 0;
}

/*
 * goto instead of loop, so that when used inside for_each_btree_key()
 * break/continue work correctly
 */
#define lockrestart_do(_trans, _do)					\
({									\
	__label__ transaction_restart;					\
	u32 _restart_count;						\
	int _ret2;							\
transaction_restart:							\
	_restart_count = bch2_trans_begin(_trans);			\
	_ret2 = (_do);							\
									\
	if (bch2_err_matches(_ret2, BCH_ERR_transaction_restart))	\
		goto transaction_restart;				\
									\
	if (!_ret2)							\
		bch2_trans_verify_not_restarted(_trans, _restart_count);\
	_ret2;								\
})
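
/*
 * Example (illustrative sketch): wrapping a fallible operation so that
 * transaction restarts are retried transparently and only "real" errors
 * escape; "bch2_some_operation" is a placeholder for any function that may
 * return a BCH_ERR_transaction_restart error:
 *
 *	ret = lockrestart_do(trans,
 *		bch2_some_operation(trans, args));
 */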

/*
 * nested_lockrestart_do(), nested_commit_do():
 *
 * These are like lockrestart_do() and commit_do(), with two differences:
 *
 *  - We don't call bch2_trans_begin() unless we had a transaction restart
 *  - We return -BCH_ERR_transaction_restart_nested if we succeeded after a
 *  transaction restart
 */
#define nested_lockrestart_do(_trans, _do)				\
({									\
	u32 _restart_count, _orig_restart_count;			\
	int _ret2;							\
									\
	_restart_count = _orig_restart_count = (_trans)->restart_count;	\
									\
	while (bch2_err_matches(_ret2 = (_do), BCH_ERR_transaction_restart))\
		_restart_count = bch2_trans_begin(_trans);		\
									\
	if (!_ret2)							\
		bch2_trans_verify_not_restarted(_trans, _restart_count);\
									\
	_ret2 ?: trans_was_restarted(_trans, _orig_restart_count);	\
})

#define for_each_btree_key_upto_continue(_trans, _iter,			\
					 _end, _flags, _k, _do)		\
({									\
	struct bkey_s_c _k;						\
	int _ret3 = 0;							\
									\
	do {								\
		_ret3 = lockrestart_do(_trans, ({			\
			(_k) = bch2_btree_iter_peek_upto_type(&(_iter),	\
						_end, (_flags));	\
			if (!(_k).k)					\
				break;					\
									\
			bkey_err(_k) ?: (_do);				\
		}));							\
	} while (!_ret3 && bch2_btree_iter_advance(&(_iter)));		\
									\
	bch2_trans_iter_exit((_trans), &(_iter));			\
	_ret3;								\
})

#define for_each_btree_key_continue(_trans, _iter, _flags, _k, _do)	\
	for_each_btree_key_upto_continue(_trans, _iter, SPOS_MAX, _flags, _k, _do)

#define for_each_btree_key_upto(_trans, _iter, _btree_id,		\
				_start, _end, _flags, _k, _do)		\
({									\
	bch2_trans_begin((_trans));					\
									\
	struct btree_iter _iter;					\
	bch2_trans_iter_init((_trans), &(_iter), (_btree_id),		\
			     (_start), (_flags));			\
									\
	for_each_btree_key_upto_continue(_trans, _iter, _end, _flags, _k, _do);\
})

#define for_each_btree_key(_trans, _iter, _btree_id,			\
			   _start, _flags, _k, _do)			\
	for_each_btree_key_upto(_trans, _iter, _btree_id, _start,	\
				 SPOS_MAX, _flags, _k, _do)

#define for_each_btree_key_reverse(_trans, _iter, _btree_id,		\
				   _start, _flags, _k, _do)		\
({									\
	struct btree_iter _iter;					\
	struct bkey_s_c _k;						\
	int _ret3 = 0;							\
									\
	bch2_trans_iter_init((_trans), &(_iter), (_btree_id),		\
			     (_start), (_flags));			\
									\
	do {								\
		_ret3 = lockrestart_do(_trans, ({			\
			(_k) = bch2_btree_iter_peek_prev_type(&(_iter),	\
							(_flags));	\
			if (!(_k).k)					\
				break;					\
									\
			bkey_err(_k) ?: (_do);				\
		}));							\
	} while (!_ret3 && bch2_btree_iter_rewind(&(_iter)));		\
									\
	bch2_trans_iter_exit((_trans), &(_iter));			\
	_ret3;								\
})

#define for_each_btree_key_commit(_trans, _iter, _btree_id,		\
				  _start, _iter_flags, _k,		\
				  _disk_res, _journal_seq, _commit_flags,\
				  _do)					\
	for_each_btree_key(_trans, _iter, _btree_id, _start, _iter_flags, _k,\
			    (_do) ?: bch2_trans_commit(_trans, (_disk_res),\
					(_journal_seq), (_commit_flags)))
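
/*
 * Example (illustrative sketch): iterating over a btree and committing an
 * update for each key; each iteration runs and commits in its own
 * transaction restart scope. "bch2_do_something_with" is a placeholder
 * returning 0 or an error:
 *
 *	ret = for_each_btree_key_commit(trans, iter, BTREE_ID_alloc, POS_MIN,
 *			BTREE_ITER_prefetch, k,
 *			NULL, NULL, BCH_TRANS_COMMIT_no_enospc, ({
 *		bch2_do_something_with(trans, &iter, k);
 *	}));
 */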

#define for_each_btree_key_reverse_commit(_trans, _iter, _btree_id,	\
				  _start, _iter_flags, _k,		\
				  _disk_res, _journal_seq, _commit_flags,\
				  _do)					\
	for_each_btree_key_reverse(_trans, _iter, _btree_id, _start, _iter_flags, _k,\
			    (_do) ?: bch2_trans_commit(_trans, (_disk_res),\
					(_journal_seq), (_commit_flags)))

#define for_each_btree_key_upto_commit(_trans, _iter, _btree_id,	\
				  _start, _end, _iter_flags, _k,	\
				  _disk_res, _journal_seq, _commit_flags,\
				  _do)					\
	for_each_btree_key_upto(_trans, _iter, _btree_id, _start, _end, _iter_flags, _k,\
			    (_do) ?: bch2_trans_commit(_trans, (_disk_res),\
					(_journal_seq), (_commit_flags)))

struct bkey_s_c bch2_btree_iter_peek_and_restart_outlined(struct btree_iter *);

static inline struct bkey_s_c
__bch2_btree_iter_peek_and_restart(struct btree_trans *trans,
				   struct btree_iter *iter, unsigned flags)
{
	struct bkey_s_c k;

	while (btree_trans_too_many_iters(trans) ||
	       (k = bch2_btree_iter_peek_type(iter, flags),
		bch2_err_matches(bkey_err(k), BCH_ERR_transaction_restart)))
		bch2_trans_begin(trans);

	return k;
}

#define for_each_btree_key_upto_norestart(_trans, _iter, _btree_id,	\
			   _start, _end, _flags, _k, _ret)		\
	for (bch2_trans_iter_init((_trans), &(_iter), (_btree_id),	\
				  (_start), (_flags));			\
	     (_k) = bch2_btree_iter_peek_upto_type(&(_iter), _end, _flags),\
	     !((_ret) = bkey_err(_k)) && (_k).k;			\
	     bch2_btree_iter_advance(&(_iter)))

#define for_each_btree_key_upto_continue_norestart(_iter, _end, _flags, _k, _ret)\
	for (;									\
	     (_k) = bch2_btree_iter_peek_upto_type(&(_iter), _end, _flags),	\
	     !((_ret) = bkey_err(_k)) && (_k).k;				\
	     bch2_btree_iter_advance(&(_iter)))

#define for_each_btree_key_norestart(_trans, _iter, _btree_id,		\
			   _start, _flags, _k, _ret)			\
	for_each_btree_key_upto_norestart(_trans, _iter, _btree_id, _start,\
					  SPOS_MAX, _flags, _k, _ret)

#define for_each_btree_key_continue_norestart(_iter, _flags, _k, _ret)	\
	for_each_btree_key_upto_continue_norestart(_iter, SPOS_MAX, _flags, _k, _ret)

/*
 * This should not be used in a fast path without first trying _do in
 * nonblocking mode - it can cause excessive transaction restarts and
 * potentially livelock:
 */
#define drop_locks_do(_trans, _do)					\
({									\
	bch2_trans_unlock(_trans);					\
	(_do) ?: bch2_trans_relock(_trans);				\
})
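
/*
 * Example (illustrative sketch): doing a blocking allocation without
 * holding btree locks; if _do succeeds, the transaction is relocked, which
 * may itself return a restart error:
 *
 *	ret = drop_locks_do(trans,
 *		(buf = kmalloc(size, GFP_KERNEL)) ? 0 : -ENOMEM);
 */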

#define allocate_dropping_locks_errcode(_trans, _do)			\
({									\
	gfp_t _gfp = GFP_NOWAIT|__GFP_NOWARN;				\
	int _ret = _do;							\
									\
	if (bch2_err_matches(_ret, ENOMEM)) {				\
		_gfp = GFP_KERNEL;					\
		_ret = drop_locks_do(_trans, _do);			\
	}								\
	_ret;								\
})

#define allocate_dropping_locks(_trans, _ret, _do)			\
({									\
	gfp_t _gfp = GFP_NOWAIT|__GFP_NOWARN;				\
	typeof(_do) _p = _do;						\
									\
	_ret = 0;							\
	if (unlikely(!_p)) {						\
		_gfp = GFP_KERNEL;					\
		_ret = drop_locks_do(_trans, ((_p = _do), 0));		\
	}								\
	_p;								\
})
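
/*
 * Example (illustrative sketch): _do must be an allocation expression that
 * uses the macro-provided _gfp; the first attempt is nonblocking, and only
 * on failure are btree locks dropped for a blocking GFP_KERNEL retry.
 * "struct foo" is a placeholder type:
 *
 *	struct foo *p = allocate_dropping_locks(trans, ret,
 *				kzalloc(sizeof(*p), _gfp));
 *	if (!p)
 *		ret = ret ?: -ENOMEM;
 */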

#define bch2_trans_run(_c, _do)						\
({									\
	struct btree_trans *trans = bch2_trans_get(_c);			\
	int _ret = (_do);						\
	bch2_trans_put(trans);						\
	_ret;								\
})
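
/*
 * Example (illustrative sketch): bch2_trans_run() names the transaction
 * "trans" by design, so _do can reference it directly; here combined with
 * for_each_btree_key() to scan a whole btree:
 *
 *	int ret = bch2_trans_run(c,
 *		for_each_btree_key(trans, iter, BTREE_ID_extents, POS_MIN,
 *				   BTREE_ITER_prefetch, k, ({
 *			(inspect k)
 *			0;
 *		})));
 */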

void bch2_trans_updates_to_text(struct printbuf *, struct btree_trans *);
void bch2_btree_path_to_text(struct printbuf *, struct btree_trans *, btree_path_idx_t);
void bch2_trans_paths_to_text(struct printbuf *, struct btree_trans *);
void bch2_dump_trans_updates(struct btree_trans *);
void bch2_dump_trans_paths_updates(struct btree_trans *);

struct btree_trans *__bch2_trans_get(struct bch_fs *, unsigned);
void bch2_trans_put(struct btree_trans *);

bool bch2_current_has_btree_trans(struct bch_fs *);

extern const char *bch2_btree_transaction_fns[BCH_TRANSACTIONS_NR];
unsigned bch2_trans_get_fn_idx(const char *);

#define bch2_trans_get(_c)						\
({									\
	static unsigned trans_fn_idx;					\
									\
	if (unlikely(!trans_fn_idx))					\
		trans_fn_idx = bch2_trans_get_fn_idx(__func__);		\
	__bch2_trans_get(_c, trans_fn_idx);				\
})

void bch2_btree_trans_to_text(struct printbuf *, struct btree_trans *);

void bch2_fs_btree_iter_exit(struct bch_fs *);
void bch2_fs_btree_iter_init_early(struct bch_fs *);
int bch2_fs_btree_iter_init(struct bch_fs *);

#endif /* _BCACHEFS_BTREE_ITER_H */