/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHEFS_BTREE_ITER_H
#define _BCACHEFS_BTREE_ITER_H

#include "bset.h"
#include "btree_types.h"
#include "trace.h"

void bch2_trans_updates_to_text(struct printbuf *, struct btree_trans *);
void bch2_btree_path_to_text(struct printbuf *, struct btree_trans *, btree_path_idx_t);
void bch2_trans_paths_to_text(struct printbuf *, struct btree_trans *);
void bch2_dump_trans_updates(struct btree_trans *);
void bch2_dump_trans_paths_updates(struct btree_trans *);

static inline int __bkey_err(const struct bkey *k)
{
	return PTR_ERR_OR_ZERO(k);
}

#define bkey_err(_k)	__bkey_err((_k).k)

static inline void __btree_path_get(struct btree_trans *trans, struct btree_path *path, bool intent)
{
	unsigned idx = path - trans->paths;

	EBUG_ON(idx >= trans->nr_paths);
	EBUG_ON(!test_bit(idx, trans->paths_allocated));
	if (unlikely(path->ref == U8_MAX)) {
		bch2_dump_trans_paths_updates(trans);
		panic("path %u refcount overflow\n", idx);
	}

	path->ref++;
	path->intent_ref += intent;
	trace_btree_path_get_ll(trans, path);
}

static inline bool __btree_path_put(struct btree_trans *trans, struct btree_path *path, bool intent)
{
	EBUG_ON(path - trans->paths >= trans->nr_paths);
	EBUG_ON(!test_bit(path - trans->paths, trans->paths_allocated));
	EBUG_ON(!path->ref);
	EBUG_ON(!path->intent_ref && intent);

	trace_btree_path_put_ll(trans, path);
	path->intent_ref -= intent;
	return --path->ref == 0;
}

static inline void btree_path_set_dirty(struct btree_path *path,
					enum btree_path_uptodate u)
{
	path->uptodate = max_t(unsigned, path->uptodate, u);
}

static inline struct btree *btree_path_node(struct btree_path *path,
					    unsigned level)
{
	return level < BTREE_MAX_DEPTH ? path->l[level].b : NULL;
}

static inline bool btree_node_lock_seq_matches(const struct btree_path *path,
					const struct btree *b, unsigned level)
{
	return path->l[level].lock_seq == six_lock_seq(&b->c.lock);
}

static inline struct btree *btree_node_parent(struct btree_path *path,
					      struct btree *b)
{
	return btree_path_node(path, b->c.level + 1);
}

/* Iterate over paths within a transaction: */

void __bch2_btree_trans_sort_paths(struct btree_trans *);

static inline void btree_trans_sort_paths(struct btree_trans *trans)
{
	if (!IS_ENABLED(CONFIG_BCACHEFS_DEBUG) &&
	    trans->paths_sorted)
		return;
	__bch2_btree_trans_sort_paths(trans);
}

static inline unsigned long *trans_paths_nr(struct btree_path *paths)
{
	return &container_of(paths, struct btree_trans_paths, paths[0])->nr_paths;
}

static inline unsigned long *trans_paths_allocated(struct btree_path *paths)
{
	unsigned long *v = trans_paths_nr(paths);
	return v - BITS_TO_LONGS(*v);
}

#define trans_for_each_path_idx_from(_paths_allocated, _nr, _idx, _start)\
	for (_idx = _start;						\
	     (_idx = find_next_bit(_paths_allocated, _nr, _idx)) < _nr;	\
	     _idx++)

static inline struct btree_path *
__trans_next_path(struct btree_trans *trans, unsigned *idx)
{
	unsigned long *w = trans->paths_allocated + *idx / BITS_PER_LONG;
	/*
	 * Open coded find_next_bit(), because
	 *  - this is a fast path and we can't afford the function call
	 *  - we know that nr_paths is a multiple of BITS_PER_LONG
	 */
	while (*idx < trans->nr_paths) {
		unsigned long v = *w >> (*idx & (BITS_PER_LONG - 1));
		if (v) {
			*idx += __ffs(v);
			return trans->paths + *idx;
		}

		*idx += BITS_PER_LONG;
		*idx &= ~(BITS_PER_LONG - 1);
		w++;
	}

	return NULL;
}

/*
 * This version is intended to be safe for use on a btree_trans that is owned by
 * another thread, e.g. for bch2_btree_trans_to_text().
 */
#define trans_for_each_path_from(_trans, _path, _idx, _start)		\
	for (_idx = _start;						\
	     (_path = __trans_next_path((_trans), &_idx));		\
	     _idx++)

#define trans_for_each_path(_trans, _path, _idx)			\
	trans_for_each_path_from(_trans, _path, _idx, 1)
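
/*
 * Usage sketch (illustrative, not taken from an existing caller): walk every
 * allocated path in a transaction; the macro advances @idx past each
 * allocated slot:
 *
 *	struct btree_path *path;
 *	unsigned idx;
 *
 *	trans_for_each_path(trans, path, idx)
 *		printk(KERN_INFO "path %u: btree %u level %u\n",
 *		       idx, path->btree_id, path->level);
 */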

static inline struct btree_path *next_btree_path(struct btree_trans *trans, struct btree_path *path)
{
	unsigned idx = path ? path->sorted_idx + 1 : 0;

	EBUG_ON(idx > trans->nr_sorted);

	return idx < trans->nr_sorted
		? trans->paths + trans->sorted[idx]
		: NULL;
}

static inline struct btree_path *prev_btree_path(struct btree_trans *trans, struct btree_path *path)
{
	unsigned idx = path ? path->sorted_idx : trans->nr_sorted;

	return idx
		? trans->paths + trans->sorted[idx - 1]
		: NULL;
}

#define trans_for_each_path_idx_inorder(_trans, _iter)			\
	for (_iter = (struct trans_for_each_path_inorder_iter) { 0 };	\
	     (_iter.path_idx = (_trans)->sorted[_iter.sorted_idx],	\
	      _iter.sorted_idx < (_trans)->nr_sorted);			\
	     _iter.sorted_idx++)

struct trans_for_each_path_inorder_iter {
	btree_path_idx_t	sorted_idx;
	btree_path_idx_t	path_idx;
};

#define trans_for_each_path_inorder(_trans, _path, _iter)		\
	for (_iter = (struct trans_for_each_path_inorder_iter) { 0 };	\
	     (_iter.path_idx = (_trans)->sorted[_iter.sorted_idx],	\
	      _path = (_trans)->paths + _iter.path_idx,			\
	      _iter.sorted_idx < (_trans)->nr_sorted);			\
	     _iter.sorted_idx++)

#define trans_for_each_path_inorder_reverse(_trans, _path, _i)		\
	for (_i = (_trans)->nr_sorted - 1;				\
	     ((_path) = (_trans)->paths + (_trans)->sorted[_i]), (_i) >= 0;\
	     --_i)

static inline bool __path_has_node(const struct btree_path *path,
				   const struct btree *b)
{
	return path->l[b->c.level].b == b &&
		btree_node_lock_seq_matches(path, b, b->c.level);
}

static inline struct btree_path *
__trans_next_path_with_node(struct btree_trans *trans, struct btree *b,
			    unsigned *idx)
{
	struct btree_path *path;

	while ((path = __trans_next_path(trans, idx)) &&
		!__path_has_node(path, b))
	       (*idx)++;

	return path;
}

#define trans_for_each_path_with_node(_trans, _b, _path, _iter)		\
	for (_iter = 1;							\
	     (_path = __trans_next_path_with_node((_trans), (_b), &_iter));\
	     _iter++)

btree_path_idx_t __bch2_btree_path_make_mut(struct btree_trans *, btree_path_idx_t,
					    bool, unsigned long);

static inline btree_path_idx_t __must_check
bch2_btree_path_make_mut(struct btree_trans *trans,
			 btree_path_idx_t path, bool intent,
			 unsigned long ip)
{
	if (trans->paths[path].ref > 1 ||
	    trans->paths[path].preserve)
		path = __bch2_btree_path_make_mut(trans, path, intent, ip);
	trans->paths[path].should_be_locked = false;
	return path;
}

btree_path_idx_t __must_check
__bch2_btree_path_set_pos(struct btree_trans *, btree_path_idx_t,
			  struct bpos, bool, unsigned long);

static inline btree_path_idx_t __must_check
bch2_btree_path_set_pos(struct btree_trans *trans,
			btree_path_idx_t path, struct bpos new_pos,
			bool intent, unsigned long ip)
{
	return !bpos_eq(new_pos, trans->paths[path].pos)
		? __bch2_btree_path_set_pos(trans, path, new_pos, intent, ip)
		: path;
}

int __must_check bch2_btree_path_traverse_one(struct btree_trans *,
					      btree_path_idx_t,
					      unsigned, unsigned long);

static inline void bch2_trans_verify_not_unlocked_or_in_restart(struct btree_trans *);

static inline int __must_check bch2_btree_path_traverse(struct btree_trans *trans,
					  btree_path_idx_t path, unsigned flags)
{
	bch2_trans_verify_not_unlocked_or_in_restart(trans);

	if (trans->paths[path].uptodate < BTREE_ITER_NEED_RELOCK)
		return 0;

	return bch2_btree_path_traverse_one(trans, path, flags, _RET_IP_);
}

btree_path_idx_t bch2_path_get(struct btree_trans *, enum btree_id, struct bpos,
				 unsigned, unsigned, unsigned, unsigned long);
btree_path_idx_t bch2_path_get_unlocked_mut(struct btree_trans *, enum btree_id,
					    unsigned, struct bpos);

struct bkey_s_c bch2_btree_path_peek_slot(struct btree_path *, struct bkey *);

/*
 * bch2_btree_path_peek_slot() for a cached iterator might return a key in a
 * different snapshot:
 */
static inline struct bkey_s_c bch2_btree_path_peek_slot_exact(struct btree_path *path, struct bkey *u)
{
	struct bkey_s_c k = bch2_btree_path_peek_slot(path, u);

	if (k.k && bpos_eq(path->pos, k.k->p))
		return k;

	bkey_init(u);
	u->p = path->pos;
	return (struct bkey_s_c) { u, NULL };
}

struct bkey_i *bch2_btree_journal_peek_slot(struct btree_trans *,
					struct btree_iter *, struct bpos);

void bch2_btree_path_level_init(struct btree_trans *, struct btree_path *, struct btree *);

int __bch2_trans_mutex_lock(struct btree_trans *, struct mutex *);

static inline int bch2_trans_mutex_lock(struct btree_trans *trans, struct mutex *lock)
{
	return mutex_trylock(lock)
		? 0
		: __bch2_trans_mutex_lock(trans, lock);
}

#ifdef CONFIG_BCACHEFS_DEBUG
void bch2_trans_verify_paths(struct btree_trans *);
void bch2_assert_pos_locked(struct btree_trans *, enum btree_id, struct bpos);
#else
static inline void bch2_trans_verify_paths(struct btree_trans *trans) {}
static inline void bch2_assert_pos_locked(struct btree_trans *trans, enum btree_id id,
					  struct bpos pos) {}
#endif

void bch2_btree_path_fix_key_modified(struct btree_trans *trans,
				      struct btree *, struct bkey_packed *);
void bch2_btree_node_iter_fix(struct btree_trans *trans, struct btree_path *,
			      struct btree *, struct btree_node_iter *,
			      struct bkey_packed *, unsigned, unsigned);

int bch2_btree_path_relock_intent(struct btree_trans *, struct btree_path *);

void bch2_path_put(struct btree_trans *, btree_path_idx_t, bool);

int bch2_trans_relock(struct btree_trans *);
int bch2_trans_relock_notrace(struct btree_trans *);
void bch2_trans_unlock(struct btree_trans *);
void bch2_trans_unlock_long(struct btree_trans *);

static inline int trans_was_restarted(struct btree_trans *trans, u32 restart_count)
{
	return restart_count != trans->restart_count
		? -BCH_ERR_transaction_restart_nested
		: 0;
}

void __noreturn bch2_trans_restart_error(struct btree_trans *, u32);

static inline void bch2_trans_verify_not_restarted(struct btree_trans *trans,
						   u32 restart_count)
{
	if (trans_was_restarted(trans, restart_count))
		bch2_trans_restart_error(trans, restart_count);
}

void __noreturn bch2_trans_unlocked_or_in_restart_error(struct btree_trans *);

static inline void bch2_trans_verify_not_unlocked_or_in_restart(struct btree_trans *trans)
{
	if (trans->restarted || !trans->locked)
		bch2_trans_unlocked_or_in_restart_error(trans);
}

__always_inline
static int btree_trans_restart_ip(struct btree_trans *trans, int err, unsigned long ip)
{
	BUG_ON(err <= 0);
	BUG_ON(!bch2_err_matches(-err, BCH_ERR_transaction_restart));

	trans->restarted = err;
	trans->last_restarted_ip = ip;
#ifdef CONFIG_BCACHEFS_DEBUG
	darray_exit(&trans->last_restarted_trace);
	bch2_save_backtrace(&trans->last_restarted_trace, current, 0, GFP_NOWAIT);
#endif
	return -err;
}

__always_inline
static int btree_trans_restart(struct btree_trans *trans, int err)
{
	return btree_trans_restart_ip(trans, err, _THIS_IP_);
}
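
/*
 * Usage sketch (illustrative; BCH_ERR_transaction_restart_relock is assumed
 * to be one of the restart error codes from errcode.h): a helper that
 * notices a stale lock sequence requests a restart, and the error is
 * propagated up to lockrestart_do():
 *
 *	if (!btree_node_lock_seq_matches(path, b, level))
 *		return btree_trans_restart(trans, BCH_ERR_transaction_restart_relock);
 */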

bool bch2_btree_node_upgrade(struct btree_trans *,
			     struct btree_path *, unsigned);

void __bch2_btree_path_downgrade(struct btree_trans *, struct btree_path *, unsigned);

static inline void bch2_btree_path_downgrade(struct btree_trans *trans,
					     struct btree_path *path)
{
	unsigned new_locks_want = path->level + !!path->intent_ref;

	if (path->locks_want > new_locks_want)
		__bch2_btree_path_downgrade(trans, path, new_locks_want);
}

void bch2_trans_downgrade(struct btree_trans *);

void bch2_trans_node_add(struct btree_trans *trans, struct btree_path *, struct btree *);
void bch2_trans_node_drop(struct btree_trans *trans, struct btree *);
void bch2_trans_node_reinit_iter(struct btree_trans *, struct btree *);

int __must_check __bch2_btree_iter_traverse(struct btree_iter *iter);
int __must_check bch2_btree_iter_traverse(struct btree_iter *);

struct btree *bch2_btree_iter_peek_node(struct btree_iter *);
struct btree *bch2_btree_iter_peek_node_and_restart(struct btree_iter *);
struct btree *bch2_btree_iter_next_node(struct btree_iter *);

struct bkey_s_c bch2_btree_iter_peek_max(struct btree_iter *, struct bpos);
struct bkey_s_c bch2_btree_iter_next(struct btree_iter *);

static inline struct bkey_s_c bch2_btree_iter_peek(struct btree_iter *iter)
{
	return bch2_btree_iter_peek_max(iter, SPOS_MAX);
}

struct bkey_s_c bch2_btree_iter_peek_prev_min(struct btree_iter *, struct bpos);

static inline struct bkey_s_c bch2_btree_iter_peek_prev(struct btree_iter *iter)
{
	return bch2_btree_iter_peek_prev_min(iter, POS_MIN);
}

struct bkey_s_c bch2_btree_iter_prev(struct btree_iter *);

struct bkey_s_c bch2_btree_iter_peek_slot(struct btree_iter *);
struct bkey_s_c bch2_btree_iter_next_slot(struct btree_iter *);
struct bkey_s_c bch2_btree_iter_prev_slot(struct btree_iter *);

bool bch2_btree_iter_advance(struct btree_iter *);
bool bch2_btree_iter_rewind(struct btree_iter *);

static inline void __bch2_btree_iter_set_pos(struct btree_iter *iter, struct bpos new_pos)
{
	iter->k.type = KEY_TYPE_deleted;
	iter->k.p.inode		= iter->pos.inode	= new_pos.inode;
	iter->k.p.offset	= iter->pos.offset	= new_pos.offset;
	iter->k.p.snapshot	= iter->pos.snapshot	= new_pos.snapshot;
	iter->k.size = 0;
}

static inline void bch2_btree_iter_set_pos(struct btree_iter *iter, struct bpos new_pos)
{
	struct btree_trans *trans = iter->trans;

	if (unlikely(iter->update_path))
		bch2_path_put(trans, iter->update_path,
			      iter->flags & BTREE_ITER_intent);
	iter->update_path = 0;

	if (!(iter->flags & BTREE_ITER_all_snapshots))
		new_pos.snapshot = iter->snapshot;

	__bch2_btree_iter_set_pos(iter, new_pos);
}

static inline void bch2_btree_iter_set_pos_to_extent_start(struct btree_iter *iter)
{
	BUG_ON(!(iter->flags & BTREE_ITER_is_extents));
	iter->pos = bkey_start_pos(&iter->k);
}

static inline void bch2_btree_iter_set_snapshot(struct btree_iter *iter, u32 snapshot)
{
	struct bpos pos = iter->pos;

	iter->snapshot = snapshot;
	pos.snapshot = snapshot;
	bch2_btree_iter_set_pos(iter, pos);
}

void bch2_trans_iter_exit(struct btree_trans *, struct btree_iter *);

static inline unsigned bch2_btree_iter_flags(struct btree_trans *trans,
					     unsigned btree_id,
					     unsigned level,
					     unsigned flags)
{
	if (level || !btree_id_cached(trans->c, btree_id)) {
		flags &= ~BTREE_ITER_cached;
		flags &= ~BTREE_ITER_with_key_cache;
	} else if (!(flags & BTREE_ITER_cached))
		flags |= BTREE_ITER_with_key_cache;

	if (!(flags & (BTREE_ITER_all_snapshots|BTREE_ITER_not_extents)) &&
	    btree_id_is_extents(btree_id))
		flags |= BTREE_ITER_is_extents;

	if (!(flags & BTREE_ITER_snapshot_field) &&
	    !btree_type_has_snapshot_field(btree_id))
		flags &= ~BTREE_ITER_all_snapshots;

	if (!(flags & BTREE_ITER_all_snapshots) &&
	    btree_type_has_snapshots(btree_id))
		flags |= BTREE_ITER_filter_snapshots;

	if (trans->journal_replay_not_finished)
		flags |= BTREE_ITER_with_journal;

	return flags;
}

static inline void bch2_trans_iter_init_common(struct btree_trans *trans,
					  struct btree_iter *iter,
					  unsigned btree_id, struct bpos pos,
					  unsigned locks_want,
					  unsigned depth,
					  unsigned flags,
					  unsigned long ip)
{
	iter->trans		= trans;
	iter->update_path	= 0;
	iter->key_cache_path	= 0;
	iter->btree_id		= btree_id;
	iter->min_depth		= 0;
	iter->flags		= flags;
	iter->snapshot		= pos.snapshot;
	iter->pos		= pos;
	iter->k			= POS_KEY(pos);
	iter->journal_idx	= 0;
#ifdef CONFIG_BCACHEFS_DEBUG
	iter->ip_allocated = ip;
#endif
	iter->path = bch2_path_get(trans, btree_id, iter->pos,
				   locks_want, depth, flags, ip);
}

void bch2_trans_iter_init_outlined(struct btree_trans *, struct btree_iter *,
			  enum btree_id, struct bpos, unsigned);

static inline void bch2_trans_iter_init(struct btree_trans *trans,
			  struct btree_iter *iter,
			  unsigned btree_id, struct bpos pos,
			  unsigned flags)
{
	if (__builtin_constant_p(btree_id) &&
	    __builtin_constant_p(flags))
		bch2_trans_iter_init_common(trans, iter, btree_id, pos, 0, 0,
				bch2_btree_iter_flags(trans, btree_id, 0, flags),
				_THIS_IP_);
	else
		bch2_trans_iter_init_outlined(trans, iter, btree_id, pos, flags);
}

void bch2_trans_node_iter_init(struct btree_trans *, struct btree_iter *,
			       enum btree_id, struct bpos,
			       unsigned, unsigned, unsigned);
void bch2_trans_copy_iter(struct btree_iter *, struct btree_iter *);

void bch2_set_btree_iter_dontneed(struct btree_iter *);

void *__bch2_trans_kmalloc(struct btree_trans *, size_t);

/**
 * bch2_trans_kmalloc - allocate memory for use by the current transaction
 *
 * Must be called after bch2_trans_begin(), which on second and subsequent
 * calls frees all memory allocated in this transaction.
 */
static inline void *bch2_trans_kmalloc(struct btree_trans *trans, size_t size)
{
	size = roundup(size, 8);

	if (likely(trans->mem_top + size <= trans->mem_bytes)) {
		void *p = trans->mem + trans->mem_top;

		trans->mem_top += size;
		memset(p, 0, size);
		return p;
	} else {
		return __bch2_trans_kmalloc(trans, size);
	}
}
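
/*
 * Example (a sketch modeled on common usage, not copied from a caller):
 * allocate a bkey_i update buffer whose lifetime is tied to the
 * transaction; note that a transaction restart invalidates the allocation:
 *
 *	struct bkey_i *update = bch2_trans_kmalloc(trans, bkey_bytes(k.k));
 *	int ret = PTR_ERR_OR_ZERO(update);
 *	if (ret)
 *		return ret;
 *	bkey_reassemble(update, k);
 */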

static inline void *bch2_trans_kmalloc_nomemzero(struct btree_trans *trans, size_t size)
{
	size = round_up(size, 8);

	if (likely(trans->mem_top + size <= trans->mem_bytes)) {
		void *p = trans->mem + trans->mem_top;

		trans->mem_top += size;
		return p;
	} else {
		return __bch2_trans_kmalloc(trans, size);
	}
}

static inline struct bkey_s_c __bch2_bkey_get_iter(struct btree_trans *trans,
				struct btree_iter *iter,
				unsigned btree_id, struct bpos pos,
				unsigned flags, unsigned type)
{
	struct bkey_s_c k;

	bch2_trans_iter_init(trans, iter, btree_id, pos, flags);
	k = bch2_btree_iter_peek_slot(iter);

	if (!bkey_err(k) && type && k.k->type != type)
		k = bkey_s_c_err(-BCH_ERR_ENOENT_bkey_type_mismatch);
	if (unlikely(bkey_err(k)))
		bch2_trans_iter_exit(trans, iter);
	return k;
}

static inline struct bkey_s_c bch2_bkey_get_iter(struct btree_trans *trans,
				struct btree_iter *iter,
				unsigned btree_id, struct bpos pos,
				unsigned flags)
{
	return __bch2_bkey_get_iter(trans, iter, btree_id, pos, flags, 0);
}

#define bch2_bkey_get_iter_typed(_trans, _iter, _btree_id, _pos, _flags, _type)\
	bkey_s_c_to_##_type(__bch2_bkey_get_iter(_trans, _iter,			\
				       _btree_id, _pos, _flags, KEY_TYPE_##_type))
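
/*
 * Example (a sketch; assumes the alloc_v4 key type and BTREE_ID_alloc from
 * the rest of bcachefs): look up a key with a required type and get a typed
 * view; on error the iterator has already been exited:
 *
 *	struct btree_iter iter;
 *	struct bkey_s_c_alloc_v4 a =
 *		bch2_bkey_get_iter_typed(trans, &iter, BTREE_ID_alloc, pos, 0, alloc_v4);
 *	int ret = bkey_err(a);
 *	if (ret)
 *		return ret;
 *	... use a.v ...
 *	bch2_trans_iter_exit(trans, &iter);
 */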

static inline void __bkey_val_copy(void *dst_v, unsigned dst_size, struct bkey_s_c src_k)
{
	unsigned b = min_t(unsigned, dst_size, bkey_val_bytes(src_k.k));
	memcpy(dst_v, src_k.v, b);
	if (unlikely(b < dst_size))
		memset(dst_v + b, 0, dst_size - b);
}

#define bkey_val_copy(_dst_v, _src_k)					\
do {									\
	BUILD_BUG_ON(!__typecheck(*_dst_v, *_src_k.v));			\
	__bkey_val_copy(_dst_v, sizeof(*_dst_v), _src_k.s_c);		\
} while (0)

static inline int __bch2_bkey_get_val_typed(struct btree_trans *trans,
				unsigned btree_id, struct bpos pos,
				unsigned flags, unsigned type,
				unsigned val_size, void *val)
{
	struct btree_iter iter;
	struct bkey_s_c k = __bch2_bkey_get_iter(trans, &iter, btree_id, pos, flags, type);
	int ret = bkey_err(k);
	if (!ret) {
		__bkey_val_copy(val, val_size, k);
		bch2_trans_iter_exit(trans, &iter);
	}

	return ret;
}

#define bch2_bkey_get_val_typed(_trans, _btree_id, _pos, _flags, _type, _val)\
	__bch2_bkey_get_val_typed(_trans, _btree_id, _pos, _flags,	\
				  KEY_TYPE_##_type, sizeof(*_val), _val)

void bch2_trans_srcu_unlock(struct btree_trans *);

u32 bch2_trans_begin(struct btree_trans *);

#define __for_each_btree_node(_trans, _iter, _btree_id, _start,			\
			      _locks_want, _depth, _flags, _b, _do)		\
({										\
	bch2_trans_begin((_trans));						\
										\
	struct btree_iter _iter;						\
	bch2_trans_node_iter_init((_trans), &_iter, (_btree_id),		\
				  _start, _locks_want, _depth, _flags);		\
	int _ret3 = 0;								\
	do {									\
		_ret3 = lockrestart_do((_trans), ({				\
			struct btree *_b = bch2_btree_iter_peek_node(&_iter);	\
			if (!_b)						\
				break;						\
										\
			PTR_ERR_OR_ZERO(_b) ?: (_do);				\
		})) ?:								\
		lockrestart_do((_trans),					\
			PTR_ERR_OR_ZERO(bch2_btree_iter_next_node(&_iter)));	\
	} while (!_ret3);							\
										\
	bch2_trans_iter_exit((_trans), &(_iter));				\
	_ret3;									\
})

#define for_each_btree_node(_trans, _iter, _btree_id, _start,		\
			    _flags, _b, _do)				\
	__for_each_btree_node(_trans, _iter, _btree_id, _start,	\
			      0, 0, _flags, _b, _do)
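
/*
 * Example (a sketch; bch2_btree_node_to_text() from btree_cache.h is an
 * assumption): visit each node of a btree at level 0, with transaction
 * restarts handled by the macro:
 *
 *	ret = for_each_btree_node(trans, iter, BTREE_ID_extents, POS_MIN, 0, b, ({
 *		bch2_btree_node_to_text(&buf, c, b);
 *		0;
 *	}));
 */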

static inline struct bkey_s_c bch2_btree_iter_peek_prev_type(struct btree_iter *iter,
							     unsigned flags)
{
	return  flags & BTREE_ITER_slots      ? bch2_btree_iter_peek_slot(iter) :
						bch2_btree_iter_peek_prev(iter);
}

static inline struct bkey_s_c bch2_btree_iter_peek_type(struct btree_iter *iter,
							unsigned flags)
{
	return  flags & BTREE_ITER_slots      ? bch2_btree_iter_peek_slot(iter) :
						bch2_btree_iter_peek(iter);
}

static inline struct bkey_s_c bch2_btree_iter_peek_max_type(struct btree_iter *iter,
							     struct bpos end,
							     unsigned flags)
{
	if (!(flags & BTREE_ITER_slots))
		return bch2_btree_iter_peek_max(iter, end);

	if (bkey_gt(iter->pos, end))
		return bkey_s_c_null;

	return bch2_btree_iter_peek_slot(iter);
}

int __bch2_btree_trans_too_many_iters(struct btree_trans *);

static inline int btree_trans_too_many_iters(struct btree_trans *trans)
{
	if (bitmap_weight(trans->paths_allocated, trans->nr_paths) > BTREE_ITER_NORMAL_LIMIT - 8)
		return __bch2_btree_trans_too_many_iters(trans);

	return 0;
}

/*
 * goto instead of a loop, so that break/continue work correctly when used
 * inside for_each_btree_key()
 */
#define lockrestart_do(_trans, _do)					\
({									\
	__label__ transaction_restart;					\
	u32 _restart_count;						\
	int _ret2;							\
transaction_restart:							\
	_restart_count = bch2_trans_begin(_trans);			\
	_ret2 = (_do);							\
									\
	if (bch2_err_matches(_ret2, BCH_ERR_transaction_restart))	\
		goto transaction_restart;				\
									\
	if (!_ret2)							\
		bch2_trans_verify_not_restarted(_trans, _restart_count);\
	_ret2;								\
})
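
/*
 * Usage sketch (do_update() is a hypothetical helper): retry _do until it
 * completes without returning a transaction restart error:
 *
 *	ret = lockrestart_do(trans, do_update(trans, &args));
 */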

/*
 * nested_lockrestart_do(), nested_commit_do():
 *
 * These are like lockrestart_do() and commit_do(), with two differences:
 *
 *  - We don't call bch2_trans_begin() unless we had a transaction restart
 *  - We return -BCH_ERR_transaction_restart_nested if we succeeded after a
 *    transaction restart
 */
#define nested_lockrestart_do(_trans, _do)				\
({									\
	u32 _restart_count, _orig_restart_count;			\
	int _ret2;							\
									\
	_restart_count = _orig_restart_count = (_trans)->restart_count;\
									\
	while (bch2_err_matches(_ret2 = (_do), BCH_ERR_transaction_restart))\
		_restart_count = bch2_trans_begin(_trans);		\
									\
	if (!_ret2)							\
		bch2_trans_verify_not_restarted(_trans, _restart_count);\
									\
	_ret2 ?: trans_was_restarted(_trans, _orig_restart_count);	\
})

#define for_each_btree_key_max_continue(_trans, _iter,			\
					 _end, _flags, _k, _do)		\
({									\
	struct bkey_s_c _k;						\
	int _ret3 = 0;							\
									\
	do {								\
		_ret3 = lockrestart_do(_trans, ({			\
			(_k) = bch2_btree_iter_peek_max_type(&(_iter),	\
						_end, (_flags));	\
			if (!(_k).k)					\
				break;					\
									\
			bkey_err(_k) ?: (_do);				\
		}));							\
	} while (!_ret3 && bch2_btree_iter_advance(&(_iter)));		\
									\
	bch2_trans_iter_exit((_trans), &(_iter));			\
	_ret3;								\
})

#define for_each_btree_key_continue(_trans, _iter, _flags, _k, _do)	\
	for_each_btree_key_max_continue(_trans, _iter, SPOS_MAX, _flags, _k, _do)

#define for_each_btree_key_max(_trans, _iter, _btree_id,		\
				_start, _end, _flags, _k, _do)		\
({									\
	bch2_trans_begin((_trans));					\
									\
	struct btree_iter _iter;					\
	bch2_trans_iter_init((_trans), &(_iter), (_btree_id),		\
			     (_start), (_flags));			\
									\
	for_each_btree_key_max_continue(_trans, _iter, _end, _flags, _k, _do);\
})

#define for_each_btree_key(_trans, _iter, _btree_id,			\
			   _start, _flags, _k, _do)			\
	for_each_btree_key_max(_trans, _iter, _btree_id, _start,	\
				 SPOS_MAX, _flags, _k, _do)
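
/*
 * Example (a sketch, not from an existing caller): iterate over every key
 * in a btree; _do runs inside lockrestart_do(), so it may return a
 * transaction restart error and will be retried:
 *
 *	ret = for_each_btree_key(trans, iter, BTREE_ID_extents, POS_MIN,
 *				 BTREE_ITER_prefetch, k, ({
 *		pr_info("%llu:%llu\n", k.k->p.inode, k.k->p.offset);
 *		0;
 *	}));
 */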

#define for_each_btree_key_reverse(_trans, _iter, _btree_id,		\
				   _start, _flags, _k, _do)		\
({									\
	struct btree_iter _iter;					\
	struct bkey_s_c _k;						\
	int _ret3 = 0;							\
									\
	bch2_trans_iter_init((_trans), &(_iter), (_btree_id),		\
			     (_start), (_flags));			\
									\
	do {								\
		_ret3 = lockrestart_do(_trans, ({			\
			(_k) = bch2_btree_iter_peek_prev_type(&(_iter),	\
							(_flags));	\
			if (!(_k).k)					\
				break;					\
									\
			bkey_err(_k) ?: (_do);				\
		}));							\
	} while (!_ret3 && bch2_btree_iter_rewind(&(_iter)));		\
									\
	bch2_trans_iter_exit((_trans), &(_iter));			\
	_ret3;								\
})

#define for_each_btree_key_commit(_trans, _iter, _btree_id,		\
				  _start, _iter_flags, _k,		\
				  _disk_res, _journal_seq, _commit_flags,\
				  _do)					\
	for_each_btree_key(_trans, _iter, _btree_id, _start, _iter_flags, _k,\
			    (_do) ?: bch2_trans_commit(_trans, (_disk_res),\
					(_journal_seq), (_commit_flags)))

#define for_each_btree_key_reverse_commit(_trans, _iter, _btree_id,	\
				  _start, _iter_flags, _k,		\
				  _disk_res, _journal_seq, _commit_flags,\
				  _do)					\
	for_each_btree_key_reverse(_trans, _iter, _btree_id, _start, _iter_flags, _k,\
			    (_do) ?: bch2_trans_commit(_trans, (_disk_res),\
					(_journal_seq), (_commit_flags)))

#define for_each_btree_key_max_commit(_trans, _iter, _btree_id,	\
				  _start, _end, _iter_flags, _k,	\
				  _disk_res, _journal_seq, _commit_flags,\
				  _do)					\
	for_each_btree_key_max(_trans, _iter, _btree_id, _start, _end, _iter_flags, _k,\
			    (_do) ?: bch2_trans_commit(_trans, (_disk_res),\
					(_journal_seq), (_commit_flags)))

struct bkey_s_c bch2_btree_iter_peek_and_restart_outlined(struct btree_iter *);

#define for_each_btree_key_max_norestart(_trans, _iter, _btree_id,	\
			   _start, _end, _flags, _k, _ret)		\
	for (bch2_trans_iter_init((_trans), &(_iter), (_btree_id),	\
				  (_start), (_flags));			\
	     (_k) = bch2_btree_iter_peek_max_type(&(_iter), _end, _flags),\
	     !((_ret) = bkey_err(_k)) && (_k).k;			\
	     bch2_btree_iter_advance(&(_iter)))

#define for_each_btree_key_max_continue_norestart(_iter, _end, _flags, _k, _ret)\
	for (;									\
	     (_k) = bch2_btree_iter_peek_max_type(&(_iter), _end, _flags),	\
	     !((_ret) = bkey_err(_k)) && (_k).k;				\
	     bch2_btree_iter_advance(&(_iter)))

#define for_each_btree_key_norestart(_trans, _iter, _btree_id,		\
			   _start, _flags, _k, _ret)			\
	for_each_btree_key_max_norestart(_trans, _iter, _btree_id, _start,\
					  SPOS_MAX, _flags, _k, _ret)

#define for_each_btree_key_reverse_norestart(_trans, _iter, _btree_id,	\
					     _start, _flags, _k, _ret)	\
	for (bch2_trans_iter_init((_trans), &(_iter), (_btree_id),	\
				  (_start), (_flags));			\
	     (_k) = bch2_btree_iter_peek_prev_type(&(_iter), _flags),	\
	     !((_ret) = bkey_err(_k)) && (_k).k;			\
	     bch2_btree_iter_rewind(&(_iter)))

#define for_each_btree_key_continue_norestart(_iter, _flags, _k, _ret)	\
	for_each_btree_key_max_continue_norestart(_iter, SPOS_MAX, _flags, _k, _ret)
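
/*
 * Example (a sketch): the _norestart variants return transaction restart
 * errors in _ret instead of retrying, for callers that handle restarts
 * themselves; the caller exits the iterator:
 *
 *	for_each_btree_key_norestart(trans, iter, BTREE_ID_extents,
 *				     start, 0, k, ret) {
 *		if (bkey_ge(bkey_start_pos(k.k), end))
 *			break;
 *		...
 *	}
 *	bch2_trans_iter_exit(trans, &iter);
 */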

/*
 * This should not be used in a fastpath without first trying _do in
 * nonblocking mode: it will cause excessive transaction restarts and
 * potential livelocks.
 */
#define drop_locks_do(_trans, _do)					\
({									\
	bch2_trans_unlock(_trans);					\
	(_do) ?: bch2_trans_relock(_trans);				\
})

#define allocate_dropping_locks_errcode(_trans, _do)			\
({									\
	gfp_t _gfp = GFP_NOWAIT|__GFP_NOWARN;				\
	int _ret = _do;							\
									\
	if (bch2_err_matches(_ret, ENOMEM)) {				\
		_gfp = GFP_KERNEL;					\
		_ret = drop_locks_do(_trans, _do);			\
	}								\
	_ret;								\
})

#define allocate_dropping_locks(_trans, _ret, _do)			\
({									\
	gfp_t _gfp = GFP_NOWAIT|__GFP_NOWARN;				\
	typeof(_do) _p = _do;						\
									\
	_ret = 0;							\
	if (unlikely(!_p)) {						\
		_gfp = GFP_KERNEL;					\
		_ret = drop_locks_do(_trans, ((_p = _do), 0));		\
	}								\
	_p;								\
})
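
/*
 * Example (a sketch; struct foo is hypothetical): try a nonblocking
 * allocation first, then retry with GFP_KERNEL and btree locks dropped;
 * the _do expression must allocate with _gfp, which the macro provides:
 *
 *	int ret;
 *	struct foo *p = allocate_dropping_locks(trans, ret,
 *				kzalloc(sizeof(*p), _gfp));
 *	if (!p && !ret)
 *		ret = -ENOMEM;
 */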

#define bch2_trans_run(_c, _do)						\
({									\
	struct btree_trans *trans = bch2_trans_get(_c);			\
	int _ret = (_do);						\
	bch2_trans_put(trans);						\
	_ret;								\
})

#define bch2_trans_do(_c, _do)	bch2_trans_run(_c, lockrestart_do(trans, _do))
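
/*
 * Usage sketch (do_op() is a hypothetical transactional helper):
 * bch2_trans_run() names its transaction "trans", which _do may reference;
 * bch2_trans_do() additionally retries _do on transaction restart:
 *
 *	int ret = bch2_trans_do(c, do_op(trans, arg));
 */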

struct btree_trans *__bch2_trans_get(struct bch_fs *, unsigned);
void bch2_trans_put(struct btree_trans *);

bool bch2_current_has_btree_trans(struct bch_fs *);

extern const char *bch2_btree_transaction_fns[BCH_TRANSACTIONS_NR];
unsigned bch2_trans_get_fn_idx(const char *);

#define bch2_trans_get(_c)						\
({									\
	static unsigned trans_fn_idx;					\
									\
	if (unlikely(!trans_fn_idx))					\
		trans_fn_idx = bch2_trans_get_fn_idx(__func__);		\
	__bch2_trans_get(_c, trans_fn_idx);				\
})

void bch2_btree_trans_to_text(struct printbuf *, struct btree_trans *);

void bch2_fs_btree_iter_exit(struct bch_fs *);
void bch2_fs_btree_iter_init_early(struct bch_fs *);
int bch2_fs_btree_iter_init(struct bch_fs *);

#endif /* _BCACHEFS_BTREE_ITER_H */