xref: /linux/fs/bcachefs/btree_iter.c (revision fcab107abe1ab5be9dbe874baa722372da8f4f73)
1 // SPDX-License-Identifier: GPL-2.0
2 
3 #include "bcachefs.h"
4 #include "bkey_methods.h"
5 #include "bkey_buf.h"
6 #include "btree_cache.h"
7 #include "btree_iter.h"
8 #include "btree_journal_iter.h"
9 #include "btree_key_cache.h"
10 #include "btree_locking.h"
11 #include "btree_update.h"
12 #include "debug.h"
13 #include "error.h"
14 #include "extents.h"
15 #include "journal.h"
16 #include "journal_io.h"
17 #include "replicas.h"
18 #include "snapshot.h"
19 #include "super.h"
20 #include "trace.h"
21 
22 #include <linux/random.h>
23 #include <linux/prefetch.h>
24 
25 static inline void btree_path_list_remove(struct btree_trans *, struct btree_path *);
26 static inline void btree_path_list_add(struct btree_trans *,
27 			btree_path_idx_t, btree_path_idx_t);
28 
29 static inline unsigned long btree_iter_ip_allocated(struct btree_iter *iter)
30 {
31 #ifdef TRACK_PATH_ALLOCATED
32 	return iter->ip_allocated;
33 #else
34 	return 0;
35 #endif
36 }
37 
38 static btree_path_idx_t btree_path_alloc(struct btree_trans *, btree_path_idx_t);
39 static void bch2_trans_srcu_lock(struct btree_trans *);
40 
41 static inline int __btree_path_cmp(const struct btree_path *l,
42 				   enum btree_id	r_btree_id,
43 				   bool			r_cached,
44 				   struct bpos		r_pos,
45 				   unsigned		r_level)
46 {
47 	/*
48 	 * Must match lock ordering as defined by __bch2_btree_node_lock:
49 	 */
50 	return   cmp_int(l->btree_id,	r_btree_id) ?:
51 		 cmp_int((int) l->cached,	(int) r_cached) ?:
52 		 bpos_cmp(l->pos,	r_pos) ?:
53 		-cmp_int(l->level,	r_level);
54 }
55 
56 static inline int btree_path_cmp(const struct btree_path *l,
57 				 const struct btree_path *r)
58 {
59 	return __btree_path_cmp(l, r->btree_id, r->cached, r->pos, r->level);
60 }
61 
62 static inline struct bpos bkey_successor(struct btree_iter *iter, struct bpos p)
63 {
64 	/* Are we iterating over keys in all snapshots? */
65 	if (iter->flags & BTREE_ITER_all_snapshots) {
66 		p = bpos_successor(p);
67 	} else {
68 		p = bpos_nosnap_successor(p);
69 		p.snapshot = iter->snapshot;
70 	}
71 
72 	return p;
73 }
74 
75 static inline struct bpos bkey_predecessor(struct btree_iter *iter, struct bpos p)
76 {
77 	/* Are we iterating over keys in all snapshots? */
78 	if (iter->flags & BTREE_ITER_all_snapshots) {
79 		p = bpos_predecessor(p);
80 	} else {
81 		p = bpos_nosnap_predecessor(p);
82 		p.snapshot = iter->snapshot;
83 	}
84 
85 	return p;
86 }
87 
88 static inline struct bpos btree_iter_search_key(struct btree_iter *iter)
89 {
90 	struct bpos pos = iter->pos;
91 
92 	if ((iter->flags & BTREE_ITER_is_extents) &&
93 	    !bkey_eq(pos, POS_MAX))
94 		pos = bkey_successor(iter, pos);
95 	return pos;
96 }
97 
98 static inline bool btree_path_pos_before_node(struct btree_path *path,
99 					      struct btree *b)
100 {
101 	return bpos_lt(path->pos, b->data->min_key);
102 }
103 
104 static inline bool btree_path_pos_after_node(struct btree_path *path,
105 					     struct btree *b)
106 {
107 	return bpos_gt(path->pos, b->key.k.p);
108 }
109 
110 static inline bool btree_path_pos_in_node(struct btree_path *path,
111 					  struct btree *b)
112 {
113 	return path->btree_id == b->c.btree_id &&
114 		!btree_path_pos_before_node(path, b) &&
115 		!btree_path_pos_after_node(path, b);
116 }
117 
118 /* Debug: */
119 
120 static void __bch2_btree_path_verify_cached(struct btree_trans *trans,
121 					  struct btree_path *path)
122 {
123 	struct bkey_cached *ck;
124 	bool locked = btree_node_locked(path, 0);
125 
126 	if (!bch2_btree_node_relock(trans, path, 0))
127 		return;
128 
129 	ck = (void *) path->l[0].b;
130 	BUG_ON(ck->key.btree_id != path->btree_id ||
131 	       !bkey_eq(ck->key.pos, path->pos));
132 
133 	if (!locked)
134 		btree_node_unlock(trans, path, 0);
135 }
136 
137 static void __bch2_btree_path_verify_level(struct btree_trans *trans,
138 				struct btree_path *path, unsigned level)
139 {
140 	struct btree_path_level *l;
141 	struct btree_node_iter tmp;
142 	bool locked;
143 	struct bkey_packed *p, *k;
144 	struct printbuf buf1 = PRINTBUF;
145 	struct printbuf buf2 = PRINTBUF;
146 	struct printbuf buf3 = PRINTBUF;
147 	const char *msg;
148 
149 	l	= &path->l[level];
150 	tmp	= l->iter;
151 	locked	= btree_node_locked(path, level);
152 
153 	if (path->cached) {
154 		if (!level)
155 			__bch2_btree_path_verify_cached(trans, path);
156 		return;
157 	}
158 
159 	if (!btree_path_node(path, level))
160 		return;
161 
162 	if (!bch2_btree_node_relock_notrace(trans, path, level))
163 		return;
164 
165 	BUG_ON(!btree_path_pos_in_node(path, l->b));
166 
167 	bch2_btree_node_iter_verify(&l->iter, l->b);
168 
169 	/*
170 	 * For interior nodes, the iterator will have skipped past deleted keys:
171 	 */
172 	p = level
173 		? bch2_btree_node_iter_prev(&tmp, l->b)
174 		: bch2_btree_node_iter_prev_all(&tmp, l->b);
175 	k = bch2_btree_node_iter_peek_all(&l->iter, l->b);
176 
177 	if (p && bkey_iter_pos_cmp(l->b, p, &path->pos) >= 0) {
178 		msg = "before";
179 		goto err;
180 	}
181 
182 	if (k && bkey_iter_pos_cmp(l->b, k, &path->pos) < 0) {
183 		msg = "after";
184 		goto err;
185 	}
186 
187 	if (!locked)
188 		btree_node_unlock(trans, path, level);
189 	return;
190 err:
191 	bch2_bpos_to_text(&buf1, path->pos);
192 
193 	if (p) {
194 		struct bkey uk = bkey_unpack_key(l->b, p);
195 
196 		bch2_bkey_to_text(&buf2, &uk);
197 	} else {
198 		prt_printf(&buf2, "(none)");
199 	}
200 
201 	if (k) {
202 		struct bkey uk = bkey_unpack_key(l->b, k);
203 
204 		bch2_bkey_to_text(&buf3, &uk);
205 	} else {
206 		prt_printf(&buf3, "(none)");
207 	}
208 
209 	panic("path should be %s key at level %u:\n"
210 	      "path pos %s\n"
211 	      "prev key %s\n"
212 	      "cur  key %s\n",
213 	      msg, level, buf1.buf, buf2.buf, buf3.buf);
214 }
215 
216 static void __bch2_btree_path_verify(struct btree_trans *trans,
217 				   struct btree_path *path)
218 {
219 	struct bch_fs *c = trans->c;
220 
221 	for (unsigned i = 0; i < (!path->cached ? BTREE_MAX_DEPTH : 1); i++) {
222 		if (!path->l[i].b) {
223 			BUG_ON(!path->cached &&
224 			       bch2_btree_id_root(c, path->btree_id)->b->c.level > i);
225 			break;
226 		}
227 
228 		__bch2_btree_path_verify_level(trans, path, i);
229 	}
230 
231 	bch2_btree_path_verify_locks(trans, path);
232 }
233 
234 void __bch2_trans_verify_paths(struct btree_trans *trans)
235 {
236 	struct btree_path *path;
237 	unsigned iter;
238 
239 	trans_for_each_path(trans, path, iter)
240 		__bch2_btree_path_verify(trans, path);
241 }
242 
243 static void __bch2_btree_iter_verify(struct btree_trans *trans, struct btree_iter *iter)
244 {
245 	BUG_ON(!!(iter->flags & BTREE_ITER_cached) != btree_iter_path(trans, iter)->cached);
246 
247 	BUG_ON((iter->flags & BTREE_ITER_is_extents) &&
248 	       (iter->flags & BTREE_ITER_all_snapshots));
249 
250 	BUG_ON(!(iter->flags & BTREE_ITER_snapshot_field) &&
251 	       (iter->flags & BTREE_ITER_all_snapshots) &&
252 	       !btree_type_has_snapshot_field(iter->btree_id));
253 
254 	if (iter->update_path)
255 		__bch2_btree_path_verify(trans, &trans->paths[iter->update_path]);
256 	__bch2_btree_path_verify(trans, btree_iter_path(trans, iter));
257 }
258 
259 static void __bch2_btree_iter_verify_entry_exit(struct btree_iter *iter)
260 {
261 	BUG_ON((iter->flags & BTREE_ITER_filter_snapshots) &&
262 	       !iter->pos.snapshot);
263 
264 	BUG_ON(!(iter->flags & BTREE_ITER_all_snapshots) &&
265 	       iter->pos.snapshot != iter->snapshot);
266 
267 	BUG_ON(iter->flags & BTREE_ITER_all_snapshots	? !bpos_eq(iter->pos, iter->k.p) :
268 	       !(iter->flags & BTREE_ITER_is_extents)	? !bkey_eq(iter->pos, iter->k.p) :
269 	       (bkey_lt(iter->pos, bkey_start_pos(&iter->k)) ||
270 		bkey_gt(iter->pos, iter->k.p)));
271 }
272 
273 static int __bch2_btree_iter_verify_ret(struct btree_trans *trans,
274 					struct btree_iter *iter, struct bkey_s_c k)
275 {
276 	struct btree_iter copy;
277 	struct bkey_s_c prev;
278 	int ret = 0;
279 
280 	if (!(iter->flags & BTREE_ITER_filter_snapshots))
281 		return 0;
282 
283 	if (bkey_err(k) || !k.k)
284 		return 0;
285 
286 	BUG_ON(!bch2_snapshot_is_ancestor(trans->c,
287 					  iter->snapshot,
288 					  k.k->p.snapshot));
289 
290 	bch2_trans_iter_init(trans, &copy, iter->btree_id, iter->pos,
291 			     BTREE_ITER_nopreserve|
292 			     BTREE_ITER_all_snapshots);
293 	prev = bch2_btree_iter_prev(trans, &copy);
294 	if (!prev.k)
295 		goto out;
296 
297 	ret = bkey_err(prev);
298 	if (ret)
299 		goto out;
300 
301 	if (bkey_eq(prev.k->p, k.k->p) &&
302 	    bch2_snapshot_is_ancestor(trans->c, iter->snapshot,
303 				      prev.k->p.snapshot) > 0) {
304 		struct printbuf buf1 = PRINTBUF, buf2 = PRINTBUF;
305 
306 		bch2_bkey_to_text(&buf1, k.k);
307 		bch2_bkey_to_text(&buf2, prev.k);
308 
309 		panic("iter snap %u\n"
310 		      "k    %s\n"
311 		      "prev %s\n",
312 		      iter->snapshot,
313 		      buf1.buf, buf2.buf);
314 	}
315 out:
316 	bch2_trans_iter_exit(trans, &copy);
317 	return ret;
318 }
319 
320 void __bch2_assert_pos_locked(struct btree_trans *trans, enum btree_id id,
321 			    struct bpos pos)
322 {
323 	bch2_trans_verify_not_unlocked_or_in_restart(trans);
324 
325 	struct btree_path *path;
326 	struct trans_for_each_path_inorder_iter iter;
327 	struct printbuf buf = PRINTBUF;
328 
329 	btree_trans_sort_paths(trans);
330 
331 	trans_for_each_path_inorder(trans, path, iter) {
332 		if (path->btree_id != id ||
333 		    !btree_node_locked(path, 0) ||
334 		    !path->should_be_locked)
335 			continue;
336 
337 		if (!path->cached) {
338 			if (bkey_ge(pos, path->l[0].b->data->min_key) &&
339 			    bkey_le(pos, path->l[0].b->key.k.p))
340 				return;
341 		} else {
342 			if (bkey_eq(pos, path->pos))
343 				return;
344 		}
345 	}
346 
347 	bch2_dump_trans_paths_updates(trans);
348 	bch2_bpos_to_text(&buf, pos);
349 
350 	panic("not locked: %s %s\n", bch2_btree_id_str(id), buf.buf);
351 }
352 
353 static inline void bch2_btree_path_verify_level(struct btree_trans *trans,
354 						struct btree_path *path, unsigned l)
355 {
356 	if (static_branch_unlikely(&bch2_debug_check_iterators))
357 		__bch2_btree_path_verify_level(trans, path, l);
358 }
359 
360 static inline void bch2_btree_path_verify(struct btree_trans *trans,
361 					  struct btree_path *path)
362 {
363 	if (static_branch_unlikely(&bch2_debug_check_iterators))
364 		__bch2_btree_path_verify(trans, path);
365 }
366 
367 static inline void bch2_btree_iter_verify(struct btree_trans *trans,
368 					  struct btree_iter *iter)
369 {
370 	if (static_branch_unlikely(&bch2_debug_check_iterators))
371 		__bch2_btree_iter_verify(trans, iter);
372 }
373 
374 static inline void bch2_btree_iter_verify_entry_exit(struct btree_iter *iter)
375 {
376 	if (static_branch_unlikely(&bch2_debug_check_iterators))
377 		__bch2_btree_iter_verify_entry_exit(iter);
378 }
379 
380 static inline int bch2_btree_iter_verify_ret(struct btree_trans *trans, struct btree_iter *iter,
381 					     struct bkey_s_c k)
382 {
383 	return static_branch_unlikely(&bch2_debug_check_iterators)
384 		? __bch2_btree_iter_verify_ret(trans, iter, k)
385 		: 0;
386 }
387 
388 /* Btree path: fixups after btree updates */
389 
390 static void btree_node_iter_set_set_pos(struct btree_node_iter *iter,
391 					struct btree *b,
392 					struct bset_tree *t,
393 					struct bkey_packed *k)
394 {
395 	struct btree_node_iter_set *set;
396 
397 	btree_node_iter_for_each(iter, set)
398 		if (set->end == t->end_offset) {
399 			set->k = __btree_node_key_to_offset(b, k);
400 			bch2_btree_node_iter_sort(iter, b);
401 			return;
402 		}
403 
404 	bch2_btree_node_iter_push(iter, b, k, btree_bkey_last(b, t));
405 }
406 
407 static void __bch2_btree_path_fix_key_modified(struct btree_path *path,
408 					       struct btree *b,
409 					       struct bkey_packed *where)
410 {
411 	struct btree_path_level *l = &path->l[b->c.level];
412 
413 	if (where != bch2_btree_node_iter_peek_all(&l->iter, l->b))
414 		return;
415 
416 	if (bkey_iter_pos_cmp(l->b, where, &path->pos) < 0)
417 		bch2_btree_node_iter_advance(&l->iter, l->b);
418 }
419 
420 void bch2_btree_path_fix_key_modified(struct btree_trans *trans,
421 				      struct btree *b,
422 				      struct bkey_packed *where)
423 {
424 	struct btree_path *path;
425 	unsigned i;
426 
427 	trans_for_each_path_with_node(trans, b, path, i) {
428 		__bch2_btree_path_fix_key_modified(path, b, where);
429 		bch2_btree_path_verify_level(trans, path, b->c.level);
430 	}
431 }
432 
433 static void __bch2_btree_node_iter_fix(struct btree_path *path,
434 				       struct btree *b,
435 				       struct btree_node_iter *node_iter,
436 				       struct bset_tree *t,
437 				       struct bkey_packed *where,
438 				       unsigned clobber_u64s,
439 				       unsigned new_u64s)
440 {
441 	const struct bkey_packed *end = btree_bkey_last(b, t);
442 	struct btree_node_iter_set *set;
443 	unsigned offset = __btree_node_key_to_offset(b, where);
444 	int shift = new_u64s - clobber_u64s;
445 	unsigned old_end = t->end_offset - shift;
446 	unsigned orig_iter_pos = node_iter->data[0].k;
447 	bool iter_current_key_modified =
448 		orig_iter_pos >= offset &&
449 		orig_iter_pos <= offset + clobber_u64s;
450 
451 	btree_node_iter_for_each(node_iter, set)
452 		if (set->end == old_end)
453 			goto found;
454 
455 	/* didn't find the bset in the iterator - might have to readd it: */
456 	if (new_u64s &&
457 	    bkey_iter_pos_cmp(b, where, &path->pos) >= 0) {
458 		bch2_btree_node_iter_push(node_iter, b, where, end);
459 		goto fixup_done;
460 	} else {
461 		/* Iterator is after key that changed */
462 		return;
463 	}
464 found:
465 	set->end = t->end_offset;
466 
467 	/* Iterator hasn't gotten to the key that changed yet: */
468 	if (set->k < offset)
469 		return;
470 
471 	if (new_u64s &&
472 	    bkey_iter_pos_cmp(b, where, &path->pos) >= 0) {
473 		set->k = offset;
474 	} else if (set->k < offset + clobber_u64s) {
475 		set->k = offset + new_u64s;
476 		if (set->k == set->end)
477 			bch2_btree_node_iter_set_drop(node_iter, set);
478 	} else {
479 		/* Iterator is after key that changed */
480 		set->k = (int) set->k + shift;
481 		return;
482 	}
483 
484 	bch2_btree_node_iter_sort(node_iter, b);
485 fixup_done:
486 	if (node_iter->data[0].k != orig_iter_pos)
487 		iter_current_key_modified = true;
488 
489 	/*
490 	 * When a new key is added, and the node iterator now points to that
491 	 * key, the iterator might have skipped past deleted keys that should
492 	 * come after the key the iterator now points to. We have to rewind to
493 	 * before those deleted keys - otherwise
494 	 * bch2_btree_node_iter_prev_all() breaks:
495 	 */
496 	if (!bch2_btree_node_iter_end(node_iter) &&
497 	    iter_current_key_modified &&
498 	    b->c.level) {
499 		struct bkey_packed *k, *k2, *p;
500 
501 		k = bch2_btree_node_iter_peek_all(node_iter, b);
502 
503 		for_each_bset(b, t) {
504 			bool set_pos = false;
505 
506 			if (node_iter->data[0].end == t->end_offset)
507 				continue;
508 
509 			k2 = bch2_btree_node_iter_bset_pos(node_iter, b, t);
510 
511 			while ((p = bch2_bkey_prev_all(b, t, k2)) &&
512 			       bkey_iter_cmp(b, k, p) < 0) {
513 				k2 = p;
514 				set_pos = true;
515 			}
516 
517 			if (set_pos)
518 				btree_node_iter_set_set_pos(node_iter,
519 							    b, t, k2);
520 		}
521 	}
522 }
523 
524 void bch2_btree_node_iter_fix(struct btree_trans *trans,
525 			      struct btree_path *path,
526 			      struct btree *b,
527 			      struct btree_node_iter *node_iter,
528 			      struct bkey_packed *where,
529 			      unsigned clobber_u64s,
530 			      unsigned new_u64s)
531 {
532 	struct bset_tree *t = bch2_bkey_to_bset_inlined(b, where);
533 	struct btree_path *linked;
534 	unsigned i;
535 
536 	if (node_iter != &path->l[b->c.level].iter) {
537 		__bch2_btree_node_iter_fix(path, b, node_iter, t,
538 					   where, clobber_u64s, new_u64s);
539 
540 		if (static_branch_unlikely(&bch2_debug_check_iterators))
541 			bch2_btree_node_iter_verify(node_iter, b);
542 	}
543 
544 	trans_for_each_path_with_node(trans, b, linked, i) {
545 		__bch2_btree_node_iter_fix(linked, b,
546 					   &linked->l[b->c.level].iter, t,
547 					   where, clobber_u64s, new_u64s);
548 		bch2_btree_path_verify_level(trans, linked, b->c.level);
549 	}
550 }
551 
552 /* Btree path level: pointer to a particular btree node and node iter */
553 
554 static inline struct bkey_s_c __btree_iter_unpack(struct bch_fs *c,
555 						  struct btree_path_level *l,
556 						  struct bkey *u,
557 						  struct bkey_packed *k)
558 {
559 	if (unlikely(!k)) {
560 		/*
561 		 * signal to bch2_btree_iter_peek_slot() that we're currently at
562 		 * a hole
563 		 */
564 		u->type = KEY_TYPE_deleted;
565 		return bkey_s_c_null;
566 	}
567 
568 	return bkey_disassemble(l->b, k, u);
569 }
570 
571 static inline struct bkey_s_c btree_path_level_peek_all(struct bch_fs *c,
572 							struct btree_path_level *l,
573 							struct bkey *u)
574 {
575 	return __btree_iter_unpack(c, l, u,
576 			bch2_btree_node_iter_peek_all(&l->iter, l->b));
577 }
578 
579 static inline struct bkey_s_c btree_path_level_prev(struct btree_trans *trans,
580 						    struct btree_path *path,
581 						    struct btree_path_level *l,
582 						    struct bkey *u)
583 {
584 	struct bkey_s_c k = __btree_iter_unpack(trans->c, l, u,
585 			bch2_btree_node_iter_prev(&l->iter, l->b));
586 
587 	path->pos = k.k ? k.k->p : l->b->data->min_key;
588 	trans->paths_sorted = false;
589 	bch2_btree_path_verify_level(trans, path, l - path->l);
590 	return k;
591 }
592 
593 static inline bool btree_path_advance_to_pos(struct btree_path *path,
594 					     struct btree_path_level *l,
595 					     int max_advance)
596 {
597 	struct bkey_packed *k;
598 	int nr_advanced = 0;
599 
600 	while ((k = bch2_btree_node_iter_peek_all(&l->iter, l->b)) &&
601 	       bkey_iter_pos_cmp(l->b, k, &path->pos) < 0) {
602 		if (max_advance > 0 && nr_advanced >= max_advance)
603 			return false;
604 
605 		bch2_btree_node_iter_advance(&l->iter, l->b);
606 		nr_advanced++;
607 	}
608 
609 	return true;
610 }
611 
612 static inline void __btree_path_level_init(struct btree_path *path,
613 					   unsigned level)
614 {
615 	struct btree_path_level *l = &path->l[level];
616 
617 	bch2_btree_node_iter_init(&l->iter, l->b, &path->pos);
618 
619 	/*
620 	 * Iterators to interior nodes should always be pointed at the first non
621 	 * whiteout:
622 	 */
623 	if (level)
624 		bch2_btree_node_iter_peek(&l->iter, l->b);
625 }
626 
627 void bch2_btree_path_level_init(struct btree_trans *trans,
628 				struct btree_path *path,
629 				struct btree *b)
630 {
631 	BUG_ON(path->cached);
632 
633 	EBUG_ON(!btree_path_pos_in_node(path, b));
634 
635 	path->l[b->c.level].lock_seq = six_lock_seq(&b->c.lock);
636 	path->l[b->c.level].b = b;
637 	__btree_path_level_init(path, b->c.level);
638 }
639 
640 /* Btree path: fixups after btree node updates: */
641 
642 static void bch2_trans_revalidate_updates_in_node(struct btree_trans *trans, struct btree *b)
643 {
644 	struct bch_fs *c = trans->c;
645 
646 	trans_for_each_update(trans, i)
647 		if (!i->cached &&
648 		    i->level	== b->c.level &&
649 		    i->btree_id	== b->c.btree_id &&
650 		    bpos_cmp(i->k->k.p, b->data->min_key) >= 0 &&
651 		    bpos_cmp(i->k->k.p, b->data->max_key) <= 0) {
652 			i->old_v = bch2_btree_path_peek_slot(trans->paths + i->path, &i->old_k).v;
653 
654 			if (unlikely(trans->journal_replay_not_finished)) {
655 				struct bkey_i *j_k =
656 					bch2_journal_keys_peek_slot(c, i->btree_id, i->level,
657 								    i->k->k.p);
658 
659 				if (j_k) {
660 					i->old_k = j_k->k;
661 					i->old_v = &j_k->v;
662 				}
663 			}
664 		}
665 }
666 
667 /*
668  * A btree node is being replaced - update the iterator to point to the new
669  * node:
670  */
671 void bch2_trans_node_add(struct btree_trans *trans,
672 			 struct btree_path *path,
673 			 struct btree *b)
674 {
675 	struct btree_path *prev;
676 
677 	BUG_ON(!btree_path_pos_in_node(path, b));
678 
679 	while ((prev = prev_btree_path(trans, path)) &&
680 	       btree_path_pos_in_node(prev, b))
681 		path = prev;
682 
683 	for (;
684 	     path && btree_path_pos_in_node(path, b);
685 	     path = next_btree_path(trans, path))
686 		if (path->uptodate == BTREE_ITER_UPTODATE && !path->cached) {
687 			enum btree_node_locked_type t =
688 				btree_lock_want(path, b->c.level);
689 
690 			if (t != BTREE_NODE_UNLOCKED) {
691 				btree_node_unlock(trans, path, b->c.level);
692 				six_lock_increment(&b->c.lock, (enum six_lock_type) t);
693 				mark_btree_node_locked(trans, path, b->c.level, t);
694 			}
695 
696 			bch2_btree_path_level_init(trans, path, b);
697 		}
698 
699 	bch2_trans_revalidate_updates_in_node(trans, b);
700 }
701 
702 void bch2_trans_node_drop(struct btree_trans *trans,
703 			  struct btree *b)
704 {
705 	struct btree_path *path;
706 	unsigned i, level = b->c.level;
707 
708 	trans_for_each_path(trans, path, i)
709 		if (path->l[level].b == b) {
710 			btree_node_unlock(trans, path, level);
711 			path->l[level].b = ERR_PTR(-BCH_ERR_no_btree_node_init);
712 		}
713 }
714 
715 /*
716  * A btree node has been modified in such a way as to invalidate iterators - fix
717  * them:
718  */
719 void bch2_trans_node_reinit_iter(struct btree_trans *trans, struct btree *b)
720 {
721 	struct btree_path *path;
722 	unsigned i;
723 
724 	trans_for_each_path_with_node(trans, b, path, i)
725 		__btree_path_level_init(path, b->c.level);
726 
727 	bch2_trans_revalidate_updates_in_node(trans, b);
728 }
729 
730 /* Btree path: traverse, set_pos: */
731 
732 static inline int btree_path_lock_root(struct btree_trans *trans,
733 				       struct btree_path *path,
734 				       unsigned depth_want,
735 				       unsigned long trace_ip)
736 {
737 	struct bch_fs *c = trans->c;
738 	struct btree_root *r = bch2_btree_id_root(c, path->btree_id);
739 	enum six_lock_type lock_type;
740 	unsigned i;
741 	int ret;
742 
743 	EBUG_ON(path->nodes_locked);
744 
745 	while (1) {
746 		struct btree *b = READ_ONCE(r->b);
747 		if (unlikely(!b)) {
748 			BUG_ON(!r->error);
749 			return r->error;
750 		}
751 
752 		path->level = READ_ONCE(b->c.level);
753 
754 		if (unlikely(path->level < depth_want)) {
755 			/*
756 			 * the root is at a lower depth than the depth we want:
757 			 * got to the end of the btree, or we're walking nodes
758 			 * greater than some depth and there are no nodes >=
759 			 * that depth
760 			 */
761 			path->level = depth_want;
762 			for (i = path->level; i < BTREE_MAX_DEPTH; i++)
763 				path->l[i].b = NULL;
764 			return 1;
765 		}
766 
767 		lock_type = __btree_lock_want(path, path->level);
768 		ret = btree_node_lock(trans, path, &b->c,
769 				      path->level, lock_type, trace_ip);
770 		if (unlikely(ret)) {
771 			if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
772 				return ret;
773 			BUG();
774 		}
775 
776 		if (likely(b == READ_ONCE(r->b) &&
777 			   b->c.level == path->level &&
778 			   !race_fault())) {
779 			for (i = 0; i < path->level; i++)
780 				path->l[i].b = ERR_PTR(-BCH_ERR_no_btree_node_lock_root);
781 			path->l[path->level].b = b;
782 			for (i = path->level + 1; i < BTREE_MAX_DEPTH; i++)
783 				path->l[i].b = NULL;
784 
785 			mark_btree_node_locked(trans, path, path->level,
786 					       (enum btree_node_locked_type) lock_type);
787 			bch2_btree_path_level_init(trans, path, b);
788 			return 0;
789 		}
790 
791 		six_unlock_type(&b->c.lock, lock_type);
792 	}
793 }
794 
795 noinline
796 static int btree_path_prefetch(struct btree_trans *trans, struct btree_path *path)
797 {
798 	struct bch_fs *c = trans->c;
799 	struct btree_path_level *l = path_l(path);
800 	struct btree_node_iter node_iter = l->iter;
801 	struct bkey_packed *k;
802 	struct bkey_buf tmp;
803 	unsigned nr = test_bit(BCH_FS_started, &c->flags)
804 		? (path->level > 1 ? 0 :  2)
805 		: (path->level > 1 ? 1 : 16);
806 	bool was_locked = btree_node_locked(path, path->level);
807 	int ret = 0;
808 
809 	bch2_bkey_buf_init(&tmp);
810 
811 	while (nr-- && !ret) {
812 		if (!bch2_btree_node_relock(trans, path, path->level))
813 			break;
814 
815 		bch2_btree_node_iter_advance(&node_iter, l->b);
816 		k = bch2_btree_node_iter_peek(&node_iter, l->b);
817 		if (!k)
818 			break;
819 
820 		bch2_bkey_buf_unpack(&tmp, c, l->b, k);
821 		ret = bch2_btree_node_prefetch(trans, path, tmp.k, path->btree_id,
822 					       path->level - 1);
823 	}
824 
825 	if (!was_locked)
826 		btree_node_unlock(trans, path, path->level);
827 
828 	bch2_bkey_buf_exit(&tmp, c);
829 	return ret;
830 }
831 
832 static int btree_path_prefetch_j(struct btree_trans *trans, struct btree_path *path,
833 				 struct btree_and_journal_iter *jiter)
834 {
835 	struct bch_fs *c = trans->c;
836 	struct bkey_s_c k;
837 	struct bkey_buf tmp;
838 	unsigned nr = test_bit(BCH_FS_started, &c->flags)
839 		? (path->level > 1 ? 0 :  2)
840 		: (path->level > 1 ? 1 : 16);
841 	bool was_locked = btree_node_locked(path, path->level);
842 	int ret = 0;
843 
844 	bch2_bkey_buf_init(&tmp);
845 
846 	jiter->fail_if_too_many_whiteouts = true;
847 
848 	while (nr-- && !ret) {
849 		if (!bch2_btree_node_relock(trans, path, path->level))
850 			break;
851 
852 		bch2_btree_and_journal_iter_advance(jiter);
853 		k = bch2_btree_and_journal_iter_peek(jiter);
854 		if (!k.k)
855 			break;
856 
857 		bch2_bkey_buf_reassemble(&tmp, c, k);
858 		ret = bch2_btree_node_prefetch(trans, path, tmp.k, path->btree_id,
859 					       path->level - 1);
860 	}
861 
862 	if (!was_locked)
863 		btree_node_unlock(trans, path, path->level);
864 
865 	bch2_bkey_buf_exit(&tmp, c);
866 	return ret;
867 }
868 
869 static noinline void btree_node_mem_ptr_set(struct btree_trans *trans,
870 					    struct btree_path *path,
871 					    unsigned plevel, struct btree *b)
872 {
873 	struct btree_path_level *l = &path->l[plevel];
874 	bool locked = btree_node_locked(path, plevel);
875 	struct bkey_packed *k;
876 	struct bch_btree_ptr_v2 *bp;
877 
878 	if (!bch2_btree_node_relock(trans, path, plevel))
879 		return;
880 
881 	k = bch2_btree_node_iter_peek_all(&l->iter, l->b);
882 	BUG_ON(k->type != KEY_TYPE_btree_ptr_v2);
883 
884 	bp = (void *) bkeyp_val(&l->b->format, k);
885 	bp->mem_ptr = (unsigned long)b;
886 
887 	if (!locked)
888 		btree_node_unlock(trans, path, plevel);
889 }
890 
891 static noinline int btree_node_iter_and_journal_peek(struct btree_trans *trans,
892 						     struct btree_path *path,
893 						     unsigned flags)
894 {
895 	struct bch_fs *c = trans->c;
896 	struct btree_path_level *l = path_l(path);
897 	struct btree_and_journal_iter jiter;
898 	struct bkey_s_c k;
899 	int ret = 0;
900 
901 	__bch2_btree_and_journal_iter_init_node_iter(trans, &jiter, l->b, l->iter, path->pos);
902 
903 	k = bch2_btree_and_journal_iter_peek(&jiter);
904 	if (!k.k) {
905 		struct printbuf buf = PRINTBUF;
906 
907 		prt_str(&buf, "node not found at pos ");
908 		bch2_bpos_to_text(&buf, path->pos);
909 		prt_str(&buf, " at btree ");
910 		bch2_btree_pos_to_text(&buf, c, l->b);
911 
912 		ret = bch2_fs_topology_error(c, "%s", buf.buf);
913 		printbuf_exit(&buf);
914 		goto err;
915 	}
916 
917 	bkey_reassemble(&trans->btree_path_down, k);
918 
919 	if ((flags & BTREE_ITER_prefetch) &&
920 	    c->opts.btree_node_prefetch)
921 		ret = btree_path_prefetch_j(trans, path, &jiter);
922 
923 err:
924 	bch2_btree_and_journal_iter_exit(&jiter);
925 	return ret;
926 }
927 
928 static noinline_for_stack int btree_node_missing_err(struct btree_trans *trans,
929 						     struct btree_path *path)
930 {
931 	struct bch_fs *c = trans->c;
932 	struct printbuf buf = PRINTBUF;
933 
934 	prt_str(&buf, "node not found at pos ");
935 	bch2_bpos_to_text(&buf, path->pos);
936 	prt_str(&buf, " within parent node ");
937 	bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&path_l(path)->b->key));
938 
939 	bch2_fs_fatal_error(c, "%s", buf.buf);
940 	printbuf_exit(&buf);
941 	return bch_err_throw(c, btree_need_topology_repair);
942 }
943 
944 static __always_inline int btree_path_down(struct btree_trans *trans,
945 					   struct btree_path *path,
946 					   unsigned flags,
947 					   unsigned long trace_ip)
948 {
949 	struct bch_fs *c = trans->c;
950 	struct btree_path_level *l = path_l(path);
951 	struct btree *b;
952 	unsigned level = path->level - 1;
953 	enum six_lock_type lock_type = __btree_lock_want(path, level);
954 	int ret;
955 
956 	EBUG_ON(!btree_node_locked(path, path->level));
957 
958 	if (unlikely(trans->journal_replay_not_finished)) {
959 		ret = btree_node_iter_and_journal_peek(trans, path, flags);
960 		if (ret)
961 			return ret;
962 	} else {
963 		struct bkey_packed *k = bch2_btree_node_iter_peek(&l->iter, l->b);
964 		if (unlikely(!k))
965 			return btree_node_missing_err(trans, path);
966 
967 		bch2_bkey_unpack(l->b, &trans->btree_path_down, k);
968 
969 		if (unlikely((flags & BTREE_ITER_prefetch)) &&
970 		    c->opts.btree_node_prefetch) {
971 			ret = btree_path_prefetch(trans, path);
972 			if (ret)
973 				return ret;
974 		}
975 	}
976 
977 	b = bch2_btree_node_get(trans, path, &trans->btree_path_down,
978 				level, lock_type, trace_ip);
979 	ret = PTR_ERR_OR_ZERO(b);
980 	if (unlikely(ret))
981 		return ret;
982 
983 	if (unlikely(b != btree_node_mem_ptr(&trans->btree_path_down)) &&
984 	    likely(!trans->journal_replay_not_finished &&
985 		   trans->btree_path_down.k.type == KEY_TYPE_btree_ptr_v2))
986 		btree_node_mem_ptr_set(trans, path, level + 1, b);
987 
988 	if (btree_node_read_locked(path, level + 1))
989 		btree_node_unlock(trans, path, level + 1);
990 
991 	mark_btree_node_locked(trans, path, level,
992 			       (enum btree_node_locked_type) lock_type);
993 	path->level = level;
994 	bch2_btree_path_level_init(trans, path, b);
995 
996 	bch2_btree_path_verify_locks(trans, path);
997 	return 0;
998 }
999 
1000 static int bch2_btree_path_traverse_all(struct btree_trans *trans)
1001 {
1002 	struct bch_fs *c = trans->c;
1003 	struct btree_path *path;
1004 	unsigned long trace_ip = _RET_IP_;
1005 	unsigned i;
1006 	int ret = 0;
1007 
1008 	if (trans->in_traverse_all)
1009 		return bch_err_throw(c, transaction_restart_in_traverse_all);
1010 
1011 	trans->in_traverse_all = true;
1012 retry_all:
1013 	trans->restarted = 0;
1014 	trans->last_restarted_ip = 0;
1015 
1016 	trans_for_each_path(trans, path, i)
1017 		path->should_be_locked = false;
1018 
1019 	btree_trans_sort_paths(trans);
1020 
1021 	bch2_trans_unlock(trans);
1022 	cond_resched();
1023 	trans_set_locked(trans, false);
1024 
1025 	if (unlikely(trans->memory_allocation_failure)) {
1026 		struct closure cl;
1027 
1028 		closure_init_stack(&cl);
1029 
1030 		do {
1031 			ret = bch2_btree_cache_cannibalize_lock(trans, &cl);
1032 			closure_sync(&cl);
1033 		} while (ret);
1034 	}
1035 
1036 	/* Now, redo traversals in correct order: */
1037 	i = 0;
1038 	while (i < trans->nr_sorted) {
1039 		btree_path_idx_t idx = trans->sorted[i];
1040 
1041 		/*
1042 		 * Traversing a path can cause another path to be added at about
1043 		 * the same position:
1044 		 */
1045 		if (trans->paths[idx].uptodate) {
1046 			__btree_path_get(trans, &trans->paths[idx], false);
1047 			ret = bch2_btree_path_traverse_one(trans, idx, 0, _THIS_IP_);
1048 			__btree_path_put(trans, &trans->paths[idx], false);
1049 
1050 			if (bch2_err_matches(ret, BCH_ERR_transaction_restart) ||
1051 			    bch2_err_matches(ret, ENOMEM))
1052 				goto retry_all;
1053 			if (ret)
1054 				goto err;
1055 		} else {
1056 			i++;
1057 		}
1058 	}
1059 
1060 	/*
1061 	 * We used to assert that all paths had been traversed here
1062 	 * (path->uptodate < BTREE_ITER_NEED_TRAVERSE); however, since
1063 	 * path->should_be_locked is not set yet, we might have unlocked and
1064 	 * then failed to relock a path - that's fine.
1065 	 */
1066 err:
1067 	bch2_btree_cache_cannibalize_unlock(trans);
1068 
1069 	trans->in_traverse_all = false;
1070 
1071 	trace_and_count(c, trans_traverse_all, trans, trace_ip);
1072 	return ret;
1073 }
1074 
1075 static inline bool btree_path_check_pos_in_node(struct btree_path *path,
1076 						unsigned l, int check_pos)
1077 {
1078 	if (check_pos < 0 && btree_path_pos_before_node(path, path->l[l].b))
1079 		return false;
1080 	if (check_pos > 0 && btree_path_pos_after_node(path, path->l[l].b))
1081 		return false;
1082 	return true;
1083 }
1084 
1085 static inline bool btree_path_good_node(struct btree_trans *trans,
1086 					struct btree_path *path,
1087 					unsigned l, int check_pos)
1088 {
1089 	return is_btree_node(path, l) &&
1090 		bch2_btree_node_relock(trans, path, l) &&
1091 		btree_path_check_pos_in_node(path, l, check_pos);
1092 }
1093 
1094 static void btree_path_set_level_down(struct btree_trans *trans,
1095 				      struct btree_path *path,
1096 				      unsigned new_level)
1097 {
1098 	unsigned l;
1099 
1100 	path->level = new_level;
1101 
1102 	for (l = path->level + 1; l < BTREE_MAX_DEPTH; l++)
1103 		if (btree_lock_want(path, l) == BTREE_NODE_UNLOCKED)
1104 			btree_node_unlock(trans, path, l);
1105 
1106 	btree_path_set_dirty(trans, path, BTREE_ITER_NEED_TRAVERSE);
1107 	bch2_btree_path_verify(trans, path);
1108 }
1109 
1110 static noinline unsigned __btree_path_up_until_good_node(struct btree_trans *trans,
1111 							 struct btree_path *path,
1112 							 int check_pos)
1113 {
1114 	unsigned i, l = path->level;
1115 again:
1116 	while (btree_path_node(path, l) &&
1117 	       !btree_path_good_node(trans, path, l, check_pos))
1118 		__btree_path_set_level_up(trans, path, l++);
1119 
1120 	/* If we need intent locks, take them too: */
1121 	for (i = l + 1;
1122 	     i < path->locks_want && btree_path_node(path, i);
1123 	     i++)
1124 		if (!bch2_btree_node_relock(trans, path, i)) {
1125 			while (l <= i)
1126 				__btree_path_set_level_up(trans, path, l++);
1127 			goto again;
1128 		}
1129 
1130 	return l;
1131 }
1132 
1133 static inline unsigned btree_path_up_until_good_node(struct btree_trans *trans,
1134 						     struct btree_path *path,
1135 						     int check_pos)
1136 {
1137 	return likely(btree_node_locked(path, path->level) &&
1138 		      btree_path_check_pos_in_node(path, path->level, check_pos))
1139 		? path->level
1140 		: __btree_path_up_until_good_node(trans, path, check_pos);
1141 }
1142 
1143 /*
1144  * This is the main state machine for walking down the btree - walks down to a
1145  * specified depth
1146  *
1147  * Returns 0 on success, -EIO on error (error reading in a btree node).
1148  *
1149  * On error, caller (peek_node()/peek_key()) must return NULL; the error is
1150  * stashed in the iterator and returned from bch2_trans_exit().
1151  */
1152 int bch2_btree_path_traverse_one(struct btree_trans *trans,
1153 				 btree_path_idx_t path_idx,
1154 				 unsigned flags,
1155 				 unsigned long trace_ip)
1156 {
1157 	struct btree_path *path = &trans->paths[path_idx];
1158 	unsigned depth_want = path->level;
1159 	int ret = -((int) trans->restarted);
1160 
1161 	if (unlikely(ret))
1162 		goto out;
1163 
1164 	if (unlikely(!trans->srcu_held))
1165 		bch2_trans_srcu_lock(trans);
1166 
1167 	trace_btree_path_traverse_start(trans, path);
1168 
1169 	/*
1170 	 * Ensure we obey path->should_be_locked: if it's set, we can't unlock
1171 	 * and re-traverse the path without a transaction restart:
1172 	 */
1173 	if (path->should_be_locked) {
1174 		ret = bch2_btree_path_relock(trans, path, trace_ip);
1175 		goto out;
1176 	}
1177 
1178 	if (path->cached) {
1179 		ret = bch2_btree_path_traverse_cached(trans, path_idx, flags);
1180 		goto out;
1181 	}
1182 
1183 	path = &trans->paths[path_idx];
1184 
1185 	if (unlikely(path->level >= BTREE_MAX_DEPTH))
1186 		goto out_uptodate;
1187 
1188 	path->level = btree_path_up_until_good_node(trans, path, 0);
1189 	unsigned max_level = path->level;
1190 
1191 	EBUG_ON(btree_path_node(path, path->level) &&
1192 		!btree_node_locked(path, path->level));
1193 
1194 	/*
1195 	 * Note: path->nodes[path->level] may be temporarily NULL here - that
1196 	 * would indicate to other code that we got to the end of the btree,
1197 	 * here it indicates that relocking the root failed - it's critical that
1198 	 * btree_path_lock_root() comes next and that it can't fail
1199 	 */
1200 	while (path->level > depth_want) {
1201 		ret = btree_path_node(path, path->level)
1202 			? btree_path_down(trans, path, flags, trace_ip)
1203 			: btree_path_lock_root(trans, path, depth_want, trace_ip);
1204 		if (unlikely(ret)) {
1205 			if (ret == 1) {
1206 				/*
1207 				 * No nodes at this level - got to the end of
1208 				 * the btree:
1209 				 */
1210 				ret = 0;
1211 				goto out;
1212 			}
1213 
1214 			__bch2_btree_path_unlock(trans, path);
1215 			path->level = depth_want;
1216 			path->l[path->level].b = ERR_PTR(ret);
1217 			goto out;
1218 		}
1219 	}
1220 
1221 	if (unlikely(max_level > path->level)) {
1222 		struct btree_path *linked;
1223 		unsigned iter;
1224 
1225 		trans_for_each_path_with_node(trans, path_l(path)->b, linked, iter)
1226 			for (unsigned j = path->level + 1; j < max_level; j++)
1227 				linked->l[j] = path->l[j];
1228 	}
1229 
1230 out_uptodate:
1231 	path->uptodate = BTREE_ITER_UPTODATE;
1232 	trace_btree_path_traverse_end(trans, path);
1233 out:
1234 	if (bch2_err_matches(ret, BCH_ERR_transaction_restart) != !!trans->restarted)
1235 		panic("ret %s (%i) trans->restarted %s (%i)\n",
1236 		      bch2_err_str(ret), ret,
1237 		      bch2_err_str(trans->restarted), trans->restarted);
1238 	bch2_btree_path_verify(trans, path);
1239 	return ret;
1240 }
1241 
1242 static inline void btree_path_copy(struct btree_trans *trans, struct btree_path *dst,
1243 			    struct btree_path *src)
1244 {
1245 	unsigned i, offset = offsetof(struct btree_path, pos);
1246 
1247 	memcpy((void *) dst + offset,
1248 	       (void *) src + offset,
1249 	       sizeof(struct btree_path) - offset);
1250 
1251 	for (i = 0; i < BTREE_MAX_DEPTH; i++) {
1252 		unsigned t = btree_node_locked_type(dst, i);
1253 
1254 		if (t != BTREE_NODE_UNLOCKED)
1255 			six_lock_increment(&dst->l[i].b->c.lock, t);
1256 	}
1257 }
1258 
1259 static btree_path_idx_t btree_path_clone(struct btree_trans *trans, btree_path_idx_t src,
1260 					 bool intent, unsigned long ip)
1261 {
1262 	btree_path_idx_t new = btree_path_alloc(trans, src);
1263 	btree_path_copy(trans, trans->paths + new, trans->paths + src);
1264 	__btree_path_get(trans, trans->paths + new, intent);
1265 #ifdef TRACK_PATH_ALLOCATED
1266 	trans->paths[new].ip_allocated = ip;
1267 #endif
1268 	return new;
1269 }
1270 
1271 __flatten
1272 btree_path_idx_t __bch2_btree_path_make_mut(struct btree_trans *trans,
1273 			btree_path_idx_t path, bool intent, unsigned long ip)
1274 {
1275 	struct btree_path *old = trans->paths + path;
1276 	__btree_path_put(trans, trans->paths + path, intent);
1277 	path = btree_path_clone(trans, path, intent, ip);
1278 	trace_btree_path_clone(trans, old, trans->paths + path);
1279 	trans->paths[path].preserve = false;
1280 	return path;
1281 }
1282 
1283 btree_path_idx_t __must_check
1284 __bch2_btree_path_set_pos(struct btree_trans *trans,
1285 			  btree_path_idx_t path_idx, struct bpos new_pos,
1286 			  bool intent, unsigned long ip)
1287 {
1288 	int cmp = bpos_cmp(new_pos, trans->paths[path_idx].pos);
1289 
1290 	bch2_trans_verify_not_unlocked_or_in_restart(trans);
1291 	EBUG_ON(!trans->paths[path_idx].ref);
1292 
1293 	trace_btree_path_set_pos(trans, trans->paths + path_idx, &new_pos);
1294 
1295 	path_idx = bch2_btree_path_make_mut(trans, path_idx, intent, ip);
1296 
1297 	struct btree_path *path = trans->paths + path_idx;
1298 	path->pos		= new_pos;
1299 	trans->paths_sorted	= false;
1300 
1301 	if (unlikely(path->cached)) {
1302 		btree_node_unlock(trans, path, 0);
1303 		path->l[0].b = ERR_PTR(-BCH_ERR_no_btree_node_up);
1304 		btree_path_set_dirty(trans, path, BTREE_ITER_NEED_TRAVERSE);
1305 		goto out;
1306 	}
1307 
1308 	unsigned level = btree_path_up_until_good_node(trans, path, cmp);
1309 
1310 	if (btree_path_node(path, level)) {
1311 		struct btree_path_level *l = &path->l[level];
1312 
1313 		BUG_ON(!btree_node_locked(path, level));
1314 		/*
1315 		 * We might have to skip over many keys, or just a few: try
1316 		 * advancing the node iterator, and if we have to skip over too
1317 		 * many keys just reinit it (or if we're rewinding, since that
1318 		 * is expensive).
1319 		 */
1320 		if (cmp < 0 ||
1321 		    !btree_path_advance_to_pos(path, l, 8))
1322 			bch2_btree_node_iter_init(&l->iter, l->b, &path->pos);
1323 
1324 		/*
1325 		 * Iterators to interior nodes should always be pointed at the first non
1326 		 * whiteout:
1327 		 */
1328 		if (unlikely(level))
1329 			bch2_btree_node_iter_peek(&l->iter, l->b);
1330 	}
1331 
1332 	if (unlikely(level != path->level)) {
1333 		btree_path_set_dirty(trans, path, BTREE_ITER_NEED_TRAVERSE);
1334 		__bch2_btree_path_unlock(trans, path);
1335 	}
1336 out:
1337 	bch2_btree_path_verify(trans, path);
1338 	return path_idx;
1339 }
1340 
1341 /* Btree path: main interface: */
1342 
1343 static struct btree_path *have_path_at_pos(struct btree_trans *trans, struct btree_path *path)
1344 {
1345 	struct btree_path *sib;
1346 
1347 	sib = prev_btree_path(trans, path);
1348 	if (sib && !btree_path_cmp(sib, path))
1349 		return sib;
1350 
1351 	sib = next_btree_path(trans, path);
1352 	if (sib && !btree_path_cmp(sib, path))
1353 		return sib;
1354 
1355 	return NULL;
1356 }
1357 
1358 static struct btree_path *have_node_at_pos(struct btree_trans *trans, struct btree_path *path)
1359 {
1360 	struct btree_path *sib;
1361 
1362 	sib = prev_btree_path(trans, path);
1363 	if (sib && sib->level == path->level && path_l(sib)->b == path_l(path)->b)
1364 		return sib;
1365 
1366 	sib = next_btree_path(trans, path);
1367 	if (sib && sib->level == path->level && path_l(sib)->b == path_l(path)->b)
1368 		return sib;
1369 
1370 	return NULL;
1371 }
1372 
1373 static inline void __bch2_path_free(struct btree_trans *trans, btree_path_idx_t path)
1374 {
1375 	__bch2_btree_path_unlock(trans, trans->paths + path);
1376 	btree_path_list_remove(trans, trans->paths + path);
1377 	__clear_bit(path, trans->paths_allocated);
1378 }
1379 
1380 static bool bch2_btree_path_can_relock(struct btree_trans *trans, struct btree_path *path)
1381 {
1382 	unsigned l = path->level;
1383 
1384 	do {
1385 		if (!btree_path_node(path, l))
1386 			break;
1387 
1388 		if (!is_btree_node(path, l))
1389 			return false;
1390 
1391 		if (path->l[l].lock_seq != path->l[l].b->c.lock.seq)
1392 			return false;
1393 
1394 		l++;
1395 	} while (l < path->locks_want);
1396 
1397 	return true;
1398 }
1399 
1400 void bch2_path_put(struct btree_trans *trans, btree_path_idx_t path_idx, bool intent)
1401 {
1402 	struct btree_path *path = trans->paths + path_idx, *dup = NULL;
1403 
1404 	if (!__btree_path_put(trans, path, intent))
1405 		return;
1406 
1407 	if (!path->preserve && !path->should_be_locked)
1408 		goto free;
1409 
1410 	dup = path->preserve
1411 		? have_path_at_pos(trans, path)
1412 		: have_node_at_pos(trans, path);
1413 	if (!dup)
1414 		return;
1415 
1416 	/*
1417 	 * If we need this path locked, the duplicate also has te be locked
1418 	 * before we free this one:
1419 	 */
1420 	if (path->should_be_locked &&
1421 	    !dup->should_be_locked &&
1422 	    !trans->restarted) {
1423 		if (!(trans->locked
1424 		      ? bch2_btree_path_relock_norestart(trans, dup)
1425 		      : bch2_btree_path_can_relock(trans, dup)))
1426 			return;
1427 
1428 		dup->should_be_locked = true;
1429 	}
1430 
1431 	BUG_ON(path->should_be_locked &&
1432 	       !trans->restarted &&
1433 	       trans->locked &&
1434 	       !btree_node_locked(dup, dup->level));
1435 
1436 	path->should_be_locked = false;
1437 	dup->preserve |= path->preserve;
1438 free:
1439 	trace_btree_path_free(trans, path_idx, dup);
1440 	__bch2_path_free(trans, path_idx);
1441 }
1442 
1443 void __noreturn bch2_trans_restart_error(struct btree_trans *trans, u32 restart_count)
1444 {
1445 	panic("trans->restart_count %u, should be %u, last restarted by %pS\n",
1446 	      trans->restart_count, restart_count,
1447 	      (void *) trans->last_begin_ip);
1448 }
1449 
1450 static void __noreturn bch2_trans_in_restart_error(struct btree_trans *trans)
1451 {
1452 #ifdef CONFIG_BCACHEFS_DEBUG
1453 	struct printbuf buf = PRINTBUF;
1454 	bch2_prt_backtrace(&buf, &trans->last_restarted_trace);
1455 	panic("in transaction restart: %s, last restarted by\n%s",
1456 	      bch2_err_str(trans->restarted),
1457 	      buf.buf);
1458 #else
1459 	panic("in transaction restart: %s, last restarted by %pS\n",
1460 	      bch2_err_str(trans->restarted),
1461 	      (void *) trans->last_restarted_ip);
1462 #endif
1463 }
1464 
1465 void __noreturn bch2_trans_unlocked_or_in_restart_error(struct btree_trans *trans)
1466 {
1467 	if (trans->restarted)
1468 		bch2_trans_in_restart_error(trans);
1469 
1470 	if (!trans->locked)
1471 		panic("trans should be locked, unlocked by %pS\n",
1472 		      (void *) trans->last_unlock_ip);
1473 
1474 	BUG();
1475 }
1476 
1477 noinline __cold
1478 void bch2_trans_updates_to_text(struct printbuf *buf, struct btree_trans *trans)
1479 {
1480 	prt_printf(buf, "%u transaction updates for %s journal seq %llu\n",
1481 		   trans->nr_updates, trans->fn, trans->journal_res.seq);
1482 	printbuf_indent_add(buf, 2);
1483 
1484 	trans_for_each_update(trans, i) {
1485 		struct bkey_s_c old = { &i->old_k, i->old_v };
1486 
1487 		prt_str(buf, "update: btree=");
1488 		bch2_btree_id_to_text(buf, i->btree_id);
1489 		prt_printf(buf, " cached=%u %pS\n",
1490 			   i->cached,
1491 			   (void *) i->ip_allocated);
1492 
1493 		prt_printf(buf, "  old ");
1494 		bch2_bkey_val_to_text(buf, trans->c, old);
1495 		prt_newline(buf);
1496 
1497 		prt_printf(buf, "  new ");
1498 		bch2_bkey_val_to_text(buf, trans->c, bkey_i_to_s_c(i->k));
1499 		prt_newline(buf);
1500 	}
1501 
1502 	for (struct jset_entry *e = btree_trans_journal_entries_start(trans);
1503 	     e != btree_trans_journal_entries_top(trans);
1504 	     e = vstruct_next(e)) {
1505 		bch2_journal_entry_to_text(buf, trans->c, e);
1506 		prt_newline(buf);
1507 	}
1508 
1509 	printbuf_indent_sub(buf, 2);
1510 }
1511 
1512 static void bch2_btree_path_to_text_short(struct printbuf *out, struct btree_trans *trans, btree_path_idx_t path_idx)
1513 {
1514 	struct btree_path *path = trans->paths + path_idx;
1515 
1516 	prt_printf(out, "path: idx %3u ref %u:%u %c %c %c ",
1517 		   path_idx, path->ref, path->intent_ref,
1518 		   path->preserve ? 'P' : ' ',
1519 		   path->should_be_locked ? 'S' : ' ',
1520 		   path->cached ? 'C' : 'B');
1521 	bch2_btree_id_level_to_text(out, path->btree_id, path->level);
1522 	prt_str(out, " pos ");
1523 	bch2_bpos_to_text(out, path->pos);
1524 
1525 	if (!path->cached && btree_node_locked(path, path->level)) {
1526 		prt_char(out, ' ');
1527 		struct btree *b = path_l(path)->b;
1528 		bch2_bpos_to_text(out, b->data->min_key);
1529 		prt_char(out, '-');
1530 		bch2_bpos_to_text(out, b->key.k.p);
1531 	}
1532 
1533 #ifdef TRACK_PATH_ALLOCATED
1534 	prt_printf(out, " %pS", (void *) path->ip_allocated);
1535 #endif
1536 }
1537 
1538 static const char *btree_node_locked_str(enum btree_node_locked_type t)
1539 {
1540 	switch (t) {
1541 	case BTREE_NODE_UNLOCKED:
1542 		return "unlocked";
1543 	case BTREE_NODE_READ_LOCKED:
1544 		return "read";
1545 	case BTREE_NODE_INTENT_LOCKED:
1546 		return "intent";
1547 	case BTREE_NODE_WRITE_LOCKED:
1548 		return "write";
1549 	default:
1550 		return NULL;
1551 	}
1552 }
1553 
1554 void bch2_btree_path_to_text(struct printbuf *out, struct btree_trans *trans, btree_path_idx_t path_idx)
1555 {
1556 	bch2_btree_path_to_text_short(out, trans, path_idx);
1557 
1558 	struct btree_path *path = trans->paths + path_idx;
1559 
1560 	prt_printf(out, " uptodate %u locks_want %u", path->uptodate, path->locks_want);
1561 	prt_newline(out);
1562 
1563 	printbuf_indent_add(out, 2);
1564 	for (unsigned l = 0; l < BTREE_MAX_DEPTH; l++) {
1565 		prt_printf(out, "l=%u locks %s seq %u node ", l,
1566 			   btree_node_locked_str(btree_node_locked_type(path, l)),
1567 			   path->l[l].lock_seq);
1568 
1569 		int ret = PTR_ERR_OR_ZERO(path->l[l].b);
1570 		if (ret)
1571 			prt_str(out, bch2_err_str(ret));
1572 		else
1573 			prt_printf(out, "%px", path->l[l].b);
1574 		prt_newline(out);
1575 	}
1576 	printbuf_indent_sub(out, 2);
1577 }
1578 
1579 static noinline __cold
1580 void __bch2_trans_paths_to_text(struct printbuf *out, struct btree_trans *trans,
1581 				bool nosort)
1582 {
1583 	struct trans_for_each_path_inorder_iter iter;
1584 
1585 	if (!nosort)
1586 		btree_trans_sort_paths(trans);
1587 
1588 	trans_for_each_path_idx_inorder(trans, iter) {
1589 		bch2_btree_path_to_text_short(out, trans, iter.path_idx);
1590 		prt_newline(out);
1591 	}
1592 }
1593 
1594 noinline __cold
1595 void bch2_trans_paths_to_text(struct printbuf *out, struct btree_trans *trans)
1596 {
1597 	__bch2_trans_paths_to_text(out, trans, false);
1598 }
1599 
1600 static noinline __cold
1601 void __bch2_dump_trans_paths_updates(struct btree_trans *trans, bool nosort)
1602 {
1603 	struct printbuf buf = PRINTBUF;
1604 
1605 	__bch2_trans_paths_to_text(&buf, trans, nosort);
1606 	bch2_trans_updates_to_text(&buf, trans);
1607 
1608 	bch2_print_str(trans->c, KERN_ERR, buf.buf);
1609 	printbuf_exit(&buf);
1610 }
1611 
1612 noinline __cold
1613 void bch2_dump_trans_paths_updates(struct btree_trans *trans)
1614 {
1615 	__bch2_dump_trans_paths_updates(trans, false);
1616 }
1617 
1618 noinline __cold
1619 static void bch2_trans_update_max_paths(struct btree_trans *trans)
1620 {
1621 	struct btree_transaction_stats *s = btree_trans_stats(trans);
1622 	struct printbuf buf = PRINTBUF;
1623 	size_t nr = bitmap_weight(trans->paths_allocated, trans->nr_paths);
1624 
1625 	bch2_trans_paths_to_text(&buf, trans);
1626 
1627 	if (!buf.allocation_failure) {
1628 		mutex_lock(&s->lock);
1629 		if (nr > s->nr_max_paths) {
1630 			s->nr_max_paths = nr;
1631 			swap(s->max_paths_text, buf.buf);
1632 		}
1633 		mutex_unlock(&s->lock);
1634 	}
1635 
1636 	printbuf_exit(&buf);
1637 
1638 	trans->nr_paths_max = nr;
1639 }
1640 
1641 noinline __cold
1642 int __bch2_btree_trans_too_many_iters(struct btree_trans *trans)
1643 {
1644 	if (trace_trans_restart_too_many_iters_enabled()) {
1645 		struct printbuf buf = PRINTBUF;
1646 
1647 		bch2_trans_paths_to_text(&buf, trans);
1648 		trace_trans_restart_too_many_iters(trans, _THIS_IP_, buf.buf);
1649 		printbuf_exit(&buf);
1650 	}
1651 
1652 	count_event(trans->c, trans_restart_too_many_iters);
1653 
1654 	return btree_trans_restart(trans, BCH_ERR_transaction_restart_too_many_iters);
1655 }
1656 
1657 static noinline void btree_path_overflow(struct btree_trans *trans)
1658 {
1659 	bch2_dump_trans_paths_updates(trans);
1660 	bch_err(trans->c, "trans path overflow");
1661 }
1662 
1663 static noinline void btree_paths_realloc(struct btree_trans *trans)
1664 {
1665 	unsigned nr = trans->nr_paths * 2;
1666 
1667 	void *p = kvzalloc(BITS_TO_LONGS(nr) * sizeof(unsigned long) +
1668 			  sizeof(struct btree_trans_paths) +
1669 			  nr * sizeof(struct btree_path) +
1670 			  nr * sizeof(btree_path_idx_t) + 8 +
1671 			  nr * sizeof(struct btree_insert_entry), GFP_KERNEL|__GFP_NOFAIL);
1672 
1673 	unsigned long *paths_allocated = p;
1674 	memcpy(paths_allocated, trans->paths_allocated, BITS_TO_LONGS(trans->nr_paths) * sizeof(unsigned long));
1675 	p += BITS_TO_LONGS(nr) * sizeof(unsigned long);
1676 
1677 	p += sizeof(struct btree_trans_paths);
1678 	struct btree_path *paths = p;
1679 	*trans_paths_nr(paths) = nr;
1680 	memcpy(paths, trans->paths, trans->nr_paths * sizeof(struct btree_path));
1681 	p += nr * sizeof(struct btree_path);
1682 
1683 	btree_path_idx_t *sorted = p;
1684 	memcpy(sorted, trans->sorted, trans->nr_sorted * sizeof(btree_path_idx_t));
1685 	p += nr * sizeof(btree_path_idx_t) + 8;
1686 
1687 	struct btree_insert_entry *updates = p;
1688 	memcpy(updates, trans->updates, trans->nr_paths * sizeof(struct btree_insert_entry));
1689 
1690 	unsigned long *old = trans->paths_allocated;
1691 
1692 	rcu_assign_pointer(trans->paths_allocated,	paths_allocated);
1693 	rcu_assign_pointer(trans->paths,		paths);
1694 	rcu_assign_pointer(trans->sorted,		sorted);
1695 	rcu_assign_pointer(trans->updates,		updates);
1696 
1697 	trans->nr_paths		= nr;
1698 
1699 	if (old != trans->_paths_allocated)
1700 		kfree_rcu_mightsleep(old);
1701 }
1702 
1703 static inline btree_path_idx_t btree_path_alloc(struct btree_trans *trans,
1704 						btree_path_idx_t pos)
1705 {
1706 	btree_path_idx_t idx = find_first_zero_bit(trans->paths_allocated, trans->nr_paths);
1707 
1708 	if (unlikely(idx == trans->nr_paths)) {
1709 		if (trans->nr_paths == BTREE_ITER_MAX) {
1710 			btree_path_overflow(trans);
1711 			return 0;
1712 		}
1713 
1714 		btree_paths_realloc(trans);
1715 	}
1716 
1717 	/*
1718 	 * Do this before marking the new path as allocated, since it won't be
1719 	 * initialized yet:
1720 	 */
1721 	if (unlikely(idx > trans->nr_paths_max))
1722 		bch2_trans_update_max_paths(trans);
1723 
1724 	__set_bit(idx, trans->paths_allocated);
1725 
1726 	struct btree_path *path = &trans->paths[idx];
1727 	path->ref		= 0;
1728 	path->intent_ref	= 0;
1729 	path->nodes_locked	= 0;
1730 
1731 	btree_path_list_add(trans, pos, idx);
1732 	trans->paths_sorted = false;
1733 	return idx;
1734 }
1735 
1736 btree_path_idx_t bch2_path_get(struct btree_trans *trans,
1737 			     enum btree_id btree_id, struct bpos pos,
1738 			     unsigned locks_want, unsigned level,
1739 			     unsigned flags, unsigned long ip)
1740 {
1741 	struct btree_path *path;
1742 	bool cached = flags & BTREE_ITER_cached;
1743 	bool intent = flags & BTREE_ITER_intent;
1744 	struct trans_for_each_path_inorder_iter iter;
1745 	btree_path_idx_t path_pos = 0, path_idx;
1746 
1747 	bch2_trans_verify_not_unlocked_or_in_restart(trans);
1748 	bch2_trans_verify_locks(trans);
1749 
1750 	btree_trans_sort_paths(trans);
1751 
1752 	if (intent)
1753 		locks_want = max(locks_want, level + 1);
1754 	locks_want = min(locks_want, BTREE_MAX_DEPTH);
1755 
1756 	trans_for_each_path_inorder(trans, path, iter) {
1757 		if (__btree_path_cmp(path,
1758 				     btree_id,
1759 				     cached,
1760 				     pos,
1761 				     level) > 0)
1762 			break;
1763 
1764 		path_pos = iter.path_idx;
1765 	}
1766 
1767 	if (path_pos &&
1768 	    trans->paths[path_pos].cached	== cached &&
1769 	    trans->paths[path_pos].btree_id	== btree_id &&
1770 	    trans->paths[path_pos].level	== level &&
1771 	    bch2_btree_path_upgrade_norestart(trans, trans->paths + path_pos, locks_want)) {
1772 		trace_btree_path_get(trans, trans->paths + path_pos, &pos);
1773 
1774 		__btree_path_get(trans, trans->paths + path_pos, intent);
1775 		path_idx = bch2_btree_path_set_pos(trans, path_pos, pos, intent, ip);
1776 		path = trans->paths + path_idx;
1777 	} else {
1778 		path_idx = btree_path_alloc(trans, path_pos);
1779 		path = trans->paths + path_idx;
1780 
1781 		__btree_path_get(trans, path, intent);
1782 		path->pos			= pos;
1783 		path->btree_id			= btree_id;
1784 		path->cached			= cached;
1785 		path->uptodate			= BTREE_ITER_NEED_TRAVERSE;
1786 		path->should_be_locked		= false;
1787 		path->level			= level;
1788 		path->locks_want		= locks_want;
1789 		path->nodes_locked		= 0;
1790 		for (unsigned i = 0; i < ARRAY_SIZE(path->l); i++)
1791 			path->l[i].b		= ERR_PTR(-BCH_ERR_no_btree_node_init);
1792 #ifdef TRACK_PATH_ALLOCATED
1793 		path->ip_allocated		= ip;
1794 #endif
1795 		trans->paths_sorted		= false;
1796 
1797 		trace_btree_path_alloc(trans, path);
1798 	}
1799 
1800 	if (!(flags & BTREE_ITER_nopreserve))
1801 		path->preserve = true;
1802 
1803 	/*
1804 	 * If the path has locks_want greater than requested, we don't downgrade
1805 	 * it here - on transaction restart because btree node split needs to
1806 	 * upgrade locks, we might be putting/getting the iterator again.
1807 	 * Downgrading iterators only happens via bch2_trans_downgrade(), after
1808 	 * a successful transaction commit.
1809 	 */
1810 
1811 	return path_idx;
1812 }
1813 
1814 btree_path_idx_t bch2_path_get_unlocked_mut(struct btree_trans *trans,
1815 					    enum btree_id btree_id,
1816 					    unsigned level,
1817 					    struct bpos pos)
1818 {
1819 	btree_path_idx_t path_idx = bch2_path_get(trans, btree_id, pos, level + 1, level,
1820 			     BTREE_ITER_nopreserve|
1821 			     BTREE_ITER_intent, _RET_IP_);
1822 	path_idx = bch2_btree_path_make_mut(trans, path_idx, true, _RET_IP_);
1823 
1824 	struct btree_path *path = trans->paths + path_idx;
1825 	bch2_btree_path_downgrade(trans, path);
1826 	__bch2_btree_path_unlock(trans, path);
1827 	return path_idx;
1828 }
1829 
1830 struct bkey_s_c bch2_btree_path_peek_slot(struct btree_path *path, struct bkey *u)
1831 {
1832 
1833 	struct btree_path_level *l = path_l(path);
1834 	struct bkey_packed *_k;
1835 	struct bkey_s_c k;
1836 
1837 	if (unlikely(!l->b))
1838 		return bkey_s_c_null;
1839 
1840 	EBUG_ON(path->uptodate != BTREE_ITER_UPTODATE);
1841 	EBUG_ON(!btree_node_locked(path, path->level));
1842 
1843 	if (!path->cached) {
1844 		_k = bch2_btree_node_iter_peek_all(&l->iter, l->b);
1845 		k = _k ? bkey_disassemble(l->b, _k, u) : bkey_s_c_null;
1846 
1847 		EBUG_ON(k.k && bkey_deleted(k.k) && bpos_eq(k.k->p, path->pos));
1848 
1849 		if (!k.k || !bpos_eq(path->pos, k.k->p))
1850 			goto hole;
1851 	} else {
1852 		struct bkey_cached *ck = (void *) path->l[0].b;
1853 		if (!ck)
1854 			return bkey_s_c_null;
1855 
1856 		EBUG_ON(path->btree_id != ck->key.btree_id ||
1857 			!bkey_eq(path->pos, ck->key.pos));
1858 
1859 		*u = ck->k->k;
1860 		k = (struct bkey_s_c) { u, &ck->k->v };
1861 	}
1862 
1863 	return k;
1864 hole:
1865 	bkey_init(u);
1866 	u->p = path->pos;
1867 	return (struct bkey_s_c) { u, NULL };
1868 }
1869 
1870 void bch2_set_btree_iter_dontneed(struct btree_trans *trans, struct btree_iter *iter)
1871 {
1872 	if (!iter->path || trans->restarted)
1873 		return;
1874 
1875 	struct btree_path *path = btree_iter_path(trans, iter);
1876 	path->preserve		= false;
1877 	if (path->ref == 1)
1878 		path->should_be_locked	= false;
1879 }
1880 /* Btree iterators: */
1881 
1882 int __must_check
1883 __bch2_btree_iter_traverse(struct btree_trans *trans, struct btree_iter *iter)
1884 {
1885 	return bch2_btree_path_traverse(trans, iter->path, iter->flags);
1886 }
1887 
1888 int __must_check
1889 bch2_btree_iter_traverse(struct btree_trans *trans, struct btree_iter *iter)
1890 {
1891 	bch2_trans_verify_not_unlocked_or_in_restart(trans);
1892 
1893 	iter->path = bch2_btree_path_set_pos(trans, iter->path,
1894 					btree_iter_search_key(iter),
1895 					iter->flags & BTREE_ITER_intent,
1896 					btree_iter_ip_allocated(iter));
1897 
1898 	int ret = bch2_btree_path_traverse(trans, iter->path, iter->flags);
1899 	if (ret)
1900 		return ret;
1901 
1902 	struct btree_path *path = btree_iter_path(trans, iter);
1903 	if (btree_path_node(path, path->level))
1904 		btree_path_set_should_be_locked(trans, path);
1905 	return 0;
1906 }
1907 
1908 /* Iterate across nodes (leaf and interior nodes) */
1909 
1910 struct btree *bch2_btree_iter_peek_node(struct btree_trans *trans,
1911 					struct btree_iter *iter)
1912 {
1913 	struct btree *b = NULL;
1914 	int ret;
1915 
1916 	EBUG_ON(trans->paths[iter->path].cached);
1917 	bch2_btree_iter_verify(trans, iter);
1918 
1919 	ret = bch2_btree_path_traverse(trans, iter->path, iter->flags);
1920 	if (ret)
1921 		goto err;
1922 
1923 	struct btree_path *path = btree_iter_path(trans, iter);
1924 	b = btree_path_node(path, path->level);
1925 	if (!b)
1926 		goto out;
1927 
1928 	BUG_ON(bpos_lt(b->key.k.p, iter->pos));
1929 
1930 	bkey_init(&iter->k);
1931 	iter->k.p = iter->pos = b->key.k.p;
1932 
1933 	iter->path = bch2_btree_path_set_pos(trans, iter->path, b->key.k.p,
1934 					iter->flags & BTREE_ITER_intent,
1935 					btree_iter_ip_allocated(iter));
1936 	btree_path_set_should_be_locked(trans, btree_iter_path(trans, iter));
1937 out:
1938 	bch2_btree_iter_verify_entry_exit(iter);
1939 	bch2_btree_iter_verify(trans, iter);
1940 
1941 	return b;
1942 err:
1943 	b = ERR_PTR(ret);
1944 	goto out;
1945 }
1946 
1947 /* Only kept for -tools */
1948 struct btree *bch2_btree_iter_peek_node_and_restart(struct btree_trans *trans,
1949 						    struct btree_iter *iter)
1950 {
1951 	struct btree *b;
1952 
1953 	while (b = bch2_btree_iter_peek_node(trans, iter),
1954 	       bch2_err_matches(PTR_ERR_OR_ZERO(b), BCH_ERR_transaction_restart))
1955 		bch2_trans_begin(trans);
1956 
1957 	return b;
1958 }
1959 
1960 struct btree *bch2_btree_iter_next_node(struct btree_trans *trans, struct btree_iter *iter)
1961 {
1962 	struct btree *b = NULL;
1963 	int ret;
1964 
1965 	EBUG_ON(trans->paths[iter->path].cached);
1966 	bch2_trans_verify_not_unlocked_or_in_restart(trans);
1967 	bch2_btree_iter_verify(trans, iter);
1968 
1969 	ret = bch2_btree_path_traverse(trans, iter->path, iter->flags);
1970 	if (ret)
1971 		goto err;
1972 
1973 
1974 	struct btree_path *path = btree_iter_path(trans, iter);
1975 
1976 	/* already at end? */
1977 	if (!btree_path_node(path, path->level))
1978 		return NULL;
1979 
1980 	/* got to end? */
1981 	if (!btree_path_node(path, path->level + 1)) {
1982 		path->should_be_locked = false;
1983 		btree_path_set_level_up(trans, path);
1984 		return NULL;
1985 	}
1986 
1987 	/*
1988 	 * We don't correctly handle nodes with extra intent locks here:
1989 	 * downgrade so we don't violate locking invariants
1990 	 */
1991 	bch2_btree_path_downgrade(trans, path);
1992 
1993 	if (!bch2_btree_node_relock(trans, path, path->level + 1)) {
1994 		trace_and_count(trans->c, trans_restart_relock_next_node, trans, _THIS_IP_, path);
1995 		ret = btree_trans_restart(trans, BCH_ERR_transaction_restart_relock);
1996 		__bch2_btree_path_unlock(trans, path);
1997 		path->l[path->level].b		= ERR_PTR(-BCH_ERR_no_btree_node_relock);
1998 		path->l[path->level + 1].b	= ERR_PTR(-BCH_ERR_no_btree_node_relock);
1999 		btree_path_set_dirty(trans, path, BTREE_ITER_NEED_TRAVERSE);
2000 		goto err;
2001 	}
2002 
2003 	b = btree_path_node(path, path->level + 1);
2004 
2005 	if (bpos_eq(iter->pos, b->key.k.p)) {
2006 		__btree_path_set_level_up(trans, path, path->level++);
2007 	} else {
2008 		if (btree_lock_want(path, path->level + 1) == BTREE_NODE_UNLOCKED)
2009 			btree_node_unlock(trans, path, path->level + 1);
2010 
2011 		/*
2012 		 * Haven't gotten to the end of the parent node: go back down to
2013 		 * the next child node
2014 		 */
2015 		iter->path = bch2_btree_path_set_pos(trans, iter->path,
2016 					bpos_successor(iter->pos),
2017 					iter->flags & BTREE_ITER_intent,
2018 					btree_iter_ip_allocated(iter));
2019 
2020 		path = btree_iter_path(trans, iter);
2021 		btree_path_set_level_down(trans, path, iter->min_depth);
2022 
2023 		ret = bch2_btree_path_traverse(trans, iter->path, iter->flags);
2024 		if (ret)
2025 			goto err;
2026 
2027 		path = btree_iter_path(trans, iter);
2028 		b = path->l[path->level].b;
2029 	}
2030 
2031 	bkey_init(&iter->k);
2032 	iter->k.p = iter->pos = b->key.k.p;
2033 
2034 	iter->path = bch2_btree_path_set_pos(trans, iter->path, b->key.k.p,
2035 					iter->flags & BTREE_ITER_intent,
2036 					btree_iter_ip_allocated(iter));
2037 	btree_path_set_should_be_locked(trans, btree_iter_path(trans, iter));
2038 	EBUG_ON(btree_iter_path(trans, iter)->uptodate);
2039 out:
2040 	bch2_btree_iter_verify_entry_exit(iter);
2041 	bch2_btree_iter_verify(trans, iter);
2042 
2043 	return b;
2044 err:
2045 	b = ERR_PTR(ret);
2046 	goto out;
2047 }
2048 
2049 /* Iterate across keys (in leaf nodes only) */
2050 
2051 inline bool bch2_btree_iter_advance(struct btree_trans *trans, struct btree_iter *iter)
2052 {
2053 	struct bpos pos = iter->k.p;
2054 	bool ret = !(iter->flags & BTREE_ITER_all_snapshots
2055 		     ? bpos_eq(pos, SPOS_MAX)
2056 		     : bkey_eq(pos, SPOS_MAX));
2057 
2058 	if (ret && !(iter->flags & BTREE_ITER_is_extents))
2059 		pos = bkey_successor(iter, pos);
2060 	bch2_btree_iter_set_pos(trans, iter, pos);
2061 	return ret;
2062 }
2063 
2064 inline bool bch2_btree_iter_rewind(struct btree_trans *trans, struct btree_iter *iter)
2065 {
2066 	struct bpos pos = bkey_start_pos(&iter->k);
2067 	bool ret = !(iter->flags & BTREE_ITER_all_snapshots
2068 		     ? bpos_eq(pos, POS_MIN)
2069 		     : bkey_eq(pos, POS_MIN));
2070 
2071 	if (ret && !(iter->flags & BTREE_ITER_is_extents))
2072 		pos = bkey_predecessor(iter, pos);
2073 	bch2_btree_iter_set_pos(trans, iter, pos);
2074 	return ret;
2075 }
2076 
2077 static noinline
2078 void bch2_btree_trans_peek_prev_updates(struct btree_trans *trans, struct btree_iter *iter,
2079 					struct bkey_s_c *k)
2080 {
2081 	struct bpos end = path_l(btree_iter_path(trans, iter))->b->data->min_key;
2082 
2083 	trans_for_each_update(trans, i)
2084 		if (!i->key_cache_already_flushed &&
2085 		    i->btree_id == iter->btree_id &&
2086 		    bpos_le(i->k->k.p, iter->pos) &&
2087 		    bpos_ge(i->k->k.p, k->k ? k->k->p : end)) {
2088 			iter->k = i->k->k;
2089 			*k = bkey_i_to_s_c(i->k);
2090 		}
2091 }
2092 
2093 static noinline
2094 void bch2_btree_trans_peek_updates(struct btree_trans *trans, struct btree_iter *iter,
2095 				   struct bkey_s_c *k)
2096 {
2097 	struct btree_path *path = btree_iter_path(trans, iter);
2098 	struct bpos end = path_l(path)->b->key.k.p;
2099 
2100 	trans_for_each_update(trans, i)
2101 		if (!i->key_cache_already_flushed &&
2102 		    i->btree_id == iter->btree_id &&
2103 		    bpos_ge(i->k->k.p, path->pos) &&
2104 		    bpos_le(i->k->k.p, k->k ? k->k->p : end)) {
2105 			iter->k = i->k->k;
2106 			*k = bkey_i_to_s_c(i->k);
2107 		}
2108 }
2109 
2110 static noinline
2111 void bch2_btree_trans_peek_slot_updates(struct btree_trans *trans, struct btree_iter *iter,
2112 					struct bkey_s_c *k)
2113 {
2114 	trans_for_each_update(trans, i)
2115 		if (!i->key_cache_already_flushed &&
2116 		    i->btree_id == iter->btree_id &&
2117 		    bpos_eq(i->k->k.p, iter->pos)) {
2118 			iter->k = i->k->k;
2119 			*k = bkey_i_to_s_c(i->k);
2120 		}
2121 }
2122 
2123 static struct bkey_i *bch2_btree_journal_peek(struct btree_trans *trans,
2124 					      struct btree_iter *iter,
2125 					      struct bpos end_pos)
2126 {
2127 	struct btree_path *path = btree_iter_path(trans, iter);
2128 
2129 	return bch2_journal_keys_peek_max(trans->c, iter->btree_id,
2130 					   path->level,
2131 					   path->pos,
2132 					   end_pos,
2133 					   &iter->journal_idx);
2134 }
2135 
2136 static noinline
2137 struct bkey_s_c btree_trans_peek_slot_journal(struct btree_trans *trans,
2138 					      struct btree_iter *iter)
2139 {
2140 	struct btree_path *path = btree_iter_path(trans, iter);
2141 	struct bkey_i *k = bch2_btree_journal_peek(trans, iter, path->pos);
2142 
2143 	if (k) {
2144 		iter->k = k->k;
2145 		return bkey_i_to_s_c(k);
2146 	} else {
2147 		return bkey_s_c_null;
2148 	}
2149 }
2150 
2151 static noinline
2152 void btree_trans_peek_journal(struct btree_trans *trans,
2153 			      struct btree_iter *iter,
2154 			      struct bkey_s_c *k)
2155 {
2156 	struct btree_path *path = btree_iter_path(trans, iter);
2157 	struct bkey_i *next_journal =
2158 		bch2_btree_journal_peek(trans, iter,
2159 				k->k ? k->k->p : path_l(path)->b->key.k.p);
2160 	if (next_journal) {
2161 		iter->k = next_journal->k;
2162 		*k = bkey_i_to_s_c(next_journal);
2163 	}
2164 }
2165 
2166 static struct bkey_i *bch2_btree_journal_peek_prev(struct btree_trans *trans,
2167 					      struct btree_iter *iter,
2168 					      struct bpos end_pos)
2169 {
2170 	struct btree_path *path = btree_iter_path(trans, iter);
2171 
2172 	return bch2_journal_keys_peek_prev_min(trans->c, iter->btree_id,
2173 					   path->level,
2174 					   path->pos,
2175 					   end_pos,
2176 					   &iter->journal_idx);
2177 }
2178 
2179 static noinline
2180 void btree_trans_peek_prev_journal(struct btree_trans *trans,
2181 				   struct btree_iter *iter,
2182 				   struct bkey_s_c *k)
2183 {
2184 	struct btree_path *path = btree_iter_path(trans, iter);
2185 	struct bkey_i *next_journal =
2186 		bch2_btree_journal_peek_prev(trans, iter,
2187 				k->k ? k->k->p : path_l(path)->b->key.k.p);
2188 
2189 	if (next_journal) {
2190 		iter->k = next_journal->k;
2191 		*k = bkey_i_to_s_c(next_journal);
2192 	}
2193 }
2194 
2195 /*
2196  * Checks btree key cache for key at iter->pos and returns it if present, or
2197  * bkey_s_c_null:
2198  */
2199 static noinline
2200 struct bkey_s_c btree_trans_peek_key_cache(struct btree_trans *trans, struct btree_iter *iter,
2201 					   struct bpos pos)
2202 {
2203 	struct bch_fs *c = trans->c;
2204 	struct bkey u;
2205 	struct bkey_s_c k;
2206 	int ret;
2207 
2208 	bch2_trans_verify_not_unlocked_or_in_restart(trans);
2209 
2210 	if ((iter->flags & BTREE_ITER_key_cache_fill) &&
2211 	    bpos_eq(iter->pos, pos))
2212 		return bkey_s_c_null;
2213 
2214 	if (!bch2_btree_key_cache_find(c, iter->btree_id, pos))
2215 		return bkey_s_c_null;
2216 
2217 	if (!iter->key_cache_path)
2218 		iter->key_cache_path = bch2_path_get(trans, iter->btree_id, pos,
2219 						     iter->flags & BTREE_ITER_intent, 0,
2220 						     iter->flags|BTREE_ITER_cached|
2221 						     BTREE_ITER_cached_nofill,
2222 						     _THIS_IP_);
2223 
2224 	iter->key_cache_path = bch2_btree_path_set_pos(trans, iter->key_cache_path, pos,
2225 					iter->flags & BTREE_ITER_intent,
2226 					btree_iter_ip_allocated(iter));
2227 
2228 	ret =   bch2_btree_path_traverse(trans, iter->key_cache_path,
2229 					 iter->flags|BTREE_ITER_cached) ?:
2230 		bch2_btree_path_relock(trans, btree_iter_path(trans, iter), _THIS_IP_);
2231 	if (unlikely(ret))
2232 		return bkey_s_c_err(ret);
2233 
2234 	k = bch2_btree_path_peek_slot(trans->paths + iter->key_cache_path, &u);
2235 	if (!k.k)
2236 		return k;
2237 
2238 	if ((iter->flags & BTREE_ITER_all_snapshots) &&
2239 	    !bpos_eq(pos, k.k->p))
2240 		return bkey_s_c_null;
2241 
2242 	iter->k = u;
2243 	k.k = &iter->k;
2244 	btree_path_set_should_be_locked(trans, trans->paths + iter->key_cache_path);
2245 	return k;
2246 }
2247 
2248 static struct bkey_s_c __bch2_btree_iter_peek(struct btree_trans *trans, struct btree_iter *iter,
2249 					      struct bpos search_key)
2250 {
2251 	struct bkey_s_c k, k2;
2252 	int ret;
2253 
2254 	EBUG_ON(btree_iter_path(trans, iter)->cached);
2255 	bch2_btree_iter_verify(trans, iter);
2256 
2257 	while (1) {
2258 		iter->path = bch2_btree_path_set_pos(trans, iter->path, search_key,
2259 					iter->flags & BTREE_ITER_intent,
2260 					btree_iter_ip_allocated(iter));
2261 
2262 		ret = bch2_btree_path_traverse(trans, iter->path, iter->flags);
2263 		if (unlikely(ret)) {
2264 			/* ensure that iter->k is consistent with iter->pos: */
2265 			bch2_btree_iter_set_pos(trans, iter, iter->pos);
2266 			k = bkey_s_c_err(ret);
2267 			break;
2268 		}
2269 
2270 		struct btree_path *path = btree_iter_path(trans, iter);
2271 		struct btree_path_level *l = path_l(path);
2272 
2273 		if (unlikely(!l->b)) {
2274 			/* No btree nodes at requested level: */
2275 			bch2_btree_iter_set_pos(trans, iter, SPOS_MAX);
2276 			k = bkey_s_c_null;
2277 			break;
2278 		}
2279 
2280 		btree_path_set_should_be_locked(trans, path);
2281 
2282 		k = btree_path_level_peek_all(trans->c, l, &iter->k);
2283 
2284 		if (unlikely(iter->flags & BTREE_ITER_with_key_cache) &&
2285 		    k.k &&
2286 		    (k2 = btree_trans_peek_key_cache(trans, iter, k.k->p)).k) {
2287 			k = k2;
2288 			if (bkey_err(k)) {
2289 				bch2_btree_iter_set_pos(trans, iter, iter->pos);
2290 				break;
2291 			}
2292 		}
2293 
2294 		if (unlikely(iter->flags & BTREE_ITER_with_journal))
2295 			btree_trans_peek_journal(trans, iter, &k);
2296 
2297 		if (unlikely((iter->flags & BTREE_ITER_with_updates) &&
2298 			     trans->nr_updates))
2299 			bch2_btree_trans_peek_updates(trans, iter, &k);
2300 
2301 		if (k.k && bkey_deleted(k.k)) {
2302 			/*
2303 			 * If we've got a whiteout, and it's after the search
2304 			 * key, advance the search key to the whiteout instead
2305 			 * of just after the whiteout - it might be a btree
2306 			 * whiteout, with a real key at the same position, since
2307 			 * in the btree deleted keys sort before non deleted.
2308 			 */
2309 			search_key = !bpos_eq(search_key, k.k->p)
2310 				? k.k->p
2311 				: bpos_successor(k.k->p);
2312 			continue;
2313 		}
2314 
2315 		if (likely(k.k)) {
2316 			break;
2317 		} else if (likely(!bpos_eq(l->b->key.k.p, SPOS_MAX))) {
2318 			/* Advance to next leaf node: */
2319 			search_key = bpos_successor(l->b->key.k.p);
2320 		} else {
2321 			/* End of btree: */
2322 			bch2_btree_iter_set_pos(trans, iter, SPOS_MAX);
2323 			k = bkey_s_c_null;
2324 			break;
2325 		}
2326 	}
2327 
2328 	bch2_btree_iter_verify(trans, iter);
2329 	return k;
2330 }
2331 
2332 /**
2333  * bch2_btree_iter_peek_max() - returns first key greater than or equal to
2334  * iterator's current position
2335  * @trans:	btree transaction object
2336  * @iter:	iterator to peek from
2337  * @end:	search limit: returns keys less than or equal to @end
2338  *
2339  * Returns:	key if found, or an error extractable with bkey_err().
2340  */
2341 struct bkey_s_c bch2_btree_iter_peek_max(struct btree_trans *trans, struct btree_iter *iter,
2342 					 struct bpos end)
2343 {
2344 	struct bpos search_key = btree_iter_search_key(iter);
2345 	struct bkey_s_c k;
2346 	struct bpos iter_pos = iter->pos;
2347 	int ret;
2348 
2349 	bch2_trans_verify_not_unlocked_or_in_restart(trans);
2350 	bch2_btree_iter_verify_entry_exit(iter);
2351 	EBUG_ON((iter->flags & BTREE_ITER_filter_snapshots) && bkey_eq(end, POS_MAX));
2352 
2353 	ret = trans_maybe_inject_restart(trans, _RET_IP_);
2354 	if (unlikely(ret)) {
2355 		k = bkey_s_c_err(ret);
2356 		goto out_no_locked;
2357 	}
2358 
2359 	if (iter->update_path) {
2360 		bch2_path_put(trans, iter->update_path, iter->flags & BTREE_ITER_intent);
2361 		iter->update_path = 0;
2362 	}
2363 
2364 	while (1) {
2365 		k = __bch2_btree_iter_peek(trans, iter, search_key);
2366 		if (unlikely(!k.k))
2367 			goto end;
2368 		if (unlikely(bkey_err(k)))
2369 			goto out_no_locked;
2370 
2371 		if (iter->flags & BTREE_ITER_filter_snapshots) {
2372 			/*
2373 			 * We need to check against @end before FILTER_SNAPSHOTS because
2374 			 * if we get to a different inode that requested we might be
2375 			 * seeing keys for a different snapshot tree that will all be
2376 			 * filtered out.
2377 			 *
2378 			 * But we can't do the full check here, because bkey_start_pos()
2379 			 * isn't monotonically increasing before FILTER_SNAPSHOTS, and
2380 			 * that's what we check against in extents mode:
2381 			 */
2382 			if (unlikely(!(iter->flags & BTREE_ITER_is_extents)
2383 				     ? bkey_gt(k.k->p, end)
2384 				     : k.k->p.inode > end.inode))
2385 				goto end;
2386 
2387 			if (iter->update_path &&
2388 			    !bkey_eq(trans->paths[iter->update_path].pos, k.k->p)) {
2389 				bch2_path_put(trans, iter->update_path,
2390 					      iter->flags & BTREE_ITER_intent);
2391 				iter->update_path = 0;
2392 			}
2393 
2394 			if ((iter->flags & BTREE_ITER_intent) &&
2395 			    !(iter->flags & BTREE_ITER_is_extents) &&
2396 			    !iter->update_path) {
2397 				struct bpos pos = k.k->p;
2398 
2399 				if (pos.snapshot < iter->snapshot) {
2400 					search_key = bpos_successor(k.k->p);
2401 					continue;
2402 				}
2403 
2404 				pos.snapshot = iter->snapshot;
2405 
2406 				/*
2407 				 * advance, same as on exit for iter->path, but only up
2408 				 * to snapshot
2409 				 */
2410 				__btree_path_get(trans, trans->paths + iter->path, iter->flags & BTREE_ITER_intent);
2411 				iter->update_path = iter->path;
2412 
2413 				iter->update_path = bch2_btree_path_set_pos(trans,
2414 							iter->update_path, pos,
2415 							iter->flags & BTREE_ITER_intent,
2416 							_THIS_IP_);
2417 				ret = bch2_btree_path_traverse(trans, iter->update_path, iter->flags);
2418 				if (unlikely(ret)) {
2419 					k = bkey_s_c_err(ret);
2420 					goto out_no_locked;
2421 				}
2422 			}
2423 
2424 			/*
2425 			 * We can never have a key in a leaf node at POS_MAX, so
2426 			 * we don't have to check these successor() calls:
2427 			 */
2428 			if (!bch2_snapshot_is_ancestor(trans->c,
2429 						       iter->snapshot,
2430 						       k.k->p.snapshot)) {
2431 				search_key = bpos_successor(k.k->p);
2432 				continue;
2433 			}
2434 
2435 			if (bkey_whiteout(k.k) &&
2436 			    !(iter->flags & BTREE_ITER_key_cache_fill)) {
2437 				search_key = bkey_successor(iter, k.k->p);
2438 				continue;
2439 			}
2440 		}
2441 
2442 		/*
2443 		 * iter->pos should be mononotically increasing, and always be
2444 		 * equal to the key we just returned - except extents can
2445 		 * straddle iter->pos:
2446 		 */
2447 		if (!(iter->flags & BTREE_ITER_is_extents))
2448 			iter_pos = k.k->p;
2449 		else
2450 			iter_pos = bkey_max(iter->pos, bkey_start_pos(k.k));
2451 
2452 		if (unlikely(iter->flags & BTREE_ITER_all_snapshots	? bpos_gt(iter_pos, end) :
2453 			     iter->flags & BTREE_ITER_is_extents	? bkey_ge(iter_pos, end) :
2454 									  bkey_gt(iter_pos, end)))
2455 			goto end;
2456 
2457 		break;
2458 	}
2459 
2460 	iter->pos = iter_pos;
2461 
2462 	iter->path = bch2_btree_path_set_pos(trans, iter->path, k.k->p,
2463 				iter->flags & BTREE_ITER_intent,
2464 				btree_iter_ip_allocated(iter));
2465 
2466 	btree_path_set_should_be_locked(trans, btree_iter_path(trans, iter));
2467 out_no_locked:
2468 	if (iter->update_path) {
2469 		ret = bch2_btree_path_relock(trans, trans->paths + iter->update_path, _THIS_IP_);
2470 		if (unlikely(ret))
2471 			k = bkey_s_c_err(ret);
2472 		else
2473 			btree_path_set_should_be_locked(trans, trans->paths + iter->update_path);
2474 	}
2475 
2476 	if (!(iter->flags & BTREE_ITER_all_snapshots))
2477 		iter->pos.snapshot = iter->snapshot;
2478 
2479 	ret = bch2_btree_iter_verify_ret(trans, iter, k);
2480 	if (unlikely(ret)) {
2481 		bch2_btree_iter_set_pos(trans, iter, iter->pos);
2482 		k = bkey_s_c_err(ret);
2483 	}
2484 
2485 	bch2_btree_iter_verify_entry_exit(iter);
2486 
2487 	return k;
2488 end:
2489 	bch2_btree_iter_set_pos(trans, iter, end);
2490 	k = bkey_s_c_null;
2491 	goto out_no_locked;
2492 }
2493 
2494 /**
2495  * bch2_btree_iter_next() - returns first key greater than iterator's current
2496  * position
2497  * @trans:	btree transaction object
2498  * @iter:	iterator to peek from
2499  *
2500  * Returns:	key if found, or an error extractable with bkey_err().
2501  */
2502 struct bkey_s_c bch2_btree_iter_next(struct btree_trans *trans, struct btree_iter *iter)
2503 {
2504 	if (!bch2_btree_iter_advance(trans, iter))
2505 		return bkey_s_c_null;
2506 
2507 	return bch2_btree_iter_peek(trans, iter);
2508 }
2509 
2510 static struct bkey_s_c __bch2_btree_iter_peek_prev(struct btree_trans *trans, struct btree_iter *iter,
2511 						   struct bpos search_key)
2512 {
2513 	struct bkey_s_c k, k2;
2514 
2515 	bch2_btree_iter_verify(trans, iter);
2516 
2517 	while (1) {
2518 		iter->path = bch2_btree_path_set_pos(trans, iter->path, search_key,
2519 					iter->flags & BTREE_ITER_intent,
2520 					btree_iter_ip_allocated(iter));
2521 
2522 		int ret = bch2_btree_path_traverse(trans, iter->path, iter->flags);
2523 		if (unlikely(ret)) {
2524 			/* ensure that iter->k is consistent with iter->pos: */
2525 			bch2_btree_iter_set_pos(trans, iter, iter->pos);
2526 			k = bkey_s_c_err(ret);
2527 			break;
2528 		}
2529 
2530 		struct btree_path *path = btree_iter_path(trans, iter);
2531 		struct btree_path_level *l = path_l(path);
2532 
2533 		if (unlikely(!l->b)) {
2534 			/* No btree nodes at requested level: */
2535 			bch2_btree_iter_set_pos(trans, iter, SPOS_MAX);
2536 			k = bkey_s_c_null;
2537 			break;
2538 		}
2539 
2540 		btree_path_set_should_be_locked(trans, path);
2541 
2542 		k = btree_path_level_peek_all(trans->c, l, &iter->k);
2543 		if (!k.k || bpos_gt(k.k->p, search_key)) {
2544 			k = btree_path_level_prev(trans, path, l, &iter->k);
2545 
2546 			BUG_ON(k.k && bpos_gt(k.k->p, search_key));
2547 		}
2548 
2549 		if (unlikely(iter->flags & BTREE_ITER_with_key_cache) &&
2550 		    k.k &&
2551 		    (k2 = btree_trans_peek_key_cache(trans, iter, k.k->p)).k) {
2552 			k = k2;
2553 			if (bkey_err(k2)) {
2554 				bch2_btree_iter_set_pos(trans, iter, iter->pos);
2555 				break;
2556 			}
2557 		}
2558 
2559 		if (unlikely(iter->flags & BTREE_ITER_with_journal))
2560 			btree_trans_peek_prev_journal(trans, iter, &k);
2561 
2562 		if (unlikely((iter->flags & BTREE_ITER_with_updates) &&
2563 			     trans->nr_updates))
2564 			bch2_btree_trans_peek_prev_updates(trans, iter, &k);
2565 
2566 		if (likely(k.k && !bkey_deleted(k.k))) {
2567 			break;
2568 		} else if (k.k) {
2569 			search_key = bpos_predecessor(k.k->p);
2570 		} else if (likely(!bpos_eq(path->l[0].b->data->min_key, POS_MIN))) {
2571 			/* Advance to previous leaf node: */
2572 			search_key = bpos_predecessor(path->l[0].b->data->min_key);
2573 		} else {
2574 			/* Start of btree: */
2575 			bch2_btree_iter_set_pos(trans, iter, POS_MIN);
2576 			k = bkey_s_c_null;
2577 			break;
2578 		}
2579 	}
2580 
2581 	bch2_btree_iter_verify(trans, iter);
2582 	return k;
2583 }
2584 
2585 /**
2586  * bch2_btree_iter_peek_prev_min() - returns first key less than or equal to
2587  * iterator's current position
2588  * @trans:	btree transaction object
2589  * @iter:	iterator to peek from
2590  * @end:	search limit: returns keys greater than or equal to @end
2591  *
2592  * Returns:	key if found, or an error extractable with bkey_err().
2593  */
2594 struct bkey_s_c bch2_btree_iter_peek_prev_min(struct btree_trans *trans, struct btree_iter *iter,
2595 					      struct bpos end)
2596 {
2597 	if ((iter->flags & (BTREE_ITER_is_extents|BTREE_ITER_filter_snapshots)) &&
2598 	   !bkey_eq(iter->pos, POS_MAX) &&
2599 	   !((iter->flags & BTREE_ITER_is_extents) &&
2600 	     iter->pos.offset == U64_MAX)) {
2601 
2602 		/*
2603 		 * bkey_start_pos(), for extents, is not monotonically
2604 		 * increasing until after filtering for snapshots:
2605 		 *
2606 		 * Thus, for extents we need to search forward until we find a
2607 		 * real visible extents - easiest to just use peek_slot() (which
2608 		 * internally uses peek() for extents)
2609 		 */
2610 		struct bkey_s_c k = bch2_btree_iter_peek_slot(trans, iter);
2611 		if (bkey_err(k))
2612 			return k;
2613 
2614 		if (!bkey_deleted(k.k) &&
2615 		    (!(iter->flags & BTREE_ITER_is_extents) ||
2616 		     bkey_lt(bkey_start_pos(k.k), iter->pos)))
2617 			return k;
2618 	}
2619 
2620 	struct bpos search_key = iter->pos;
2621 	struct bkey_s_c k;
2622 	btree_path_idx_t saved_path = 0;
2623 
2624 	bch2_trans_verify_not_unlocked_or_in_restart(trans);
2625 	bch2_btree_iter_verify_entry_exit(iter);
2626 	EBUG_ON((iter->flags & BTREE_ITER_filter_snapshots) && iter->pos.inode != end.inode);
2627 
2628 	int ret = trans_maybe_inject_restart(trans, _RET_IP_);
2629 	if (unlikely(ret)) {
2630 		k = bkey_s_c_err(ret);
2631 		goto out_no_locked;
2632 	}
2633 
2634 	while (1) {
2635 		k = __bch2_btree_iter_peek_prev(trans, iter, search_key);
2636 		if (unlikely(!k.k))
2637 			goto end;
2638 		if (unlikely(bkey_err(k)))
2639 			goto out_no_locked;
2640 
2641 		if (iter->flags & BTREE_ITER_filter_snapshots) {
2642 			struct btree_path *s = saved_path ? trans->paths + saved_path : NULL;
2643 			if (s && bpos_lt(k.k->p, SPOS(s->pos.inode, s->pos.offset, iter->snapshot))) {
2644 				/*
2645 				 * If we have a saved candidate, and we're past
2646 				 * the last possible snapshot overwrite, return
2647 				 * it:
2648 				 */
2649 				bch2_path_put(trans, iter->path,
2650 					      iter->flags & BTREE_ITER_intent);
2651 				iter->path = saved_path;
2652 				saved_path = 0;
2653 				k = bch2_btree_path_peek_slot(btree_iter_path(trans, iter), &iter->k);
2654 				break;
2655 			}
2656 
2657 			/*
2658 			 * We need to check against @end before FILTER_SNAPSHOTS because
2659 			 * if we get to a different inode that requested we might be
2660 			 * seeing keys for a different snapshot tree that will all be
2661 			 * filtered out.
2662 			 */
2663 			if (unlikely(bkey_lt(k.k->p, end)))
2664 				goto end;
2665 
2666 			if (!bch2_snapshot_is_ancestor(trans->c, iter->snapshot, k.k->p.snapshot)) {
2667 				search_key = bpos_predecessor(k.k->p);
2668 				continue;
2669 			}
2670 
2671 			if (k.k->p.snapshot != iter->snapshot) {
2672 				/*
2673 				 * Have a key visible in iter->snapshot, but
2674 				 * might have overwrites: - save it and keep
2675 				 * searching. Unless it's a whiteout - then drop
2676 				 * our previous saved candidate:
2677 				 */
2678 				if (saved_path) {
2679 					bch2_path_put(trans, saved_path,
2680 						      iter->flags & BTREE_ITER_intent);
2681 					saved_path = 0;
2682 				}
2683 
2684 				if (!bkey_whiteout(k.k)) {
2685 					saved_path = btree_path_clone(trans, iter->path,
2686 								iter->flags & BTREE_ITER_intent,
2687 								_THIS_IP_);
2688 					trace_btree_path_save_pos(trans,
2689 								  trans->paths + iter->path,
2690 								  trans->paths + saved_path);
2691 				}
2692 
2693 				search_key = bpos_predecessor(k.k->p);
2694 				continue;
2695 			}
2696 
2697 			if (bkey_whiteout(k.k)) {
2698 				search_key = bkey_predecessor(iter, k.k->p);
2699 				search_key.snapshot = U32_MAX;
2700 				continue;
2701 			}
2702 		}
2703 
2704 		EBUG_ON(iter->flags & BTREE_ITER_all_snapshots		? bpos_gt(k.k->p, iter->pos) :
2705 			iter->flags & BTREE_ITER_is_extents		? bkey_ge(bkey_start_pos(k.k), iter->pos) :
2706 									  bkey_gt(k.k->p, iter->pos));
2707 
2708 		if (unlikely(iter->flags & BTREE_ITER_all_snapshots	? bpos_lt(k.k->p, end) :
2709 			     iter->flags & BTREE_ITER_is_extents	? bkey_le(k.k->p, end) :
2710 									  bkey_lt(k.k->p, end)))
2711 			goto end;
2712 
2713 		break;
2714 	}
2715 
2716 	/* Extents can straddle iter->pos: */
2717 	iter->pos = bpos_min(iter->pos, k.k->p);;
2718 
2719 	if (iter->flags & BTREE_ITER_filter_snapshots)
2720 		iter->pos.snapshot = iter->snapshot;
2721 out_no_locked:
2722 	if (saved_path)
2723 		bch2_path_put(trans, saved_path, iter->flags & BTREE_ITER_intent);
2724 
2725 	bch2_btree_iter_verify_entry_exit(iter);
2726 	bch2_btree_iter_verify(trans, iter);
2727 	return k;
2728 end:
2729 	bch2_btree_iter_set_pos(trans, iter, end);
2730 	k = bkey_s_c_null;
2731 	goto out_no_locked;
2732 }
2733 
2734 /**
2735  * bch2_btree_iter_prev() - returns first key less than iterator's current
2736  * position
2737  * @trans:	btree transaction object
2738  * @iter:	iterator to peek from
2739  *
2740  * Returns:	key if found, or an error extractable with bkey_err().
2741  */
2742 struct bkey_s_c bch2_btree_iter_prev(struct btree_trans *trans, struct btree_iter *iter)
2743 {
2744 	if (!bch2_btree_iter_rewind(trans, iter))
2745 		return bkey_s_c_null;
2746 
2747 	return bch2_btree_iter_peek_prev(trans, iter);
2748 }
2749 
2750 struct bkey_s_c bch2_btree_iter_peek_slot(struct btree_trans *trans, struct btree_iter *iter)
2751 {
2752 	struct bpos search_key;
2753 	struct bkey_s_c k;
2754 	int ret;
2755 
2756 	bch2_trans_verify_not_unlocked_or_in_restart(trans);
2757 	bch2_btree_iter_verify(trans, iter);
2758 	bch2_btree_iter_verify_entry_exit(iter);
2759 	EBUG_ON(btree_iter_path(trans, iter)->level && (iter->flags & BTREE_ITER_with_key_cache));
2760 
2761 	ret = trans_maybe_inject_restart(trans, _RET_IP_);
2762 	if (unlikely(ret)) {
2763 		k = bkey_s_c_err(ret);
2764 		goto out;
2765 	}
2766 
2767 	/* extents can't span inode numbers: */
2768 	if ((iter->flags & BTREE_ITER_is_extents) &&
2769 	    unlikely(iter->pos.offset == KEY_OFFSET_MAX)) {
2770 		if (iter->pos.inode == KEY_INODE_MAX)
2771 			return bkey_s_c_null;
2772 
2773 		bch2_btree_iter_set_pos(trans, iter, bpos_nosnap_successor(iter->pos));
2774 	}
2775 
2776 	search_key = btree_iter_search_key(iter);
2777 	iter->path = bch2_btree_path_set_pos(trans, iter->path, search_key,
2778 					iter->flags & BTREE_ITER_intent,
2779 					btree_iter_ip_allocated(iter));
2780 
2781 	ret = bch2_btree_path_traverse(trans, iter->path, iter->flags);
2782 	if (unlikely(ret)) {
2783 		k = bkey_s_c_err(ret);
2784 		goto out;
2785 	}
2786 
2787 	struct btree_path *path = btree_iter_path(trans, iter);
2788 	if (unlikely(!btree_path_node(path, path->level)))
2789 		return bkey_s_c_null;
2790 
2791 	btree_path_set_should_be_locked(trans, path);
2792 
2793 	if ((iter->flags & BTREE_ITER_cached) ||
2794 	    !(iter->flags & (BTREE_ITER_is_extents|BTREE_ITER_filter_snapshots))) {
2795 		k = bkey_s_c_null;
2796 
2797 		if (unlikely((iter->flags & BTREE_ITER_with_updates) &&
2798 			     trans->nr_updates)) {
2799 			bch2_btree_trans_peek_slot_updates(trans, iter, &k);
2800 			if (k.k)
2801 				goto out;
2802 		}
2803 
2804 		if (unlikely(iter->flags & BTREE_ITER_with_journal) &&
2805 		    (k = btree_trans_peek_slot_journal(trans, iter)).k)
2806 			goto out;
2807 
2808 		if (unlikely(iter->flags & BTREE_ITER_with_key_cache) &&
2809 		    (k = btree_trans_peek_key_cache(trans, iter, iter->pos)).k) {
2810 			if (!bkey_err(k))
2811 				iter->k = *k.k;
2812 			/* We're not returning a key from iter->path: */
2813 			goto out;
2814 		}
2815 
2816 		k = bch2_btree_path_peek_slot(btree_iter_path(trans, iter), &iter->k);
2817 		if (unlikely(!k.k))
2818 			goto out;
2819 
2820 		if (unlikely(k.k->type == KEY_TYPE_whiteout &&
2821 			     (iter->flags & BTREE_ITER_filter_snapshots) &&
2822 			     !(iter->flags & BTREE_ITER_key_cache_fill)))
2823 			iter->k.type = KEY_TYPE_deleted;
2824 	} else {
2825 		struct bpos next;
2826 		struct bpos end = iter->pos;
2827 
2828 		if (iter->flags & BTREE_ITER_is_extents)
2829 			end.offset = U64_MAX;
2830 
2831 		EBUG_ON(btree_iter_path(trans, iter)->level);
2832 
2833 		if (iter->flags & BTREE_ITER_intent) {
2834 			struct btree_iter iter2;
2835 
2836 			bch2_trans_copy_iter(trans, &iter2, iter);
2837 			k = bch2_btree_iter_peek_max(trans, &iter2, end);
2838 
2839 			if (k.k && !bkey_err(k)) {
2840 				swap(iter->key_cache_path, iter2.key_cache_path);
2841 				iter->k = iter2.k;
2842 				k.k = &iter->k;
2843 			}
2844 			bch2_trans_iter_exit(trans, &iter2);
2845 		} else {
2846 			struct bpos pos = iter->pos;
2847 
2848 			k = bch2_btree_iter_peek_max(trans, iter, end);
2849 			if (unlikely(bkey_err(k)))
2850 				bch2_btree_iter_set_pos(trans, iter, pos);
2851 			else
2852 				iter->pos = pos;
2853 		}
2854 
2855 		if (unlikely(bkey_err(k)))
2856 			goto out;
2857 
2858 		next = k.k ? bkey_start_pos(k.k) : POS_MAX;
2859 
2860 		if (bkey_lt(iter->pos, next)) {
2861 			bkey_init(&iter->k);
2862 			iter->k.p = iter->pos;
2863 
2864 			if (iter->flags & BTREE_ITER_is_extents) {
2865 				bch2_key_resize(&iter->k,
2866 						min_t(u64, KEY_SIZE_MAX,
2867 						      (next.inode == iter->pos.inode
2868 						       ? next.offset
2869 						       : KEY_OFFSET_MAX) -
2870 						      iter->pos.offset));
2871 				EBUG_ON(!iter->k.size);
2872 			}
2873 
2874 			k = (struct bkey_s_c) { &iter->k, NULL };
2875 		}
2876 	}
2877 out:
2878 	bch2_btree_iter_verify_entry_exit(iter);
2879 	bch2_btree_iter_verify(trans, iter);
2880 	ret = bch2_btree_iter_verify_ret(trans, iter, k);
2881 	if (unlikely(ret))
2882 		return bkey_s_c_err(ret);
2883 
2884 	return k;
2885 }
2886 
2887 struct bkey_s_c bch2_btree_iter_next_slot(struct btree_trans *trans, struct btree_iter *iter)
2888 {
2889 	if (!bch2_btree_iter_advance(trans, iter))
2890 		return bkey_s_c_null;
2891 
2892 	return bch2_btree_iter_peek_slot(trans, iter);
2893 }
2894 
2895 struct bkey_s_c bch2_btree_iter_prev_slot(struct btree_trans *trans, struct btree_iter *iter)
2896 {
2897 	if (!bch2_btree_iter_rewind(trans, iter))
2898 		return bkey_s_c_null;
2899 
2900 	return bch2_btree_iter_peek_slot(trans, iter);
2901 }
2902 
2903 /* Obsolete, but still used by rust wrapper in -tools */
2904 struct bkey_s_c bch2_btree_iter_peek_and_restart_outlined(struct btree_trans *trans, struct btree_iter *iter)
2905 {
2906 	struct bkey_s_c k;
2907 
2908 	while (btree_trans_too_many_iters(trans) ||
2909 	       (k = bch2_btree_iter_peek_type(trans, iter, iter->flags),
2910 		bch2_err_matches(bkey_err(k), BCH_ERR_transaction_restart)))
2911 		bch2_trans_begin(trans);
2912 
2913 	return k;
2914 }
2915 
2916 /* new transactional stuff: */
2917 
2918 #ifdef CONFIG_BCACHEFS_DEBUG
2919 static void btree_trans_verify_sorted_refs(struct btree_trans *trans)
2920 {
2921 	struct btree_path *path;
2922 	unsigned i;
2923 
2924 	BUG_ON(trans->nr_sorted != bitmap_weight(trans->paths_allocated, trans->nr_paths) - 1);
2925 
2926 	trans_for_each_path(trans, path, i) {
2927 		BUG_ON(path->sorted_idx >= trans->nr_sorted);
2928 		BUG_ON(trans->sorted[path->sorted_idx] != i);
2929 	}
2930 
2931 	for (i = 0; i < trans->nr_sorted; i++) {
2932 		unsigned idx = trans->sorted[i];
2933 
2934 		BUG_ON(!test_bit(idx, trans->paths_allocated));
2935 		BUG_ON(trans->paths[idx].sorted_idx != i);
2936 	}
2937 }
2938 
2939 static void btree_trans_verify_sorted(struct btree_trans *trans)
2940 {
2941 	struct btree_path *path, *prev = NULL;
2942 	struct trans_for_each_path_inorder_iter iter;
2943 
2944 	if (!static_branch_unlikely(&bch2_debug_check_iterators))
2945 		return;
2946 
2947 	trans_for_each_path_inorder(trans, path, iter) {
2948 		if (prev && btree_path_cmp(prev, path) > 0) {
2949 			__bch2_dump_trans_paths_updates(trans, true);
2950 			panic("trans paths out of order!\n");
2951 		}
2952 		prev = path;
2953 	}
2954 }
2955 #else
2956 static inline void btree_trans_verify_sorted_refs(struct btree_trans *trans) {}
2957 static inline void btree_trans_verify_sorted(struct btree_trans *trans) {}
2958 #endif
2959 
2960 void __bch2_btree_trans_sort_paths(struct btree_trans *trans)
2961 {
2962 	int i, l = 0, r = trans->nr_sorted, inc = 1;
2963 	bool swapped;
2964 
2965 	btree_trans_verify_sorted_refs(trans);
2966 
2967 	if (trans->paths_sorted)
2968 		goto out;
2969 
2970 	/*
2971 	 * Cocktail shaker sort: this is efficient because iterators will be
2972 	 * mostly sorted.
2973 	 */
2974 	do {
2975 		swapped = false;
2976 
2977 		for (i = inc > 0 ? l : r - 2;
2978 		     i + 1 < r && i >= l;
2979 		     i += inc) {
2980 			if (btree_path_cmp(trans->paths + trans->sorted[i],
2981 					   trans->paths + trans->sorted[i + 1]) > 0) {
2982 				swap(trans->sorted[i], trans->sorted[i + 1]);
2983 				trans->paths[trans->sorted[i]].sorted_idx = i;
2984 				trans->paths[trans->sorted[i + 1]].sorted_idx = i + 1;
2985 				swapped = true;
2986 			}
2987 		}
2988 
2989 		if (inc > 0)
2990 			--r;
2991 		else
2992 			l++;
2993 		inc = -inc;
2994 	} while (swapped);
2995 
2996 	trans->paths_sorted = true;
2997 out:
2998 	btree_trans_verify_sorted(trans);
2999 }
3000 
3001 static inline void btree_path_list_remove(struct btree_trans *trans,
3002 					  struct btree_path *path)
3003 {
3004 	EBUG_ON(path->sorted_idx >= trans->nr_sorted);
3005 #ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
3006 	trans->nr_sorted--;
3007 	memmove_u64s_down_small(trans->sorted + path->sorted_idx,
3008 				trans->sorted + path->sorted_idx + 1,
3009 				DIV_ROUND_UP(trans->nr_sorted - path->sorted_idx,
3010 					     sizeof(u64) / sizeof(btree_path_idx_t)));
3011 #else
3012 	array_remove_item(trans->sorted, trans->nr_sorted, path->sorted_idx);
3013 #endif
3014 	for (unsigned i = path->sorted_idx; i < trans->nr_sorted; i++)
3015 		trans->paths[trans->sorted[i]].sorted_idx = i;
3016 }
3017 
3018 static inline void btree_path_list_add(struct btree_trans *trans,
3019 				       btree_path_idx_t pos,
3020 				       btree_path_idx_t path_idx)
3021 {
3022 	struct btree_path *path = trans->paths + path_idx;
3023 
3024 	path->sorted_idx = pos ? trans->paths[pos].sorted_idx + 1 : trans->nr_sorted;
3025 
3026 #ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
3027 	memmove_u64s_up_small(trans->sorted + path->sorted_idx + 1,
3028 			      trans->sorted + path->sorted_idx,
3029 			      DIV_ROUND_UP(trans->nr_sorted - path->sorted_idx,
3030 					   sizeof(u64) / sizeof(btree_path_idx_t)));
3031 	trans->nr_sorted++;
3032 	trans->sorted[path->sorted_idx] = path_idx;
3033 #else
3034 	array_insert_item(trans->sorted, trans->nr_sorted, path->sorted_idx, path_idx);
3035 #endif
3036 
3037 	for (unsigned i = path->sorted_idx; i < trans->nr_sorted; i++)
3038 		trans->paths[trans->sorted[i]].sorted_idx = i;
3039 
3040 	btree_trans_verify_sorted_refs(trans);
3041 }
3042 
3043 void bch2_trans_iter_exit(struct btree_trans *trans, struct btree_iter *iter)
3044 {
3045 	if (iter->update_path)
3046 		bch2_path_put(trans, iter->update_path,
3047 			      iter->flags & BTREE_ITER_intent);
3048 	if (iter->path)
3049 		bch2_path_put(trans, iter->path,
3050 			      iter->flags & BTREE_ITER_intent);
3051 	if (iter->key_cache_path)
3052 		bch2_path_put(trans, iter->key_cache_path,
3053 			      iter->flags & BTREE_ITER_intent);
3054 	iter->path		= 0;
3055 	iter->update_path	= 0;
3056 	iter->key_cache_path	= 0;
3057 }
3058 
3059 void bch2_trans_iter_init_outlined(struct btree_trans *trans,
3060 			  struct btree_iter *iter,
3061 			  enum btree_id btree_id, struct bpos pos,
3062 			  unsigned flags)
3063 {
3064 	bch2_trans_iter_init_common(trans, iter, btree_id, pos, 0, 0,
3065 			       bch2_btree_iter_flags(trans, btree_id, 0, flags),
3066 			       _RET_IP_);
3067 }
3068 
3069 void bch2_trans_node_iter_init(struct btree_trans *trans,
3070 			       struct btree_iter *iter,
3071 			       enum btree_id btree_id,
3072 			       struct bpos pos,
3073 			       unsigned locks_want,
3074 			       unsigned depth,
3075 			       unsigned flags)
3076 {
3077 	flags |= BTREE_ITER_not_extents;
3078 	flags |= BTREE_ITER_snapshot_field;
3079 	flags |= BTREE_ITER_all_snapshots;
3080 
3081 	if (!depth && btree_id_cached(trans->c, btree_id))
3082 		flags |= BTREE_ITER_with_key_cache;
3083 
3084 	bch2_trans_iter_init_common(trans, iter, btree_id, pos, locks_want, depth,
3085 			       bch2_btree_iter_flags(trans, btree_id, depth, flags),
3086 			       _RET_IP_);
3087 
3088 	iter->min_depth	= depth;
3089 
3090 	struct btree_path *path = btree_iter_path(trans, iter);
3091 	BUG_ON(path->locks_want	 < min(locks_want, BTREE_MAX_DEPTH));
3092 	BUG_ON(path->level	!= depth);
3093 	BUG_ON(iter->min_depth	!= depth);
3094 }
3095 
3096 void bch2_trans_copy_iter(struct btree_trans *trans,
3097 			  struct btree_iter *dst, struct btree_iter *src)
3098 {
3099 	*dst = *src;
3100 #ifdef TRACK_PATH_ALLOCATED
3101 	dst->ip_allocated = _RET_IP_;
3102 #endif
3103 	if (src->path)
3104 		__btree_path_get(trans, trans->paths + src->path, src->flags & BTREE_ITER_intent);
3105 	if (src->update_path)
3106 		__btree_path_get(trans, trans->paths + src->update_path, src->flags & BTREE_ITER_intent);
3107 	dst->key_cache_path = 0;
3108 }
3109 
3110 #ifdef CONFIG_BCACHEFS_TRANS_KMALLOC_TRACE
3111 void bch2_trans_kmalloc_trace_to_text(struct printbuf *out,
3112 				      darray_trans_kmalloc_trace *trace)
3113 {
3114 	printbuf_tabstops_reset(out);
3115 	printbuf_tabstop_push(out, 60);
3116 
3117 	darray_for_each(*trace, i)
3118 		prt_printf(out, "%pS\t%zu\n", (void *) i->ip, i->bytes);
3119 }
3120 #endif
3121 
3122 void *__bch2_trans_kmalloc(struct btree_trans *trans, size_t size, unsigned long ip)
3123 {
3124 	struct bch_fs *c = trans->c;
3125 	unsigned new_top = trans->mem_top + size;
3126 	unsigned old_bytes = trans->mem_bytes;
3127 	unsigned new_bytes = roundup_pow_of_two(new_top);
3128 	int ret;
3129 	void *new_mem;
3130 	void *p;
3131 
3132 	if (WARN_ON_ONCE(new_bytes > BTREE_TRANS_MEM_MAX)) {
3133 #ifdef CONFIG_BCACHEFS_TRANS_KMALLOC_TRACE
3134 		struct printbuf buf = PRINTBUF;
3135 		bch2_trans_kmalloc_trace_to_text(&buf, &trans->trans_kmalloc_trace);
3136 		bch2_print_str(c, KERN_ERR, buf.buf);
3137 		printbuf_exit(&buf);
3138 #endif
3139 	}
3140 
3141 	ret = trans_maybe_inject_restart(trans, _RET_IP_);
3142 	if (ret)
3143 		return ERR_PTR(ret);
3144 
3145 	struct btree_transaction_stats *s = btree_trans_stats(trans);
3146 	if (new_bytes > s->max_mem) {
3147 		mutex_lock(&s->lock);
3148 #ifdef CONFIG_BCACHEFS_TRANS_KMALLOC_TRACE
3149 		darray_resize(&s->trans_kmalloc_trace, trans->trans_kmalloc_trace.nr);
3150 		s->trans_kmalloc_trace.nr = min(s->trans_kmalloc_trace.size,
3151 						trans->trans_kmalloc_trace.nr);
3152 
3153 		memcpy(s->trans_kmalloc_trace.data,
3154 		       trans->trans_kmalloc_trace.data,
3155 		       sizeof(s->trans_kmalloc_trace.data[0]) *
3156 		       s->trans_kmalloc_trace.nr);
3157 #endif
3158 		s->max_mem = new_bytes;
3159 		mutex_unlock(&s->lock);
3160 	}
3161 
3162 	if (trans->used_mempool) {
3163 		if (trans->mem_bytes >= new_bytes)
3164 			goto out_change_top;
3165 
3166 		/* No more space from mempool item, need malloc new one */
3167 		new_mem = kmalloc(new_bytes, GFP_NOWAIT|__GFP_NOWARN);
3168 		if (unlikely(!new_mem)) {
3169 			bch2_trans_unlock(trans);
3170 
3171 			new_mem = kmalloc(new_bytes, GFP_KERNEL);
3172 			if (!new_mem)
3173 				return ERR_PTR(-BCH_ERR_ENOMEM_trans_kmalloc);
3174 
3175 			ret = bch2_trans_relock(trans);
3176 			if (ret) {
3177 				kfree(new_mem);
3178 				return ERR_PTR(ret);
3179 			}
3180 		}
3181 		memcpy(new_mem, trans->mem, trans->mem_top);
3182 		trans->used_mempool = false;
3183 		mempool_free(trans->mem, &c->btree_trans_mem_pool);
3184 		goto out_new_mem;
3185 	}
3186 
3187 	new_mem = krealloc(trans->mem, new_bytes, GFP_NOWAIT|__GFP_NOWARN);
3188 	if (unlikely(!new_mem)) {
3189 		bch2_trans_unlock(trans);
3190 
3191 		new_mem = krealloc(trans->mem, new_bytes, GFP_KERNEL);
3192 		if (!new_mem && new_bytes <= BTREE_TRANS_MEM_MAX) {
3193 			new_mem = mempool_alloc(&c->btree_trans_mem_pool, GFP_KERNEL);
3194 			new_bytes = BTREE_TRANS_MEM_MAX;
3195 			memcpy(new_mem, trans->mem, trans->mem_top);
3196 			trans->used_mempool = true;
3197 			kfree(trans->mem);
3198 		}
3199 
3200 		if (!new_mem)
3201 			return ERR_PTR(-BCH_ERR_ENOMEM_trans_kmalloc);
3202 
3203 		trans->mem = new_mem;
3204 		trans->mem_bytes = new_bytes;
3205 
3206 		ret = bch2_trans_relock(trans);
3207 		if (ret)
3208 			return ERR_PTR(ret);
3209 	}
3210 out_new_mem:
3211 	trans->mem = new_mem;
3212 	trans->mem_bytes = new_bytes;
3213 
3214 	if (old_bytes) {
3215 		trace_and_count(c, trans_restart_mem_realloced, trans, _RET_IP_, new_bytes);
3216 		return ERR_PTR(btree_trans_restart_ip(trans,
3217 					BCH_ERR_transaction_restart_mem_realloced, _RET_IP_));
3218 	}
3219 out_change_top:
3220 	bch2_trans_kmalloc_trace(trans, size, ip);
3221 
3222 	p = trans->mem + trans->mem_top;
3223 	trans->mem_top += size;
3224 	memset(p, 0, size);
3225 	return p;
3226 }
3227 
3228 static inline void check_srcu_held_too_long(struct btree_trans *trans)
3229 {
3230 	WARN(trans->srcu_held && time_after(jiffies, trans->srcu_lock_time + HZ * 10),
3231 	     "btree trans held srcu lock (delaying memory reclaim) for %lu seconds",
3232 	     (jiffies - trans->srcu_lock_time) / HZ);
3233 }
3234 
3235 void bch2_trans_srcu_unlock(struct btree_trans *trans)
3236 {
3237 	if (trans->srcu_held) {
3238 		struct bch_fs *c = trans->c;
3239 		struct btree_path *path;
3240 		unsigned i;
3241 
3242 		trans_for_each_path(trans, path, i)
3243 			if (path->cached && !btree_node_locked(path, 0))
3244 				path->l[0].b = ERR_PTR(-BCH_ERR_no_btree_node_srcu_reset);
3245 
3246 		check_srcu_held_too_long(trans);
3247 		srcu_read_unlock(&c->btree_trans_barrier, trans->srcu_idx);
3248 		trans->srcu_held = false;
3249 	}
3250 }
3251 
3252 static void bch2_trans_srcu_lock(struct btree_trans *trans)
3253 {
3254 	if (!trans->srcu_held) {
3255 		trans->srcu_idx = srcu_read_lock(&trans->c->btree_trans_barrier);
3256 		trans->srcu_lock_time	= jiffies;
3257 		trans->srcu_held = true;
3258 	}
3259 }
3260 
3261 /**
3262  * bch2_trans_begin() - reset a transaction after a interrupted attempt
3263  * @trans: transaction to reset
3264  *
3265  * Returns:	current restart counter, to be used with trans_was_restarted()
3266  *
3267  * While iterating over nodes or updating nodes a attempt to lock a btree node
3268  * may return BCH_ERR_transaction_restart when the trylock fails. When this
3269  * occurs bch2_trans_begin() should be called and the transaction retried.
3270  */
3271 u32 bch2_trans_begin(struct btree_trans *trans)
3272 {
3273 	struct btree_path *path;
3274 	unsigned i;
3275 	u64 now;
3276 
3277 	bch2_trans_reset_updates(trans);
3278 
3279 	trans->restart_count++;
3280 	trans->mem_top			= 0;
3281 
3282 	trans_for_each_path(trans, path, i) {
3283 		path->should_be_locked = false;
3284 
3285 		/*
3286 		 * If the transaction wasn't restarted, we're presuming to be
3287 		 * doing something new: dont keep iterators excpt the ones that
3288 		 * are in use - except for the subvolumes btree:
3289 		 */
3290 		if (!trans->restarted && path->btree_id != BTREE_ID_subvolumes)
3291 			path->preserve = false;
3292 
3293 		/*
3294 		 * XXX: we probably shouldn't be doing this if the transaction
3295 		 * was restarted, but currently we still overflow transaction
3296 		 * iterators if we do that
3297 		 */
3298 		if (!path->ref && !path->preserve)
3299 			__bch2_path_free(trans, i);
3300 		else
3301 			path->preserve = false;
3302 	}
3303 
3304 	now = local_clock();
3305 
3306 	if (!IS_ENABLED(CONFIG_BCACHEFS_NO_LATENCY_ACCT) &&
3307 	    time_after64(now, trans->last_begin_time + 10))
3308 		__bch2_time_stats_update(&btree_trans_stats(trans)->duration,
3309 					 trans->last_begin_time, now);
3310 
3311 	if (!trans->restarted &&
3312 	    (need_resched() ||
3313 	     time_after64(now, trans->last_begin_time + BTREE_TRANS_MAX_LOCK_HOLD_TIME_NS))) {
3314 		bch2_trans_unlock(trans);
3315 		cond_resched();
3316 		now = local_clock();
3317 	}
3318 	trans->last_begin_time = now;
3319 
3320 	if (unlikely(trans->srcu_held &&
3321 		     time_after(jiffies, trans->srcu_lock_time + msecs_to_jiffies(10))))
3322 		bch2_trans_srcu_unlock(trans);
3323 
3324 	trans->last_begin_ip = _RET_IP_;
3325 
3326 #ifdef CONFIG_BCACHEFS_INJECT_TRANSACTION_RESTARTS
3327 	if (trans->restarted) {
3328 		trans->restart_count_this_trans++;
3329 	} else {
3330 		trans->restart_count_this_trans = 0;
3331 	}
3332 #endif
3333 
3334 #ifdef CONFIG_BCACHEFS_TRANS_KMALLOC_TRACE
3335 	trans->trans_kmalloc_trace.nr = 0;
3336 #endif
3337 
3338 	trans_set_locked(trans, false);
3339 
3340 	if (trans->restarted) {
3341 		bch2_btree_path_traverse_all(trans);
3342 		trans->notrace_relock_fail = false;
3343 	}
3344 
3345 	bch2_trans_verify_not_unlocked_or_in_restart(trans);
3346 	return trans->restart_count;
3347 }
3348 
3349 const char *bch2_btree_transaction_fns[BCH_TRANSACTIONS_NR] = { "(unknown)" };
3350 
3351 unsigned bch2_trans_get_fn_idx(const char *fn)
3352 {
3353 	for (unsigned i = 0; i < ARRAY_SIZE(bch2_btree_transaction_fns); i++)
3354 		if (!bch2_btree_transaction_fns[i] ||
3355 		    bch2_btree_transaction_fns[i] == fn) {
3356 			bch2_btree_transaction_fns[i] = fn;
3357 			return i;
3358 		}
3359 
3360 	pr_warn_once("BCH_TRANSACTIONS_NR not big enough!");
3361 	return 0;
3362 }
3363 
3364 struct btree_trans *__bch2_trans_get(struct bch_fs *c, unsigned fn_idx)
3365 	__acquires(&c->btree_trans_barrier)
3366 {
3367 	struct btree_trans *trans;
3368 
3369 	if (IS_ENABLED(__KERNEL__)) {
3370 		trans = this_cpu_xchg(c->btree_trans_bufs->trans, NULL);
3371 		if (trans) {
3372 			memset(trans, 0, offsetof(struct btree_trans, list));
3373 			goto got_trans;
3374 		}
3375 	}
3376 
3377 	trans = mempool_alloc(&c->btree_trans_pool, GFP_NOFS);
3378 	memset(trans, 0, sizeof(*trans));
3379 
3380 	seqmutex_lock(&c->btree_trans_lock);
3381 	if (IS_ENABLED(CONFIG_BCACHEFS_DEBUG)) {
3382 		struct btree_trans *pos;
3383 		pid_t pid = current->pid;
3384 
3385 		trans->locking_wait.task = current;
3386 
3387 		list_for_each_entry(pos, &c->btree_trans_list, list) {
3388 			struct task_struct *pos_task = READ_ONCE(pos->locking_wait.task);
3389 			/*
3390 			 * We'd much prefer to be stricter here and completely
3391 			 * disallow multiple btree_trans in the same thread -
3392 			 * but the data move path calls bch2_write when we
3393 			 * already have a btree_trans initialized.
3394 			 */
3395 			BUG_ON(pos_task &&
3396 			       pid == pos_task->pid &&
3397 			       pos->locked);
3398 		}
3399 	}
3400 
3401 	list_add(&trans->list, &c->btree_trans_list);
3402 	seqmutex_unlock(&c->btree_trans_lock);
3403 got_trans:
3404 	trans->c		= c;
3405 	trans->last_begin_time	= local_clock();
3406 	trans->fn_idx		= fn_idx;
3407 	trans->locking_wait.task = current;
3408 	trans->journal_replay_not_finished =
3409 		unlikely(!test_bit(JOURNAL_replay_done, &c->journal.flags)) &&
3410 		atomic_inc_not_zero(&c->journal_keys.ref);
3411 	trans->nr_paths		= ARRAY_SIZE(trans->_paths);
3412 	trans->paths_allocated	= trans->_paths_allocated;
3413 	trans->sorted		= trans->_sorted;
3414 	trans->paths		= trans->_paths;
3415 	trans->updates		= trans->_updates;
3416 
3417 	*trans_paths_nr(trans->paths) = BTREE_ITER_INITIAL;
3418 
3419 	trans->paths_allocated[0] = 1;
3420 
3421 	static struct lock_class_key lockdep_key;
3422 	lockdep_init_map(&trans->dep_map, "bcachefs_btree", &lockdep_key, 0);
3423 
3424 	if (fn_idx < BCH_TRANSACTIONS_NR) {
3425 		trans->fn = bch2_btree_transaction_fns[fn_idx];
3426 
3427 		struct btree_transaction_stats *s = &c->btree_transaction_stats[fn_idx];
3428 
3429 		if (s->max_mem) {
3430 			unsigned expected_mem_bytes = roundup_pow_of_two(s->max_mem);
3431 
3432 			trans->mem = kmalloc(expected_mem_bytes, GFP_KERNEL);
3433 			if (likely(trans->mem))
3434 				trans->mem_bytes = expected_mem_bytes;
3435 		}
3436 
3437 		trans->nr_paths_max = s->nr_max_paths;
3438 	}
3439 
3440 	trans->srcu_idx		= srcu_read_lock(&c->btree_trans_barrier);
3441 	trans->srcu_lock_time	= jiffies;
3442 	trans->srcu_held	= true;
3443 	trans_set_locked(trans, false);
3444 
3445 	closure_init_stack_release(&trans->ref);
3446 	return trans;
3447 }
3448 
3449 #ifdef CONFIG_BCACHEFS_DEBUG
3450 
3451 static bool btree_paths_leaked(struct btree_trans *trans)
3452 {
3453 	struct btree_path *path;
3454 	unsigned i;
3455 
3456 	trans_for_each_path(trans, path, i)
3457 		if (path->ref)
3458 			return true;
3459 	return false;
3460 }
3461 
3462 static void check_btree_paths_leaked(struct btree_trans *trans)
3463 {
3464 	if (btree_paths_leaked(trans)) {
3465 		struct bch_fs *c = trans->c;
3466 		struct btree_path *path;
3467 		unsigned i;
3468 
3469 		struct printbuf buf = PRINTBUF;
3470 		bch2_log_msg_start(c, &buf);
3471 
3472 		prt_printf(&buf, "btree paths leaked from %s!\n", trans->fn);
3473 		trans_for_each_path(trans, path, i)
3474 			if (path->ref)
3475 				prt_printf(&buf, "btree %s %pS\n",
3476 					   bch2_btree_id_str(path->btree_id),
3477 					   (void *) path->ip_allocated);
3478 
3479 		bch2_fs_emergency_read_only2(c, &buf);
3480 		bch2_print_str(c, KERN_ERR, buf.buf);
3481 		printbuf_exit(&buf);
3482 	}
3483 }
3484 #else
3485 static inline void check_btree_paths_leaked(struct btree_trans *trans) {}
3486 #endif
3487 
3488 void bch2_trans_put(struct btree_trans *trans)
3489 	__releases(&c->btree_trans_barrier)
3490 {
3491 	struct bch_fs *c = trans->c;
3492 
3493 	if (trans->restarted)
3494 		bch2_trans_in_restart_error(trans);
3495 
3496 	bch2_trans_unlock(trans);
3497 
3498 	trans_for_each_update(trans, i)
3499 		__btree_path_put(trans, trans->paths + i->path, true);
3500 	trans->nr_updates	= 0;
3501 
3502 	check_btree_paths_leaked(trans);
3503 
3504 	if (trans->srcu_held) {
3505 		check_srcu_held_too_long(trans);
3506 		srcu_read_unlock(&c->btree_trans_barrier, trans->srcu_idx);
3507 	}
3508 
3509 	if (unlikely(trans->journal_replay_not_finished))
3510 		bch2_journal_keys_put(c);
3511 
3512 	/*
3513 	 * trans->ref protects trans->locking_wait.task, btree_paths array; used
3514 	 * by cycle detector
3515 	 */
3516 	closure_return_sync(&trans->ref);
3517 	trans->locking_wait.task = NULL;
3518 
3519 #ifdef CONFIG_BCACHEFS_DEBUG
3520 	darray_exit(&trans->last_restarted_trace);
3521 #endif
3522 #ifdef CONFIG_BCACHEFS_TRANS_KMALLOC_TRACE
3523 	darray_exit(&trans->trans_kmalloc_trace);
3524 #endif
3525 
3526 	unsigned long *paths_allocated = trans->paths_allocated;
3527 	trans->paths_allocated	= NULL;
3528 	trans->paths		= NULL;
3529 
3530 	if (paths_allocated != trans->_paths_allocated)
3531 		kvfree_rcu_mightsleep(paths_allocated);
3532 
3533 	if (trans->used_mempool)
3534 		mempool_free(trans->mem, &c->btree_trans_mem_pool);
3535 	else
3536 		kfree(trans->mem);
3537 
3538 	/* Userspace doesn't have a real percpu implementation: */
3539 	if (IS_ENABLED(__KERNEL__))
3540 		trans = this_cpu_xchg(c->btree_trans_bufs->trans, trans);
3541 
3542 	if (trans) {
3543 		seqmutex_lock(&c->btree_trans_lock);
3544 		list_del(&trans->list);
3545 		seqmutex_unlock(&c->btree_trans_lock);
3546 
3547 		mempool_free(trans, &c->btree_trans_pool);
3548 	}
3549 }
3550 
3551 bool bch2_current_has_btree_trans(struct bch_fs *c)
3552 {
3553 	seqmutex_lock(&c->btree_trans_lock);
3554 	struct btree_trans *trans;
3555 	bool ret = false;
3556 	list_for_each_entry(trans, &c->btree_trans_list, list)
3557 		if (trans->locking_wait.task == current &&
3558 		    trans->locked) {
3559 			ret = true;
3560 			break;
3561 		}
3562 	seqmutex_unlock(&c->btree_trans_lock);
3563 	return ret;
3564 }
3565 
3566 static void __maybe_unused
3567 bch2_btree_bkey_cached_common_to_text(struct printbuf *out,
3568 				      struct btree_bkey_cached_common *b)
3569 {
3570 	struct six_lock_count c = six_lock_counts(&b->lock);
3571 	pid_t pid;
3572 
3573 	scoped_guard(rcu) {
3574 		struct task_struct *owner = READ_ONCE(b->lock.owner);
3575 		pid = owner ? owner->pid : 0;
3576 	}
3577 
3578 	prt_printf(out, "\t%px %c ", b, b->cached ? 'c' : 'b');
3579 	bch2_btree_id_to_text(out, b->btree_id);
3580 	prt_printf(out, " l=%u:", b->level);
3581 	bch2_bpos_to_text(out, btree_node_pos(b));
3582 
3583 	prt_printf(out, "\t locks %u:%u:%u held by pid %u",
3584 		   c.n[0], c.n[1], c.n[2], pid);
3585 }
3586 
3587 void bch2_btree_trans_to_text(struct printbuf *out, struct btree_trans *trans)
3588 {
3589 	struct btree_bkey_cached_common *b;
3590 	static char lock_types[] = { 'r', 'i', 'w' };
3591 	struct task_struct *task = READ_ONCE(trans->locking_wait.task);
3592 	unsigned l, idx;
3593 
3594 	/* before rcu_read_lock(): */
3595 	bch2_printbuf_make_room(out, 4096);
3596 
3597 	if (!out->nr_tabstops) {
3598 		printbuf_tabstop_push(out, 16);
3599 		printbuf_tabstop_push(out, 32);
3600 	}
3601 
3602 	prt_printf(out, "%i %s\n", task ? task->pid : 0, trans->fn);
3603 
3604 	/* trans->paths is rcu protected vs. freeing */
3605 	guard(rcu)();
3606 	out->atomic++;
3607 
3608 	struct btree_path *paths = rcu_dereference(trans->paths);
3609 	if (!paths)
3610 		goto out;
3611 
3612 	unsigned long *paths_allocated = trans_paths_allocated(paths);
3613 
3614 	trans_for_each_path_idx_from(paths_allocated, *trans_paths_nr(paths), idx, 1) {
3615 		struct btree_path *path = paths + idx;
3616 		if (!path->nodes_locked)
3617 			continue;
3618 
3619 		prt_printf(out, "  path %u %c ",
3620 			   idx,
3621 			   path->cached ? 'c' : 'b');
3622 		bch2_btree_id_to_text(out, path->btree_id);
3623 		prt_printf(out, " l=%u:", path->level);
3624 		bch2_bpos_to_text(out, path->pos);
3625 		prt_newline(out);
3626 
3627 		for (l = 0; l < BTREE_MAX_DEPTH; l++) {
3628 			if (btree_node_locked(path, l) &&
3629 			    !IS_ERR_OR_NULL(b = (void *) READ_ONCE(path->l[l].b))) {
3630 				prt_printf(out, "    %c l=%u ",
3631 					   lock_types[btree_node_locked_type(path, l)], l);
3632 				bch2_btree_bkey_cached_common_to_text(out, b);
3633 				prt_newline(out);
3634 			}
3635 		}
3636 	}
3637 
3638 	b = READ_ONCE(trans->locking);
3639 	if (b) {
3640 		prt_printf(out, "  blocked for %lluus on\n",
3641 			   div_u64(local_clock() - trans->locking_wait.start_time, 1000));
3642 		prt_printf(out, "    %c", lock_types[trans->locking_wait.lock_want]);
3643 		bch2_btree_bkey_cached_common_to_text(out, b);
3644 		prt_newline(out);
3645 	}
3646 out:
3647 	--out->atomic;
3648 }
3649 
3650 void bch2_fs_btree_iter_exit(struct bch_fs *c)
3651 {
3652 	struct btree_transaction_stats *s;
3653 	struct btree_trans *trans;
3654 	int cpu;
3655 
3656 	if (c->btree_trans_bufs)
3657 		for_each_possible_cpu(cpu) {
3658 			struct btree_trans *trans =
3659 				per_cpu_ptr(c->btree_trans_bufs, cpu)->trans;
3660 
3661 			if (trans) {
3662 				seqmutex_lock(&c->btree_trans_lock);
3663 				list_del(&trans->list);
3664 				seqmutex_unlock(&c->btree_trans_lock);
3665 			}
3666 			kfree(trans);
3667 		}
3668 	free_percpu(c->btree_trans_bufs);
3669 
3670 	trans = list_first_entry_or_null(&c->btree_trans_list, struct btree_trans, list);
3671 	if (trans)
3672 		panic("%s leaked btree_trans\n", trans->fn);
3673 
3674 	for (s = c->btree_transaction_stats;
3675 	     s < c->btree_transaction_stats + ARRAY_SIZE(c->btree_transaction_stats);
3676 	     s++) {
3677 #ifdef CONFIG_BCACHEFS_TRANS_KMALLOC_TRACE
3678 		darray_exit(&s->trans_kmalloc_trace);
3679 #endif
3680 		kfree(s->max_paths_text);
3681 		bch2_time_stats_exit(&s->lock_hold_times);
3682 	}
3683 
3684 	if (c->btree_trans_barrier_initialized) {
3685 		synchronize_srcu_expedited(&c->btree_trans_barrier);
3686 		cleanup_srcu_struct(&c->btree_trans_barrier);
3687 	}
3688 	mempool_exit(&c->btree_trans_mem_pool);
3689 	mempool_exit(&c->btree_trans_pool);
3690 }
3691 
3692 void bch2_fs_btree_iter_init_early(struct bch_fs *c)
3693 {
3694 	struct btree_transaction_stats *s;
3695 
3696 	for (s = c->btree_transaction_stats;
3697 	     s < c->btree_transaction_stats + ARRAY_SIZE(c->btree_transaction_stats);
3698 	     s++) {
3699 		bch2_time_stats_init(&s->duration);
3700 		bch2_time_stats_init(&s->lock_hold_times);
3701 		mutex_init(&s->lock);
3702 	}
3703 
3704 	INIT_LIST_HEAD(&c->btree_trans_list);
3705 	seqmutex_init(&c->btree_trans_lock);
3706 }
3707 
3708 int bch2_fs_btree_iter_init(struct bch_fs *c)
3709 {
3710 	int ret;
3711 
3712 	c->btree_trans_bufs = alloc_percpu(struct btree_trans_buf);
3713 	if (!c->btree_trans_bufs)
3714 		return -ENOMEM;
3715 
3716 	ret   = mempool_init_kmalloc_pool(&c->btree_trans_pool, 1,
3717 					  sizeof(struct btree_trans)) ?:
3718 		mempool_init_kmalloc_pool(&c->btree_trans_mem_pool, 1,
3719 					  BTREE_TRANS_MEM_MAX) ?:
3720 		init_srcu_struct(&c->btree_trans_barrier);
3721 	if (ret)
3722 		return ret;
3723 
3724 	/*
3725 	 * static annotation (hackily done) for lock ordering of reclaim vs.
3726 	 * btree node locks:
3727 	 */
3728 #ifdef CONFIG_LOCKDEP
3729 	fs_reclaim_acquire(GFP_KERNEL);
3730 	struct btree_trans *trans = bch2_trans_get(c);
3731 	trans_set_locked(trans, false);
3732 	bch2_trans_put(trans);
3733 	fs_reclaim_release(GFP_KERNEL);
3734 #endif
3735 
3736 	c->btree_trans_barrier_initialized = true;
3737 	return 0;
3738 
3739 }
3740