xref: /linux/fs/bcachefs/btree_iter.c (revision e814f3fd16acfb7f9966773953de8f740a1e3202)
1 // SPDX-License-Identifier: GPL-2.0
2 
3 #include "bcachefs.h"
4 #include "bkey_methods.h"
5 #include "bkey_buf.h"
6 #include "btree_cache.h"
7 #include "btree_iter.h"
8 #include "btree_journal_iter.h"
9 #include "btree_key_cache.h"
10 #include "btree_locking.h"
11 #include "btree_update.h"
12 #include "debug.h"
13 #include "error.h"
14 #include "extents.h"
15 #include "journal.h"
16 #include "journal_io.h"
17 #include "replicas.h"
18 #include "snapshot.h"
19 #include "trace.h"
20 
21 #include <linux/random.h>
22 #include <linux/prefetch.h>
23 
24 static inline void btree_path_list_remove(struct btree_trans *, struct btree_path *);
25 static inline void btree_path_list_add(struct btree_trans *,
26 			btree_path_idx_t, btree_path_idx_t);
27 
28 static inline unsigned long btree_iter_ip_allocated(struct btree_iter *iter)
29 {
30 #ifdef TRACK_PATH_ALLOCATED
31 	return iter->ip_allocated;
32 #else
33 	return 0;
34 #endif
35 }
36 
37 static btree_path_idx_t btree_path_alloc(struct btree_trans *, btree_path_idx_t);
38 static void bch2_trans_srcu_lock(struct btree_trans *);
39 
40 static inline int __btree_path_cmp(const struct btree_path *l,
41 				   enum btree_id	r_btree_id,
42 				   bool			r_cached,
43 				   struct bpos		r_pos,
44 				   unsigned		r_level)
45 {
46 	/*
47 	 * Must match lock ordering as defined by __bch2_btree_node_lock:
48 	 */
49 	return   cmp_int(l->btree_id,	r_btree_id) ?:
50 		 cmp_int((int) l->cached,	(int) r_cached) ?:
51 		 bpos_cmp(l->pos,	r_pos) ?:
52 		-cmp_int(l->level,	r_level);
53 }
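
/*
 * Worked example (illustrative, not from the original source): for two
 * paths with equal btree_id, cached and pos, the negated level comparison
 * sorts the higher-level path (the one closer to the root) first:
 *
 *	__btree_path_cmp(path_at_level_1, id, false, pos, 0) < 0
 *
 * so interior node paths order before leaf paths, matching the top-down
 * order in which node locks are acquired.
 */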
54 
55 static inline int btree_path_cmp(const struct btree_path *l,
56 				 const struct btree_path *r)
57 {
58 	return __btree_path_cmp(l, r->btree_id, r->cached, r->pos, r->level);
59 }
60 
61 static inline struct bpos bkey_successor(struct btree_iter *iter, struct bpos p)
62 {
63 	/* Are we iterating over keys in all snapshots? */
64 	if (iter->flags & BTREE_ITER_all_snapshots) {
65 		p = bpos_successor(p);
66 	} else {
67 		p = bpos_nosnap_successor(p);
68 		p.snapshot = iter->snapshot;
69 	}
70 
71 	return p;
72 }
73 
74 static inline struct bpos bkey_predecessor(struct btree_iter *iter, struct bpos p)
75 {
76 	/* Are we iterating over keys in all snapshots? */
77 	if (iter->flags & BTREE_ITER_all_snapshots) {
78 		p = bpos_predecessor(p);
79 	} else {
80 		p = bpos_nosnap_predecessor(p);
81 		p.snapshot = iter->snapshot;
82 	}
83 
84 	return p;
85 }
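
/*
 * Worked example (illustrative): the snapshot field is the least
 * significant component of the key ordering, so with
 * BTREE_ITER_all_snapshots the successor of (inode 1, offset 2, snapshot 3)
 * is (inode 1, offset 2, snapshot 4); without it, the position advances to
 * (inode 1, offset 3) and the snapshot field is pinned back to
 * iter->snapshot.
 */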
86 
87 static inline struct bpos btree_iter_search_key(struct btree_iter *iter)
88 {
89 	struct bpos pos = iter->pos;
90 
91 	if ((iter->flags & BTREE_ITER_is_extents) &&
92 	    !bkey_eq(pos, POS_MAX))
93 		pos = bkey_successor(iter, pos);
94 	return pos;
95 }
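
/*
 * Why the successor (sketch): extents are indexed by their end position,
 * so a lookup for the extent covering iter->pos must search from just past
 * pos - an extent whose key position equals pos ends there and does not
 * contain it.
 */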
96 
97 static inline bool btree_path_pos_before_node(struct btree_path *path,
98 					      struct btree *b)
99 {
100 	return bpos_lt(path->pos, b->data->min_key);
101 }
102 
103 static inline bool btree_path_pos_after_node(struct btree_path *path,
104 					     struct btree *b)
105 {
106 	return bpos_gt(path->pos, b->key.k.p);
107 }
108 
109 static inline bool btree_path_pos_in_node(struct btree_path *path,
110 					  struct btree *b)
111 {
112 	return path->btree_id == b->c.btree_id &&
113 		!btree_path_pos_before_node(path, b) &&
114 		!btree_path_pos_after_node(path, b);
115 }
116 
117 /* Btree iterator: */
118 
119 #ifdef CONFIG_BCACHEFS_DEBUG
120 
121 static void bch2_btree_path_verify_cached(struct btree_trans *trans,
122 					  struct btree_path *path)
123 {
124 	struct bkey_cached *ck;
125 	bool locked = btree_node_locked(path, 0);
126 
127 	if (!bch2_btree_node_relock(trans, path, 0))
128 		return;
129 
130 	ck = (void *) path->l[0].b;
131 	BUG_ON(ck->key.btree_id != path->btree_id ||
132 	       !bkey_eq(ck->key.pos, path->pos));
133 
134 	if (!locked)
135 		btree_node_unlock(trans, path, 0);
136 }
137 
138 static void bch2_btree_path_verify_level(struct btree_trans *trans,
139 				struct btree_path *path, unsigned level)
140 {
141 	struct btree_path_level *l;
142 	struct btree_node_iter tmp;
143 	bool locked;
144 	struct bkey_packed *p, *k;
145 	struct printbuf buf1 = PRINTBUF;
146 	struct printbuf buf2 = PRINTBUF;
147 	struct printbuf buf3 = PRINTBUF;
148 	const char *msg;
149 
150 	if (!bch2_debug_check_iterators)
151 		return;
152 
153 	l	= &path->l[level];
154 	tmp	= l->iter;
155 	locked	= btree_node_locked(path, level);
156 
157 	if (path->cached) {
158 		if (!level)
159 			bch2_btree_path_verify_cached(trans, path);
160 		return;
161 	}
162 
163 	if (!btree_path_node(path, level))
164 		return;
165 
166 	if (!bch2_btree_node_relock_notrace(trans, path, level))
167 		return;
168 
169 	BUG_ON(!btree_path_pos_in_node(path, l->b));
170 
171 	bch2_btree_node_iter_verify(&l->iter, l->b);
172 
173 	/*
174 	 * For interior nodes, the iterator will have skipped past deleted keys:
175 	 */
176 	p = level
177 		? bch2_btree_node_iter_prev(&tmp, l->b)
178 		: bch2_btree_node_iter_prev_all(&tmp, l->b);
179 	k = bch2_btree_node_iter_peek_all(&l->iter, l->b);
180 
181 	if (p && bkey_iter_pos_cmp(l->b, p, &path->pos) >= 0) {
182 		msg = "before";
183 		goto err;
184 	}
185 
186 	if (k && bkey_iter_pos_cmp(l->b, k, &path->pos) < 0) {
187 		msg = "after";
188 		goto err;
189 	}
190 
191 	if (!locked)
192 		btree_node_unlock(trans, path, level);
193 	return;
194 err:
195 	bch2_bpos_to_text(&buf1, path->pos);
196 
197 	if (p) {
198 		struct bkey uk = bkey_unpack_key(l->b, p);
199 
200 		bch2_bkey_to_text(&buf2, &uk);
201 	} else {
202 		prt_printf(&buf2, "(none)");
203 	}
204 
205 	if (k) {
206 		struct bkey uk = bkey_unpack_key(l->b, k);
207 
208 		bch2_bkey_to_text(&buf3, &uk);
209 	} else {
210 		prt_printf(&buf3, "(none)");
211 	}
212 
213 	panic("path should be %s key at level %u:\n"
214 	      "path pos %s\n"
215 	      "prev key %s\n"
216 	      "cur  key %s\n",
217 	      msg, level, buf1.buf, buf2.buf, buf3.buf);
218 }
219 
220 static void bch2_btree_path_verify(struct btree_trans *trans,
221 				   struct btree_path *path)
222 {
223 	struct bch_fs *c = trans->c;
224 
225 	for (unsigned i = 0; i < (!path->cached ? BTREE_MAX_DEPTH : 1); i++) {
226 		if (!path->l[i].b) {
227 			BUG_ON(!path->cached &&
228 			       bch2_btree_id_root(c, path->btree_id)->b->c.level > i);
229 			break;
230 		}
231 
232 		bch2_btree_path_verify_level(trans, path, i);
233 	}
234 
235 	bch2_btree_path_verify_locks(path);
236 }
237 
238 void bch2_trans_verify_paths(struct btree_trans *trans)
239 {
240 	struct btree_path *path;
241 	unsigned iter;
242 
243 	trans_for_each_path(trans, path, iter)
244 		bch2_btree_path_verify(trans, path);
245 }
246 
247 static void bch2_btree_iter_verify(struct btree_iter *iter)
248 {
249 	struct btree_trans *trans = iter->trans;
250 
251 	BUG_ON(!!(iter->flags & BTREE_ITER_cached) != btree_iter_path(trans, iter)->cached);
252 
253 	BUG_ON((iter->flags & BTREE_ITER_is_extents) &&
254 	       (iter->flags & BTREE_ITER_all_snapshots));
255 
256 	BUG_ON(!(iter->flags & BTREE_ITER_snapshot_field) &&
257 	       (iter->flags & BTREE_ITER_all_snapshots) &&
258 	       !btree_type_has_snapshot_field(iter->btree_id));
259 
260 	if (iter->update_path)
261 		bch2_btree_path_verify(trans, &trans->paths[iter->update_path]);
262 	bch2_btree_path_verify(trans, btree_iter_path(trans, iter));
263 }
264 
265 static void bch2_btree_iter_verify_entry_exit(struct btree_iter *iter)
266 {
267 	BUG_ON((iter->flags & BTREE_ITER_filter_snapshots) &&
268 	       !iter->pos.snapshot);
269 
270 	BUG_ON(!(iter->flags & BTREE_ITER_all_snapshots) &&
271 	       iter->pos.snapshot != iter->snapshot);
272 
273 	BUG_ON(iter->flags & BTREE_ITER_all_snapshots	? !bpos_eq(iter->pos, iter->k.p) :
274 	       !(iter->flags & BTREE_ITER_is_extents)	? !bkey_eq(iter->pos, iter->k.p) :
275 	       (bkey_lt(iter->pos, bkey_start_pos(&iter->k)) ||
276 		bkey_gt(iter->pos, iter->k.p)));
277 }
278 
279 static int bch2_btree_iter_verify_ret(struct btree_iter *iter, struct bkey_s_c k)
280 {
281 	struct btree_trans *trans = iter->trans;
282 	struct btree_iter copy;
283 	struct bkey_s_c prev;
284 	int ret = 0;
285 
286 	if (!bch2_debug_check_iterators)
287 		return 0;
288 
289 	if (!(iter->flags & BTREE_ITER_filter_snapshots))
290 		return 0;
291 
292 	if (bkey_err(k) || !k.k)
293 		return 0;
294 
295 	BUG_ON(!bch2_snapshot_is_ancestor(trans->c,
296 					  iter->snapshot,
297 					  k.k->p.snapshot));
298 
299 	bch2_trans_iter_init(trans, &copy, iter->btree_id, iter->pos,
300 			     BTREE_ITER_nopreserve|
301 			     BTREE_ITER_all_snapshots);
302 	prev = bch2_btree_iter_prev(&copy);
303 	if (!prev.k)
304 		goto out;
305 
306 	ret = bkey_err(prev);
307 	if (ret)
308 		goto out;
309 
310 	if (bkey_eq(prev.k->p, k.k->p) &&
311 	    bch2_snapshot_is_ancestor(trans->c, iter->snapshot,
312 				      prev.k->p.snapshot) > 0) {
313 		struct printbuf buf1 = PRINTBUF, buf2 = PRINTBUF;
314 
315 		bch2_bkey_to_text(&buf1, k.k);
316 		bch2_bkey_to_text(&buf2, prev.k);
317 
318 		panic("iter snap %u\n"
319 		      "k    %s\n"
320 		      "prev %s\n",
321 		      iter->snapshot,
322 		      buf1.buf, buf2.buf);
323 	}
324 out:
325 	bch2_trans_iter_exit(trans, &copy);
326 	return ret;
327 }
328 
329 void bch2_assert_pos_locked(struct btree_trans *trans, enum btree_id id,
330 			    struct bpos pos)
331 {
332 	bch2_trans_verify_not_unlocked_or_in_restart(trans);
333 
334 	struct btree_path *path;
335 	struct trans_for_each_path_inorder_iter iter;
336 	struct printbuf buf = PRINTBUF;
337 
338 	btree_trans_sort_paths(trans);
339 
340 	trans_for_each_path_inorder(trans, path, iter) {
341 		if (path->btree_id != id ||
342 		    !btree_node_locked(path, 0) ||
343 		    !path->should_be_locked)
344 			continue;
345 
346 		if (!path->cached) {
347 			if (bkey_ge(pos, path->l[0].b->data->min_key) &&
348 			    bkey_le(pos, path->l[0].b->key.k.p))
349 				return;
350 		} else {
351 			if (bkey_eq(pos, path->pos))
352 				return;
353 		}
354 	}
355 
356 	bch2_dump_trans_paths_updates(trans);
357 	bch2_bpos_to_text(&buf, pos);
358 
359 	panic("not locked: %s %s\n", bch2_btree_id_str(id), buf.buf);
360 }
361 
362 #else
363 
364 static inline void bch2_btree_path_verify_level(struct btree_trans *trans,
365 						struct btree_path *path, unsigned l) {}
366 static inline void bch2_btree_path_verify(struct btree_trans *trans,
367 					  struct btree_path *path) {}
368 static inline void bch2_btree_iter_verify(struct btree_iter *iter) {}
369 static inline void bch2_btree_iter_verify_entry_exit(struct btree_iter *iter) {}
370 static inline int bch2_btree_iter_verify_ret(struct btree_iter *iter, struct bkey_s_c k) { return 0; }
371 
372 #endif
373 
374 /* Btree path: fixups after btree updates */
375 
376 static void btree_node_iter_set_set_pos(struct btree_node_iter *iter,
377 					struct btree *b,
378 					struct bset_tree *t,
379 					struct bkey_packed *k)
380 {
381 	struct btree_node_iter_set *set;
382 
383 	btree_node_iter_for_each(iter, set)
384 		if (set->end == t->end_offset) {
385 			set->k = __btree_node_key_to_offset(b, k);
386 			bch2_btree_node_iter_sort(iter, b);
387 			return;
388 		}
389 
390 	bch2_btree_node_iter_push(iter, b, k, btree_bkey_last(b, t));
391 }
392 
393 static void __bch2_btree_path_fix_key_modified(struct btree_path *path,
394 					       struct btree *b,
395 					       struct bkey_packed *where)
396 {
397 	struct btree_path_level *l = &path->l[b->c.level];
398 
399 	if (where != bch2_btree_node_iter_peek_all(&l->iter, l->b))
400 		return;
401 
402 	if (bkey_iter_pos_cmp(l->b, where, &path->pos) < 0)
403 		bch2_btree_node_iter_advance(&l->iter, l->b);
404 }
405 
406 void bch2_btree_path_fix_key_modified(struct btree_trans *trans,
407 				      struct btree *b,
408 				      struct bkey_packed *where)
409 {
410 	struct btree_path *path;
411 	unsigned i;
412 
413 	trans_for_each_path_with_node(trans, b, path, i) {
414 		__bch2_btree_path_fix_key_modified(path, b, where);
415 		bch2_btree_path_verify_level(trans, path, b->c.level);
416 	}
417 }
418 
419 static void __bch2_btree_node_iter_fix(struct btree_path *path,
420 				       struct btree *b,
421 				       struct btree_node_iter *node_iter,
422 				       struct bset_tree *t,
423 				       struct bkey_packed *where,
424 				       unsigned clobber_u64s,
425 				       unsigned new_u64s)
426 {
427 	const struct bkey_packed *end = btree_bkey_last(b, t);
428 	struct btree_node_iter_set *set;
429 	unsigned offset = __btree_node_key_to_offset(b, where);
430 	int shift = new_u64s - clobber_u64s;
431 	unsigned old_end = t->end_offset - shift;
432 	unsigned orig_iter_pos = node_iter->data[0].k;
433 	bool iter_current_key_modified =
434 		orig_iter_pos >= offset &&
435 		orig_iter_pos <= offset + clobber_u64s;
436 
437 	btree_node_iter_for_each(node_iter, set)
438 		if (set->end == old_end)
439 			goto found;
440 
441 	/* didn't find the bset in the iterator - might have to re-add it: */
442 	if (new_u64s &&
443 	    bkey_iter_pos_cmp(b, where, &path->pos) >= 0) {
444 		bch2_btree_node_iter_push(node_iter, b, where, end);
445 		goto fixup_done;
446 	} else {
447 		/* Iterator is after key that changed */
448 		return;
449 	}
450 found:
451 	set->end = t->end_offset;
452 
453 	/* Iterator hasn't gotten to the key that changed yet: */
454 	if (set->k < offset)
455 		return;
456 
457 	if (new_u64s &&
458 	    bkey_iter_pos_cmp(b, where, &path->pos) >= 0) {
459 		set->k = offset;
460 	} else if (set->k < offset + clobber_u64s) {
461 		set->k = offset + new_u64s;
462 		if (set->k == set->end)
463 			bch2_btree_node_iter_set_drop(node_iter, set);
464 	} else {
465 		/* Iterator is after key that changed */
466 		set->k = (int) set->k + shift;
467 		return;
468 	}
469 
470 	bch2_btree_node_iter_sort(node_iter, b);
471 fixup_done:
472 	if (node_iter->data[0].k != orig_iter_pos)
473 		iter_current_key_modified = true;
474 
475 	/*
476 	 * When a new key is added, and the node iterator now points to that
477 	 * key, the iterator might have skipped past deleted keys that should
478 	 * come after the key the iterator now points to. We have to rewind to
479 	 * before those deleted keys - otherwise
480 	 * bch2_btree_node_iter_prev_all() breaks:
481 	 */
482 	if (!bch2_btree_node_iter_end(node_iter) &&
483 	    iter_current_key_modified &&
484 	    b->c.level) {
485 		struct bkey_packed *k, *k2, *p;
486 
487 		k = bch2_btree_node_iter_peek_all(node_iter, b);
488 
489 		for_each_bset(b, t) {
490 			bool set_pos = false;
491 
492 			if (node_iter->data[0].end == t->end_offset)
493 				continue;
494 
495 			k2 = bch2_btree_node_iter_bset_pos(node_iter, b, t);
496 
497 			while ((p = bch2_bkey_prev_all(b, t, k2)) &&
498 			       bkey_iter_cmp(b, k, p) < 0) {
499 				k2 = p;
500 				set_pos = true;
501 			}
502 
503 			if (set_pos)
504 				btree_node_iter_set_set_pos(node_iter,
505 							    b, t, k2);
506 		}
507 	}
508 }
509 
510 void bch2_btree_node_iter_fix(struct btree_trans *trans,
511 			      struct btree_path *path,
512 			      struct btree *b,
513 			      struct btree_node_iter *node_iter,
514 			      struct bkey_packed *where,
515 			      unsigned clobber_u64s,
516 			      unsigned new_u64s)
517 {
518 	struct bset_tree *t = bch2_bkey_to_bset_inlined(b, where);
519 	struct btree_path *linked;
520 	unsigned i;
521 
522 	if (node_iter != &path->l[b->c.level].iter) {
523 		__bch2_btree_node_iter_fix(path, b, node_iter, t,
524 					   where, clobber_u64s, new_u64s);
525 
526 		if (bch2_debug_check_iterators)
527 			bch2_btree_node_iter_verify(node_iter, b);
528 	}
529 
530 	trans_for_each_path_with_node(trans, b, linked, i) {
531 		__bch2_btree_node_iter_fix(linked, b,
532 					   &linked->l[b->c.level].iter, t,
533 					   where, clobber_u64s, new_u64s);
534 		bch2_btree_path_verify_level(trans, linked, b->c.level);
535 	}
536 }
537 
538 /* Btree path level: pointer to a particular btree node and node iter */
539 
540 static inline struct bkey_s_c __btree_iter_unpack(struct bch_fs *c,
541 						  struct btree_path_level *l,
542 						  struct bkey *u,
543 						  struct bkey_packed *k)
544 {
545 	if (unlikely(!k)) {
546 		/*
547 		 * signal to bch2_btree_iter_peek_slot() that we're currently at
548 		 * a hole
549 		 */
550 		u->type = KEY_TYPE_deleted;
551 		return bkey_s_c_null;
552 	}
553 
554 	return bkey_disassemble(l->b, k, u);
555 }
556 
557 static inline struct bkey_s_c btree_path_level_peek_all(struct bch_fs *c,
558 							struct btree_path_level *l,
559 							struct bkey *u)
560 {
561 	return __btree_iter_unpack(c, l, u,
562 			bch2_btree_node_iter_peek_all(&l->iter, l->b));
563 }
564 
565 static inline struct bkey_s_c btree_path_level_peek(struct btree_trans *trans,
566 						    struct btree_path *path,
567 						    struct btree_path_level *l,
568 						    struct bkey *u)
569 {
570 	struct bkey_s_c k = __btree_iter_unpack(trans->c, l, u,
571 			bch2_btree_node_iter_peek(&l->iter, l->b));
572 
573 	path->pos = k.k ? k.k->p : l->b->key.k.p;
574 	trans->paths_sorted = false;
575 	bch2_btree_path_verify_level(trans, path, l - path->l);
576 	return k;
577 }
578 
579 static inline struct bkey_s_c btree_path_level_prev(struct btree_trans *trans,
580 						    struct btree_path *path,
581 						    struct btree_path_level *l,
582 						    struct bkey *u)
583 {
584 	struct bkey_s_c k = __btree_iter_unpack(trans->c, l, u,
585 			bch2_btree_node_iter_prev(&l->iter, l->b));
586 
587 	path->pos = k.k ? k.k->p : l->b->data->min_key;
588 	trans->paths_sorted = false;
589 	bch2_btree_path_verify_level(trans, path, l - path->l);
590 	return k;
591 }
592 
593 static inline bool btree_path_advance_to_pos(struct btree_path *path,
594 					     struct btree_path_level *l,
595 					     int max_advance)
596 {
597 	struct bkey_packed *k;
598 	int nr_advanced = 0;
599 
600 	while ((k = bch2_btree_node_iter_peek_all(&l->iter, l->b)) &&
601 	       bkey_iter_pos_cmp(l->b, k, &path->pos) < 0) {
602 		if (max_advance > 0 && nr_advanced >= max_advance)
603 			return false;
604 
605 		bch2_btree_node_iter_advance(&l->iter, l->b);
606 		nr_advanced++;
607 	}
608 
609 	return true;
610 }
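
/*
 * Usage sketch (mirroring __bch2_btree_path_set_pos() below): when moving
 * a path forward a short distance, try stepping the node iterator a
 * bounded number of keys before paying for a full reinit:
 *
 *	if (cmp < 0 ||
 *	    !btree_path_advance_to_pos(path, l, 8))
 *		bch2_btree_node_iter_init(&l->iter, l->b, &path->pos);
 */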
611 
612 static inline void __btree_path_level_init(struct btree_path *path,
613 					   unsigned level)
614 {
615 	struct btree_path_level *l = &path->l[level];
616 
617 	bch2_btree_node_iter_init(&l->iter, l->b, &path->pos);
618 
619 	/*
620 	 * Iterators to interior nodes should always be pointed at the first
621 	 * non-whiteout:
622 	 */
623 	if (level)
624 		bch2_btree_node_iter_peek(&l->iter, l->b);
625 }
626 
627 void bch2_btree_path_level_init(struct btree_trans *trans,
628 				struct btree_path *path,
629 				struct btree *b)
630 {
631 	BUG_ON(path->cached);
632 
633 	EBUG_ON(!btree_path_pos_in_node(path, b));
634 
635 	path->l[b->c.level].lock_seq = six_lock_seq(&b->c.lock);
636 	path->l[b->c.level].b = b;
637 	__btree_path_level_init(path, b->c.level);
638 }
639 
640 /* Btree path: fixups after btree node updates: */
641 
642 static void bch2_trans_revalidate_updates_in_node(struct btree_trans *trans, struct btree *b)
643 {
644 	struct bch_fs *c = trans->c;
645 
646 	trans_for_each_update(trans, i)
647 		if (!i->cached &&
648 		    i->level	== b->c.level &&
649 		    i->btree_id	== b->c.btree_id &&
650 		    bpos_cmp(i->k->k.p, b->data->min_key) >= 0 &&
651 		    bpos_cmp(i->k->k.p, b->data->max_key) <= 0) {
652 			i->old_v = bch2_btree_path_peek_slot(trans->paths + i->path, &i->old_k).v;
653 
654 			if (unlikely(trans->journal_replay_not_finished)) {
655 				struct bkey_i *j_k =
656 					bch2_journal_keys_peek_slot(c, i->btree_id, i->level,
657 								    i->k->k.p);
658 
659 				if (j_k) {
660 					i->old_k = j_k->k;
661 					i->old_v = &j_k->v;
662 				}
663 			}
664 		}
665 }
666 
667 /*
668  * A btree node is being replaced - update the iterator to point to the new
669  * node:
670  */
671 void bch2_trans_node_add(struct btree_trans *trans,
672 			 struct btree_path *path,
673 			 struct btree *b)
674 {
675 	struct btree_path *prev;
676 
677 	BUG_ON(!btree_path_pos_in_node(path, b));
678 
679 	while ((prev = prev_btree_path(trans, path)) &&
680 	       btree_path_pos_in_node(prev, b))
681 		path = prev;
682 
683 	for (;
684 	     path && btree_path_pos_in_node(path, b);
685 	     path = next_btree_path(trans, path))
686 		if (path->uptodate == BTREE_ITER_UPTODATE && !path->cached) {
687 			enum btree_node_locked_type t =
688 				btree_lock_want(path, b->c.level);
689 
690 			if (t != BTREE_NODE_UNLOCKED) {
691 				btree_node_unlock(trans, path, b->c.level);
692 				six_lock_increment(&b->c.lock, (enum six_lock_type) t);
693 				mark_btree_node_locked(trans, path, b->c.level, t);
694 			}
695 
696 			bch2_btree_path_level_init(trans, path, b);
697 		}
698 
699 	bch2_trans_revalidate_updates_in_node(trans, b);
700 }
701 
702 void bch2_trans_node_drop(struct btree_trans *trans,
703 			  struct btree *b)
704 {
705 	struct btree_path *path;
706 	unsigned i, level = b->c.level;
707 
708 	trans_for_each_path(trans, path, i)
709 		if (path->l[level].b == b) {
710 			btree_node_unlock(trans, path, level);
711 			path->l[level].b = ERR_PTR(-BCH_ERR_no_btree_node_init);
712 		}
713 }
714 
715 /*
716  * A btree node has been modified in such a way as to invalidate iterators - fix
717  * them:
718  */
719 void bch2_trans_node_reinit_iter(struct btree_trans *trans, struct btree *b)
720 {
721 	struct btree_path *path;
722 	unsigned i;
723 
724 	trans_for_each_path_with_node(trans, b, path, i)
725 		__btree_path_level_init(path, b->c.level);
726 
727 	bch2_trans_revalidate_updates_in_node(trans, b);
728 }
729 
730 /* Btree path: traverse, set_pos: */
731 
732 static inline int btree_path_lock_root(struct btree_trans *trans,
733 				       struct btree_path *path,
734 				       unsigned depth_want,
735 				       unsigned long trace_ip)
736 {
737 	struct bch_fs *c = trans->c;
738 	struct btree_root *r = bch2_btree_id_root(c, path->btree_id);
739 	enum six_lock_type lock_type;
740 	unsigned i;
741 	int ret;
742 
743 	EBUG_ON(path->nodes_locked);
744 
745 	while (1) {
746 		struct btree *b = READ_ONCE(r->b);
747 		if (unlikely(!b)) {
748 			BUG_ON(!r->error);
749 			return r->error;
750 		}
751 
752 		path->level = READ_ONCE(b->c.level);
753 
754 		if (unlikely(path->level < depth_want)) {
755 			/*
756 			 * the root is at a lower depth than the depth we want:
757 			 * got to the end of the btree, or we're walking nodes
758 			 * greater than some depth and there are no nodes >=
759 			 * that depth
760 			 */
761 			path->level = depth_want;
762 			for (i = path->level; i < BTREE_MAX_DEPTH; i++)
763 				path->l[i].b = NULL;
764 			return 1;
765 		}
766 
767 		lock_type = __btree_lock_want(path, path->level);
768 		ret = btree_node_lock(trans, path, &b->c,
769 				      path->level, lock_type, trace_ip);
770 		if (unlikely(ret)) {
771 			if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
772 				return ret;
773 			BUG();
774 		}
775 
776 		if (likely(b == READ_ONCE(r->b) &&
777 			   b->c.level == path->level &&
778 			   !race_fault())) {
779 			for (i = 0; i < path->level; i++)
780 				path->l[i].b = ERR_PTR(-BCH_ERR_no_btree_node_lock_root);
781 			path->l[path->level].b = b;
782 			for (i = path->level + 1; i < BTREE_MAX_DEPTH; i++)
783 				path->l[i].b = NULL;
784 
785 			mark_btree_node_locked(trans, path, path->level,
786 					       (enum btree_node_locked_type) lock_type);
787 			bch2_btree_path_level_init(trans, path, b);
788 			return 0;
789 		}
790 
791 		six_unlock_type(&b->c.lock, lock_type);
792 	}
793 }
794 
795 noinline
796 static int btree_path_prefetch(struct btree_trans *trans, struct btree_path *path)
797 {
798 	struct bch_fs *c = trans->c;
799 	struct btree_path_level *l = path_l(path);
800 	struct btree_node_iter node_iter = l->iter;
801 	struct bkey_packed *k;
802 	struct bkey_buf tmp;
803 	unsigned nr = test_bit(BCH_FS_started, &c->flags)
804 		? (path->level > 1 ? 0 :  2)
805 		: (path->level > 1 ? 1 : 16);
806 	bool was_locked = btree_node_locked(path, path->level);
807 	int ret = 0;
808 
809 	bch2_bkey_buf_init(&tmp);
810 
811 	while (nr-- && !ret) {
812 		if (!bch2_btree_node_relock(trans, path, path->level))
813 			break;
814 
815 		bch2_btree_node_iter_advance(&node_iter, l->b);
816 		k = bch2_btree_node_iter_peek(&node_iter, l->b);
817 		if (!k)
818 			break;
819 
820 		bch2_bkey_buf_unpack(&tmp, c, l->b, k);
821 		ret = bch2_btree_node_prefetch(trans, path, tmp.k, path->btree_id,
822 					       path->level - 1);
823 	}
824 
825 	if (!was_locked)
826 		btree_node_unlock(trans, path, path->level);
827 
828 	bch2_bkey_buf_exit(&tmp, c);
829 	return ret;
830 }
831 
832 static int btree_path_prefetch_j(struct btree_trans *trans, struct btree_path *path,
833 				 struct btree_and_journal_iter *jiter)
834 {
835 	struct bch_fs *c = trans->c;
836 	struct bkey_s_c k;
837 	struct bkey_buf tmp;
838 	unsigned nr = test_bit(BCH_FS_started, &c->flags)
839 		? (path->level > 1 ? 0 :  2)
840 		: (path->level > 1 ? 1 : 16);
841 	bool was_locked = btree_node_locked(path, path->level);
842 	int ret = 0;
843 
844 	bch2_bkey_buf_init(&tmp);
845 
846 	jiter->fail_if_too_many_whiteouts = true;
847 
848 	while (nr-- && !ret) {
849 		if (!bch2_btree_node_relock(trans, path, path->level))
850 			break;
851 
852 		bch2_btree_and_journal_iter_advance(jiter);
853 		k = bch2_btree_and_journal_iter_peek(jiter);
854 		if (!k.k)
855 			break;
856 
857 		bch2_bkey_buf_reassemble(&tmp, c, k);
858 		ret = bch2_btree_node_prefetch(trans, path, tmp.k, path->btree_id,
859 					       path->level - 1);
860 	}
861 
862 	if (!was_locked)
863 		btree_node_unlock(trans, path, path->level);
864 
865 	bch2_bkey_buf_exit(&tmp, c);
866 	return ret;
867 }
868 
869 static noinline void btree_node_mem_ptr_set(struct btree_trans *trans,
870 					    struct btree_path *path,
871 					    unsigned plevel, struct btree *b)
872 {
873 	struct btree_path_level *l = &path->l[plevel];
874 	bool locked = btree_node_locked(path, plevel);
875 	struct bkey_packed *k;
876 	struct bch_btree_ptr_v2 *bp;
877 
878 	if (!bch2_btree_node_relock(trans, path, plevel))
879 		return;
880 
881 	k = bch2_btree_node_iter_peek_all(&l->iter, l->b);
882 	BUG_ON(k->type != KEY_TYPE_btree_ptr_v2);
883 
884 	bp = (void *) bkeyp_val(&l->b->format, k);
885 	bp->mem_ptr = (unsigned long)b;
886 
887 	if (!locked)
888 		btree_node_unlock(trans, path, plevel);
889 }
890 
891 static noinline int btree_node_iter_and_journal_peek(struct btree_trans *trans,
892 						     struct btree_path *path,
893 						     unsigned flags,
894 						     struct bkey_buf *out)
895 {
896 	struct bch_fs *c = trans->c;
897 	struct btree_path_level *l = path_l(path);
898 	struct btree_and_journal_iter jiter;
899 	struct bkey_s_c k;
900 	int ret = 0;
901 
902 	__bch2_btree_and_journal_iter_init_node_iter(trans, &jiter, l->b, l->iter, path->pos);
903 
904 	k = bch2_btree_and_journal_iter_peek(&jiter);
905 	if (!k.k) {
906 		struct printbuf buf = PRINTBUF;
907 
908 		prt_str(&buf, "node not found at pos ");
909 		bch2_bpos_to_text(&buf, path->pos);
910 		prt_str(&buf, " at btree ");
911 		bch2_btree_pos_to_text(&buf, c, l->b);
912 
913 		ret = bch2_fs_topology_error(c, "%s", buf.buf);
914 		printbuf_exit(&buf);
915 		goto err;
916 	}
917 
918 	bch2_bkey_buf_reassemble(out, c, k);
919 
920 	if ((flags & BTREE_ITER_prefetch) &&
921 	    c->opts.btree_node_prefetch)
922 		ret = btree_path_prefetch_j(trans, path, &jiter);
923 
924 err:
925 	bch2_btree_and_journal_iter_exit(&jiter);
926 	return ret;
927 }
928 
929 static __always_inline int btree_path_down(struct btree_trans *trans,
930 					   struct btree_path *path,
931 					   unsigned flags,
932 					   unsigned long trace_ip)
933 {
934 	struct bch_fs *c = trans->c;
935 	struct btree_path_level *l = path_l(path);
936 	struct btree *b;
937 	unsigned level = path->level - 1;
938 	enum six_lock_type lock_type = __btree_lock_want(path, level);
939 	struct bkey_buf tmp;
940 	int ret;
941 
942 	EBUG_ON(!btree_node_locked(path, path->level));
943 
944 	bch2_bkey_buf_init(&tmp);
945 
946 	if (unlikely(trans->journal_replay_not_finished)) {
947 		ret = btree_node_iter_and_journal_peek(trans, path, flags, &tmp);
948 		if (ret)
949 			goto err;
950 	} else {
951 		struct bkey_packed *k = bch2_btree_node_iter_peek(&l->iter, l->b);
952 		if (!k) {
953 			struct printbuf buf = PRINTBUF;
954 
955 			prt_str(&buf, "node not found at pos ");
956 			bch2_bpos_to_text(&buf, path->pos);
957 			prt_str(&buf, " within parent node ");
958 			bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&l->b->key));
959 
960 			bch2_fs_fatal_error(c, "%s", buf.buf);
961 			printbuf_exit(&buf);
962 			ret = -BCH_ERR_btree_need_topology_repair;
963 			goto err;
964 		}
965 
966 		bch2_bkey_buf_unpack(&tmp, c, l->b, k);
967 
968 		if ((flags & BTREE_ITER_prefetch) &&
969 		    c->opts.btree_node_prefetch) {
970 			ret = btree_path_prefetch(trans, path);
971 			if (ret)
972 				goto err;
973 		}
974 	}
975 
976 	b = bch2_btree_node_get(trans, path, tmp.k, level, lock_type, trace_ip);
977 	ret = PTR_ERR_OR_ZERO(b);
978 	if (unlikely(ret))
979 		goto err;
980 
981 	if (likely(!trans->journal_replay_not_finished &&
982 		   tmp.k->k.type == KEY_TYPE_btree_ptr_v2) &&
983 	    unlikely(b != btree_node_mem_ptr(tmp.k)))
984 		btree_node_mem_ptr_set(trans, path, level + 1, b);
985 
986 	if (btree_node_read_locked(path, level + 1))
987 		btree_node_unlock(trans, path, level + 1);
988 
989 	mark_btree_node_locked(trans, path, level,
990 			       (enum btree_node_locked_type) lock_type);
991 	path->level = level;
992 	bch2_btree_path_level_init(trans, path, b);
993 
994 	bch2_btree_path_verify_locks(path);
995 err:
996 	bch2_bkey_buf_exit(&tmp, c);
997 	return ret;
998 }
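
/*
 * Note on the mem_ptr fast path above (a sketch of the design, inferred
 * from this file): btree_ptr_v2 keys embed a cached in-memory pointer to
 * the child node so the next descent can find it without a full btree
 * cache lookup; btree_node_mem_ptr_set() refreshes the cached pointer
 * whenever the node was found somewhere else.
 */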
999 
1000 static int bch2_btree_path_traverse_all(struct btree_trans *trans)
1001 {
1002 	struct bch_fs *c = trans->c;
1003 	struct btree_path *path;
1004 	unsigned long trace_ip = _RET_IP_;
1005 	unsigned i;
1006 	int ret = 0;
1007 
1008 	if (trans->in_traverse_all)
1009 		return -BCH_ERR_transaction_restart_in_traverse_all;
1010 
1011 	trans->in_traverse_all = true;
1012 retry_all:
1013 	trans->restarted = 0;
1014 	trans->last_restarted_ip = 0;
1015 
1016 	trans_for_each_path(trans, path, i)
1017 		path->should_be_locked = false;
1018 
1019 	btree_trans_sort_paths(trans);
1020 
1021 	bch2_trans_unlock(trans);
1022 	cond_resched();
1023 	trans_set_locked(trans, false);
1024 
1025 	if (unlikely(trans->memory_allocation_failure)) {
1026 		struct closure cl;
1027 
1028 		closure_init_stack(&cl);
1029 
1030 		do {
1031 			ret = bch2_btree_cache_cannibalize_lock(trans, &cl);
1032 			closure_sync(&cl);
1033 		} while (ret);
1034 	}
1035 
1036 	/* Now, redo traversals in correct order: */
1037 	i = 0;
1038 	while (i < trans->nr_sorted) {
1039 		btree_path_idx_t idx = trans->sorted[i];
1040 
1041 		/*
1042 		 * Traversing a path can cause another path to be added at about
1043 		 * the same position:
1044 		 */
1045 		if (trans->paths[idx].uptodate) {
1046 			__btree_path_get(trans, &trans->paths[idx], false);
1047 			ret = bch2_btree_path_traverse_one(trans, idx, 0, _THIS_IP_);
1048 			__btree_path_put(trans, &trans->paths[idx], false);
1049 
1050 			if (bch2_err_matches(ret, BCH_ERR_transaction_restart) ||
1051 			    bch2_err_matches(ret, ENOMEM))
1052 				goto retry_all;
1053 			if (ret)
1054 				goto err;
1055 		} else {
1056 			i++;
1057 		}
1058 	}
1059 
1060 	/*
1061 	 * We used to assert that all paths had been traversed here
1062 	 * (path->uptodate < BTREE_ITER_NEED_TRAVERSE); however, since
1063 	 * path->should_be_locked is not set yet, we might have unlocked and
1064 	 * then failed to relock a path - that's fine.
1065 	 */
1066 err:
1067 	bch2_btree_cache_cannibalize_unlock(trans);
1068 
1069 	trans->in_traverse_all = false;
1070 
1071 	trace_and_count(c, trans_traverse_all, trans, trace_ip);
1072 	return ret;
1073 }
1074 
1075 static inline bool btree_path_check_pos_in_node(struct btree_path *path,
1076 						unsigned l, int check_pos)
1077 {
1078 	if (check_pos < 0 && btree_path_pos_before_node(path, path->l[l].b))
1079 		return false;
1080 	if (check_pos > 0 && btree_path_pos_after_node(path, path->l[l].b))
1081 		return false;
1082 	return true;
1083 }
1084 
1085 static inline bool btree_path_good_node(struct btree_trans *trans,
1086 					struct btree_path *path,
1087 					unsigned l, int check_pos)
1088 {
1089 	return is_btree_node(path, l) &&
1090 		bch2_btree_node_relock(trans, path, l) &&
1091 		btree_path_check_pos_in_node(path, l, check_pos);
1092 }
1093 
1094 static void btree_path_set_level_down(struct btree_trans *trans,
1095 				      struct btree_path *path,
1096 				      unsigned new_level)
1097 {
1098 	unsigned l;
1099 
1100 	path->level = new_level;
1101 
1102 	for (l = path->level + 1; l < BTREE_MAX_DEPTH; l++)
1103 		if (btree_lock_want(path, l) == BTREE_NODE_UNLOCKED)
1104 			btree_node_unlock(trans, path, l);
1105 
1106 	btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
1107 	bch2_btree_path_verify(trans, path);
1108 }
1109 
1110 static noinline unsigned __btree_path_up_until_good_node(struct btree_trans *trans,
1111 							 struct btree_path *path,
1112 							 int check_pos)
1113 {
1114 	unsigned i, l = path->level;
1115 again:
1116 	while (btree_path_node(path, l) &&
1117 	       !btree_path_good_node(trans, path, l, check_pos))
1118 		__btree_path_set_level_up(trans, path, l++);
1119 
1120 	/* If we need intent locks, take them too: */
1121 	for (i = l + 1;
1122 	     i < path->locks_want && btree_path_node(path, i);
1123 	     i++)
1124 		if (!bch2_btree_node_relock(trans, path, i)) {
1125 			while (l <= i)
1126 				__btree_path_set_level_up(trans, path, l++);
1127 			goto again;
1128 		}
1129 
1130 	return l;
1131 }
1132 
1133 static inline unsigned btree_path_up_until_good_node(struct btree_trans *trans,
1134 						     struct btree_path *path,
1135 						     int check_pos)
1136 {
1137 	return likely(btree_node_locked(path, path->level) &&
1138 		      btree_path_check_pos_in_node(path, path->level, check_pos))
1139 		? path->level
1140 		: __btree_path_up_until_good_node(trans, path, check_pos);
1141 }
1142 
1143 /*
1144  * This is the main state machine for walking down the btree - walks down to a
1145  * specified depth
1146  *
1147  * Returns 0 on success, or a negative error code on failure (e.g. a
1148  * transaction restart, or an error reading in a btree node).
1149  *
1150  * On error the path is unlocked and the error is stashed in path->l[path->level].b.
1151  */
1152 int bch2_btree_path_traverse_one(struct btree_trans *trans,
1153 				 btree_path_idx_t path_idx,
1154 				 unsigned flags,
1155 				 unsigned long trace_ip)
1156 {
1157 	struct btree_path *path = &trans->paths[path_idx];
1158 	unsigned depth_want = path->level;
1159 	int ret = -((int) trans->restarted);
1160 
1161 	if (unlikely(ret))
1162 		goto out;
1163 
1164 	if (unlikely(!trans->srcu_held))
1165 		bch2_trans_srcu_lock(trans);
1166 
1167 	trace_btree_path_traverse_start(trans, path);
1168 
1169 	/*
1170 	 * Ensure we obey path->should_be_locked: if it's set, we can't unlock
1171 	 * and re-traverse the path without a transaction restart:
1172 	 */
1173 	if (path->should_be_locked) {
1174 		ret = bch2_btree_path_relock(trans, path, trace_ip);
1175 		goto out;
1176 	}
1177 
1178 	if (path->cached) {
1179 		ret = bch2_btree_path_traverse_cached(trans, path, flags);
1180 		goto out;
1181 	}
1182 
1183 	path = &trans->paths[path_idx];
1184 
1185 	if (unlikely(path->level >= BTREE_MAX_DEPTH))
1186 		goto out_uptodate;
1187 
1188 	path->level = btree_path_up_until_good_node(trans, path, 0);
1189 	unsigned max_level = path->level;
1190 
1191 	EBUG_ON(btree_path_node(path, path->level) &&
1192 		!btree_node_locked(path, path->level));
1193 
1194 	/*
1195 	 * Note: path->l[path->level].b may be temporarily NULL here - that
1196 	 * would indicate to other code that we got to the end of the btree,
1197 	 * here it indicates that relocking the root failed - it's critical that
1198 	 * btree_path_lock_root() comes next and that it can't fail
1199 	 */
1200 	while (path->level > depth_want) {
1201 		ret = btree_path_node(path, path->level)
1202 			? btree_path_down(trans, path, flags, trace_ip)
1203 			: btree_path_lock_root(trans, path, depth_want, trace_ip);
1204 		if (unlikely(ret)) {
1205 			if (ret == 1) {
1206 				/*
1207 				 * No nodes at this level - got to the end of
1208 				 * the btree:
1209 				 */
1210 				ret = 0;
1211 				goto out;
1212 			}
1213 
1214 			__bch2_btree_path_unlock(trans, path);
1215 			path->level = depth_want;
1216 			path->l[path->level].b = ERR_PTR(ret);
1217 			goto out;
1218 		}
1219 	}
1220 
1221 	if (unlikely(max_level > path->level)) {
1222 		struct btree_path *linked;
1223 		unsigned iter;
1224 
1225 		trans_for_each_path_with_node(trans, path_l(path)->b, linked, iter)
1226 			for (unsigned j = path->level + 1; j < max_level; j++)
1227 				linked->l[j] = path->l[j];
1228 	}
1229 
1230 out_uptodate:
1231 	path->uptodate = BTREE_ITER_UPTODATE;
1232 	trace_btree_path_traverse_end(trans, path);
1233 out:
1234 	if (bch2_err_matches(ret, BCH_ERR_transaction_restart) != !!trans->restarted)
1235 		panic("ret %s (%i) trans->restarted %s (%i)\n",
1236 		      bch2_err_str(ret), ret,
1237 		      bch2_err_str(trans->restarted), trans->restarted);
1238 	bch2_btree_path_verify(trans, path);
1239 	return ret;
1240 }
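
/*
 * Usage sketch (hypothetical caller): this is normally reached through the
 * bch2_btree_path_traverse() wrapper, and a transaction restart must be
 * propagated up so the whole transaction can be retried:
 *
 *	ret = bch2_btree_path_traverse(trans, path_idx, flags);
 *	if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
 *		return ret;
 */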
1241 
1242 static inline void btree_path_copy(struct btree_trans *trans, struct btree_path *dst,
1243 			    struct btree_path *src)
1244 {
1245 	unsigned i, offset = offsetof(struct btree_path, pos);
1246 
1247 	memcpy((void *) dst + offset,
1248 	       (void *) src + offset,
1249 	       sizeof(struct btree_path) - offset);
1250 
1251 	for (i = 0; i < BTREE_MAX_DEPTH; i++) {
1252 		unsigned t = btree_node_locked_type(dst, i);
1253 
1254 		if (t != BTREE_NODE_UNLOCKED)
1255 			six_lock_increment(&dst->l[i].b->c.lock, t);
1256 	}
1257 }
1258 
1259 static btree_path_idx_t btree_path_clone(struct btree_trans *trans, btree_path_idx_t src,
1260 					 bool intent, unsigned long ip)
1261 {
1262 	btree_path_idx_t new = btree_path_alloc(trans, src);
1263 	btree_path_copy(trans, trans->paths + new, trans->paths + src);
1264 	__btree_path_get(trans, trans->paths + new, intent);
1265 #ifdef TRACK_PATH_ALLOCATED
1266 	trans->paths[new].ip_allocated = ip;
1267 #endif
1268 	return new;
1269 }
1270 
1271 __flatten
1272 btree_path_idx_t __bch2_btree_path_make_mut(struct btree_trans *trans,
1273 			btree_path_idx_t path, bool intent, unsigned long ip)
1274 {
1275 	struct btree_path *old = trans->paths + path;
1276 	__btree_path_put(trans, trans->paths + path, intent);
1277 	path = btree_path_clone(trans, path, intent, ip);
1278 	trace_btree_path_clone(trans, old, trans->paths + path);
1279 	trans->paths[path].preserve = false;
1280 	return path;
1281 }
1282 
1283 btree_path_idx_t __must_check
1284 __bch2_btree_path_set_pos(struct btree_trans *trans,
1285 			  btree_path_idx_t path_idx, struct bpos new_pos,
1286 			  bool intent, unsigned long ip)
1287 {
1288 	int cmp = bpos_cmp(new_pos, trans->paths[path_idx].pos);
1289 
1290 	bch2_trans_verify_not_unlocked_or_in_restart(trans);
1291 	EBUG_ON(!trans->paths[path_idx].ref);
1292 
1293 	trace_btree_path_set_pos(trans, trans->paths + path_idx, &new_pos);
1294 
1295 	path_idx = bch2_btree_path_make_mut(trans, path_idx, intent, ip);
1296 
1297 	struct btree_path *path = trans->paths + path_idx;
1298 	path->pos		= new_pos;
1299 	trans->paths_sorted	= false;
1300 
1301 	if (unlikely(path->cached)) {
1302 		btree_node_unlock(trans, path, 0);
1303 		path->l[0].b = ERR_PTR(-BCH_ERR_no_btree_node_up);
1304 		btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
1305 		goto out;
1306 	}
1307 
1308 	unsigned level = btree_path_up_until_good_node(trans, path, cmp);
1309 
1310 	if (btree_path_node(path, level)) {
1311 		struct btree_path_level *l = &path->l[level];
1312 
1313 		BUG_ON(!btree_node_locked(path, level));
1314 		/*
1315 		 * We might have to skip over many keys, or just a few: try
1316 		 * advancing the node iterator, and if we have to skip over too
1317 		 * many keys just reinit it (or if we're rewinding, since that
1318 		 * is expensive).
1319 		 */
1320 		if (cmp < 0 ||
1321 		    !btree_path_advance_to_pos(path, l, 8))
1322 			bch2_btree_node_iter_init(&l->iter, l->b, &path->pos);
1323 
1324 		/*
1325 		 * Iterators to interior nodes should always be pointed at the first
1326 		 * non-whiteout:
1327 		 */
1328 		if (unlikely(level))
1329 			bch2_btree_node_iter_peek(&l->iter, l->b);
1330 	}
1331 
1332 	if (unlikely(level != path->level)) {
1333 		btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
1334 		__bch2_btree_path_unlock(trans, path);
1335 	}
1336 out:
1337 	bch2_btree_path_verify(trans, path);
1338 	return path_idx;
1339 }
1340 
1341 /* Btree path: main interface: */
1342 
1343 static struct btree_path *have_path_at_pos(struct btree_trans *trans, struct btree_path *path)
1344 {
1345 	struct btree_path *sib;
1346 
1347 	sib = prev_btree_path(trans, path);
1348 	if (sib && !btree_path_cmp(sib, path))
1349 		return sib;
1350 
1351 	sib = next_btree_path(trans, path);
1352 	if (sib && !btree_path_cmp(sib, path))
1353 		return sib;
1354 
1355 	return NULL;
1356 }
1357 
1358 static struct btree_path *have_node_at_pos(struct btree_trans *trans, struct btree_path *path)
1359 {
1360 	struct btree_path *sib;
1361 
1362 	sib = prev_btree_path(trans, path);
1363 	if (sib && sib->level == path->level && path_l(sib)->b == path_l(path)->b)
1364 		return sib;
1365 
1366 	sib = next_btree_path(trans, path);
1367 	if (sib && sib->level == path->level && path_l(sib)->b == path_l(path)->b)
1368 		return sib;
1369 
1370 	return NULL;
1371 }
1372 
1373 static inline void __bch2_path_free(struct btree_trans *trans, btree_path_idx_t path)
1374 {
1375 	__bch2_btree_path_unlock(trans, trans->paths + path);
1376 	btree_path_list_remove(trans, trans->paths + path);
1377 	__clear_bit(path, trans->paths_allocated);
1378 }
1379 
1380 static bool bch2_btree_path_can_relock(struct btree_trans *trans, struct btree_path *path)
1381 {
1382 	unsigned l = path->level;
1383 
1384 	do {
1385 		if (!btree_path_node(path, l))
1386 			break;
1387 
1388 		if (!is_btree_node(path, l))
1389 			return false;
1390 
1391 		if (path->l[l].lock_seq != path->l[l].b->c.lock.seq)
1392 			return false;
1393 
1394 		l++;
1395 	} while (l < path->locks_want);
1396 
1397 	return true;
1398 }
1399 
1400 void bch2_path_put(struct btree_trans *trans, btree_path_idx_t path_idx, bool intent)
1401 {
1402 	struct btree_path *path = trans->paths + path_idx, *dup;
1403 
1404 	if (!__btree_path_put(trans, path, intent))
1405 		return;
1406 
1407 	dup = path->preserve
1408 		? have_path_at_pos(trans, path)
1409 		: have_node_at_pos(trans, path);
1410 
1411 	trace_btree_path_free(trans, path_idx, dup);
1412 
1413 	if (!dup && !(!path->preserve && !is_btree_node(path, path->level)))
1414 		return;
1415 
1416 	if (path->should_be_locked && !trans->restarted) {
1417 		if (!dup)
1418 			return;
1419 
1420 		if (!(trans->locked
1421 		      ? bch2_btree_path_relock_norestart(trans, dup)
1422 		      : bch2_btree_path_can_relock(trans, dup)))
1423 			return;
1424 	}
1425 
1426 	if (dup) {
1427 		dup->preserve		|= path->preserve;
1428 		dup->should_be_locked	|= path->should_be_locked;
1429 	}
1430 
1431 	__bch2_path_free(trans, path_idx);
1432 }
1433 
1434 static void bch2_path_put_nokeep(struct btree_trans *trans, btree_path_idx_t path,
1435 				 bool intent)
1436 {
1437 	if (!__btree_path_put(trans, trans->paths + path, intent))
1438 		return;
1439 
1440 	__bch2_path_free(trans, path);
1441 }
1442 
1443 void __noreturn bch2_trans_restart_error(struct btree_trans *trans, u32 restart_count)
1444 {
1445 	panic("trans->restart_count %u, should be %u, last restarted by %pS\n",
1446 	      trans->restart_count, restart_count,
1447 	      (void *) trans->last_begin_ip);
1448 }
1449 
1450 static void __noreturn bch2_trans_in_restart_error(struct btree_trans *trans)
1451 {
1452 #ifdef CONFIG_BCACHEFS_DEBUG
1453 	struct printbuf buf = PRINTBUF;
1454 	bch2_prt_backtrace(&buf, &trans->last_restarted_trace);
1455 	panic("in transaction restart: %s, last restarted by\n%s",
1456 	      bch2_err_str(trans->restarted),
1457 	      buf.buf);
1458 #else
1459 	panic("in transaction restart: %s, last restarted by %pS\n",
1460 	      bch2_err_str(trans->restarted),
1461 	      (void *) trans->last_restarted_ip);
1462 #endif
1463 }
1464 
1465 void __noreturn bch2_trans_unlocked_or_in_restart_error(struct btree_trans *trans)
1466 {
1467 	if (trans->restarted)
1468 		bch2_trans_in_restart_error(trans);
1469 
1470 	if (!trans->locked)
1471 		panic("trans should be locked, unlocked by %pS\n",
1472 		      (void *) trans->last_unlock_ip);
1473 
1474 	BUG();
1475 }
1476 
1477 noinline __cold
1478 void bch2_trans_updates_to_text(struct printbuf *buf, struct btree_trans *trans)
1479 {
1480 	prt_printf(buf, "%u transaction updates for %s journal seq %llu\n",
1481 		   trans->nr_updates, trans->fn, trans->journal_res.seq);
1482 	printbuf_indent_add(buf, 2);
1483 
1484 	trans_for_each_update(trans, i) {
1485 		struct bkey_s_c old = { &i->old_k, i->old_v };
1486 
1487 		prt_str(buf, "update: btree=");
1488 		bch2_btree_id_to_text(buf, i->btree_id);
1489 		prt_printf(buf, " cached=%u %pS\n",
1490 			   i->cached,
1491 			   (void *) i->ip_allocated);
1492 
1493 		prt_printf(buf, "  old ");
1494 		bch2_bkey_val_to_text(buf, trans->c, old);
1495 		prt_newline(buf);
1496 
1497 		prt_printf(buf, "  new ");
1498 		bch2_bkey_val_to_text(buf, trans->c, bkey_i_to_s_c(i->k));
1499 		prt_newline(buf);
1500 	}
1501 
1502 	for (struct jset_entry *e = trans->journal_entries;
1503 	     e != btree_trans_journal_entries_top(trans);
1504 	     e = vstruct_next(e))
1505 		bch2_journal_entry_to_text(buf, trans->c, e);
1506 
1507 	printbuf_indent_sub(buf, 2);
1508 }
1509 
1510 noinline __cold
1511 void bch2_dump_trans_updates(struct btree_trans *trans)
1512 {
1513 	struct printbuf buf = PRINTBUF;
1514 
1515 	bch2_trans_updates_to_text(&buf, trans);
1516 	bch2_print_str(trans->c, buf.buf);
1517 	printbuf_exit(&buf);
1518 }
1519 
1520 static void bch2_btree_path_to_text_short(struct printbuf *out, struct btree_trans *trans, btree_path_idx_t path_idx)
1521 {
1522 	struct btree_path *path = trans->paths + path_idx;
1523 
1524 	prt_printf(out, "path: idx %3u ref %u:%u %c %c %c ",
1525 		   path_idx, path->ref, path->intent_ref,
1526 		   path->preserve ? 'P' : ' ',
1527 		   path->should_be_locked ? 'S' : ' ',
1528 		   path->cached ? 'C' : 'B');
1529 	bch2_btree_id_level_to_text(out, path->btree_id, path->level);
1530 	prt_str(out, " pos ");
1531 	bch2_bpos_to_text(out, path->pos);
1532 
1533 	if (!path->cached && btree_node_locked(path, path->level)) {
1534 		prt_char(out, ' ');
1535 		struct btree *b = path_l(path)->b;
1536 		bch2_bpos_to_text(out, b->data->min_key);
1537 		prt_char(out, '-');
1538 		bch2_bpos_to_text(out, b->key.k.p);
1539 	}
1540 
1541 #ifdef TRACK_PATH_ALLOCATED
1542 	prt_printf(out, " %pS", (void *) path->ip_allocated);
1543 #endif
1544 }
1545 
1546 static const char *btree_node_locked_str(enum btree_node_locked_type t)
1547 {
1548 	switch (t) {
1549 	case BTREE_NODE_UNLOCKED:
1550 		return "unlocked";
1551 	case BTREE_NODE_READ_LOCKED:
1552 		return "read";
1553 	case BTREE_NODE_INTENT_LOCKED:
1554 		return "intent";
1555 	case BTREE_NODE_WRITE_LOCKED:
1556 		return "write";
1557 	default:
1558 		return NULL;
1559 	}
1560 }
1561 
1562 void bch2_btree_path_to_text(struct printbuf *out, struct btree_trans *trans, btree_path_idx_t path_idx)
1563 {
1564 	bch2_btree_path_to_text_short(out, trans, path_idx);
1565 
1566 	struct btree_path *path = trans->paths + path_idx;
1567 
1568 	prt_printf(out, " uptodate %u locks_want %u", path->uptodate, path->locks_want);
1569 	prt_newline(out);
1570 
1571 	printbuf_indent_add(out, 2);
1572 	for (unsigned l = 0; l < BTREE_MAX_DEPTH; l++) {
1573 		prt_printf(out, "l=%u locks %s seq %u node ", l,
1574 			   btree_node_locked_str(btree_node_locked_type(path, l)),
1575 			   path->l[l].lock_seq);
1576 
1577 		int ret = PTR_ERR_OR_ZERO(path->l[l].b);
1578 		if (ret)
1579 			prt_str(out, bch2_err_str(ret));
1580 		else
1581 			prt_printf(out, "%px", path->l[l].b);
1582 		prt_newline(out);
1583 	}
1584 	printbuf_indent_sub(out, 2);
1585 }
1586 
1587 static noinline __cold
1588 void __bch2_trans_paths_to_text(struct printbuf *out, struct btree_trans *trans,
1589 				bool nosort)
1590 {
1591 	struct trans_for_each_path_inorder_iter iter;
1592 
1593 	if (!nosort)
1594 		btree_trans_sort_paths(trans);
1595 
1596 	trans_for_each_path_idx_inorder(trans, iter) {
1597 		bch2_btree_path_to_text_short(out, trans, iter.path_idx);
1598 		prt_newline(out);
1599 	}
1600 }
1601 
1602 noinline __cold
1603 void bch2_trans_paths_to_text(struct printbuf *out, struct btree_trans *trans)
1604 {
1605 	__bch2_trans_paths_to_text(out, trans, false);
1606 }
1607 
1608 static noinline __cold
1609 void __bch2_dump_trans_paths_updates(struct btree_trans *trans, bool nosort)
1610 {
1611 	struct printbuf buf = PRINTBUF;
1612 
1613 	__bch2_trans_paths_to_text(&buf, trans, nosort);
1614 	bch2_trans_updates_to_text(&buf, trans);
1615 
1616 	bch2_print_str(trans->c, buf.buf);
1617 	printbuf_exit(&buf);
1618 }
1619 
1620 noinline __cold
1621 void bch2_dump_trans_paths_updates(struct btree_trans *trans)
1622 {
1623 	__bch2_dump_trans_paths_updates(trans, false);
1624 }
1625 
1626 noinline __cold
1627 static void bch2_trans_update_max_paths(struct btree_trans *trans)
1628 {
1629 	struct btree_transaction_stats *s = btree_trans_stats(trans);
1630 	struct printbuf buf = PRINTBUF;
1631 	size_t nr = bitmap_weight(trans->paths_allocated, trans->nr_paths);
1632 
1633 	bch2_trans_paths_to_text(&buf, trans);
1634 
1635 	if (!buf.allocation_failure) {
1636 		mutex_lock(&s->lock);
1637 		if (nr > s->nr_max_paths) {
1638 			s->nr_max_paths = nr;
1639 			swap(s->max_paths_text, buf.buf);
1640 		}
1641 		mutex_unlock(&s->lock);
1642 	}
1643 
1644 	printbuf_exit(&buf);
1645 
1646 	trans->nr_paths_max = nr;
1647 }
1648 
1649 noinline __cold
1650 int __bch2_btree_trans_too_many_iters(struct btree_trans *trans)
1651 {
1652 	if (trace_trans_restart_too_many_iters_enabled()) {
1653 		struct printbuf buf = PRINTBUF;
1654 
1655 		bch2_trans_paths_to_text(&buf, trans);
1656 		trace_trans_restart_too_many_iters(trans, _THIS_IP_, buf.buf);
1657 		printbuf_exit(&buf);
1658 	}
1659 
1660 	count_event(trans->c, trans_restart_too_many_iters);
1661 
1662 	return btree_trans_restart(trans, BCH_ERR_transaction_restart_too_many_iters);
1663 }
1664 
1665 static noinline void btree_path_overflow(struct btree_trans *trans)
1666 {
1667 	bch2_dump_trans_paths_updates(trans);
1668 	bch_err(trans->c, "trans path overflow");
1669 }
1670 
1671 static noinline void btree_paths_realloc(struct btree_trans *trans)
1672 {
1673 	unsigned nr = trans->nr_paths * 2;
1674 
1675 	void *p = kvzalloc(BITS_TO_LONGS(nr) * sizeof(unsigned long) +
1676 			  sizeof(struct btree_trans_paths) +
1677 			  nr * sizeof(struct btree_path) +
1678 			  nr * sizeof(btree_path_idx_t) + 8 +
1679 			  nr * sizeof(struct btree_insert_entry), GFP_KERNEL|__GFP_NOFAIL);
1680 
1681 	unsigned long *paths_allocated = p;
1682 	memcpy(paths_allocated, trans->paths_allocated, BITS_TO_LONGS(trans->nr_paths) * sizeof(unsigned long));
1683 	p += BITS_TO_LONGS(nr) * sizeof(unsigned long);
1684 
1685 	p += sizeof(struct btree_trans_paths);
1686 	struct btree_path *paths = p;
1687 	*trans_paths_nr(paths) = nr;
1688 	memcpy(paths, trans->paths, trans->nr_paths * sizeof(struct btree_path));
1689 	p += nr * sizeof(struct btree_path);
1690 
1691 	btree_path_idx_t *sorted = p;
1692 	memcpy(sorted, trans->sorted, trans->nr_sorted * sizeof(btree_path_idx_t));
1693 	p += nr * sizeof(btree_path_idx_t) + 8;
1694 
1695 	struct btree_insert_entry *updates = p;
1696 	memcpy(updates, trans->updates, trans->nr_paths * sizeof(struct btree_insert_entry));
1697 
1698 	unsigned long *old = trans->paths_allocated;
1699 
1700 	rcu_assign_pointer(trans->paths_allocated,	paths_allocated);
1701 	rcu_assign_pointer(trans->paths,		paths);
1702 	rcu_assign_pointer(trans->sorted,		sorted);
1703 	rcu_assign_pointer(trans->updates,		updates);
1704 
1705 	trans->nr_paths		= nr;
1706 
1707 	if (old != trans->_paths_allocated)
1708 		kfree_rcu_mightsleep(old);
1709 }
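
/*
 * Resulting layout of the single allocation above (sketch):
 *
 *	paths_allocated bitmap | struct btree_trans_paths | paths[nr]
 *	| sorted[nr] (+ 8 bytes of slack) | updates[nr]
 *
 * matching, term for term, the size computed for the kvzalloc() call.
 */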
1710 
1711 static inline btree_path_idx_t btree_path_alloc(struct btree_trans *trans,
1712 						btree_path_idx_t pos)
1713 {
1714 	btree_path_idx_t idx = find_first_zero_bit(trans->paths_allocated, trans->nr_paths);
1715 
1716 	if (unlikely(idx == trans->nr_paths)) {
1717 		if (trans->nr_paths == BTREE_ITER_MAX) {
1718 			btree_path_overflow(trans);
1719 			return 0;
1720 		}
1721 
1722 		btree_paths_realloc(trans);
1723 	}
1724 
1725 	/*
1726 	 * Do this before marking the new path as allocated, since it won't be
1727 	 * initialized yet:
1728 	 */
1729 	if (unlikely(idx > trans->nr_paths_max))
1730 		bch2_trans_update_max_paths(trans);
1731 
1732 	__set_bit(idx, trans->paths_allocated);
1733 
1734 	struct btree_path *path = &trans->paths[idx];
1735 	path->ref		= 0;
1736 	path->intent_ref	= 0;
1737 	path->nodes_locked	= 0;
1738 
1739 	btree_path_list_add(trans, pos, idx);
1740 	trans->paths_sorted = false;
1741 	return idx;
1742 }
1743 
1744 btree_path_idx_t bch2_path_get(struct btree_trans *trans,
1745 			     enum btree_id btree_id, struct bpos pos,
1746 			     unsigned locks_want, unsigned level,
1747 			     unsigned flags, unsigned long ip)
1748 {
1749 	struct btree_path *path;
1750 	bool cached = flags & BTREE_ITER_cached;
1751 	bool intent = flags & BTREE_ITER_intent;
1752 	struct trans_for_each_path_inorder_iter iter;
1753 	btree_path_idx_t path_pos = 0, path_idx;
1754 
1755 	bch2_trans_verify_not_unlocked_or_in_restart(trans);
1756 	bch2_trans_verify_locks(trans);
1757 
1758 	btree_trans_sort_paths(trans);
1759 
1760 	trans_for_each_path_inorder(trans, path, iter) {
1761 		if (__btree_path_cmp(path,
1762 				     btree_id,
1763 				     cached,
1764 				     pos,
1765 				     level) > 0)
1766 			break;
1767 
1768 		path_pos = iter.path_idx;
1769 	}
1770 
1771 	if (path_pos &&
1772 	    trans->paths[path_pos].cached	== cached &&
1773 	    trans->paths[path_pos].btree_id	== btree_id &&
1774 	    trans->paths[path_pos].level	== level) {
1775 		trace_btree_path_get(trans, trans->paths + path_pos, &pos);
1776 
1777 		__btree_path_get(trans, trans->paths + path_pos, intent);
1778 		path_idx = bch2_btree_path_set_pos(trans, path_pos, pos, intent, ip);
1779 		path = trans->paths + path_idx;
1780 	} else {
1781 		path_idx = btree_path_alloc(trans, path_pos);
1782 		path = trans->paths + path_idx;
1783 
1784 		__btree_path_get(trans, path, intent);
1785 		path->pos			= pos;
1786 		path->btree_id			= btree_id;
1787 		path->cached			= cached;
1788 		path->uptodate			= BTREE_ITER_NEED_TRAVERSE;
1789 		path->should_be_locked		= false;
1790 		path->level			= level;
1791 		path->locks_want		= locks_want;
1792 		path->nodes_locked		= 0;
1793 		for (unsigned i = 0; i < ARRAY_SIZE(path->l); i++)
1794 			path->l[i].b		= ERR_PTR(-BCH_ERR_no_btree_node_init);
1795 #ifdef TRACK_PATH_ALLOCATED
1796 		path->ip_allocated		= ip;
1797 #endif
1798 		trans->paths_sorted		= false;
1799 
1800 		trace_btree_path_alloc(trans, path);
1801 	}
1802 
1803 	if (!(flags & BTREE_ITER_nopreserve))
1804 		path->preserve = true;
1805 
1806 	if (path->intent_ref)
1807 		locks_want = max(locks_want, level + 1);
1808 
1809 	/*
1810 	 * If the path has locks_want greater than requested, we don't downgrade
1811 	 * it here - on transaction restart because btree node split needs to
1812 	 * upgrade locks, we might be putting/getting the iterator again.
1813 	 * Downgrading iterators only happens via bch2_trans_downgrade(), after
1814 	 * a successful transaction commit.
1815 	 */
1816 
1817 	locks_want = min(locks_want, BTREE_MAX_DEPTH);
1818 	if (locks_want > path->locks_want)
1819 		bch2_btree_path_upgrade_noupgrade_sibs(trans, path, locks_want, NULL);
1820 
1821 	return path_idx;
1822 }
1823 
1824 btree_path_idx_t bch2_path_get_unlocked_mut(struct btree_trans *trans,
1825 					    enum btree_id btree_id,
1826 					    unsigned level,
1827 					    struct bpos pos)
1828 {
1829 	btree_path_idx_t path_idx = bch2_path_get(trans, btree_id, pos, level + 1, level,
1830 			     BTREE_ITER_nopreserve|
1831 			     BTREE_ITER_intent, _RET_IP_);
1832 	path_idx = bch2_btree_path_make_mut(trans, path_idx, true, _RET_IP_);
1833 
1834 	struct btree_path *path = trans->paths + path_idx;
1835 	bch2_btree_path_downgrade(trans, path);
1836 	__bch2_btree_path_unlock(trans, path);
1837 	return path_idx;
1838 }
1839 
1840 struct bkey_s_c bch2_btree_path_peek_slot(struct btree_path *path, struct bkey *u)
1841 {
1843 	struct btree_path_level *l = path_l(path);
1844 	struct bkey_packed *_k;
1845 	struct bkey_s_c k;
1846 
1847 	if (unlikely(!l->b))
1848 		return bkey_s_c_null;
1849 
1850 	EBUG_ON(path->uptodate != BTREE_ITER_UPTODATE);
1851 	EBUG_ON(!btree_node_locked(path, path->level));
1852 
1853 	if (!path->cached) {
1854 		_k = bch2_btree_node_iter_peek_all(&l->iter, l->b);
1855 		k = _k ? bkey_disassemble(l->b, _k, u) : bkey_s_c_null;
1856 
1857 		EBUG_ON(k.k && bkey_deleted(k.k) && bpos_eq(k.k->p, path->pos));
1858 
1859 		if (!k.k || !bpos_eq(path->pos, k.k->p))
1860 			goto hole;
1861 	} else {
1862 		struct bkey_cached *ck = (void *) path->l[0].b;
1863 		if (!ck)
1864 			return bkey_s_c_null;
1865 
1866 		EBUG_ON(path->btree_id != ck->key.btree_id ||
1867 			!bkey_eq(path->pos, ck->key.pos));
1868 
1869 		*u = ck->k->k;
1870 		k = (struct bkey_s_c) { u, &ck->k->v };
1871 	}
1872 
1873 	return k;
1874 hole:
1875 	bkey_init(u);
1876 	u->p = path->pos;
1877 	return (struct bkey_s_c) { u, NULL };
1878 }
1879 
1880 void bch2_set_btree_iter_dontneed(struct btree_iter *iter)
1881 {
1882 	struct btree_trans *trans = iter->trans;
1883 
1884 	if (!iter->path || trans->restarted)
1885 		return;
1886 
1887 	struct btree_path *path = btree_iter_path(trans, iter);
1888 	path->preserve		= false;
1889 	if (path->ref == 1)
1890 		path->should_be_locked	= false;
1891 }

1892 /* Btree iterators: */
1893 
1894 int __must_check
1895 __bch2_btree_iter_traverse(struct btree_iter *iter)
1896 {
1897 	return bch2_btree_path_traverse(iter->trans, iter->path, iter->flags);
1898 }
1899 
1900 int __must_check
1901 bch2_btree_iter_traverse(struct btree_iter *iter)
1902 {
1903 	struct btree_trans *trans = iter->trans;
1904 	int ret;
1905 
1906 	bch2_trans_verify_not_unlocked_or_in_restart(trans);
1907 
1908 	iter->path = bch2_btree_path_set_pos(trans, iter->path,
1909 					btree_iter_search_key(iter),
1910 					iter->flags & BTREE_ITER_intent,
1911 					btree_iter_ip_allocated(iter));
1912 
1913 	ret = bch2_btree_path_traverse(iter->trans, iter->path, iter->flags);
1914 	if (ret)
1915 		return ret;
1916 
1917 	struct btree_path *path = btree_iter_path(trans, iter);
1918 	if (btree_path_node(path, path->level))
1919 		btree_path_set_should_be_locked(trans, path);
1920 	return 0;
1921 }
1922 
1923 /* Iterate across nodes (leaf and interior nodes) */
1924 
1925 struct btree *bch2_btree_iter_peek_node(struct btree_iter *iter)
1926 {
1927 	struct btree_trans *trans = iter->trans;
1928 	struct btree *b = NULL;
1929 	int ret;
1930 
1931 	EBUG_ON(trans->paths[iter->path].cached);
1932 	bch2_btree_iter_verify(iter);
1933 
1934 	ret = bch2_btree_path_traverse(trans, iter->path, iter->flags);
1935 	if (ret)
1936 		goto err;
1937 
1938 	struct btree_path *path = btree_iter_path(trans, iter);
1939 	b = btree_path_node(path, path->level);
1940 	if (!b)
1941 		goto out;
1942 
1943 	BUG_ON(bpos_lt(b->key.k.p, iter->pos));
1944 
1945 	bkey_init(&iter->k);
1946 	iter->k.p = iter->pos = b->key.k.p;
1947 
1948 	iter->path = bch2_btree_path_set_pos(trans, iter->path, b->key.k.p,
1949 					iter->flags & BTREE_ITER_intent,
1950 					btree_iter_ip_allocated(iter));
1951 	btree_path_set_should_be_locked(trans, btree_iter_path(trans, iter));
1952 out:
1953 	bch2_btree_iter_verify_entry_exit(iter);
1954 	bch2_btree_iter_verify(iter);
1955 
1956 	return b;
1957 err:
1958 	b = ERR_PTR(ret);
1959 	goto out;
1960 }
1961 
1962 /* Only kept for -tools */
1963 struct btree *bch2_btree_iter_peek_node_and_restart(struct btree_iter *iter)
1964 {
1965 	struct btree *b;
1966 
1967 	while (b = bch2_btree_iter_peek_node(iter),
1968 	       bch2_err_matches(PTR_ERR_OR_ZERO(b), BCH_ERR_transaction_restart))
1969 		bch2_trans_begin(iter->trans);
1970 
1971 	return b;
1972 }
1973 
1974 struct btree *bch2_btree_iter_next_node(struct btree_iter *iter)
1975 {
1976 	struct btree_trans *trans = iter->trans;
1977 	struct btree *b = NULL;
1978 	int ret;
1979 
1980 	EBUG_ON(trans->paths[iter->path].cached);
1981 	bch2_trans_verify_not_unlocked_or_in_restart(trans);
1982 	bch2_btree_iter_verify(iter);
1983 
1984 	ret = bch2_btree_path_traverse(trans, iter->path, iter->flags);
1985 	if (ret)
1986 		goto err;
1987 
1988 
1990 
1991 	/* already at end? */
1992 	if (!btree_path_node(path, path->level))
1993 		return NULL;
1994 
1995 	/* no parent node - we've reached the end of the btree? */
1996 	if (!btree_path_node(path, path->level + 1)) {
1997 		btree_path_set_level_up(trans, path);
1998 		return NULL;
1999 	}
2000 
2001 	if (!bch2_btree_node_relock(trans, path, path->level + 1)) {
2002 		__bch2_btree_path_unlock(trans, path);
2003 		path->l[path->level].b		= ERR_PTR(-BCH_ERR_no_btree_node_relock);
2004 		path->l[path->level + 1].b	= ERR_PTR(-BCH_ERR_no_btree_node_relock);
2005 		btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
2006 		trace_and_count(trans->c, trans_restart_relock_next_node, trans, _THIS_IP_, path);
2007 		ret = btree_trans_restart(trans, BCH_ERR_transaction_restart_relock);
2008 		goto err;
2009 	}
2010 
2011 	b = btree_path_node(path, path->level + 1);
2012 
2013 	if (bpos_eq(iter->pos, b->key.k.p)) {
2014 		__btree_path_set_level_up(trans, path, path->level++);
2015 	} else {
2016 		if (btree_lock_want(path, path->level + 1) == BTREE_NODE_UNLOCKED)
2017 			btree_node_unlock(trans, path, path->level + 1);
2018 
2019 		/*
2020 		 * Haven't gotten to the end of the parent node: go back down to
2021 		 * the next child node
2022 		 */
2023 		iter->path = bch2_btree_path_set_pos(trans, iter->path,
2024 					bpos_successor(iter->pos),
2025 					iter->flags & BTREE_ITER_intent,
2026 					btree_iter_ip_allocated(iter));
2027 
2028 		path = btree_iter_path(trans, iter);
2029 		btree_path_set_level_down(trans, path, iter->min_depth);
2030 
2031 		ret = bch2_btree_path_traverse(trans, iter->path, iter->flags);
2032 		if (ret)
2033 			goto err;
2034 
2035 		path = btree_iter_path(trans, iter);
2036 		b = path->l[path->level].b;
2037 	}
2038 
2039 	bkey_init(&iter->k);
2040 	iter->k.p = iter->pos = b->key.k.p;
2041 
2042 	iter->path = bch2_btree_path_set_pos(trans, iter->path, b->key.k.p,
2043 					iter->flags & BTREE_ITER_intent,
2044 					btree_iter_ip_allocated(iter));
2045 	btree_path_set_should_be_locked(trans, btree_iter_path(trans, iter));
2046 	EBUG_ON(btree_iter_path(trans, iter)->uptodate);
2047 out:
2048 	bch2_btree_iter_verify_entry_exit(iter);
2049 	bch2_btree_iter_verify(iter);
2050 
2051 	return b;
2052 err:
2053 	b = ERR_PTR(ret);
2054 	goto out;
2055 }
2056 
2057 /* Iterate across keys (in leaf nodes only) */
2058 
2059 inline bool bch2_btree_iter_advance(struct btree_iter *iter)
2060 {
2061 	struct bpos pos = iter->k.p;
2062 	bool ret = !(iter->flags & BTREE_ITER_all_snapshots
2063 		     ? bpos_eq(pos, SPOS_MAX)
2064 		     : bkey_eq(pos, SPOS_MAX));
2065 
2066 	if (ret && !(iter->flags & BTREE_ITER_is_extents))
2067 		pos = bkey_successor(iter, pos);
2068 	bch2_btree_iter_set_pos(iter, pos);
2069 	return ret;
2070 }
2071 
2072 inline bool bch2_btree_iter_rewind(struct btree_iter *iter)
2073 {
2074 	struct bpos pos = bkey_start_pos(&iter->k);
2075 	bool ret = !(iter->flags & BTREE_ITER_all_snapshots
2076 		     ? bpos_eq(pos, POS_MIN)
2077 		     : bkey_eq(pos, POS_MIN));
2078 
2079 	if (ret && !(iter->flags & BTREE_ITER_is_extents))
2080 		pos = bkey_predecessor(iter, pos);
2081 	bch2_btree_iter_set_pos(iter, pos);
2082 	return ret;
2083 }
2084 
2085 static noinline
2086 void bch2_btree_trans_peek_prev_updates(struct btree_trans *trans, struct btree_iter *iter,
2087 					struct bkey_s_c *k)
2088 {
2089 	struct bpos end = path_l(btree_iter_path(trans, iter))->b->data->min_key;
2090 
2091 	trans_for_each_update(trans, i)
2092 		if (!i->key_cache_already_flushed &&
2093 		    i->btree_id == iter->btree_id &&
2094 		    bpos_le(i->k->k.p, iter->pos) &&
2095 		    bpos_ge(i->k->k.p, k->k ? k->k->p : end)) {
2096 			iter->k = i->k->k;
2097 			*k = bkey_i_to_s_c(i->k);
2098 		}
2099 }
2100 
2101 static noinline
2102 void bch2_btree_trans_peek_updates(struct btree_trans *trans, struct btree_iter *iter,
2103 				   struct bkey_s_c *k)
2104 {
2105 	struct btree_path *path = btree_iter_path(trans, iter);
2106 	struct bpos end = path_l(path)->b->key.k.p;
2107 
2108 	trans_for_each_update(trans, i)
2109 		if (!i->key_cache_already_flushed &&
2110 		    i->btree_id == iter->btree_id &&
2111 		    bpos_ge(i->k->k.p, path->pos) &&
2112 		    bpos_le(i->k->k.p, k->k ? k->k->p : end)) {
2113 			iter->k = i->k->k;
2114 			*k = bkey_i_to_s_c(i->k);
2115 		}
2116 }
2117 
2118 static noinline
2119 void bch2_btree_trans_peek_slot_updates(struct btree_trans *trans, struct btree_iter *iter,
2120 					struct bkey_s_c *k)
2121 {
2122 	trans_for_each_update(trans, i)
2123 		if (!i->key_cache_already_flushed &&
2124 		    i->btree_id == iter->btree_id &&
2125 		    bpos_eq(i->k->k.p, iter->pos)) {
2126 			iter->k = i->k->k;
2127 			*k = bkey_i_to_s_c(i->k);
2128 		}
2129 }
2130 
2131 static struct bkey_i *bch2_btree_journal_peek(struct btree_trans *trans,
2132 					      struct btree_iter *iter,
2133 					      struct bpos end_pos)
2134 {
2135 	struct btree_path *path = btree_iter_path(trans, iter);
2136 
2137 	return bch2_journal_keys_peek_max(trans->c, iter->btree_id,
2138 					   path->level,
2139 					   path->pos,
2140 					   end_pos,
2141 					   &iter->journal_idx);
2142 }
2143 
2144 static noinline
2145 struct bkey_s_c btree_trans_peek_slot_journal(struct btree_trans *trans,
2146 					      struct btree_iter *iter)
2147 {
2148 	struct btree_path *path = btree_iter_path(trans, iter);
2149 	struct bkey_i *k = bch2_btree_journal_peek(trans, iter, path->pos);
2150 
2151 	if (k) {
2152 		iter->k = k->k;
2153 		return bkey_i_to_s_c(k);
2154 	} else {
2155 		return bkey_s_c_null;
2156 	}
2157 }
2158 
2159 static noinline
2160 void btree_trans_peek_journal(struct btree_trans *trans,
2161 			      struct btree_iter *iter,
2162 			      struct bkey_s_c *k)
2163 {
2164 	struct btree_path *path = btree_iter_path(trans, iter);
2165 	struct bkey_i *next_journal =
2166 		bch2_btree_journal_peek(trans, iter,
2167 				k->k ? k->k->p : path_l(path)->b->key.k.p);
2168 	if (next_journal) {
2169 		iter->k = next_journal->k;
2170 		*k = bkey_i_to_s_c(next_journal);
2171 	}
2172 }
2173 
2174 static struct bkey_i *bch2_btree_journal_peek_prev(struct btree_trans *trans,
2175 					      struct btree_iter *iter,
2176 					      struct bpos end_pos)
2177 {
2178 	struct btree_path *path = btree_iter_path(trans, iter);
2179 
2180 	return bch2_journal_keys_peek_prev_min(trans->c, iter->btree_id,
2181 					   path->level,
2182 					   path->pos,
2183 					   end_pos,
2184 					   &iter->journal_idx);
2185 }
2186 
2187 static noinline
2188 void btree_trans_peek_prev_journal(struct btree_trans *trans,
2189 				   struct btree_iter *iter,
2190 				   struct bkey_s_c *k)
2191 {
2192 	struct btree_path *path = btree_iter_path(trans, iter);
2193 	struct bkey_i *next_journal =
2194 		bch2_btree_journal_peek_prev(trans, iter,
2195 				k->k ? k->k->p : path_l(path)->b->key.k.p);
2196 
2197 	if (next_journal) {
2198 		iter->k = next_journal->k;
2199 		*k = bkey_i_to_s_c(next_journal);
2200 	}
2201 }
2202 
2203 /*
2204  * Checks the btree key cache for a key at @pos and returns it if present, or
2205  * bkey_s_c_null:
2206  */
2207 static noinline
2208 struct bkey_s_c btree_trans_peek_key_cache(struct btree_iter *iter, struct bpos pos)
2209 {
2210 	struct btree_trans *trans = iter->trans;
2211 	struct bch_fs *c = trans->c;
2212 	struct bkey u;
2213 	struct bkey_s_c k;
2214 	int ret;
2215 
2216 	bch2_trans_verify_not_unlocked_or_in_restart(trans);
2217 
2218 	if ((iter->flags & BTREE_ITER_key_cache_fill) &&
2219 	    bpos_eq(iter->pos, pos))
2220 		return bkey_s_c_null;
2221 
2222 	if (!bch2_btree_key_cache_find(c, iter->btree_id, pos))
2223 		return bkey_s_c_null;
2224 
2225 	if (!iter->key_cache_path)
2226 		iter->key_cache_path = bch2_path_get(trans, iter->btree_id, pos,
2227 						     iter->flags & BTREE_ITER_intent, 0,
2228 						     iter->flags|BTREE_ITER_cached|
2229 						     BTREE_ITER_cached_nofill,
2230 						     _THIS_IP_);
2231 
2232 	iter->key_cache_path = bch2_btree_path_set_pos(trans, iter->key_cache_path, pos,
2233 					iter->flags & BTREE_ITER_intent,
2234 					btree_iter_ip_allocated(iter));
2235 
2236 	ret =   bch2_btree_path_traverse(trans, iter->key_cache_path,
2237 					 iter->flags|BTREE_ITER_cached) ?:
2238 		bch2_btree_path_relock(trans, btree_iter_path(trans, iter), _THIS_IP_);
2239 	if (unlikely(ret))
2240 		return bkey_s_c_err(ret);
2241 
2242 	btree_path_set_should_be_locked(trans, trans->paths + iter->key_cache_path);
2243 
2244 	k = bch2_btree_path_peek_slot(trans->paths + iter->key_cache_path, &u);
2245 	if (!k.k)
2246 		return k;
2247 
2248 	if ((iter->flags & BTREE_ITER_all_snapshots) &&
2249 	    !bpos_eq(pos, k.k->p))
2250 		return bkey_s_c_null;
2251 
2252 	iter->k = u;
2253 	k.k = &iter->k;
2254 	return k;
2255 }
2256 
2257 static struct bkey_s_c __bch2_btree_iter_peek(struct btree_iter *iter, struct bpos search_key)
2258 {
2259 	struct btree_trans *trans = iter->trans;
2260 	struct bkey_s_c k, k2;
2261 	int ret;
2262 
2263 	EBUG_ON(btree_iter_path(trans, iter)->cached);
2264 	bch2_btree_iter_verify(iter);
2265 
2266 	while (1) {
2267 		iter->path = bch2_btree_path_set_pos(trans, iter->path, search_key,
2268 					iter->flags & BTREE_ITER_intent,
2269 					btree_iter_ip_allocated(iter));
2270 
2271 		ret = bch2_btree_path_traverse(trans, iter->path, iter->flags);
2272 		if (unlikely(ret)) {
2273 			/* ensure that iter->k is consistent with iter->pos: */
2274 			bch2_btree_iter_set_pos(iter, iter->pos);
2275 			k = bkey_s_c_err(ret);
2276 			break;
2277 		}
2278 
2279 		struct btree_path *path = btree_iter_path(trans, iter);
2280 		struct btree_path_level *l = path_l(path);
2281 
2282 		if (unlikely(!l->b)) {
2283 			/* No btree nodes at requested level: */
2284 			bch2_btree_iter_set_pos(iter, SPOS_MAX);
2285 			k = bkey_s_c_null;
2286 			break;
2287 		}
2288 
2289 		btree_path_set_should_be_locked(trans, path);
2290 
2291 		k = btree_path_level_peek_all(trans->c, l, &iter->k);
2292 
2293 		if (unlikely(iter->flags & BTREE_ITER_with_key_cache) &&
2294 		    k.k &&
2295 		    (k2 = btree_trans_peek_key_cache(iter, k.k->p)).k) {
2296 			k = k2;
2297 			if (bkey_err(k)) {
2298 				bch2_btree_iter_set_pos(iter, iter->pos);
2299 				break;
2300 			}
2301 		}
2302 
2303 		if (unlikely(iter->flags & BTREE_ITER_with_journal))
2304 			btree_trans_peek_journal(trans, iter, &k);
2305 
2306 		if (unlikely((iter->flags & BTREE_ITER_with_updates) &&
2307 			     trans->nr_updates))
2308 			bch2_btree_trans_peek_updates(trans, iter, &k);
2309 
2310 		if (k.k && bkey_deleted(k.k)) {
2311 			/*
2312 			 * If we've got a whiteout, and it's after the search
2313 			 * key, advance the search key to the whiteout instead
2314 			 * of just after the whiteout - it might be a btree
2315 			 * whiteout, with a real key at the same position, since
2316 			 * in the btree, deleted keys sort before non-deleted keys.
2317 			 */
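			/*
			 * e.g. (illustrative): searching from 5 and finding a
			 * whiteout at 7, we re-search at 7 rather than at 8 -
			 * if a real key also exists at 7, jumping straight to
			 * 8 would skip it.
			 */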
2318 			search_key = !bpos_eq(search_key, k.k->p)
2319 				? k.k->p
2320 				: bpos_successor(k.k->p);
2321 			continue;
2322 		}
2323 
2324 		if (likely(k.k)) {
2325 			break;
2326 		} else if (likely(!bpos_eq(l->b->key.k.p, SPOS_MAX))) {
2327 			/* Advance to next leaf node: */
2328 			search_key = bpos_successor(l->b->key.k.p);
2329 		} else {
2330 			/* End of btree: */
2331 			bch2_btree_iter_set_pos(iter, SPOS_MAX);
2332 			k = bkey_s_c_null;
2333 			break;
2334 		}
2335 	}
2336 
2337 	bch2_btree_iter_verify(iter);
2338 	return k;
2339 }
2340 
2341 /**
2342  * bch2_btree_iter_peek_max() - returns first key greater than or equal to
2343  * iterator's current position
2344  * @iter:	iterator to peek from
2345  * @end:	search limit: returns keys less than or equal to @end
2346  *
2347  * Returns:	key if found, or an error extractable with bkey_err().
2348  */
2349 struct bkey_s_c bch2_btree_iter_peek_max(struct btree_iter *iter, struct bpos end)
2350 {
2351 	struct btree_trans *trans = iter->trans;
2352 	struct bpos search_key = btree_iter_search_key(iter);
2353 	struct bkey_s_c k;
2354 	struct bpos iter_pos = iter->pos;
2355 	int ret;
2356 
2357 	bch2_trans_verify_not_unlocked_or_in_restart(trans);
2358 	bch2_btree_iter_verify_entry_exit(iter);
2359 	EBUG_ON((iter->flags & BTREE_ITER_filter_snapshots) && bkey_eq(end, POS_MAX));
2360 
2361 	if (iter->update_path) {
2362 		bch2_path_put_nokeep(trans, iter->update_path,
2363 				     iter->flags & BTREE_ITER_intent);
2364 		iter->update_path = 0;
2365 	}
2366 
2367 	while (1) {
2368 		k = __bch2_btree_iter_peek(iter, search_key);
2369 		if (unlikely(!k.k))
2370 			goto end;
2371 		if (unlikely(bkey_err(k)))
2372 			goto out_no_locked;
2373 
2374 		if (iter->flags & BTREE_ITER_filter_snapshots) {
2375 			/*
2376 			 * We need to check against @end before FILTER_SNAPSHOTS because
2377 			 * if we get to a different inode than requested, we might be
2378 			 * seeing keys for a different snapshot tree that will all be
2379 			 * filtered out.
2380 			 *
2381 			 * But we can't do the full check here, because bkey_start_pos()
2382 			 * isn't monotonically increasing before FILTER_SNAPSHOTS, and
2383 			 * that's what we check against in extents mode:
2384 			 */
2385 			if (unlikely(!(iter->flags & BTREE_ITER_is_extents)
2386 				     ? bkey_gt(k.k->p, end)
2387 				     : k.k->p.inode > end.inode))
2388 				goto end;
2389 
2390 			if (iter->update_path &&
2391 			    !bkey_eq(trans->paths[iter->update_path].pos, k.k->p)) {
2392 				bch2_path_put_nokeep(trans, iter->update_path,
2393 						     iter->flags & BTREE_ITER_intent);
2394 				iter->update_path = 0;
2395 			}
2396 
2397 			if ((iter->flags & BTREE_ITER_intent) &&
2398 			    !(iter->flags & BTREE_ITER_is_extents) &&
2399 			    !iter->update_path) {
2400 				struct bpos pos = k.k->p;
2401 
2402 				if (pos.snapshot < iter->snapshot) {
2403 					search_key = bpos_successor(k.k->p);
2404 					continue;
2405 				}
2406 
2407 				pos.snapshot = iter->snapshot;
2408 
2409 				/*
2410 				 * advance, same as on exit for iter->path, but only up
2411 				 * to snapshot
2412 				 */
2413 				__btree_path_get(trans, trans->paths + iter->path, iter->flags & BTREE_ITER_intent);
2414 				iter->update_path = iter->path;
2415 
2416 				iter->update_path = bch2_btree_path_set_pos(trans,
2417 							iter->update_path, pos,
2418 							iter->flags & BTREE_ITER_intent,
2419 							_THIS_IP_);
2420 				ret = bch2_btree_path_traverse(trans, iter->update_path, iter->flags);
2421 				if (unlikely(ret)) {
2422 					k = bkey_s_c_err(ret);
2423 					goto out_no_locked;
2424 				}
2425 			}
2426 
2427 			/*
2428 			 * We can never have a key in a leaf node at POS_MAX, so
2429 			 * these successor() calls can't overflow:
2430 			 */
2431 			if (!bch2_snapshot_is_ancestor(trans->c,
2432 						       iter->snapshot,
2433 						       k.k->p.snapshot)) {
2434 				search_key = bpos_successor(k.k->p);
2435 				continue;
2436 			}
2437 
2438 			if (bkey_whiteout(k.k) &&
2439 			    !(iter->flags & BTREE_ITER_key_cache_fill)) {
2440 				search_key = bkey_successor(iter, k.k->p);
2441 				continue;
2442 			}
2443 		}
2444 
2445 		/*
2446 		 * iter->pos should be monotonically increasing, and always be
2447 		 * equal to the key we just returned - except that extents can
2448 		 * straddle iter->pos:
2449 		 */
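		/*
		 * e.g. (illustrative): peeking an extent covering [8, 16) with
		 * iter->pos == 12 yields iter_pos = max(12, 8) = 12, so the
		 * iterator position never moves backwards.
		 */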
2450 		if (!(iter->flags & BTREE_ITER_is_extents))
2451 			iter_pos = k.k->p;
2452 		else
2453 			iter_pos = bkey_max(iter->pos, bkey_start_pos(k.k));
2454 
2455 		if (unlikely(iter->flags & BTREE_ITER_all_snapshots	? bpos_gt(iter_pos, end) :
2456 			     iter->flags & BTREE_ITER_is_extents	? bkey_ge(iter_pos, end) :
2457 									  bkey_gt(iter_pos, end)))
2458 			goto end;
2459 
2460 		break;
2461 	}
2462 
2463 	iter->pos = iter_pos;
2464 
2465 	iter->path = bch2_btree_path_set_pos(trans, iter->path, k.k->p,
2466 				iter->flags & BTREE_ITER_intent,
2467 				btree_iter_ip_allocated(iter));
2468 
2469 	btree_path_set_should_be_locked(trans, btree_iter_path(trans, iter));
2470 out_no_locked:
2471 	if (iter->update_path) {
2472 		ret = bch2_btree_path_relock(trans, trans->paths + iter->update_path, _THIS_IP_);
2473 		if (unlikely(ret))
2474 			k = bkey_s_c_err(ret);
2475 		else
2476 			btree_path_set_should_be_locked(trans, trans->paths + iter->update_path);
2477 	}
2478 
2479 	if (!(iter->flags & BTREE_ITER_all_snapshots))
2480 		iter->pos.snapshot = iter->snapshot;
2481 
2482 	ret = bch2_btree_iter_verify_ret(iter, k);
2483 	if (unlikely(ret)) {
2484 		bch2_btree_iter_set_pos(iter, iter->pos);
2485 		k = bkey_s_c_err(ret);
2486 	}
2487 
2488 	bch2_btree_iter_verify_entry_exit(iter);
2489 
2490 	return k;
2491 end:
2492 	bch2_btree_iter_set_pos(iter, end);
2493 	k = bkey_s_c_null;
2494 	goto out_no_locked;
2495 }
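
/*
 * Example usage (an illustrative sketch, not code from this file): scanning
 * every key from the iterator's position up to @end. process_key() is a
 * hypothetical callback; transaction restarts are assumed to be handled by
 * the caller's retry loop:
 *
 *	struct bkey_s_c k;
 *	int ret = 0;
 *
 *	while ((k = bch2_btree_iter_peek_max(&iter, end)).k &&
 *	       !(ret = bkey_err(k))) {
 *		process_key(k);
 *		if (!bch2_btree_iter_advance(&iter))
 *			break;
 *	}
 */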
2496 
2497 /**
2498  * bch2_btree_iter_next() - returns first key greater than iterator's current
2499  * position
2500  * @iter:	iterator to peek from
2501  *
2502  * Returns:	key if found, or an error extractable with bkey_err().
2503  */
2504 struct bkey_s_c bch2_btree_iter_next(struct btree_iter *iter)
2505 {
2506 	if (!bch2_btree_iter_advance(iter))
2507 		return bkey_s_c_null;
2508 
2509 	return bch2_btree_iter_peek(iter);
2510 }
2511 
2512 static struct bkey_s_c __bch2_btree_iter_peek_prev(struct btree_iter *iter, struct bpos search_key)
2513 {
2514 	struct btree_trans *trans = iter->trans;
2515 	struct bkey_s_c k, k2;
2516 
2517 	bch2_btree_iter_verify(iter);
2518 
2519 	while (1) {
2520 		iter->path = bch2_btree_path_set_pos(trans, iter->path, search_key,
2521 					iter->flags & BTREE_ITER_intent,
2522 					btree_iter_ip_allocated(iter));
2523 
2524 		int ret = bch2_btree_path_traverse(trans, iter->path, iter->flags);
2525 		if (unlikely(ret)) {
2526 			/* ensure that iter->k is consistent with iter->pos: */
2527 			bch2_btree_iter_set_pos(iter, iter->pos);
2528 			k = bkey_s_c_err(ret);
2529 			break;
2530 		}
2531 
2532 		struct btree_path *path = btree_iter_path(trans, iter);
2533 		struct btree_path_level *l = path_l(path);
2534 
2535 		if (unlikely(!l->b)) {
2536 			/* No btree nodes at requested level: */
2537 			bch2_btree_iter_set_pos(iter, SPOS_MAX);
2538 			k = bkey_s_c_null;
2539 			break;
2540 		}
2541 
2542 		btree_path_set_should_be_locked(trans, path);
2543 
2544 		k = btree_path_level_peek_all(trans->c, l, &iter->k);
2545 		if (!k.k || bpos_gt(k.k->p, search_key)) {
2546 			k = btree_path_level_prev(trans, path, l, &iter->k);
2547 
2548 			BUG_ON(k.k && bpos_gt(k.k->p, search_key));
2549 		}
2550 
2551 		if (unlikely(iter->flags & BTREE_ITER_with_key_cache) &&
2552 		    k.k &&
2553 		    (k2 = btree_trans_peek_key_cache(iter, k.k->p)).k) {
2554 			k = k2;
2555 			if (bkey_err(k2)) {
2556 				bch2_btree_iter_set_pos(iter, iter->pos);
2557 				break;
2558 			}
2559 		}
2560 
2561 		if (unlikely(iter->flags & BTREE_ITER_with_journal))
2562 			btree_trans_peek_prev_journal(trans, iter, &k);
2563 
2564 		if (unlikely((iter->flags & BTREE_ITER_with_updates) &&
2565 			     trans->nr_updates))
2566 			bch2_btree_trans_peek_prev_updates(trans, iter, &k);
2567 
2568 		if (likely(k.k && !bkey_deleted(k.k))) {
2569 			break;
2570 		} else if (k.k) {
2571 			search_key = bpos_predecessor(k.k->p);
2572 		} else if (likely(!bpos_eq(path->l[0].b->data->min_key, POS_MIN))) {
2573 			/* Advance to previous leaf node: */
2574 			search_key = bpos_predecessor(path->l[0].b->data->min_key);
2575 		} else {
2576 			/* Start of btree: */
2577 			bch2_btree_iter_set_pos(iter, POS_MIN);
2578 			k = bkey_s_c_null;
2579 			break;
2580 		}
2581 	}
2582 
2583 	bch2_btree_iter_verify(iter);
2584 	return k;
2585 }
2586 
2587 /**
2588  * bch2_btree_iter_peek_prev_min() - returns first key less than or equal to
2589  * iterator's current position
2590  * @iter:	iterator to peek from
2591  * @end:	search limit: returns keys greater than or equal to @end
2592  *
2593  * Returns:	key if found, or an error extractable with bkey_err().
2594  */
2595 struct bkey_s_c bch2_btree_iter_peek_prev_min(struct btree_iter *iter, struct bpos end)
2596 {
2597 	if ((iter->flags & (BTREE_ITER_is_extents|BTREE_ITER_filter_snapshots)) &&
2598 	   !bkey_eq(iter->pos, POS_MAX)) {
2599 		/*
2600 		 * bkey_start_pos(), for extents, is not monotonically
2601 		 * increasing until after filtering for snapshots:
2602 		 *
2603 		 * Thus, for extents we need to search forward until we find a
2604 		 * real visible extents - easiest to just use peek_slot() (which
2605 		 * internally uses peek() for extents)
2606 		 */
2607 		struct bkey_s_c k = bch2_btree_iter_peek_slot(iter);
2608 		if (bkey_err(k))
2609 			return k;
2610 
2611 		if (!bkey_deleted(k.k) &&
2612 		    (!(iter->flags & BTREE_ITER_is_extents) ||
2613 		     bkey_lt(bkey_start_pos(k.k), iter->pos)))
2614 			return k;
2615 	}
2616 
2617 	struct btree_trans *trans = iter->trans;
2618 	struct bpos search_key = iter->pos;
2619 	struct bkey_s_c k;
2620 	btree_path_idx_t saved_path = 0;
2621 
2622 	bch2_trans_verify_not_unlocked_or_in_restart(trans);
2623 	bch2_btree_iter_verify_entry_exit(iter);
2624 	EBUG_ON((iter->flags & BTREE_ITER_filter_snapshots) && bpos_eq(end, POS_MIN));
2625 
2626 	while (1) {
2627 		k = __bch2_btree_iter_peek_prev(iter, search_key);
2628 		if (unlikely(!k.k))
2629 			goto end;
2630 		if (unlikely(bkey_err(k)))
2631 			goto out_no_locked;
2632 
2633 		if (iter->flags & BTREE_ITER_filter_snapshots) {
2634 			struct btree_path *s = saved_path ? trans->paths + saved_path : NULL;
2635 			if (s && bpos_lt(k.k->p, SPOS(s->pos.inode, s->pos.offset, iter->snapshot))) {
2636 				/*
2637 				 * If we have a saved candidate, and we're past
2638 				 * the last possible snapshot overwrite, return
2639 				 * it:
2640 				 */
2641 				bch2_path_put_nokeep(trans, iter->path,
2642 					      iter->flags & BTREE_ITER_intent);
2643 				iter->path = saved_path;
2644 				saved_path = 0;
2645 				k = bch2_btree_path_peek_slot(btree_iter_path(trans, iter), &iter->k);
2646 				break;
2647 			}
2648 
2649 			/*
2650 			 * We need to check against @end before FILTER_SNAPSHOTS because
2651 			 * if we get to a different inode than requested, we might be
2652 			 * seeing keys for a different snapshot tree that will all be
2653 			 * filtered out.
2654 			 */
2655 			if (unlikely(bkey_lt(k.k->p, end)))
2656 				goto end;
2657 
2658 			if (!bch2_snapshot_is_ancestor(trans->c, iter->snapshot, k.k->p.snapshot)) {
2659 				search_key = bpos_predecessor(k.k->p);
2660 				continue;
2661 			}
2662 
2663 			if (k.k->p.snapshot != iter->snapshot) {
2664 				/*
2665 				 * We have a key visible in iter->snapshot, but
2666 				 * it might have overwrites - save it and keep
2667 				 * searching. Unless it's a whiteout, in which
2668 				 * case we drop our previously saved candidate:
2669 				 */
2670 				if (saved_path) {
2671 					bch2_path_put_nokeep(trans, saved_path,
2672 					      iter->flags & BTREE_ITER_intent);
2673 					saved_path = 0;
2674 				}
2675 
2676 				if (!bkey_whiteout(k.k)) {
2677 					saved_path = btree_path_clone(trans, iter->path,
2678 								iter->flags & BTREE_ITER_intent,
2679 								_THIS_IP_);
2680 					trace_btree_path_save_pos(trans,
2681 								  trans->paths + iter->path,
2682 								  trans->paths + saved_path);
2683 				}
2684 
2685 				search_key = bpos_predecessor(k.k->p);
2686 				continue;
2687 			}
2688 
2689 			if (bkey_whiteout(k.k)) {
2690 				search_key = bkey_predecessor(iter, k.k->p);
2691 				search_key.snapshot = U32_MAX;
2692 				continue;
2693 			}
2694 		}
2695 
2696 		EBUG_ON(iter->flags & BTREE_ITER_all_snapshots		? bpos_gt(k.k->p, iter->pos) :
2697 			iter->flags & BTREE_ITER_is_extents		? bkey_ge(bkey_start_pos(k.k), iter->pos) :
2698 									  bkey_gt(k.k->p, iter->pos));
2699 
2700 		if (unlikely(iter->flags & BTREE_ITER_all_snapshots	? bpos_lt(k.k->p, end) :
2701 			     iter->flags & BTREE_ITER_is_extents	? bkey_le(k.k->p, end) :
2702 									  bkey_lt(k.k->p, end)))
2703 			goto end;
2704 
2705 		break;
2706 	}
2707 
2708 	/* Extents can straddle iter->pos: */
2709 	iter->pos = bpos_min(iter->pos, k.k->p);
2710 
2711 	if (iter->flags & BTREE_ITER_filter_snapshots)
2712 		iter->pos.snapshot = iter->snapshot;
2713 out_no_locked:
2714 	if (saved_path)
2715 		bch2_path_put_nokeep(trans, saved_path, iter->flags & BTREE_ITER_intent);
2716 
2717 	bch2_btree_iter_verify_entry_exit(iter);
2718 	bch2_btree_iter_verify(iter);
2719 	return k;
2720 end:
2721 	bch2_btree_iter_set_pos(iter, end);
2722 	k = bkey_s_c_null;
2723 	goto out_no_locked;
2724 }
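
/*
 * Example usage (an illustrative sketch): the mirror image of the peek_max()
 * loop above, walking keys backwards from the iterator's position down to
 * @end. process_key() is a hypothetical callback:
 *
 *	struct bkey_s_c k;
 *	int ret = 0;
 *
 *	while ((k = bch2_btree_iter_peek_prev_min(&iter, end)).k &&
 *	       !(ret = bkey_err(k))) {
 *		process_key(k);
 *		if (!bch2_btree_iter_rewind(&iter))
 *			break;
 *	}
 */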
2725 
2726 /**
2727  * bch2_btree_iter_prev() - returns first key less than iterator's current
2728  * position
2729  * @iter:	iterator to peek from
2730  *
2731  * Returns:	key if found, or an error extractable with bkey_err().
2732  */
2733 struct bkey_s_c bch2_btree_iter_prev(struct btree_iter *iter)
2734 {
2735 	if (!bch2_btree_iter_rewind(iter))
2736 		return bkey_s_c_null;
2737 
2738 	return bch2_btree_iter_peek_prev(iter);
2739 }
2740 
2741 struct bkey_s_c bch2_btree_iter_peek_slot(struct btree_iter *iter)
2742 {
2743 	struct btree_trans *trans = iter->trans;
2744 	struct bpos search_key;
2745 	struct bkey_s_c k;
2746 	int ret;
2747 
2748 	bch2_trans_verify_not_unlocked_or_in_restart(trans);
2749 	bch2_btree_iter_verify(iter);
2750 	bch2_btree_iter_verify_entry_exit(iter);
2751 	EBUG_ON(btree_iter_path(trans, iter)->level && (iter->flags & BTREE_ITER_with_key_cache));
2752 
2753 	/* extents can't span inode numbers: */
2754 	if ((iter->flags & BTREE_ITER_is_extents) &&
2755 	    unlikely(iter->pos.offset == KEY_OFFSET_MAX)) {
2756 		if (iter->pos.inode == KEY_INODE_MAX)
2757 			return bkey_s_c_null;
2758 
2759 		bch2_btree_iter_set_pos(iter, bpos_nosnap_successor(iter->pos));
2760 	}
2761 
2762 	search_key = btree_iter_search_key(iter);
2763 	iter->path = bch2_btree_path_set_pos(trans, iter->path, search_key,
2764 					iter->flags & BTREE_ITER_intent,
2765 					btree_iter_ip_allocated(iter));
2766 
2767 	ret = bch2_btree_path_traverse(trans, iter->path, iter->flags);
2768 	if (unlikely(ret)) {
2769 		k = bkey_s_c_err(ret);
2770 		goto out_no_locked;
2771 	}
2772 
2773 	struct btree_path *path = btree_iter_path(trans, iter);
2774 	if (unlikely(!btree_path_node(path, path->level)))
2775 		return bkey_s_c_null;
2776 
2777 	if ((iter->flags & BTREE_ITER_cached) ||
2778 	    !(iter->flags & (BTREE_ITER_is_extents|BTREE_ITER_filter_snapshots))) {
2779 		k = bkey_s_c_null;
2780 
2781 		if (unlikely((iter->flags & BTREE_ITER_with_updates) &&
2782 			     trans->nr_updates)) {
2783 			bch2_btree_trans_peek_slot_updates(trans, iter, &k);
2784 			if (k.k)
2785 				goto out;
2786 		}
2787 
2788 		if (unlikely(iter->flags & BTREE_ITER_with_journal) &&
2789 		    (k = btree_trans_peek_slot_journal(trans, iter)).k)
2790 			goto out;
2791 
2792 		if (unlikely(iter->flags & BTREE_ITER_with_key_cache) &&
2793 		    (k = btree_trans_peek_key_cache(iter, iter->pos)).k) {
2794 			if (!bkey_err(k))
2795 				iter->k = *k.k;
2796 			/* We're not returning a key from iter->path: */
2797 			goto out_no_locked;
2798 		}
2799 
2800 		k = bch2_btree_path_peek_slot(trans->paths + iter->path, &iter->k);
2801 		if (unlikely(!k.k))
2802 			goto out_no_locked;
2803 
2804 		if (unlikely(k.k->type == KEY_TYPE_whiteout &&
2805 			     (iter->flags & BTREE_ITER_filter_snapshots) &&
2806 			     !(iter->flags & BTREE_ITER_key_cache_fill)))
2807 			iter->k.type = KEY_TYPE_deleted;
2808 	} else {
2809 		struct bpos next;
2810 		struct bpos end = iter->pos;
2811 
2812 		if (iter->flags & BTREE_ITER_is_extents)
2813 			end.offset = U64_MAX;
2814 
2815 		EBUG_ON(btree_iter_path(trans, iter)->level);
2816 
2817 		if (iter->flags & BTREE_ITER_intent) {
2818 			struct btree_iter iter2;
2819 
2820 			bch2_trans_copy_iter(&iter2, iter);
2821 			k = bch2_btree_iter_peek_max(&iter2, end);
2822 
2823 			if (k.k && !bkey_err(k)) {
2824 				swap(iter->key_cache_path, iter2.key_cache_path);
2825 				iter->k = iter2.k;
2826 				k.k = &iter->k;
2827 			}
2828 			bch2_trans_iter_exit(trans, &iter2);
2829 		} else {
2830 			struct bpos pos = iter->pos;
2831 
2832 			k = bch2_btree_iter_peek_max(iter, end);
2833 			if (unlikely(bkey_err(k)))
2834 				bch2_btree_iter_set_pos(iter, pos);
2835 			else
2836 				iter->pos = pos;
2837 		}
2838 
2839 		if (unlikely(bkey_err(k)))
2840 			goto out_no_locked;
2841 
2842 		next = k.k ? bkey_start_pos(k.k) : POS_MAX;
2843 
2844 		if (bkey_lt(iter->pos, next)) {
2845 			bkey_init(&iter->k);
2846 			iter->k.p = iter->pos;
2847 
2848 			if (iter->flags & BTREE_ITER_is_extents) {
2849 				bch2_key_resize(&iter->k,
2850 						min_t(u64, KEY_SIZE_MAX,
2851 						      (next.inode == iter->pos.inode
2852 						       ? next.offset
2853 						       : KEY_OFFSET_MAX) -
2854 						      iter->pos.offset));
2855 				EBUG_ON(!iter->k.size);
2856 			}
2857 
2858 			k = (struct bkey_s_c) { &iter->k, NULL };
2859 		}
2860 	}
2861 out:
2862 	btree_path_set_should_be_locked(trans, btree_iter_path(trans, iter));
2863 out_no_locked:
2864 	bch2_btree_iter_verify_entry_exit(iter);
2865 	bch2_btree_iter_verify(iter);
2866 	ret = bch2_btree_iter_verify_ret(iter, k);
2867 	if (unlikely(ret))
2868 		return bkey_s_c_err(ret);
2869 
2870 	return k;
2871 }
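
/*
 * Note (illustrative): unlike peek(), peek_slot() returns a key for every
 * position - if nothing is stored at iter->pos it synthesizes a deleted key
 * there (sized to cover the hole, in extents mode), so callers distinguish
 * holes with bkey_deleted(). handle_hole() is hypothetical:
 *
 *	struct bkey_s_c k = bch2_btree_iter_peek_slot(&iter);
 *	int ret = bkey_err(k);
 *
 *	if (!ret && bkey_deleted(k.k))
 *		handle_hole();
 */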
2872 
2873 struct bkey_s_c bch2_btree_iter_next_slot(struct btree_iter *iter)
2874 {
2875 	if (!bch2_btree_iter_advance(iter))
2876 		return bkey_s_c_null;
2877 
2878 	return bch2_btree_iter_peek_slot(iter);
2879 }
2880 
2881 struct bkey_s_c bch2_btree_iter_prev_slot(struct btree_iter *iter)
2882 {
2883 	if (!bch2_btree_iter_rewind(iter))
2884 		return bkey_s_c_null;
2885 
2886 	return bch2_btree_iter_peek_slot(iter);
2887 }
2888 
2889 /* Obsolete, but still used by the Rust wrapper in -tools */
2890 struct bkey_s_c bch2_btree_iter_peek_and_restart_outlined(struct btree_iter *iter)
2891 {
2892 	struct bkey_s_c k;
2893 
2894 	while (btree_trans_too_many_iters(iter->trans) ||
2895 	       (k = bch2_btree_iter_peek_type(iter, iter->flags),
2896 		bch2_err_matches(bkey_err(k), BCH_ERR_transaction_restart)))
2897 		bch2_trans_begin(iter->trans);
2898 
2899 	return k;
2900 }
2901 
2902 /* new transactional stuff: */
2903 
2904 #ifdef CONFIG_BCACHEFS_DEBUG
2905 static void btree_trans_verify_sorted_refs(struct btree_trans *trans)
2906 {
2907 	struct btree_path *path;
2908 	unsigned i;
2909 
2910 	BUG_ON(trans->nr_sorted != bitmap_weight(trans->paths_allocated, trans->nr_paths) - 1);
2911 
2912 	trans_for_each_path(trans, path, i) {
2913 		BUG_ON(path->sorted_idx >= trans->nr_sorted);
2914 		BUG_ON(trans->sorted[path->sorted_idx] != i);
2915 	}
2916 
2917 	for (i = 0; i < trans->nr_sorted; i++) {
2918 		unsigned idx = trans->sorted[i];
2919 
2920 		BUG_ON(!test_bit(idx, trans->paths_allocated));
2921 		BUG_ON(trans->paths[idx].sorted_idx != i);
2922 	}
2923 }
2924 
2925 static void btree_trans_verify_sorted(struct btree_trans *trans)
2926 {
2927 	struct btree_path *path, *prev = NULL;
2928 	struct trans_for_each_path_inorder_iter iter;
2929 
2930 	if (!bch2_debug_check_iterators)
2931 		return;
2932 
2933 	trans_for_each_path_inorder(trans, path, iter) {
2934 		if (prev && btree_path_cmp(prev, path) > 0) {
2935 			__bch2_dump_trans_paths_updates(trans, true);
2936 			panic("trans paths out of order!\n");
2937 		}
2938 		prev = path;
2939 	}
2940 }
2941 #else
2942 static inline void btree_trans_verify_sorted_refs(struct btree_trans *trans) {}
2943 static inline void btree_trans_verify_sorted(struct btree_trans *trans) {}
2944 #endif
2945 
2946 void __bch2_btree_trans_sort_paths(struct btree_trans *trans)
2947 {
2948 	int i, l = 0, r = trans->nr_sorted, inc = 1;
2949 	bool swapped;
2950 
2951 	btree_trans_verify_sorted_refs(trans);
2952 
2953 	if (trans->paths_sorted)
2954 		goto out;
2955 
2956 	/*
2957 	 * Cocktail shaker sort: this is efficient because iterators will be
2958 	 * mostly sorted.
2959 	 */
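	/*
	 * Illustrative trace: with sorted[] ordered [B, A, C], the forward
	 * pass swaps B and A giving [A, B, C]; the following backward pass
	 * makes no swaps, so the loop exits - nearly-sorted input costs only
	 * a couple of linear passes.
	 */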
2960 	do {
2961 		swapped = false;
2962 
2963 		for (i = inc > 0 ? l : r - 2;
2964 		     i + 1 < r && i >= l;
2965 		     i += inc) {
2966 			if (btree_path_cmp(trans->paths + trans->sorted[i],
2967 					   trans->paths + trans->sorted[i + 1]) > 0) {
2968 				swap(trans->sorted[i], trans->sorted[i + 1]);
2969 				trans->paths[trans->sorted[i]].sorted_idx = i;
2970 				trans->paths[trans->sorted[i + 1]].sorted_idx = i + 1;
2971 				swapped = true;
2972 			}
2973 		}
2974 
2975 		if (inc > 0)
2976 			--r;
2977 		else
2978 			l++;
2979 		inc = -inc;
2980 	} while (swapped);
2981 
2982 	trans->paths_sorted = true;
2983 out:
2984 	btree_trans_verify_sorted(trans);
2985 }
2986 
2987 static inline void btree_path_list_remove(struct btree_trans *trans,
2988 					  struct btree_path *path)
2989 {
2990 	EBUG_ON(path->sorted_idx >= trans->nr_sorted);
2991 #ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2992 	trans->nr_sorted--;
2993 	memmove_u64s_down_small(trans->sorted + path->sorted_idx,
2994 				trans->sorted + path->sorted_idx + 1,
2995 				DIV_ROUND_UP(trans->nr_sorted - path->sorted_idx,
2996 					     sizeof(u64) / sizeof(btree_path_idx_t)));
2997 #else
2998 	array_remove_item(trans->sorted, trans->nr_sorted, path->sorted_idx);
2999 #endif
3000 	for (unsigned i = path->sorted_idx; i < trans->nr_sorted; i++)
3001 		trans->paths[trans->sorted[i]].sorted_idx = i;
3002 }
3003 
3004 static inline void btree_path_list_add(struct btree_trans *trans,
3005 				       btree_path_idx_t pos,
3006 				       btree_path_idx_t path_idx)
3007 {
3008 	struct btree_path *path = trans->paths + path_idx;
3009 
3010 	path->sorted_idx = pos ? trans->paths[pos].sorted_idx + 1 : trans->nr_sorted;
3011 
3012 #ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
3013 	memmove_u64s_up_small(trans->sorted + path->sorted_idx + 1,
3014 			      trans->sorted + path->sorted_idx,
3015 			      DIV_ROUND_UP(trans->nr_sorted - path->sorted_idx,
3016 					   sizeof(u64) / sizeof(btree_path_idx_t)));
3017 	trans->nr_sorted++;
3018 	trans->sorted[path->sorted_idx] = path_idx;
3019 #else
3020 	array_insert_item(trans->sorted, trans->nr_sorted, path->sorted_idx, path_idx);
3021 #endif
3022 
3023 	for (unsigned i = path->sorted_idx; i < trans->nr_sorted; i++)
3024 		trans->paths[trans->sorted[i]].sorted_idx = i;
3025 
3026 	btree_trans_verify_sorted_refs(trans);
3027 }
3028 
3029 void bch2_trans_iter_exit(struct btree_trans *trans, struct btree_iter *iter)
3030 {
3031 	if (iter->update_path)
3032 		bch2_path_put_nokeep(trans, iter->update_path,
3033 			      iter->flags & BTREE_ITER_intent);
3034 	if (iter->path)
3035 		bch2_path_put(trans, iter->path,
3036 			      iter->flags & BTREE_ITER_intent);
3037 	if (iter->key_cache_path)
3038 		bch2_path_put(trans, iter->key_cache_path,
3039 			      iter->flags & BTREE_ITER_intent);
3040 	iter->path		= 0;
3041 	iter->update_path	= 0;
3042 	iter->key_cache_path	= 0;
3043 	iter->trans		= NULL;
3044 }
3045 
3046 void bch2_trans_iter_init_outlined(struct btree_trans *trans,
3047 			  struct btree_iter *iter,
3048 			  enum btree_id btree_id, struct bpos pos,
3049 			  unsigned flags)
3050 {
3051 	bch2_trans_iter_init_common(trans, iter, btree_id, pos, 0, 0,
3052 			       bch2_btree_iter_flags(trans, btree_id, 0, flags),
3053 			       _RET_IP_);
3054 }
3055 
3056 void bch2_trans_node_iter_init(struct btree_trans *trans,
3057 			       struct btree_iter *iter,
3058 			       enum btree_id btree_id,
3059 			       struct bpos pos,
3060 			       unsigned locks_want,
3061 			       unsigned depth,
3062 			       unsigned flags)
3063 {
3064 	flags |= BTREE_ITER_not_extents;
3065 	flags |= BTREE_ITER_snapshot_field;
3066 	flags |= BTREE_ITER_all_snapshots;
3067 
3068 	if (!depth && btree_id_cached(trans->c, btree_id))
3069 		flags |= BTREE_ITER_with_key_cache;
3070 
3071 	bch2_trans_iter_init_common(trans, iter, btree_id, pos, locks_want, depth,
3072 			       bch2_btree_iter_flags(trans, btree_id, depth, flags),
3073 			       _RET_IP_);
3074 
3075 	iter->min_depth	= depth;
3076 
3077 	struct btree_path *path = btree_iter_path(trans, iter);
3078 	BUG_ON(path->locks_want	 < min(locks_want, BTREE_MAX_DEPTH));
3079 	BUG_ON(path->level	!= depth);
3080 	BUG_ON(iter->min_depth	!= depth);
3081 }
3082 
3083 void bch2_trans_copy_iter(struct btree_iter *dst, struct btree_iter *src)
3084 {
3085 	struct btree_trans *trans = src->trans;
3086 
3087 	*dst = *src;
3088 #ifdef TRACK_PATH_ALLOCATED
3089 	dst->ip_allocated = _RET_IP_;
3090 #endif
3091 	if (src->path)
3092 		__btree_path_get(trans, trans->paths + src->path, src->flags & BTREE_ITER_intent);
3093 	if (src->update_path)
3094 		__btree_path_get(trans, trans->paths + src->update_path, src->flags & BTREE_ITER_intent);
3095 	dst->key_cache_path = 0;
3096 }
3097 
3098 void *__bch2_trans_kmalloc(struct btree_trans *trans, size_t size)
3099 {
3100 	struct bch_fs *c = trans->c;
3101 	unsigned new_top = trans->mem_top + size;
3102 	unsigned old_bytes = trans->mem_bytes;
3103 	unsigned new_bytes = roundup_pow_of_two(new_top);
3104 	int ret;
3105 	void *new_mem;
3106 	void *p;
3107 
3108 	WARN_ON_ONCE(new_bytes > BTREE_TRANS_MEM_MAX);
3109 
3110 	struct btree_transaction_stats *s = btree_trans_stats(trans);
3111 	s->max_mem = max(s->max_mem, new_bytes);
3112 
3113 	if (trans->used_mempool) {
3114 		if (trans->mem_bytes >= new_bytes)
3115 			goto out_change_top;
3116 
3117 		/* No more space in the mempool item, need to allocate a new buffer */
3118 		new_mem = kmalloc(new_bytes, GFP_NOWAIT|__GFP_NOWARN);
3119 		if (unlikely(!new_mem)) {
3120 			bch2_trans_unlock(trans);
3121 
3122 			new_mem = kmalloc(new_bytes, GFP_KERNEL);
3123 			if (!new_mem)
3124 				return ERR_PTR(-BCH_ERR_ENOMEM_trans_kmalloc);
3125 
3126 			ret = bch2_trans_relock(trans);
3127 			if (ret) {
3128 				kfree(new_mem);
3129 				return ERR_PTR(ret);
3130 			}
3131 		}
3132 		memcpy(new_mem, trans->mem, trans->mem_top);
3133 		trans->used_mempool = false;
3134 		mempool_free(trans->mem, &c->btree_trans_mem_pool);
3135 		goto out_new_mem;
3136 	}
3137 
3138 	new_mem = krealloc(trans->mem, new_bytes, GFP_NOWAIT|__GFP_NOWARN);
3139 	if (unlikely(!new_mem)) {
3140 		bch2_trans_unlock(trans);
3141 
3142 		new_mem = krealloc(trans->mem, new_bytes, GFP_KERNEL);
3143 		if (!new_mem && new_bytes <= BTREE_TRANS_MEM_MAX) {
3144 			new_mem = mempool_alloc(&c->btree_trans_mem_pool, GFP_KERNEL);
3145 			new_bytes = BTREE_TRANS_MEM_MAX;
3146 			memcpy(new_mem, trans->mem, trans->mem_top);
3147 			trans->used_mempool = true;
3148 			kfree(trans->mem);
3149 		}
3150 
3151 		if (!new_mem)
3152 			return ERR_PTR(-BCH_ERR_ENOMEM_trans_kmalloc);
3153 
3154 		trans->mem = new_mem;
3155 		trans->mem_bytes = new_bytes;
3156 
3157 		ret = bch2_trans_relock(trans);
3158 		if (ret)
3159 			return ERR_PTR(ret);
3160 	}
3161 out_new_mem:
3162 	trans->mem = new_mem;
3163 	trans->mem_bytes = new_bytes;
3164 
3165 	if (old_bytes) {
3166 		trace_and_count(c, trans_restart_mem_realloced, trans, _RET_IP_, new_bytes);
3167 		return ERR_PTR(btree_trans_restart(trans, BCH_ERR_transaction_restart_mem_realloced));
3168 	}
3169 out_change_top:
3170 	p = trans->mem + trans->mem_top;
3171 	trans->mem_top += size;
3172 	memset(p, 0, size);
3173 	return p;
3174 }
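
/*
 * Example usage (a sketch, assuming the bch2_trans_kmalloc() inline wrapper
 * from btree_iter.h): allocations are bump-allocated from trans->mem and
 * remain valid until the next bch2_trans_begin() resets mem_top:
 *
 *	struct bkey_i *new_key = bch2_trans_kmalloc(trans, sizeof(*new_key));
 *	int ret = PTR_ERR_OR_ZERO(new_key);
 *	if (ret)	// may be an ENOMEM error or a transaction restart
 *		return ret;
 */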
3175 
3176 static inline void check_srcu_held_too_long(struct btree_trans *trans)
3177 {
3178 	WARN(trans->srcu_held && time_after(jiffies, trans->srcu_lock_time + HZ * 10),
3179 	     "btree trans held srcu lock (delaying memory reclaim) for %lu seconds",
3180 	     (jiffies - trans->srcu_lock_time) / HZ);
3181 }
3182 
3183 void bch2_trans_srcu_unlock(struct btree_trans *trans)
3184 {
3185 	if (trans->srcu_held) {
3186 		struct bch_fs *c = trans->c;
3187 		struct btree_path *path;
3188 		unsigned i;
3189 
3190 		trans_for_each_path(trans, path, i)
3191 			if (path->cached && !btree_node_locked(path, 0))
3192 				path->l[0].b = ERR_PTR(-BCH_ERR_no_btree_node_srcu_reset);
3193 
3194 		check_srcu_held_too_long(trans);
3195 		srcu_read_unlock(&c->btree_trans_barrier, trans->srcu_idx);
3196 		trans->srcu_held = false;
3197 	}
3198 }
3199 
3200 static void bch2_trans_srcu_lock(struct btree_trans *trans)
3201 {
3202 	if (!trans->srcu_held) {
3203 		trans->srcu_idx = srcu_read_lock(&trans->c->btree_trans_barrier);
3204 		trans->srcu_lock_time	= jiffies;
3205 		trans->srcu_held = true;
3206 	}
3207 }
3208 
3209 /**
3210  * bch2_trans_begin() - reset a transaction after an interrupted attempt
3211  * @trans: transaction to reset
3212  *
3213  * Returns:	current restart counter, to be used with trans_was_restarted()
3214  *
3215  * While iterating over nodes or updating nodes, an attempt to lock a btree
3216  * node may return BCH_ERR_transaction_restart when the trylock fails. When
3217  * this occurs, bch2_trans_begin() should be called and the transaction retried.
3218  */
3219 u32 bch2_trans_begin(struct btree_trans *trans)
3220 {
3221 	struct btree_path *path;
3222 	unsigned i;
3223 	u64 now;
3224 
3225 	bch2_trans_reset_updates(trans);
3226 
3227 	trans->restart_count++;
3228 	trans->mem_top			= 0;
3229 	trans->journal_entries		= NULL;
3230 
3231 	trans_for_each_path(trans, path, i) {
3232 		path->should_be_locked = false;
3233 
3234 		/*
3235 		 * If the transaction wasn't restarted, we presume we're doing
3236 		 * something new: don't keep iterators except the ones that are
3237 		 * in use - except for the subvolumes btree:
3238 		 */
3239 		if (!trans->restarted && path->btree_id != BTREE_ID_subvolumes)
3240 			path->preserve = false;
3241 
3242 		/*
3243 		 * XXX: we probably shouldn't be doing this if the transaction
3244 		 * was restarted, but currently we still overflow transaction
3245 		 * iterators if we do that
3246 		 */
3247 		if (!path->ref && !path->preserve)
3248 			__bch2_path_free(trans, i);
3249 		else
3250 			path->preserve = false;
3251 	}
3252 
3253 	now = local_clock();
3254 
3255 	if (!IS_ENABLED(CONFIG_BCACHEFS_NO_LATENCY_ACCT) &&
3256 	    time_after64(now, trans->last_begin_time + 10))
3257 		__bch2_time_stats_update(&btree_trans_stats(trans)->duration,
3258 					 trans->last_begin_time, now);
3259 
3260 	if (!trans->restarted &&
3261 	    (need_resched() ||
3262 	     time_after64(now, trans->last_begin_time + BTREE_TRANS_MAX_LOCK_HOLD_TIME_NS))) {
3263 		bch2_trans_unlock(trans);
3264 		cond_resched();
3265 		now = local_clock();
3266 	}
3267 	trans->last_begin_time = now;
3268 
3269 	if (unlikely(trans->srcu_held &&
3270 		     time_after(jiffies, trans->srcu_lock_time + msecs_to_jiffies(10))))
3271 		bch2_trans_srcu_unlock(trans);
3272 
3273 	trans->last_begin_ip = _RET_IP_;
3274 
3275 	trans_set_locked(trans, false);
3276 
3277 	if (trans->restarted) {
3278 		bch2_btree_path_traverse_all(trans);
3279 		trans->notrace_relock_fail = false;
3280 	}
3281 
3282 	bch2_trans_verify_not_unlocked_or_in_restart(trans);
3283 	return trans->restart_count;
3284 }
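
/*
 * Example (an illustrative sketch): the canonical retry loop this function
 * anchors - compare bch2_btree_iter_peek_node_and_restart() above.
 * do_stuff() stands in for any sequence of btree operations:
 *
 *	int ret;
 *
 *	do {
 *		bch2_trans_begin(trans);
 *		ret = do_stuff(trans);
 *	} while (bch2_err_matches(ret, BCH_ERR_transaction_restart));
 */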
3285 
3286 const char *bch2_btree_transaction_fns[BCH_TRANSACTIONS_NR] = { "(unknown)" };
3287 
3288 unsigned bch2_trans_get_fn_idx(const char *fn)
3289 {
3290 	for (unsigned i = 0; i < ARRAY_SIZE(bch2_btree_transaction_fns); i++)
3291 		if (!bch2_btree_transaction_fns[i] ||
3292 		    bch2_btree_transaction_fns[i] == fn) {
3293 			bch2_btree_transaction_fns[i] = fn;
3294 			return i;
3295 		}
3296 
3297 	pr_warn_once("BCH_TRANSACTIONS_NR not big enough!");
3298 	return 0;
3299 }
3300 
3301 struct btree_trans *__bch2_trans_get(struct bch_fs *c, unsigned fn_idx)
3302 	__acquires(&c->btree_trans_barrier)
3303 {
3304 	struct btree_trans *trans;
3305 
3306 	if (IS_ENABLED(__KERNEL__)) {
3307 		trans = this_cpu_xchg(c->btree_trans_bufs->trans, NULL);
3308 		if (trans) {
3309 			memset(trans, 0, offsetof(struct btree_trans, list));
3310 			goto got_trans;
3311 		}
3312 	}
3313 
3314 	trans = mempool_alloc(&c->btree_trans_pool, GFP_NOFS);
3315 	memset(trans, 0, sizeof(*trans));
3316 
3317 	seqmutex_lock(&c->btree_trans_lock);
3318 	if (IS_ENABLED(CONFIG_BCACHEFS_DEBUG)) {
3319 		struct btree_trans *pos;
3320 		pid_t pid = current->pid;
3321 
3322 		trans->locking_wait.task = current;
3323 
3324 		list_for_each_entry(pos, &c->btree_trans_list, list) {
3325 			struct task_struct *pos_task = READ_ONCE(pos->locking_wait.task);
3326 			/*
3327 			 * We'd much prefer to be stricter here and completely
3328 			 * disallow multiple btree_trans in the same thread -
3329 			 * but the data move path calls bch2_write when we
3330 			 * already have a btree_trans initialized.
3331 			 */
3332 			BUG_ON(pos_task &&
3333 			       pid == pos_task->pid &&
3334 			       pos->locked);
3335 		}
3336 	}
3337 
3338 	list_add(&trans->list, &c->btree_trans_list);
3339 	seqmutex_unlock(&c->btree_trans_lock);
3340 got_trans:
3341 	trans->c		= c;
3342 	trans->last_begin_time	= local_clock();
3343 	trans->fn_idx		= fn_idx;
3344 	trans->locking_wait.task = current;
3345 	trans->journal_replay_not_finished =
3346 		unlikely(!test_bit(JOURNAL_replay_done, &c->journal.flags)) &&
3347 		atomic_inc_not_zero(&c->journal_keys.ref);
3348 	trans->nr_paths		= ARRAY_SIZE(trans->_paths);
3349 	trans->paths_allocated	= trans->_paths_allocated;
3350 	trans->sorted		= trans->_sorted;
3351 	trans->paths		= trans->_paths;
3352 	trans->updates		= trans->_updates;
3353 
3354 	*trans_paths_nr(trans->paths) = BTREE_ITER_INITIAL;
3355 
3356 	trans->paths_allocated[0] = 1;
3357 
3358 	static struct lock_class_key lockdep_key;
3359 	lockdep_init_map(&trans->dep_map, "bcachefs_btree", &lockdep_key, 0);
3360 
3361 	if (fn_idx < BCH_TRANSACTIONS_NR) {
3362 		trans->fn = bch2_btree_transaction_fns[fn_idx];
3363 
3364 		struct btree_transaction_stats *s = &c->btree_transaction_stats[fn_idx];
3365 
3366 		if (s->max_mem) {
3367 			unsigned expected_mem_bytes = roundup_pow_of_two(s->max_mem);
3368 
3369 			trans->mem = kmalloc(expected_mem_bytes, GFP_KERNEL);
3370 			if (likely(trans->mem))
3371 				trans->mem_bytes = expected_mem_bytes;
3372 		}
3373 
3374 		trans->nr_paths_max = s->nr_max_paths;
3375 		trans->journal_entries_size = s->journal_entries_size;
3376 	}
3377 
3378 	trans->srcu_idx		= srcu_read_lock(&c->btree_trans_barrier);
3379 	trans->srcu_lock_time	= jiffies;
3380 	trans->srcu_held	= true;
3381 	trans_set_locked(trans, false);
3382 
3383 	closure_init_stack_release(&trans->ref);
3384 	return trans;
3385 }
3386 
3387 static void check_btree_paths_leaked(struct btree_trans *trans)
3388 {
3389 #ifdef CONFIG_BCACHEFS_DEBUG
3390 	struct bch_fs *c = trans->c;
3391 	struct btree_path *path;
3392 	unsigned i;
3393 
3394 	trans_for_each_path(trans, path, i)
3395 		if (path->ref)
3396 			goto leaked;
3397 	return;
3398 leaked:
3399 	bch_err(c, "btree paths leaked from %s!", trans->fn);
3400 	trans_for_each_path(trans, path, i)
3401 		if (path->ref)
3402 			printk(KERN_ERR "  btree %s %pS\n",
3403 			       bch2_btree_id_str(path->btree_id),
3404 			       (void *) path->ip_allocated);
3405 	/* Be noisy about this: */
3406 	bch2_fatal_error(c);
3407 #endif
3408 }
3409 
3410 void bch2_trans_put(struct btree_trans *trans)
3411 	__releases(&c->btree_trans_barrier)
3412 {
3413 	struct bch_fs *c = trans->c;
3414 
3415 	if (trans->restarted)
3416 		bch2_trans_in_restart_error(trans);
3417 
3418 	bch2_trans_unlock(trans);
3419 
3420 	trans_for_each_update(trans, i)
3421 		__btree_path_put(trans, trans->paths + i->path, true);
3422 	trans->nr_updates	= 0;
3423 
3424 	check_btree_paths_leaked(trans);
3425 
3426 	if (trans->srcu_held) {
3427 		check_srcu_held_too_long(trans);
3428 		srcu_read_unlock(&c->btree_trans_barrier, trans->srcu_idx);
3429 	}
3430 
3431 	if (unlikely(trans->journal_replay_not_finished))
3432 		bch2_journal_keys_put(c);
3433 
3434 	/*
3435 	 * trans->ref protects trans->locking_wait.task and the btree_paths
3436 	 * array; both are used by the lock cycle detector
3437 	 */
3438 	closure_return_sync(&trans->ref);
3439 	trans->locking_wait.task = NULL;
3440 
3441 #ifdef CONFIG_BCACHEFS_DEBUG
3442 	darray_exit(&trans->last_restarted_trace);
3443 #endif
3444 
3445 	unsigned long *paths_allocated = trans->paths_allocated;
3446 	trans->paths_allocated	= NULL;
3447 	trans->paths		= NULL;
3448 
3449 	if (paths_allocated != trans->_paths_allocated)
3450 		kvfree_rcu_mightsleep(paths_allocated);
3451 
3452 	if (trans->used_mempool)
3453 		mempool_free(trans->mem, &c->btree_trans_mem_pool);
3454 	else
3455 		kfree(trans->mem);
3456 
3457 	/* Userspace doesn't have a real percpu implementation: */
3458 	if (IS_ENABLED(__KERNEL__))
3459 		trans = this_cpu_xchg(c->btree_trans_bufs->trans, trans);
3460 
3461 	if (trans) {
3462 		seqmutex_lock(&c->btree_trans_lock);
3463 		list_del(&trans->list);
3464 		seqmutex_unlock(&c->btree_trans_lock);
3465 
3466 		mempool_free(trans, &c->btree_trans_pool);
3467 	}
3468 }
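
/*
 * Example (a sketch): the transaction lifecycle - bch2_trans_get() (the
 * wrapper around __bch2_trans_get(), also used in bch2_fs_btree_iter_init()
 * below), a body with its own retry handling, then bch2_trans_put().
 * do_stuff() is hypothetical:
 *
 *	struct btree_trans *trans = bch2_trans_get(c);
 *	int ret = do_stuff(trans);
 *	bch2_trans_put(trans);
 */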
3469 
3470 bool bch2_current_has_btree_trans(struct bch_fs *c)
3471 {
3472 	seqmutex_lock(&c->btree_trans_lock);
3473 	struct btree_trans *trans;
3474 	bool ret = false;
3475 	list_for_each_entry(trans, &c->btree_trans_list, list)
3476 		if (trans->locking_wait.task == current &&
3477 		    trans->locked) {
3478 			ret = true;
3479 			break;
3480 		}
3481 	seqmutex_unlock(&c->btree_trans_lock);
3482 	return ret;
3483 }
3484 
3485 static void __maybe_unused
3486 bch2_btree_bkey_cached_common_to_text(struct printbuf *out,
3487 				      struct btree_bkey_cached_common *b)
3488 {
3489 	struct six_lock_count c = six_lock_counts(&b->lock);
3490 	struct task_struct *owner;
3491 	pid_t pid;
3492 
3493 	rcu_read_lock();
3494 	owner = READ_ONCE(b->lock.owner);
3495 	pid = owner ? owner->pid : 0;
3496 	rcu_read_unlock();
3497 
3498 	prt_printf(out, "\t%px %c ", b, b->cached ? 'c' : 'b');
3499 	bch2_btree_id_to_text(out, b->btree_id);
3500 	prt_printf(out, " l=%u:", b->level);
3501 	bch2_bpos_to_text(out, btree_node_pos(b));
3502 
3503 	prt_printf(out, "\t locks %u:%u:%u held by pid %u",
3504 		   c.n[0], c.n[1], c.n[2], pid);
3505 }
3506 
3507 void bch2_btree_trans_to_text(struct printbuf *out, struct btree_trans *trans)
3508 {
3509 	struct btree_bkey_cached_common *b;
3510 	static char lock_types[] = { 'r', 'i', 'w' };
3511 	struct task_struct *task = READ_ONCE(trans->locking_wait.task);
3512 	unsigned l, idx;
3513 
3514 	/* before rcu_read_lock(): */
3515 	bch2_printbuf_make_room(out, 4096);
3516 
3517 	if (!out->nr_tabstops) {
3518 		printbuf_tabstop_push(out, 16);
3519 		printbuf_tabstop_push(out, 32);
3520 	}
3521 
3522 	prt_printf(out, "%i %s\n", task ? task->pid : 0, trans->fn);
3523 
3524 	/* trans->paths is rcu protected vs. freeing */
3525 	rcu_read_lock();
3526 	out->atomic++;
3527 
3528 	struct btree_path *paths = rcu_dereference(trans->paths);
3529 	if (!paths)
3530 		goto out;
3531 
3532 	unsigned long *paths_allocated = trans_paths_allocated(paths);
3533 
3534 	trans_for_each_path_idx_from(paths_allocated, *trans_paths_nr(paths), idx, 1) {
3535 		struct btree_path *path = paths + idx;
3536 		if (!path->nodes_locked)
3537 			continue;
3538 
3539 		prt_printf(out, "  path %u %c ",
3540 			   idx,
3541 			   path->cached ? 'c' : 'b');
3542 		bch2_btree_id_to_text(out, path->btree_id);
3543 		prt_printf(out, " l=%u:", path->level);
3544 		bch2_bpos_to_text(out, path->pos);
3545 		prt_newline(out);
3546 
3547 		for (l = 0; l < BTREE_MAX_DEPTH; l++) {
3548 			if (btree_node_locked(path, l) &&
3549 			    !IS_ERR_OR_NULL(b = (void *) READ_ONCE(path->l[l].b))) {
3550 				prt_printf(out, "    %c l=%u ",
3551 					   lock_types[btree_node_locked_type(path, l)], l);
3552 				bch2_btree_bkey_cached_common_to_text(out, b);
3553 				prt_newline(out);
3554 			}
3555 		}
3556 	}
3557 
3558 	b = READ_ONCE(trans->locking);
3559 	if (b) {
3560 		prt_printf(out, "  blocked for %lluus on\n",
3561 			   div_u64(local_clock() - trans->locking_wait.start_time, 1000));
3562 		prt_printf(out, "    %c", lock_types[trans->locking_wait.lock_want]);
3563 		bch2_btree_bkey_cached_common_to_text(out, b);
3564 		prt_newline(out);
3565 	}
3566 out:
3567 	--out->atomic;
3568 	rcu_read_unlock();
3569 }
3570 
3571 void bch2_fs_btree_iter_exit(struct bch_fs *c)
3572 {
3573 	struct btree_transaction_stats *s;
3574 	struct btree_trans *trans;
3575 	int cpu;
3576 
3577 	if (c->btree_trans_bufs)
3578 		for_each_possible_cpu(cpu) {
3579 			struct btree_trans *trans =
3580 				per_cpu_ptr(c->btree_trans_bufs, cpu)->trans;
3581 
3582 			if (trans) {
3583 				seqmutex_lock(&c->btree_trans_lock);
3584 				list_del(&trans->list);
3585 				seqmutex_unlock(&c->btree_trans_lock);
3586 			}
3587 			kfree(trans);
3588 		}
3589 	free_percpu(c->btree_trans_bufs);
3590 
3591 	trans = list_first_entry_or_null(&c->btree_trans_list, struct btree_trans, list);
3592 	if (trans)
3593 		panic("%s leaked btree_trans\n", trans->fn);
3594 
3595 	for (s = c->btree_transaction_stats;
3596 	     s < c->btree_transaction_stats + ARRAY_SIZE(c->btree_transaction_stats);
3597 	     s++) {
3598 		kfree(s->max_paths_text);
3599 		bch2_time_stats_exit(&s->lock_hold_times);
3600 	}
3601 
3602 	if (c->btree_trans_barrier_initialized) {
3603 		synchronize_srcu_expedited(&c->btree_trans_barrier);
3604 		cleanup_srcu_struct(&c->btree_trans_barrier);
3605 	}
3606 	mempool_exit(&c->btree_trans_mem_pool);
3607 	mempool_exit(&c->btree_trans_pool);
3608 }
3609 
3610 void bch2_fs_btree_iter_init_early(struct bch_fs *c)
3611 {
3612 	struct btree_transaction_stats *s;
3613 
3614 	for (s = c->btree_transaction_stats;
3615 	     s < c->btree_transaction_stats + ARRAY_SIZE(c->btree_transaction_stats);
3616 	     s++) {
3617 		bch2_time_stats_init(&s->duration);
3618 		bch2_time_stats_init(&s->lock_hold_times);
3619 		mutex_init(&s->lock);
3620 	}
3621 
3622 	INIT_LIST_HEAD(&c->btree_trans_list);
3623 	seqmutex_init(&c->btree_trans_lock);
3624 }
3625 
3626 int bch2_fs_btree_iter_init(struct bch_fs *c)
3627 {
3628 	int ret;
3629 
3630 	c->btree_trans_bufs = alloc_percpu(struct btree_trans_buf);
3631 	if (!c->btree_trans_bufs)
3632 		return -ENOMEM;
3633 
3634 	ret   = mempool_init_kmalloc_pool(&c->btree_trans_pool, 1,
3635 					  sizeof(struct btree_trans)) ?:
3636 		mempool_init_kmalloc_pool(&c->btree_trans_mem_pool, 1,
3637 					  BTREE_TRANS_MEM_MAX) ?:
3638 		init_srcu_struct(&c->btree_trans_barrier);
3639 	if (ret)
3640 		return ret;
3641 
3642 	/*
3643 	 * static annotation (hackily done) for lock ordering of reclaim vs.
3644 	 * btree node locks:
3645 	 */
3646 #ifdef CONFIG_LOCKDEP
3647 	fs_reclaim_acquire(GFP_KERNEL);
3648 	struct btree_trans *trans = bch2_trans_get(c);
3649 	trans_set_locked(trans, false);
3650 	bch2_trans_put(trans);
3651 	fs_reclaim_release(GFP_KERNEL);
3652 #endif
3653 
3654 	c->btree_trans_barrier_initialized = true;
3655 	return 0;
3657 }
3658