xref: /linux/fs/bcachefs/btree_iter.c (revision ab0f4cedc3554f921691ce5b63d59e258154e799)
1 // SPDX-License-Identifier: GPL-2.0
2 
3 #include "bcachefs.h"
4 #include "bkey_methods.h"
5 #include "bkey_buf.h"
6 #include "btree_cache.h"
7 #include "btree_iter.h"
8 #include "btree_journal_iter.h"
9 #include "btree_key_cache.h"
10 #include "btree_locking.h"
11 #include "btree_update.h"
12 #include "debug.h"
13 #include "error.h"
14 #include "extents.h"
15 #include "journal.h"
16 #include "journal_io.h"
17 #include "replicas.h"
18 #include "snapshot.h"
19 #include "trace.h"
20 
21 #include <linux/random.h>
22 #include <linux/prefetch.h>
23 
24 static inline void btree_path_list_remove(struct btree_trans *, struct btree_path *);
25 static inline void btree_path_list_add(struct btree_trans *,
26 			btree_path_idx_t, btree_path_idx_t);
27 
28 static inline unsigned long btree_iter_ip_allocated(struct btree_iter *iter)
29 {
30 #ifdef TRACK_PATH_ALLOCATED
31 	return iter->ip_allocated;
32 #else
33 	return 0;
34 #endif
35 }
36 
37 static btree_path_idx_t btree_path_alloc(struct btree_trans *, btree_path_idx_t);
38 static void bch2_trans_srcu_lock(struct btree_trans *);
39 
40 static inline int __btree_path_cmp(const struct btree_path *l,
41 				   enum btree_id	r_btree_id,
42 				   bool			r_cached,
43 				   struct bpos		r_pos,
44 				   unsigned		r_level)
45 {
46 	/*
47 	 * Must match lock ordering as defined by __bch2_btree_node_lock:
48 	 */
49 	return   cmp_int(l->btree_id,	r_btree_id) ?:
50 		 cmp_int((int) l->cached,	(int) r_cached) ?:
51 		 bpos_cmp(l->pos,	r_pos) ?:
52 		-cmp_int(l->level,	r_level);
53 }
54 
55 static inline int btree_path_cmp(const struct btree_path *l,
56 				 const struct btree_path *r)
57 {
58 	return __btree_path_cmp(l, r->btree_id, r->cached, r->pos, r->level);
59 }
60 
61 static inline struct bpos bkey_successor(struct btree_iter *iter, struct bpos p)
62 {
63 	/* Are we iterating over keys in all snapshots? */
64 	if (iter->flags & BTREE_ITER_all_snapshots) {
65 		p = bpos_successor(p);
66 	} else {
67 		p = bpos_nosnap_successor(p);
68 		p.snapshot = iter->snapshot;
69 	}
70 
71 	return p;
72 }
73 
74 static inline struct bpos bkey_predecessor(struct btree_iter *iter, struct bpos p)
75 {
76 	/* Are we iterating over keys in all snapshots? */
77 	if (iter->flags & BTREE_ITER_all_snapshots) {
78 		p = bpos_predecessor(p);
79 	} else {
80 		p = bpos_nosnap_predecessor(p);
81 		p.snapshot = iter->snapshot;
82 	}
83 
84 	return p;
85 }
86 
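/*
 * For extents iterators, the search key is the successor of iter->pos:
 * extent keys are indexed by their end position, so this finds the extent
 * that covers iter->pos.
 */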
87 static inline struct bpos btree_iter_search_key(struct btree_iter *iter)
88 {
89 	struct bpos pos = iter->pos;
90 
91 	if ((iter->flags & BTREE_ITER_is_extents) &&
92 	    !bkey_eq(pos, POS_MAX))
93 		pos = bkey_successor(iter, pos);
94 	return pos;
95 }
96 
97 static inline bool btree_path_pos_before_node(struct btree_path *path,
98 					      struct btree *b)
99 {
100 	return bpos_lt(path->pos, b->data->min_key);
101 }
102 
103 static inline bool btree_path_pos_after_node(struct btree_path *path,
104 					     struct btree *b)
105 {
106 	return bpos_gt(path->pos, b->key.k.p);
107 }
108 
109 static inline bool btree_path_pos_in_node(struct btree_path *path,
110 					  struct btree *b)
111 {
112 	return path->btree_id == b->c.btree_id &&
113 		!btree_path_pos_before_node(path, b) &&
114 		!btree_path_pos_after_node(path, b);
115 }
116 
117 /* Btree iterator: */
118 
119 #ifdef CONFIG_BCACHEFS_DEBUG
120 
121 static void bch2_btree_path_verify_cached(struct btree_trans *trans,
122 					  struct btree_path *path)
123 {
124 	struct bkey_cached *ck;
125 	bool locked = btree_node_locked(path, 0);
126 
127 	if (!bch2_btree_node_relock(trans, path, 0))
128 		return;
129 
130 	ck = (void *) path->l[0].b;
131 	BUG_ON(ck->key.btree_id != path->btree_id ||
132 	       !bkey_eq(ck->key.pos, path->pos));
133 
134 	if (!locked)
135 		btree_node_unlock(trans, path, 0);
136 }
137 
138 static void bch2_btree_path_verify_level(struct btree_trans *trans,
139 				struct btree_path *path, unsigned level)
140 {
141 	struct btree_path_level *l;
142 	struct btree_node_iter tmp;
143 	bool locked;
144 	struct bkey_packed *p, *k;
145 	struct printbuf buf1 = PRINTBUF;
146 	struct printbuf buf2 = PRINTBUF;
147 	struct printbuf buf3 = PRINTBUF;
148 	const char *msg;
149 
150 	if (!bch2_debug_check_iterators)
151 		return;
152 
153 	l	= &path->l[level];
154 	tmp	= l->iter;
155 	locked	= btree_node_locked(path, level);
156 
157 	if (path->cached) {
158 		if (!level)
159 			bch2_btree_path_verify_cached(trans, path);
160 		return;
161 	}
162 
163 	if (!btree_path_node(path, level))
164 		return;
165 
166 	if (!bch2_btree_node_relock_notrace(trans, path, level))
167 		return;
168 
169 	BUG_ON(!btree_path_pos_in_node(path, l->b));
170 
171 	bch2_btree_node_iter_verify(&l->iter, l->b);
172 
173 	/*
174 	 * For interior nodes, the iterator will have skipped past deleted keys:
175 	 */
176 	p = level
177 		? bch2_btree_node_iter_prev(&tmp, l->b)
178 		: bch2_btree_node_iter_prev_all(&tmp, l->b);
179 	k = bch2_btree_node_iter_peek_all(&l->iter, l->b);
180 
181 	if (p && bkey_iter_pos_cmp(l->b, p, &path->pos) >= 0) {
182 		msg = "before";
183 		goto err;
184 	}
185 
186 	if (k && bkey_iter_pos_cmp(l->b, k, &path->pos) < 0) {
187 		msg = "after";
188 		goto err;
189 	}
190 
191 	if (!locked)
192 		btree_node_unlock(trans, path, level);
193 	return;
194 err:
195 	bch2_bpos_to_text(&buf1, path->pos);
196 
197 	if (p) {
198 		struct bkey uk = bkey_unpack_key(l->b, p);
199 
200 		bch2_bkey_to_text(&buf2, &uk);
201 	} else {
202 		prt_printf(&buf2, "(none)");
203 	}
204 
205 	if (k) {
206 		struct bkey uk = bkey_unpack_key(l->b, k);
207 
208 		bch2_bkey_to_text(&buf3, &uk);
209 	} else {
210 		prt_printf(&buf3, "(none)");
211 	}
212 
213 	panic("path should be %s key at level %u:\n"
214 	      "path pos %s\n"
215 	      "prev key %s\n"
216 	      "cur  key %s\n",
217 	      msg, level, buf1.buf, buf2.buf, buf3.buf);
218 }
219 
220 static void bch2_btree_path_verify(struct btree_trans *trans,
221 				   struct btree_path *path)
222 {
223 	struct bch_fs *c = trans->c;
224 
225 	for (unsigned i = 0; i < (!path->cached ? BTREE_MAX_DEPTH : 1); i++) {
226 		if (!path->l[i].b) {
227 			BUG_ON(!path->cached &&
228 			       bch2_btree_id_root(c, path->btree_id)->b->c.level > i);
229 			break;
230 		}
231 
232 		bch2_btree_path_verify_level(trans, path, i);
233 	}
234 
235 	bch2_btree_path_verify_locks(path);
236 }
237 
238 void bch2_trans_verify_paths(struct btree_trans *trans)
239 {
240 	struct btree_path *path;
241 	unsigned iter;
242 
243 	trans_for_each_path(trans, path, iter)
244 		bch2_btree_path_verify(trans, path);
245 }
246 
247 static void bch2_btree_iter_verify(struct btree_iter *iter)
248 {
249 	struct btree_trans *trans = iter->trans;
250 
251 	BUG_ON(!!(iter->flags & BTREE_ITER_cached) != btree_iter_path(trans, iter)->cached);
252 
253 	BUG_ON((iter->flags & BTREE_ITER_is_extents) &&
254 	       (iter->flags & BTREE_ITER_all_snapshots));
255 
256 	BUG_ON(!(iter->flags & BTREE_ITER_snapshot_field) &&
257 	       (iter->flags & BTREE_ITER_all_snapshots) &&
258 	       !btree_type_has_snapshot_field(iter->btree_id));
259 
260 	if (iter->update_path)
261 		bch2_btree_path_verify(trans, &trans->paths[iter->update_path]);
262 	bch2_btree_path_verify(trans, btree_iter_path(trans, iter));
263 }
264 
265 static void bch2_btree_iter_verify_entry_exit(struct btree_iter *iter)
266 {
267 	BUG_ON((iter->flags & BTREE_ITER_filter_snapshots) &&
268 	       !iter->pos.snapshot);
269 
270 	BUG_ON(!(iter->flags & BTREE_ITER_all_snapshots) &&
271 	       iter->pos.snapshot != iter->snapshot);
272 
273 	BUG_ON(bkey_lt(iter->pos, bkey_start_pos(&iter->k)) ||
274 	       bkey_gt(iter->pos, iter->k.p));
275 }
276 
277 static int bch2_btree_iter_verify_ret(struct btree_iter *iter, struct bkey_s_c k)
278 {
279 	struct btree_trans *trans = iter->trans;
280 	struct btree_iter copy;
281 	struct bkey_s_c prev;
282 	int ret = 0;
283 
284 	if (!bch2_debug_check_iterators)
285 		return 0;
286 
287 	if (!(iter->flags & BTREE_ITER_filter_snapshots))
288 		return 0;
289 
290 	if (bkey_err(k) || !k.k)
291 		return 0;
292 
293 	BUG_ON(!bch2_snapshot_is_ancestor(trans->c,
294 					  iter->snapshot,
295 					  k.k->p.snapshot));
296 
297 	bch2_trans_iter_init(trans, &copy, iter->btree_id, iter->pos,
298 			     BTREE_ITER_nopreserve|
299 			     BTREE_ITER_all_snapshots);
300 	prev = bch2_btree_iter_prev(&copy);
301 	if (!prev.k)
302 		goto out;
303 
304 	ret = bkey_err(prev);
305 	if (ret)
306 		goto out;
307 
308 	if (bkey_eq(prev.k->p, k.k->p) &&
309 	    bch2_snapshot_is_ancestor(trans->c, iter->snapshot,
310 				      prev.k->p.snapshot) > 0) {
311 		struct printbuf buf1 = PRINTBUF, buf2 = PRINTBUF;
312 
313 		bch2_bkey_to_text(&buf1, k.k);
314 		bch2_bkey_to_text(&buf2, prev.k);
315 
316 		panic("iter snap %u\n"
317 		      "k    %s\n"
318 		      "prev %s\n",
319 		      iter->snapshot,
320 		      buf1.buf, buf2.buf);
321 	}
322 out:
323 	bch2_trans_iter_exit(trans, &copy);
324 	return ret;
325 }
326 
327 void bch2_assert_pos_locked(struct btree_trans *trans, enum btree_id id,
328 			    struct bpos pos, bool key_cache)
329 {
330 	bch2_trans_verify_not_unlocked(trans);
331 
332 	struct btree_path *path;
333 	struct trans_for_each_path_inorder_iter iter;
334 	struct printbuf buf = PRINTBUF;
335 
336 	btree_trans_sort_paths(trans);
337 
338 	trans_for_each_path_inorder(trans, path, iter) {
339 		int cmp = cmp_int(path->btree_id, id) ?:
340 			cmp_int(path->cached, key_cache);
341 
342 		if (cmp > 0)
343 			break;
344 		if (cmp < 0)
345 			continue;
346 
347 		if (!btree_node_locked(path, 0) ||
348 		    !path->should_be_locked)
349 			continue;
350 
351 		if (!key_cache) {
352 			if (bkey_ge(pos, path->l[0].b->data->min_key) &&
353 			    bkey_le(pos, path->l[0].b->key.k.p))
354 				return;
355 		} else {
356 			if (bkey_eq(pos, path->pos))
357 				return;
358 		}
359 	}
360 
361 	bch2_dump_trans_paths_updates(trans);
362 	bch2_bpos_to_text(&buf, pos);
363 
364 	panic("not locked: %s %s%s\n",
365 	      bch2_btree_id_str(id), buf.buf,
366 	      key_cache ? " cached" : "");
367 }
368 
369 #else
370 
371 static inline void bch2_btree_path_verify_level(struct btree_trans *trans,
372 						struct btree_path *path, unsigned l) {}
373 static inline void bch2_btree_path_verify(struct btree_trans *trans,
374 					  struct btree_path *path) {}
375 static inline void bch2_btree_iter_verify(struct btree_iter *iter) {}
376 static inline void bch2_btree_iter_verify_entry_exit(struct btree_iter *iter) {}
377 static inline int bch2_btree_iter_verify_ret(struct btree_iter *iter, struct bkey_s_c k) { return 0; }
378 
379 #endif
380 
381 /* Btree path: fixups after btree updates */
382 
383 static void btree_node_iter_set_set_pos(struct btree_node_iter *iter,
384 					struct btree *b,
385 					struct bset_tree *t,
386 					struct bkey_packed *k)
387 {
388 	struct btree_node_iter_set *set;
389 
390 	btree_node_iter_for_each(iter, set)
391 		if (set->end == t->end_offset) {
392 			set->k = __btree_node_key_to_offset(b, k);
393 			bch2_btree_node_iter_sort(iter, b);
394 			return;
395 		}
396 
397 	bch2_btree_node_iter_push(iter, b, k, btree_bkey_last(b, t));
398 }
399 
400 static void __bch2_btree_path_fix_key_modified(struct btree_path *path,
401 					       struct btree *b,
402 					       struct bkey_packed *where)
403 {
404 	struct btree_path_level *l = &path->l[b->c.level];
405 
406 	if (where != bch2_btree_node_iter_peek_all(&l->iter, l->b))
407 		return;
408 
409 	if (bkey_iter_pos_cmp(l->b, where, &path->pos) < 0)
410 		bch2_btree_node_iter_advance(&l->iter, l->b);
411 }
412 
413 void bch2_btree_path_fix_key_modified(struct btree_trans *trans,
414 				      struct btree *b,
415 				      struct bkey_packed *where)
416 {
417 	struct btree_path *path;
418 	unsigned i;
419 
420 	trans_for_each_path_with_node(trans, b, path, i) {
421 		__bch2_btree_path_fix_key_modified(path, b, where);
422 		bch2_btree_path_verify_level(trans, path, b->c.level);
423 	}
424 }
425 
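/*
 * Fix up a node iterator after keys in bset @t were modified: @clobber_u64s
 * worth of key space at @where was replaced by @new_u64s worth of new keys.
 * Adjusts the iterator's per-bset offsets so it still points at the position
 * it did before the modification, re-sorting the iterator if necessary.
 */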
426 static void __bch2_btree_node_iter_fix(struct btree_path *path,
427 				       struct btree *b,
428 				       struct btree_node_iter *node_iter,
429 				       struct bset_tree *t,
430 				       struct bkey_packed *where,
431 				       unsigned clobber_u64s,
432 				       unsigned new_u64s)
433 {
434 	const struct bkey_packed *end = btree_bkey_last(b, t);
435 	struct btree_node_iter_set *set;
436 	unsigned offset = __btree_node_key_to_offset(b, where);
437 	int shift = new_u64s - clobber_u64s;
438 	unsigned old_end = t->end_offset - shift;
439 	unsigned orig_iter_pos = node_iter->data[0].k;
440 	bool iter_current_key_modified =
441 		orig_iter_pos >= offset &&
442 		orig_iter_pos <= offset + clobber_u64s;
443 
444 	btree_node_iter_for_each(node_iter, set)
445 		if (set->end == old_end)
446 			goto found;
447 
448 	/* didn't find the bset in the iterator - might have to re-add it: */
449 	if (new_u64s &&
450 	    bkey_iter_pos_cmp(b, where, &path->pos) >= 0) {
451 		bch2_btree_node_iter_push(node_iter, b, where, end);
452 		goto fixup_done;
453 	} else {
454 		/* Iterator is after key that changed */
455 		return;
456 	}
457 found:
458 	set->end = t->end_offset;
459 
460 	/* Iterator hasn't gotten to the key that changed yet: */
461 	if (set->k < offset)
462 		return;
463 
464 	if (new_u64s &&
465 	    bkey_iter_pos_cmp(b, where, &path->pos) >= 0) {
466 		set->k = offset;
467 	} else if (set->k < offset + clobber_u64s) {
468 		set->k = offset + new_u64s;
469 		if (set->k == set->end)
470 			bch2_btree_node_iter_set_drop(node_iter, set);
471 	} else {
472 		/* Iterator is after key that changed */
473 		set->k = (int) set->k + shift;
474 		return;
475 	}
476 
477 	bch2_btree_node_iter_sort(node_iter, b);
478 fixup_done:
479 	if (node_iter->data[0].k != orig_iter_pos)
480 		iter_current_key_modified = true;
481 
482 	/*
483 	 * When a new key is added, and the node iterator now points to that
484 	 * key, the iterator might have skipped past deleted keys that should
485 	 * come after the key the iterator now points to. We have to rewind to
486 	 * before those deleted keys - otherwise
487 	 * bch2_btree_node_iter_prev_all() breaks:
488 	 */
489 	if (!bch2_btree_node_iter_end(node_iter) &&
490 	    iter_current_key_modified &&
491 	    b->c.level) {
492 		struct bkey_packed *k, *k2, *p;
493 
494 		k = bch2_btree_node_iter_peek_all(node_iter, b);
495 
496 		for_each_bset(b, t) {
497 			bool set_pos = false;
498 
499 			if (node_iter->data[0].end == t->end_offset)
500 				continue;
501 
502 			k2 = bch2_btree_node_iter_bset_pos(node_iter, b, t);
503 
504 			while ((p = bch2_bkey_prev_all(b, t, k2)) &&
505 			       bkey_iter_cmp(b, k, p) < 0) {
506 				k2 = p;
507 				set_pos = true;
508 			}
509 
510 			if (set_pos)
511 				btree_node_iter_set_set_pos(node_iter,
512 							    b, t, k2);
513 		}
514 	}
515 }
516 
517 void bch2_btree_node_iter_fix(struct btree_trans *trans,
518 			      struct btree_path *path,
519 			      struct btree *b,
520 			      struct btree_node_iter *node_iter,
521 			      struct bkey_packed *where,
522 			      unsigned clobber_u64s,
523 			      unsigned new_u64s)
524 {
525 	struct bset_tree *t = bch2_bkey_to_bset_inlined(b, where);
526 	struct btree_path *linked;
527 	unsigned i;
528 
529 	if (node_iter != &path->l[b->c.level].iter) {
530 		__bch2_btree_node_iter_fix(path, b, node_iter, t,
531 					   where, clobber_u64s, new_u64s);
532 
533 		if (bch2_debug_check_iterators)
534 			bch2_btree_node_iter_verify(node_iter, b);
535 	}
536 
537 	trans_for_each_path_with_node(trans, b, linked, i) {
538 		__bch2_btree_node_iter_fix(linked, b,
539 					   &linked->l[b->c.level].iter, t,
540 					   where, clobber_u64s, new_u64s);
541 		bch2_btree_path_verify_level(trans, linked, b->c.level);
542 	}
543 }
544 
545 /* Btree path level: pointer to a particular btree node and node iter */
546 
547 static inline struct bkey_s_c __btree_iter_unpack(struct bch_fs *c,
548 						  struct btree_path_level *l,
549 						  struct bkey *u,
550 						  struct bkey_packed *k)
551 {
552 	if (unlikely(!k)) {
553 		/*
554 		 * signal to bch2_btree_iter_peek_slot() that we're currently at
555 		 * a hole
556 		 */
557 		u->type = KEY_TYPE_deleted;
558 		return bkey_s_c_null;
559 	}
560 
561 	return bkey_disassemble(l->b, k, u);
562 }
563 
564 static inline struct bkey_s_c btree_path_level_peek_all(struct bch_fs *c,
565 							struct btree_path_level *l,
566 							struct bkey *u)
567 {
568 	return __btree_iter_unpack(c, l, u,
569 			bch2_btree_node_iter_peek_all(&l->iter, l->b));
570 }
571 
572 static inline struct bkey_s_c btree_path_level_peek(struct btree_trans *trans,
573 						    struct btree_path *path,
574 						    struct btree_path_level *l,
575 						    struct bkey *u)
576 {
577 	struct bkey_s_c k = __btree_iter_unpack(trans->c, l, u,
578 			bch2_btree_node_iter_peek(&l->iter, l->b));
579 
580 	path->pos = k.k ? k.k->p : l->b->key.k.p;
581 	trans->paths_sorted = false;
582 	bch2_btree_path_verify_level(trans, path, l - path->l);
583 	return k;
584 }
585 
586 static inline struct bkey_s_c btree_path_level_prev(struct btree_trans *trans,
587 						    struct btree_path *path,
588 						    struct btree_path_level *l,
589 						    struct bkey *u)
590 {
591 	struct bkey_s_c k = __btree_iter_unpack(trans->c, l, u,
592 			bch2_btree_node_iter_prev(&l->iter, l->b));
593 
594 	path->pos = k.k ? k.k->p : l->b->data->min_key;
595 	trans->paths_sorted = false;
596 	bch2_btree_path_verify_level(trans, path, l - path->l);
597 	return k;
598 }
599 
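/*
 * Advance the node iterator until it points at a key >= path->pos; gives up
 * and returns false if more than @max_advance keys would have to be skipped
 * (a non-positive @max_advance means no limit).
 */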
600 static inline bool btree_path_advance_to_pos(struct btree_path *path,
601 					     struct btree_path_level *l,
602 					     int max_advance)
603 {
604 	struct bkey_packed *k;
605 	int nr_advanced = 0;
606 
607 	while ((k = bch2_btree_node_iter_peek_all(&l->iter, l->b)) &&
608 	       bkey_iter_pos_cmp(l->b, k, &path->pos) < 0) {
609 		if (max_advance > 0 && nr_advanced >= max_advance)
610 			return false;
611 
612 		bch2_btree_node_iter_advance(&l->iter, l->b);
613 		nr_advanced++;
614 	}
615 
616 	return true;
617 }
618 
619 static inline void __btree_path_level_init(struct btree_path *path,
620 					   unsigned level)
621 {
622 	struct btree_path_level *l = &path->l[level];
623 
624 	bch2_btree_node_iter_init(&l->iter, l->b, &path->pos);
625 
626 	/*
627 	 * Iterators to interior nodes should always be pointed at the first
628 	 * non-whiteout key:
629 	 */
630 	if (level)
631 		bch2_btree_node_iter_peek(&l->iter, l->b);
632 }
633 
634 void bch2_btree_path_level_init(struct btree_trans *trans,
635 				struct btree_path *path,
636 				struct btree *b)
637 {
638 	BUG_ON(path->cached);
639 
640 	EBUG_ON(!btree_path_pos_in_node(path, b));
641 
642 	path->l[b->c.level].lock_seq = six_lock_seq(&b->c.lock);
643 	path->l[b->c.level].b = b;
644 	__btree_path_level_init(path, b->c.level);
645 }
646 
647 /* Btree path: fixups after btree node updates: */
648 
649 static void bch2_trans_revalidate_updates_in_node(struct btree_trans *trans, struct btree *b)
650 {
651 	struct bch_fs *c = trans->c;
652 
653 	trans_for_each_update(trans, i)
654 		if (!i->cached &&
655 		    i->level	== b->c.level &&
656 		    i->btree_id	== b->c.btree_id &&
657 		    bpos_cmp(i->k->k.p, b->data->min_key) >= 0 &&
658 		    bpos_cmp(i->k->k.p, b->data->max_key) <= 0) {
659 			i->old_v = bch2_btree_path_peek_slot(trans->paths + i->path, &i->old_k).v;
660 
661 			if (unlikely(trans->journal_replay_not_finished)) {
662 				struct bkey_i *j_k =
663 					bch2_journal_keys_peek_slot(c, i->btree_id, i->level,
664 								    i->k->k.p);
665 
666 				if (j_k) {
667 					i->old_k = j_k->k;
668 					i->old_v = &j_k->v;
669 				}
670 			}
671 		}
672 }
673 
674 /*
675  * A btree node is being replaced - update the iterator to point to the new
676  * node:
677  */
678 void bch2_trans_node_add(struct btree_trans *trans,
679 			 struct btree_path *path,
680 			 struct btree *b)
681 {
682 	struct btree_path *prev;
683 
684 	BUG_ON(!btree_path_pos_in_node(path, b));
685 
686 	while ((prev = prev_btree_path(trans, path)) &&
687 	       btree_path_pos_in_node(prev, b))
688 		path = prev;
689 
690 	for (;
691 	     path && btree_path_pos_in_node(path, b);
692 	     path = next_btree_path(trans, path))
693 		if (path->uptodate == BTREE_ITER_UPTODATE && !path->cached) {
694 			enum btree_node_locked_type t =
695 				btree_lock_want(path, b->c.level);
696 
697 			if (t != BTREE_NODE_UNLOCKED) {
698 				btree_node_unlock(trans, path, b->c.level);
699 				six_lock_increment(&b->c.lock, (enum six_lock_type) t);
700 				mark_btree_node_locked(trans, path, b->c.level, t);
701 			}
702 
703 			bch2_btree_path_level_init(trans, path, b);
704 		}
705 
706 	bch2_trans_revalidate_updates_in_node(trans, b);
707 }
708 
709 /*
710  * A btree node has been modified in such a way as to invalidate iterators - fix
711  * them:
712  */
713 void bch2_trans_node_reinit_iter(struct btree_trans *trans, struct btree *b)
714 {
715 	struct btree_path *path;
716 	unsigned i;
717 
718 	trans_for_each_path_with_node(trans, b, path, i)
719 		__btree_path_level_init(path, b->c.level);
720 
721 	bch2_trans_revalidate_updates_in_node(trans, b);
722 }
723 
724 /* Btree path: traverse, set_pos: */
725 
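/*
 * Lock the root node for this path: read the root pointer, take the lock,
 * then recheck that the root didn't change underneath us. Returns 1 (not an
 * error) if the btree is shallower than the depth we were asked for.
 */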
726 static inline int btree_path_lock_root(struct btree_trans *trans,
727 				       struct btree_path *path,
728 				       unsigned depth_want,
729 				       unsigned long trace_ip)
730 {
731 	struct bch_fs *c = trans->c;
732 	struct btree *b, **rootp = &bch2_btree_id_root(c, path->btree_id)->b;
733 	enum six_lock_type lock_type;
734 	unsigned i;
735 	int ret;
736 
737 	EBUG_ON(path->nodes_locked);
738 
739 	while (1) {
740 		b = READ_ONCE(*rootp);
741 		path->level = READ_ONCE(b->c.level);
742 
743 		if (unlikely(path->level < depth_want)) {
744 			/*
745 			 * the root is at a lower depth than the depth we want:
746 			 * got to the end of the btree, or we're walking nodes
747 			 * greater than some depth and there are no nodes >=
748 			 * that depth
749 			 */
750 			path->level = depth_want;
751 			for (i = path->level; i < BTREE_MAX_DEPTH; i++)
752 				path->l[i].b = NULL;
753 			return 1;
754 		}
755 
756 		lock_type = __btree_lock_want(path, path->level);
757 		ret = btree_node_lock(trans, path, &b->c,
758 				      path->level, lock_type, trace_ip);
759 		if (unlikely(ret)) {
760 			if (bch2_err_matches(ret, BCH_ERR_lock_fail_root_changed))
761 				continue;
762 			if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
763 				return ret;
764 			BUG();
765 		}
766 
767 		if (likely(b == READ_ONCE(*rootp) &&
768 			   b->c.level == path->level &&
769 			   !race_fault())) {
770 			for (i = 0; i < path->level; i++)
771 				path->l[i].b = ERR_PTR(-BCH_ERR_no_btree_node_lock_root);
772 			path->l[path->level].b = b;
773 			for (i = path->level + 1; i < BTREE_MAX_DEPTH; i++)
774 				path->l[i].b = NULL;
775 
776 			mark_btree_node_locked(trans, path, path->level,
777 					       (enum btree_node_locked_type) lock_type);
778 			bch2_btree_path_level_init(trans, path, b);
779 			return 0;
780 		}
781 
782 		six_unlock_type(&b->c.lock, lock_type);
783 	}
784 }
785 
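/*
 * Prefetch the next few child nodes at the level below, so that walking the
 * btree in order doesn't stall on every node read; more nodes are prefetched
 * before BCH_FS_started is set (i.e. during recovery).
 */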
786 noinline
787 static int btree_path_prefetch(struct btree_trans *trans, struct btree_path *path)
788 {
789 	struct bch_fs *c = trans->c;
790 	struct btree_path_level *l = path_l(path);
791 	struct btree_node_iter node_iter = l->iter;
792 	struct bkey_packed *k;
793 	struct bkey_buf tmp;
794 	unsigned nr = test_bit(BCH_FS_started, &c->flags)
795 		? (path->level > 1 ? 0 :  2)
796 		: (path->level > 1 ? 1 : 16);
797 	bool was_locked = btree_node_locked(path, path->level);
798 	int ret = 0;
799 
800 	bch2_bkey_buf_init(&tmp);
801 
802 	while (nr-- && !ret) {
803 		if (!bch2_btree_node_relock(trans, path, path->level))
804 			break;
805 
806 		bch2_btree_node_iter_advance(&node_iter, l->b);
807 		k = bch2_btree_node_iter_peek(&node_iter, l->b);
808 		if (!k)
809 			break;
810 
811 		bch2_bkey_buf_unpack(&tmp, c, l->b, k);
812 		ret = bch2_btree_node_prefetch(trans, path, tmp.k, path->btree_id,
813 					       path->level - 1);
814 	}
815 
816 	if (!was_locked)
817 		btree_node_unlock(trans, path, path->level);
818 
819 	bch2_bkey_buf_exit(&tmp, c);
820 	return ret;
821 }
822 
823 static int btree_path_prefetch_j(struct btree_trans *trans, struct btree_path *path,
824 				 struct btree_and_journal_iter *jiter)
825 {
826 	struct bch_fs *c = trans->c;
827 	struct bkey_s_c k;
828 	struct bkey_buf tmp;
829 	unsigned nr = test_bit(BCH_FS_started, &c->flags)
830 		? (path->level > 1 ? 0 :  2)
831 		: (path->level > 1 ? 1 : 16);
832 	bool was_locked = btree_node_locked(path, path->level);
833 	int ret = 0;
834 
835 	bch2_bkey_buf_init(&tmp);
836 
837 	while (nr-- && !ret) {
838 		if (!bch2_btree_node_relock(trans, path, path->level))
839 			break;
840 
841 		bch2_btree_and_journal_iter_advance(jiter);
842 		k = bch2_btree_and_journal_iter_peek(jiter);
843 		if (!k.k)
844 			break;
845 
846 		bch2_bkey_buf_reassemble(&tmp, c, k);
847 		ret = bch2_btree_node_prefetch(trans, path, tmp.k, path->btree_id,
848 					       path->level - 1);
849 	}
850 
851 	if (!was_locked)
852 		btree_node_unlock(trans, path, path->level);
853 
854 	bch2_bkey_buf_exit(&tmp, c);
855 	return ret;
856 }
857 
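/*
 * Stash a pointer to the in-memory child node in the parent's btree_ptr_v2
 * key (bp->mem_ptr), so that subsequent lookups of this child can find it
 * without going through the btree node cache hash table.
 */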
858 static noinline void btree_node_mem_ptr_set(struct btree_trans *trans,
859 					    struct btree_path *path,
860 					    unsigned plevel, struct btree *b)
861 {
862 	struct btree_path_level *l = &path->l[plevel];
863 	bool locked = btree_node_locked(path, plevel);
864 	struct bkey_packed *k;
865 	struct bch_btree_ptr_v2 *bp;
866 
867 	if (!bch2_btree_node_relock(trans, path, plevel))
868 		return;
869 
870 	k = bch2_btree_node_iter_peek_all(&l->iter, l->b);
871 	BUG_ON(k->type != KEY_TYPE_btree_ptr_v2);
872 
873 	bp = (void *) bkeyp_val(&l->b->format, k);
874 	bp->mem_ptr = (unsigned long)b;
875 
876 	if (!locked)
877 		btree_node_unlock(trans, path, plevel);
878 }
879 
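/*
 * Until journal replay has finished, the key for the child node we want to
 * descend into may exist only in the journal keys, not in the btree node
 * itself - so peek through a combined btree-and-journal iterator here.
 */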
880 static noinline int btree_node_iter_and_journal_peek(struct btree_trans *trans,
881 						     struct btree_path *path,
882 						     unsigned flags,
883 						     struct bkey_buf *out)
884 {
885 	struct bch_fs *c = trans->c;
886 	struct btree_path_level *l = path_l(path);
887 	struct btree_and_journal_iter jiter;
888 	struct bkey_s_c k;
889 	int ret = 0;
890 
891 	__bch2_btree_and_journal_iter_init_node_iter(trans, &jiter, l->b, l->iter, path->pos);
892 
893 	k = bch2_btree_and_journal_iter_peek(&jiter);
894 
895 	bch2_bkey_buf_reassemble(out, c, k);
896 
897 	if ((flags & BTREE_ITER_prefetch) &&
898 	    c->opts.btree_node_prefetch)
899 		ret = btree_path_prefetch_j(trans, path, &jiter);
900 
901 	bch2_btree_and_journal_iter_exit(&jiter);
902 	return ret;
903 }
904 
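/*
 * Descend one level: find the key for the child node at path->pos in the
 * parent, get and lock that child node, and point the path at it.
 */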
905 static __always_inline int btree_path_down(struct btree_trans *trans,
906 					   struct btree_path *path,
907 					   unsigned flags,
908 					   unsigned long trace_ip)
909 {
910 	struct bch_fs *c = trans->c;
911 	struct btree_path_level *l = path_l(path);
912 	struct btree *b;
913 	unsigned level = path->level - 1;
914 	enum six_lock_type lock_type = __btree_lock_want(path, level);
915 	struct bkey_buf tmp;
916 	int ret;
917 
918 	EBUG_ON(!btree_node_locked(path, path->level));
919 
920 	bch2_bkey_buf_init(&tmp);
921 
922 	if (unlikely(trans->journal_replay_not_finished)) {
923 		ret = btree_node_iter_and_journal_peek(trans, path, flags, &tmp);
924 		if (ret)
925 			goto err;
926 	} else {
927 		struct bkey_packed *k = bch2_btree_node_iter_peek(&l->iter, l->b);
928 		if (!k) {
929 			struct printbuf buf = PRINTBUF;
930 
931 			prt_str(&buf, "node not found at pos ");
932 			bch2_bpos_to_text(&buf, path->pos);
933 			prt_str(&buf, " within parent node ");
934 			bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&l->b->key));
935 
936 			bch2_fs_fatal_error(c, "%s", buf.buf);
937 			printbuf_exit(&buf);
938 			ret = -BCH_ERR_btree_need_topology_repair;
939 			goto err;
940 		}
941 
942 		bch2_bkey_buf_unpack(&tmp, c, l->b, k);
943 
944 		if ((flags & BTREE_ITER_prefetch) &&
945 		    c->opts.btree_node_prefetch) {
946 			ret = btree_path_prefetch(trans, path);
947 			if (ret)
948 				goto err;
949 		}
950 	}
951 
952 	b = bch2_btree_node_get(trans, path, tmp.k, level, lock_type, trace_ip);
953 	ret = PTR_ERR_OR_ZERO(b);
954 	if (unlikely(ret))
955 		goto err;
956 
957 	if (likely(!trans->journal_replay_not_finished &&
958 		   tmp.k->k.type == KEY_TYPE_btree_ptr_v2) &&
959 	    unlikely(b != btree_node_mem_ptr(tmp.k)))
960 		btree_node_mem_ptr_set(trans, path, level + 1, b);
961 
962 	if (btree_node_read_locked(path, level + 1))
963 		btree_node_unlock(trans, path, level + 1);
964 
965 	mark_btree_node_locked(trans, path, level,
966 			       (enum btree_node_locked_type) lock_type);
967 	path->level = level;
968 	bch2_btree_path_level_init(trans, path, b);
969 
970 	bch2_btree_path_verify_locks(path);
971 err:
972 	bch2_bkey_buf_exit(&tmp, c);
973 	return ret;
974 }
975 
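/*
 * Drop all locks, then retraverse every path in the transaction in sorted
 * order so that locks are reacquired in a consistent order; retries from the
 * top on transaction restart or memory allocation failure.
 */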
976 static int bch2_btree_path_traverse_all(struct btree_trans *trans)
977 {
978 	struct bch_fs *c = trans->c;
979 	struct btree_path *path;
980 	unsigned long trace_ip = _RET_IP_;
981 	unsigned i;
982 	int ret = 0;
983 
984 	if (trans->in_traverse_all)
985 		return -BCH_ERR_transaction_restart_in_traverse_all;
986 
987 	trans->in_traverse_all = true;
988 retry_all:
989 	trans->restarted = 0;
990 	trans->last_restarted_ip = 0;
991 
992 	trans_for_each_path(trans, path, i)
993 		path->should_be_locked = false;
994 
995 	btree_trans_sort_paths(trans);
996 
997 	bch2_trans_unlock(trans);
998 	cond_resched();
999 	trans->locked = true;
1000 
1001 	if (unlikely(trans->memory_allocation_failure)) {
1002 		struct closure cl;
1003 
1004 		closure_init_stack(&cl);
1005 
1006 		do {
1007 			ret = bch2_btree_cache_cannibalize_lock(trans, &cl);
1008 			closure_sync(&cl);
1009 		} while (ret);
1010 	}
1011 
1012 	/* Now, redo traversals in correct order: */
1013 	i = 0;
1014 	while (i < trans->nr_sorted) {
1015 		btree_path_idx_t idx = trans->sorted[i];
1016 
1017 		/*
1018 		 * Traversing a path can cause another path to be added at about
1019 		 * the same position:
1020 		 */
1021 		if (trans->paths[idx].uptodate) {
1022 			__btree_path_get(&trans->paths[idx], false);
1023 			ret = bch2_btree_path_traverse_one(trans, idx, 0, _THIS_IP_);
1024 			__btree_path_put(&trans->paths[idx], false);
1025 
1026 			if (bch2_err_matches(ret, BCH_ERR_transaction_restart) ||
1027 			    bch2_err_matches(ret, ENOMEM))
1028 				goto retry_all;
1029 			if (ret)
1030 				goto err;
1031 		} else {
1032 			i++;
1033 		}
1034 	}
1035 
1036 	/*
1037 	 * We used to assert that all paths had been traversed here
1038 	 * (path->uptodate < BTREE_ITER_NEED_TRAVERSE); however, since
1039 	 * path->should_be_locked is not set yet, we might have unlocked and
1040 	 * then failed to relock a path - that's fine.
1041 	 */
1042 err:
1043 	bch2_btree_cache_cannibalize_unlock(trans);
1044 
1045 	trans->in_traverse_all = false;
1046 
1047 	trace_and_count(c, trans_traverse_all, trans, trace_ip);
1048 	return ret;
1049 }
1050 
1051 static inline bool btree_path_check_pos_in_node(struct btree_path *path,
1052 						unsigned l, int check_pos)
1053 {
1054 	if (check_pos < 0 && btree_path_pos_before_node(path, path->l[l].b))
1055 		return false;
1056 	if (check_pos > 0 && btree_path_pos_after_node(path, path->l[l].b))
1057 		return false;
1058 	return true;
1059 }
1060 
1061 static inline bool btree_path_good_node(struct btree_trans *trans,
1062 					struct btree_path *path,
1063 					unsigned l, int check_pos)
1064 {
1065 	return is_btree_node(path, l) &&
1066 		bch2_btree_node_relock(trans, path, l) &&
1067 		btree_path_check_pos_in_node(path, l, check_pos);
1068 }
1069 
1070 static void btree_path_set_level_down(struct btree_trans *trans,
1071 				      struct btree_path *path,
1072 				      unsigned new_level)
1073 {
1074 	unsigned l;
1075 
1076 	path->level = new_level;
1077 
1078 	for (l = path->level + 1; l < BTREE_MAX_DEPTH; l++)
1079 		if (btree_lock_want(path, l) == BTREE_NODE_UNLOCKED)
1080 			btree_node_unlock(trans, path, l);
1081 
1082 	btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
1083 	bch2_btree_path_verify(trans, path);
1084 }
1085 
1086 static noinline unsigned __btree_path_up_until_good_node(struct btree_trans *trans,
1087 							 struct btree_path *path,
1088 							 int check_pos)
1089 {
1090 	unsigned i, l = path->level;
1091 again:
1092 	while (btree_path_node(path, l) &&
1093 	       !btree_path_good_node(trans, path, l, check_pos))
1094 		__btree_path_set_level_up(trans, path, l++);
1095 
1096 	/* If we need intent locks, take them too: */
1097 	for (i = l + 1;
1098 	     i < path->locks_want && btree_path_node(path, i);
1099 	     i++)
1100 		if (!bch2_btree_node_relock(trans, path, i)) {
1101 			while (l <= i)
1102 				__btree_path_set_level_up(trans, path, l++);
1103 			goto again;
1104 		}
1105 
1106 	return l;
1107 }
1108 
1109 static inline unsigned btree_path_up_until_good_node(struct btree_trans *trans,
1110 						     struct btree_path *path,
1111 						     int check_pos)
1112 {
1113 	return likely(btree_node_locked(path, path->level) &&
1114 		      btree_path_check_pos_in_node(path, path->level, check_pos))
1115 		? path->level
1116 		: __btree_path_up_until_good_node(trans, path, check_pos);
1117 }
1118 
1119 /*
1120  * This is the main state machine for walking down the btree - walks down to a
1121  * specified depth
1122  *
1123  * Returns 0 on success, -EIO on error (error reading in a btree node).
1124  *
1125  * On error, caller (peek_node()/peek_key()) must return NULL; the error is
1126  * stashed in the iterator and returned from bch2_trans_exit().
1127  */
1128 int bch2_btree_path_traverse_one(struct btree_trans *trans,
1129 				 btree_path_idx_t path_idx,
1130 				 unsigned flags,
1131 				 unsigned long trace_ip)
1132 {
1133 	struct btree_path *path = &trans->paths[path_idx];
1134 	unsigned depth_want = path->level;
1135 	int ret = -((int) trans->restarted);
1136 
1137 	if (unlikely(ret))
1138 		goto out;
1139 
1140 	if (unlikely(!trans->srcu_held))
1141 		bch2_trans_srcu_lock(trans);
1142 
1143 	/*
1144 	 * Ensure we obey path->should_be_locked: if it's set, we can't unlock
1145 	 * and re-traverse the path without a transaction restart:
1146 	 */
1147 	if (path->should_be_locked) {
1148 		ret = bch2_btree_path_relock(trans, path, trace_ip);
1149 		goto out;
1150 	}
1151 
1152 	if (path->cached) {
1153 		ret = bch2_btree_path_traverse_cached(trans, path, flags);
1154 		goto out;
1155 	}
1156 
1157 	path = &trans->paths[path_idx];
1158 
1159 	if (unlikely(path->level >= BTREE_MAX_DEPTH))
1160 		goto out_uptodate;
1161 
1162 	path->level = btree_path_up_until_good_node(trans, path, 0);
1163 	unsigned max_level = path->level;
1164 
1165 	EBUG_ON(btree_path_node(path, path->level) &&
1166 		!btree_node_locked(path, path->level));
1167 
1168 	/*
1169 	 * Note: path->l[path->level].b may be temporarily NULL here - normally
1170 	 * that would indicate to other code that we got to the end of the btree;
1171 	 * here it indicates that relocking the root failed - it's critical that
1172 	 * btree_path_lock_root() comes next and that it can't fail
1173 	 */
1174 	while (path->level > depth_want) {
1175 		ret = btree_path_node(path, path->level)
1176 			? btree_path_down(trans, path, flags, trace_ip)
1177 			: btree_path_lock_root(trans, path, depth_want, trace_ip);
1178 		if (unlikely(ret)) {
1179 			if (ret == 1) {
1180 				/*
1181 				 * No nodes at this level - got to the end of
1182 				 * the btree:
1183 				 */
1184 				ret = 0;
1185 				goto out;
1186 			}
1187 
1188 			__bch2_btree_path_unlock(trans, path);
1189 			path->level = depth_want;
1190 			path->l[path->level].b = ERR_PTR(ret);
1191 			goto out;
1192 		}
1193 	}
1194 
1195 	if (unlikely(max_level > path->level)) {
1196 		struct btree_path *linked;
1197 		unsigned iter;
1198 
1199 		trans_for_each_path_with_node(trans, path_l(path)->b, linked, iter)
1200 			for (unsigned j = path->level + 1; j < max_level; j++)
1201 				linked->l[j] = path->l[j];
1202 	}
1203 
1204 out_uptodate:
1205 	path->uptodate = BTREE_ITER_UPTODATE;
1206 out:
1207 	if (bch2_err_matches(ret, BCH_ERR_transaction_restart) != !!trans->restarted)
1208 		panic("ret %s (%i) trans->restarted %s (%i)\n",
1209 		      bch2_err_str(ret), ret,
1210 		      bch2_err_str(trans->restarted), trans->restarted);
1211 	bch2_btree_path_verify(trans, path);
1212 	return ret;
1213 }
1214 
1215 static inline void btree_path_copy(struct btree_trans *trans, struct btree_path *dst,
1216 			    struct btree_path *src)
1217 {
1218 	unsigned i, offset = offsetof(struct btree_path, pos);
1219 
1220 	memcpy((void *) dst + offset,
1221 	       (void *) src + offset,
1222 	       sizeof(struct btree_path) - offset);
1223 
1224 	for (i = 0; i < BTREE_MAX_DEPTH; i++) {
1225 		unsigned t = btree_node_locked_type(dst, i);
1226 
1227 		if (t != BTREE_NODE_UNLOCKED)
1228 			six_lock_increment(&dst->l[i].b->c.lock, t);
1229 	}
1230 }
1231 
1232 static btree_path_idx_t btree_path_clone(struct btree_trans *trans, btree_path_idx_t src,
1233 					 bool intent, unsigned long ip)
1234 {
1235 	btree_path_idx_t new = btree_path_alloc(trans, src);
1236 	btree_path_copy(trans, trans->paths + new, trans->paths + src);
1237 	__btree_path_get(trans->paths + new, intent);
1238 #ifdef TRACK_PATH_ALLOCATED
1239 	trans->paths[new].ip_allocated = ip;
1240 #endif
1241 	return new;
1242 }
1243 
1244 __flatten
1245 btree_path_idx_t __bch2_btree_path_make_mut(struct btree_trans *trans,
1246 			btree_path_idx_t path, bool intent, unsigned long ip)
1247 {
1248 	__btree_path_put(trans->paths + path, intent);
1249 	path = btree_path_clone(trans, path, intent, ip);
1250 	trans->paths[path].preserve = false;
1251 	return path;
1252 }
1253 
1254 btree_path_idx_t __must_check
1255 __bch2_btree_path_set_pos(struct btree_trans *trans,
1256 			  btree_path_idx_t path_idx, struct bpos new_pos,
1257 			  bool intent, unsigned long ip)
1258 {
1259 	int cmp = bpos_cmp(new_pos, trans->paths[path_idx].pos);
1260 
1261 	bch2_trans_verify_not_in_restart(trans);
1262 	EBUG_ON(!trans->paths[path_idx].ref);
1263 
1264 	path_idx = bch2_btree_path_make_mut(trans, path_idx, intent, ip);
1265 
1266 	struct btree_path *path = trans->paths + path_idx;
1267 	path->pos		= new_pos;
1268 	trans->paths_sorted	= false;
1269 
1270 	if (unlikely(path->cached)) {
1271 		btree_node_unlock(trans, path, 0);
1272 		path->l[0].b = ERR_PTR(-BCH_ERR_no_btree_node_up);
1273 		btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
1274 		goto out;
1275 	}
1276 
1277 	unsigned level = btree_path_up_until_good_node(trans, path, cmp);
1278 
1279 	if (btree_path_node(path, level)) {
1280 		struct btree_path_level *l = &path->l[level];
1281 
1282 		BUG_ON(!btree_node_locked(path, level));
1283 		/*
1284 		 * We might have to skip over many keys, or just a few: try
1285 		 * advancing the node iterator, and if we have to skip over too
1286 		 * many keys just reinit it (or if we're rewinding, since that
1287 		 * is expensive).
1288 		 */
1289 		if (cmp < 0 ||
1290 		    !btree_path_advance_to_pos(path, l, 8))
1291 			bch2_btree_node_iter_init(&l->iter, l->b, &path->pos);
1292 
1293 		/*
1294 		 * Iterators to interior nodes should always be pointed at the first
1295 		 * non-whiteout key:
1296 		 */
1297 		if (unlikely(level))
1298 			bch2_btree_node_iter_peek(&l->iter, l->b);
1299 	}
1300 
1301 	if (unlikely(level != path->level)) {
1302 		btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
1303 		__bch2_btree_path_unlock(trans, path);
1304 	}
1305 out:
1306 	bch2_btree_path_verify(trans, path);
1307 	return path_idx;
1308 }
1309 
1310 /* Btree path: main interface: */
1311 
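/*
 * Look for another path at exactly the same position (same btree, cached
 * flag, pos and level) that a freed path's role can be handed off to.
 */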
1312 static struct btree_path *have_path_at_pos(struct btree_trans *trans, struct btree_path *path)
1313 {
1314 	struct btree_path *sib;
1315 
1316 	sib = prev_btree_path(trans, path);
1317 	if (sib && !btree_path_cmp(sib, path))
1318 		return sib;
1319 
1320 	sib = next_btree_path(trans, path);
1321 	if (sib && !btree_path_cmp(sib, path))
1322 		return sib;
1323 
1324 	return NULL;
1325 }
1326 
1327 static struct btree_path *have_node_at_pos(struct btree_trans *trans, struct btree_path *path)
1328 {
1329 	struct btree_path *sib;
1330 
1331 	sib = prev_btree_path(trans, path);
1332 	if (sib && sib->level == path->level && path_l(sib)->b == path_l(path)->b)
1333 		return sib;
1334 
1335 	sib = next_btree_path(trans, path);
1336 	if (sib && sib->level == path->level && path_l(sib)->b == path_l(path)->b)
1337 		return sib;
1338 
1339 	return NULL;
1340 }
1341 
1342 static inline void __bch2_path_free(struct btree_trans *trans, btree_path_idx_t path)
1343 {
1344 	__bch2_btree_path_unlock(trans, trans->paths + path);
1345 	btree_path_list_remove(trans, trans->paths + path);
1346 	__clear_bit(path, trans->paths_allocated);
1347 }
1348 
1349 static bool bch2_btree_path_can_relock(struct btree_trans *trans, struct btree_path *path)
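/*
 * Check whether this path could still be relocked - i.e. no lock sequence
 * number has changed on any node it needs locked - without actually taking
 * any locks.
 */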
1350 {
1351 	unsigned l = path->level;
1352 
1353 	do {
1354 		if (!btree_path_node(path, l))
1355 			break;
1356 
1357 		if (!is_btree_node(path, l))
1358 			return false;
1359 
1360 		if (path->l[l].lock_seq != path->l[l].b->c.lock.seq)
1361 			return false;
1362 
1363 		l++;
1364 	} while (l < path->locks_want);
1365 
1366 	return true;
1367 }
1368 
1369 void bch2_path_put(struct btree_trans *trans, btree_path_idx_t path_idx, bool intent)
1370 {
1371 	struct btree_path *path = trans->paths + path_idx, *dup;
1372 
1373 	if (!__btree_path_put(path, intent))
1374 		return;
1375 
1376 	dup = path->preserve
1377 		? have_path_at_pos(trans, path)
1378 		: have_node_at_pos(trans, path);
1379 
1380 	if (!dup && !(!path->preserve && !is_btree_node(path, path->level)))
1381 		return;
1382 
1383 	if (path->should_be_locked && !trans->restarted) {
1384 		if (!dup)
1385 			return;
1386 
1387 		if (!(trans->locked
1388 		      ? bch2_btree_path_relock_norestart(trans, dup)
1389 		      : bch2_btree_path_can_relock(trans, dup)))
1390 			return;
1391 	}
1392 
1393 	if (dup) {
1394 		dup->preserve		|= path->preserve;
1395 		dup->should_be_locked	|= path->should_be_locked;
1396 	}
1397 
1398 	__bch2_path_free(trans, path_idx);
1399 }
1400 
1401 static void bch2_path_put_nokeep(struct btree_trans *trans, btree_path_idx_t path,
1402 				 bool intent)
1403 {
1404 	if (!__btree_path_put(trans->paths + path, intent))
1405 		return;
1406 
1407 	__bch2_path_free(trans, path);
1408 }
1409 
1410 void __noreturn bch2_trans_restart_error(struct btree_trans *trans, u32 restart_count)
1411 {
1412 	panic("trans->restart_count %u, should be %u, last restarted by %pS\n",
1413 	      trans->restart_count, restart_count,
1414 	      (void *) trans->last_begin_ip);
1415 }
1416 
1417 void __noreturn bch2_trans_in_restart_error(struct btree_trans *trans)
1418 {
1419 	panic("in transaction restart: %s, last restarted by %pS\n",
1420 	      bch2_err_str(trans->restarted),
1421 	      (void *) trans->last_restarted_ip);
1422 }
1423 
1424 void __noreturn bch2_trans_unlocked_error(struct btree_trans *trans)
1425 {
1426 	panic("trans should be locked, unlocked by %pS\n",
1427 	      (void *) trans->last_unlock_ip);
1428 }
1429 
1430 noinline __cold
1431 void bch2_trans_updates_to_text(struct printbuf *buf, struct btree_trans *trans)
1432 {
1433 	prt_printf(buf, "transaction updates for %s journal seq %llu\n",
1434 	       trans->fn, trans->journal_res.seq);
1435 	printbuf_indent_add(buf, 2);
1436 
1437 	trans_for_each_update(trans, i) {
1438 		struct bkey_s_c old = { &i->old_k, i->old_v };
1439 
1440 		prt_printf(buf, "update: btree=%s cached=%u %pS\n",
1441 		       bch2_btree_id_str(i->btree_id),
1442 		       i->cached,
1443 		       (void *) i->ip_allocated);
1444 
1445 		prt_printf(buf, "  old ");
1446 		bch2_bkey_val_to_text(buf, trans->c, old);
1447 		prt_newline(buf);
1448 
1449 		prt_printf(buf, "  new ");
1450 		bch2_bkey_val_to_text(buf, trans->c, bkey_i_to_s_c(i->k));
1451 		prt_newline(buf);
1452 	}
1453 
1454 	for (struct jset_entry *e = trans->journal_entries;
1455 	     e != btree_trans_journal_entries_top(trans);
1456 	     e = vstruct_next(e))
1457 		bch2_journal_entry_to_text(buf, trans->c, e);
1458 
1459 	printbuf_indent_sub(buf, 2);
1460 }
1461 
1462 noinline __cold
1463 void bch2_dump_trans_updates(struct btree_trans *trans)
1464 {
1465 	struct printbuf buf = PRINTBUF;
1466 
1467 	bch2_trans_updates_to_text(&buf, trans);
1468 	bch2_print_string_as_lines(KERN_ERR, buf.buf);
1469 	printbuf_exit(&buf);
1470 }
1471 
1472 static void bch2_btree_path_to_text_short(struct printbuf *out, struct btree_trans *trans, btree_path_idx_t path_idx)
1473 {
1474 	struct btree_path *path = trans->paths + path_idx;
1475 
1476 	prt_printf(out, "path: idx %2u ref %u:%u %c %c %c btree=%s l=%u pos ",
1477 		   path_idx, path->ref, path->intent_ref,
1478 		   path->preserve ? 'P' : ' ',
1479 		   path->should_be_locked ? 'S' : ' ',
1480 		   path->cached ? 'C' : 'B',
1481 		   bch2_btree_id_str(path->btree_id),
1482 		   path->level);
1483 	bch2_bpos_to_text(out, path->pos);
1484 
1485 #ifdef TRACK_PATH_ALLOCATED
1486 	prt_printf(out, " %pS", (void *) path->ip_allocated);
1487 #endif
1488 }
1489 
1490 static const char *btree_node_locked_str(enum btree_node_locked_type t)
1491 {
1492 	switch (t) {
1493 	case BTREE_NODE_UNLOCKED:
1494 		return "unlocked";
1495 	case BTREE_NODE_READ_LOCKED:
1496 		return "read";
1497 	case BTREE_NODE_INTENT_LOCKED:
1498 		return "intent";
1499 	case BTREE_NODE_WRITE_LOCKED:
1500 		return "write";
1501 	default:
1502 		return NULL;
1503 	}
1504 }
1505 
1506 void bch2_btree_path_to_text(struct printbuf *out, struct btree_trans *trans, btree_path_idx_t path_idx)
1507 {
1508 	bch2_btree_path_to_text_short(out, trans, path_idx);
1509 
1510 	struct btree_path *path = trans->paths + path_idx;
1511 
1512 	prt_printf(out, " uptodate %u locks_want %u", path->uptodate, path->locks_want);
1513 	prt_newline(out);
1514 
1515 	printbuf_indent_add(out, 2);
1516 	for (unsigned l = 0; l < BTREE_MAX_DEPTH; l++) {
1517 		prt_printf(out, "l=%u locks %s seq %u node ", l,
1518 			   btree_node_locked_str(btree_node_locked_type(path, l)),
1519 			   path->l[l].lock_seq);
1520 
1521 		int ret = PTR_ERR_OR_ZERO(path->l[l].b);
1522 		if (ret)
1523 			prt_str(out, bch2_err_str(ret));
1524 		else
1525 			prt_printf(out, "%px", path->l[l].b);
1526 		prt_newline(out);
1527 	}
1528 	printbuf_indent_sub(out, 2);
1529 }
1530 
1531 static noinline __cold
1532 void __bch2_trans_paths_to_text(struct printbuf *out, struct btree_trans *trans,
1533 				bool nosort)
1534 {
1535 	struct trans_for_each_path_inorder_iter iter;
1536 
1537 	if (!nosort)
1538 		btree_trans_sort_paths(trans);
1539 
1540 	trans_for_each_path_idx_inorder(trans, iter) {
1541 		bch2_btree_path_to_text_short(out, trans, iter.path_idx);
1542 		prt_newline(out);
1543 	}
1544 }
1545 
1546 noinline __cold
1547 void bch2_trans_paths_to_text(struct printbuf *out, struct btree_trans *trans)
1548 {
1549 	__bch2_trans_paths_to_text(out, trans, false);
1550 }
1551 
1552 static noinline __cold
1553 void __bch2_dump_trans_paths_updates(struct btree_trans *trans, bool nosort)
1554 {
1555 	struct printbuf buf = PRINTBUF;
1556 
1557 	__bch2_trans_paths_to_text(&buf, trans, nosort);
1558 	bch2_trans_updates_to_text(&buf, trans);
1559 
1560 	bch2_print_string_as_lines(KERN_ERR, buf.buf);
1561 	printbuf_exit(&buf);
1562 }
1563 
1564 noinline __cold
1565 void bch2_dump_trans_paths_updates(struct btree_trans *trans)
1566 {
1567 	__bch2_dump_trans_paths_updates(trans, false);
1568 }
1569 
1570 noinline __cold
1571 static void bch2_trans_update_max_paths(struct btree_trans *trans)
1572 {
1573 	struct btree_transaction_stats *s = btree_trans_stats(trans);
1574 	struct printbuf buf = PRINTBUF;
1575 	size_t nr = bitmap_weight(trans->paths_allocated, trans->nr_paths);
1576 
1577 	bch2_trans_paths_to_text(&buf, trans);
1578 
1579 	if (!buf.allocation_failure) {
1580 		mutex_lock(&s->lock);
1581 		if (nr > s->nr_max_paths) {
1582 			s->nr_max_paths = nr;
1583 			swap(s->max_paths_text, buf.buf);
1584 		}
1585 		mutex_unlock(&s->lock);
1586 	}
1587 
1588 	printbuf_exit(&buf);
1589 
1590 	trans->nr_paths_max = nr;
1591 }
1592 
1593 noinline __cold
1594 int __bch2_btree_trans_too_many_iters(struct btree_trans *trans)
1595 {
1596 	if (trace_trans_restart_too_many_iters_enabled()) {
1597 		struct printbuf buf = PRINTBUF;
1598 
1599 		bch2_trans_paths_to_text(&buf, trans);
1600 		trace_trans_restart_too_many_iters(trans, _THIS_IP_, buf.buf);
1601 		printbuf_exit(&buf);
1602 	}
1603 
1604 	count_event(trans->c, trans_restart_too_many_iters);
1605 
1606 	return btree_trans_restart(trans, BCH_ERR_transaction_restart_too_many_iters);
1607 }
1608 
1609 static noinline void btree_path_overflow(struct btree_trans *trans)
1610 {
1611 	bch2_dump_trans_paths_updates(trans);
1612 	bch_err(trans->c, "trans path overflow");
1613 }
1614 
1615 static noinline void btree_paths_realloc(struct btree_trans *trans)
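/*
 * Double the transaction's paths/sorted/updates arrays: everything lives in
 * one allocation, so allocate a bigger buffer, copy the old contents over,
 * and publish the new pointers with rcu_assign_pointer() for the benefit of
 * lockless readers.
 */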
1616 {
1617 	unsigned nr = trans->nr_paths * 2;
1618 
1619 	void *p = kvzalloc(BITS_TO_LONGS(nr) * sizeof(unsigned long) +
1620 			  sizeof(struct btree_trans_paths) +
1621 			  nr * sizeof(struct btree_path) +
1622 			  nr * sizeof(btree_path_idx_t) + 8 +
1623 			  nr * sizeof(struct btree_insert_entry), GFP_KERNEL|__GFP_NOFAIL);
1624 
1625 	unsigned long *paths_allocated = p;
1626 	memcpy(paths_allocated, trans->paths_allocated, BITS_TO_LONGS(trans->nr_paths) * sizeof(unsigned long));
1627 	p += BITS_TO_LONGS(nr) * sizeof(unsigned long);
1628 
1629 	p += sizeof(struct btree_trans_paths);
1630 	struct btree_path *paths = p;
1631 	*trans_paths_nr(paths) = nr;
1632 	memcpy(paths, trans->paths, trans->nr_paths * sizeof(struct btree_path));
1633 	p += nr * sizeof(struct btree_path);
1634 
1635 	btree_path_idx_t *sorted = p;
1636 	memcpy(sorted, trans->sorted, trans->nr_sorted * sizeof(btree_path_idx_t));
1637 	p += nr * sizeof(btree_path_idx_t) + 8;
1638 
1639 	struct btree_insert_entry *updates = p;
1640 	memcpy(updates, trans->updates, trans->nr_paths * sizeof(struct btree_insert_entry));
1641 
1642 	unsigned long *old = trans->paths_allocated;
1643 
1644 	rcu_assign_pointer(trans->paths_allocated,	paths_allocated);
1645 	rcu_assign_pointer(trans->paths,		paths);
1646 	rcu_assign_pointer(trans->sorted,		sorted);
1647 	rcu_assign_pointer(trans->updates,		updates);
1648 
1649 	trans->nr_paths		= nr;
1650 
1651 	if (old != trans->_paths_allocated)
1652 		kfree_rcu_mightsleep(old);
1653 }
1654 
1655 static inline btree_path_idx_t btree_path_alloc(struct btree_trans *trans,
1656 						btree_path_idx_t pos)
1657 {
1658 	btree_path_idx_t idx = find_first_zero_bit(trans->paths_allocated, trans->nr_paths);
1659 
1660 	if (unlikely(idx == trans->nr_paths)) {
1661 		if (trans->nr_paths == BTREE_ITER_MAX) {
1662 			btree_path_overflow(trans);
1663 			return 0;
1664 		}
1665 
1666 		btree_paths_realloc(trans);
1667 	}
1668 
1669 	/*
1670 	 * Do this before marking the new path as allocated, since it won't be
1671 	 * initialized yet:
1672 	 */
1673 	if (unlikely(idx > trans->nr_paths_max))
1674 		bch2_trans_update_max_paths(trans);
1675 
1676 	__set_bit(idx, trans->paths_allocated);
1677 
1678 	struct btree_path *path = &trans->paths[idx];
1679 	path->ref		= 0;
1680 	path->intent_ref	= 0;
1681 	path->nodes_locked	= 0;
1682 
1683 	btree_path_list_add(trans, pos, idx);
1684 	trans->paths_sorted = false;
1685 	return idx;
1686 }
1687 
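/*
 * Get a path to @pos in @btree_id: reuse an existing path at the same
 * position if there is one, otherwise allocate a new one; either way the
 * returned path has had a reference taken.
 */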
1688 btree_path_idx_t bch2_path_get(struct btree_trans *trans,
1689 			     enum btree_id btree_id, struct bpos pos,
1690 			     unsigned locks_want, unsigned level,
1691 			     unsigned flags, unsigned long ip)
1692 {
1693 	struct btree_path *path;
1694 	bool cached = flags & BTREE_ITER_cached;
1695 	bool intent = flags & BTREE_ITER_intent;
1696 	struct trans_for_each_path_inorder_iter iter;
1697 	btree_path_idx_t path_pos = 0, path_idx;
1698 
1699 	bch2_trans_verify_not_unlocked(trans);
1700 	bch2_trans_verify_not_in_restart(trans);
1701 	bch2_trans_verify_locks(trans);
1702 
1703 	btree_trans_sort_paths(trans);
1704 
1705 	trans_for_each_path_inorder(trans, path, iter) {
1706 		if (__btree_path_cmp(path,
1707 				     btree_id,
1708 				     cached,
1709 				     pos,
1710 				     level) > 0)
1711 			break;
1712 
1713 		path_pos = iter.path_idx;
1714 	}
1715 
1716 	if (path_pos &&
1717 	    trans->paths[path_pos].cached	== cached &&
1718 	    trans->paths[path_pos].btree_id	== btree_id &&
1719 	    trans->paths[path_pos].level	== level) {
1720 		__btree_path_get(trans->paths + path_pos, intent);
1721 		path_idx = bch2_btree_path_set_pos(trans, path_pos, pos, intent, ip);
1722 		path = trans->paths + path_idx;
1723 	} else {
1724 		path_idx = btree_path_alloc(trans, path_pos);
1725 		path = trans->paths + path_idx;
1726 
1727 		__btree_path_get(path, intent);
1728 		path->pos			= pos;
1729 		path->btree_id			= btree_id;
1730 		path->cached			= cached;
1731 		path->uptodate			= BTREE_ITER_NEED_TRAVERSE;
1732 		path->should_be_locked		= false;
1733 		path->level			= level;
1734 		path->locks_want		= locks_want;
1735 		path->nodes_locked		= 0;
1736 		for (unsigned i = 0; i < ARRAY_SIZE(path->l); i++)
1737 			path->l[i].b		= ERR_PTR(-BCH_ERR_no_btree_node_init);
1738 #ifdef TRACK_PATH_ALLOCATED
1739 		path->ip_allocated		= ip;
1740 #endif
1741 		trans->paths_sorted		= false;
1742 	}
1743 
1744 	if (!(flags & BTREE_ITER_nopreserve))
1745 		path->preserve = true;
1746 
1747 	if (path->intent_ref)
1748 		locks_want = max(locks_want, level + 1);
1749 
1750 	/*
1751 	 * If the path has locks_want greater than requested, we don't downgrade
1752 	 * it here - after a transaction restart caused by a btree node split
1753 	 * needing to upgrade locks, we might be putting/getting the iterator again.
1754 	 * Downgrading iterators only happens via bch2_trans_downgrade(), after
1755 	 * a successful transaction commit.
1756 	 */
1757 
1758 	locks_want = min(locks_want, BTREE_MAX_DEPTH);
1759 	if (locks_want > path->locks_want)
1760 		bch2_btree_path_upgrade_noupgrade_sibs(trans, path, locks_want, NULL);
1761 
1762 	return path_idx;
1763 }
1764 
1765 btree_path_idx_t bch2_path_get_unlocked_mut(struct btree_trans *trans,
1766 					    enum btree_id btree_id,
1767 					    unsigned level,
1768 					    struct bpos pos)
1769 {
1770 	btree_path_idx_t path_idx = bch2_path_get(trans, btree_id, pos, level + 1, level,
1771 			     BTREE_ITER_nopreserve|
1772 			     BTREE_ITER_intent, _RET_IP_);
1773 	path_idx = bch2_btree_path_make_mut(trans, path_idx, true, _RET_IP_);
1774 
1775 	struct btree_path *path = trans->paths + path_idx;
1776 	bch2_btree_path_downgrade(trans, path);
1777 	__bch2_btree_path_unlock(trans, path);
1778 	return path_idx;
1779 }
1780 
1781 struct bkey_s_c bch2_btree_path_peek_slot(struct btree_path *path, struct bkey *u)
1782 {
1783 
1784 	struct btree_path_level *l = path_l(path);
1785 	struct bkey_packed *_k;
1786 	struct bkey_s_c k;
1787 
1788 	if (unlikely(!l->b))
1789 		return bkey_s_c_null;
1790 
1791 	EBUG_ON(path->uptodate != BTREE_ITER_UPTODATE);
1792 	EBUG_ON(!btree_node_locked(path, path->level));
1793 
1794 	if (!path->cached) {
1795 		_k = bch2_btree_node_iter_peek_all(&l->iter, l->b);
1796 		k = _k ? bkey_disassemble(l->b, _k, u) : bkey_s_c_null;
1797 
1798 		EBUG_ON(k.k && bkey_deleted(k.k) && bpos_eq(k.k->p, path->pos));
1799 
1800 		if (!k.k || !bpos_eq(path->pos, k.k->p))
1801 			goto hole;
1802 	} else {
1803 		struct bkey_cached *ck = (void *) path->l[0].b;
1804 
1805 		EBUG_ON(ck &&
1806 			(path->btree_id != ck->key.btree_id ||
1807 			 !bkey_eq(path->pos, ck->key.pos)));
1808 		if (!ck || !ck->valid)
1809 			return bkey_s_c_null;
1810 
1811 		*u = ck->k->k;
1812 		k = bkey_i_to_s_c(ck->k);
1813 	}
1814 
1815 	return k;
1816 hole:
1817 	bkey_init(u);
1818 	u->p = path->pos;
1819 	return (struct bkey_s_c) { u, NULL };
1820 }
1821 
1822 
1823 void bch2_set_btree_iter_dontneed(struct btree_iter *iter)
1824 {
1825 	struct btree_trans *trans = iter->trans;
1826 
1827 	if (!iter->path || trans->restarted)
1828 		return;
1829 
1830 	struct btree_path *path = btree_iter_path(trans, iter);
1831 	path->preserve		= false;
1832 	if (path->ref == 1)
1833 		path->should_be_locked	= false;
1834 }
1835 /* Btree iterators: */
1836 
1837 int __must_check
1838 __bch2_btree_iter_traverse(struct btree_iter *iter)
1839 {
1840 	return bch2_btree_path_traverse(iter->trans, iter->path, iter->flags);
1841 }
1842 
1843 int __must_check
1844 bch2_btree_iter_traverse(struct btree_iter *iter)
1845 {
1846 	struct btree_trans *trans = iter->trans;
1847 	int ret;
1848 
1849 	bch2_trans_verify_not_unlocked(trans);
1850 
1851 	iter->path = bch2_btree_path_set_pos(trans, iter->path,
1852 					btree_iter_search_key(iter),
1853 					iter->flags & BTREE_ITER_intent,
1854 					btree_iter_ip_allocated(iter));
1855 
1856 	ret = bch2_btree_path_traverse(iter->trans, iter->path, iter->flags);
1857 	if (ret)
1858 		return ret;
1859 
1860 	struct btree_path *path = btree_iter_path(trans, iter);
1861 	if (btree_path_node(path, path->level))
1862 		btree_path_set_should_be_locked(path);
1863 	return 0;
1864 }
1865 
1866 /* Iterate across nodes (leaf and interior nodes) */
1867 
1868 struct btree *bch2_btree_iter_peek_node(struct btree_iter *iter)
1869 {
1870 	struct btree_trans *trans = iter->trans;
1871 	struct btree *b = NULL;
1872 	int ret;
1873 
1874 	EBUG_ON(trans->paths[iter->path].cached);
1875 	bch2_btree_iter_verify(iter);
1876 
1877 	ret = bch2_btree_path_traverse(trans, iter->path, iter->flags);
1878 	if (ret)
1879 		goto err;
1880 
1881 	struct btree_path *path = btree_iter_path(trans, iter);
1882 	b = btree_path_node(path, path->level);
1883 	if (!b)
1884 		goto out;
1885 
1886 	BUG_ON(bpos_lt(b->key.k.p, iter->pos));
1887 
1888 	bkey_init(&iter->k);
1889 	iter->k.p = iter->pos = b->key.k.p;
1890 
1891 	iter->path = bch2_btree_path_set_pos(trans, iter->path, b->key.k.p,
1892 					iter->flags & BTREE_ITER_intent,
1893 					btree_iter_ip_allocated(iter));
1894 	btree_path_set_should_be_locked(btree_iter_path(trans, iter));
1895 out:
1896 	bch2_btree_iter_verify_entry_exit(iter);
1897 	bch2_btree_iter_verify(iter);
1898 
1899 	return b;
1900 err:
1901 	b = ERR_PTR(ret);
1902 	goto out;
1903 }
1904 
1905 struct btree *bch2_btree_iter_peek_node_and_restart(struct btree_iter *iter)
1906 {
1907 	struct btree *b;
1908 
1909 	while (b = bch2_btree_iter_peek_node(iter),
1910 	       bch2_err_matches(PTR_ERR_OR_ZERO(b), BCH_ERR_transaction_restart))
1911 		bch2_trans_begin(iter->trans);
1912 
1913 	return b;
1914 }
1915 
1916 struct btree *bch2_btree_iter_next_node(struct btree_iter *iter)
1917 {
1918 	struct btree_trans *trans = iter->trans;
1919 	struct btree *b = NULL;
1920 	int ret;
1921 
1922 	EBUG_ON(trans->paths[iter->path].cached);
1923 	bch2_trans_verify_not_in_restart(trans);
1924 	bch2_btree_iter_verify(iter);
1925 
1926 	struct btree_path *path = btree_iter_path(trans, iter);
1927 
1928 	/* already at end? */
1929 	if (!btree_path_node(path, path->level))
1930 		return NULL;
1931 
1932 	/* at the root, i.e. no parent node - got to the end? */
1933 	if (!btree_path_node(path, path->level + 1)) {
1934 		btree_path_set_level_up(trans, path);
1935 		return NULL;
1936 	}
1937 
1938 	if (!bch2_btree_node_relock(trans, path, path->level + 1)) {
1939 		__bch2_btree_path_unlock(trans, path);
1940 		path->l[path->level].b		= ERR_PTR(-BCH_ERR_no_btree_node_relock);
1941 		path->l[path->level + 1].b	= ERR_PTR(-BCH_ERR_no_btree_node_relock);
1942 		btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
1943 		trace_and_count(trans->c, trans_restart_relock_next_node, trans, _THIS_IP_, path);
1944 		ret = btree_trans_restart(trans, BCH_ERR_transaction_restart_relock);
1945 		goto err;
1946 	}
1947 
1948 	b = btree_path_node(path, path->level + 1);
1949 
1950 	if (bpos_eq(iter->pos, b->key.k.p)) {
1951 		__btree_path_set_level_up(trans, path, path->level++);
1952 	} else {
1953 		if (btree_lock_want(path, path->level + 1) == BTREE_NODE_UNLOCKED)
1954 			btree_node_unlock(trans, path, path->level + 1);
1955 
1956 		/*
1957 		 * Haven't gotten to the end of the parent node: go back down to
1958 		 * the next child node
1959 		 */
1960 		iter->path = bch2_btree_path_set_pos(trans, iter->path,
1961 					bpos_successor(iter->pos),
1962 					iter->flags & BTREE_ITER_intent,
1963 					btree_iter_ip_allocated(iter));
1964 
1965 		path = btree_iter_path(trans, iter);
1966 		btree_path_set_level_down(trans, path, iter->min_depth);
1967 
1968 		ret = bch2_btree_path_traverse(trans, iter->path, iter->flags);
1969 		if (ret)
1970 			goto err;
1971 
1972 		path = btree_iter_path(trans, iter);
1973 		b = path->l[path->level].b;
1974 	}
1975 
1976 	bkey_init(&iter->k);
1977 	iter->k.p = iter->pos = b->key.k.p;
1978 
1979 	iter->path = bch2_btree_path_set_pos(trans, iter->path, b->key.k.p,
1980 					iter->flags & BTREE_ITER_intent,
1981 					btree_iter_ip_allocated(iter));
1982 	btree_path_set_should_be_locked(btree_iter_path(trans, iter));
1983 	EBUG_ON(btree_iter_path(trans, iter)->uptodate);
1984 out:
1985 	bch2_btree_iter_verify_entry_exit(iter);
1986 	bch2_btree_iter_verify(iter);
1987 
1988 	return b;
1989 err:
1990 	b = ERR_PTR(ret);
1991 	goto out;
1992 }
1993 
1994 /* Iterate across keys (in leaf nodes only) */
1995 
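/*
 * Advance the iterator past the last key it returned (iter->k.p); returns
 * false if it was already at the end of the btree.
 */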
1996 inline bool bch2_btree_iter_advance(struct btree_iter *iter)
1997 {
1998 	struct bpos pos = iter->k.p;
1999 	bool ret = !(iter->flags & BTREE_ITER_all_snapshots
2000 		     ? bpos_eq(pos, SPOS_MAX)
2001 		     : bkey_eq(pos, SPOS_MAX));
2002 
2003 	if (ret && !(iter->flags & BTREE_ITER_is_extents))
2004 		pos = bkey_successor(iter, pos);
2005 	bch2_btree_iter_set_pos(iter, pos);
2006 	return ret;
2007 }
2008 
2009 inline bool bch2_btree_iter_rewind(struct btree_iter *iter)
2010 {
2011 	struct bpos pos = bkey_start_pos(&iter->k);
2012 	bool ret = !(iter->flags & BTREE_ITER_all_snapshots
2013 		     ? bpos_eq(pos, POS_MIN)
2014 		     : bkey_eq(pos, POS_MIN));
2015 
2016 	if (ret && !(iter->flags & BTREE_ITER_is_extents))
2017 		pos = bkey_predecessor(iter, pos);
2018 	bch2_btree_iter_set_pos(iter, pos);
2019 	return ret;
2020 }
2021 
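/*
 * The peek_*_updates() helpers below overlay the transaction's own pending
 * (uncommitted) updates on top of what was found in the btree: if a pending
 * update exists at a position the iterator would return first, return that
 * update instead.
 */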
2022 static noinline
2023 void bch2_btree_trans_peek_prev_updates(struct btree_trans *trans, struct btree_iter *iter,
2024 					struct bkey_s_c *k)
2025 {
2026 	struct bpos end = path_l(btree_iter_path(trans, iter))->b->data->min_key;
2027 
2028 	trans_for_each_update(trans, i)
2029 		if (!i->key_cache_already_flushed &&
2030 		    i->btree_id == iter->btree_id &&
2031 		    bpos_le(i->k->k.p, iter->pos) &&
2032 		    bpos_ge(i->k->k.p, k->k ? k->k->p : end)) {
2033 			iter->k = i->k->k;
2034 			*k = bkey_i_to_s_c(i->k);
2035 		}
2036 }
2037 
2038 static noinline
2039 void bch2_btree_trans_peek_updates(struct btree_trans *trans, struct btree_iter *iter,
2040 				   struct bkey_s_c *k)
2041 {
2042 	struct btree_path *path = btree_iter_path(trans, iter);
2043 	struct bpos end = path_l(path)->b->key.k.p;
2044 
2045 	trans_for_each_update(trans, i)
2046 		if (!i->key_cache_already_flushed &&
2047 		    i->btree_id == iter->btree_id &&
2048 		    bpos_ge(i->k->k.p, path->pos) &&
2049 		    bpos_le(i->k->k.p, k->k ? k->k->p : end)) {
2050 			iter->k = i->k->k;
2051 			*k = bkey_i_to_s_c(i->k);
2052 		}
2053 }
2054 
2055 static noinline
2056 void bch2_btree_trans_peek_slot_updates(struct btree_trans *trans, struct btree_iter *iter,
2057 					struct bkey_s_c *k)
2058 {
2059 	trans_for_each_update(trans, i)
2060 		if (!i->key_cache_already_flushed &&
2061 		    i->btree_id == iter->btree_id &&
2062 		    bpos_eq(i->k->k.p, iter->pos)) {
2063 			iter->k = i->k->k;
2064 			*k = bkey_i_to_s_c(i->k);
2065 		}
2066 }
2067 
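/*
 * Keys from the journal that haven't yet been replayed into the btree are
 * overlaid on top of btree keys while journal replay is in progress: peek the
 * next such key in [path->pos, end_pos].
 */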
2068 static struct bkey_i *bch2_btree_journal_peek(struct btree_trans *trans,
2069 					      struct btree_iter *iter,
2070 					      struct bpos end_pos)
2071 {
2072 	struct btree_path *path = btree_iter_path(trans, iter);
2073 
2074 	return bch2_journal_keys_peek_upto(trans->c, iter->btree_id,
2075 					   path->level,
2076 					   path->pos,
2077 					   end_pos,
2078 					   &iter->journal_idx);
2079 }
2080 
2081 static noinline
2082 struct bkey_s_c btree_trans_peek_slot_journal(struct btree_trans *trans,
2083 					      struct btree_iter *iter)
2084 {
2085 	struct btree_path *path = btree_iter_path(trans, iter);
2086 	struct bkey_i *k = bch2_btree_journal_peek(trans, iter, path->pos);
2087 
2088 	if (k) {
2089 		iter->k = k->k;
2090 		return bkey_i_to_s_c(k);
2091 	} else {
2092 		return bkey_s_c_null;
2093 	}
2094 }
2095 
2096 static noinline
2097 struct bkey_s_c btree_trans_peek_journal(struct btree_trans *trans,
2098 					 struct btree_iter *iter,
2099 					 struct bkey_s_c k)
2100 {
2101 	struct btree_path *path = btree_iter_path(trans, iter);
2102 	struct bkey_i *next_journal =
2103 		bch2_btree_journal_peek(trans, iter,
2104 				k.k ? k.k->p : path_l(path)->b->key.k.p);
2105 
2106 	if (next_journal) {
2107 		iter->k = next_journal->k;
2108 		k = bkey_i_to_s_c(next_journal);
2109 	}
2110 
2111 	return k;
2112 }
2113 
2114 /*
2115  * Checks the btree key cache for a key at @pos, and returns it if present,
2116  * or bkey_s_c_null if not:
2117  */
2118 static noinline
2119 struct bkey_s_c btree_trans_peek_key_cache(struct btree_iter *iter, struct bpos pos)
2120 {
2121 	struct btree_trans *trans = iter->trans;
2122 	struct bch_fs *c = trans->c;
2123 	struct bkey u;
2124 	struct bkey_s_c k;
2125 	int ret;
2126 
2127 	bch2_trans_verify_not_in_restart(trans);
2128 	bch2_trans_verify_not_unlocked(trans);
2129 
2130 	if ((iter->flags & BTREE_ITER_key_cache_fill) &&
2131 	    bpos_eq(iter->pos, pos))
2132 		return bkey_s_c_null;
2133 
2134 	if (!bch2_btree_key_cache_find(c, iter->btree_id, pos))
2135 		return bkey_s_c_null;
2136 
2137 	if (!iter->key_cache_path)
2138 		iter->key_cache_path = bch2_path_get(trans, iter->btree_id, pos,
2139 						     iter->flags & BTREE_ITER_intent, 0,
2140 						     iter->flags|BTREE_ITER_cached|
2141 						     BTREE_ITER_cached_nofill,
2142 						     _THIS_IP_);
2143 
2144 	iter->key_cache_path = bch2_btree_path_set_pos(trans, iter->key_cache_path, pos,
2145 					iter->flags & BTREE_ITER_intent,
2146 					btree_iter_ip_allocated(iter));
2147 
2148 	ret =   bch2_btree_path_traverse(trans, iter->key_cache_path,
2149 					 iter->flags|BTREE_ITER_cached) ?:
2150 		bch2_btree_path_relock(trans, btree_iter_path(trans, iter), _THIS_IP_);
2151 	if (unlikely(ret))
2152 		return bkey_s_c_err(ret);
2153 
2154 	btree_path_set_should_be_locked(trans->paths + iter->key_cache_path);
2155 
2156 	k = bch2_btree_path_peek_slot(trans->paths + iter->key_cache_path, &u);
2157 	if (k.k && !bkey_err(k)) {
2158 		iter->k = u;
2159 		k.k = &iter->k;
2160 	}
2161 	return k;
2162 }
2163 
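/*
 * Core of bch2_btree_iter_peek(): walk forwards from @search_key, merging in
 * keys from the key cache, the journal and the transaction's pending updates,
 * and skipping whiteouts, until we find a real key or hit the end of the
 * btree.
 */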
2164 static struct bkey_s_c __bch2_btree_iter_peek(struct btree_iter *iter, struct bpos search_key)
2165 {
2166 	struct btree_trans *trans = iter->trans;
2167 	struct bkey_s_c k, k2;
2168 	int ret;
2169 
2170 	EBUG_ON(btree_iter_path(trans, iter)->cached);
2171 	bch2_btree_iter_verify(iter);
2172 
2173 	while (1) {
2174 		struct btree_path_level *l;
2175 
2176 		iter->path = bch2_btree_path_set_pos(trans, iter->path, search_key,
2177 					iter->flags & BTREE_ITER_intent,
2178 					btree_iter_ip_allocated(iter));
2179 
2180 		ret = bch2_btree_path_traverse(trans, iter->path, iter->flags);
2181 		if (unlikely(ret)) {
2182 			/* ensure that iter->k is consistent with iter->pos: */
2183 			bch2_btree_iter_set_pos(iter, iter->pos);
2184 			k = bkey_s_c_err(ret);
2185 			goto out;
2186 		}
2187 
2188 		struct btree_path *path = btree_iter_path(trans, iter);
2189 		l = path_l(path);
2190 
2191 		if (unlikely(!l->b)) {
2192 			/* No btree nodes at requested level: */
2193 			bch2_btree_iter_set_pos(iter, SPOS_MAX);
2194 			k = bkey_s_c_null;
2195 			goto out;
2196 		}
2197 
2198 		btree_path_set_should_be_locked(path);
2199 
2200 		k = btree_path_level_peek_all(trans->c, l, &iter->k);
2201 
2202 		if (unlikely(iter->flags & BTREE_ITER_with_key_cache) &&
2203 		    k.k &&
2204 		    (k2 = btree_trans_peek_key_cache(iter, k.k->p)).k) {
2205 			k = k2;
2206 			ret = bkey_err(k);
2207 			if (ret) {
2208 				bch2_btree_iter_set_pos(iter, iter->pos);
2209 				goto out;
2210 			}
2211 		}
2212 
2213 		if (unlikely(iter->flags & BTREE_ITER_with_journal))
2214 			k = btree_trans_peek_journal(trans, iter, k);
2215 
2216 		if (unlikely((iter->flags & BTREE_ITER_with_updates) &&
2217 			     trans->nr_updates))
2218 			bch2_btree_trans_peek_updates(trans, iter, &k);
2219 
2220 		if (k.k && bkey_deleted(k.k)) {
2221 			/*
2222 			 * If we've got a whiteout, and it's after the search
2223 			 * key, advance the search key to the whiteout instead
2224 			 * of just after the whiteout - it might be a btree
2225 			 * whiteout, with a real key at the same position, since
2226 			 * in the btree, deleted keys sort before non-deleted keys.
2227 			 */
2228 			search_key = !bpos_eq(search_key, k.k->p)
2229 				? k.k->p
2230 				: bpos_successor(k.k->p);
2231 			continue;
2232 		}
2233 
2234 		if (likely(k.k)) {
2235 			break;
2236 		} else if (likely(!bpos_eq(l->b->key.k.p, SPOS_MAX))) {
2237 			/* Advance to next leaf node: */
2238 			search_key = bpos_successor(l->b->key.k.p);
2239 		} else {
2240 			/* End of btree: */
2241 			bch2_btree_iter_set_pos(iter, SPOS_MAX);
2242 			k = bkey_s_c_null;
2243 			goto out;
2244 		}
2245 	}
2246 out:
2247 	bch2_btree_iter_verify(iter);
2248 
2249 	return k;
2250 }
2251 
2252 /**
2253  * bch2_btree_iter_peek_upto() - returns first key greater than or equal to
2254  * iterator's current position
2255  * @iter:	iterator to peek from
2256  * @end:	search limit: returns keys less than or equal to @end
2257  *
2258  * Returns:	key if found, or an error extractable with bkey_err().
2259  */
2260 struct bkey_s_c bch2_btree_iter_peek_upto(struct btree_iter *iter, struct bpos end)
2261 {
2262 	struct btree_trans *trans = iter->trans;
2263 	struct bpos search_key = btree_iter_search_key(iter);
2264 	struct bkey_s_c k;
2265 	struct bpos iter_pos;
2266 	int ret;
2267 
2268 	bch2_trans_verify_not_unlocked(trans);
2269 	EBUG_ON((iter->flags & BTREE_ITER_filter_snapshots) && bkey_eq(end, POS_MAX));
2270 
2271 	if (iter->update_path) {
2272 		bch2_path_put_nokeep(trans, iter->update_path,
2273 				     iter->flags & BTREE_ITER_intent);
2274 		iter->update_path = 0;
2275 	}
2276 
2277 	bch2_btree_iter_verify_entry_exit(iter);
2278 
2279 	while (1) {
2280 		k = __bch2_btree_iter_peek(iter, search_key);
2281 		if (unlikely(!k.k))
2282 			goto end;
2283 		if (unlikely(bkey_err(k)))
2284 			goto out_no_locked;
2285 
2286 		/*
2287 		 * We need to check against @end before FILTER_SNAPSHOTS because
2288 		 * if we get to a different inode than requested we might be
2289 		 * seeing keys for a different snapshot tree that will all be
2290 		 * filtered out.
2291 		 *
2292 		 * But we can't do the full check here, because bkey_start_pos()
2293 		 * isn't monotonically increasing before FILTER_SNAPSHOTS, and
2294 		 * that's what we check against in extents mode:
2295 		 */
2296 		if (unlikely(!(iter->flags & BTREE_ITER_is_extents)
2297 			     ? bkey_gt(k.k->p, end)
2298 			     : k.k->p.inode > end.inode))
2299 			goto end;
2300 
2301 		if (iter->update_path &&
2302 		    !bkey_eq(trans->paths[iter->update_path].pos, k.k->p)) {
2303 			bch2_path_put_nokeep(trans, iter->update_path,
2304 					     iter->flags & BTREE_ITER_intent);
2305 			iter->update_path = 0;
2306 		}
2307 
2308 		if ((iter->flags & BTREE_ITER_filter_snapshots) &&
2309 		    (iter->flags & BTREE_ITER_intent) &&
2310 		    !(iter->flags & BTREE_ITER_is_extents) &&
2311 		    !iter->update_path) {
2312 			struct bpos pos = k.k->p;
2313 
2314 			if (pos.snapshot < iter->snapshot) {
2315 				search_key = bpos_successor(k.k->p);
2316 				continue;
2317 			}
2318 
2319 			pos.snapshot = iter->snapshot;
2320 
2321 			/*
2322 			 * advance, same as on exit for iter->path, but only up
2323 			 * to snapshot
2324 			 */
2325 			__btree_path_get(trans->paths + iter->path, iter->flags & BTREE_ITER_intent);
2326 			iter->update_path = iter->path;
2327 
2328 			iter->update_path = bch2_btree_path_set_pos(trans,
2329 						iter->update_path, pos,
2330 						iter->flags & BTREE_ITER_intent,
2331 						_THIS_IP_);
2332 			ret = bch2_btree_path_traverse(trans, iter->update_path, iter->flags);
2333 			if (unlikely(ret)) {
2334 				k = bkey_s_c_err(ret);
2335 				goto out_no_locked;
2336 			}
2337 		}
2338 
2339 		/*
2340 		 * We can never have a key in a leaf node at POS_MAX, so
2341 		 * we don't have to check these successor() calls:
2342 		 */
2343 		if ((iter->flags & BTREE_ITER_filter_snapshots) &&
2344 		    !bch2_snapshot_is_ancestor(trans->c,
2345 					       iter->snapshot,
2346 					       k.k->p.snapshot)) {
2347 			search_key = bpos_successor(k.k->p);
2348 			continue;
2349 		}
2350 
2351 		if (bkey_whiteout(k.k) &&
2352 		    !(iter->flags & BTREE_ITER_all_snapshots)) {
2353 			search_key = bkey_successor(iter, k.k->p);
2354 			continue;
2355 		}
2356 
2357 		/*
2358 		 * iter->pos should be monotonically increasing, and always be
2359 		 * equal to the key we just returned - except extents can
2360 		 * straddle iter->pos:
2361 		 */
2362 		if (!(iter->flags & BTREE_ITER_is_extents))
2363 			iter_pos = k.k->p;
2364 		else
2365 			iter_pos = bkey_max(iter->pos, bkey_start_pos(k.k));
2366 
2367 		if (unlikely(!(iter->flags & BTREE_ITER_is_extents)
2368 			     ? bkey_gt(iter_pos, end)
2369 			     : bkey_ge(iter_pos, end)))
2370 			goto end;
2371 
2372 		break;
2373 	}
2374 
2375 	iter->pos = iter_pos;
2376 
2377 	iter->path = bch2_btree_path_set_pos(trans, iter->path, k.k->p,
2378 				iter->flags & BTREE_ITER_intent,
2379 				btree_iter_ip_allocated(iter));
2380 
2381 	btree_path_set_should_be_locked(btree_iter_path(trans, iter));
2382 out_no_locked:
2383 	if (iter->update_path) {
2384 		ret = bch2_btree_path_relock(trans, trans->paths + iter->update_path, _THIS_IP_);
2385 		if (unlikely(ret))
2386 			k = bkey_s_c_err(ret);
2387 		else
2388 			btree_path_set_should_be_locked(trans->paths + iter->update_path);
2389 	}
2390 
2391 	if (!(iter->flags & BTREE_ITER_all_snapshots))
2392 		iter->pos.snapshot = iter->snapshot;
2393 
2394 	ret = bch2_btree_iter_verify_ret(iter, k);
2395 	if (unlikely(ret)) {
2396 		bch2_btree_iter_set_pos(iter, iter->pos);
2397 		k = bkey_s_c_err(ret);
2398 	}
2399 
2400 	bch2_btree_iter_verify_entry_exit(iter);
2401 
2402 	return k;
2403 end:
2404 	bch2_btree_iter_set_pos(iter, end);
2405 	k = bkey_s_c_null;
2406 	goto out_no_locked;
2407 }
2408 
2409 /**
2410  * bch2_btree_iter_next() - returns first key greater than iterator's current
2411  * position
2412  * @iter:	iterator to peek from
2413  *
2414  * Returns:	key if found, or an error extractable with bkey_err().
2415  */
2416 struct bkey_s_c bch2_btree_iter_next(struct btree_iter *iter)
2417 {
2418 	if (!bch2_btree_iter_advance(iter))
2419 		return bkey_s_c_null;
2420 
2421 	return bch2_btree_iter_peek(iter);
2422 }
2423 
2424 /**
2425  * bch2_btree_iter_peek_prev() - returns first key less than or equal to
2426  * iterator's current position
2427  * @iter:	iterator to peek from
2428  *
2429  * Returns:	key if found, or an error extractable with bkey_err().
2430  */
2431 struct bkey_s_c bch2_btree_iter_peek_prev(struct btree_iter *iter)
2432 {
2433 	struct btree_trans *trans = iter->trans;
2434 	struct bpos search_key = iter->pos;
2435 	struct bkey_s_c k;
2436 	struct bkey saved_k;
2437 	const struct bch_val *saved_v;
2438 	btree_path_idx_t saved_path = 0;
2439 	int ret;
2440 
2441 	bch2_trans_verify_not_unlocked(trans);
2442 	EBUG_ON(btree_iter_path(trans, iter)->cached ||
2443 		btree_iter_path(trans, iter)->level);
2444 
2445 	if (iter->flags & BTREE_ITER_with_journal)
2446 		return bkey_s_c_err(-BCH_ERR_btree_iter_with_journal_not_supported);
2447 
2448 	bch2_btree_iter_verify(iter);
2449 	bch2_btree_iter_verify_entry_exit(iter);
2450 
2451 	if (iter->flags & BTREE_ITER_filter_snapshots)
2452 		search_key.snapshot = U32_MAX;
2453 
2454 	while (1) {
2455 		iter->path = bch2_btree_path_set_pos(trans, iter->path, search_key,
2456 						iter->flags & BTREE_ITER_intent,
2457 						btree_iter_ip_allocated(iter));
2458 
2459 		ret = bch2_btree_path_traverse(trans, iter->path, iter->flags);
2460 		if (unlikely(ret)) {
2461 			/* ensure that iter->k is consistent with iter->pos: */
2462 			bch2_btree_iter_set_pos(iter, iter->pos);
2463 			k = bkey_s_c_err(ret);
2464 			goto out_no_locked;
2465 		}
2466 
2467 		struct btree_path *path = btree_iter_path(trans, iter);
2468 
2469 		k = btree_path_level_peek(trans, path, &path->l[0], &iter->k);
2470 		if (!k.k ||
2471 		    ((iter->flags & BTREE_ITER_is_extents)
2472 		     ? bpos_ge(bkey_start_pos(k.k), search_key)
2473 		     : bpos_gt(k.k->p, search_key)))
2474 			k = btree_path_level_prev(trans, path, &path->l[0], &iter->k);
2475 
2476 		if (unlikely((iter->flags & BTREE_ITER_with_updates) &&
2477 			     trans->nr_updates))
2478 			bch2_btree_trans_peek_prev_updates(trans, iter, &k);
2479 
2480 		if (likely(k.k)) {
2481 			if (iter->flags & BTREE_ITER_filter_snapshots) {
2482 				if (k.k->p.snapshot == iter->snapshot)
2483 					goto got_key;
2484 
2485 				/*
2486 				 * If we have a saved candidate, and we're no
2487 				 * longer at the same _key_ (not pos), return
2488 				 * that candidate
2489 				 */
2490 				if (saved_path && !bkey_eq(k.k->p, saved_k.p)) {
2491 					bch2_path_put_nokeep(trans, iter->path,
2492 						      iter->flags & BTREE_ITER_intent);
2493 					iter->path = saved_path;
2494 					saved_path = 0;
2495 					iter->k	= saved_k;
2496 					k.v	= saved_v;
2497 					goto got_key;
2498 				}
2499 
2500 				if (bch2_snapshot_is_ancestor(trans->c,
2501 							      iter->snapshot,
2502 							      k.k->p.snapshot)) {
2503 					if (saved_path)
2504 						bch2_path_put_nokeep(trans, saved_path,
2505 						      iter->flags & BTREE_ITER_intent);
2506 					saved_path = btree_path_clone(trans, iter->path,
2507 								iter->flags & BTREE_ITER_intent,
2508 								_THIS_IP_);
2509 					path = btree_iter_path(trans, iter);
2510 					saved_k = *k.k;
2511 					saved_v = k.v;
2512 				}
2513 
2514 				search_key = bpos_predecessor(k.k->p);
2515 				continue;
2516 			}
2517 got_key:
2518 			if (bkey_whiteout(k.k) &&
2519 			    !(iter->flags & BTREE_ITER_all_snapshots)) {
2520 				search_key = bkey_predecessor(iter, k.k->p);
2521 				if (iter->flags & BTREE_ITER_filter_snapshots)
2522 					search_key.snapshot = U32_MAX;
2523 				continue;
2524 			}
2525 
2526 			btree_path_set_should_be_locked(path);
2527 			break;
2528 		} else if (likely(!bpos_eq(path->l[0].b->data->min_key, POS_MIN))) {
2529 			/* Advance to previous leaf node: */
2530 			search_key = bpos_predecessor(path->l[0].b->data->min_key);
2531 		} else {
2532 			/* Start of btree: */
2533 			bch2_btree_iter_set_pos(iter, POS_MIN);
2534 			k = bkey_s_c_null;
2535 			goto out_no_locked;
2536 		}
2537 	}
2538 
2539 	EBUG_ON(bkey_gt(bkey_start_pos(k.k), iter->pos));
2540 
2541 	/* Extents can straddle iter->pos: */
2542 	if (bkey_lt(k.k->p, iter->pos))
2543 		iter->pos = k.k->p;
2544 
2545 	if (iter->flags & BTREE_ITER_filter_snapshots)
2546 		iter->pos.snapshot = iter->snapshot;
2547 out_no_locked:
2548 	if (saved_path)
2549 		bch2_path_put_nokeep(trans, saved_path, iter->flags & BTREE_ITER_intent);
2550 
2551 	bch2_btree_iter_verify_entry_exit(iter);
2552 	bch2_btree_iter_verify(iter);
2553 
2554 	return k;
2555 }
2556 
2557 /**
2558  * bch2_btree_iter_prev() - returns first key less than iterator's current
2559  * position
2560  * @iter:	iterator to peek from
2561  *
2562  * Returns:	key if found, or an error extractable with bkey_err().
2563  */
2564 struct bkey_s_c bch2_btree_iter_prev(struct btree_iter *iter)
2565 {
2566 	if (!bch2_btree_iter_rewind(iter))
2567 		return bkey_s_c_null;
2568 
2569 	return bch2_btree_iter_peek_prev(iter);
2570 }
2571 
2572 struct bkey_s_c bch2_btree_iter_peek_slot(struct btree_iter *iter)
2573 {
2574 	struct btree_trans *trans = iter->trans;
2575 	struct bpos search_key;
2576 	struct bkey_s_c k;
2577 	int ret;
2578 
2579 	bch2_trans_verify_not_unlocked(trans);
2580 	bch2_btree_iter_verify(iter);
2581 	bch2_btree_iter_verify_entry_exit(iter);
2582 	EBUG_ON(btree_iter_path(trans, iter)->level && (iter->flags & BTREE_ITER_with_key_cache));
2583 
2584 	/* extents can't span inode numbers: */
2585 	if ((iter->flags & BTREE_ITER_is_extents) &&
2586 	    unlikely(iter->pos.offset == KEY_OFFSET_MAX)) {
2587 		if (iter->pos.inode == KEY_INODE_MAX)
2588 			return bkey_s_c_null;
2589 
2590 		bch2_btree_iter_set_pos(iter, bpos_nosnap_successor(iter->pos));
2591 	}
2592 
2593 	search_key = btree_iter_search_key(iter);
2594 	iter->path = bch2_btree_path_set_pos(trans, iter->path, search_key,
2595 					iter->flags & BTREE_ITER_intent,
2596 					btree_iter_ip_allocated(iter));
2597 
2598 	ret = bch2_btree_path_traverse(trans, iter->path, iter->flags);
2599 	if (unlikely(ret)) {
2600 		k = bkey_s_c_err(ret);
2601 		goto out_no_locked;
2602 	}
2603 
2604 	if ((iter->flags & BTREE_ITER_cached) ||
2605 	    !(iter->flags & (BTREE_ITER_is_extents|BTREE_ITER_filter_snapshots))) {
2606 		k = bkey_s_c_null;
2607 
2608 		if (unlikely((iter->flags & BTREE_ITER_with_updates) &&
2609 			     trans->nr_updates)) {
2610 			bch2_btree_trans_peek_slot_updates(trans, iter, &k);
2611 			if (k.k)
2612 				goto out;
2613 		}
2614 
2615 		if (unlikely(iter->flags & BTREE_ITER_with_journal) &&
2616 		    (k = btree_trans_peek_slot_journal(trans, iter)).k)
2617 			goto out;
2618 
2619 		if (unlikely(iter->flags & BTREE_ITER_with_key_cache) &&
2620 		    (k = btree_trans_peek_key_cache(iter, iter->pos)).k) {
2621 			if (!bkey_err(k))
2622 				iter->k = *k.k;
2623 			/* We're not returning a key from iter->path: */
2624 			goto out_no_locked;
2625 		}
2626 
2627 		k = bch2_btree_path_peek_slot(trans->paths + iter->path, &iter->k);
2628 		if (unlikely(!k.k))
2629 			goto out_no_locked;
2630 	} else {
2631 		struct bpos next;
2632 		struct bpos end = iter->pos;
2633 
2634 		if (iter->flags & BTREE_ITER_is_extents)
2635 			end.offset = U64_MAX;
2636 
2637 		EBUG_ON(btree_iter_path(trans, iter)->level);
2638 
2639 		if (iter->flags & BTREE_ITER_intent) {
2640 			struct btree_iter iter2;
2641 
2642 			bch2_trans_copy_iter(&iter2, iter);
2643 			k = bch2_btree_iter_peek_upto(&iter2, end);
2644 
2645 			if (k.k && !bkey_err(k)) {
2646 				swap(iter->key_cache_path, iter2.key_cache_path);
2647 				iter->k = iter2.k;
2648 				k.k = &iter->k;
2649 			}
2650 			bch2_trans_iter_exit(trans, &iter2);
2651 		} else {
2652 			struct bpos pos = iter->pos;
2653 
2654 			k = bch2_btree_iter_peek_upto(iter, end);
2655 			if (unlikely(bkey_err(k)))
2656 				bch2_btree_iter_set_pos(iter, pos);
2657 			else
2658 				iter->pos = pos;
2659 		}
2660 
2661 		if (unlikely(bkey_err(k)))
2662 			goto out_no_locked;
2663 
2664 		next = k.k ? bkey_start_pos(k.k) : POS_MAX;
2665 
2666 		if (bkey_lt(iter->pos, next)) {
2667 			bkey_init(&iter->k);
2668 			iter->k.p = iter->pos;
2669 
2670 			if (iter->flags & BTREE_ITER_is_extents) {
2671 				bch2_key_resize(&iter->k,
2672 						min_t(u64, KEY_SIZE_MAX,
2673 						      (next.inode == iter->pos.inode
2674 						       ? next.offset
2675 						       : KEY_OFFSET_MAX) -
2676 						      iter->pos.offset));
2677 				EBUG_ON(!iter->k.size);
2678 			}
2679 
2680 			k = (struct bkey_s_c) { &iter->k, NULL };
2681 		}
2682 	}
2683 out:
2684 	btree_path_set_should_be_locked(btree_iter_path(trans, iter));
2685 out_no_locked:
2686 	bch2_btree_iter_verify_entry_exit(iter);
2687 	bch2_btree_iter_verify(iter);
2688 	ret = bch2_btree_iter_verify_ret(iter, k);
2689 	if (unlikely(ret))
2690 		return bkey_s_c_err(ret);
2691 
2692 	return k;
2693 }
2694 
2695 struct bkey_s_c bch2_btree_iter_next_slot(struct btree_iter *iter)
2696 {
2697 	if (!bch2_btree_iter_advance(iter))
2698 		return bkey_s_c_null;
2699 
2700 	return bch2_btree_iter_peek_slot(iter);
2701 }
2702 
2703 struct bkey_s_c bch2_btree_iter_prev_slot(struct btree_iter *iter)
2704 {
2705 	if (!bch2_btree_iter_rewind(iter))
2706 		return bkey_s_c_null;
2707 
2708 	return bch2_btree_iter_peek_slot(iter);
2709 }
2710 
2711 struct bkey_s_c bch2_btree_iter_peek_and_restart_outlined(struct btree_iter *iter)
2712 {
2713 	struct bkey_s_c k;
2714 
2715 	while (btree_trans_too_many_iters(iter->trans) ||
2716 	       (k = bch2_btree_iter_peek_type(iter, iter->flags),
2717 		bch2_err_matches(bkey_err(k), BCH_ERR_transaction_restart)))
2718 		bch2_trans_begin(iter->trans);
2719 
2720 	return k;
2721 }
2722 
2723 /* new transactional stuff: */
2724 
2725 #ifdef CONFIG_BCACHEFS_DEBUG
2726 static void btree_trans_verify_sorted_refs(struct btree_trans *trans)
2727 {
2728 	struct btree_path *path;
2729 	unsigned i;
2730 
2731 	BUG_ON(trans->nr_sorted != bitmap_weight(trans->paths_allocated, trans->nr_paths) - 1);
2732 
2733 	trans_for_each_path(trans, path, i) {
2734 		BUG_ON(path->sorted_idx >= trans->nr_sorted);
2735 		BUG_ON(trans->sorted[path->sorted_idx] != i);
2736 	}
2737 
2738 	for (i = 0; i < trans->nr_sorted; i++) {
2739 		unsigned idx = trans->sorted[i];
2740 
2741 		BUG_ON(!test_bit(idx, trans->paths_allocated));
2742 		BUG_ON(trans->paths[idx].sorted_idx != i);
2743 	}
2744 }
2745 
2746 static void btree_trans_verify_sorted(struct btree_trans *trans)
2747 {
2748 	struct btree_path *path, *prev = NULL;
2749 	struct trans_for_each_path_inorder_iter iter;
2750 
2751 	if (!bch2_debug_check_iterators)
2752 		return;
2753 
2754 	trans_for_each_path_inorder(trans, path, iter) {
2755 		if (prev && btree_path_cmp(prev, path) > 0) {
2756 			__bch2_dump_trans_paths_updates(trans, true);
2757 			panic("trans paths out of order!\n");
2758 		}
2759 		prev = path;
2760 	}
2761 }
2762 #else
2763 static inline void btree_trans_verify_sorted_refs(struct btree_trans *trans) {}
2764 static inline void btree_trans_verify_sorted(struct btree_trans *trans) {}
2765 #endif
2766 
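/*
 * Sort trans->sorted (indices into trans->paths) according to
 * btree_path_cmp(), keeping each path's sorted_idx back-pointer in sync.
 */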
2767 void __bch2_btree_trans_sort_paths(struct btree_trans *trans)
2768 {
2769 	int i, l = 0, r = trans->nr_sorted, inc = 1;
2770 	bool swapped;
2771 
2772 	btree_trans_verify_sorted_refs(trans);
2773 
2774 	if (trans->paths_sorted)
2775 		goto out;
2776 
2777 	/*
2778 	 * Cocktail shaker sort: this is efficient because iterators will be
2779 	 * Cocktail shaker sort: this is efficient because the paths will usually be
2780 	 */
2781 	do {
2782 		swapped = false;
2783 
2784 		for (i = inc > 0 ? l : r - 2;
2785 		     i + 1 < r && i >= l;
2786 		     i += inc) {
2787 			if (btree_path_cmp(trans->paths + trans->sorted[i],
2788 					   trans->paths + trans->sorted[i + 1]) > 0) {
2789 				swap(trans->sorted[i], trans->sorted[i + 1]);
2790 				trans->paths[trans->sorted[i]].sorted_idx = i;
2791 				trans->paths[trans->sorted[i + 1]].sorted_idx = i + 1;
2792 				swapped = true;
2793 			}
2794 		}
2795 
2796 		if (inc > 0)
2797 			--r;
2798 		else
2799 			l++;
2800 		inc = -inc;
2801 	} while (swapped);
2802 
2803 	trans->paths_sorted = true;
2804 out:
2805 	btree_trans_verify_sorted(trans);
2806 }
2807 
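/*
 * Remove @path from the sorted list, shifting the tail down and fixing up the
 * sorted_idx back-pointers of the paths that moved.
 */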
2808 static inline void btree_path_list_remove(struct btree_trans *trans,
2809 					  struct btree_path *path)
2810 {
2811 	EBUG_ON(path->sorted_idx >= trans->nr_sorted);
2812 #ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2813 	trans->nr_sorted--;
2814 	memmove_u64s_down_small(trans->sorted + path->sorted_idx,
2815 				trans->sorted + path->sorted_idx + 1,
2816 				DIV_ROUND_UP(trans->nr_sorted - path->sorted_idx,
2817 					     sizeof(u64) / sizeof(btree_path_idx_t)));
2818 #else
2819 	array_remove_item(trans->sorted, trans->nr_sorted, path->sorted_idx);
2820 #endif
2821 	for (unsigned i = path->sorted_idx; i < trans->nr_sorted; i++)
2822 		trans->paths[trans->sorted[i]].sorted_idx = i;
2823 }
2824 
2825 static inline void btree_path_list_add(struct btree_trans *trans,
2826 				       btree_path_idx_t pos,
2827 				       btree_path_idx_t path_idx)
2828 {
2829 	struct btree_path *path = trans->paths + path_idx;
2830 
2831 	path->sorted_idx = pos ? trans->paths[pos].sorted_idx + 1 : trans->nr_sorted;
2832 
2833 #ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2834 	memmove_u64s_up_small(trans->sorted + path->sorted_idx + 1,
2835 			      trans->sorted + path->sorted_idx,
2836 			      DIV_ROUND_UP(trans->nr_sorted - path->sorted_idx,
2837 					   sizeof(u64) / sizeof(btree_path_idx_t)));
2838 	trans->nr_sorted++;
2839 	trans->sorted[path->sorted_idx] = path_idx;
2840 #else
2841 	array_insert_item(trans->sorted, trans->nr_sorted, path->sorted_idx, path_idx);
2842 #endif
2843 
2844 	for (unsigned i = path->sorted_idx; i < trans->nr_sorted; i++)
2845 		trans->paths[trans->sorted[i]].sorted_idx = i;
2846 
2847 	btree_trans_verify_sorted_refs(trans);
2848 }
2849 
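/*
 * Release the paths held by @iter (including the update and key cache paths,
 * if any) and clear it.
 */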
2850 void bch2_trans_iter_exit(struct btree_trans *trans, struct btree_iter *iter)
2851 {
2852 	if (iter->update_path)
2853 		bch2_path_put_nokeep(trans, iter->update_path,
2854 			      iter->flags & BTREE_ITER_intent);
2855 	if (iter->path)
2856 		bch2_path_put(trans, iter->path,
2857 			      iter->flags & BTREE_ITER_intent);
2858 	if (iter->key_cache_path)
2859 		bch2_path_put(trans, iter->key_cache_path,
2860 			      iter->flags & BTREE_ITER_intent);
2861 	iter->path		= 0;
2862 	iter->update_path	= 0;
2863 	iter->key_cache_path	= 0;
2864 	iter->trans		= NULL;
2865 }
2866 
2867 void bch2_trans_iter_init_outlined(struct btree_trans *trans,
2868 			  struct btree_iter *iter,
2869 			  enum btree_id btree_id, struct bpos pos,
2870 			  unsigned flags)
2871 {
2872 	bch2_trans_iter_init_common(trans, iter, btree_id, pos, 0, 0,
2873 			       bch2_btree_iter_flags(trans, btree_id, flags),
2874 			       _RET_IP_);
2875 }
2876 
2877 void bch2_trans_node_iter_init(struct btree_trans *trans,
2878 			       struct btree_iter *iter,
2879 			       enum btree_id btree_id,
2880 			       struct bpos pos,
2881 			       unsigned locks_want,
2882 			       unsigned depth,
2883 			       unsigned flags)
2884 {
2885 	flags |= BTREE_ITER_not_extents;
2886 	flags |= BTREE_ITER_snapshot_field;
2887 	flags |= BTREE_ITER_all_snapshots;
2888 
2889 	bch2_trans_iter_init_common(trans, iter, btree_id, pos, locks_want, depth,
2890 			       __bch2_btree_iter_flags(trans, btree_id, flags),
2891 			       _RET_IP_);
2892 
2893 	iter->min_depth	= depth;
2894 
2895 	struct btree_path *path = btree_iter_path(trans, iter);
2896 	BUG_ON(path->locks_want	 < min(locks_want, BTREE_MAX_DEPTH));
2897 	BUG_ON(path->level	!= depth);
2898 	BUG_ON(iter->min_depth	!= depth);
2899 }
2900 
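/*
 * Copy @src to @dst, taking new references on @src's paths; the key cache path
 * is not copied and will be re-created on demand.
 */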
2901 void bch2_trans_copy_iter(struct btree_iter *dst, struct btree_iter *src)
2902 {
2903 	struct btree_trans *trans = src->trans;
2904 
2905 	*dst = *src;
2906 #ifdef TRACK_PATH_ALLOCATED
2907 	dst->ip_allocated = _RET_IP_;
2908 #endif
2909 	if (src->path)
2910 		__btree_path_get(trans->paths + src->path, src->flags & BTREE_ITER_intent);
2911 	if (src->update_path)
2912 		__btree_path_get(trans->paths + src->update_path, src->flags & BTREE_ITER_intent);
2913 	dst->key_cache_path = 0;
2914 }
2915 
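/*
 * Bump allocator for memory with transaction lifetime: allocations come out of
 * trans->mem, which is grown with kmalloc/krealloc (or, as a fallback, the
 * btree_trans_mem_pool mempool). If an existing buffer had to be reallocated,
 * the transaction is restarted with BCH_ERR_transaction_restart_mem_realloced.
 */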
2916 void *__bch2_trans_kmalloc(struct btree_trans *trans, size_t size)
2917 {
2918 	struct bch_fs *c = trans->c;
2919 	unsigned new_top = trans->mem_top + size;
2920 	unsigned old_bytes = trans->mem_bytes;
2921 	unsigned new_bytes = roundup_pow_of_two(new_top);
2922 	int ret;
2923 	void *new_mem;
2924 	void *p;
2925 
2926 	WARN_ON_ONCE(new_bytes > BTREE_TRANS_MEM_MAX);
2927 
2928 	struct btree_transaction_stats *s = btree_trans_stats(trans);
2929 	s->max_mem = max(s->max_mem, new_bytes);
2930 
2931 	if (trans->used_mempool) {
2932 		if (trans->mem_bytes >= new_bytes)
2933 			goto out_change_top;
2934 
2935 		/* No more space in the mempool allocation, need to kmalloc a new buffer */
2936 		new_mem = kmalloc(new_bytes, GFP_NOWAIT|__GFP_NOWARN);
2937 		if (unlikely(!new_mem)) {
2938 			bch2_trans_unlock(trans);
2939 
2940 			new_mem = kmalloc(new_bytes, GFP_KERNEL);
2941 			if (!new_mem)
2942 				return ERR_PTR(-BCH_ERR_ENOMEM_trans_kmalloc);
2943 
2944 			ret = bch2_trans_relock(trans);
2945 			if (ret) {
2946 				kfree(new_mem);
2947 				return ERR_PTR(ret);
2948 			}
2949 		}
2950 		memcpy(new_mem, trans->mem, trans->mem_top);
2951 		trans->used_mempool = false;
2952 		mempool_free(trans->mem, &c->btree_trans_mem_pool);
2953 		goto out_new_mem;
2954 	}
2955 
2956 	new_mem = krealloc(trans->mem, new_bytes, GFP_NOWAIT|__GFP_NOWARN);
2957 	if (unlikely(!new_mem)) {
2958 		bch2_trans_unlock(trans);
2959 
2960 		new_mem = krealloc(trans->mem, new_bytes, GFP_KERNEL);
2961 		if (!new_mem && new_bytes <= BTREE_TRANS_MEM_MAX) {
2962 			new_mem = mempool_alloc(&c->btree_trans_mem_pool, GFP_KERNEL);
2963 			new_bytes = BTREE_TRANS_MEM_MAX;
2964 			memcpy(new_mem, trans->mem, trans->mem_top);
2965 			trans->used_mempool = true;
2966 			kfree(trans->mem);
2967 		}
2968 
2969 		if (!new_mem)
2970 			return ERR_PTR(-BCH_ERR_ENOMEM_trans_kmalloc);
2971 
2972 		trans->mem = new_mem;
2973 		trans->mem_bytes = new_bytes;
2974 
2975 		ret = bch2_trans_relock(trans);
2976 		if (ret)
2977 			return ERR_PTR(ret);
2978 	}
2979 out_new_mem:
2980 	trans->mem = new_mem;
2981 	trans->mem_bytes = new_bytes;
2982 
2983 	if (old_bytes) {
2984 		trace_and_count(c, trans_restart_mem_realloced, trans, _RET_IP_, new_bytes);
2985 		return ERR_PTR(btree_trans_restart(trans, BCH_ERR_transaction_restart_mem_realloced));
2986 	}
2987 out_change_top:
2988 	p = trans->mem + trans->mem_top;
2989 	trans->mem_top += size;
2990 	memset(p, 0, size);
2991 	return p;
2992 }
2993 
2994 static inline void check_srcu_held_too_long(struct btree_trans *trans)
2995 {
2996 	WARN(trans->srcu_held && time_after(jiffies, trans->srcu_lock_time + HZ * 10),
2997 	     "btree trans held srcu lock (delaying memory reclaim) for %lu seconds",
2998 	     (jiffies - trans->srcu_lock_time) / HZ);
2999 }
3000 
3001 void bch2_trans_srcu_unlock(struct btree_trans *trans)
3002 {
3003 	if (trans->srcu_held) {
3004 		struct bch_fs *c = trans->c;
3005 		struct btree_path *path;
3006 		unsigned i;
3007 
3008 		trans_for_each_path(trans, path, i)
3009 			if (path->cached && !btree_node_locked(path, 0))
3010 				path->l[0].b = ERR_PTR(-BCH_ERR_no_btree_node_srcu_reset);
3011 
3012 		check_srcu_held_too_long(trans);
3013 		srcu_read_unlock(&c->btree_trans_barrier, trans->srcu_idx);
3014 		trans->srcu_held = false;
3015 	}
3016 }
3017 
3018 static void bch2_trans_srcu_lock(struct btree_trans *trans)
3019 {
3020 	if (!trans->srcu_held) {
3021 		trans->srcu_idx = srcu_read_lock(&trans->c->btree_trans_barrier);
3022 		trans->srcu_lock_time	= jiffies;
3023 		trans->srcu_held = true;
3024 	}
3025 }
3026 
3027 /**
3028  * bch2_trans_begin() - reset a transaction after an interrupted attempt
3029  * @trans: transaction to reset
3030  *
3031  * Returns:	current restart counter, to be used with trans_was_restarted()
3032  *
3033  * While iterating over nodes or updating nodes, an attempt to lock a btree
3034  * node may return BCH_ERR_transaction_restart when the trylock fails. When
3035  * this occurs bch2_trans_begin() should be called and the transaction retried.
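 *
 * A typical retry loop (illustrative sketch; do_something() stands in for the
 * caller's transaction body) looks like:
 *
 *	do {
 *		bch2_trans_begin(trans);
 *		ret = do_something(trans);
 *	} while (bch2_err_matches(ret, BCH_ERR_transaction_restart));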
3036  */
3037 u32 bch2_trans_begin(struct btree_trans *trans)
3038 {
3039 	struct btree_path *path;
3040 	unsigned i;
3041 	u64 now;
3042 
3043 	bch2_trans_reset_updates(trans);
3044 
3045 	trans->restart_count++;
3046 	trans->mem_top			= 0;
3047 	trans->journal_entries		= NULL;
3048 
3049 	trans_for_each_path(trans, path, i) {
3050 		path->should_be_locked = false;
3051 
3052 		/*
3053 		 * If the transaction wasn't restarted, we're presumably doing
3054 		 * something new: don't keep iterators except the ones that are
3055 		 * in use - except for the subvolumes btree:
3056 		 */
3057 		if (!trans->restarted && path->btree_id != BTREE_ID_subvolumes)
3058 			path->preserve = false;
3059 
3060 		/*
3061 		 * XXX: we probably shouldn't be doing this if the transaction
3062 		 * was restarted, but currently we still overflow transaction
3063 		 * iterators if we do that
3064 		 */
3065 		if (!path->ref && !path->preserve)
3066 			__bch2_path_free(trans, i);
3067 		else
3068 			path->preserve = false;
3069 	}
3070 
3071 	now = local_clock();
3072 
3073 	if (!IS_ENABLED(CONFIG_BCACHEFS_NO_LATENCY_ACCT) &&
3074 	    time_after64(now, trans->last_begin_time + 10))
3075 		__bch2_time_stats_update(&btree_trans_stats(trans)->duration,
3076 					 trans->last_begin_time, now);
3077 
3078 	if (!trans->restarted &&
3079 	    (need_resched() ||
3080 	     time_after64(now, trans->last_begin_time + BTREE_TRANS_MAX_LOCK_HOLD_TIME_NS))) {
3081 		bch2_trans_unlock(trans);
3082 		cond_resched();
3083 		now = local_clock();
3084 	}
3085 	trans->last_begin_time = now;
3086 
3087 	if (unlikely(trans->srcu_held &&
3088 		     time_after(jiffies, trans->srcu_lock_time + msecs_to_jiffies(10))))
3089 		bch2_trans_srcu_unlock(trans);
3090 
3091 	trans->last_begin_ip = _RET_IP_;
3092 	trans->locked  = true;
3093 
3094 	if (trans->restarted) {
3095 		bch2_btree_path_traverse_all(trans);
3096 		trans->notrace_relock_fail = false;
3097 	}
3098 
3099 	bch2_trans_verify_not_unlocked(trans);
3100 	return trans->restart_count;
3101 }
3102 
3103 const char *bch2_btree_transaction_fns[BCH_TRANSACTIONS_NR] = { "(unknown)" };
3104 
3105 unsigned bch2_trans_get_fn_idx(const char *fn)
3106 {
3107 	for (unsigned i = 0; i < ARRAY_SIZE(bch2_btree_transaction_fns); i++)
3108 		if (!bch2_btree_transaction_fns[i] ||
3109 		    bch2_btree_transaction_fns[i] == fn) {
3110 			bch2_btree_transaction_fns[i] = fn;
3111 			return i;
3112 		}
3113 
3114 	pr_warn_once("BCH_TRANSACTIONS_NR not big enough!");
3115 	return 0;
3116 }
3117 
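/*
 * Allocate and initialize a btree_trans: reuse the per-cpu buffer when
 * available, otherwise allocate from the mempool and add it to
 * c->btree_trans_list; also takes the SRCU read lock on btree_trans_barrier
 * (which delays memory reclaim while held).
 */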
3118 struct btree_trans *__bch2_trans_get(struct bch_fs *c, unsigned fn_idx)
3119 	__acquires(&c->btree_trans_barrier)
3120 {
3121 	struct btree_trans *trans;
3122 
3123 	if (IS_ENABLED(__KERNEL__)) {
3124 		trans = this_cpu_xchg(c->btree_trans_bufs->trans, NULL);
3125 		if (trans) {
3126 			memset(trans, 0, offsetof(struct btree_trans, list));
3127 			goto got_trans;
3128 		}
3129 	}
3130 
3131 	trans = mempool_alloc(&c->btree_trans_pool, GFP_NOFS);
3132 	memset(trans, 0, sizeof(*trans));
3133 
3134 	seqmutex_lock(&c->btree_trans_lock);
3135 	if (IS_ENABLED(CONFIG_BCACHEFS_DEBUG)) {
3136 		struct btree_trans *pos;
3137 		pid_t pid = current->pid;
3138 
3139 		trans->locking_wait.task = current;
3140 
3141 		list_for_each_entry(pos, &c->btree_trans_list, list) {
3142 			struct task_struct *pos_task = READ_ONCE(pos->locking_wait.task);
3143 			/*
3144 			 * We'd much prefer to be stricter here and completely
3145 			 * disallow multiple btree_trans in the same thread -
3146 			 * but the data move path calls bch2_write when we
3147 			 * already have a btree_trans initialized.
3148 			 */
3149 			BUG_ON(pos_task &&
3150 			       pid == pos_task->pid &&
3151 			       pos->locked);
3152 		}
3153 	}
3154 
3155 	list_add(&trans->list, &c->btree_trans_list);
3156 	seqmutex_unlock(&c->btree_trans_lock);
3157 got_trans:
3158 	trans->c		= c;
3159 	trans->last_begin_time	= local_clock();
3160 	trans->fn_idx		= fn_idx;
3161 	trans->locking_wait.task = current;
3162 	trans->locked		= true;
3163 	trans->journal_replay_not_finished =
3164 		unlikely(!test_bit(JOURNAL_replay_done, &c->journal.flags)) &&
3165 		atomic_inc_not_zero(&c->journal_keys.ref);
3166 	trans->nr_paths		= ARRAY_SIZE(trans->_paths);
3167 	trans->paths_allocated	= trans->_paths_allocated;
3168 	trans->sorted		= trans->_sorted;
3169 	trans->paths		= trans->_paths;
3170 	trans->updates		= trans->_updates;
3171 
3172 	*trans_paths_nr(trans->paths) = BTREE_ITER_INITIAL;
3173 
3174 	trans->paths_allocated[0] = 1;
3175 
3176 	if (fn_idx < BCH_TRANSACTIONS_NR) {
3177 		trans->fn = bch2_btree_transaction_fns[fn_idx];
3178 
3179 		struct btree_transaction_stats *s = &c->btree_transaction_stats[fn_idx];
3180 
3181 		if (s->max_mem) {
3182 			unsigned expected_mem_bytes = roundup_pow_of_two(s->max_mem);
3183 
3184 			trans->mem = kmalloc(expected_mem_bytes, GFP_KERNEL);
3185 			if (likely(trans->mem))
3186 				trans->mem_bytes = expected_mem_bytes;
3187 		}
3188 
3189 		trans->nr_paths_max = s->nr_max_paths;
3190 		trans->journal_entries_size = s->journal_entries_size;
3191 	}
3192 
3193 	trans->srcu_idx		= srcu_read_lock(&c->btree_trans_barrier);
3194 	trans->srcu_lock_time	= jiffies;
3195 	trans->srcu_held	= true;
3196 
3197 	closure_init_stack_release(&trans->ref);
3198 	return trans;
3199 }
3200 
3201 static void check_btree_paths_leaked(struct btree_trans *trans)
3202 {
3203 #ifdef CONFIG_BCACHEFS_DEBUG
3204 	struct bch_fs *c = trans->c;
3205 	struct btree_path *path;
3206 	unsigned i;
3207 
3208 	trans_for_each_path(trans, path, i)
3209 		if (path->ref)
3210 			goto leaked;
3211 	return;
3212 leaked:
3213 	bch_err(c, "btree paths leaked from %s!", trans->fn);
3214 	trans_for_each_path(trans, path, i)
3215 		if (path->ref)
3216 			printk(KERN_ERR "  btree %s %pS\n",
3217 			       bch2_btree_id_str(path->btree_id),
3218 			       (void *) path->ip_allocated);
3219 	/* Be noisy about this: */
3220 	bch2_fatal_error(c);
3221 #endif
3222 }
3223 
3224 void bch2_trans_put(struct btree_trans *trans)
3225 	__releases(&c->btree_trans_barrier)
3226 {
3227 	struct bch_fs *c = trans->c;
3228 
3229 	bch2_trans_unlock(trans);
3230 
3231 	trans_for_each_update(trans, i)
3232 		__btree_path_put(trans->paths + i->path, true);
3233 	trans->nr_updates	= 0;
3234 
3235 	check_btree_paths_leaked(trans);
3236 
3237 	if (trans->srcu_held) {
3238 		check_srcu_held_too_long(trans);
3239 		srcu_read_unlock(&c->btree_trans_barrier, trans->srcu_idx);
3240 	}
3241 
3242 	if (trans->fs_usage_deltas) {
3243 		if (trans->fs_usage_deltas->size + sizeof(trans->fs_usage_deltas) ==
3244 		    REPLICAS_DELTA_LIST_MAX)
3245 			mempool_free(trans->fs_usage_deltas,
3246 				     &c->replicas_delta_pool);
3247 		else
3248 			kfree(trans->fs_usage_deltas);
3249 	}
3250 
3251 	if (unlikely(trans->journal_replay_not_finished))
3252 		bch2_journal_keys_put(c);
3253 
3254 	/*
3255 	 * trans->ref protects trans->locking_wait.task and the btree paths array;
3256 	 * both are used by the cycle detector
3257 	 */
3258 	closure_return_sync(&trans->ref);
3259 	trans->locking_wait.task = NULL;
3260 
3261 	unsigned long *paths_allocated = trans->paths_allocated;
3262 	trans->paths_allocated	= NULL;
3263 	trans->paths		= NULL;
3264 
3265 	if (paths_allocated != trans->_paths_allocated)
3266 		kvfree_rcu_mightsleep(paths_allocated);
3267 
3268 	if (trans->used_mempool)
3269 		mempool_free(trans->mem, &c->btree_trans_mem_pool);
3270 	else
3271 		kfree(trans->mem);
3272 
3273 	/* Userspace doesn't have a real percpu implementation: */
3274 	if (IS_ENABLED(__KERNEL__))
3275 		trans = this_cpu_xchg(c->btree_trans_bufs->trans, trans);
3276 
3277 	if (trans) {
3278 		seqmutex_lock(&c->btree_trans_lock);
3279 		list_del(&trans->list);
3280 		seqmutex_unlock(&c->btree_trans_lock);
3281 
3282 		mempool_free(trans, &c->btree_trans_pool);
3283 	}
3284 }
3285 
3286 static void __maybe_unused
3287 bch2_btree_bkey_cached_common_to_text(struct printbuf *out,
3288 				      struct btree_bkey_cached_common *b)
3289 {
3290 	struct six_lock_count c = six_lock_counts(&b->lock);
3291 	struct task_struct *owner;
3292 	pid_t pid;
3293 
3294 	rcu_read_lock();
3295 	owner = READ_ONCE(b->lock.owner);
3296 	pid = owner ? owner->pid : 0;
3297 	rcu_read_unlock();
3298 
3299 	prt_printf(out, "\t%px %c l=%u %s:", b, b->cached ? 'c' : 'b',
3300 		   b->level, bch2_btree_id_str(b->btree_id));
3301 	bch2_bpos_to_text(out, btree_node_pos(b));
3302 
3303 	prt_printf(out, "\t locks %u:%u:%u held by pid %u",
3304 		   c.n[0], c.n[1], c.n[2], pid);
3305 }
3306 
3307 void bch2_btree_trans_to_text(struct printbuf *out, struct btree_trans *trans)
3308 {
3309 	struct btree_bkey_cached_common *b;
3310 	static char lock_types[] = { 'r', 'i', 'w' };
3311 	struct task_struct *task = READ_ONCE(trans->locking_wait.task);
3312 	unsigned l, idx;
3313 
3314 	/* before rcu_read_lock(): */
3315 	bch2_printbuf_make_room(out, 4096);
3316 
3317 	if (!out->nr_tabstops) {
3318 		printbuf_tabstop_push(out, 16);
3319 		printbuf_tabstop_push(out, 32);
3320 	}
3321 
3322 	prt_printf(out, "%i %s\n", task ? task->pid : 0, trans->fn);
3323 
3324 	/* trans->paths is rcu protected vs. freeing */
3325 	rcu_read_lock();
3326 	out->atomic++;
3327 
3328 	struct btree_path *paths = rcu_dereference(trans->paths);
3329 	if (!paths)
3330 		goto out;
3331 
3332 	unsigned long *paths_allocated = trans_paths_allocated(paths);
3333 
3334 	trans_for_each_path_idx_from(paths_allocated, *trans_paths_nr(paths), idx, 1) {
3335 		struct btree_path *path = paths + idx;
3336 		if (!path->nodes_locked)
3337 			continue;
3338 
3339 		prt_printf(out, "  path %u %c l=%u %s:",
3340 		       idx,
3341 		       path->cached ? 'c' : 'b',
3342 		       path->level,
3343 		       bch2_btree_id_str(path->btree_id));
3344 		bch2_bpos_to_text(out, path->pos);
3345 		prt_newline(out);
3346 
3347 		for (l = 0; l < BTREE_MAX_DEPTH; l++) {
3348 			if (btree_node_locked(path, l) &&
3349 			    !IS_ERR_OR_NULL(b = (void *) READ_ONCE(path->l[l].b))) {
3350 				prt_printf(out, "    %c l=%u ",
3351 					   lock_types[btree_node_locked_type(path, l)], l);
3352 				bch2_btree_bkey_cached_common_to_text(out, b);
3353 				prt_newline(out);
3354 			}
3355 		}
3356 	}
3357 
3358 	b = READ_ONCE(trans->locking);
3359 	if (b) {
3360 		prt_printf(out, "  blocked for %lluus on\n",
3361 			   div_u64(local_clock() - trans->locking_wait.start_time, 1000));
3362 		prt_printf(out, "    %c", lock_types[trans->locking_wait.lock_want]);
3363 		bch2_btree_bkey_cached_common_to_text(out, b);
3364 		prt_newline(out);
3365 	}
3366 out:
3367 	--out->atomic;
3368 	rcu_read_unlock();
3369 }
3370 
3371 void bch2_fs_btree_iter_exit(struct bch_fs *c)
3372 {
3373 	struct btree_transaction_stats *s;
3374 	struct btree_trans *trans;
3375 	int cpu;
3376 
3377 	if (c->btree_trans_bufs)
3378 		for_each_possible_cpu(cpu) {
3379 			struct btree_trans *trans =
3380 				per_cpu_ptr(c->btree_trans_bufs, cpu)->trans;
3381 
3382 			if (trans) {
3383 				seqmutex_lock(&c->btree_trans_lock);
3384 				list_del(&trans->list);
3385 				seqmutex_unlock(&c->btree_trans_lock);
3386 			}
3387 			kfree(trans);
3388 		}
3389 	free_percpu(c->btree_trans_bufs);
3390 
3391 	trans = list_first_entry_or_null(&c->btree_trans_list, struct btree_trans, list);
3392 	if (trans)
3393 		panic("%s leaked btree_trans\n", trans->fn);
3394 
3395 	for (s = c->btree_transaction_stats;
3396 	     s < c->btree_transaction_stats + ARRAY_SIZE(c->btree_transaction_stats);
3397 	     s++) {
3398 		kfree(s->max_paths_text);
3399 		bch2_time_stats_exit(&s->lock_hold_times);
3400 	}
3401 
3402 	if (c->btree_trans_barrier_initialized) {
3403 		synchronize_srcu_expedited(&c->btree_trans_barrier);
3404 		cleanup_srcu_struct(&c->btree_trans_barrier);
3405 	}
3406 	mempool_exit(&c->btree_trans_mem_pool);
3407 	mempool_exit(&c->btree_trans_pool);
3408 }
3409 
3410 void bch2_fs_btree_iter_init_early(struct bch_fs *c)
3411 {
3412 	struct btree_transaction_stats *s;
3413 
3414 	for (s = c->btree_transaction_stats;
3415 	     s < c->btree_transaction_stats + ARRAY_SIZE(c->btree_transaction_stats);
3416 	     s++) {
3417 		bch2_time_stats_init(&s->duration);
3418 		bch2_time_stats_init(&s->lock_hold_times);
3419 		mutex_init(&s->lock);
3420 	}
3421 
3422 	INIT_LIST_HEAD(&c->btree_trans_list);
3423 	seqmutex_init(&c->btree_trans_lock);
3424 }
3425 
3426 int bch2_fs_btree_iter_init(struct bch_fs *c)
3427 {
3428 	int ret;
3429 
3430 	c->btree_trans_bufs = alloc_percpu(struct btree_trans_buf);
3431 	if (!c->btree_trans_bufs)
3432 		return -ENOMEM;
3433 
3434 	ret   = mempool_init_kmalloc_pool(&c->btree_trans_pool, 1,
3435 					  sizeof(struct btree_trans)) ?:
3436 		mempool_init_kmalloc_pool(&c->btree_trans_mem_pool, 1,
3437 					  BTREE_TRANS_MEM_MAX) ?:
3438 		init_srcu_struct(&c->btree_trans_barrier);
3439 	if (!ret)
3440 		c->btree_trans_barrier_initialized = true;
3441 	return ret;
3442 }
3443