1 // SPDX-License-Identifier: GPL-2.0
2 
3 #include "bcachefs.h"
4 #include "bkey_methods.h"
5 #include "bkey_buf.h"
6 #include "btree_cache.h"
7 #include "btree_iter.h"
8 #include "btree_journal_iter.h"
9 #include "btree_key_cache.h"
10 #include "btree_locking.h"
11 #include "btree_update.h"
12 #include "debug.h"
13 #include "error.h"
14 #include "extents.h"
15 #include "journal.h"
16 #include "journal_io.h"
17 #include "replicas.h"
18 #include "snapshot.h"
19 #include "trace.h"
20 
21 #include <linux/random.h>
22 #include <linux/prefetch.h>
23 
24 static inline void btree_path_list_remove(struct btree_trans *, struct btree_path *);
25 static inline void btree_path_list_add(struct btree_trans *,
26 			btree_path_idx_t, btree_path_idx_t);
27 
28 static inline unsigned long btree_iter_ip_allocated(struct btree_iter *iter)
29 {
30 #ifdef TRACK_PATH_ALLOCATED
31 	return iter->ip_allocated;
32 #else
33 	return 0;
34 #endif
35 }
36 
37 static btree_path_idx_t btree_path_alloc(struct btree_trans *, btree_path_idx_t);
38 static void bch2_trans_srcu_lock(struct btree_trans *);
39 
40 static inline int __btree_path_cmp(const struct btree_path *l,
41 				   enum btree_id	r_btree_id,
42 				   bool			r_cached,
43 				   struct bpos		r_pos,
44 				   unsigned		r_level)
45 {
46 	/*
47 	 * Must match lock ordering as defined by __bch2_btree_node_lock:
48 	 */
49 	return   cmp_int(l->btree_id,	r_btree_id) ?:
50 		 cmp_int((int) l->cached,	(int) r_cached) ?:
51 		 bpos_cmp(l->pos,	r_pos) ?:
52 		-cmp_int(l->level,	r_level);
53 }
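
/*
 * Illustrative sort order (a sketch of the comparator above): paths order
 * first by btree, then uncached before cached, then by position; at equal
 * positions the negated level comparison makes *higher* levels (closer to
 * the root) sort first - e.g. a level 1 path at pos X orders before a
 * level 0 path at pos X, matching top-down lock acquisition.
 */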
54 
55 static inline int btree_path_cmp(const struct btree_path *l,
56 				 const struct btree_path *r)
57 {
58 	return __btree_path_cmp(l, r->btree_id, r->cached, r->pos, r->level);
59 }
60 
61 static inline struct bpos bkey_successor(struct btree_iter *iter, struct bpos p)
62 {
63 	/* Are we iterating over keys in all snapshots? */
64 	if (iter->flags & BTREE_ITER_all_snapshots) {
65 		p = bpos_successor(p);
66 	} else {
67 		p = bpos_nosnap_successor(p);
68 		p.snapshot = iter->snapshot;
69 	}
70 
71 	return p;
72 }
73 
74 static inline struct bpos bkey_predecessor(struct btree_iter *iter, struct bpos p)
75 {
76 	/* Are we iterating over keys in all snapshots? */
77 	if (iter->flags & BTREE_ITER_all_snapshots) {
78 		p = bpos_predecessor(p);
79 	} else {
80 		p = bpos_nosnap_predecessor(p);
81 		p.snapshot = iter->snapshot;
82 	}
83 
84 	return p;
85 }
86 
87 static inline struct bpos btree_iter_search_key(struct btree_iter *iter)
88 {
89 	struct bpos pos = iter->pos;
90 
91 	if ((iter->flags & BTREE_ITER_is_extents) &&
92 	    !bkey_eq(pos, POS_MAX))
93 		pos = bkey_successor(iter, pos);
94 	return pos;
95 }
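
/*
 * Example (assuming extents semantics, where a key's position is its *end*):
 * an extents iterator at pos 10 searches from the successor of 10, so the
 * lookup lands on the first extent whose end is strictly greater than 10 -
 * i.e. the first extent that could cover pos 10.
 */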
96 
97 static inline bool btree_path_pos_before_node(struct btree_path *path,
98 					      struct btree *b)
99 {
100 	return bpos_lt(path->pos, b->data->min_key);
101 }
102 
103 static inline bool btree_path_pos_after_node(struct btree_path *path,
104 					     struct btree *b)
105 {
106 	return bpos_gt(path->pos, b->key.k.p);
107 }
108 
109 static inline bool btree_path_pos_in_node(struct btree_path *path,
110 					  struct btree *b)
111 {
112 	return path->btree_id == b->c.btree_id &&
113 		!btree_path_pos_before_node(path, b) &&
114 		!btree_path_pos_after_node(path, b);
115 }
116 
117 /* Btree iterator: */
118 
119 #ifdef CONFIG_BCACHEFS_DEBUG
120 
121 static void bch2_btree_path_verify_cached(struct btree_trans *trans,
122 					  struct btree_path *path)
123 {
124 	struct bkey_cached *ck;
125 	bool locked = btree_node_locked(path, 0);
126 
127 	if (!bch2_btree_node_relock(trans, path, 0))
128 		return;
129 
130 	ck = (void *) path->l[0].b;
131 	BUG_ON(ck->key.btree_id != path->btree_id ||
132 	       !bkey_eq(ck->key.pos, path->pos));
133 
134 	if (!locked)
135 		btree_node_unlock(trans, path, 0);
136 }
137 
138 static void bch2_btree_path_verify_level(struct btree_trans *trans,
139 				struct btree_path *path, unsigned level)
140 {
141 	struct btree_path_level *l;
142 	struct btree_node_iter tmp;
143 	bool locked;
144 	struct bkey_packed *p, *k;
145 	struct printbuf buf1 = PRINTBUF;
146 	struct printbuf buf2 = PRINTBUF;
147 	struct printbuf buf3 = PRINTBUF;
148 	const char *msg;
149 
150 	if (!bch2_debug_check_iterators)
151 		return;
152 
153 	l	= &path->l[level];
154 	tmp	= l->iter;
155 	locked	= btree_node_locked(path, level);
156 
157 	if (path->cached) {
158 		if (!level)
159 			bch2_btree_path_verify_cached(trans, path);
160 		return;
161 	}
162 
163 	if (!btree_path_node(path, level))
164 		return;
165 
166 	if (!bch2_btree_node_relock_notrace(trans, path, level))
167 		return;
168 
169 	BUG_ON(!btree_path_pos_in_node(path, l->b));
170 
171 	bch2_btree_node_iter_verify(&l->iter, l->b);
172 
173 	/*
174 	 * For interior nodes, the iterator will have skipped past deleted keys:
175 	 */
176 	p = level
177 		? bch2_btree_node_iter_prev(&tmp, l->b)
178 		: bch2_btree_node_iter_prev_all(&tmp, l->b);
179 	k = bch2_btree_node_iter_peek_all(&l->iter, l->b);
180 
181 	if (p && bkey_iter_pos_cmp(l->b, p, &path->pos) >= 0) {
182 		msg = "before";
183 		goto err;
184 	}
185 
186 	if (k && bkey_iter_pos_cmp(l->b, k, &path->pos) < 0) {
187 		msg = "after";
188 		goto err;
189 	}
190 
191 	if (!locked)
192 		btree_node_unlock(trans, path, level);
193 	return;
194 err:
195 	bch2_bpos_to_text(&buf1, path->pos);
196 
197 	if (p) {
198 		struct bkey uk = bkey_unpack_key(l->b, p);
199 
200 		bch2_bkey_to_text(&buf2, &uk);
201 	} else {
202 		prt_printf(&buf2, "(none)");
203 	}
204 
205 	if (k) {
206 		struct bkey uk = bkey_unpack_key(l->b, k);
207 
208 		bch2_bkey_to_text(&buf3, &uk);
209 	} else {
210 		prt_printf(&buf3, "(none)");
211 	}
212 
213 	panic("path should be %s key at level %u:\n"
214 	      "path pos %s\n"
215 	      "prev key %s\n"
216 	      "cur  key %s\n",
217 	      msg, level, buf1.buf, buf2.buf, buf3.buf);
218 }
219 
220 static void bch2_btree_path_verify(struct btree_trans *trans,
221 				   struct btree_path *path)
222 {
223 	struct bch_fs *c = trans->c;
224 
225 	for (unsigned i = 0; i < (!path->cached ? BTREE_MAX_DEPTH : 1); i++) {
226 		if (!path->l[i].b) {
227 			BUG_ON(!path->cached &&
228 			       bch2_btree_id_root(c, path->btree_id)->b->c.level > i);
229 			break;
230 		}
231 
232 		bch2_btree_path_verify_level(trans, path, i);
233 	}
234 
235 	bch2_btree_path_verify_locks(path);
236 }
237 
238 void bch2_trans_verify_paths(struct btree_trans *trans)
239 {
240 	struct btree_path *path;
241 	unsigned iter;
242 
243 	trans_for_each_path(trans, path, iter)
244 		bch2_btree_path_verify(trans, path);
245 }
246 
247 static void bch2_btree_iter_verify(struct btree_iter *iter)
248 {
249 	struct btree_trans *trans = iter->trans;
250 
251 	BUG_ON(!!(iter->flags & BTREE_ITER_cached) != btree_iter_path(trans, iter)->cached);
252 
253 	BUG_ON((iter->flags & BTREE_ITER_is_extents) &&
254 	       (iter->flags & BTREE_ITER_all_snapshots));
255 
256 	BUG_ON(!(iter->flags & BTREE_ITER_snapshot_field) &&
257 	       (iter->flags & BTREE_ITER_all_snapshots) &&
258 	       !btree_type_has_snapshot_field(iter->btree_id));
259 
260 	if (iter->update_path)
261 		bch2_btree_path_verify(trans, &trans->paths[iter->update_path]);
262 	bch2_btree_path_verify(trans, btree_iter_path(trans, iter));
263 }
264 
265 static void bch2_btree_iter_verify_entry_exit(struct btree_iter *iter)
266 {
267 	BUG_ON((iter->flags & BTREE_ITER_filter_snapshots) &&
268 	       !iter->pos.snapshot);
269 
270 	BUG_ON(!(iter->flags & BTREE_ITER_all_snapshots) &&
271 	       iter->pos.snapshot != iter->snapshot);
272 
273 	BUG_ON(bkey_lt(iter->pos, bkey_start_pos(&iter->k)) ||
274 	       bkey_gt(iter->pos, iter->k.p));
275 }
276 
277 static int bch2_btree_iter_verify_ret(struct btree_iter *iter, struct bkey_s_c k)
278 {
279 	struct btree_trans *trans = iter->trans;
280 	struct btree_iter copy;
281 	struct bkey_s_c prev;
282 	int ret = 0;
283 
284 	if (!bch2_debug_check_iterators)
285 		return 0;
286 
287 	if (!(iter->flags & BTREE_ITER_filter_snapshots))
288 		return 0;
289 
290 	if (bkey_err(k) || !k.k)
291 		return 0;
292 
293 	BUG_ON(!bch2_snapshot_is_ancestor(trans->c,
294 					  iter->snapshot,
295 					  k.k->p.snapshot));
296 
297 	bch2_trans_iter_init(trans, &copy, iter->btree_id, iter->pos,
298 			     BTREE_ITER_nopreserve|
299 			     BTREE_ITER_all_snapshots);
300 	prev = bch2_btree_iter_prev(&copy);
301 	if (!prev.k)
302 		goto out;
303 
304 	ret = bkey_err(prev);
305 	if (ret)
306 		goto out;
307 
308 	if (bkey_eq(prev.k->p, k.k->p) &&
309 	    bch2_snapshot_is_ancestor(trans->c, iter->snapshot,
310 				      prev.k->p.snapshot) > 0) {
311 		struct printbuf buf1 = PRINTBUF, buf2 = PRINTBUF;
312 
313 		bch2_bkey_to_text(&buf1, k.k);
314 		bch2_bkey_to_text(&buf2, prev.k);
315 
316 		panic("iter snap %u\n"
317 		      "k    %s\n"
318 		      "prev %s\n",
319 		      iter->snapshot,
320 		      buf1.buf, buf2.buf);
321 	}
322 out:
323 	bch2_trans_iter_exit(trans, &copy);
324 	return ret;
325 }
326 
327 void bch2_assert_pos_locked(struct btree_trans *trans, enum btree_id id,
328 			    struct bpos pos)
329 {
330 	bch2_trans_verify_not_unlocked(trans);
331 
332 	struct btree_path *path;
333 	struct trans_for_each_path_inorder_iter iter;
334 	struct printbuf buf = PRINTBUF;
335 
336 	btree_trans_sort_paths(trans);
337 
338 	trans_for_each_path_inorder(trans, path, iter) {
339 		if (path->btree_id != id ||
340 		    !btree_node_locked(path, 0) ||
341 		    !path->should_be_locked)
342 			continue;
343 
344 		if (!path->cached) {
345 			if (bkey_ge(pos, path->l[0].b->data->min_key) &&
346 			    bkey_le(pos, path->l[0].b->key.k.p))
347 				return;
348 		} else {
349 			if (bkey_eq(pos, path->pos))
350 				return;
351 		}
352 	}
353 
354 	bch2_dump_trans_paths_updates(trans);
355 	bch2_bpos_to_text(&buf, pos);
356 
357 	panic("not locked: %s %s\n", bch2_btree_id_str(id), buf.buf);
358 }
359 
360 #else
361 
362 static inline void bch2_btree_path_verify_level(struct btree_trans *trans,
363 						struct btree_path *path, unsigned l) {}
364 static inline void bch2_btree_path_verify(struct btree_trans *trans,
365 					  struct btree_path *path) {}
366 static inline void bch2_btree_iter_verify(struct btree_iter *iter) {}
367 static inline void bch2_btree_iter_verify_entry_exit(struct btree_iter *iter) {}
368 static inline int bch2_btree_iter_verify_ret(struct btree_iter *iter, struct bkey_s_c k) { return 0; }
369 
370 #endif
371 
372 /* Btree path: fixups after btree updates */
373 
374 static void btree_node_iter_set_set_pos(struct btree_node_iter *iter,
375 					struct btree *b,
376 					struct bset_tree *t,
377 					struct bkey_packed *k)
378 {
379 	struct btree_node_iter_set *set;
380 
381 	btree_node_iter_for_each(iter, set)
382 		if (set->end == t->end_offset) {
383 			set->k = __btree_node_key_to_offset(b, k);
384 			bch2_btree_node_iter_sort(iter, b);
385 			return;
386 		}
387 
388 	bch2_btree_node_iter_push(iter, b, k, btree_bkey_last(b, t));
389 }
390 
391 static void __bch2_btree_path_fix_key_modified(struct btree_path *path,
392 					       struct btree *b,
393 					       struct bkey_packed *where)
394 {
395 	struct btree_path_level *l = &path->l[b->c.level];
396 
397 	if (where != bch2_btree_node_iter_peek_all(&l->iter, l->b))
398 		return;
399 
400 	if (bkey_iter_pos_cmp(l->b, where, &path->pos) < 0)
401 		bch2_btree_node_iter_advance(&l->iter, l->b);
402 }
403 
404 void bch2_btree_path_fix_key_modified(struct btree_trans *trans,
405 				      struct btree *b,
406 				      struct bkey_packed *where)
407 {
408 	struct btree_path *path;
409 	unsigned i;
410 
411 	trans_for_each_path_with_node(trans, b, path, i) {
412 		__bch2_btree_path_fix_key_modified(path, b, where);
413 		bch2_btree_path_verify_level(trans, path, b->c.level);
414 	}
415 }
416 
417 static void __bch2_btree_node_iter_fix(struct btree_path *path,
418 				       struct btree *b,
419 				       struct btree_node_iter *node_iter,
420 				       struct bset_tree *t,
421 				       struct bkey_packed *where,
422 				       unsigned clobber_u64s,
423 				       unsigned new_u64s)
424 {
425 	const struct bkey_packed *end = btree_bkey_last(b, t);
426 	struct btree_node_iter_set *set;
427 	unsigned offset = __btree_node_key_to_offset(b, where);
428 	int shift = new_u64s - clobber_u64s;
429 	unsigned old_end = t->end_offset - shift;
430 	unsigned orig_iter_pos = node_iter->data[0].k;
431 	bool iter_current_key_modified =
432 		orig_iter_pos >= offset &&
433 		orig_iter_pos <= offset + clobber_u64s;
434 
435 	btree_node_iter_for_each(node_iter, set)
436 		if (set->end == old_end)
437 			goto found;
438 
439 	/* didn't find the bset in the iterator - might have to re-add it: */
440 	if (new_u64s &&
441 	    bkey_iter_pos_cmp(b, where, &path->pos) >= 0) {
442 		bch2_btree_node_iter_push(node_iter, b, where, end);
443 		goto fixup_done;
444 	} else {
445 		/* Iterator is after key that changed */
446 		return;
447 	}
448 found:
449 	set->end = t->end_offset;
450 
451 	/* Iterator hasn't gotten to the key that changed yet: */
452 	if (set->k < offset)
453 		return;
454 
455 	if (new_u64s &&
456 	    bkey_iter_pos_cmp(b, where, &path->pos) >= 0) {
457 		set->k = offset;
458 	} else if (set->k < offset + clobber_u64s) {
459 		set->k = offset + new_u64s;
460 		if (set->k == set->end)
461 			bch2_btree_node_iter_set_drop(node_iter, set);
462 	} else {
463 		/* Iterator is after key that changed */
464 		set->k = (int) set->k + shift;
465 		return;
466 	}
467 
468 	bch2_btree_node_iter_sort(node_iter, b);
469 fixup_done:
470 	if (node_iter->data[0].k != orig_iter_pos)
471 		iter_current_key_modified = true;
472 
473 	/*
474 	 * When a new key is added, and the node iterator now points to that
475 	 * key, the iterator might have skipped past deleted keys that should
476 	 * come after the key the iterator now points to. We have to rewind to
477 	 * before those deleted keys - otherwise
478 	 * bch2_btree_node_iter_prev_all() breaks:
479 	 */
480 	if (!bch2_btree_node_iter_end(node_iter) &&
481 	    iter_current_key_modified &&
482 	    b->c.level) {
483 		struct bkey_packed *k, *k2, *p;
484 
485 		k = bch2_btree_node_iter_peek_all(node_iter, b);
486 
487 		for_each_bset(b, t) {
488 			bool set_pos = false;
489 
490 			if (node_iter->data[0].end == t->end_offset)
491 				continue;
492 
493 			k2 = bch2_btree_node_iter_bset_pos(node_iter, b, t);
494 
495 			while ((p = bch2_bkey_prev_all(b, t, k2)) &&
496 			       bkey_iter_cmp(b, k, p) < 0) {
497 				k2 = p;
498 				set_pos = true;
499 			}
500 
501 			if (set_pos)
502 				btree_node_iter_set_set_pos(node_iter,
503 							    b, t, k2);
504 		}
505 	}
506 }
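
/*
 * Worked example for the fixup above (hypothetical numbers): a key at
 * offset 100 spanning clobber_u64s = 3 u64s is overwritten by one spanning
 * new_u64s = 5, so shift = +2. An iterator offset inside the clobbered
 * range is snapped either to the new key (offset 100, if the new key is at
 * or after path->pos) or just past it (offset + new_u64s = 105); an offset
 * beyond the clobbered range, say 110, simply shifts by +2 to 112.
 */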
507 
508 void bch2_btree_node_iter_fix(struct btree_trans *trans,
509 			      struct btree_path *path,
510 			      struct btree *b,
511 			      struct btree_node_iter *node_iter,
512 			      struct bkey_packed *where,
513 			      unsigned clobber_u64s,
514 			      unsigned new_u64s)
515 {
516 	struct bset_tree *t = bch2_bkey_to_bset_inlined(b, where);
517 	struct btree_path *linked;
518 	unsigned i;
519 
520 	if (node_iter != &path->l[b->c.level].iter) {
521 		__bch2_btree_node_iter_fix(path, b, node_iter, t,
522 					   where, clobber_u64s, new_u64s);
523 
524 		if (bch2_debug_check_iterators)
525 			bch2_btree_node_iter_verify(node_iter, b);
526 	}
527 
528 	trans_for_each_path_with_node(trans, b, linked, i) {
529 		__bch2_btree_node_iter_fix(linked, b,
530 					   &linked->l[b->c.level].iter, t,
531 					   where, clobber_u64s, new_u64s);
532 		bch2_btree_path_verify_level(trans, linked, b->c.level);
533 	}
534 }
535 
536 /* Btree path level: pointer to a particular btree node and node iter */
537 
538 static inline struct bkey_s_c __btree_iter_unpack(struct bch_fs *c,
539 						  struct btree_path_level *l,
540 						  struct bkey *u,
541 						  struct bkey_packed *k)
542 {
543 	if (unlikely(!k)) {
544 		/*
545 		 * signal to bch2_btree_iter_peek_slot() that we're currently at
546 		 * a hole
547 		 */
548 		u->type = KEY_TYPE_deleted;
549 		return bkey_s_c_null;
550 	}
551 
552 	return bkey_disassemble(l->b, k, u);
553 }
554 
555 static inline struct bkey_s_c btree_path_level_peek_all(struct bch_fs *c,
556 							struct btree_path_level *l,
557 							struct bkey *u)
558 {
559 	return __btree_iter_unpack(c, l, u,
560 			bch2_btree_node_iter_peek_all(&l->iter, l->b));
561 }
562 
563 static inline struct bkey_s_c btree_path_level_peek(struct btree_trans *trans,
564 						    struct btree_path *path,
565 						    struct btree_path_level *l,
566 						    struct bkey *u)
567 {
568 	struct bkey_s_c k = __btree_iter_unpack(trans->c, l, u,
569 			bch2_btree_node_iter_peek(&l->iter, l->b));
570 
571 	path->pos = k.k ? k.k->p : l->b->key.k.p;
572 	trans->paths_sorted = false;
573 	bch2_btree_path_verify_level(trans, path, l - path->l);
574 	return k;
575 }
576 
577 static inline struct bkey_s_c btree_path_level_prev(struct btree_trans *trans,
578 						    struct btree_path *path,
579 						    struct btree_path_level *l,
580 						    struct bkey *u)
581 {
582 	struct bkey_s_c k = __btree_iter_unpack(trans->c, l, u,
583 			bch2_btree_node_iter_prev(&l->iter, l->b));
584 
585 	path->pos = k.k ? k.k->p : l->b->data->min_key;
586 	trans->paths_sorted = false;
587 	bch2_btree_path_verify_level(trans, path, l - path->l);
588 	return k;
589 }
590 
591 static inline bool btree_path_advance_to_pos(struct btree_path *path,
592 					     struct btree_path_level *l,
593 					     int max_advance)
594 {
595 	struct bkey_packed *k;
596 	int nr_advanced = 0;
597 
598 	while ((k = bch2_btree_node_iter_peek_all(&l->iter, l->b)) &&
599 	       bkey_iter_pos_cmp(l->b, k, &path->pos) < 0) {
600 		if (max_advance > 0 && nr_advanced >= max_advance)
601 			return false;
602 
603 		bch2_btree_node_iter_advance(&l->iter, l->b);
604 		nr_advanced++;
605 	}
606 
607 	return true;
608 }
609 
610 static inline void __btree_path_level_init(struct btree_path *path,
611 					   unsigned level)
612 {
613 	struct btree_path_level *l = &path->l[level];
614 
615 	bch2_btree_node_iter_init(&l->iter, l->b, &path->pos);
616 
617 	/*
618 	 * Iterators to interior nodes should always be pointed at the first
619 	 * non-whiteout:
620 	 */
621 	if (level)
622 		bch2_btree_node_iter_peek(&l->iter, l->b);
623 }
624 
625 void bch2_btree_path_level_init(struct btree_trans *trans,
626 				struct btree_path *path,
627 				struct btree *b)
628 {
629 	BUG_ON(path->cached);
630 
631 	EBUG_ON(!btree_path_pos_in_node(path, b));
632 
633 	path->l[b->c.level].lock_seq = six_lock_seq(&b->c.lock);
634 	path->l[b->c.level].b = b;
635 	__btree_path_level_init(path, b->c.level);
636 }
637 
638 /* Btree path: fixups after btree node updates: */
639 
640 static void bch2_trans_revalidate_updates_in_node(struct btree_trans *trans, struct btree *b)
641 {
642 	struct bch_fs *c = trans->c;
643 
644 	trans_for_each_update(trans, i)
645 		if (!i->cached &&
646 		    i->level	== b->c.level &&
647 		    i->btree_id	== b->c.btree_id &&
648 		    bpos_cmp(i->k->k.p, b->data->min_key) >= 0 &&
649 		    bpos_cmp(i->k->k.p, b->data->max_key) <= 0) {
650 			i->old_v = bch2_btree_path_peek_slot(trans->paths + i->path, &i->old_k).v;
651 
652 			if (unlikely(trans->journal_replay_not_finished)) {
653 				struct bkey_i *j_k =
654 					bch2_journal_keys_peek_slot(c, i->btree_id, i->level,
655 								    i->k->k.p);
656 
657 				if (j_k) {
658 					i->old_k = j_k->k;
659 					i->old_v = &j_k->v;
660 				}
661 			}
662 		}
663 }
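
/*
 * Note: this re-peek exists because pending updates cache old_v, a pointer
 * that would otherwise dangle into the replaced node's memory; while journal
 * replay is still in flight, a not-yet-replayed journal key takes precedence
 * as the old value.
 */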
664 
665 /*
666  * A btree node is being replaced - update the iterator to point to the new
667  * node:
668  */
669 void bch2_trans_node_add(struct btree_trans *trans,
670 			 struct btree_path *path,
671 			 struct btree *b)
672 {
673 	struct btree_path *prev;
674 
675 	BUG_ON(!btree_path_pos_in_node(path, b));
676 
677 	while ((prev = prev_btree_path(trans, path)) &&
678 	       btree_path_pos_in_node(prev, b))
679 		path = prev;
680 
681 	for (;
682 	     path && btree_path_pos_in_node(path, b);
683 	     path = next_btree_path(trans, path))
684 		if (path->uptodate == BTREE_ITER_UPTODATE && !path->cached) {
685 			enum btree_node_locked_type t =
686 				btree_lock_want(path, b->c.level);
687 
688 			if (t != BTREE_NODE_UNLOCKED) {
689 				btree_node_unlock(trans, path, b->c.level);
690 				six_lock_increment(&b->c.lock, (enum six_lock_type) t);
691 				mark_btree_node_locked(trans, path, b->c.level, t);
692 			}
693 
694 			bch2_btree_path_level_init(trans, path, b);
695 		}
696 
697 	bch2_trans_revalidate_updates_in_node(trans, b);
698 }
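
/*
 * A sketch of the scan above: because paths are kept sorted, we first walk
 * backwards to the first path positioned inside the new node, then forwards
 * over every such path; each one drops its lock on the old node and
 * piggybacks on the caller's lock on the new node via six_lock_increment().
 */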
699 
700 /*
701  * A btree node has been modified in such a way as to invalidate iterators - fix
702  * them:
703  */
704 void bch2_trans_node_reinit_iter(struct btree_trans *trans, struct btree *b)
705 {
706 	struct btree_path *path;
707 	unsigned i;
708 
709 	trans_for_each_path_with_node(trans, b, path, i)
710 		__btree_path_level_init(path, b->c.level);
711 
712 	bch2_trans_revalidate_updates_in_node(trans, b);
713 }
714 
715 /* Btree path: traverse, set_pos: */
716 
717 static inline int btree_path_lock_root(struct btree_trans *trans,
718 				       struct btree_path *path,
719 				       unsigned depth_want,
720 				       unsigned long trace_ip)
721 {
722 	struct bch_fs *c = trans->c;
723 	struct btree *b, **rootp = &bch2_btree_id_root(c, path->btree_id)->b;
724 	enum six_lock_type lock_type;
725 	unsigned i;
726 	int ret;
727 
728 	EBUG_ON(path->nodes_locked);
729 
730 	while (1) {
731 		b = READ_ONCE(*rootp);
732 		path->level = READ_ONCE(b->c.level);
733 
734 		if (unlikely(path->level < depth_want)) {
735 			/*
736 			 * the root is at a lower depth than the depth we want:
737 			 * either we've reached the end of the btree, or we're
738 			 * walking nodes at some minimum depth and no nodes >=
739 			 * that depth exist
740 			 */
741 			path->level = depth_want;
742 			for (i = path->level; i < BTREE_MAX_DEPTH; i++)
743 				path->l[i].b = NULL;
744 			return 1;
745 		}
746 
747 		lock_type = __btree_lock_want(path, path->level);
748 		ret = btree_node_lock(trans, path, &b->c,
749 				      path->level, lock_type, trace_ip);
750 		if (unlikely(ret)) {
751 			if (bch2_err_matches(ret, BCH_ERR_lock_fail_root_changed))
752 				continue;
753 			if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
754 				return ret;
755 			BUG();
756 		}
757 
758 		if (likely(b == READ_ONCE(*rootp) &&
759 			   b->c.level == path->level &&
760 			   !race_fault())) {
761 			for (i = 0; i < path->level; i++)
762 				path->l[i].b = ERR_PTR(-BCH_ERR_no_btree_node_lock_root);
763 			path->l[path->level].b = b;
764 			for (i = path->level + 1; i < BTREE_MAX_DEPTH; i++)
765 				path->l[i].b = NULL;
766 
767 			mark_btree_node_locked(trans, path, path->level,
768 					       (enum btree_node_locked_type) lock_type);
769 			bch2_btree_path_level_init(trans, path, b);
770 			return 0;
771 		}
772 
773 		six_unlock_type(&b->c.lock, lock_type);
774 	}
775 }
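
/*
 * The loop above is a read-then-verify pattern: the root pointer is sampled
 * with READ_ONCE(), the lock is taken, and pointer and level are re-checked
 * under the lock - if the root was replaced in the meantime (e.g. by a
 * split), we unlock and retry rather than use a stale node.
 */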
776 
777 noinline
778 static int btree_path_prefetch(struct btree_trans *trans, struct btree_path *path)
779 {
780 	struct bch_fs *c = trans->c;
781 	struct btree_path_level *l = path_l(path);
782 	struct btree_node_iter node_iter = l->iter;
783 	struct bkey_packed *k;
784 	struct bkey_buf tmp;
785 	unsigned nr = test_bit(BCH_FS_started, &c->flags)
786 		? (path->level > 1 ? 0 :  2)
787 		: (path->level > 1 ? 1 : 16);
788 	bool was_locked = btree_node_locked(path, path->level);
789 	int ret = 0;
790 
791 	bch2_bkey_buf_init(&tmp);
792 
793 	while (nr-- && !ret) {
794 		if (!bch2_btree_node_relock(trans, path, path->level))
795 			break;
796 
797 		bch2_btree_node_iter_advance(&node_iter, l->b);
798 		k = bch2_btree_node_iter_peek(&node_iter, l->b);
799 		if (!k)
800 			break;
801 
802 		bch2_bkey_buf_unpack(&tmp, c, l->b, k);
803 		ret = bch2_btree_node_prefetch(trans, path, tmp.k, path->btree_id,
804 					       path->level - 1);
805 	}
806 
807 	if (!was_locked)
808 		btree_node_unlock(trans, path, path->level);
809 
810 	bch2_bkey_buf_exit(&tmp, c);
811 	return ret;
812 }
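
/*
 * The prefetch batch sizes above are heuristics: before BCH_FS_started is
 * set (i.e. during recovery) prefetching is aggressive - up to 16 children
 * of a level 1 node at a time, on the assumption that recovery walks btrees
 * sequentially - while at runtime it drops to at most 2.
 */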
813 
814 static int btree_path_prefetch_j(struct btree_trans *trans, struct btree_path *path,
815 				 struct btree_and_journal_iter *jiter)
816 {
817 	struct bch_fs *c = trans->c;
818 	struct bkey_s_c k;
819 	struct bkey_buf tmp;
820 	unsigned nr = test_bit(BCH_FS_started, &c->flags)
821 		? (path->level > 1 ? 0 :  2)
822 		: (path->level > 1 ? 1 : 16);
823 	bool was_locked = btree_node_locked(path, path->level);
824 	int ret = 0;
825 
826 	bch2_bkey_buf_init(&tmp);
827 
828 	while (nr-- && !ret) {
829 		if (!bch2_btree_node_relock(trans, path, path->level))
830 			break;
831 
832 		bch2_btree_and_journal_iter_advance(jiter);
833 		k = bch2_btree_and_journal_iter_peek(jiter);
834 		if (!k.k)
835 			break;
836 
837 		bch2_bkey_buf_reassemble(&tmp, c, k);
838 		ret = bch2_btree_node_prefetch(trans, path, tmp.k, path->btree_id,
839 					       path->level - 1);
840 	}
841 
842 	if (!was_locked)
843 		btree_node_unlock(trans, path, path->level);
844 
845 	bch2_bkey_buf_exit(&tmp, c);
846 	return ret;
847 }
848 
849 static noinline void btree_node_mem_ptr_set(struct btree_trans *trans,
850 					    struct btree_path *path,
851 					    unsigned plevel, struct btree *b)
852 {
853 	struct btree_path_level *l = &path->l[plevel];
854 	bool locked = btree_node_locked(path, plevel);
855 	struct bkey_packed *k;
856 	struct bch_btree_ptr_v2 *bp;
857 
858 	if (!bch2_btree_node_relock(trans, path, plevel))
859 		return;
860 
861 	k = bch2_btree_node_iter_peek_all(&l->iter, l->b);
862 	BUG_ON(k->type != KEY_TYPE_btree_ptr_v2);
863 
864 	bp = (void *) bkeyp_val(&l->b->format, k);
865 	bp->mem_ptr = (unsigned long)b;
866 
867 	if (!locked)
868 		btree_node_unlock(trans, path, plevel);
869 }
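
/*
 * mem_ptr is an in-memory hint stashed in the parent's btree_ptr_v2 key:
 * roughly speaking, it lets a later lookup of this child skip the btree node
 * cache hash table, falling back to the full lookup (and this fixup)
 * whenever the hint has gone stale.
 */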
870 
871 static noinline int btree_node_iter_and_journal_peek(struct btree_trans *trans,
872 						     struct btree_path *path,
873 						     unsigned flags,
874 						     struct bkey_buf *out)
875 {
876 	struct bch_fs *c = trans->c;
877 	struct btree_path_level *l = path_l(path);
878 	struct btree_and_journal_iter jiter;
879 	struct bkey_s_c k;
880 	int ret = 0;
881 
882 	__bch2_btree_and_journal_iter_init_node_iter(trans, &jiter, l->b, l->iter, path->pos);
883 
884 	k = bch2_btree_and_journal_iter_peek(&jiter);
885 
886 	bch2_bkey_buf_reassemble(out, c, k);
887 
888 	if ((flags & BTREE_ITER_prefetch) &&
889 	    c->opts.btree_node_prefetch)
890 		ret = btree_path_prefetch_j(trans, path, &jiter);
891 
892 	bch2_btree_and_journal_iter_exit(&jiter);
893 	return ret;
894 }
895 
896 static __always_inline int btree_path_down(struct btree_trans *trans,
897 					   struct btree_path *path,
898 					   unsigned flags,
899 					   unsigned long trace_ip)
900 {
901 	struct bch_fs *c = trans->c;
902 	struct btree_path_level *l = path_l(path);
903 	struct btree *b;
904 	unsigned level = path->level - 1;
905 	enum six_lock_type lock_type = __btree_lock_want(path, level);
906 	struct bkey_buf tmp;
907 	int ret;
908 
909 	EBUG_ON(!btree_node_locked(path, path->level));
910 
911 	bch2_bkey_buf_init(&tmp);
912 
913 	if (unlikely(trans->journal_replay_not_finished)) {
914 		ret = btree_node_iter_and_journal_peek(trans, path, flags, &tmp);
915 		if (ret)
916 			goto err;
917 	} else {
918 		struct bkey_packed *k = bch2_btree_node_iter_peek(&l->iter, l->b);
919 		if (!k) {
920 			struct printbuf buf = PRINTBUF;
921 
922 			prt_str(&buf, "node not found at pos ");
923 			bch2_bpos_to_text(&buf, path->pos);
924 			prt_str(&buf, " within parent node ");
925 			bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&l->b->key));
926 
927 			bch2_fs_fatal_error(c, "%s", buf.buf);
928 			printbuf_exit(&buf);
929 			ret = -BCH_ERR_btree_need_topology_repair;
930 			goto err;
931 		}
932 
933 		bch2_bkey_buf_unpack(&tmp, c, l->b, k);
934 
935 		if ((flags & BTREE_ITER_prefetch) &&
936 		    c->opts.btree_node_prefetch) {
937 			ret = btree_path_prefetch(trans, path);
938 			if (ret)
939 				goto err;
940 		}
941 	}
942 
943 	b = bch2_btree_node_get(trans, path, tmp.k, level, lock_type, trace_ip);
944 	ret = PTR_ERR_OR_ZERO(b);
945 	if (unlikely(ret))
946 		goto err;
947 
948 	if (likely(!trans->journal_replay_not_finished &&
949 		   tmp.k->k.type == KEY_TYPE_btree_ptr_v2) &&
950 	    unlikely(b != btree_node_mem_ptr(tmp.k)))
951 		btree_node_mem_ptr_set(trans, path, level + 1, b);
952 
953 	if (btree_node_read_locked(path, level + 1))
954 		btree_node_unlock(trans, path, level + 1);
955 
956 	mark_btree_node_locked(trans, path, level,
957 			       (enum btree_node_locked_type) lock_type);
958 	path->level = level;
959 	bch2_btree_path_level_init(trans, path, b);
960 
961 	bch2_btree_path_verify_locks(path);
962 err:
963 	bch2_bkey_buf_exit(&tmp, c);
964 	return ret;
965 }
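
/*
 * Note the two branches above: while journal replay is in flight the
 * authoritative child pointer may still live in the journal, so it's read
 * through an iterator that overlays not-yet-replayed journal keys on top of
 * the node's keys.
 */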
966 
967 static int bch2_btree_path_traverse_all(struct btree_trans *trans)
968 {
969 	struct bch_fs *c = trans->c;
970 	struct btree_path *path;
971 	unsigned long trace_ip = _RET_IP_;
972 	unsigned i;
973 	int ret = 0;
974 
975 	if (trans->in_traverse_all)
976 		return -BCH_ERR_transaction_restart_in_traverse_all;
977 
978 	trans->in_traverse_all = true;
979 retry_all:
980 	trans->restarted = 0;
981 	trans->last_restarted_ip = 0;
982 
983 	trans_for_each_path(trans, path, i)
984 		path->should_be_locked = false;
985 
986 	btree_trans_sort_paths(trans);
987 
988 	bch2_trans_unlock(trans);
989 	cond_resched();
990 	trans_set_locked(trans);
991 
992 	if (unlikely(trans->memory_allocation_failure)) {
993 		struct closure cl;
994 
995 		closure_init_stack(&cl);
996 
997 		do {
998 			ret = bch2_btree_cache_cannibalize_lock(trans, &cl);
999 			closure_sync(&cl);
1000 		} while (ret);
1001 	}
1002 
1003 	/* Now, redo traversals in correct order: */
1004 	i = 0;
1005 	while (i < trans->nr_sorted) {
1006 		btree_path_idx_t idx = trans->sorted[i];
1007 
1008 		/*
1009 		 * Traversing a path can cause another path to be added at about
1010 		 * the same position:
1011 		 */
1012 		if (trans->paths[idx].uptodate) {
1013 			__btree_path_get(&trans->paths[idx], false);
1014 			ret = bch2_btree_path_traverse_one(trans, idx, 0, _THIS_IP_);
1015 			__btree_path_put(&trans->paths[idx], false);
1016 
1017 			if (bch2_err_matches(ret, BCH_ERR_transaction_restart) ||
1018 			    bch2_err_matches(ret, ENOMEM))
1019 				goto retry_all;
1020 			if (ret)
1021 				goto err;
1022 		} else {
1023 			i++;
1024 		}
1025 	}
1026 
1027 	/*
1028 	 * We used to assert that all paths had been traversed here
1029 	 * (path->uptodate < BTREE_ITER_NEED_TRAVERSE); however, since
1030 	 * path->should_be_locked is not set yet, we might have unlocked and
1031 	 * then failed to relock a path - that's fine.
1032 	 */
1033 err:
1034 	bch2_btree_cache_cannibalize_unlock(trans);
1035 
1036 	trans->in_traverse_all = false;
1037 
1038 	trace_and_count(c, trans_traverse_all, trans, trace_ip);
1039 	return ret;
1040 }
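
/*
 * A sketch of why traverse_all exists: after unlocking everything, paths are
 * re-traversed strictly in sorted order, which matches lock order - so a
 * transaction that kept restarting on individual traversals can make forward
 * progress here.
 */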
1041 
1042 static inline bool btree_path_check_pos_in_node(struct btree_path *path,
1043 						unsigned l, int check_pos)
1044 {
1045 	if (check_pos < 0 && btree_path_pos_before_node(path, path->l[l].b))
1046 		return false;
1047 	if (check_pos > 0 && btree_path_pos_after_node(path, path->l[l].b))
1048 		return false;
1049 	return true;
1050 }
1051 
1052 static inline bool btree_path_good_node(struct btree_trans *trans,
1053 					struct btree_path *path,
1054 					unsigned l, int check_pos)
1055 {
1056 	return is_btree_node(path, l) &&
1057 		bch2_btree_node_relock(trans, path, l) &&
1058 		btree_path_check_pos_in_node(path, l, check_pos);
1059 }
1060 
1061 static void btree_path_set_level_down(struct btree_trans *trans,
1062 				      struct btree_path *path,
1063 				      unsigned new_level)
1064 {
1065 	unsigned l;
1066 
1067 	path->level = new_level;
1068 
1069 	for (l = path->level + 1; l < BTREE_MAX_DEPTH; l++)
1070 		if (btree_lock_want(path, l) == BTREE_NODE_UNLOCKED)
1071 			btree_node_unlock(trans, path, l);
1072 
1073 	btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
1074 	bch2_btree_path_verify(trans, path);
1075 }
1076 
1077 static noinline unsigned __btree_path_up_until_good_node(struct btree_trans *trans,
1078 							 struct btree_path *path,
1079 							 int check_pos)
1080 {
1081 	unsigned i, l = path->level;
1082 again:
1083 	while (btree_path_node(path, l) &&
1084 	       !btree_path_good_node(trans, path, l, check_pos))
1085 		__btree_path_set_level_up(trans, path, l++);
1086 
1087 	/* If we need intent locks, take them too: */
1088 	for (i = l + 1;
1089 	     i < path->locks_want && btree_path_node(path, i);
1090 	     i++)
1091 		if (!bch2_btree_node_relock(trans, path, i)) {
1092 			while (l <= i)
1093 				__btree_path_set_level_up(trans, path, l++);
1094 			goto again;
1095 		}
1096 
1097 	return l;
1098 }
1099 
1100 static inline unsigned btree_path_up_until_good_node(struct btree_trans *trans,
1101 						     struct btree_path *path,
1102 						     int check_pos)
1103 {
1104 	return likely(btree_node_locked(path, path->level) &&
1105 		      btree_path_check_pos_in_node(path, path->level, check_pos))
1106 		? path->level
1107 		: __btree_path_up_until_good_node(trans, path, check_pos);
1108 }
1109 
1110 /*
1111  * This is the main state machine for walking down the btree - walks down to a
1112  * specified depth
1113  *
1114  * Returns 0 on success, or a negative error code - e.g. a transaction
1115  * restart, or an error reading in a btree node.
1116  *
1117  * On error, the level's node pointer is stashed in the path as an ERR_PTR.
1118  */
1119 int bch2_btree_path_traverse_one(struct btree_trans *trans,
1120 				 btree_path_idx_t path_idx,
1121 				 unsigned flags,
1122 				 unsigned long trace_ip)
1123 {
1124 	struct btree_path *path = &trans->paths[path_idx];
1125 	unsigned depth_want = path->level;
1126 	int ret = -((int) trans->restarted);
1127 
1128 	if (unlikely(ret))
1129 		goto out;
1130 
1131 	if (unlikely(!trans->srcu_held))
1132 		bch2_trans_srcu_lock(trans);
1133 
1134 	/*
1135 	 * Ensure we obey path->should_be_locked: if it's set, we can't unlock
1136 	 * and re-traverse the path without a transaction restart:
1137 	 */
1138 	if (path->should_be_locked) {
1139 		ret = bch2_btree_path_relock(trans, path, trace_ip);
1140 		goto out;
1141 	}
1142 
1143 	if (path->cached) {
1144 		ret = bch2_btree_path_traverse_cached(trans, path, flags);
1145 		goto out;
1146 	}
1147 
1148 	path = &trans->paths[path_idx];
1149 
1150 	if (unlikely(path->level >= BTREE_MAX_DEPTH))
1151 		goto out_uptodate;
1152 
1153 	path->level = btree_path_up_until_good_node(trans, path, 0);
1154 	unsigned max_level = path->level;
1155 
1156 	EBUG_ON(btree_path_node(path, path->level) &&
1157 		!btree_node_locked(path, path->level));
1158 
1159 	/*
1160 	 * Note: path->l[path->level].b may be temporarily NULL here - that
1161 	 * would indicate to other code that we got to the end of the btree,
1162 	 * here it indicates that relocking the root failed - it's critical that
1163 	 * btree_path_lock_root() comes next and that it can't fail
1164 	 */
1165 	while (path->level > depth_want) {
1166 		ret = btree_path_node(path, path->level)
1167 			? btree_path_down(trans, path, flags, trace_ip)
1168 			: btree_path_lock_root(trans, path, depth_want, trace_ip);
1169 		if (unlikely(ret)) {
1170 			if (ret == 1) {
1171 				/*
1172 				 * No nodes at this level - got to the end of
1173 				 * the btree:
1174 				 */
1175 				ret = 0;
1176 				goto out;
1177 			}
1178 
1179 			__bch2_btree_path_unlock(trans, path);
1180 			path->level = depth_want;
1181 			path->l[path->level].b = ERR_PTR(ret);
1182 			goto out;
1183 		}
1184 	}
1185 
1186 	if (unlikely(max_level > path->level)) {
1187 		struct btree_path *linked;
1188 		unsigned iter;
1189 
1190 		trans_for_each_path_with_node(trans, path_l(path)->b, linked, iter)
1191 			for (unsigned j = path->level + 1; j < max_level; j++)
1192 				linked->l[j] = path->l[j];
1193 	}
1194 
1195 out_uptodate:
1196 	path->uptodate = BTREE_ITER_UPTODATE;
1197 out:
1198 	if (bch2_err_matches(ret, BCH_ERR_transaction_restart) != !!trans->restarted)
1199 		panic("ret %s (%i) trans->restarted %s (%i)\n",
1200 		      bch2_err_str(ret), ret,
1201 		      bch2_err_str(trans->restarted), trans->restarted);
1202 	bch2_btree_path_verify(trans, path);
1203 	return ret;
1204 }
1205 
1206 static inline void btree_path_copy(struct btree_trans *trans, struct btree_path *dst,
1207 			    struct btree_path *src)
1208 {
1209 	unsigned i, offset = offsetof(struct btree_path, pos);
1210 
1211 	memcpy((void *) dst + offset,
1212 	       (void *) src + offset,
1213 	       sizeof(struct btree_path) - offset);
1214 
1215 	for (i = 0; i < BTREE_MAX_DEPTH; i++) {
1216 		unsigned t = btree_node_locked_type(dst, i);
1217 
1218 		if (t != BTREE_NODE_UNLOCKED)
1219 			six_lock_increment(&dst->l[i].b->c.lock, t);
1220 	}
1221 }
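
/*
 * Note that copying a path duplicates its lock references: for every level
 * the copy holds locked, six_lock_increment() bumps the shared six lock so
 * that both paths can later drop their locks independently.
 */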
1222 
1223 static btree_path_idx_t btree_path_clone(struct btree_trans *trans, btree_path_idx_t src,
1224 					 bool intent, unsigned long ip)
1225 {
1226 	btree_path_idx_t new = btree_path_alloc(trans, src);
1227 	btree_path_copy(trans, trans->paths + new, trans->paths + src);
1228 	__btree_path_get(trans->paths + new, intent);
1229 #ifdef TRACK_PATH_ALLOCATED
1230 	trans->paths[new].ip_allocated = ip;
1231 #endif
1232 	return new;
1233 }
1234 
1235 __flatten
1236 btree_path_idx_t __bch2_btree_path_make_mut(struct btree_trans *trans,
1237 			btree_path_idx_t path, bool intent, unsigned long ip)
1238 {
1239 	__btree_path_put(trans->paths + path, intent);
1240 	path = btree_path_clone(trans, path, intent, ip);
1241 	trans->paths[path].preserve = false;
1242 	return path;
1243 }
1244 
1245 btree_path_idx_t __must_check
1246 __bch2_btree_path_set_pos(struct btree_trans *trans,
1247 			  btree_path_idx_t path_idx, struct bpos new_pos,
1248 			  bool intent, unsigned long ip)
1249 {
1250 	int cmp = bpos_cmp(new_pos, trans->paths[path_idx].pos);
1251 
1252 	bch2_trans_verify_not_in_restart(trans);
1253 	EBUG_ON(!trans->paths[path_idx].ref);
1254 
1255 	path_idx = bch2_btree_path_make_mut(trans, path_idx, intent, ip);
1256 
1257 	struct btree_path *path = trans->paths + path_idx;
1258 	path->pos		= new_pos;
1259 	trans->paths_sorted	= false;
1260 
1261 	if (unlikely(path->cached)) {
1262 		btree_node_unlock(trans, path, 0);
1263 		path->l[0].b = ERR_PTR(-BCH_ERR_no_btree_node_up);
1264 		btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
1265 		goto out;
1266 	}
1267 
1268 	unsigned level = btree_path_up_until_good_node(trans, path, cmp);
1269 
1270 	if (btree_path_node(path, level)) {
1271 		struct btree_path_level *l = &path->l[level];
1272 
1273 		BUG_ON(!btree_node_locked(path, level));
1274 		/*
1275 		 * We might have to skip over many keys, or just a few: try
1276 		 * advancing the node iterator, and if we have to skip over too
1277 		 * many keys just reinit it (or if we're rewinding, since that
1278 		 * is expensive).
1279 		 */
1280 		if (cmp < 0 ||
1281 		    !btree_path_advance_to_pos(path, l, 8))
1282 			bch2_btree_node_iter_init(&l->iter, l->b, &path->pos);
1283 
1284 		/*
1285 		 * Iterators to interior nodes should always be pointed at the first
1286 		 * non-whiteout:
1287 		 */
1288 		if (unlikely(level))
1289 			bch2_btree_node_iter_peek(&l->iter, l->b);
1290 	}
1291 
1292 	if (unlikely(level != path->level)) {
1293 		btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
1294 		__bch2_btree_path_unlock(trans, path);
1295 	}
1296 out:
1297 	bch2_btree_path_verify(trans, path);
1298 	return path_idx;
1299 }
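
/*
 * The "8" above is a heuristic cutoff: advancing the node iterator key by
 * key is cheap for short forward moves, while a long jump (or any backwards
 * move, cmp < 0) is better served by a fresh binary search via
 * bch2_btree_node_iter_init().
 */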
1300 
1301 /* Btree path: main interface: */
1302 
1303 static struct btree_path *have_path_at_pos(struct btree_trans *trans, struct btree_path *path)
1304 {
1305 	struct btree_path *sib;
1306 
1307 	sib = prev_btree_path(trans, path);
1308 	if (sib && !btree_path_cmp(sib, path))
1309 		return sib;
1310 
1311 	sib = next_btree_path(trans, path);
1312 	if (sib && !btree_path_cmp(sib, path))
1313 		return sib;
1314 
1315 	return NULL;
1316 }
1317 
1318 static struct btree_path *have_node_at_pos(struct btree_trans *trans, struct btree_path *path)
1319 {
1320 	struct btree_path *sib;
1321 
1322 	sib = prev_btree_path(trans, path);
1323 	if (sib && sib->level == path->level && path_l(sib)->b == path_l(path)->b)
1324 		return sib;
1325 
1326 	sib = next_btree_path(trans, path);
1327 	if (sib && sib->level == path->level && path_l(sib)->b == path_l(path)->b)
1328 		return sib;
1329 
1330 	return NULL;
1331 }
1332 
1333 static inline void __bch2_path_free(struct btree_trans *trans, btree_path_idx_t path)
1334 {
1335 	__bch2_btree_path_unlock(trans, trans->paths + path);
1336 	btree_path_list_remove(trans, trans->paths + path);
1337 	__clear_bit(path, trans->paths_allocated);
1338 }
1339 
1340 static bool bch2_btree_path_can_relock(struct btree_trans *trans, struct btree_path *path)
1341 {
1342 	unsigned l = path->level;
1343 
1344 	do {
1345 		if (!btree_path_node(path, l))
1346 			break;
1347 
1348 		if (!is_btree_node(path, l))
1349 			return false;
1350 
1351 		if (path->l[l].lock_seq != path->l[l].b->c.lock.seq)
1352 			return false;
1353 
1354 		l++;
1355 	} while (l < path->locks_want);
1356 
1357 	return true;
1358 }
1359 
1360 void bch2_path_put(struct btree_trans *trans, btree_path_idx_t path_idx, bool intent)
1361 {
1362 	struct btree_path *path = trans->paths + path_idx, *dup;
1363 
1364 	if (!__btree_path_put(path, intent))
1365 		return;
1366 
1367 	dup = path->preserve
1368 		? have_path_at_pos(trans, path)
1369 		: have_node_at_pos(trans, path);
1370 
1371 	if (!dup && !(!path->preserve && !is_btree_node(path, path->level)))
1372 		return;
1373 
1374 	if (path->should_be_locked && !trans->restarted) {
1375 		if (!dup)
1376 			return;
1377 
1378 		if (!(trans->locked
1379 		      ? bch2_btree_path_relock_norestart(trans, dup)
1380 		      : bch2_btree_path_can_relock(trans, dup)))
1381 			return;
1382 	}
1383 
1384 	if (dup) {
1385 		dup->preserve		|= path->preserve;
1386 		dup->should_be_locked	|= path->should_be_locked;
1387 	}
1388 
1389 	__bch2_path_free(trans, path_idx);
1390 }
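
/*
 * A sketch of the rules above: dropping the last ref only frees the path if
 * either a duplicate can take over its duties (inheriting preserve and
 * should_be_locked, and relockable if need be), or the path was neither
 * marked preserve nor pointing at a valid btree node.
 */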
1391 
1392 static void bch2_path_put_nokeep(struct btree_trans *trans, btree_path_idx_t path,
1393 				 bool intent)
1394 {
1395 	if (!__btree_path_put(trans->paths + path, intent))
1396 		return;
1397 
1398 	__bch2_path_free(trans, path);
1399 }
1400 
1401 void __noreturn bch2_trans_restart_error(struct btree_trans *trans, u32 restart_count)
1402 {
1403 	panic("trans->restart_count %u, should be %u, last restarted by %pS\n",
1404 	      trans->restart_count, restart_count,
1405 	      (void *) trans->last_begin_ip);
1406 }
1407 
1408 void __noreturn bch2_trans_in_restart_error(struct btree_trans *trans)
1409 {
1410 	panic("in transaction restart: %s, last restarted by %pS\n",
1411 	      bch2_err_str(trans->restarted),
1412 	      (void *) trans->last_restarted_ip);
1413 }
1414 
1415 void __noreturn bch2_trans_unlocked_error(struct btree_trans *trans)
1416 {
1417 	panic("trans should be locked, unlocked by %pS\n",
1418 	      (void *) trans->last_unlock_ip);
1419 }
1420 
1421 noinline __cold
1422 void bch2_trans_updates_to_text(struct printbuf *buf, struct btree_trans *trans)
1423 {
1424 	prt_printf(buf, "transaction updates for %s journal seq %llu\n",
1425 	       trans->fn, trans->journal_res.seq);
1426 	printbuf_indent_add(buf, 2);
1427 
1428 	trans_for_each_update(trans, i) {
1429 		struct bkey_s_c old = { &i->old_k, i->old_v };
1430 
1431 		prt_printf(buf, "update: btree=%s cached=%u %pS\n",
1432 		       bch2_btree_id_str(i->btree_id),
1433 		       i->cached,
1434 		       (void *) i->ip_allocated);
1435 
1436 		prt_printf(buf, "  old ");
1437 		bch2_bkey_val_to_text(buf, trans->c, old);
1438 		prt_newline(buf);
1439 
1440 		prt_printf(buf, "  new ");
1441 		bch2_bkey_val_to_text(buf, trans->c, bkey_i_to_s_c(i->k));
1442 		prt_newline(buf);
1443 	}
1444 
1445 	for (struct jset_entry *e = trans->journal_entries;
1446 	     e != btree_trans_journal_entries_top(trans);
1447 	     e = vstruct_next(e))
1448 		bch2_journal_entry_to_text(buf, trans->c, e);
1449 
1450 	printbuf_indent_sub(buf, 2);
1451 }
1452 
1453 noinline __cold
1454 void bch2_dump_trans_updates(struct btree_trans *trans)
1455 {
1456 	struct printbuf buf = PRINTBUF;
1457 
1458 	bch2_trans_updates_to_text(&buf, trans);
1459 	bch2_print_str(trans->c, buf.buf);
1460 	printbuf_exit(&buf);
1461 }
1462 
1463 static void bch2_btree_path_to_text_short(struct printbuf *out, struct btree_trans *trans, btree_path_idx_t path_idx)
1464 {
1465 	struct btree_path *path = trans->paths + path_idx;
1466 
1467 	prt_printf(out, "path: idx %2u ref %u:%u %c %c %c btree=%s l=%u pos ",
1468 		   path_idx, path->ref, path->intent_ref,
1469 		   path->preserve ? 'P' : ' ',
1470 		   path->should_be_locked ? 'S' : ' ',
1471 		   path->cached ? 'C' : 'B',
1472 		   bch2_btree_id_str(path->btree_id),
1473 		   path->level);
1474 	bch2_bpos_to_text(out, path->pos);
1475 
1476 	if (!path->cached && btree_node_locked(path, path->level)) {
1477 		prt_char(out, ' ');
1478 		struct btree *b = path_l(path)->b;
1479 		bch2_bpos_to_text(out, b->data->min_key);
1480 		prt_char(out, '-');
1481 		bch2_bpos_to_text(out, b->key.k.p);
1482 	}
1483 
1484 #ifdef TRACK_PATH_ALLOCATED
1485 	prt_printf(out, " %pS", (void *) path->ip_allocated);
1486 #endif
1487 }
1488 
1489 static const char *btree_node_locked_str(enum btree_node_locked_type t)
1490 {
1491 	switch (t) {
1492 	case BTREE_NODE_UNLOCKED:
1493 		return "unlocked";
1494 	case BTREE_NODE_READ_LOCKED:
1495 		return "read";
1496 	case BTREE_NODE_INTENT_LOCKED:
1497 		return "intent";
1498 	case BTREE_NODE_WRITE_LOCKED:
1499 		return "write";
1500 	default:
1501 		return NULL;
1502 	}
1503 }
1504 
1505 void bch2_btree_path_to_text(struct printbuf *out, struct btree_trans *trans, btree_path_idx_t path_idx)
1506 {
1507 	bch2_btree_path_to_text_short(out, trans, path_idx);
1508 
1509 	struct btree_path *path = trans->paths + path_idx;
1510 
1511 	prt_printf(out, " uptodate %u locks_want %u", path->uptodate, path->locks_want);
1512 	prt_newline(out);
1513 
1514 	printbuf_indent_add(out, 2);
1515 	for (unsigned l = 0; l < BTREE_MAX_DEPTH; l++) {
1516 		prt_printf(out, "l=%u locks %s seq %u node ", l,
1517 			   btree_node_locked_str(btree_node_locked_type(path, l)),
1518 			   path->l[l].lock_seq);
1519 
1520 		int ret = PTR_ERR_OR_ZERO(path->l[l].b);
1521 		if (ret)
1522 			prt_str(out, bch2_err_str(ret));
1523 		else
1524 			prt_printf(out, "%px", path->l[l].b);
1525 		prt_newline(out);
1526 	}
1527 	printbuf_indent_sub(out, 2);
1528 }
1529 
1530 static noinline __cold
1531 void __bch2_trans_paths_to_text(struct printbuf *out, struct btree_trans *trans,
1532 				bool nosort)
1533 {
1534 	struct trans_for_each_path_inorder_iter iter;
1535 
1536 	if (!nosort)
1537 		btree_trans_sort_paths(trans);
1538 
1539 	trans_for_each_path_idx_inorder(trans, iter) {
1540 		bch2_btree_path_to_text_short(out, trans, iter.path_idx);
1541 		prt_newline(out);
1542 	}
1543 }
1544 
1545 noinline __cold
1546 void bch2_trans_paths_to_text(struct printbuf *out, struct btree_trans *trans)
1547 {
1548 	__bch2_trans_paths_to_text(out, trans, false);
1549 }
1550 
1551 static noinline __cold
1552 void __bch2_dump_trans_paths_updates(struct btree_trans *trans, bool nosort)
1553 {
1554 	struct printbuf buf = PRINTBUF;
1555 
1556 	__bch2_trans_paths_to_text(&buf, trans, nosort);
1557 	bch2_trans_updates_to_text(&buf, trans);
1558 
1559 	bch2_print_str(trans->c, buf.buf);
1560 	printbuf_exit(&buf);
1561 }
1562 
1563 noinline __cold
1564 void bch2_dump_trans_paths_updates(struct btree_trans *trans)
1565 {
1566 	__bch2_dump_trans_paths_updates(trans, false);
1567 }
1568 
1569 noinline __cold
1570 static void bch2_trans_update_max_paths(struct btree_trans *trans)
1571 {
1572 	struct btree_transaction_stats *s = btree_trans_stats(trans);
1573 	struct printbuf buf = PRINTBUF;
1574 	size_t nr = bitmap_weight(trans->paths_allocated, trans->nr_paths);
1575 
1576 	bch2_trans_paths_to_text(&buf, trans);
1577 
1578 	if (!buf.allocation_failure) {
1579 		mutex_lock(&s->lock);
1580 		if (nr > s->nr_max_paths) {
1581 			s->nr_max_paths = nr;
1582 			swap(s->max_paths_text, buf.buf);
1583 		}
1584 		mutex_unlock(&s->lock);
1585 	}
1586 
1587 	printbuf_exit(&buf);
1588 
1589 	trans->nr_paths_max = nr;
1590 }
1591 
1592 noinline __cold
1593 int __bch2_btree_trans_too_many_iters(struct btree_trans *trans)
1594 {
1595 	if (trace_trans_restart_too_many_iters_enabled()) {
1596 		struct printbuf buf = PRINTBUF;
1597 
1598 		bch2_trans_paths_to_text(&buf, trans);
1599 		trace_trans_restart_too_many_iters(trans, _THIS_IP_, buf.buf);
1600 		printbuf_exit(&buf);
1601 	}
1602 
1603 	count_event(trans->c, trans_restart_too_many_iters);
1604 
1605 	return btree_trans_restart(trans, BCH_ERR_transaction_restart_too_many_iters);
1606 }
1607 
1608 static noinline void btree_path_overflow(struct btree_trans *trans)
1609 {
1610 	bch2_dump_trans_paths_updates(trans);
1611 	bch_err(trans->c, "trans path overflow");
1612 }
1613 
1614 static noinline void btree_paths_realloc(struct btree_trans *trans)
1615 {
1616 	unsigned nr = trans->nr_paths * 2;
1617 
1618 	void *p = kvzalloc(BITS_TO_LONGS(nr) * sizeof(unsigned long) +
1619 			  sizeof(struct btree_trans_paths) +
1620 			  nr * sizeof(struct btree_path) +
1621 			  nr * sizeof(btree_path_idx_t) + 8 +
1622 			  nr * sizeof(struct btree_insert_entry), GFP_KERNEL|__GFP_NOFAIL);
1623 
1624 	unsigned long *paths_allocated = p;
1625 	memcpy(paths_allocated, trans->paths_allocated, BITS_TO_LONGS(trans->nr_paths) * sizeof(unsigned long));
1626 	p += BITS_TO_LONGS(nr) * sizeof(unsigned long);
1627 
1628 	p += sizeof(struct btree_trans_paths);
1629 	struct btree_path *paths = p;
1630 	*trans_paths_nr(paths) = nr;
1631 	memcpy(paths, trans->paths, trans->nr_paths * sizeof(struct btree_path));
1632 	p += nr * sizeof(struct btree_path);
1633 
1634 	btree_path_idx_t *sorted = p;
1635 	memcpy(sorted, trans->sorted, trans->nr_sorted * sizeof(btree_path_idx_t));
1636 	p += nr * sizeof(btree_path_idx_t) + 8;
1637 
1638 	struct btree_insert_entry *updates = p;
1639 	memcpy(updates, trans->updates, trans->nr_paths * sizeof(struct btree_insert_entry));
1640 
1641 	unsigned long *old = trans->paths_allocated;
1642 
1643 	rcu_assign_pointer(trans->paths_allocated,	paths_allocated);
1644 	rcu_assign_pointer(trans->paths,		paths);
1645 	rcu_assign_pointer(trans->sorted,		sorted);
1646 	rcu_assign_pointer(trans->updates,		updates);
1647 
1648 	trans->nr_paths		= nr;
1649 
1650 	if (old != trans->_paths_allocated)
1651 		kfree_rcu_mightsleep(old);
1652 }
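
/*
 * Layout sketch of the single allocation above, in order: paths_allocated
 * bitmap, btree_trans_paths header, nr btree_paths, the sorted index array
 * (plus 8 bytes of slack), then nr insert entries. The new arrays are
 * published with rcu_assign_pointer() so that lockless readers (e.g. the
 * deadlock cycle detector) can keep walking the old ones until
 * kfree_rcu_mightsleep() reclaims them.
 */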
1653 
1654 static inline btree_path_idx_t btree_path_alloc(struct btree_trans *trans,
1655 						btree_path_idx_t pos)
1656 {
1657 	btree_path_idx_t idx = find_first_zero_bit(trans->paths_allocated, trans->nr_paths);
1658 
1659 	if (unlikely(idx == trans->nr_paths)) {
1660 		if (trans->nr_paths == BTREE_ITER_MAX) {
1661 			btree_path_overflow(trans);
1662 			return 0;
1663 		}
1664 
1665 		btree_paths_realloc(trans);
1666 	}
1667 
1668 	/*
1669 	 * Do this before marking the new path as allocated, since it won't be
1670 	 * initialized yet:
1671 	 */
1672 	if (unlikely(idx > trans->nr_paths_max))
1673 		bch2_trans_update_max_paths(trans);
1674 
1675 	__set_bit(idx, trans->paths_allocated);
1676 
1677 	struct btree_path *path = &trans->paths[idx];
1678 	path->ref		= 0;
1679 	path->intent_ref	= 0;
1680 	path->nodes_locked	= 0;
1681 
1682 	btree_path_list_add(trans, pos, idx);
1683 	trans->paths_sorted = false;
1684 	return idx;
1685 }
1686 
1687 btree_path_idx_t bch2_path_get(struct btree_trans *trans,
1688 			     enum btree_id btree_id, struct bpos pos,
1689 			     unsigned locks_want, unsigned level,
1690 			     unsigned flags, unsigned long ip)
1691 {
1692 	struct btree_path *path;
1693 	bool cached = flags & BTREE_ITER_cached;
1694 	bool intent = flags & BTREE_ITER_intent;
1695 	struct trans_for_each_path_inorder_iter iter;
1696 	btree_path_idx_t path_pos = 0, path_idx;
1697 
1698 	bch2_trans_verify_not_unlocked(trans);
1699 	bch2_trans_verify_not_in_restart(trans);
1700 	bch2_trans_verify_locks(trans);
1701 
1702 	btree_trans_sort_paths(trans);
1703 
1704 	trans_for_each_path_inorder(trans, path, iter) {
1705 		if (__btree_path_cmp(path,
1706 				     btree_id,
1707 				     cached,
1708 				     pos,
1709 				     level) > 0)
1710 			break;
1711 
1712 		path_pos = iter.path_idx;
1713 	}
1714 
1715 	if (path_pos &&
1716 	    trans->paths[path_pos].cached	== cached &&
1717 	    trans->paths[path_pos].btree_id	== btree_id &&
1718 	    trans->paths[path_pos].level	== level) {
1719 		__btree_path_get(trans->paths + path_pos, intent);
1720 		path_idx = bch2_btree_path_set_pos(trans, path_pos, pos, intent, ip);
1721 		path = trans->paths + path_idx;
1722 	} else {
1723 		path_idx = btree_path_alloc(trans, path_pos);
1724 		path = trans->paths + path_idx;
1725 
1726 		__btree_path_get(path, intent);
1727 		path->pos			= pos;
1728 		path->btree_id			= btree_id;
1729 		path->cached			= cached;
1730 		path->uptodate			= BTREE_ITER_NEED_TRAVERSE;
1731 		path->should_be_locked		= false;
1732 		path->level			= level;
1733 		path->locks_want		= locks_want;
1734 		path->nodes_locked		= 0;
1735 		for (unsigned i = 0; i < ARRAY_SIZE(path->l); i++)
1736 			path->l[i].b		= ERR_PTR(-BCH_ERR_no_btree_node_init);
1737 #ifdef TRACK_PATH_ALLOCATED
1738 		path->ip_allocated		= ip;
1739 #endif
1740 		trans->paths_sorted		= false;
1741 	}
1742 
1743 	if (!(flags & BTREE_ITER_nopreserve))
1744 		path->preserve = true;
1745 
1746 	if (path->intent_ref)
1747 		locks_want = max(locks_want, level + 1);
1748 
1749 	/*
1750 	 * If the path has locks_want greater than requested, we don't downgrade
1751 	 * it here - after a transaction restart (e.g. because a btree node split
1752 	 * needed to upgrade locks), we might be putting/getting the iterator again.
1753 	 * Downgrading iterators only happens via bch2_trans_downgrade(), after
1754 	 * a successful transaction commit.
1755 	 */
1756 
1757 	locks_want = min(locks_want, BTREE_MAX_DEPTH);
1758 	if (locks_want > path->locks_want)
1759 		bch2_btree_path_upgrade_noupgrade_sibs(trans, path, locks_want, NULL);
1760 
1761 	return path_idx;
1762 }
1763 
1764 btree_path_idx_t bch2_path_get_unlocked_mut(struct btree_trans *trans,
1765 					    enum btree_id btree_id,
1766 					    unsigned level,
1767 					    struct bpos pos)
1768 {
1769 	btree_path_idx_t path_idx = bch2_path_get(trans, btree_id, pos, level + 1, level,
1770 			     BTREE_ITER_nopreserve|
1771 			     BTREE_ITER_intent, _RET_IP_);
1772 	path_idx = bch2_btree_path_make_mut(trans, path_idx, true, _RET_IP_);
1773 
1774 	struct btree_path *path = trans->paths + path_idx;
1775 	bch2_btree_path_downgrade(trans, path);
1776 	__bch2_btree_path_unlock(trans, path);
1777 	return path_idx;
1778 }
1779 
1780 struct bkey_s_c bch2_btree_path_peek_slot(struct btree_path *path, struct bkey *u)
1781 {
1782 
1783 	struct btree_path_level *l = path_l(path);
1784 	struct bkey_packed *_k;
1785 	struct bkey_s_c k;
1786 
1787 	if (unlikely(!l->b))
1788 		return bkey_s_c_null;
1789 
1790 	EBUG_ON(path->uptodate != BTREE_ITER_UPTODATE);
1791 	EBUG_ON(!btree_node_locked(path, path->level));
1792 
1793 	if (!path->cached) {
1794 		_k = bch2_btree_node_iter_peek_all(&l->iter, l->b);
1795 		k = _k ? bkey_disassemble(l->b, _k, u) : bkey_s_c_null;
1796 
1797 		EBUG_ON(k.k && bkey_deleted(k.k) && bpos_eq(k.k->p, path->pos));
1798 
1799 		if (!k.k || !bpos_eq(path->pos, k.k->p))
1800 			goto hole;
1801 	} else {
1802 		struct bkey_cached *ck = (void *) path->l[0].b;
1803 		if (!ck)
1804 			return bkey_s_c_null;
1805 
1806 		EBUG_ON(path->btree_id != ck->key.btree_id ||
1807 			!bkey_eq(path->pos, ck->key.pos));
1808 
1809 		*u = ck->k->k;
1810 		k = bkey_i_to_s_c(ck->k);
1811 	}
1812 
1813 	return k;
1814 hole:
1815 	bkey_init(u);
1816 	u->p = path->pos;
1817 	return (struct bkey_s_c) { u, NULL };
1818 }
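
/*
 * Note the hole convention above: when there's no key at path->pos, the
 * caller's bkey is initialized as a deleted key at that position and
 * returned with a NULL value pointer, so callers see an empty slot rather
 * than an error.
 */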
1819 
1820 
1821 void bch2_set_btree_iter_dontneed(struct btree_iter *iter)
1822 {
1823 	struct btree_trans *trans = iter->trans;
1824 
1825 	if (!iter->path || trans->restarted)
1826 		return;
1827 
1828 	struct btree_path *path = btree_iter_path(trans, iter);
1829 	path->preserve		= false;
1830 	if (path->ref == 1)
1831 		path->should_be_locked	= false;
1832 }

1833 /* Btree iterators: */
1834 
1835 int __must_check
1836 __bch2_btree_iter_traverse(struct btree_iter *iter)
1837 {
1838 	return bch2_btree_path_traverse(iter->trans, iter->path, iter->flags);
1839 }
1840 
1841 int __must_check
1842 bch2_btree_iter_traverse(struct btree_iter *iter)
1843 {
1844 	struct btree_trans *trans = iter->trans;
1845 	int ret;
1846 
1847 	bch2_trans_verify_not_unlocked(trans);
1848 
1849 	iter->path = bch2_btree_path_set_pos(trans, iter->path,
1850 					btree_iter_search_key(iter),
1851 					iter->flags & BTREE_ITER_intent,
1852 					btree_iter_ip_allocated(iter));
1853 
1854 	ret = bch2_btree_path_traverse(iter->trans, iter->path, iter->flags);
1855 	if (ret)
1856 		return ret;
1857 
1858 	struct btree_path *path = btree_iter_path(trans, iter);
1859 	if (btree_path_node(path, path->level))
1860 		btree_path_set_should_be_locked(path);
1861 	return 0;
1862 }
1863 
1864 /* Iterate across nodes (leaf and interior nodes) */
1865 
1866 struct btree *bch2_btree_iter_peek_node(struct btree_iter *iter)
1867 {
1868 	struct btree_trans *trans = iter->trans;
1869 	struct btree *b = NULL;
1870 	int ret;
1871 
1872 	EBUG_ON(trans->paths[iter->path].cached);
1873 	bch2_btree_iter_verify(iter);
1874 
1875 	ret = bch2_btree_path_traverse(trans, iter->path, iter->flags);
1876 	if (ret)
1877 		goto err;
1878 
1879 	struct btree_path *path = btree_iter_path(trans, iter);
1880 	b = btree_path_node(path, path->level);
1881 	if (!b)
1882 		goto out;
1883 
1884 	BUG_ON(bpos_lt(b->key.k.p, iter->pos));
1885 
1886 	bkey_init(&iter->k);
1887 	iter->k.p = iter->pos = b->key.k.p;
1888 
1889 	iter->path = bch2_btree_path_set_pos(trans, iter->path, b->key.k.p,
1890 					iter->flags & BTREE_ITER_intent,
1891 					btree_iter_ip_allocated(iter));
1892 	btree_path_set_should_be_locked(btree_iter_path(trans, iter));
1893 out:
1894 	bch2_btree_iter_verify_entry_exit(iter);
1895 	bch2_btree_iter_verify(iter);
1896 
1897 	return b;
1898 err:
1899 	b = ERR_PTR(ret);
1900 	goto out;
1901 }
1902 
1903 struct btree *bch2_btree_iter_peek_node_and_restart(struct btree_iter *iter)
1904 {
1905 	struct btree *b;
1906 
1907 	while (b = bch2_btree_iter_peek_node(iter),
1908 	       bch2_err_matches(PTR_ERR_OR_ZERO(b), BCH_ERR_transaction_restart))
1909 		bch2_trans_begin(iter->trans);
1910 
1911 	return b;
1912 }
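
/*
 * Example: walking every node of a btree with the helpers above. An
 * illustrative sketch only (not compiled here) - real callers typically use
 * the for_each_btree_node() wrapper, and must also handle a transaction
 * restart returned by bch2_btree_iter_next_node():
 *
 *	static int walk_nodes(struct btree_iter *iter)
 *	{
 *		struct btree *b;
 *
 *		for (b = bch2_btree_iter_peek_node_and_restart(iter);
 *		     b && !IS_ERR(b);
 *		     b = bch2_btree_iter_next_node(iter))
 *			pr_info("node at level %u", b->c.level);
 *
 *		return PTR_ERR_OR_ZERO(b);
 *	}
 */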
1913 
1914 struct btree *bch2_btree_iter_next_node(struct btree_iter *iter)
1915 {
1916 	struct btree_trans *trans = iter->trans;
1917 	struct btree *b = NULL;
1918 	int ret;
1919 
1920 	EBUG_ON(trans->paths[iter->path].cached);
1921 	bch2_trans_verify_not_in_restart(trans);
1922 	bch2_btree_iter_verify(iter);
1923 
1924 	struct btree_path *path = btree_iter_path(trans, iter);
1925 
1926 	/* already at end? */
1927 	if (!btree_path_node(path, path->level))
1928 		return NULL;
1929 
1930 	/* no parent node? we're at the root - done iterating: */
1931 	if (!btree_path_node(path, path->level + 1)) {
1932 		btree_path_set_level_up(trans, path);
1933 		return NULL;
1934 	}
1935 
1936 	if (!bch2_btree_node_relock(trans, path, path->level + 1)) {
1937 		__bch2_btree_path_unlock(trans, path);
1938 		path->l[path->level].b		= ERR_PTR(-BCH_ERR_no_btree_node_relock);
1939 		path->l[path->level + 1].b	= ERR_PTR(-BCH_ERR_no_btree_node_relock);
1940 		btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
1941 		trace_and_count(trans->c, trans_restart_relock_next_node, trans, _THIS_IP_, path);
1942 		ret = btree_trans_restart(trans, BCH_ERR_transaction_restart_relock);
1943 		goto err;
1944 	}
1945 
1946 	b = btree_path_node(path, path->level + 1);
1947 
1948 	if (bpos_eq(iter->pos, b->key.k.p)) {
1949 		__btree_path_set_level_up(trans, path, path->level++);
1950 	} else {
1951 		if (btree_lock_want(path, path->level + 1) == BTREE_NODE_UNLOCKED)
1952 			btree_node_unlock(trans, path, path->level + 1);
1953 
1954 		/*
1955 		 * Haven't gotten to the end of the parent node: go back down to
1956 		 * the next child node
1957 		 */
1958 		iter->path = bch2_btree_path_set_pos(trans, iter->path,
1959 					bpos_successor(iter->pos),
1960 					iter->flags & BTREE_ITER_intent,
1961 					btree_iter_ip_allocated(iter));
1962 
1963 		path = btree_iter_path(trans, iter);
1964 		btree_path_set_level_down(trans, path, iter->min_depth);
1965 
1966 		ret = bch2_btree_path_traverse(trans, iter->path, iter->flags);
1967 		if (ret)
1968 			goto err;
1969 
1970 		path = btree_iter_path(trans, iter);
1971 		b = path->l[path->level].b;
1972 	}
1973 
1974 	bkey_init(&iter->k);
1975 	iter->k.p = iter->pos = b->key.k.p;
1976 
1977 	iter->path = bch2_btree_path_set_pos(trans, iter->path, b->key.k.p,
1978 					iter->flags & BTREE_ITER_intent,
1979 					btree_iter_ip_allocated(iter));
1980 	btree_path_set_should_be_locked(btree_iter_path(trans, iter));
1981 	EBUG_ON(btree_iter_path(trans, iter)->uptodate);
1982 out:
1983 	bch2_btree_iter_verify_entry_exit(iter);
1984 	bch2_btree_iter_verify(iter);
1985 
1986 	return b;
1987 err:
1988 	b = ERR_PTR(ret);
1989 	goto out;
1990 }
1991 
1992 /* Iterate across keys (in leaf nodes only) */
1993 
1994 inline bool bch2_btree_iter_advance(struct btree_iter *iter)
1995 {
1996 	struct bpos pos = iter->k.p;
1997 	bool ret = !(iter->flags & BTREE_ITER_all_snapshots
1998 		     ? bpos_eq(pos, SPOS_MAX)
1999 		     : bkey_eq(pos, SPOS_MAX));
2000 
2001 	if (ret && !(iter->flags & BTREE_ITER_is_extents))
2002 		pos = bkey_successor(iter, pos);
2003 	bch2_btree_iter_set_pos(iter, pos);
2004 	return ret;
2005 }
2006 
2007 inline bool bch2_btree_iter_rewind(struct btree_iter *iter)
2008 {
2009 	struct bpos pos = bkey_start_pos(&iter->k);
2010 	bool ret = !(iter->flags & BTREE_ITER_all_snapshots
2011 		     ? bpos_eq(pos, POS_MIN)
2012 		     : bkey_eq(pos, POS_MIN));
2013 
2014 	if (ret && !(iter->flags & BTREE_ITER_is_extents))
2015 		pos = bkey_predecessor(iter, pos);
2016 	bch2_btree_iter_set_pos(iter, pos);
2017 	return ret;
2018 }
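
/*
 * Both helpers step a position through the keyspace, which sorts
 * lexicographically on (inode, offset, snapshot). A self-contained sketch of
 * that ordering's successor, with carry between fields (illustrative only -
 * the real bpos_successor() in bkey.h BUG()s on overflow past the end of the
 * keyspace):
 *
 *	struct toy_pos { u64 inode, offset; u32 snapshot; };
 *
 *	static struct toy_pos toy_pos_successor(struct toy_pos p)
 *	{
 *		if (++p.snapshot)
 *			return p;
 *		if (++p.offset)
 *			return p;
 *		p.inode++;	// wrapped past the last position
 *		return p;
 *	}
 */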
2019 
2020 static noinline
2021 void bch2_btree_trans_peek_prev_updates(struct btree_trans *trans, struct btree_iter *iter,
2022 					struct bkey_s_c *k)
2023 {
2024 	struct bpos end = path_l(btree_iter_path(trans, iter))->b->data->min_key;
2025 
2026 	trans_for_each_update(trans, i)
2027 		if (!i->key_cache_already_flushed &&
2028 		    i->btree_id == iter->btree_id &&
2029 		    bpos_le(i->k->k.p, iter->pos) &&
2030 		    bpos_ge(i->k->k.p, k->k ? k->k->p : end)) {
2031 			iter->k = i->k->k;
2032 			*k = bkey_i_to_s_c(i->k);
2033 		}
2034 }
2035 
2036 static noinline
2037 void bch2_btree_trans_peek_updates(struct btree_trans *trans, struct btree_iter *iter,
2038 				   struct bkey_s_c *k)
2039 {
2040 	struct btree_path *path = btree_iter_path(trans, iter);
2041 	struct bpos end = path_l(path)->b->key.k.p;
2042 
2043 	trans_for_each_update(trans, i)
2044 		if (!i->key_cache_already_flushed &&
2045 		    i->btree_id == iter->btree_id &&
2046 		    bpos_ge(i->k->k.p, path->pos) &&
2047 		    bpos_le(i->k->k.p, k->k ? k->k->p : end)) {
2048 			iter->k = i->k->k;
2049 			*k = bkey_i_to_s_c(i->k);
2050 		}
2051 }
2052 
2053 static noinline
2054 void bch2_btree_trans_peek_slot_updates(struct btree_trans *trans, struct btree_iter *iter,
2055 					struct bkey_s_c *k)
2056 {
2057 	trans_for_each_update(trans, i)
2058 		if (!i->key_cache_already_flushed &&
2059 		    i->btree_id == iter->btree_id &&
2060 		    bpos_eq(i->k->k.p, iter->pos)) {
2061 			iter->k = i->k->k;
2062 			*k = bkey_i_to_s_c(i->k);
2063 		}
2064 }
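
/*
 * The three helpers above overlay a transaction's pending (uncommitted)
 * updates on what the iterator read from the btree: scanning forward, the
 * visible key is the minimum-position update in [search position, btree key],
 * or the btree key itself if no update shadows it. A self-contained sketch of
 * that rule using plain u64 positions (illustrative only):
 *
 *	static u64 peek_with_updates(u64 search, u64 btree_pos,
 *				     const u64 *updates, unsigned nr)
 *	{
 *		u64 ret = btree_pos;
 *
 *		for (unsigned i = 0; i < nr; i++)
 *			if (updates[i] >= search && updates[i] <= ret)
 *				ret = updates[i];
 *		return ret;
 *	}
 */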
2065 
2066 static struct bkey_i *bch2_btree_journal_peek(struct btree_trans *trans,
2067 					      struct btree_iter *iter,
2068 					      struct bpos end_pos)
2069 {
2070 	struct btree_path *path = btree_iter_path(trans, iter);
2071 
2072 	return bch2_journal_keys_peek_upto(trans->c, iter->btree_id,
2073 					   path->level,
2074 					   path->pos,
2075 					   end_pos,
2076 					   &iter->journal_idx);
2077 }
2078 
2079 static noinline
2080 struct bkey_s_c btree_trans_peek_slot_journal(struct btree_trans *trans,
2081 					      struct btree_iter *iter)
2082 {
2083 	struct btree_path *path = btree_iter_path(trans, iter);
2084 	struct bkey_i *k = bch2_btree_journal_peek(trans, iter, path->pos);
2085 
2086 	if (k) {
2087 		iter->k = k->k;
2088 		return bkey_i_to_s_c(k);
2089 	} else {
2090 		return bkey_s_c_null;
2091 	}
2092 }
2093 
2094 static noinline
2095 struct bkey_s_c btree_trans_peek_journal(struct btree_trans *trans,
2096 					 struct btree_iter *iter,
2097 					 struct bkey_s_c k)
2098 {
2099 	struct btree_path *path = btree_iter_path(trans, iter);
2100 	struct bkey_i *next_journal =
2101 		bch2_btree_journal_peek(trans, iter,
2102 				k.k ? k.k->p : path_l(path)->b->key.k.p);
2103 
2104 	if (next_journal) {
2105 		iter->k = next_journal->k;
2106 		k = bkey_i_to_s_c(next_journal);
2107 	}
2108 
2109 	return k;
2110 }
2111 
2112 /*
2113  * Checks btree key cache for key at iter->pos and returns it if present, or
2114  * bkey_s_c_null:
2115  */
2116 static noinline
2117 struct bkey_s_c btree_trans_peek_key_cache(struct btree_iter *iter, struct bpos pos)
2118 {
2119 	struct btree_trans *trans = iter->trans;
2120 	struct bch_fs *c = trans->c;
2121 	struct bkey u;
2122 	struct bkey_s_c k;
2123 	int ret;
2124 
2125 	bch2_trans_verify_not_in_restart(trans);
2126 	bch2_trans_verify_not_unlocked(trans);
2127 
2128 	if ((iter->flags & BTREE_ITER_key_cache_fill) &&
2129 	    bpos_eq(iter->pos, pos))
2130 		return bkey_s_c_null;
2131 
2132 	if (!bch2_btree_key_cache_find(c, iter->btree_id, pos))
2133 		return bkey_s_c_null;
2134 
2135 	if (!iter->key_cache_path)
2136 		iter->key_cache_path = bch2_path_get(trans, iter->btree_id, pos,
2137 						     iter->flags & BTREE_ITER_intent, 0,
2138 						     iter->flags|BTREE_ITER_cached|
2139 						     BTREE_ITER_cached_nofill,
2140 						     _THIS_IP_);
2141 
2142 	iter->key_cache_path = bch2_btree_path_set_pos(trans, iter->key_cache_path, pos,
2143 					iter->flags & BTREE_ITER_intent,
2144 					btree_iter_ip_allocated(iter));
2145 
2146 	ret =   bch2_btree_path_traverse(trans, iter->key_cache_path,
2147 					 iter->flags|BTREE_ITER_cached) ?:
2148 		bch2_btree_path_relock(trans, btree_iter_path(trans, iter), _THIS_IP_);
2149 	if (unlikely(ret))
2150 		return bkey_s_c_err(ret);
2151 
2152 	btree_path_set_should_be_locked(trans->paths + iter->key_cache_path);
2153 
2154 	k = bch2_btree_path_peek_slot(trans->paths + iter->key_cache_path, &u);
2155 	if (k.k && !bkey_err(k)) {
2156 		iter->k = u;
2157 		k.k = &iter->k;
2158 	}
2159 	return k;
2160 }
2161 
2162 static struct bkey_s_c __bch2_btree_iter_peek(struct btree_iter *iter, struct bpos search_key)
2163 {
2164 	struct btree_trans *trans = iter->trans;
2165 	struct bkey_s_c k, k2;
2166 	int ret;
2167 
2168 	EBUG_ON(btree_iter_path(trans, iter)->cached);
2169 	bch2_btree_iter_verify(iter);
2170 
2171 	while (1) {
2172 		struct btree_path_level *l;
2173 
2174 		iter->path = bch2_btree_path_set_pos(trans, iter->path, search_key,
2175 					iter->flags & BTREE_ITER_intent,
2176 					btree_iter_ip_allocated(iter));
2177 
2178 		ret = bch2_btree_path_traverse(trans, iter->path, iter->flags);
2179 		if (unlikely(ret)) {
2180 			/* ensure that iter->k is consistent with iter->pos: */
2181 			bch2_btree_iter_set_pos(iter, iter->pos);
2182 			k = bkey_s_c_err(ret);
2183 			goto out;
2184 		}
2185 
2186 		struct btree_path *path = btree_iter_path(trans, iter);
2187 		l = path_l(path);
2188 
2189 		if (unlikely(!l->b)) {
2190 			/* No btree nodes at requested level: */
2191 			bch2_btree_iter_set_pos(iter, SPOS_MAX);
2192 			k = bkey_s_c_null;
2193 			goto out;
2194 		}
2195 
2196 		btree_path_set_should_be_locked(path);
2197 
2198 		k = btree_path_level_peek_all(trans->c, l, &iter->k);
2199 
2200 		if (unlikely(iter->flags & BTREE_ITER_with_key_cache) &&
2201 		    k.k &&
2202 		    (k2 = btree_trans_peek_key_cache(iter, k.k->p)).k) {
2203 			k = k2;
2204 			ret = bkey_err(k);
2205 			if (ret) {
2206 				bch2_btree_iter_set_pos(iter, iter->pos);
2207 				goto out;
2208 			}
2209 		}
2210 
2211 		if (unlikely(iter->flags & BTREE_ITER_with_journal))
2212 			k = btree_trans_peek_journal(trans, iter, k);
2213 
2214 		if (unlikely((iter->flags & BTREE_ITER_with_updates) &&
2215 			     trans->nr_updates))
2216 			bch2_btree_trans_peek_updates(trans, iter, &k);
2217 
2218 		if (k.k && bkey_deleted(k.k)) {
2219 			/*
2220 			 * If we've got a whiteout, and it's after the search
2221 			 * key, advance the search key to the whiteout instead
2222 			 * of just after the whiteout - it might be a btree
2223 			 * whiteout, with a real key at the same position, since
2224 			 * in the btree deleted keys sort before non deleted.
2225 			 * in the btree, deleted keys sort before non-deleted ones.
2226 			search_key = !bpos_eq(search_key, k.k->p)
2227 				? k.k->p
2228 				: bpos_successor(k.k->p);
2229 			continue;
2230 		}
2231 
2232 		if (likely(k.k)) {
2233 			break;
2234 		} else if (likely(!bpos_eq(l->b->key.k.p, SPOS_MAX))) {
2235 			/* Advance to next leaf node: */
2236 			search_key = bpos_successor(l->b->key.k.p);
2237 		} else {
2238 			/* End of btree: */
2239 			bch2_btree_iter_set_pos(iter, SPOS_MAX);
2240 			k = bkey_s_c_null;
2241 			goto out;
2242 		}
2243 	}
2244 out:
2245 	bch2_btree_iter_verify(iter);
2246 
2247 	return k;
2248 }
2249 
2250 /**
2251  * bch2_btree_iter_peek_upto() - returns first key greater than or equal to
2252  * iterator's current position
2253  * @iter:	iterator to peek from
2254  * @end:	search limit: returns keys less than or equal to @end
2255  *
2256  * Returns:	key if found, or an error extractable with bkey_err().
2257  */
2258 struct bkey_s_c bch2_btree_iter_peek_upto(struct btree_iter *iter, struct bpos end)
2259 {
2260 	struct btree_trans *trans = iter->trans;
2261 	struct bpos search_key = btree_iter_search_key(iter);
2262 	struct bkey_s_c k;
2263 	struct bpos iter_pos;
2264 	int ret;
2265 
2266 	bch2_trans_verify_not_unlocked(trans);
2267 	EBUG_ON((iter->flags & BTREE_ITER_filter_snapshots) && bkey_eq(end, POS_MAX));
2268 
2269 	if (iter->update_path) {
2270 		bch2_path_put_nokeep(trans, iter->update_path,
2271 				     iter->flags & BTREE_ITER_intent);
2272 		iter->update_path = 0;
2273 	}
2274 
2275 	bch2_btree_iter_verify_entry_exit(iter);
2276 
2277 	while (1) {
2278 		k = __bch2_btree_iter_peek(iter, search_key);
2279 		if (unlikely(!k.k))
2280 			goto end;
2281 		if (unlikely(bkey_err(k)))
2282 			goto out_no_locked;
2283 
2284 		/*
2285 		 * We need to check against @end before FILTER_SNAPSHOTS because
2286 		 * if we get to a different inode than requested, we might be
2287 		 * seeing keys for a different snapshot tree that will all be
2288 		 * filtered out.
2289 		 *
2290 		 * But we can't do the full check here, because bkey_start_pos()
2291 		 * isn't monotonically increasing before FILTER_SNAPSHOTS, and
2292 		 * that's what we check against in extents mode:
2293 		 */
2294 		if (unlikely(!(iter->flags & BTREE_ITER_is_extents)
2295 			     ? bkey_gt(k.k->p, end)
2296 			     : k.k->p.inode > end.inode))
2297 			goto end;
2298 
2299 		if (iter->update_path &&
2300 		    !bkey_eq(trans->paths[iter->update_path].pos, k.k->p)) {
2301 			bch2_path_put_nokeep(trans, iter->update_path,
2302 					     iter->flags & BTREE_ITER_intent);
2303 			iter->update_path = 0;
2304 		}
2305 
2306 		if ((iter->flags & BTREE_ITER_filter_snapshots) &&
2307 		    (iter->flags & BTREE_ITER_intent) &&
2308 		    !(iter->flags & BTREE_ITER_is_extents) &&
2309 		    !iter->update_path) {
2310 			struct bpos pos = k.k->p;
2311 
2312 			if (pos.snapshot < iter->snapshot) {
2313 				search_key = bpos_successor(k.k->p);
2314 				continue;
2315 			}
2316 
2317 			pos.snapshot = iter->snapshot;
2318 
2319 			/*
2320 			 * advance, same as on exit for iter->path, but only up
2321 			 * to snapshot
2322 			 */
2323 			__btree_path_get(trans->paths + iter->path, iter->flags & BTREE_ITER_intent);
2324 			iter->update_path = iter->path;
2325 
2326 			iter->update_path = bch2_btree_path_set_pos(trans,
2327 						iter->update_path, pos,
2328 						iter->flags & BTREE_ITER_intent,
2329 						_THIS_IP_);
2330 			ret = bch2_btree_path_traverse(trans, iter->update_path, iter->flags);
2331 			if (unlikely(ret)) {
2332 				k = bkey_s_c_err(ret);
2333 				goto out_no_locked;
2334 			}
2335 		}
2336 
2337 		/*
2338 		 * We can never have a key in a leaf node at POS_MAX, so
2339 		 * we don't have to check these successor() calls for overflow:
2340 		 */
2341 		if ((iter->flags & BTREE_ITER_filter_snapshots) &&
2342 		    !bch2_snapshot_is_ancestor(trans->c,
2343 					       iter->snapshot,
2344 					       k.k->p.snapshot)) {
2345 			search_key = bpos_successor(k.k->p);
2346 			continue;
2347 		}
2348 
2349 		if (bkey_whiteout(k.k) &&
2350 		    !(iter->flags & BTREE_ITER_all_snapshots)) {
2351 			search_key = bkey_successor(iter, k.k->p);
2352 			continue;
2353 		}
2354 
2355 		/*
2356 		 * iter->pos should be monotonically increasing, and always be
2357 		 * equal to the key we just returned - except extents can
2358 		 * straddle iter->pos:
2359 		 */
2360 		if (!(iter->flags & BTREE_ITER_is_extents))
2361 			iter_pos = k.k->p;
2362 		else
2363 			iter_pos = bkey_max(iter->pos, bkey_start_pos(k.k));
2364 
2365 		if (unlikely(!(iter->flags & BTREE_ITER_is_extents)
2366 			     ? bkey_gt(iter_pos, end)
2367 			     : bkey_ge(iter_pos, end)))
2368 			goto end;
2369 
2370 		break;
2371 	}
2372 
2373 	iter->pos = iter_pos;
2374 
2375 	iter->path = bch2_btree_path_set_pos(trans, iter->path, k.k->p,
2376 				iter->flags & BTREE_ITER_intent,
2377 				btree_iter_ip_allocated(iter));
2378 
2379 	btree_path_set_should_be_locked(btree_iter_path(trans, iter));
2380 out_no_locked:
2381 	if (iter->update_path) {
2382 		ret = bch2_btree_path_relock(trans, trans->paths + iter->update_path, _THIS_IP_);
2383 		if (unlikely(ret))
2384 			k = bkey_s_c_err(ret);
2385 		else
2386 			btree_path_set_should_be_locked(trans->paths + iter->update_path);
2387 	}
2388 
2389 	if (!(iter->flags & BTREE_ITER_all_snapshots))
2390 		iter->pos.snapshot = iter->snapshot;
2391 
2392 	ret = bch2_btree_iter_verify_ret(iter, k);
2393 	if (unlikely(ret)) {
2394 		bch2_btree_iter_set_pos(iter, iter->pos);
2395 		k = bkey_s_c_err(ret);
2396 	}
2397 
2398 	bch2_btree_iter_verify_entry_exit(iter);
2399 
2400 	return k;
2401 end:
2402 	bch2_btree_iter_set_pos(iter, end);
2403 	k = bkey_s_c_null;
2404 	goto out_no_locked;
2405 }
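
/*
 * Example usage: scanning a range of the extents btree with restart handling.
 * An illustrative sketch only (not compiled here); inode 42 is an arbitrary
 * placeholder:
 *
 *	static int scan_extents(struct bch_fs *c)
 *	{
 *		struct btree_trans *trans = bch2_trans_get(c);
 *		struct btree_iter iter;
 *		struct bkey_s_c k;
 *		int ret = 0;
 *
 *		bch2_trans_iter_init(trans, &iter, BTREE_ID_extents,
 *				     POS(42, 0), 0);
 *	retry:
 *		bch2_trans_begin(trans);
 *
 *		for (k = bch2_btree_iter_peek_upto(&iter, POS(42, U64_MAX));
 *		     k.k && !(ret = bkey_err(k));
 *		     k = bch2_btree_iter_next(&iter))
 *			;	// examine k here
 *
 *		if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
 *			goto retry;
 *
 *		bch2_trans_iter_exit(trans, &iter);
 *		bch2_trans_put(trans);
 *		return ret;
 *	}
 */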
2406 
2407 /**
2408  * bch2_btree_iter_next() - returns first key greater than iterator's current
2409  * position
2410  * @iter:	iterator to peek from
2411  *
2412  * Returns:	key if found, or an error extractable with bkey_err().
2413  */
2414 struct bkey_s_c bch2_btree_iter_next(struct btree_iter *iter)
2415 {
2416 	if (!bch2_btree_iter_advance(iter))
2417 		return bkey_s_c_null;
2418 
2419 	return bch2_btree_iter_peek(iter);
2420 }
2421 
2422 /**
2423  * bch2_btree_iter_peek_prev() - returns first key less than or equal to
2424  * iterator's current position
2425  * @iter:	iterator to peek from
2426  *
2427  * Returns:	key if found, or an error extractable with bkey_err().
2428  */
2429 struct bkey_s_c bch2_btree_iter_peek_prev(struct btree_iter *iter)
2430 {
2431 	struct btree_trans *trans = iter->trans;
2432 	struct bpos search_key = iter->pos;
2433 	struct bkey_s_c k;
2434 	struct bkey saved_k;
2435 	const struct bch_val *saved_v;
2436 	btree_path_idx_t saved_path = 0;
2437 	int ret;
2438 
2439 	bch2_trans_verify_not_unlocked(trans);
2440 	EBUG_ON(btree_iter_path(trans, iter)->cached ||
2441 		btree_iter_path(trans, iter)->level);
2442 
2443 	if (iter->flags & BTREE_ITER_with_journal)
2444 		return bkey_s_c_err(-BCH_ERR_btree_iter_with_journal_not_supported);
2445 
2446 	bch2_btree_iter_verify(iter);
2447 	bch2_btree_iter_verify_entry_exit(iter);
2448 
2449 	if (iter->flags & BTREE_ITER_filter_snapshots)
2450 		search_key.snapshot = U32_MAX;
2451 
2452 	while (1) {
2453 		iter->path = bch2_btree_path_set_pos(trans, iter->path, search_key,
2454 						iter->flags & BTREE_ITER_intent,
2455 						btree_iter_ip_allocated(iter));
2456 
2457 		ret = bch2_btree_path_traverse(trans, iter->path, iter->flags);
2458 		if (unlikely(ret)) {
2459 			/* ensure that iter->k is consistent with iter->pos: */
2460 			bch2_btree_iter_set_pos(iter, iter->pos);
2461 			k = bkey_s_c_err(ret);
2462 			goto out_no_locked;
2463 		}
2464 
2465 		struct btree_path *path = btree_iter_path(trans, iter);
2466 
2467 		k = btree_path_level_peek(trans, path, &path->l[0], &iter->k);
2468 		if (!k.k ||
2469 		    ((iter->flags & BTREE_ITER_is_extents)
2470 		     ? bpos_ge(bkey_start_pos(k.k), search_key)
2471 		     : bpos_gt(k.k->p, search_key)))
2472 			k = btree_path_level_prev(trans, path, &path->l[0], &iter->k);
2473 
2474 		if (unlikely((iter->flags & BTREE_ITER_with_updates) &&
2475 			     trans->nr_updates))
2476 			bch2_btree_trans_peek_prev_updates(trans, iter, &k);
2477 
2478 		if (likely(k.k)) {
2479 			if (iter->flags & BTREE_ITER_filter_snapshots) {
2480 				if (k.k->p.snapshot == iter->snapshot)
2481 					goto got_key;
2482 
2483 				/*
2484 				 * If we have a saved candidate, and we're no
2485 				 * longer at the same _key_ (not pos), return
2486 				 * that candidate
2487 				 */
2488 				if (saved_path && !bkey_eq(k.k->p, saved_k.p)) {
2489 					bch2_path_put_nokeep(trans, iter->path,
2490 						      iter->flags & BTREE_ITER_intent);
2491 					iter->path = saved_path;
2492 					saved_path = 0;
2493 					iter->k	= saved_k;
2494 					k.v	= saved_v;
2495 					goto got_key;
2496 				}
2497 
2498 				if (bch2_snapshot_is_ancestor(trans->c,
2499 							      iter->snapshot,
2500 							      k.k->p.snapshot)) {
2501 					if (saved_path)
2502 						bch2_path_put_nokeep(trans, saved_path,
2503 						      iter->flags & BTREE_ITER_intent);
2504 					saved_path = btree_path_clone(trans, iter->path,
2505 								iter->flags & BTREE_ITER_intent,
2506 								_THIS_IP_);
2507 					path = btree_iter_path(trans, iter);
2508 					saved_k = *k.k;
2509 					saved_v = k.v;
2510 				}
2511 
2512 				search_key = bpos_predecessor(k.k->p);
2513 				continue;
2514 			}
2515 got_key:
2516 			if (bkey_whiteout(k.k) &&
2517 			    !(iter->flags & BTREE_ITER_all_snapshots)) {
2518 				search_key = bkey_predecessor(iter, k.k->p);
2519 				if (iter->flags & BTREE_ITER_filter_snapshots)
2520 					search_key.snapshot = U32_MAX;
2521 				continue;
2522 			}
2523 
2524 			btree_path_set_should_be_locked(path);
2525 			break;
2526 		} else if (likely(!bpos_eq(path->l[0].b->data->min_key, POS_MIN))) {
2527 			/* Advance to previous leaf node: */
2528 			search_key = bpos_predecessor(path->l[0].b->data->min_key);
2529 		} else {
2530 			/* Start of btree: */
2531 			bch2_btree_iter_set_pos(iter, POS_MIN);
2532 			k = bkey_s_c_null;
2533 			goto out_no_locked;
2534 		}
2535 	}
2536 
2537 	EBUG_ON(bkey_gt(bkey_start_pos(k.k), iter->pos));
2538 
2539 	/* Extents can straddle iter->pos: */
2540 	if (bkey_lt(k.k->p, iter->pos))
2541 		iter->pos = k.k->p;
2542 
2543 	if (iter->flags & BTREE_ITER_filter_snapshots)
2544 		iter->pos.snapshot = iter->snapshot;
2545 out_no_locked:
2546 	if (saved_path)
2547 		bch2_path_put_nokeep(trans, saved_path, iter->flags & BTREE_ITER_intent);
2548 
2549 	bch2_btree_iter_verify_entry_exit(iter);
2550 	bch2_btree_iter_verify(iter);
2551 
2552 	return k;
2553 }
2554 
2555 /**
2556  * bch2_btree_iter_prev() - returns first key less than iterator's current
2557  * position
2558  * @iter:	iterator to peek from
2559  *
2560  * Returns:	key if found, or an error extractable with bkey_err().
2561  */
2562 struct bkey_s_c bch2_btree_iter_prev(struct btree_iter *iter)
2563 {
2564 	if (!bch2_btree_iter_rewind(iter))
2565 		return bkey_s_c_null;
2566 
2567 	return bch2_btree_iter_peek_prev(iter);
2568 }
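
/*
 * Example: reverse iteration with the two helpers above - an illustrative
 * fragment only (not compiled here), assuming @iter starts positioned at the
 * end of the range of interest; keys arrive in descending position order:
 *
 *	for (k = bch2_btree_iter_peek_prev(iter);
 *	     k.k && !bkey_err(k);
 *	     k = bch2_btree_iter_prev(iter))
 *		;	// examine k here
 */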
2569 
2570 struct bkey_s_c bch2_btree_iter_peek_slot(struct btree_iter *iter)
2571 {
2572 	struct btree_trans *trans = iter->trans;
2573 	struct bpos search_key;
2574 	struct bkey_s_c k;
2575 	int ret;
2576 
2577 	bch2_trans_verify_not_unlocked(trans);
2578 	bch2_btree_iter_verify(iter);
2579 	bch2_btree_iter_verify_entry_exit(iter);
2580 	EBUG_ON(btree_iter_path(trans, iter)->level && (iter->flags & BTREE_ITER_with_key_cache));
2581 
2582 	/* extents can't span inode numbers: */
2583 	if ((iter->flags & BTREE_ITER_is_extents) &&
2584 	    unlikely(iter->pos.offset == KEY_OFFSET_MAX)) {
2585 		if (iter->pos.inode == KEY_INODE_MAX)
2586 			return bkey_s_c_null;
2587 
2588 		bch2_btree_iter_set_pos(iter, bpos_nosnap_successor(iter->pos));
2589 	}
2590 
2591 	search_key = btree_iter_search_key(iter);
2592 	iter->path = bch2_btree_path_set_pos(trans, iter->path, search_key,
2593 					iter->flags & BTREE_ITER_intent,
2594 					btree_iter_ip_allocated(iter));
2595 
2596 	ret = bch2_btree_path_traverse(trans, iter->path, iter->flags);
2597 	if (unlikely(ret)) {
2598 		k = bkey_s_c_err(ret);
2599 		goto out_no_locked;
2600 	}
2601 
2602 	if ((iter->flags & BTREE_ITER_cached) ||
2603 	    !(iter->flags & (BTREE_ITER_is_extents|BTREE_ITER_filter_snapshots))) {
2604 		k = bkey_s_c_null;
2605 
2606 		if (unlikely((iter->flags & BTREE_ITER_with_updates) &&
2607 			     trans->nr_updates)) {
2608 			bch2_btree_trans_peek_slot_updates(trans, iter, &k);
2609 			if (k.k)
2610 				goto out;
2611 		}
2612 
2613 		if (unlikely(iter->flags & BTREE_ITER_with_journal) &&
2614 		    (k = btree_trans_peek_slot_journal(trans, iter)).k)
2615 			goto out;
2616 
2617 		if (unlikely(iter->flags & BTREE_ITER_with_key_cache) &&
2618 		    (k = btree_trans_peek_key_cache(iter, iter->pos)).k) {
2619 			if (!bkey_err(k))
2620 				iter->k = *k.k;
2621 			/* We're not returning a key from iter->path: */
2622 			goto out_no_locked;
2623 		}
2624 
2625 		k = bch2_btree_path_peek_slot(trans->paths + iter->path, &iter->k);
2626 		if (unlikely(!k.k))
2627 			goto out_no_locked;
2628 	} else {
2629 		struct bpos next;
2630 		struct bpos end = iter->pos;
2631 
2632 		if (iter->flags & BTREE_ITER_is_extents)
2633 			end.offset = U64_MAX;
2634 
2635 		EBUG_ON(btree_iter_path(trans, iter)->level);
2636 
2637 		if (iter->flags & BTREE_ITER_intent) {
2638 			struct btree_iter iter2;
2639 
2640 			bch2_trans_copy_iter(&iter2, iter);
2641 			k = bch2_btree_iter_peek_upto(&iter2, end);
2642 
2643 			if (k.k && !bkey_err(k)) {
2644 				swap(iter->key_cache_path, iter2.key_cache_path);
2645 				iter->k = iter2.k;
2646 				k.k = &iter->k;
2647 			}
2648 			bch2_trans_iter_exit(trans, &iter2);
2649 		} else {
2650 			struct bpos pos = iter->pos;
2651 
2652 			k = bch2_btree_iter_peek_upto(iter, end);
2653 			if (unlikely(bkey_err(k)))
2654 				bch2_btree_iter_set_pos(iter, pos);
2655 			else
2656 				iter->pos = pos;
2657 		}
2658 
2659 		if (unlikely(bkey_err(k)))
2660 			goto out_no_locked;
2661 
2662 		next = k.k ? bkey_start_pos(k.k) : POS_MAX;
2663 
2664 		if (bkey_lt(iter->pos, next)) {
2665 			bkey_init(&iter->k);
2666 			iter->k.p = iter->pos;
2667 
2668 			if (iter->flags & BTREE_ITER_is_extents) {
2669 				bch2_key_resize(&iter->k,
2670 						min_t(u64, KEY_SIZE_MAX,
2671 						      (next.inode == iter->pos.inode
2672 						       ? next.offset
2673 						       : KEY_OFFSET_MAX) -
2674 						      iter->pos.offset));
2675 				EBUG_ON(!iter->k.size);
2676 			}
2677 
2678 			k = (struct bkey_s_c) { &iter->k, NULL };
2679 		}
2680 	}
2681 out:
2682 	btree_path_set_should_be_locked(btree_iter_path(trans, iter));
2683 out_no_locked:
2684 	bch2_btree_iter_verify_entry_exit(iter);
2685 	bch2_btree_iter_verify(iter);
2686 	ret = bch2_btree_iter_verify_ret(iter, k);
2687 	if (unlikely(ret))
2688 		return bkey_s_c_err(ret);
2689 
2690 	return k;
2691 }
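
/*
 * Unlike peek(), peek_slot() always has an answer for the iterator's exact
 * position: a real key if one exists there, otherwise a synthesized hole (in
 * extents mode, sized to reach the next real extent). Example - an
 * illustrative fragment only (not compiled here), with inum a placeholder and
 * trans/iter/k/ret declared as in the sketch after bch2_btree_iter_peek_upto():
 *
 *	bch2_trans_iter_init(trans, &iter, BTREE_ID_inodes,
 *			     POS(0, inum), 0);
 *	k = bch2_btree_iter_peek_slot(&iter);
 *	ret = bkey_err(k);
 *	if (!ret && !bkey_deleted(k.k))
 *		;	// a real key lives at this slot
 *	bch2_trans_iter_exit(trans, &iter);
 */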
2692 
2693 struct bkey_s_c bch2_btree_iter_next_slot(struct btree_iter *iter)
2694 {
2695 	if (!bch2_btree_iter_advance(iter))
2696 		return bkey_s_c_null;
2697 
2698 	return bch2_btree_iter_peek_slot(iter);
2699 }
2700 
2701 struct bkey_s_c bch2_btree_iter_prev_slot(struct btree_iter *iter)
2702 {
2703 	if (!bch2_btree_iter_rewind(iter))
2704 		return bkey_s_c_null;
2705 
2706 	return bch2_btree_iter_peek_slot(iter);
2707 }
2708 
2709 struct bkey_s_c bch2_btree_iter_peek_and_restart_outlined(struct btree_iter *iter)
2710 {
2711 	struct bkey_s_c k;
2712 
2713 	while (btree_trans_too_many_iters(iter->trans) ||
2714 	       (k = bch2_btree_iter_peek_type(iter, iter->flags),
2715 		bch2_err_matches(bkey_err(k), BCH_ERR_transaction_restart)))
2716 		bch2_trans_begin(iter->trans);
2717 
2718 	return k;
2719 }
2720 
2721 /* new transactional stuff: */
2722 
2723 #ifdef CONFIG_BCACHEFS_DEBUG
2724 static void btree_trans_verify_sorted_refs(struct btree_trans *trans)
2725 {
2726 	struct btree_path *path;
2727 	unsigned i;
2728 
2729 	BUG_ON(trans->nr_sorted != bitmap_weight(trans->paths_allocated, trans->nr_paths) - 1);
2730 
2731 	trans_for_each_path(trans, path, i) {
2732 		BUG_ON(path->sorted_idx >= trans->nr_sorted);
2733 		BUG_ON(trans->sorted[path->sorted_idx] != i);
2734 	}
2735 
2736 	for (i = 0; i < trans->nr_sorted; i++) {
2737 		unsigned idx = trans->sorted[i];
2738 
2739 		BUG_ON(!test_bit(idx, trans->paths_allocated));
2740 		BUG_ON(trans->paths[idx].sorted_idx != i);
2741 	}
2742 }
2743 
2744 static void btree_trans_verify_sorted(struct btree_trans *trans)
2745 {
2746 	struct btree_path *path, *prev = NULL;
2747 	struct trans_for_each_path_inorder_iter iter;
2748 
2749 	if (!bch2_debug_check_iterators)
2750 		return;
2751 
2752 	trans_for_each_path_inorder(trans, path, iter) {
2753 		if (prev && btree_path_cmp(prev, path) > 0) {
2754 			__bch2_dump_trans_paths_updates(trans, true);
2755 			panic("trans paths out of order!\n");
2756 		}
2757 		prev = path;
2758 	}
2759 }
2760 #else
2761 static inline void btree_trans_verify_sorted_refs(struct btree_trans *trans) {}
2762 static inline void btree_trans_verify_sorted(struct btree_trans *trans) {}
2763 #endif
2764 
2765 void __bch2_btree_trans_sort_paths(struct btree_trans *trans)
2766 {
2767 	int i, l = 0, r = trans->nr_sorted, inc = 1;
2768 	bool swapped;
2769 
2770 	btree_trans_verify_sorted_refs(trans);
2771 
2772 	if (trans->paths_sorted)
2773 		goto out;
2774 
2775 	/*
2776 	 * Cocktail shaker sort: this is efficient because the paths will be
2777 	 * mostly sorted.
2778 	 */
2779 	do {
2780 		swapped = false;
2781 
2782 		for (i = inc > 0 ? l : r - 2;
2783 		     i + 1 < r && i >= l;
2784 		     i += inc) {
2785 			if (btree_path_cmp(trans->paths + trans->sorted[i],
2786 					   trans->paths + trans->sorted[i + 1]) > 0) {
2787 				swap(trans->sorted[i], trans->sorted[i + 1]);
2788 				trans->paths[trans->sorted[i]].sorted_idx = i;
2789 				trans->paths[trans->sorted[i + 1]].sorted_idx = i + 1;
2790 				swapped = true;
2791 			}
2792 		}
2793 
2794 		if (inc > 0)
2795 			--r;
2796 		else
2797 			l++;
2798 		inc = -inc;
2799 	} while (swapped);
2800 
2801 	trans->paths_sorted = true;
2802 out:
2803 	btree_trans_verify_sorted(trans);
2804 }
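
/*
 * A self-contained sketch of the cocktail shaker sort above, on a plain int
 * array (illustrative only): passes alternate direction, and each pass
 * shrinks the unsorted window from the end it just finished, which is
 * near-linear on mostly-sorted input:
 *
 *	static void shaker_sort(int *a, int n)
 *	{
 *		int l = 0, r = n, inc = 1;
 *		bool swapped;
 *
 *		do {
 *			swapped = false;
 *
 *			for (int i = inc > 0 ? l : r - 2;
 *			     i + 1 < r && i >= l;
 *			     i += inc)
 *				if (a[i] > a[i + 1]) {
 *					swap(a[i], a[i + 1]);
 *					swapped = true;
 *				}
 *
 *			if (inc > 0)
 *				--r;
 *			else
 *				l++;
 *			inc = -inc;
 *		} while (swapped);
 *	}
 */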
2805 
2806 static inline void btree_path_list_remove(struct btree_trans *trans,
2807 					  struct btree_path *path)
2808 {
2809 	EBUG_ON(path->sorted_idx >= trans->nr_sorted);
2810 #ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2811 	trans->nr_sorted--;
2812 	memmove_u64s_down_small(trans->sorted + path->sorted_idx,
2813 				trans->sorted + path->sorted_idx + 1,
2814 				DIV_ROUND_UP(trans->nr_sorted - path->sorted_idx,
2815 					     sizeof(u64) / sizeof(btree_path_idx_t)));
2816 #else
2817 	array_remove_item(trans->sorted, trans->nr_sorted, path->sorted_idx);
2818 #endif
2819 	for (unsigned i = path->sorted_idx; i < trans->nr_sorted; i++)
2820 		trans->paths[trans->sorted[i]].sorted_idx = i;
2821 }
2822 
2823 static inline void btree_path_list_add(struct btree_trans *trans,
2824 				       btree_path_idx_t pos,
2825 				       btree_path_idx_t path_idx)
2826 {
2827 	struct btree_path *path = trans->paths + path_idx;
2828 
2829 	path->sorted_idx = pos ? trans->paths[pos].sorted_idx + 1 : trans->nr_sorted;
2830 
2831 #ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2832 	memmove_u64s_up_small(trans->sorted + path->sorted_idx + 1,
2833 			      trans->sorted + path->sorted_idx,
2834 			      DIV_ROUND_UP(trans->nr_sorted - path->sorted_idx,
2835 					   sizeof(u64) / sizeof(btree_path_idx_t)));
2836 	trans->nr_sorted++;
2837 	trans->sorted[path->sorted_idx] = path_idx;
2838 #else
2839 	array_insert_item(trans->sorted, trans->nr_sorted, path->sorted_idx, path_idx);
2840 #endif
2841 
2842 	for (unsigned i = path->sorted_idx; i < trans->nr_sorted; i++)
2843 		trans->paths[trans->sorted[i]].sorted_idx = i;
2844 
2845 	btree_trans_verify_sorted_refs(trans);
2846 }
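
/*
 * Both list helpers above maintain two structures in lockstep: trans->sorted,
 * the path indices in btree_path_cmp() order, and each path's sorted_idx, its
 * position within that array. A minimal self-contained sketch of the same
 * insert-and-reindex pattern (illustrative only):
 *
 *	static void sorted_insert(u8 *sorted, unsigned *nr, u8 *idx_of,
 *				  unsigned pos, u8 val)
 *	{
 *		memmove(sorted + pos + 1, sorted + pos, *nr - pos);
 *		sorted[pos] = val;
 *		(*nr)++;
 *
 *		// every element at or after @pos moved; fix the reverse index:
 *		for (unsigned i = pos; i < *nr; i++)
 *			idx_of[sorted[i]] = i;
 *	}
 */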
2847 
2848 void bch2_trans_iter_exit(struct btree_trans *trans, struct btree_iter *iter)
2849 {
2850 	if (iter->update_path)
2851 		bch2_path_put_nokeep(trans, iter->update_path,
2852 			      iter->flags & BTREE_ITER_intent);
2853 	if (iter->path)
2854 		bch2_path_put(trans, iter->path,
2855 			      iter->flags & BTREE_ITER_intent);
2856 	if (iter->key_cache_path)
2857 		bch2_path_put(trans, iter->key_cache_path,
2858 			      iter->flags & BTREE_ITER_intent);
2859 	iter->path		= 0;
2860 	iter->update_path	= 0;
2861 	iter->key_cache_path	= 0;
2862 	iter->trans		= NULL;
2863 }
2864 
2865 void bch2_trans_iter_init_outlined(struct btree_trans *trans,
2866 			  struct btree_iter *iter,
2867 			  enum btree_id btree_id, struct bpos pos,
2868 			  unsigned flags)
2869 {
2870 	bch2_trans_iter_init_common(trans, iter, btree_id, pos, 0, 0,
2871 			       bch2_btree_iter_flags(trans, btree_id, flags),
2872 			       _RET_IP_);
2873 }
2874 
2875 void bch2_trans_node_iter_init(struct btree_trans *trans,
2876 			       struct btree_iter *iter,
2877 			       enum btree_id btree_id,
2878 			       struct bpos pos,
2879 			       unsigned locks_want,
2880 			       unsigned depth,
2881 			       unsigned flags)
2882 {
2883 	flags |= BTREE_ITER_not_extents;
2884 	flags |= BTREE_ITER_snapshot_field;
2885 	flags |= BTREE_ITER_all_snapshots;
2886 
2887 	bch2_trans_iter_init_common(trans, iter, btree_id, pos, locks_want, depth,
2888 			       __bch2_btree_iter_flags(trans, btree_id, flags),
2889 			       _RET_IP_);
2890 
2891 	iter->min_depth	= depth;
2892 
2893 	struct btree_path *path = btree_iter_path(trans, iter);
2894 	BUG_ON(path->locks_want	 < min(locks_want, BTREE_MAX_DEPTH));
2895 	BUG_ON(path->level	!= depth);
2896 	BUG_ON(iter->min_depth	!= depth);
2897 }
2898 
2899 void bch2_trans_copy_iter(struct btree_iter *dst, struct btree_iter *src)
2900 {
2901 	struct btree_trans *trans = src->trans;
2902 
2903 	*dst = *src;
2904 #ifdef TRACK_PATH_ALLOCATED
2905 	dst->ip_allocated = _RET_IP_;
2906 #endif
2907 	if (src->path)
2908 		__btree_path_get(trans->paths + src->path, src->flags & BTREE_ITER_intent);
2909 	if (src->update_path)
2910 		__btree_path_get(trans->paths + src->update_path, src->flags & BTREE_ITER_intent);
2911 	dst->key_cache_path = 0;
2912 }
2913 
2914 void *__bch2_trans_kmalloc(struct btree_trans *trans, size_t size)
2915 {
2916 	struct bch_fs *c = trans->c;
2917 	unsigned new_top = trans->mem_top + size;
2918 	unsigned old_bytes = trans->mem_bytes;
2919 	unsigned new_bytes = roundup_pow_of_two(new_top);
2920 	int ret;
2921 	void *new_mem;
2922 	void *p;
2923 
2924 	WARN_ON_ONCE(new_bytes > BTREE_TRANS_MEM_MAX);
2925 
2926 	struct btree_transaction_stats *s = btree_trans_stats(trans);
2927 	s->max_mem = max(s->max_mem, new_bytes);
2928 
2929 	if (trans->used_mempool) {
2930 		if (trans->mem_bytes >= new_bytes)
2931 			goto out_change_top;
2932 
2933 		/* No more space in the mempool item; need to malloc a new one */
2934 		new_mem = kmalloc(new_bytes, GFP_NOWAIT|__GFP_NOWARN);
2935 		if (unlikely(!new_mem)) {
2936 			bch2_trans_unlock(trans);
2937 
2938 			new_mem = kmalloc(new_bytes, GFP_KERNEL);
2939 			if (!new_mem)
2940 				return ERR_PTR(-BCH_ERR_ENOMEM_trans_kmalloc);
2941 
2942 			ret = bch2_trans_relock(trans);
2943 			if (ret) {
2944 				kfree(new_mem);
2945 				return ERR_PTR(ret);
2946 			}
2947 		}
2948 		memcpy(new_mem, trans->mem, trans->mem_top);
2949 		trans->used_mempool = false;
2950 		mempool_free(trans->mem, &c->btree_trans_mem_pool);
2951 		goto out_new_mem;
2952 	}
2953 
2954 	new_mem = krealloc(trans->mem, new_bytes, GFP_NOWAIT|__GFP_NOWARN);
2955 	if (unlikely(!new_mem)) {
2956 		bch2_trans_unlock(trans);
2957 
2958 		new_mem = krealloc(trans->mem, new_bytes, GFP_KERNEL);
2959 		if (!new_mem && new_bytes <= BTREE_TRANS_MEM_MAX) {
2960 			new_mem = mempool_alloc(&c->btree_trans_mem_pool, GFP_KERNEL);
2961 			new_bytes = BTREE_TRANS_MEM_MAX;
2962 			memcpy(new_mem, trans->mem, trans->mem_top);
2963 			trans->used_mempool = true;
2964 			kfree(trans->mem);
2965 		}
2966 
2967 		if (!new_mem)
2968 			return ERR_PTR(-BCH_ERR_ENOMEM_trans_kmalloc);
2969 
2970 		trans->mem = new_mem;
2971 		trans->mem_bytes = new_bytes;
2972 
2973 		ret = bch2_trans_relock(trans);
2974 		if (ret)
2975 			return ERR_PTR(ret);
2976 	}
2977 out_new_mem:
2978 	trans->mem = new_mem;
2979 	trans->mem_bytes = new_bytes;
2980 
2981 	if (old_bytes) {
2982 		trace_and_count(c, trans_restart_mem_realloced, trans, _RET_IP_, new_bytes);
2983 		return ERR_PTR(btree_trans_restart(trans, BCH_ERR_transaction_restart_mem_realloced));
2984 	}
2985 out_change_top:
2986 	p = trans->mem + trans->mem_top;
2987 	trans->mem_top += size;
2988 	memset(p, 0, size);
2989 	return p;
2990 }
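
/*
 * __bch2_trans_kmalloc() is a bump allocator: an allocation just advances
 * mem_top, and the whole buffer is freed in one go when bch2_trans_begin()
 * resets mem_top to 0. A minimal self-contained sketch of the fast path
 * (illustrative only - it omits the grow/relock slow path above):
 *
 *	struct bump { void *mem; size_t top, bytes; };
 *
 *	static void *bump_alloc(struct bump *b, size_t size)
 *	{
 *		if (b->top + size > b->bytes)
 *			return NULL;	// real code reallocates, possibly restarting
 *
 *		void *p = b->mem + b->top;
 *		b->top += size;
 *		memset(p, 0, size);
 *		return p;
 *	}
 */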
2991 
2992 static inline void check_srcu_held_too_long(struct btree_trans *trans)
2993 {
2994 	WARN(trans->srcu_held && time_after(jiffies, trans->srcu_lock_time + HZ * 10),
2995 	     "btree trans held srcu lock (delaying memory reclaim) for %lu seconds",
2996 	     (jiffies - trans->srcu_lock_time) / HZ);
2997 }
2998 
2999 void bch2_trans_srcu_unlock(struct btree_trans *trans)
3000 {
3001 	if (trans->srcu_held) {
3002 		struct bch_fs *c = trans->c;
3003 		struct btree_path *path;
3004 		unsigned i;
3005 
3006 		trans_for_each_path(trans, path, i)
3007 			if (path->cached && !btree_node_locked(path, 0))
3008 				path->l[0].b = ERR_PTR(-BCH_ERR_no_btree_node_srcu_reset);
3009 
3010 		check_srcu_held_too_long(trans);
3011 		srcu_read_unlock(&c->btree_trans_barrier, trans->srcu_idx);
3012 		trans->srcu_held = false;
3013 	}
3014 }
3015 
3016 static void bch2_trans_srcu_lock(struct btree_trans *trans)
3017 {
3018 	if (!trans->srcu_held) {
3019 		trans->srcu_idx = srcu_read_lock(&trans->c->btree_trans_barrier);
3020 		trans->srcu_lock_time	= jiffies;
3021 		trans->srcu_held = true;
3022 	}
3023 }
3024 
3025 /**
3026  * bch2_trans_begin() - reset a transaction after an interrupted attempt
3027  * @trans: transaction to reset
3028  *
3029  * Returns:	current restart counter, to be used with trans_was_restarted()
3030  *
3031  * While iterating over or updating nodes, an attempt to lock a btree node
3032  * may return BCH_ERR_transaction_restart when the trylock fails. When this
3033  * occurs bch2_trans_begin() should be called and the transaction retried.
3034  */
3035 u32 bch2_trans_begin(struct btree_trans *trans)
3036 {
3037 	struct btree_path *path;
3038 	unsigned i;
3039 	u64 now;
3040 
3041 	bch2_trans_reset_updates(trans);
3042 
3043 	trans->restart_count++;
3044 	trans->mem_top			= 0;
3045 	trans->journal_entries		= NULL;
3046 
3047 	trans_for_each_path(trans, path, i) {
3048 		path->should_be_locked = false;
3049 
3050 		/*
3051 		 * If the transaction wasn't restarted, we're presuming to be
3052 		 * doing something new: don't keep iterators except the ones that
3053 		 * are in use - except for the subvolumes btree:
3054 		 */
3055 		if (!trans->restarted && path->btree_id != BTREE_ID_subvolumes)
3056 			path->preserve = false;
3057 
3058 		/*
3059 		 * XXX: we probably shouldn't be doing this if the transaction
3060 		 * was restarted, but currently we still overflow transaction
3061 		 * iterators if we do that
3062 		 */
3063 		if (!path->ref && !path->preserve)
3064 			__bch2_path_free(trans, i);
3065 		else
3066 			path->preserve = false;
3067 	}
3068 
3069 	now = local_clock();
3070 
3071 	if (!IS_ENABLED(CONFIG_BCACHEFS_NO_LATENCY_ACCT) &&
3072 	    time_after64(now, trans->last_begin_time + 10))
3073 		__bch2_time_stats_update(&btree_trans_stats(trans)->duration,
3074 					 trans->last_begin_time, now);
3075 
3076 	if (!trans->restarted &&
3077 	    (need_resched() ||
3078 	     time_after64(now, trans->last_begin_time + BTREE_TRANS_MAX_LOCK_HOLD_TIME_NS))) {
3079 		bch2_trans_unlock(trans);
3080 		cond_resched();
3081 		now = local_clock();
3082 	}
3083 	trans->last_begin_time = now;
3084 
3085 	if (unlikely(trans->srcu_held &&
3086 		     time_after(jiffies, trans->srcu_lock_time + msecs_to_jiffies(10))))
3087 		bch2_trans_srcu_unlock(trans);
3088 
3089 	trans->last_begin_ip = _RET_IP_;
3090 
3091 	trans_set_locked(trans);
3092 
3093 	if (trans->restarted) {
3094 		bch2_btree_path_traverse_all(trans);
3095 		trans->notrace_relock_fail = false;
3096 	}
3097 
3098 	bch2_trans_verify_not_unlocked(trans);
3099 	return trans->restart_count;
3100 }
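
/*
 * The canonical retry loop, per the comment above - an illustrative sketch
 * (not compiled here), where do_stuff() stands in for any sequence of btree
 * operations; this is essentially what the lockrestart_do() helper expands to:
 *
 *	int ret;
 *
 *	do {
 *		bch2_trans_begin(trans);
 *		ret = do_stuff(trans);
 *	} while (bch2_err_matches(ret, BCH_ERR_transaction_restart));
 */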
3101 
3102 const char *bch2_btree_transaction_fns[BCH_TRANSACTIONS_NR] = { "(unknown)" };
3103 
3104 unsigned bch2_trans_get_fn_idx(const char *fn)
3105 {
3106 	for (unsigned i = 0; i < ARRAY_SIZE(bch2_btree_transaction_fns); i++)
3107 		if (!bch2_btree_transaction_fns[i] ||
3108 		    bch2_btree_transaction_fns[i] == fn) {
3109 			bch2_btree_transaction_fns[i] = fn;
3110 			return i;
3111 		}
3112 
3113 	pr_warn_once("BCH_TRANSACTIONS_NR not big enough!");
3114 	return 0;
3115 }
3116 
3117 struct btree_trans *__bch2_trans_get(struct bch_fs *c, unsigned fn_idx)
3118 	__acquires(&c->btree_trans_barrier)
3119 {
3120 	struct btree_trans *trans;
3121 
3122 	if (IS_ENABLED(__KERNEL__)) {
3123 		trans = this_cpu_xchg(c->btree_trans_bufs->trans, NULL);
3124 		if (trans) {
3125 			memset(trans, 0, offsetof(struct btree_trans, list));
3126 			goto got_trans;
3127 		}
3128 	}
3129 
3130 	trans = mempool_alloc(&c->btree_trans_pool, GFP_NOFS);
3131 	memset(trans, 0, sizeof(*trans));
3132 
3133 	seqmutex_lock(&c->btree_trans_lock);
3134 	if (IS_ENABLED(CONFIG_BCACHEFS_DEBUG)) {
3135 		struct btree_trans *pos;
3136 		pid_t pid = current->pid;
3137 
3138 		trans->locking_wait.task = current;
3139 
3140 		list_for_each_entry(pos, &c->btree_trans_list, list) {
3141 			struct task_struct *pos_task = READ_ONCE(pos->locking_wait.task);
3142 			/*
3143 			 * We'd much prefer to be stricter here and completely
3144 			 * disallow multiple btree_trans in the same thread -
3145 			 * but the data move path calls bch2_write when we
3146 			 * already have a btree_trans initialized.
3147 			 */
3148 			BUG_ON(pos_task &&
3149 			       pid == pos_task->pid &&
3150 			       pos->locked);
3151 		}
3152 	}
3153 
3154 	list_add(&trans->list, &c->btree_trans_list);
3155 	seqmutex_unlock(&c->btree_trans_lock);
3156 got_trans:
3157 	trans->c		= c;
3158 	trans->last_begin_time	= local_clock();
3159 	trans->fn_idx		= fn_idx;
3160 	trans->locking_wait.task = current;
3161 	trans->journal_replay_not_finished =
3162 		unlikely(!test_bit(JOURNAL_replay_done, &c->journal.flags)) &&
3163 		atomic_inc_not_zero(&c->journal_keys.ref);
3164 	trans->nr_paths		= ARRAY_SIZE(trans->_paths);
3165 	trans->paths_allocated	= trans->_paths_allocated;
3166 	trans->sorted		= trans->_sorted;
3167 	trans->paths		= trans->_paths;
3168 	trans->updates		= trans->_updates;
3169 
3170 	*trans_paths_nr(trans->paths) = BTREE_ITER_INITIAL;
3171 
3172 	trans->paths_allocated[0] = 1;
3173 
3174 	static struct lock_class_key lockdep_key;
3175 	lockdep_init_map(&trans->dep_map, "bcachefs_btree", &lockdep_key, 0);
3176 
3177 	if (fn_idx < BCH_TRANSACTIONS_NR) {
3178 		trans->fn = bch2_btree_transaction_fns[fn_idx];
3179 
3180 		struct btree_transaction_stats *s = &c->btree_transaction_stats[fn_idx];
3181 
3182 		if (s->max_mem) {
3183 			unsigned expected_mem_bytes = roundup_pow_of_two(s->max_mem);
3184 
3185 			trans->mem = kmalloc(expected_mem_bytes, GFP_KERNEL);
3186 			if (likely(trans->mem))
3187 				trans->mem_bytes = expected_mem_bytes;
3188 		}
3189 
3190 		trans->nr_paths_max = s->nr_max_paths;
3191 		trans->journal_entries_size = s->journal_entries_size;
3192 	}
3193 
3194 	trans->srcu_idx		= srcu_read_lock(&c->btree_trans_barrier);
3195 	trans->srcu_lock_time	= jiffies;
3196 	trans->srcu_held	= true;
3197 	trans_set_locked(trans);
3198 
3199 	closure_init_stack_release(&trans->ref);
3200 	return trans;
3201 }
3202 
3203 static void check_btree_paths_leaked(struct btree_trans *trans)
3204 {
3205 #ifdef CONFIG_BCACHEFS_DEBUG
3206 	struct bch_fs *c = trans->c;
3207 	struct btree_path *path;
3208 	unsigned i;
3209 
3210 	trans_for_each_path(trans, path, i)
3211 		if (path->ref)
3212 			goto leaked;
3213 	return;
3214 leaked:
3215 	bch_err(c, "btree paths leaked from %s!", trans->fn);
3216 	trans_for_each_path(trans, path, i)
3217 		if (path->ref)
3218 			printk(KERN_ERR "  btree %s %pS\n",
3219 			       bch2_btree_id_str(path->btree_id),
3220 			       (void *) path->ip_allocated);
3221 	/* Be noisy about this: */
3222 	bch2_fatal_error(c);
3223 #endif
3224 }
3225 
3226 void bch2_trans_put(struct btree_trans *trans)
3227 	__releases(&c->btree_trans_barrier)
3228 {
3229 	struct bch_fs *c = trans->c;
3230 
3231 	bch2_trans_unlock(trans);
3232 
3233 	trans_for_each_update(trans, i)
3234 		__btree_path_put(trans->paths + i->path, true);
3235 	trans->nr_updates	= 0;
3236 
3237 	check_btree_paths_leaked(trans);
3238 
3239 	if (trans->srcu_held) {
3240 		check_srcu_held_too_long(trans);
3241 		srcu_read_unlock(&c->btree_trans_barrier, trans->srcu_idx);
3242 	}
3243 
3244 	if (unlikely(trans->journal_replay_not_finished))
3245 		bch2_journal_keys_put(c);
3246 
3247 	/*
3248 	 * trans->ref protects trans->locking_wait.task and the btree_paths
3249 	 * array; both are used by the lock cycle detector
3250 	 */
3251 	closure_return_sync(&trans->ref);
3252 	trans->locking_wait.task = NULL;
3253 
3254 	unsigned long *paths_allocated = trans->paths_allocated;
3255 	trans->paths_allocated	= NULL;
3256 	trans->paths		= NULL;
3257 
3258 	if (paths_allocated != trans->_paths_allocated)
3259 		kvfree_rcu_mightsleep(paths_allocated);
3260 
3261 	if (trans->used_mempool)
3262 		mempool_free(trans->mem, &c->btree_trans_mem_pool);
3263 	else
3264 		kfree(trans->mem);
3265 
3266 	/* Userspace doesn't have a real percpu implementation: */
3267 	if (IS_ENABLED(__KERNEL__))
3268 		trans = this_cpu_xchg(c->btree_trans_bufs->trans, trans);
3269 
3270 	if (trans) {
3271 		seqmutex_lock(&c->btree_trans_lock);
3272 		list_del(&trans->list);
3273 		seqmutex_unlock(&c->btree_trans_lock);
3274 
3275 		mempool_free(trans, &c->btree_trans_pool);
3276 	}
3277 }
3278 
3279 bool bch2_current_has_btree_trans(struct bch_fs *c)
3280 {
3281 	seqmutex_lock(&c->btree_trans_lock);
3282 	struct btree_trans *trans;
3283 	bool ret = false;
3284 	list_for_each_entry(trans, &c->btree_trans_list, list)
3285 		if (trans->locking_wait.task == current &&
3286 		    trans->locked) {
3287 			ret = true;
3288 			break;
3289 		}
3290 	seqmutex_unlock(&c->btree_trans_lock);
3291 	return ret;
3292 }
3293 
3294 static void __maybe_unused
3295 bch2_btree_bkey_cached_common_to_text(struct printbuf *out,
3296 				      struct btree_bkey_cached_common *b)
3297 {
3298 	struct six_lock_count c = six_lock_counts(&b->lock);
3299 	struct task_struct *owner;
3300 	pid_t pid;
3301 
3302 	rcu_read_lock();
3303 	owner = READ_ONCE(b->lock.owner);
3304 	pid = owner ? owner->pid : 0;
3305 	rcu_read_unlock();
3306 
3307 	prt_printf(out, "\t%px %c l=%u %s:", b, b->cached ? 'c' : 'b',
3308 		   b->level, bch2_btree_id_str(b->btree_id));
3309 	bch2_bpos_to_text(out, btree_node_pos(b));
3310 
3311 	prt_printf(out, "\t locks %u:%u:%u held by pid %u",
3312 		   c.n[0], c.n[1], c.n[2], pid);
3313 }
3314 
3315 void bch2_btree_trans_to_text(struct printbuf *out, struct btree_trans *trans)
3316 {
3317 	struct btree_bkey_cached_common *b;
3318 	static char lock_types[] = { 'r', 'i', 'w' };
3319 	struct task_struct *task = READ_ONCE(trans->locking_wait.task);
3320 	unsigned l, idx;
3321 
3322 	/* before rcu_read_lock(): */
3323 	bch2_printbuf_make_room(out, 4096);
3324 
3325 	if (!out->nr_tabstops) {
3326 		printbuf_tabstop_push(out, 16);
3327 		printbuf_tabstop_push(out, 32);
3328 	}
3329 
3330 	prt_printf(out, "%i %s\n", task ? task->pid : 0, trans->fn);
3331 
3332 	/* trans->paths is rcu protected vs. freeing */
3333 	rcu_read_lock();
3334 	out->atomic++;
3335 
3336 	struct btree_path *paths = rcu_dereference(trans->paths);
3337 	if (!paths)
3338 		goto out;
3339 
3340 	unsigned long *paths_allocated = trans_paths_allocated(paths);
3341 
3342 	trans_for_each_path_idx_from(paths_allocated, *trans_paths_nr(paths), idx, 1) {
3343 		struct btree_path *path = paths + idx;
3344 		if (!path->nodes_locked)
3345 			continue;
3346 
3347 		prt_printf(out, "  path %u %c l=%u %s:",
3348 		       idx,
3349 		       path->cached ? 'c' : 'b',
3350 		       path->level,
3351 		       bch2_btree_id_str(path->btree_id));
3352 		bch2_bpos_to_text(out, path->pos);
3353 		prt_newline(out);
3354 
3355 		for (l = 0; l < BTREE_MAX_DEPTH; l++) {
3356 			if (btree_node_locked(path, l) &&
3357 			    !IS_ERR_OR_NULL(b = (void *) READ_ONCE(path->l[l].b))) {
3358 				prt_printf(out, "    %c l=%u ",
3359 					   lock_types[btree_node_locked_type(path, l)], l);
3360 				bch2_btree_bkey_cached_common_to_text(out, b);
3361 				prt_newline(out);
3362 			}
3363 		}
3364 	}
3365 
3366 	b = READ_ONCE(trans->locking);
3367 	if (b) {
3368 		prt_printf(out, "  blocked for %lluus on\n",
3369 			   div_u64(local_clock() - trans->locking_wait.start_time, 1000));
3370 		prt_printf(out, "    %c", lock_types[trans->locking_wait.lock_want]);
3371 		bch2_btree_bkey_cached_common_to_text(out, b);
3372 		prt_newline(out);
3373 	}
3374 out:
3375 	--out->atomic;
3376 	rcu_read_unlock();
3377 }
3378 
3379 void bch2_fs_btree_iter_exit(struct bch_fs *c)
3380 {
3381 	struct btree_transaction_stats *s;
3382 	struct btree_trans *trans;
3383 	int cpu;
3384 
3385 	if (c->btree_trans_bufs)
3386 		for_each_possible_cpu(cpu) {
3387 			struct btree_trans *trans =
3388 				per_cpu_ptr(c->btree_trans_bufs, cpu)->trans;
3389 
3390 			if (trans) {
3391 				seqmutex_lock(&c->btree_trans_lock);
3392 				list_del(&trans->list);
3393 				seqmutex_unlock(&c->btree_trans_lock);
3394 			}
3395 			kfree(trans);
3396 		}
3397 	free_percpu(c->btree_trans_bufs);
3398 
3399 	trans = list_first_entry_or_null(&c->btree_trans_list, struct btree_trans, list);
3400 	if (trans)
3401 		panic("%s leaked btree_trans\n", trans->fn);
3402 
3403 	for (s = c->btree_transaction_stats;
3404 	     s < c->btree_transaction_stats + ARRAY_SIZE(c->btree_transaction_stats);
3405 	     s++) {
3406 		kfree(s->max_paths_text);
3407 		bch2_time_stats_exit(&s->lock_hold_times);
3408 	}
3409 
3410 	if (c->btree_trans_barrier_initialized) {
3411 		synchronize_srcu_expedited(&c->btree_trans_barrier);
3412 		cleanup_srcu_struct(&c->btree_trans_barrier);
3413 	}
3414 	mempool_exit(&c->btree_trans_mem_pool);
3415 	mempool_exit(&c->btree_trans_pool);
3416 }
3417 
3418 void bch2_fs_btree_iter_init_early(struct bch_fs *c)
3419 {
3420 	struct btree_transaction_stats *s;
3421 
3422 	for (s = c->btree_transaction_stats;
3423 	     s < c->btree_transaction_stats + ARRAY_SIZE(c->btree_transaction_stats);
3424 	     s++) {
3425 		bch2_time_stats_init(&s->duration);
3426 		bch2_time_stats_init(&s->lock_hold_times);
3427 		mutex_init(&s->lock);
3428 	}
3429 
3430 	INIT_LIST_HEAD(&c->btree_trans_list);
3431 	seqmutex_init(&c->btree_trans_lock);
3432 }
3433 
3434 int bch2_fs_btree_iter_init(struct bch_fs *c)
3435 {
3436 	int ret;
3437 
3438 	c->btree_trans_bufs = alloc_percpu(struct btree_trans_buf);
3439 	if (!c->btree_trans_bufs)
3440 		return -ENOMEM;
3441 
3442 	ret   = mempool_init_kmalloc_pool(&c->btree_trans_pool, 1,
3443 					  sizeof(struct btree_trans)) ?:
3444 		mempool_init_kmalloc_pool(&c->btree_trans_mem_pool, 1,
3445 					  BTREE_TRANS_MEM_MAX) ?:
3446 		init_srcu_struct(&c->btree_trans_barrier);
3447 	if (ret)
3448 		return ret;
3449 
3450 	/*
3451 	 * static annotation (hackily done) for lock ordering of reclaim vs.
3452 	 * btree node locks:
3453 	 */
3454 #ifdef CONFIG_LOCKDEP
3455 	fs_reclaim_acquire(GFP_KERNEL);
3456 	struct btree_trans *trans = bch2_trans_get(c);
3457 	trans_set_locked(trans);
3458 	bch2_trans_put(trans);
3459 	fs_reclaim_release(GFP_KERNEL);
3460 #endif
3461 
3462 	c->btree_trans_barrier_initialized = true;
3463 	return 0;
3465 }
3466