xref: /linux/fs/bcachefs/btree_iter.c (revision 4a4b30ea80d8cb5e8c4c62bb86201f4ea0d9b030)
1 // SPDX-License-Identifier: GPL-2.0
2 
3 #include "bcachefs.h"
4 #include "bkey_methods.h"
5 #include "bkey_buf.h"
6 #include "btree_cache.h"
7 #include "btree_iter.h"
8 #include "btree_journal_iter.h"
9 #include "btree_key_cache.h"
10 #include "btree_locking.h"
11 #include "btree_update.h"
12 #include "debug.h"
13 #include "error.h"
14 #include "extents.h"
15 #include "journal.h"
16 #include "journal_io.h"
17 #include "replicas.h"
18 #include "snapshot.h"
19 #include "trace.h"
20 
21 #include <linux/random.h>
22 #include <linux/prefetch.h>
23 
24 static inline void btree_path_list_remove(struct btree_trans *, struct btree_path *);
25 static inline void btree_path_list_add(struct btree_trans *,
26 			btree_path_idx_t, btree_path_idx_t);
27 
btree_iter_ip_allocated(struct btree_iter * iter)28 static inline unsigned long btree_iter_ip_allocated(struct btree_iter *iter)
29 {
30 #ifdef TRACK_PATH_ALLOCATED
31 	return iter->ip_allocated;
32 #else
33 	return 0;
34 #endif
35 }
36 
37 static btree_path_idx_t btree_path_alloc(struct btree_trans *, btree_path_idx_t);
38 static void bch2_trans_srcu_lock(struct btree_trans *);
39 
__btree_path_cmp(const struct btree_path * l,enum btree_id r_btree_id,bool r_cached,struct bpos r_pos,unsigned r_level)40 static inline int __btree_path_cmp(const struct btree_path *l,
41 				   enum btree_id	r_btree_id,
42 				   bool			r_cached,
43 				   struct bpos		r_pos,
44 				   unsigned		r_level)
45 {
46 	/*
47 	 * Must match lock ordering as defined by __bch2_btree_node_lock:
48 	 */
49 	return   cmp_int(l->btree_id,	r_btree_id) ?:
50 		 cmp_int((int) l->cached,	(int) r_cached) ?:
51 		 bpos_cmp(l->pos,	r_pos) ?:
52 		-cmp_int(l->level,	r_level);
53 }
54 
btree_path_cmp(const struct btree_path * l,const struct btree_path * r)55 static inline int btree_path_cmp(const struct btree_path *l,
56 				 const struct btree_path *r)
57 {
58 	return __btree_path_cmp(l, r->btree_id, r->cached, r->pos, r->level);
59 }
60 
bkey_successor(struct btree_iter * iter,struct bpos p)61 static inline struct bpos bkey_successor(struct btree_iter *iter, struct bpos p)
62 {
63 	/* Are we iterating over keys in all snapshots? */
64 	if (iter->flags & BTREE_ITER_all_snapshots) {
65 		p = bpos_successor(p);
66 	} else {
67 		p = bpos_nosnap_successor(p);
68 		p.snapshot = iter->snapshot;
69 	}
70 
71 	return p;
72 }
73 
bkey_predecessor(struct btree_iter * iter,struct bpos p)74 static inline struct bpos bkey_predecessor(struct btree_iter *iter, struct bpos p)
75 {
76 	/* Are we iterating over keys in all snapshots? */
77 	if (iter->flags & BTREE_ITER_all_snapshots) {
78 		p = bpos_predecessor(p);
79 	} else {
80 		p = bpos_nosnap_predecessor(p);
81 		p.snapshot = iter->snapshot;
82 	}
83 
84 	return p;
85 }
86 
btree_iter_search_key(struct btree_iter * iter)87 static inline struct bpos btree_iter_search_key(struct btree_iter *iter)
88 {
89 	struct bpos pos = iter->pos;
90 
91 	if ((iter->flags & BTREE_ITER_is_extents) &&
92 	    !bkey_eq(pos, POS_MAX))
93 		pos = bkey_successor(iter, pos);
94 	return pos;
95 }
96 
btree_path_pos_before_node(struct btree_path * path,struct btree * b)97 static inline bool btree_path_pos_before_node(struct btree_path *path,
98 					      struct btree *b)
99 {
100 	return bpos_lt(path->pos, b->data->min_key);
101 }
102 
btree_path_pos_after_node(struct btree_path * path,struct btree * b)103 static inline bool btree_path_pos_after_node(struct btree_path *path,
104 					     struct btree *b)
105 {
106 	return bpos_gt(path->pos, b->key.k.p);
107 }
108 
btree_path_pos_in_node(struct btree_path * path,struct btree * b)109 static inline bool btree_path_pos_in_node(struct btree_path *path,
110 					  struct btree *b)
111 {
112 	return path->btree_id == b->c.btree_id &&
113 		!btree_path_pos_before_node(path, b) &&
114 		!btree_path_pos_after_node(path, b);
115 }
116 
117 /* Btree iterator: */
118 
119 #ifdef CONFIG_BCACHEFS_DEBUG
120 
bch2_btree_path_verify_cached(struct btree_trans * trans,struct btree_path * path)121 static void bch2_btree_path_verify_cached(struct btree_trans *trans,
122 					  struct btree_path *path)
123 {
124 	struct bkey_cached *ck;
125 	bool locked = btree_node_locked(path, 0);
126 
127 	if (!bch2_btree_node_relock(trans, path, 0))
128 		return;
129 
130 	ck = (void *) path->l[0].b;
131 	BUG_ON(ck->key.btree_id != path->btree_id ||
132 	       !bkey_eq(ck->key.pos, path->pos));
133 
134 	if (!locked)
135 		btree_node_unlock(trans, path, 0);
136 }
137 
bch2_btree_path_verify_level(struct btree_trans * trans,struct btree_path * path,unsigned level)138 static void bch2_btree_path_verify_level(struct btree_trans *trans,
139 				struct btree_path *path, unsigned level)
140 {
141 	struct btree_path_level *l;
142 	struct btree_node_iter tmp;
143 	bool locked;
144 	struct bkey_packed *p, *k;
145 	struct printbuf buf1 = PRINTBUF;
146 	struct printbuf buf2 = PRINTBUF;
147 	struct printbuf buf3 = PRINTBUF;
148 	const char *msg;
149 
150 	if (!bch2_debug_check_iterators)
151 		return;
152 
153 	l	= &path->l[level];
154 	tmp	= l->iter;
155 	locked	= btree_node_locked(path, level);
156 
157 	if (path->cached) {
158 		if (!level)
159 			bch2_btree_path_verify_cached(trans, path);
160 		return;
161 	}
162 
163 	if (!btree_path_node(path, level))
164 		return;
165 
166 	if (!bch2_btree_node_relock_notrace(trans, path, level))
167 		return;
168 
169 	BUG_ON(!btree_path_pos_in_node(path, l->b));
170 
171 	bch2_btree_node_iter_verify(&l->iter, l->b);
172 
173 	/*
174 	 * For interior nodes, the iterator will have skipped past deleted keys:
175 	 */
176 	p = level
177 		? bch2_btree_node_iter_prev(&tmp, l->b)
178 		: bch2_btree_node_iter_prev_all(&tmp, l->b);
179 	k = bch2_btree_node_iter_peek_all(&l->iter, l->b);
180 
181 	if (p && bkey_iter_pos_cmp(l->b, p, &path->pos) >= 0) {
182 		msg = "before";
183 		goto err;
184 	}
185 
186 	if (k && bkey_iter_pos_cmp(l->b, k, &path->pos) < 0) {
187 		msg = "after";
188 		goto err;
189 	}
190 
191 	if (!locked)
192 		btree_node_unlock(trans, path, level);
193 	return;
194 err:
195 	bch2_bpos_to_text(&buf1, path->pos);
196 
197 	if (p) {
198 		struct bkey uk = bkey_unpack_key(l->b, p);
199 
200 		bch2_bkey_to_text(&buf2, &uk);
201 	} else {
202 		prt_printf(&buf2, "(none)");
203 	}
204 
205 	if (k) {
206 		struct bkey uk = bkey_unpack_key(l->b, k);
207 
208 		bch2_bkey_to_text(&buf3, &uk);
209 	} else {
210 		prt_printf(&buf3, "(none)");
211 	}
212 
213 	panic("path should be %s key at level %u:\n"
214 	      "path pos %s\n"
215 	      "prev key %s\n"
216 	      "cur  key %s\n",
217 	      msg, level, buf1.buf, buf2.buf, buf3.buf);
218 }
219 
bch2_btree_path_verify(struct btree_trans * trans,struct btree_path * path)220 static void bch2_btree_path_verify(struct btree_trans *trans,
221 				   struct btree_path *path)
222 {
223 	struct bch_fs *c = trans->c;
224 
225 	for (unsigned i = 0; i < (!path->cached ? BTREE_MAX_DEPTH : 1); i++) {
226 		if (!path->l[i].b) {
227 			BUG_ON(!path->cached &&
228 			       bch2_btree_id_root(c, path->btree_id)->b->c.level > i);
229 			break;
230 		}
231 
232 		bch2_btree_path_verify_level(trans, path, i);
233 	}
234 
235 	bch2_btree_path_verify_locks(path);
236 }
237 
bch2_trans_verify_paths(struct btree_trans * trans)238 void bch2_trans_verify_paths(struct btree_trans *trans)
239 {
240 	struct btree_path *path;
241 	unsigned iter;
242 
243 	trans_for_each_path(trans, path, iter)
244 		bch2_btree_path_verify(trans, path);
245 }
246 
bch2_btree_iter_verify(struct btree_iter * iter)247 static void bch2_btree_iter_verify(struct btree_iter *iter)
248 {
249 	struct btree_trans *trans = iter->trans;
250 
251 	BUG_ON(!!(iter->flags & BTREE_ITER_cached) != btree_iter_path(trans, iter)->cached);
252 
253 	BUG_ON((iter->flags & BTREE_ITER_is_extents) &&
254 	       (iter->flags & BTREE_ITER_all_snapshots));
255 
256 	BUG_ON(!(iter->flags & BTREE_ITER_snapshot_field) &&
257 	       (iter->flags & BTREE_ITER_all_snapshots) &&
258 	       !btree_type_has_snapshot_field(iter->btree_id));
259 
260 	if (iter->update_path)
261 		bch2_btree_path_verify(trans, &trans->paths[iter->update_path]);
262 	bch2_btree_path_verify(trans, btree_iter_path(trans, iter));
263 }
264 
bch2_btree_iter_verify_entry_exit(struct btree_iter * iter)265 static void bch2_btree_iter_verify_entry_exit(struct btree_iter *iter)
266 {
267 	BUG_ON((iter->flags & BTREE_ITER_filter_snapshots) &&
268 	       !iter->pos.snapshot);
269 
270 	BUG_ON(!(iter->flags & BTREE_ITER_all_snapshots) &&
271 	       iter->pos.snapshot != iter->snapshot);
272 
273 	BUG_ON(iter->flags & BTREE_ITER_all_snapshots	? !bpos_eq(iter->pos, iter->k.p) :
274 	       !(iter->flags & BTREE_ITER_is_extents)	? !bkey_eq(iter->pos, iter->k.p) :
275 	       (bkey_lt(iter->pos, bkey_start_pos(&iter->k)) ||
276 		bkey_gt(iter->pos, iter->k.p)));
277 }
278 
bch2_btree_iter_verify_ret(struct btree_iter * iter,struct bkey_s_c k)279 static int bch2_btree_iter_verify_ret(struct btree_iter *iter, struct bkey_s_c k)
280 {
281 	struct btree_trans *trans = iter->trans;
282 	struct btree_iter copy;
283 	struct bkey_s_c prev;
284 	int ret = 0;
285 
286 	if (!bch2_debug_check_iterators)
287 		return 0;
288 
289 	if (!(iter->flags & BTREE_ITER_filter_snapshots))
290 		return 0;
291 
292 	if (bkey_err(k) || !k.k)
293 		return 0;
294 
295 	BUG_ON(!bch2_snapshot_is_ancestor(trans->c,
296 					  iter->snapshot,
297 					  k.k->p.snapshot));
298 
299 	bch2_trans_iter_init(trans, &copy, iter->btree_id, iter->pos,
300 			     BTREE_ITER_nopreserve|
301 			     BTREE_ITER_all_snapshots);
302 	prev = bch2_btree_iter_prev(&copy);
303 	if (!prev.k)
304 		goto out;
305 
306 	ret = bkey_err(prev);
307 	if (ret)
308 		goto out;
309 
310 	if (bkey_eq(prev.k->p, k.k->p) &&
311 	    bch2_snapshot_is_ancestor(trans->c, iter->snapshot,
312 				      prev.k->p.snapshot) > 0) {
313 		struct printbuf buf1 = PRINTBUF, buf2 = PRINTBUF;
314 
315 		bch2_bkey_to_text(&buf1, k.k);
316 		bch2_bkey_to_text(&buf2, prev.k);
317 
318 		panic("iter snap %u\n"
319 		      "k    %s\n"
320 		      "prev %s\n",
321 		      iter->snapshot,
322 		      buf1.buf, buf2.buf);
323 	}
324 out:
325 	bch2_trans_iter_exit(trans, &copy);
326 	return ret;
327 }
328 
bch2_assert_pos_locked(struct btree_trans * trans,enum btree_id id,struct bpos pos)329 void bch2_assert_pos_locked(struct btree_trans *trans, enum btree_id id,
330 			    struct bpos pos)
331 {
332 	bch2_trans_verify_not_unlocked_or_in_restart(trans);
333 
334 	struct btree_path *path;
335 	struct trans_for_each_path_inorder_iter iter;
336 	struct printbuf buf = PRINTBUF;
337 
338 	btree_trans_sort_paths(trans);
339 
340 	trans_for_each_path_inorder(trans, path, iter) {
341 		if (path->btree_id != id ||
342 		    !btree_node_locked(path, 0) ||
343 		    !path->should_be_locked)
344 			continue;
345 
346 		if (!path->cached) {
347 			if (bkey_ge(pos, path->l[0].b->data->min_key) &&
348 			    bkey_le(pos, path->l[0].b->key.k.p))
349 				return;
350 		} else {
351 			if (bkey_eq(pos, path->pos))
352 				return;
353 		}
354 	}
355 
356 	bch2_dump_trans_paths_updates(trans);
357 	bch2_bpos_to_text(&buf, pos);
358 
359 	panic("not locked: %s %s\n", bch2_btree_id_str(id), buf.buf);
360 }
361 
362 #else
363 
bch2_btree_path_verify_level(struct btree_trans * trans,struct btree_path * path,unsigned l)364 static inline void bch2_btree_path_verify_level(struct btree_trans *trans,
365 						struct btree_path *path, unsigned l) {}
bch2_btree_path_verify(struct btree_trans * trans,struct btree_path * path)366 static inline void bch2_btree_path_verify(struct btree_trans *trans,
367 					  struct btree_path *path) {}
bch2_btree_iter_verify(struct btree_iter * iter)368 static inline void bch2_btree_iter_verify(struct btree_iter *iter) {}
bch2_btree_iter_verify_entry_exit(struct btree_iter * iter)369 static inline void bch2_btree_iter_verify_entry_exit(struct btree_iter *iter) {}
bch2_btree_iter_verify_ret(struct btree_iter * iter,struct bkey_s_c k)370 static inline int bch2_btree_iter_verify_ret(struct btree_iter *iter, struct bkey_s_c k) { return 0; }
371 
372 #endif
373 
374 /* Btree path: fixups after btree updates */
375 
btree_node_iter_set_set_pos(struct btree_node_iter * iter,struct btree * b,struct bset_tree * t,struct bkey_packed * k)376 static void btree_node_iter_set_set_pos(struct btree_node_iter *iter,
377 					struct btree *b,
378 					struct bset_tree *t,
379 					struct bkey_packed *k)
380 {
381 	struct btree_node_iter_set *set;
382 
383 	btree_node_iter_for_each(iter, set)
384 		if (set->end == t->end_offset) {
385 			set->k = __btree_node_key_to_offset(b, k);
386 			bch2_btree_node_iter_sort(iter, b);
387 			return;
388 		}
389 
390 	bch2_btree_node_iter_push(iter, b, k, btree_bkey_last(b, t));
391 }
392 
__bch2_btree_path_fix_key_modified(struct btree_path * path,struct btree * b,struct bkey_packed * where)393 static void __bch2_btree_path_fix_key_modified(struct btree_path *path,
394 					       struct btree *b,
395 					       struct bkey_packed *where)
396 {
397 	struct btree_path_level *l = &path->l[b->c.level];
398 
399 	if (where != bch2_btree_node_iter_peek_all(&l->iter, l->b))
400 		return;
401 
402 	if (bkey_iter_pos_cmp(l->b, where, &path->pos) < 0)
403 		bch2_btree_node_iter_advance(&l->iter, l->b);
404 }
405 
bch2_btree_path_fix_key_modified(struct btree_trans * trans,struct btree * b,struct bkey_packed * where)406 void bch2_btree_path_fix_key_modified(struct btree_trans *trans,
407 				      struct btree *b,
408 				      struct bkey_packed *where)
409 {
410 	struct btree_path *path;
411 	unsigned i;
412 
413 	trans_for_each_path_with_node(trans, b, path, i) {
414 		__bch2_btree_path_fix_key_modified(path, b, where);
415 		bch2_btree_path_verify_level(trans, path, b->c.level);
416 	}
417 }
418 
__bch2_btree_node_iter_fix(struct btree_path * path,struct btree * b,struct btree_node_iter * node_iter,struct bset_tree * t,struct bkey_packed * where,unsigned clobber_u64s,unsigned new_u64s)419 static void __bch2_btree_node_iter_fix(struct btree_path *path,
420 				       struct btree *b,
421 				       struct btree_node_iter *node_iter,
422 				       struct bset_tree *t,
423 				       struct bkey_packed *where,
424 				       unsigned clobber_u64s,
425 				       unsigned new_u64s)
426 {
427 	const struct bkey_packed *end = btree_bkey_last(b, t);
428 	struct btree_node_iter_set *set;
429 	unsigned offset = __btree_node_key_to_offset(b, where);
430 	int shift = new_u64s - clobber_u64s;
431 	unsigned old_end = t->end_offset - shift;
432 	unsigned orig_iter_pos = node_iter->data[0].k;
433 	bool iter_current_key_modified =
434 		orig_iter_pos >= offset &&
435 		orig_iter_pos <= offset + clobber_u64s;
436 
437 	btree_node_iter_for_each(node_iter, set)
438 		if (set->end == old_end)
439 			goto found;
440 
441 	/* didn't find the bset in the iterator - might have to readd it: */
442 	if (new_u64s &&
443 	    bkey_iter_pos_cmp(b, where, &path->pos) >= 0) {
444 		bch2_btree_node_iter_push(node_iter, b, where, end);
445 		goto fixup_done;
446 	} else {
447 		/* Iterator is after key that changed */
448 		return;
449 	}
450 found:
451 	set->end = t->end_offset;
452 
453 	/* Iterator hasn't gotten to the key that changed yet: */
454 	if (set->k < offset)
455 		return;
456 
457 	if (new_u64s &&
458 	    bkey_iter_pos_cmp(b, where, &path->pos) >= 0) {
459 		set->k = offset;
460 	} else if (set->k < offset + clobber_u64s) {
461 		set->k = offset + new_u64s;
462 		if (set->k == set->end)
463 			bch2_btree_node_iter_set_drop(node_iter, set);
464 	} else {
465 		/* Iterator is after key that changed */
466 		set->k = (int) set->k + shift;
467 		return;
468 	}
469 
470 	bch2_btree_node_iter_sort(node_iter, b);
471 fixup_done:
472 	if (node_iter->data[0].k != orig_iter_pos)
473 		iter_current_key_modified = true;
474 
475 	/*
476 	 * When a new key is added, and the node iterator now points to that
477 	 * key, the iterator might have skipped past deleted keys that should
478 	 * come after the key the iterator now points to. We have to rewind to
479 	 * before those deleted keys - otherwise
480 	 * bch2_btree_node_iter_prev_all() breaks:
481 	 */
482 	if (!bch2_btree_node_iter_end(node_iter) &&
483 	    iter_current_key_modified &&
484 	    b->c.level) {
485 		struct bkey_packed *k, *k2, *p;
486 
487 		k = bch2_btree_node_iter_peek_all(node_iter, b);
488 
489 		for_each_bset(b, t) {
490 			bool set_pos = false;
491 
492 			if (node_iter->data[0].end == t->end_offset)
493 				continue;
494 
495 			k2 = bch2_btree_node_iter_bset_pos(node_iter, b, t);
496 
497 			while ((p = bch2_bkey_prev_all(b, t, k2)) &&
498 			       bkey_iter_cmp(b, k, p) < 0) {
499 				k2 = p;
500 				set_pos = true;
501 			}
502 
503 			if (set_pos)
504 				btree_node_iter_set_set_pos(node_iter,
505 							    b, t, k2);
506 		}
507 	}
508 }
509 
bch2_btree_node_iter_fix(struct btree_trans * trans,struct btree_path * path,struct btree * b,struct btree_node_iter * node_iter,struct bkey_packed * where,unsigned clobber_u64s,unsigned new_u64s)510 void bch2_btree_node_iter_fix(struct btree_trans *trans,
511 			      struct btree_path *path,
512 			      struct btree *b,
513 			      struct btree_node_iter *node_iter,
514 			      struct bkey_packed *where,
515 			      unsigned clobber_u64s,
516 			      unsigned new_u64s)
517 {
518 	struct bset_tree *t = bch2_bkey_to_bset_inlined(b, where);
519 	struct btree_path *linked;
520 	unsigned i;
521 
522 	if (node_iter != &path->l[b->c.level].iter) {
523 		__bch2_btree_node_iter_fix(path, b, node_iter, t,
524 					   where, clobber_u64s, new_u64s);
525 
526 		if (bch2_debug_check_iterators)
527 			bch2_btree_node_iter_verify(node_iter, b);
528 	}
529 
530 	trans_for_each_path_with_node(trans, b, linked, i) {
531 		__bch2_btree_node_iter_fix(linked, b,
532 					   &linked->l[b->c.level].iter, t,
533 					   where, clobber_u64s, new_u64s);
534 		bch2_btree_path_verify_level(trans, linked, b->c.level);
535 	}
536 }
537 
538 /* Btree path level: pointer to a particular btree node and node iter */
539 
__btree_iter_unpack(struct bch_fs * c,struct btree_path_level * l,struct bkey * u,struct bkey_packed * k)540 static inline struct bkey_s_c __btree_iter_unpack(struct bch_fs *c,
541 						  struct btree_path_level *l,
542 						  struct bkey *u,
543 						  struct bkey_packed *k)
544 {
545 	if (unlikely(!k)) {
546 		/*
547 		 * signal to bch2_btree_iter_peek_slot() that we're currently at
548 		 * a hole
549 		 */
550 		u->type = KEY_TYPE_deleted;
551 		return bkey_s_c_null;
552 	}
553 
554 	return bkey_disassemble(l->b, k, u);
555 }
556 
btree_path_level_peek_all(struct bch_fs * c,struct btree_path_level * l,struct bkey * u)557 static inline struct bkey_s_c btree_path_level_peek_all(struct bch_fs *c,
558 							struct btree_path_level *l,
559 							struct bkey *u)
560 {
561 	return __btree_iter_unpack(c, l, u,
562 			bch2_btree_node_iter_peek_all(&l->iter, l->b));
563 }
564 
btree_path_level_prev(struct btree_trans * trans,struct btree_path * path,struct btree_path_level * l,struct bkey * u)565 static inline struct bkey_s_c btree_path_level_prev(struct btree_trans *trans,
566 						    struct btree_path *path,
567 						    struct btree_path_level *l,
568 						    struct bkey *u)
569 {
570 	struct bkey_s_c k = __btree_iter_unpack(trans->c, l, u,
571 			bch2_btree_node_iter_prev(&l->iter, l->b));
572 
573 	path->pos = k.k ? k.k->p : l->b->data->min_key;
574 	trans->paths_sorted = false;
575 	bch2_btree_path_verify_level(trans, path, l - path->l);
576 	return k;
577 }
578 
btree_path_advance_to_pos(struct btree_path * path,struct btree_path_level * l,int max_advance)579 static inline bool btree_path_advance_to_pos(struct btree_path *path,
580 					     struct btree_path_level *l,
581 					     int max_advance)
582 {
583 	struct bkey_packed *k;
584 	int nr_advanced = 0;
585 
586 	while ((k = bch2_btree_node_iter_peek_all(&l->iter, l->b)) &&
587 	       bkey_iter_pos_cmp(l->b, k, &path->pos) < 0) {
588 		if (max_advance > 0 && nr_advanced >= max_advance)
589 			return false;
590 
591 		bch2_btree_node_iter_advance(&l->iter, l->b);
592 		nr_advanced++;
593 	}
594 
595 	return true;
596 }
597 
__btree_path_level_init(struct btree_path * path,unsigned level)598 static inline void __btree_path_level_init(struct btree_path *path,
599 					   unsigned level)
600 {
601 	struct btree_path_level *l = &path->l[level];
602 
603 	bch2_btree_node_iter_init(&l->iter, l->b, &path->pos);
604 
605 	/*
606 	 * Iterators to interior nodes should always be pointed at the first non
607 	 * whiteout:
608 	 */
609 	if (level)
610 		bch2_btree_node_iter_peek(&l->iter, l->b);
611 }
612 
bch2_btree_path_level_init(struct btree_trans * trans,struct btree_path * path,struct btree * b)613 void bch2_btree_path_level_init(struct btree_trans *trans,
614 				struct btree_path *path,
615 				struct btree *b)
616 {
617 	BUG_ON(path->cached);
618 
619 	EBUG_ON(!btree_path_pos_in_node(path, b));
620 
621 	path->l[b->c.level].lock_seq = six_lock_seq(&b->c.lock);
622 	path->l[b->c.level].b = b;
623 	__btree_path_level_init(path, b->c.level);
624 }
625 
626 /* Btree path: fixups after btree node updates: */
627 
bch2_trans_revalidate_updates_in_node(struct btree_trans * trans,struct btree * b)628 static void bch2_trans_revalidate_updates_in_node(struct btree_trans *trans, struct btree *b)
629 {
630 	struct bch_fs *c = trans->c;
631 
632 	trans_for_each_update(trans, i)
633 		if (!i->cached &&
634 		    i->level	== b->c.level &&
635 		    i->btree_id	== b->c.btree_id &&
636 		    bpos_cmp(i->k->k.p, b->data->min_key) >= 0 &&
637 		    bpos_cmp(i->k->k.p, b->data->max_key) <= 0) {
638 			i->old_v = bch2_btree_path_peek_slot(trans->paths + i->path, &i->old_k).v;
639 
640 			if (unlikely(trans->journal_replay_not_finished)) {
641 				struct bkey_i *j_k =
642 					bch2_journal_keys_peek_slot(c, i->btree_id, i->level,
643 								    i->k->k.p);
644 
645 				if (j_k) {
646 					i->old_k = j_k->k;
647 					i->old_v = &j_k->v;
648 				}
649 			}
650 		}
651 }
652 
653 /*
654  * A btree node is being replaced - update the iterator to point to the new
655  * node:
656  */
bch2_trans_node_add(struct btree_trans * trans,struct btree_path * path,struct btree * b)657 void bch2_trans_node_add(struct btree_trans *trans,
658 			 struct btree_path *path,
659 			 struct btree *b)
660 {
661 	struct btree_path *prev;
662 
663 	BUG_ON(!btree_path_pos_in_node(path, b));
664 
665 	while ((prev = prev_btree_path(trans, path)) &&
666 	       btree_path_pos_in_node(prev, b))
667 		path = prev;
668 
669 	for (;
670 	     path && btree_path_pos_in_node(path, b);
671 	     path = next_btree_path(trans, path))
672 		if (path->uptodate == BTREE_ITER_UPTODATE && !path->cached) {
673 			enum btree_node_locked_type t =
674 				btree_lock_want(path, b->c.level);
675 
676 			if (t != BTREE_NODE_UNLOCKED) {
677 				btree_node_unlock(trans, path, b->c.level);
678 				six_lock_increment(&b->c.lock, (enum six_lock_type) t);
679 				mark_btree_node_locked(trans, path, b->c.level, t);
680 			}
681 
682 			bch2_btree_path_level_init(trans, path, b);
683 		}
684 
685 	bch2_trans_revalidate_updates_in_node(trans, b);
686 }
687 
bch2_trans_node_drop(struct btree_trans * trans,struct btree * b)688 void bch2_trans_node_drop(struct btree_trans *trans,
689 			  struct btree *b)
690 {
691 	struct btree_path *path;
692 	unsigned i, level = b->c.level;
693 
694 	trans_for_each_path(trans, path, i)
695 		if (path->l[level].b == b) {
696 			btree_node_unlock(trans, path, level);
697 			path->l[level].b = ERR_PTR(-BCH_ERR_no_btree_node_init);
698 		}
699 }
700 
701 /*
702  * A btree node has been modified in such a way as to invalidate iterators - fix
703  * them:
704  */
bch2_trans_node_reinit_iter(struct btree_trans * trans,struct btree * b)705 void bch2_trans_node_reinit_iter(struct btree_trans *trans, struct btree *b)
706 {
707 	struct btree_path *path;
708 	unsigned i;
709 
710 	trans_for_each_path_with_node(trans, b, path, i)
711 		__btree_path_level_init(path, b->c.level);
712 
713 	bch2_trans_revalidate_updates_in_node(trans, b);
714 }
715 
716 /* Btree path: traverse, set_pos: */
717 
btree_path_lock_root(struct btree_trans * trans,struct btree_path * path,unsigned depth_want,unsigned long trace_ip)718 static inline int btree_path_lock_root(struct btree_trans *trans,
719 				       struct btree_path *path,
720 				       unsigned depth_want,
721 				       unsigned long trace_ip)
722 {
723 	struct bch_fs *c = trans->c;
724 	struct btree_root *r = bch2_btree_id_root(c, path->btree_id);
725 	enum six_lock_type lock_type;
726 	unsigned i;
727 	int ret;
728 
729 	EBUG_ON(path->nodes_locked);
730 
731 	while (1) {
732 		struct btree *b = READ_ONCE(r->b);
733 		if (unlikely(!b)) {
734 			BUG_ON(!r->error);
735 			return r->error;
736 		}
737 
738 		path->level = READ_ONCE(b->c.level);
739 
740 		if (unlikely(path->level < depth_want)) {
741 			/*
742 			 * the root is at a lower depth than the depth we want:
743 			 * got to the end of the btree, or we're walking nodes
744 			 * greater than some depth and there are no nodes >=
745 			 * that depth
746 			 */
747 			path->level = depth_want;
748 			for (i = path->level; i < BTREE_MAX_DEPTH; i++)
749 				path->l[i].b = NULL;
750 			return 1;
751 		}
752 
753 		lock_type = __btree_lock_want(path, path->level);
754 		ret = btree_node_lock(trans, path, &b->c,
755 				      path->level, lock_type, trace_ip);
756 		if (unlikely(ret)) {
757 			if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
758 				return ret;
759 			BUG();
760 		}
761 
762 		if (likely(b == READ_ONCE(r->b) &&
763 			   b->c.level == path->level &&
764 			   !race_fault())) {
765 			for (i = 0; i < path->level; i++)
766 				path->l[i].b = ERR_PTR(-BCH_ERR_no_btree_node_lock_root);
767 			path->l[path->level].b = b;
768 			for (i = path->level + 1; i < BTREE_MAX_DEPTH; i++)
769 				path->l[i].b = NULL;
770 
771 			mark_btree_node_locked(trans, path, path->level,
772 					       (enum btree_node_locked_type) lock_type);
773 			bch2_btree_path_level_init(trans, path, b);
774 			return 0;
775 		}
776 
777 		six_unlock_type(&b->c.lock, lock_type);
778 	}
779 }
780 
781 noinline
btree_path_prefetch(struct btree_trans * trans,struct btree_path * path)782 static int btree_path_prefetch(struct btree_trans *trans, struct btree_path *path)
783 {
784 	struct bch_fs *c = trans->c;
785 	struct btree_path_level *l = path_l(path);
786 	struct btree_node_iter node_iter = l->iter;
787 	struct bkey_packed *k;
788 	struct bkey_buf tmp;
789 	unsigned nr = test_bit(BCH_FS_started, &c->flags)
790 		? (path->level > 1 ? 0 :  2)
791 		: (path->level > 1 ? 1 : 16);
792 	bool was_locked = btree_node_locked(path, path->level);
793 	int ret = 0;
794 
795 	bch2_bkey_buf_init(&tmp);
796 
797 	while (nr-- && !ret) {
798 		if (!bch2_btree_node_relock(trans, path, path->level))
799 			break;
800 
801 		bch2_btree_node_iter_advance(&node_iter, l->b);
802 		k = bch2_btree_node_iter_peek(&node_iter, l->b);
803 		if (!k)
804 			break;
805 
806 		bch2_bkey_buf_unpack(&tmp, c, l->b, k);
807 		ret = bch2_btree_node_prefetch(trans, path, tmp.k, path->btree_id,
808 					       path->level - 1);
809 	}
810 
811 	if (!was_locked)
812 		btree_node_unlock(trans, path, path->level);
813 
814 	bch2_bkey_buf_exit(&tmp, c);
815 	return ret;
816 }
817 
btree_path_prefetch_j(struct btree_trans * trans,struct btree_path * path,struct btree_and_journal_iter * jiter)818 static int btree_path_prefetch_j(struct btree_trans *trans, struct btree_path *path,
819 				 struct btree_and_journal_iter *jiter)
820 {
821 	struct bch_fs *c = trans->c;
822 	struct bkey_s_c k;
823 	struct bkey_buf tmp;
824 	unsigned nr = test_bit(BCH_FS_started, &c->flags)
825 		? (path->level > 1 ? 0 :  2)
826 		: (path->level > 1 ? 1 : 16);
827 	bool was_locked = btree_node_locked(path, path->level);
828 	int ret = 0;
829 
830 	bch2_bkey_buf_init(&tmp);
831 
832 	jiter->fail_if_too_many_whiteouts = true;
833 
834 	while (nr-- && !ret) {
835 		if (!bch2_btree_node_relock(trans, path, path->level))
836 			break;
837 
838 		bch2_btree_and_journal_iter_advance(jiter);
839 		k = bch2_btree_and_journal_iter_peek(jiter);
840 		if (!k.k)
841 			break;
842 
843 		bch2_bkey_buf_reassemble(&tmp, c, k);
844 		ret = bch2_btree_node_prefetch(trans, path, tmp.k, path->btree_id,
845 					       path->level - 1);
846 	}
847 
848 	if (!was_locked)
849 		btree_node_unlock(trans, path, path->level);
850 
851 	bch2_bkey_buf_exit(&tmp, c);
852 	return ret;
853 }
854 
btree_node_mem_ptr_set(struct btree_trans * trans,struct btree_path * path,unsigned plevel,struct btree * b)855 static noinline void btree_node_mem_ptr_set(struct btree_trans *trans,
856 					    struct btree_path *path,
857 					    unsigned plevel, struct btree *b)
858 {
859 	struct btree_path_level *l = &path->l[plevel];
860 	bool locked = btree_node_locked(path, plevel);
861 	struct bkey_packed *k;
862 	struct bch_btree_ptr_v2 *bp;
863 
864 	if (!bch2_btree_node_relock(trans, path, plevel))
865 		return;
866 
867 	k = bch2_btree_node_iter_peek_all(&l->iter, l->b);
868 	BUG_ON(k->type != KEY_TYPE_btree_ptr_v2);
869 
870 	bp = (void *) bkeyp_val(&l->b->format, k);
871 	bp->mem_ptr = (unsigned long)b;
872 
873 	if (!locked)
874 		btree_node_unlock(trans, path, plevel);
875 }
876 
btree_node_iter_and_journal_peek(struct btree_trans * trans,struct btree_path * path,unsigned flags,struct bkey_buf * out)877 static noinline int btree_node_iter_and_journal_peek(struct btree_trans *trans,
878 						     struct btree_path *path,
879 						     unsigned flags,
880 						     struct bkey_buf *out)
881 {
882 	struct bch_fs *c = trans->c;
883 	struct btree_path_level *l = path_l(path);
884 	struct btree_and_journal_iter jiter;
885 	struct bkey_s_c k;
886 	int ret = 0;
887 
888 	__bch2_btree_and_journal_iter_init_node_iter(trans, &jiter, l->b, l->iter, path->pos);
889 
890 	k = bch2_btree_and_journal_iter_peek(&jiter);
891 	if (!k.k) {
892 		struct printbuf buf = PRINTBUF;
893 
894 		prt_str(&buf, "node not found at pos ");
895 		bch2_bpos_to_text(&buf, path->pos);
896 		prt_str(&buf, " at btree ");
897 		bch2_btree_pos_to_text(&buf, c, l->b);
898 
899 		ret = bch2_fs_topology_error(c, "%s", buf.buf);
900 		printbuf_exit(&buf);
901 		goto err;
902 	}
903 
904 	bch2_bkey_buf_reassemble(out, c, k);
905 
906 	if ((flags & BTREE_ITER_prefetch) &&
907 	    c->opts.btree_node_prefetch)
908 		ret = btree_path_prefetch_j(trans, path, &jiter);
909 
910 err:
911 	bch2_btree_and_journal_iter_exit(&jiter);
912 	return ret;
913 }
914 
btree_path_down(struct btree_trans * trans,struct btree_path * path,unsigned flags,unsigned long trace_ip)915 static __always_inline int btree_path_down(struct btree_trans *trans,
916 					   struct btree_path *path,
917 					   unsigned flags,
918 					   unsigned long trace_ip)
919 {
920 	struct bch_fs *c = trans->c;
921 	struct btree_path_level *l = path_l(path);
922 	struct btree *b;
923 	unsigned level = path->level - 1;
924 	enum six_lock_type lock_type = __btree_lock_want(path, level);
925 	struct bkey_buf tmp;
926 	int ret;
927 
928 	EBUG_ON(!btree_node_locked(path, path->level));
929 
930 	bch2_bkey_buf_init(&tmp);
931 
932 	if (unlikely(trans->journal_replay_not_finished)) {
933 		ret = btree_node_iter_and_journal_peek(trans, path, flags, &tmp);
934 		if (ret)
935 			goto err;
936 	} else {
937 		struct bkey_packed *k = bch2_btree_node_iter_peek(&l->iter, l->b);
938 		if (!k) {
939 			struct printbuf buf = PRINTBUF;
940 
941 			prt_str(&buf, "node not found at pos ");
942 			bch2_bpos_to_text(&buf, path->pos);
943 			prt_str(&buf, " within parent node ");
944 			bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&l->b->key));
945 
946 			bch2_fs_fatal_error(c, "%s", buf.buf);
947 			printbuf_exit(&buf);
948 			ret = -BCH_ERR_btree_need_topology_repair;
949 			goto err;
950 		}
951 
952 		bch2_bkey_buf_unpack(&tmp, c, l->b, k);
953 
954 		if ((flags & BTREE_ITER_prefetch) &&
955 		    c->opts.btree_node_prefetch) {
956 			ret = btree_path_prefetch(trans, path);
957 			if (ret)
958 				goto err;
959 		}
960 	}
961 
962 	b = bch2_btree_node_get(trans, path, tmp.k, level, lock_type, trace_ip);
963 	ret = PTR_ERR_OR_ZERO(b);
964 	if (unlikely(ret))
965 		goto err;
966 
967 	if (likely(!trans->journal_replay_not_finished &&
968 		   tmp.k->k.type == KEY_TYPE_btree_ptr_v2) &&
969 	    unlikely(b != btree_node_mem_ptr(tmp.k)))
970 		btree_node_mem_ptr_set(trans, path, level + 1, b);
971 
972 	if (btree_node_read_locked(path, level + 1))
973 		btree_node_unlock(trans, path, level + 1);
974 
975 	mark_btree_node_locked(trans, path, level,
976 			       (enum btree_node_locked_type) lock_type);
977 	path->level = level;
978 	bch2_btree_path_level_init(trans, path, b);
979 
980 	bch2_btree_path_verify_locks(path);
981 err:
982 	bch2_bkey_buf_exit(&tmp, c);
983 	return ret;
984 }
985 
bch2_btree_path_traverse_all(struct btree_trans * trans)986 static int bch2_btree_path_traverse_all(struct btree_trans *trans)
987 {
988 	struct bch_fs *c = trans->c;
989 	struct btree_path *path;
990 	unsigned long trace_ip = _RET_IP_;
991 	unsigned i;
992 	int ret = 0;
993 
994 	if (trans->in_traverse_all)
995 		return -BCH_ERR_transaction_restart_in_traverse_all;
996 
997 	trans->in_traverse_all = true;
998 retry_all:
999 	trans->restarted = 0;
1000 	trans->last_restarted_ip = 0;
1001 
1002 	trans_for_each_path(trans, path, i)
1003 		path->should_be_locked = false;
1004 
1005 	btree_trans_sort_paths(trans);
1006 
1007 	bch2_trans_unlock(trans);
1008 	cond_resched();
1009 	trans_set_locked(trans, false);
1010 
1011 	if (unlikely(trans->memory_allocation_failure)) {
1012 		struct closure cl;
1013 
1014 		closure_init_stack(&cl);
1015 
1016 		do {
1017 			ret = bch2_btree_cache_cannibalize_lock(trans, &cl);
1018 			closure_sync(&cl);
1019 		} while (ret);
1020 	}
1021 
1022 	/* Now, redo traversals in correct order: */
1023 	i = 0;
1024 	while (i < trans->nr_sorted) {
1025 		btree_path_idx_t idx = trans->sorted[i];
1026 
1027 		/*
1028 		 * Traversing a path can cause another path to be added at about
1029 		 * the same position:
1030 		 */
1031 		if (trans->paths[idx].uptodate) {
1032 			__btree_path_get(trans, &trans->paths[idx], false);
1033 			ret = bch2_btree_path_traverse_one(trans, idx, 0, _THIS_IP_);
1034 			__btree_path_put(trans, &trans->paths[idx], false);
1035 
1036 			if (bch2_err_matches(ret, BCH_ERR_transaction_restart) ||
1037 			    bch2_err_matches(ret, ENOMEM))
1038 				goto retry_all;
1039 			if (ret)
1040 				goto err;
1041 		} else {
1042 			i++;
1043 		}
1044 	}
1045 
1046 	/*
1047 	 * We used to assert that all paths had been traversed here
1048 	 * (path->uptodate < BTREE_ITER_NEED_TRAVERSE); however, since
1049 	 * path->should_be_locked is not set yet, we might have unlocked and
1050 	 * then failed to relock a path - that's fine.
1051 	 */
1052 err:
1053 	bch2_btree_cache_cannibalize_unlock(trans);
1054 
1055 	trans->in_traverse_all = false;
1056 
1057 	trace_and_count(c, trans_traverse_all, trans, trace_ip);
1058 	return ret;
1059 }
1060 
btree_path_check_pos_in_node(struct btree_path * path,unsigned l,int check_pos)1061 static inline bool btree_path_check_pos_in_node(struct btree_path *path,
1062 						unsigned l, int check_pos)
1063 {
1064 	if (check_pos < 0 && btree_path_pos_before_node(path, path->l[l].b))
1065 		return false;
1066 	if (check_pos > 0 && btree_path_pos_after_node(path, path->l[l].b))
1067 		return false;
1068 	return true;
1069 }
1070 
btree_path_good_node(struct btree_trans * trans,struct btree_path * path,unsigned l,int check_pos)1071 static inline bool btree_path_good_node(struct btree_trans *trans,
1072 					struct btree_path *path,
1073 					unsigned l, int check_pos)
1074 {
1075 	return is_btree_node(path, l) &&
1076 		bch2_btree_node_relock(trans, path, l) &&
1077 		btree_path_check_pos_in_node(path, l, check_pos);
1078 }
1079 
btree_path_set_level_down(struct btree_trans * trans,struct btree_path * path,unsigned new_level)1080 static void btree_path_set_level_down(struct btree_trans *trans,
1081 				      struct btree_path *path,
1082 				      unsigned new_level)
1083 {
1084 	unsigned l;
1085 
1086 	path->level = new_level;
1087 
1088 	for (l = path->level + 1; l < BTREE_MAX_DEPTH; l++)
1089 		if (btree_lock_want(path, l) == BTREE_NODE_UNLOCKED)
1090 			btree_node_unlock(trans, path, l);
1091 
1092 	btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
1093 	bch2_btree_path_verify(trans, path);
1094 }
1095 
__btree_path_up_until_good_node(struct btree_trans * trans,struct btree_path * path,int check_pos)1096 static noinline unsigned __btree_path_up_until_good_node(struct btree_trans *trans,
1097 							 struct btree_path *path,
1098 							 int check_pos)
1099 {
1100 	unsigned i, l = path->level;
1101 again:
1102 	while (btree_path_node(path, l) &&
1103 	       !btree_path_good_node(trans, path, l, check_pos))
1104 		__btree_path_set_level_up(trans, path, l++);
1105 
1106 	/* If we need intent locks, take them too: */
1107 	for (i = l + 1;
1108 	     i < path->locks_want && btree_path_node(path, i);
1109 	     i++)
1110 		if (!bch2_btree_node_relock(trans, path, i)) {
1111 			while (l <= i)
1112 				__btree_path_set_level_up(trans, path, l++);
1113 			goto again;
1114 		}
1115 
1116 	return l;
1117 }
1118 
btree_path_up_until_good_node(struct btree_trans * trans,struct btree_path * path,int check_pos)1119 static inline unsigned btree_path_up_until_good_node(struct btree_trans *trans,
1120 						     struct btree_path *path,
1121 						     int check_pos)
1122 {
1123 	return likely(btree_node_locked(path, path->level) &&
1124 		      btree_path_check_pos_in_node(path, path->level, check_pos))
1125 		? path->level
1126 		: __btree_path_up_until_good_node(trans, path, check_pos);
1127 }
1128 
1129 /*
1130  * This is the main state machine for walking down the btree - walks down to a
1131  * specified depth
1132  *
1133  * Returns 0 on success, -EIO on error (error reading in a btree node).
1134  *
1135  * On error, caller (peek_node()/peek_key()) must return NULL; the error is
1136  * stashed in the iterator and returned from bch2_trans_exit().
1137  */
bch2_btree_path_traverse_one(struct btree_trans * trans,btree_path_idx_t path_idx,unsigned flags,unsigned long trace_ip)1138 int bch2_btree_path_traverse_one(struct btree_trans *trans,
1139 				 btree_path_idx_t path_idx,
1140 				 unsigned flags,
1141 				 unsigned long trace_ip)
1142 {
1143 	struct btree_path *path = &trans->paths[path_idx];
1144 	unsigned depth_want = path->level;
1145 	int ret = -((int) trans->restarted);
1146 
1147 	if (unlikely(ret))
1148 		goto out;
1149 
1150 	if (unlikely(!trans->srcu_held))
1151 		bch2_trans_srcu_lock(trans);
1152 
1153 	trace_btree_path_traverse_start(trans, path);
1154 
1155 	/*
1156 	 * Ensure we obey path->should_be_locked: if it's set, we can't unlock
1157 	 * and re-traverse the path without a transaction restart:
1158 	 */
1159 	if (path->should_be_locked) {
1160 		ret = bch2_btree_path_relock(trans, path, trace_ip);
1161 		goto out;
1162 	}
1163 
1164 	if (path->cached) {
1165 		ret = bch2_btree_path_traverse_cached(trans, path, flags);
1166 		goto out;
1167 	}
1168 
1169 	path = &trans->paths[path_idx];
1170 
1171 	if (unlikely(path->level >= BTREE_MAX_DEPTH))
1172 		goto out_uptodate;
1173 
1174 	path->level = btree_path_up_until_good_node(trans, path, 0);
1175 	unsigned max_level = path->level;
1176 
1177 	EBUG_ON(btree_path_node(path, path->level) &&
1178 		!btree_node_locked(path, path->level));
1179 
1180 	/*
1181 	 * Note: path->nodes[path->level] may be temporarily NULL here - that
1182 	 * would indicate to other code that we got to the end of the btree,
1183 	 * here it indicates that relocking the root failed - it's critical that
1184 	 * btree_path_lock_root() comes next and that it can't fail
1185 	 */
1186 	while (path->level > depth_want) {
1187 		ret = btree_path_node(path, path->level)
1188 			? btree_path_down(trans, path, flags, trace_ip)
1189 			: btree_path_lock_root(trans, path, depth_want, trace_ip);
1190 		if (unlikely(ret)) {
1191 			if (ret == 1) {
1192 				/*
1193 				 * No nodes at this level - got to the end of
1194 				 * the btree:
1195 				 */
1196 				ret = 0;
1197 				goto out;
1198 			}
1199 
1200 			__bch2_btree_path_unlock(trans, path);
1201 			path->level = depth_want;
1202 			path->l[path->level].b = ERR_PTR(ret);
1203 			goto out;
1204 		}
1205 	}
1206 
1207 	if (unlikely(max_level > path->level)) {
1208 		struct btree_path *linked;
1209 		unsigned iter;
1210 
1211 		trans_for_each_path_with_node(trans, path_l(path)->b, linked, iter)
1212 			for (unsigned j = path->level + 1; j < max_level; j++)
1213 				linked->l[j] = path->l[j];
1214 	}
1215 
1216 out_uptodate:
1217 	path->uptodate = BTREE_ITER_UPTODATE;
1218 	trace_btree_path_traverse_end(trans, path);
1219 out:
1220 	if (bch2_err_matches(ret, BCH_ERR_transaction_restart) != !!trans->restarted)
1221 		panic("ret %s (%i) trans->restarted %s (%i)\n",
1222 		      bch2_err_str(ret), ret,
1223 		      bch2_err_str(trans->restarted), trans->restarted);
1224 	bch2_btree_path_verify(trans, path);
1225 	return ret;
1226 }
1227 
btree_path_copy(struct btree_trans * trans,struct btree_path * dst,struct btree_path * src)1228 static inline void btree_path_copy(struct btree_trans *trans, struct btree_path *dst,
1229 			    struct btree_path *src)
1230 {
1231 	unsigned i, offset = offsetof(struct btree_path, pos);
1232 
1233 	memcpy((void *) dst + offset,
1234 	       (void *) src + offset,
1235 	       sizeof(struct btree_path) - offset);
1236 
1237 	for (i = 0; i < BTREE_MAX_DEPTH; i++) {
1238 		unsigned t = btree_node_locked_type(dst, i);
1239 
1240 		if (t != BTREE_NODE_UNLOCKED)
1241 			six_lock_increment(&dst->l[i].b->c.lock, t);
1242 	}
1243 }
1244 
btree_path_clone(struct btree_trans * trans,btree_path_idx_t src,bool intent,unsigned long ip)1245 static btree_path_idx_t btree_path_clone(struct btree_trans *trans, btree_path_idx_t src,
1246 					 bool intent, unsigned long ip)
1247 {
1248 	btree_path_idx_t new = btree_path_alloc(trans, src);
1249 	btree_path_copy(trans, trans->paths + new, trans->paths + src);
1250 	__btree_path_get(trans, trans->paths + new, intent);
1251 #ifdef TRACK_PATH_ALLOCATED
1252 	trans->paths[new].ip_allocated = ip;
1253 #endif
1254 	return new;
1255 }
1256 
1257 __flatten
__bch2_btree_path_make_mut(struct btree_trans * trans,btree_path_idx_t path,bool intent,unsigned long ip)1258 btree_path_idx_t __bch2_btree_path_make_mut(struct btree_trans *trans,
1259 			btree_path_idx_t path, bool intent, unsigned long ip)
1260 {
1261 	struct btree_path *old = trans->paths + path;
1262 	__btree_path_put(trans, trans->paths + path, intent);
1263 	path = btree_path_clone(trans, path, intent, ip);
1264 	trace_btree_path_clone(trans, old, trans->paths + path);
1265 	trans->paths[path].preserve = false;
1266 	return path;
1267 }
1268 
1269 btree_path_idx_t __must_check
__bch2_btree_path_set_pos(struct btree_trans * trans,btree_path_idx_t path_idx,struct bpos new_pos,bool intent,unsigned long ip)1270 __bch2_btree_path_set_pos(struct btree_trans *trans,
1271 			  btree_path_idx_t path_idx, struct bpos new_pos,
1272 			  bool intent, unsigned long ip)
1273 {
1274 	int cmp = bpos_cmp(new_pos, trans->paths[path_idx].pos);
1275 
1276 	bch2_trans_verify_not_unlocked_or_in_restart(trans);
1277 	EBUG_ON(!trans->paths[path_idx].ref);
1278 
1279 	trace_btree_path_set_pos(trans, trans->paths + path_idx, &new_pos);
1280 
1281 	path_idx = bch2_btree_path_make_mut(trans, path_idx, intent, ip);
1282 
1283 	struct btree_path *path = trans->paths + path_idx;
1284 	path->pos		= new_pos;
1285 	trans->paths_sorted	= false;
1286 
1287 	if (unlikely(path->cached)) {
1288 		btree_node_unlock(trans, path, 0);
1289 		path->l[0].b = ERR_PTR(-BCH_ERR_no_btree_node_up);
1290 		btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
1291 		goto out;
1292 	}
1293 
1294 	unsigned level = btree_path_up_until_good_node(trans, path, cmp);
1295 
1296 	if (btree_path_node(path, level)) {
1297 		struct btree_path_level *l = &path->l[level];
1298 
1299 		BUG_ON(!btree_node_locked(path, level));
1300 		/*
1301 		 * We might have to skip over many keys, or just a few: try
1302 		 * advancing the node iterator, and if we have to skip over too
1303 		 * many keys just reinit it (or if we're rewinding, since that
1304 		 * is expensive).
1305 		 */
1306 		if (cmp < 0 ||
1307 		    !btree_path_advance_to_pos(path, l, 8))
1308 			bch2_btree_node_iter_init(&l->iter, l->b, &path->pos);
1309 
1310 		/*
1311 		 * Iterators to interior nodes should always be pointed at the first non
1312 		 * whiteout:
1313 		 */
1314 		if (unlikely(level))
1315 			bch2_btree_node_iter_peek(&l->iter, l->b);
1316 	}
1317 
1318 	if (unlikely(level != path->level)) {
1319 		btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
1320 		__bch2_btree_path_unlock(trans, path);
1321 	}
1322 out:
1323 	bch2_btree_path_verify(trans, path);
1324 	return path_idx;
1325 }
1326 
1327 /* Btree path: main interface: */
1328 
have_path_at_pos(struct btree_trans * trans,struct btree_path * path)1329 static struct btree_path *have_path_at_pos(struct btree_trans *trans, struct btree_path *path)
1330 {
1331 	struct btree_path *sib;
1332 
1333 	sib = prev_btree_path(trans, path);
1334 	if (sib && !btree_path_cmp(sib, path))
1335 		return sib;
1336 
1337 	sib = next_btree_path(trans, path);
1338 	if (sib && !btree_path_cmp(sib, path))
1339 		return sib;
1340 
1341 	return NULL;
1342 }
1343 
have_node_at_pos(struct btree_trans * trans,struct btree_path * path)1344 static struct btree_path *have_node_at_pos(struct btree_trans *trans, struct btree_path *path)
1345 {
1346 	struct btree_path *sib;
1347 
1348 	sib = prev_btree_path(trans, path);
1349 	if (sib && sib->level == path->level && path_l(sib)->b == path_l(path)->b)
1350 		return sib;
1351 
1352 	sib = next_btree_path(trans, path);
1353 	if (sib && sib->level == path->level && path_l(sib)->b == path_l(path)->b)
1354 		return sib;
1355 
1356 	return NULL;
1357 }
1358 
__bch2_path_free(struct btree_trans * trans,btree_path_idx_t path)1359 static inline void __bch2_path_free(struct btree_trans *trans, btree_path_idx_t path)
1360 {
1361 	__bch2_btree_path_unlock(trans, trans->paths + path);
1362 	btree_path_list_remove(trans, trans->paths + path);
1363 	__clear_bit(path, trans->paths_allocated);
1364 }
1365 
bch2_btree_path_can_relock(struct btree_trans * trans,struct btree_path * path)1366 static bool bch2_btree_path_can_relock(struct btree_trans *trans, struct btree_path *path)
1367 {
1368 	unsigned l = path->level;
1369 
1370 	do {
1371 		if (!btree_path_node(path, l))
1372 			break;
1373 
1374 		if (!is_btree_node(path, l))
1375 			return false;
1376 
1377 		if (path->l[l].lock_seq != path->l[l].b->c.lock.seq)
1378 			return false;
1379 
1380 		l++;
1381 	} while (l < path->locks_want);
1382 
1383 	return true;
1384 }
1385 
bch2_path_put(struct btree_trans * trans,btree_path_idx_t path_idx,bool intent)1386 void bch2_path_put(struct btree_trans *trans, btree_path_idx_t path_idx, bool intent)
1387 {
1388 	struct btree_path *path = trans->paths + path_idx, *dup;
1389 
1390 	if (!__btree_path_put(trans, path, intent))
1391 		return;
1392 
1393 	dup = path->preserve
1394 		? have_path_at_pos(trans, path)
1395 		: have_node_at_pos(trans, path);
1396 
1397 	trace_btree_path_free(trans, path_idx, dup);
1398 
1399 	if (!dup && !(!path->preserve && !is_btree_node(path, path->level)))
1400 		return;
1401 
1402 	if (path->should_be_locked && !trans->restarted) {
1403 		if (!dup)
1404 			return;
1405 
1406 		if (!(trans->locked
1407 		      ? bch2_btree_path_relock_norestart(trans, dup)
1408 		      : bch2_btree_path_can_relock(trans, dup)))
1409 			return;
1410 	}
1411 
1412 	if (dup) {
1413 		dup->preserve		|= path->preserve;
1414 		dup->should_be_locked	|= path->should_be_locked;
1415 	}
1416 
1417 	__bch2_path_free(trans, path_idx);
1418 }
1419 
bch2_path_put_nokeep(struct btree_trans * trans,btree_path_idx_t path,bool intent)1420 static void bch2_path_put_nokeep(struct btree_trans *trans, btree_path_idx_t path,
1421 				 bool intent)
1422 {
1423 	if (!__btree_path_put(trans, trans->paths + path, intent))
1424 		return;
1425 
1426 	__bch2_path_free(trans, path);
1427 }
1428 
bch2_trans_restart_error(struct btree_trans * trans,u32 restart_count)1429 void __noreturn bch2_trans_restart_error(struct btree_trans *trans, u32 restart_count)
1430 {
1431 	panic("trans->restart_count %u, should be %u, last restarted by %pS\n",
1432 	      trans->restart_count, restart_count,
1433 	      (void *) trans->last_begin_ip);
1434 }
1435 
bch2_trans_in_restart_error(struct btree_trans * trans)1436 static void __noreturn bch2_trans_in_restart_error(struct btree_trans *trans)
1437 {
1438 #ifdef CONFIG_BCACHEFS_DEBUG
1439 	struct printbuf buf = PRINTBUF;
1440 	bch2_prt_backtrace(&buf, &trans->last_restarted_trace);
1441 	panic("in transaction restart: %s, last restarted by\n%s",
1442 	      bch2_err_str(trans->restarted),
1443 	      buf.buf);
1444 #else
1445 	panic("in transaction restart: %s, last restarted by %pS\n",
1446 	      bch2_err_str(trans->restarted),
1447 	      (void *) trans->last_restarted_ip);
1448 #endif
1449 }
1450 
bch2_trans_unlocked_or_in_restart_error(struct btree_trans * trans)1451 void __noreturn bch2_trans_unlocked_or_in_restart_error(struct btree_trans *trans)
1452 {
1453 	if (trans->restarted)
1454 		bch2_trans_in_restart_error(trans);
1455 
1456 	if (!trans->locked)
1457 		panic("trans should be locked, unlocked by %pS\n",
1458 		      (void *) trans->last_unlock_ip);
1459 
1460 	BUG();
1461 }
1462 
1463 noinline __cold
bch2_trans_updates_to_text(struct printbuf * buf,struct btree_trans * trans)1464 void bch2_trans_updates_to_text(struct printbuf *buf, struct btree_trans *trans)
1465 {
1466 	prt_printf(buf, "%u transaction updates for %s journal seq %llu\n",
1467 		   trans->nr_updates, trans->fn, trans->journal_res.seq);
1468 	printbuf_indent_add(buf, 2);
1469 
1470 	trans_for_each_update(trans, i) {
1471 		struct bkey_s_c old = { &i->old_k, i->old_v };
1472 
1473 		prt_str(buf, "update: btree=");
1474 		bch2_btree_id_to_text(buf, i->btree_id);
1475 		prt_printf(buf, " cached=%u %pS\n",
1476 			   i->cached,
1477 			   (void *) i->ip_allocated);
1478 
1479 		prt_printf(buf, "  old ");
1480 		bch2_bkey_val_to_text(buf, trans->c, old);
1481 		prt_newline(buf);
1482 
1483 		prt_printf(buf, "  new ");
1484 		bch2_bkey_val_to_text(buf, trans->c, bkey_i_to_s_c(i->k));
1485 		prt_newline(buf);
1486 	}
1487 
1488 	for (struct jset_entry *e = trans->journal_entries;
1489 	     e != btree_trans_journal_entries_top(trans);
1490 	     e = vstruct_next(e))
1491 		bch2_journal_entry_to_text(buf, trans->c, e);
1492 
1493 	printbuf_indent_sub(buf, 2);
1494 }
1495 
1496 noinline __cold
bch2_dump_trans_updates(struct btree_trans * trans)1497 void bch2_dump_trans_updates(struct btree_trans *trans)
1498 {
1499 	struct printbuf buf = PRINTBUF;
1500 
1501 	bch2_trans_updates_to_text(&buf, trans);
1502 	bch2_print_str(trans->c, buf.buf);
1503 	printbuf_exit(&buf);
1504 }
1505 
bch2_btree_path_to_text_short(struct printbuf * out,struct btree_trans * trans,btree_path_idx_t path_idx)1506 static void bch2_btree_path_to_text_short(struct printbuf *out, struct btree_trans *trans, btree_path_idx_t path_idx)
1507 {
1508 	struct btree_path *path = trans->paths + path_idx;
1509 
1510 	prt_printf(out, "path: idx %3u ref %u:%u %c %c %c ",
1511 		   path_idx, path->ref, path->intent_ref,
1512 		   path->preserve ? 'P' : ' ',
1513 		   path->should_be_locked ? 'S' : ' ',
1514 		   path->cached ? 'C' : 'B');
1515 	bch2_btree_id_level_to_text(out, path->btree_id, path->level);
1516 	prt_str(out, " pos ");
1517 	bch2_bpos_to_text(out, path->pos);
1518 
1519 	if (!path->cached && btree_node_locked(path, path->level)) {
1520 		prt_char(out, ' ');
1521 		struct btree *b = path_l(path)->b;
1522 		bch2_bpos_to_text(out, b->data->min_key);
1523 		prt_char(out, '-');
1524 		bch2_bpos_to_text(out, b->key.k.p);
1525 	}
1526 
1527 #ifdef TRACK_PATH_ALLOCATED
1528 	prt_printf(out, " %pS", (void *) path->ip_allocated);
1529 #endif
1530 }
1531 
btree_node_locked_str(enum btree_node_locked_type t)1532 static const char *btree_node_locked_str(enum btree_node_locked_type t)
1533 {
1534 	switch (t) {
1535 	case BTREE_NODE_UNLOCKED:
1536 		return "unlocked";
1537 	case BTREE_NODE_READ_LOCKED:
1538 		return "read";
1539 	case BTREE_NODE_INTENT_LOCKED:
1540 		return "intent";
1541 	case BTREE_NODE_WRITE_LOCKED:
1542 		return "write";
1543 	default:
1544 		return NULL;
1545 	}
1546 }
1547 
bch2_btree_path_to_text(struct printbuf * out,struct btree_trans * trans,btree_path_idx_t path_idx)1548 void bch2_btree_path_to_text(struct printbuf *out, struct btree_trans *trans, btree_path_idx_t path_idx)
1549 {
1550 	bch2_btree_path_to_text_short(out, trans, path_idx);
1551 
1552 	struct btree_path *path = trans->paths + path_idx;
1553 
1554 	prt_printf(out, " uptodate %u locks_want %u", path->uptodate, path->locks_want);
1555 	prt_newline(out);
1556 
1557 	printbuf_indent_add(out, 2);
1558 	for (unsigned l = 0; l < BTREE_MAX_DEPTH; l++) {
1559 		prt_printf(out, "l=%u locks %s seq %u node ", l,
1560 			   btree_node_locked_str(btree_node_locked_type(path, l)),
1561 			   path->l[l].lock_seq);
1562 
1563 		int ret = PTR_ERR_OR_ZERO(path->l[l].b);
1564 		if (ret)
1565 			prt_str(out, bch2_err_str(ret));
1566 		else
1567 			prt_printf(out, "%px", path->l[l].b);
1568 		prt_newline(out);
1569 	}
1570 	printbuf_indent_sub(out, 2);
1571 }
1572 
1573 static noinline __cold
__bch2_trans_paths_to_text(struct printbuf * out,struct btree_trans * trans,bool nosort)1574 void __bch2_trans_paths_to_text(struct printbuf *out, struct btree_trans *trans,
1575 				bool nosort)
1576 {
1577 	struct trans_for_each_path_inorder_iter iter;
1578 
1579 	if (!nosort)
1580 		btree_trans_sort_paths(trans);
1581 
1582 	trans_for_each_path_idx_inorder(trans, iter) {
1583 		bch2_btree_path_to_text_short(out, trans, iter.path_idx);
1584 		prt_newline(out);
1585 	}
1586 }
1587 
1588 noinline __cold
bch2_trans_paths_to_text(struct printbuf * out,struct btree_trans * trans)1589 void bch2_trans_paths_to_text(struct printbuf *out, struct btree_trans *trans)
1590 {
1591 	__bch2_trans_paths_to_text(out, trans, false);
1592 }
1593 
1594 static noinline __cold
__bch2_dump_trans_paths_updates(struct btree_trans * trans,bool nosort)1595 void __bch2_dump_trans_paths_updates(struct btree_trans *trans, bool nosort)
1596 {
1597 	struct printbuf buf = PRINTBUF;
1598 
1599 	__bch2_trans_paths_to_text(&buf, trans, nosort);
1600 	bch2_trans_updates_to_text(&buf, trans);
1601 
1602 	bch2_print_str(trans->c, buf.buf);
1603 	printbuf_exit(&buf);
1604 }
1605 
1606 noinline __cold
bch2_dump_trans_paths_updates(struct btree_trans * trans)1607 void bch2_dump_trans_paths_updates(struct btree_trans *trans)
1608 {
1609 	__bch2_dump_trans_paths_updates(trans, false);
1610 }
1611 
1612 noinline __cold
bch2_trans_update_max_paths(struct btree_trans * trans)1613 static void bch2_trans_update_max_paths(struct btree_trans *trans)
1614 {
1615 	struct btree_transaction_stats *s = btree_trans_stats(trans);
1616 	struct printbuf buf = PRINTBUF;
1617 	size_t nr = bitmap_weight(trans->paths_allocated, trans->nr_paths);
1618 
1619 	bch2_trans_paths_to_text(&buf, trans);
1620 
1621 	if (!buf.allocation_failure) {
1622 		mutex_lock(&s->lock);
1623 		if (nr > s->nr_max_paths) {
1624 			s->nr_max_paths = nr;
1625 			swap(s->max_paths_text, buf.buf);
1626 		}
1627 		mutex_unlock(&s->lock);
1628 	}
1629 
1630 	printbuf_exit(&buf);
1631 
1632 	trans->nr_paths_max = nr;
1633 }
1634 
1635 noinline __cold
__bch2_btree_trans_too_many_iters(struct btree_trans * trans)1636 int __bch2_btree_trans_too_many_iters(struct btree_trans *trans)
1637 {
1638 	if (trace_trans_restart_too_many_iters_enabled()) {
1639 		struct printbuf buf = PRINTBUF;
1640 
1641 		bch2_trans_paths_to_text(&buf, trans);
1642 		trace_trans_restart_too_many_iters(trans, _THIS_IP_, buf.buf);
1643 		printbuf_exit(&buf);
1644 	}
1645 
1646 	count_event(trans->c, trans_restart_too_many_iters);
1647 
1648 	return btree_trans_restart(trans, BCH_ERR_transaction_restart_too_many_iters);
1649 }
1650 
btree_path_overflow(struct btree_trans * trans)1651 static noinline void btree_path_overflow(struct btree_trans *trans)
1652 {
1653 	bch2_dump_trans_paths_updates(trans);
1654 	bch_err(trans->c, "trans path overflow");
1655 }
1656 
btree_paths_realloc(struct btree_trans * trans)1657 static noinline void btree_paths_realloc(struct btree_trans *trans)
1658 {
1659 	unsigned nr = trans->nr_paths * 2;
1660 
1661 	void *p = kvzalloc(BITS_TO_LONGS(nr) * sizeof(unsigned long) +
1662 			  sizeof(struct btree_trans_paths) +
1663 			  nr * sizeof(struct btree_path) +
1664 			  nr * sizeof(btree_path_idx_t) + 8 +
1665 			  nr * sizeof(struct btree_insert_entry), GFP_KERNEL|__GFP_NOFAIL);
1666 
1667 	unsigned long *paths_allocated = p;
1668 	memcpy(paths_allocated, trans->paths_allocated, BITS_TO_LONGS(trans->nr_paths) * sizeof(unsigned long));
1669 	p += BITS_TO_LONGS(nr) * sizeof(unsigned long);
1670 
1671 	p += sizeof(struct btree_trans_paths);
1672 	struct btree_path *paths = p;
1673 	*trans_paths_nr(paths) = nr;
1674 	memcpy(paths, trans->paths, trans->nr_paths * sizeof(struct btree_path));
1675 	p += nr * sizeof(struct btree_path);
1676 
1677 	btree_path_idx_t *sorted = p;
1678 	memcpy(sorted, trans->sorted, trans->nr_sorted * sizeof(btree_path_idx_t));
1679 	p += nr * sizeof(btree_path_idx_t) + 8;
1680 
1681 	struct btree_insert_entry *updates = p;
1682 	memcpy(updates, trans->updates, trans->nr_paths * sizeof(struct btree_insert_entry));
1683 
1684 	unsigned long *old = trans->paths_allocated;
1685 
1686 	rcu_assign_pointer(trans->paths_allocated,	paths_allocated);
1687 	rcu_assign_pointer(trans->paths,		paths);
1688 	rcu_assign_pointer(trans->sorted,		sorted);
1689 	rcu_assign_pointer(trans->updates,		updates);
1690 
1691 	trans->nr_paths		= nr;
1692 
1693 	if (old != trans->_paths_allocated)
1694 		kfree_rcu_mightsleep(old);
1695 }
1696 
btree_path_alloc(struct btree_trans * trans,btree_path_idx_t pos)1697 static inline btree_path_idx_t btree_path_alloc(struct btree_trans *trans,
1698 						btree_path_idx_t pos)
1699 {
1700 	btree_path_idx_t idx = find_first_zero_bit(trans->paths_allocated, trans->nr_paths);
1701 
1702 	if (unlikely(idx == trans->nr_paths)) {
1703 		if (trans->nr_paths == BTREE_ITER_MAX) {
1704 			btree_path_overflow(trans);
1705 			return 0;
1706 		}
1707 
1708 		btree_paths_realloc(trans);
1709 	}
1710 
1711 	/*
1712 	 * Do this before marking the new path as allocated, since it won't be
1713 	 * initialized yet:
1714 	 */
1715 	if (unlikely(idx > trans->nr_paths_max))
1716 		bch2_trans_update_max_paths(trans);
1717 
1718 	__set_bit(idx, trans->paths_allocated);
1719 
1720 	struct btree_path *path = &trans->paths[idx];
1721 	path->ref		= 0;
1722 	path->intent_ref	= 0;
1723 	path->nodes_locked	= 0;
1724 
1725 	btree_path_list_add(trans, pos, idx);
1726 	trans->paths_sorted = false;
1727 	return idx;
1728 }
1729 
bch2_path_get(struct btree_trans * trans,enum btree_id btree_id,struct bpos pos,unsigned locks_want,unsigned level,unsigned flags,unsigned long ip)1730 btree_path_idx_t bch2_path_get(struct btree_trans *trans,
1731 			     enum btree_id btree_id, struct bpos pos,
1732 			     unsigned locks_want, unsigned level,
1733 			     unsigned flags, unsigned long ip)
1734 {
1735 	struct btree_path *path;
1736 	bool cached = flags & BTREE_ITER_cached;
1737 	bool intent = flags & BTREE_ITER_intent;
1738 	struct trans_for_each_path_inorder_iter iter;
1739 	btree_path_idx_t path_pos = 0, path_idx;
1740 
1741 	bch2_trans_verify_not_unlocked_or_in_restart(trans);
1742 	bch2_trans_verify_locks(trans);
1743 
1744 	btree_trans_sort_paths(trans);
1745 
1746 	trans_for_each_path_inorder(trans, path, iter) {
1747 		if (__btree_path_cmp(path,
1748 				     btree_id,
1749 				     cached,
1750 				     pos,
1751 				     level) > 0)
1752 			break;
1753 
1754 		path_pos = iter.path_idx;
1755 	}
1756 
1757 	if (path_pos &&
1758 	    trans->paths[path_pos].cached	== cached &&
1759 	    trans->paths[path_pos].btree_id	== btree_id &&
1760 	    trans->paths[path_pos].level	== level) {
1761 		trace_btree_path_get(trans, trans->paths + path_pos, &pos);
1762 
1763 		__btree_path_get(trans, trans->paths + path_pos, intent);
1764 		path_idx = bch2_btree_path_set_pos(trans, path_pos, pos, intent, ip);
1765 		path = trans->paths + path_idx;
1766 	} else {
1767 		path_idx = btree_path_alloc(trans, path_pos);
1768 		path = trans->paths + path_idx;
1769 
1770 		__btree_path_get(trans, path, intent);
1771 		path->pos			= pos;
1772 		path->btree_id			= btree_id;
1773 		path->cached			= cached;
1774 		path->uptodate			= BTREE_ITER_NEED_TRAVERSE;
1775 		path->should_be_locked		= false;
1776 		path->level			= level;
1777 		path->locks_want		= locks_want;
1778 		path->nodes_locked		= 0;
1779 		for (unsigned i = 0; i < ARRAY_SIZE(path->l); i++)
1780 			path->l[i].b		= ERR_PTR(-BCH_ERR_no_btree_node_init);
1781 #ifdef TRACK_PATH_ALLOCATED
1782 		path->ip_allocated		= ip;
1783 #endif
1784 		trans->paths_sorted		= false;
1785 
1786 		trace_btree_path_alloc(trans, path);
1787 	}
1788 
1789 	if (!(flags & BTREE_ITER_nopreserve))
1790 		path->preserve = true;
1791 
1792 	if (path->intent_ref)
1793 		locks_want = max(locks_want, level + 1);
1794 
1795 	/*
1796 	 * If the path has locks_want greater than requested, we don't downgrade
1797 	 * it here - on transaction restart because btree node split needs to
1798 	 * upgrade locks, we might be putting/getting the iterator again.
1799 	 * Downgrading iterators only happens via bch2_trans_downgrade(), after
1800 	 * a successful transaction commit.
1801 	 */
1802 
1803 	locks_want = min(locks_want, BTREE_MAX_DEPTH);
1804 	if (locks_want > path->locks_want)
1805 		bch2_btree_path_upgrade_noupgrade_sibs(trans, path, locks_want, NULL);
1806 
1807 	return path_idx;
1808 }
1809 
bch2_path_get_unlocked_mut(struct btree_trans * trans,enum btree_id btree_id,unsigned level,struct bpos pos)1810 btree_path_idx_t bch2_path_get_unlocked_mut(struct btree_trans *trans,
1811 					    enum btree_id btree_id,
1812 					    unsigned level,
1813 					    struct bpos pos)
1814 {
1815 	btree_path_idx_t path_idx = bch2_path_get(trans, btree_id, pos, level + 1, level,
1816 			     BTREE_ITER_nopreserve|
1817 			     BTREE_ITER_intent, _RET_IP_);
1818 	path_idx = bch2_btree_path_make_mut(trans, path_idx, true, _RET_IP_);
1819 
1820 	struct btree_path *path = trans->paths + path_idx;
1821 	bch2_btree_path_downgrade(trans, path);
1822 	__bch2_btree_path_unlock(trans, path);
1823 	return path_idx;
1824 }
1825 
bch2_btree_path_peek_slot(struct btree_path * path,struct bkey * u)1826 struct bkey_s_c bch2_btree_path_peek_slot(struct btree_path *path, struct bkey *u)
1827 {
1828 
1829 	struct btree_path_level *l = path_l(path);
1830 	struct bkey_packed *_k;
1831 	struct bkey_s_c k;
1832 
1833 	if (unlikely(!l->b))
1834 		return bkey_s_c_null;
1835 
1836 	EBUG_ON(path->uptodate != BTREE_ITER_UPTODATE);
1837 	EBUG_ON(!btree_node_locked(path, path->level));
1838 
1839 	if (!path->cached) {
1840 		_k = bch2_btree_node_iter_peek_all(&l->iter, l->b);
1841 		k = _k ? bkey_disassemble(l->b, _k, u) : bkey_s_c_null;
1842 
1843 		EBUG_ON(k.k && bkey_deleted(k.k) && bpos_eq(k.k->p, path->pos));
1844 
1845 		if (!k.k || !bpos_eq(path->pos, k.k->p))
1846 			goto hole;
1847 	} else {
1848 		struct bkey_cached *ck = (void *) path->l[0].b;
1849 		if (!ck)
1850 			return bkey_s_c_null;
1851 
1852 		EBUG_ON(path->btree_id != ck->key.btree_id ||
1853 			!bkey_eq(path->pos, ck->key.pos));
1854 
1855 		*u = ck->k->k;
1856 		k = (struct bkey_s_c) { u, &ck->k->v };
1857 	}
1858 
1859 	return k;
1860 hole:
1861 	bkey_init(u);
1862 	u->p = path->pos;
1863 	return (struct bkey_s_c) { u, NULL };
1864 }
1865 
bch2_set_btree_iter_dontneed(struct btree_iter * iter)1866 void bch2_set_btree_iter_dontneed(struct btree_iter *iter)
1867 {
1868 	struct btree_trans *trans = iter->trans;
1869 
1870 	if (!iter->path || trans->restarted)
1871 		return;
1872 
1873 	struct btree_path *path = btree_iter_path(trans, iter);
1874 	path->preserve		= false;
1875 	if (path->ref == 1)
1876 		path->should_be_locked	= false;
1877 }
1878 /* Btree iterators: */
1879 
1880 int __must_check
__bch2_btree_iter_traverse(struct btree_iter * iter)1881 __bch2_btree_iter_traverse(struct btree_iter *iter)
1882 {
1883 	return bch2_btree_path_traverse(iter->trans, iter->path, iter->flags);
1884 }
1885 
1886 int __must_check
bch2_btree_iter_traverse(struct btree_iter * iter)1887 bch2_btree_iter_traverse(struct btree_iter *iter)
1888 {
1889 	struct btree_trans *trans = iter->trans;
1890 	int ret;
1891 
1892 	bch2_trans_verify_not_unlocked_or_in_restart(trans);
1893 
1894 	iter->path = bch2_btree_path_set_pos(trans, iter->path,
1895 					btree_iter_search_key(iter),
1896 					iter->flags & BTREE_ITER_intent,
1897 					btree_iter_ip_allocated(iter));
1898 
1899 	ret = bch2_btree_path_traverse(iter->trans, iter->path, iter->flags);
1900 	if (ret)
1901 		return ret;
1902 
1903 	struct btree_path *path = btree_iter_path(trans, iter);
1904 	if (btree_path_node(path, path->level))
1905 		btree_path_set_should_be_locked(trans, path);
1906 	return 0;
1907 }
1908 
1909 /* Iterate across nodes (leaf and interior nodes) */
1910 
bch2_btree_iter_peek_node(struct btree_iter * iter)1911 struct btree *bch2_btree_iter_peek_node(struct btree_iter *iter)
1912 {
1913 	struct btree_trans *trans = iter->trans;
1914 	struct btree *b = NULL;
1915 	int ret;
1916 
1917 	EBUG_ON(trans->paths[iter->path].cached);
1918 	bch2_btree_iter_verify(iter);
1919 
1920 	ret = bch2_btree_path_traverse(trans, iter->path, iter->flags);
1921 	if (ret)
1922 		goto err;
1923 
1924 	struct btree_path *path = btree_iter_path(trans, iter);
1925 	b = btree_path_node(path, path->level);
1926 	if (!b)
1927 		goto out;
1928 
1929 	BUG_ON(bpos_lt(b->key.k.p, iter->pos));
1930 
1931 	bkey_init(&iter->k);
1932 	iter->k.p = iter->pos = b->key.k.p;
1933 
1934 	iter->path = bch2_btree_path_set_pos(trans, iter->path, b->key.k.p,
1935 					iter->flags & BTREE_ITER_intent,
1936 					btree_iter_ip_allocated(iter));
1937 	btree_path_set_should_be_locked(trans, btree_iter_path(trans, iter));
1938 out:
1939 	bch2_btree_iter_verify_entry_exit(iter);
1940 	bch2_btree_iter_verify(iter);
1941 
1942 	return b;
1943 err:
1944 	b = ERR_PTR(ret);
1945 	goto out;
1946 }
1947 
1948 /* Only kept for -tools */
bch2_btree_iter_peek_node_and_restart(struct btree_iter * iter)1949 struct btree *bch2_btree_iter_peek_node_and_restart(struct btree_iter *iter)
1950 {
1951 	struct btree *b;
1952 
1953 	while (b = bch2_btree_iter_peek_node(iter),
1954 	       bch2_err_matches(PTR_ERR_OR_ZERO(b), BCH_ERR_transaction_restart))
1955 		bch2_trans_begin(iter->trans);
1956 
1957 	return b;
1958 }
1959 
bch2_btree_iter_next_node(struct btree_iter * iter)1960 struct btree *bch2_btree_iter_next_node(struct btree_iter *iter)
1961 {
1962 	struct btree_trans *trans = iter->trans;
1963 	struct btree *b = NULL;
1964 	int ret;
1965 
1966 	EBUG_ON(trans->paths[iter->path].cached);
1967 	bch2_trans_verify_not_unlocked_or_in_restart(trans);
1968 	bch2_btree_iter_verify(iter);
1969 
1970 	ret = bch2_btree_path_traverse(trans, iter->path, iter->flags);
1971 	if (ret)
1972 		goto err;
1973 
1974 
1975 	struct btree_path *path = btree_iter_path(trans, iter);
1976 
1977 	/* already at end? */
1978 	if (!btree_path_node(path, path->level))
1979 		return NULL;
1980 
1981 	/* got to end? */
1982 	if (!btree_path_node(path, path->level + 1)) {
1983 		btree_path_set_level_up(trans, path);
1984 		return NULL;
1985 	}
1986 
1987 	if (!bch2_btree_node_relock(trans, path, path->level + 1)) {
1988 		__bch2_btree_path_unlock(trans, path);
1989 		path->l[path->level].b		= ERR_PTR(-BCH_ERR_no_btree_node_relock);
1990 		path->l[path->level + 1].b	= ERR_PTR(-BCH_ERR_no_btree_node_relock);
1991 		btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
1992 		trace_and_count(trans->c, trans_restart_relock_next_node, trans, _THIS_IP_, path);
1993 		ret = btree_trans_restart(trans, BCH_ERR_transaction_restart_relock);
1994 		goto err;
1995 	}
1996 
1997 	b = btree_path_node(path, path->level + 1);
1998 
1999 	if (bpos_eq(iter->pos, b->key.k.p)) {
2000 		__btree_path_set_level_up(trans, path, path->level++);
2001 	} else {
2002 		if (btree_lock_want(path, path->level + 1) == BTREE_NODE_UNLOCKED)
2003 			btree_node_unlock(trans, path, path->level + 1);
2004 
2005 		/*
2006 		 * Haven't gotten to the end of the parent node: go back down to
2007 		 * the next child node
2008 		 */
2009 		iter->path = bch2_btree_path_set_pos(trans, iter->path,
2010 					bpos_successor(iter->pos),
2011 					iter->flags & BTREE_ITER_intent,
2012 					btree_iter_ip_allocated(iter));
2013 
2014 		path = btree_iter_path(trans, iter);
2015 		btree_path_set_level_down(trans, path, iter->min_depth);
2016 
2017 		ret = bch2_btree_path_traverse(trans, iter->path, iter->flags);
2018 		if (ret)
2019 			goto err;
2020 
2021 		path = btree_iter_path(trans, iter);
2022 		b = path->l[path->level].b;
2023 	}
2024 
2025 	bkey_init(&iter->k);
2026 	iter->k.p = iter->pos = b->key.k.p;
2027 
2028 	iter->path = bch2_btree_path_set_pos(trans, iter->path, b->key.k.p,
2029 					iter->flags & BTREE_ITER_intent,
2030 					btree_iter_ip_allocated(iter));
2031 	btree_path_set_should_be_locked(trans, btree_iter_path(trans, iter));
2032 	EBUG_ON(btree_iter_path(trans, iter)->uptodate);
2033 out:
2034 	bch2_btree_iter_verify_entry_exit(iter);
2035 	bch2_btree_iter_verify(iter);
2036 
2037 	return b;
2038 err:
2039 	b = ERR_PTR(ret);
2040 	goto out;
2041 }
2042 
2043 /* Iterate across keys (in leaf nodes only) */
2044 
bch2_btree_iter_advance(struct btree_iter * iter)2045 inline bool bch2_btree_iter_advance(struct btree_iter *iter)
2046 {
2047 	struct bpos pos = iter->k.p;
2048 	bool ret = !(iter->flags & BTREE_ITER_all_snapshots
2049 		     ? bpos_eq(pos, SPOS_MAX)
2050 		     : bkey_eq(pos, SPOS_MAX));
2051 
2052 	if (ret && !(iter->flags & BTREE_ITER_is_extents))
2053 		pos = bkey_successor(iter, pos);
2054 	bch2_btree_iter_set_pos(iter, pos);
2055 	return ret;
2056 }
2057 
bch2_btree_iter_rewind(struct btree_iter * iter)2058 inline bool bch2_btree_iter_rewind(struct btree_iter *iter)
2059 {
2060 	struct bpos pos = bkey_start_pos(&iter->k);
2061 	bool ret = !(iter->flags & BTREE_ITER_all_snapshots
2062 		     ? bpos_eq(pos, POS_MIN)
2063 		     : bkey_eq(pos, POS_MIN));
2064 
2065 	if (ret && !(iter->flags & BTREE_ITER_is_extents))
2066 		pos = bkey_predecessor(iter, pos);
2067 	bch2_btree_iter_set_pos(iter, pos);
2068 	return ret;
2069 }
2070 
2071 static noinline
bch2_btree_trans_peek_prev_updates(struct btree_trans * trans,struct btree_iter * iter,struct bkey_s_c * k)2072 void bch2_btree_trans_peek_prev_updates(struct btree_trans *trans, struct btree_iter *iter,
2073 					struct bkey_s_c *k)
2074 {
2075 	struct bpos end = path_l(btree_iter_path(trans, iter))->b->data->min_key;
2076 
2077 	trans_for_each_update(trans, i)
2078 		if (!i->key_cache_already_flushed &&
2079 		    i->btree_id == iter->btree_id &&
2080 		    bpos_le(i->k->k.p, iter->pos) &&
2081 		    bpos_ge(i->k->k.p, k->k ? k->k->p : end)) {
2082 			iter->k = i->k->k;
2083 			*k = bkey_i_to_s_c(i->k);
2084 		}
2085 }
2086 
2087 static noinline
bch2_btree_trans_peek_updates(struct btree_trans * trans,struct btree_iter * iter,struct bkey_s_c * k)2088 void bch2_btree_trans_peek_updates(struct btree_trans *trans, struct btree_iter *iter,
2089 				   struct bkey_s_c *k)
2090 {
2091 	struct btree_path *path = btree_iter_path(trans, iter);
2092 	struct bpos end = path_l(path)->b->key.k.p;
2093 
2094 	trans_for_each_update(trans, i)
2095 		if (!i->key_cache_already_flushed &&
2096 		    i->btree_id == iter->btree_id &&
2097 		    bpos_ge(i->k->k.p, path->pos) &&
2098 		    bpos_le(i->k->k.p, k->k ? k->k->p : end)) {
2099 			iter->k = i->k->k;
2100 			*k = bkey_i_to_s_c(i->k);
2101 		}
2102 }
2103 
2104 static noinline
bch2_btree_trans_peek_slot_updates(struct btree_trans * trans,struct btree_iter * iter,struct bkey_s_c * k)2105 void bch2_btree_trans_peek_slot_updates(struct btree_trans *trans, struct btree_iter *iter,
2106 					struct bkey_s_c *k)
2107 {
2108 	trans_for_each_update(trans, i)
2109 		if (!i->key_cache_already_flushed &&
2110 		    i->btree_id == iter->btree_id &&
2111 		    bpos_eq(i->k->k.p, iter->pos)) {
2112 			iter->k = i->k->k;
2113 			*k = bkey_i_to_s_c(i->k);
2114 		}
2115 }
2116 
bch2_btree_journal_peek(struct btree_trans * trans,struct btree_iter * iter,struct bpos end_pos)2117 static struct bkey_i *bch2_btree_journal_peek(struct btree_trans *trans,
2118 					      struct btree_iter *iter,
2119 					      struct bpos end_pos)
2120 {
2121 	struct btree_path *path = btree_iter_path(trans, iter);
2122 
2123 	return bch2_journal_keys_peek_max(trans->c, iter->btree_id,
2124 					   path->level,
2125 					   path->pos,
2126 					   end_pos,
2127 					   &iter->journal_idx);
2128 }
2129 
2130 static noinline
btree_trans_peek_slot_journal(struct btree_trans * trans,struct btree_iter * iter)2131 struct bkey_s_c btree_trans_peek_slot_journal(struct btree_trans *trans,
2132 					      struct btree_iter *iter)
2133 {
2134 	struct btree_path *path = btree_iter_path(trans, iter);
2135 	struct bkey_i *k = bch2_btree_journal_peek(trans, iter, path->pos);
2136 
2137 	if (k) {
2138 		iter->k = k->k;
2139 		return bkey_i_to_s_c(k);
2140 	} else {
2141 		return bkey_s_c_null;
2142 	}
2143 }
2144 
2145 static noinline
btree_trans_peek_journal(struct btree_trans * trans,struct btree_iter * iter,struct bkey_s_c * k)2146 void btree_trans_peek_journal(struct btree_trans *trans,
2147 			      struct btree_iter *iter,
2148 			      struct bkey_s_c *k)
2149 {
2150 	struct btree_path *path = btree_iter_path(trans, iter);
2151 	struct bkey_i *next_journal =
2152 		bch2_btree_journal_peek(trans, iter,
2153 				k->k ? k->k->p : path_l(path)->b->key.k.p);
2154 	if (next_journal) {
2155 		iter->k = next_journal->k;
2156 		*k = bkey_i_to_s_c(next_journal);
2157 	}
2158 }
2159 
bch2_btree_journal_peek_prev(struct btree_trans * trans,struct btree_iter * iter,struct bpos end_pos)2160 static struct bkey_i *bch2_btree_journal_peek_prev(struct btree_trans *trans,
2161 					      struct btree_iter *iter,
2162 					      struct bpos end_pos)
2163 {
2164 	struct btree_path *path = btree_iter_path(trans, iter);
2165 
2166 	return bch2_journal_keys_peek_prev_min(trans->c, iter->btree_id,
2167 					   path->level,
2168 					   path->pos,
2169 					   end_pos,
2170 					   &iter->journal_idx);
2171 }
2172 
2173 static noinline
btree_trans_peek_prev_journal(struct btree_trans * trans,struct btree_iter * iter,struct bkey_s_c * k)2174 void btree_trans_peek_prev_journal(struct btree_trans *trans,
2175 				   struct btree_iter *iter,
2176 				   struct bkey_s_c *k)
2177 {
2178 	struct btree_path *path = btree_iter_path(trans, iter);
2179 	struct bkey_i *next_journal =
2180 		bch2_btree_journal_peek_prev(trans, iter,
2181 				k->k ? k->k->p : path_l(path)->b->key.k.p);
2182 
2183 	if (next_journal) {
2184 		iter->k = next_journal->k;
2185 		*k = bkey_i_to_s_c(next_journal);
2186 	}
2187 }
2188 
2189 /*
2190  * Checks btree key cache for key at iter->pos and returns it if present, or
2191  * bkey_s_c_null:
2192  */
2193 static noinline
btree_trans_peek_key_cache(struct btree_iter * iter,struct bpos pos)2194 struct bkey_s_c btree_trans_peek_key_cache(struct btree_iter *iter, struct bpos pos)
2195 {
2196 	struct btree_trans *trans = iter->trans;
2197 	struct bch_fs *c = trans->c;
2198 	struct bkey u;
2199 	struct bkey_s_c k;
2200 	int ret;
2201 
2202 	bch2_trans_verify_not_unlocked_or_in_restart(trans);
2203 
2204 	if ((iter->flags & BTREE_ITER_key_cache_fill) &&
2205 	    bpos_eq(iter->pos, pos))
2206 		return bkey_s_c_null;
2207 
2208 	if (!bch2_btree_key_cache_find(c, iter->btree_id, pos))
2209 		return bkey_s_c_null;
2210 
2211 	if (!iter->key_cache_path)
2212 		iter->key_cache_path = bch2_path_get(trans, iter->btree_id, pos,
2213 						     iter->flags & BTREE_ITER_intent, 0,
2214 						     iter->flags|BTREE_ITER_cached|
2215 						     BTREE_ITER_cached_nofill,
2216 						     _THIS_IP_);
2217 
2218 	iter->key_cache_path = bch2_btree_path_set_pos(trans, iter->key_cache_path, pos,
2219 					iter->flags & BTREE_ITER_intent,
2220 					btree_iter_ip_allocated(iter));
2221 
2222 	ret =   bch2_btree_path_traverse(trans, iter->key_cache_path,
2223 					 iter->flags|BTREE_ITER_cached) ?:
2224 		bch2_btree_path_relock(trans, btree_iter_path(trans, iter), _THIS_IP_);
2225 	if (unlikely(ret))
2226 		return bkey_s_c_err(ret);
2227 
2228 	k = bch2_btree_path_peek_slot(trans->paths + iter->key_cache_path, &u);
2229 	if (!k.k)
2230 		return k;
2231 
2232 	if ((iter->flags & BTREE_ITER_all_snapshots) &&
2233 	    !bpos_eq(pos, k.k->p))
2234 		return bkey_s_c_null;
2235 
2236 	iter->k = u;
2237 	k.k = &iter->k;
2238 	btree_path_set_should_be_locked(trans, trans->paths + iter->key_cache_path);
2239 	return k;
2240 }
2241 
__bch2_btree_iter_peek(struct btree_iter * iter,struct bpos search_key)2242 static struct bkey_s_c __bch2_btree_iter_peek(struct btree_iter *iter, struct bpos search_key)
2243 {
2244 	struct btree_trans *trans = iter->trans;
2245 	struct bkey_s_c k, k2;
2246 	int ret;
2247 
2248 	EBUG_ON(btree_iter_path(trans, iter)->cached);
2249 	bch2_btree_iter_verify(iter);
2250 
2251 	while (1) {
2252 		iter->path = bch2_btree_path_set_pos(trans, iter->path, search_key,
2253 					iter->flags & BTREE_ITER_intent,
2254 					btree_iter_ip_allocated(iter));
2255 
2256 		ret = bch2_btree_path_traverse(trans, iter->path, iter->flags);
2257 		if (unlikely(ret)) {
2258 			/* ensure that iter->k is consistent with iter->pos: */
2259 			bch2_btree_iter_set_pos(iter, iter->pos);
2260 			k = bkey_s_c_err(ret);
2261 			break;
2262 		}
2263 
2264 		struct btree_path *path = btree_iter_path(trans, iter);
2265 		struct btree_path_level *l = path_l(path);
2266 
2267 		if (unlikely(!l->b)) {
2268 			/* No btree nodes at requested level: */
2269 			bch2_btree_iter_set_pos(iter, SPOS_MAX);
2270 			k = bkey_s_c_null;
2271 			break;
2272 		}
2273 
2274 		btree_path_set_should_be_locked(trans, path);
2275 
2276 		k = btree_path_level_peek_all(trans->c, l, &iter->k);
2277 
2278 		if (unlikely(iter->flags & BTREE_ITER_with_key_cache) &&
2279 		    k.k &&
2280 		    (k2 = btree_trans_peek_key_cache(iter, k.k->p)).k) {
2281 			k = k2;
2282 			if (bkey_err(k)) {
2283 				bch2_btree_iter_set_pos(iter, iter->pos);
2284 				break;
2285 			}
2286 		}
2287 
2288 		if (unlikely(iter->flags & BTREE_ITER_with_journal))
2289 			btree_trans_peek_journal(trans, iter, &k);
2290 
2291 		if (unlikely((iter->flags & BTREE_ITER_with_updates) &&
2292 			     trans->nr_updates))
2293 			bch2_btree_trans_peek_updates(trans, iter, &k);
2294 
2295 		if (k.k && bkey_deleted(k.k)) {
2296 			/*
2297 			 * If we've got a whiteout, and it's after the search
2298 			 * key, advance the search key to the whiteout instead
2299 			 * of just after the whiteout - it might be a btree
2300 			 * whiteout, with a real key at the same position, since
2301 			 * in the btree deleted keys sort before non deleted.
2302 			 */
2303 			search_key = !bpos_eq(search_key, k.k->p)
2304 				? k.k->p
2305 				: bpos_successor(k.k->p);
2306 			continue;
2307 		}
2308 
2309 		if (likely(k.k)) {
2310 			break;
2311 		} else if (likely(!bpos_eq(l->b->key.k.p, SPOS_MAX))) {
2312 			/* Advance to next leaf node: */
2313 			search_key = bpos_successor(l->b->key.k.p);
2314 		} else {
2315 			/* End of btree: */
2316 			bch2_btree_iter_set_pos(iter, SPOS_MAX);
2317 			k = bkey_s_c_null;
2318 			break;
2319 		}
2320 	}
2321 
2322 	bch2_btree_iter_verify(iter);
2323 	return k;
2324 }
2325 
2326 /**
2327  * bch2_btree_iter_peek_max() - returns first key greater than or equal to
2328  * iterator's current position
2329  * @iter:	iterator to peek from
2330  * @end:	search limit: returns keys less than or equal to @end
2331  *
2332  * Returns:	key if found, or an error extractable with bkey_err().
2333  */
bch2_btree_iter_peek_max(struct btree_iter * iter,struct bpos end)2334 struct bkey_s_c bch2_btree_iter_peek_max(struct btree_iter *iter, struct bpos end)
2335 {
2336 	struct btree_trans *trans = iter->trans;
2337 	struct bpos search_key = btree_iter_search_key(iter);
2338 	struct bkey_s_c k;
2339 	struct bpos iter_pos = iter->pos;
2340 	int ret;
2341 
2342 	bch2_trans_verify_not_unlocked_or_in_restart(trans);
2343 	bch2_btree_iter_verify_entry_exit(iter);
2344 	EBUG_ON((iter->flags & BTREE_ITER_filter_snapshots) && bkey_eq(end, POS_MAX));
2345 
2346 	ret = trans_maybe_inject_restart(trans, _RET_IP_);
2347 	if (unlikely(ret)) {
2348 		k = bkey_s_c_err(ret);
2349 		goto out_no_locked;
2350 	}
2351 
2352 	if (iter->update_path) {
2353 		bch2_path_put_nokeep(trans, iter->update_path,
2354 				     iter->flags & BTREE_ITER_intent);
2355 		iter->update_path = 0;
2356 	}
2357 
2358 	while (1) {
2359 		k = __bch2_btree_iter_peek(iter, search_key);
2360 		if (unlikely(!k.k))
2361 			goto end;
2362 		if (unlikely(bkey_err(k)))
2363 			goto out_no_locked;
2364 
2365 		if (iter->flags & BTREE_ITER_filter_snapshots) {
2366 			/*
2367 			 * We need to check against @end before FILTER_SNAPSHOTS because
2368 			 * if we get to a different inode that requested we might be
2369 			 * seeing keys for a different snapshot tree that will all be
2370 			 * filtered out.
2371 			 *
2372 			 * But we can't do the full check here, because bkey_start_pos()
2373 			 * isn't monotonically increasing before FILTER_SNAPSHOTS, and
2374 			 * that's what we check against in extents mode:
2375 			 */
2376 			if (unlikely(!(iter->flags & BTREE_ITER_is_extents)
2377 				     ? bkey_gt(k.k->p, end)
2378 				     : k.k->p.inode > end.inode))
2379 				goto end;
2380 
2381 			if (iter->update_path &&
2382 			    !bkey_eq(trans->paths[iter->update_path].pos, k.k->p)) {
2383 				bch2_path_put_nokeep(trans, iter->update_path,
2384 						     iter->flags & BTREE_ITER_intent);
2385 				iter->update_path = 0;
2386 			}
2387 
2388 			if ((iter->flags & BTREE_ITER_intent) &&
2389 			    !(iter->flags & BTREE_ITER_is_extents) &&
2390 			    !iter->update_path) {
2391 				struct bpos pos = k.k->p;
2392 
2393 				if (pos.snapshot < iter->snapshot) {
2394 					search_key = bpos_successor(k.k->p);
2395 					continue;
2396 				}
2397 
2398 				pos.snapshot = iter->snapshot;
2399 
2400 				/*
2401 				 * advance, same as on exit for iter->path, but only up
2402 				 * to snapshot
2403 				 */
2404 				__btree_path_get(trans, trans->paths + iter->path, iter->flags & BTREE_ITER_intent);
2405 				iter->update_path = iter->path;
2406 
2407 				iter->update_path = bch2_btree_path_set_pos(trans,
2408 							iter->update_path, pos,
2409 							iter->flags & BTREE_ITER_intent,
2410 							_THIS_IP_);
2411 				ret = bch2_btree_path_traverse(trans, iter->update_path, iter->flags);
2412 				if (unlikely(ret)) {
2413 					k = bkey_s_c_err(ret);
2414 					goto out_no_locked;
2415 				}
2416 			}
2417 
2418 			/*
2419 			 * We can never have a key in a leaf node at POS_MAX, so
2420 			 * we don't have to check these successor() calls:
2421 			 */
2422 			if (!bch2_snapshot_is_ancestor(trans->c,
2423 						       iter->snapshot,
2424 						       k.k->p.snapshot)) {
2425 				search_key = bpos_successor(k.k->p);
2426 				continue;
2427 			}
2428 
2429 			if (bkey_whiteout(k.k) &&
2430 			    !(iter->flags & BTREE_ITER_key_cache_fill)) {
2431 				search_key = bkey_successor(iter, k.k->p);
2432 				continue;
2433 			}
2434 		}
2435 
2436 		/*
2437 		 * iter->pos should be mononotically increasing, and always be
2438 		 * equal to the key we just returned - except extents can
2439 		 * straddle iter->pos:
2440 		 */
2441 		if (!(iter->flags & BTREE_ITER_is_extents))
2442 			iter_pos = k.k->p;
2443 		else
2444 			iter_pos = bkey_max(iter->pos, bkey_start_pos(k.k));
2445 
2446 		if (unlikely(iter->flags & BTREE_ITER_all_snapshots	? bpos_gt(iter_pos, end) :
2447 			     iter->flags & BTREE_ITER_is_extents	? bkey_ge(iter_pos, end) :
2448 									  bkey_gt(iter_pos, end)))
2449 			goto end;
2450 
2451 		break;
2452 	}
2453 
2454 	iter->pos = iter_pos;
2455 
2456 	iter->path = bch2_btree_path_set_pos(trans, iter->path, k.k->p,
2457 				iter->flags & BTREE_ITER_intent,
2458 				btree_iter_ip_allocated(iter));
2459 
2460 	btree_path_set_should_be_locked(trans, btree_iter_path(trans, iter));
2461 out_no_locked:
2462 	if (iter->update_path) {
2463 		ret = bch2_btree_path_relock(trans, trans->paths + iter->update_path, _THIS_IP_);
2464 		if (unlikely(ret))
2465 			k = bkey_s_c_err(ret);
2466 		else
2467 			btree_path_set_should_be_locked(trans, trans->paths + iter->update_path);
2468 	}
2469 
2470 	if (!(iter->flags & BTREE_ITER_all_snapshots))
2471 		iter->pos.snapshot = iter->snapshot;
2472 
2473 	ret = bch2_btree_iter_verify_ret(iter, k);
2474 	if (unlikely(ret)) {
2475 		bch2_btree_iter_set_pos(iter, iter->pos);
2476 		k = bkey_s_c_err(ret);
2477 	}
2478 
2479 	bch2_btree_iter_verify_entry_exit(iter);
2480 
2481 	return k;
2482 end:
2483 	bch2_btree_iter_set_pos(iter, end);
2484 	k = bkey_s_c_null;
2485 	goto out_no_locked;
2486 }
2487 
2488 /**
2489  * bch2_btree_iter_next() - returns first key greater than iterator's current
2490  * position
2491  * @iter:	iterator to peek from
2492  *
2493  * Returns:	key if found, or an error extractable with bkey_err().
2494  */
bch2_btree_iter_next(struct btree_iter * iter)2495 struct bkey_s_c bch2_btree_iter_next(struct btree_iter *iter)
2496 {
2497 	if (!bch2_btree_iter_advance(iter))
2498 		return bkey_s_c_null;
2499 
2500 	return bch2_btree_iter_peek(iter);
2501 }
2502 
__bch2_btree_iter_peek_prev(struct btree_iter * iter,struct bpos search_key)2503 static struct bkey_s_c __bch2_btree_iter_peek_prev(struct btree_iter *iter, struct bpos search_key)
2504 {
2505 	struct btree_trans *trans = iter->trans;
2506 	struct bkey_s_c k, k2;
2507 
2508 	bch2_btree_iter_verify(iter);
2509 
2510 	while (1) {
2511 		iter->path = bch2_btree_path_set_pos(trans, iter->path, search_key,
2512 					iter->flags & BTREE_ITER_intent,
2513 					btree_iter_ip_allocated(iter));
2514 
2515 		int ret = bch2_btree_path_traverse(trans, iter->path, iter->flags);
2516 		if (unlikely(ret)) {
2517 			/* ensure that iter->k is consistent with iter->pos: */
2518 			bch2_btree_iter_set_pos(iter, iter->pos);
2519 			k = bkey_s_c_err(ret);
2520 			break;
2521 		}
2522 
2523 		struct btree_path *path = btree_iter_path(trans, iter);
2524 		struct btree_path_level *l = path_l(path);
2525 
2526 		if (unlikely(!l->b)) {
2527 			/* No btree nodes at requested level: */
2528 			bch2_btree_iter_set_pos(iter, SPOS_MAX);
2529 			k = bkey_s_c_null;
2530 			break;
2531 		}
2532 
2533 		btree_path_set_should_be_locked(trans, path);
2534 
2535 		k = btree_path_level_peek_all(trans->c, l, &iter->k);
2536 		if (!k.k || bpos_gt(k.k->p, search_key)) {
2537 			k = btree_path_level_prev(trans, path, l, &iter->k);
2538 
2539 			BUG_ON(k.k && bpos_gt(k.k->p, search_key));
2540 		}
2541 
2542 		if (unlikely(iter->flags & BTREE_ITER_with_key_cache) &&
2543 		    k.k &&
2544 		    (k2 = btree_trans_peek_key_cache(iter, k.k->p)).k) {
2545 			k = k2;
2546 			if (bkey_err(k2)) {
2547 				bch2_btree_iter_set_pos(iter, iter->pos);
2548 				break;
2549 			}
2550 		}
2551 
2552 		if (unlikely(iter->flags & BTREE_ITER_with_journal))
2553 			btree_trans_peek_prev_journal(trans, iter, &k);
2554 
2555 		if (unlikely((iter->flags & BTREE_ITER_with_updates) &&
2556 			     trans->nr_updates))
2557 			bch2_btree_trans_peek_prev_updates(trans, iter, &k);
2558 
2559 		if (likely(k.k && !bkey_deleted(k.k))) {
2560 			break;
2561 		} else if (k.k) {
2562 			search_key = bpos_predecessor(k.k->p);
2563 		} else if (likely(!bpos_eq(path->l[0].b->data->min_key, POS_MIN))) {
2564 			/* Advance to previous leaf node: */
2565 			search_key = bpos_predecessor(path->l[0].b->data->min_key);
2566 		} else {
2567 			/* Start of btree: */
2568 			bch2_btree_iter_set_pos(iter, POS_MIN);
2569 			k = bkey_s_c_null;
2570 			break;
2571 		}
2572 	}
2573 
2574 	bch2_btree_iter_verify(iter);
2575 	return k;
2576 }
2577 
2578 /**
2579  * bch2_btree_iter_peek_prev_min() - returns first key less than or equal to
2580  * iterator's current position
2581  * @iter:	iterator to peek from
2582  * @end:	search limit: returns keys greater than or equal to @end
2583  *
2584  * Returns:	key if found, or an error extractable with bkey_err().
2585  */
bch2_btree_iter_peek_prev_min(struct btree_iter * iter,struct bpos end)2586 struct bkey_s_c bch2_btree_iter_peek_prev_min(struct btree_iter *iter, struct bpos end)
2587 {
2588 	if ((iter->flags & (BTREE_ITER_is_extents|BTREE_ITER_filter_snapshots)) &&
2589 	   !bkey_eq(iter->pos, POS_MAX)) {
2590 		/*
2591 		 * bkey_start_pos(), for extents, is not monotonically
2592 		 * increasing until after filtering for snapshots:
2593 		 *
2594 		 * Thus, for extents we need to search forward until we find a
2595 		 * real visible extents - easiest to just use peek_slot() (which
2596 		 * internally uses peek() for extents)
2597 		 */
2598 		struct bkey_s_c k = bch2_btree_iter_peek_slot(iter);
2599 		if (bkey_err(k))
2600 			return k;
2601 
2602 		if (!bkey_deleted(k.k) &&
2603 		    (!(iter->flags & BTREE_ITER_is_extents) ||
2604 		     bkey_lt(bkey_start_pos(k.k), iter->pos)))
2605 			return k;
2606 	}
2607 
2608 	struct btree_trans *trans = iter->trans;
2609 	struct bpos search_key = iter->pos;
2610 	struct bkey_s_c k;
2611 	btree_path_idx_t saved_path = 0;
2612 
2613 	bch2_trans_verify_not_unlocked_or_in_restart(trans);
2614 	bch2_btree_iter_verify_entry_exit(iter);
2615 	EBUG_ON((iter->flags & BTREE_ITER_filter_snapshots) && bpos_eq(end, POS_MIN));
2616 
2617 	int ret = trans_maybe_inject_restart(trans, _RET_IP_);
2618 	if (unlikely(ret)) {
2619 		k = bkey_s_c_err(ret);
2620 		goto out_no_locked;
2621 	}
2622 
2623 	while (1) {
2624 		k = __bch2_btree_iter_peek_prev(iter, search_key);
2625 		if (unlikely(!k.k))
2626 			goto end;
2627 		if (unlikely(bkey_err(k)))
2628 			goto out_no_locked;
2629 
2630 		if (iter->flags & BTREE_ITER_filter_snapshots) {
2631 			struct btree_path *s = saved_path ? trans->paths + saved_path : NULL;
2632 			if (s && bpos_lt(k.k->p, SPOS(s->pos.inode, s->pos.offset, iter->snapshot))) {
2633 				/*
2634 				 * If we have a saved candidate, and we're past
2635 				 * the last possible snapshot overwrite, return
2636 				 * it:
2637 				 */
2638 				bch2_path_put_nokeep(trans, iter->path,
2639 					      iter->flags & BTREE_ITER_intent);
2640 				iter->path = saved_path;
2641 				saved_path = 0;
2642 				k = bch2_btree_path_peek_slot(btree_iter_path(trans, iter), &iter->k);
2643 				break;
2644 			}
2645 
2646 			/*
2647 			 * We need to check against @end before FILTER_SNAPSHOTS because
2648 			 * if we get to a different inode that requested we might be
2649 			 * seeing keys for a different snapshot tree that will all be
2650 			 * filtered out.
2651 			 */
2652 			if (unlikely(bkey_lt(k.k->p, end)))
2653 				goto end;
2654 
2655 			if (!bch2_snapshot_is_ancestor(trans->c, iter->snapshot, k.k->p.snapshot)) {
2656 				search_key = bpos_predecessor(k.k->p);
2657 				continue;
2658 			}
2659 
2660 			if (k.k->p.snapshot != iter->snapshot) {
2661 				/*
2662 				 * Have a key visible in iter->snapshot, but
2663 				 * might have overwrites: - save it and keep
2664 				 * searching. Unless it's a whiteout - then drop
2665 				 * our previous saved candidate:
2666 				 */
2667 				if (saved_path) {
2668 					bch2_path_put_nokeep(trans, saved_path,
2669 					      iter->flags & BTREE_ITER_intent);
2670 					saved_path = 0;
2671 				}
2672 
2673 				if (!bkey_whiteout(k.k)) {
2674 					saved_path = btree_path_clone(trans, iter->path,
2675 								iter->flags & BTREE_ITER_intent,
2676 								_THIS_IP_);
2677 					trace_btree_path_save_pos(trans,
2678 								  trans->paths + iter->path,
2679 								  trans->paths + saved_path);
2680 				}
2681 
2682 				search_key = bpos_predecessor(k.k->p);
2683 				continue;
2684 			}
2685 
2686 			if (bkey_whiteout(k.k)) {
2687 				search_key = bkey_predecessor(iter, k.k->p);
2688 				search_key.snapshot = U32_MAX;
2689 				continue;
2690 			}
2691 		}
2692 
2693 		EBUG_ON(iter->flags & BTREE_ITER_all_snapshots		? bpos_gt(k.k->p, iter->pos) :
2694 			iter->flags & BTREE_ITER_is_extents		? bkey_ge(bkey_start_pos(k.k), iter->pos) :
2695 									  bkey_gt(k.k->p, iter->pos));
2696 
2697 		if (unlikely(iter->flags & BTREE_ITER_all_snapshots	? bpos_lt(k.k->p, end) :
2698 			     iter->flags & BTREE_ITER_is_extents	? bkey_le(k.k->p, end) :
2699 									  bkey_lt(k.k->p, end)))
2700 			goto end;
2701 
2702 		break;
2703 	}
2704 
2705 	/* Extents can straddle iter->pos: */
2706 	iter->pos = bpos_min(iter->pos, k.k->p);;
2707 
2708 	if (iter->flags & BTREE_ITER_filter_snapshots)
2709 		iter->pos.snapshot = iter->snapshot;
2710 out_no_locked:
2711 	if (saved_path)
2712 		bch2_path_put_nokeep(trans, saved_path, iter->flags & BTREE_ITER_intent);
2713 
2714 	bch2_btree_iter_verify_entry_exit(iter);
2715 	bch2_btree_iter_verify(iter);
2716 	return k;
2717 end:
2718 	bch2_btree_iter_set_pos(iter, end);
2719 	k = bkey_s_c_null;
2720 	goto out_no_locked;
2721 }
2722 
2723 /**
2724  * bch2_btree_iter_prev() - returns first key less than iterator's current
2725  * position
2726  * @iter:	iterator to peek from
2727  *
2728  * Returns:	key if found, or an error extractable with bkey_err().
2729  */
bch2_btree_iter_prev(struct btree_iter * iter)2730 struct bkey_s_c bch2_btree_iter_prev(struct btree_iter *iter)
2731 {
2732 	if (!bch2_btree_iter_rewind(iter))
2733 		return bkey_s_c_null;
2734 
2735 	return bch2_btree_iter_peek_prev(iter);
2736 }
2737 
bch2_btree_iter_peek_slot(struct btree_iter * iter)2738 struct bkey_s_c bch2_btree_iter_peek_slot(struct btree_iter *iter)
2739 {
2740 	struct btree_trans *trans = iter->trans;
2741 	struct bpos search_key;
2742 	struct bkey_s_c k;
2743 	int ret;
2744 
2745 	bch2_trans_verify_not_unlocked_or_in_restart(trans);
2746 	bch2_btree_iter_verify(iter);
2747 	bch2_btree_iter_verify_entry_exit(iter);
2748 	EBUG_ON(btree_iter_path(trans, iter)->level && (iter->flags & BTREE_ITER_with_key_cache));
2749 
2750 	ret = trans_maybe_inject_restart(trans, _RET_IP_);
2751 	if (unlikely(ret)) {
2752 		k = bkey_s_c_err(ret);
2753 		goto out_no_locked;
2754 	}
2755 
2756 	/* extents can't span inode numbers: */
2757 	if ((iter->flags & BTREE_ITER_is_extents) &&
2758 	    unlikely(iter->pos.offset == KEY_OFFSET_MAX)) {
2759 		if (iter->pos.inode == KEY_INODE_MAX)
2760 			return bkey_s_c_null;
2761 
2762 		bch2_btree_iter_set_pos(iter, bpos_nosnap_successor(iter->pos));
2763 	}
2764 
2765 	search_key = btree_iter_search_key(iter);
2766 	iter->path = bch2_btree_path_set_pos(trans, iter->path, search_key,
2767 					iter->flags & BTREE_ITER_intent,
2768 					btree_iter_ip_allocated(iter));
2769 
2770 	ret = bch2_btree_path_traverse(trans, iter->path, iter->flags);
2771 	if (unlikely(ret)) {
2772 		k = bkey_s_c_err(ret);
2773 		goto out_no_locked;
2774 	}
2775 
2776 	struct btree_path *path = btree_iter_path(trans, iter);
2777 	if (unlikely(!btree_path_node(path, path->level)))
2778 		return bkey_s_c_null;
2779 
2780 	if ((iter->flags & BTREE_ITER_cached) ||
2781 	    !(iter->flags & (BTREE_ITER_is_extents|BTREE_ITER_filter_snapshots))) {
2782 		k = bkey_s_c_null;
2783 
2784 		if (unlikely((iter->flags & BTREE_ITER_with_updates) &&
2785 			     trans->nr_updates)) {
2786 			bch2_btree_trans_peek_slot_updates(trans, iter, &k);
2787 			if (k.k)
2788 				goto out;
2789 		}
2790 
2791 		if (unlikely(iter->flags & BTREE_ITER_with_journal) &&
2792 		    (k = btree_trans_peek_slot_journal(trans, iter)).k)
2793 			goto out;
2794 
2795 		if (unlikely(iter->flags & BTREE_ITER_with_key_cache) &&
2796 		    (k = btree_trans_peek_key_cache(iter, iter->pos)).k) {
2797 			if (!bkey_err(k))
2798 				iter->k = *k.k;
2799 			/* We're not returning a key from iter->path: */
2800 			goto out_no_locked;
2801 		}
2802 
2803 		k = bch2_btree_path_peek_slot(trans->paths + iter->path, &iter->k);
2804 		if (unlikely(!k.k))
2805 			goto out_no_locked;
2806 
2807 		if (unlikely(k.k->type == KEY_TYPE_whiteout &&
2808 			     (iter->flags & BTREE_ITER_filter_snapshots) &&
2809 			     !(iter->flags & BTREE_ITER_key_cache_fill)))
2810 			iter->k.type = KEY_TYPE_deleted;
2811 	} else {
2812 		struct bpos next;
2813 		struct bpos end = iter->pos;
2814 
2815 		if (iter->flags & BTREE_ITER_is_extents)
2816 			end.offset = U64_MAX;
2817 
2818 		EBUG_ON(btree_iter_path(trans, iter)->level);
2819 
2820 		if (iter->flags & BTREE_ITER_intent) {
2821 			struct btree_iter iter2;
2822 
2823 			bch2_trans_copy_iter(&iter2, iter);
2824 			k = bch2_btree_iter_peek_max(&iter2, end);
2825 
2826 			if (k.k && !bkey_err(k)) {
2827 				swap(iter->key_cache_path, iter2.key_cache_path);
2828 				iter->k = iter2.k;
2829 				k.k = &iter->k;
2830 			}
2831 			bch2_trans_iter_exit(trans, &iter2);
2832 		} else {
2833 			struct bpos pos = iter->pos;
2834 
2835 			k = bch2_btree_iter_peek_max(iter, end);
2836 			if (unlikely(bkey_err(k)))
2837 				bch2_btree_iter_set_pos(iter, pos);
2838 			else
2839 				iter->pos = pos;
2840 		}
2841 
2842 		if (unlikely(bkey_err(k)))
2843 			goto out_no_locked;
2844 
2845 		next = k.k ? bkey_start_pos(k.k) : POS_MAX;
2846 
2847 		if (bkey_lt(iter->pos, next)) {
2848 			bkey_init(&iter->k);
2849 			iter->k.p = iter->pos;
2850 
2851 			if (iter->flags & BTREE_ITER_is_extents) {
2852 				bch2_key_resize(&iter->k,
2853 						min_t(u64, KEY_SIZE_MAX,
2854 						      (next.inode == iter->pos.inode
2855 						       ? next.offset
2856 						       : KEY_OFFSET_MAX) -
2857 						      iter->pos.offset));
2858 				EBUG_ON(!iter->k.size);
2859 			}
2860 
2861 			k = (struct bkey_s_c) { &iter->k, NULL };
2862 		}
2863 	}
2864 out:
2865 	btree_path_set_should_be_locked(trans, btree_iter_path(trans, iter));
2866 out_no_locked:
2867 	bch2_btree_iter_verify_entry_exit(iter);
2868 	bch2_btree_iter_verify(iter);
2869 	ret = bch2_btree_iter_verify_ret(iter, k);
2870 	if (unlikely(ret))
2871 		return bkey_s_c_err(ret);
2872 
2873 	return k;
2874 }
2875 
bch2_btree_iter_next_slot(struct btree_iter * iter)2876 struct bkey_s_c bch2_btree_iter_next_slot(struct btree_iter *iter)
2877 {
2878 	if (!bch2_btree_iter_advance(iter))
2879 		return bkey_s_c_null;
2880 
2881 	return bch2_btree_iter_peek_slot(iter);
2882 }
2883 
bch2_btree_iter_prev_slot(struct btree_iter * iter)2884 struct bkey_s_c bch2_btree_iter_prev_slot(struct btree_iter *iter)
2885 {
2886 	if (!bch2_btree_iter_rewind(iter))
2887 		return bkey_s_c_null;
2888 
2889 	return bch2_btree_iter_peek_slot(iter);
2890 }
2891 
2892 /* Obsolete, but still used by rust wrapper in -tools */
bch2_btree_iter_peek_and_restart_outlined(struct btree_iter * iter)2893 struct bkey_s_c bch2_btree_iter_peek_and_restart_outlined(struct btree_iter *iter)
2894 {
2895 	struct bkey_s_c k;
2896 
2897 	while (btree_trans_too_many_iters(iter->trans) ||
2898 	       (k = bch2_btree_iter_peek_type(iter, iter->flags),
2899 		bch2_err_matches(bkey_err(k), BCH_ERR_transaction_restart)))
2900 		bch2_trans_begin(iter->trans);
2901 
2902 	return k;
2903 }
2904 
2905 /* new transactional stuff: */
2906 
2907 #ifdef CONFIG_BCACHEFS_DEBUG
btree_trans_verify_sorted_refs(struct btree_trans * trans)2908 static void btree_trans_verify_sorted_refs(struct btree_trans *trans)
2909 {
2910 	struct btree_path *path;
2911 	unsigned i;
2912 
2913 	BUG_ON(trans->nr_sorted != bitmap_weight(trans->paths_allocated, trans->nr_paths) - 1);
2914 
2915 	trans_for_each_path(trans, path, i) {
2916 		BUG_ON(path->sorted_idx >= trans->nr_sorted);
2917 		BUG_ON(trans->sorted[path->sorted_idx] != i);
2918 	}
2919 
2920 	for (i = 0; i < trans->nr_sorted; i++) {
2921 		unsigned idx = trans->sorted[i];
2922 
2923 		BUG_ON(!test_bit(idx, trans->paths_allocated));
2924 		BUG_ON(trans->paths[idx].sorted_idx != i);
2925 	}
2926 }
2927 
btree_trans_verify_sorted(struct btree_trans * trans)2928 static void btree_trans_verify_sorted(struct btree_trans *trans)
2929 {
2930 	struct btree_path *path, *prev = NULL;
2931 	struct trans_for_each_path_inorder_iter iter;
2932 
2933 	if (!bch2_debug_check_iterators)
2934 		return;
2935 
2936 	trans_for_each_path_inorder(trans, path, iter) {
2937 		if (prev && btree_path_cmp(prev, path) > 0) {
2938 			__bch2_dump_trans_paths_updates(trans, true);
2939 			panic("trans paths out of order!\n");
2940 		}
2941 		prev = path;
2942 	}
2943 }
2944 #else
btree_trans_verify_sorted_refs(struct btree_trans * trans)2945 static inline void btree_trans_verify_sorted_refs(struct btree_trans *trans) {}
btree_trans_verify_sorted(struct btree_trans * trans)2946 static inline void btree_trans_verify_sorted(struct btree_trans *trans) {}
2947 #endif
2948 
__bch2_btree_trans_sort_paths(struct btree_trans * trans)2949 void __bch2_btree_trans_sort_paths(struct btree_trans *trans)
2950 {
2951 	int i, l = 0, r = trans->nr_sorted, inc = 1;
2952 	bool swapped;
2953 
2954 	btree_trans_verify_sorted_refs(trans);
2955 
2956 	if (trans->paths_sorted)
2957 		goto out;
2958 
2959 	/*
2960 	 * Cocktail shaker sort: this is efficient because iterators will be
2961 	 * mostly sorted.
2962 	 */
2963 	do {
2964 		swapped = false;
2965 
2966 		for (i = inc > 0 ? l : r - 2;
2967 		     i + 1 < r && i >= l;
2968 		     i += inc) {
2969 			if (btree_path_cmp(trans->paths + trans->sorted[i],
2970 					   trans->paths + trans->sorted[i + 1]) > 0) {
2971 				swap(trans->sorted[i], trans->sorted[i + 1]);
2972 				trans->paths[trans->sorted[i]].sorted_idx = i;
2973 				trans->paths[trans->sorted[i + 1]].sorted_idx = i + 1;
2974 				swapped = true;
2975 			}
2976 		}
2977 
2978 		if (inc > 0)
2979 			--r;
2980 		else
2981 			l++;
2982 		inc = -inc;
2983 	} while (swapped);
2984 
2985 	trans->paths_sorted = true;
2986 out:
2987 	btree_trans_verify_sorted(trans);
2988 }
2989 
btree_path_list_remove(struct btree_trans * trans,struct btree_path * path)2990 static inline void btree_path_list_remove(struct btree_trans *trans,
2991 					  struct btree_path *path)
2992 {
2993 	EBUG_ON(path->sorted_idx >= trans->nr_sorted);
2994 #ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2995 	trans->nr_sorted--;
2996 	memmove_u64s_down_small(trans->sorted + path->sorted_idx,
2997 				trans->sorted + path->sorted_idx + 1,
2998 				DIV_ROUND_UP(trans->nr_sorted - path->sorted_idx,
2999 					     sizeof(u64) / sizeof(btree_path_idx_t)));
3000 #else
3001 	array_remove_item(trans->sorted, trans->nr_sorted, path->sorted_idx);
3002 #endif
3003 	for (unsigned i = path->sorted_idx; i < trans->nr_sorted; i++)
3004 		trans->paths[trans->sorted[i]].sorted_idx = i;
3005 }
3006 
btree_path_list_add(struct btree_trans * trans,btree_path_idx_t pos,btree_path_idx_t path_idx)3007 static inline void btree_path_list_add(struct btree_trans *trans,
3008 				       btree_path_idx_t pos,
3009 				       btree_path_idx_t path_idx)
3010 {
3011 	struct btree_path *path = trans->paths + path_idx;
3012 
3013 	path->sorted_idx = pos ? trans->paths[pos].sorted_idx + 1 : trans->nr_sorted;
3014 
3015 #ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
3016 	memmove_u64s_up_small(trans->sorted + path->sorted_idx + 1,
3017 			      trans->sorted + path->sorted_idx,
3018 			      DIV_ROUND_UP(trans->nr_sorted - path->sorted_idx,
3019 					   sizeof(u64) / sizeof(btree_path_idx_t)));
3020 	trans->nr_sorted++;
3021 	trans->sorted[path->sorted_idx] = path_idx;
3022 #else
3023 	array_insert_item(trans->sorted, trans->nr_sorted, path->sorted_idx, path_idx);
3024 #endif
3025 
3026 	for (unsigned i = path->sorted_idx; i < trans->nr_sorted; i++)
3027 		trans->paths[trans->sorted[i]].sorted_idx = i;
3028 
3029 	btree_trans_verify_sorted_refs(trans);
3030 }
3031 
bch2_trans_iter_exit(struct btree_trans * trans,struct btree_iter * iter)3032 void bch2_trans_iter_exit(struct btree_trans *trans, struct btree_iter *iter)
3033 {
3034 	if (iter->update_path)
3035 		bch2_path_put_nokeep(trans, iter->update_path,
3036 			      iter->flags & BTREE_ITER_intent);
3037 	if (iter->path)
3038 		bch2_path_put(trans, iter->path,
3039 			      iter->flags & BTREE_ITER_intent);
3040 	if (iter->key_cache_path)
3041 		bch2_path_put(trans, iter->key_cache_path,
3042 			      iter->flags & BTREE_ITER_intent);
3043 	iter->path		= 0;
3044 	iter->update_path	= 0;
3045 	iter->key_cache_path	= 0;
3046 	iter->trans		= NULL;
3047 }
3048 
bch2_trans_iter_init_outlined(struct btree_trans * trans,struct btree_iter * iter,enum btree_id btree_id,struct bpos pos,unsigned flags)3049 void bch2_trans_iter_init_outlined(struct btree_trans *trans,
3050 			  struct btree_iter *iter,
3051 			  enum btree_id btree_id, struct bpos pos,
3052 			  unsigned flags)
3053 {
3054 	bch2_trans_iter_init_common(trans, iter, btree_id, pos, 0, 0,
3055 			       bch2_btree_iter_flags(trans, btree_id, 0, flags),
3056 			       _RET_IP_);
3057 }
3058 
bch2_trans_node_iter_init(struct btree_trans * trans,struct btree_iter * iter,enum btree_id btree_id,struct bpos pos,unsigned locks_want,unsigned depth,unsigned flags)3059 void bch2_trans_node_iter_init(struct btree_trans *trans,
3060 			       struct btree_iter *iter,
3061 			       enum btree_id btree_id,
3062 			       struct bpos pos,
3063 			       unsigned locks_want,
3064 			       unsigned depth,
3065 			       unsigned flags)
3066 {
3067 	flags |= BTREE_ITER_not_extents;
3068 	flags |= BTREE_ITER_snapshot_field;
3069 	flags |= BTREE_ITER_all_snapshots;
3070 
3071 	if (!depth && btree_id_cached(trans->c, btree_id))
3072 		flags |= BTREE_ITER_with_key_cache;
3073 
3074 	bch2_trans_iter_init_common(trans, iter, btree_id, pos, locks_want, depth,
3075 			       bch2_btree_iter_flags(trans, btree_id, depth, flags),
3076 			       _RET_IP_);
3077 
3078 	iter->min_depth	= depth;
3079 
3080 	struct btree_path *path = btree_iter_path(trans, iter);
3081 	BUG_ON(path->locks_want	 < min(locks_want, BTREE_MAX_DEPTH));
3082 	BUG_ON(path->level	!= depth);
3083 	BUG_ON(iter->min_depth	!= depth);
3084 }
3085 
bch2_trans_copy_iter(struct btree_iter * dst,struct btree_iter * src)3086 void bch2_trans_copy_iter(struct btree_iter *dst, struct btree_iter *src)
3087 {
3088 	struct btree_trans *trans = src->trans;
3089 
3090 	*dst = *src;
3091 #ifdef TRACK_PATH_ALLOCATED
3092 	dst->ip_allocated = _RET_IP_;
3093 #endif
3094 	if (src->path)
3095 		__btree_path_get(trans, trans->paths + src->path, src->flags & BTREE_ITER_intent);
3096 	if (src->update_path)
3097 		__btree_path_get(trans, trans->paths + src->update_path, src->flags & BTREE_ITER_intent);
3098 	dst->key_cache_path = 0;
3099 }
3100 
__bch2_trans_kmalloc(struct btree_trans * trans,size_t size)3101 void *__bch2_trans_kmalloc(struct btree_trans *trans, size_t size)
3102 {
3103 	struct bch_fs *c = trans->c;
3104 	unsigned new_top = trans->mem_top + size;
3105 	unsigned old_bytes = trans->mem_bytes;
3106 	unsigned new_bytes = roundup_pow_of_two(new_top);
3107 	int ret;
3108 	void *new_mem;
3109 	void *p;
3110 
3111 	WARN_ON_ONCE(new_bytes > BTREE_TRANS_MEM_MAX);
3112 
3113 	ret = trans_maybe_inject_restart(trans, _RET_IP_);
3114 	if (ret)
3115 		return ERR_PTR(ret);
3116 
3117 	struct btree_transaction_stats *s = btree_trans_stats(trans);
3118 	s->max_mem = max(s->max_mem, new_bytes);
3119 
3120 	if (trans->used_mempool) {
3121 		if (trans->mem_bytes >= new_bytes)
3122 			goto out_change_top;
3123 
3124 		/* No more space from mempool item, need malloc new one */
3125 		new_mem = kmalloc(new_bytes, GFP_NOWAIT|__GFP_NOWARN);
3126 		if (unlikely(!new_mem)) {
3127 			bch2_trans_unlock(trans);
3128 
3129 			new_mem = kmalloc(new_bytes, GFP_KERNEL);
3130 			if (!new_mem)
3131 				return ERR_PTR(-BCH_ERR_ENOMEM_trans_kmalloc);
3132 
3133 			ret = bch2_trans_relock(trans);
3134 			if (ret) {
3135 				kfree(new_mem);
3136 				return ERR_PTR(ret);
3137 			}
3138 		}
3139 		memcpy(new_mem, trans->mem, trans->mem_top);
3140 		trans->used_mempool = false;
3141 		mempool_free(trans->mem, &c->btree_trans_mem_pool);
3142 		goto out_new_mem;
3143 	}
3144 
3145 	new_mem = krealloc(trans->mem, new_bytes, GFP_NOWAIT|__GFP_NOWARN);
3146 	if (unlikely(!new_mem)) {
3147 		bch2_trans_unlock(trans);
3148 
3149 		new_mem = krealloc(trans->mem, new_bytes, GFP_KERNEL);
3150 		if (!new_mem && new_bytes <= BTREE_TRANS_MEM_MAX) {
3151 			new_mem = mempool_alloc(&c->btree_trans_mem_pool, GFP_KERNEL);
3152 			new_bytes = BTREE_TRANS_MEM_MAX;
3153 			memcpy(new_mem, trans->mem, trans->mem_top);
3154 			trans->used_mempool = true;
3155 			kfree(trans->mem);
3156 		}
3157 
3158 		if (!new_mem)
3159 			return ERR_PTR(-BCH_ERR_ENOMEM_trans_kmalloc);
3160 
3161 		trans->mem = new_mem;
3162 		trans->mem_bytes = new_bytes;
3163 
3164 		ret = bch2_trans_relock(trans);
3165 		if (ret)
3166 			return ERR_PTR(ret);
3167 	}
3168 out_new_mem:
3169 	trans->mem = new_mem;
3170 	trans->mem_bytes = new_bytes;
3171 
3172 	if (old_bytes) {
3173 		trace_and_count(c, trans_restart_mem_realloced, trans, _RET_IP_, new_bytes);
3174 		return ERR_PTR(btree_trans_restart_ip(trans,
3175 					BCH_ERR_transaction_restart_mem_realloced, _RET_IP_));
3176 	}
3177 out_change_top:
3178 	p = trans->mem + trans->mem_top;
3179 	trans->mem_top += size;
3180 	memset(p, 0, size);
3181 	return p;
3182 }
3183 
check_srcu_held_too_long(struct btree_trans * trans)3184 static inline void check_srcu_held_too_long(struct btree_trans *trans)
3185 {
3186 	WARN(trans->srcu_held && time_after(jiffies, trans->srcu_lock_time + HZ * 10),
3187 	     "btree trans held srcu lock (delaying memory reclaim) for %lu seconds",
3188 	     (jiffies - trans->srcu_lock_time) / HZ);
3189 }
3190 
bch2_trans_srcu_unlock(struct btree_trans * trans)3191 void bch2_trans_srcu_unlock(struct btree_trans *trans)
3192 {
3193 	if (trans->srcu_held) {
3194 		struct bch_fs *c = trans->c;
3195 		struct btree_path *path;
3196 		unsigned i;
3197 
3198 		trans_for_each_path(trans, path, i)
3199 			if (path->cached && !btree_node_locked(path, 0))
3200 				path->l[0].b = ERR_PTR(-BCH_ERR_no_btree_node_srcu_reset);
3201 
3202 		check_srcu_held_too_long(trans);
3203 		srcu_read_unlock(&c->btree_trans_barrier, trans->srcu_idx);
3204 		trans->srcu_held = false;
3205 	}
3206 }
3207 
bch2_trans_srcu_lock(struct btree_trans * trans)3208 static void bch2_trans_srcu_lock(struct btree_trans *trans)
3209 {
3210 	if (!trans->srcu_held) {
3211 		trans->srcu_idx = srcu_read_lock(&trans->c->btree_trans_barrier);
3212 		trans->srcu_lock_time	= jiffies;
3213 		trans->srcu_held = true;
3214 	}
3215 }
3216 
3217 /**
3218  * bch2_trans_begin() - reset a transaction after a interrupted attempt
3219  * @trans: transaction to reset
3220  *
3221  * Returns:	current restart counter, to be used with trans_was_restarted()
3222  *
3223  * While iterating over nodes or updating nodes a attempt to lock a btree node
3224  * may return BCH_ERR_transaction_restart when the trylock fails. When this
3225  * occurs bch2_trans_begin() should be called and the transaction retried.
3226  */
bch2_trans_begin(struct btree_trans * trans)3227 u32 bch2_trans_begin(struct btree_trans *trans)
3228 {
3229 	struct btree_path *path;
3230 	unsigned i;
3231 	u64 now;
3232 
3233 	bch2_trans_reset_updates(trans);
3234 
3235 	trans->restart_count++;
3236 	trans->mem_top			= 0;
3237 	trans->journal_entries		= NULL;
3238 
3239 	trans_for_each_path(trans, path, i) {
3240 		path->should_be_locked = false;
3241 
3242 		/*
3243 		 * If the transaction wasn't restarted, we're presuming to be
3244 		 * doing something new: dont keep iterators excpt the ones that
3245 		 * are in use - except for the subvolumes btree:
3246 		 */
3247 		if (!trans->restarted && path->btree_id != BTREE_ID_subvolumes)
3248 			path->preserve = false;
3249 
3250 		/*
3251 		 * XXX: we probably shouldn't be doing this if the transaction
3252 		 * was restarted, but currently we still overflow transaction
3253 		 * iterators if we do that
3254 		 */
3255 		if (!path->ref && !path->preserve)
3256 			__bch2_path_free(trans, i);
3257 		else
3258 			path->preserve = false;
3259 	}
3260 
3261 	now = local_clock();
3262 
3263 	if (!IS_ENABLED(CONFIG_BCACHEFS_NO_LATENCY_ACCT) &&
3264 	    time_after64(now, trans->last_begin_time + 10))
3265 		__bch2_time_stats_update(&btree_trans_stats(trans)->duration,
3266 					 trans->last_begin_time, now);
3267 
3268 	if (!trans->restarted &&
3269 	    (need_resched() ||
3270 	     time_after64(now, trans->last_begin_time + BTREE_TRANS_MAX_LOCK_HOLD_TIME_NS))) {
3271 		bch2_trans_unlock(trans);
3272 		cond_resched();
3273 		now = local_clock();
3274 	}
3275 	trans->last_begin_time = now;
3276 
3277 	if (unlikely(trans->srcu_held &&
3278 		     time_after(jiffies, trans->srcu_lock_time + msecs_to_jiffies(10))))
3279 		bch2_trans_srcu_unlock(trans);
3280 
3281 	trans->last_begin_ip = _RET_IP_;
3282 
3283 #ifdef CONFIG_BCACHEFS_INJECT_TRANSACTION_RESTARTS
3284 	if (trans->restarted) {
3285 		trans->restart_count_this_trans++;
3286 	} else {
3287 		trans->restart_count_this_trans = 0;
3288 	}
3289 #endif
3290 
3291 	trans_set_locked(trans, false);
3292 
3293 	if (trans->restarted) {
3294 		bch2_btree_path_traverse_all(trans);
3295 		trans->notrace_relock_fail = false;
3296 	}
3297 
3298 	bch2_trans_verify_not_unlocked_or_in_restart(trans);
3299 	return trans->restart_count;
3300 }
3301 
3302 const char *bch2_btree_transaction_fns[BCH_TRANSACTIONS_NR] = { "(unknown)" };
3303 
bch2_trans_get_fn_idx(const char * fn)3304 unsigned bch2_trans_get_fn_idx(const char *fn)
3305 {
3306 	for (unsigned i = 0; i < ARRAY_SIZE(bch2_btree_transaction_fns); i++)
3307 		if (!bch2_btree_transaction_fns[i] ||
3308 		    bch2_btree_transaction_fns[i] == fn) {
3309 			bch2_btree_transaction_fns[i] = fn;
3310 			return i;
3311 		}
3312 
3313 	pr_warn_once("BCH_TRANSACTIONS_NR not big enough!");
3314 	return 0;
3315 }
3316 
__bch2_trans_get(struct bch_fs * c,unsigned fn_idx)3317 struct btree_trans *__bch2_trans_get(struct bch_fs *c, unsigned fn_idx)
3318 	__acquires(&c->btree_trans_barrier)
3319 {
3320 	struct btree_trans *trans;
3321 
3322 	if (IS_ENABLED(__KERNEL__)) {
3323 		trans = this_cpu_xchg(c->btree_trans_bufs->trans, NULL);
3324 		if (trans) {
3325 			memset(trans, 0, offsetof(struct btree_trans, list));
3326 			goto got_trans;
3327 		}
3328 	}
3329 
3330 	trans = mempool_alloc(&c->btree_trans_pool, GFP_NOFS);
3331 	memset(trans, 0, sizeof(*trans));
3332 
3333 	seqmutex_lock(&c->btree_trans_lock);
3334 	if (IS_ENABLED(CONFIG_BCACHEFS_DEBUG)) {
3335 		struct btree_trans *pos;
3336 		pid_t pid = current->pid;
3337 
3338 		trans->locking_wait.task = current;
3339 
3340 		list_for_each_entry(pos, &c->btree_trans_list, list) {
3341 			struct task_struct *pos_task = READ_ONCE(pos->locking_wait.task);
3342 			/*
3343 			 * We'd much prefer to be stricter here and completely
3344 			 * disallow multiple btree_trans in the same thread -
3345 			 * but the data move path calls bch2_write when we
3346 			 * already have a btree_trans initialized.
3347 			 */
3348 			BUG_ON(pos_task &&
3349 			       pid == pos_task->pid &&
3350 			       pos->locked);
3351 		}
3352 	}
3353 
3354 	list_add(&trans->list, &c->btree_trans_list);
3355 	seqmutex_unlock(&c->btree_trans_lock);
3356 got_trans:
3357 	trans->c		= c;
3358 	trans->last_begin_time	= local_clock();
3359 	trans->fn_idx		= fn_idx;
3360 	trans->locking_wait.task = current;
3361 	trans->journal_replay_not_finished =
3362 		unlikely(!test_bit(JOURNAL_replay_done, &c->journal.flags)) &&
3363 		atomic_inc_not_zero(&c->journal_keys.ref);
3364 	trans->nr_paths		= ARRAY_SIZE(trans->_paths);
3365 	trans->paths_allocated	= trans->_paths_allocated;
3366 	trans->sorted		= trans->_sorted;
3367 	trans->paths		= trans->_paths;
3368 	trans->updates		= trans->_updates;
3369 
3370 	*trans_paths_nr(trans->paths) = BTREE_ITER_INITIAL;
3371 
3372 	trans->paths_allocated[0] = 1;
3373 
3374 	static struct lock_class_key lockdep_key;
3375 	lockdep_init_map(&trans->dep_map, "bcachefs_btree", &lockdep_key, 0);
3376 
3377 	if (fn_idx < BCH_TRANSACTIONS_NR) {
3378 		trans->fn = bch2_btree_transaction_fns[fn_idx];
3379 
3380 		struct btree_transaction_stats *s = &c->btree_transaction_stats[fn_idx];
3381 
3382 		if (s->max_mem) {
3383 			unsigned expected_mem_bytes = roundup_pow_of_two(s->max_mem);
3384 
3385 			trans->mem = kmalloc(expected_mem_bytes, GFP_KERNEL);
3386 			if (likely(trans->mem))
3387 				trans->mem_bytes = expected_mem_bytes;
3388 		}
3389 
3390 		trans->nr_paths_max = s->nr_max_paths;
3391 		trans->journal_entries_size = s->journal_entries_size;
3392 	}
3393 
3394 	trans->srcu_idx		= srcu_read_lock(&c->btree_trans_barrier);
3395 	trans->srcu_lock_time	= jiffies;
3396 	trans->srcu_held	= true;
3397 	trans_set_locked(trans, false);
3398 
3399 	closure_init_stack_release(&trans->ref);
3400 	return trans;
3401 }
3402 
check_btree_paths_leaked(struct btree_trans * trans)3403 static void check_btree_paths_leaked(struct btree_trans *trans)
3404 {
3405 #ifdef CONFIG_BCACHEFS_DEBUG
3406 	struct bch_fs *c = trans->c;
3407 	struct btree_path *path;
3408 	unsigned i;
3409 
3410 	trans_for_each_path(trans, path, i)
3411 		if (path->ref)
3412 			goto leaked;
3413 	return;
3414 leaked:
3415 	bch_err(c, "btree paths leaked from %s!", trans->fn);
3416 	trans_for_each_path(trans, path, i)
3417 		if (path->ref)
3418 			printk(KERN_ERR "  btree %s %pS\n",
3419 			       bch2_btree_id_str(path->btree_id),
3420 			       (void *) path->ip_allocated);
3421 	/* Be noisy about this: */
3422 	bch2_fatal_error(c);
3423 #endif
3424 }
3425 
bch2_trans_put(struct btree_trans * trans)3426 void bch2_trans_put(struct btree_trans *trans)
3427 	__releases(&c->btree_trans_barrier)
3428 {
3429 	struct bch_fs *c = trans->c;
3430 
3431 	if (trans->restarted)
3432 		bch2_trans_in_restart_error(trans);
3433 
3434 	bch2_trans_unlock(trans);
3435 
3436 	trans_for_each_update(trans, i)
3437 		__btree_path_put(trans, trans->paths + i->path, true);
3438 	trans->nr_updates	= 0;
3439 
3440 	check_btree_paths_leaked(trans);
3441 
3442 	if (trans->srcu_held) {
3443 		check_srcu_held_too_long(trans);
3444 		srcu_read_unlock(&c->btree_trans_barrier, trans->srcu_idx);
3445 	}
3446 
3447 	if (unlikely(trans->journal_replay_not_finished))
3448 		bch2_journal_keys_put(c);
3449 
3450 	/*
3451 	 * trans->ref protects trans->locking_wait.task, btree_paths array; used
3452 	 * by cycle detector
3453 	 */
3454 	closure_return_sync(&trans->ref);
3455 	trans->locking_wait.task = NULL;
3456 
3457 #ifdef CONFIG_BCACHEFS_DEBUG
3458 	darray_exit(&trans->last_restarted_trace);
3459 #endif
3460 
3461 	unsigned long *paths_allocated = trans->paths_allocated;
3462 	trans->paths_allocated	= NULL;
3463 	trans->paths		= NULL;
3464 
3465 	if (paths_allocated != trans->_paths_allocated)
3466 		kvfree_rcu_mightsleep(paths_allocated);
3467 
3468 	if (trans->used_mempool)
3469 		mempool_free(trans->mem, &c->btree_trans_mem_pool);
3470 	else
3471 		kfree(trans->mem);
3472 
3473 	/* Userspace doesn't have a real percpu implementation: */
3474 	if (IS_ENABLED(__KERNEL__))
3475 		trans = this_cpu_xchg(c->btree_trans_bufs->trans, trans);
3476 
3477 	if (trans) {
3478 		seqmutex_lock(&c->btree_trans_lock);
3479 		list_del(&trans->list);
3480 		seqmutex_unlock(&c->btree_trans_lock);
3481 
3482 		mempool_free(trans, &c->btree_trans_pool);
3483 	}
3484 }
3485 
bch2_current_has_btree_trans(struct bch_fs * c)3486 bool bch2_current_has_btree_trans(struct bch_fs *c)
3487 {
3488 	seqmutex_lock(&c->btree_trans_lock);
3489 	struct btree_trans *trans;
3490 	bool ret = false;
3491 	list_for_each_entry(trans, &c->btree_trans_list, list)
3492 		if (trans->locking_wait.task == current &&
3493 		    trans->locked) {
3494 			ret = true;
3495 			break;
3496 		}
3497 	seqmutex_unlock(&c->btree_trans_lock);
3498 	return ret;
3499 }
3500 
3501 static void __maybe_unused
bch2_btree_bkey_cached_common_to_text(struct printbuf * out,struct btree_bkey_cached_common * b)3502 bch2_btree_bkey_cached_common_to_text(struct printbuf *out,
3503 				      struct btree_bkey_cached_common *b)
3504 {
3505 	struct six_lock_count c = six_lock_counts(&b->lock);
3506 	struct task_struct *owner;
3507 	pid_t pid;
3508 
3509 	rcu_read_lock();
3510 	owner = READ_ONCE(b->lock.owner);
3511 	pid = owner ? owner->pid : 0;
3512 	rcu_read_unlock();
3513 
3514 	prt_printf(out, "\t%px %c ", b, b->cached ? 'c' : 'b');
3515 	bch2_btree_id_to_text(out, b->btree_id);
3516 	prt_printf(out, " l=%u:", b->level);
3517 	bch2_bpos_to_text(out, btree_node_pos(b));
3518 
3519 	prt_printf(out, "\t locks %u:%u:%u held by pid %u",
3520 		   c.n[0], c.n[1], c.n[2], pid);
3521 }
3522 
bch2_btree_trans_to_text(struct printbuf * out,struct btree_trans * trans)3523 void bch2_btree_trans_to_text(struct printbuf *out, struct btree_trans *trans)
3524 {
3525 	struct btree_bkey_cached_common *b;
3526 	static char lock_types[] = { 'r', 'i', 'w' };
3527 	struct task_struct *task = READ_ONCE(trans->locking_wait.task);
3528 	unsigned l, idx;
3529 
3530 	/* before rcu_read_lock(): */
3531 	bch2_printbuf_make_room(out, 4096);
3532 
3533 	if (!out->nr_tabstops) {
3534 		printbuf_tabstop_push(out, 16);
3535 		printbuf_tabstop_push(out, 32);
3536 	}
3537 
3538 	prt_printf(out, "%i %s\n", task ? task->pid : 0, trans->fn);
3539 
3540 	/* trans->paths is rcu protected vs. freeing */
3541 	rcu_read_lock();
3542 	out->atomic++;
3543 
3544 	struct btree_path *paths = rcu_dereference(trans->paths);
3545 	if (!paths)
3546 		goto out;
3547 
3548 	unsigned long *paths_allocated = trans_paths_allocated(paths);
3549 
3550 	trans_for_each_path_idx_from(paths_allocated, *trans_paths_nr(paths), idx, 1) {
3551 		struct btree_path *path = paths + idx;
3552 		if (!path->nodes_locked)
3553 			continue;
3554 
3555 		prt_printf(out, "  path %u %c ",
3556 			   idx,
3557 			   path->cached ? 'c' : 'b');
3558 		bch2_btree_id_to_text(out, path->btree_id);
3559 		prt_printf(out, " l=%u:", path->level);
3560 		bch2_bpos_to_text(out, path->pos);
3561 		prt_newline(out);
3562 
3563 		for (l = 0; l < BTREE_MAX_DEPTH; l++) {
3564 			if (btree_node_locked(path, l) &&
3565 			    !IS_ERR_OR_NULL(b = (void *) READ_ONCE(path->l[l].b))) {
3566 				prt_printf(out, "    %c l=%u ",
3567 					   lock_types[btree_node_locked_type(path, l)], l);
3568 				bch2_btree_bkey_cached_common_to_text(out, b);
3569 				prt_newline(out);
3570 			}
3571 		}
3572 	}
3573 
3574 	b = READ_ONCE(trans->locking);
3575 	if (b) {
3576 		prt_printf(out, "  blocked for %lluus on\n",
3577 			   div_u64(local_clock() - trans->locking_wait.start_time, 1000));
3578 		prt_printf(out, "    %c", lock_types[trans->locking_wait.lock_want]);
3579 		bch2_btree_bkey_cached_common_to_text(out, b);
3580 		prt_newline(out);
3581 	}
3582 out:
3583 	--out->atomic;
3584 	rcu_read_unlock();
3585 }
3586 
bch2_fs_btree_iter_exit(struct bch_fs * c)3587 void bch2_fs_btree_iter_exit(struct bch_fs *c)
3588 {
3589 	struct btree_transaction_stats *s;
3590 	struct btree_trans *trans;
3591 	int cpu;
3592 
3593 	if (c->btree_trans_bufs)
3594 		for_each_possible_cpu(cpu) {
3595 			struct btree_trans *trans =
3596 				per_cpu_ptr(c->btree_trans_bufs, cpu)->trans;
3597 
3598 			if (trans) {
3599 				seqmutex_lock(&c->btree_trans_lock);
3600 				list_del(&trans->list);
3601 				seqmutex_unlock(&c->btree_trans_lock);
3602 			}
3603 			kfree(trans);
3604 		}
3605 	free_percpu(c->btree_trans_bufs);
3606 
3607 	trans = list_first_entry_or_null(&c->btree_trans_list, struct btree_trans, list);
3608 	if (trans)
3609 		panic("%s leaked btree_trans\n", trans->fn);
3610 
3611 	for (s = c->btree_transaction_stats;
3612 	     s < c->btree_transaction_stats + ARRAY_SIZE(c->btree_transaction_stats);
3613 	     s++) {
3614 		kfree(s->max_paths_text);
3615 		bch2_time_stats_exit(&s->lock_hold_times);
3616 	}
3617 
3618 	if (c->btree_trans_barrier_initialized) {
3619 		synchronize_srcu_expedited(&c->btree_trans_barrier);
3620 		cleanup_srcu_struct(&c->btree_trans_barrier);
3621 	}
3622 	mempool_exit(&c->btree_trans_mem_pool);
3623 	mempool_exit(&c->btree_trans_pool);
3624 }
3625 
bch2_fs_btree_iter_init_early(struct bch_fs * c)3626 void bch2_fs_btree_iter_init_early(struct bch_fs *c)
3627 {
3628 	struct btree_transaction_stats *s;
3629 
3630 	for (s = c->btree_transaction_stats;
3631 	     s < c->btree_transaction_stats + ARRAY_SIZE(c->btree_transaction_stats);
3632 	     s++) {
3633 		bch2_time_stats_init(&s->duration);
3634 		bch2_time_stats_init(&s->lock_hold_times);
3635 		mutex_init(&s->lock);
3636 	}
3637 
3638 	INIT_LIST_HEAD(&c->btree_trans_list);
3639 	seqmutex_init(&c->btree_trans_lock);
3640 }
3641 
bch2_fs_btree_iter_init(struct bch_fs * c)3642 int bch2_fs_btree_iter_init(struct bch_fs *c)
3643 {
3644 	int ret;
3645 
3646 	c->btree_trans_bufs = alloc_percpu(struct btree_trans_buf);
3647 	if (!c->btree_trans_bufs)
3648 		return -ENOMEM;
3649 
3650 	ret   = mempool_init_kmalloc_pool(&c->btree_trans_pool, 1,
3651 					  sizeof(struct btree_trans)) ?:
3652 		mempool_init_kmalloc_pool(&c->btree_trans_mem_pool, 1,
3653 					  BTREE_TRANS_MEM_MAX) ?:
3654 		init_srcu_struct(&c->btree_trans_barrier);
3655 	if (ret)
3656 		return ret;
3657 
3658 	/*
3659 	 * static annotation (hackily done) for lock ordering of reclaim vs.
3660 	 * btree node locks:
3661 	 */
3662 #ifdef CONFIG_LOCKDEP
3663 	fs_reclaim_acquire(GFP_KERNEL);
3664 	struct btree_trans *trans = bch2_trans_get(c);
3665 	trans_set_locked(trans, false);
3666 	bch2_trans_put(trans);
3667 	fs_reclaim_release(GFP_KERNEL);
3668 #endif
3669 
3670 	c->btree_trans_barrier_initialized = true;
3671 	return 0;
3672 
3673 }
3674