// SPDX-License-Identifier: GPL-2.0

#include "bcachefs.h"
#include "bkey_methods.h"
#include "bkey_buf.h"
#include "btree_cache.h"
#include "btree_iter.h"
#include "btree_journal_iter.h"
#include "btree_key_cache.h"
#include "btree_locking.h"
#include "btree_update.h"
#include "debug.h"
#include "error.h"
#include "extents.h"
#include "journal.h"
#include "journal_io.h"
#include "replicas.h"
#include "snapshot.h"
#include "trace.h"

#include <linux/random.h>
#include <linux/prefetch.h>

static inline void btree_path_list_remove(struct btree_trans *, struct btree_path *);
static inline void btree_path_list_add(struct btree_trans *,
				       btree_path_idx_t, btree_path_idx_t);

static inline unsigned long btree_iter_ip_allocated(struct btree_iter *iter)
{
#ifdef TRACK_PATH_ALLOCATED
	return iter->ip_allocated;
#else
	return 0;
#endif
}

static btree_path_idx_t btree_path_alloc(struct btree_trans *, btree_path_idx_t);
static void bch2_trans_srcu_lock(struct btree_trans *);

static inline int __btree_path_cmp(const struct btree_path *l,
				   enum btree_id r_btree_id,
				   bool r_cached,
				   struct bpos r_pos,
				   unsigned r_level)
{
	/*
	 * Must match lock ordering as defined by __bch2_btree_node_lock:
	 */
	return  cmp_int(l->btree_id,		r_btree_id) ?:
		cmp_int((int) l->cached,	(int) r_cached) ?:
		bpos_cmp(l->pos,		r_pos) ?:
	       -cmp_int(l->level,		r_level);
}

static inline int btree_path_cmp(const struct btree_path *l,
				 const struct btree_path *r)
{
	return __btree_path_cmp(l, r->btree_id, r->cached, r->pos, r->level);
}

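/*
 * bkey_successor()/bkey_predecessor() step an iterator position by the
 * smallest possible amount. When iterating over every snapshot
 * (BTREE_ITER_all_snapshots), the snapshot field is the least significant
 * part of the position and is stepped directly; otherwise we step past the
 * snapshot dimension entirely and re-pin the position to the iterator's
 * snapshot.
 */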
static inline struct bpos bkey_successor(struct btree_iter *iter, struct bpos p)
{
	/* Are we iterating over keys in all snapshots? */
	if (iter->flags & BTREE_ITER_all_snapshots) {
		p = bpos_successor(p);
	} else {
		p = bpos_nosnap_successor(p);
		p.snapshot = iter->snapshot;
	}

	return p;
}

static inline struct bpos bkey_predecessor(struct btree_iter *iter, struct bpos p)
{
	/* Are we iterating over keys in all snapshots? */
	if (iter->flags & BTREE_ITER_all_snapshots) {
		p = bpos_predecessor(p);
	} else {
		p = bpos_nosnap_predecessor(p);
		p.snapshot = iter->snapshot;
	}

	return p;
}

static inline struct bpos btree_iter_search_key(struct btree_iter *iter)
{
	struct bpos pos = iter->pos;

	if ((iter->flags & BTREE_ITER_is_extents) &&
	    !bkey_eq(pos, POS_MAX))
		pos = bkey_successor(iter, pos);
	return pos;
}

static inline bool btree_path_pos_before_node(struct btree_path *path,
					      struct btree *b)
{
	return bpos_lt(path->pos, b->data->min_key);
}

static inline bool btree_path_pos_after_node(struct btree_path *path,
					     struct btree *b)
{
	return bpos_gt(path->pos, b->key.k.p);
}

static inline bool btree_path_pos_in_node(struct btree_path *path,
					  struct btree *b)
{
	return path->btree_id == b->c.btree_id &&
		!btree_path_pos_before_node(path, b) &&
		!btree_path_pos_after_node(path, b);
}

/* Btree iterator: */

#ifdef CONFIG_BCACHEFS_DEBUG

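/*
 * Iterator/path consistency checks. These are only compiled in with
 * CONFIG_BCACHEFS_DEBUG (see the no-op stubs in the #else branch below),
 * and most are additionally gated on the bch2_debug_check_iterators knob.
 */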
static void bch2_btree_path_verify_cached(struct btree_trans *trans,
					  struct btree_path *path)
{
	struct bkey_cached *ck;
	bool locked = btree_node_locked(path, 0);

	if (!bch2_btree_node_relock(trans, path, 0))
		return;

	ck = (void *) path->l[0].b;
	BUG_ON(ck->key.btree_id != path->btree_id ||
	       !bkey_eq(ck->key.pos, path->pos));

	if (!locked)
		btree_node_unlock(trans, path, 0);
}

static void bch2_btree_path_verify_level(struct btree_trans *trans,
					 struct btree_path *path, unsigned level)
{
	struct btree_path_level *l;
	struct btree_node_iter tmp;
	bool locked;
	struct bkey_packed *p, *k;
	struct printbuf buf1 = PRINTBUF;
	struct printbuf buf2 = PRINTBUF;
	struct printbuf buf3 = PRINTBUF;
	const char *msg;

	if (!bch2_debug_check_iterators)
		return;

	l	= &path->l[level];
	tmp	= l->iter;
	locked	= btree_node_locked(path, level);

	if (path->cached) {
		if (!level)
			bch2_btree_path_verify_cached(trans, path);
		return;
	}

	if (!btree_path_node(path, level))
		return;

	if (!bch2_btree_node_relock_notrace(trans, path, level))
		return;

	BUG_ON(!btree_path_pos_in_node(path, l->b));

	bch2_btree_node_iter_verify(&l->iter, l->b);

	/*
	 * For interior nodes, the iterator will have skipped past deleted keys:
	 */
	p = level
		? bch2_btree_node_iter_prev(&tmp, l->b)
		: bch2_btree_node_iter_prev_all(&tmp, l->b);
	k = bch2_btree_node_iter_peek_all(&l->iter, l->b);

	if (p && bkey_iter_pos_cmp(l->b, p, &path->pos) >= 0) {
		msg = "before";
		goto err;
	}

	if (k && bkey_iter_pos_cmp(l->b, k, &path->pos) < 0) {
		msg = "after";
		goto err;
	}

	if (!locked)
		btree_node_unlock(trans, path, level);
	return;
err:
	bch2_bpos_to_text(&buf1, path->pos);

	if (p) {
		struct bkey uk = bkey_unpack_key(l->b, p);

		bch2_bkey_to_text(&buf2, &uk);
	} else {
		prt_printf(&buf2, "(none)");
	}

	if (k) {
		struct bkey uk = bkey_unpack_key(l->b, k);

		bch2_bkey_to_text(&buf3, &uk);
	} else {
		prt_printf(&buf3, "(none)");
	}

	panic("path should be %s key at level %u:\n"
	      "path pos %s\n"
	      "prev key %s\n"
	      "cur  key %s\n",
	      msg, level, buf1.buf, buf2.buf, buf3.buf);
}

static void bch2_btree_path_verify(struct btree_trans *trans,
				   struct btree_path *path)
{
	struct bch_fs *c = trans->c;

	for (unsigned i = 0; i < (!path->cached ? BTREE_MAX_DEPTH : 1); i++) {
		if (!path->l[i].b) {
			BUG_ON(!path->cached &&
			       bch2_btree_id_root(c, path->btree_id)->b->c.level > i);
			break;
		}

		bch2_btree_path_verify_level(trans, path, i);
	}

	bch2_btree_path_verify_locks(path);
}

void bch2_trans_verify_paths(struct btree_trans *trans)
{
	struct btree_path *path;
	unsigned iter;

	trans_for_each_path(trans, path, iter)
		bch2_btree_path_verify(trans, path);
}

static void bch2_btree_iter_verify(struct btree_iter *iter)
{
	struct btree_trans *trans = iter->trans;

	BUG_ON(!!(iter->flags & BTREE_ITER_cached) != btree_iter_path(trans, iter)->cached);

	BUG_ON((iter->flags & BTREE_ITER_is_extents) &&
	       (iter->flags & BTREE_ITER_all_snapshots));

	BUG_ON(!(iter->flags & BTREE_ITER_snapshot_field) &&
	       (iter->flags & BTREE_ITER_all_snapshots) &&
	       !btree_type_has_snapshot_field(iter->btree_id));

	if (iter->update_path)
		bch2_btree_path_verify(trans, &trans->paths[iter->update_path]);
	bch2_btree_path_verify(trans, btree_iter_path(trans, iter));
}

static void bch2_btree_iter_verify_entry_exit(struct btree_iter *iter)
{
	BUG_ON((iter->flags & BTREE_ITER_filter_snapshots) &&
	       !iter->pos.snapshot);

	BUG_ON(!(iter->flags & BTREE_ITER_all_snapshots) &&
	       iter->pos.snapshot != iter->snapshot);

	BUG_ON(bkey_lt(iter->pos, bkey_start_pos(&iter->k)) ||
	       bkey_gt(iter->pos, iter->k.p));
}

static int bch2_btree_iter_verify_ret(struct btree_iter *iter, struct bkey_s_c k)
{
	struct btree_trans *trans = iter->trans;
	struct btree_iter copy;
	struct bkey_s_c prev;
	int ret = 0;

	if (!bch2_debug_check_iterators)
		return 0;

	if (!(iter->flags & BTREE_ITER_filter_snapshots))
		return 0;

	if (bkey_err(k) || !k.k)
		return 0;

	BUG_ON(!bch2_snapshot_is_ancestor(trans->c,
					  iter->snapshot,
					  k.k->p.snapshot));

	bch2_trans_iter_init(trans, &copy, iter->btree_id, iter->pos,
			     BTREE_ITER_nopreserve|
			     BTREE_ITER_all_snapshots);
	prev = bch2_btree_iter_prev(&copy);
	if (!prev.k)
		goto out;

	ret = bkey_err(prev);
	if (ret)
		goto out;

	if (bkey_eq(prev.k->p, k.k->p) &&
	    bch2_snapshot_is_ancestor(trans->c, iter->snapshot,
				      prev.k->p.snapshot) > 0) {
		struct printbuf buf1 = PRINTBUF, buf2 = PRINTBUF;

		bch2_bkey_to_text(&buf1, k.k);
		bch2_bkey_to_text(&buf2, prev.k);

		panic("iter snap %u\n"
		      "k    %s\n"
		      "prev %s\n",
		      iter->snapshot,
		      buf1.buf, buf2.buf);
	}
out:
	bch2_trans_iter_exit(trans, &copy);
	return ret;
}

void bch2_assert_pos_locked(struct btree_trans *trans, enum btree_id id,
			    struct bpos pos)
{
	bch2_trans_verify_not_unlocked(trans);

	struct btree_path *path;
	struct trans_for_each_path_inorder_iter iter;
	struct printbuf buf = PRINTBUF;

	btree_trans_sort_paths(trans);

	trans_for_each_path_inorder(trans, path, iter) {
		if (path->btree_id != id ||
		    !btree_node_locked(path, 0) ||
		    !path->should_be_locked)
			continue;

		if (!path->cached) {
			if (bkey_ge(pos, path->l[0].b->data->min_key) &&
			    bkey_le(pos, path->l[0].b->key.k.p))
				return;
		} else {
			if (bkey_eq(pos, path->pos))
				return;
		}
	}

	bch2_dump_trans_paths_updates(trans);
	bch2_bpos_to_text(&buf, pos);

	panic("not locked: %s %s\n", bch2_btree_id_str(id), buf.buf);
}

#else

static inline void bch2_btree_path_verify_level(struct btree_trans *trans,
						struct btree_path *path, unsigned l) {}
static inline void bch2_btree_path_verify(struct btree_trans *trans,
					  struct btree_path *path) {}
static inline void bch2_btree_iter_verify(struct btree_iter *iter) {}
static inline void bch2_btree_iter_verify_entry_exit(struct btree_iter *iter) {}
static inline int bch2_btree_iter_verify_ret(struct btree_iter *iter, struct bkey_s_c k) { return 0; }

#endif

/* Btree path: fixups after btree updates */

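/*
 * Node iterators cache their position within each bset as offsets into the
 * node. When keys are inserted into or removed from a bset, those offsets
 * (and the bset's end offset) go stale; the helpers below repoint every
 * iterator pointing into the affected node.
 */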
static void btree_node_iter_set_set_pos(struct btree_node_iter *iter,
					struct btree *b,
					struct bset_tree *t,
					struct bkey_packed *k)
{
	struct btree_node_iter_set *set;

	btree_node_iter_for_each(iter, set)
		if (set->end == t->end_offset) {
			set->k = __btree_node_key_to_offset(b, k);
			bch2_btree_node_iter_sort(iter, b);
			return;
		}

	bch2_btree_node_iter_push(iter, b, k, btree_bkey_last(b, t));
}

static void __bch2_btree_path_fix_key_modified(struct btree_path *path,
					       struct btree *b,
					       struct bkey_packed *where)
{
	struct btree_path_level *l = &path->l[b->c.level];

	if (where != bch2_btree_node_iter_peek_all(&l->iter, l->b))
		return;

	if (bkey_iter_pos_cmp(l->b, where, &path->pos) < 0)
		bch2_btree_node_iter_advance(&l->iter, l->b);
}

void bch2_btree_path_fix_key_modified(struct btree_trans *trans,
				      struct btree *b,
				      struct bkey_packed *where)
{
	struct btree_path *path;
	unsigned i;

	trans_for_each_path_with_node(trans, b, path, i) {
		__bch2_btree_path_fix_key_modified(path, b, where);
		bch2_btree_path_verify_level(trans, path, b->c.level);
	}
}

static void __bch2_btree_node_iter_fix(struct btree_path *path,
				       struct btree *b,
				       struct btree_node_iter *node_iter,
				       struct bset_tree *t,
				       struct bkey_packed *where,
				       unsigned clobber_u64s,
				       unsigned new_u64s)
{
	const struct bkey_packed *end = btree_bkey_last(b, t);
	struct btree_node_iter_set *set;
	unsigned offset = __btree_node_key_to_offset(b, where);
	int shift = new_u64s - clobber_u64s;
	unsigned old_end = t->end_offset - shift;
	unsigned orig_iter_pos = node_iter->data[0].k;
	bool iter_current_key_modified =
		orig_iter_pos >= offset &&
		orig_iter_pos <= offset + clobber_u64s;

	btree_node_iter_for_each(node_iter, set)
		if (set->end == old_end)
			goto found;

	/* didn't find the bset in the iterator - might have to readd it: */
	if (new_u64s &&
	    bkey_iter_pos_cmp(b, where, &path->pos) >= 0) {
		bch2_btree_node_iter_push(node_iter, b, where, end);
		goto fixup_done;
	} else {
		/* Iterator is after key that changed */
		return;
	}
found:
	set->end = t->end_offset;

	/* Iterator hasn't gotten to the key that changed yet: */
	if (set->k < offset)
		return;

	if (new_u64s &&
	    bkey_iter_pos_cmp(b, where, &path->pos) >= 0) {
		set->k = offset;
	} else if (set->k < offset + clobber_u64s) {
		set->k = offset + new_u64s;
		if (set->k == set->end)
			bch2_btree_node_iter_set_drop(node_iter, set);
	} else {
		/* Iterator is after key that changed */
		set->k = (int) set->k + shift;
		return;
	}

	bch2_btree_node_iter_sort(node_iter, b);
fixup_done:
	if (node_iter->data[0].k != orig_iter_pos)
		iter_current_key_modified = true;

	/*
	 * When a new key is added, and the node iterator now points to that
	 * key, the iterator might have skipped past deleted keys that should
	 * come after the key the iterator now points to. We have to rewind to
	 * before those deleted keys - otherwise
	 * bch2_btree_node_iter_prev_all() breaks:
	 */
	if (!bch2_btree_node_iter_end(node_iter) &&
	    iter_current_key_modified &&
	    b->c.level) {
		struct bkey_packed *k, *k2, *p;

		k = bch2_btree_node_iter_peek_all(node_iter, b);

		for_each_bset(b, t) {
			bool set_pos = false;

			if (node_iter->data[0].end == t->end_offset)
				continue;

			k2 = bch2_btree_node_iter_bset_pos(node_iter, b, t);

			while ((p = bch2_bkey_prev_all(b, t, k2)) &&
			       bkey_iter_cmp(b, k, p) < 0) {
				k2 = p;
				set_pos = true;
			}

			if (set_pos)
				btree_node_iter_set_set_pos(node_iter,
							    b, t, k2);
		}
	}
}

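/*
 * Called after a key in node @b has been modified in place: @clobber_u64s
 * u64s at @where were overwritten by @new_u64s u64s. Fixes up the node
 * iterator passed in (if it isn't the path's own) and then every path's
 * iterator pointing into the node.
 */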
void bch2_btree_node_iter_fix(struct btree_trans *trans,
			      struct btree_path *path,
			      struct btree *b,
			      struct btree_node_iter *node_iter,
			      struct bkey_packed *where,
			      unsigned clobber_u64s,
			      unsigned new_u64s)
{
	struct bset_tree *t = bch2_bkey_to_bset_inlined(b, where);
	struct btree_path *linked;
	unsigned i;

	if (node_iter != &path->l[b->c.level].iter) {
		__bch2_btree_node_iter_fix(path, b, node_iter, t,
					   where, clobber_u64s, new_u64s);

		if (bch2_debug_check_iterators)
			bch2_btree_node_iter_verify(node_iter, b);
	}

	trans_for_each_path_with_node(trans, b, linked, i) {
		__bch2_btree_node_iter_fix(linked, b,
					   &linked->l[b->c.level].iter, t,
					   where, clobber_u64s, new_u64s);
		bch2_btree_path_verify_level(trans, linked, b->c.level);
	}
}

/* Btree path level: pointer to a particular btree node and node iter */

static inline struct bkey_s_c __btree_iter_unpack(struct bch_fs *c,
						  struct btree_path_level *l,
						  struct bkey *u,
						  struct bkey_packed *k)
{
	if (unlikely(!k)) {
		/*
		 * signal to bch2_btree_iter_peek_slot() that we're currently at
		 * a hole
		 */
		u->type = KEY_TYPE_deleted;
		return bkey_s_c_null;
	}

	return bkey_disassemble(l->b, k, u);
}

static inline struct bkey_s_c btree_path_level_peek_all(struct bch_fs *c,
							struct btree_path_level *l,
							struct bkey *u)
{
	return __btree_iter_unpack(c, l, u,
			bch2_btree_node_iter_peek_all(&l->iter, l->b));
}

static inline struct bkey_s_c btree_path_level_peek(struct btree_trans *trans,
						    struct btree_path *path,
						    struct btree_path_level *l,
						    struct bkey *u)
{
	struct bkey_s_c k = __btree_iter_unpack(trans->c, l, u,
			bch2_btree_node_iter_peek(&l->iter, l->b));

	path->pos = k.k ? k.k->p : l->b->key.k.p;
	trans->paths_sorted = false;
	bch2_btree_path_verify_level(trans, path, l - path->l);
	return k;
}

static inline struct bkey_s_c btree_path_level_prev(struct btree_trans *trans,
						    struct btree_path *path,
						    struct btree_path_level *l,
						    struct bkey *u)
{
	struct bkey_s_c k = __btree_iter_unpack(trans->c, l, u,
			bch2_btree_node_iter_prev(&l->iter, l->b));

	path->pos = k.k ? k.k->p : l->b->data->min_key;
	trans->paths_sorted = false;
	bch2_btree_path_verify_level(trans, path, l - path->l);
	return k;
}

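/*
 * Advance the node iterator to the first key >= path->pos, giving up and
 * returning false if that would take more than @max_advance steps - at that
 * point the caller reinitializes the iterator with a fresh search instead
 * (see __bch2_btree_path_set_pos(), which calls this with a max_advance of 8).
 */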
static inline bool btree_path_advance_to_pos(struct btree_path *path,
					     struct btree_path_level *l,
					     int max_advance)
{
	struct bkey_packed *k;
	int nr_advanced = 0;

	while ((k = bch2_btree_node_iter_peek_all(&l->iter, l->b)) &&
	       bkey_iter_pos_cmp(l->b, k, &path->pos) < 0) {
		if (max_advance > 0 && nr_advanced >= max_advance)
			return false;

		bch2_btree_node_iter_advance(&l->iter, l->b);
		nr_advanced++;
	}

	return true;
}

static inline void __btree_path_level_init(struct btree_path *path,
					   unsigned level)
{
	struct btree_path_level *l = &path->l[level];

	bch2_btree_node_iter_init(&l->iter, l->b, &path->pos);

	/*
	 * Iterators to interior nodes should always be pointed at the first non
	 * whiteout:
	 */
	if (level)
		bch2_btree_node_iter_peek(&l->iter, l->b);
}

void bch2_btree_path_level_init(struct btree_trans *trans,
				struct btree_path *path,
				struct btree *b)
{
	BUG_ON(path->cached);

	EBUG_ON(!btree_path_pos_in_node(path, b));

	path->l[b->c.level].lock_seq = six_lock_seq(&b->c.lock);
	path->l[b->c.level].b = b;
	__btree_path_level_init(path, b->c.level);
}

/* Btree path: fixups after btree node updates: */

static void bch2_trans_revalidate_updates_in_node(struct btree_trans *trans, struct btree *b)
{
	struct bch_fs *c = trans->c;

	trans_for_each_update(trans, i)
		if (!i->cached &&
		    i->level	== b->c.level &&
		    i->btree_id	== b->c.btree_id &&
		    bpos_cmp(i->k->k.p, b->data->min_key) >= 0 &&
		    bpos_cmp(i->k->k.p, b->data->max_key) <= 0) {
			i->old_v = bch2_btree_path_peek_slot(trans->paths + i->path, &i->old_k).v;

			if (unlikely(trans->journal_replay_not_finished)) {
				struct bkey_i *j_k =
					bch2_journal_keys_peek_slot(c, i->btree_id, i->level,
								    i->k->k.p);

				if (j_k) {
					i->old_k = j_k->k;
					i->old_v = &j_k->v;
				}
			}
		}
}

/*
 * A btree node is being replaced - update the iterator to point to the new
 * node:
 */
void bch2_trans_node_add(struct btree_trans *trans,
			 struct btree_path *path,
			 struct btree *b)
{
	struct btree_path *prev;

	BUG_ON(!btree_path_pos_in_node(path, b));

	while ((prev = prev_btree_path(trans, path)) &&
	       btree_path_pos_in_node(prev, b))
		path = prev;

	for (;
	     path && btree_path_pos_in_node(path, b);
	     path = next_btree_path(trans, path))
		if (path->uptodate == BTREE_ITER_UPTODATE && !path->cached) {
			enum btree_node_locked_type t =
				btree_lock_want(path, b->c.level);

			if (t != BTREE_NODE_UNLOCKED) {
				btree_node_unlock(trans, path, b->c.level);
				six_lock_increment(&b->c.lock, (enum six_lock_type) t);
				mark_btree_node_locked(trans, path, b->c.level, t);
			}

			bch2_btree_path_level_init(trans, path, b);
		}

	bch2_trans_revalidate_updates_in_node(trans, b);
}

/*
 * A btree node has been modified in such a way as to invalidate iterators - fix
 * them:
 */
void bch2_trans_node_reinit_iter(struct btree_trans *trans, struct btree *b)
{
	struct btree_path *path;
	unsigned i;

	trans_for_each_path_with_node(trans, b, path, i)
		__btree_path_level_init(path, b->c.level);

	bch2_trans_revalidate_updates_in_node(trans, b);
}

/* Btree path: traverse, set_pos: */

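/*
 * Lock the root node: the root pointer is read without any locks held, so
 * after taking the node lock we have to recheck that it's still the root,
 * at the level we saw - if it changed underneath us, drop the lock and
 * retry.
 */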
static inline int btree_path_lock_root(struct btree_trans *trans,
				       struct btree_path *path,
				       unsigned depth_want,
				       unsigned long trace_ip)
{
	struct bch_fs *c = trans->c;
	struct btree *b, **rootp = &bch2_btree_id_root(c, path->btree_id)->b;
	enum six_lock_type lock_type;
	unsigned i;
	int ret;

	EBUG_ON(path->nodes_locked);

	while (1) {
		b = READ_ONCE(*rootp);
		path->level = READ_ONCE(b->c.level);

		if (unlikely(path->level < depth_want)) {
			/*
			 * the root is at a lower depth than the depth we want:
			 * got to the end of the btree, or we're walking nodes
			 * greater than some depth and there are no nodes >=
			 * that depth
			 */
			path->level = depth_want;
			for (i = path->level; i < BTREE_MAX_DEPTH; i++)
				path->l[i].b = NULL;
			return 1;
		}

		lock_type = __btree_lock_want(path, path->level);
		ret = btree_node_lock(trans, path, &b->c,
				      path->level, lock_type, trace_ip);
		if (unlikely(ret)) {
			if (bch2_err_matches(ret, BCH_ERR_lock_fail_root_changed))
				continue;
			if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
				return ret;
			BUG();
		}

		if (likely(b == READ_ONCE(*rootp) &&
			   b->c.level == path->level &&
			   !race_fault())) {
			for (i = 0; i < path->level; i++)
				path->l[i].b = ERR_PTR(-BCH_ERR_no_btree_node_lock_root);
			path->l[path->level].b = b;
			for (i = path->level + 1; i < BTREE_MAX_DEPTH; i++)
				path->l[i].b = NULL;

			mark_btree_node_locked(trans, path, path->level,
					       (enum btree_node_locked_type) lock_type);
			bch2_btree_path_level_init(trans, path, b);
			return 0;
		}

		six_unlock_type(&b->c.lock, lock_type);
	}
}

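/*
 * Prefetch children of the node we're about to descend through. The batch
 * size depends on where we are: before BCH_FS_started is set (i.e. during
 * recovery/journal replay) we prefetch aggressively - 16 children at the
 * level above the leaves, 1 higher up - and afterwards just 2 at the level
 * above the leaves and none higher up.
 */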
noinline
static int btree_path_prefetch(struct btree_trans *trans, struct btree_path *path)
{
	struct bch_fs *c = trans->c;
	struct btree_path_level *l = path_l(path);
	struct btree_node_iter node_iter = l->iter;
	struct bkey_packed *k;
	struct bkey_buf tmp;
	unsigned nr = test_bit(BCH_FS_started, &c->flags)
		? (path->level > 1 ? 0 :  2)
		: (path->level > 1 ? 1 : 16);
	bool was_locked = btree_node_locked(path, path->level);
	int ret = 0;

	bch2_bkey_buf_init(&tmp);

	while (nr-- && !ret) {
		if (!bch2_btree_node_relock(trans, path, path->level))
			break;

		bch2_btree_node_iter_advance(&node_iter, l->b);
		k = bch2_btree_node_iter_peek(&node_iter, l->b);
		if (!k)
			break;

		bch2_bkey_buf_unpack(&tmp, c, l->b, k);
		ret = bch2_btree_node_prefetch(trans, path, tmp.k, path->btree_id,
					       path->level - 1);
	}

	if (!was_locked)
		btree_node_unlock(trans, path, path->level);

	bch2_bkey_buf_exit(&tmp, c);
	return ret;
}

static int btree_path_prefetch_j(struct btree_trans *trans, struct btree_path *path,
				 struct btree_and_journal_iter *jiter)
{
	struct bch_fs *c = trans->c;
	struct bkey_s_c k;
	struct bkey_buf tmp;
	unsigned nr = test_bit(BCH_FS_started, &c->flags)
		? (path->level > 1 ? 0 :  2)
		: (path->level > 1 ? 1 : 16);
	bool was_locked = btree_node_locked(path, path->level);
	int ret = 0;

	bch2_bkey_buf_init(&tmp);

	while (nr-- && !ret) {
		if (!bch2_btree_node_relock(trans, path, path->level))
			break;

		bch2_btree_and_journal_iter_advance(jiter);
		k = bch2_btree_and_journal_iter_peek(jiter);
		if (!k.k)
			break;

		bch2_bkey_buf_reassemble(&tmp, c, k);
		ret = bch2_btree_node_prefetch(trans, path, tmp.k, path->btree_id,
					       path->level - 1);
	}

	if (!was_locked)
		btree_node_unlock(trans, path, path->level);

	bch2_bkey_buf_exit(&tmp, c);
	return ret;
}

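/*
 * btree_ptr_v2 keys embed a mem_ptr field: a cached hint pointing at the
 * in-memory btree node, so lookups can skip the btree node cache hash table.
 * If we got here the cached hint was stale - repoint it at the node we just
 * read.
 */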
static noinline void btree_node_mem_ptr_set(struct btree_trans *trans,
					    struct btree_path *path,
					    unsigned plevel, struct btree *b)
{
	struct btree_path_level *l = &path->l[plevel];
	bool locked = btree_node_locked(path, plevel);
	struct bkey_packed *k;
	struct bch_btree_ptr_v2 *bp;

	if (!bch2_btree_node_relock(trans, path, plevel))
		return;

	k = bch2_btree_node_iter_peek_all(&l->iter, l->b);
	BUG_ON(k->type != KEY_TYPE_btree_ptr_v2);

	bp = (void *) bkeyp_val(&l->b->format, k);
	bp->mem_ptr = (unsigned long)b;

	if (!locked)
		btree_node_unlock(trans, path, plevel);
}

static noinline int btree_node_iter_and_journal_peek(struct btree_trans *trans,
						     struct btree_path *path,
						     unsigned flags,
						     struct bkey_buf *out)
{
	struct bch_fs *c = trans->c;
	struct btree_path_level *l = path_l(path);
	struct btree_and_journal_iter jiter;
	struct bkey_s_c k;
	int ret = 0;

	__bch2_btree_and_journal_iter_init_node_iter(trans, &jiter, l->b, l->iter, path->pos);

	k = bch2_btree_and_journal_iter_peek(&jiter);

	bch2_bkey_buf_reassemble(out, c, k);

	if ((flags & BTREE_ITER_prefetch) &&
	    c->opts.btree_node_prefetch)
		ret = btree_path_prefetch_j(trans, path, &jiter);

	bch2_btree_and_journal_iter_exit(&jiter);
	return ret;
}

static __always_inline int btree_path_down(struct btree_trans *trans,
					   struct btree_path *path,
					   unsigned flags,
					   unsigned long trace_ip)
{
	struct bch_fs *c = trans->c;
	struct btree_path_level *l = path_l(path);
	struct btree *b;
	unsigned level = path->level - 1;
	enum six_lock_type lock_type = __btree_lock_want(path, level);
	struct bkey_buf tmp;
	int ret;

	EBUG_ON(!btree_node_locked(path, path->level));

	bch2_bkey_buf_init(&tmp);

	if (unlikely(trans->journal_replay_not_finished)) {
		ret = btree_node_iter_and_journal_peek(trans, path, flags, &tmp);
		if (ret)
			goto err;
	} else {
		struct bkey_packed *k = bch2_btree_node_iter_peek(&l->iter, l->b);
		if (!k) {
			struct printbuf buf = PRINTBUF;

			prt_str(&buf, "node not found at pos ");
			bch2_bpos_to_text(&buf, path->pos);
			prt_str(&buf, " within parent node ");
			bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&l->b->key));

			bch2_fs_fatal_error(c, "%s", buf.buf);
			printbuf_exit(&buf);
			ret = -BCH_ERR_btree_need_topology_repair;
			goto err;
		}

		bch2_bkey_buf_unpack(&tmp, c, l->b, k);

		if ((flags & BTREE_ITER_prefetch) &&
		    c->opts.btree_node_prefetch) {
			ret = btree_path_prefetch(trans, path);
			if (ret)
				goto err;
		}
	}

	b = bch2_btree_node_get(trans, path, tmp.k, level, lock_type, trace_ip);
	ret = PTR_ERR_OR_ZERO(b);
	if (unlikely(ret))
		goto err;

	if (likely(!trans->journal_replay_not_finished &&
		   tmp.k->k.type == KEY_TYPE_btree_ptr_v2) &&
	    unlikely(b != btree_node_mem_ptr(tmp.k)))
		btree_node_mem_ptr_set(trans, path, level + 1, b);

	if (btree_node_read_locked(path, level + 1))
		btree_node_unlock(trans, path, level + 1);

	mark_btree_node_locked(trans, path, level,
			       (enum btree_node_locked_type) lock_type);
	path->level = level;
	bch2_btree_path_level_init(trans, path, b);

	bch2_btree_path_verify_locks(path);
err:
	bch2_bkey_buf_exit(&tmp, c);
	return ret;
}

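/*
 * Retraverse every path in the transaction, in sorted (lock) order, after
 * unlocking everything: used after a transaction restart, when paths may
 * need to be traversed in a different order than they were created in to
 * avoid lock ordering violations.
 */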
static int bch2_btree_path_traverse_all(struct btree_trans *trans)
{
	struct bch_fs *c = trans->c;
	struct btree_path *path;
	unsigned long trace_ip = _RET_IP_;
	unsigned i;
	int ret = 0;

	if (trans->in_traverse_all)
		return -BCH_ERR_transaction_restart_in_traverse_all;

	trans->in_traverse_all = true;
retry_all:
	trans->restarted = 0;
	trans->last_restarted_ip = 0;

	trans_for_each_path(trans, path, i)
		path->should_be_locked = false;

	btree_trans_sort_paths(trans);

	bch2_trans_unlock(trans);
	cond_resched();
	trans_set_locked(trans);

	if (unlikely(trans->memory_allocation_failure)) {
		struct closure cl;

		closure_init_stack(&cl);

		do {
			ret = bch2_btree_cache_cannibalize_lock(trans, &cl);
			closure_sync(&cl);
		} while (ret);
	}

	/* Now, redo traversals in correct order: */
	i = 0;
	while (i < trans->nr_sorted) {
		btree_path_idx_t idx = trans->sorted[i];

		/*
		 * Traversing a path can cause another path to be added at about
		 * the same position:
		 */
		if (trans->paths[idx].uptodate) {
			__btree_path_get(&trans->paths[idx], false);
			ret = bch2_btree_path_traverse_one(trans, idx, 0, _THIS_IP_);
			__btree_path_put(&trans->paths[idx], false);

			if (bch2_err_matches(ret, BCH_ERR_transaction_restart) ||
			    bch2_err_matches(ret, ENOMEM))
				goto retry_all;
			if (ret)
				goto err;
		} else {
			i++;
		}
	}

	/*
	 * We used to assert that all paths had been traversed here
	 * (path->uptodate < BTREE_ITER_NEED_TRAVERSE); however, since
	 * path->should_be_locked is not set yet, we might have unlocked and
	 * then failed to relock a path - that's fine.
	 */
err:
	bch2_btree_cache_cannibalize_unlock(trans);

	trans->in_traverse_all = false;

	trace_and_count(c, trans_traverse_all, trans, trace_ip);
	return ret;
}

static inline bool btree_path_check_pos_in_node(struct btree_path *path,
						unsigned l, int check_pos)
{
	if (check_pos < 0 && btree_path_pos_before_node(path, path->l[l].b))
		return false;
	if (check_pos > 0 && btree_path_pos_after_node(path, path->l[l].b))
		return false;
	return true;
}

static inline bool btree_path_good_node(struct btree_trans *trans,
					struct btree_path *path,
					unsigned l, int check_pos)
{
	return is_btree_node(path, l) &&
		bch2_btree_node_relock(trans, path, l) &&
		btree_path_check_pos_in_node(path, l, check_pos);
}

static void btree_path_set_level_down(struct btree_trans *trans,
				      struct btree_path *path,
				      unsigned new_level)
{
	unsigned l;

	path->level = new_level;

	for (l = path->level + 1; l < BTREE_MAX_DEPTH; l++)
		if (btree_lock_want(path, l) == BTREE_NODE_UNLOCKED)
			btree_node_unlock(trans, path, l);

	btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
	bch2_btree_path_verify(trans, path);
}

static noinline unsigned __btree_path_up_until_good_node(struct btree_trans *trans,
							 struct btree_path *path,
							 int check_pos)
{
	unsigned i, l = path->level;
again:
	while (btree_path_node(path, l) &&
	       !btree_path_good_node(trans, path, l, check_pos))
		__btree_path_set_level_up(trans, path, l++);

	/* If we need intent locks, take them too: */
	for (i = l + 1;
	     i < path->locks_want && btree_path_node(path, i);
	     i++)
		if (!bch2_btree_node_relock(trans, path, i)) {
			while (l <= i)
				__btree_path_set_level_up(trans, path, l++);
			goto again;
		}

	return l;
}

static inline unsigned btree_path_up_until_good_node(struct btree_trans *trans,
						     struct btree_path *path,
						     int check_pos)
{
	return likely(btree_node_locked(path, path->level) &&
		      btree_path_check_pos_in_node(path, path->level, check_pos))
		? path->level
		: __btree_path_up_until_good_node(trans, path, check_pos);
}

/*
 * This is the main state machine for walking down the btree - walks down to a
 * specified depth
 *
 * Returns 0 on success, or a negative error code on failure - e.g. a
 * transaction restart, or failure to read in a btree node. On error, the
 * error is also stashed in the path: path->l[level].b is set to an ERR_PTR.
 */
int bch2_btree_path_traverse_one(struct btree_trans *trans,
				 btree_path_idx_t path_idx,
				 unsigned flags,
				 unsigned long trace_ip)
{
	struct btree_path *path = &trans->paths[path_idx];
	unsigned depth_want = path->level;
	int ret = -((int) trans->restarted);

	if (unlikely(ret))
		goto out;

	if (unlikely(!trans->srcu_held))
		bch2_trans_srcu_lock(trans);

	/*
	 * Ensure we obey path->should_be_locked: if it's set, we can't unlock
	 * and re-traverse the path without a transaction restart:
	 */
	if (path->should_be_locked) {
		ret = bch2_btree_path_relock(trans, path, trace_ip);
		goto out;
	}

	if (path->cached) {
		ret = bch2_btree_path_traverse_cached(trans, path, flags);
		goto out;
	}

	path = &trans->paths[path_idx];

	if (unlikely(path->level >= BTREE_MAX_DEPTH))
		goto out_uptodate;

	path->level = btree_path_up_until_good_node(trans, path, 0);
	unsigned max_level = path->level;

	EBUG_ON(btree_path_node(path, path->level) &&
		!btree_node_locked(path, path->level));

	/*
	 * Note: path->nodes[path->level] may be temporarily NULL here - that
	 * would indicate to other code that we got to the end of the btree,
	 * here it indicates that relocking the root failed - it's critical that
	 * btree_path_lock_root() comes next and that it can't fail
	 */
	while (path->level > depth_want) {
		ret = btree_path_node(path, path->level)
			? btree_path_down(trans, path, flags, trace_ip)
			: btree_path_lock_root(trans, path, depth_want, trace_ip);
		if (unlikely(ret)) {
			if (ret == 1) {
				/*
				 * No nodes at this level - got to the end of
				 * the btree:
				 */
				ret = 0;
				goto out;
			}

			__bch2_btree_path_unlock(trans, path);
			path->level = depth_want;
			path->l[path->level].b = ERR_PTR(ret);
			goto out;
		}
	}

	if (unlikely(max_level > path->level)) {
		struct btree_path *linked;
		unsigned iter;

		trans_for_each_path_with_node(trans, path_l(path)->b, linked, iter)
			for (unsigned j = path->level + 1; j < max_level; j++)
				linked->l[j] = path->l[j];
	}

out_uptodate:
	path->uptodate = BTREE_ITER_UPTODATE;
out:
	if (bch2_err_matches(ret, BCH_ERR_transaction_restart) != !!trans->restarted)
		panic("ret %s (%i) trans->restarted %s (%i)\n",
		      bch2_err_str(ret), ret,
		      bch2_err_str(trans->restarted), trans->restarted);
	bch2_btree_path_verify(trans, path);
	return ret;
}

static inline void btree_path_copy(struct btree_trans *trans, struct btree_path *dst,
				   struct btree_path *src)
{
	unsigned i, offset = offsetof(struct btree_path, pos);

	memcpy((void *) dst + offset,
	       (void *) src + offset,
	       sizeof(struct btree_path) - offset);

	for (i = 0; i < BTREE_MAX_DEPTH; i++) {
		unsigned t = btree_node_locked_type(dst, i);

		if (t != BTREE_NODE_UNLOCKED)
			six_lock_increment(&dst->l[i].b->c.lock, t);
	}
}

static btree_path_idx_t btree_path_clone(struct btree_trans *trans, btree_path_idx_t src,
					 bool intent, unsigned long ip)
{
	btree_path_idx_t new = btree_path_alloc(trans, src);
	btree_path_copy(trans, trans->paths + new, trans->paths + src);
	__btree_path_get(trans->paths + new, intent);
#ifdef TRACK_PATH_ALLOCATED
	trans->paths[new].ip_allocated = ip;
#endif
	return new;
}

__flatten
btree_path_idx_t __bch2_btree_path_make_mut(struct btree_trans *trans,
			btree_path_idx_t path, bool intent, unsigned long ip)
{
	__btree_path_put(trans->paths + path, intent);
	path = btree_path_clone(trans, path, intent, ip);
	trans->paths[path].preserve = false;
	return path;
}

btree_path_idx_t __must_check
__bch2_btree_path_set_pos(struct btree_trans *trans,
			  btree_path_idx_t path_idx, struct bpos new_pos,
			  bool intent, unsigned long ip)
{
	int cmp = bpos_cmp(new_pos, trans->paths[path_idx].pos);

	bch2_trans_verify_not_in_restart(trans);
	EBUG_ON(!trans->paths[path_idx].ref);

	path_idx = bch2_btree_path_make_mut(trans, path_idx, intent, ip);

	struct btree_path *path = trans->paths + path_idx;
	path->pos		= new_pos;
	trans->paths_sorted	= false;

	if (unlikely(path->cached)) {
		btree_node_unlock(trans, path, 0);
		path->l[0].b = ERR_PTR(-BCH_ERR_no_btree_node_up);
		btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
		goto out;
	}

	unsigned level = btree_path_up_until_good_node(trans, path, cmp);

	if (btree_path_node(path, level)) {
		struct btree_path_level *l = &path->l[level];

		BUG_ON(!btree_node_locked(path, level));
		/*
		 * We might have to skip over many keys, or just a few: try
		 * advancing the node iterator, and if we have to skip over too
		 * many keys just reinit it (or if we're rewinding, since that
		 * is expensive).
		 */
		if (cmp < 0 ||
		    !btree_path_advance_to_pos(path, l, 8))
			bch2_btree_node_iter_init(&l->iter, l->b, &path->pos);

		/*
		 * Iterators to interior nodes should always be pointed at the first non
		 * whiteout:
		 */
		if (unlikely(level))
			bch2_btree_node_iter_peek(&l->iter, l->b);
	}

	if (unlikely(level != path->level)) {
		btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
		__bch2_btree_path_unlock(trans, path);
	}
out:
	bch2_btree_path_verify(trans, path);
	return path_idx;
}

/* Btree path: main interface: */

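/*
 * Paths are refcounted: bch2_path_put() only frees a path when the last ref
 * is dropped, and even then prefers to keep it around (path->preserve) or
 * to hand its preserve/should_be_locked flags to a duplicate path at the
 * same position, so a subsequent lookup doesn't retraverse from scratch.
 */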
static struct btree_path *have_path_at_pos(struct btree_trans *trans, struct btree_path *path)
{
	struct btree_path *sib;

	sib = prev_btree_path(trans, path);
	if (sib && !btree_path_cmp(sib, path))
		return sib;

	sib = next_btree_path(trans, path);
	if (sib && !btree_path_cmp(sib, path))
		return sib;

	return NULL;
}

static struct btree_path *have_node_at_pos(struct btree_trans *trans, struct btree_path *path)
{
	struct btree_path *sib;

	sib = prev_btree_path(trans, path);
	if (sib && sib->level == path->level && path_l(sib)->b == path_l(path)->b)
		return sib;

	sib = next_btree_path(trans, path);
	if (sib && sib->level == path->level && path_l(sib)->b == path_l(path)->b)
		return sib;

	return NULL;
}

static inline void __bch2_path_free(struct btree_trans *trans, btree_path_idx_t path)
{
	__bch2_btree_path_unlock(trans, trans->paths + path);
	btree_path_list_remove(trans, trans->paths + path);
	__clear_bit(path, trans->paths_allocated);
}

static bool bch2_btree_path_can_relock(struct btree_trans *trans, struct btree_path *path)
{
	unsigned l = path->level;

	do {
		if (!btree_path_node(path, l))
			break;

		if (!is_btree_node(path, l))
			return false;

		if (path->l[l].lock_seq != path->l[l].b->c.lock.seq)
			return false;

		l++;
	} while (l < path->locks_want);

	return true;
}

void bch2_path_put(struct btree_trans *trans, btree_path_idx_t path_idx, bool intent)
{
	struct btree_path *path = trans->paths + path_idx, *dup;

	if (!__btree_path_put(path, intent))
		return;

	dup = path->preserve
		? have_path_at_pos(trans, path)
		: have_node_at_pos(trans, path);

	if (!dup && !(!path->preserve && !is_btree_node(path, path->level)))
		return;

	if (path->should_be_locked && !trans->restarted) {
		if (!dup)
			return;

		if (!(trans->locked
		      ? bch2_btree_path_relock_norestart(trans, dup)
		      : bch2_btree_path_can_relock(trans, dup)))
			return;
	}

	if (dup) {
		dup->preserve		|= path->preserve;
		dup->should_be_locked	|= path->should_be_locked;
	}

	__bch2_path_free(trans, path_idx);
}

static void bch2_path_put_nokeep(struct btree_trans *trans, btree_path_idx_t path,
				 bool intent)
{
	if (!__btree_path_put(trans->paths + path, intent))
		return;

	__bch2_path_free(trans, path);
}

void __noreturn bch2_trans_restart_error(struct btree_trans *trans, u32 restart_count)
{
	panic("trans->restart_count %u, should be %u, last restarted by %pS\n",
	      trans->restart_count, restart_count,
	      (void *) trans->last_begin_ip);
}

void __noreturn bch2_trans_in_restart_error(struct btree_trans *trans)
{
	panic("in transaction restart: %s, last restarted by %pS\n",
	      bch2_err_str(trans->restarted),
	      (void *) trans->last_restarted_ip);
}

void __noreturn bch2_trans_unlocked_error(struct btree_trans *trans)
{
	panic("trans should be locked, unlocked by %pS\n",
	      (void *) trans->last_unlock_ip);
}

noinline __cold
void bch2_trans_updates_to_text(struct printbuf *buf, struct btree_trans *trans)
{
	prt_printf(buf, "transaction updates for %s journal seq %llu\n",
		   trans->fn, trans->journal_res.seq);
	printbuf_indent_add(buf, 2);

	trans_for_each_update(trans, i) {
		struct bkey_s_c old = { &i->old_k, i->old_v };

		prt_printf(buf, "update: btree=%s cached=%u %pS\n",
			   bch2_btree_id_str(i->btree_id),
			   i->cached,
			   (void *) i->ip_allocated);

		prt_printf(buf, "  old ");
		bch2_bkey_val_to_text(buf, trans->c, old);
		prt_newline(buf);

		prt_printf(buf, "  new ");
		bch2_bkey_val_to_text(buf, trans->c, bkey_i_to_s_c(i->k));
		prt_newline(buf);
	}

	for (struct jset_entry *e = trans->journal_entries;
	     e != btree_trans_journal_entries_top(trans);
	     e = vstruct_next(e))
		bch2_journal_entry_to_text(buf, trans->c, e);

	printbuf_indent_sub(buf, 2);
}

noinline __cold
void bch2_dump_trans_updates(struct btree_trans *trans)
{
	struct printbuf buf = PRINTBUF;

	bch2_trans_updates_to_text(&buf, trans);
	bch2_print_str(trans->c, buf.buf);
	printbuf_exit(&buf);
}

static void bch2_btree_path_to_text_short(struct printbuf *out, struct btree_trans *trans, btree_path_idx_t path_idx)
{
	struct btree_path *path = trans->paths + path_idx;

	prt_printf(out, "path: idx %2u ref %u:%u %c %c %c btree=%s l=%u pos ",
		   path_idx, path->ref, path->intent_ref,
		   path->preserve ? 'P' : ' ',
		   path->should_be_locked ? 'S' : ' ',
		   path->cached ? 'C' : 'B',
		   bch2_btree_id_str(path->btree_id),
		   path->level);
	bch2_bpos_to_text(out, path->pos);

	if (!path->cached && btree_node_locked(path, path->level)) {
		prt_char(out, ' ');
		struct btree *b = path_l(path)->b;
		bch2_bpos_to_text(out, b->data->min_key);
		prt_char(out, '-');
		bch2_bpos_to_text(out, b->key.k.p);
	}

#ifdef TRACK_PATH_ALLOCATED
	prt_printf(out, " %pS", (void *) path->ip_allocated);
#endif
}

static const char *btree_node_locked_str(enum btree_node_locked_type t)
{
	switch (t) {
	case BTREE_NODE_UNLOCKED:
		return "unlocked";
	case BTREE_NODE_READ_LOCKED:
		return "read";
	case BTREE_NODE_INTENT_LOCKED:
		return "intent";
	case BTREE_NODE_WRITE_LOCKED:
		return "write";
	default:
		return NULL;
	}
}

void bch2_btree_path_to_text(struct printbuf *out, struct btree_trans *trans, btree_path_idx_t path_idx)
{
	bch2_btree_path_to_text_short(out, trans, path_idx);

	struct btree_path *path = trans->paths + path_idx;

	prt_printf(out, " uptodate %u locks_want %u", path->uptodate, path->locks_want);
	prt_newline(out);

	printbuf_indent_add(out, 2);
	for (unsigned l = 0; l < BTREE_MAX_DEPTH; l++) {
		prt_printf(out, "l=%u locks %s seq %u node ", l,
			   btree_node_locked_str(btree_node_locked_type(path, l)),
			   path->l[l].lock_seq);

		int ret = PTR_ERR_OR_ZERO(path->l[l].b);
		if (ret)
			prt_str(out, bch2_err_str(ret));
		else
			prt_printf(out, "%px", path->l[l].b);
		prt_newline(out);
	}
	printbuf_indent_sub(out, 2);
}

static noinline __cold
void __bch2_trans_paths_to_text(struct printbuf *out, struct btree_trans *trans,
				bool nosort)
{
	struct trans_for_each_path_inorder_iter iter;

	if (!nosort)
		btree_trans_sort_paths(trans);

	trans_for_each_path_idx_inorder(trans, iter) {
		bch2_btree_path_to_text_short(out, trans, iter.path_idx);
		prt_newline(out);
	}
}

noinline __cold
void bch2_trans_paths_to_text(struct printbuf *out, struct btree_trans *trans)
{
	__bch2_trans_paths_to_text(out, trans, false);
}

static noinline __cold
void __bch2_dump_trans_paths_updates(struct btree_trans *trans, bool nosort)
{
	struct printbuf buf = PRINTBUF;

	__bch2_trans_paths_to_text(&buf, trans, nosort);
	bch2_trans_updates_to_text(&buf, trans);

	bch2_print_str(trans->c, buf.buf);
	printbuf_exit(&buf);
}

noinline __cold
void bch2_dump_trans_paths_updates(struct btree_trans *trans)
{
	__bch2_dump_trans_paths_updates(trans, false);
}

noinline __cold
static void bch2_trans_update_max_paths(struct btree_trans *trans)
{
	struct btree_transaction_stats *s = btree_trans_stats(trans);
	struct printbuf buf = PRINTBUF;
	size_t nr = bitmap_weight(trans->paths_allocated, trans->nr_paths);

	bch2_trans_paths_to_text(&buf, trans);

	if (!buf.allocation_failure) {
		mutex_lock(&s->lock);
		if (nr > s->nr_max_paths) {
			s->nr_max_paths = nr;
			swap(s->max_paths_text, buf.buf);
		}
		mutex_unlock(&s->lock);
	}

	printbuf_exit(&buf);

	trans->nr_paths_max = nr;
}

noinline __cold
int __bch2_btree_trans_too_many_iters(struct btree_trans *trans)
{
	if (trace_trans_restart_too_many_iters_enabled()) {
		struct printbuf buf = PRINTBUF;

		bch2_trans_paths_to_text(&buf, trans);
		trace_trans_restart_too_many_iters(trans, _THIS_IP_, buf.buf);
		printbuf_exit(&buf);
	}

	count_event(trans->c, trans_restart_too_many_iters);

	return btree_trans_restart(trans, BCH_ERR_transaction_restart_too_many_iters);
}

static noinline void btree_path_overflow(struct btree_trans *trans)
{
	bch2_dump_trans_paths_updates(trans);
	bch_err(trans->c, "trans path overflow");
}

static noinline void btree_paths_realloc(struct btree_trans *trans)
{
	unsigned nr = trans->nr_paths * 2;

	void *p = kvzalloc(BITS_TO_LONGS(nr) * sizeof(unsigned long) +
			   sizeof(struct btree_trans_paths) +
			   nr * sizeof(struct btree_path) +
			   nr * sizeof(btree_path_idx_t) + 8 +
			   nr * sizeof(struct btree_insert_entry), GFP_KERNEL|__GFP_NOFAIL);

	unsigned long *paths_allocated = p;
	memcpy(paths_allocated, trans->paths_allocated, BITS_TO_LONGS(trans->nr_paths) * sizeof(unsigned long));
	p += BITS_TO_LONGS(nr) * sizeof(unsigned long);

	p += sizeof(struct btree_trans_paths);
	struct btree_path *paths = p;
	*trans_paths_nr(paths) = nr;
	memcpy(paths, trans->paths, trans->nr_paths * sizeof(struct btree_path));
	p += nr * sizeof(struct btree_path);

	btree_path_idx_t *sorted = p;
	memcpy(sorted, trans->sorted, trans->nr_sorted * sizeof(btree_path_idx_t));
	p += nr * sizeof(btree_path_idx_t) + 8;

	struct btree_insert_entry *updates = p;
	memcpy(updates, trans->updates, trans->nr_paths * sizeof(struct btree_insert_entry));

	unsigned long *old = trans->paths_allocated;

	rcu_assign_pointer(trans->paths_allocated,	paths_allocated);
	rcu_assign_pointer(trans->paths,		paths);
	rcu_assign_pointer(trans->sorted,		sorted);
	rcu_assign_pointer(trans->updates,		updates);

	trans->nr_paths = nr;

	if (old != trans->_paths_allocated)
		kfree_rcu_mightsleep(old);
}

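/*
 * Allocate a path from the transaction's bitmap of allocated paths. If the
 * bitmap is full we grow the arrays (btree_paths_realloc()) - unless we've
 * already hit BTREE_ITER_MAX, in which case we dump state, log the overflow
 * and return index 0.
 */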
static inline btree_path_idx_t btree_path_alloc(struct btree_trans *trans,
						btree_path_idx_t pos)
{
	btree_path_idx_t idx = find_first_zero_bit(trans->paths_allocated, trans->nr_paths);

	if (unlikely(idx == trans->nr_paths)) {
		if (trans->nr_paths == BTREE_ITER_MAX) {
			btree_path_overflow(trans);
			return 0;
		}

		btree_paths_realloc(trans);
	}

	/*
	 * Do this before marking the new path as allocated, since it won't be
	 * initialized yet:
	 */
	if (unlikely(idx > trans->nr_paths_max))
		bch2_trans_update_max_paths(trans);

	__set_bit(idx, trans->paths_allocated);

	struct btree_path *path = &trans->paths[idx];
	path->ref		= 0;
	path->intent_ref	= 0;
	path->nodes_locked	= 0;

	btree_path_list_add(trans, pos, idx);
	trans->paths_sorted = false;
	return idx;
}

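/*
 * Get a path to @pos in @btree_id, reusing an existing path at the same
 * position when possible. Most callers don't use this directly - they go
 * through the btree_iter layer, which resolves to a bch2_path_get()
 * internally. A rough sketch of the usual pattern:
 *
 *	struct btree_iter iter;
 *
 *	bch2_trans_iter_init(trans, &iter, BTREE_ID_inodes, pos, 0);
 *	k = bch2_btree_iter_peek_slot(&iter);
 *	...
 *	bch2_trans_iter_exit(trans, &iter);
 */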
bch2_path_get(struct btree_trans * trans,enum btree_id btree_id,struct bpos pos,unsigned locks_want,unsigned level,unsigned flags,unsigned long ip)1687 btree_path_idx_t bch2_path_get(struct btree_trans *trans,
1688 enum btree_id btree_id, struct bpos pos,
1689 unsigned locks_want, unsigned level,
1690 unsigned flags, unsigned long ip)
1691 {
1692 struct btree_path *path;
1693 bool cached = flags & BTREE_ITER_cached;
1694 bool intent = flags & BTREE_ITER_intent;
1695 struct trans_for_each_path_inorder_iter iter;
1696 btree_path_idx_t path_pos = 0, path_idx;
1697
1698 bch2_trans_verify_not_unlocked(trans);
1699 bch2_trans_verify_not_in_restart(trans);
1700 bch2_trans_verify_locks(trans);
1701
1702 btree_trans_sort_paths(trans);
1703
1704 trans_for_each_path_inorder(trans, path, iter) {
1705 if (__btree_path_cmp(path,
1706 btree_id,
1707 cached,
1708 pos,
1709 level) > 0)
1710 break;
1711
1712 path_pos = iter.path_idx;
1713 }
1714
1715 if (path_pos &&
1716 trans->paths[path_pos].cached == cached &&
1717 trans->paths[path_pos].btree_id == btree_id &&
1718 trans->paths[path_pos].level == level) {
1719 __btree_path_get(trans->paths + path_pos, intent);
1720 path_idx = bch2_btree_path_set_pos(trans, path_pos, pos, intent, ip);
1721 path = trans->paths + path_idx;
1722 } else {
1723 path_idx = btree_path_alloc(trans, path_pos);
1724 path = trans->paths + path_idx;
1725
1726 __btree_path_get(path, intent);
1727 path->pos = pos;
1728 path->btree_id = btree_id;
1729 path->cached = cached;
1730 path->uptodate = BTREE_ITER_NEED_TRAVERSE;
1731 path->should_be_locked = false;
1732 path->level = level;
1733 path->locks_want = locks_want;
1734 path->nodes_locked = 0;
1735 for (unsigned i = 0; i < ARRAY_SIZE(path->l); i++)
1736 path->l[i].b = ERR_PTR(-BCH_ERR_no_btree_node_init);
1737 #ifdef TRACK_PATH_ALLOCATED
1738 path->ip_allocated = ip;
1739 #endif
1740 trans->paths_sorted = false;
1741 }
1742
1743 if (!(flags & BTREE_ITER_nopreserve))
1744 path->preserve = true;
1745
1746 if (path->intent_ref)
1747 locks_want = max(locks_want, level + 1);
1748
1749 /*
1750 * If the path already has locks_want greater than requested, we don't
1751 * downgrade it here - after a transaction restart (e.g. because a btree
1752 * node split needed to upgrade locks), we might be putting/getting the
1753 * iterator again. Downgrading iterators only happens via
1754 * bch2_trans_downgrade(), after a successful transaction commit.
1755 */
1756
1757 locks_want = min(locks_want, BTREE_MAX_DEPTH);
1758 if (locks_want > path->locks_want)
1759 bch2_btree_path_upgrade_noupgrade_sibs(trans, path, locks_want, NULL);
1760
1761 return path_idx;
1762 }
1763
1764 btree_path_idx_t bch2_path_get_unlocked_mut(struct btree_trans *trans,
1765 enum btree_id btree_id,
1766 unsigned level,
1767 struct bpos pos)
1768 {
1769 btree_path_idx_t path_idx = bch2_path_get(trans, btree_id, pos, level + 1, level,
1770 BTREE_ITER_nopreserve|
1771 BTREE_ITER_intent, _RET_IP_);
1772 path_idx = bch2_btree_path_make_mut(trans, path_idx, true, _RET_IP_);
1773
1774 struct btree_path *path = trans->paths + path_idx;
1775 bch2_btree_path_downgrade(trans, path);
1776 __bch2_btree_path_unlock(trans, path);
1777 return path_idx;
1778 }
1779
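/*
 * Peek the key at the path's current position, without advancing: for cached
 * paths this is the bkey_cached entry; otherwise, if no key exists at exactly
 * path->pos, a deleted key at that position is synthesized in *u so callers
 * always get back a usable struct bkey.
 */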
1780 struct bkey_s_c bch2_btree_path_peek_slot(struct btree_path *path, struct bkey *u)
1781 {
1783 struct btree_path_level *l = path_l(path);
1784 struct bkey_packed *_k;
1785 struct bkey_s_c k;
1786
1787 if (unlikely(!l->b))
1788 return bkey_s_c_null;
1789
1790 EBUG_ON(path->uptodate != BTREE_ITER_UPTODATE);
1791 EBUG_ON(!btree_node_locked(path, path->level));
1792
1793 if (!path->cached) {
1794 _k = bch2_btree_node_iter_peek_all(&l->iter, l->b);
1795 k = _k ? bkey_disassemble(l->b, _k, u) : bkey_s_c_null;
1796
1797 EBUG_ON(k.k && bkey_deleted(k.k) && bpos_eq(k.k->p, path->pos));
1798
1799 if (!k.k || !bpos_eq(path->pos, k.k->p))
1800 goto hole;
1801 } else {
1802 struct bkey_cached *ck = (void *) path->l[0].b;
1803 if (!ck)
1804 return bkey_s_c_null;
1805
1806 EBUG_ON(path->btree_id != ck->key.btree_id ||
1807 !bkey_eq(path->pos, ck->key.pos));
1808
1809 *u = ck->k->k;
1810 k = bkey_i_to_s_c(ck->k);
1811 }
1812
1813 return k;
1814 hole:
1815 bkey_init(u);
1816 u->p = path->pos;
1817 return (struct bkey_s_c) { u, NULL };
1818 }
1819
1821 void bch2_set_btree_iter_dontneed(struct btree_iter *iter)
1822 {
1823 struct btree_trans *trans = iter->trans;
1824
1825 if (!iter->path || trans->restarted)
1826 return;
1827
1828 struct btree_path *path = btree_iter_path(trans, iter);
1829 path->preserve = false;
1830 if (path->ref == 1)
1831 path->should_be_locked = false;
1832 }

1833 /* Btree iterators: */
1834
1835 int __must_check
1836 __bch2_btree_iter_traverse(struct btree_iter *iter)
1837 {
1838 return bch2_btree_path_traverse(iter->trans, iter->path, iter->flags);
1839 }
1840
1841 int __must_check
1842 bch2_btree_iter_traverse(struct btree_iter *iter)
1843 {
1844 struct btree_trans *trans = iter->trans;
1845 int ret;
1846
1847 bch2_trans_verify_not_unlocked(trans);
1848
1849 iter->path = bch2_btree_path_set_pos(trans, iter->path,
1850 btree_iter_search_key(iter),
1851 iter->flags & BTREE_ITER_intent,
1852 btree_iter_ip_allocated(iter));
1853
1854 ret = bch2_btree_path_traverse(iter->trans, iter->path, iter->flags);
1855 if (ret)
1856 return ret;
1857
1858 struct btree_path *path = btree_iter_path(trans, iter);
1859 if (btree_path_node(path, path->level))
1860 btree_path_set_should_be_locked(path);
1861 return 0;
1862 }
1863
1864 /* Iterate across nodes (leaf and interior nodes) */
1865
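/*
 * Sketch of how the node iteration functions below are typically driven
 * (illustrative only; assumes the iterator was set up with
 * bch2_trans_node_iter_init()):
 *
 *	struct btree *b = bch2_btree_iter_peek_node(&iter);
 *	while (!IS_ERR_OR_NULL(b)) {
 *		... process node ...
 *		b = bch2_btree_iter_next_node(&iter);
 *	}
 *
 * Both functions return NULL at the end of the btree, and an ERR_PTR() on
 * error - including transaction restarts, which the caller must handle.
 */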
1866 struct btree *bch2_btree_iter_peek_node(struct btree_iter *iter)
1867 {
1868 struct btree_trans *trans = iter->trans;
1869 struct btree *b = NULL;
1870 int ret;
1871
1872 EBUG_ON(trans->paths[iter->path].cached);
1873 bch2_btree_iter_verify(iter);
1874
1875 ret = bch2_btree_path_traverse(trans, iter->path, iter->flags);
1876 if (ret)
1877 goto err;
1878
1879 struct btree_path *path = btree_iter_path(trans, iter);
1880 b = btree_path_node(path, path->level);
1881 if (!b)
1882 goto out;
1883
1884 BUG_ON(bpos_lt(b->key.k.p, iter->pos));
1885
1886 bkey_init(&iter->k);
1887 iter->k.p = iter->pos = b->key.k.p;
1888
1889 iter->path = bch2_btree_path_set_pos(trans, iter->path, b->key.k.p,
1890 iter->flags & BTREE_ITER_intent,
1891 btree_iter_ip_allocated(iter));
1892 btree_path_set_should_be_locked(btree_iter_path(trans, iter));
1893 out:
1894 bch2_btree_iter_verify_entry_exit(iter);
1895 bch2_btree_iter_verify(iter);
1896
1897 return b;
1898 err:
1899 b = ERR_PTR(ret);
1900 goto out;
1901 }
1902
1903 /* Only kept for bcachefs-tools */
1904 struct btree *bch2_btree_iter_peek_node_and_restart(struct btree_iter *iter)
1905 {
1906 struct btree *b;
1907
1908 while (b = bch2_btree_iter_peek_node(iter),
1909 bch2_err_matches(PTR_ERR_OR_ZERO(b), BCH_ERR_transaction_restart))
1910 bch2_trans_begin(iter->trans);
1911
1912 return b;
1913 }
1914
1915 struct btree *bch2_btree_iter_next_node(struct btree_iter *iter)
1916 {
1917 struct btree_trans *trans = iter->trans;
1918 struct btree *b = NULL;
1919 int ret;
1920
1921 EBUG_ON(trans->paths[iter->path].cached);
1922 bch2_trans_verify_not_in_restart(trans);
1923 bch2_btree_iter_verify(iter);
1924
1925 ret = bch2_btree_path_traverse(trans, iter->path, iter->flags);
1926 if (ret)
1927 goto err;
1928
1930 struct btree_path *path = btree_iter_path(trans, iter);
1931
1932 /* already at end? */
1933 if (!btree_path_node(path, path->level))
1934 return NULL;
1935
1936 /* got to end? */
1937 if (!btree_path_node(path, path->level + 1)) {
1938 btree_path_set_level_up(trans, path);
1939 return NULL;
1940 }
1941
1942 if (!bch2_btree_node_relock(trans, path, path->level + 1)) {
1943 __bch2_btree_path_unlock(trans, path);
1944 path->l[path->level].b = ERR_PTR(-BCH_ERR_no_btree_node_relock);
1945 path->l[path->level + 1].b = ERR_PTR(-BCH_ERR_no_btree_node_relock);
1946 btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
1947 trace_and_count(trans->c, trans_restart_relock_next_node, trans, _THIS_IP_, path);
1948 ret = btree_trans_restart(trans, BCH_ERR_transaction_restart_relock);
1949 goto err;
1950 }
1951
1952 b = btree_path_node(path, path->level + 1);
1953
1954 if (bpos_eq(iter->pos, b->key.k.p)) {
1955 __btree_path_set_level_up(trans, path, path->level++);
1956 } else {
1957 if (btree_lock_want(path, path->level + 1) == BTREE_NODE_UNLOCKED)
1958 btree_node_unlock(trans, path, path->level + 1);
1959
1960 /*
1961 * Haven't gotten to the end of the parent node: go back down to
1962 * the next child node
1963 */
1964 iter->path = bch2_btree_path_set_pos(trans, iter->path,
1965 bpos_successor(iter->pos),
1966 iter->flags & BTREE_ITER_intent,
1967 btree_iter_ip_allocated(iter));
1968
1969 path = btree_iter_path(trans, iter);
1970 btree_path_set_level_down(trans, path, iter->min_depth);
1971
1972 ret = bch2_btree_path_traverse(trans, iter->path, iter->flags);
1973 if (ret)
1974 goto err;
1975
1976 path = btree_iter_path(trans, iter);
1977 b = path->l[path->level].b;
1978 }
1979
1980 bkey_init(&iter->k);
1981 iter->k.p = iter->pos = b->key.k.p;
1982
1983 iter->path = bch2_btree_path_set_pos(trans, iter->path, b->key.k.p,
1984 iter->flags & BTREE_ITER_intent,
1985 btree_iter_ip_allocated(iter));
1986 btree_path_set_should_be_locked(btree_iter_path(trans, iter));
1987 EBUG_ON(btree_iter_path(trans, iter)->uptodate);
1988 out:
1989 bch2_btree_iter_verify_entry_exit(iter);
1990 bch2_btree_iter_verify(iter);
1991
1992 return b;
1993 err:
1994 b = ERR_PTR(ret);
1995 goto out;
1996 }
1997
1998 /* Iterate across keys (in leaf nodes only) */
1999
2000 inline bool bch2_btree_iter_advance(struct btree_iter *iter)
2001 {
2002 struct bpos pos = iter->k.p;
2003 bool ret = !(iter->flags & BTREE_ITER_all_snapshots
2004 ? bpos_eq(pos, SPOS_MAX)
2005 : bkey_eq(pos, SPOS_MAX));
2006
2007 if (ret && !(iter->flags & BTREE_ITER_is_extents))
2008 pos = bkey_successor(iter, pos);
2009 bch2_btree_iter_set_pos(iter, pos);
2010 return ret;
2011 }
2012
2013 inline bool bch2_btree_iter_rewind(struct btree_iter *iter)
2014 {
2015 struct bpos pos = bkey_start_pos(&iter->k);
2016 bool ret = !(iter->flags & BTREE_ITER_all_snapshots
2017 ? bpos_eq(pos, POS_MIN)
2018 : bkey_eq(pos, POS_MIN));
2019
2020 if (ret && !(iter->flags & BTREE_ITER_is_extents))
2021 pos = bkey_predecessor(iter, pos);
2022 bch2_btree_iter_set_pos(iter, pos);
2023 return ret;
2024 }
2025
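/*
 * The helpers below overlay keys from the transaction's own pending updates
 * (trans_for_each_update()) on top of what was read from the btree, so that
 * BTREE_ITER_with_updates iterators see not-yet-committed writes: each takes
 * the best key found so far in *k and replaces it if a pending update is a
 * better match for the iteration direction.
 */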
2026 static noinline
2027 void bch2_btree_trans_peek_prev_updates(struct btree_trans *trans, struct btree_iter *iter,
2028 struct bkey_s_c *k)
2029 {
2030 struct bpos end = path_l(btree_iter_path(trans, iter))->b->data->min_key;
2031
2032 trans_for_each_update(trans, i)
2033 if (!i->key_cache_already_flushed &&
2034 i->btree_id == iter->btree_id &&
2035 bpos_le(i->k->k.p, iter->pos) &&
2036 bpos_ge(i->k->k.p, k->k ? k->k->p : end)) {
2037 iter->k = i->k->k;
2038 *k = bkey_i_to_s_c(i->k);
2039 }
2040 }
2041
2042 static noinline
2043 void bch2_btree_trans_peek_updates(struct btree_trans *trans, struct btree_iter *iter,
2044 struct bkey_s_c *k)
2045 {
2046 struct btree_path *path = btree_iter_path(trans, iter);
2047 struct bpos end = path_l(path)->b->key.k.p;
2048
2049 trans_for_each_update(trans, i)
2050 if (!i->key_cache_already_flushed &&
2051 i->btree_id == iter->btree_id &&
2052 bpos_ge(i->k->k.p, path->pos) &&
2053 bpos_le(i->k->k.p, k->k ? k->k->p : end)) {
2054 iter->k = i->k->k;
2055 *k = bkey_i_to_s_c(i->k);
2056 }
2057 }
2058
2059 static noinline
2060 void bch2_btree_trans_peek_slot_updates(struct btree_trans *trans, struct btree_iter *iter,
2061 struct bkey_s_c *k)
2062 {
2063 trans_for_each_update(trans, i)
2064 if (!i->key_cache_already_flushed &&
2065 i->btree_id == iter->btree_id &&
2066 bpos_eq(i->k->k.p, iter->pos)) {
2067 iter->k = i->k->k;
2068 *k = bkey_i_to_s_c(i->k);
2069 }
2070 }
2071
2072 static struct bkey_i *bch2_btree_journal_peek(struct btree_trans *trans,
2073 struct btree_iter *iter,
2074 struct bpos end_pos)
2075 {
2076 struct btree_path *path = btree_iter_path(trans, iter);
2077
2078 return bch2_journal_keys_peek_upto(trans->c, iter->btree_id,
2079 path->level,
2080 path->pos,
2081 end_pos,
2082 &iter->journal_idx);
2083 }
2084
2085 static noinline
2086 struct bkey_s_c btree_trans_peek_slot_journal(struct btree_trans *trans,
2087 struct btree_iter *iter)
2088 {
2089 struct btree_path *path = btree_iter_path(trans, iter);
2090 struct bkey_i *k = bch2_btree_journal_peek(trans, iter, path->pos);
2091
2092 if (k) {
2093 iter->k = k->k;
2094 return bkey_i_to_s_c(k);
2095 } else {
2096 return bkey_s_c_null;
2097 }
2098 }
2099
2100 static noinline
2101 struct bkey_s_c btree_trans_peek_journal(struct btree_trans *trans,
2102 struct btree_iter *iter,
2103 struct bkey_s_c k)
2104 {
2105 struct btree_path *path = btree_iter_path(trans, iter);
2106 struct bkey_i *next_journal =
2107 bch2_btree_journal_peek(trans, iter,
2108 k.k ? k.k->p : path_l(path)->b->key.k.p);
2109
2110 if (next_journal) {
2111 iter->k = next_journal->k;
2112 k = bkey_i_to_s_c(next_journal);
2113 }
2114
2115 return k;
2116 }
2117
2118 /*
2119 * Checks the btree key cache for a key at @pos and returns it if present,
2120 * otherwise bkey_s_c_null:
2121 */
2122 static noinline
2123 struct bkey_s_c btree_trans_peek_key_cache(struct btree_iter *iter, struct bpos pos)
2124 {
2125 struct btree_trans *trans = iter->trans;
2126 struct bch_fs *c = trans->c;
2127 struct bkey u;
2128 struct bkey_s_c k;
2129 int ret;
2130
2131 bch2_trans_verify_not_in_restart(trans);
2132 bch2_trans_verify_not_unlocked(trans);
2133
2134 if ((iter->flags & BTREE_ITER_key_cache_fill) &&
2135 bpos_eq(iter->pos, pos))
2136 return bkey_s_c_null;
2137
2138 if (!bch2_btree_key_cache_find(c, iter->btree_id, pos))
2139 return bkey_s_c_null;
2140
2141 if (!iter->key_cache_path)
2142 iter->key_cache_path = bch2_path_get(trans, iter->btree_id, pos,
2143 iter->flags & BTREE_ITER_intent, 0,
2144 iter->flags|BTREE_ITER_cached|
2145 BTREE_ITER_cached_nofill,
2146 _THIS_IP_);
2147
2148 iter->key_cache_path = bch2_btree_path_set_pos(trans, iter->key_cache_path, pos,
2149 iter->flags & BTREE_ITER_intent,
2150 btree_iter_ip_allocated(iter));
2151
2152 ret = bch2_btree_path_traverse(trans, iter->key_cache_path,
2153 iter->flags|BTREE_ITER_cached) ?:
2154 bch2_btree_path_relock(trans, btree_iter_path(trans, iter), _THIS_IP_);
2155 if (unlikely(ret))
2156 return bkey_s_c_err(ret);
2157
2158 btree_path_set_should_be_locked(trans->paths + iter->key_cache_path);
2159
2160 k = bch2_btree_path_peek_slot(trans->paths + iter->key_cache_path, &u);
2161 if (k.k && !bkey_err(k)) {
2162 iter->k = u;
2163 k.k = &iter->k;
2164 }
2165 return k;
2166 }
2167
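/*
 * Core loop for peeking forwards: reposition the path at search_key, traverse,
 * peek within the leaf node, then overlay the key cache, journal keys and
 * pending updates as the iterator flags require. Whiteouts restart the loop
 * with an advanced search key; exhausting a leaf advances to the successor of
 * that node's max key.
 */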
2168 static struct bkey_s_c __bch2_btree_iter_peek(struct btree_iter *iter, struct bpos search_key)
2169 {
2170 struct btree_trans *trans = iter->trans;
2171 struct bkey_s_c k, k2;
2172 int ret;
2173
2174 EBUG_ON(btree_iter_path(trans, iter)->cached);
2175 bch2_btree_iter_verify(iter);
2176
2177 while (1) {
2178 struct btree_path_level *l;
2179
2180 iter->path = bch2_btree_path_set_pos(trans, iter->path, search_key,
2181 iter->flags & BTREE_ITER_intent,
2182 btree_iter_ip_allocated(iter));
2183
2184 ret = bch2_btree_path_traverse(trans, iter->path, iter->flags);
2185 if (unlikely(ret)) {
2186 /* ensure that iter->k is consistent with iter->pos: */
2187 bch2_btree_iter_set_pos(iter, iter->pos);
2188 k = bkey_s_c_err(ret);
2189 goto out;
2190 }
2191
2192 struct btree_path *path = btree_iter_path(trans, iter);
2193 l = path_l(path);
2194
2195 if (unlikely(!l->b)) {
2196 /* No btree nodes at requested level: */
2197 bch2_btree_iter_set_pos(iter, SPOS_MAX);
2198 k = bkey_s_c_null;
2199 goto out;
2200 }
2201
2202 btree_path_set_should_be_locked(path);
2203
2204 k = btree_path_level_peek_all(trans->c, l, &iter->k);
2205
2206 if (unlikely(iter->flags & BTREE_ITER_with_key_cache) &&
2207 k.k &&
2208 (k2 = btree_trans_peek_key_cache(iter, k.k->p)).k) {
2209 k = k2;
2210 ret = bkey_err(k);
2211 if (ret) {
2212 bch2_btree_iter_set_pos(iter, iter->pos);
2213 goto out;
2214 }
2215 }
2216
2217 if (unlikely(iter->flags & BTREE_ITER_with_journal))
2218 k = btree_trans_peek_journal(trans, iter, k);
2219
2220 if (unlikely((iter->flags & BTREE_ITER_with_updates) &&
2221 trans->nr_updates))
2222 bch2_btree_trans_peek_updates(trans, iter, &k);
2223
2224 if (k.k && bkey_deleted(k.k)) {
2225 /*
2226 * If we've got a whiteout, and it's after the search
2227 * key, advance the search key to the whiteout instead
2228 * of just after the whiteout - it might be a btree
2229 * whiteout, with a real key at the same position, since
2230 * in the btree, deleted keys sort before non-deleted keys.
2231 */
2232 search_key = !bpos_eq(search_key, k.k->p)
2233 ? k.k->p
2234 : bpos_successor(k.k->p);
2235 continue;
2236 }
2237
2238 if (likely(k.k)) {
2239 break;
2240 } else if (likely(!bpos_eq(l->b->key.k.p, SPOS_MAX))) {
2241 /* Advance to next leaf node: */
2242 search_key = bpos_successor(l->b->key.k.p);
2243 } else {
2244 /* End of btree: */
2245 bch2_btree_iter_set_pos(iter, SPOS_MAX);
2246 k = bkey_s_c_null;
2247 goto out;
2248 }
2249 }
2250 out:
2251 bch2_btree_iter_verify(iter);
2252
2253 return k;
2254 }
2255
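/*
 * A minimal sketch of iterating a range with peek_upto() (illustrative only;
 * inum stands in for a caller-supplied inode number, and handling of
 * transaction restarts is elided):
 *
 *	struct btree_iter iter;
 *	struct bkey_s_c k;
 *	int ret = 0;
 *
 *	bch2_trans_iter_init(trans, &iter, BTREE_ID_extents, POS(inum, 0), 0);
 *	while ((k = bch2_btree_iter_peek_upto(&iter, POS(inum, U64_MAX))).k &&
 *	       !(ret = bkey_err(k))) {
 *		... use k ...
 *		bch2_btree_iter_advance(&iter);
 *	}
 *	bch2_trans_iter_exit(trans, &iter);
 */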
2256 /**
2257 * bch2_btree_iter_peek_upto() - returns first key greater than or equal to
2258 * iterator's current position
2259 * @iter: iterator to peek from
2260 * @end: search limit: returns keys less than or equal to @end
2261 *
2262 * Returns: key if found, or an error extractable with bkey_err().
2263 */
2264 struct bkey_s_c bch2_btree_iter_peek_upto(struct btree_iter *iter, struct bpos end)
2265 {
2266 struct btree_trans *trans = iter->trans;
2267 struct bpos search_key = btree_iter_search_key(iter);
2268 struct bkey_s_c k;
2269 struct bpos iter_pos;
2270 int ret;
2271
2272 bch2_trans_verify_not_unlocked(trans);
2273 EBUG_ON((iter->flags & BTREE_ITER_filter_snapshots) && bkey_eq(end, POS_MAX));
2274
2275 if (iter->update_path) {
2276 bch2_path_put_nokeep(trans, iter->update_path,
2277 iter->flags & BTREE_ITER_intent);
2278 iter->update_path = 0;
2279 }
2280
2281 bch2_btree_iter_verify_entry_exit(iter);
2282
2283 while (1) {
2284 k = __bch2_btree_iter_peek(iter, search_key);
2285 if (unlikely(!k.k))
2286 goto end;
2287 if (unlikely(bkey_err(k)))
2288 goto out_no_locked;
2289
2290 /*
2291 * We need to check against @end before FILTER_SNAPSHOTS because
2292 * if we get to a different inode than requested we might be
2293 * seeing keys for a different snapshot tree that will all be
2294 * filtered out.
2295 *
2296 * But we can't do the full check here, because bkey_start_pos()
2297 * isn't monotonically increasing before FILTER_SNAPSHOTS, and
2298 * that's what we check against in extents mode:
2299 */
2300 if (unlikely(!(iter->flags & BTREE_ITER_is_extents)
2301 ? bkey_gt(k.k->p, end)
2302 : k.k->p.inode > end.inode))
2303 goto end;
2304
2305 if (iter->update_path &&
2306 !bkey_eq(trans->paths[iter->update_path].pos, k.k->p)) {
2307 bch2_path_put_nokeep(trans, iter->update_path,
2308 iter->flags & BTREE_ITER_intent);
2309 iter->update_path = 0;
2310 }
2311
2312 if ((iter->flags & BTREE_ITER_filter_snapshots) &&
2313 (iter->flags & BTREE_ITER_intent) &&
2314 !(iter->flags & BTREE_ITER_is_extents) &&
2315 !iter->update_path) {
2316 struct bpos pos = k.k->p;
2317
2318 if (pos.snapshot < iter->snapshot) {
2319 search_key = bpos_successor(k.k->p);
2320 continue;
2321 }
2322
2323 pos.snapshot = iter->snapshot;
2324
2325 /*
2326 * advance, same as on exit for iter->path, but only up
2327 * to snapshot
2328 */
2329 __btree_path_get(trans->paths + iter->path, iter->flags & BTREE_ITER_intent);
2330 iter->update_path = iter->path;
2331
2332 iter->update_path = bch2_btree_path_set_pos(trans,
2333 iter->update_path, pos,
2334 iter->flags & BTREE_ITER_intent,
2335 _THIS_IP_);
2336 ret = bch2_btree_path_traverse(trans, iter->update_path, iter->flags);
2337 if (unlikely(ret)) {
2338 k = bkey_s_c_err(ret);
2339 goto out_no_locked;
2340 }
2341 }
2342
2343 /*
2344 * We can never have a key in a leaf node at POS_MAX, so
2345 * we don't have to check these successor() calls:
2346 */
2347 if ((iter->flags & BTREE_ITER_filter_snapshots) &&
2348 !bch2_snapshot_is_ancestor(trans->c,
2349 iter->snapshot,
2350 k.k->p.snapshot)) {
2351 search_key = bpos_successor(k.k->p);
2352 continue;
2353 }
2354
2355 if (bkey_whiteout(k.k) &&
2356 !(iter->flags & BTREE_ITER_all_snapshots)) {
2357 search_key = bkey_successor(iter, k.k->p);
2358 continue;
2359 }
2360
2361 /*
2362 * iter->pos should be monotonically increasing, and always be
2363 * equal to the key we just returned - except extents can
2364 * straddle iter->pos:
2365 */
2366 if (!(iter->flags & BTREE_ITER_is_extents))
2367 iter_pos = k.k->p;
2368 else
2369 iter_pos = bkey_max(iter->pos, bkey_start_pos(k.k));
2370
2371 if (unlikely(!(iter->flags & BTREE_ITER_is_extents)
2372 ? bkey_gt(iter_pos, end)
2373 : bkey_ge(iter_pos, end)))
2374 goto end;
2375
2376 break;
2377 }
2378
2379 iter->pos = iter_pos;
2380
2381 iter->path = bch2_btree_path_set_pos(trans, iter->path, k.k->p,
2382 iter->flags & BTREE_ITER_intent,
2383 btree_iter_ip_allocated(iter));
2384
2385 btree_path_set_should_be_locked(btree_iter_path(trans, iter));
2386 out_no_locked:
2387 if (iter->update_path) {
2388 ret = bch2_btree_path_relock(trans, trans->paths + iter->update_path, _THIS_IP_);
2389 if (unlikely(ret))
2390 k = bkey_s_c_err(ret);
2391 else
2392 btree_path_set_should_be_locked(trans->paths + iter->update_path);
2393 }
2394
2395 if (!(iter->flags & BTREE_ITER_all_snapshots))
2396 iter->pos.snapshot = iter->snapshot;
2397
2398 ret = bch2_btree_iter_verify_ret(iter, k);
2399 if (unlikely(ret)) {
2400 bch2_btree_iter_set_pos(iter, iter->pos);
2401 k = bkey_s_c_err(ret);
2402 }
2403
2404 bch2_btree_iter_verify_entry_exit(iter);
2405
2406 return k;
2407 end:
2408 bch2_btree_iter_set_pos(iter, end);
2409 k = bkey_s_c_null;
2410 goto out_no_locked;
2411 }
2412
2413 /**
2414 * bch2_btree_iter_next() - returns first key greater than iterator's current
2415 * position
2416 * @iter: iterator to peek from
2417 *
2418 * Returns: key if found, or an error extractable with bkey_err().
2419 */
2420 struct bkey_s_c bch2_btree_iter_next(struct btree_iter *iter)
2421 {
2422 if (!bch2_btree_iter_advance(iter))
2423 return bkey_s_c_null;
2424
2425 return bch2_btree_iter_peek(iter);
2426 }
2427
2428 /**
2429 * bch2_btree_iter_peek_prev() - returns first key less than or equal to
2430 * iterator's current position
2431 * @iter: iterator to peek from
2432 *
2433 * Returns: key if found, or an error extractable with bkey_err().
2434 */
2435 struct bkey_s_c bch2_btree_iter_peek_prev(struct btree_iter *iter)
2436 {
2437 struct btree_trans *trans = iter->trans;
2438 struct bpos search_key = iter->pos;
2439 struct bkey_s_c k;
2440 struct bkey saved_k;
2441 const struct bch_val *saved_v;
2442 btree_path_idx_t saved_path = 0;
2443 int ret;
2444
2445 bch2_trans_verify_not_unlocked(trans);
2446 EBUG_ON(btree_iter_path(trans, iter)->cached ||
2447 btree_iter_path(trans, iter)->level);
2448
2449 if (iter->flags & BTREE_ITER_with_journal)
2450 return bkey_s_c_err(-BCH_ERR_btree_iter_with_journal_not_supported);
2451
2452 bch2_btree_iter_verify(iter);
2453 bch2_btree_iter_verify_entry_exit(iter);
2454
2455 if (iter->flags & BTREE_ITER_filter_snapshots)
2456 search_key.snapshot = U32_MAX;
2457
2458 while (1) {
2459 iter->path = bch2_btree_path_set_pos(trans, iter->path, search_key,
2460 iter->flags & BTREE_ITER_intent,
2461 btree_iter_ip_allocated(iter));
2462
2463 ret = bch2_btree_path_traverse(trans, iter->path, iter->flags);
2464 if (unlikely(ret)) {
2465 /* ensure that iter->k is consistent with iter->pos: */
2466 bch2_btree_iter_set_pos(iter, iter->pos);
2467 k = bkey_s_c_err(ret);
2468 goto out_no_locked;
2469 }
2470
2471 struct btree_path *path = btree_iter_path(trans, iter);
2472
2473 k = btree_path_level_peek(trans, path, &path->l[0], &iter->k);
2474 if (!k.k ||
2475 ((iter->flags & BTREE_ITER_is_extents)
2476 ? bpos_ge(bkey_start_pos(k.k), search_key)
2477 : bpos_gt(k.k->p, search_key)))
2478 k = btree_path_level_prev(trans, path, &path->l[0], &iter->k);
2479
2480 if (unlikely((iter->flags & BTREE_ITER_with_updates) &&
2481 trans->nr_updates))
2482 bch2_btree_trans_peek_prev_updates(trans, iter, &k);
2483
2484 if (likely(k.k)) {
2485 if (iter->flags & BTREE_ITER_filter_snapshots) {
2486 if (k.k->p.snapshot == iter->snapshot)
2487 goto got_key;
2488
2489 /*
2490 * If we have a saved candidate, and we're no
2491 * longer at the same _key_ (not pos), return
2492 * that candidate
2493 */
2494 if (saved_path && !bkey_eq(k.k->p, saved_k.p)) {
2495 bch2_path_put_nokeep(trans, iter->path,
2496 iter->flags & BTREE_ITER_intent);
2497 iter->path = saved_path;
2498 saved_path = 0;
2499 iter->k = saved_k;
2500 k.v = saved_v;
2501 goto got_key;
2502 }
2503
2504 if (bch2_snapshot_is_ancestor(trans->c,
2505 iter->snapshot,
2506 k.k->p.snapshot)) {
2507 if (saved_path)
2508 bch2_path_put_nokeep(trans, saved_path,
2509 iter->flags & BTREE_ITER_intent);
2510 saved_path = btree_path_clone(trans, iter->path,
2511 iter->flags & BTREE_ITER_intent,
2512 _THIS_IP_);
2513 path = btree_iter_path(trans, iter);
2514 saved_k = *k.k;
2515 saved_v = k.v;
2516 }
2517
2518 search_key = bpos_predecessor(k.k->p);
2519 continue;
2520 }
2521 got_key:
2522 if (bkey_whiteout(k.k) &&
2523 !(iter->flags & BTREE_ITER_all_snapshots)) {
2524 search_key = bkey_predecessor(iter, k.k->p);
2525 if (iter->flags & BTREE_ITER_filter_snapshots)
2526 search_key.snapshot = U32_MAX;
2527 continue;
2528 }
2529
2530 btree_path_set_should_be_locked(path);
2531 break;
2532 } else if (likely(!bpos_eq(path->l[0].b->data->min_key, POS_MIN))) {
2533 /* Advance to previous leaf node: */
2534 search_key = bpos_predecessor(path->l[0].b->data->min_key);
2535 } else {
2536 /* Start of btree: */
2537 bch2_btree_iter_set_pos(iter, POS_MIN);
2538 k = bkey_s_c_null;
2539 goto out_no_locked;
2540 }
2541 }
2542
2543 EBUG_ON(bkey_gt(bkey_start_pos(k.k), iter->pos));
2544
2545 /* Extents can straddle iter->pos: */
2546 if (bkey_lt(k.k->p, iter->pos))
2547 iter->pos = k.k->p;
2548
2549 if (iter->flags & BTREE_ITER_filter_snapshots)
2550 iter->pos.snapshot = iter->snapshot;
2551 out_no_locked:
2552 if (saved_path)
2553 bch2_path_put_nokeep(trans, saved_path, iter->flags & BTREE_ITER_intent);
2554
2555 bch2_btree_iter_verify_entry_exit(iter);
2556 bch2_btree_iter_verify(iter);
2557
2558 return k;
2559 }
2560
2561 /**
2562 * bch2_btree_iter_prev() - returns first key less than iterator's current
2563 * position
2564 * @iter: iterator to peek from
2565 *
2566 * Returns: key if found, or an error extractable with bkey_err().
2567 */
2568 struct bkey_s_c bch2_btree_iter_prev(struct btree_iter *iter)
2569 {
2570 if (!bch2_btree_iter_rewind(iter))
2571 return bkey_s_c_null;
2572
2573 return bch2_btree_iter_peek_prev(iter);
2574 }
2575
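/*
 * Unlike peek, peek_slot() returns a key for every valid position: if nothing
 * exists at iter->pos, a deleted key is synthesized there (sized to cover the
 * hole in extents mode), so callers can treat keys and holes uniformly.
 */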
2576 struct bkey_s_c bch2_btree_iter_peek_slot(struct btree_iter *iter)
2577 {
2578 struct btree_trans *trans = iter->trans;
2579 struct bpos search_key;
2580 struct bkey_s_c k;
2581 int ret;
2582
2583 bch2_trans_verify_not_unlocked(trans);
2584 bch2_btree_iter_verify(iter);
2585 bch2_btree_iter_verify_entry_exit(iter);
2586 EBUG_ON(btree_iter_path(trans, iter)->level && (iter->flags & BTREE_ITER_with_key_cache));
2587
2588 /* extents can't span inode numbers: */
2589 if ((iter->flags & BTREE_ITER_is_extents) &&
2590 unlikely(iter->pos.offset == KEY_OFFSET_MAX)) {
2591 if (iter->pos.inode == KEY_INODE_MAX)
2592 return bkey_s_c_null;
2593
2594 bch2_btree_iter_set_pos(iter, bpos_nosnap_successor(iter->pos));
2595 }
2596
2597 search_key = btree_iter_search_key(iter);
2598 iter->path = bch2_btree_path_set_pos(trans, iter->path, search_key,
2599 iter->flags & BTREE_ITER_intent,
2600 btree_iter_ip_allocated(iter));
2601
2602 ret = bch2_btree_path_traverse(trans, iter->path, iter->flags);
2603 if (unlikely(ret)) {
2604 k = bkey_s_c_err(ret);
2605 goto out_no_locked;
2606 }
2607
2608 if ((iter->flags & BTREE_ITER_cached) ||
2609 !(iter->flags & (BTREE_ITER_is_extents|BTREE_ITER_filter_snapshots))) {
2610 k = bkey_s_c_null;
2611
2612 if (unlikely((iter->flags & BTREE_ITER_with_updates) &&
2613 trans->nr_updates)) {
2614 bch2_btree_trans_peek_slot_updates(trans, iter, &k);
2615 if (k.k)
2616 goto out;
2617 }
2618
2619 if (unlikely(iter->flags & BTREE_ITER_with_journal) &&
2620 (k = btree_trans_peek_slot_journal(trans, iter)).k)
2621 goto out;
2622
2623 if (unlikely(iter->flags & BTREE_ITER_with_key_cache) &&
2624 (k = btree_trans_peek_key_cache(iter, iter->pos)).k) {
2625 if (!bkey_err(k))
2626 iter->k = *k.k;
2627 /* We're not returning a key from iter->path: */
2628 goto out_no_locked;
2629 }
2630
2631 k = bch2_btree_path_peek_slot(trans->paths + iter->path, &iter->k);
2632 if (unlikely(!k.k))
2633 goto out_no_locked;
2634 } else {
2635 struct bpos next;
2636 struct bpos end = iter->pos;
2637
2638 if (iter->flags & BTREE_ITER_is_extents)
2639 end.offset = U64_MAX;
2640
2641 EBUG_ON(btree_iter_path(trans, iter)->level);
2642
2643 if (iter->flags & BTREE_ITER_intent) {
2644 struct btree_iter iter2;
2645
2646 bch2_trans_copy_iter(&iter2, iter);
2647 k = bch2_btree_iter_peek_upto(&iter2, end);
2648
2649 if (k.k && !bkey_err(k)) {
2650 swap(iter->key_cache_path, iter2.key_cache_path);
2651 iter->k = iter2.k;
2652 k.k = &iter->k;
2653 }
2654 bch2_trans_iter_exit(trans, &iter2);
2655 } else {
2656 struct bpos pos = iter->pos;
2657
2658 k = bch2_btree_iter_peek_upto(iter, end);
2659 if (unlikely(bkey_err(k)))
2660 bch2_btree_iter_set_pos(iter, pos);
2661 else
2662 iter->pos = pos;
2663 }
2664
2665 if (unlikely(bkey_err(k)))
2666 goto out_no_locked;
2667
2668 next = k.k ? bkey_start_pos(k.k) : POS_MAX;
2669
2670 if (bkey_lt(iter->pos, next)) {
2671 bkey_init(&iter->k);
2672 iter->k.p = iter->pos;
2673
2674 if (iter->flags & BTREE_ITER_is_extents) {
2675 bch2_key_resize(&iter->k,
2676 min_t(u64, KEY_SIZE_MAX,
2677 (next.inode == iter->pos.inode
2678 ? next.offset
2679 : KEY_OFFSET_MAX) -
2680 iter->pos.offset));
2681 EBUG_ON(!iter->k.size);
2682 }
2683
2684 k = (struct bkey_s_c) { &iter->k, NULL };
2685 }
2686 }
2687 out:
2688 btree_path_set_should_be_locked(btree_iter_path(trans, iter));
2689 out_no_locked:
2690 bch2_btree_iter_verify_entry_exit(iter);
2691 bch2_btree_iter_verify(iter);
2692 ret = bch2_btree_iter_verify_ret(iter, k);
2693 if (unlikely(ret))
2694 return bkey_s_c_err(ret);
2695
2696 return k;
2697 }
2698
2699 struct bkey_s_c bch2_btree_iter_next_slot(struct btree_iter *iter)
2700 {
2701 if (!bch2_btree_iter_advance(iter))
2702 return bkey_s_c_null;
2703
2704 return bch2_btree_iter_peek_slot(iter);
2705 }
2706
2707 struct bkey_s_c bch2_btree_iter_prev_slot(struct btree_iter *iter)
2708 {
2709 if (!bch2_btree_iter_rewind(iter))
2710 return bkey_s_c_null;
2711
2712 return bch2_btree_iter_peek_slot(iter);
2713 }
2714
2715 struct bkey_s_c bch2_btree_iter_peek_and_restart_outlined(struct btree_iter *iter)
2716 {
2717 struct bkey_s_c k;
2718
2719 while (btree_trans_too_many_iters(iter->trans) ||
2720 (k = bch2_btree_iter_peek_type(iter, iter->flags),
2721 bch2_err_matches(bkey_err(k), BCH_ERR_transaction_restart)))
2722 bch2_trans_begin(iter->trans);
2723
2724 return k;
2725 }
2726
2727 /* new transactional stuff: */
2728
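/*
 * Typical transaction lifecycle (illustrative sketch; bch2_trans_get() is a
 * wrapper around __bch2_trans_get() below):
 *
 *	struct btree_trans *trans = bch2_trans_get(c);
 *	int ret;
 *
 *	do {
 *		bch2_trans_begin(trans);
 *		ret = ... do btree work ...;
 *	} while (bch2_err_matches(ret, BCH_ERR_transaction_restart));
 *	bch2_trans_put(trans);
 */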
2729 #ifdef CONFIG_BCACHEFS_DEBUG
2730 static void btree_trans_verify_sorted_refs(struct btree_trans *trans)
2731 {
2732 struct btree_path *path;
2733 unsigned i;
2734
2735 BUG_ON(trans->nr_sorted != bitmap_weight(trans->paths_allocated, trans->nr_paths) - 1);
2736
2737 trans_for_each_path(trans, path, i) {
2738 BUG_ON(path->sorted_idx >= trans->nr_sorted);
2739 BUG_ON(trans->sorted[path->sorted_idx] != i);
2740 }
2741
2742 for (i = 0; i < trans->nr_sorted; i++) {
2743 unsigned idx = trans->sorted[i];
2744
2745 BUG_ON(!test_bit(idx, trans->paths_allocated));
2746 BUG_ON(trans->paths[idx].sorted_idx != i);
2747 }
2748 }
2749
2750 static void btree_trans_verify_sorted(struct btree_trans *trans)
2751 {
2752 struct btree_path *path, *prev = NULL;
2753 struct trans_for_each_path_inorder_iter iter;
2754
2755 if (!bch2_debug_check_iterators)
2756 return;
2757
2758 trans_for_each_path_inorder(trans, path, iter) {
2759 if (prev && btree_path_cmp(prev, path) > 0) {
2760 __bch2_dump_trans_paths_updates(trans, true);
2761 panic("trans paths out of order!\n");
2762 }
2763 prev = path;
2764 }
2765 }
2766 #else
2767 static inline void btree_trans_verify_sorted_refs(struct btree_trans *trans) {}
2768 static inline void btree_trans_verify_sorted(struct btree_trans *trans) {}
2769 #endif
2770
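/*
 * Keeping trans->sorted ordered by btree_path_cmp() is what lets
 * bch2_path_get() find reusable paths with an in-order scan, and lets btree
 * locking take nodes in a consistent order.
 */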
2771 void __bch2_btree_trans_sort_paths(struct btree_trans *trans)
2772 {
2773 int i, l = 0, r = trans->nr_sorted, inc = 1;
2774 bool swapped;
2775
2776 btree_trans_verify_sorted_refs(trans);
2777
2778 if (trans->paths_sorted)
2779 goto out;
2780
2781 /*
2782 * Cocktail shaker sort: this is efficient because iterators will be
2783 * mostly sorted.
2784 */
2785 do {
2786 swapped = false;
2787
2788 for (i = inc > 0 ? l : r - 2;
2789 i + 1 < r && i >= l;
2790 i += inc) {
2791 if (btree_path_cmp(trans->paths + trans->sorted[i],
2792 trans->paths + trans->sorted[i + 1]) > 0) {
2793 swap(trans->sorted[i], trans->sorted[i + 1]);
2794 trans->paths[trans->sorted[i]].sorted_idx = i;
2795 trans->paths[trans->sorted[i + 1]].sorted_idx = i + 1;
2796 swapped = true;
2797 }
2798 }
2799
2800 if (inc > 0)
2801 --r;
2802 else
2803 l++;
2804 inc = -inc;
2805 } while (swapped);
2806
2807 trans->paths_sorted = true;
2808 out:
2809 btree_trans_verify_sorted(trans);
2810 }
2811
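/*
 * trans->sorted is kept dense and in sorted order, with each path recording
 * its position in path->sorted_idx. On platforms with efficient unaligned
 * access the element shift is done with word-sized memmoves over the packed
 * index array; either way the sorted_idx back-pointers are fixed up
 * afterwards.
 */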
2812 static inline void btree_path_list_remove(struct btree_trans *trans,
2813 struct btree_path *path)
2814 {
2815 EBUG_ON(path->sorted_idx >= trans->nr_sorted);
2816 #ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2817 trans->nr_sorted--;
2818 memmove_u64s_down_small(trans->sorted + path->sorted_idx,
2819 trans->sorted + path->sorted_idx + 1,
2820 DIV_ROUND_UP(trans->nr_sorted - path->sorted_idx,
2821 sizeof(u64) / sizeof(btree_path_idx_t)));
2822 #else
2823 array_remove_item(trans->sorted, trans->nr_sorted, path->sorted_idx);
2824 #endif
2825 for (unsigned i = path->sorted_idx; i < trans->nr_sorted; i++)
2826 trans->paths[trans->sorted[i]].sorted_idx = i;
2827 }
2828
2829 static inline void btree_path_list_add(struct btree_trans *trans,
2830 btree_path_idx_t pos,
2831 btree_path_idx_t path_idx)
2832 {
2833 struct btree_path *path = trans->paths + path_idx;
2834
2835 path->sorted_idx = pos ? trans->paths[pos].sorted_idx + 1 : trans->nr_sorted;
2836
2837 #ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2838 memmove_u64s_up_small(trans->sorted + path->sorted_idx + 1,
2839 trans->sorted + path->sorted_idx,
2840 DIV_ROUND_UP(trans->nr_sorted - path->sorted_idx,
2841 sizeof(u64) / sizeof(btree_path_idx_t)));
2842 trans->nr_sorted++;
2843 trans->sorted[path->sorted_idx] = path_idx;
2844 #else
2845 array_insert_item(trans->sorted, trans->nr_sorted, path->sorted_idx, path_idx);
2846 #endif
2847
2848 for (unsigned i = path->sorted_idx; i < trans->nr_sorted; i++)
2849 trans->paths[trans->sorted[i]].sorted_idx = i;
2850
2851 btree_trans_verify_sorted_refs(trans);
2852 }
2853
2854 void bch2_trans_iter_exit(struct btree_trans *trans, struct btree_iter *iter)
2855 {
2856 if (iter->update_path)
2857 bch2_path_put_nokeep(trans, iter->update_path,
2858 iter->flags & BTREE_ITER_intent);
2859 if (iter->path)
2860 bch2_path_put(trans, iter->path,
2861 iter->flags & BTREE_ITER_intent);
2862 if (iter->key_cache_path)
2863 bch2_path_put(trans, iter->key_cache_path,
2864 iter->flags & BTREE_ITER_intent);
2865 iter->path = 0;
2866 iter->update_path = 0;
2867 iter->key_cache_path = 0;
2868 iter->trans = NULL;
2869 }
2870
2871 void bch2_trans_iter_init_outlined(struct btree_trans *trans,
2872 struct btree_iter *iter,
2873 enum btree_id btree_id, struct bpos pos,
2874 unsigned flags)
2875 {
2876 bch2_trans_iter_init_common(trans, iter, btree_id, pos, 0, 0,
2877 bch2_btree_iter_flags(trans, btree_id, flags),
2878 _RET_IP_);
2879 }
2880
2881 void bch2_trans_node_iter_init(struct btree_trans *trans,
2882 struct btree_iter *iter,
2883 enum btree_id btree_id,
2884 struct bpos pos,
2885 unsigned locks_want,
2886 unsigned depth,
2887 unsigned flags)
2888 {
2889 flags |= BTREE_ITER_not_extents;
2890 flags |= BTREE_ITER_snapshot_field;
2891 flags |= BTREE_ITER_all_snapshots;
2892
2893 bch2_trans_iter_init_common(trans, iter, btree_id, pos, locks_want, depth,
2894 __bch2_btree_iter_flags(trans, btree_id, flags),
2895 _RET_IP_);
2896
2897 iter->min_depth = depth;
2898
2899 struct btree_path *path = btree_iter_path(trans, iter);
2900 BUG_ON(path->locks_want < min(locks_want, BTREE_MAX_DEPTH));
2901 BUG_ON(path->level != depth);
2902 BUG_ON(iter->min_depth != depth);
2903 }
2904
2905 void bch2_trans_copy_iter(struct btree_iter *dst, struct btree_iter *src)
2906 {
2907 struct btree_trans *trans = src->trans;
2908
2909 *dst = *src;
2910 #ifdef TRACK_PATH_ALLOCATED
2911 dst->ip_allocated = _RET_IP_;
2912 #endif
2913 if (src->path)
2914 __btree_path_get(trans->paths + src->path, src->flags & BTREE_ITER_intent);
2915 if (src->update_path)
2916 __btree_path_get(trans->paths + src->update_path, src->flags & BTREE_ITER_intent);
2917 dst->key_cache_path = 0;
2918 }
2919
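/*
 * Bump allocator for memory that lives until the transaction is reset:
 * allocations are carved out of trans->mem and returned zeroed. If the buffer
 * has to be reallocated, outstanding pointers into the old buffer would
 * dangle, so the transaction is restarted with
 * transaction_restart_mem_realloced. Illustrative use, via the
 * bch2_trans_kmalloc() wrapper from the header:
 *
 *	struct bkey_i *k = bch2_trans_kmalloc(trans, sizeof(*k));
 *	if (IS_ERR(k))
 *		return PTR_ERR(k);
 */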
2920 void *__bch2_trans_kmalloc(struct btree_trans *trans, size_t size)
2921 {
2922 struct bch_fs *c = trans->c;
2923 unsigned new_top = trans->mem_top + size;
2924 unsigned old_bytes = trans->mem_bytes;
2925 unsigned new_bytes = roundup_pow_of_two(new_top);
2926 int ret;
2927 void *new_mem;
2928 void *p;
2929
2930 WARN_ON_ONCE(new_bytes > BTREE_TRANS_MEM_MAX);
2931
2932 struct btree_transaction_stats *s = btree_trans_stats(trans);
2933 s->max_mem = max(s->max_mem, new_bytes);
2934
2935 if (trans->used_mempool) {
2936 if (trans->mem_bytes >= new_bytes)
2937 goto out_change_top;
2938
2939 /* No more space in the mempool allocation; need to kmalloc a new buffer */
2940 new_mem = kmalloc(new_bytes, GFP_NOWAIT|__GFP_NOWARN);
2941 if (unlikely(!new_mem)) {
2942 bch2_trans_unlock(trans);
2943
2944 new_mem = kmalloc(new_bytes, GFP_KERNEL);
2945 if (!new_mem)
2946 return ERR_PTR(-BCH_ERR_ENOMEM_trans_kmalloc);
2947
2948 ret = bch2_trans_relock(trans);
2949 if (ret) {
2950 kfree(new_mem);
2951 return ERR_PTR(ret);
2952 }
2953 }
2954 memcpy(new_mem, trans->mem, trans->mem_top);
2955 trans->used_mempool = false;
2956 mempool_free(trans->mem, &c->btree_trans_mem_pool);
2957 goto out_new_mem;
2958 }
2959
2960 new_mem = krealloc(trans->mem, new_bytes, GFP_NOWAIT|__GFP_NOWARN);
2961 if (unlikely(!new_mem)) {
2962 bch2_trans_unlock(trans);
2963
2964 new_mem = krealloc(trans->mem, new_bytes, GFP_KERNEL);
2965 if (!new_mem && new_bytes <= BTREE_TRANS_MEM_MAX) {
2966 new_mem = mempool_alloc(&c->btree_trans_mem_pool, GFP_KERNEL);
2967 new_bytes = BTREE_TRANS_MEM_MAX;
2968 memcpy(new_mem, trans->mem, trans->mem_top);
2969 trans->used_mempool = true;
2970 kfree(trans->mem);
2971 }
2972
2973 if (!new_mem)
2974 return ERR_PTR(-BCH_ERR_ENOMEM_trans_kmalloc);
2975
2976 trans->mem = new_mem;
2977 trans->mem_bytes = new_bytes;
2978
2979 ret = bch2_trans_relock(trans);
2980 if (ret)
2981 return ERR_PTR(ret);
2982 }
2983 out_new_mem:
2984 trans->mem = new_mem;
2985 trans->mem_bytes = new_bytes;
2986
2987 if (old_bytes) {
2988 trace_and_count(c, trans_restart_mem_realloced, trans, _RET_IP_, new_bytes);
2989 return ERR_PTR(btree_trans_restart(trans, BCH_ERR_transaction_restart_mem_realloced));
2990 }
2991 out_change_top:
2992 p = trans->mem + trans->mem_top;
2993 trans->mem_top += size;
2994 memset(p, 0, size);
2995 return p;
2996 }
2997
2998 static inline void check_srcu_held_too_long(struct btree_trans *trans)
2999 {
3000 WARN(trans->srcu_held && time_after(jiffies, trans->srcu_lock_time + HZ * 10),
3001 "btree trans held srcu lock (delaying memory reclaim) for %lu seconds",
3002 (jiffies - trans->srcu_lock_time) / HZ);
3003 }
3004
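/*
 * Transactions hold an SRCU read lock on c->btree_trans_barrier, which delays
 * memory reclaim of btree nodes: a long-running or sleeping transaction can
 * therefore stall reclaim, so the lock is dropped - and cached-path pointers
 * invalidated - when it's been held too long; bch2_trans_begin() releases it
 * after ~10ms, and check_srcu_held_too_long() warns after ten seconds.
 */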
3005 void bch2_trans_srcu_unlock(struct btree_trans *trans)
3006 {
3007 if (trans->srcu_held) {
3008 struct bch_fs *c = trans->c;
3009 struct btree_path *path;
3010 unsigned i;
3011
3012 trans_for_each_path(trans, path, i)
3013 if (path->cached && !btree_node_locked(path, 0))
3014 path->l[0].b = ERR_PTR(-BCH_ERR_no_btree_node_srcu_reset);
3015
3016 check_srcu_held_too_long(trans);
3017 srcu_read_unlock(&c->btree_trans_barrier, trans->srcu_idx);
3018 trans->srcu_held = false;
3019 }
3020 }
3021
3022 static void bch2_trans_srcu_lock(struct btree_trans *trans)
3023 {
3024 if (!trans->srcu_held) {
3025 trans->srcu_idx = srcu_read_lock(&trans->c->btree_trans_barrier);
3026 trans->srcu_lock_time = jiffies;
3027 trans->srcu_held = true;
3028 }
3029 }
3030
3031 /**
3032 * bch2_trans_begin() - reset a transaction after an interrupted attempt
3033 * @trans: transaction to reset
3034 *
3035 * Returns: current restart counter, to be used with trans_was_restarted()
3036 *
3037 * While iterating over nodes or updating nodes, an attempt to lock a btree
3038 * node may return BCH_ERR_transaction_restart when the trylock fails. When
3039 * this occurs, bch2_trans_begin() should be called and the transaction retried.
3040 */
3041 u32 bch2_trans_begin(struct btree_trans *trans)
3042 {
3043 struct btree_path *path;
3044 unsigned i;
3045 u64 now;
3046
3047 bch2_trans_reset_updates(trans);
3048
3049 trans->restart_count++;
3050 trans->mem_top = 0;
3051 trans->journal_entries = NULL;
3052
3053 trans_for_each_path(trans, path, i) {
3054 path->should_be_locked = false;
3055
3056 /*
3057 * If the transaction wasn't restarted, we're presuming to be
3058 * doing something new: don't keep iterators except the ones that
3059 * are in use - except for the subvolumes btree:
3060 */
3061 if (!trans->restarted && path->btree_id != BTREE_ID_subvolumes)
3062 path->preserve = false;
3063
3064 /*
3065 * XXX: we probably shouldn't be doing this if the transaction
3066 * was restarted, but currently we still overflow transaction
3067 * iterators if we do that
3068 */
3069 if (!path->ref && !path->preserve)
3070 __bch2_path_free(trans, i);
3071 else
3072 path->preserve = false;
3073 }
3074
3075 now = local_clock();
3076
3077 if (!IS_ENABLED(CONFIG_BCACHEFS_NO_LATENCY_ACCT) &&
3078 time_after64(now, trans->last_begin_time + 10))
3079 __bch2_time_stats_update(&btree_trans_stats(trans)->duration,
3080 trans->last_begin_time, now);
3081
3082 if (!trans->restarted &&
3083 (need_resched() ||
3084 time_after64(now, trans->last_begin_time + BTREE_TRANS_MAX_LOCK_HOLD_TIME_NS))) {
3085 bch2_trans_unlock(trans);
3086 cond_resched();
3087 now = local_clock();
3088 }
3089 trans->last_begin_time = now;
3090
3091 if (unlikely(trans->srcu_held &&
3092 time_after(jiffies, trans->srcu_lock_time + msecs_to_jiffies(10))))
3093 bch2_trans_srcu_unlock(trans);
3094
3095 trans->last_begin_ip = _RET_IP_;
3096
3097 trans_set_locked(trans);
3098
3099 if (trans->restarted) {
3100 bch2_btree_path_traverse_all(trans);
3101 trans->notrace_relock_fail = false;
3102 }
3103
3104 bch2_trans_verify_not_unlocked(trans);
3105 return trans->restart_count;
3106 }
3107
3108 const char *bch2_btree_transaction_fns[BCH_TRANSACTIONS_NR] = { "(unknown)" };
3109
3110 unsigned bch2_trans_get_fn_idx(const char *fn)
3111 {
3112 for (unsigned i = 0; i < ARRAY_SIZE(bch2_btree_transaction_fns); i++)
3113 if (!bch2_btree_transaction_fns[i] ||
3114 bch2_btree_transaction_fns[i] == fn) {
3115 bch2_btree_transaction_fns[i] = fn;
3116 return i;
3117 }
3118
3119 pr_warn_once("BCH_TRANSACTIONS_NR not big enough!");
3120 return 0;
3121 }
3122
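/*
 * Transaction objects are cached per-CPU and reused when possible, falling
 * back to a mempool. Statistics from previous transactions at the same call
 * site (fn_idx) presize the memory buffer and seed nr_paths_max and
 * journal_entries_size.
 */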
3123 struct btree_trans *__bch2_trans_get(struct bch_fs *c, unsigned fn_idx)
3124 __acquires(&c->btree_trans_barrier)
3125 {
3126 struct btree_trans *trans;
3127
3128 if (IS_ENABLED(__KERNEL__)) {
3129 trans = this_cpu_xchg(c->btree_trans_bufs->trans, NULL);
3130 if (trans) {
3131 memset(trans, 0, offsetof(struct btree_trans, list));
3132 goto got_trans;
3133 }
3134 }
3135
3136 trans = mempool_alloc(&c->btree_trans_pool, GFP_NOFS);
3137 memset(trans, 0, sizeof(*trans));
3138
3139 seqmutex_lock(&c->btree_trans_lock);
3140 if (IS_ENABLED(CONFIG_BCACHEFS_DEBUG)) {
3141 struct btree_trans *pos;
3142 pid_t pid = current->pid;
3143
3144 trans->locking_wait.task = current;
3145
3146 list_for_each_entry(pos, &c->btree_trans_list, list) {
3147 struct task_struct *pos_task = READ_ONCE(pos->locking_wait.task);
3148 /*
3149 * We'd much prefer to be stricter here and completely
3150 * disallow multiple btree_trans in the same thread -
3151 * but the data move path calls bch2_write when we
3152 * already have a btree_trans initialized.
3153 */
3154 BUG_ON(pos_task &&
3155 pid == pos_task->pid &&
3156 pos->locked);
3157 }
3158 }
3159
3160 list_add(&trans->list, &c->btree_trans_list);
3161 seqmutex_unlock(&c->btree_trans_lock);
3162 got_trans:
3163 trans->c = c;
3164 trans->last_begin_time = local_clock();
3165 trans->fn_idx = fn_idx;
3166 trans->locking_wait.task = current;
3167 trans->journal_replay_not_finished =
3168 unlikely(!test_bit(JOURNAL_replay_done, &c->journal.flags)) &&
3169 atomic_inc_not_zero(&c->journal_keys.ref);
3170 trans->nr_paths = ARRAY_SIZE(trans->_paths);
3171 trans->paths_allocated = trans->_paths_allocated;
3172 trans->sorted = trans->_sorted;
3173 trans->paths = trans->_paths;
3174 trans->updates = trans->_updates;
3175
3176 *trans_paths_nr(trans->paths) = BTREE_ITER_INITIAL;
3177
3178 trans->paths_allocated[0] = 1;
3179
3180 static struct lock_class_key lockdep_key;
3181 lockdep_init_map(&trans->dep_map, "bcachefs_btree", &lockdep_key, 0);
3182
3183 if (fn_idx < BCH_TRANSACTIONS_NR) {
3184 trans->fn = bch2_btree_transaction_fns[fn_idx];
3185
3186 struct btree_transaction_stats *s = &c->btree_transaction_stats[fn_idx];
3187
3188 if (s->max_mem) {
3189 unsigned expected_mem_bytes = roundup_pow_of_two(s->max_mem);
3190
3191 trans->mem = kmalloc(expected_mem_bytes, GFP_KERNEL);
3192 if (likely(trans->mem))
3193 trans->mem_bytes = expected_mem_bytes;
3194 }
3195
3196 trans->nr_paths_max = s->nr_max_paths;
3197 trans->journal_entries_size = s->journal_entries_size;
3198 }
3199
3200 trans->srcu_idx = srcu_read_lock(&c->btree_trans_barrier);
3201 trans->srcu_lock_time = jiffies;
3202 trans->srcu_held = true;
3203 trans_set_locked(trans);
3204
3205 closure_init_stack_release(&trans->ref);
3206 return trans;
3207 }
3208
3209 static void check_btree_paths_leaked(struct btree_trans *trans)
3210 {
3211 #ifdef CONFIG_BCACHEFS_DEBUG
3212 struct bch_fs *c = trans->c;
3213 struct btree_path *path;
3214 unsigned i;
3215
3216 trans_for_each_path(trans, path, i)
3217 if (path->ref)
3218 goto leaked;
3219 return;
3220 leaked:
3221 bch_err(c, "btree paths leaked from %s!", trans->fn);
3222 trans_for_each_path(trans, path, i)
3223 if (path->ref)
3224 printk(KERN_ERR " btree %s %pS\n",
3225 bch2_btree_id_str(path->btree_id),
3226 (void *) path->ip_allocated);
3227 /* Be noisy about this: */
3228 bch2_fatal_error(c);
3229 #endif
3230 }
3231
3232 void bch2_trans_put(struct btree_trans *trans)
3233 __releases(&c->btree_trans_barrier)
3234 {
3235 struct bch_fs *c = trans->c;
3236
3237 bch2_trans_unlock(trans);
3238
3239 trans_for_each_update(trans, i)
3240 __btree_path_put(trans->paths + i->path, true);
3241 trans->nr_updates = 0;
3242
3243 check_btree_paths_leaked(trans);
3244
3245 if (trans->srcu_held) {
3246 check_srcu_held_too_long(trans);
3247 srcu_read_unlock(&c->btree_trans_barrier, trans->srcu_idx);
3248 }
3249
3250 if (unlikely(trans->journal_replay_not_finished))
3251 bch2_journal_keys_put(c);
3252
3253 /*
3254 * trans->ref protects trans->locking_wait.task and the btree_paths array;
3255 * both are used by the cycle detector
3256 */
3257 closure_return_sync(&trans->ref);
3258 trans->locking_wait.task = NULL;
3259
3260 unsigned long *paths_allocated = trans->paths_allocated;
3261 trans->paths_allocated = NULL;
3262 trans->paths = NULL;
3263
3264 if (paths_allocated != trans->_paths_allocated)
3265 kvfree_rcu_mightsleep(paths_allocated);
3266
3267 if (trans->used_mempool)
3268 mempool_free(trans->mem, &c->btree_trans_mem_pool);
3269 else
3270 kfree(trans->mem);
3271
3272 /* Userspace doesn't have a real percpu implementation: */
3273 if (IS_ENABLED(__KERNEL__))
3274 trans = this_cpu_xchg(c->btree_trans_bufs->trans, trans);
3275
3276 if (trans) {
3277 seqmutex_lock(&c->btree_trans_lock);
3278 list_del(&trans->list);
3279 seqmutex_unlock(&c->btree_trans_lock);
3280
3281 mempool_free(trans, &c->btree_trans_pool);
3282 }
3283 }
3284
3285 bool bch2_current_has_btree_trans(struct bch_fs *c)
3286 {
3287 seqmutex_lock(&c->btree_trans_lock);
3288 struct btree_trans *trans;
3289 bool ret = false;
3290 list_for_each_entry(trans, &c->btree_trans_list, list)
3291 if (trans->locking_wait.task == current &&
3292 trans->locked) {
3293 ret = true;
3294 break;
3295 }
3296 seqmutex_unlock(&c->btree_trans_lock);
3297 return ret;
3298 }
3299
3300 static void __maybe_unused
3301 bch2_btree_bkey_cached_common_to_text(struct printbuf *out,
3302 struct btree_bkey_cached_common *b)
3303 {
3304 struct six_lock_count c = six_lock_counts(&b->lock);
3305 struct task_struct *owner;
3306 pid_t pid;
3307
3308 rcu_read_lock();
3309 owner = READ_ONCE(b->lock.owner);
3310 pid = owner ? owner->pid : 0;
3311 rcu_read_unlock();
3312
3313 prt_printf(out, "\t%px %c l=%u %s:", b, b->cached ? 'c' : 'b',
3314 b->level, bch2_btree_id_str(b->btree_id));
3315 bch2_bpos_to_text(out, btree_node_pos(b));
3316
3317 prt_printf(out, "\t locks %u:%u:%u held by pid %u",
3318 c.n[0], c.n[1], c.n[2], pid);
3319 }
3320
3321 void bch2_btree_trans_to_text(struct printbuf *out, struct btree_trans *trans)
3322 {
3323 struct btree_bkey_cached_common *b;
3324 static char lock_types[] = { 'r', 'i', 'w' };
3325 struct task_struct *task = READ_ONCE(trans->locking_wait.task);
3326 unsigned l, idx;
3327
3328 /* before rcu_read_lock(): */
3329 bch2_printbuf_make_room(out, 4096);
3330
3331 if (!out->nr_tabstops) {
3332 printbuf_tabstop_push(out, 16);
3333 printbuf_tabstop_push(out, 32);
3334 }
3335
3336 prt_printf(out, "%i %s\n", task ? task->pid : 0, trans->fn);
3337
3338 /* trans->paths is rcu protected vs. freeing */
3339 rcu_read_lock();
3340 out->atomic++;
3341
3342 struct btree_path *paths = rcu_dereference(trans->paths);
3343 if (!paths)
3344 goto out;
3345
3346 unsigned long *paths_allocated = trans_paths_allocated(paths);
3347
3348 trans_for_each_path_idx_from(paths_allocated, *trans_paths_nr(paths), idx, 1) {
3349 struct btree_path *path = paths + idx;
3350 if (!path->nodes_locked)
3351 continue;
3352
3353 prt_printf(out, " path %u %c l=%u %s:",
3354 idx,
3355 path->cached ? 'c' : 'b',
3356 path->level,
3357 bch2_btree_id_str(path->btree_id));
3358 bch2_bpos_to_text(out, path->pos);
3359 prt_newline(out);
3360
3361 for (l = 0; l < BTREE_MAX_DEPTH; l++) {
3362 if (btree_node_locked(path, l) &&
3363 !IS_ERR_OR_NULL(b = (void *) READ_ONCE(path->l[l].b))) {
3364 prt_printf(out, " %c l=%u ",
3365 lock_types[btree_node_locked_type(path, l)], l);
3366 bch2_btree_bkey_cached_common_to_text(out, b);
3367 prt_newline(out);
3368 }
3369 }
3370 }
3371
3372 b = READ_ONCE(trans->locking);
3373 if (b) {
3374 prt_printf(out, " blocked for %lluus on\n",
3375 div_u64(local_clock() - trans->locking_wait.start_time, 1000));
3376 prt_printf(out, " %c", lock_types[trans->locking_wait.lock_want]);
3377 bch2_btree_bkey_cached_common_to_text(out, b);
3378 prt_newline(out);
3379 }
3380 out:
3381 --out->atomic;
3382 rcu_read_unlock();
3383 }
3384
3385 void bch2_fs_btree_iter_exit(struct bch_fs *c)
3386 {
3387 struct btree_transaction_stats *s;
3388 struct btree_trans *trans;
3389 int cpu;
3390
3391 if (c->btree_trans_bufs)
3392 for_each_possible_cpu(cpu) {
3393 struct btree_trans *trans =
3394 per_cpu_ptr(c->btree_trans_bufs, cpu)->trans;
3395
3396 if (trans) {
3397 seqmutex_lock(&c->btree_trans_lock);
3398 list_del(&trans->list);
3399 seqmutex_unlock(&c->btree_trans_lock);
3400 }
3401 kfree(trans);
3402 }
3403 free_percpu(c->btree_trans_bufs);
3404
3405 trans = list_first_entry_or_null(&c->btree_trans_list, struct btree_trans, list);
3406 if (trans)
3407 panic("%s leaked btree_trans\n", trans->fn);
3408
3409 for (s = c->btree_transaction_stats;
3410 s < c->btree_transaction_stats + ARRAY_SIZE(c->btree_transaction_stats);
3411 s++) {
3412 kfree(s->max_paths_text);
3413 bch2_time_stats_exit(&s->lock_hold_times);
3414 }
3415
3416 if (c->btree_trans_barrier_initialized) {
3417 synchronize_srcu_expedited(&c->btree_trans_barrier);
3418 cleanup_srcu_struct(&c->btree_trans_barrier);
3419 }
3420 mempool_exit(&c->btree_trans_mem_pool);
3421 mempool_exit(&c->btree_trans_pool);
3422 }
3423
3424 void bch2_fs_btree_iter_init_early(struct bch_fs *c)
3425 {
3426 struct btree_transaction_stats *s;
3427
3428 for (s = c->btree_transaction_stats;
3429 s < c->btree_transaction_stats + ARRAY_SIZE(c->btree_transaction_stats);
3430 s++) {
3431 bch2_time_stats_init(&s->duration);
3432 bch2_time_stats_init(&s->lock_hold_times);
3433 mutex_init(&s->lock);
3434 }
3435
3436 INIT_LIST_HEAD(&c->btree_trans_list);
3437 seqmutex_init(&c->btree_trans_lock);
3438 }
3439
3440 int bch2_fs_btree_iter_init(struct bch_fs *c)
3441 {
3442 int ret;
3443
3444 c->btree_trans_bufs = alloc_percpu(struct btree_trans_buf);
3445 if (!c->btree_trans_bufs)
3446 return -ENOMEM;
3447
3448 ret = mempool_init_kmalloc_pool(&c->btree_trans_pool, 1,
3449 sizeof(struct btree_trans)) ?:
3450 mempool_init_kmalloc_pool(&c->btree_trans_mem_pool, 1,
3451 BTREE_TRANS_MEM_MAX) ?:
3452 init_srcu_struct(&c->btree_trans_barrier);
3453 if (ret)
3454 return ret;
3455
3456 /*
3457 * static annotation (hackily done) for lock ordering of reclaim vs.
3458 * btree node locks:
3459 */
3460 #ifdef CONFIG_LOCKDEP
3461 fs_reclaim_acquire(GFP_KERNEL);
3462 struct btree_trans *trans = bch2_trans_get(c);
3463 trans_set_locked(trans);
3464 bch2_trans_put(trans);
3465 fs_reclaim_release(GFP_KERNEL);
3466 #endif
3467
3468 c->btree_trans_barrier_initialized = true;
3469 return 0;
3471 }
3472