// SPDX-License-Identifier: GPL-2.0

#include "bcachefs.h"
#include "btree_locking.h"
#include "btree_types.h"

static struct lock_class_key bch2_btree_node_lock_key;

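/*
 * Btree node locks get their own lock class, and we skip lockdep tracking:
 * bcachefs does its own btree lock cycle detection (bch2_check_for_deadlock()).
 */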
void bch2_btree_lock_init(struct btree_bkey_cached_common *b,
			  enum six_lock_init_flags flags,
			  gfp_t gfp)
{
	__six_lock_init(&b->lock, "b->c.lock", &bch2_btree_node_lock_key, flags, gfp);
	lockdep_set_notrack_class(&b->lock);
}

/* Btree node locking: */

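/*
 * Count the locks @trans holds on node @b at @level, per lock type, excluding
 * those held via path @skip if non-NULL:
 */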
struct six_lock_count bch2_btree_node_lock_counts(struct btree_trans *trans,
						  struct btree_path *skip,
						  struct btree_bkey_cached_common *b,
						  unsigned level)
{
	struct btree_path *path;
	struct six_lock_count ret;
	unsigned i;

	memset(&ret, 0, sizeof(ret));

	if (IS_ERR_OR_NULL(b))
		return ret;

	trans_for_each_path(trans, path, i)
		if (path != skip && &path->l[level].b->c == b) {
			int t = btree_node_locked_type(path, level);

			if (t != BTREE_NODE_UNLOCKED)
				ret.n[t]++;
		}

	return ret;
}

/* unlock */

void bch2_btree_node_unlock_write(struct btree_trans *trans,
				  struct btree_path *path, struct btree *b)
{
	bch2_btree_node_unlock_write_inlined(trans, path, b);
}

/* lock */

/*
 * @trans wants to lock @b with type @type
 */
struct trans_waiting_for_lock {
	struct btree_trans		*trans;
	struct btree_bkey_cached_common *node_want;
	enum six_lock_type		lock_want;

	/* for iterating over held locks: */
	u8				path_idx;
	u8				level;
	u64				lock_start_time;
};

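/*
 * The lock graph is the DFS stack for deadlock detection: each entry holds a
 * lock that the next entry is waiting on. Depth is capped; if we hit the
 * limit we give up and restart instead of searching further (see
 * lock_graph_descend()).
 */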
struct lock_graph {
	struct trans_waiting_for_lock	g[8];
	unsigned			nr;
};

static noinline void print_cycle(struct printbuf *out, struct lock_graph *g)
{
	struct trans_waiting_for_lock *i;

	prt_printf(out, "Found lock cycle (%u entries):\n", g->nr);

	for (i = g->g; i < g->g + g->nr; i++) {
		struct task_struct *task = READ_ONCE(i->trans->locking_wait.task);
		if (!task)
			continue;

		bch2_btree_trans_to_text(out, i->trans);
		bch2_prt_task_backtrace(out, task, i == g->g ? 5 : 1, GFP_NOWAIT);
	}
}

static noinline void print_chain(struct printbuf *out, struct lock_graph *g)
{
	struct trans_waiting_for_lock *i;

	for (i = g->g; i != g->g + g->nr; i++) {
		struct task_struct *task = READ_ONCE(i->trans->locking_wait.task);
		if (i != g->g)
			prt_str(out, "<- ");
		prt_printf(out, "%u ", task ? task->pid : 0);
	}
	prt_newline(out);
}

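/* Pop the top graph entry, dropping the transaction ref lock_graph_down() took: */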
static void lock_graph_up(struct lock_graph *g)
{
	closure_put(&g->g[--g->nr].trans->ref);
}

static noinline void lock_graph_pop_all(struct lock_graph *g)
{
	while (g->nr)
		lock_graph_up(g);
}

static noinline void lock_graph_pop_from(struct lock_graph *g, struct trans_waiting_for_lock *i)
{
	while (g->g + g->nr > i)
		lock_graph_up(g);
}

static void __lock_graph_down(struct lock_graph *g, struct btree_trans *trans)
{
	g->g[g->nr++] = (struct trans_waiting_for_lock) {
		.trans		= trans,
		.node_want	= trans->locking,
		.lock_want	= trans->locking_wait.lock_want,
	};
}

static void lock_graph_down(struct lock_graph *g, struct btree_trans *trans)
{
	closure_get(&trans->ref);
	__lock_graph_down(g, trans);
}

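/*
 * Prune graph entries that raced and are no longer actually waiting on the
 * lock we saw them waiting on; returns true if anything was popped:
 */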
static bool lock_graph_remove_non_waiters(struct lock_graph *g,
					  struct trans_waiting_for_lock *from)
{
	struct trans_waiting_for_lock *i;

	if (from->trans->locking != from->node_want) {
		lock_graph_pop_from(g, from);
		return true;
	}

	for (i = from + 1; i < g->g + g->nr; i++)
		if (i->trans->locking != i->node_want ||
		    i->trans->locking_wait.start_time != i[-1].lock_start_time) {
			lock_graph_pop_from(g, i);
			return true;
		}

	return false;
}

static void trace_would_deadlock(struct lock_graph *g, struct btree_trans *trans)
{
	struct bch_fs *c = trans->c;

	count_event(c, trans_restart_would_deadlock);

	if (trace_trans_restart_would_deadlock_enabled()) {
		struct printbuf buf = PRINTBUF;

		buf.atomic++;
		print_cycle(&buf, g);

		trace_trans_restart_would_deadlock(trans, buf.buf);
		printbuf_exit(&buf);
	}
}

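/*
 * Abort one transaction to break a lock cycle: if it's the one the deadlock
 * check was started for (the first graph entry), restart it; otherwise set
 * lock_must_abort and wake it so it aborts itself:
 */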
static int abort_lock(struct lock_graph *g, struct trans_waiting_for_lock *i)
{
	if (i == g->g) {
		trace_would_deadlock(g, i->trans);
		return btree_trans_restart_foreign_task(i->trans,
					BCH_ERR_transaction_restart_would_deadlock,
					_THIS_IP_);
	} else {
		i->trans->lock_must_abort = true;
		wake_up_process(i->trans->locking_wait.task);
		return 0;
	}
}

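/*
 * Rank @trans as a candidate for aborting: 0 means it may not fail and can't
 * be aborted; higher values are preferred victims:
 */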
static int btree_trans_abort_preference(struct btree_trans *trans)
{
	if (trans->lock_may_not_fail)
		return 0;
	if (trans->locking_wait.lock_want == SIX_LOCK_write)
		return 1;
	if (!trans->in_traverse_all)
		return 2;
	return 3;
}

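/*
 * We found a cycle: abort the entry with the highest abort preference. A
 * cycle consisting entirely of nofail locks is a bug - print it and panic:
 */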
static noinline int break_cycle(struct lock_graph *g, struct printbuf *cycle,
				struct trans_waiting_for_lock *from)
{
	struct trans_waiting_for_lock *i, *abort = NULL;
	unsigned best = 0, pref;
	int ret;

	if (lock_graph_remove_non_waiters(g, from))
		return 0;

	/* Only checking, for debugfs: */
	if (cycle) {
		print_cycle(cycle, g);
		ret = -1;
		goto out;
	}

	for (i = from; i < g->g + g->nr; i++) {
		pref = btree_trans_abort_preference(i->trans);
		if (pref > best) {
			abort = i;
			best = pref;
		}
	}

	if (unlikely(!best)) {
		struct printbuf buf = PRINTBUF;
		buf.atomic++;

		prt_printf(&buf, bch2_fmt(g->g->trans->c, "cycle of nofail locks"));

		for (i = g->g; i < g->g + g->nr; i++) {
			struct btree_trans *trans = i->trans;

			bch2_btree_trans_to_text(&buf, trans);

			prt_printf(&buf, "backtrace:\n");
			printbuf_indent_add(&buf, 2);
			bch2_prt_task_backtrace(&buf, trans->locking_wait.task, 2, GFP_NOWAIT);
			printbuf_indent_sub(&buf, 2);
			prt_newline(&buf);
		}

		bch2_print_string_as_lines_nonblocking(KERN_ERR, buf.buf);
		printbuf_exit(&buf);
		BUG();
	}

	ret = abort_lock(g, abort);
out:
	if (ret)
		lock_graph_pop_all(g);
	else
		lock_graph_pop_from(g, abort);
	return ret;
}

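/*
 * Add @trans to the lock graph: if it's already there we've found a cycle and
 * have to break it; if the graph is full, give up and restart rather than
 * searching deeper:
 */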
static int lock_graph_descend(struct lock_graph *g, struct btree_trans *trans,
			      struct printbuf *cycle)
{
	struct btree_trans *orig_trans = g->g->trans;
	struct trans_waiting_for_lock *i;

	for (i = g->g; i < g->g + g->nr; i++)
		if (i->trans == trans) {
			closure_put(&trans->ref);
			return break_cycle(g, cycle, i);
		}

	if (g->nr == ARRAY_SIZE(g->g)) {
		closure_put(&trans->ref);

		if (orig_trans->lock_may_not_fail)
			return 0;

		lock_graph_pop_all(g);

		if (cycle)
			return 0;

		trace_and_count(trans->c, trans_restart_would_deadlock_recursion_limit, trans, _RET_IP_);
		return btree_trans_restart(orig_trans, BCH_ERR_transaction_restart_deadlock_recursion_limit);
	}

	__lock_graph_down(g, trans);
	return 0;
}

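/*
 * SIX_LOCK_read is 0, SIX_LOCK_intent is 1, SIX_LOCK_write is 2: read locks
 * don't conflict with each other or with intent locks, so two lock types
 * conflict iff they sum to more than 1:
 */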
static bool lock_type_conflicts(enum six_lock_type t1, enum six_lock_type t2)
{
	return t1 + t2 > 1;
}

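/*
 * Check if blocking on the lock @trans is waiting on would deadlock: do a
 * depth first search over the graph of transactions waiting on btree node
 * locks, and break any cycle we find by aborting one of the transactions in
 * it.
 *
 * If @cycle is non-NULL we're only checking, for debugfs: print any cycle
 * found to @cycle and return -1 without aborting anything.
 */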
int bch2_check_for_deadlock(struct btree_trans *trans, struct printbuf *cycle)
{
	struct lock_graph g;
	struct trans_waiting_for_lock *top;
	struct btree_bkey_cached_common *b;
	btree_path_idx_t path_idx;
	int ret = 0;

	g.nr = 0;

	if (trans->lock_must_abort && !trans->lock_may_not_fail) {
		if (cycle)
			return -1;

		trace_would_deadlock(&g, trans);
		return btree_trans_restart(trans, BCH_ERR_transaction_restart_would_deadlock);
	}

	lock_graph_down(&g, trans);

	/* trans->paths is rcu protected vs. freeing */
	rcu_read_lock();
	if (cycle)
		cycle->atomic++;
next:
	if (!g.nr)
		goto out;

	top = &g.g[g.nr - 1];

	struct btree_path *paths = rcu_dereference(top->trans->paths);
	if (!paths)
		goto up;

	unsigned long *paths_allocated = trans_paths_allocated(paths);

	trans_for_each_path_idx_from(paths_allocated, *trans_paths_nr(paths),
				     path_idx, top->path_idx) {
		struct btree_path *path = paths + path_idx;
		if (!path->nodes_locked)
			continue;

		if (path_idx != top->path_idx) {
			top->path_idx		= path_idx;
			top->level		= 0;
			top->lock_start_time	= 0;
		}

		for (;
		     top->level < BTREE_MAX_DEPTH;
		     top->level++, top->lock_start_time = 0) {
			int lock_held = btree_node_locked_type(path, top->level);

			if (lock_held == BTREE_NODE_UNLOCKED)
				continue;

			b = &READ_ONCE(path->l[top->level].b)->c;

			if (IS_ERR_OR_NULL(b)) {
				/*
				 * If we get here, it means we raced with the
				 * other thread updating its btree_path
				 * structures - which means it can't be blocked
				 * waiting on a lock:
				 */
				if (!lock_graph_remove_non_waiters(&g, g.g)) {
					/*
					 * If lock_graph_remove_non_waiters()
					 * didn't do anything, it must be
					 * because we're being called by debugfs
					 * checking for lock cycles, which
					 * invokes us on btree_transactions that
					 * aren't actually waiting on anything.
					 * Just bail out:
					 */
					lock_graph_pop_all(&g);
				}

				goto next;
			}

			if (list_empty_careful(&b->lock.wait_list))
				continue;

			raw_spin_lock(&b->lock.wait_lock);
			list_for_each_entry(trans, &b->lock.wait_list, locking_wait.list) {
				BUG_ON(b != trans->locking);

				if (top->lock_start_time &&
				    time_after_eq64(top->lock_start_time, trans->locking_wait.start_time))
					continue;

				top->lock_start_time = trans->locking_wait.start_time;

				/* Don't check for self deadlock: */
				if (trans == top->trans ||
				    !lock_type_conflicts(lock_held, trans->locking_wait.lock_want))
					continue;

				closure_get(&trans->ref);
				raw_spin_unlock(&b->lock.wait_lock);

				ret = lock_graph_descend(&g, trans, cycle);
				if (ret)
					goto out;
				goto next;
			}
			raw_spin_unlock(&b->lock.wait_lock);
		}
	}
up:
	if (g.nr > 1 && cycle)
		print_chain(cycle, &g);
	lock_graph_up(&g);
	goto next;
out:
	if (cycle)
		--cycle->atomic;
	rcu_read_unlock();
	return ret;
}

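/*
 * Deadlock check callback passed to the six lock code, called when about to
 * block on a lock - a minimal wrapper around bch2_check_for_deadlock():
 */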
int bch2_six_check_for_deadlock(struct six_lock *lock, void *p)
{
	struct btree_trans *trans = p;

	return bch2_check_for_deadlock(trans, NULL);
}

int __bch2_btree_node_lock_write(struct btree_trans *trans, struct btree_path *path,
				 struct btree_bkey_cached_common *b,
				 bool lock_may_not_fail)
{
	int readers = bch2_btree_node_lock_counts(trans, NULL, b, b->level).n[SIX_LOCK_read];
	int ret;

	/*
	 * Must drop our read locks before calling six_lock_write() -
	 * six_unlock() won't do wakeups until the reader count
	 * goes to 0, and it's safe because we have the node intent
	 * locked:
	 */
	six_lock_readers_add(&b->lock, -readers);
	ret = __btree_node_lock_nopath(trans, b, SIX_LOCK_write,
				       lock_may_not_fail, _RET_IP_);
	six_lock_readers_add(&b->lock, readers);

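	/*
	 * If we failed to get the write lock, we still hold the intent lock -
	 * fix up the node's locked state to match:
	 */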
	if (ret)
		mark_btree_node_locked_noreset(path, b->level, BTREE_NODE_INTENT_LOCKED);

	return ret;
}

void bch2_btree_node_lock_write_nofail(struct btree_trans *trans,
				       struct btree_path *path,
				       struct btree_bkey_cached_common *b)
{
	int ret = __btree_node_lock_write(trans, path, b, true);
	BUG_ON(ret);
}

/* relock */

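/*
 * Relock - or upgrade to intent locked, if @upgrade is true - every level of
 * @path from path->level up to path->locks_want. On failure, unlock the
 * entire path and poison the failed levels so the next traversal knows where
 * to walk back up to:
 */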
static inline bool btree_path_get_locks(struct btree_trans *trans,
					struct btree_path *path,
					bool upgrade,
					struct get_locks_fail *f)
{
	unsigned l = path->level;
	int fail_idx = -1;

	do {
		if (!btree_path_node(path, l))
			break;

		if (!(upgrade
		      ? bch2_btree_node_upgrade(trans, path, l)
		      : bch2_btree_node_relock(trans, path, l))) {
			fail_idx = l;

			if (f) {
				f->l	= l;
				f->b	= path->l[l].b;
			}
		}

		l++;
	} while (l < path->locks_want);

	/*
	 * When we fail to get a lock, we have to ensure that any child nodes
	 * can't be relocked so bch2_btree_path_traverse has to walk back up to
	 * the node that we failed to relock:
	 */
	if (fail_idx >= 0) {
		__bch2_btree_path_unlock(trans, path);
		btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);

		do {
			path->l[fail_idx].b = upgrade
				? ERR_PTR(-BCH_ERR_no_btree_node_upgrade)
				: ERR_PTR(-BCH_ERR_no_btree_node_relock);
			--fail_idx;
		} while (fail_idx >= 0);
	}

	if (path->uptodate == BTREE_ITER_NEED_RELOCK)
		path->uptodate = BTREE_ITER_UPTODATE;

	return path->uptodate < BTREE_ITER_NEED_RELOCK;
}

bool __bch2_btree_node_relock(struct btree_trans *trans,
			      struct btree_path *path, unsigned level,
			      bool trace)
{
	struct btree *b = btree_path_node(path, level);
	int want = __btree_lock_want(path, level);

	if (race_fault())
		goto fail;

	if (six_relock_type(&b->c.lock, want, path->l[level].lock_seq) ||
	    (btree_node_lock_seq_matches(path, b, level) &&
	     btree_node_lock_increment(trans, &b->c, level, want))) {
		mark_btree_node_locked(trans, path, level, want);
		return true;
	}
fail:
	if (trace && !trans->notrace_relock_fail)
		trace_and_count(trans->c, btree_path_relock_fail, trans, _RET_IP_, path, level);
	return false;
}

/* upgrade */

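/*
 * Upgrade to an intent lock at @level, either by upgrading/relocking the six
 * lock directly or by taking a reference to an intent lock another path in
 * this transaction already holds:
 */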
bool bch2_btree_node_upgrade(struct btree_trans *trans,
			     struct btree_path *path, unsigned level)
{
	struct btree *b = path->l[level].b;

	if (!is_btree_node(path, level))
		return false;

	switch (btree_lock_want(path, level)) {
	case BTREE_NODE_UNLOCKED:
		BUG_ON(btree_node_locked(path, level));
		return true;
	case BTREE_NODE_READ_LOCKED:
		BUG_ON(btree_node_intent_locked(path, level));
		return bch2_btree_node_relock(trans, path, level);
	case BTREE_NODE_INTENT_LOCKED:
		break;
	case BTREE_NODE_WRITE_LOCKED:
		BUG();
	}

	if (btree_node_intent_locked(path, level))
		return true;

	if (race_fault())
		return false;

	if (btree_node_locked(path, level)
	    ? six_lock_tryupgrade(&b->c.lock)
	    : six_relock_type(&b->c.lock, SIX_LOCK_intent, path->l[level].lock_seq))
		goto success;

	if (btree_node_lock_seq_matches(path, b, level) &&
	    btree_node_lock_increment(trans, &b->c, level, BTREE_NODE_INTENT_LOCKED)) {
		btree_node_unlock(trans, path, level);
		goto success;
	}

	trace_and_count(trans->c, btree_path_upgrade_fail, trans, _RET_IP_, path, level);
	return false;
success:
	mark_btree_node_locked_noreset(path, level, BTREE_NODE_INTENT_LOCKED);
	return true;
}

/* Btree path locking: */

/*
 * Only for btree_cache.c - only relocks intent locks
 */
int bch2_btree_path_relock_intent(struct btree_trans *trans,
				  struct btree_path *path)
{
	unsigned l;

	for (l = path->level;
	     l < path->locks_want && btree_path_node(path, l);
	     l++) {
		if (!bch2_btree_node_relock(trans, path, l)) {
			__bch2_btree_path_unlock(trans, path);
			btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
			trace_and_count(trans->c, trans_restart_relock_path_intent, trans, _RET_IP_, path);
			return btree_trans_restart(trans, BCH_ERR_transaction_restart_relock_path_intent);
		}
	}

	return 0;
}

__flatten
bool bch2_btree_path_relock_norestart(struct btree_trans *trans, struct btree_path *path)
{
	struct get_locks_fail f;

	bool ret = btree_path_get_locks(trans, path, false, &f);
	bch2_trans_verify_locks(trans);
	return ret;
}

int __bch2_btree_path_relock(struct btree_trans *trans,
			     struct btree_path *path, unsigned long trace_ip)
{
	if (!bch2_btree_path_relock_norestart(trans, path)) {
		trace_and_count(trans->c, trans_restart_relock_path, trans, trace_ip, path);
		return btree_trans_restart(trans, BCH_ERR_transaction_restart_relock_path);
	}

	return 0;
}

bool bch2_btree_path_upgrade_noupgrade_sibs(struct btree_trans *trans,
					    struct btree_path *path,
					    unsigned new_locks_want,
					    struct get_locks_fail *f)
{
	EBUG_ON(path->locks_want >= new_locks_want);

	path->locks_want = new_locks_want;

	bool ret = btree_path_get_locks(trans, path, true, f);
	bch2_trans_verify_locks(trans);
	return ret;
}

bool __bch2_btree_path_upgrade(struct btree_trans *trans,
			       struct btree_path *path,
			       unsigned new_locks_want,
			       struct get_locks_fail *f)
{
	bool ret = bch2_btree_path_upgrade_noupgrade_sibs(trans, path, new_locks_want, f);
	if (ret)
		goto out;

	/*
	 * XXX: this is ugly - we'd prefer to not be mucking with other
	 * iterators in the btree_trans here.
	 *
	 * On failure to upgrade the iterator, setting iter->locks_want and
	 * calling get_locks() is sufficient to make bch2_btree_path_traverse()
	 * get the locks we want on transaction restart.
	 *
	 * But if this iterator was a clone, on transaction restart what we did
	 * to this iterator isn't going to be preserved.
	 *
	 * Possibly we could add an iterator field for the parent iterator when
	 * an iterator is a copy - for now, we'll just upgrade any other
	 * iterators with the same btree id.
	 *
	 * The code below used to be needed to ensure ancestor nodes get locked
	 * before interior nodes - now that's handled by
	 * bch2_btree_path_traverse_all().
	 */
	if (!path->cached && !trans->in_traverse_all) {
		struct btree_path *linked;
		unsigned i;

		trans_for_each_path(trans, linked, i)
			if (linked != path &&
			    linked->cached == path->cached &&
			    linked->btree_id == path->btree_id &&
			    linked->locks_want < new_locks_want) {
				linked->locks_want = new_locks_want;
				btree_path_get_locks(trans, linked, true, NULL);
			}
	}
out:
	bch2_trans_verify_locks(trans);
	return ret;
}

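/*
 * Drop locks at levels >= @new_locks_want; at the path's own level, downgrade
 * an intent lock to a read lock instead of unlocking:
 */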
void __bch2_btree_path_downgrade(struct btree_trans *trans,
				 struct btree_path *path,
				 unsigned new_locks_want)
{
	unsigned l, old_locks_want = path->locks_want;

	if (trans->restarted)
		return;

	EBUG_ON(path->locks_want < new_locks_want);

	path->locks_want = new_locks_want;

	while (path->nodes_locked &&
	       (l = btree_path_highest_level_locked(path)) >= path->locks_want) {
		if (l > path->level) {
			btree_node_unlock(trans, path, l);
		} else {
			if (btree_node_intent_locked(path, l)) {
				six_lock_downgrade(&path->l[l].b->c.lock);
				mark_btree_node_locked_noreset(path, l, BTREE_NODE_READ_LOCKED);
			}
			break;
		}
	}

	bch2_btree_path_verify_locks(path);

	trace_path_downgrade(trans, _RET_IP_, path, old_locks_want);
}

/* Btree transaction locking: */

void bch2_trans_downgrade(struct btree_trans *trans)
{
	struct btree_path *path;
	unsigned i;

	if (trans->restarted)
		return;

	trans_for_each_path(trans, path, i)
		if (path->ref)
			bch2_btree_path_downgrade(trans, path);
}

static inline void __bch2_trans_unlock(struct btree_trans *trans)
{
	struct btree_path *path;
	unsigned i;

	trans_for_each_path(trans, path, i)
		__bch2_btree_path_unlock(trans, path);
}

static noinline __cold int bch2_trans_relock_fail(struct btree_trans *trans, struct btree_path *path,
						  struct get_locks_fail *f, bool trace)
{
	if (!trace)
		goto out;

	if (trace_trans_restart_relock_enabled()) {
		struct printbuf buf = PRINTBUF;

		bch2_bpos_to_text(&buf, path->pos);
		prt_printf(&buf, " l=%u seq=%u node seq=", f->l, path->l[f->l].lock_seq);
		if (IS_ERR_OR_NULL(f->b)) {
			prt_str(&buf, bch2_err_str(PTR_ERR(f->b)));
		} else {
			prt_printf(&buf, "%u", f->b->c.lock.seq);

			struct six_lock_count c =
				bch2_btree_node_lock_counts(trans, NULL, &f->b->c, f->l);
			prt_printf(&buf, " self locked %u.%u.%u", c.n[0], c.n[1], c.n[2]);

			c = six_lock_counts(&f->b->c.lock);
			prt_printf(&buf, " total locked %u.%u.%u", c.n[0], c.n[1], c.n[2]);
		}

		trace_trans_restart_relock(trans, _RET_IP_, buf.buf);
		printbuf_exit(&buf);
	}

	count_event(trans->c, trans_restart_relock);
out:
	__bch2_trans_unlock(trans);
	bch2_trans_verify_locks(trans);
	return btree_trans_restart(trans, BCH_ERR_transaction_restart_relock);
}

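/*
 * Relock every path marked should_be_locked after the transaction was
 * unlocked: relocking fails if a node's lock sequence number changed, i.e.
 * it was modified while we weren't holding the lock:
 */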
static inline int __bch2_trans_relock(struct btree_trans *trans, bool trace)
{
	bch2_trans_verify_locks(trans);

	if (unlikely(trans->restarted))
		return -((int) trans->restarted);
	if (unlikely(trans->locked))
		goto out;

	struct btree_path *path;
	unsigned i;

	trans_for_each_path(trans, path, i) {
		struct get_locks_fail f;

		if (path->should_be_locked &&
		    !btree_path_get_locks(trans, path, false, &f))
			return bch2_trans_relock_fail(trans, path, &f, trace);
	}

	trans_set_locked(trans, true);
out:
	bch2_trans_verify_locks(trans);
	return 0;
}

int bch2_trans_relock(struct btree_trans *trans)
{
	return __bch2_trans_relock(trans, true);
}

int bch2_trans_relock_notrace(struct btree_trans *trans)
{
	return __bch2_trans_relock(trans, false);
}

void bch2_trans_unlock_noassert(struct btree_trans *trans)
{
	__bch2_trans_unlock(trans);

	trans_set_unlocked(trans);
}

void bch2_trans_unlock(struct btree_trans *trans)
{
	__bch2_trans_unlock(trans);

	trans_set_unlocked(trans);
}

void bch2_trans_unlock_long(struct btree_trans *trans)
{
	bch2_trans_unlock(trans);
	bch2_trans_srcu_unlock(trans);
}

void bch2_trans_unlock_write(struct btree_trans *trans)
{
	struct btree_path *path;
	unsigned i;

	trans_for_each_path(trans, path, i)
		for (unsigned l = 0; l < BTREE_MAX_DEPTH; l++)
			if (btree_node_write_locked(path, l))
				bch2_btree_node_unlock_write(trans, path, path->l[l].b);
}

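/*
 * Take a mutex that may block: drop btree locks first so that a thread
 * holding the mutex while blocked on one of our btree locks can't deadlock
 * us, then relock - which may return a transaction restart:
 */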
int __bch2_trans_mutex_lock(struct btree_trans *trans,
			    struct mutex *lock)
{
	int ret = drop_locks_do(trans, (mutex_lock(lock), 0));

	if (ret)
		mutex_unlock(lock);
	return ret;
}

/* Debug */

#ifdef CONFIG_BCACHEFS_DEBUG

void bch2_btree_path_verify_locks(struct btree_path *path)
{
	/*
	 * A path may be uptodate and yet have nothing locked if and only if
	 * there is no node at path->level, which generally means we were
	 * iterating over all nodes and got to the end of the btree
	 */
	BUG_ON(path->uptodate == BTREE_ITER_UPTODATE &&
	       btree_path_node(path, path->level) &&
	       !path->nodes_locked);

	if (!path->nodes_locked)
		return;

	for (unsigned l = 0; l < BTREE_MAX_DEPTH; l++) {
		int want = btree_lock_want(path, l);
		int have = btree_node_locked_type(path, l);

		BUG_ON(!is_btree_node(path, l) && have != BTREE_NODE_UNLOCKED);

		BUG_ON(is_btree_node(path, l) &&
		       (want == BTREE_NODE_UNLOCKED ||
			have != BTREE_NODE_WRITE_LOCKED) &&
		       want != have);

		BUG_ON(btree_node_locked(path, l) &&
		       path->l[l].lock_seq != six_lock_seq(&path->l[l].b->c.lock));
	}
}

static bool bch2_trans_locked(struct btree_trans *trans)
{
	struct btree_path *path;
	unsigned i;

	trans_for_each_path(trans, path, i)
		if (path->nodes_locked)
			return true;
	return false;
}

void bch2_trans_verify_locks(struct btree_trans *trans)
{
	if (!trans->locked) {
		BUG_ON(bch2_trans_locked(trans));
		return;
	}

	struct btree_path *path;
	unsigned i;

	trans_for_each_path(trans, path, i)
		bch2_btree_path_verify_locks(path);
}

#endif