Lines Matching +full:top +full:- +full:level

1 // SPDX-License-Identifier: GPL-2.0
14 __six_lock_init(&b->lock, "b->c.lock", &bch2_btree_node_lock_key, flags, gfp); in bch2_btree_lock_init()
15 lockdep_set_notrack_class(&b->lock); in bch2_btree_lock_init()
23 unsigned level) in bch2_btree_node_lock_counts() argument
35 if (path != skip && &path->l[level].b->c == b) { in bch2_btree_node_lock_counts()
36 int t = btree_node_locked_type(path, level); in bch2_btree_node_lock_counts()
65 u8 level; member
78 prt_printf(out, "Found lock cycle (%u entries):\n", g->nr); in print_cycle()
80 for (i = g->g; i < g->g + g->nr; i++) { in print_cycle()
81 struct task_struct *task = READ_ONCE(i->trans->locking_wait.task); in print_cycle()
85 bch2_btree_trans_to_text(out, i->trans); in print_cycle()
86 bch2_prt_task_backtrace(out, task, i == g->g ? 5 : 1, GFP_NOWAIT); in print_cycle()
94 for (i = g->g; i != g->g + g->nr; i++) { in print_chain()
95 struct task_struct *task = READ_ONCE(i->trans->locking_wait.task); in print_chain()
96 if (i != g->g) in print_chain()
97 prt_str(out, "<- "); in print_chain()
98 prt_printf(out, "%u ", task ? task->pid : 0); in print_chain()
105 closure_put(&g->g[--g->nr].trans->ref); in lock_graph_up()
110 while (g->nr) in lock_graph_pop_all()
116 while (g->g + g->nr > i) in lock_graph_pop_from()
122 g->g[g->nr++] = (struct trans_waiting_for_lock) { in __lock_graph_down()
124 .node_want = trans->locking, in __lock_graph_down()
125 .lock_want = trans->locking_wait.lock_want, in __lock_graph_down()
131 closure_get(&trans->ref); in lock_graph_down()
140 if (from->trans->locking != from->node_want) { in lock_graph_remove_non_waiters()
145 for (i = from + 1; i < g->g + g->nr; i++) in lock_graph_remove_non_waiters()
146 if (i->trans->locking != i->node_want || in lock_graph_remove_non_waiters()
147 i->trans->locking_wait.start_time != i[-1].lock_start_time) { in lock_graph_remove_non_waiters()
157 struct bch_fs *c = trans->c; in trace_would_deadlock()
174 if (i == g->g) { in abort_lock()
175 trace_would_deadlock(g, i->trans); in abort_lock()
176 return btree_trans_restart_foreign_task(i->trans, in abort_lock()
180 i->trans->lock_must_abort = true; in abort_lock()
181 wake_up_process(i->trans->locking_wait.task); in abort_lock()
188 if (trans->lock_may_not_fail) in btree_trans_abort_preference()
190 if (trans->locking_wait.lock_want == SIX_LOCK_write) in btree_trans_abort_preference()
192 if (!trans->in_traverse_all) in btree_trans_abort_preference()
202 prt_printf(&buf, bch2_fmt(g->g->trans->c, "cycle of nofail locks")); in break_cycle_fail()
204 for (struct trans_waiting_for_lock *i = g->g; i < g->g + g->nr; i++) { in break_cycle_fail()
205 struct btree_trans *trans = i->trans; in break_cycle_fail()
211 bch2_prt_task_backtrace(&buf, trans->locking_wait.task, 2, GFP_NOWAIT); in break_cycle_fail()
216 bch2_print_str(g->g->trans->c, KERN_ERR, buf.buf); in break_cycle_fail()
234 ret = -1; in break_cycle()
238 for (i = from; i < g->g + g->nr; i++) { in break_cycle()
239 pref = btree_trans_abort_preference(i->trans); in break_cycle()
261 struct btree_trans *orig_trans = g->g->trans; in lock_graph_descend()
263 for (struct trans_waiting_for_lock *i = g->g; i < g->g + g->nr; i++) in lock_graph_descend()
264 if (i->trans == trans) { in lock_graph_descend()
265 closure_put(&trans->ref); in lock_graph_descend()
269 if (unlikely(g->nr == ARRAY_SIZE(g->g))) { in lock_graph_descend()
270 closure_put(&trans->ref); in lock_graph_descend()
272 if (orig_trans->lock_may_not_fail) in lock_graph_descend()
280 trace_and_count(trans->c, trans_restart_would_deadlock_recursion_limit, trans, _RET_IP_); in lock_graph_descend()
296 struct trans_waiting_for_lock *top; in bch2_check_for_deadlock() local
303 if (trans->lock_must_abort && !trans->lock_may_not_fail) { in bch2_check_for_deadlock()
305 return -1; in bch2_check_for_deadlock()
313 /* trans->paths is rcu protected vs. freeing */ in bch2_check_for_deadlock()
316 cycle->atomic++; in bch2_check_for_deadlock()
321 top = &g.g[g.nr - 1]; in bch2_check_for_deadlock()
323 struct btree_path *paths = rcu_dereference(top->trans->paths); in bch2_check_for_deadlock()
330 path_idx, top->path_idx) { in bch2_check_for_deadlock()
332 if (!path->nodes_locked) in bch2_check_for_deadlock()
335 if (path_idx != top->path_idx) { in bch2_check_for_deadlock()
336 top->path_idx = path_idx; in bch2_check_for_deadlock()
337 top->level = 0; in bch2_check_for_deadlock()
338 top->lock_start_time = 0; in bch2_check_for_deadlock()
342 top->level < BTREE_MAX_DEPTH; in bch2_check_for_deadlock()
343 top->level++, top->lock_start_time = 0) { in bch2_check_for_deadlock()
344 int lock_held = btree_node_locked_type(path, top->level); in bch2_check_for_deadlock()
349 b = &READ_ONCE(path->l[top->level].b)->c; in bch2_check_for_deadlock()
355 * structures - which means it can't be blocked in bch2_check_for_deadlock()
374 if (list_empty_careful(&b->lock.wait_list)) in bch2_check_for_deadlock()
377 raw_spin_lock(&b->lock.wait_lock); in bch2_check_for_deadlock()
378 list_for_each_entry(trans, &b->lock.wait_list, locking_wait.list) { in bch2_check_for_deadlock()
379 BUG_ON(b != trans->locking); in bch2_check_for_deadlock()
381 if (top->lock_start_time && in bch2_check_for_deadlock()
382 time_after_eq64(top->lock_start_time, trans->locking_wait.start_time)) in bch2_check_for_deadlock()
385 top->lock_start_time = trans->locking_wait.start_time; in bch2_check_for_deadlock()
388 if (trans == top->trans || in bch2_check_for_deadlock()
389 !lock_type_conflicts(lock_held, trans->locking_wait.lock_want)) in bch2_check_for_deadlock()
392 closure_get(&trans->ref); in bch2_check_for_deadlock()
393 raw_spin_unlock(&b->lock.wait_lock); in bch2_check_for_deadlock()
401 raw_spin_unlock(&b->lock.wait_lock); in bch2_check_for_deadlock()
411 --cycle->atomic; in bch2_check_for_deadlock()
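
The fragments above, from lock_graph_descend() and bch2_check_for_deadlock(), walk a wait-for graph: each blocked transaction records the lock it wants, the checker follows the chain of current holders, and finding a transaction that is already in the graph means a cycle, at which point a victim is picked and restarted. Below is a minimal user-space sketch of that idea only; struct xact and deadlock_check() are hypothetical names, and none of the SIX-lock, closure or tracing machinery is modelled.

/*
 * Sketch: deadlock detection over a wait-for graph.  Each transaction
 * points at the transaction it is currently blocked on; starting from
 * one waiter we follow the chain, and if we reach a transaction that is
 * already on our stack we have found a cycle and mark a victim.
 */
#include <stdbool.h>
#include <stdio.h>

struct xact {
	int		id;
	struct xact	*waiting_on;	/* NULL if not blocked */
	bool		abort;		/* set on the chosen victim */
};

#define MAX_DEPTH 8			/* cf. the recursion limit check */

static bool deadlock_check(struct xact *t)
{
	struct xact *stack[MAX_DEPTH];
	unsigned nr = 0;

	while (t && t->waiting_on) {
		for (unsigned i = 0; i < nr; i++)
			if (stack[i] == t) {
				/* Cycle: abort the most recently added entry
				 * (the real code uses an abort preference). */
				stack[nr - 1]->abort = true;
				return true;
			}

		if (nr == MAX_DEPTH)
			return false;	/* graph too deep, give up */

		stack[nr++] = t;
		t = t->waiting_on;
	}
	return false;			/* chain ended: no deadlock */
}

int main(void)
{
	struct xact a = { .id = 1 }, b = { .id = 2 }, c = { .id = 3 };

	a.waiting_on = &b;
	b.waiting_on = &c;
	c.waiting_on = &a;		/* closes the cycle */

	if (deadlock_check(&a))
		printf("cycle found, victim: %d\n",
		       a.abort ? a.id : b.abort ? b.id : c.id);
	return 0;
}
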
426 int readers = bch2_btree_node_lock_counts(trans, NULL, b, b->level).n[SIX_LOCK_read]; in __bch2_btree_node_lock_write()
430 * Must drop our read locks before calling six_lock_write() - in __bch2_btree_node_lock_write()
435 six_lock_readers_add(&b->lock, -readers); in __bch2_btree_node_lock_write()
438 six_lock_readers_add(&b->lock, readers); in __bch2_btree_node_lock_write()
441 mark_btree_node_locked_noreset(path, b->level, BTREE_NODE_INTENT_LOCKED); in __bch2_btree_node_lock_write()
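
__bch2_btree_node_lock_write() above counts the read locks this transaction already holds on the node via other paths, subtracts them with six_lock_readers_add(), takes the write lock, and then adds them back, since the write lock cannot be granted while any readers are counted, including our own. The following single-threaded toy, with a hypothetical demo_lock type, only illustrates why that subtraction is needed.

/*
 * Sketch: a write lock that is only granted when the reader count is
 * zero.  If the caller's own read holds stay counted, it can never get
 * the write lock; temporarily removing them and restoring them after
 * mirrors the six_lock_readers_add(-readers) / (+readers) pair.
 */
#include <stdbool.h>
#include <stdio.h>

struct demo_lock {
	int	readers;
	bool	write_locked;
};

static bool demo_trylock_write(struct demo_lock *l)
{
	if (l->readers || l->write_locked)
		return false;
	l->write_locked = true;
	return true;
}

static void demo_readers_add(struct demo_lock *l, int nr)
{
	l->readers += nr;
}

int main(void)
{
	struct demo_lock l = { .readers = 2 };	/* both holds are ours */
	int own_readers = 2;

	printf("without dropping own readers: %s\n",
	       demo_trylock_write(&l) ? "acquired" : "would deadlock");

	demo_readers_add(&l, -own_readers);	/* drop our own read holds */
	printf("after dropping own readers:   %s\n",
	       demo_trylock_write(&l) ? "acquired" : "would deadlock");
	demo_readers_add(&l, own_readers);	/* restore the count */

	return 0;
}
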
462 unsigned l = path->level; in btree_path_get_locks()
474 } while (l < path->locks_want); in btree_path_get_locks()
476 if (path->uptodate == BTREE_ITER_NEED_RELOCK) in btree_path_get_locks()
477 path->uptodate = BTREE_ITER_UPTODATE; in btree_path_get_locks()
479 return path->uptodate < BTREE_ITER_NEED_RELOCK ? 0 : -1; in btree_path_get_locks()
482 f->l = l; in btree_path_get_locks()
483 f->b = path->l[l].b; in btree_path_get_locks()
492 } else if (path->should_be_locked && !trans->restarted) { in btree_path_get_locks()
494 path->locks_want = l; in btree_path_get_locks()
495 return -1; in btree_path_get_locks()
507 path->l[l].b = upgrade in btree_path_get_locks()
508 ? ERR_PTR(-BCH_ERR_no_btree_node_upgrade) in btree_path_get_locks()
509 : ERR_PTR(-BCH_ERR_no_btree_node_relock); in btree_path_get_locks()
510 } while (l--); in btree_path_get_locks()
512 return -restart_err ?: -1; in btree_path_get_locks()
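
btree_path_get_locks() above acquires locks from path->level upwards towards locks_want; on failure it records the failing level and node in a get_locks_fail struct and overwrites the path's node pointers with ERR_PTR() values so later code can tell a relock failure from an upgrade failure. The stripped-down sketch below keeps only that bookkeeping, using a sentinel pointer instead of ERR_PTR() and hypothetical names throughout.

/*
 * Sketch: lock levels bottom-up; on failure remember where we failed and
 * poison this level and every level below it, highest first.
 */
#include <stdbool.h>
#include <stdio.h>

#define MAX_DEPTH 4

struct demo_node { bool lockable; };

/* stands in for ERR_PTR(-BCH_ERR_no_btree_node_relock) */
static struct demo_node node_relock_failed;

struct fail_info {
	unsigned		l;	/* level that failed */
	struct demo_node	*b;	/* node that failed */
};

static int get_locks(struct demo_node *b[MAX_DEPTH], unsigned level,
		     unsigned locks_want, struct fail_info *f)
{
	unsigned l = level;

	do {
		if (!b[l]->lockable)
			goto err;
		l++;
	} while (l < locks_want && l < MAX_DEPTH);

	return 0;
err:
	f->l = l;
	f->b = b[l];

	do				/* poison level l and everything below */
		b[l] = &node_relock_failed;
	while (l--);

	return -1;
}

int main(void)
{
	struct demo_node n0 = { true }, n1 = { false }, n2 = { true };
	struct demo_node *levels[MAX_DEPTH] = { &n0, &n1, &n2, NULL };
	struct fail_info f;

	if (get_locks(levels, 0, 3, &f))
		printf("failed at level %u, levels 0..%u poisoned\n", f.l, f.l);
	return 0;
}
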
516 struct btree_path *path, unsigned level, in __bch2_btree_node_relock() argument
519 struct btree *b = btree_path_node(path, level); in __bch2_btree_node_relock()
520 int want = __btree_lock_want(path, level); in __bch2_btree_node_relock()
525 if (six_relock_type(&b->c.lock, want, path->l[level].lock_seq) || in __bch2_btree_node_relock()
526 (btree_node_lock_seq_matches(path, b, level) && in __bch2_btree_node_relock()
527 btree_node_lock_increment(trans, &b->c, level, want))) { in __bch2_btree_node_relock()
528 mark_btree_node_locked(trans, path, level, want); in __bch2_btree_node_relock()
532 if (trace && !trans->notrace_relock_fail) in __bch2_btree_node_relock()
533 trace_and_count(trans->c, btree_path_relock_fail, trans, _RET_IP_, path, level); in __bch2_btree_node_relock()
540 struct btree_path *path, unsigned level) in bch2_btree_node_upgrade() argument
542 struct btree *b = path->l[level].b; in bch2_btree_node_upgrade()
544 if (!is_btree_node(path, level)) in bch2_btree_node_upgrade()
547 switch (btree_lock_want(path, level)) { in bch2_btree_node_upgrade()
549 BUG_ON(btree_node_locked(path, level)); in bch2_btree_node_upgrade()
552 BUG_ON(btree_node_intent_locked(path, level)); in bch2_btree_node_upgrade()
553 return bch2_btree_node_relock(trans, path, level); in bch2_btree_node_upgrade()
560 if (btree_node_intent_locked(path, level)) in bch2_btree_node_upgrade()
566 if (btree_node_locked(path, level) in bch2_btree_node_upgrade()
567 ? six_lock_tryupgrade(&b->c.lock) in bch2_btree_node_upgrade()
568 : six_relock_type(&b->c.lock, SIX_LOCK_intent, path->l[level].lock_seq)) in bch2_btree_node_upgrade()
571 if (btree_node_lock_seq_matches(path, b, level) && in bch2_btree_node_upgrade()
572 btree_node_lock_increment(trans, &b->c, level, BTREE_NODE_INTENT_LOCKED)) { in bch2_btree_node_upgrade()
573 btree_node_unlock(trans, path, level); in bch2_btree_node_upgrade()
577 trace_and_count(trans->c, btree_path_upgrade_fail, trans, _RET_IP_, path, level); in bch2_btree_node_upgrade()
580 mark_btree_node_locked_noreset(path, level, BTREE_NODE_INTENT_LOCKED); in bch2_btree_node_upgrade()
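
bch2_btree_node_upgrade() above either upgrades a held read lock in place (six_lock_tryupgrade()) or re-takes the intent lock and checks the saved lock sequence number, so the upgrade only succeeds if the node was not modified while it was unlocked. Below is a small self-contained sketch of that relock-by-sequence-number idea; the names are hypothetical and the single-holder lock model is far simpler than a real SIX lock.

/*
 * Sketch: each node carries a sequence number that is bumped whenever it
 * is modified.  A path remembers the sequence it saw when it last held
 * the lock; re-taking a stronger lock later is only allowed if that
 * sequence still matches.
 */
#include <stdbool.h>
#include <stdio.h>

enum lock_type { LOCK_NONE, LOCK_READ, LOCK_INTENT };

struct node {
	unsigned	seq;		/* bumped on every modification */
	enum lock_type	held;		/* simplified: one holder at a time */
};

struct path_level {
	struct node	*node;
	unsigned	lock_seq;	/* seq observed when last locked */
	enum lock_type	held;
};

/* Re-take the lock at @want, but only if the node is unchanged. */
static bool relock_type(struct path_level *l, enum lock_type want)
{
	if (l->node->seq != l->lock_seq)
		return false;		/* node changed while unlocked */
	l->node->held = l->held = want;
	return true;
}

static bool node_upgrade(struct path_level *l)
{
	if (l->held == LOCK_INTENT)
		return true;		/* already strong enough */
	if (l->held == LOCK_READ) {
		/* in-place upgrade, like six_lock_tryupgrade() (which can
		 * also fail in the real code if other readers exist) */
		l->node->held = l->held = LOCK_INTENT;
		return true;
	}
	return relock_type(l, LOCK_INTENT);
}

int main(void)
{
	struct node n = { .seq = 1 };
	struct path_level l = { .node = &n, .lock_seq = 1, .held = LOCK_NONE };

	printf("upgrade with unchanged node: %s\n",
	       node_upgrade(&l) ? "ok" : "fail");

	l.held = n.held = LOCK_NONE;	/* drop the lock again */
	n.seq++;			/* someone modified the node */
	printf("upgrade with stale seq:      %s\n",
	       node_upgrade(&l) ? "ok" : "fail");
	return 0;
}
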
587 * Only for btree_cache.c - only relocks intent locks
594 for (l = path->level; in bch2_btree_path_relock_intent()
595 l < path->locks_want && btree_path_node(path, l); in bch2_btree_path_relock_intent()
600 trace_and_count(trans->c, trans_restart_relock_path_intent, trans, _RET_IP_, path); in bch2_btree_path_relock_intent()
620 trace_and_count(trans->c, trans_restart_relock_path, trans, trace_ip, path); in __bch2_btree_path_relock()
631 path->locks_want = new_locks_want; in __bch2_btree_path_upgrade_norestart()
635 * success - bch2_path_get() will use this path, and it'll just be in __bch2_btree_path_upgrade_norestart()
639 !path->should_be_locked; in __bch2_btree_path_upgrade_norestart()
649 unsigned old_locks = path->nodes_locked; in __bch2_btree_path_upgrade()
650 unsigned old_locks_want = path->locks_want; in __bch2_btree_path_upgrade()
652 path->locks_want = max_t(unsigned, path->locks_want, new_locks_want); in __bch2_btree_path_upgrade()
661 * XXX: this is ugly - we'd prefer to not be mucking with other in __bch2_btree_path_upgrade()
664 * On failure to upgrade the iterator, setting iter->locks_want and in __bch2_btree_path_upgrade()
672 * an iterator is a copy - for now, we'll just upgrade any other in __bch2_btree_path_upgrade()
676 * before interior nodes - now that's handled by in __bch2_btree_path_upgrade()
679 if (!path->cached && !trans->in_traverse_all) { in __bch2_btree_path_upgrade()
685 linked->cached == path->cached && in __bch2_btree_path_upgrade()
686 linked->btree_id == path->btree_id && in __bch2_btree_path_upgrade()
687 linked->locks_want < new_locks_want) { in __bch2_btree_path_upgrade()
688 linked->locks_want = new_locks_want; in __bch2_btree_path_upgrade()
693 count_event(trans->c, trans_restart_upgrade); in __bch2_btree_path_upgrade()
697 prt_printf(&buf, "%s %pS\n", trans->fn, (void *) _RET_IP_); in __bch2_btree_path_upgrade()
698 prt_printf(&buf, "btree %s pos\n", bch2_btree_id_str(path->btree_id)); in __bch2_btree_path_upgrade()
699 bch2_bpos_to_text(&buf, path->pos); in __bch2_btree_path_upgrade()
700 prt_printf(&buf, "locks want %u -> %u level %u\n", in __bch2_btree_path_upgrade()
702 prt_printf(&buf, "nodes_locked %x -> %x\n", in __bch2_btree_path_upgrade()
703 old_locks, path->nodes_locked); in __bch2_btree_path_upgrade()
707 IS_ERR_OR_NULL(f.b) ? 0 : f.b->c.lock.seq, in __bch2_btree_path_upgrade()
708 path->l[f.l].lock_seq); in __bch2_btree_path_upgrade()
710 trace_trans_restart_upgrade(trans->c, buf.buf); in __bch2_btree_path_upgrade()
722 unsigned l, old_locks_want = path->locks_want; in __bch2_btree_path_downgrade()
724 if (trans->restarted) in __bch2_btree_path_downgrade()
727 EBUG_ON(path->locks_want < new_locks_want); in __bch2_btree_path_downgrade()
729 path->locks_want = new_locks_want; in __bch2_btree_path_downgrade()
731 while (path->nodes_locked && in __bch2_btree_path_downgrade()
732 (l = btree_path_highest_level_locked(path)) >= path->locks_want) { in __bch2_btree_path_downgrade()
733 if (l > path->level) { in __bch2_btree_path_downgrade()
737 six_lock_downgrade(&path->l[l].b->c.lock); in __bch2_btree_path_downgrade()
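
__bch2_btree_path_downgrade() above walks the locked levels from the highest down: any level at or above the new locks_want is unlocked entirely if it is above path->level, or downgraded from an intent to a read lock at path->level itself (six_lock_downgrade()). A compact sketch of that loop over a hypothetical demo_path, with no real locks involved:

/*
 * Sketch: shed locks the path no longer wants, highest level first.
 * Levels below locks_want are left untouched; levels at or above it are
 * dropped, except the path's own level, which is kept as a read lock.
 */
#include <stdio.h>

enum lock_type { LOCK_NONE, LOCK_READ, LOCK_INTENT };

#define MAX_DEPTH 4

struct demo_path {
	unsigned	level;		/* lowest level this path uses */
	unsigned	locks_want;	/* intent locks wanted on [level, locks_want) */
	enum lock_type	l[MAX_DEPTH];
};

static void path_downgrade(struct demo_path *p, unsigned new_locks_want)
{
	p->locks_want = new_locks_want;

	for (unsigned l = MAX_DEPTH; l-- > 0;) {
		if (l < p->locks_want || p->l[l] == LOCK_NONE)
			continue;
		if (l > p->level)
			p->l[l] = LOCK_NONE;	/* not needed at all anymore */
		else
			p->l[l] = LOCK_READ;	/* keep it, but weaker */
	}
}

int main(void)
{
	struct demo_path p = {
		.level		= 1,
		.locks_want	= 3,
		.l		= { LOCK_NONE, LOCK_INTENT, LOCK_INTENT, LOCK_NONE },
	};

	path_downgrade(&p, 1);

	for (unsigned l = 0; l < MAX_DEPTH; l++)
		printf("level %u: %s\n", l,
		       p.l[l] == LOCK_INTENT ? "intent" :
		       p.l[l] == LOCK_READ   ? "read" : "unlocked");
	return 0;
}
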
756 if (trans->restarted) in bch2_trans_downgrade()
760 if (path->ref) in bch2_trans_downgrade()
782 bch2_bpos_to_text(&buf, path->pos); in bch2_trans_relock_fail()
784 bch2_btree_id_str(path->btree_id), in bch2_trans_relock_fail()
785 f->l, path->l[f->l].lock_seq); in bch2_trans_relock_fail()
786 if (IS_ERR_OR_NULL(f->b)) { in bch2_trans_relock_fail()
787 prt_str(&buf, bch2_err_str(PTR_ERR(f->b))); in bch2_trans_relock_fail()
789 prt_printf(&buf, "%u", f->b->c.lock.seq); in bch2_trans_relock_fail()
792 bch2_btree_node_lock_counts(trans, NULL, &f->b->c, f->l); in bch2_trans_relock_fail()
795 c = six_lock_counts(&f->b->c.lock); in bch2_trans_relock_fail()
803 count_event(trans->c, trans_restart_relock); in bch2_trans_relock_fail()
813 if (unlikely(trans->restarted)) in __bch2_trans_relock()
814 return -((int) trans->restarted); in __bch2_trans_relock()
815 if (unlikely(trans->locked)) in __bch2_trans_relock()
825 if (path->should_be_locked && in __bch2_trans_relock()
870 bch2_btree_node_unlock_write(trans, path, path->l[l].b); in bch2_trans_unlock_write()
887 if (!path->nodes_locked && btree_path_node(path, path->level)) { in __bch2_btree_path_verify_locks()
890 * there is no node at path->level, which generally means we were in __bch2_btree_path_verify_locks()
893 BUG_ON(path->uptodate == BTREE_ITER_UPTODATE); in __bch2_btree_path_verify_locks()
894 BUG_ON(path->should_be_locked && trans->locked && !trans->restarted); in __bch2_btree_path_verify_locks()
897 if (!path->nodes_locked) in __bch2_btree_path_verify_locks()
909 path->l[l].lock_seq != six_lock_seq(&path->l[l].b->c.lock)); in __bch2_btree_path_verify_locks()
919 if (path->nodes_locked) in bch2_trans_locked()
926 if (!trans->locked) { in __bch2_trans_verify_locks()