xref: /linux/fs/bcachefs/btree_locking.c (revision a6a0f04e7d28378c181f76d32e4f965aa6a8b0a5)
1 // SPDX-License-Identifier: GPL-2.0
2 
3 #include "bcachefs.h"
4 #include "btree_locking.h"
5 #include "btree_types.h"
6 
7 static struct lock_class_key bch2_btree_node_lock_key;
8 
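/*
 * All btree node locks share a single lock class, and lockdep validation is
 * disabled on them: deadlock avoidance for btree node locks is handled by the
 * cycle detector in this file rather than by lockdep.
 */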
9 void bch2_btree_lock_init(struct btree_bkey_cached_common *b,
10 			  enum six_lock_init_flags flags)
11 {
12 	__six_lock_init(&b->lock, "b->c.lock", &bch2_btree_node_lock_key, flags);
13 	lockdep_set_novalidate_class(&b->lock);
14 }
15 
16 #ifdef CONFIG_LOCKDEP
17 void bch2_assert_btree_nodes_not_locked(void)
18 {
19 #if 0
20 	// Re-enable when lock_class_is_held() is merged:
21 	BUG_ON(lock_class_is_held(&bch2_btree_node_lock_key));
22 #endif
23 }
24 #endif
25 
26 /* Btree node locking: */
27 
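/*
 * Count how many references @trans holds on @b at @level, broken down by lock
 * type, via every path except @skip:
 */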
28 struct six_lock_count bch2_btree_node_lock_counts(struct btree_trans *trans,
29 						  struct btree_path *skip,
30 						  struct btree_bkey_cached_common *b,
31 						  unsigned level)
32 {
33 	struct btree_path *path;
34 	struct six_lock_count ret;
35 	unsigned i;
36 
37 	memset(&ret, 0, sizeof(ret));
38 
39 	if (IS_ERR_OR_NULL(b))
40 		return ret;
41 
42 	trans_for_each_path(trans, path, i)
43 		if (path != skip && &path->l[level].b->c == b) {
44 			int t = btree_node_locked_type(path, level);
45 
46 			if (t != BTREE_NODE_UNLOCKED)
47 				ret.n[t]++;
48 		}
49 
50 	return ret;
51 }
52 
53 /* unlock */
54 
55 void bch2_btree_node_unlock_write(struct btree_trans *trans,
56 			struct btree_path *path, struct btree *b)
57 {
58 	bch2_btree_node_unlock_write_inlined(trans, path, b);
59 }
60 
61 /* lock */
62 
63 /*
64  * @trans wants to lock @b with type @type
65  */
66 struct trans_waiting_for_lock {
67 	struct btree_trans		*trans;
68 	struct btree_bkey_cached_common	*node_want;
69 	enum six_lock_type		lock_want;
70 
71 	/* for iterating over held locks: */
72 	u8				path_idx;
73 	u8				level;
74 	u64				lock_start_time;
75 };
76 
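/*
 * A chain of transactions, each one blocked waiting on a lock held by the next
 * entry: bounded at 8 entries, past which the cycle detector gives up and
 * restarts.
 */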
77 struct lock_graph {
78 	struct trans_waiting_for_lock	g[8];
79 	unsigned			nr;
80 };
81 
82 static noinline void print_cycle(struct printbuf *out, struct lock_graph *g)
83 {
84 	struct trans_waiting_for_lock *i;
85 
86 	prt_printf(out, "Found lock cycle (%u entries):\n", g->nr);
87 
88 	for (i = g->g; i < g->g + g->nr; i++) {
89 		struct task_struct *task = READ_ONCE(i->trans->locking_wait.task);
90 		if (!task)
91 			continue;
92 
93 		bch2_btree_trans_to_text(out, i->trans);
94 		bch2_prt_task_backtrace(out, task, i == g->g ? 5 : 1, GFP_NOWAIT);
95 	}
96 }
97 
98 static noinline void print_chain(struct printbuf *out, struct lock_graph *g)
99 {
100 	struct trans_waiting_for_lock *i;
101 
102 	for (i = g->g; i != g->g + g->nr; i++) {
103 		struct task_struct *task = i->trans->locking_wait.task;
104 		if (i != g->g)
105 			prt_str(out, "<- ");
106 		prt_printf(out, "%u ", task ? task->pid : 0);
107 	}
108 	prt_newline(out);
109 }
110 
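/*
 * Pop the most recently added transaction from the graph, dropping the closure
 * ref taken when it was added:
 */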
111 static void lock_graph_up(struct lock_graph *g)
112 {
113 	closure_put(&g->g[--g->nr].trans->ref);
114 }
115 
116 static noinline void lock_graph_pop_all(struct lock_graph *g)
117 {
118 	while (g->nr)
119 		lock_graph_up(g);
120 }
121 
122 static void __lock_graph_down(struct lock_graph *g, struct btree_trans *trans)
123 {
124 	g->g[g->nr++] = (struct trans_waiting_for_lock) {
125 		.trans		= trans,
126 		.node_want	= trans->locking,
127 		.lock_want	= trans->locking_wait.lock_want,
128 	};
129 }
130 
131 static void lock_graph_down(struct lock_graph *g, struct btree_trans *trans)
132 {
133 	closure_get(&trans->ref);
134 	__lock_graph_down(g, trans);
135 }
136 
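/*
 * Prune entries (other than the first) whose transactions are no longer
 * blocked on the lock we saw them waiting on - they've raced ahead, so there's
 * no cycle through them. Returns true if anything was removed:
 */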
137 static bool lock_graph_remove_non_waiters(struct lock_graph *g)
138 {
139 	struct trans_waiting_for_lock *i;
140 
141 	for (i = g->g + 1; i < g->g + g->nr; i++)
142 		if (i->trans->locking != i->node_want ||
143 		    i->trans->locking_wait.start_time != i[-1].lock_start_time) {
144 			while (g->g + g->nr > i)
145 				lock_graph_up(g);
146 			return true;
147 		}
148 
149 	return false;
150 }
151 
152 static void trace_would_deadlock(struct lock_graph *g, struct btree_trans *trans)
153 {
154 	struct bch_fs *c = trans->c;
155 
156 	count_event(c, trans_restart_would_deadlock);
157 
158 	if (trace_trans_restart_would_deadlock_enabled()) {
159 		struct printbuf buf = PRINTBUF;
160 
161 		buf.atomic++;
162 		print_cycle(&buf, g);
163 
164 		trace_trans_restart_would_deadlock(trans, buf.buf);
165 		printbuf_exit(&buf);
166 	}
167 }
168 
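/*
 * Abort @i's transaction in order to break a cycle: if it's the transaction
 * running the cycle detector (the first graph entry), restart it directly;
 * otherwise flag it and wake it so that it aborts itself:
 */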
169 static int abort_lock(struct lock_graph *g, struct trans_waiting_for_lock *i)
170 {
171 	if (i == g->g) {
172 		trace_would_deadlock(g, i->trans);
173 		return btree_trans_restart(i->trans, BCH_ERR_transaction_restart_would_deadlock);
174 	} else {
175 		i->trans->lock_must_abort = true;
176 		wake_up_process(i->trans->locking_wait.task);
177 		return 0;
178 	}
179 }
180 
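/*
 * Rank @trans as an abort victim: 0 means it may not fail and cannot be
 * aborted, higher values are preferred victims:
 */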
181 static int btree_trans_abort_preference(struct btree_trans *trans)
182 {
183 	if (trans->lock_may_not_fail)
184 		return 0;
185 	if (trans->locking_wait.lock_want == SIX_LOCK_write)
186 		return 1;
187 	if (!trans->in_traverse_all)
188 		return 2;
189 	return 3;
190 }
191 
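/*
 * We found a cycle: abort the transaction in it with the highest abort
 * preference. A cycle consisting entirely of nofail locks is a bug. If @cycle
 * is non NULL we're only reporting the cycle (for debugfs), not breaking it:
 */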
192 static noinline int break_cycle(struct lock_graph *g, struct printbuf *cycle)
193 {
194 	struct trans_waiting_for_lock *i, *abort = NULL;
195 	unsigned best = 0, pref;
196 	int ret;
197 
198 	if (lock_graph_remove_non_waiters(g))
199 		return 0;
200 
201 	/* Only checking, for debugfs: */
202 	if (cycle) {
203 		print_cycle(cycle, g);
204 		ret = -1;
205 		goto out;
206 	}
207 
208 	for (i = g->g; i < g->g + g->nr; i++) {
209 		pref = btree_trans_abort_preference(i->trans);
210 		if (pref > best) {
211 			abort = i;
212 			best = pref;
213 		}
214 	}
215 
216 	if (unlikely(!best)) {
217 		struct printbuf buf = PRINTBUF;
218 		buf.atomic++;
219 
220 		prt_printf(&buf, bch2_fmt(g->g->trans->c, "cycle of nofail locks"));
221 
222 		for (i = g->g; i < g->g + g->nr; i++) {
223 			struct btree_trans *trans = i->trans;
224 
225 			bch2_btree_trans_to_text(&buf, trans);
226 
227 			prt_printf(&buf, "backtrace:\n");
228 			printbuf_indent_add(&buf, 2);
229 			bch2_prt_task_backtrace(&buf, trans->locking_wait.task, 2, GFP_NOWAIT);
230 			printbuf_indent_sub(&buf, 2);
231 			prt_newline(&buf);
232 		}
233 
234 		bch2_print_string_as_lines(KERN_ERR, buf.buf);
235 		printbuf_exit(&buf);
236 		BUG();
237 	}
238 
239 	ret = abort_lock(g, abort);
240 out:
241 	if (ret)
242 		while (g->nr)
243 			lock_graph_up(g);
244 	return ret;
245 }
246 
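/*
 * Add @trans - which holds a lock that the top graph entry is waiting on - to
 * the graph. If it's already in the graph we've found a cycle; if the graph
 * overflows, bail out, restarting the original transaction when we're allowed
 * to:
 */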
247 static int lock_graph_descend(struct lock_graph *g, struct btree_trans *trans,
248 			      struct printbuf *cycle)
249 {
250 	struct btree_trans *orig_trans = g->g->trans;
251 	struct trans_waiting_for_lock *i;
252 
253 	for (i = g->g; i < g->g + g->nr; i++)
254 		if (i->trans == trans) {
255 			closure_put(&trans->ref);
256 			return break_cycle(g, cycle);
257 		}
258 
259 	if (g->nr == ARRAY_SIZE(g->g)) {
260 		closure_put(&trans->ref);
261 
262 		if (orig_trans->lock_may_not_fail)
263 			return 0;
264 
265 		while (g->nr)
266 			lock_graph_up(g);
267 
268 		if (cycle)
269 			return 0;
270 
271 		trace_and_count(trans->c, trans_restart_would_deadlock_recursion_limit, trans, _RET_IP_);
272 		return btree_trans_restart(orig_trans, BCH_ERR_transaction_restart_deadlock_recursion_limit);
273 	}
274 
275 	__lock_graph_down(g, trans);
276 	return 0;
277 }
278 
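/*
 * With SIX_LOCK_read == 0, intent == 1, write == 2: read/read and read/intent
 * don't conflict, every other combination does:
 */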
279 static bool lock_type_conflicts(enum six_lock_type t1, enum six_lock_type t2)
280 {
281 	return t1 + t2 > 1;
282 }
283 
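/*
 * Check whether @trans is part of a cycle of transactions that are all blocked
 * waiting on locks held by each other; if it is, break the cycle by aborting
 * one of them - possibly @trans itself, by returning a transaction restart
 * error. If @cycle is non NULL we're being called from debugfs to report
 * cycles, not break them.
 */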
284 int bch2_check_for_deadlock(struct btree_trans *trans, struct printbuf *cycle)
285 {
286 	struct lock_graph g;
287 	struct trans_waiting_for_lock *top;
288 	struct btree_bkey_cached_common *b;
289 	btree_path_idx_t path_idx;
290 	int ret = 0;
291 
292 	g.nr = 0;
293 
294 	if (trans->lock_must_abort) {
295 		if (cycle)
296 			return -1;
297 
298 		trace_would_deadlock(&g, trans);
299 		return btree_trans_restart(trans, BCH_ERR_transaction_restart_would_deadlock);
300 	}
301 
302 	lock_graph_down(&g, trans);
303 
304 	/* trans->paths is rcu protected vs. freeing */
305 	rcu_read_lock();
306 	if (cycle)
307 		cycle->atomic++;
308 next:
309 	if (!g.nr)
310 		goto out;
311 
312 	top = &g.g[g.nr - 1];
313 
314 	struct btree_path *paths = rcu_dereference(top->trans->paths);
315 	if (!paths)
316 		goto up;
317 
318 	unsigned long *paths_allocated = trans_paths_allocated(paths);
319 
320 	trans_for_each_path_idx_from(paths_allocated, *trans_paths_nr(paths),
321 				     path_idx, top->path_idx) {
322 		struct btree_path *path = paths + path_idx;
323 		if (!path->nodes_locked)
324 			continue;
325 
326 		if (path_idx != top->path_idx) {
327 			top->path_idx		= path_idx;
328 			top->level		= 0;
329 			top->lock_start_time	= 0;
330 		}
331 
332 		for (;
333 		     top->level < BTREE_MAX_DEPTH;
334 		     top->level++, top->lock_start_time = 0) {
335 			int lock_held = btree_node_locked_type(path, top->level);
336 
337 			if (lock_held == BTREE_NODE_UNLOCKED)
338 				continue;
339 
340 			b = &READ_ONCE(path->l[top->level].b)->c;
341 
342 			if (IS_ERR_OR_NULL(b)) {
343 				/*
344 				 * If we get here, it means we raced with the
345 				 * other thread updating its btree_path
346 				 * structures - which means it can't be blocked
347 				 * waiting on a lock:
348 				 */
349 				if (!lock_graph_remove_non_waiters(&g)) {
350 					/*
351 					 * If lock_graph_remove_non_waiters()
352 					 * didn't do anything, it must be
353 					 * because we're being called by debugfs
354 					 * checking for lock cycles, which
355 					 * invokes us on btree_transactions that
356 					 * aren't actually waiting on anything.
357 					 * Just bail out:
358 					 */
359 					lock_graph_pop_all(&g);
360 				}
361 
362 				goto next;
363 			}
364 
365 			if (list_empty_careful(&b->lock.wait_list))
366 				continue;
367 
368 			raw_spin_lock(&b->lock.wait_lock);
369 			list_for_each_entry(trans, &b->lock.wait_list, locking_wait.list) {
370 				BUG_ON(b != trans->locking);
371 
372 				if (top->lock_start_time &&
373 				    time_after_eq64(top->lock_start_time, trans->locking_wait.start_time))
374 					continue;
375 
376 				top->lock_start_time = trans->locking_wait.start_time;
377 
378 				/* Don't check for self deadlock: */
379 				if (trans == top->trans ||
380 				    !lock_type_conflicts(lock_held, trans->locking_wait.lock_want))
381 					continue;
382 
383 				closure_get(&trans->ref);
384 				raw_spin_unlock(&b->lock.wait_lock);
385 
386 				ret = lock_graph_descend(&g, trans, cycle);
387 				if (ret)
388 					goto out;
389 				goto next;
390 
391 			}
392 			raw_spin_unlock(&b->lock.wait_lock);
393 		}
394 	}
395 up:
396 	if (g.nr > 1 && cycle)
397 		print_chain(cycle, &g);
398 	lock_graph_up(&g);
399 	goto next;
400 out:
401 	if (cycle)
402 		--cycle->atomic;
403 	rcu_read_unlock();
404 	return ret;
405 }
406 
407 int bch2_six_check_for_deadlock(struct six_lock *lock, void *p)
408 {
409 	struct btree_trans *trans = p;
410 
411 	return bch2_check_for_deadlock(trans, NULL);
412 }
413 
414 int __bch2_btree_node_lock_write(struct btree_trans *trans, struct btree_path *path,
415 				 struct btree_bkey_cached_common *b,
416 				 bool lock_may_not_fail)
417 {
418 	int readers = bch2_btree_node_lock_counts(trans, NULL, b, b->level).n[SIX_LOCK_read];
419 	int ret;
420 
421 	/*
422 	 * Must drop our read locks before calling six_lock_write() -
423 	 * six_unlock() won't do wakeups until the reader count
424 	 * goes to 0, and it's safe because we have the node intent
425 	 * locked:
426 	 */
427 	six_lock_readers_add(&b->lock, -readers);
428 	ret = __btree_node_lock_nopath(trans, b, SIX_LOCK_write,
429 				       lock_may_not_fail, _RET_IP_);
430 	six_lock_readers_add(&b->lock, readers);
431 
432 	if (ret)
433 		mark_btree_node_locked_noreset(path, b->level, BTREE_NODE_INTENT_LOCKED);
434 
435 	return ret;
436 }
437 
438 void bch2_btree_node_lock_write_nofail(struct btree_trans *trans,
439 				       struct btree_path *path,
440 				       struct btree_bkey_cached_common *b)
441 {
442 	int ret = __btree_node_lock_write(trans, path, b, true);
443 	BUG_ON(ret);
444 }
445 
446 /* relock */
447 
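/*
 * Relock (or, if @upgrade, take intent locks on) the levels of @path from
 * path->level up to (but not including) path->locks_want; if any level fails,
 * unlock the path entirely and record what failed in @f:
 */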
448 static inline bool btree_path_get_locks(struct btree_trans *trans,
449 					struct btree_path *path,
450 					bool upgrade,
451 					struct get_locks_fail *f)
452 {
453 	unsigned l = path->level;
454 	int fail_idx = -1;
455 
456 	do {
457 		if (!btree_path_node(path, l))
458 			break;
459 
460 		if (!(upgrade
461 		      ? bch2_btree_node_upgrade(trans, path, l)
462 		      : bch2_btree_node_relock(trans, path, l))) {
463 			fail_idx	= l;
464 
465 			if (f) {
466 				f->l	= l;
467 				f->b	= path->l[l].b;
468 			}
469 		}
470 
471 		l++;
472 	} while (l < path->locks_want);
473 
474 	/*
475 	 * When we fail to get a lock, we have to ensure that any child nodes
476 	 * can't be relocked so bch2_btree_path_traverse has to walk back up to
477 	 * the node that we failed to relock:
478 	 */
479 	if (fail_idx >= 0) {
480 		__bch2_btree_path_unlock(trans, path);
481 		btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
482 
483 		do {
484 			path->l[fail_idx].b = upgrade
485 				? ERR_PTR(-BCH_ERR_no_btree_node_upgrade)
486 				: ERR_PTR(-BCH_ERR_no_btree_node_relock);
487 			--fail_idx;
488 		} while (fail_idx >= 0);
489 	}
490 
491 	if (path->uptodate == BTREE_ITER_NEED_RELOCK)
492 		path->uptodate = BTREE_ITER_UPTODATE;
493 
494 	return path->uptodate < BTREE_ITER_NEED_RELOCK;
495 }
496 
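/*
 * Re-take the lock we previously held on the node at @level: either by
 * relocking with the lock sequence number we saved, or by taking another
 * reference on a lock we already hold via a different path:
 */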
497 bool __bch2_btree_node_relock(struct btree_trans *trans,
498 			      struct btree_path *path, unsigned level,
499 			      bool trace)
500 {
501 	struct btree *b = btree_path_node(path, level);
502 	int want = __btree_lock_want(path, level);
503 
504 	if (race_fault())
505 		goto fail;
506 
507 	if (six_relock_type(&b->c.lock, want, path->l[level].lock_seq) ||
508 	    (btree_node_lock_seq_matches(path, b, level) &&
509 	     btree_node_lock_increment(trans, &b->c, level, want))) {
510 		mark_btree_node_locked(trans, path, level, want);
511 		return true;
512 	}
513 fail:
514 	if (trace && !trans->notrace_relock_fail)
515 		trace_and_count(trans->c, btree_path_relock_fail, trans, _RET_IP_, path, level);
516 	return false;
517 }
518 
519 /* upgrade */
520 
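/*
 * Upgrade the lock at @level from read to intent, trying in turn:
 * six_lock_tryupgrade() (with our own read references dropped temporarily),
 * relocking as intent with the saved lock sequence number, or taking another
 * reference on an intent lock already held via a different path:
 */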
521 bool bch2_btree_node_upgrade(struct btree_trans *trans,
522 			     struct btree_path *path, unsigned level)
523 {
524 	struct btree *b = path->l[level].b;
525 	struct six_lock_count count = bch2_btree_node_lock_counts(trans, path, &b->c, level);
526 
527 	if (!is_btree_node(path, level))
528 		return false;
529 
530 	switch (btree_lock_want(path, level)) {
531 	case BTREE_NODE_UNLOCKED:
532 		BUG_ON(btree_node_locked(path, level));
533 		return true;
534 	case BTREE_NODE_READ_LOCKED:
535 		BUG_ON(btree_node_intent_locked(path, level));
536 		return bch2_btree_node_relock(trans, path, level);
537 	case BTREE_NODE_INTENT_LOCKED:
538 		break;
539 	case BTREE_NODE_WRITE_LOCKED:
540 		BUG();
541 	}
542 
543 	if (btree_node_intent_locked(path, level))
544 		return true;
545 
546 	if (race_fault())
547 		return false;
548 
549 	if (btree_node_locked(path, level)) {
550 		bool ret;
551 
552 		six_lock_readers_add(&b->c.lock, -count.n[SIX_LOCK_read]);
553 		ret = six_lock_tryupgrade(&b->c.lock);
554 		six_lock_readers_add(&b->c.lock, count.n[SIX_LOCK_read]);
555 
556 		if (ret)
557 			goto success;
558 	} else {
559 		if (six_relock_type(&b->c.lock, SIX_LOCK_intent, path->l[level].lock_seq))
560 			goto success;
561 	}
562 
563 	/*
564 	 * Do we already have an intent lock via another path? If so, just bump
565 	 * lock count:
566 	 */
567 	if (btree_node_lock_seq_matches(path, b, level) &&
568 	    btree_node_lock_increment(trans, &b->c, level, BTREE_NODE_INTENT_LOCKED)) {
569 		btree_node_unlock(trans, path, level);
570 		goto success;
571 	}
572 
573 	trace_and_count(trans->c, btree_path_upgrade_fail, trans, _RET_IP_, path, level);
574 	return false;
575 success:
576 	mark_btree_node_locked_noreset(path, level, BTREE_NODE_INTENT_LOCKED);
577 	return true;
578 }
579 
580 /* Btree path locking: */
581 
582 /*
583  * Only for btree_cache.c - only relocks intent locks
584  */
585 int bch2_btree_path_relock_intent(struct btree_trans *trans,
586 				  struct btree_path *path)
587 {
588 	unsigned l;
589 
590 	for (l = path->level;
591 	     l < path->locks_want && btree_path_node(path, l);
592 	     l++) {
593 		if (!bch2_btree_node_relock(trans, path, l)) {
594 			__bch2_btree_path_unlock(trans, path);
595 			btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
596 			trace_and_count(trans->c, trans_restart_relock_path_intent, trans, _RET_IP_, path);
597 			return btree_trans_restart(trans, BCH_ERR_transaction_restart_relock_path_intent);
598 		}
599 	}
600 
601 	return 0;
602 }
603 
604 __flatten
605 bool bch2_btree_path_relock_norestart(struct btree_trans *trans, struct btree_path *path)
606 {
607 	struct get_locks_fail f;
608 
609 	bool ret = btree_path_get_locks(trans, path, false, &f);
610 	bch2_trans_verify_locks(trans);
611 	return ret;
612 }
613 
614 int __bch2_btree_path_relock(struct btree_trans *trans,
615 			struct btree_path *path, unsigned long trace_ip)
616 {
617 	if (!bch2_btree_path_relock_norestart(trans, path)) {
618 		trace_and_count(trans->c, trans_restart_relock_path, trans, trace_ip, path);
619 		return btree_trans_restart(trans, BCH_ERR_transaction_restart_relock_path);
620 	}
621 
622 	return 0;
623 }
624 
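/*
 * Raise @path's locks_want and take the additional intent locks on this path
 * only; __bch2_btree_path_upgrade() below also bumps other paths into the same
 * btree:
 */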
625 bool bch2_btree_path_upgrade_noupgrade_sibs(struct btree_trans *trans,
626 			       struct btree_path *path,
627 			       unsigned new_locks_want,
628 			       struct get_locks_fail *f)
629 {
630 	EBUG_ON(path->locks_want >= new_locks_want);
631 
632 	path->locks_want = new_locks_want;
633 
634 	bool ret = btree_path_get_locks(trans, path, true, f);
635 	bch2_trans_verify_locks(trans);
636 	return ret;
637 }
638 
639 bool __bch2_btree_path_upgrade(struct btree_trans *trans,
640 			       struct btree_path *path,
641 			       unsigned new_locks_want,
642 			       struct get_locks_fail *f)
643 {
644 	bool ret = bch2_btree_path_upgrade_noupgrade_sibs(trans, path, new_locks_want, f);
645 	if (ret)
646 		goto out;
647 
648 	/*
649 	 * XXX: this is ugly - we'd prefer to not be mucking with other
650 	 * iterators in the btree_trans here.
651 	 *
652 	 * On failure to upgrade the iterator, setting iter->locks_want and
653 	 * calling get_locks() is sufficient to make bch2_btree_path_traverse()
654 	 * get the locks we want on transaction restart.
655 	 *
656 	 * But if this iterator was a clone, on transaction restart what we did
657 	 * to this iterator isn't going to be preserved.
658 	 *
659 	 * Possibly we could add an iterator field for the parent iterator when
660 	 * an iterator is a copy - for now, we'll just upgrade any other
661 	 * iterators with the same btree id.
662 	 *
663 	 * The code below used to be needed to ensure ancestor nodes get locked
664 	 * before interior nodes - now that's handled by
665 	 * bch2_btree_path_traverse_all().
666 	 */
667 	if (!path->cached && !trans->in_traverse_all) {
668 		struct btree_path *linked;
669 		unsigned i;
670 
671 		trans_for_each_path(trans, linked, i)
672 			if (linked != path &&
673 			    linked->cached == path->cached &&
674 			    linked->btree_id == path->btree_id &&
675 			    linked->locks_want < new_locks_want) {
676 				linked->locks_want = new_locks_want;
677 				btree_path_get_locks(trans, linked, true, NULL);
678 			}
679 	}
680 out:
681 	bch2_trans_verify_locks(trans);
682 	return ret;
683 }
684 
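/*
 * Lower @path's locks_want to @new_locks_want, releasing locks we no longer
 * need: levels above path->level are unlocked entirely, path->level itself is
 * downgraded from intent to read:
 */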
685 void __bch2_btree_path_downgrade(struct btree_trans *trans,
686 				 struct btree_path *path,
687 				 unsigned new_locks_want)
688 {
689 	unsigned l, old_locks_want = path->locks_want;
690 
691 	if (trans->restarted)
692 		return;
693 
694 	EBUG_ON(path->locks_want < new_locks_want);
695 
696 	path->locks_want = new_locks_want;
697 
698 	while (path->nodes_locked &&
699 	       (l = btree_path_highest_level_locked(path)) >= path->locks_want) {
700 		if (l > path->level) {
701 			btree_node_unlock(trans, path, l);
702 		} else {
703 			if (btree_node_intent_locked(path, l)) {
704 				six_lock_downgrade(&path->l[l].b->c.lock);
705 				mark_btree_node_locked_noreset(path, l, BTREE_NODE_READ_LOCKED);
706 			}
707 			break;
708 		}
709 	}
710 
711 	bch2_btree_path_verify_locks(path);
712 
713 	trace_path_downgrade(trans, _RET_IP_, path, old_locks_want);
714 }
715 
716 /* Btree transaction locking: */
717 
718 void bch2_trans_downgrade(struct btree_trans *trans)
719 {
720 	struct btree_path *path;
721 	unsigned i;
722 
723 	if (trans->restarted)
724 		return;
725 
726 	trans_for_each_path(trans, path, i)
727 		if (path->ref)
728 			bch2_btree_path_downgrade(trans, path);
729 }
730 
731 static inline void __bch2_trans_unlock(struct btree_trans *trans)
732 {
733 	struct btree_path *path;
734 	unsigned i;
735 
736 	trans_for_each_path(trans, path, i)
737 		__bch2_btree_path_unlock(trans, path);
738 }
739 
740 static noinline __cold int bch2_trans_relock_fail(struct btree_trans *trans, struct btree_path *path,
741 						  struct get_locks_fail *f, bool trace)
742 {
743 	if (!trace)
744 		goto out;
745 
746 	if (trace_trans_restart_relock_enabled()) {
747 		struct printbuf buf = PRINTBUF;
748 
749 		bch2_bpos_to_text(&buf, path->pos);
750 		prt_printf(&buf, " l=%u seq=%u node seq=", f->l, path->l[f->l].lock_seq);
751 		if (IS_ERR_OR_NULL(f->b)) {
752 			prt_str(&buf, bch2_err_str(PTR_ERR(f->b)));
753 		} else {
754 			prt_printf(&buf, "%u", f->b->c.lock.seq);
755 
756 			struct six_lock_count c =
757 				bch2_btree_node_lock_counts(trans, NULL, &f->b->c, f->l);
758 			prt_printf(&buf, " self locked %u.%u.%u", c.n[0], c.n[1], c.n[2]);
759 
760 			c = six_lock_counts(&f->b->c.lock);
761 			prt_printf(&buf, " total locked %u.%u.%u", c.n[0], c.n[1], c.n[2]);
762 		}
763 
764 		trace_trans_restart_relock(trans, _RET_IP_, buf.buf);
765 		printbuf_exit(&buf);
766 	}
767 
768 	count_event(trans->c, trans_restart_relock);
769 out:
770 	__bch2_trans_unlock(trans);
771 	bch2_trans_verify_locks(trans);
772 	return btree_trans_restart(trans, BCH_ERR_transaction_restart_relock);
773 }
774 
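/*
 * Relock every path marked should_be_locked; if any of them can't be relocked,
 * unlock the transaction and restart it:
 */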
775 static inline int __bch2_trans_relock(struct btree_trans *trans, bool trace)
776 {
777 	bch2_trans_verify_locks(trans);
778 
779 	if (unlikely(trans->restarted))
780 		return -((int) trans->restarted);
781 	if (unlikely(trans->locked))
782 		goto out;
783 
784 	struct btree_path *path;
785 	unsigned i;
786 
787 	trans_for_each_path(trans, path, i) {
788 		struct get_locks_fail f;
789 
790 		if (path->should_be_locked &&
791 		    !btree_path_get_locks(trans, path, false, &f))
792 			return bch2_trans_relock_fail(trans, path, &f, trace);
793 	}
794 
795 	trans->locked = true;
796 out:
797 	bch2_trans_verify_locks(trans);
798 	return 0;
799 }
800 
801 int bch2_trans_relock(struct btree_trans *trans)
802 {
803 	return __bch2_trans_relock(trans, true);
804 }
805 
806 int bch2_trans_relock_notrace(struct btree_trans *trans)
807 {
808 	return __bch2_trans_relock(trans, false);
809 }
810 
811 void bch2_trans_unlock_noassert(struct btree_trans *trans)
812 {
813 	__bch2_trans_unlock(trans);
814 
815 	trans->locked = false;
816 	trans->last_unlock_ip = _RET_IP_;
817 }
818 
819 void bch2_trans_unlock(struct btree_trans *trans)
820 {
821 	__bch2_trans_unlock(trans);
822 
823 	trans->locked = false;
824 	trans->last_unlock_ip = _RET_IP_;
825 }
826 
827 void bch2_trans_unlock_long(struct btree_trans *trans)
828 {
829 	bch2_trans_unlock(trans);
830 	bch2_trans_srcu_unlock(trans);
831 }
832 
833 int __bch2_trans_mutex_lock(struct btree_trans *trans,
834 			    struct mutex *lock)
835 {
836 	int ret = drop_locks_do(trans, (mutex_lock(lock), 0));
837 
838 	if (ret)
839 		mutex_unlock(lock);
840 	return ret;
841 }
842 
843 /* Debug */
844 
845 #ifdef CONFIG_BCACHEFS_DEBUG
846 
847 void bch2_btree_path_verify_locks(struct btree_path *path)
848 {
849 	/*
850 	 * A path may be uptodate and yet have nothing locked if and only if
851 	 * there is no node at path->level, which generally means we were
852 	 * iterating over all nodes and got to the end of the btree
853 	 */
854 	BUG_ON(path->uptodate == BTREE_ITER_UPTODATE &&
855 	       btree_path_node(path, path->level) &&
856 	       !path->nodes_locked);
857 
858 	if (!path->nodes_locked)
859 		return;
860 
861 	for (unsigned l = 0; l < BTREE_MAX_DEPTH; l++) {
862 		int want = btree_lock_want(path, l);
863 		int have = btree_node_locked_type(path, l);
864 
865 		BUG_ON(!is_btree_node(path, l) && have != BTREE_NODE_UNLOCKED);
866 
867 		BUG_ON(is_btree_node(path, l) &&
868 		       (want == BTREE_NODE_UNLOCKED ||
869 			have != BTREE_NODE_WRITE_LOCKED) &&
870 		       want != have);
871 	}
872 }
873 
874 static bool bch2_trans_locked(struct btree_trans *trans)
875 {
876 	struct btree_path *path;
877 	unsigned i;
878 
879 	trans_for_each_path(trans, path, i)
880 		if (path->nodes_locked)
881 			return true;
882 	return false;
883 }
884 
885 void bch2_trans_verify_locks(struct btree_trans *trans)
886 {
887 	if (!trans->locked) {
888 		BUG_ON(bch2_trans_locked(trans));
889 		return;
890 	}
891 
892 	struct btree_path *path;
893 	unsigned i;
894 
895 	trans_for_each_path(trans, path, i)
896 		bch2_btree_path_verify_locks(path);
897 }
898 
899 #endif
900