xref: /linux/fs/bcachefs/btree_locking.c (revision dd83757f6e686a2188997cb58b5975f744bb7786)
// SPDX-License-Identifier: GPL-2.0

#include "bcachefs.h"
#include "btree_locking.h"
#include "btree_types.h"

static struct lock_class_key bch2_btree_node_lock_key;

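/*
 * All btree node locks share a single lock class, and lockdep tracking is
 * disabled: lock ordering on btree nodes is dynamic (it depends on a node's
 * position in the btree), which lockdep's static lock classes can't model, so
 * bcachefs does its own deadlock avoidance with the cycle detector below.
 */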
void bch2_btree_lock_init(struct btree_bkey_cached_common *b,
			  enum six_lock_init_flags flags,
			  gfp_t gfp)
{
	__six_lock_init(&b->lock, "b->c.lock", &bch2_btree_node_lock_key, flags, gfp);
	lockdep_set_notrack_class(&b->lock);
}

/* Btree node locking: */

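/*
 * Count how many times @trans already holds a lock on @b at @level, broken out
 * by lock type; @skip, if non-NULL, is a path to exclude from the count.
 */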
struct six_lock_count bch2_btree_node_lock_counts(struct btree_trans *trans,
						  struct btree_path *skip,
						  struct btree_bkey_cached_common *b,
						  unsigned level)
{
	struct btree_path *path;
	struct six_lock_count ret;
	unsigned i;

	memset(&ret, 0, sizeof(ret));

	if (IS_ERR_OR_NULL(b))
		return ret;

	trans_for_each_path(trans, path, i)
		if (path != skip && &path->l[level].b->c == b) {
			int t = btree_node_locked_type(path, level);

			if (t != BTREE_NODE_UNLOCKED)
				ret.n[t]++;
		}

	return ret;
}

/* unlock */

void bch2_btree_node_unlock_write(struct btree_trans *trans,
			struct btree_path *path, struct btree *b)
{
	bch2_btree_node_unlock_write_inlined(trans, path, b);
}

/* lock */

/*
 * An entry in the waits-for graph: @trans wants to take a @lock_want lock on
 * @node_want.
 */
struct trans_waiting_for_lock {
	struct btree_trans		*trans;
	struct btree_bkey_cached_common	*node_want;
	enum six_lock_type		lock_want;

	/* for iterating over held locks: */
	u8				path_idx;
	u8				level;
	u64				lock_start_time;
};

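/*
 * The explicit stack for the deadlock cycle DFS: a chain of transactions in
 * which each entry is blocked on a lock held by the next. Depth is capped at
 * eight entries; past that we give up and restart rather than keep searching
 * (see lock_graph_descend()).
 */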
struct lock_graph {
	struct trans_waiting_for_lock	g[8];
	unsigned			nr;
};

static noinline void print_cycle(struct printbuf *out, struct lock_graph *g)
{
	struct trans_waiting_for_lock *i;

	prt_printf(out, "Found lock cycle (%u entries):\n", g->nr);

	for (i = g->g; i < g->g + g->nr; i++) {
		struct task_struct *task = READ_ONCE(i->trans->locking_wait.task);
		if (!task)
			continue;

		bch2_btree_trans_to_text(out, i->trans);
		bch2_prt_task_backtrace(out, task, i == g->g ? 5 : 1, GFP_NOWAIT);
	}
}

static noinline void print_chain(struct printbuf *out, struct lock_graph *g)
{
	struct trans_waiting_for_lock *i;

	for (i = g->g; i != g->g + g->nr; i++) {
		struct task_struct *task = i->trans->locking_wait.task;
		if (i != g->g)
			prt_str(out, "<- ");
		prt_printf(out, "%u ", task ? task->pid : 0);
	}
	prt_newline(out);
}

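/*
 * Graph entries pin their transaction with a closure ref, taken in
 * lock_graph_down(); the pop helpers below drop that ref as entries come off
 * the stack.
 */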
static void lock_graph_up(struct lock_graph *g)
{
	closure_put(&g->g[--g->nr].trans->ref);
}

static noinline void lock_graph_pop_all(struct lock_graph *g)
{
	while (g->nr)
		lock_graph_up(g);
}

static noinline void lock_graph_pop_from(struct lock_graph *g, struct trans_waiting_for_lock *i)
{
	while (g->g + g->nr > i)
		lock_graph_up(g);
}

static void __lock_graph_down(struct lock_graph *g, struct btree_trans *trans)
{
	g->g[g->nr++] = (struct trans_waiting_for_lock) {
		.trans		= trans,
		.node_want	= trans->locking,
		.lock_want	= trans->locking_wait.lock_want,
	};
}

static void lock_graph_down(struct lock_graph *g, struct btree_trans *trans)
{
	closure_get(&trans->ref);
	__lock_graph_down(g, trans);
}

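/*
 * Prune entries that are no longer actually waiting: if a transaction in the
 * chain has moved on from the lock we saw it blocked on (we raced with it
 * being woken), every edge from that point down is stale, so pop that entry
 * and everything after it. Returns true if anything was removed.
 */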
static bool lock_graph_remove_non_waiters(struct lock_graph *g,
					  struct trans_waiting_for_lock *from)
{
	struct trans_waiting_for_lock *i;

	if (from->trans->locking != from->node_want) {
		lock_graph_pop_from(g, from);
		return true;
	}

	for (i = from + 1; i < g->g + g->nr; i++)
		if (i->trans->locking != i->node_want ||
		    i->trans->locking_wait.start_time != i[-1].lock_start_time) {
			lock_graph_pop_from(g, i);
			return true;
		}

	return false;
}

static void trace_would_deadlock(struct lock_graph *g, struct btree_trans *trans)
{
	struct bch_fs *c = trans->c;

	count_event(c, trans_restart_would_deadlock);

	if (trace_trans_restart_would_deadlock_enabled()) {
		struct printbuf buf = PRINTBUF;

		buf.atomic++;
		print_cycle(&buf, g);

		trace_trans_restart_would_deadlock(trans, buf.buf);
		printbuf_exit(&buf);
	}
}

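/*
 * Abort one transaction to break the cycle: if the victim is the head of the
 * graph it's us, and we can restart directly; otherwise flag it and wake it so
 * it aborts itself.
 */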
static int abort_lock(struct lock_graph *g, struct trans_waiting_for_lock *i)
{
	if (i == g->g) {
		trace_would_deadlock(g, i->trans);
		return btree_trans_restart(i->trans, BCH_ERR_transaction_restart_would_deadlock);
	} else {
		i->trans->lock_must_abort = true;
		wake_up_process(i->trans->locking_wait.task);
		return 0;
	}
}

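/*
 * Victim selection: higher values make a better candidate to abort. Zero means
 * the transaction must not fail and can't be picked at all.
 */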
static int btree_trans_abort_preference(struct btree_trans *trans)
{
	if (trans->lock_may_not_fail)
		return 0;
	if (trans->locking_wait.lock_want == SIX_LOCK_write)
		return 1;
	if (!trans->in_traverse_all)
		return 2;
	return 3;
}

static noinline int break_cycle(struct lock_graph *g, struct printbuf *cycle,
				struct trans_waiting_for_lock *from)
{
	struct trans_waiting_for_lock *i, *abort = NULL;
	unsigned best = 0, pref;
	int ret;

	if (lock_graph_remove_non_waiters(g, from))
		return 0;

	/* Only checking, for debugfs: */
	if (cycle) {
		print_cycle(cycle, g);
		ret = -1;
		goto out;
	}

	for (i = from; i < g->g + g->nr; i++) {
		pref = btree_trans_abort_preference(i->trans);
		if (pref > best) {
			abort = i;
			best = pref;
		}
	}

	if (unlikely(!best)) {
		struct printbuf buf = PRINTBUF;
		buf.atomic++;

		prt_printf(&buf, bch2_fmt(g->g->trans->c, "cycle of nofail locks"));

		for (i = g->g; i < g->g + g->nr; i++) {
			struct btree_trans *trans = i->trans;

			bch2_btree_trans_to_text(&buf, trans);

			prt_printf(&buf, "backtrace:\n");
			printbuf_indent_add(&buf, 2);
			bch2_prt_task_backtrace(&buf, trans->locking_wait.task, 2, GFP_NOWAIT);
			printbuf_indent_sub(&buf, 2);
			prt_newline(&buf);
		}

		bch2_print_string_as_lines_nonblocking(KERN_ERR, buf.buf);
		printbuf_exit(&buf);
		BUG();
	}

	ret = abort_lock(g, abort);
out:
	if (ret)
		lock_graph_pop_all(g);
	else
		lock_graph_pop_from(g, abort);
	return ret;
}

static int lock_graph_descend(struct lock_graph *g, struct btree_trans *trans,
			      struct printbuf *cycle)
{
	struct btree_trans *orig_trans = g->g->trans;
	struct trans_waiting_for_lock *i;

	for (i = g->g; i < g->g + g->nr; i++)
		if (i->trans == trans) {
			closure_put(&trans->ref);
			return break_cycle(g, cycle, i);
		}

	if (g->nr == ARRAY_SIZE(g->g)) {
		closure_put(&trans->ref);

		if (orig_trans->lock_may_not_fail)
			return 0;

		lock_graph_pop_all(g);

		if (cycle)
			return 0;

		trace_and_count(trans->c, trans_restart_would_deadlock_recursion_limit, trans, _RET_IP_);
		return btree_trans_restart(orig_trans, BCH_ERR_transaction_restart_deadlock_recursion_limit);
	}

	__lock_graph_down(g, trans);
	return 0;
}

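/*
 * six lock types are numbered read = 0, intent = 1, write = 2, so two holders
 * conflict iff their types sum to more than 1: read/read and read/intent are
 * compatible, while intent/intent and anything involving write are not.
 */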
static bool lock_type_conflicts(enum six_lock_type t1, enum six_lock_type t2)
{
	return t1 + t2 > 1;
}

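/*
 * The deadlock cycle detector: starting from @trans, walk the waits-for graph,
 * following edges from each blocked transaction's held locks to the
 * transactions waiting on those locks. Reaching a transaction already on the
 * stack means we've found a cycle, which we break by aborting a victim. When
 * @cycle is non-NULL we're only reporting (for debugfs) and abort nothing.
 */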
int bch2_check_for_deadlock(struct btree_trans *trans, struct printbuf *cycle)
{
	struct lock_graph g;
	struct trans_waiting_for_lock *top;
	struct btree_bkey_cached_common *b;
	btree_path_idx_t path_idx;
	int ret = 0;

	g.nr = 0;

	if (trans->lock_must_abort && !trans->lock_may_not_fail) {
		if (cycle)
			return -1;

		trace_would_deadlock(&g, trans);
		return btree_trans_restart(trans, BCH_ERR_transaction_restart_would_deadlock);
	}

	lock_graph_down(&g, trans);

	/* trans->paths is rcu protected vs. freeing */
	rcu_read_lock();
	if (cycle)
		cycle->atomic++;
next:
	if (!g.nr)
		goto out;

	top = &g.g[g.nr - 1];

	struct btree_path *paths = rcu_dereference(top->trans->paths);
	if (!paths)
		goto up;

	unsigned long *paths_allocated = trans_paths_allocated(paths);

	trans_for_each_path_idx_from(paths_allocated, *trans_paths_nr(paths),
				     path_idx, top->path_idx) {
		struct btree_path *path = paths + path_idx;
		if (!path->nodes_locked)
			continue;

		if (path_idx != top->path_idx) {
			top->path_idx		= path_idx;
			top->level		= 0;
			top->lock_start_time	= 0;
		}

		for (;
		     top->level < BTREE_MAX_DEPTH;
		     top->level++, top->lock_start_time = 0) {
			int lock_held = btree_node_locked_type(path, top->level);

			if (lock_held == BTREE_NODE_UNLOCKED)
				continue;

			b = &READ_ONCE(path->l[top->level].b)->c;

			if (IS_ERR_OR_NULL(b)) {
				/*
				 * If we get here, it means we raced with the
				 * other thread updating its btree_path
				 * structures - which means it can't be blocked
				 * waiting on a lock:
				 */
				if (!lock_graph_remove_non_waiters(&g, g.g)) {
					/*
					 * If lock_graph_remove_non_waiters()
					 * didn't do anything, it must be
					 * because we're being called by debugfs
					 * checking for lock cycles, which
					 * invokes us on btree_transactions that
					 * aren't actually waiting on anything.
					 * Just bail out:
					 */
					lock_graph_pop_all(&g);
				}

				goto next;
			}

			if (list_empty_careful(&b->lock.wait_list))
				continue;

			raw_spin_lock(&b->lock.wait_lock);
			list_for_each_entry(trans, &b->lock.wait_list, locking_wait.list) {
				BUG_ON(b != trans->locking);

				if (top->lock_start_time &&
				    time_after_eq64(top->lock_start_time, trans->locking_wait.start_time))
					continue;

				top->lock_start_time = trans->locking_wait.start_time;

				/* Don't check for self deadlock: */
				if (trans == top->trans ||
				    !lock_type_conflicts(lock_held, trans->locking_wait.lock_want))
					continue;

				closure_get(&trans->ref);
				raw_spin_unlock(&b->lock.wait_lock);

				ret = lock_graph_descend(&g, trans, cycle);
				if (ret)
					goto out;
				goto next;
			}
			raw_spin_unlock(&b->lock.wait_lock);
		}
	}
up:
	if (g.nr > 1 && cycle)
		print_chain(cycle, &g);
	lock_graph_up(&g);
	goto next;
out:
	if (cycle)
		--cycle->atomic;
	rcu_read_unlock();
	return ret;
}

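/*
 * Passed to the six lock code as the should_sleep_fn callback when blocking on
 * a btree node lock: a nonzero return aborts the lock attempt.
 */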
int bch2_six_check_for_deadlock(struct six_lock *lock, void *p)
{
	struct btree_trans *trans = p;

	return bch2_check_for_deadlock(trans, NULL);
}

int __bch2_btree_node_lock_write(struct btree_trans *trans, struct btree_path *path,
				 struct btree_bkey_cached_common *b,
				 bool lock_may_not_fail)
{
	int readers = bch2_btree_node_lock_counts(trans, NULL, b, b->level).n[SIX_LOCK_read];
	int ret;

	/*
	 * Must drop our read locks before calling six_lock_write() -
	 * six_unlock() won't do wakeups until the reader count
	 * goes to 0, and it's safe because we have the node intent
	 * locked:
	 */
	six_lock_readers_add(&b->lock, -readers);
	ret = __btree_node_lock_nopath(trans, b, SIX_LOCK_write,
				       lock_may_not_fail, _RET_IP_);
	six_lock_readers_add(&b->lock, readers);

	if (ret)
		mark_btree_node_locked_noreset(path, b->level, BTREE_NODE_INTENT_LOCKED);

	return ret;
}

void bch2_btree_node_lock_write_nofail(struct btree_trans *trans,
				       struct btree_path *path,
				       struct btree_bkey_cached_common *b)
{
	int ret = __btree_node_lock_write(trans, path, b, true);
	BUG_ON(ret);
}

/* relock */

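/*
 * Walk @path from path->level up towards path->locks_want, relocking (or, with
 * @upgrade, upgrading to intent) each level. On failure everything is
 * unlocked, the failed level is recorded in @f, and that level and those below
 * it are flagged with an error pointer so the next traverse walks back up past
 * the point of failure.
 */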
static inline bool btree_path_get_locks(struct btree_trans *trans,
					struct btree_path *path,
					bool upgrade,
					struct get_locks_fail *f)
{
	unsigned l = path->level;
	int fail_idx = -1;

	do {
		if (!btree_path_node(path, l))
			break;

		if (!(upgrade
		      ? bch2_btree_node_upgrade(trans, path, l)
		      : bch2_btree_node_relock(trans, path, l))) {
			fail_idx	= l;

			if (f) {
				f->l	= l;
				f->b	= path->l[l].b;
			}
		}

		l++;
	} while (l < path->locks_want);

	/*
	 * When we fail to get a lock, we have to ensure that any child nodes
	 * can't be relocked so bch2_btree_path_traverse has to walk back up to
	 * the node that we failed to relock:
	 */
	if (fail_idx >= 0) {
		__bch2_btree_path_unlock(trans, path);
		btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);

		do {
			path->l[fail_idx].b = upgrade
				? ERR_PTR(-BCH_ERR_no_btree_node_upgrade)
				: ERR_PTR(-BCH_ERR_no_btree_node_relock);
			--fail_idx;
		} while (fail_idx >= 0);
	}

	if (path->uptodate == BTREE_ITER_NEED_RELOCK)
		path->uptodate = BTREE_ITER_UPTODATE;

	return path->uptodate < BTREE_ITER_NEED_RELOCK;
}

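/*
 * Relock a single node: either the node's lock sequence number still matches
 * what we saw when we last held the lock and we can retake it, or another path
 * in this transaction already holds the lock and we can just bump its count.
 */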
bool __bch2_btree_node_relock(struct btree_trans *trans,
			      struct btree_path *path, unsigned level,
			      bool trace)
{
	struct btree *b = btree_path_node(path, level);
	int want = __btree_lock_want(path, level);

	if (race_fault())
		goto fail;

	if (six_relock_type(&b->c.lock, want, path->l[level].lock_seq) ||
	    (btree_node_lock_seq_matches(path, b, level) &&
	     btree_node_lock_increment(trans, &b->c, level, want))) {
		mark_btree_node_locked(trans, path, level, want);
		return true;
	}
fail:
	if (trace && !trans->notrace_relock_fail)
		trace_and_count(trans->c, btree_path_relock_fail, trans, _RET_IP_, path, level);
	return false;
}

/* upgrade */

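/*
 * Upgrade a node from read locked to intent locked, trying in order: a direct
 * six_lock_tryupgrade(), relocking as intent if we weren't locked, or
 * piggybacking on another path in this transaction that already holds an
 * intent lock on the node.
 */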
bool bch2_btree_node_upgrade(struct btree_trans *trans,
			     struct btree_path *path, unsigned level)
{
	struct btree *b = path->l[level].b;

	if (!is_btree_node(path, level))
		return false;

	switch (btree_lock_want(path, level)) {
	case BTREE_NODE_UNLOCKED:
		BUG_ON(btree_node_locked(path, level));
		return true;
	case BTREE_NODE_READ_LOCKED:
		BUG_ON(btree_node_intent_locked(path, level));
		return bch2_btree_node_relock(trans, path, level);
	case BTREE_NODE_INTENT_LOCKED:
		break;
	case BTREE_NODE_WRITE_LOCKED:
		BUG();
	}

	if (btree_node_intent_locked(path, level))
		return true;

	if (race_fault())
		return false;

	if (btree_node_locked(path, level)
	    ? six_lock_tryupgrade(&b->c.lock)
	    : six_relock_type(&b->c.lock, SIX_LOCK_intent, path->l[level].lock_seq))
		goto success;

	if (btree_node_lock_seq_matches(path, b, level) &&
	    btree_node_lock_increment(trans, &b->c, level, BTREE_NODE_INTENT_LOCKED)) {
		btree_node_unlock(trans, path, level);
		goto success;
	}

	trace_and_count(trans->c, btree_path_upgrade_fail, trans, _RET_IP_, path, level);
	return false;
success:
	mark_btree_node_locked_noreset(path, level, BTREE_NODE_INTENT_LOCKED);
	return true;
}

/* Btree path locking: */

/*
 * Only for btree_cache.c - only relocks intent locks
 */
int bch2_btree_path_relock_intent(struct btree_trans *trans,
				  struct btree_path *path)
{
	unsigned l;

	for (l = path->level;
	     l < path->locks_want && btree_path_node(path, l);
	     l++) {
		if (!bch2_btree_node_relock(trans, path, l)) {
			__bch2_btree_path_unlock(trans, path);
			btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
			trace_and_count(trans->c, trans_restart_relock_path_intent, trans, _RET_IP_, path);
			return btree_trans_restart(trans, BCH_ERR_transaction_restart_relock_path_intent);
		}
	}

	return 0;
}

__flatten
bool bch2_btree_path_relock_norestart(struct btree_trans *trans, struct btree_path *path)
{
	struct get_locks_fail f;

	bool ret = btree_path_get_locks(trans, path, false, &f);
	bch2_trans_verify_locks(trans);
	return ret;
}

int __bch2_btree_path_relock(struct btree_trans *trans,
			struct btree_path *path, unsigned long trace_ip)
{
	if (!bch2_btree_path_relock_norestart(trans, path)) {
		trace_and_count(trans->c, trans_restart_relock_path, trans, trace_ip, path);
		return btree_trans_restart(trans, BCH_ERR_transaction_restart_relock_path);
	}

	return 0;
}

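/*
 * Raise locks_want and take the additional locks on @path alone;
 * __bch2_btree_path_upgrade() below additionally upgrades other paths on the
 * same btree, for the reasons described in the comment there.
 */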
bool bch2_btree_path_upgrade_noupgrade_sibs(struct btree_trans *trans,
			       struct btree_path *path,
			       unsigned new_locks_want,
			       struct get_locks_fail *f)
{
	EBUG_ON(path->locks_want >= new_locks_want);

	path->locks_want = new_locks_want;

	bool ret = btree_path_get_locks(trans, path, true, f);
	bch2_trans_verify_locks(trans);
	return ret;
}

bool __bch2_btree_path_upgrade(struct btree_trans *trans,
			       struct btree_path *path,
			       unsigned new_locks_want,
			       struct get_locks_fail *f)
{
	bool ret = bch2_btree_path_upgrade_noupgrade_sibs(trans, path, new_locks_want, f);
	if (ret)
		goto out;

	/*
	 * XXX: this is ugly - we'd prefer to not be mucking with other
	 * iterators in the btree_trans here.
	 *
	 * On failure to upgrade the iterator, setting iter->locks_want and
	 * calling get_locks() is sufficient to make bch2_btree_path_traverse()
	 * get the locks we want on transaction restart.
	 *
	 * But if this iterator was a clone, on transaction restart what we did
	 * to this iterator isn't going to be preserved.
	 *
	 * Possibly we could add an iterator field for the parent iterator when
	 * an iterator is a copy - for now, we'll just upgrade any other
	 * iterators with the same btree id.
	 *
	 * The code below used to be needed to ensure ancestor nodes get locked
	 * before interior nodes - now that's handled by
	 * bch2_btree_path_traverse_all().
	 */
	if (!path->cached && !trans->in_traverse_all) {
		struct btree_path *linked;
		unsigned i;

		trans_for_each_path(trans, linked, i)
			if (linked != path &&
			    linked->cached == path->cached &&
			    linked->btree_id == path->btree_id &&
			    linked->locks_want < new_locks_want) {
				linked->locks_want = new_locks_want;
				btree_path_get_locks(trans, linked, true, NULL);
			}
	}
out:
	bch2_trans_verify_locks(trans);
	return ret;
}

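/*
 * Lower locks_want and release or weaken whatever that leaves over-locked:
 * locks above path->level are dropped outright, and an intent lock at
 * path->level itself is downgraded to a read lock.
 */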
void __bch2_btree_path_downgrade(struct btree_trans *trans,
				 struct btree_path *path,
				 unsigned new_locks_want)
{
	unsigned l, old_locks_want = path->locks_want;

	if (trans->restarted)
		return;

	EBUG_ON(path->locks_want < new_locks_want);

	path->locks_want = new_locks_want;

	while (path->nodes_locked &&
	       (l = btree_path_highest_level_locked(path)) >= path->locks_want) {
		if (l > path->level) {
			btree_node_unlock(trans, path, l);
		} else {
			if (btree_node_intent_locked(path, l)) {
				six_lock_downgrade(&path->l[l].b->c.lock);
				mark_btree_node_locked_noreset(path, l, BTREE_NODE_READ_LOCKED);
			}
			break;
		}
	}

	bch2_btree_path_verify_locks(path);

	trace_path_downgrade(trans, _RET_IP_, path, old_locks_want);
}

/* Btree transaction locking: */

void bch2_trans_downgrade(struct btree_trans *trans)
{
	struct btree_path *path;
	unsigned i;

	if (trans->restarted)
		return;

	trans_for_each_path(trans, path, i)
		if (path->ref)
			bch2_btree_path_downgrade(trans, path);
}

static inline void __bch2_trans_unlock(struct btree_trans *trans)
{
	struct btree_path *path;
	unsigned i;

	trans_for_each_path(trans, path, i)
		__bch2_btree_path_unlock(trans, path);
}

static noinline __cold int bch2_trans_relock_fail(struct btree_trans *trans, struct btree_path *path,
						  struct get_locks_fail *f, bool trace)
{
	if (!trace)
		goto out;

	if (trace_trans_restart_relock_enabled()) {
		struct printbuf buf = PRINTBUF;

		bch2_bpos_to_text(&buf, path->pos);
		prt_printf(&buf, " l=%u seq=%u node seq=", f->l, path->l[f->l].lock_seq);
		if (IS_ERR_OR_NULL(f->b)) {
			prt_str(&buf, bch2_err_str(PTR_ERR(f->b)));
		} else {
			prt_printf(&buf, "%u", f->b->c.lock.seq);

			struct six_lock_count c =
				bch2_btree_node_lock_counts(trans, NULL, &f->b->c, f->l);
			prt_printf(&buf, " self locked %u.%u.%u", c.n[0], c.n[1], c.n[2]);

			c = six_lock_counts(&f->b->c.lock);
			prt_printf(&buf, " total locked %u.%u.%u", c.n[0], c.n[1], c.n[2]);
		}

		trace_trans_restart_relock(trans, _RET_IP_, buf.buf);
		printbuf_exit(&buf);
	}

	count_event(trans->c, trans_restart_relock);
out:
	__bch2_trans_unlock(trans);
	bch2_trans_verify_locks(trans);
	return btree_trans_restart(trans, BCH_ERR_transaction_restart_relock);
}

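/*
 * Retake the locks on every path marked should_be_locked: if any node was
 * modified while we were unlocked, its lock sequence number will have changed,
 * the relock fails, and the transaction must restart.
 */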
static inline int __bch2_trans_relock(struct btree_trans *trans, bool trace)
{
	bch2_trans_verify_locks(trans);

	if (unlikely(trans->restarted))
		return -((int) trans->restarted);
	if (unlikely(trans->locked))
		goto out;

	struct btree_path *path;
	unsigned i;

	trans_for_each_path(trans, path, i) {
		struct get_locks_fail f;

		if (path->should_be_locked &&
		    !btree_path_get_locks(trans, path, false, &f))
			return bch2_trans_relock_fail(trans, path, &f, trace);
	}

	trans_set_locked(trans, true);
out:
	bch2_trans_verify_locks(trans);
	return 0;
}

int bch2_trans_relock(struct btree_trans *trans)
{
	return __bch2_trans_relock(trans, true);
}

int bch2_trans_relock_notrace(struct btree_trans *trans)
{
	return __bch2_trans_relock(trans, false);
}

void bch2_trans_unlock_noassert(struct btree_trans *trans)
{
	__bch2_trans_unlock(trans);

	trans_set_unlocked(trans);
}

void bch2_trans_unlock(struct btree_trans *trans)
{
	__bch2_trans_unlock(trans);

	trans_set_unlocked(trans);
}

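/*
 * For unlocking when we may be blocked for a long time: also drops the
 * transaction's SRCU read lock, so that deferred frees waiting on the SRCU
 * barrier aren't held up for as long as we sleep.
 */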
void bch2_trans_unlock_long(struct btree_trans *trans)
{
	bch2_trans_unlock(trans);
	bch2_trans_srcu_unlock(trans);
}

void bch2_trans_unlock_write(struct btree_trans *trans)
{
	struct btree_path *path;
	unsigned i;

	trans_for_each_path(trans, path, i)
		for (unsigned l = 0; l < BTREE_MAX_DEPTH; l++)
			if (btree_node_write_locked(path, l))
				bch2_btree_node_unlock_write(trans, path, path->l[l].b);
}

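/*
 * Take an arbitrary mutex without deadlocking against btree node locks: drop
 * all btree locks, take the mutex, then relock. If the relock fails with a
 * transaction restart, drop the mutex again before returning the error.
 */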
int __bch2_trans_mutex_lock(struct btree_trans *trans,
			    struct mutex *lock)
{
	int ret = drop_locks_do(trans, (mutex_lock(lock), 0));

	if (ret)
		mutex_unlock(lock);
	return ret;
}

/* Debug */

#ifdef CONFIG_BCACHEFS_DEBUG

void bch2_btree_path_verify_locks(struct btree_path *path)
{
	/*
	 * A path may be uptodate and yet have nothing locked if and only if
	 * there is no node at path->level, which generally means we were
	 * iterating over all nodes and got to the end of the btree
	 */
	BUG_ON(path->uptodate == BTREE_ITER_UPTODATE &&
	       btree_path_node(path, path->level) &&
	       !path->nodes_locked);

	if (!path->nodes_locked)
		return;

	for (unsigned l = 0; l < BTREE_MAX_DEPTH; l++) {
		int want = btree_lock_want(path, l);
		int have = btree_node_locked_type(path, l);

		BUG_ON(!is_btree_node(path, l) && have != BTREE_NODE_UNLOCKED);

		BUG_ON(is_btree_node(path, l) &&
		       (want == BTREE_NODE_UNLOCKED ||
			have != BTREE_NODE_WRITE_LOCKED) &&
		       want != have);

		BUG_ON(btree_node_locked(path, l) &&
		       path->l[l].lock_seq != six_lock_seq(&path->l[l].b->c.lock));
	}
}

static bool bch2_trans_locked(struct btree_trans *trans)
{
	struct btree_path *path;
	unsigned i;

	trans_for_each_path(trans, path, i)
		if (path->nodes_locked)
			return true;
	return false;
}

void bch2_trans_verify_locks(struct btree_trans *trans)
{
	if (!trans->locked) {
		BUG_ON(bch2_trans_locked(trans));
		return;
	}

	struct btree_path *path;
	unsigned i;

	trans_for_each_path(trans, path, i)
		bch2_btree_path_verify_locks(path);
}

#endif