/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHEFS_BTREE_LOCKING_H
#define _BCACHEFS_BTREE_LOCKING_H

/*
 * Only for internal btree use:
 *
 * The btree iterator tracks which locks it wants to take and which locks it
 * currently holds - here we have wrappers for locking/unlocking btree nodes
 * and updating the iterator state.
 */

#include "btree_iter.h"
#include "six.h"

void bch2_btree_lock_init(struct btree_bkey_cached_common *, enum six_lock_init_flags);

#ifdef CONFIG_LOCKDEP
void bch2_assert_btree_nodes_not_locked(void);
#else
static inline void bch2_assert_btree_nodes_not_locked(void) {}
#endif

void bch2_trans_unlock_noassert(struct btree_trans *);

static inline bool is_btree_node(struct btree_path *path, unsigned l)
{
	return l < BTREE_MAX_DEPTH && !IS_ERR_OR_NULL(path->l[l].b);
}

static inline struct btree_transaction_stats *btree_trans_stats(struct btree_trans *trans)
{
	return trans->fn_idx < ARRAY_SIZE(trans->c->btree_transaction_stats)
		? &trans->c->btree_transaction_stats[trans->fn_idx]
		: NULL;
}

/* matches six lock types */
enum btree_node_locked_type {
	BTREE_NODE_UNLOCKED		= -1,
	BTREE_NODE_READ_LOCKED		= SIX_LOCK_read,
	BTREE_NODE_INTENT_LOCKED	= SIX_LOCK_intent,
	BTREE_NODE_WRITE_LOCKED		= SIX_LOCK_write,
};

static inline int btree_node_locked_type(struct btree_path *path,
					 unsigned level)
{
	return BTREE_NODE_UNLOCKED + ((path->nodes_locked >> (level << 1)) & 3);
}
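
/*
 * Worked example (illustrative, not from the original source): nodes_locked
 * packs one two-bit field per level, holding type + 1, so 0 = unlocked,
 * 1 = read, 2 = intent, 3 = write.  With the leaf read-locked, level 1
 * intent-locked and level 2 write-locked:
 *
 *	nodes_locked == (1 << 0) | (2 << 2) | (3 << 4) == 0b111001
 *
 *	btree_node_locked_type(path, 1)
 *		== -1 + ((0b111001 >> 2) & 3)
 *		== -1 + 2 == BTREE_NODE_INTENT_LOCKED
 */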

static inline bool btree_node_write_locked(struct btree_path *path, unsigned l)
{
	return btree_node_locked_type(path, l) == BTREE_NODE_WRITE_LOCKED;
}

static inline bool btree_node_intent_locked(struct btree_path *path, unsigned l)
{
	return btree_node_locked_type(path, l) == BTREE_NODE_INTENT_LOCKED;
}

static inline bool btree_node_read_locked(struct btree_path *path, unsigned l)
{
	return btree_node_locked_type(path, l) == BTREE_NODE_READ_LOCKED;
}

static inline bool btree_node_locked(struct btree_path *path, unsigned level)
{
	return btree_node_locked_type(path, level) != BTREE_NODE_UNLOCKED;
}

static inline void mark_btree_node_locked_noreset(struct btree_path *path,
						  unsigned level,
						  enum btree_node_locked_type type)
{
	/*
	 * Relying on these values so that type + 1 maps BTREE_NODE_UNLOCKED
	 * (-1) to 0 and the six lock types to 1/2/3 without a branch:
	 */
	BUILD_BUG_ON(SIX_LOCK_read   != 0);
	BUILD_BUG_ON(SIX_LOCK_intent != 1);

	path->nodes_locked &= ~(3U << (level << 1));
	path->nodes_locked |= (type + 1) << (level << 1);
}
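
/*
 * Worked example (illustrative, not from the original source): starting from
 * nodes_locked == 0b0001 (leaf read-locked),
 *
 *	mark_btree_node_locked_noreset(path, 0, BTREE_NODE_INTENT_LOCKED);
 *
 * clears bits 0-1 and ORs in (SIX_LOCK_intent + 1) << 0, leaving
 * nodes_locked == 0b0010.  Unlocking stores BTREE_NODE_UNLOCKED + 1 == 0,
 * which is why the unlocked case needs no special-casing.
 */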

static inline void mark_btree_node_unlocked(struct btree_path *path,
					    unsigned level)
{
	EBUG_ON(btree_node_write_locked(path, level));
	mark_btree_node_locked_noreset(path, level, BTREE_NODE_UNLOCKED);
}

static inline void mark_btree_node_locked(struct btree_trans *trans,
					  struct btree_path *path,
					  unsigned level,
					  enum btree_node_locked_type type)
{
	mark_btree_node_locked_noreset(path, level, (enum btree_node_locked_type) type);
#ifdef CONFIG_BCACHEFS_LOCK_TIME_STATS
	path->l[level].lock_taken_time = local_clock();
#endif
}

static inline enum six_lock_type __btree_lock_want(struct btree_path *path, int level)
{
	return level < path->locks_want
		? SIX_LOCK_intent
		: SIX_LOCK_read;
}

static inline enum btree_node_locked_type
btree_lock_want(struct btree_path *path, int level)
{
	if (level < path->level)
		return BTREE_NODE_UNLOCKED;
	if (level < path->locks_want)
		return BTREE_NODE_INTENT_LOCKED;
	if (level == path->level)
		return BTREE_NODE_READ_LOCKED;
	return BTREE_NODE_UNLOCKED;
}
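
/*
 * Illustrative examples (assumed values, not from the original source):
 *
 *  - An update path with path->level == 0 and locks_want == 1 wants an
 *    intent lock on the leaf (0 < locks_want) and nothing above it.
 *
 *  - A read-only path with path->level == 0 and locks_want == 0 wants a
 *    read lock on the leaf (level == path->level) and nothing above it.
 *
 * Levels below path->level are never locked: the path doesn't point at
 * nodes there.
 */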

static void btree_trans_lock_hold_time_update(struct btree_trans *trans,
					      struct btree_path *path, unsigned level)
{
#ifdef CONFIG_BCACHEFS_LOCK_TIME_STATS
	__bch2_time_stats_update(&btree_trans_stats(trans)->lock_hold_times,
				 path->l[level].lock_taken_time,
				 local_clock());
#endif
}

/* unlock: */

static inline void btree_node_unlock(struct btree_trans *trans,
				     struct btree_path *path, unsigned level)
{
	int lock_type = btree_node_locked_type(path, level);

	EBUG_ON(level >= BTREE_MAX_DEPTH);

	if (lock_type != BTREE_NODE_UNLOCKED) {
		six_unlock_type(&path->l[level].b->c.lock, lock_type);
		btree_trans_lock_hold_time_update(trans, path, level);
	}
	mark_btree_node_unlocked(path, level);
}

static inline int btree_path_lowest_level_locked(struct btree_path *path)
{
	return __ffs(path->nodes_locked) >> 1;
}

static inline int btree_path_highest_level_locked(struct btree_path *path)
{
	return __fls(path->nodes_locked) >> 1;
}
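
/*
 * Worked example (illustrative): each level occupies two bits, so bit
 * position >> 1 recovers the level.  With nodes_locked == 0b111001,
 * __ffs() == 0 -> lowest locked level is 0, and __fls() == 5 -> highest
 * locked level is 5 >> 1 == 2.  Both helpers assume nodes_locked != 0.
 */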

static inline void __bch2_btree_path_unlock(struct btree_trans *trans,
					    struct btree_path *path)
{
	btree_path_set_dirty(path, BTREE_ITER_NEED_RELOCK);

	while (path->nodes_locked)
		btree_node_unlock(trans, path, btree_path_lowest_level_locked(path));
}

/*
 * Updates the saved lock sequence number, so that bch2_btree_node_relock() will
 * succeed:
 */
static inline void
bch2_btree_node_unlock_write_inlined(struct btree_trans *trans, struct btree_path *path,
				     struct btree *b)
{
	struct btree_path *linked;
	unsigned i;

	EBUG_ON(path->l[b->c.level].b != b);
	EBUG_ON(path->l[b->c.level].lock_seq != six_lock_seq(&b->c.lock));
	EBUG_ON(btree_node_locked_type(path, b->c.level) != SIX_LOCK_write);

	mark_btree_node_locked_noreset(path, b->c.level, BTREE_NODE_INTENT_LOCKED);

	trans_for_each_path_with_node(trans, b, linked, i)
		linked->l[b->c.level].lock_seq++;

	six_unlock_write(&b->c.lock);
}
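
/*
 * Explanatory note (added, based on the lock_seq check above): releasing a
 * six write lock bumps the lock's sequence number, and
 * bch2_btree_node_relock() compares that against each path's saved
 * lock_seq.  Pre-incrementing the saved seq on every path pointing at this
 * node keeps them matching across our own write unlock, so those paths can
 * still relock without a transaction restart.
 */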

void bch2_btree_node_unlock_write(struct btree_trans *,
			struct btree_path *, struct btree *);

int bch2_six_check_for_deadlock(struct six_lock *lock, void *p);

/* lock: */

static inline int __btree_node_lock_nopath(struct btree_trans *trans,
					 struct btree_bkey_cached_common *b,
					 enum six_lock_type type,
					 bool lock_may_not_fail,
					 unsigned long ip)
{
	int ret;

	trans->lock_may_not_fail = lock_may_not_fail;
	trans->lock_must_abort	= false;
	trans->locking		= b;

	ret = six_lock_ip_waiter(&b->lock, type, &trans->locking_wait,
				 bch2_six_check_for_deadlock, trans, ip);
	WRITE_ONCE(trans->locking, NULL);
	WRITE_ONCE(trans->locking_wait.start_time, 0);
	return ret;
}

static inline int __must_check
btree_node_lock_nopath(struct btree_trans *trans,
		       struct btree_bkey_cached_common *b,
		       enum six_lock_type type,
		       unsigned long ip)
{
	return __btree_node_lock_nopath(trans, b, type, false, ip);
}

static inline void btree_node_lock_nopath_nofail(struct btree_trans *trans,
					 struct btree_bkey_cached_common *b,
					 enum six_lock_type type)
{
	int ret = __btree_node_lock_nopath(trans, b, type, true, _THIS_IP_);

	BUG_ON(ret);
}
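
/*
 * Usage sketch (illustrative, not from the original source): the _nopath
 * variant can fail when the cycle detector aborts us, so callers must check
 * the return:
 *
 *	ret = btree_node_lock_nopath(trans, &b->c, SIX_LOCK_intent, _THIS_IP_);
 *	if (ret)	// e.g. a -BCH_ERR_transaction_restart_* error
 *		return ret;
 *
 * The _nofail variant is for contexts where deadlock is impossible by
 * construction; there a failure would be a bug, hence the BUG_ON().
 */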

/*
 * Lock a btree node if we already have it locked on one of our linked
 * btree paths:
 */
static inline bool btree_node_lock_increment(struct btree_trans *trans,
					     struct btree_bkey_cached_common *b,
					     unsigned level,
					     enum btree_node_locked_type want)
{
	struct btree_path *path;
	unsigned i;

	trans_for_each_path(trans, path, i)
		if (&path->l[level].b->c == b &&
		    btree_node_locked_type(path, level) >= want) {
			six_lock_increment(&b->lock, (enum six_lock_type) want);
			return true;
		}

	return false;
}
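
/*
 * Note (explanatory, added here): if any path in this transaction already
 * holds the node with at least the wanted lock type, six_lock_increment()
 * takes an additional reference on the held lock rather than acquiring it
 * again, so re-locking a node we already hold can't block or deadlock.
 */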

static inline int btree_node_lock(struct btree_trans *trans,
			struct btree_path *path,
			struct btree_bkey_cached_common *b,
			unsigned level,
			enum six_lock_type type,
			unsigned long ip)
{
	int ret = 0;

	EBUG_ON(level >= BTREE_MAX_DEPTH);

	if (likely(six_trylock_type(&b->lock, type)) ||
	    btree_node_lock_increment(trans, b, level, (enum btree_node_locked_type) type) ||
	    !(ret = btree_node_lock_nopath(trans, b, type, btree_path_ip_allocated(path)))) {
#ifdef CONFIG_BCACHEFS_LOCK_TIME_STATS
		path->l[b->level].lock_taken_time = local_clock();
#endif
	}

	return ret;
}

int __bch2_btree_node_lock_write(struct btree_trans *, struct btree_path *,
				 struct btree_bkey_cached_common *b, bool);

static inline int __btree_node_lock_write(struct btree_trans *trans,
					  struct btree_path *path,
					  struct btree_bkey_cached_common *b,
					  bool lock_may_not_fail)
{
	EBUG_ON(&path->l[b->level].b->c != b);
	EBUG_ON(path->l[b->level].lock_seq != six_lock_seq(&b->lock));
	EBUG_ON(!btree_node_intent_locked(path, b->level));

	/*
	 * six locks are unfair, and read locks block while a thread wants a
	 * write lock: thus, we need to tell the cycle detector we have a write
	 * lock _before_ taking the lock:
	 */
	mark_btree_node_locked_noreset(path, b->level, BTREE_NODE_WRITE_LOCKED);

	return likely(six_trylock_write(&b->lock))
		? 0
		: __bch2_btree_node_lock_write(trans, path, b, lock_may_not_fail);
}
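
/*
 * Example scenario (illustrative): once we're marked WRITE_LOCKED, another
 * transaction blocking on a read lock for this node sees the would-be
 * writer in the waits-for graph built by bch2_six_check_for_deadlock(), so
 * a cycle through us can be detected and one side restarted instead of
 * deadlocking.
 */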

static inline int __must_check
bch2_btree_node_lock_write(struct btree_trans *trans,
			   struct btree_path *path,
			   struct btree_bkey_cached_common *b)
{
	return __btree_node_lock_write(trans, path, b, false);
}

void bch2_btree_node_lock_write_nofail(struct btree_trans *,
				       struct btree_path *,
				       struct btree_bkey_cached_common *);

/* relock: */

bool bch2_btree_path_relock_norestart(struct btree_trans *,
				      struct btree_path *, unsigned long);
int __bch2_btree_path_relock(struct btree_trans *,
			     struct btree_path *, unsigned long);

static inline int bch2_btree_path_relock(struct btree_trans *trans,
				struct btree_path *path, unsigned long trace_ip)
{
	return btree_node_locked(path, path->level)
		? 0
		: __bch2_btree_path_relock(trans, path, trace_ip);
}

bool __bch2_btree_node_relock(struct btree_trans *, struct btree_path *, unsigned, bool trace);

static inline bool bch2_btree_node_relock(struct btree_trans *trans,
					  struct btree_path *path, unsigned level)
{
	EBUG_ON(btree_node_locked(path, level) &&
		!btree_node_write_locked(path, level) &&
		btree_node_locked_type(path, level) != __btree_lock_want(path, level));

	return likely(btree_node_locked(path, level)) ||
		(!IS_ERR_OR_NULL(path->l[level].b) &&
		 __bch2_btree_node_relock(trans, path, level, true));
}

static inline bool bch2_btree_node_relock_notrace(struct btree_trans *trans,
						  struct btree_path *path, unsigned level)
{
	EBUG_ON(btree_node_locked(path, level) &&
		!btree_node_write_locked(path, level) &&
		btree_node_locked_type(path, level) != __btree_lock_want(path, level));

	return likely(btree_node_locked(path, level)) ||
		(!IS_ERR_OR_NULL(path->l[level].b) &&
		 __bch2_btree_node_relock(trans, path, level, false));
}

/* upgrade */

struct get_locks_fail {
	unsigned	l;
	struct btree	*b;
};

bool bch2_btree_path_upgrade_noupgrade_sibs(struct btree_trans *,
			       struct btree_path *, unsigned,
			       struct get_locks_fail *);

bool __bch2_btree_path_upgrade(struct btree_trans *,
			       struct btree_path *, unsigned,
			       struct get_locks_fail *);

static inline int bch2_btree_path_upgrade(struct btree_trans *trans,
					  struct btree_path *path,
					  unsigned new_locks_want)
{
	struct get_locks_fail f;
	unsigned old_locks_want = path->locks_want;

	new_locks_want = min(new_locks_want, BTREE_MAX_DEPTH);

	if (path->locks_want < new_locks_want
	    ? __bch2_btree_path_upgrade(trans, path, new_locks_want, &f)
	    : path->uptodate == BTREE_ITER_UPTODATE)
		return 0;

	trace_and_count(trans->c, trans_restart_upgrade, trans, _THIS_IP_, path,
			old_locks_want, new_locks_want, &f);
	return btree_trans_restart(trans, BCH_ERR_transaction_restart_upgrade);
}
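
/*
 * Usage sketch (illustrative, not from the original source): upgrading can
 * fail if the additional intent locks can't be acquired, in which case the
 * whole transaction is restarted and the caller handles it like any other
 * restart error:
 *
 *	ret = bch2_btree_path_upgrade(trans, path, 1);
 *	if (ret)	// -BCH_ERR_transaction_restart_upgrade
 *		goto err;	// unwind; retry from bch2_trans_begin()
 */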

/* misc: */

static inline void btree_path_set_should_be_locked(struct btree_path *path)
{
	EBUG_ON(!btree_node_locked(path, path->level));
	EBUG_ON(path->uptodate);

	path->should_be_locked = true;
}

static inline void __btree_path_set_level_up(struct btree_trans *trans,
				      struct btree_path *path,
				      unsigned l)
{
	btree_node_unlock(trans, path, l);
	path->l[l].b = ERR_PTR(-BCH_ERR_no_btree_node_up);
}

static inline void btree_path_set_level_up(struct btree_trans *trans,
				    struct btree_path *path)
{
	__btree_path_set_level_up(trans, path, path->level++);
	btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
}

/* debug */

struct six_lock_count bch2_btree_node_lock_counts(struct btree_trans *,
				struct btree_path *,
				struct btree_bkey_cached_common *b,
				unsigned);

int bch2_check_for_deadlock(struct btree_trans *, struct printbuf *);

#ifdef CONFIG_BCACHEFS_DEBUG
void bch2_btree_path_verify_locks(struct btree_path *);
void bch2_trans_verify_locks(struct btree_trans *);
#else
static inline void bch2_btree_path_verify_locks(struct btree_path *path) {}
static inline void bch2_trans_verify_locks(struct btree_trans *trans) {}
#endif

#endif /* _BCACHEFS_BTREE_LOCKING_H */